/*	$OpenBSD: if_nfe.c,v 1.63 2006/06/17 18:00:43 brad Exp $	*/
/*	$DragonFly: src/sys/dev/netif/nfe/if_nfe.c,v 1.16 2007/08/14 13:30:35 sephe Exp $	*/

/*
 * Copyright (c) 2006 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Sepherosa Ziehau <sepherosa@gmail.com> and
 * Matthew Dillon <dillon@apollo.backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Copyright (c) 2006 Damien Bergamini <damien.bergamini@free.fr>
 * Copyright (c) 2005, 2006 Jonathan Gray <jsg@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
/* Driver for NVIDIA nForce MCP Fast Ethernet and Gigabit Ethernet */

#include "opt_polling.h"

#include <sys/param.h>
#include <sys/endian.h>
#include <sys/kernel.h>
/*
 * NOTE: a few <sys/*.h> includes were lost in this copy; <sys/bus.h>,
 * <sys/proc.h> and <sys/rman.h> are required by the resource and
 * interrupt code below and are restored here.
 */
#include <sys/bus.h>
#include <sys/proc.h>
#include <sys/rman.h>
#include <sys/serialize.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>

#include <net/ethernet.h>
#include <net/if.h>
#include <net/bpf.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/ifq_var.h>
#include <net/if_types.h>
#include <net/if_var.h>
#include <net/vlan/if_vlan_var.h>

#include <bus/pci/pcireg.h>
#include <bus/pci/pcivar.h>
#include <bus/pci/pcidevs.h>

#include <dev/netif/mii_layer/mii.h>
#include <dev/netif/mii_layer/miivar.h>

#include "miibus_if.h"

#include <dev/netif/nfe/if_nfereg.h>
#include <dev/netif/nfe/if_nfevar.h>
#define NFE_CSUM_FEATURES	(CSUM_IP | CSUM_TCP | CSUM_UDP)

static int	nfe_probe(device_t);
static int	nfe_attach(device_t);
static int	nfe_detach(device_t);
static void	nfe_shutdown(device_t);
static int	nfe_resume(device_t);
static int	nfe_suspend(device_t);

static int	nfe_miibus_readreg(device_t, int, int);
static void	nfe_miibus_writereg(device_t, int, int, int);
static void	nfe_miibus_statchg(device_t);

#ifdef DEVICE_POLLING
static void	nfe_poll(struct ifnet *, enum poll_cmd, int);
#endif
static void	nfe_intr(void *);
static int	nfe_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
static void	nfe_rxeof(struct nfe_softc *);
static void	nfe_txeof(struct nfe_softc *);
static int	nfe_encap(struct nfe_softc *, struct nfe_tx_ring *,
			  struct mbuf *);
static void	nfe_start(struct ifnet *);
static void	nfe_watchdog(struct ifnet *);
static void	nfe_init(void *);
static void	nfe_stop(struct nfe_softc *);
static struct nfe_jbuf *nfe_jalloc(struct nfe_softc *);
static void	nfe_jfree(void *);
static void	nfe_jref(void *);
static int	nfe_jpool_alloc(struct nfe_softc *, struct nfe_rx_ring *);
static void	nfe_jpool_free(struct nfe_softc *, struct nfe_rx_ring *);
static int	nfe_alloc_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
static void	nfe_reset_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
static int	nfe_init_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
static void	nfe_free_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
static int	nfe_alloc_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
static void	nfe_reset_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
static int	nfe_init_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
static void	nfe_free_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
static int	nfe_ifmedia_upd(struct ifnet *);
static void	nfe_ifmedia_sts(struct ifnet *, struct ifmediareq *);
static void	nfe_setmulti(struct nfe_softc *);
static void	nfe_get_macaddr(struct nfe_softc *, uint8_t *);
static void	nfe_set_macaddr(struct nfe_softc *, const uint8_t *);
static void	nfe_tick(void *);
static void	nfe_ring_dma_addr(void *, bus_dma_segment_t *, int, int);
static void	nfe_buf_dma_addr(void *, bus_dma_segment_t *, int, bus_size_t,
				 int);
static void	nfe_set_paddr_rxdesc(struct nfe_softc *, struct nfe_rx_ring *,
				     int, bus_addr_t);
static void	nfe_set_ready_rxdesc(struct nfe_softc *, struct nfe_rx_ring *,
				     int);
static int	nfe_newbuf_std(struct nfe_softc *, struct nfe_rx_ring *, int,
			       int);
static int	nfe_newbuf_jumbo(struct nfe_softc *, struct nfe_rx_ring *, int,
				 int);

static int	nfe_debug = 0;
static int	nfe_rx_ring_count = NFE_RX_RING_DEF_COUNT;

TUNABLE_INT("hw.nfe.rx_ring_count", &nfe_rx_ring_count);

SYSCTL_NODE(_hw, OID_AUTO, nfe, CTLFLAG_RD, 0, "nVidia GigE parameters");
SYSCTL_INT(_hw_nfe, OID_AUTO, rx_ring_count, CTLFLAG_RD, &nfe_rx_ring_count,
	   NFE_RX_RING_DEF_COUNT, "rx ring count");
SYSCTL_INT(_hw_nfe, OID_AUTO, debug, CTLFLAG_RW, &nfe_debug, 0,
	   "control debugging printfs");
#ifdef NFE_DEBUG

#define DPRINTF(sc, fmt, ...) do {			\
	if (nfe_debug) {				\
		if_printf(&(sc)->arpcom.ac_if,		\
			  fmt, __VA_ARGS__);		\
	}						\
} while (0)

#define DPRINTFN(sc, lv, fmt, ...) do {			\
	if (nfe_debug >= (lv)) {			\
		if_printf(&(sc)->arpcom.ac_if,		\
			  fmt, __VA_ARGS__);		\
	}						\
} while (0)

#else	/* !NFE_DEBUG */

#define DPRINTF(sc, fmt, ...)
#define DPRINTFN(sc, lv, fmt, ...)

#endif	/* NFE_DEBUG */

struct nfe_dma_ctx {
	int			nsegs;
	bus_dma_segment_t	*segs;
};

static const struct nfe_dev {
	uint16_t	vid;
	uint16_t	did;
	const char	*desc;
} nfe_devices[] = {
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE_LAN,
	  "NVIDIA nForce Fast Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE2_LAN,
	  "NVIDIA nForce2 Fast Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN1,
	  "NVIDIA nForce3 Gigabit Ethernet" },

	/* XXX TGEN the next chip can also be found in the nForce2 Ultra 400Gb
	   chipset, and possibly also the 400R; it might be that both nForce2-
	   and nForce3-based boards can use the same MCPs (= southbridges) */
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN2,
	  "NVIDIA nForce3 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN3,
	  "NVIDIA nForce3 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN4,
	  "NVIDIA nForce3 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN5,
	  "NVIDIA nForce3 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_CK804_LAN1,
	  "NVIDIA CK804 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_CK804_LAN2,
	  "NVIDIA CK804 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN1,
	  "NVIDIA MCP04 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN2,
	  "NVIDIA MCP04 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP51_LAN1,
	  "NVIDIA MCP51 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP51_LAN2,
	  "NVIDIA MCP51 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN1,
	  "NVIDIA MCP55 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN2,
	  "NVIDIA MCP55 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN1,
	  "NVIDIA MCP61 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN2,
	  "NVIDIA MCP61 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN3,
	  "NVIDIA MCP61 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN4,
	  "NVIDIA MCP61 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN1,
	  "NVIDIA MCP65 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN2,
	  "NVIDIA MCP65 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN3,
	  "NVIDIA MCP65 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN4,
	  "NVIDIA MCP65 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN1,
	  "NVIDIA MCP67 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN2,
	  "NVIDIA MCP67 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN3,
	  "NVIDIA MCP67 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN4,
	  "NVIDIA MCP67 Gigabit Ethernet" },

	{ 0, 0, NULL }
};
static device_method_t nfe_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		nfe_probe),
	DEVMETHOD(device_attach,	nfe_attach),
	DEVMETHOD(device_detach,	nfe_detach),
	DEVMETHOD(device_suspend,	nfe_suspend),
	DEVMETHOD(device_resume,	nfe_resume),
	DEVMETHOD(device_shutdown,	nfe_shutdown),

	/* Bus interface */
	DEVMETHOD(bus_print_child,	bus_generic_print_child),
	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),

	/* MII interface */
	DEVMETHOD(miibus_readreg,	nfe_miibus_readreg),
	DEVMETHOD(miibus_writereg,	nfe_miibus_writereg),
	DEVMETHOD(miibus_statchg,	nfe_miibus_statchg),

	{ 0, 0 }
};

static driver_t nfe_driver = {
	"nfe",
	nfe_methods,
	sizeof(struct nfe_softc)
};

static devclass_t	nfe_devclass;

DECLARE_DUMMY_MODULE(if_nfe);
MODULE_DEPEND(if_nfe, miibus, 1, 1, 1);
DRIVER_MODULE(if_nfe, pci, nfe_driver, nfe_devclass, 0, 0);
DRIVER_MODULE(miibus, nfe, miibus_driver, miibus_devclass, 0, 0);
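
/*
 * The module glue above hooks nfe into the PCI bus and in turn makes each
 * nfe device an attachment point for miibus; MODULE_DEPEND ensures the
 * MII layer is loaded first.  The miibus_* DEVMETHODs above are the
 * register accessors the PHY layer calls back into, defined below.
 */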
static int
nfe_probe(device_t dev)
{
	const struct nfe_dev *n;
	uint16_t vid, did;

	vid = pci_get_vendor(dev);
	did = pci_get_device(dev);
	for (n = nfe_devices; n->desc != NULL; ++n) {
		if (vid == n->vid && did == n->did) {
			struct nfe_softc *sc = device_get_softc(dev);

			switch (did) {
			case PCI_PRODUCT_NVIDIA_NFORCE3_LAN2:
			case PCI_PRODUCT_NVIDIA_NFORCE3_LAN3:
			case PCI_PRODUCT_NVIDIA_NFORCE3_LAN4:
			case PCI_PRODUCT_NVIDIA_NFORCE3_LAN5:
				sc->sc_flags = NFE_JUMBO_SUP |
					       NFE_HW_CSUM;
				break;
			case PCI_PRODUCT_NVIDIA_MCP51_LAN1:
			case PCI_PRODUCT_NVIDIA_MCP51_LAN2:
			case PCI_PRODUCT_NVIDIA_MCP61_LAN1:
			case PCI_PRODUCT_NVIDIA_MCP61_LAN2:
			case PCI_PRODUCT_NVIDIA_MCP61_LAN3:
			case PCI_PRODUCT_NVIDIA_MCP61_LAN4:
			case PCI_PRODUCT_NVIDIA_MCP67_LAN1:
			case PCI_PRODUCT_NVIDIA_MCP67_LAN2:
			case PCI_PRODUCT_NVIDIA_MCP67_LAN3:
			case PCI_PRODUCT_NVIDIA_MCP67_LAN4:
				sc->sc_flags = NFE_40BIT_ADDR;
				break;
			case PCI_PRODUCT_NVIDIA_CK804_LAN1:
			case PCI_PRODUCT_NVIDIA_CK804_LAN2:
			case PCI_PRODUCT_NVIDIA_MCP04_LAN1:
			case PCI_PRODUCT_NVIDIA_MCP04_LAN2:
			case PCI_PRODUCT_NVIDIA_MCP65_LAN1:
			case PCI_PRODUCT_NVIDIA_MCP65_LAN2:
			case PCI_PRODUCT_NVIDIA_MCP65_LAN3:
			case PCI_PRODUCT_NVIDIA_MCP65_LAN4:
				sc->sc_flags = NFE_JUMBO_SUP |
					       NFE_40BIT_ADDR |
					       NFE_HW_CSUM;
				break;
			case PCI_PRODUCT_NVIDIA_MCP55_LAN1:
			case PCI_PRODUCT_NVIDIA_MCP55_LAN2:
				sc->sc_flags = NFE_JUMBO_SUP |
					       NFE_40BIT_ADDR |
					       NFE_HW_CSUM |
					       NFE_HW_VLAN;
				break;
			}

			device_set_desc(dev, n->desc);
			device_set_async_attach(dev, TRUE);
			return 0;
		}
	}
	return ENXIO;
}
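
/*
 * Attach follows the usual BSD NIC pattern: map the register BAR,
 * allocate the IRQ, read the station address, create the DMA rings,
 * probe the PHY and only then publish the ifnet.  Note that the error
 * paths all funnel into nfe_detach(), which is written to tolerate a
 * partially constructed softc (all resource pointers are NULL-checked).
 */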
static int
nfe_attach(device_t dev)
{
	struct nfe_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint8_t eaddr[ETHER_ADDR_LEN];
	int error;

	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	lwkt_serialize_init(&sc->sc_jbuf_serializer);

	sc->sc_mem_rid = PCIR_BAR(0);

#ifndef BURN_BRIDGES
	if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0) {
		uint32_t mem, irq;

		mem = pci_read_config(dev, sc->sc_mem_rid, 4);
		irq = pci_read_config(dev, PCIR_INTLINE, 4);

		device_printf(dev, "chip is in D%d power mode "
			      "-- setting to D0\n", pci_get_powerstate(dev));

		pci_set_powerstate(dev, PCI_POWERSTATE_D0);

		pci_write_config(dev, sc->sc_mem_rid, mem, 4);
		pci_write_config(dev, PCIR_INTLINE, irq, 4);
	}
#endif	/* !BURN_BRIDGES */

	/* Enable bus mastering */
	pci_enable_busmaster(dev);

	/* Allocate IO memory */
	sc->sc_mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
						&sc->sc_mem_rid, RF_ACTIVE);
	if (sc->sc_mem_res == NULL) {
		device_printf(dev, "could not allocate io memory\n");
		return ENXIO;
	}
	sc->sc_memh = rman_get_bushandle(sc->sc_mem_res);
	sc->sc_memt = rman_get_bustag(sc->sc_mem_res);

	/* Allocate IRQ */
	sc->sc_irq_rid = 0;
	sc->sc_irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
						&sc->sc_irq_rid,
						RF_SHAREABLE | RF_ACTIVE);
	if (sc->sc_irq_res == NULL) {
		device_printf(dev, "could not allocate irq\n");
		error = ENXIO;
		goto fail;
	}

	nfe_get_macaddr(sc, eaddr);

	/*
	 * Allocate Tx and Rx rings.
	 */
	error = nfe_alloc_tx_ring(sc, &sc->txq);
	if (error) {
		device_printf(dev, "could not allocate Tx ring\n");
		goto fail;
	}

	error = nfe_alloc_rx_ring(sc, &sc->rxq);
	if (error) {
		device_printf(dev, "could not allocate Rx ring\n");
		goto fail;
	}

	error = mii_phy_probe(dev, &sc->sc_miibus, nfe_ifmedia_upd,
			      nfe_ifmedia_sts);
	if (error) {
		device_printf(dev, "MII without any phy\n");
		goto fail;
	}

	ifp->if_softc = sc;
	ifp->if_mtu = ETHERMTU;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = nfe_ioctl;
	ifp->if_start = nfe_start;
#ifdef DEVICE_POLLING
	ifp->if_poll = nfe_poll;
#endif
	ifp->if_watchdog = nfe_watchdog;
	ifp->if_init = nfe_init;
	ifq_set_maxlen(&ifp->if_snd, NFE_IFQ_MAXLEN);
	ifq_set_ready(&ifp->if_snd);

	ifp->if_capabilities = IFCAP_VLAN_MTU;

	if (sc->sc_flags & NFE_HW_VLAN)
		ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;

#ifdef NFE_CSUM
	if (sc->sc_flags & NFE_HW_CSUM) {
		ifp->if_capabilities |= IFCAP_HWCSUM;
		ifp->if_hwassist = NFE_CSUM_FEATURES;
	}
#else
	sc->sc_flags &= ~NFE_HW_CSUM;
#endif
	ifp->if_capenable = ifp->if_capabilities;

	callout_init(&sc->sc_tick_ch);

	ether_ifattach(ifp, eaddr, NULL);

	error = bus_setup_intr(dev, sc->sc_irq_res, INTR_MPSAFE, nfe_intr, sc,
			       &sc->sc_ih, ifp->if_serializer);
	if (error) {
		device_printf(dev, "could not setup intr\n");
		ether_ifdetach(ifp);
		goto fail;
	}

	return 0;
fail:
	nfe_detach(dev);
	return error;
}
static int
nfe_detach(device_t dev)
{
	struct nfe_softc *sc = device_get_softc(dev);

	if (device_is_attached(dev)) {
		struct ifnet *ifp = &sc->arpcom.ac_if;

		lwkt_serialize_enter(ifp->if_serializer);
		nfe_stop(sc);
		bus_teardown_intr(dev, sc->sc_irq_res, sc->sc_ih);
		lwkt_serialize_exit(ifp->if_serializer);

		ether_ifdetach(ifp);
	}

	if (sc->sc_miibus != NULL)
		device_delete_child(dev, sc->sc_miibus);
	bus_generic_detach(dev);

	if (sc->sc_irq_res != NULL) {
		bus_release_resource(dev, SYS_RES_IRQ, sc->sc_irq_rid,
				     sc->sc_irq_res);
	}

	if (sc->sc_mem_res != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY, sc->sc_mem_rid,
				     sc->sc_mem_res);
	}

	nfe_free_tx_ring(sc, &sc->txq);
	nfe_free_rx_ring(sc, &sc->rxq);

	return 0;
}

static void
nfe_shutdown(device_t dev)
{
	struct nfe_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;

	lwkt_serialize_enter(ifp->if_serializer);
	nfe_stop(sc);
	lwkt_serialize_exit(ifp->if_serializer);
}

static int
nfe_suspend(device_t dev)
{
	struct nfe_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;

	lwkt_serialize_enter(ifp->if_serializer);
	nfe_stop(sc);
	lwkt_serialize_exit(ifp->if_serializer);

	return 0;
}

static int
nfe_resume(device_t dev)
{
	struct nfe_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;

	lwkt_serialize_enter(ifp->if_serializer);
	if (ifp->if_flags & IFF_UP)
		nfe_init(sc);
	lwkt_serialize_exit(ifp->if_serializer);

	return 0;
}
static void
nfe_miibus_statchg(device_t dev)
{
	struct nfe_softc *sc = device_get_softc(dev);
	struct mii_data *mii = device_get_softc(sc->sc_miibus);
	uint32_t phy, seed, misc = NFE_MISC1_MAGIC, link = NFE_MEDIA_SET;

	phy = NFE_READ(sc, NFE_PHY_IFACE);
	phy &= ~(NFE_PHY_HDX | NFE_PHY_100TX | NFE_PHY_1000T);

	seed = NFE_READ(sc, NFE_RNDSEED);
	seed &= ~NFE_SEED_MASK;

	if ((mii->mii_media_active & IFM_GMASK) == IFM_HDX) {
		phy  |= NFE_PHY_HDX;	/* half-duplex */
		misc |= NFE_MISC1_HDX;
	}

	switch (IFM_SUBTYPE(mii->mii_media_active)) {
	case IFM_1000_T:	/* full-duplex only */
		link |= NFE_MEDIA_1000T;
		seed |= NFE_SEED_1000T;
		phy  |= NFE_PHY_1000T;
		break;
	case IFM_100_TX:
		link |= NFE_MEDIA_100TX;
		seed |= NFE_SEED_100TX;
		phy  |= NFE_PHY_100TX;
		break;
	case IFM_10_T:
		link |= NFE_MEDIA_10T;
		seed |= NFE_SEED_10T;
		break;
	}

	NFE_WRITE(sc, NFE_RNDSEED, seed);	/* XXX: gigabit NICs only? */

	NFE_WRITE(sc, NFE_PHY_IFACE, phy);
	NFE_WRITE(sc, NFE_MISC1, misc);
	NFE_WRITE(sc, NFE_LINKSPEED, link);
}

static int
nfe_miibus_readreg(device_t dev, int phy, int reg)
{
	struct nfe_softc *sc = device_get_softc(dev);
	uint32_t val;
	int ntries;

	NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);

	if (NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY) {
		NFE_WRITE(sc, NFE_PHY_CTL, NFE_PHY_BUSY);
		DELAY(100);
	}

	NFE_WRITE(sc, NFE_PHY_CTL, (phy << NFE_PHYADD_SHIFT) | reg);

	/* Poll for completion: up to 1000 * 100us (~100ms) */
	for (ntries = 0; ntries < 1000; ntries++) {
		DELAY(100);
		if (!(NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY))
			break;
	}
	if (ntries == 1000) {
		DPRINTFN(sc, 2, "timeout waiting for PHY %s\n", "");
		return 0;
	}

	if (NFE_READ(sc, NFE_PHY_STATUS) & NFE_PHY_ERROR) {
		DPRINTFN(sc, 2, "could not read PHY %s\n", "");
		return 0;
	}

	val = NFE_READ(sc, NFE_PHY_DATA);
	if (val != 0xffffffff && val != 0)
		sc->mii_phyaddr = phy;

	DPRINTFN(sc, 2, "mii read phy %d reg 0x%x ret 0x%x\n", phy, reg, val);

	return val;
}

static void
nfe_miibus_writereg(device_t dev, int phy, int reg, int val)
{
	struct nfe_softc *sc = device_get_softc(dev);
	uint32_t ctl;
	int ntries;

	NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);

	if (NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY) {
		NFE_WRITE(sc, NFE_PHY_CTL, NFE_PHY_BUSY);
		DELAY(100);
	}

	NFE_WRITE(sc, NFE_PHY_DATA, val);
	ctl = NFE_PHY_WRITE | (phy << NFE_PHYADD_SHIFT) | reg;
	NFE_WRITE(sc, NFE_PHY_CTL, ctl);

	for (ntries = 0; ntries < 1000; ntries++) {
		DELAY(100);
		if (!(NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY))
			break;
	}

#ifdef NFE_DEBUG
	if (ntries == 1000)
		DPRINTFN(sc, 2, "could not write to PHY %s\n", "");
#endif
}
#ifdef DEVICE_POLLING

static void
nfe_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
{
	struct nfe_softc *sc = ifp->if_softc;

	switch (cmd) {
	case POLL_REGISTER:
		/* Disable interrupts */
		NFE_WRITE(sc, NFE_IRQ_MASK, 0);
		break;
	case POLL_DEREGISTER:
		/* enable interrupts */
		NFE_WRITE(sc, NFE_IRQ_MASK, NFE_IRQ_WANTED);
		break;
	case POLL_AND_CHECK_STATUS:
		/* FALL THROUGH */
	case POLL_ONLY:
		if (ifp->if_flags & IFF_RUNNING) {
			nfe_rxeof(sc);
			nfe_txeof(sc);
		}
		break;
	}
}

#endif	/* DEVICE_POLLING */

static void
nfe_intr(void *arg)
{
	struct nfe_softc *sc = arg;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t r;

	r = NFE_READ(sc, NFE_IRQ_STATUS);
	if (r == 0)
		return;	/* not for us */
	NFE_WRITE(sc, NFE_IRQ_STATUS, r);

	DPRINTFN(sc, 5, "%s: interrupt register %x\n", __func__, r);

	if (r & NFE_IRQ_LINK) {
		NFE_READ(sc, NFE_PHY_STATUS);
		NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);
		DPRINTF(sc, "link state changed %s\n", "");
	}

	if (ifp->if_flags & IFF_RUNNING) {
		/* check Rx ring */
		nfe_rxeof(sc);

		/* check Tx ring */
		nfe_txeof(sc);
	}
}
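
/*
 * The ifnet entry points below (ioctl/start/watchdog/init) are expected
 * to be called with ifp->if_serializer held; the interrupt was registered
 * with that same serializer in nfe_attach(), and nfe_tick() acquires it
 * explicitly, so no additional driver lock appears to be needed here.
 */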
static int
nfe_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data, struct ucred *cr)
{
	struct nfe_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	struct mii_data *mii;
	int error = 0, mask;

	switch (cmd) {
	case SIOCSIFMTU:
		if (((sc->sc_flags & NFE_JUMBO_SUP) &&
		     ifr->ifr_mtu > NFE_JUMBO_MTU) ||
		    ((sc->sc_flags & NFE_JUMBO_SUP) == 0 &&
		     ifr->ifr_mtu > ETHERMTU)) {
			error = EINVAL;
		} else if (ifp->if_mtu != ifr->ifr_mtu) {
			ifp->if_mtu = ifr->ifr_mtu;
			nfe_init(sc);
		}
		break;
	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			/*
			 * If only the PROMISC or ALLMULTI flag changes, then
			 * don't do a full re-init of the chip, just update
			 * the Rx filter.
			 */
			if ((ifp->if_flags & IFF_RUNNING) &&
			    ((ifp->if_flags ^ sc->sc_if_flags) &
			     (IFF_ALLMULTI | IFF_PROMISC)) != 0) {
				nfe_setmulti(sc);
			} else {
				if (!(ifp->if_flags & IFF_RUNNING))
					nfe_init(sc);
			}
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				nfe_stop(sc);
		}
		sc->sc_if_flags = ifp->if_flags;
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		if (ifp->if_flags & IFF_RUNNING)
			nfe_setmulti(sc);
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		mii = device_get_softc(sc->sc_miibus);
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
		break;
	case SIOCSIFCAP:
		mask = (ifr->ifr_reqcap ^ ifp->if_capenable) & IFCAP_HWCSUM;
		if (mask && (ifp->if_capabilities & IFCAP_HWCSUM)) {
			ifp->if_capenable ^= mask;
			if (IFCAP_TXCSUM & ifp->if_capenable)
				ifp->if_hwassist = NFE_CSUM_FEATURES;
			else
				ifp->if_hwassist = 0;

			if (ifp->if_flags & IFF_RUNNING)
				nfe_init(sc);
		}
		break;
	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}
	return error;
}
static void
nfe_rxeof(struct nfe_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct nfe_rx_ring *ring = &sc->rxq;
	int reap = 0;

	bus_dmamap_sync(ring->tag, ring->map, BUS_DMASYNC_POSTREAD);

	for (;;) {
		struct nfe_rx_data *data = &ring->data[ring->cur];
		struct mbuf *m;
		uint16_t flags;
		int len, error;

		if (sc->sc_flags & NFE_40BIT_ADDR) {
			struct nfe_desc64 *desc64 = &ring->desc64[ring->cur];

			flags = le16toh(desc64->flags);
			len = le16toh(desc64->length) & 0x3fff;
		} else {
			struct nfe_desc32 *desc32 = &ring->desc32[ring->cur];

			flags = le16toh(desc32->flags);
			len = le16toh(desc32->length) & 0x3fff;
		}

		/* NFE_RX_READY still set means the hardware owns this slot */
		if (flags & NFE_RX_READY)
			break;

		reap = 1;

		if ((sc->sc_flags & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) {
			if (!(flags & NFE_RX_VALID_V1))
				goto skip;

			if ((flags & NFE_RX_FIXME_V1) == NFE_RX_FIXME_V1) {
				flags &= ~NFE_RX_ERROR;
				len--;	/* fix buffer length */
			}
		} else {
			if (!(flags & NFE_RX_VALID_V2))
				goto skip;

			if ((flags & NFE_RX_FIXME_V2) == NFE_RX_FIXME_V2) {
				flags &= ~NFE_RX_ERROR;
				len--;	/* fix buffer length */
			}
		}

		if (flags & NFE_RX_ERROR) {
			ifp->if_ierrors++;
			goto skip;
		}

		m = data->m;

		if (sc->sc_flags & NFE_USE_JUMBO)
			error = nfe_newbuf_jumbo(sc, ring, ring->cur, 0);
		else
			error = nfe_newbuf_std(sc, ring, ring->cur, 0);
		if (error) {
			ifp->if_ierrors++;
			goto skip;
		}

		/* finalize mbuf */
		m->m_pkthdr.len = m->m_len = len;
		m->m_pkthdr.rcvif = ifp;

		if ((ifp->if_capenable & IFCAP_RXCSUM) &&
		    (flags & NFE_RX_CSUMOK)) {
			if (flags & NFE_RX_IP_CSUMOK_V2) {
				m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED |
							  CSUM_IP_VALID;
			}
			if (flags &
			    (NFE_RX_UDP_CSUMOK_V2 | NFE_RX_TCP_CSUMOK_V2)) {
				m->m_pkthdr.csum_flags |= CSUM_DATA_VALID |
							  CSUM_PSEUDO_HDR |
							  CSUM_FRAG_NOT_CHECKED;
				m->m_pkthdr.csum_data = 0xffff;
			}
		}

		ifp->if_ipackets++;
		ifp->if_input(ifp, m);
skip:
		nfe_set_ready_rxdesc(sc, ring, ring->cur);
		sc->rxq.cur = (sc->rxq.cur + 1) % nfe_rx_ring_count;
	}

	if (reap)
		bus_dmamap_sync(ring->tag, ring->map, BUS_DMASYNC_PREWRITE);
}
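
/*
 * Tx reclaim walks the ring from 'next' to 'cur' and stops at the first
 * descriptor that still has NFE_TX_VALID set, i.e. one the hardware has
 * not finished with yet.  Only the slot carrying the LASTFRAG bit owns
 * the mbuf and the DMA map; intermediate fragments are simply skipped.
 */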
static void
nfe_txeof(struct nfe_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct nfe_tx_ring *ring = &sc->txq;
	struct nfe_tx_data *data = NULL;
	uint16_t flags;

	bus_dmamap_sync(ring->tag, ring->map, BUS_DMASYNC_POSTREAD);
	while (ring->next != ring->cur) {
		if (sc->sc_flags & NFE_40BIT_ADDR)
			flags = le16toh(ring->desc64[ring->next].flags);
		else
			flags = le16toh(ring->desc32[ring->next].flags);

		if (flags & NFE_TX_VALID)
			break;

		data = &ring->data[ring->next];

		if ((sc->sc_flags & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) {
			if (!(flags & NFE_TX_LASTFRAG_V1) && data->m == NULL)
				goto skip;

			if ((flags & NFE_TX_ERROR_V1) != 0) {
				if_printf(ifp, "tx v1 error 0x%4b\n", flags,
					  NFE_V1_TXERR);
				ifp->if_oerrors++;
			} else {
				ifp->if_opackets++;
			}
		} else {
			if (!(flags & NFE_TX_LASTFRAG_V2) && data->m == NULL)
				goto skip;

			if ((flags & NFE_TX_ERROR_V2) != 0) {
				if_printf(ifp, "tx v2 error 0x%4b\n", flags,
					  NFE_V2_TXERR);
				ifp->if_oerrors++;
			} else {
				ifp->if_opackets++;
			}
		}

		if (data->m == NULL) {	/* should not get there */
			if_printf(ifp,
				  "last fragment bit w/o associated mbuf!\n");
			goto skip;
		}

		/* last fragment of the mbuf chain transmitted */
		bus_dmamap_sync(ring->data_tag, data->map,
				BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(ring->data_tag, data->map);
		m_freem(data->m);
		data->m = NULL;
skip:
		ring->queued--;
		KKASSERT(ring->queued >= 0);
		ring->next = (ring->next + 1) % NFE_TX_RING_COUNT;
	}

	if (ring->queued == 0)		/* ring drained; cancel watchdog */
		ifp->if_timer = 0;

	if (data != NULL) {	/* at least one slot freed */
		ifp->if_flags &= ~IFF_OACTIVE;
		ifp->if_start(ifp);
	}
}
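
/*
 * nfe_encap() maps the mbuf chain (defragmenting once via m_defrag() if
 * bus_dmamap_load_mbuf() returns EFBIG), fills one descriptor per DMA
 * segment, and only then flags the descriptors NFE_TX_VALID in reverse
 * order.  For example, a three-segment packet occupying slots 5..7 is
 * validated 7, 6, then 5, so the chip can never see slot 5 marked ready
 * while slots 6 and 7 are still being initialized.
 */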
static int
nfe_encap(struct nfe_softc *sc, struct nfe_tx_ring *ring, struct mbuf *m0)
{
	struct nfe_dma_ctx ctx;
	bus_dma_segment_t segs[NFE_MAX_SCATTER];
	struct nfe_tx_data *data, *data_map;
	bus_dmamap_t map;
	struct nfe_desc64 *desc64 = NULL;
	struct nfe_desc32 *desc32 = NULL;
	uint16_t flags = 0;
	uint32_t vtag = 0;
	int error, i, j;

	data = &ring->data[ring->cur];
	map = data->map;
	data_map = data;	/* Remember who owns the DMA map */

	ctx.nsegs = NFE_MAX_SCATTER;
	ctx.segs = segs;
	error = bus_dmamap_load_mbuf(ring->data_tag, map, m0,
				     nfe_buf_dma_addr, &ctx, BUS_DMA_NOWAIT);
	if (error && error != EFBIG) {
		if_printf(&sc->arpcom.ac_if, "could not map TX mbuf\n");
		goto back;
	}

	if (error) {	/* error == EFBIG */
		struct mbuf *m_new;

		m_new = m_defrag(m0, MB_DONTWAIT);
		if (m_new == NULL) {
			if_printf(&sc->arpcom.ac_if,
				  "could not defrag TX mbuf\n");
			error = ENOBUFS;
			goto back;
		} else {
			m0 = m_new;
		}

		ctx.nsegs = NFE_MAX_SCATTER;
		ctx.segs = segs;
		error = bus_dmamap_load_mbuf(ring->data_tag, map, m0,
					     nfe_buf_dma_addr, &ctx,
					     BUS_DMA_NOWAIT);
		if (error) {
			if_printf(&sc->arpcom.ac_if,
				  "could not map defragged TX mbuf\n");
			goto back;
		}
	}

	if (ring->queued + ctx.nsegs >= NFE_TX_RING_COUNT - 1) {
		bus_dmamap_unload(ring->data_tag, map);
		error = ENOBUFS;
		goto back;
	}

	/* setup h/w VLAN tagging */
	if ((m0->m_flags & (M_PROTO1 | M_PKTHDR)) == (M_PROTO1 | M_PKTHDR) &&
	    m0->m_pkthdr.rcvif != NULL &&
	    m0->m_pkthdr.rcvif->if_type == IFT_L2VLAN) {
		struct ifvlan *ifv = m0->m_pkthdr.rcvif->if_softc;

		if (ifv != NULL)
			vtag = NFE_TX_VTAG | htons(ifv->ifv_tag);
	}

	if (sc->arpcom.ac_if.if_capenable & IFCAP_TXCSUM) {
		if (m0->m_pkthdr.csum_flags & CSUM_IP)
			flags |= NFE_TX_IP_CSUM;
		if (m0->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP))
			flags |= NFE_TX_TCP_CSUM;
	}

	/*
	 * XXX urm. somebody is unaware of how hardware works.  You
	 * absolutely CANNOT set NFE_TX_VALID on the next descriptor in
	 * the ring until the entire chain is actually *VALID*.  Otherwise
	 * the hardware may encounter a partially initialized chain that
	 * is marked as being ready to go when it in fact is not ready to
	 * go.
	 */
	for (i = 0; i < ctx.nsegs; i++) {
		j = (ring->cur + i) % NFE_TX_RING_COUNT;
		data = &ring->data[j];

		if (sc->sc_flags & NFE_40BIT_ADDR) {
			desc64 = &ring->desc64[j];
#if defined(__LP64__)
			desc64->physaddr[0] =
			    htole32(segs[i].ds_addr >> 32);
#endif
			desc64->physaddr[1] =
			    htole32(segs[i].ds_addr & 0xffffffff);
			desc64->length = htole16(segs[i].ds_len - 1);
			desc64->vtag = htole32(vtag);
			desc64->flags = htole16(flags);
		} else {
			desc32 = &ring->desc32[j];
			desc32->physaddr = htole32(segs[i].ds_addr);
			desc32->length = htole16(segs[i].ds_len - 1);
			desc32->flags = htole16(flags);
		}

		/* csum flags and vtag belong to the first fragment only */
		flags &= ~(NFE_TX_IP_CSUM | NFE_TX_TCP_CSUM);
		vtag = 0;

		ring->queued++;
		KKASSERT(ring->queued <= NFE_TX_RING_COUNT);
	}

	/* the whole mbuf chain has been DMA mapped, fix last descriptor */
	if (sc->sc_flags & NFE_40BIT_ADDR) {
		desc64->flags |= htole16(NFE_TX_LASTFRAG_V2);
	} else {
		if (sc->sc_flags & NFE_JUMBO_SUP)
			flags = NFE_TX_LASTFRAG_V2;
		else
			flags = NFE_TX_LASTFRAG_V1;
		desc32->flags |= htole16(flags);
	}

	/*
	 * Set NFE_TX_VALID backwards so the hardware doesn't see the
	 * whole mess until the first descriptor in the map is flagged.
	 */
	for (i = ctx.nsegs - 1; i >= 0; --i) {
		j = (ring->cur + i) % NFE_TX_RING_COUNT;
		if (sc->sc_flags & NFE_40BIT_ADDR) {
			desc64 = &ring->desc64[j];
			desc64->flags |= htole16(NFE_TX_VALID);
		} else {
			desc32 = &ring->desc32[j];
			desc32->flags |= htole16(NFE_TX_VALID);
		}
	}
	ring->cur = (ring->cur + ctx.nsegs) % NFE_TX_RING_COUNT;

	/* Exchange DMA map */
	data_map->map = data->map;
	data->map = map;
	data->m = m0;

	bus_dmamap_sync(ring->data_tag, map, BUS_DMASYNC_PREWRITE);
back:
	if (error)
		m_freem(m0);
	return error;
}
static void
nfe_start(struct ifnet *ifp)
{
	struct nfe_softc *sc = ifp->if_softc;
	struct nfe_tx_ring *ring = &sc->txq;
	struct mbuf *m0;
	int count = 0;

	if (ifp->if_flags & IFF_OACTIVE)
		return;

	if (ifq_is_empty(&ifp->if_snd))
		return;

	for (;;) {
		m0 = ifq_dequeue(&ifp->if_snd, NULL);
		if (m0 == NULL)
			break;

		if (nfe_encap(sc, ring, m0) != 0) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}
		++count;

		/*
		 * NOTE:
		 * `m0' may be freed in nfe_encap(), so
		 * it should not be touched any more.
		 */
	}
	if (count == 0)	/* nothing sent */
		return;

	/* Sync TX descriptor ring */
	bus_dmamap_sync(ring->tag, ring->map, BUS_DMASYNC_PREWRITE);

	/* Kick Tx */
	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_KICKTX | sc->rxtxctl);

	/*
	 * Set a timeout in case the chip goes out to lunch.
	 */
	ifp->if_timer = 5;
}

static void
nfe_watchdog(struct ifnet *ifp)
{
	struct nfe_softc *sc = ifp->if_softc;

	if (ifp->if_flags & IFF_RUNNING) {
		if_printf(ifp, "watchdog timeout - lost interrupt recovered\n");
		nfe_txeof(sc);
		return;
	}

	if_printf(ifp, "watchdog timeout\n");

	nfe_init(ifp->if_softc);

	ifp->if_oerrors++;
}
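
/*
 * Bring-up order matters here: stop the chip, pick normal vs jumbo
 * buffers while the rings are quiescent, (re)initialize both rings,
 * program rxtxctl/MAC address/ring base addresses, wake the MAC, and
 * only enable Rx, Tx and interrupts at the very end.
 */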
static void
nfe_init(void *xsc)
{
	struct nfe_softc *sc = xsc;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t tmp;
	int error;

	nfe_stop(sc);

	/*
	 * NOTE:
	 * Switching between jumbo frames and normal frames should
	 * be done _after_ nfe_stop() but _before_ nfe_init_rx_ring().
	 */
	if (ifp->if_mtu > ETHERMTU) {
		sc->sc_flags |= NFE_USE_JUMBO;
		sc->rxq.bufsz = NFE_JBYTES;
		if_printf(ifp, "use jumbo frames\n");
	} else {
		sc->sc_flags &= ~NFE_USE_JUMBO;
		sc->rxq.bufsz = MCLBYTES;
		if_printf(ifp, "use non-jumbo frames\n");
	}

	error = nfe_init_tx_ring(sc, &sc->txq);
	if (error) {
		nfe_stop(sc);
		return;
	}

	error = nfe_init_rx_ring(sc, &sc->rxq);
	if (error) {
		nfe_stop(sc);
		return;
	}

	NFE_WRITE(sc, NFE_TX_UNK, 0);
	NFE_WRITE(sc, NFE_STATUS, 0);

	sc->rxtxctl = NFE_RXTX_BIT2;
	if (sc->sc_flags & NFE_40BIT_ADDR)
		sc->rxtxctl |= NFE_RXTX_V3MAGIC;
	else if (sc->sc_flags & NFE_JUMBO_SUP)
		sc->rxtxctl |= NFE_RXTX_V2MAGIC;

	if (ifp->if_capenable & IFCAP_RXCSUM)
		sc->rxtxctl |= NFE_RXTX_RXCSUM;

	/*
	 * Although the adapter is capable of stripping VLAN tags from received
	 * frames (NFE_RXTX_VTAG_STRIP), we do not enable this functionality on
	 * purpose. This will be done in software by our network stack.
	 */
	if (sc->sc_flags & NFE_HW_VLAN)
		sc->rxtxctl |= NFE_RXTX_VTAG_INSERT;

	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_RESET | sc->rxtxctl);
	DELAY(10);
	NFE_WRITE(sc, NFE_RXTX_CTL, sc->rxtxctl);

	if (sc->sc_flags & NFE_HW_VLAN)
		NFE_WRITE(sc, NFE_VTAG_CTL, NFE_VTAG_ENABLE);

	NFE_WRITE(sc, NFE_SETUP_R6, 0);

	/* set MAC address */
	nfe_set_macaddr(sc, sc->arpcom.ac_enaddr);

	/* tell MAC where rings are in memory */
#ifdef __LP64__
	NFE_WRITE(sc, NFE_RX_RING_ADDR_HI, sc->rxq.physaddr >> 32);
#endif
	NFE_WRITE(sc, NFE_RX_RING_ADDR_LO, sc->rxq.physaddr & 0xffffffff);
#ifdef __LP64__
	NFE_WRITE(sc, NFE_TX_RING_ADDR_HI, sc->txq.physaddr >> 32);
#endif
	NFE_WRITE(sc, NFE_TX_RING_ADDR_LO, sc->txq.physaddr & 0xffffffff);

	/* ring sizes: Rx count in the upper 16 bits, Tx in the lower */
	NFE_WRITE(sc, NFE_RING_SIZE,
		  (nfe_rx_ring_count - 1) << 16 |
		  (NFE_TX_RING_COUNT - 1));

	NFE_WRITE(sc, NFE_RXBUFSZ, sc->rxq.bufsz);

	/* force MAC to wakeup */
	tmp = NFE_READ(sc, NFE_PWR_STATE);
	NFE_WRITE(sc, NFE_PWR_STATE, tmp | NFE_PWR_WAKEUP);
	DELAY(10);
	tmp = NFE_READ(sc, NFE_PWR_STATE);
	NFE_WRITE(sc, NFE_PWR_STATE, tmp | NFE_PWR_VALID);

	/*
	 * NFE_IMTIMER generates a periodic interrupt via NFE_IRQ_TIMER.
	 * It is unclear how wide the timer is.  Base programming does
	 * not seem to affect NFE_IRQ_TX_DONE or NFE_IRQ_RX_DONE so
	 * we don't get any interrupt moderation.  TX moderation is
	 * possible by using the timer interrupt instead of TX_DONE.
	 *
	 * It is unclear whether there are other bits that can be
	 * set to make the NFE device actually do interrupt moderation
	 * on the RX side.
	 *
	 * For now set a 128uS interval as a placemark, but don't use
	 * the timer.
	 */
	NFE_WRITE(sc, NFE_IMTIMER, NFE_IM_DEFAULT);

	NFE_WRITE(sc, NFE_SETUP_R1, NFE_R1_MAGIC);
	NFE_WRITE(sc, NFE_SETUP_R2, NFE_R2_MAGIC);
	NFE_WRITE(sc, NFE_SETUP_R6, NFE_R6_MAGIC);

	/* update MAC knowledge of PHY; generates a NFE_IRQ_LINK interrupt */
	NFE_WRITE(sc, NFE_STATUS, sc->mii_phyaddr << 24 | NFE_STATUS_MAGIC);

	NFE_WRITE(sc, NFE_SETUP_R4, NFE_R4_MAGIC);
	NFE_WRITE(sc, NFE_WOL_CTL, NFE_WOL_MAGIC);

	sc->rxtxctl &= ~NFE_RXTX_BIT2;
	NFE_WRITE(sc, NFE_RXTX_CTL, sc->rxtxctl);
	DELAY(10);
	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_BIT1 | sc->rxtxctl);

	/* set Rx filter */
	nfe_setmulti(sc);

	nfe_ifmedia_upd(ifp);

	/* enable Rx */
	NFE_WRITE(sc, NFE_RX_CTL, NFE_RX_START);

	/* enable Tx */
	NFE_WRITE(sc, NFE_TX_CTL, NFE_TX_START);

	NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);

#ifdef DEVICE_POLLING
	if ((ifp->if_flags & IFF_POLLING) == 0)
#endif
	/* enable interrupts */
	NFE_WRITE(sc, NFE_IRQ_MASK, NFE_IRQ_WANTED);

	callout_reset(&sc->sc_tick_ch, hz, nfe_tick, sc);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	/*
	 * If we had stuff in the tx ring before, it's all cleaned out now,
	 * so we are not going to get an interrupt; jump-start any pending
	 * output.
	 */
	ifp->if_start(ifp);
}
static void
nfe_stop(struct nfe_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;

	callout_stop(&sc->sc_tick_ch);

	ifp->if_timer = 0;
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);

	/*
	 * Are NFE_TX_CTL and NFE_RX_CTL polled by the chip microcontroller
	 * or do they directly reset/terminate the DMA hardware?  Nobody
	 * knows.
	 *
	 * Add two delays:
	 *
	 * (1) Delay before zeroing out NFE_TX_CTL.  This seems to help a
	 * watchdog timeout that occurs after a stop/init sequence.  I am
	 * theorizing that a TX KICK occurring just prior to a reinit (e.g.
	 * due to dhclient) is queueing an interrupt to the microcontroller
	 * which gets delayed until after we clear the control registers
	 * down below, resulting in mass confusion.  TX KICK is clearly
	 * hardware aided whereas the other bits in the control register
	 * are more likely to be polled by the microcontroller.
	 *
	 * (2) Delay after zeroing out TX and RX CTL registers, under the
	 * assumption that primary DMA is initiated and terminated by
	 * the microcontroller and not hardware (and anyway, one can hardly
	 * expect the DMA engine to just instantly stop!).  We don't want
	 * to rip the rings out from under it before it has had a chance to
	 * actually stop.
	 */
	DELAY(1000);

	/* Abort Tx */
	NFE_WRITE(sc, NFE_TX_CTL, 0);

	/* Disable Rx */
	NFE_WRITE(sc, NFE_RX_CTL, 0);

	/* Disable interrupts */
	NFE_WRITE(sc, NFE_IRQ_MASK, 0);

	DELAY(1000);

	/* Reset Tx and Rx rings */
	nfe_reset_tx_ring(sc, &sc->txq);
	nfe_reset_rx_ring(sc, &sc->rxq);
}
static int
nfe_alloc_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
	int i, j, error, descsize;
	void **desc;

	if (sc->sc_flags & NFE_40BIT_ADDR) {
		desc = (void **)&ring->desc64;
		descsize = sizeof(struct nfe_desc64);
	} else {
		desc = (void **)&ring->desc32;
		descsize = sizeof(struct nfe_desc32);
	}

	ring->jbuf = kmalloc(sizeof(struct nfe_jbuf) * NFE_JPOOL_COUNT,
			     M_DEVBUF, M_WAITOK | M_ZERO);
	ring->data = kmalloc(sizeof(struct nfe_rx_data) * nfe_rx_ring_count,
			     M_DEVBUF, M_WAITOK | M_ZERO);

	ring->bufsz = MCLBYTES;
	ring->cur = ring->next = 0;

	error = bus_dma_tag_create(NULL, PAGE_SIZE, 0,
				   BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,
				   NULL, NULL,
				   nfe_rx_ring_count * descsize, 1,
				   nfe_rx_ring_count * descsize,
				   0, &ring->tag);
	if (error) {
		if_printf(&sc->arpcom.ac_if,
			  "could not create desc RX DMA tag\n");
		return error;
	}

	error = bus_dmamem_alloc(ring->tag, desc, BUS_DMA_WAITOK | BUS_DMA_ZERO,
				 &ring->map);
	if (error) {
		if_printf(&sc->arpcom.ac_if,
			  "could not allocate RX desc DMA memory\n");
		bus_dma_tag_destroy(ring->tag);
		ring->tag = NULL;
		return error;
	}

	error = bus_dmamap_load(ring->tag, ring->map, *desc,
				nfe_rx_ring_count * descsize,
				nfe_ring_dma_addr, &ring->physaddr,
				BUS_DMA_WAITOK);
	if (error) {
		if_printf(&sc->arpcom.ac_if,
			  "could not load RX desc DMA map\n");
		bus_dmamem_free(ring->tag, *desc, ring->map);
		bus_dma_tag_destroy(ring->tag);
		ring->tag = NULL;
		return error;
	}

	if (sc->sc_flags & NFE_JUMBO_SUP) {
		error = nfe_jpool_alloc(sc, ring);
		if (error) {
			if_printf(&sc->arpcom.ac_if,
				  "could not allocate jumbo frames\n");
			return error;
		}
	}

	error = bus_dma_tag_create(NULL, 1, 0,
				   BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,
				   NULL, NULL,
				   MCLBYTES, 1, MCLBYTES,
				   0, &ring->data_tag);
	if (error) {
		if_printf(&sc->arpcom.ac_if,
			  "could not create RX mbuf DMA tag\n");
		return error;
	}

	/* Create a spare RX mbuf DMA map */
	error = bus_dmamap_create(ring->data_tag, 0, &ring->data_tmpmap);
	if (error) {
		if_printf(&sc->arpcom.ac_if,
			  "could not create spare RX mbuf DMA map\n");
		bus_dma_tag_destroy(ring->data_tag);
		ring->data_tag = NULL;
		return error;
	}

	for (i = 0; i < nfe_rx_ring_count; i++) {
		error = bus_dmamap_create(ring->data_tag, 0,
					  &ring->data[i].map);
		if (error) {
			if_printf(&sc->arpcom.ac_if,
				  "could not create %dth RX mbuf DMA map\n", i);
			goto fail;
		}
	}
	return 0;
fail:
	for (j = 0; j < i; ++j)
		bus_dmamap_destroy(ring->data_tag, ring->data[j].map);
	bus_dmamap_destroy(ring->data_tag, ring->data_tmpmap);
	bus_dma_tag_destroy(ring->data_tag);
	ring->data_tag = NULL;
	return error;
}
static void
nfe_reset_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
	int i;

	for (i = 0; i < nfe_rx_ring_count; i++) {
		struct nfe_rx_data *data = &ring->data[i];

		if (data->m != NULL) {
			if ((sc->sc_flags & NFE_USE_JUMBO) == 0)
				bus_dmamap_unload(ring->data_tag, data->map);
			m_freem(data->m);
			data->m = NULL;
		}
	}
	bus_dmamap_sync(ring->tag, ring->map, BUS_DMASYNC_PREWRITE);

	ring->cur = ring->next = 0;
}

static int
nfe_init_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
	int i;

	for (i = 0; i < nfe_rx_ring_count; ++i) {
		int error;

		/* XXX should use a function pointer */
		if (sc->sc_flags & NFE_USE_JUMBO)
			error = nfe_newbuf_jumbo(sc, ring, i, 1);
		else
			error = nfe_newbuf_std(sc, ring, i, 1);
		if (error) {
			if_printf(&sc->arpcom.ac_if,
				  "could not allocate RX buffer\n");
			return error;
		}

		nfe_set_ready_rxdesc(sc, ring, i);
	}
	bus_dmamap_sync(ring->tag, ring->map, BUS_DMASYNC_PREWRITE);

	return 0;
}
static void
nfe_free_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
	if (ring->data_tag != NULL) {
		struct nfe_rx_data *data;
		int i;

		for (i = 0; i < nfe_rx_ring_count; i++) {
			data = &ring->data[i];

			if (data->m != NULL) {
				bus_dmamap_unload(ring->data_tag, data->map);
				m_freem(data->m);
			}
			bus_dmamap_destroy(ring->data_tag, data->map);
		}
		bus_dmamap_destroy(ring->data_tag, ring->data_tmpmap);
		bus_dma_tag_destroy(ring->data_tag);
	}

	nfe_jpool_free(sc, ring);

	if (ring->jbuf != NULL)
		kfree(ring->jbuf, M_DEVBUF);
	if (ring->data != NULL)
		kfree(ring->data, M_DEVBUF);

	if (ring->tag != NULL) {
		void *desc;

		if (sc->sc_flags & NFE_40BIT_ADDR)
			desc = ring->desc64;
		else
			desc = ring->desc32;

		bus_dmamap_unload(ring->tag, ring->map);
		bus_dmamem_free(ring->tag, desc, ring->map);
		bus_dma_tag_destroy(ring->tag);
	}
}

static struct nfe_jbuf *
nfe_jalloc(struct nfe_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct nfe_jbuf *jbuf;

	lwkt_serialize_enter(&sc->sc_jbuf_serializer);

	jbuf = SLIST_FIRST(&sc->rxq.jfreelist);
	if (jbuf != NULL) {
		SLIST_REMOVE_HEAD(&sc->rxq.jfreelist, jnext);
		jbuf->inuse = 1;
	} else {
		if_printf(ifp, "no free jumbo buffer\n");
	}

	lwkt_serialize_exit(&sc->sc_jbuf_serializer);

	return jbuf;
}
static void
nfe_jfree(void *arg)
{
	struct nfe_jbuf *jbuf = arg;
	struct nfe_softc *sc = jbuf->sc;
	struct nfe_rx_ring *ring = jbuf->ring;

	if (&ring->jbuf[jbuf->slot] != jbuf)
		panic("%s: free wrong jumbo buffer\n", __func__);
	else if (jbuf->inuse == 0)
		panic("%s: jumbo buffer already freed\n", __func__);

	lwkt_serialize_enter(&sc->sc_jbuf_serializer);
	atomic_subtract_int(&jbuf->inuse, 1);
	if (jbuf->inuse == 0)
		SLIST_INSERT_HEAD(&ring->jfreelist, jbuf, jnext);
	lwkt_serialize_exit(&sc->sc_jbuf_serializer);
}

static void
nfe_jref(void *arg)
{
	struct nfe_jbuf *jbuf = arg;
	struct nfe_rx_ring *ring = jbuf->ring;

	if (&ring->jbuf[jbuf->slot] != jbuf)
		panic("%s: ref wrong jumbo buffer\n", __func__);
	else if (jbuf->inuse == 0)
		panic("%s: jumbo buffer already freed\n", __func__);

	atomic_add_int(&jbuf->inuse, 1);
}
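
/*
 * The jumbo pool is one physically loaded chunk of NFE_JPOOL_SIZE bytes
 * carved into NFE_JPOOL_COUNT buffers of NFE_JBYTES each -- both the CPU
 * pointer and the bus address advance by NFE_JBYTES per slot below, so
 * NFE_JPOOL_SIZE is presumably defined as NFE_JPOOL_COUNT * NFE_JBYTES
 * in if_nfereg.h.
 */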
static int
nfe_jpool_alloc(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
	struct nfe_jbuf *jbuf;
	bus_addr_t physaddr;
	caddr_t buf;
	int i, error;

	/*
	 * Allocate a big chunk of DMA'able memory.
	 */
	error = bus_dma_tag_create(NULL, PAGE_SIZE, 0,
				   BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,
				   NULL, NULL,
				   NFE_JPOOL_SIZE, 1, NFE_JPOOL_SIZE,
				   0, &ring->jtag);
	if (error) {
		if_printf(&sc->arpcom.ac_if,
			  "could not create jumbo DMA tag\n");
		return error;
	}

	error = bus_dmamem_alloc(ring->jtag, (void **)&ring->jpool,
				 BUS_DMA_WAITOK, &ring->jmap);
	if (error) {
		if_printf(&sc->arpcom.ac_if,
			  "could not allocate jumbo DMA memory\n");
		bus_dma_tag_destroy(ring->jtag);
		ring->jtag = NULL;
		return error;
	}

	error = bus_dmamap_load(ring->jtag, ring->jmap, ring->jpool,
				NFE_JPOOL_SIZE, nfe_ring_dma_addr, &physaddr,
				BUS_DMA_WAITOK);
	if (error) {
		if_printf(&sc->arpcom.ac_if,
			  "could not load jumbo DMA map\n");
		bus_dmamem_free(ring->jtag, ring->jpool, ring->jmap);
		bus_dma_tag_destroy(ring->jtag);
		ring->jtag = NULL;
		return error;
	}

	/* ..and split it into 9KB chunks */
	SLIST_INIT(&ring->jfreelist);

	buf = ring->jpool;
	for (i = 0; i < NFE_JPOOL_COUNT; i++) {
		jbuf = &ring->jbuf[i];

		jbuf->sc = sc;
		jbuf->ring = ring;
		jbuf->inuse = 0;
		jbuf->slot = i;
		jbuf->buf = buf;
		jbuf->physaddr = physaddr;

		SLIST_INSERT_HEAD(&ring->jfreelist, jbuf, jnext);

		buf += NFE_JBYTES;
		physaddr += NFE_JBYTES;
	}

	return 0;
}

static void
nfe_jpool_free(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
	if (ring->jtag != NULL) {
		bus_dmamap_unload(ring->jtag, ring->jmap);
		bus_dmamem_free(ring->jtag, ring->jpool, ring->jmap);
		bus_dma_tag_destroy(ring->jtag);
	}
}
static int
nfe_alloc_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
{
	int i, j, error, descsize;
	void **desc;

	if (sc->sc_flags & NFE_40BIT_ADDR) {
		desc = (void **)&ring->desc64;
		descsize = sizeof(struct nfe_desc64);
	} else {
		desc = (void **)&ring->desc32;
		descsize = sizeof(struct nfe_desc32);
	}

	ring->queued = 0;
	ring->cur = ring->next = 0;

	error = bus_dma_tag_create(NULL, PAGE_SIZE, 0,
				   BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,
				   NULL, NULL,
				   NFE_TX_RING_COUNT * descsize, 1,
				   NFE_TX_RING_COUNT * descsize,
				   0, &ring->tag);
	if (error) {
		if_printf(&sc->arpcom.ac_if,
			  "could not create TX desc DMA tag\n");
		return error;
	}

	error = bus_dmamem_alloc(ring->tag, desc, BUS_DMA_WAITOK | BUS_DMA_ZERO,
				 &ring->map);
	if (error) {
		if_printf(&sc->arpcom.ac_if,
			  "could not allocate TX desc DMA memory\n");
		bus_dma_tag_destroy(ring->tag);
		ring->tag = NULL;
		return error;
	}

	error = bus_dmamap_load(ring->tag, ring->map, *desc,
				NFE_TX_RING_COUNT * descsize,
				nfe_ring_dma_addr, &ring->physaddr,
				BUS_DMA_WAITOK);
	if (error) {
		if_printf(&sc->arpcom.ac_if,
			  "could not load TX desc DMA map\n");
		bus_dmamem_free(ring->tag, *desc, ring->map);
		bus_dma_tag_destroy(ring->tag);
		ring->tag = NULL;
		return error;
	}

	error = bus_dma_tag_create(NULL, PAGE_SIZE, 0,
				   BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,
				   NULL, NULL,
				   NFE_JBYTES * NFE_MAX_SCATTER,
				   NFE_MAX_SCATTER, NFE_JBYTES,
				   0, &ring->data_tag);
	if (error) {
		if_printf(&sc->arpcom.ac_if,
			  "could not create TX buf DMA tag\n");
		return error;
	}

	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
		error = bus_dmamap_create(ring->data_tag, 0,
					  &ring->data[i].map);
		if (error) {
			if_printf(&sc->arpcom.ac_if,
				  "could not create %dth TX buf DMA map\n", i);
			goto fail;
		}
	}

	return 0;
fail:
	for (j = 0; j < i; ++j)
		bus_dmamap_destroy(ring->data_tag, ring->data[j].map);
	bus_dma_tag_destroy(ring->data_tag);
	ring->data_tag = NULL;
	return error;
}

static void
nfe_reset_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
{
	int i;

	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
		struct nfe_tx_data *data = &ring->data[i];

		if (sc->sc_flags & NFE_40BIT_ADDR)
			ring->desc64[i].flags = 0;
		else
			ring->desc32[i].flags = 0;

		if (data->m != NULL) {
			bus_dmamap_sync(ring->data_tag, data->map,
					BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(ring->data_tag, data->map);
			m_freem(data->m);
			data->m = NULL;
		}
	}
	bus_dmamap_sync(ring->tag, ring->map, BUS_DMASYNC_PREWRITE);

	ring->queued = 0;
	ring->cur = ring->next = 0;
}

static int
nfe_init_tx_ring(struct nfe_softc *sc __unused,
		 struct nfe_tx_ring *ring __unused)
{
	return 0;
}
static void
nfe_free_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
{
	if (ring->data_tag != NULL) {
		struct nfe_tx_data *data;
		int i;

		for (i = 0; i < NFE_TX_RING_COUNT; ++i) {
			data = &ring->data[i];

			if (data->m != NULL) {
				bus_dmamap_unload(ring->data_tag, data->map);
				m_freem(data->m);
			}
			bus_dmamap_destroy(ring->data_tag, data->map);
		}
		bus_dma_tag_destroy(ring->data_tag);
	}

	if (ring->tag != NULL) {
		void *desc;

		if (sc->sc_flags & NFE_40BIT_ADDR)
			desc = ring->desc64;
		else
			desc = ring->desc32;

		bus_dmamap_unload(ring->tag, ring->map);
		bus_dmamem_free(ring->tag, desc, ring->map);
		bus_dma_tag_destroy(ring->tag);
	}
}
static int
nfe_ifmedia_upd(struct ifnet *ifp)
{
	struct nfe_softc *sc = ifp->if_softc;
	struct mii_data *mii = device_get_softc(sc->sc_miibus);

	if (mii->mii_instance != 0) {
		struct mii_softc *miisc;

		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
			mii_phy_reset(miisc);
	}
	mii_mediachg(mii);

	return 0;
}

static void
nfe_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct nfe_softc *sc = ifp->if_softc;
	struct mii_data *mii = device_get_softc(sc->sc_miibus);

	mii_pollstat(mii);
	ifmr->ifm_status = mii->mii_media_status;
	ifmr->ifm_active = mii->mii_media_active;
}
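
/*
 * The multicast filter is an addr/mask pair rather than a hash: addr
 * accumulates the bitwise AND of all multicast addresses and mask ends
 * up with the bits on which they all agree.  E.g. for 01:00:5e:00:00:01
 * and 01:00:5e:00:00:03 the last byte disagrees only in bit 1, so addr
 * ends in 0x01 and mask clears bit 1 -- the chip then appears to accept
 * any destination matching addr on the bits set in mask.
 */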
static void
nfe_setmulti(struct nfe_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct ifmultiaddr *ifma;
	uint8_t addr[ETHER_ADDR_LEN], mask[ETHER_ADDR_LEN];
	uint32_t filter = NFE_RXFILTER_MAGIC;
	int i;

	if ((ifp->if_flags & (IFF_ALLMULTI | IFF_PROMISC)) != 0) {
		bzero(addr, ETHER_ADDR_LEN);
		bzero(mask, ETHER_ADDR_LEN);
		goto done;
	}

	bcopy(etherbroadcastaddr, addr, ETHER_ADDR_LEN);
	bcopy(etherbroadcastaddr, mask, ETHER_ADDR_LEN);

	LIST_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		caddr_t maddr;

		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;

		maddr = LLADDR((struct sockaddr_dl *)ifma->ifma_addr);
		for (i = 0; i < ETHER_ADDR_LEN; i++) {
			addr[i] &= maddr[i];
			mask[i] &= ~maddr[i];
		}
	}

	for (i = 0; i < ETHER_ADDR_LEN; i++)
		mask[i] |= addr[i];

done:
	addr[0] |= 0x01;	/* make sure multicast bit is set */

	NFE_WRITE(sc, NFE_MULTIADDR_HI,
		  addr[3] << 24 | addr[2] << 16 | addr[1] << 8 | addr[0]);
	NFE_WRITE(sc, NFE_MULTIADDR_LO,
		  addr[5] <<  8 | addr[4]);
	NFE_WRITE(sc, NFE_MULTIMASK_HI,
		  mask[3] << 24 | mask[2] << 16 | mask[1] << 8 | mask[0]);
	NFE_WRITE(sc, NFE_MULTIMASK_LO,
		  mask[5] <<  8 | mask[4]);

	filter |= (ifp->if_flags & IFF_PROMISC) ? NFE_PROMISC : NFE_U2M;
	NFE_WRITE(sc, NFE_RXFILTER, filter);
}
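
/*
 * Note the byte order: the factory MAC address comes out of
 * NFE_MACADDR_LO/HI reversed (addr[0] from LO bits 15:8, addr[5] from
 * HI bits 7:0), while nfe_set_macaddr() programs the registers with the
 * non-reversed layout.  The asymmetry is apparently how the hardware
 * stores the ROM address versus how it matches the receive filter.
 */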
static void
nfe_get_macaddr(struct nfe_softc *sc, uint8_t *addr)
{
	uint32_t tmp;

	tmp = NFE_READ(sc, NFE_MACADDR_LO);
	addr[0] = (tmp >> 8) & 0xff;
	addr[1] = (tmp & 0xff);

	tmp = NFE_READ(sc, NFE_MACADDR_HI);
	addr[2] = (tmp >> 24) & 0xff;
	addr[3] = (tmp >> 16) & 0xff;
	addr[4] = (tmp >>  8) & 0xff;
	addr[5] = (tmp & 0xff);
}

static void
nfe_set_macaddr(struct nfe_softc *sc, const uint8_t *addr)
{
	NFE_WRITE(sc, NFE_MACADDR_LO,
		  addr[5] <<  8 | addr[4]);
	NFE_WRITE(sc, NFE_MACADDR_HI,
		  addr[3] << 24 | addr[2] << 16 | addr[1] << 8 | addr[0]);
}
static void
nfe_tick(void *arg)
{
	struct nfe_softc *sc = arg;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct mii_data *mii = device_get_softc(sc->sc_miibus);

	lwkt_serialize_enter(ifp->if_serializer);

	mii_tick(mii);
	callout_reset(&sc->sc_tick_ch, hz, nfe_tick, sc);

	lwkt_serialize_exit(ifp->if_serializer);
}
static void
nfe_ring_dma_addr(void *arg, bus_dma_segment_t *seg, int nseg, int error)
{
	if (error)
		return;

	KASSERT(nseg == 1, ("too many segments, should be 1\n"));

	*((uint32_t *)arg) = seg->ds_addr;
}

static void
nfe_buf_dma_addr(void *arg, bus_dma_segment_t *segs, int nsegs,
		 bus_size_t mapsz __unused, int error)
{
	struct nfe_dma_ctx *ctx = arg;
	int i;

	if (error)
		return;

	KASSERT(nsegs <= ctx->nsegs,
		("too many segments(%d), should be <= %d\n",
		 nsegs, ctx->nsegs));

	ctx->nsegs = nsegs;
	for (i = 0; i < nsegs; ++i)
		ctx->segs[i] = segs[i];
}
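
/*
 * Rx buffer replacement uses the spare DMA map: the new mbuf is loaded
 * into data_tmpmap first, and only on success are the maps swapped and
 * the old mapping unloaded.  If allocation or mapping fails, the ring
 * slot keeps its current mbuf, so an Rx error never leaves a descriptor
 * without a buffer behind it.
 */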
static int
nfe_newbuf_std(struct nfe_softc *sc, struct nfe_rx_ring *ring, int idx,
	       int wait)
{
	struct nfe_rx_data *data = &ring->data[idx];
	struct nfe_dma_ctx ctx;
	bus_dma_segment_t seg;
	bus_dmamap_t map;
	struct mbuf *m;
	int error;

	m = m_getcl(wait ? MB_WAIT : MB_DONTWAIT, MT_DATA, M_PKTHDR);
	if (m == NULL)
		return ENOBUFS;
	m->m_len = m->m_pkthdr.len = MCLBYTES;

	ctx.nsegs = 1;
	ctx.segs = &seg;
	error = bus_dmamap_load_mbuf(ring->data_tag, ring->data_tmpmap,
				     m, nfe_buf_dma_addr, &ctx,
				     wait ? BUS_DMA_WAITOK : BUS_DMA_NOWAIT);
	if (error) {
		m_freem(m);
		if_printf(&sc->arpcom.ac_if, "could not map RX mbuf %d\n",
			  error);
		return error;
	}

	/* Unload originally mapped mbuf */
	bus_dmamap_unload(ring->data_tag, data->map);

	/* Swap this DMA map with tmp DMA map */
	map = data->map;
	data->map = ring->data_tmpmap;
	ring->data_tmpmap = map;

	/* Caller is assumed to have collected the old mbuf */
	data->m = m;

	nfe_set_paddr_rxdesc(sc, ring, idx, seg.ds_addr);

	bus_dmamap_sync(ring->data_tag, data->map, BUS_DMASYNC_PREREAD);
	return 0;
}
static int
nfe_newbuf_jumbo(struct nfe_softc *sc, struct nfe_rx_ring *ring, int idx,
		 int wait)
{
	struct nfe_rx_data *data = &ring->data[idx];
	struct nfe_jbuf *jbuf;
	struct mbuf *m;

	MGETHDR(m, wait ? MB_WAIT : MB_DONTWAIT, MT_DATA);
	if (m == NULL)
		return ENOBUFS;

	jbuf = nfe_jalloc(sc);
	if (jbuf == NULL) {
		m_freem(m);
		if_printf(&sc->arpcom.ac_if, "jumbo allocation failed "
			  "-- packet dropped!\n");
		return ENOBUFS;
	}

	m->m_ext.ext_arg = jbuf;
	m->m_ext.ext_buf = jbuf->buf;
	m->m_ext.ext_free = nfe_jfree;
	m->m_ext.ext_ref = nfe_jref;
	m->m_ext.ext_size = NFE_JBYTES;

	m->m_data = m->m_ext.ext_buf;
	m->m_flags |= M_EXT;
	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;

	/* Caller is assumed to have collected the old mbuf */
	data->m = m;

	nfe_set_paddr_rxdesc(sc, ring, idx, jbuf->physaddr);

	bus_dmamap_sync(ring->jtag, ring->jmap, BUS_DMASYNC_PREREAD);
	return 0;
}
static void
nfe_set_paddr_rxdesc(struct nfe_softc *sc, struct nfe_rx_ring *ring, int idx,
		     bus_addr_t physaddr)
{
	if (sc->sc_flags & NFE_40BIT_ADDR) {
		struct nfe_desc64 *desc64 = &ring->desc64[idx];

#if defined(__LP64__)
		desc64->physaddr[0] = htole32(physaddr >> 32);
#endif
		desc64->physaddr[1] = htole32(physaddr & 0xffffffff);
	} else {
		struct nfe_desc32 *desc32 = &ring->desc32[idx];

		desc32->physaddr = htole32(physaddr);
	}
}

static void
nfe_set_ready_rxdesc(struct nfe_softc *sc, struct nfe_rx_ring *ring, int idx)
{
	if (sc->sc_flags & NFE_40BIT_ADDR) {
		struct nfe_desc64 *desc64 = &ring->desc64[idx];

		desc64->length = htole16(ring->bufsz);
		desc64->flags = htole16(NFE_RX_READY);
	} else {
		struct nfe_desc32 *desc32 = &ring->desc32[idx];

		desc32->length = htole16(ring->bufsz);
		desc32->flags = htole16(NFE_RX_READY);
	}
}