/*	$OpenBSD: if_nfe.c,v 1.63 2006/06/17 18:00:43 brad Exp $	*/
/*	$DragonFly: src/sys/dev/netif/nfe/if_nfe.c,v 1.11 2007/08/08 11:38:51 sephe Exp $	*/

/*
 * Copyright (c) 2006 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Sepherosa Ziehau <sepherosa@gmail.com> and
 * Matthew Dillon <dillon@apollo.backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Copyright (c) 2006 Damien Bergamini <damien.bergamini@free.fr>
 * Copyright (c) 2005, 2006 Jonathan Gray <jsg@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
/* Driver for NVIDIA nForce MCP Fast Ethernet and Gigabit Ethernet */

#include "opt_polling.h"

#include <sys/param.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/serialize.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>

#include <net/ethernet.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/ifq_var.h>
#include <net/if_types.h>
#include <net/if_var.h>
#include <net/vlan/if_vlan_var.h>

#include <bus/pci/pcireg.h>
#include <bus/pci/pcivar.h>
#include <bus/pci/pcidevs.h>

#include <dev/netif/mii_layer/mii.h>
#include <dev/netif/mii_layer/miivar.h>

#include "miibus_if.h"

#include <dev/netif/nfe/if_nfereg.h>
#include <dev/netif/nfe/if_nfevar.h>

#define NFE_CSUM_FEATURES	(CSUM_IP | CSUM_TCP | CSUM_UDP)
static int	nfe_probe(device_t);
static int	nfe_attach(device_t);
static int	nfe_detach(device_t);
static void	nfe_shutdown(device_t);
static int	nfe_resume(device_t);
static int	nfe_suspend(device_t);

static int	nfe_miibus_readreg(device_t, int, int);
static void	nfe_miibus_writereg(device_t, int, int, int);
static void	nfe_miibus_statchg(device_t);

#ifdef DEVICE_POLLING
static void	nfe_poll(struct ifnet *, enum poll_cmd, int);
#endif
static void	nfe_intr(void *);
static int	nfe_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
static void	nfe_rxeof(struct nfe_softc *);
static void	nfe_txeof(struct nfe_softc *);
static int	nfe_encap(struct nfe_softc *, struct nfe_tx_ring *,
			  struct mbuf *);
static void	nfe_start(struct ifnet *);
static void	nfe_watchdog(struct ifnet *);
static void	nfe_init(void *);
static void	nfe_stop(struct nfe_softc *);
static struct nfe_jbuf *nfe_jalloc(struct nfe_softc *);
static void	nfe_jfree(void *);
static void	nfe_jref(void *);
static int	nfe_jpool_alloc(struct nfe_softc *, struct nfe_rx_ring *);
static void	nfe_jpool_free(struct nfe_softc *, struct nfe_rx_ring *);
static int	nfe_alloc_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
static void	nfe_reset_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
static int	nfe_init_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
static void	nfe_free_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
static int	nfe_alloc_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
static void	nfe_reset_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
static int	nfe_init_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
static void	nfe_free_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
static int	nfe_ifmedia_upd(struct ifnet *);
static void	nfe_ifmedia_sts(struct ifnet *, struct ifmediareq *);
static void	nfe_setmulti(struct nfe_softc *);
static void	nfe_get_macaddr(struct nfe_softc *, uint8_t *);
static void	nfe_set_macaddr(struct nfe_softc *, const uint8_t *);
static void	nfe_tick(void *);
static void	nfe_ring_dma_addr(void *, bus_dma_segment_t *, int, int);
static void	nfe_buf_dma_addr(void *, bus_dma_segment_t *, int, bus_size_t,
				 int);
static void	nfe_set_paddr_rxdesc(struct nfe_softc *, struct nfe_rx_ring *,
				     int, bus_addr_t);
static void	nfe_set_ready_rxdesc(struct nfe_softc *, struct nfe_rx_ring *,
				     int);
static int	nfe_newbuf_std(struct nfe_softc *, struct nfe_rx_ring *, int,
			       int);
static int	nfe_newbuf_jumbo(struct nfe_softc *, struct nfe_rx_ring *, int,
				 int);
static int	nfe_debug = 0;
static int	nfe_rx_ring_count = NFE_RX_RING_DEF_COUNT;

TUNABLE_INT("hw.nfe.rx_ring_count", &nfe_rx_ring_count);

SYSCTL_NODE(_hw, OID_AUTO, nfe, CTLFLAG_RD, 0, "nVidia GigE parameters");
SYSCTL_INT(_hw_nfe, OID_AUTO, rx_ring_count, CTLFLAG_RD, &nfe_rx_ring_count,
	   NFE_RX_RING_DEF_COUNT, "rx ring count");
SYSCTL_INT(_hw_nfe, OID_AUTO, debug, CTLFLAG_RW, &nfe_debug, 0,
	   "control debugging printfs");
#ifdef NFE_DEBUG

#define DPRINTF(sc, fmt, ...) do {		\
	if (nfe_debug) {			\
		if_printf(&(sc)->arpcom.ac_if,	\
			  fmt, __VA_ARGS__);	\
	}					\
} while (0)

#define DPRINTFN(sc, lv, fmt, ...) do {		\
	if (nfe_debug >= (lv)) {		\
		if_printf(&(sc)->arpcom.ac_if,	\
			  fmt, __VA_ARGS__);	\
	}					\
} while (0)

#else	/* !NFE_DEBUG */

#define DPRINTF(sc, fmt, ...)
#define DPRINTFN(sc, lv, fmt, ...)

#endif	/* NFE_DEBUG */
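
/*
 * Small helper context handed to the bus_dmamap_load*() callback
 * nfe_buf_dma_addr(): nsegs is set by the caller to the maximum number
 * of segments it can accept, and on return holds the actual segment
 * count while the segments themselves have been copied into segs[].
 */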
struct nfe_dma_ctx {
	int			nsegs;
	bus_dma_segment_t	*segs;
};

static const struct nfe_dev {
	uint16_t	vid;
	uint16_t	did;
	const char	*desc;
} nfe_devices[] = {
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE_LAN,
	  "NVIDIA nForce Fast Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE2_LAN,
	  "NVIDIA nForce2 Fast Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN1,
	  "NVIDIA nForce3 Gigabit Ethernet" },
	/* XXX TGEN the next chip can also be found in the nForce2 Ultra 400Gb
	   chipset, and possibly also in the 400R; it might be that both
	   nForce2- and nForce3-based boards can use the same MCPs
	   (= southbridges). */
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN2,
	  "NVIDIA nForce3 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN3,
	  "NVIDIA nForce3 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN4,
	  "NVIDIA nForce3 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN5,
	  "NVIDIA nForce3 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_CK804_LAN1,
	  "NVIDIA CK804 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_CK804_LAN2,
	  "NVIDIA CK804 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN1,
	  "NVIDIA MCP04 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN2,
	  "NVIDIA MCP04 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP51_LAN1,
	  "NVIDIA MCP51 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP51_LAN2,
	  "NVIDIA MCP51 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN1,
	  "NVIDIA MCP55 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN2,
	  "NVIDIA MCP55 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN1,
	  "NVIDIA MCP61 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN2,
	  "NVIDIA MCP61 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN3,
	  "NVIDIA MCP61 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN4,
	  "NVIDIA MCP61 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN1,
	  "NVIDIA MCP65 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN2,
	  "NVIDIA MCP65 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN3,
	  "NVIDIA MCP65 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN4,
	  "NVIDIA MCP65 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN1,
	  "NVIDIA MCP67 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN2,
	  "NVIDIA MCP67 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN3,
	  "NVIDIA MCP67 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN4,
	  "NVIDIA MCP67 Gigabit Ethernet" },

	{ 0, 0, NULL }
};

static device_method_t nfe_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		nfe_probe),
	DEVMETHOD(device_attach,	nfe_attach),
	DEVMETHOD(device_detach,	nfe_detach),
	DEVMETHOD(device_suspend,	nfe_suspend),
	DEVMETHOD(device_resume,	nfe_resume),
	DEVMETHOD(device_shutdown,	nfe_shutdown),

	/* Bus interface */
	DEVMETHOD(bus_print_child,	bus_generic_print_child),
	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),

	/* MII interface */
	DEVMETHOD(miibus_readreg,	nfe_miibus_readreg),
	DEVMETHOD(miibus_writereg,	nfe_miibus_writereg),
	DEVMETHOD(miibus_statchg,	nfe_miibus_statchg),

	{ 0, 0 }
};
static driver_t nfe_driver = {
	"nfe",
	nfe_methods,
	sizeof(struct nfe_softc)
};

static devclass_t	nfe_devclass;
DECLARE_DUMMY_MODULE(if_nfe);
MODULE_DEPEND(if_nfe, miibus, 1, 1, 1);
DRIVER_MODULE(if_nfe, pci, nfe_driver, nfe_devclass, 0, 0);
DRIVER_MODULE(miibus, nfe, miibus_driver, miibus_devclass, 0, 0);

static int
nfe_probe(device_t dev)
{
	const struct nfe_dev *n;
	uint16_t vid, did;

	vid = pci_get_vendor(dev);
	did = pci_get_device(dev);
	for (n = nfe_devices; n->desc != NULL; ++n) {
		if (vid == n->vid && did == n->did) {
			struct nfe_softc *sc = device_get_softc(dev);

			switch (did) {
			case PCI_PRODUCT_NVIDIA_NFORCE3_LAN2:
			case PCI_PRODUCT_NVIDIA_NFORCE3_LAN3:
			case PCI_PRODUCT_NVIDIA_NFORCE3_LAN4:
			case PCI_PRODUCT_NVIDIA_NFORCE3_LAN5:
				sc->sc_flags = NFE_JUMBO_SUP |
					       NFE_HW_CSUM;
				break;
			case PCI_PRODUCT_NVIDIA_MCP51_LAN1:
			case PCI_PRODUCT_NVIDIA_MCP51_LAN2:
			case PCI_PRODUCT_NVIDIA_MCP61_LAN1:
			case PCI_PRODUCT_NVIDIA_MCP61_LAN2:
			case PCI_PRODUCT_NVIDIA_MCP61_LAN3:
			case PCI_PRODUCT_NVIDIA_MCP61_LAN4:
			case PCI_PRODUCT_NVIDIA_MCP67_LAN1:
			case PCI_PRODUCT_NVIDIA_MCP67_LAN2:
			case PCI_PRODUCT_NVIDIA_MCP67_LAN3:
			case PCI_PRODUCT_NVIDIA_MCP67_LAN4:
				sc->sc_flags = NFE_40BIT_ADDR;
				break;
			case PCI_PRODUCT_NVIDIA_CK804_LAN1:
			case PCI_PRODUCT_NVIDIA_CK804_LAN2:
			case PCI_PRODUCT_NVIDIA_MCP04_LAN1:
			case PCI_PRODUCT_NVIDIA_MCP04_LAN2:
			case PCI_PRODUCT_NVIDIA_MCP65_LAN1:
			case PCI_PRODUCT_NVIDIA_MCP65_LAN2:
			case PCI_PRODUCT_NVIDIA_MCP65_LAN3:
			case PCI_PRODUCT_NVIDIA_MCP65_LAN4:
				sc->sc_flags = NFE_JUMBO_SUP |
					       NFE_40BIT_ADDR |
					       NFE_HW_CSUM;
				break;
			case PCI_PRODUCT_NVIDIA_MCP55_LAN1:
			case PCI_PRODUCT_NVIDIA_MCP55_LAN2:
				sc->sc_flags = NFE_JUMBO_SUP |
					       NFE_40BIT_ADDR |
					       NFE_HW_VLAN |
					       NFE_HW_CSUM;
				break;
			}

			device_set_desc(dev, n->desc);
			device_set_async_attach(dev, TRUE);
			return 0;
		}
	}
	return ENXIO;
}

static int
nfe_attach(device_t dev)
{
	struct nfe_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint8_t eaddr[ETHER_ADDR_LEN];
	int error;

	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	lwkt_serialize_init(&sc->sc_jbuf_serializer);

	sc->sc_mem_rid = PCIR_BAR(0);

#ifndef BURN_BRIDGES
	if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0) {
		uint32_t mem, irq;

		mem = pci_read_config(dev, sc->sc_mem_rid, 4);
		irq = pci_read_config(dev, PCIR_INTLINE, 4);

		device_printf(dev, "chip is in D%d power mode "
			      "-- setting to D0\n", pci_get_powerstate(dev));

		pci_set_powerstate(dev, PCI_POWERSTATE_D0);

		pci_write_config(dev, sc->sc_mem_rid, mem, 4);
		pci_write_config(dev, PCIR_INTLINE, irq, 4);
	}
#endif	/* !BURN_BRIDGES */

	/* Enable bus mastering */
	pci_enable_busmaster(dev);

	/* Allocate IO memory */
	sc->sc_mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
						&sc->sc_mem_rid, RF_ACTIVE);
	if (sc->sc_mem_res == NULL) {
		device_printf(dev, "could not allocate io memory\n");
		return ENXIO;
	}
	sc->sc_memh = rman_get_bushandle(sc->sc_mem_res);
	sc->sc_memt = rman_get_bustag(sc->sc_mem_res);

	/* Allocate IRQ */
	sc->sc_irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
						&sc->sc_irq_rid,
						RF_SHAREABLE | RF_ACTIVE);
	if (sc->sc_irq_res == NULL) {
		device_printf(dev, "could not allocate irq\n");
		error = ENXIO;
		goto fail;
	}

	nfe_get_macaddr(sc, eaddr);

	/*
	 * Allocate Tx and Rx rings.
	 */
	error = nfe_alloc_tx_ring(sc, &sc->txq);
	if (error) {
		device_printf(dev, "could not allocate Tx ring\n");
		goto fail;
	}

	error = nfe_alloc_rx_ring(sc, &sc->rxq);
	if (error) {
		device_printf(dev, "could not allocate Rx ring\n");
		goto fail;
	}

	error = mii_phy_probe(dev, &sc->sc_miibus, nfe_ifmedia_upd,
			      nfe_ifmedia_sts);
	if (error) {
		device_printf(dev, "MII without any phy\n");
		goto fail;
	}

	ifp->if_softc = sc;
	ifp->if_mtu = ETHERMTU;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = nfe_ioctl;
	ifp->if_start = nfe_start;
#ifdef DEVICE_POLLING
	ifp->if_poll = nfe_poll;
#endif
	ifp->if_watchdog = nfe_watchdog;
	ifp->if_init = nfe_init;
	ifq_set_maxlen(&ifp->if_snd, NFE_IFQ_MAXLEN);
	ifq_set_ready(&ifp->if_snd);

	ifp->if_capabilities = IFCAP_VLAN_MTU;

	if (sc->sc_flags & NFE_HW_VLAN)
		ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;

#ifdef NFE_CSUM
	if (sc->sc_flags & NFE_HW_CSUM) {
		ifp->if_capabilities |= IFCAP_HWCSUM;
		ifp->if_hwassist = NFE_CSUM_FEATURES;
	}
#else
	sc->sc_flags &= ~NFE_HW_CSUM;
#endif
	ifp->if_capenable = ifp->if_capabilities;

	callout_init(&sc->sc_tick_ch);

	ether_ifattach(ifp, eaddr, NULL);

	error = bus_setup_intr(dev, sc->sc_irq_res, INTR_MPSAFE, nfe_intr, sc,
			       &sc->sc_ih, ifp->if_serializer);
	if (error) {
		device_printf(dev, "could not setup intr\n");
		ether_ifdetach(ifp);
		goto fail;
	}

	return 0;
fail:
	nfe_detach(dev);
	return error;
}

static int
nfe_detach(device_t dev)
{
	struct nfe_softc *sc = device_get_softc(dev);

	if (device_is_attached(dev)) {
		struct ifnet *ifp = &sc->arpcom.ac_if;

		lwkt_serialize_enter(ifp->if_serializer);
		nfe_stop(sc);
		bus_teardown_intr(dev, sc->sc_irq_res, sc->sc_ih);
		lwkt_serialize_exit(ifp->if_serializer);

		ether_ifdetach(ifp);
	}

	if (sc->sc_miibus != NULL)
		device_delete_child(dev, sc->sc_miibus);
	bus_generic_detach(dev);

	if (sc->sc_irq_res != NULL) {
		bus_release_resource(dev, SYS_RES_IRQ, sc->sc_irq_rid,
				     sc->sc_irq_res);
	}

	if (sc->sc_mem_res != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY, sc->sc_mem_rid,
				     sc->sc_mem_res);
	}

	nfe_free_tx_ring(sc, &sc->txq);
	nfe_free_rx_ring(sc, &sc->rxq);

	return 0;
}

static void
nfe_shutdown(device_t dev)
{
	struct nfe_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;

	lwkt_serialize_enter(ifp->if_serializer);
	nfe_stop(sc);
	lwkt_serialize_exit(ifp->if_serializer);
}

static int
nfe_suspend(device_t dev)
{
	struct nfe_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;

	lwkt_serialize_enter(ifp->if_serializer);
	nfe_stop(sc);
	lwkt_serialize_exit(ifp->if_serializer);

	return 0;
}

static int
nfe_resume(device_t dev)
{
	struct nfe_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;

	lwkt_serialize_enter(ifp->if_serializer);
	if (ifp->if_flags & IFF_UP) {
		nfe_init(sc);
		if (ifp->if_flags & IFF_RUNNING)
			nfe_start(ifp);
	}
	lwkt_serialize_exit(ifp->if_serializer);

	return 0;
}

static void
nfe_miibus_statchg(device_t dev)
{
	struct nfe_softc *sc = device_get_softc(dev);
	struct mii_data *mii = device_get_softc(sc->sc_miibus);
	uint32_t phy, seed, misc = NFE_MISC1_MAGIC, link = NFE_MEDIA_SET;

	phy = NFE_READ(sc, NFE_PHY_IFACE);
	phy &= ~(NFE_PHY_HDX | NFE_PHY_100TX | NFE_PHY_1000T);

	seed = NFE_READ(sc, NFE_RNDSEED);
	seed &= ~NFE_SEED_MASK;

	if ((mii->mii_media_active & IFM_GMASK) == IFM_HDX) {
		phy  |= NFE_PHY_HDX;	/* half-duplex */
		misc |= NFE_MISC1_HDX;
	}

	switch (IFM_SUBTYPE(mii->mii_media_active)) {
	case IFM_1000_T:	/* full-duplex only */
		link |= NFE_MEDIA_1000T;
		seed |= NFE_SEED_1000T;
		phy  |= NFE_PHY_1000T;
		break;
	case IFM_100_TX:
		link |= NFE_MEDIA_100TX;
		seed |= NFE_SEED_100TX;
		phy  |= NFE_PHY_100TX;
		break;
	case IFM_10_T:
		link |= NFE_MEDIA_10T;
		seed |= NFE_SEED_10T;
		break;
	}

	NFE_WRITE(sc, NFE_RNDSEED, seed);	/* XXX: gigabit NICs only? */

	NFE_WRITE(sc, NFE_PHY_IFACE, phy);
	NFE_WRITE(sc, NFE_MISC1, misc);
	NFE_WRITE(sc, NFE_LINKSPEED, link);
}

static int
nfe_miibus_readreg(device_t dev, int phy, int reg)
{
	struct nfe_softc *sc = device_get_softc(dev);
	uint32_t val;
	int ntries;

	NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);

	if (NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY) {
		NFE_WRITE(sc, NFE_PHY_CTL, NFE_PHY_BUSY);
		DELAY(100);
	}

	NFE_WRITE(sc, NFE_PHY_CTL, (phy << NFE_PHYADD_SHIFT) | reg);

	for (ntries = 0; ntries < 1000; ntries++) {
		DELAY(100);
		if (!(NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY))
			break;
	}
	if (ntries == 1000) {
		DPRINTFN(sc, 2, "timeout waiting for PHY %s\n", "");
		return 0;
	}

	if (NFE_READ(sc, NFE_PHY_STATUS) & NFE_PHY_ERROR) {
		DPRINTFN(sc, 2, "could not read PHY %s\n", "");
		return 0;
	}

	val = NFE_READ(sc, NFE_PHY_DATA);
	if (val != 0xffffffff && val != 0)
		sc->mii_phyaddr = phy;

	DPRINTFN(sc, 2, "mii read phy %d reg 0x%x ret 0x%x\n", phy, reg, val);

	return val;
}

static void
nfe_miibus_writereg(device_t dev, int phy, int reg, int val)
{
	struct nfe_softc *sc = device_get_softc(dev);
	uint32_t ctl;
	int ntries;

	NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);

	if (NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY) {
		NFE_WRITE(sc, NFE_PHY_CTL, NFE_PHY_BUSY);
		DELAY(100);
	}

	NFE_WRITE(sc, NFE_PHY_DATA, val);
	ctl = NFE_PHY_WRITE | (phy << NFE_PHYADD_SHIFT) | reg;
	NFE_WRITE(sc, NFE_PHY_CTL, ctl);

	for (ntries = 0; ntries < 1000; ntries++) {
		DELAY(100);
		if (!(NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY))
			break;
	}

#ifdef NFE_DEBUG
	if (ntries == 1000)
		DPRINTFN(sc, 2, "could not write to PHY %s\n", "");
#endif
}

#ifdef DEVICE_POLLING

static void
nfe_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
{
	struct nfe_softc *sc = ifp->if_softc;

	switch (cmd) {
	case POLL_REGISTER:
		/* Disable interrupts */
		NFE_WRITE(sc, NFE_IRQ_MASK, 0);
		break;
	case POLL_DEREGISTER:
		/* enable interrupts */
		NFE_WRITE(sc, NFE_IRQ_MASK, NFE_IRQ_WANTED);
		break;
	case POLL_AND_CHECK_STATUS:
		/* FALL THROUGH */
	case POLL_ONLY:
		if (ifp->if_flags & IFF_RUNNING) {
			nfe_rxeof(sc);
			nfe_txeof(sc);
		}
		break;
	}
}

#endif	/* DEVICE_POLLING */

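/*
 * Interrupt handler: acknowledge the pending IRQ bits, note PHY link
 * changes and, when the interface is running, reap completed Rx and Tx
 * descriptors.  Runs with the interface serializer held (see the
 * bus_setup_intr() call in nfe_attach()).
 */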
static void
nfe_intr(void *arg)
{
	struct nfe_softc *sc = arg;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t r;

	r = NFE_READ(sc, NFE_IRQ_STATUS);
	if (r == 0)
		return;	/* not for us */
	NFE_WRITE(sc, NFE_IRQ_STATUS, r);

	DPRINTFN(sc, 5, "%s: interrupt register %x\n", __func__, r);

	if (r & NFE_IRQ_LINK) {
		NFE_READ(sc, NFE_PHY_STATUS);
		NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);
		DPRINTF(sc, "link state changed %s\n", "");
	}

	if (ifp->if_flags & IFF_RUNNING) {
		/* check Rx ring */
		nfe_rxeof(sc);

		/* check Tx ring */
		nfe_txeof(sc);
	}
}

static int
nfe_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data, struct ucred *cr)
{
	struct nfe_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	struct mii_data *mii;
	int error = 0, mask;

	switch (cmd) {
	case SIOCSIFMTU:
		if (((sc->sc_flags & NFE_JUMBO_SUP) &&
		     ifr->ifr_mtu > NFE_JUMBO_MTU) ||
		    ((sc->sc_flags & NFE_JUMBO_SUP) == 0 &&
		     ifr->ifr_mtu > ETHERMTU)) {
			error = EINVAL;
		} else if (ifp->if_mtu != ifr->ifr_mtu) {
			ifp->if_mtu = ifr->ifr_mtu;
			nfe_init(sc);
		}
		break;
	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			/*
			 * If only the PROMISC or ALLMULTI flag changes, then
			 * don't do a full re-init of the chip, just update
			 * the Rx filter.
			 */
			if ((ifp->if_flags & IFF_RUNNING) &&
			    ((ifp->if_flags ^ sc->sc_if_flags) &
			     (IFF_ALLMULTI | IFF_PROMISC)) != 0) {
				nfe_setmulti(sc);
			} else {
				if (!(ifp->if_flags & IFF_RUNNING))
					nfe_init(sc);
			}
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				nfe_stop(sc);
		}
		sc->sc_if_flags = ifp->if_flags;
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		if (ifp->if_flags & IFF_RUNNING)
			nfe_setmulti(sc);
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		mii = device_get_softc(sc->sc_miibus);
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
		break;
	case SIOCSIFCAP:
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
		if ((mask & IFCAP_HWCSUM) &&
		    (ifp->if_capabilities & IFCAP_HWCSUM)) {
			if (IFCAP_HWCSUM & ifp->if_capenable) {
				ifp->if_capenable &= ~IFCAP_HWCSUM;
				ifp->if_hwassist = 0;
			} else {
				ifp->if_capenable |= IFCAP_HWCSUM;
				ifp->if_hwassist = NFE_CSUM_FEATURES;
			}

			if (ifp->if_flags & IFF_RUNNING)
				nfe_init(sc);
		}
		break;
	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}
	return error;
}

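/*
 * Rx completion: walk the Rx ring from rxq.cur until a descriptor still
 * marked NFE_RX_READY is found.  For each received frame a fresh buffer
 * is attached to the descriptor first, so the ring never loses a buffer;
 * only then is the old mbuf handed up to the network stack.
 */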
static void
nfe_rxeof(struct nfe_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct nfe_rx_ring *ring = &sc->rxq;
	int reap;

	reap = 0;
	bus_dmamap_sync(ring->tag, ring->map, BUS_DMASYNC_POSTREAD);

	for (;;) {
		struct nfe_rx_data *data = &ring->data[ring->cur];
		struct mbuf *m;
		uint16_t flags;
		int len, error;

		if (sc->sc_flags & NFE_40BIT_ADDR) {
			struct nfe_desc64 *desc64 = &ring->desc64[ring->cur];

			flags = le16toh(desc64->flags);
			len = le16toh(desc64->length) & 0x3fff;
		} else {
			struct nfe_desc32 *desc32 = &ring->desc32[ring->cur];

			flags = le16toh(desc32->flags);
			len = le16toh(desc32->length) & 0x3fff;
		}

		if (flags & NFE_RX_READY)
			break;

		reap = 1;

		if ((sc->sc_flags & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) {
			if (!(flags & NFE_RX_VALID_V1))
				goto skip;

			if ((flags & NFE_RX_FIXME_V1) == NFE_RX_FIXME_V1) {
				flags &= ~NFE_RX_ERROR;
				len--;	/* fix buffer length */
			}
		} else {
			if (!(flags & NFE_RX_VALID_V2))
				goto skip;

			if ((flags & NFE_RX_FIXME_V2) == NFE_RX_FIXME_V2) {
				flags &= ~NFE_RX_ERROR;
				len--;	/* fix buffer length */
			}
		}

		if (flags & NFE_RX_ERROR) {
			ifp->if_ierrors++;
			goto skip;
		}

		m = data->m;

		if (sc->sc_flags & NFE_USE_JUMBO)
			error = nfe_newbuf_jumbo(sc, ring, ring->cur, 0);
		else
			error = nfe_newbuf_std(sc, ring, ring->cur, 0);
		if (error) {
			ifp->if_ierrors++;
			goto skip;
		}

		/* finalize mbuf */
		m->m_pkthdr.len = m->m_len = len;
		m->m_pkthdr.rcvif = ifp;

		if ((ifp->if_capenable & IFCAP_HWCSUM) &&
		    (flags & NFE_RX_CSUMOK)) {
			m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;

			if (flags & NFE_RX_IP_CSUMOK_V2)
				m->m_pkthdr.csum_flags |= CSUM_IP_VALID;

			if (flags &
			    (NFE_RX_UDP_CSUMOK_V2 | NFE_RX_TCP_CSUMOK_V2)) {
				m->m_pkthdr.csum_flags |= CSUM_DATA_VALID |
							  CSUM_PSEUDO_HDR;
				m->m_pkthdr.csum_data = 0xffff;
			}
		}

		ifp->if_ipackets++;
		ifp->if_input(ifp, m);
skip:
		nfe_set_ready_rxdesc(sc, ring, ring->cur);
		sc->rxq.cur = (sc->rxq.cur + 1) % nfe_rx_ring_count;
	}

	if (reap)
		bus_dmamap_sync(ring->tag, ring->map, BUS_DMASYNC_PREWRITE);
}

static void
nfe_txeof(struct nfe_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct nfe_tx_ring *ring = &sc->txq;
	struct nfe_tx_data *data = NULL;

	bus_dmamap_sync(ring->tag, ring->map, BUS_DMASYNC_POSTREAD);
	while (ring->next != ring->cur) {
		uint16_t flags;

		if (sc->sc_flags & NFE_40BIT_ADDR)
			flags = le16toh(ring->desc64[ring->next].flags);
		else
			flags = le16toh(ring->desc32[ring->next].flags);

		if (flags & NFE_TX_VALID)
			break;

		data = &ring->data[ring->next];

		if ((sc->sc_flags & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) {
			if (!(flags & NFE_TX_LASTFRAG_V1) && data->m == NULL)
				goto skip;

			if ((flags & NFE_TX_ERROR_V1) != 0) {
				if_printf(ifp, "tx v1 error 0x%4b\n", flags,
					  NFE_V1_TXERR);
				ifp->if_oerrors++;
			} else {
				ifp->if_opackets++;
			}
		} else {
			if (!(flags & NFE_TX_LASTFRAG_V2) && data->m == NULL)
				goto skip;

			if ((flags & NFE_TX_ERROR_V2) != 0) {
				if_printf(ifp, "tx v2 error 0x%4b\n", flags,
					  NFE_V2_TXERR);
				ifp->if_oerrors++;
			} else {
				ifp->if_opackets++;
			}
		}

		if (data->m == NULL) {	/* should not get there */
			if_printf(ifp,
				  "last fragment bit w/o associated mbuf!\n");
			goto skip;
		}

		/* last fragment of the mbuf chain transmitted */
		bus_dmamap_sync(ring->data_tag, data->map,
				BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(ring->data_tag, data->map);
		m_freem(data->m);
		data->m = NULL;

		ifp->if_timer = 0;
skip:
		ring->queued--;
		KKASSERT(ring->queued >= 0);
		ring->next = (ring->next + 1) % NFE_TX_RING_COUNT;
	}

	if (data != NULL) {	/* at least one slot freed */
		ifp->if_flags &= ~IFF_OACTIVE;
		ifp->if_start(ifp);
	}
}

static int
nfe_encap(struct nfe_softc *sc, struct nfe_tx_ring *ring, struct mbuf *m0)
{
	struct nfe_dma_ctx ctx;
	bus_dma_segment_t segs[NFE_MAX_SCATTER];
	struct nfe_tx_data *data, *data_map;
	bus_dmamap_t map;
	struct nfe_desc64 *desc64 = NULL;
	struct nfe_desc32 *desc32 = NULL;
	uint32_t vtag = 0;
	uint16_t flags = 0;
	int error, i, j;

	data = &ring->data[ring->cur];
	map = data->map;
	data_map = data;	/* Remember who owns the DMA map */

	ctx.nsegs = NFE_MAX_SCATTER;
	ctx.segs = segs;
	error = bus_dmamap_load_mbuf(ring->data_tag, map, m0,
				     nfe_buf_dma_addr, &ctx, BUS_DMA_NOWAIT);
	if (error && error != EFBIG) {
		if_printf(&sc->arpcom.ac_if, "could not map TX mbuf\n");
		goto back;
	}

	if (error) {	/* error == EFBIG */
		struct mbuf *m_new;

		m_new = m_defrag(m0, MB_DONTWAIT);
		if (m_new == NULL) {
			if_printf(&sc->arpcom.ac_if,
				  "could not defrag TX mbuf\n");
			error = ENOBUFS;
			goto back;
		} else {
			m0 = m_new;
		}

		ctx.nsegs = NFE_MAX_SCATTER;
		ctx.segs = segs;
		error = bus_dmamap_load_mbuf(ring->data_tag, map, m0,
					     nfe_buf_dma_addr, &ctx,
					     BUS_DMA_NOWAIT);
		if (error) {
			if_printf(&sc->arpcom.ac_if,
				  "could not map defragged TX mbuf\n");
			goto back;
		}
	}

	if (ring->queued + ctx.nsegs >= NFE_TX_RING_COUNT - 1) {
		bus_dmamap_unload(ring->data_tag, map);
		error = ENOBUFS;
		goto back;
	}

	/* setup h/w VLAN tagging */
	if ((m0->m_flags & (M_PROTO1 | M_PKTHDR)) == (M_PROTO1 | M_PKTHDR) &&
	    m0->m_pkthdr.rcvif != NULL &&
	    m0->m_pkthdr.rcvif->if_type == IFT_L2VLAN) {
		struct ifvlan *ifv = m0->m_pkthdr.rcvif->if_softc;

		if (ifv != NULL)
			vtag = NFE_TX_VTAG | htons(ifv->ifv_tag);
	}

	if (sc->arpcom.ac_if.if_capenable & IFCAP_HWCSUM) {
		if (m0->m_pkthdr.csum_flags & CSUM_IP)
			flags |= NFE_TX_IP_CSUM;
		if (m0->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP))
			flags |= NFE_TX_TCP_CSUM;
	}

	/*
	 * XXX urm. somebody is unaware of how hardware works.  You
	 * absolutely CANNOT set NFE_TX_VALID on the next descriptor in
	 * the ring until the entire chain is actually *VALID*.  Otherwise
	 * the hardware may encounter a partially initialized chain that
	 * is marked as being ready to go when it in fact is not ready to
	 * go.
	 */
	for (i = 0; i < ctx.nsegs; i++) {
		j = (ring->cur + i) % NFE_TX_RING_COUNT;
		data = &ring->data[j];

		if (sc->sc_flags & NFE_40BIT_ADDR) {
			desc64 = &ring->desc64[j];
#if defined(__LP64__)
			desc64->physaddr[0] =
			    htole32(segs[i].ds_addr >> 32);
#endif
			desc64->physaddr[1] =
			    htole32(segs[i].ds_addr & 0xffffffff);
			desc64->length = htole16(segs[i].ds_len - 1);
			desc64->vtag = htole32(vtag);
			desc64->flags = htole16(flags);
		} else {
			desc32 = &ring->desc32[j];
			desc32->physaddr = htole32(segs[i].ds_addr);
			desc32->length = htole16(segs[i].ds_len - 1);
			desc32->flags = htole16(flags);
		}

		/* csum flags and vtag belong to the first fragment only */
		flags &= ~(NFE_TX_IP_CSUM | NFE_TX_TCP_CSUM);
		vtag = 0;

		ring->queued++;
		KKASSERT(ring->queued <= NFE_TX_RING_COUNT);
	}

	/* the whole mbuf chain has been DMA mapped, fix last descriptor */
	if (sc->sc_flags & NFE_40BIT_ADDR) {
		desc64->flags |= htole16(NFE_TX_LASTFRAG_V2);
	} else {
		if (sc->sc_flags & NFE_JUMBO_SUP)
			flags = NFE_TX_LASTFRAG_V2;
		else
			flags = NFE_TX_LASTFRAG_V1;
		desc32->flags |= htole16(flags);
	}

	/*
	 * Set NFE_TX_VALID backwards so the hardware doesn't see the
	 * whole mess until the first descriptor in the map is flagged.
	 */
	for (i = ctx.nsegs - 1; i >= 0; --i) {
		j = (ring->cur + i) % NFE_TX_RING_COUNT;
		if (sc->sc_flags & NFE_40BIT_ADDR) {
			desc64 = &ring->desc64[j];
			desc64->flags |= htole16(NFE_TX_VALID);
		} else {
			desc32 = &ring->desc32[j];
			desc32->flags |= htole16(NFE_TX_VALID);
		}
	}
	ring->cur = (ring->cur + ctx.nsegs) % NFE_TX_RING_COUNT;

	/* Exchange DMA map */
	data_map->map = data->map;
	data->map = map;
	data->m = m0;

	bus_dmamap_sync(ring->data_tag, map, BUS_DMASYNC_PREWRITE);

	error = 0;
back:
	if (error)
		m_freem(m0);
	return error;
}

static void
nfe_start(struct ifnet *ifp)
{
	struct nfe_softc *sc = ifp->if_softc;
	struct nfe_tx_ring *ring = &sc->txq;
	struct mbuf *m0;
	int count = 0;

	if (ifp->if_flags & IFF_OACTIVE)
		return;

	if (ifq_is_empty(&ifp->if_snd))
		return;

	for (;;) {
		m0 = ifq_dequeue(&ifp->if_snd, NULL);
		if (m0 == NULL)
			break;

		if (nfe_encap(sc, ring, m0) != 0) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}
		++count;

		/*
		 * NOTE:
		 * `m0' may be freed in nfe_encap(), so
		 * it should not be touched any more.
		 */
	}
	if (count == 0)	/* nothing sent */
		return;

	/* Sync TX descriptor ring */
	bus_dmamap_sync(ring->tag, ring->map, BUS_DMASYNC_PREWRITE);

	/* Kick Tx */
	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_KICKTX | sc->rxtxctl);

	/*
	 * Set a timeout in case the chip goes out to lunch.
	 */
	ifp->if_timer = 5;
}

static void
nfe_watchdog(struct ifnet *ifp)
{
	struct nfe_softc *sc = ifp->if_softc;

	if (ifp->if_flags & IFF_RUNNING) {
		if_printf(ifp, "watchdog timeout - lost interrupt recovered\n");
		nfe_txeof(sc);
		return;
	}

	if_printf(ifp, "watchdog timeout\n");

	nfe_init(ifp->if_softc);

	ifp->if_oerrors++;

	if (!ifq_is_empty(&ifp->if_snd))
		ifp->if_start(ifp);
}

static void
nfe_init(void *xsc)
{
	struct nfe_softc *sc = xsc;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t tmp;
	int error;

	nfe_stop(sc);

	/*
	 * NOTE:
	 * Switching between jumbo frames and normal frames should
	 * be done _after_ nfe_stop() but _before_ nfe_init_rx_ring().
	 */
	if (ifp->if_mtu > ETHERMTU) {
		sc->sc_flags |= NFE_USE_JUMBO;
		sc->rxq.bufsz = NFE_JBYTES;
		if_printf(ifp, "use jumbo frames\n");
	} else {
		sc->sc_flags &= ~NFE_USE_JUMBO;
		sc->rxq.bufsz = MCLBYTES;
		if_printf(ifp, "use non-jumbo frames\n");
	}

	error = nfe_init_tx_ring(sc, &sc->txq);
	if (error) {
		nfe_stop(sc);
		return;
	}

	error = nfe_init_rx_ring(sc, &sc->rxq);
	if (error) {
		nfe_stop(sc);
		return;
	}

	NFE_WRITE(sc, NFE_TX_UNK, 0);
	NFE_WRITE(sc, NFE_STATUS, 0);

	sc->rxtxctl = NFE_RXTX_BIT2;
	if (sc->sc_flags & NFE_40BIT_ADDR)
		sc->rxtxctl |= NFE_RXTX_V3MAGIC;
	else if (sc->sc_flags & NFE_JUMBO_SUP)
		sc->rxtxctl |= NFE_RXTX_V2MAGIC;

	if (ifp->if_capenable & IFCAP_HWCSUM)
		sc->rxtxctl |= NFE_RXTX_RXCSUM;

	/*
	 * Although the adapter is capable of stripping VLAN tags from received
	 * frames (NFE_RXTX_VTAG_STRIP), we do not enable this functionality on
	 * purpose. This will be done in software by our network stack.
	 */
	if (sc->sc_flags & NFE_HW_VLAN)
		sc->rxtxctl |= NFE_RXTX_VTAG_INSERT;

	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_RESET | sc->rxtxctl);
	DELAY(10);
	NFE_WRITE(sc, NFE_RXTX_CTL, sc->rxtxctl);

	if (sc->sc_flags & NFE_HW_VLAN)
		NFE_WRITE(sc, NFE_VTAG_CTL, NFE_VTAG_ENABLE);

	NFE_WRITE(sc, NFE_SETUP_R6, 0);

	/* set MAC address */
	nfe_set_macaddr(sc, sc->arpcom.ac_enaddr);

	/* tell MAC where rings are in memory */
#if defined(__LP64__)
	NFE_WRITE(sc, NFE_RX_RING_ADDR_HI, sc->rxq.physaddr >> 32);
#endif
	NFE_WRITE(sc, NFE_RX_RING_ADDR_LO, sc->rxq.physaddr & 0xffffffff);
#if defined(__LP64__)
	NFE_WRITE(sc, NFE_TX_RING_ADDR_HI, sc->txq.physaddr >> 32);
#endif
	NFE_WRITE(sc, NFE_TX_RING_ADDR_LO, sc->txq.physaddr & 0xffffffff);

	NFE_WRITE(sc, NFE_RING_SIZE,
		  (nfe_rx_ring_count - 1) << 16 |
		  (NFE_TX_RING_COUNT - 1));

	NFE_WRITE(sc, NFE_RXBUFSZ, sc->rxq.bufsz);

	/* force MAC to wakeup */
	tmp = NFE_READ(sc, NFE_PWR_STATE);
	NFE_WRITE(sc, NFE_PWR_STATE, tmp | NFE_PWR_WAKEUP);
	DELAY(10);
	tmp = NFE_READ(sc, NFE_PWR_STATE);
	NFE_WRITE(sc, NFE_PWR_STATE, tmp | NFE_PWR_VALID);

	/*
	 * NFE_IMTIMER generates a periodic interrupt via NFE_IRQ_TIMER.
	 * It is unclear how wide the timer is.  Base programming does
	 * not seem to affect NFE_IRQ_TX_DONE or NFE_IRQ_RX_DONE so
	 * we don't get any interrupt moderation.  TX moderation is
	 * possible by using the timer interrupt instead of TX_DONE.
	 *
	 * It is unclear whether there are other bits that can be
	 * set to make the NFE device actually do interrupt moderation
	 * on the RX side.
	 *
	 * For now set a 128uS interval as a placemark, but don't use
	 * the timer.
	 */
	NFE_WRITE(sc, NFE_IMTIMER, NFE_IM_DEFAULT);

	NFE_WRITE(sc, NFE_SETUP_R1, NFE_R1_MAGIC);
	NFE_WRITE(sc, NFE_SETUP_R2, NFE_R2_MAGIC);
	NFE_WRITE(sc, NFE_SETUP_R6, NFE_R6_MAGIC);

	/* update MAC knowledge of PHY; generates a NFE_IRQ_LINK interrupt */
	NFE_WRITE(sc, NFE_STATUS, sc->mii_phyaddr << 24 | NFE_STATUS_MAGIC);

	NFE_WRITE(sc, NFE_SETUP_R4, NFE_R4_MAGIC);
	NFE_WRITE(sc, NFE_WOL_CTL, NFE_WOL_MAGIC);

	sc->rxtxctl &= ~NFE_RXTX_BIT2;
	NFE_WRITE(sc, NFE_RXTX_CTL, sc->rxtxctl);
	DELAY(10);
	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_BIT1 | sc->rxtxctl);

	/* set Rx filter */
	nfe_setmulti(sc);

	nfe_ifmedia_upd(ifp);

	/* enable Rx */
	NFE_WRITE(sc, NFE_RX_CTL, NFE_RX_START);

	/* enable Tx */
	NFE_WRITE(sc, NFE_TX_CTL, NFE_TX_START);

	NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);

#ifdef DEVICE_POLLING
	if ((ifp->if_flags & IFF_POLLING) == 0)
#endif
	/* enable interrupts */
	NFE_WRITE(sc, NFE_IRQ_MASK, NFE_IRQ_WANTED);

	callout_reset(&sc->sc_tick_ch, hz, nfe_tick, sc);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;
}

static void
nfe_stop(struct nfe_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;

	callout_stop(&sc->sc_tick_ch);

	ifp->if_timer = 0;
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);

	/* Abort Tx */
	NFE_WRITE(sc, NFE_TX_CTL, 0);

	/* Disable Rx */
	NFE_WRITE(sc, NFE_RX_CTL, 0);

	/* Disable interrupts */
	NFE_WRITE(sc, NFE_IRQ_MASK, 0);

	/* Reset Tx and Rx rings */
	nfe_reset_tx_ring(sc, &sc->txq);
	nfe_reset_rx_ring(sc, &sc->rxq);
}

static int
nfe_alloc_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
	int i, j, error, descsize;
	void **desc;

	if (sc->sc_flags & NFE_40BIT_ADDR) {
		desc = (void **)&ring->desc64;
		descsize = sizeof(struct nfe_desc64);
	} else {
		desc = (void **)&ring->desc32;
		descsize = sizeof(struct nfe_desc32);
	}

	ring->jbuf = kmalloc(sizeof(struct nfe_jbuf) * NFE_JPOOL_COUNT,
			     M_DEVBUF, M_WAITOK | M_ZERO);
	ring->data = kmalloc(sizeof(struct nfe_rx_data) * nfe_rx_ring_count,
			     M_DEVBUF, M_WAITOK | M_ZERO);

	ring->bufsz = MCLBYTES;
	ring->cur = ring->next = 0;

	error = bus_dma_tag_create(NULL, PAGE_SIZE, 0,
				   BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,
				   NULL, NULL,
				   nfe_rx_ring_count * descsize, 1,
				   nfe_rx_ring_count * descsize,
				   0, &ring->tag);
	if (error) {
		if_printf(&sc->arpcom.ac_if,
			  "could not create desc RX DMA tag\n");
		return error;
	}

	error = bus_dmamem_alloc(ring->tag, desc, BUS_DMA_WAITOK | BUS_DMA_ZERO,
				 &ring->map);
	if (error) {
		if_printf(&sc->arpcom.ac_if,
			  "could not allocate RX desc DMA memory\n");
		bus_dma_tag_destroy(ring->tag);
		ring->tag = NULL;
		return error;
	}

	error = bus_dmamap_load(ring->tag, ring->map, *desc,
				nfe_rx_ring_count * descsize,
				nfe_ring_dma_addr, &ring->physaddr,
				BUS_DMA_WAITOK);
	if (error) {
		if_printf(&sc->arpcom.ac_if,
			  "could not load RX desc DMA map\n");
		bus_dmamem_free(ring->tag, *desc, ring->map);
		bus_dma_tag_destroy(ring->tag);
		ring->tag = NULL;
		return error;
	}

	if (sc->sc_flags & NFE_JUMBO_SUP) {
		error = nfe_jpool_alloc(sc, ring);
		if (error) {
			if_printf(&sc->arpcom.ac_if,
				  "could not allocate jumbo frames\n");
			return error;
		}
	}

	error = bus_dma_tag_create(NULL, 1, 0,
				   BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,
				   NULL, NULL,
				   MCLBYTES, 1, MCLBYTES,
				   0, &ring->data_tag);
	if (error) {
		if_printf(&sc->arpcom.ac_if,
			  "could not create RX mbuf DMA tag\n");
		return error;
	}

	/* Create a spare RX mbuf DMA map */
	error = bus_dmamap_create(ring->data_tag, 0, &ring->data_tmpmap);
	if (error) {
		if_printf(&sc->arpcom.ac_if,
			  "could not create spare RX mbuf DMA map\n");
		bus_dma_tag_destroy(ring->data_tag);
		ring->data_tag = NULL;
		return error;
	}

	for (i = 0; i < nfe_rx_ring_count; i++) {
		error = bus_dmamap_create(ring->data_tag, 0,
					  &ring->data[i].map);
		if (error) {
			if_printf(&sc->arpcom.ac_if,
				  "could not create %dth RX mbuf DMA map\n", i);

			for (j = 0; j < i; ++j) {
				bus_dmamap_destroy(ring->data_tag,
						   ring->data[j].map);
			}
			bus_dmamap_destroy(ring->data_tag, ring->data_tmpmap);
			bus_dma_tag_destroy(ring->data_tag);
			ring->data_tag = NULL;
			return error;
		}
	}
	return 0;
}

static void
nfe_reset_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
	int i;

	for (i = 0; i < nfe_rx_ring_count; i++) {
		struct nfe_rx_data *data = &ring->data[i];

		if (data->m != NULL) {
			if ((sc->sc_flags & NFE_USE_JUMBO) == 0)
				bus_dmamap_unload(ring->data_tag, data->map);
			m_freem(data->m);
			data->m = NULL;
		}
	}
	bus_dmamap_sync(ring->tag, ring->map, BUS_DMASYNC_PREWRITE);

	ring->cur = ring->next = 0;
}

static int
nfe_init_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
	int i;

	for (i = 0; i < nfe_rx_ring_count; ++i) {
		int error;

		/* XXX should use a function pointer */
		if (sc->sc_flags & NFE_USE_JUMBO)
			error = nfe_newbuf_jumbo(sc, ring, i, 1);
		else
			error = nfe_newbuf_std(sc, ring, i, 1);
		if (error) {
			if_printf(&sc->arpcom.ac_if,
				  "could not allocate RX buffer\n");
			return error;
		}

		nfe_set_ready_rxdesc(sc, ring, i);
	}
	bus_dmamap_sync(ring->tag, ring->map, BUS_DMASYNC_PREWRITE);

	return 0;
}

static void
nfe_free_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
	if (ring->data_tag != NULL) {
		struct nfe_rx_data *data;
		int i;

		for (i = 0; i < nfe_rx_ring_count; i++) {
			data = &ring->data[i];

			if (data->m != NULL) {
				bus_dmamap_unload(ring->data_tag, data->map);
				m_freem(data->m);
			}
			bus_dmamap_destroy(ring->data_tag, data->map);
		}
		bus_dmamap_destroy(ring->data_tag, ring->data_tmpmap);
		bus_dma_tag_destroy(ring->data_tag);
	}

	nfe_jpool_free(sc, ring);

	if (ring->jbuf != NULL)
		kfree(ring->jbuf, M_DEVBUF);
	if (ring->data != NULL)
		kfree(ring->data, M_DEVBUF);

	if (ring->tag != NULL) {
		void *desc;

		if (sc->sc_flags & NFE_40BIT_ADDR)
			desc = ring->desc64;
		else
			desc = ring->desc32;

		bus_dmamap_unload(ring->tag, ring->map);
		bus_dmamem_free(ring->tag, desc, ring->map);
		bus_dma_tag_destroy(ring->tag);
	}
}

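/*
 * Jumbo buffers come from one big DMA'able chunk carved into
 * NFE_JBYTES-sized slices (see nfe_jpool_alloc()).  Free slices live on
 * rxq.jfreelist; sc_jbuf_serializer protects the list because
 * nfe_jfree(), the mbuf external-storage free routine, may run outside
 * the interface serializer.
 */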
static struct nfe_jbuf *
nfe_jalloc(struct nfe_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct nfe_jbuf *jbuf;

	lwkt_serialize_enter(&sc->sc_jbuf_serializer);

	jbuf = SLIST_FIRST(&sc->rxq.jfreelist);
	if (jbuf != NULL) {
		SLIST_REMOVE_HEAD(&sc->rxq.jfreelist, jnext);
		jbuf->inuse = 1;
	} else {
		if_printf(ifp, "no free jumbo buffer\n");
	}

	lwkt_serialize_exit(&sc->sc_jbuf_serializer);

	return jbuf;
}

static void
nfe_jfree(void *arg)
{
	struct nfe_jbuf *jbuf = arg;
	struct nfe_softc *sc = jbuf->sc;
	struct nfe_rx_ring *ring = jbuf->ring;

	if (&ring->jbuf[jbuf->slot] != jbuf)
		panic("%s: free wrong jumbo buffer\n", __func__);
	else if (jbuf->inuse == 0)
		panic("%s: jumbo buffer already freed\n", __func__);

	lwkt_serialize_enter(&sc->sc_jbuf_serializer);
	atomic_subtract_int(&jbuf->inuse, 1);
	if (jbuf->inuse == 0)
		SLIST_INSERT_HEAD(&ring->jfreelist, jbuf, jnext);
	lwkt_serialize_exit(&sc->sc_jbuf_serializer);
}

static void
nfe_jref(void *arg)
{
	struct nfe_jbuf *jbuf = arg;
	struct nfe_rx_ring *ring = jbuf->ring;

	if (&ring->jbuf[jbuf->slot] != jbuf)
		panic("%s: ref wrong jumbo buffer\n", __func__);
	else if (jbuf->inuse == 0)
		panic("%s: jumbo buffer already freed\n", __func__);

	atomic_add_int(&jbuf->inuse, 1);
}

static int
nfe_jpool_alloc(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
	struct nfe_jbuf *jbuf;
	bus_addr_t physaddr;
	caddr_t buf;
	int i, error;

	/*
	 * Allocate a big chunk of DMA'able memory.
	 */
	error = bus_dma_tag_create(NULL, PAGE_SIZE, 0,
				   BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,
				   NULL, NULL,
				   NFE_JPOOL_SIZE, 1, NFE_JPOOL_SIZE,
				   0, &ring->jtag);
	if (error) {
		if_printf(&sc->arpcom.ac_if,
			  "could not create jumbo DMA tag\n");
		return error;
	}

	error = bus_dmamem_alloc(ring->jtag, (void **)&ring->jpool,
				 BUS_DMA_WAITOK, &ring->jmap);
	if (error) {
		if_printf(&sc->arpcom.ac_if,
			  "could not allocate jumbo DMA memory\n");
		bus_dma_tag_destroy(ring->jtag);
		ring->jtag = NULL;
		return error;
	}

	error = bus_dmamap_load(ring->jtag, ring->jmap, ring->jpool,
				NFE_JPOOL_SIZE, nfe_ring_dma_addr, &physaddr,
				BUS_DMA_WAITOK);
	if (error) {
		if_printf(&sc->arpcom.ac_if,
			  "could not load jumbo DMA map\n");
		bus_dmamem_free(ring->jtag, ring->jpool, ring->jmap);
		bus_dma_tag_destroy(ring->jtag);
		ring->jtag = NULL;
		return error;
	}

	/* ..and split it into 9KB chunks */
	SLIST_INIT(&ring->jfreelist);

	buf = ring->jpool;
	for (i = 0; i < NFE_JPOOL_COUNT; i++) {
		jbuf = &ring->jbuf[i];

		jbuf->sc = sc;
		jbuf->ring = ring;
		jbuf->inuse = 0;
		jbuf->slot = i;
		jbuf->buf = buf;
		jbuf->physaddr = physaddr;

		SLIST_INSERT_HEAD(&ring->jfreelist, jbuf, jnext);

		buf += NFE_JBYTES;
		physaddr += NFE_JBYTES;
	}

	return 0;
}

static void
nfe_jpool_free(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
	if (ring->jtag != NULL) {
		bus_dmamap_unload(ring->jtag, ring->jmap);
		bus_dmamem_free(ring->jtag, ring->jpool, ring->jmap);
		bus_dma_tag_destroy(ring->jtag);
	}
}

static int
nfe_alloc_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
{
	int i, j, error, descsize;
	void **desc;

	if (sc->sc_flags & NFE_40BIT_ADDR) {
		desc = (void **)&ring->desc64;
		descsize = sizeof(struct nfe_desc64);
	} else {
		desc = (void **)&ring->desc32;
		descsize = sizeof(struct nfe_desc32);
	}

	ring->queued = 0;
	ring->cur = ring->next = 0;

	error = bus_dma_tag_create(NULL, PAGE_SIZE, 0,
				   BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,
				   NULL, NULL,
				   NFE_TX_RING_COUNT * descsize, 1,
				   NFE_TX_RING_COUNT * descsize,
				   0, &ring->tag);
	if (error) {
		if_printf(&sc->arpcom.ac_if,
			  "could not create TX desc DMA tag\n");
		return error;
	}

	error = bus_dmamem_alloc(ring->tag, desc, BUS_DMA_WAITOK | BUS_DMA_ZERO,
				 &ring->map);
	if (error) {
		if_printf(&sc->arpcom.ac_if,
			  "could not allocate TX desc DMA memory\n");
		bus_dma_tag_destroy(ring->tag);
		ring->tag = NULL;
		return error;
	}

	error = bus_dmamap_load(ring->tag, ring->map, *desc,
				NFE_TX_RING_COUNT * descsize,
				nfe_ring_dma_addr, &ring->physaddr,
				BUS_DMA_WAITOK);
	if (error) {
		if_printf(&sc->arpcom.ac_if,
			  "could not load TX desc DMA map\n");
		bus_dmamem_free(ring->tag, *desc, ring->map);
		bus_dma_tag_destroy(ring->tag);
		ring->tag = NULL;
		return error;
	}

	error = bus_dma_tag_create(NULL, PAGE_SIZE, 0,
				   BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,
				   NULL, NULL,
				   NFE_JBYTES * NFE_MAX_SCATTER,
				   NFE_MAX_SCATTER, NFE_JBYTES,
				   0, &ring->data_tag);
	if (error) {
		if_printf(&sc->arpcom.ac_if,
			  "could not create TX buf DMA tag\n");
		return error;
	}

	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
		error = bus_dmamap_create(ring->data_tag, 0,
					  &ring->data[i].map);
		if (error) {
			if_printf(&sc->arpcom.ac_if,
				  "could not create %dth TX buf DMA map\n", i);

			for (j = 0; j < i; ++j) {
				bus_dmamap_destroy(ring->data_tag,
						   ring->data[j].map);
			}
			bus_dma_tag_destroy(ring->data_tag);
			ring->data_tag = NULL;
			return error;
		}
	}
	return 0;
}

static void
nfe_reset_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
{
	int i;

	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
		struct nfe_tx_data *data = &ring->data[i];

		if (sc->sc_flags & NFE_40BIT_ADDR)
			ring->desc64[i].flags = 0;
		else
			ring->desc32[i].flags = 0;

		if (data->m != NULL) {
			bus_dmamap_sync(ring->data_tag, data->map,
					BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(ring->data_tag, data->map);
			m_freem(data->m);
			data->m = NULL;
		}
	}
	bus_dmamap_sync(ring->tag, ring->map, BUS_DMASYNC_PREWRITE);

	ring->queued = 0;
	ring->cur = ring->next = 0;
}

static int
nfe_init_tx_ring(struct nfe_softc *sc __unused,
		 struct nfe_tx_ring *ring __unused)
{
	return 0;
}

static void
nfe_free_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
{
	if (ring->data_tag != NULL) {
		struct nfe_tx_data *data;
		int i;

		for (i = 0; i < NFE_TX_RING_COUNT; ++i) {
			data = &ring->data[i];

			if (data->m != NULL) {
				bus_dmamap_unload(ring->data_tag, data->map);
				m_freem(data->m);
			}
			bus_dmamap_destroy(ring->data_tag, data->map);
		}

		bus_dma_tag_destroy(ring->data_tag);
	}

	if (ring->tag != NULL) {
		void *desc;

		if (sc->sc_flags & NFE_40BIT_ADDR)
			desc = ring->desc64;
		else
			desc = ring->desc32;

		bus_dmamap_unload(ring->tag, ring->map);
		bus_dmamem_free(ring->tag, desc, ring->map);
		bus_dma_tag_destroy(ring->tag);
	}
}

static int
nfe_ifmedia_upd(struct ifnet *ifp)
{
	struct nfe_softc *sc = ifp->if_softc;
	struct mii_data *mii = device_get_softc(sc->sc_miibus);

	if (mii->mii_instance != 0) {
		struct mii_softc *miisc;

		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
			mii_phy_reset(miisc);
	}
	mii_mediachg(mii);

	return 0;
}

static void
nfe_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct nfe_softc *sc = ifp->if_softc;
	struct mii_data *mii = device_get_softc(sc->sc_miibus);

	mii_pollstat(mii);
	ifmr->ifm_status = mii->mii_media_status;
	ifmr->ifm_active = mii->mii_media_active;
}

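/*
 * Program the hardware Rx filter.  The chip matches multicast frames
 * against a single address/mask pair, so all subscribed multicast
 * addresses are ANDed together and the mask is reduced to the bits on
 * which they agree; promiscuous/allmulti mode simply zeroes both.
 */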
static void
nfe_setmulti(struct nfe_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct ifmultiaddr *ifma;
	uint8_t addr[ETHER_ADDR_LEN], mask[ETHER_ADDR_LEN];
	uint32_t filter = NFE_RXFILTER_MAGIC;
	int i;

	if ((ifp->if_flags & (IFF_ALLMULTI | IFF_PROMISC)) != 0) {
		bzero(addr, ETHER_ADDR_LEN);
		bzero(mask, ETHER_ADDR_LEN);
		goto done;
	}

	bcopy(etherbroadcastaddr, addr, ETHER_ADDR_LEN);
	bcopy(etherbroadcastaddr, mask, ETHER_ADDR_LEN);

	LIST_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		caddr_t maddr;

		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;

		maddr = LLADDR((struct sockaddr_dl *)ifma->ifma_addr);
		for (i = 0; i < ETHER_ADDR_LEN; i++) {
			addr[i] &= maddr[i];
			mask[i] &= ~maddr[i];
		}
	}

	for (i = 0; i < ETHER_ADDR_LEN; i++)
		mask[i] |= addr[i];

done:
	addr[0] |= 0x01;	/* make sure multicast bit is set */

	NFE_WRITE(sc, NFE_MULTIADDR_HI,
		  addr[3] << 24 | addr[2] << 16 | addr[1] << 8 | addr[0]);
	NFE_WRITE(sc, NFE_MULTIADDR_LO,
		  addr[5] <<  8 | addr[4]);
	NFE_WRITE(sc, NFE_MULTIMASK_HI,
		  mask[3] << 24 | mask[2] << 16 | mask[1] << 8 | mask[0]);
	NFE_WRITE(sc, NFE_MULTIMASK_LO,
		  mask[5] <<  8 | mask[4]);

	filter |= (ifp->if_flags & IFF_PROMISC) ? NFE_PROMISC : NFE_U2M;
	NFE_WRITE(sc, NFE_RXFILTER, filter);
}

static void
nfe_get_macaddr(struct nfe_softc *sc, uint8_t *addr)
{
	uint32_t tmp;

	tmp = NFE_READ(sc, NFE_MACADDR_LO);
	addr[0] = (tmp >> 8) & 0xff;
	addr[1] = (tmp & 0xff);

	tmp = NFE_READ(sc, NFE_MACADDR_HI);
	addr[2] = (tmp >> 24) & 0xff;
	addr[3] = (tmp >> 16) & 0xff;
	addr[4] = (tmp >>  8) & 0xff;
	addr[5] = (tmp & 0xff);
}

static void
nfe_set_macaddr(struct nfe_softc *sc, const uint8_t *addr)
{
	NFE_WRITE(sc, NFE_MACADDR_LO,
		  addr[5] <<  8 | addr[4]);
	NFE_WRITE(sc, NFE_MACADDR_HI,
		  addr[3] << 24 | addr[2] << 16 | addr[1] << 8 | addr[0]);
}

static void
nfe_tick(void *arg)
{
	struct nfe_softc *sc = arg;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct mii_data *mii = device_get_softc(sc->sc_miibus);

	lwkt_serialize_enter(ifp->if_serializer);

	mii_tick(mii);
	callout_reset(&sc->sc_tick_ch, hz, nfe_tick, sc);

	lwkt_serialize_exit(ifp->if_serializer);
}

static void
nfe_ring_dma_addr(void *arg, bus_dma_segment_t *seg, int nseg, int error)
{
	if (error)
		return;

	KASSERT(nseg == 1, ("too many segments, should be 1\n"));

	*((uint32_t *)arg) = seg->ds_addr;
}

static void
nfe_buf_dma_addr(void *arg, bus_dma_segment_t *segs, int nsegs,
		 bus_size_t mapsz __unused, int error)
{
	struct nfe_dma_ctx *ctx = arg;
	int i;

	if (error)
		return;

	KASSERT(nsegs <= ctx->nsegs,
		("too many segments(%d), should be <= %d\n",
		 nsegs, ctx->nsegs));

	ctx->nsegs = nsegs;
	for (i = 0; i < nsegs; ++i)
		ctx->segs[i] = segs[i];
}

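/*
 * Attach a fresh standard (MCLBYTES) mbuf cluster to Rx slot `idx'.
 * The new mbuf is loaded into the spare DMA map first; only when that
 * succeeds are the slot's map and the spare map swapped, so the old
 * buffer stays mapped and usable if allocation fails.
 */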
static int
nfe_newbuf_std(struct nfe_softc *sc, struct nfe_rx_ring *ring, int idx,
	       int wait)
{
	struct nfe_rx_data *data = &ring->data[idx];
	struct nfe_dma_ctx ctx;
	bus_dma_segment_t seg;
	bus_dmamap_t map;
	struct mbuf *m;
	int error;

	m = m_getcl(wait ? MB_WAIT : MB_DONTWAIT, MT_DATA, M_PKTHDR);
	if (m == NULL)
		return ENOBUFS;
	m->m_len = m->m_pkthdr.len = MCLBYTES;

	ctx.nsegs = 1;
	ctx.segs = &seg;
	error = bus_dmamap_load_mbuf(ring->data_tag, ring->data_tmpmap,
				     m, nfe_buf_dma_addr, &ctx,
				     wait ? BUS_DMA_WAITOK : BUS_DMA_NOWAIT);
	if (error) {
		m_freem(m);
		if_printf(&sc->arpcom.ac_if, "could not map RX mbuf %d\n",
			  error);
		return error;
	}

	/* Unload originally mapped mbuf */
	bus_dmamap_unload(ring->data_tag, data->map);

	/* Swap this DMA map with tmp DMA map */
	map = data->map;
	data->map = ring->data_tmpmap;
	ring->data_tmpmap = map;

	/* Caller is assumed to have collected the old mbuf */
	data->m = m;

	nfe_set_paddr_rxdesc(sc, ring, idx, seg.ds_addr);

	bus_dmamap_sync(ring->data_tag, data->map, BUS_DMASYNC_PREREAD);
	return 0;
}

static int
nfe_newbuf_jumbo(struct nfe_softc *sc, struct nfe_rx_ring *ring, int idx,
		 int wait)
{
	struct nfe_rx_data *data = &ring->data[idx];
	struct nfe_jbuf *jbuf;
	struct mbuf *m;

	MGETHDR(m, wait ? MB_WAIT : MB_DONTWAIT, MT_DATA);
	if (m == NULL)
		return ENOBUFS;

	jbuf = nfe_jalloc(sc);
	if (jbuf == NULL) {
		m_freem(m);
		if_printf(&sc->arpcom.ac_if, "jumbo allocation failed "
			  "-- packet dropped!\n");
		return ENOBUFS;
	}

	m->m_ext.ext_arg = jbuf;
	m->m_ext.ext_buf = jbuf->buf;
	m->m_ext.ext_free = nfe_jfree;
	m->m_ext.ext_ref = nfe_jref;
	m->m_ext.ext_size = NFE_JBYTES;

	m->m_data = m->m_ext.ext_buf;
	m->m_flags |= M_EXT;
	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;

	/* Caller is assumed to have collected the old mbuf */
	data->m = m;

	nfe_set_paddr_rxdesc(sc, ring, idx, jbuf->physaddr);

	bus_dmamap_sync(ring->jtag, ring->jmap, BUS_DMASYNC_PREREAD);
	return 0;
}

static void
nfe_set_paddr_rxdesc(struct nfe_softc *sc, struct nfe_rx_ring *ring, int idx,
		     bus_addr_t physaddr)
{
	if (sc->sc_flags & NFE_40BIT_ADDR) {
		struct nfe_desc64 *desc64 = &ring->desc64[idx];

#if defined(__LP64__)
		desc64->physaddr[0] = htole32(physaddr >> 32);
#endif
		desc64->physaddr[1] = htole32(physaddr & 0xffffffff);
	} else {
		struct nfe_desc32 *desc32 = &ring->desc32[idx];

		desc32->physaddr = htole32(physaddr);
	}
}

static void
nfe_set_ready_rxdesc(struct nfe_softc *sc, struct nfe_rx_ring *ring, int idx)
{
	if (sc->sc_flags & NFE_40BIT_ADDR) {
		struct nfe_desc64 *desc64 = &ring->desc64[idx];

		desc64->length = htole16(ring->bufsz);
		desc64->flags = htole16(NFE_RX_READY);
	} else {
		struct nfe_desc32 *desc32 = &ring->desc32[idx];

		desc32->length = htole16(ring->bufsz);
		desc32->flags = htole16(NFE_RX_READY);
	}
}