1 /* $OpenBSD: if_nfe.c,v 1.63 2006/06/17 18:00:43 brad Exp $ */
2 /* $DragonFly: src/sys/dev/netif/nfe/if_nfe.c,v 1.18 2008/03/10 10:47:57 sephe Exp $ */
4 /*
5 * Copyright (c) 2006 The DragonFly Project. All rights reserved.
6 *
7 * This code is derived from software contributed to The DragonFly Project
8 * by Sepherosa Ziehau <sepherosa@gmail.com> and
9 * Matthew Dillon <dillon@apollo.backplane.com>
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
15 * 1. Redistributions of source code must retain the above copyright
16 * notice, this list of conditions and the following disclaimer.
17 * 2. Redistributions in binary form must reproduce the above copyright
18 * notice, this list of conditions and the following disclaimer in
19 * the documentation and/or other materials provided with the
20 * distribution.
21 * 3. Neither the name of The DragonFly Project nor the names of its
22 * contributors may be used to endorse or promote products derived
23 * from this software without specific, prior written permission.
25 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
26 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
27 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
28 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
29 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
30 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
31 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
32 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
33 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
34 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
35 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
36 * SUCH DAMAGE.
40 * Copyright (c) 2006 Damien Bergamini <damien.bergamini@free.fr>
41 * Copyright (c) 2005, 2006 Jonathan Gray <jsg@openbsd.org>
43 * Permission to use, copy, modify, and distribute this software for any
44 * purpose with or without fee is hereby granted, provided that the above
45 * copyright notice and this permission notice appear in all copies.
47 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
48 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
49 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
50 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
51 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
52 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
53 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
56 /* Driver for NVIDIA nForce MCP Fast Ethernet and Gigabit Ethernet */
58 #include "opt_polling.h"
60 #include <sys/param.h>
61 #include <sys/endian.h>
62 #include <sys/kernel.h>
63 #include <sys/bus.h>
64 #include <sys/proc.h>
65 #include <sys/rman.h>
66 #include <sys/serialize.h>
67 #include <sys/socket.h>
68 #include <sys/sockio.h>
69 #include <sys/sysctl.h>
71 #include <net/ethernet.h>
72 #include <net/if.h>
73 #include <net/bpf.h>
74 #include <net/if_arp.h>
75 #include <net/if_dl.h>
76 #include <net/if_media.h>
77 #include <net/ifq_var.h>
78 #include <net/if_types.h>
79 #include <net/if_var.h>
80 #include <net/vlan/if_vlan_var.h>
82 #include <bus/pci/pcireg.h>
83 #include <bus/pci/pcivar.h>
84 #include <bus/pci/pcidevs.h>
86 #include <dev/netif/mii_layer/mii.h>
87 #include <dev/netif/mii_layer/miivar.h>
89 #include "miibus_if.h"
91 #include <dev/netif/nfe/if_nfereg.h>
92 #include <dev/netif/nfe/if_nfevar.h>
94 #define NFE_CSUM
95 #define NFE_CSUM_FEATURES (CSUM_IP | CSUM_TCP | CSUM_UDP)
97 static int nfe_probe(device_t);
98 static int nfe_attach(device_t);
99 static int nfe_detach(device_t);
100 static void nfe_shutdown(device_t);
101 static int nfe_resume(device_t);
102 static int nfe_suspend(device_t);
104 static int nfe_miibus_readreg(device_t, int, int);
105 static void nfe_miibus_writereg(device_t, int, int, int);
106 static void nfe_miibus_statchg(device_t);
108 #ifdef DEVICE_POLLING
109 static void nfe_poll(struct ifnet *, enum poll_cmd, int);
110 #endif
111 static void nfe_intr(void *);
112 static int nfe_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
113 static void nfe_rxeof(struct nfe_softc *);
114 static void nfe_txeof(struct nfe_softc *);
115 static int nfe_encap(struct nfe_softc *, struct nfe_tx_ring *,
116 struct mbuf *);
117 static void nfe_start(struct ifnet *);
118 static void nfe_watchdog(struct ifnet *);
119 static void nfe_init(void *);
120 static void nfe_stop(struct nfe_softc *);
121 static struct nfe_jbuf *nfe_jalloc(struct nfe_softc *);
122 static void nfe_jfree(void *);
123 static void nfe_jref(void *);
124 static int nfe_jpool_alloc(struct nfe_softc *, struct nfe_rx_ring *);
125 static void nfe_jpool_free(struct nfe_softc *, struct nfe_rx_ring *);
126 static int nfe_alloc_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
127 static void nfe_reset_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
128 static int nfe_init_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
129 static void nfe_free_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
130 static int nfe_alloc_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
131 static void nfe_reset_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
132 static int nfe_init_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
133 static void nfe_free_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
134 static int nfe_ifmedia_upd(struct ifnet *);
135 static void nfe_ifmedia_sts(struct ifnet *, struct ifmediareq *);
136 static void nfe_setmulti(struct nfe_softc *);
137 static void nfe_get_macaddr(struct nfe_softc *, uint8_t *);
138 static void nfe_set_macaddr(struct nfe_softc *, const uint8_t *);
139 static void nfe_tick(void *);
140 static void nfe_ring_dma_addr(void *, bus_dma_segment_t *, int, int);
141 static void nfe_buf_dma_addr(void *, bus_dma_segment_t *, int, bus_size_t,
142 int);
143 static void nfe_set_paddr_rxdesc(struct nfe_softc *, struct nfe_rx_ring *,
144 int, bus_addr_t);
145 static void nfe_set_ready_rxdesc(struct nfe_softc *, struct nfe_rx_ring *,
146 int);
147 static int nfe_newbuf_std(struct nfe_softc *, struct nfe_rx_ring *, int,
148 int);
149 static int nfe_newbuf_jumbo(struct nfe_softc *, struct nfe_rx_ring *, int,
150 int);
152 static int nfe_sysctl_imtime(SYSCTL_HANDLER_ARGS);
154 #define NFE_DEBUG
155 #ifdef NFE_DEBUG
157 static int nfe_debug = 0;
158 static int nfe_rx_ring_count = NFE_RX_RING_DEF_COUNT;
159 static int nfe_imtime = -1;
161 TUNABLE_INT("hw.nfe.rx_ring_count", &nfe_rx_ring_count);
162 TUNABLE_INT("hw.nfe.imtime", &nfe_imtime);
163 TUNABLE_INT("hw.nfe.debug", &nfe_debug);
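/*
 * These are boot-time tunables; as a sketch (values purely illustrative),
 * they could be set from loader.conf(5) like:
 *
 *	hw.nfe.rx_ring_count="256"
 *	hw.nfe.imtime="250"
 *	hw.nfe.debug="1"
 *
 * Per-device sysctl nodes (rx_ring_count, imtimer, debug) are also created
 * under hw.nfeN by nfe_attach().
 */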
165 #define DPRINTF(sc, fmt, ...) do { \
166 if ((sc)->sc_debug) { \
167 if_printf(&(sc)->arpcom.ac_if, \
168 fmt, __VA_ARGS__); \
170 } while (0)
172 #define DPRINTFN(sc, lv, fmt, ...) do { \
173 if ((sc)->sc_debug >= (lv)) { \
174 if_printf(&(sc)->arpcom.ac_if, \
175 fmt, __VA_ARGS__); \
177 } while (0)
179 #else /* !NFE_DEBUG */
181 #define DPRINTF(sc, fmt, ...)
182 #define DPRINTFN(sc, lv, fmt, ...)
184 #endif /* NFE_DEBUG */
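/*
 * Usage note: DPRINTF() fires whenever sc_debug is non-zero, while
 * DPRINTFN() additionally requires sc_debug >= lv, e.g.
 *
 *	DPRINTFN(sc, 2, "mii read phy %d reg 0x%x ret 0x%x\n", phy, reg, val);
 *
 * Both macros need at least one variadic argument; callers pass a dummy ""
 * when there is nothing to format.
 */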
186 struct nfe_dma_ctx {
187 int nsegs;
188 bus_dma_segment_t *segs;
191 static const struct nfe_dev {
192 uint16_t vid;
193 uint16_t did;
194 const char *desc;
195 } nfe_devices[] = {
196 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE_LAN,
197 "NVIDIA nForce Fast Ethernet" },
199 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE2_LAN,
200 "NVIDIA nForce2 Fast Ethernet" },
202 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN1,
203 "NVIDIA nForce3 Gigabit Ethernet" },
 205 /* XXX TGEN the next chip can also be found in the nForce2 Ultra 400Gb
 206    chipset, and possibly also the 400R; it may be that both nForce2- and
 207    nForce3-based boards can use the same MCPs (= southbridges) */
208 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN2,
209 "NVIDIA nForce3 Gigabit Ethernet" },
211 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN3,
212 "NVIDIA nForce3 Gigabit Ethernet" },
214 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN4,
215 "NVIDIA nForce3 Gigabit Ethernet" },
217 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN5,
218 "NVIDIA nForce3 Gigabit Ethernet" },
220 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_CK804_LAN1,
221 "NVIDIA CK804 Gigabit Ethernet" },
223 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_CK804_LAN2,
224 "NVIDIA CK804 Gigabit Ethernet" },
226 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN1,
227 "NVIDIA MCP04 Gigabit Ethernet" },
229 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN2,
230 "NVIDIA MCP04 Gigabit Ethernet" },
232 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP51_LAN1,
233 "NVIDIA MCP51 Gigabit Ethernet" },
235 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP51_LAN2,
236 "NVIDIA MCP51 Gigabit Ethernet" },
238 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN1,
239 "NVIDIA MCP55 Gigabit Ethernet" },
241 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN2,
242 "NVIDIA MCP55 Gigabit Ethernet" },
244 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN1,
245 "NVIDIA MCP61 Gigabit Ethernet" },
247 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN2,
248 "NVIDIA MCP61 Gigabit Ethernet" },
250 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN3,
251 "NVIDIA MCP61 Gigabit Ethernet" },
253 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN4,
254 "NVIDIA MCP61 Gigabit Ethernet" },
256 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN1,
257 "NVIDIA MCP65 Gigabit Ethernet" },
259 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN2,
260 "NVIDIA MCP65 Gigabit Ethernet" },
262 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN3,
263 "NVIDIA MCP65 Gigabit Ethernet" },
265 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN4,
266 "NVIDIA MCP65 Gigabit Ethernet" },
268 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN1,
269 "NVIDIA MCP67 Gigabit Ethernet" },
271 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN2,
272 "NVIDIA MCP67 Gigabit Ethernet" },
274 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN3,
275 "NVIDIA MCP67 Gigabit Ethernet" },
277 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN4,
278 "NVIDIA MCP67 Gigabit Ethernet" }
281 static device_method_t nfe_methods[] = {
282 /* Device interface */
283 DEVMETHOD(device_probe, nfe_probe),
284 DEVMETHOD(device_attach, nfe_attach),
285 DEVMETHOD(device_detach, nfe_detach),
286 DEVMETHOD(device_suspend, nfe_suspend),
287 DEVMETHOD(device_resume, nfe_resume),
288 DEVMETHOD(device_shutdown, nfe_shutdown),
290 /* Bus interface */
291 DEVMETHOD(bus_print_child, bus_generic_print_child),
292 DEVMETHOD(bus_driver_added, bus_generic_driver_added),
294 /* MII interface */
295 DEVMETHOD(miibus_readreg, nfe_miibus_readreg),
296 DEVMETHOD(miibus_writereg, nfe_miibus_writereg),
297 DEVMETHOD(miibus_statchg, nfe_miibus_statchg),
299 { 0, 0 }
302 static driver_t nfe_driver = {
303 "nfe",
304 nfe_methods,
305 sizeof(struct nfe_softc)
308 static devclass_t nfe_devclass;
310 DECLARE_DUMMY_MODULE(if_nfe);
311 MODULE_DEPEND(if_nfe, miibus, 1, 1, 1);
312 DRIVER_MODULE(if_nfe, pci, nfe_driver, nfe_devclass, 0, 0);
313 DRIVER_MODULE(miibus, nfe, miibus_driver, miibus_devclass, 0, 0);
315 static int
316 nfe_probe(device_t dev)
318 const struct nfe_dev *n;
319 uint16_t vid, did;
321 vid = pci_get_vendor(dev);
322 did = pci_get_device(dev);
323 for (n = nfe_devices; n->desc != NULL; ++n) {
324 if (vid == n->vid && did == n->did) {
325 struct nfe_softc *sc = device_get_softc(dev);
327 switch (did) {
328 case PCI_PRODUCT_NVIDIA_NFORCE3_LAN2:
329 case PCI_PRODUCT_NVIDIA_NFORCE3_LAN3:
330 case PCI_PRODUCT_NVIDIA_NFORCE3_LAN4:
331 case PCI_PRODUCT_NVIDIA_NFORCE3_LAN5:
332 sc->sc_flags = NFE_JUMBO_SUP |
333 NFE_HW_CSUM;
334 break;
335 case PCI_PRODUCT_NVIDIA_MCP51_LAN1:
336 case PCI_PRODUCT_NVIDIA_MCP51_LAN2:
337 case PCI_PRODUCT_NVIDIA_MCP61_LAN1:
338 case PCI_PRODUCT_NVIDIA_MCP61_LAN2:
339 case PCI_PRODUCT_NVIDIA_MCP61_LAN3:
340 case PCI_PRODUCT_NVIDIA_MCP61_LAN4:
341 case PCI_PRODUCT_NVIDIA_MCP67_LAN1:
342 case PCI_PRODUCT_NVIDIA_MCP67_LAN2:
343 case PCI_PRODUCT_NVIDIA_MCP67_LAN3:
344 case PCI_PRODUCT_NVIDIA_MCP67_LAN4:
345 sc->sc_flags = NFE_40BIT_ADDR;
346 break;
347 case PCI_PRODUCT_NVIDIA_CK804_LAN1:
348 case PCI_PRODUCT_NVIDIA_CK804_LAN2:
349 case PCI_PRODUCT_NVIDIA_MCP04_LAN1:
350 case PCI_PRODUCT_NVIDIA_MCP04_LAN2:
351 case PCI_PRODUCT_NVIDIA_MCP65_LAN1:
352 case PCI_PRODUCT_NVIDIA_MCP65_LAN2:
353 case PCI_PRODUCT_NVIDIA_MCP65_LAN3:
354 case PCI_PRODUCT_NVIDIA_MCP65_LAN4:
355 sc->sc_flags = NFE_JUMBO_SUP |
356 NFE_40BIT_ADDR |
357 NFE_HW_CSUM;
358 break;
359 case PCI_PRODUCT_NVIDIA_MCP55_LAN1:
360 case PCI_PRODUCT_NVIDIA_MCP55_LAN2:
361 sc->sc_flags = NFE_JUMBO_SUP |
362 NFE_40BIT_ADDR |
363 NFE_HW_CSUM |
364 NFE_HW_VLAN;
365 break;
368 device_set_desc(dev, n->desc);
369 device_set_async_attach(dev, TRUE);
370 return 0;
373 return ENXIO;
376 static int
377 nfe_attach(device_t dev)
379 struct nfe_softc *sc = device_get_softc(dev);
380 struct ifnet *ifp = &sc->arpcom.ac_if;
381 uint8_t eaddr[ETHER_ADDR_LEN];
382 int error;
384 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
385 lwkt_serialize_init(&sc->sc_jbuf_serializer);
388 * Initialize sysctl variables
390 sc->sc_imtime = nfe_imtime;
391 sc->sc_irq_enable = NFE_IRQ_ENABLE(sc);
392 sc->sc_rx_ring_count = nfe_rx_ring_count;
393 sc->sc_debug = nfe_debug;
395 sc->sc_mem_rid = PCIR_BAR(0);
397 #ifndef BURN_BRIDGES
398 if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0) {
399 uint32_t mem, irq;
401 mem = pci_read_config(dev, sc->sc_mem_rid, 4);
402 irq = pci_read_config(dev, PCIR_INTLINE, 4);
404 device_printf(dev, "chip is in D%d power mode "
405 "-- setting to D0\n", pci_get_powerstate(dev));
407 pci_set_powerstate(dev, PCI_POWERSTATE_D0);
409 pci_write_config(dev, sc->sc_mem_rid, mem, 4);
410 pci_write_config(dev, PCIR_INTLINE, irq, 4);
 412 #endif	/* !BURN_BRIDGES */
414 /* Enable bus mastering */
415 pci_enable_busmaster(dev);
417 /* Allocate IO memory */
418 sc->sc_mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
419 &sc->sc_mem_rid, RF_ACTIVE);
420 if (sc->sc_mem_res == NULL) {
 421                 device_printf(dev, "could not allocate io memory\n");
422 return ENXIO;
424 sc->sc_memh = rman_get_bushandle(sc->sc_mem_res);
425 sc->sc_memt = rman_get_bustag(sc->sc_mem_res);
427 /* Allocate IRQ */
428 sc->sc_irq_rid = 0;
429 sc->sc_irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
430 &sc->sc_irq_rid,
431 RF_SHAREABLE | RF_ACTIVE);
432 if (sc->sc_irq_res == NULL) {
433 device_printf(dev, "could not allocate irq\n");
434 error = ENXIO;
435 goto fail;
438 nfe_get_macaddr(sc, eaddr);
441 * Allocate Tx and Rx rings.
443 error = nfe_alloc_tx_ring(sc, &sc->txq);
444 if (error) {
445 device_printf(dev, "could not allocate Tx ring\n");
446 goto fail;
449 error = nfe_alloc_rx_ring(sc, &sc->rxq);
450 if (error) {
451 device_printf(dev, "could not allocate Rx ring\n");
452 goto fail;
456 * Create sysctl tree
458 sysctl_ctx_init(&sc->sc_sysctl_ctx);
459 sc->sc_sysctl_tree = SYSCTL_ADD_NODE(&sc->sc_sysctl_ctx,
460 SYSCTL_STATIC_CHILDREN(_hw),
461 OID_AUTO,
462 device_get_nameunit(dev),
463 CTLFLAG_RD, 0, "");
464 if (sc->sc_sysctl_tree == NULL) {
465 device_printf(dev, "can't add sysctl node\n");
466 error = ENXIO;
467 goto fail;
469 SYSCTL_ADD_PROC(&sc->sc_sysctl_ctx,
470 SYSCTL_CHILDREN(sc->sc_sysctl_tree),
471 OID_AUTO, "imtimer", CTLTYPE_INT | CTLFLAG_RW,
472 sc, 0, nfe_sysctl_imtime, "I",
473 "Interrupt moderation time (usec). "
474 "-1 to disable interrupt moderation.");
475 SYSCTL_ADD_INT(NULL, SYSCTL_CHILDREN(sc->sc_sysctl_tree), OID_AUTO,
476 "rx_ring_count", CTLFLAG_RD, &sc->sc_rx_ring_count,
477 0, "RX ring count");
478 SYSCTL_ADD_INT(NULL, SYSCTL_CHILDREN(sc->sc_sysctl_tree), OID_AUTO,
479 "debug", CTLFLAG_RW, &sc->sc_debug,
480 0, "control debugging printfs");
482 error = mii_phy_probe(dev, &sc->sc_miibus, nfe_ifmedia_upd,
483 nfe_ifmedia_sts);
484 if (error) {
485 device_printf(dev, "MII without any phy\n");
486 goto fail;
489 ifp->if_softc = sc;
490 ifp->if_mtu = ETHERMTU;
491 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
492 ifp->if_ioctl = nfe_ioctl;
493 ifp->if_start = nfe_start;
494 #ifdef DEVICE_POLLING
495 ifp->if_poll = nfe_poll;
496 #endif
497 ifp->if_watchdog = nfe_watchdog;
498 ifp->if_init = nfe_init;
499 ifq_set_maxlen(&ifp->if_snd, NFE_IFQ_MAXLEN);
500 ifq_set_ready(&ifp->if_snd);
502 ifp->if_capabilities = IFCAP_VLAN_MTU;
504 if (sc->sc_flags & NFE_HW_VLAN)
505 ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
507 #ifdef NFE_CSUM
508 if (sc->sc_flags & NFE_HW_CSUM) {
509 ifp->if_capabilities |= IFCAP_HWCSUM;
510 ifp->if_hwassist = NFE_CSUM_FEATURES;
512 #else
513 sc->sc_flags &= ~NFE_HW_CSUM;
514 #endif
515 ifp->if_capenable = ifp->if_capabilities;
517 callout_init(&sc->sc_tick_ch);
519 ether_ifattach(ifp, eaddr, NULL);
521 error = bus_setup_intr(dev, sc->sc_irq_res, INTR_MPSAFE, nfe_intr, sc,
522 &sc->sc_ih, ifp->if_serializer);
523 if (error) {
524 device_printf(dev, "could not setup intr\n");
525 ether_ifdetach(ifp);
526 goto fail;
529 return 0;
530 fail:
531 nfe_detach(dev);
532 return error;
535 static int
536 nfe_detach(device_t dev)
538 struct nfe_softc *sc = device_get_softc(dev);
540 if (device_is_attached(dev)) {
541 struct ifnet *ifp = &sc->arpcom.ac_if;
543 lwkt_serialize_enter(ifp->if_serializer);
544 nfe_stop(sc);
545 bus_teardown_intr(dev, sc->sc_irq_res, sc->sc_ih);
546 lwkt_serialize_exit(ifp->if_serializer);
548 ether_ifdetach(ifp);
551 if (sc->sc_miibus != NULL)
552 device_delete_child(dev, sc->sc_miibus);
553 bus_generic_detach(dev);
555 if (sc->sc_sysctl_tree != NULL)
556 sysctl_ctx_free(&sc->sc_sysctl_ctx);
558 if (sc->sc_irq_res != NULL) {
559 bus_release_resource(dev, SYS_RES_IRQ, sc->sc_irq_rid,
560 sc->sc_irq_res);
563 if (sc->sc_mem_res != NULL) {
564 bus_release_resource(dev, SYS_RES_MEMORY, sc->sc_mem_rid,
565 sc->sc_mem_res);
568 nfe_free_tx_ring(sc, &sc->txq);
569 nfe_free_rx_ring(sc, &sc->rxq);
571 return 0;
574 static void
575 nfe_shutdown(device_t dev)
577 struct nfe_softc *sc = device_get_softc(dev);
578 struct ifnet *ifp = &sc->arpcom.ac_if;
580 lwkt_serialize_enter(ifp->if_serializer);
581 nfe_stop(sc);
582 lwkt_serialize_exit(ifp->if_serializer);
585 static int
586 nfe_suspend(device_t dev)
588 struct nfe_softc *sc = device_get_softc(dev);
589 struct ifnet *ifp = &sc->arpcom.ac_if;
591 lwkt_serialize_enter(ifp->if_serializer);
592 nfe_stop(sc);
593 lwkt_serialize_exit(ifp->if_serializer);
595 return 0;
598 static int
599 nfe_resume(device_t dev)
601 struct nfe_softc *sc = device_get_softc(dev);
602 struct ifnet *ifp = &sc->arpcom.ac_if;
604 lwkt_serialize_enter(ifp->if_serializer);
605 if (ifp->if_flags & IFF_UP)
606 nfe_init(sc);
607 lwkt_serialize_exit(ifp->if_serializer);
609 return 0;
612 static void
613 nfe_miibus_statchg(device_t dev)
615 struct nfe_softc *sc = device_get_softc(dev);
616 struct mii_data *mii = device_get_softc(sc->sc_miibus);
617 uint32_t phy, seed, misc = NFE_MISC1_MAGIC, link = NFE_MEDIA_SET;
619 phy = NFE_READ(sc, NFE_PHY_IFACE);
620 phy &= ~(NFE_PHY_HDX | NFE_PHY_100TX | NFE_PHY_1000T);
622 seed = NFE_READ(sc, NFE_RNDSEED);
623 seed &= ~NFE_SEED_MASK;
625 if ((mii->mii_media_active & IFM_GMASK) == IFM_HDX) {
626 phy |= NFE_PHY_HDX; /* half-duplex */
627 misc |= NFE_MISC1_HDX;
630 switch (IFM_SUBTYPE(mii->mii_media_active)) {
631 case IFM_1000_T: /* full-duplex only */
632 link |= NFE_MEDIA_1000T;
633 seed |= NFE_SEED_1000T;
634 phy |= NFE_PHY_1000T;
635 break;
636 case IFM_100_TX:
637 link |= NFE_MEDIA_100TX;
638 seed |= NFE_SEED_100TX;
639 phy |= NFE_PHY_100TX;
640 break;
641 case IFM_10_T:
642 link |= NFE_MEDIA_10T;
643 seed |= NFE_SEED_10T;
644 break;
647 NFE_WRITE(sc, NFE_RNDSEED, seed); /* XXX: gigabit NICs only? */
649 NFE_WRITE(sc, NFE_PHY_IFACE, phy);
650 NFE_WRITE(sc, NFE_MISC1, misc);
651 NFE_WRITE(sc, NFE_LINKSPEED, link);
654 static int
655 nfe_miibus_readreg(device_t dev, int phy, int reg)
657 struct nfe_softc *sc = device_get_softc(dev);
658 uint32_t val;
659 int ntries;
661 NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);
663 if (NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY) {
664 NFE_WRITE(sc, NFE_PHY_CTL, NFE_PHY_BUSY);
665 DELAY(100);
668 NFE_WRITE(sc, NFE_PHY_CTL, (phy << NFE_PHYADD_SHIFT) | reg);
670 for (ntries = 0; ntries < 1000; ntries++) {
671 DELAY(100);
672 if (!(NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY))
673 break;
675 if (ntries == 1000) {
676 DPRINTFN(sc, 2, "timeout waiting for PHY %s\n", "");
677 return 0;
680 if (NFE_READ(sc, NFE_PHY_STATUS) & NFE_PHY_ERROR) {
681 DPRINTFN(sc, 2, "could not read PHY %s\n", "");
682 return 0;
685 val = NFE_READ(sc, NFE_PHY_DATA);
686 if (val != 0xffffffff && val != 0)
687 sc->mii_phyaddr = phy;
689 DPRINTFN(sc, 2, "mii read phy %d reg 0x%x ret 0x%x\n", phy, reg, val);
691 return val;
694 static void
695 nfe_miibus_writereg(device_t dev, int phy, int reg, int val)
697 struct nfe_softc *sc = device_get_softc(dev);
698 uint32_t ctl;
699 int ntries;
701 NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);
703 if (NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY) {
704 NFE_WRITE(sc, NFE_PHY_CTL, NFE_PHY_BUSY);
705 DELAY(100);
708 NFE_WRITE(sc, NFE_PHY_DATA, val);
709 ctl = NFE_PHY_WRITE | (phy << NFE_PHYADD_SHIFT) | reg;
710 NFE_WRITE(sc, NFE_PHY_CTL, ctl);
712 for (ntries = 0; ntries < 1000; ntries++) {
713 DELAY(100);
714 if (!(NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY))
715 break;
718 #ifdef NFE_DEBUG
719 if (ntries == 1000)
720 DPRINTFN(sc, 2, "could not write to PHY %s\n", "");
721 #endif
724 #ifdef DEVICE_POLLING
726 static void
727 nfe_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
729 struct nfe_softc *sc = ifp->if_softc;
731 ASSERT_SERIALIZED(ifp->if_serializer);
733 switch(cmd) {
734 case POLL_REGISTER:
735 /* Disable interrupts */
736 NFE_WRITE(sc, NFE_IRQ_MASK, 0);
737 break;
738 case POLL_DEREGISTER:
739 /* enable interrupts */
740 NFE_WRITE(sc, NFE_IRQ_MASK, sc->sc_irq_enable);
741 break;
742 case POLL_AND_CHECK_STATUS:
743 /* fall through */
744 case POLL_ONLY:
745 if (ifp->if_flags & IFF_RUNNING) {
746 nfe_rxeof(sc);
747 nfe_txeof(sc);
749 break;
753 #endif
755 static void
756 nfe_intr(void *arg)
758 struct nfe_softc *sc = arg;
759 struct ifnet *ifp = &sc->arpcom.ac_if;
760 uint32_t r;
762 r = NFE_READ(sc, NFE_IRQ_STATUS);
763 if (r == 0)
764 return; /* not for us */
765 NFE_WRITE(sc, NFE_IRQ_STATUS, r);
767 DPRINTFN(sc, 5, "%s: interrupt register %x\n", __func__, r);
769 if (r & NFE_IRQ_LINK) {
770 NFE_READ(sc, NFE_PHY_STATUS);
771 NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);
772 DPRINTF(sc, "link state changed %s\n", "");
775 if (ifp->if_flags & IFF_RUNNING) {
776 /* check Rx ring */
777 nfe_rxeof(sc);
779 /* check Tx ring */
780 nfe_txeof(sc);
784 static int
785 nfe_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data, struct ucred *cr)
787 struct nfe_softc *sc = ifp->if_softc;
788 struct ifreq *ifr = (struct ifreq *)data;
789 struct mii_data *mii;
790 int error = 0, mask;
792 switch (cmd) {
793 case SIOCSIFMTU:
794 if (((sc->sc_flags & NFE_JUMBO_SUP) &&
795 ifr->ifr_mtu > NFE_JUMBO_MTU) ||
796 ((sc->sc_flags & NFE_JUMBO_SUP) == 0 &&
797 ifr->ifr_mtu > ETHERMTU)) {
798 return EINVAL;
799 } else if (ifp->if_mtu != ifr->ifr_mtu) {
800 ifp->if_mtu = ifr->ifr_mtu;
801 nfe_init(sc);
803 break;
804 case SIOCSIFFLAGS:
805 if (ifp->if_flags & IFF_UP) {
807 * If only the PROMISC or ALLMULTI flag changes, then
808 * don't do a full re-init of the chip, just update
809 * the Rx filter.
811 if ((ifp->if_flags & IFF_RUNNING) &&
812 ((ifp->if_flags ^ sc->sc_if_flags) &
813 (IFF_ALLMULTI | IFF_PROMISC)) != 0) {
814 nfe_setmulti(sc);
815 } else {
816 if (!(ifp->if_flags & IFF_RUNNING))
817 nfe_init(sc);
819 } else {
820 if (ifp->if_flags & IFF_RUNNING)
821 nfe_stop(sc);
823 sc->sc_if_flags = ifp->if_flags;
824 break;
825 case SIOCADDMULTI:
826 case SIOCDELMULTI:
827 if (ifp->if_flags & IFF_RUNNING)
828 nfe_setmulti(sc);
829 break;
830 case SIOCSIFMEDIA:
831 case SIOCGIFMEDIA:
832 mii = device_get_softc(sc->sc_miibus);
833 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
834 break;
835 case SIOCSIFCAP:
836 mask = (ifr->ifr_reqcap ^ ifp->if_capenable) & IFCAP_HWCSUM;
837 if (mask && (ifp->if_capabilities & IFCAP_HWCSUM)) {
838 ifp->if_capenable ^= mask;
839 if (IFCAP_TXCSUM & ifp->if_capenable)
840 ifp->if_hwassist = NFE_CSUM_FEATURES;
841 else
842 ifp->if_hwassist = 0;
844 if (ifp->if_flags & IFF_RUNNING)
845 nfe_init(sc);
847 break;
848 default:
849 error = ether_ioctl(ifp, cmd, data);
850 break;
852 return error;
855 static void
856 nfe_rxeof(struct nfe_softc *sc)
858 struct ifnet *ifp = &sc->arpcom.ac_if;
859 struct nfe_rx_ring *ring = &sc->rxq;
860 int reap;
862 reap = 0;
863 bus_dmamap_sync(ring->tag, ring->map, BUS_DMASYNC_POSTREAD);
865 for (;;) {
866 struct nfe_rx_data *data = &ring->data[ring->cur];
867 struct mbuf *m;
868 uint16_t flags;
869 int len, error;
871 if (sc->sc_flags & NFE_40BIT_ADDR) {
872 struct nfe_desc64 *desc64 = &ring->desc64[ring->cur];
874 flags = le16toh(desc64->flags);
875 len = le16toh(desc64->length) & 0x3fff;
876 } else {
877 struct nfe_desc32 *desc32 = &ring->desc32[ring->cur];
879 flags = le16toh(desc32->flags);
880 len = le16toh(desc32->length) & 0x3fff;
883 if (flags & NFE_RX_READY)
884 break;
886 reap = 1;
888 if ((sc->sc_flags & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) {
889 if (!(flags & NFE_RX_VALID_V1))
890 goto skip;
892 if ((flags & NFE_RX_FIXME_V1) == NFE_RX_FIXME_V1) {
893 flags &= ~NFE_RX_ERROR;
894 len--; /* fix buffer length */
896 } else {
897 if (!(flags & NFE_RX_VALID_V2))
898 goto skip;
900 if ((flags & NFE_RX_FIXME_V2) == NFE_RX_FIXME_V2) {
901 flags &= ~NFE_RX_ERROR;
902 len--; /* fix buffer length */
906 if (flags & NFE_RX_ERROR) {
907 ifp->if_ierrors++;
908 goto skip;
911 m = data->m;
913 if (sc->sc_flags & NFE_USE_JUMBO)
914 error = nfe_newbuf_jumbo(sc, ring, ring->cur, 0);
915 else
916 error = nfe_newbuf_std(sc, ring, ring->cur, 0);
917 if (error) {
918 ifp->if_ierrors++;
919 goto skip;
922 /* finalize mbuf */
923 m->m_pkthdr.len = m->m_len = len;
924 m->m_pkthdr.rcvif = ifp;
926 if ((ifp->if_capenable & IFCAP_RXCSUM) &&
927 (flags & NFE_RX_CSUMOK)) {
928 if (flags & NFE_RX_IP_CSUMOK_V2) {
929 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED |
930 CSUM_IP_VALID;
933 if (flags &
934 (NFE_RX_UDP_CSUMOK_V2 | NFE_RX_TCP_CSUMOK_V2)) {
935 m->m_pkthdr.csum_flags |= CSUM_DATA_VALID |
936 CSUM_PSEUDO_HDR |
937 CSUM_FRAG_NOT_CHECKED;
938 m->m_pkthdr.csum_data = 0xffff;
942 ifp->if_ipackets++;
943 ifp->if_input(ifp, m);
944 skip:
945 nfe_set_ready_rxdesc(sc, ring, ring->cur);
946 sc->rxq.cur = (sc->rxq.cur + 1) % sc->sc_rx_ring_count;
949 if (reap)
950 bus_dmamap_sync(ring->tag, ring->map, BUS_DMASYNC_PREWRITE);
953 static void
954 nfe_txeof(struct nfe_softc *sc)
956 struct ifnet *ifp = &sc->arpcom.ac_if;
957 struct nfe_tx_ring *ring = &sc->txq;
958 struct nfe_tx_data *data = NULL;
960 bus_dmamap_sync(ring->tag, ring->map, BUS_DMASYNC_POSTREAD);
961 while (ring->next != ring->cur) {
962 uint16_t flags;
964 if (sc->sc_flags & NFE_40BIT_ADDR)
965 flags = le16toh(ring->desc64[ring->next].flags);
966 else
967 flags = le16toh(ring->desc32[ring->next].flags);
969 if (flags & NFE_TX_VALID)
970 break;
972 data = &ring->data[ring->next];
974 if ((sc->sc_flags & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) {
975 if (!(flags & NFE_TX_LASTFRAG_V1) && data->m == NULL)
976 goto skip;
978 if ((flags & NFE_TX_ERROR_V1) != 0) {
979 if_printf(ifp, "tx v1 error 0x%4b\n", flags,
980 NFE_V1_TXERR);
981 ifp->if_oerrors++;
982 } else {
983 ifp->if_opackets++;
985 } else {
986 if (!(flags & NFE_TX_LASTFRAG_V2) && data->m == NULL)
987 goto skip;
989 if ((flags & NFE_TX_ERROR_V2) != 0) {
990 if_printf(ifp, "tx v2 error 0x%4b\n", flags,
991 NFE_V2_TXERR);
992 ifp->if_oerrors++;
993 } else {
994 ifp->if_opackets++;
998 if (data->m == NULL) { /* should not get there */
999 if_printf(ifp,
1000 "last fragment bit w/o associated mbuf!\n");
1001 goto skip;
1004 /* last fragment of the mbuf chain transmitted */
1005 bus_dmamap_sync(ring->data_tag, data->map,
1006 BUS_DMASYNC_POSTWRITE);
1007 bus_dmamap_unload(ring->data_tag, data->map);
1008 m_freem(data->m);
1009 data->m = NULL;
1011 ifp->if_timer = 0;
1012 skip:
1013 ring->queued--;
1014 KKASSERT(ring->queued >= 0);
1015 ring->next = (ring->next + 1) % NFE_TX_RING_COUNT;
1018 if (data != NULL) { /* at least one slot freed */
1019 ifp->if_flags &= ~IFF_OACTIVE;
1020 ifp->if_start(ifp);
1024 static int
1025 nfe_encap(struct nfe_softc *sc, struct nfe_tx_ring *ring, struct mbuf *m0)
1027 struct nfe_dma_ctx ctx;
1028 bus_dma_segment_t segs[NFE_MAX_SCATTER];
1029 struct nfe_tx_data *data, *data_map;
1030 bus_dmamap_t map;
1031 struct nfe_desc64 *desc64 = NULL;
1032 struct nfe_desc32 *desc32 = NULL;
1033 uint16_t flags = 0;
1034 uint32_t vtag = 0;
1035 int error, i, j;
1037 data = &ring->data[ring->cur];
1038 map = data->map;
1039 data_map = data; /* Remember who owns the DMA map */
1041 ctx.nsegs = NFE_MAX_SCATTER;
1042 ctx.segs = segs;
1043 error = bus_dmamap_load_mbuf(ring->data_tag, map, m0,
1044 nfe_buf_dma_addr, &ctx, BUS_DMA_NOWAIT);
1045 if (error && error != EFBIG) {
1046 if_printf(&sc->arpcom.ac_if, "could not map TX mbuf\n");
1047 goto back;
1050 if (error) { /* error == EFBIG */
1051 struct mbuf *m_new;
1053 m_new = m_defrag(m0, MB_DONTWAIT);
1054 if (m_new == NULL) {
1055 if_printf(&sc->arpcom.ac_if,
1056 "could not defrag TX mbuf\n");
1057 error = ENOBUFS;
1058 goto back;
1059 } else {
1060 m0 = m_new;
1063 ctx.nsegs = NFE_MAX_SCATTER;
1064 ctx.segs = segs;
1065 error = bus_dmamap_load_mbuf(ring->data_tag, map, m0,
1066 nfe_buf_dma_addr, &ctx,
1067 BUS_DMA_NOWAIT);
1068 if (error) {
1069 if_printf(&sc->arpcom.ac_if,
1070                           "could not map defragmented TX mbuf\n");
1071 goto back;
1075 error = 0;
1077 if (ring->queued + ctx.nsegs >= NFE_TX_RING_COUNT - 1) {
1078 bus_dmamap_unload(ring->data_tag, map);
1079 error = ENOBUFS;
1080 goto back;
1083 /* setup h/w VLAN tagging */
1084 if (m0->m_flags & M_VLANTAG)
1085 vtag = m0->m_pkthdr.ether_vlantag;
1087 if (sc->arpcom.ac_if.if_capenable & IFCAP_TXCSUM) {
1088 if (m0->m_pkthdr.csum_flags & CSUM_IP)
1089 flags |= NFE_TX_IP_CSUM;
1090 if (m0->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP))
1091 flags |= NFE_TX_TCP_CSUM;
1095 * XXX urm. somebody is unaware of how hardware works. You
1096 * absolutely CANNOT set NFE_TX_VALID on the next descriptor in
1097 * the ring until the entire chain is actually *VALID*. Otherwise
1098 * the hardware may encounter a partially initialized chain that
1099 * is marked as being ready to go when it in fact is not ready to
1100 * go.
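 *
 * Hence the two passes below: the first pass fills in the address,
 * length, vtag and csum flags of every fragment but leaves NFE_TX_VALID
 * clear; only after the last-fragment bit has been set does the second
 * pass walk the chain backwards and raise NFE_TX_VALID, so the first
 * (hardware-visible) descriptor is flagged last.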
1103 for (i = 0; i < ctx.nsegs; i++) {
1104 j = (ring->cur + i) % NFE_TX_RING_COUNT;
1105 data = &ring->data[j];
1107 if (sc->sc_flags & NFE_40BIT_ADDR) {
1108 desc64 = &ring->desc64[j];
1109 #if defined(__LP64__)
1110 desc64->physaddr[0] =
1111 htole32(segs[i].ds_addr >> 32);
1112 #endif
1113 desc64->physaddr[1] =
1114 htole32(segs[i].ds_addr & 0xffffffff);
1115 desc64->length = htole16(segs[i].ds_len - 1);
1116 desc64->vtag = htole32(vtag);
1117 desc64->flags = htole16(flags);
1118 } else {
1119 desc32 = &ring->desc32[j];
1120 desc32->physaddr = htole32(segs[i].ds_addr);
1121 desc32->length = htole16(segs[i].ds_len - 1);
1122 desc32->flags = htole16(flags);
1125 /* csum flags and vtag belong to the first fragment only */
1126 flags &= ~(NFE_TX_IP_CSUM | NFE_TX_TCP_CSUM);
1127 vtag = 0;
1129 ring->queued++;
1130 KKASSERT(ring->queued <= NFE_TX_RING_COUNT);
1133 /* the whole mbuf chain has been DMA mapped, fix last descriptor */
1134 if (sc->sc_flags & NFE_40BIT_ADDR) {
1135 desc64->flags |= htole16(NFE_TX_LASTFRAG_V2);
1136 } else {
1137 if (sc->sc_flags & NFE_JUMBO_SUP)
1138 flags = NFE_TX_LASTFRAG_V2;
1139 else
1140 flags = NFE_TX_LASTFRAG_V1;
1141 desc32->flags |= htole16(flags);
1145 * Set NFE_TX_VALID backwards so the hardware doesn't see the
1146 * whole mess until the first descriptor in the map is flagged.
1148 for (i = ctx.nsegs - 1; i >= 0; --i) {
1149 j = (ring->cur + i) % NFE_TX_RING_COUNT;
1150 if (sc->sc_flags & NFE_40BIT_ADDR) {
1151 desc64 = &ring->desc64[j];
1152 desc64->flags |= htole16(NFE_TX_VALID);
1153 } else {
1154 desc32 = &ring->desc32[j];
1155 desc32->flags |= htole16(NFE_TX_VALID);
1158 ring->cur = (ring->cur + ctx.nsegs) % NFE_TX_RING_COUNT;
1160 /* Exchange DMA map */
1161 data_map->map = data->map;
1162 data->map = map;
1163 data->m = m0;
1165 bus_dmamap_sync(ring->data_tag, map, BUS_DMASYNC_PREWRITE);
1166 back:
1167 if (error)
1168 m_freem(m0);
1169 return error;
1172 static void
1173 nfe_start(struct ifnet *ifp)
1175 struct nfe_softc *sc = ifp->if_softc;
1176 struct nfe_tx_ring *ring = &sc->txq;
1177 int count = 0;
1178 struct mbuf *m0;
1180 if (ifp->if_flags & IFF_OACTIVE)
1181 return;
1183 if (ifq_is_empty(&ifp->if_snd))
1184 return;
1186 for (;;) {
1187 m0 = ifq_dequeue(&ifp->if_snd, NULL);
1188 if (m0 == NULL)
1189 break;
1191 BPF_MTAP(ifp, m0);
1193 if (nfe_encap(sc, ring, m0) != 0) {
1194 ifp->if_flags |= IFF_OACTIVE;
1195 break;
1197 ++count;
1200 * NOTE:
1201 * `m0' may be freed in nfe_encap(), so
1202 * it should not be touched any more.
1205 if (count == 0) /* nothing sent */
1206 return;
1208 /* Sync TX descriptor ring */
1209 bus_dmamap_sync(ring->tag, ring->map, BUS_DMASYNC_PREWRITE);
1211 /* Kick Tx */
1212 NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_KICKTX | sc->rxtxctl);
1215 * Set a timeout in case the chip goes out to lunch.
1217 ifp->if_timer = 5;
1220 static void
1221 nfe_watchdog(struct ifnet *ifp)
1223 struct nfe_softc *sc = ifp->if_softc;
1225 if (ifp->if_flags & IFF_RUNNING) {
1226 if_printf(ifp, "watchdog timeout - lost interrupt recovered\n");
1227 nfe_txeof(sc);
1228 return;
1231 if_printf(ifp, "watchdog timeout\n");
1233 nfe_init(ifp->if_softc);
1235 ifp->if_oerrors++;
1238 static void
1239 nfe_init(void *xsc)
1241 struct nfe_softc *sc = xsc;
1242 struct ifnet *ifp = &sc->arpcom.ac_if;
1243 uint32_t tmp;
1244 int error;
1246 nfe_stop(sc);
1249 * NOTE:
1250 * Switching between jumbo frames and normal frames should
1251 * be done _after_ nfe_stop() but _before_ nfe_init_rx_ring().
1253 if (ifp->if_mtu > ETHERMTU) {
1254 sc->sc_flags |= NFE_USE_JUMBO;
1255 sc->rxq.bufsz = NFE_JBYTES;
1256 if (bootverbose)
1257 if_printf(ifp, "use jumbo frames\n");
1258 } else {
1259 sc->sc_flags &= ~NFE_USE_JUMBO;
1260 sc->rxq.bufsz = MCLBYTES;
1261 if (bootverbose)
1262 if_printf(ifp, "use non-jumbo frames\n");
1265 error = nfe_init_tx_ring(sc, &sc->txq);
1266 if (error) {
1267 nfe_stop(sc);
1268 return;
1271 error = nfe_init_rx_ring(sc, &sc->rxq);
1272 if (error) {
1273 nfe_stop(sc);
1274 return;
1277 NFE_WRITE(sc, NFE_TX_UNK, 0);
1278 NFE_WRITE(sc, NFE_STATUS, 0);
1280 sc->rxtxctl = NFE_RXTX_BIT2;
1281 if (sc->sc_flags & NFE_40BIT_ADDR)
1282 sc->rxtxctl |= NFE_RXTX_V3MAGIC;
1283 else if (sc->sc_flags & NFE_JUMBO_SUP)
1284 sc->rxtxctl |= NFE_RXTX_V2MAGIC;
1286 if (ifp->if_capenable & IFCAP_RXCSUM)
1287 sc->rxtxctl |= NFE_RXTX_RXCSUM;
1290 * Although the adapter is capable of stripping VLAN tags from received
1291 * frames (NFE_RXTX_VTAG_STRIP), we do not enable this functionality on
1292 * purpose. This will be done in software by our network stack.
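 *
 * Note: NFE_RXTX_VTAG_INSERT below presumably only enables hardware tag
 * insertion on transmit (consumed by nfe_encap()); with stripping left
 * off, received frames keep their tag in the frame and are handled by
 * the software vlan(4) layer.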
1294 if (sc->sc_flags & NFE_HW_VLAN)
1295 sc->rxtxctl |= NFE_RXTX_VTAG_INSERT;
1297 NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_RESET | sc->rxtxctl);
1298 DELAY(10);
1299 NFE_WRITE(sc, NFE_RXTX_CTL, sc->rxtxctl);
1301 if (sc->sc_flags & NFE_HW_VLAN)
1302 NFE_WRITE(sc, NFE_VTAG_CTL, NFE_VTAG_ENABLE);
1304 NFE_WRITE(sc, NFE_SETUP_R6, 0);
1306 /* set MAC address */
1307 nfe_set_macaddr(sc, sc->arpcom.ac_enaddr);
1309 /* tell MAC where rings are in memory */
1310 #ifdef __LP64__
1311 NFE_WRITE(sc, NFE_RX_RING_ADDR_HI, sc->rxq.physaddr >> 32);
1312 #endif
1313 NFE_WRITE(sc, NFE_RX_RING_ADDR_LO, sc->rxq.physaddr & 0xffffffff);
1314 #ifdef __LP64__
1315 NFE_WRITE(sc, NFE_TX_RING_ADDR_HI, sc->txq.physaddr >> 32);
1316 #endif
1317 NFE_WRITE(sc, NFE_TX_RING_ADDR_LO, sc->txq.physaddr & 0xffffffff);
1319 NFE_WRITE(sc, NFE_RING_SIZE,
1320 (sc->sc_rx_ring_count - 1) << 16 |
1321 (NFE_TX_RING_COUNT - 1));
1323 NFE_WRITE(sc, NFE_RXBUFSZ, sc->rxq.bufsz);
1325 /* force MAC to wakeup */
1326 tmp = NFE_READ(sc, NFE_PWR_STATE);
1327 NFE_WRITE(sc, NFE_PWR_STATE, tmp | NFE_PWR_WAKEUP);
1328 DELAY(10);
1329 tmp = NFE_READ(sc, NFE_PWR_STATE);
1330 NFE_WRITE(sc, NFE_PWR_STATE, tmp | NFE_PWR_VALID);
1333 * NFE_IMTIMER generates a periodic interrupt via NFE_IRQ_TIMER.
1334 * It is unclear how wide the timer is. Base programming does
1335  * not seem to affect NFE_IRQ_TX_DONE or NFE_IRQ_RX_DONE so
1336 * we don't get any interrupt moderation. TX moderation is
1337 * possible by using the timer interrupt instead of TX_DONE.
1339 * It is unclear whether there are other bits that can be
1340 * set to make the NFE device actually do interrupt moderation
1341 * on the RX side.
1343  * For now set a 128us interval as a placeholder, but don't use
1344 * the timer.
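 *
 * sc_imtime semantics (see also nfe_sysctl_imtime): a negative value,
 * the compile-time default, programs the conservative NFE_IMTIME_DEFAULT
 * below; a positive value is taken as microseconds; zero is rejected by
 * the sysctl handler.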
1346 if (sc->sc_imtime < 0)
1347 NFE_WRITE(sc, NFE_IMTIMER, NFE_IMTIME_DEFAULT);
1348 else
1349 NFE_WRITE(sc, NFE_IMTIMER, NFE_IMTIME(sc->sc_imtime));
1351 NFE_WRITE(sc, NFE_SETUP_R1, NFE_R1_MAGIC);
1352 NFE_WRITE(sc, NFE_SETUP_R2, NFE_R2_MAGIC);
1353 NFE_WRITE(sc, NFE_SETUP_R6, NFE_R6_MAGIC);
1355 /* update MAC knowledge of PHY; generates a NFE_IRQ_LINK interrupt */
1356 NFE_WRITE(sc, NFE_STATUS, sc->mii_phyaddr << 24 | NFE_STATUS_MAGIC);
1358 NFE_WRITE(sc, NFE_SETUP_R4, NFE_R4_MAGIC);
1359 NFE_WRITE(sc, NFE_WOL_CTL, NFE_WOL_MAGIC);
1361 sc->rxtxctl &= ~NFE_RXTX_BIT2;
1362 NFE_WRITE(sc, NFE_RXTX_CTL, sc->rxtxctl);
1363 DELAY(10);
1364 NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_BIT1 | sc->rxtxctl);
1366 /* set Rx filter */
1367 nfe_setmulti(sc);
1369 nfe_ifmedia_upd(ifp);
1371 /* enable Rx */
1372 NFE_WRITE(sc, NFE_RX_CTL, NFE_RX_START);
1374 /* enable Tx */
1375 NFE_WRITE(sc, NFE_TX_CTL, NFE_TX_START);
1377 NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);
1379 #ifdef DEVICE_POLLING
1380 if ((ifp->if_flags & IFF_POLLING) == 0)
1381 #endif
1382 /* enable interrupts */
1383 NFE_WRITE(sc, NFE_IRQ_MASK, sc->sc_irq_enable);
1385 callout_reset(&sc->sc_tick_ch, hz, nfe_tick, sc);
1387 ifp->if_flags |= IFF_RUNNING;
1388 ifp->if_flags &= ~IFF_OACTIVE;
1391  * If we had stuff in the TX ring before, it's all cleaned out now,
1392  * so we are not going to get an interrupt; jump-start any pending
1393  * output.
1395 ifp->if_start(ifp);
1398 static void
1399 nfe_stop(struct nfe_softc *sc)
1401 struct ifnet *ifp = &sc->arpcom.ac_if;
1403 callout_stop(&sc->sc_tick_ch);
1405 ifp->if_timer = 0;
1406 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
1409 * Are NFE_TX_CTL and NFE_RX_CTL polled by the chip microcontroller
1410 * or do they directly reset/terminate the DMA hardware? Nobody
1411 * knows.
1413 * Add two delays:
1415 * (1) Delay before zeroing out NFE_TX_CTL. This seems to help a
1416 * watchdog timeout that occurs after a stop/init sequence. I am
1417  * theorizing that a TX KICK occurring just prior to a reinit (e.g.
1418 * due to dhclient) is queueing an interrupt to the microcontroller
1419 * which gets delayed until after we clear the control registers
1420 * down below, resulting in mass confusion. TX KICK is clearly
1421 * hardware aided whereas the other bits in the control register
1422 * are more likely to be polled by the microcontroller.
1424 * (2) Delay after zeroing out TX and RX CTL registers, under the
1425 * assumption that primary DMA is initiated and terminated by
1426 * the microcontroller and not hardware (and anyway, one can hardly
1427 * expect the DMA engine to just instantly stop!). We don't want
1428 * to rip the rings out from under it before it has had a chance to
1429 * actually stop!
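	/* Delay (1) from the comment above: before zeroing NFE_TX_CTL. */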
1431 DELAY(1000);
1433 /* Abort Tx */
1434 NFE_WRITE(sc, NFE_TX_CTL, 0);
1436 /* Disable Rx */
1437 NFE_WRITE(sc, NFE_RX_CTL, 0);
1439 /* Disable interrupts */
1440 NFE_WRITE(sc, NFE_IRQ_MASK, 0);
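	/* Delay (2) from the comment above: let any in-flight DMA wind down. */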
1442 DELAY(1000);
1444 /* Reset Tx and Rx rings */
1445 nfe_reset_tx_ring(sc, &sc->txq);
1446 nfe_reset_rx_ring(sc, &sc->rxq);
1449 static int
1450 nfe_alloc_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
1452 int i, j, error, descsize;
1453 void **desc;
1455 if (sc->sc_flags & NFE_40BIT_ADDR) {
1456 desc = (void **)&ring->desc64;
1457 descsize = sizeof(struct nfe_desc64);
1458 } else {
1459 desc = (void **)&ring->desc32;
1460 descsize = sizeof(struct nfe_desc32);
1463 ring->jbuf = kmalloc(sizeof(struct nfe_jbuf) * NFE_JPOOL_COUNT,
1464 M_DEVBUF, M_WAITOK | M_ZERO);
1465 ring->data = kmalloc(sizeof(struct nfe_rx_data) * sc->sc_rx_ring_count,
1466 M_DEVBUF, M_WAITOK | M_ZERO);
1468 ring->bufsz = MCLBYTES;
1469 ring->cur = ring->next = 0;
1471 error = bus_dma_tag_create(NULL, PAGE_SIZE, 0,
1472 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,
1473 NULL, NULL,
1474 sc->sc_rx_ring_count * descsize, 1,
1475 sc->sc_rx_ring_count * descsize,
1476 0, &ring->tag);
1477 if (error) {
1478 if_printf(&sc->arpcom.ac_if,
1479 "could not create desc RX DMA tag\n");
1480 return error;
1483 error = bus_dmamem_alloc(ring->tag, desc, BUS_DMA_WAITOK | BUS_DMA_ZERO,
1484 &ring->map);
1485 if (error) {
1486 if_printf(&sc->arpcom.ac_if,
1487 "could not allocate RX desc DMA memory\n");
1488 bus_dma_tag_destroy(ring->tag);
1489 ring->tag = NULL;
1490 return error;
1493 error = bus_dmamap_load(ring->tag, ring->map, *desc,
1494 sc->sc_rx_ring_count * descsize,
1495 nfe_ring_dma_addr, &ring->physaddr,
1496 BUS_DMA_WAITOK);
1497 if (error) {
1498 if_printf(&sc->arpcom.ac_if,
1499 "could not load RX desc DMA map\n");
1500 bus_dmamem_free(ring->tag, *desc, ring->map);
1501 bus_dma_tag_destroy(ring->tag);
1502 ring->tag = NULL;
1503 return error;
1506 if (sc->sc_flags & NFE_JUMBO_SUP) {
1507 error = nfe_jpool_alloc(sc, ring);
1508 if (error) {
1509 if_printf(&sc->arpcom.ac_if,
1510 "could not allocate jumbo frames\n");
1511 return error;
1515 error = bus_dma_tag_create(NULL, 1, 0,
1516 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,
1517 NULL, NULL,
1518 MCLBYTES, 1, MCLBYTES,
1519 0, &ring->data_tag);
1520 if (error) {
1521 if_printf(&sc->arpcom.ac_if,
1522 "could not create RX mbuf DMA tag\n");
1523 return error;
1526 /* Create a spare RX mbuf DMA map */
1527 error = bus_dmamap_create(ring->data_tag, 0, &ring->data_tmpmap);
1528 if (error) {
1529 if_printf(&sc->arpcom.ac_if,
1530 "could not create spare RX mbuf DMA map\n");
1531 bus_dma_tag_destroy(ring->data_tag);
1532 ring->data_tag = NULL;
1533 return error;
1536 for (i = 0; i < sc->sc_rx_ring_count; i++) {
1537 error = bus_dmamap_create(ring->data_tag, 0,
1538 &ring->data[i].map);
1539 if (error) {
1540 if_printf(&sc->arpcom.ac_if,
1541                                   "could not create %dth RX mbuf DMA map\n", i);
1542 goto fail;
1545 return 0;
1546 fail:
1547 for (j = 0; j < i; ++j)
1548                 bus_dmamap_destroy(ring->data_tag, ring->data[j].map);
1549 bus_dmamap_destroy(ring->data_tag, ring->data_tmpmap);
1550 bus_dma_tag_destroy(ring->data_tag);
1551 ring->data_tag = NULL;
1552 return error;
1555 static void
1556 nfe_reset_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
1558 int i;
1560 for (i = 0; i < sc->sc_rx_ring_count; i++) {
1561 struct nfe_rx_data *data = &ring->data[i];
1563 if (data->m != NULL) {
1564 if ((sc->sc_flags & NFE_USE_JUMBO) == 0)
1565 bus_dmamap_unload(ring->data_tag, data->map);
1566 m_freem(data->m);
1567 data->m = NULL;
1570 bus_dmamap_sync(ring->tag, ring->map, BUS_DMASYNC_PREWRITE);
1572 ring->cur = ring->next = 0;
1575 static int
1576 nfe_init_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
1578 int i;
1580 for (i = 0; i < sc->sc_rx_ring_count; ++i) {
1581 int error;
1583 /* XXX should use a function pointer */
1584 if (sc->sc_flags & NFE_USE_JUMBO)
1585 error = nfe_newbuf_jumbo(sc, ring, i, 1);
1586 else
1587 error = nfe_newbuf_std(sc, ring, i, 1);
1588 if (error) {
1589 if_printf(&sc->arpcom.ac_if,
1590 "could not allocate RX buffer\n");
1591 return error;
1594 nfe_set_ready_rxdesc(sc, ring, i);
1596 bus_dmamap_sync(ring->tag, ring->map, BUS_DMASYNC_PREWRITE);
1598 return 0;
1601 static void
1602 nfe_free_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
1604 if (ring->data_tag != NULL) {
1605 struct nfe_rx_data *data;
1606 int i;
1608 for (i = 0; i < sc->sc_rx_ring_count; i++) {
1609 data = &ring->data[i];
1611 if (data->m != NULL) {
1612 bus_dmamap_unload(ring->data_tag, data->map);
1613 m_freem(data->m);
1615 bus_dmamap_destroy(ring->data_tag, data->map);
1617 bus_dmamap_destroy(ring->data_tag, ring->data_tmpmap);
1618 bus_dma_tag_destroy(ring->data_tag);
1621 nfe_jpool_free(sc, ring);
1623 if (ring->jbuf != NULL)
1624 kfree(ring->jbuf, M_DEVBUF);
1625 if (ring->data != NULL)
1626 kfree(ring->data, M_DEVBUF);
1628 if (ring->tag != NULL) {
1629 void *desc;
1631 if (sc->sc_flags & NFE_40BIT_ADDR)
1632 desc = ring->desc64;
1633 else
1634 desc = ring->desc32;
1636 bus_dmamap_unload(ring->tag, ring->map);
1637 bus_dmamem_free(ring->tag, desc, ring->map);
1638 bus_dma_tag_destroy(ring->tag);
1642 static struct nfe_jbuf *
1643 nfe_jalloc(struct nfe_softc *sc)
1645 struct ifnet *ifp = &sc->arpcom.ac_if;
1646 struct nfe_jbuf *jbuf;
1648 lwkt_serialize_enter(&sc->sc_jbuf_serializer);
1650 jbuf = SLIST_FIRST(&sc->rxq.jfreelist);
1651 if (jbuf != NULL) {
1652 SLIST_REMOVE_HEAD(&sc->rxq.jfreelist, jnext);
1653 jbuf->inuse = 1;
1654 } else {
1655 if_printf(ifp, "no free jumbo buffer\n");
1658 lwkt_serialize_exit(&sc->sc_jbuf_serializer);
1660 return jbuf;
1663 static void
1664 nfe_jfree(void *arg)
1666 struct nfe_jbuf *jbuf = arg;
1667 struct nfe_softc *sc = jbuf->sc;
1668 struct nfe_rx_ring *ring = jbuf->ring;
1670 if (&ring->jbuf[jbuf->slot] != jbuf)
1671 panic("%s: free wrong jumbo buffer\n", __func__);
1672 else if (jbuf->inuse == 0)
1673 panic("%s: jumbo buffer already freed\n", __func__);
1675 lwkt_serialize_enter(&sc->sc_jbuf_serializer);
1676 atomic_subtract_int(&jbuf->inuse, 1);
1677 if (jbuf->inuse == 0)
1678 SLIST_INSERT_HEAD(&ring->jfreelist, jbuf, jnext);
1679 lwkt_serialize_exit(&sc->sc_jbuf_serializer);
1682 static void
1683 nfe_jref(void *arg)
1685 struct nfe_jbuf *jbuf = arg;
1686 struct nfe_rx_ring *ring = jbuf->ring;
1688 if (&ring->jbuf[jbuf->slot] != jbuf)
1689 panic("%s: ref wrong jumbo buffer\n", __func__);
1690 else if (jbuf->inuse == 0)
1691 panic("%s: jumbo buffer already freed\n", __func__);
1693 atomic_add_int(&jbuf->inuse, 1);
1696 static int
1697 nfe_jpool_alloc(struct nfe_softc *sc, struct nfe_rx_ring *ring)
1699 struct nfe_jbuf *jbuf;
1700 bus_addr_t physaddr;
1701 caddr_t buf;
1702 int i, error;
1705 * Allocate a big chunk of DMA'able memory.
1707 error = bus_dma_tag_create(NULL, PAGE_SIZE, 0,
1708 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,
1709 NULL, NULL,
1710 NFE_JPOOL_SIZE, 1, NFE_JPOOL_SIZE,
1711 0, &ring->jtag);
1712 if (error) {
1713 if_printf(&sc->arpcom.ac_if,
1714 "could not create jumbo DMA tag\n");
1715 return error;
1718 error = bus_dmamem_alloc(ring->jtag, (void **)&ring->jpool,
1719 BUS_DMA_WAITOK, &ring->jmap);
1720 if (error) {
1721 if_printf(&sc->arpcom.ac_if,
1722 "could not allocate jumbo DMA memory\n");
1723 bus_dma_tag_destroy(ring->jtag);
1724 ring->jtag = NULL;
1725 return error;
1728 error = bus_dmamap_load(ring->jtag, ring->jmap, ring->jpool,
1729 NFE_JPOOL_SIZE, nfe_ring_dma_addr, &physaddr,
1730 BUS_DMA_WAITOK);
1731 if (error) {
1732 if_printf(&sc->arpcom.ac_if,
1733 "could not load jumbo DMA map\n");
1734 bus_dmamem_free(ring->jtag, ring->jpool, ring->jmap);
1735 bus_dma_tag_destroy(ring->jtag);
1736 ring->jtag = NULL;
1737 return error;
1740         /* ...and split it into 9KB chunks */
1741 SLIST_INIT(&ring->jfreelist);
1743 buf = ring->jpool;
1744 for (i = 0; i < NFE_JPOOL_COUNT; i++) {
1745 jbuf = &ring->jbuf[i];
1747 jbuf->sc = sc;
1748 jbuf->ring = ring;
1749 jbuf->inuse = 0;
1750 jbuf->slot = i;
1751 jbuf->buf = buf;
1752 jbuf->physaddr = physaddr;
1754 SLIST_INSERT_HEAD(&ring->jfreelist, jbuf, jnext);
1756 buf += NFE_JBYTES;
1757 physaddr += NFE_JBYTES;
1760 return 0;
1763 static void
1764 nfe_jpool_free(struct nfe_softc *sc, struct nfe_rx_ring *ring)
1766 if (ring->jtag != NULL) {
1767 bus_dmamap_unload(ring->jtag, ring->jmap);
1768 bus_dmamem_free(ring->jtag, ring->jpool, ring->jmap);
1769 bus_dma_tag_destroy(ring->jtag);
1773 static int
1774 nfe_alloc_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
1776 int i, j, error, descsize;
1777 void **desc;
1779 if (sc->sc_flags & NFE_40BIT_ADDR) {
1780 desc = (void **)&ring->desc64;
1781 descsize = sizeof(struct nfe_desc64);
1782 } else {
1783 desc = (void **)&ring->desc32;
1784 descsize = sizeof(struct nfe_desc32);
1787 ring->queued = 0;
1788 ring->cur = ring->next = 0;
1790 error = bus_dma_tag_create(NULL, PAGE_SIZE, 0,
1791 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,
1792 NULL, NULL,
1793 NFE_TX_RING_COUNT * descsize, 1,
1794 NFE_TX_RING_COUNT * descsize,
1795 0, &ring->tag);
1796 if (error) {
1797 if_printf(&sc->arpcom.ac_if,
1798                           "could not create TX desc DMA tag\n");
1799 return error;
1802 error = bus_dmamem_alloc(ring->tag, desc, BUS_DMA_WAITOK | BUS_DMA_ZERO,
1803 &ring->map);
1804 if (error) {
1805 if_printf(&sc->arpcom.ac_if,
1806 "could not allocate TX desc DMA memory\n");
1807 bus_dma_tag_destroy(ring->tag);
1808 ring->tag = NULL;
1809 return error;
1812 error = bus_dmamap_load(ring->tag, ring->map, *desc,
1813 NFE_TX_RING_COUNT * descsize,
1814 nfe_ring_dma_addr, &ring->physaddr,
1815 BUS_DMA_WAITOK);
1816 if (error) {
1817 if_printf(&sc->arpcom.ac_if,
1818 "could not load TX desc DMA map\n");
1819 bus_dmamem_free(ring->tag, *desc, ring->map);
1820 bus_dma_tag_destroy(ring->tag);
1821 ring->tag = NULL;
1822 return error;
1825 error = bus_dma_tag_create(NULL, PAGE_SIZE, 0,
1826 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,
1827 NULL, NULL,
1828 NFE_JBYTES * NFE_MAX_SCATTER,
1829 NFE_MAX_SCATTER, NFE_JBYTES,
1830 0, &ring->data_tag);
1831 if (error) {
1832 if_printf(&sc->arpcom.ac_if,
1833 "could not create TX buf DMA tag\n");
1834 return error;
1837 for (i = 0; i < NFE_TX_RING_COUNT; i++) {
1838 error = bus_dmamap_create(ring->data_tag, 0,
1839 &ring->data[i].map);
1840 if (error) {
1841 if_printf(&sc->arpcom.ac_if,
1842 "could not create %dth TX buf DMA map\n", i);
1843 goto fail;
1847 return 0;
1848 fail:
1849 for (j = 0; j < i; ++j)
1850                 bus_dmamap_destroy(ring->data_tag, ring->data[j].map);
1851 bus_dma_tag_destroy(ring->data_tag);
1852 ring->data_tag = NULL;
1853 return error;
1856 static void
1857 nfe_reset_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
1859 int i;
1861 for (i = 0; i < NFE_TX_RING_COUNT; i++) {
1862 struct nfe_tx_data *data = &ring->data[i];
1864 if (sc->sc_flags & NFE_40BIT_ADDR)
1865 ring->desc64[i].flags = 0;
1866 else
1867 ring->desc32[i].flags = 0;
1869 if (data->m != NULL) {
1870 bus_dmamap_sync(ring->data_tag, data->map,
1871 BUS_DMASYNC_POSTWRITE);
1872 bus_dmamap_unload(ring->data_tag, data->map);
1873 m_freem(data->m);
1874 data->m = NULL;
1877 bus_dmamap_sync(ring->tag, ring->map, BUS_DMASYNC_PREWRITE);
1879 ring->queued = 0;
1880 ring->cur = ring->next = 0;
1883 static int
1884 nfe_init_tx_ring(struct nfe_softc *sc __unused,
1885 struct nfe_tx_ring *ring __unused)
1887 return 0;
1890 static void
1891 nfe_free_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
1893 if (ring->data_tag != NULL) {
1894 struct nfe_tx_data *data;
1895 int i;
1897 for (i = 0; i < NFE_TX_RING_COUNT; ++i) {
1898 data = &ring->data[i];
1900 if (data->m != NULL) {
1901 bus_dmamap_unload(ring->data_tag, data->map);
1902 m_freem(data->m);
1904 bus_dmamap_destroy(ring->data_tag, data->map);
1907 bus_dma_tag_destroy(ring->data_tag);
1910 if (ring->tag != NULL) {
1911 void *desc;
1913 if (sc->sc_flags & NFE_40BIT_ADDR)
1914 desc = ring->desc64;
1915 else
1916 desc = ring->desc32;
1918 bus_dmamap_unload(ring->tag, ring->map);
1919 bus_dmamem_free(ring->tag, desc, ring->map);
1920 bus_dma_tag_destroy(ring->tag);
1924 static int
1925 nfe_ifmedia_upd(struct ifnet *ifp)
1927 struct nfe_softc *sc = ifp->if_softc;
1928 struct mii_data *mii = device_get_softc(sc->sc_miibus);
1930 if (mii->mii_instance != 0) {
1931 struct mii_softc *miisc;
1933 LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
1934 mii_phy_reset(miisc);
1936 mii_mediachg(mii);
1938 return 0;
1941 static void
1942 nfe_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
1944 struct nfe_softc *sc = ifp->if_softc;
1945 struct mii_data *mii = device_get_softc(sc->sc_miibus);
1947 mii_pollstat(mii);
1948 ifmr->ifm_status = mii->mii_media_status;
1949 ifmr->ifm_active = mii->mii_media_active;
1952 static void
1953 nfe_setmulti(struct nfe_softc *sc)
1955 struct ifnet *ifp = &sc->arpcom.ac_if;
1956 struct ifmultiaddr *ifma;
1957 uint8_t addr[ETHER_ADDR_LEN], mask[ETHER_ADDR_LEN];
1958 uint32_t filter = NFE_RXFILTER_MAGIC;
1959 int i;
1961 if ((ifp->if_flags & (IFF_ALLMULTI | IFF_PROMISC)) != 0) {
1962 bzero(addr, ETHER_ADDR_LEN);
1963 bzero(mask, ETHER_ADDR_LEN);
1964 goto done;
1967 bcopy(etherbroadcastaddr, addr, ETHER_ADDR_LEN);
1968 bcopy(etherbroadcastaddr, mask, ETHER_ADDR_LEN);
1970 LIST_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1971 caddr_t maddr;
1973 if (ifma->ifma_addr->sa_family != AF_LINK)
1974 continue;
1976 maddr = LLADDR((struct sockaddr_dl *)ifma->ifma_addr);
1977 for (i = 0; i < ETHER_ADDR_LEN; i++) {
1978 addr[i] &= maddr[i];
1979 mask[i] &= ~maddr[i];
1983 for (i = 0; i < ETHER_ADDR_LEN; i++)
1984 mask[i] |= addr[i];
1986 done:
1987 addr[0] |= 0x01; /* make sure multicast bit is set */
1989 NFE_WRITE(sc, NFE_MULTIADDR_HI,
1990 addr[3] << 24 | addr[2] << 16 | addr[1] << 8 | addr[0]);
1991 NFE_WRITE(sc, NFE_MULTIADDR_LO,
1992 addr[5] << 8 | addr[4]);
1993 NFE_WRITE(sc, NFE_MULTIMASK_HI,
1994 mask[3] << 24 | mask[2] << 16 | mask[1] << 8 | mask[0]);
1995 NFE_WRITE(sc, NFE_MULTIMASK_LO,
1996 mask[5] << 8 | mask[4]);
1998 filter |= (ifp->if_flags & IFF_PROMISC) ? NFE_PROMISC : NFE_U2M;
1999 NFE_WRITE(sc, NFE_RXFILTER, filter);
2002 static void
2003 nfe_get_macaddr(struct nfe_softc *sc, uint8_t *addr)
2005 uint32_t tmp;
2007 tmp = NFE_READ(sc, NFE_MACADDR_LO);
2008 addr[0] = (tmp >> 8) & 0xff;
2009 addr[1] = (tmp & 0xff);
2011 tmp = NFE_READ(sc, NFE_MACADDR_HI);
2012 addr[2] = (tmp >> 24) & 0xff;
2013 addr[3] = (tmp >> 16) & 0xff;
2014 addr[4] = (tmp >> 8) & 0xff;
2015 addr[5] = (tmp & 0xff);
2018 static void
2019 nfe_set_macaddr(struct nfe_softc *sc, const uint8_t *addr)
2021 NFE_WRITE(sc, NFE_MACADDR_LO,
2022 addr[5] << 8 | addr[4]);
2023 NFE_WRITE(sc, NFE_MACADDR_HI,
2024 addr[3] << 24 | addr[2] << 16 | addr[1] << 8 | addr[0]);
2027 static void
2028 nfe_tick(void *arg)
2030 struct nfe_softc *sc = arg;
2031 struct ifnet *ifp = &sc->arpcom.ac_if;
2032 struct mii_data *mii = device_get_softc(sc->sc_miibus);
2034 lwkt_serialize_enter(ifp->if_serializer);
2036 mii_tick(mii);
2037 callout_reset(&sc->sc_tick_ch, hz, nfe_tick, sc);
2039 lwkt_serialize_exit(ifp->if_serializer);
2042 static void
2043 nfe_ring_dma_addr(void *arg, bus_dma_segment_t *seg, int nseg, int error)
2045 if (error)
2046 return;
2048 KASSERT(nseg == 1, ("too many segments, should be 1\n"));
2050 *((uint32_t *)arg) = seg->ds_addr;
2053 static void
2054 nfe_buf_dma_addr(void *arg, bus_dma_segment_t *segs, int nsegs,
2055 bus_size_t mapsz __unused, int error)
2057 struct nfe_dma_ctx *ctx = arg;
2058 int i;
2060 if (error)
2061 return;
2063 KASSERT(nsegs <= ctx->nsegs,
2064 ("too many segments(%d), should be <= %d\n",
2065 nsegs, ctx->nsegs));
2067 ctx->nsegs = nsegs;
2068 for (i = 0; i < nsegs; ++i)
2069 ctx->segs[i] = segs[i];
2072 static int
2073 nfe_newbuf_std(struct nfe_softc *sc, struct nfe_rx_ring *ring, int idx,
2074 int wait)
2076 struct nfe_rx_data *data = &ring->data[idx];
2077 struct nfe_dma_ctx ctx;
2078 bus_dma_segment_t seg;
2079 bus_dmamap_t map;
2080 struct mbuf *m;
2081 int error;
2083 m = m_getcl(wait ? MB_WAIT : MB_DONTWAIT, MT_DATA, M_PKTHDR);
2084 if (m == NULL)
2085 return ENOBUFS;
2086 m->m_len = m->m_pkthdr.len = MCLBYTES;
2088 ctx.nsegs = 1;
2089 ctx.segs = &seg;
2090 error = bus_dmamap_load_mbuf(ring->data_tag, ring->data_tmpmap,
2091 m, nfe_buf_dma_addr, &ctx,
2092 wait ? BUS_DMA_WAITOK : BUS_DMA_NOWAIT);
2093 if (error) {
2094 m_freem(m);
2095                 if_printf(&sc->arpcom.ac_if, "could not map RX mbuf %d\n", error);
2096 return error;
2099 /* Unload originally mapped mbuf */
2100 bus_dmamap_unload(ring->data_tag, data->map);
2102 /* Swap this DMA map with tmp DMA map */
2103 map = data->map;
2104 data->map = ring->data_tmpmap;
2105 ring->data_tmpmap = map;
2107 /* Caller is assumed to have collected the old mbuf */
2108 data->m = m;
2110 nfe_set_paddr_rxdesc(sc, ring, idx, seg.ds_addr);
2112 bus_dmamap_sync(ring->data_tag, data->map, BUS_DMASYNC_PREREAD);
2113 return 0;
2116 static int
2117 nfe_newbuf_jumbo(struct nfe_softc *sc, struct nfe_rx_ring *ring, int idx,
2118 int wait)
2120 struct nfe_rx_data *data = &ring->data[idx];
2121 struct nfe_jbuf *jbuf;
2122 struct mbuf *m;
2124 MGETHDR(m, wait ? MB_WAIT : MB_DONTWAIT, MT_DATA);
2125 if (m == NULL)
2126 return ENOBUFS;
2128 jbuf = nfe_jalloc(sc);
2129 if (jbuf == NULL) {
2130 m_freem(m);
2131 if_printf(&sc->arpcom.ac_if, "jumbo allocation failed "
2132 "-- packet dropped!\n");
2133 return ENOBUFS;
2136 m->m_ext.ext_arg = jbuf;
2137 m->m_ext.ext_buf = jbuf->buf;
2138 m->m_ext.ext_free = nfe_jfree;
2139 m->m_ext.ext_ref = nfe_jref;
2140 m->m_ext.ext_size = NFE_JBYTES;
2142 m->m_data = m->m_ext.ext_buf;
2143 m->m_flags |= M_EXT;
2144 m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
2146 /* Caller is assumed to have collected the old mbuf */
2147 data->m = m;
2149 nfe_set_paddr_rxdesc(sc, ring, idx, jbuf->physaddr);
2151 bus_dmamap_sync(ring->jtag, ring->jmap, BUS_DMASYNC_PREREAD);
2152 return 0;
2155 static void
2156 nfe_set_paddr_rxdesc(struct nfe_softc *sc, struct nfe_rx_ring *ring, int idx,
2157 bus_addr_t physaddr)
2159 if (sc->sc_flags & NFE_40BIT_ADDR) {
2160 struct nfe_desc64 *desc64 = &ring->desc64[idx];
2162 #if defined(__LP64__)
2163 desc64->physaddr[0] = htole32(physaddr >> 32);
2164 #endif
2165 desc64->physaddr[1] = htole32(physaddr & 0xffffffff);
2166 } else {
2167 struct nfe_desc32 *desc32 = &ring->desc32[idx];
2169 desc32->physaddr = htole32(physaddr);
2173 static void
2174 nfe_set_ready_rxdesc(struct nfe_softc *sc, struct nfe_rx_ring *ring, int idx)
2176 if (sc->sc_flags & NFE_40BIT_ADDR) {
2177 struct nfe_desc64 *desc64 = &ring->desc64[idx];
2179 desc64->length = htole16(ring->bufsz);
2180 desc64->flags = htole16(NFE_RX_READY);
2181 } else {
2182 struct nfe_desc32 *desc32 = &ring->desc32[idx];
2184 desc32->length = htole16(ring->bufsz);
2185 desc32->flags = htole16(NFE_RX_READY);
2189 static int
2190 nfe_sysctl_imtime(SYSCTL_HANDLER_ARGS)
2192 struct nfe_softc *sc = arg1;
2193 struct ifnet *ifp = &sc->arpcom.ac_if;
2194 int error, v;
2196 lwkt_serialize_enter(ifp->if_serializer);
2198 v = sc->sc_imtime;
2199 error = sysctl_handle_int(oidp, &v, 0, req);
2200 if (error || req->newptr == NULL)
2201 goto back;
2202 if (v == 0) {
2203 error = EINVAL;
2204 goto back;
2207 if (sc->sc_imtime != v) {
2208 int old_imtime = sc->sc_imtime;
2210 sc->sc_imtime = v;
2211 sc->sc_irq_enable = NFE_IRQ_ENABLE(sc);
2213 if ((ifp->if_flags & (IFF_POLLING | IFF_RUNNING))
2214 == IFF_RUNNING) {
2215 if (old_imtime > 0 && sc->sc_imtime > 0) {
2216 NFE_WRITE(sc, NFE_IMTIMER,
2217 NFE_IMTIME(sc->sc_imtime));
2218 } else if ((old_imtime * sc->sc_imtime) < 0) {
2219 ifp->if_init(sc);
2223 back:
2224 lwkt_serialize_exit(ifp->if_serializer);
2225 return error;