Add hardware csum offload support.
[dragonfly/port-amd64.git] sys/dev/netif/nfe/if_nfe.c
1 /* $OpenBSD: if_nfe.c,v 1.63 2006/06/17 18:00:43 brad Exp $ */
2 /* $DragonFly: src/sys/dev/netif/nfe/if_nfe.c,v 1.11 2007/08/08 11:38:51 sephe Exp $ */
4 /*
5 * Copyright (c) 2006 The DragonFly Project. All rights reserved.
6 *
7 * This code is derived from software contributed to The DragonFly Project
8 * by Sepherosa Ziehau <sepherosa@gmail.com> and
9 * Matthew Dillon <dillon@apollo.backplane.com>
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
15 * 1. Redistributions of source code must retain the above copyright
16 * notice, this list of conditions and the following disclaimer.
17 * 2. Redistributions in binary form must reproduce the above copyright
18 * notice, this list of conditions and the following disclaimer in
19 * the documentation and/or other materials provided with the
20 * distribution.
21 * 3. Neither the name of The DragonFly Project nor the names of its
22 * contributors may be used to endorse or promote products derived
23 * from this software without specific, prior written permission.
25 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
26 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
27 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
28 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
29 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
30 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
31 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
32 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
33 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
34 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
35 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
36 * SUCH DAMAGE.
40 * Copyright (c) 2006 Damien Bergamini <damien.bergamini@free.fr>
41 * Copyright (c) 2005, 2006 Jonathan Gray <jsg@openbsd.org>
43 * Permission to use, copy, modify, and distribute this software for any
44 * purpose with or without fee is hereby granted, provided that the above
45 * copyright notice and this permission notice appear in all copies.
47 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
48 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
49 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
50 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
51 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
52 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
53 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
56 /* Driver for NVIDIA nForce MCP Fast Ethernet and Gigabit Ethernet */
58 #include "opt_polling.h"
60 #include <sys/param.h>
61 #include <sys/endian.h>
62 #include <sys/kernel.h>
63 #include <sys/bus.h>
64 #include <sys/proc.h>
65 #include <sys/rman.h>
66 #include <sys/serialize.h>
67 #include <sys/socket.h>
68 #include <sys/sockio.h>
69 #include <sys/sysctl.h>
71 #include <net/ethernet.h>
72 #include <net/if.h>
73 #include <net/bpf.h>
74 #include <net/if_arp.h>
75 #include <net/if_dl.h>
76 #include <net/if_media.h>
77 #include <net/ifq_var.h>
78 #include <net/if_types.h>
79 #include <net/if_var.h>
80 #include <net/vlan/if_vlan_var.h>
82 #include <bus/pci/pcireg.h>
83 #include <bus/pci/pcivar.h>
84 #include <bus/pci/pcidevs.h>
86 #include <dev/netif/mii_layer/mii.h>
87 #include <dev/netif/mii_layer/miivar.h>
89 #include "miibus_if.h"
91 #include <dev/netif/nfe/if_nfereg.h>
92 #include <dev/netif/nfe/if_nfevar.h>
94 #define NFE_CSUM
95 #define NFE_CSUM_FEATURES (CSUM_IP | CSUM_TCP | CSUM_UDP)
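/*
 * NFE_CSUM_FEATURES is the set of transmit checksums we let the MAC
 * compute (IP header, TCP and UDP payload); it is handed to the stack
 * through if_hwassist when the chip advertises NFE_HW_CSUM.  Receive
 * checksumming is enabled separately via NFE_RXTX_RXCSUM in nfe_init().
 */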
97 static int nfe_probe(device_t);
98 static int nfe_attach(device_t);
99 static int nfe_detach(device_t);
100 static void nfe_shutdown(device_t);
101 static int nfe_resume(device_t);
102 static int nfe_suspend(device_t);
104 static int nfe_miibus_readreg(device_t, int, int);
105 static void nfe_miibus_writereg(device_t, int, int, int);
106 static void nfe_miibus_statchg(device_t);
108 #ifdef DEVICE_POLLING
109 static void nfe_poll(struct ifnet *, enum poll_cmd, int);
110 #endif
111 static void nfe_intr(void *);
112 static int nfe_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
113 static void nfe_rxeof(struct nfe_softc *);
114 static void nfe_txeof(struct nfe_softc *);
115 static int nfe_encap(struct nfe_softc *, struct nfe_tx_ring *,
116 struct mbuf *);
117 static void nfe_start(struct ifnet *);
118 static void nfe_watchdog(struct ifnet *);
119 static void nfe_init(void *);
120 static void nfe_stop(struct nfe_softc *);
121 static struct nfe_jbuf *nfe_jalloc(struct nfe_softc *);
122 static void nfe_jfree(void *);
123 static void nfe_jref(void *);
124 static int nfe_jpool_alloc(struct nfe_softc *, struct nfe_rx_ring *);
125 static void nfe_jpool_free(struct nfe_softc *, struct nfe_rx_ring *);
126 static int nfe_alloc_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
127 static void nfe_reset_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
128 static int nfe_init_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
129 static void nfe_free_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
130 static int nfe_alloc_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
131 static void nfe_reset_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
132 static int nfe_init_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
133 static void nfe_free_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
134 static int nfe_ifmedia_upd(struct ifnet *);
135 static void nfe_ifmedia_sts(struct ifnet *, struct ifmediareq *);
136 static void nfe_setmulti(struct nfe_softc *);
137 static void nfe_get_macaddr(struct nfe_softc *, uint8_t *);
138 static void nfe_set_macaddr(struct nfe_softc *, const uint8_t *);
139 static void nfe_tick(void *);
140 static void nfe_ring_dma_addr(void *, bus_dma_segment_t *, int, int);
141 static void nfe_buf_dma_addr(void *, bus_dma_segment_t *, int, bus_size_t,
142 int);
143 static void nfe_set_paddr_rxdesc(struct nfe_softc *, struct nfe_rx_ring *,
144 int, bus_addr_t);
145 static void nfe_set_ready_rxdesc(struct nfe_softc *, struct nfe_rx_ring *,
146 int);
147 static int nfe_newbuf_std(struct nfe_softc *, struct nfe_rx_ring *, int,
148 int);
149 static int nfe_newbuf_jumbo(struct nfe_softc *, struct nfe_rx_ring *, int,
150 int);
152 #define NFE_DEBUG
153 #ifdef NFE_DEBUG
155 static int nfe_debug = 0;
156 static int nfe_rx_ring_count = NFE_RX_RING_DEF_COUNT;
158 TUNABLE_INT("hw.nfe.rx_ring_count", &nfe_rx_ring_count);
160 SYSCTL_NODE(_hw, OID_AUTO, nfe, CTLFLAG_RD, 0, "nVidia GigE parameters");
161 SYSCTL_INT(_hw_nfe, OID_AUTO, rx_ring_count, CTLFLAG_RD, &nfe_rx_ring_count,
162 NFE_RX_RING_DEF_COUNT, "rx ring count");
163 SYSCTL_INT(_hw_nfe, OID_AUTO, debug, CTLFLAG_RW, &nfe_debug, 0,
164 "control debugging printfs");
166 #define DPRINTF(sc, fmt, ...) do { \
167 if (nfe_debug) { \
168 if_printf(&(sc)->arpcom.ac_if, \
169 fmt, __VA_ARGS__); \
171 } while (0)
173 #define DPRINTFN(sc, lv, fmt, ...) do { \
174 if (nfe_debug >= (lv)) { \
175 if_printf(&(sc)->arpcom.ac_if, \
176 fmt, __VA_ARGS__); \
178 } while (0)
180 #else /* !NFE_DEBUG */
182 #define DPRINTF(sc, fmt, ...)
183 #define DPRINTFN(sc, lv, fmt, ...)
185 #endif /* NFE_DEBUG */
187 struct nfe_dma_ctx {
188 int nsegs;
189 bus_dma_segment_t *segs;
192 static const struct nfe_dev {
193 uint16_t vid;
194 uint16_t did;
195 const char *desc;
196 } nfe_devices[] = {
197 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE_LAN,
198 "NVIDIA nForce Fast Ethernet" },
200 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE2_LAN,
201 "NVIDIA nForce2 Fast Ethernet" },
203 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN1,
204 "NVIDIA nForce3 Gigabit Ethernet" },
206 /* XXX TGEN the next chip can also be found in the nForce2 Ultra 400Gb
207 chipset, and possibly also the 400R; it might be that both nForce2-
208 and nForce3-based boards can use the same MCPs (= southbridges) */
209 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN2,
210 "NVIDIA nForce3 Gigabit Ethernet" },
212 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN3,
213 "NVIDIA nForce3 Gigabit Ethernet" },
215 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN4,
216 "NVIDIA nForce3 Gigabit Ethernet" },
218 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN5,
219 "NVIDIA nForce3 Gigabit Ethernet" },
221 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_CK804_LAN1,
222 "NVIDIA CK804 Gigabit Ethernet" },
224 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_CK804_LAN2,
225 "NVIDIA CK804 Gigabit Ethernet" },
227 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN1,
228 "NVIDIA MCP04 Gigabit Ethernet" },
230 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN2,
231 "NVIDIA MCP04 Gigabit Ethernet" },
233 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP51_LAN1,
234 "NVIDIA MCP51 Gigabit Ethernet" },
236 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP51_LAN2,
237 "NVIDIA MCP51 Gigabit Ethernet" },
239 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN1,
240 "NVIDIA MCP55 Gigabit Ethernet" },
242 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN2,
243 "NVIDIA MCP55 Gigabit Ethernet" },
245 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN1,
246 "NVIDIA MCP61 Gigabit Ethernet" },
248 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN2,
249 "NVIDIA MCP61 Gigabit Ethernet" },
251 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN3,
252 "NVIDIA MCP61 Gigabit Ethernet" },
254 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN4,
255 "NVIDIA MCP61 Gigabit Ethernet" },
257 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN1,
258 "NVIDIA MCP65 Gigabit Ethernet" },
260 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN2,
261 "NVIDIA MCP65 Gigabit Ethernet" },
263 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN3,
264 "NVIDIA MCP65 Gigabit Ethernet" },
266 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN4,
267 "NVIDIA MCP65 Gigabit Ethernet" },
269 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN1,
270 "NVIDIA MCP67 Gigabit Ethernet" },
272 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN2,
273 "NVIDIA MCP67 Gigabit Ethernet" },
275 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN3,
276 "NVIDIA MCP67 Gigabit Ethernet" },
278 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN4,
279 "NVIDIA MCP67 Gigabit Ethernet" }
282 static device_method_t nfe_methods[] = {
283 /* Device interface */
284 DEVMETHOD(device_probe, nfe_probe),
285 DEVMETHOD(device_attach, nfe_attach),
286 DEVMETHOD(device_detach, nfe_detach),
287 DEVMETHOD(device_suspend, nfe_suspend),
288 DEVMETHOD(device_resume, nfe_resume),
289 DEVMETHOD(device_shutdown, nfe_shutdown),
291 /* Bus interface */
292 DEVMETHOD(bus_print_child, bus_generic_print_child),
293 DEVMETHOD(bus_driver_added, bus_generic_driver_added),
295 /* MII interface */
296 DEVMETHOD(miibus_readreg, nfe_miibus_readreg),
297 DEVMETHOD(miibus_writereg, nfe_miibus_writereg),
298 DEVMETHOD(miibus_statchg, nfe_miibus_statchg),
300 { 0, 0 }
303 static driver_t nfe_driver = {
304 "nfe",
305 nfe_methods,
306 sizeof(struct nfe_softc)
309 static devclass_t nfe_devclass;
311 DECLARE_DUMMY_MODULE(if_nfe);
312 MODULE_DEPEND(if_nfe, miibus, 1, 1, 1);
313 DRIVER_MODULE(if_nfe, pci, nfe_driver, nfe_devclass, 0, 0);
314 DRIVER_MODULE(miibus, nfe, miibus_driver, miibus_devclass, 0, 0);
316 static int
317 nfe_probe(device_t dev)
319 const struct nfe_dev *n;
320 uint16_t vid, did;
322 vid = pci_get_vendor(dev);
323 did = pci_get_device(dev);
324 for (n = nfe_devices; n->desc != NULL; ++n) {
325 if (vid == n->vid && did == n->did) {
326 struct nfe_softc *sc = device_get_softc(dev);
328 switch (did) {
329 case PCI_PRODUCT_NVIDIA_NFORCE3_LAN2:
330 case PCI_PRODUCT_NVIDIA_NFORCE3_LAN3:
331 case PCI_PRODUCT_NVIDIA_NFORCE3_LAN4:
332 case PCI_PRODUCT_NVIDIA_NFORCE3_LAN5:
333 sc->sc_flags = NFE_JUMBO_SUP |
334 NFE_HW_CSUM;
335 break;
336 case PCI_PRODUCT_NVIDIA_MCP51_LAN1:
337 case PCI_PRODUCT_NVIDIA_MCP51_LAN2:
338 case PCI_PRODUCT_NVIDIA_MCP61_LAN1:
339 case PCI_PRODUCT_NVIDIA_MCP61_LAN2:
340 case PCI_PRODUCT_NVIDIA_MCP61_LAN3:
341 case PCI_PRODUCT_NVIDIA_MCP61_LAN4:
342 case PCI_PRODUCT_NVIDIA_MCP67_LAN1:
343 case PCI_PRODUCT_NVIDIA_MCP67_LAN2:
344 case PCI_PRODUCT_NVIDIA_MCP67_LAN3:
345 case PCI_PRODUCT_NVIDIA_MCP67_LAN4:
346 sc->sc_flags = NFE_40BIT_ADDR;
347 break;
348 case PCI_PRODUCT_NVIDIA_CK804_LAN1:
349 case PCI_PRODUCT_NVIDIA_CK804_LAN2:
350 case PCI_PRODUCT_NVIDIA_MCP04_LAN1:
351 case PCI_PRODUCT_NVIDIA_MCP04_LAN2:
352 case PCI_PRODUCT_NVIDIA_MCP65_LAN1:
353 case PCI_PRODUCT_NVIDIA_MCP65_LAN2:
354 case PCI_PRODUCT_NVIDIA_MCP65_LAN3:
355 case PCI_PRODUCT_NVIDIA_MCP65_LAN4:
356 sc->sc_flags = NFE_JUMBO_SUP |
357 NFE_40BIT_ADDR |
358 NFE_HW_CSUM;
359 break;
360 case PCI_PRODUCT_NVIDIA_MCP55_LAN1:
361 case PCI_PRODUCT_NVIDIA_MCP55_LAN2:
362 sc->sc_flags = NFE_JUMBO_SUP |
363 NFE_40BIT_ADDR |
364 NFE_HW_CSUM |
365 NFE_HW_VLAN;
366 break;
369 device_set_desc(dev, n->desc);
370 device_set_async_attach(dev, TRUE);
371 return 0;
374 return ENXIO;
377 static int
378 nfe_attach(device_t dev)
380 struct nfe_softc *sc = device_get_softc(dev);
381 struct ifnet *ifp = &sc->arpcom.ac_if;
382 uint8_t eaddr[ETHER_ADDR_LEN];
383 int error;
385 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
386 lwkt_serialize_init(&sc->sc_jbuf_serializer);
388 sc->sc_mem_rid = PCIR_BAR(0);
390 #ifndef BURN_BRIDGES
391 if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0) {
392 uint32_t mem, irq;
394 mem = pci_read_config(dev, sc->sc_mem_rid, 4);
395 irq = pci_read_config(dev, PCIR_INTLINE, 4);
397 device_printf(dev, "chip is in D%d power mode "
398 "-- setting to D0\n", pci_get_powerstate(dev));
400 pci_set_powerstate(dev, PCI_POWERSTATE_D0);
402 pci_write_config(dev, sc->sc_mem_rid, mem, 4);
403 pci_write_config(dev, PCIR_INTLINE, irq, 4);
405 #endif /* !BURN_BRIDGES */
407 /* Enable bus mastering */
408 pci_enable_busmaster(dev);
410 /* Allocate IO memory */
411 sc->sc_mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
412 &sc->sc_mem_rid, RF_ACTIVE);
413 if (sc->sc_mem_res == NULL) {
414 device_printf(dev, "could not allocate io memory\n");
415 return ENXIO;
417 sc->sc_memh = rman_get_bushandle(sc->sc_mem_res);
418 sc->sc_memt = rman_get_bustag(sc->sc_mem_res);
420 /* Allocate IRQ */
421 sc->sc_irq_rid = 0;
422 sc->sc_irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
423 &sc->sc_irq_rid,
424 RF_SHAREABLE | RF_ACTIVE);
425 if (sc->sc_irq_res == NULL) {
426 device_printf(dev, "could not allocate irq\n");
427 error = ENXIO;
428 goto fail;
431 nfe_get_macaddr(sc, eaddr);
434 * Allocate Tx and Rx rings.
436 error = nfe_alloc_tx_ring(sc, &sc->txq);
437 if (error) {
438 device_printf(dev, "could not allocate Tx ring\n");
439 goto fail;
442 error = nfe_alloc_rx_ring(sc, &sc->rxq);
443 if (error) {
444 device_printf(dev, "could not allocate Rx ring\n");
445 goto fail;
448 error = mii_phy_probe(dev, &sc->sc_miibus, nfe_ifmedia_upd,
449 nfe_ifmedia_sts);
450 if (error) {
451 device_printf(dev, "MII without any phy\n");
452 goto fail;
455 ifp->if_softc = sc;
456 ifp->if_mtu = ETHERMTU;
457 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
458 ifp->if_ioctl = nfe_ioctl;
459 ifp->if_start = nfe_start;
460 #ifdef DEVICE_POLLING
461 ifp->if_poll = nfe_poll;
462 #endif
463 ifp->if_watchdog = nfe_watchdog;
464 ifp->if_init = nfe_init;
465 ifq_set_maxlen(&ifp->if_snd, NFE_IFQ_MAXLEN);
466 ifq_set_ready(&ifp->if_snd);
468 ifp->if_capabilities = IFCAP_VLAN_MTU;
470 if (sc->sc_flags & NFE_HW_VLAN)
471 ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
473 #ifdef NFE_CSUM
474 if (sc->sc_flags & NFE_HW_CSUM) {
475 ifp->if_capabilities |= IFCAP_HWCSUM;
476 ifp->if_hwassist = NFE_CSUM_FEATURES;
478 #else
479 sc->sc_flags &= ~NFE_HW_CSUM;
480 #endif
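/*
 * Everything the hardware supports is enabled by default; individual
 * capabilities such as IFCAP_HWCSUM may be toggled later through the
 * SIOCSIFCAP ioctl (see nfe_ioctl()).
 */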
481 ifp->if_capenable = ifp->if_capabilities;
483 callout_init(&sc->sc_tick_ch);
485 ether_ifattach(ifp, eaddr, NULL);
487 error = bus_setup_intr(dev, sc->sc_irq_res, INTR_MPSAFE, nfe_intr, sc,
488 &sc->sc_ih, ifp->if_serializer);
489 if (error) {
490 device_printf(dev, "could not setup intr\n");
491 ether_ifdetach(ifp);
492 goto fail;
495 return 0;
496 fail:
497 nfe_detach(dev);
498 return error;
501 static int
502 nfe_detach(device_t dev)
504 struct nfe_softc *sc = device_get_softc(dev);
506 if (device_is_attached(dev)) {
507 struct ifnet *ifp = &sc->arpcom.ac_if;
509 lwkt_serialize_enter(ifp->if_serializer);
510 nfe_stop(sc);
511 bus_teardown_intr(dev, sc->sc_irq_res, sc->sc_ih);
512 lwkt_serialize_exit(ifp->if_serializer);
514 ether_ifdetach(ifp);
517 if (sc->sc_miibus != NULL)
518 device_delete_child(dev, sc->sc_miibus);
519 bus_generic_detach(dev);
521 if (sc->sc_irq_res != NULL) {
522 bus_release_resource(dev, SYS_RES_IRQ, sc->sc_irq_rid,
523 sc->sc_irq_res);
526 if (sc->sc_mem_res != NULL) {
527 bus_release_resource(dev, SYS_RES_MEMORY, sc->sc_mem_rid,
528 sc->sc_mem_res);
531 nfe_free_tx_ring(sc, &sc->txq);
532 nfe_free_rx_ring(sc, &sc->rxq);
534 return 0;
537 static void
538 nfe_shutdown(device_t dev)
540 struct nfe_softc *sc = device_get_softc(dev);
541 struct ifnet *ifp = &sc->arpcom.ac_if;
543 lwkt_serialize_enter(ifp->if_serializer);
544 nfe_stop(sc);
545 lwkt_serialize_exit(ifp->if_serializer);
548 static int
549 nfe_suspend(device_t dev)
551 struct nfe_softc *sc = device_get_softc(dev);
552 struct ifnet *ifp = &sc->arpcom.ac_if;
554 lwkt_serialize_enter(ifp->if_serializer);
555 nfe_stop(sc);
556 lwkt_serialize_exit(ifp->if_serializer);
558 return 0;
561 static int
562 nfe_resume(device_t dev)
564 struct nfe_softc *sc = device_get_softc(dev);
565 struct ifnet *ifp = &sc->arpcom.ac_if;
567 lwkt_serialize_enter(ifp->if_serializer);
568 if (ifp->if_flags & IFF_UP) {
569 nfe_init(sc);
570 if (ifp->if_flags & IFF_RUNNING)
571 ifp->if_start(ifp);
573 lwkt_serialize_exit(ifp->if_serializer);
575 return 0;
578 static void
579 nfe_miibus_statchg(device_t dev)
581 struct nfe_softc *sc = device_get_softc(dev);
582 struct mii_data *mii = device_get_softc(sc->sc_miibus);
583 uint32_t phy, seed, misc = NFE_MISC1_MAGIC, link = NFE_MEDIA_SET;
585 phy = NFE_READ(sc, NFE_PHY_IFACE);
586 phy &= ~(NFE_PHY_HDX | NFE_PHY_100TX | NFE_PHY_1000T);
588 seed = NFE_READ(sc, NFE_RNDSEED);
589 seed &= ~NFE_SEED_MASK;
591 if ((mii->mii_media_active & IFM_GMASK) == IFM_HDX) {
592 phy |= NFE_PHY_HDX; /* half-duplex */
593 misc |= NFE_MISC1_HDX;
596 switch (IFM_SUBTYPE(mii->mii_media_active)) {
597 case IFM_1000_T: /* full-duplex only */
598 link |= NFE_MEDIA_1000T;
599 seed |= NFE_SEED_1000T;
600 phy |= NFE_PHY_1000T;
601 break;
602 case IFM_100_TX:
603 link |= NFE_MEDIA_100TX;
604 seed |= NFE_SEED_100TX;
605 phy |= NFE_PHY_100TX;
606 break;
607 case IFM_10_T:
608 link |= NFE_MEDIA_10T;
609 seed |= NFE_SEED_10T;
610 break;
613 NFE_WRITE(sc, NFE_RNDSEED, seed); /* XXX: gigabit NICs only? */
615 NFE_WRITE(sc, NFE_PHY_IFACE, phy);
616 NFE_WRITE(sc, NFE_MISC1, misc);
617 NFE_WRITE(sc, NFE_LINKSPEED, link);
620 static int
621 nfe_miibus_readreg(device_t dev, int phy, int reg)
623 struct nfe_softc *sc = device_get_softc(dev);
624 uint32_t val;
625 int ntries;
627 NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);
629 if (NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY) {
630 NFE_WRITE(sc, NFE_PHY_CTL, NFE_PHY_BUSY);
631 DELAY(100);
634 NFE_WRITE(sc, NFE_PHY_CTL, (phy << NFE_PHYADD_SHIFT) | reg);
636 for (ntries = 0; ntries < 1000; ntries++) {
637 DELAY(100);
638 if (!(NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY))
639 break;
641 if (ntries == 1000) {
642 DPRINTFN(sc, 2, "timeout waiting for PHY %s\n", "");
643 return 0;
646 if (NFE_READ(sc, NFE_PHY_STATUS) & NFE_PHY_ERROR) {
647 DPRINTFN(sc, 2, "could not read PHY %s\n", "");
648 return 0;
651 val = NFE_READ(sc, NFE_PHY_DATA);
652 if (val != 0xffffffff && val != 0)
653 sc->mii_phyaddr = phy;
655 DPRINTFN(sc, 2, "mii read phy %d reg 0x%x ret 0x%x\n", phy, reg, val);
657 return val;
660 static void
661 nfe_miibus_writereg(device_t dev, int phy, int reg, int val)
663 struct nfe_softc *sc = device_get_softc(dev);
664 uint32_t ctl;
665 int ntries;
667 NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);
669 if (NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY) {
670 NFE_WRITE(sc, NFE_PHY_CTL, NFE_PHY_BUSY);
671 DELAY(100);
674 NFE_WRITE(sc, NFE_PHY_DATA, val);
675 ctl = NFE_PHY_WRITE | (phy << NFE_PHYADD_SHIFT) | reg;
676 NFE_WRITE(sc, NFE_PHY_CTL, ctl);
678 for (ntries = 0; ntries < 1000; ntries++) {
679 DELAY(100);
680 if (!(NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY))
681 break;
684 #ifdef NFE_DEBUG
685 if (ntries == 1000)
686 DPRINTFN(sc, 2, "could not write to PHY %s\n", "");
687 #endif
690 #ifdef DEVICE_POLLING
692 static void
693 nfe_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
695 struct nfe_softc *sc = ifp->if_softc;
697 switch(cmd) {
698 case POLL_REGISTER:
699 /* Disable interrupts */
700 NFE_WRITE(sc, NFE_IRQ_MASK, 0);
701 break;
702 case POLL_DEREGISTER:
703 /* enable interrupts */
704 NFE_WRITE(sc, NFE_IRQ_MASK, NFE_IRQ_WANTED);
705 break;
706 case POLL_AND_CHECK_STATUS:
707 /* fall through */
708 case POLL_ONLY:
709 if (ifp->if_flags & IFF_RUNNING) {
710 nfe_rxeof(sc);
711 nfe_txeof(sc);
713 break;
717 #endif
719 static void
720 nfe_intr(void *arg)
722 struct nfe_softc *sc = arg;
723 struct ifnet *ifp = &sc->arpcom.ac_if;
724 uint32_t r;
726 r = NFE_READ(sc, NFE_IRQ_STATUS);
727 if (r == 0)
728 return; /* not for us */
729 NFE_WRITE(sc, NFE_IRQ_STATUS, r);
731 DPRINTFN(sc, 5, "%s: interrupt register %x\n", __func__, r);
733 if (r & NFE_IRQ_LINK) {
734 NFE_READ(sc, NFE_PHY_STATUS);
735 NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);
736 DPRINTF(sc, "link state changed %s\n", "");
739 if (ifp->if_flags & IFF_RUNNING) {
740 /* check Rx ring */
741 nfe_rxeof(sc);
743 /* check Tx ring */
744 nfe_txeof(sc);
748 static int
749 nfe_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data, struct ucred *cr)
751 struct nfe_softc *sc = ifp->if_softc;
752 struct ifreq *ifr = (struct ifreq *)data;
753 struct mii_data *mii;
754 int error = 0, mask;
756 switch (cmd) {
757 case SIOCSIFMTU:
758 if (((sc->sc_flags & NFE_JUMBO_SUP) &&
759 ifr->ifr_mtu > NFE_JUMBO_MTU) ||
760 ((sc->sc_flags & NFE_JUMBO_SUP) == 0 &&
761 ifr->ifr_mtu > ETHERMTU)) {
762 return EINVAL;
763 } else if (ifp->if_mtu != ifr->ifr_mtu) {
764 ifp->if_mtu = ifr->ifr_mtu;
765 nfe_init(sc);
767 break;
768 case SIOCSIFFLAGS:
769 if (ifp->if_flags & IFF_UP) {
771 * If only the PROMISC or ALLMULTI flag changes, then
772 * don't do a full re-init of the chip, just update
773 * the Rx filter.
775 if ((ifp->if_flags & IFF_RUNNING) &&
776 ((ifp->if_flags ^ sc->sc_if_flags) &
777 (IFF_ALLMULTI | IFF_PROMISC)) != 0) {
778 nfe_setmulti(sc);
779 } else {
780 if (!(ifp->if_flags & IFF_RUNNING))
781 nfe_init(sc);
783 } else {
784 if (ifp->if_flags & IFF_RUNNING)
785 nfe_stop(sc);
787 sc->sc_if_flags = ifp->if_flags;
788 break;
789 case SIOCADDMULTI:
790 case SIOCDELMULTI:
791 if (ifp->if_flags & IFF_RUNNING)
792 nfe_setmulti(sc);
793 break;
794 case SIOCSIFMEDIA:
795 case SIOCGIFMEDIA:
796 mii = device_get_softc(sc->sc_miibus);
797 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
798 break;
799 case SIOCSIFCAP:
800 mask = ifr->ifr_reqcap ^ ifp->if_capenable;
801 if ((mask & IFCAP_HWCSUM) &&
802 (ifp->if_capabilities & IFCAP_HWCSUM)) {
803 if (IFCAP_HWCSUM & ifp->if_capenable) {
804 ifp->if_capenable &= ~IFCAP_HWCSUM;
805 ifp->if_hwassist = 0;
806 } else {
807 ifp->if_capenable |= IFCAP_HWCSUM;
808 ifp->if_hwassist = NFE_CSUM_FEATURES;
811 if (ifp->if_flags & IFF_RUNNING)
812 nfe_init(sc);
814 break;
815 default:
816 error = ether_ioctl(ifp, cmd, data);
817 break;
819 return error;
822 static void
823 nfe_rxeof(struct nfe_softc *sc)
825 struct ifnet *ifp = &sc->arpcom.ac_if;
826 struct nfe_rx_ring *ring = &sc->rxq;
827 int reap;
829 reap = 0;
830 bus_dmamap_sync(ring->tag, ring->map, BUS_DMASYNC_POSTREAD);
832 for (;;) {
833 struct nfe_rx_data *data = &ring->data[ring->cur];
834 struct mbuf *m;
835 uint16_t flags;
836 int len, error;
838 if (sc->sc_flags & NFE_40BIT_ADDR) {
839 struct nfe_desc64 *desc64 = &ring->desc64[ring->cur];
841 flags = le16toh(desc64->flags);
842 len = le16toh(desc64->length) & 0x3fff;
843 } else {
844 struct nfe_desc32 *desc32 = &ring->desc32[ring->cur];
846 flags = le16toh(desc32->flags);
847 len = le16toh(desc32->length) & 0x3fff;
850 if (flags & NFE_RX_READY)
851 break;
853 reap = 1;
855 if ((sc->sc_flags & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) {
856 if (!(flags & NFE_RX_VALID_V1))
857 goto skip;
859 if ((flags & NFE_RX_FIXME_V1) == NFE_RX_FIXME_V1) {
860 flags &= ~NFE_RX_ERROR;
861 len--; /* fix buffer length */
863 } else {
864 if (!(flags & NFE_RX_VALID_V2))
865 goto skip;
867 if ((flags & NFE_RX_FIXME_V2) == NFE_RX_FIXME_V2) {
868 flags &= ~NFE_RX_ERROR;
869 len--; /* fix buffer length */
873 if (flags & NFE_RX_ERROR) {
874 ifp->if_ierrors++;
875 goto skip;
878 m = data->m;
880 if (sc->sc_flags & NFE_USE_JUMBO)
881 error = nfe_newbuf_jumbo(sc, ring, ring->cur, 0);
882 else
883 error = nfe_newbuf_std(sc, ring, ring->cur, 0);
884 if (error) {
885 ifp->if_ierrors++;
886 goto skip;
889 /* finalize mbuf */
890 m->m_pkthdr.len = m->m_len = len;
891 m->m_pkthdr.rcvif = ifp;
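/*
 * Map the hardware RX checksum status onto mbuf csum_flags: a verified
 * IP header checksum becomes CSUM_IP_CHECKED | CSUM_IP_VALID, and a
 * verified TCP/UDP checksum is reported as CSUM_DATA_VALID |
 * CSUM_PSEUDO_HDR with csum_data set to 0xffff, so the stack can skip
 * its own verification.
 */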
893 if ((ifp->if_capenable & IFCAP_HWCSUM) &&
894 (flags & NFE_RX_CSUMOK)) {
895 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
897 if (flags & NFE_RX_IP_CSUMOK_V2)
898 m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
900 if (flags &
901 (NFE_RX_UDP_CSUMOK_V2 | NFE_RX_TCP_CSUMOK_V2)) {
902 m->m_pkthdr.csum_flags |= CSUM_DATA_VALID |
903 CSUM_PSEUDO_HDR;
904 m->m_pkthdr.csum_data = 0xffff;
908 ifp->if_ipackets++;
909 ifp->if_input(ifp, m);
910 skip:
911 nfe_set_ready_rxdesc(sc, ring, ring->cur);
912 sc->rxq.cur = (sc->rxq.cur + 1) % nfe_rx_ring_count;
915 if (reap)
916 bus_dmamap_sync(ring->tag, ring->map, BUS_DMASYNC_PREWRITE);
919 static void
920 nfe_txeof(struct nfe_softc *sc)
922 struct ifnet *ifp = &sc->arpcom.ac_if;
923 struct nfe_tx_ring *ring = &sc->txq;
924 struct nfe_tx_data *data = NULL;
926 bus_dmamap_sync(ring->tag, ring->map, BUS_DMASYNC_POSTREAD);
927 while (ring->next != ring->cur) {
928 uint16_t flags;
930 if (sc->sc_flags & NFE_40BIT_ADDR)
931 flags = le16toh(ring->desc64[ring->next].flags);
932 else
933 flags = le16toh(ring->desc32[ring->next].flags);
935 if (flags & NFE_TX_VALID)
936 break;
938 data = &ring->data[ring->next];
940 if ((sc->sc_flags & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) {
941 if (!(flags & NFE_TX_LASTFRAG_V1) && data->m == NULL)
942 goto skip;
944 if ((flags & NFE_TX_ERROR_V1) != 0) {
945 if_printf(ifp, "tx v1 error 0x%4b\n", flags,
946 NFE_V1_TXERR);
947 ifp->if_oerrors++;
948 } else {
949 ifp->if_opackets++;
951 } else {
952 if (!(flags & NFE_TX_LASTFRAG_V2) && data->m == NULL)
953 goto skip;
955 if ((flags & NFE_TX_ERROR_V2) != 0) {
956 if_printf(ifp, "tx v2 error 0x%4b\n", flags,
957 NFE_V2_TXERR);
958 ifp->if_oerrors++;
959 } else {
960 ifp->if_opackets++;
964 if (data->m == NULL) { /* should not get here */
965 if_printf(ifp,
966 "last fragment bit w/o associated mbuf!\n");
967 goto skip;
970 /* last fragment of the mbuf chain transmitted */
971 bus_dmamap_sync(ring->data_tag, data->map,
972 BUS_DMASYNC_POSTWRITE);
973 bus_dmamap_unload(ring->data_tag, data->map);
974 m_freem(data->m);
975 data->m = NULL;
977 ifp->if_timer = 0;
978 skip:
979 ring->queued--;
980 KKASSERT(ring->queued >= 0);
981 ring->next = (ring->next + 1) % NFE_TX_RING_COUNT;
984 if (data != NULL) { /* at least one slot freed */
985 ifp->if_flags &= ~IFF_OACTIVE;
986 ifp->if_start(ifp);
990 static int
991 nfe_encap(struct nfe_softc *sc, struct nfe_tx_ring *ring, struct mbuf *m0)
993 struct nfe_dma_ctx ctx;
994 bus_dma_segment_t segs[NFE_MAX_SCATTER];
995 struct nfe_tx_data *data, *data_map;
996 bus_dmamap_t map;
997 struct nfe_desc64 *desc64 = NULL;
998 struct nfe_desc32 *desc32 = NULL;
999 uint16_t flags = 0;
1000 uint32_t vtag = 0;
1001 int error, i, j;
1003 data = &ring->data[ring->cur];
1004 map = data->map;
1005 data_map = data; /* Remember who owns the DMA map */
1007 ctx.nsegs = NFE_MAX_SCATTER;
1008 ctx.segs = segs;
1009 error = bus_dmamap_load_mbuf(ring->data_tag, map, m0,
1010 nfe_buf_dma_addr, &ctx, BUS_DMA_NOWAIT);
1011 if (error && error != EFBIG) {
1012 if_printf(&sc->arpcom.ac_if, "could not map TX mbuf\n");
1013 goto back;
1016 if (error) { /* error == EFBIG */
1017 struct mbuf *m_new;
1019 m_new = m_defrag(m0, MB_DONTWAIT);
1020 if (m_new == NULL) {
1021 if_printf(&sc->arpcom.ac_if,
1022 "could not defrag TX mbuf\n");
1023 error = ENOBUFS;
1024 goto back;
1025 } else {
1026 m0 = m_new;
1029 ctx.nsegs = NFE_MAX_SCATTER;
1030 ctx.segs = segs;
1031 error = bus_dmamap_load_mbuf(ring->data_tag, map, m0,
1032 nfe_buf_dma_addr, &ctx,
1033 BUS_DMA_NOWAIT);
1034 if (error) {
1035 if_printf(&sc->arpcom.ac_if,
1036 "could not map defraged TX mbuf\n");
1037 goto back;
1041 error = 0;
1043 if (ring->queued + ctx.nsegs >= NFE_TX_RING_COUNT - 1) {
1044 bus_dmamap_unload(ring->data_tag, map);
1045 error = ENOBUFS;
1046 goto back;
1049 /* setup h/w VLAN tagging */
1050 if ((m0->m_flags & (M_PROTO1 | M_PKTHDR)) == (M_PROTO1 | M_PKTHDR) &&
1051 m0->m_pkthdr.rcvif != NULL &&
1052 m0->m_pkthdr.rcvif->if_type == IFT_L2VLAN) {
1053 struct ifvlan *ifv = m0->m_pkthdr.rcvif->if_softc;
1055 if (ifv != NULL)
1056 vtag = NFE_TX_VTAG | htons(ifv->ifv_tag);
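/*
 * Translate the checksum requests carried in the mbuf header into TX
 * descriptor flags.  Like the VLAN tag above, these belong to the
 * first fragment of the chain only and are cleared after the first
 * loop iteration below.
 */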
1059 if (sc->arpcom.ac_if.if_capenable & IFCAP_HWCSUM) {
1060 if (m0->m_pkthdr.csum_flags & CSUM_IP)
1061 flags |= NFE_TX_IP_CSUM;
1062 if (m0->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP))
1063 flags |= NFE_TX_TCP_CSUM;
1067 * XXX urm. somebody is unaware of how hardware works. You
1068 * absolutely CANNOT set NFE_TX_VALID on the next descriptor in
1069 * the ring until the entire chain is actually *VALID*. Otherwise
1070 * the hardware may encounter a partially initialized chain that
1071 * is marked as being ready to go when it in fact is not ready to
1072 * go.
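 * Hence the two-pass scheme below: fill in every descriptor of the
 * chain first, then walk the chain backwards and set NFE_TX_VALID so
 * that the first descriptor is flagged last.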
1075 for (i = 0; i < ctx.nsegs; i++) {
1076 j = (ring->cur + i) % NFE_TX_RING_COUNT;
1077 data = &ring->data[j];
1079 if (sc->sc_flags & NFE_40BIT_ADDR) {
1080 desc64 = &ring->desc64[j];
1081 #if defined(__LP64__)
1082 desc64->physaddr[0] =
1083 htole32(segs[i].ds_addr >> 32);
1084 #endif
1085 desc64->physaddr[1] =
1086 htole32(segs[i].ds_addr & 0xffffffff);
1087 desc64->length = htole16(segs[i].ds_len - 1);
1088 desc64->vtag = htole32(vtag);
1089 desc64->flags = htole16(flags);
1090 } else {
1091 desc32 = &ring->desc32[j];
1092 desc32->physaddr = htole32(segs[i].ds_addr);
1093 desc32->length = htole16(segs[i].ds_len - 1);
1094 desc32->flags = htole16(flags);
1097 /* csum flags and vtag belong to the first fragment only */
1098 flags &= ~(NFE_TX_IP_CSUM | NFE_TX_TCP_CSUM);
1099 vtag = 0;
1101 ring->queued++;
1102 KKASSERT(ring->queued <= NFE_TX_RING_COUNT);
1105 /* the whole mbuf chain has been DMA mapped, fix last descriptor */
1106 if (sc->sc_flags & NFE_40BIT_ADDR) {
1107 desc64->flags |= htole16(NFE_TX_LASTFRAG_V2);
1108 } else {
1109 if (sc->sc_flags & NFE_JUMBO_SUP)
1110 flags = NFE_TX_LASTFRAG_V2;
1111 else
1112 flags = NFE_TX_LASTFRAG_V1;
1113 desc32->flags |= htole16(flags);
1117 * Set NFE_TX_VALID backwards so the hardware doesn't see the
1118 * whole mess until the first descriptor in the map is flagged.
1120 for (i = ctx.nsegs - 1; i >= 0; --i) {
1121 j = (ring->cur + i) % NFE_TX_RING_COUNT;
1122 if (sc->sc_flags & NFE_40BIT_ADDR) {
1123 desc64 = &ring->desc64[j];
1124 desc64->flags |= htole16(NFE_TX_VALID);
1125 } else {
1126 desc32 = &ring->desc32[j];
1127 desc32->flags |= htole16(NFE_TX_VALID);
1130 ring->cur = (ring->cur + ctx.nsegs) % NFE_TX_RING_COUNT;
1132 /* Exchange DMA map */
1133 data_map->map = data->map;
1134 data->map = map;
1135 data->m = m0;
1137 bus_dmamap_sync(ring->data_tag, map, BUS_DMASYNC_PREWRITE);
1138 back:
1139 if (error)
1140 m_freem(m0);
1141 return error;
1144 static void
1145 nfe_start(struct ifnet *ifp)
1147 struct nfe_softc *sc = ifp->if_softc;
1148 struct nfe_tx_ring *ring = &sc->txq;
1149 int count = 0;
1150 struct mbuf *m0;
1152 if (ifp->if_flags & IFF_OACTIVE)
1153 return;
1155 if (ifq_is_empty(&ifp->if_snd))
1156 return;
1158 for (;;) {
1159 m0 = ifq_dequeue(&ifp->if_snd, NULL);
1160 if (m0 == NULL)
1161 break;
1163 BPF_MTAP(ifp, m0);
1165 if (nfe_encap(sc, ring, m0) != 0) {
1166 ifp->if_flags |= IFF_OACTIVE;
1167 break;
1169 ++count;
1172 * NOTE:
1173 * `m0' may be freed in nfe_encap(), so
1174 * it should not be touched any more.
1177 if (count == 0) /* nothing sent */
1178 return;
1180 /* Sync TX descriptor ring */
1181 bus_dmamap_sync(ring->tag, ring->map, BUS_DMASYNC_PREWRITE);
1183 /* Kick Tx */
1184 NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_KICKTX | sc->rxtxctl);
1187 * Set a timeout in case the chip goes out to lunch.
1189 ifp->if_timer = 5;
1192 static void
1193 nfe_watchdog(struct ifnet *ifp)
1195 struct nfe_softc *sc = ifp->if_softc;
1197 if (ifp->if_flags & IFF_RUNNING) {
1198 if_printf(ifp, "watchdog timeout - lost interrupt recovered\n");
1199 nfe_txeof(sc);
1200 return;
1203 if_printf(ifp, "watchdog timeout\n");
1205 nfe_init(ifp->if_softc);
1207 ifp->if_oerrors++;
1209 if (!ifq_is_empty(&ifp->if_snd))
1210 ifp->if_start(ifp);
1213 static void
1214 nfe_init(void *xsc)
1216 struct nfe_softc *sc = xsc;
1217 struct ifnet *ifp = &sc->arpcom.ac_if;
1218 uint32_t tmp;
1219 int error;
1221 nfe_stop(sc);
1224 * NOTE:
1225 * Switching between jumbo frames and normal frames should
1226 * be done _after_ nfe_stop() but _before_ nfe_init_rx_ring().
1228 if (ifp->if_mtu > ETHERMTU) {
1229 sc->sc_flags |= NFE_USE_JUMBO;
1230 sc->rxq.bufsz = NFE_JBYTES;
1231 if (bootverbose)
1232 if_printf(ifp, "use jumbo frames\n");
1233 } else {
1234 sc->sc_flags &= ~NFE_USE_JUMBO;
1235 sc->rxq.bufsz = MCLBYTES;
1236 if (bootverbose)
1237 if_printf(ifp, "use non-jumbo frames\n");
1240 error = nfe_init_tx_ring(sc, &sc->txq);
1241 if (error) {
1242 nfe_stop(sc);
1243 return;
1246 error = nfe_init_rx_ring(sc, &sc->rxq);
1247 if (error) {
1248 nfe_stop(sc);
1249 return;
1252 NFE_WRITE(sc, NFE_TX_UNK, 0);
1253 NFE_WRITE(sc, NFE_STATUS, 0);
1255 sc->rxtxctl = NFE_RXTX_BIT2;
1256 if (sc->sc_flags & NFE_40BIT_ADDR)
1257 sc->rxtxctl |= NFE_RXTX_V3MAGIC;
1258 else if (sc->sc_flags & NFE_JUMBO_SUP)
1259 sc->rxtxctl |= NFE_RXTX_V2MAGIC;
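/*
 * Receive checksum offload is a global switch in the RXTX control
 * register and is only programmed here at init time, which is why
 * toggling IFCAP_HWCSUM in nfe_ioctl() forces a reinitialization.
 */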
1261 if (ifp->if_capenable & IFCAP_HWCSUM)
1262 sc->rxtxctl |= NFE_RXTX_RXCSUM;
1265 * Although the adapter is capable of stripping VLAN tags from received
1266 * frames (NFE_RXTX_VTAG_STRIP), we do not enable this functionality on
1267 * purpose. This will be done in software by our network stack.
1269 if (sc->sc_flags & NFE_HW_VLAN)
1270 sc->rxtxctl |= NFE_RXTX_VTAG_INSERT;
1272 NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_RESET | sc->rxtxctl);
1273 DELAY(10);
1274 NFE_WRITE(sc, NFE_RXTX_CTL, sc->rxtxctl);
1276 if (sc->sc_flags & NFE_HW_VLAN)
1277 NFE_WRITE(sc, NFE_VTAG_CTL, NFE_VTAG_ENABLE);
1279 NFE_WRITE(sc, NFE_SETUP_R6, 0);
1281 /* set MAC address */
1282 nfe_set_macaddr(sc, sc->arpcom.ac_enaddr);
1284 /* tell MAC where rings are in memory */
1285 #ifdef __LP64__
1286 NFE_WRITE(sc, NFE_RX_RING_ADDR_HI, sc->rxq.physaddr >> 32);
1287 #endif
1288 NFE_WRITE(sc, NFE_RX_RING_ADDR_LO, sc->rxq.physaddr & 0xffffffff);
1289 #ifdef __LP64__
1290 NFE_WRITE(sc, NFE_TX_RING_ADDR_HI, sc->txq.physaddr >> 32);
1291 #endif
1292 NFE_WRITE(sc, NFE_TX_RING_ADDR_LO, sc->txq.physaddr & 0xffffffff);
1294 NFE_WRITE(sc, NFE_RING_SIZE,
1295 (nfe_rx_ring_count - 1) << 16 |
1296 (NFE_TX_RING_COUNT - 1));
1298 NFE_WRITE(sc, NFE_RXBUFSZ, sc->rxq.bufsz);
1300 /* force MAC to wakeup */
1301 tmp = NFE_READ(sc, NFE_PWR_STATE);
1302 NFE_WRITE(sc, NFE_PWR_STATE, tmp | NFE_PWR_WAKEUP);
1303 DELAY(10);
1304 tmp = NFE_READ(sc, NFE_PWR_STATE);
1305 NFE_WRITE(sc, NFE_PWR_STATE, tmp | NFE_PWR_VALID);
1308 * NFE_IMTIMER generates a periodic interrupt via NFE_IRQ_TIMER.
1309 * It is unclear how wide the timer is. Base programming does
1310 * not seem to affect NFE_IRQ_TX_DONE or NFE_IRQ_RX_DONE so
1311 * we don't get any interrupt moderation. TX moderation is
1312 * possible by using the timer interrupt instead of TX_DONE.
1314 * It is unclear whether there are other bits that can be
1315 * set to make the NFE device actually do interrupt moderation
1316 * on the RX side.
1318 * For now set a 128uS interval as a placeholder, but don't use
1319 * the timer.
1321 NFE_WRITE(sc, NFE_IMTIMER, NFE_IM_DEFAULT);
1323 NFE_WRITE(sc, NFE_SETUP_R1, NFE_R1_MAGIC);
1324 NFE_WRITE(sc, NFE_SETUP_R2, NFE_R2_MAGIC);
1325 NFE_WRITE(sc, NFE_SETUP_R6, NFE_R6_MAGIC);
1327 /* update MAC knowledge of PHY; generates a NFE_IRQ_LINK interrupt */
1328 NFE_WRITE(sc, NFE_STATUS, sc->mii_phyaddr << 24 | NFE_STATUS_MAGIC);
1330 NFE_WRITE(sc, NFE_SETUP_R4, NFE_R4_MAGIC);
1331 NFE_WRITE(sc, NFE_WOL_CTL, NFE_WOL_MAGIC);
1333 sc->rxtxctl &= ~NFE_RXTX_BIT2;
1334 NFE_WRITE(sc, NFE_RXTX_CTL, sc->rxtxctl);
1335 DELAY(10);
1336 NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_BIT1 | sc->rxtxctl);
1338 /* set Rx filter */
1339 nfe_setmulti(sc);
1341 nfe_ifmedia_upd(ifp);
1343 /* enable Rx */
1344 NFE_WRITE(sc, NFE_RX_CTL, NFE_RX_START);
1346 /* enable Tx */
1347 NFE_WRITE(sc, NFE_TX_CTL, NFE_TX_START);
1349 NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);
1351 #ifdef DEVICE_POLLING
1352 if ((ifp->if_flags & IFF_POLLING) == 0)
1353 #endif
1354 /* enable interrupts */
1355 NFE_WRITE(sc, NFE_IRQ_MASK, NFE_IRQ_WANTED);
1357 callout_reset(&sc->sc_tick_ch, hz, nfe_tick, sc);
1359 ifp->if_flags |= IFF_RUNNING;
1360 ifp->if_flags &= ~IFF_OACTIVE;
1363 static void
1364 nfe_stop(struct nfe_softc *sc)
1366 struct ifnet *ifp = &sc->arpcom.ac_if;
1368 callout_stop(&sc->sc_tick_ch);
1370 ifp->if_timer = 0;
1371 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
1373 /* Abort Tx */
1374 NFE_WRITE(sc, NFE_TX_CTL, 0);
1376 /* Disable Rx */
1377 NFE_WRITE(sc, NFE_RX_CTL, 0);
1379 /* Disable interrupts */
1380 NFE_WRITE(sc, NFE_IRQ_MASK, 0);
1382 /* Reset Tx and Rx rings */
1383 nfe_reset_tx_ring(sc, &sc->txq);
1384 nfe_reset_rx_ring(sc, &sc->rxq);
1387 static int
1388 nfe_alloc_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
1390 int i, j, error, descsize;
1391 void **desc;
1393 if (sc->sc_flags & NFE_40BIT_ADDR) {
1394 desc = (void **)&ring->desc64;
1395 descsize = sizeof(struct nfe_desc64);
1396 } else {
1397 desc = (void **)&ring->desc32;
1398 descsize = sizeof(struct nfe_desc32);
1401 ring->jbuf = kmalloc(sizeof(struct nfe_jbuf) * NFE_JPOOL_COUNT,
1402 M_DEVBUF, M_WAITOK | M_ZERO);
1403 ring->data = kmalloc(sizeof(struct nfe_rx_data) * nfe_rx_ring_count,
1404 M_DEVBUF, M_WAITOK | M_ZERO);
1406 ring->bufsz = MCLBYTES;
1407 ring->cur = ring->next = 0;
1409 error = bus_dma_tag_create(NULL, PAGE_SIZE, 0,
1410 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,
1411 NULL, NULL,
1412 nfe_rx_ring_count * descsize, 1,
1413 nfe_rx_ring_count * descsize,
1414 0, &ring->tag);
1415 if (error) {
1416 if_printf(&sc->arpcom.ac_if,
1417 "could not create desc RX DMA tag\n");
1418 return error;
1421 error = bus_dmamem_alloc(ring->tag, desc, BUS_DMA_WAITOK | BUS_DMA_ZERO,
1422 &ring->map);
1423 if (error) {
1424 if_printf(&sc->arpcom.ac_if,
1425 "could not allocate RX desc DMA memory\n");
1426 bus_dma_tag_destroy(ring->tag);
1427 ring->tag = NULL;
1428 return error;
1431 error = bus_dmamap_load(ring->tag, ring->map, *desc,
1432 nfe_rx_ring_count * descsize,
1433 nfe_ring_dma_addr, &ring->physaddr,
1434 BUS_DMA_WAITOK);
1435 if (error) {
1436 if_printf(&sc->arpcom.ac_if,
1437 "could not load RX desc DMA map\n");
1438 bus_dmamem_free(ring->tag, *desc, ring->map);
1439 bus_dma_tag_destroy(ring->tag);
1440 ring->tag = NULL;
1441 return error;
1444 if (sc->sc_flags & NFE_JUMBO_SUP) {
1445 error = nfe_jpool_alloc(sc, ring);
1446 if (error) {
1447 if_printf(&sc->arpcom.ac_if,
1448 "could not allocate jumbo frames\n");
1449 return error;
1453 error = bus_dma_tag_create(NULL, 1, 0,
1454 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,
1455 NULL, NULL,
1456 MCLBYTES, 1, MCLBYTES,
1457 0, &ring->data_tag);
1458 if (error) {
1459 if_printf(&sc->arpcom.ac_if,
1460 "could not create RX mbuf DMA tag\n");
1461 return error;
1464 /* Create a spare RX mbuf DMA map */
1465 error = bus_dmamap_create(ring->data_tag, 0, &ring->data_tmpmap);
1466 if (error) {
1467 if_printf(&sc->arpcom.ac_if,
1468 "could not create spare RX mbuf DMA map\n");
1469 bus_dma_tag_destroy(ring->data_tag);
1470 ring->data_tag = NULL;
1471 return error;
1474 for (i = 0; i < nfe_rx_ring_count; i++) {
1475 error = bus_dmamap_create(ring->data_tag, 0,
1476 &ring->data[i].map);
1477 if (error) {
1478 if_printf(&sc->arpcom.ac_if,
1479 "could not create %dth RX mbuf DMA mapn", i);
1480 goto fail;
1483 return 0;
1484 fail:
1485 for (j = 0; j < i; ++j)
1486 bus_dmamap_destroy(ring->data_tag, ring->data[j].map);
1487 bus_dmamap_destroy(ring->data_tag, ring->data_tmpmap);
1488 bus_dma_tag_destroy(ring->data_tag);
1489 ring->data_tag = NULL;
1490 return error;
1493 static void
1494 nfe_reset_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
1496 int i;
1498 for (i = 0; i < nfe_rx_ring_count; i++) {
1499 struct nfe_rx_data *data = &ring->data[i];
1501 if (data->m != NULL) {
1502 if ((sc->sc_flags & NFE_USE_JUMBO) == 0)
1503 bus_dmamap_unload(ring->data_tag, data->map);
1504 m_freem(data->m);
1505 data->m = NULL;
1508 bus_dmamap_sync(ring->tag, ring->map, BUS_DMASYNC_PREWRITE);
1510 ring->cur = ring->next = 0;
1513 static int
1514 nfe_init_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
1516 int i;
1518 for (i = 0; i < nfe_rx_ring_count; ++i) {
1519 int error;
1521 /* XXX should use a function pointer */
1522 if (sc->sc_flags & NFE_USE_JUMBO)
1523 error = nfe_newbuf_jumbo(sc, ring, i, 1);
1524 else
1525 error = nfe_newbuf_std(sc, ring, i, 1);
1526 if (error) {
1527 if_printf(&sc->arpcom.ac_if,
1528 "could not allocate RX buffer\n");
1529 return error;
1532 nfe_set_ready_rxdesc(sc, ring, i);
1534 bus_dmamap_sync(ring->tag, ring->map, BUS_DMASYNC_PREWRITE);
1536 return 0;
1539 static void
1540 nfe_free_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
1542 if (ring->data_tag != NULL) {
1543 struct nfe_rx_data *data;
1544 int i;
1546 for (i = 0; i < nfe_rx_ring_count; i++) {
1547 data = &ring->data[i];
1549 if (data->m != NULL) {
1550 bus_dmamap_unload(ring->data_tag, data->map);
1551 m_freem(data->m);
1553 bus_dmamap_destroy(ring->data_tag, data->map);
1555 bus_dmamap_destroy(ring->data_tag, ring->data_tmpmap);
1556 bus_dma_tag_destroy(ring->data_tag);
1559 nfe_jpool_free(sc, ring);
1561 if (ring->jbuf != NULL)
1562 kfree(ring->jbuf, M_DEVBUF);
1563 if (ring->data != NULL)
1564 kfree(ring->data, M_DEVBUF);
1566 if (ring->tag != NULL) {
1567 void *desc;
1569 if (sc->sc_flags & NFE_40BIT_ADDR)
1570 desc = ring->desc64;
1571 else
1572 desc = ring->desc32;
1574 bus_dmamap_unload(ring->tag, ring->map);
1575 bus_dmamem_free(ring->tag, desc, ring->map);
1576 bus_dma_tag_destroy(ring->tag);
1580 static struct nfe_jbuf *
1581 nfe_jalloc(struct nfe_softc *sc)
1583 struct ifnet *ifp = &sc->arpcom.ac_if;
1584 struct nfe_jbuf *jbuf;
1586 lwkt_serialize_enter(&sc->sc_jbuf_serializer);
1588 jbuf = SLIST_FIRST(&sc->rxq.jfreelist);
1589 if (jbuf != NULL) {
1590 SLIST_REMOVE_HEAD(&sc->rxq.jfreelist, jnext);
1591 jbuf->inuse = 1;
1592 } else {
1593 if_printf(ifp, "no free jumbo buffer\n");
1596 lwkt_serialize_exit(&sc->sc_jbuf_serializer);
1598 return jbuf;
1601 static void
1602 nfe_jfree(void *arg)
1604 struct nfe_jbuf *jbuf = arg;
1605 struct nfe_softc *sc = jbuf->sc;
1606 struct nfe_rx_ring *ring = jbuf->ring;
1608 if (&ring->jbuf[jbuf->slot] != jbuf)
1609 panic("%s: free wrong jumbo buffer\n", __func__);
1610 else if (jbuf->inuse == 0)
1611 panic("%s: jumbo buffer already freed\n", __func__);
1613 lwkt_serialize_enter(&sc->sc_jbuf_serializer);
1614 atomic_subtract_int(&jbuf->inuse, 1);
1615 if (jbuf->inuse == 0)
1616 SLIST_INSERT_HEAD(&ring->jfreelist, jbuf, jnext);
1617 lwkt_serialize_exit(&sc->sc_jbuf_serializer);
1620 static void
1621 nfe_jref(void *arg)
1623 struct nfe_jbuf *jbuf = arg;
1624 struct nfe_rx_ring *ring = jbuf->ring;
1626 if (&ring->jbuf[jbuf->slot] != jbuf)
1627 panic("%s: ref wrong jumbo buffer\n", __func__);
1628 else if (jbuf->inuse == 0)
1629 panic("%s: jumbo buffer already freed\n", __func__);
1631 atomic_add_int(&jbuf->inuse, 1);
1634 static int
1635 nfe_jpool_alloc(struct nfe_softc *sc, struct nfe_rx_ring *ring)
1637 struct nfe_jbuf *jbuf;
1638 bus_addr_t physaddr;
1639 caddr_t buf;
1640 int i, error;
1643 * Allocate a big chunk of DMA'able memory.
1645 error = bus_dma_tag_create(NULL, PAGE_SIZE, 0,
1646 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,
1647 NULL, NULL,
1648 NFE_JPOOL_SIZE, 1, NFE_JPOOL_SIZE,
1649 0, &ring->jtag);
1650 if (error) {
1651 if_printf(&sc->arpcom.ac_if,
1652 "could not create jumbo DMA tag\n");
1653 return error;
1656 error = bus_dmamem_alloc(ring->jtag, (void **)&ring->jpool,
1657 BUS_DMA_WAITOK, &ring->jmap);
1658 if (error) {
1659 if_printf(&sc->arpcom.ac_if,
1660 "could not allocate jumbo DMA memory\n");
1661 bus_dma_tag_destroy(ring->jtag);
1662 ring->jtag = NULL;
1663 return error;
1666 error = bus_dmamap_load(ring->jtag, ring->jmap, ring->jpool,
1667 NFE_JPOOL_SIZE, nfe_ring_dma_addr, &physaddr,
1668 BUS_DMA_WAITOK);
1669 if (error) {
1670 if_printf(&sc->arpcom.ac_if,
1671 "could not load jumbo DMA map\n");
1672 bus_dmamem_free(ring->jtag, ring->jpool, ring->jmap);
1673 bus_dma_tag_destroy(ring->jtag);
1674 ring->jtag = NULL;
1675 return error;
1678 /* ...and split it into 9KB chunks */
1679 SLIST_INIT(&ring->jfreelist);
1681 buf = ring->jpool;
1682 for (i = 0; i < NFE_JPOOL_COUNT; i++) {
1683 jbuf = &ring->jbuf[i];
1685 jbuf->sc = sc;
1686 jbuf->ring = ring;
1687 jbuf->inuse = 0;
1688 jbuf->slot = i;
1689 jbuf->buf = buf;
1690 jbuf->physaddr = physaddr;
1692 SLIST_INSERT_HEAD(&ring->jfreelist, jbuf, jnext);
1694 buf += NFE_JBYTES;
1695 physaddr += NFE_JBYTES;
1698 return 0;
1701 static void
1702 nfe_jpool_free(struct nfe_softc *sc, struct nfe_rx_ring *ring)
1704 if (ring->jtag != NULL) {
1705 bus_dmamap_unload(ring->jtag, ring->jmap);
1706 bus_dmamem_free(ring->jtag, ring->jpool, ring->jmap);
1707 bus_dma_tag_destroy(ring->jtag);
1711 static int
1712 nfe_alloc_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
1714 int i, j, error, descsize;
1715 void **desc;
1717 if (sc->sc_flags & NFE_40BIT_ADDR) {
1718 desc = (void **)&ring->desc64;
1719 descsize = sizeof(struct nfe_desc64);
1720 } else {
1721 desc = (void **)&ring->desc32;
1722 descsize = sizeof(struct nfe_desc32);
1725 ring->queued = 0;
1726 ring->cur = ring->next = 0;
1728 error = bus_dma_tag_create(NULL, PAGE_SIZE, 0,
1729 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,
1730 NULL, NULL,
1731 NFE_TX_RING_COUNT * descsize, 1,
1732 NFE_TX_RING_COUNT * descsize,
1733 0, &ring->tag);
1734 if (error) {
1735 if_printf(&sc->arpcom.ac_if,
1736 "could not create TX desc DMA map\n");
1737 return error;
1740 error = bus_dmamem_alloc(ring->tag, desc, BUS_DMA_WAITOK | BUS_DMA_ZERO,
1741 &ring->map);
1742 if (error) {
1743 if_printf(&sc->arpcom.ac_if,
1744 "could not allocate TX desc DMA memory\n");
1745 bus_dma_tag_destroy(ring->tag);
1746 ring->tag = NULL;
1747 return error;
1750 error = bus_dmamap_load(ring->tag, ring->map, *desc,
1751 NFE_TX_RING_COUNT * descsize,
1752 nfe_ring_dma_addr, &ring->physaddr,
1753 BUS_DMA_WAITOK);
1754 if (error) {
1755 if_printf(&sc->arpcom.ac_if,
1756 "could not load TX desc DMA map\n");
1757 bus_dmamem_free(ring->tag, *desc, ring->map);
1758 bus_dma_tag_destroy(ring->tag);
1759 ring->tag = NULL;
1760 return error;
1763 error = bus_dma_tag_create(NULL, PAGE_SIZE, 0,
1764 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,
1765 NULL, NULL,
1766 NFE_JBYTES * NFE_MAX_SCATTER,
1767 NFE_MAX_SCATTER, NFE_JBYTES,
1768 0, &ring->data_tag);
1769 if (error) {
1770 if_printf(&sc->arpcom.ac_if,
1771 "could not create TX buf DMA tag\n");
1772 return error;
1775 for (i = 0; i < NFE_TX_RING_COUNT; i++) {
1776 error = bus_dmamap_create(ring->data_tag, 0,
1777 &ring->data[i].map);
1778 if (error) {
1779 if_printf(&sc->arpcom.ac_if,
1780 "could not create %dth TX buf DMA map\n", i);
1781 goto fail;
1785 return 0;
1786 fail:
1787 for (j = 0; j < i; ++j)
1788 bus_dmamap_destroy(ring->data_tag, ring->data[j].map);
1789 bus_dma_tag_destroy(ring->data_tag);
1790 ring->data_tag = NULL;
1791 return error;
1794 static void
1795 nfe_reset_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
1797 int i;
1799 for (i = 0; i < NFE_TX_RING_COUNT; i++) {
1800 struct nfe_tx_data *data = &ring->data[i];
1802 if (sc->sc_flags & NFE_40BIT_ADDR)
1803 ring->desc64[i].flags = 0;
1804 else
1805 ring->desc32[i].flags = 0;
1807 if (data->m != NULL) {
1808 bus_dmamap_sync(ring->data_tag, data->map,
1809 BUS_DMASYNC_POSTWRITE);
1810 bus_dmamap_unload(ring->data_tag, data->map);
1811 m_freem(data->m);
1812 data->m = NULL;
1815 bus_dmamap_sync(ring->tag, ring->map, BUS_DMASYNC_PREWRITE);
1817 ring->queued = 0;
1818 ring->cur = ring->next = 0;
1821 static int
1822 nfe_init_tx_ring(struct nfe_softc *sc __unused,
1823 struct nfe_tx_ring *ring __unused)
1825 return 0;
1828 static void
1829 nfe_free_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
1831 if (ring->data_tag != NULL) {
1832 struct nfe_tx_data *data;
1833 int i;
1835 for (i = 0; i < NFE_TX_RING_COUNT; ++i) {
1836 data = &ring->data[i];
1838 if (data->m != NULL) {
1839 bus_dmamap_unload(ring->data_tag, data->map);
1840 m_freem(data->m);
1842 bus_dmamap_destroy(ring->data_tag, data->map);
1845 bus_dma_tag_destroy(ring->data_tag);
1848 if (ring->tag != NULL) {
1849 void *desc;
1851 if (sc->sc_flags & NFE_40BIT_ADDR)
1852 desc = ring->desc64;
1853 else
1854 desc = ring->desc32;
1856 bus_dmamap_unload(ring->tag, ring->map);
1857 bus_dmamem_free(ring->tag, desc, ring->map);
1858 bus_dma_tag_destroy(ring->tag);
1862 static int
1863 nfe_ifmedia_upd(struct ifnet *ifp)
1865 struct nfe_softc *sc = ifp->if_softc;
1866 struct mii_data *mii = device_get_softc(sc->sc_miibus);
1868 if (mii->mii_instance != 0) {
1869 struct mii_softc *miisc;
1871 LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
1872 mii_phy_reset(miisc);
1874 mii_mediachg(mii);
1876 return 0;
1879 static void
1880 nfe_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
1882 struct nfe_softc *sc = ifp->if_softc;
1883 struct mii_data *mii = device_get_softc(sc->sc_miibus);
1885 mii_pollstat(mii);
1886 ifmr->ifm_status = mii->mii_media_status;
1887 ifmr->ifm_active = mii->mii_media_active;
1890 static void
1891 nfe_setmulti(struct nfe_softc *sc)
1893 struct ifnet *ifp = &sc->arpcom.ac_if;
1894 struct ifmultiaddr *ifma;
1895 uint8_t addr[ETHER_ADDR_LEN], mask[ETHER_ADDR_LEN];
1896 uint32_t filter = NFE_RXFILTER_MAGIC;
1897 int i;
1899 if ((ifp->if_flags & (IFF_ALLMULTI | IFF_PROMISC)) != 0) {
1900 bzero(addr, ETHER_ADDR_LEN);
1901 bzero(mask, ETHER_ADDR_LEN);
1902 goto done;
1905 bcopy(etherbroadcastaddr, addr, ETHER_ADDR_LEN);
1906 bcopy(etherbroadcastaddr, mask, ETHER_ADDR_LEN);
1908 LIST_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1909 caddr_t maddr;
1911 if (ifma->ifma_addr->sa_family != AF_LINK)
1912 continue;
1914 maddr = LLADDR((struct sockaddr_dl *)ifma->ifma_addr);
1915 for (i = 0; i < ETHER_ADDR_LEN; i++) {
1916 addr[i] &= maddr[i];
1917 mask[i] &= ~maddr[i];
1921 for (i = 0; i < ETHER_ADDR_LEN; i++)
1922 mask[i] |= addr[i];
1924 done:
1925 addr[0] |= 0x01; /* make sure multicast bit is set */
1927 NFE_WRITE(sc, NFE_MULTIADDR_HI,
1928 addr[3] << 24 | addr[2] << 16 | addr[1] << 8 | addr[0]);
1929 NFE_WRITE(sc, NFE_MULTIADDR_LO,
1930 addr[5] << 8 | addr[4]);
1931 NFE_WRITE(sc, NFE_MULTIMASK_HI,
1932 mask[3] << 24 | mask[2] << 16 | mask[1] << 8 | mask[0]);
1933 NFE_WRITE(sc, NFE_MULTIMASK_LO,
1934 mask[5] << 8 | mask[4]);
1936 filter |= (ifp->if_flags & IFF_PROMISC) ? NFE_PROMISC : NFE_U2M;
1937 NFE_WRITE(sc, NFE_RXFILTER, filter);
1940 static void
1941 nfe_get_macaddr(struct nfe_softc *sc, uint8_t *addr)
1943 uint32_t tmp;
1945 tmp = NFE_READ(sc, NFE_MACADDR_LO);
1946 addr[0] = (tmp >> 8) & 0xff;
1947 addr[1] = (tmp & 0xff);
1949 tmp = NFE_READ(sc, NFE_MACADDR_HI);
1950 addr[2] = (tmp >> 24) & 0xff;
1951 addr[3] = (tmp >> 16) & 0xff;
1952 addr[4] = (tmp >> 8) & 0xff;
1953 addr[5] = (tmp & 0xff);
1956 static void
1957 nfe_set_macaddr(struct nfe_softc *sc, const uint8_t *addr)
1959 NFE_WRITE(sc, NFE_MACADDR_LO,
1960 addr[5] << 8 | addr[4]);
1961 NFE_WRITE(sc, NFE_MACADDR_HI,
1962 addr[3] << 24 | addr[2] << 16 | addr[1] << 8 | addr[0]);
1965 static void
1966 nfe_tick(void *arg)
1968 struct nfe_softc *sc = arg;
1969 struct ifnet *ifp = &sc->arpcom.ac_if;
1970 struct mii_data *mii = device_get_softc(sc->sc_miibus);
1972 lwkt_serialize_enter(ifp->if_serializer);
1974 mii_tick(mii);
1975 callout_reset(&sc->sc_tick_ch, hz, nfe_tick, sc);
1977 lwkt_serialize_exit(ifp->if_serializer);
1980 static void
1981 nfe_ring_dma_addr(void *arg, bus_dma_segment_t *seg, int nseg, int error)
1983 if (error)
1984 return;
1986 KASSERT(nseg == 1, ("too many segments, should be 1\n"));
1988 *((uint32_t *)arg) = seg->ds_addr;
1991 static void
1992 nfe_buf_dma_addr(void *arg, bus_dma_segment_t *segs, int nsegs,
1993 bus_size_t mapsz __unused, int error)
1995 struct nfe_dma_ctx *ctx = arg;
1996 int i;
1998 if (error)
1999 return;
2001 KASSERT(nsegs <= ctx->nsegs,
2002 ("too many segments(%d), should be <= %d\n",
2003 nsegs, ctx->nsegs));
2005 ctx->nsegs = nsegs;
2006 for (i = 0; i < nsegs; ++i)
2007 ctx->segs[i] = segs[i];
2010 static int
2011 nfe_newbuf_std(struct nfe_softc *sc, struct nfe_rx_ring *ring, int idx,
2012 int wait)
2014 struct nfe_rx_data *data = &ring->data[idx];
2015 struct nfe_dma_ctx ctx;
2016 bus_dma_segment_t seg;
2017 bus_dmamap_t map;
2018 struct mbuf *m;
2019 int error;
2021 m = m_getcl(wait ? MB_WAIT : MB_DONTWAIT, MT_DATA, M_PKTHDR);
2022 if (m == NULL)
2023 return ENOBUFS;
2024 m->m_len = m->m_pkthdr.len = MCLBYTES;
2026 ctx.nsegs = 1;
2027 ctx.segs = &seg;
2028 error = bus_dmamap_load_mbuf(ring->data_tag, ring->data_tmpmap,
2029 m, nfe_buf_dma_addr, &ctx,
2030 wait ? BUS_DMA_WAITOK : BUS_DMA_NOWAIT);
2031 if (error) {
2032 m_freem(m);
2033 if_printf(&sc->arpcom.ac_if, "could not map RX mbuf, error %d\n", error);
2034 return error;
2037 /* Unload originally mapped mbuf */
2038 bus_dmamap_unload(ring->data_tag, data->map);
2040 /* Swap this DMA map with tmp DMA map */
2041 map = data->map;
2042 data->map = ring->data_tmpmap;
2043 ring->data_tmpmap = map;
2045 /* Caller is assumed to have collected the old mbuf */
2046 data->m = m;
2048 nfe_set_paddr_rxdesc(sc, ring, idx, seg.ds_addr);
2050 bus_dmamap_sync(ring->data_tag, data->map, BUS_DMASYNC_PREREAD);
2051 return 0;
2054 static int
2055 nfe_newbuf_jumbo(struct nfe_softc *sc, struct nfe_rx_ring *ring, int idx,
2056 int wait)
2058 struct nfe_rx_data *data = &ring->data[idx];
2059 struct nfe_jbuf *jbuf;
2060 struct mbuf *m;
2062 MGETHDR(m, wait ? MB_WAIT : MB_DONTWAIT, MT_DATA);
2063 if (m == NULL)
2064 return ENOBUFS;
2066 jbuf = nfe_jalloc(sc);
2067 if (jbuf == NULL) {
2068 m_freem(m);
2069 if_printf(&sc->arpcom.ac_if, "jumbo allocation failed "
2070 "-- packet dropped!\n");
2071 return ENOBUFS;
2074 m->m_ext.ext_arg = jbuf;
2075 m->m_ext.ext_buf = jbuf->buf;
2076 m->m_ext.ext_free = nfe_jfree;
2077 m->m_ext.ext_ref = nfe_jref;
2078 m->m_ext.ext_size = NFE_JBYTES;
2080 m->m_data = m->m_ext.ext_buf;
2081 m->m_flags |= M_EXT;
2082 m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
2084 /* Caller is assumed to have collected the old mbuf */
2085 data->m = m;
2087 nfe_set_paddr_rxdesc(sc, ring, idx, jbuf->physaddr);
2089 bus_dmamap_sync(ring->jtag, ring->jmap, BUS_DMASYNC_PREREAD);
2090 return 0;
2093 static void
2094 nfe_set_paddr_rxdesc(struct nfe_softc *sc, struct nfe_rx_ring *ring, int idx,
2095 bus_addr_t physaddr)
2097 if (sc->sc_flags & NFE_40BIT_ADDR) {
2098 struct nfe_desc64 *desc64 = &ring->desc64[idx];
2100 #if defined(__LP64__)
2101 desc64->physaddr[0] = htole32(physaddr >> 32);
2102 #endif
2103 desc64->physaddr[1] = htole32(physaddr & 0xffffffff);
2104 } else {
2105 struct nfe_desc32 *desc32 = &ring->desc32[idx];
2107 desc32->physaddr = htole32(physaddr);
2111 static void
2112 nfe_set_ready_rxdesc(struct nfe_softc *sc, struct nfe_rx_ring *ring, int idx)
2114 if (sc->sc_flags & NFE_40BIT_ADDR) {
2115 struct nfe_desc64 *desc64 = &ring->desc64[idx];
2117 desc64->length = htole16(ring->bufsz);
2118 desc64->flags = htole16(NFE_RX_READY);
2119 } else {
2120 struct nfe_desc32 *desc32 = &ring->desc32[idx];
2122 desc32->length = htole16(ring->bufsz);
2123 desc32->flags = htole16(NFE_RX_READY);