/*	$NetBSD: if_stge.c,v 1.32 2005/12/11 12:22:49 christos Exp $	*/
/*	$FreeBSD: src/sys/dev/stge/if_stge.c,v 1.2 2006/08/12 01:21:36 yongari Exp $	*/
/*	$DragonFly: src/sys/dev/netif/stge/if_stge.c,v 1.6 2008/05/16 13:19:12 sephe Exp $	*/
/*-
 * Copyright (c) 2001 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
/*
 * Device driver for the Sundance Tech. TC9021 10/100/1000
 * Ethernet controller.
 */
46 #include "opt_polling.h"
48 #include <sys/param.h>
49 #include <sys/bus.h>
50 #include <sys/endian.h>
51 #include <sys/kernel.h>
52 #include <sys/interrupt.h>
53 #include <sys/malloc.h>
54 #include <sys/mbuf.h>
55 #include <sys/module.h>
56 #include <sys/rman.h>
57 #include <sys/serialize.h>
58 #include <sys/socket.h>
59 #include <sys/sockio.h>
60 #include <sys/sysctl.h>
62 #include <net/bpf.h>
63 #include <net/ethernet.h>
64 #include <net/if.h>
65 #include <net/if_arp.h>
66 #include <net/if_dl.h>
67 #include <net/if_media.h>
68 #include <net/if_types.h>
69 #include <net/ifq_var.h>
70 #include <net/vlan/if_vlan_var.h>
71 #include <net/vlan/if_vlan_ether.h>
73 #include <dev/netif/mii_layer/mii.h>
74 #include <dev/netif/mii_layer/miivar.h>
76 #include <bus/pci/pcireg.h>
77 #include <bus/pci/pcivar.h>
79 #include "if_stgereg.h"
80 #include "if_stgevar.h"
82 #define STGE_CSUM_FEATURES (CSUM_IP | CSUM_TCP | CSUM_UDP)
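
/*
 * Checksum offloads the TC9021 can compute in hardware; hooked up to
 * if_hwassist in stge_attach() on revisions without the B3 checksum bug.
 */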
84 /* "device miibus" required. See GENERIC if you get errors here. */
85 #include "miibus_if.h"

/*
 * Devices supported by this driver.
 */
static struct stge_product {
	uint16_t	stge_vendorid;
	uint16_t	stge_deviceid;
	const char	*stge_name;
} stge_products[] = {
	{ VENDOR_SUNDANCETI,	DEVICEID_SUNDANCETI_ST1023,
	  "Sundance ST-1023 Gigabit Ethernet" },

	{ VENDOR_SUNDANCETI,	DEVICEID_SUNDANCETI_ST2021,
	  "Sundance ST-2021 Gigabit Ethernet" },

	{ VENDOR_TAMARACK,	DEVICEID_TAMARACK_TC9021,
	  "Tamarack TC9021 Gigabit Ethernet" },

	{ VENDOR_TAMARACK,	DEVICEID_TAMARACK_TC9021_ALT,
	  "Tamarack TC9021 Gigabit Ethernet" },

	/*
	 * The Sundance sample boards use the Sundance vendor ID,
	 * but the Tamarack product ID.
	 */
	{ VENDOR_SUNDANCETI,	DEVICEID_TAMARACK_TC9021,
	  "Sundance TC9021 Gigabit Ethernet" },

	{ VENDOR_SUNDANCETI,	DEVICEID_TAMARACK_TC9021_ALT,
	  "Sundance TC9021 Gigabit Ethernet" },

	{ VENDOR_DLINK,		DEVICEID_DLINK_DL2000,
	  "D-Link DL-2000 Gigabit Ethernet" },

	{ VENDOR_ANTARES,	DEVICEID_ANTARES_TC9021,
	  "Antares Gigabit Ethernet" },

	{ 0, 0, NULL }
};
static int	stge_probe(device_t);
static int	stge_attach(device_t);
static int	stge_detach(device_t);
static void	stge_shutdown(device_t);
static int	stge_suspend(device_t);
static int	stge_resume(device_t);

static int	stge_encap(struct stge_softc *, struct mbuf **);
static void	stge_start(struct ifnet *);
static void	stge_watchdog(struct ifnet *);
static int	stge_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
static void	stge_init(void *);
static void	stge_vlan_setup(struct stge_softc *);
static void	stge_stop(struct stge_softc *);
static void	stge_start_tx(struct stge_softc *);
static void	stge_start_rx(struct stge_softc *);
static void	stge_stop_tx(struct stge_softc *);
static void	stge_stop_rx(struct stge_softc *);

static void	stge_reset(struct stge_softc *, uint32_t);
static int	stge_eeprom_wait(struct stge_softc *);
static void	stge_read_eeprom(struct stge_softc *, int, uint16_t *);
static void	stge_tick(void *);
static void	stge_stats_update(struct stge_softc *);
static void	stge_set_filter(struct stge_softc *);
static void	stge_set_multi(struct stge_softc *);

static void	stge_link(struct stge_softc *);
static void	stge_intr(void *);
static __inline int stge_tx_error(struct stge_softc *);
static void	stge_txeof(struct stge_softc *);
static void	stge_rxeof(struct stge_softc *, int);
static __inline void stge_discard_rxbuf(struct stge_softc *, int);
static int	stge_newbuf(struct stge_softc *, int, int);
#ifndef __i386__
static __inline struct mbuf *stge_fixup_rx(struct stge_softc *, struct mbuf *);
#endif

static void	stge_mii_sync(struct stge_softc *);
static void	stge_mii_send(struct stge_softc *, uint32_t, int);
static int	stge_mii_readreg(struct stge_softc *, struct stge_mii_frame *);
static int	stge_mii_writereg(struct stge_softc *, struct stge_mii_frame *);
static int	stge_miibus_readreg(device_t, int, int);
static int	stge_miibus_writereg(device_t, int, int, int);
static void	stge_miibus_statchg(device_t);
static int	stge_mediachange(struct ifnet *);
static void	stge_mediastatus(struct ifnet *, struct ifmediareq *);

static void	stge_dmamap_cb(void *, bus_dma_segment_t *, int, int);
static void	stge_mbuf_dmamap_cb(void *, bus_dma_segment_t *, int,
				    bus_size_t, int);
static int	stge_dma_alloc(struct stge_softc *);
static void	stge_dma_free(struct stge_softc *);
static void	stge_dma_wait(struct stge_softc *);
static void	stge_init_tx_ring(struct stge_softc *);
static int	stge_init_rx_ring(struct stge_softc *);
#ifdef DEVICE_POLLING
static void	stge_poll(struct ifnet *, enum poll_cmd, int);
#endif

static int	sysctl_int_range(SYSCTL_HANDLER_ARGS, int, int);
static int	sysctl_hw_stge_rxint_nframe(SYSCTL_HANDLER_ARGS);
static int	sysctl_hw_stge_rxint_dmawait(SYSCTL_HANDLER_ARGS);

static device_method_t stge_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		stge_probe),
	DEVMETHOD(device_attach,	stge_attach),
	DEVMETHOD(device_detach,	stge_detach),
	DEVMETHOD(device_shutdown,	stge_shutdown),
	DEVMETHOD(device_suspend,	stge_suspend),
	DEVMETHOD(device_resume,	stge_resume),

	/* MII interface */
	DEVMETHOD(miibus_readreg,	stge_miibus_readreg),
	DEVMETHOD(miibus_writereg,	stge_miibus_writereg),
	DEVMETHOD(miibus_statchg,	stge_miibus_statchg),

	{ 0, 0 }
};

static driver_t stge_driver = {
	"stge",
	stge_methods,
	sizeof(struct stge_softc)
};

static devclass_t stge_devclass;

DECLARE_DUMMY_MODULE(if_stge);
MODULE_DEPEND(if_stge, miibus, 1, 1, 1);
DRIVER_MODULE(if_stge, pci, stge_driver, stge_devclass, 0, 0);
DRIVER_MODULE(miibus, stge, miibus_driver, miibus_devclass, 0, 0);

#define	MII_SET(x)	\
	CSR_WRITE_1(sc, STGE_PhyCtrl, CSR_READ_1(sc, STGE_PhyCtrl) | (x))
#define	MII_CLR(x)	\
	CSR_WRITE_1(sc, STGE_PhyCtrl, CSR_READ_1(sc, STGE_PhyCtrl) & ~(x))
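
/*
 * The PHY management interface is bit-banged through the PhyCtrl
 * register; MII_SET/MII_CLR toggle the clock, data and direction
 * bits used by the stge_mii_* helpers below.
 */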

/*
 * Sync the PHYs by setting data bit and strobing the clock 32 times.
 */
static void
stge_mii_sync(struct stge_softc *sc)
{
	int i;

	MII_SET(PC_MgmtDir | PC_MgmtData);

	for (i = 0; i < 32; i++) {
		MII_SET(PC_MgmtClk);
		DELAY(1);
		MII_CLR(PC_MgmtClk);
		DELAY(1);
	}
}

/*
 * Clock a series of bits through the MII.
 */
static void
stge_mii_send(struct stge_softc *sc, uint32_t bits, int cnt)
{
	int i;

	MII_CLR(PC_MgmtClk);

	for (i = (0x1 << (cnt - 1)); i; i >>= 1) {
		if (bits & i)
			MII_SET(PC_MgmtData);
		else
			MII_CLR(PC_MgmtData);
		DELAY(1);
		MII_CLR(PC_MgmtClk);
		DELAY(1);
		MII_SET(PC_MgmtClk);
	}
}

/*
 * Read a PHY register through the MII.
 */
static int
stge_mii_readreg(struct stge_softc *sc, struct stge_mii_frame *frame)
{
	int i, ack;

	/*
	 * Set up frame for RX.
	 */
	frame->mii_stdelim = STGE_MII_STARTDELIM;
	frame->mii_opcode = STGE_MII_READOP;
	frame->mii_turnaround = 0;
	frame->mii_data = 0;

	CSR_WRITE_1(sc, STGE_PhyCtrl, 0 | sc->sc_PhyCtrl);

	/*
	 * Turn on data xmit.
	 */
	MII_SET(PC_MgmtDir);

	stge_mii_sync(sc);

	/*
	 * Send command/address info.
	 */
	stge_mii_send(sc, frame->mii_stdelim, 2);
	stge_mii_send(sc, frame->mii_opcode, 2);
	stge_mii_send(sc, frame->mii_phyaddr, 5);
	stge_mii_send(sc, frame->mii_regaddr, 5);

	/* Turn off xmit. */
	MII_CLR(PC_MgmtDir);

	/* Idle bit */
	MII_CLR((PC_MgmtClk | PC_MgmtData));
	DELAY(1);
	MII_SET(PC_MgmtClk);
	DELAY(1);

	/* Check for ack */
	MII_CLR(PC_MgmtClk);
	DELAY(1);
	ack = CSR_READ_1(sc, STGE_PhyCtrl) & PC_MgmtData;
	MII_SET(PC_MgmtClk);
	DELAY(1);

	/*
	 * Now try reading data bits.  If the ack failed, we still
	 * need to clock through 16 cycles to keep the PHY(s) in sync.
	 */
	if (ack) {
		for (i = 0; i < 16; i++) {
			MII_CLR(PC_MgmtClk);
			DELAY(1);
			MII_SET(PC_MgmtClk);
			DELAY(1);
		}
		goto fail;
	}

	for (i = 0x8000; i; i >>= 1) {
		MII_CLR(PC_MgmtClk);
		DELAY(1);
		if (!ack) {
			if (CSR_READ_1(sc, STGE_PhyCtrl) & PC_MgmtData)
				frame->mii_data |= i;
			DELAY(1);
		}
		MII_SET(PC_MgmtClk);
		DELAY(1);
	}

fail:
	MII_CLR(PC_MgmtClk);
	DELAY(1);
	MII_SET(PC_MgmtClk);
	DELAY(1);

	if (ack)
		return(1);
	return(0);
}

/*
 * Write to a PHY register through the MII.
 */
static int
stge_mii_writereg(struct stge_softc *sc, struct stge_mii_frame *frame)
{
	/*
	 * Set up frame for TX.
	 */
	frame->mii_stdelim = STGE_MII_STARTDELIM;
	frame->mii_opcode = STGE_MII_WRITEOP;
	frame->mii_turnaround = STGE_MII_TURNAROUND;

	/*
	 * Turn on data output.
	 */
	MII_SET(PC_MgmtDir);

	stge_mii_sync(sc);

	stge_mii_send(sc, frame->mii_stdelim, 2);
	stge_mii_send(sc, frame->mii_opcode, 2);
	stge_mii_send(sc, frame->mii_phyaddr, 5);
	stge_mii_send(sc, frame->mii_regaddr, 5);
	stge_mii_send(sc, frame->mii_turnaround, 2);
	stge_mii_send(sc, frame->mii_data, 16);

	/* Idle bit. */
	MII_SET(PC_MgmtClk);
	DELAY(1);
	MII_CLR(PC_MgmtClk);
	DELAY(1);

	/*
	 * Turn off xmit.
	 */
	MII_CLR(PC_MgmtDir);

	return(0);
}

/*
 * stge_miibus_readreg:	[mii interface function]
 *
 *	Read a PHY register on the MII of the TC9021.
 */
static int
stge_miibus_readreg(device_t dev, int phy, int reg)
{
	struct stge_softc *sc;
	struct stge_mii_frame frame;
	int error;

	sc = device_get_softc(dev);

	if (reg == STGE_PhyCtrl) {
		/* XXX allow ip1000phy read STGE_PhyCtrl register. */
		error = CSR_READ_1(sc, STGE_PhyCtrl);
		return (error);
	}

	bzero(&frame, sizeof(frame));
	frame.mii_phyaddr = phy;
	frame.mii_regaddr = reg;

	error = stge_mii_readreg(sc, &frame);

	if (error != 0) {
		/* Don't show errors for PHY probe request */
		if (reg != 1)
			device_printf(sc->sc_dev, "phy read fail\n");
		return (0);
	}
	return (frame.mii_data);
}

/*
 * stge_miibus_writereg:	[mii interface function]
 *
 *	Write a PHY register on the MII of the TC9021.
 */
static int
stge_miibus_writereg(device_t dev, int phy, int reg, int val)
{
	struct stge_softc *sc;
	struct stge_mii_frame frame;
	int error;

	sc = device_get_softc(dev);

	bzero(&frame, sizeof(frame));
	frame.mii_phyaddr = phy;
	frame.mii_regaddr = reg;
	frame.mii_data = val;

	error = stge_mii_writereg(sc, &frame);

	if (error != 0)
		device_printf(sc->sc_dev, "phy write fail\n");
	return (0);
}

/*
 * stge_miibus_statchg:	[mii interface function]
 *
 *	Callback from MII layer when media changes.
 */
static void
stge_miibus_statchg(device_t dev)
{
	struct stge_softc *sc;
	struct mii_data *mii;

	sc = device_get_softc(dev);
	mii = device_get_softc(sc->sc_miibus);

	if (IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE)
		return;

	sc->sc_MACCtrl = 0;
	if (((mii->mii_media_active & IFM_GMASK) & IFM_FDX) != 0)
		sc->sc_MACCtrl |= MC_DuplexSelect;
	if (((mii->mii_media_active & IFM_GMASK) & IFM_FLAG0) != 0)
		sc->sc_MACCtrl |= MC_RxFlowControlEnable;
	if (((mii->mii_media_active & IFM_GMASK) & IFM_FLAG1) != 0)
		sc->sc_MACCtrl |= MC_TxFlowControlEnable;

	stge_link(sc);
}

/*
 * stge_mediastatus:	[ifmedia interface function]
 *
 *	Get the current interface media status.
 */
static void
stge_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct stge_softc *sc;
	struct mii_data *mii;

	sc = ifp->if_softc;
	mii = device_get_softc(sc->sc_miibus);

	mii_pollstat(mii);
	ifmr->ifm_status = mii->mii_media_status;
	ifmr->ifm_active = mii->mii_media_active;
}

/*
 * stge_mediachange:	[ifmedia interface function]
 *
 *	Set hardware to newly-selected media.
 */
static int
stge_mediachange(struct ifnet *ifp)
{
	struct stge_softc *sc;
	struct mii_data *mii;

	sc = ifp->if_softc;
	mii = device_get_softc(sc->sc_miibus);
	mii_mediachg(mii);

	return (0);
}

static int
stge_eeprom_wait(struct stge_softc *sc)
{
	int i;

	for (i = 0; i < STGE_TIMEOUT; i++) {
		DELAY(1000);
		if ((CSR_READ_2(sc, STGE_EepromCtrl) & EC_EepromBusy) == 0)
			return (0);
	}
	return (1);
}

/*
 * stge_read_eeprom:
 *
 *	Read data from the serial EEPROM.
 */
static void
stge_read_eeprom(struct stge_softc *sc, int offset, uint16_t *data)
{
	if (stge_eeprom_wait(sc))
		device_printf(sc->sc_dev, "EEPROM failed to come ready\n");

	CSR_WRITE_2(sc, STGE_EepromCtrl,
	    EC_EepromAddress(offset) | EC_EepromOpcode(EC_OP_RR));
	if (stge_eeprom_wait(sc))
		device_printf(sc->sc_dev, "EEPROM read timed out\n");
	*data = CSR_READ_2(sc, STGE_EepromData);
}

static int
stge_probe(device_t dev)
{
	struct stge_product *sp;
	uint16_t vendor, devid;

	vendor = pci_get_vendor(dev);
	devid = pci_get_device(dev);

	for (sp = stge_products; sp->stge_name != NULL; sp++) {
		if (vendor == sp->stge_vendorid &&
		    devid == sp->stge_deviceid) {
			device_set_desc(dev, sp->stge_name);
			return (0);
		}
	}

	return (ENXIO);
}

static int
stge_attach(device_t dev)
{
	struct stge_softc *sc;
	struct ifnet *ifp;
	uint8_t enaddr[ETHER_ADDR_LEN];
	int error, i;
	uint16_t cmd;
	uint32_t val;

	error = 0;
	sc = device_get_softc(dev);
	sc->sc_dev = dev;
	ifp = &sc->arpcom.ac_if;

	if_initname(ifp, device_get_name(dev), device_get_unit(dev));

	callout_init(&sc->sc_tick_ch);

#ifndef BURN_BRIDGES
	/*
	 * Handle power management nonsense.
	 */
	if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0) {
		uint32_t iobase, membase, irq;

		/* Save important PCI config data. */
		iobase = pci_read_config(dev, STGE_PCIR_LOIO, 4);
		membase = pci_read_config(dev, STGE_PCIR_LOMEM, 4);
		irq = pci_read_config(dev, PCIR_INTLINE, 4);

		/* Reset the power state. */
		device_printf(dev, "chip is in D%d power mode "
		    "-- setting to D0\n", pci_get_powerstate(dev));

		pci_set_powerstate(dev, PCI_POWERSTATE_D0);

		/* Restore PCI config data. */
		pci_write_config(dev, STGE_PCIR_LOIO, iobase, 4);
		pci_write_config(dev, STGE_PCIR_LOMEM, membase, 4);
		pci_write_config(dev, PCIR_INTLINE, irq, 4);
	}
#endif

	/*
	 * Map the device.
	 */
	pci_enable_busmaster(dev);
	cmd = pci_read_config(dev, PCIR_COMMAND, 2);
	val = pci_read_config(dev, STGE_PCIR_LOMEM, 4);

	if ((val & 0x01) != 0) {
		sc->sc_res_rid = STGE_PCIR_LOMEM;
		sc->sc_res_type = SYS_RES_MEMORY;
	} else {
		sc->sc_res_rid = STGE_PCIR_LOIO;
		sc->sc_res_type = SYS_RES_IOPORT;

		val = pci_read_config(dev, sc->sc_res_rid, 4);
		if ((val & 0x01) == 0) {
			device_printf(dev, "couldn't locate IO BAR\n");
			return ENXIO;
		}
	}

	sc->sc_res = bus_alloc_resource_any(dev, sc->sc_res_type,
	    &sc->sc_res_rid, RF_ACTIVE);
	if (sc->sc_res == NULL) {
		device_printf(dev, "couldn't allocate resource\n");
		return ENXIO;
	}
	sc->sc_btag = rman_get_bustag(sc->sc_res);
	sc->sc_bhandle = rman_get_bushandle(sc->sc_res);

	sc->sc_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ,
	    &sc->sc_irq_rid, RF_ACTIVE | RF_SHAREABLE);
	if (sc->sc_irq == NULL) {
		device_printf(dev, "couldn't allocate IRQ\n");
		error = ENXIO;
		goto fail;
	}

	sc->sc_rev = pci_get_revid(dev);

	sc->sc_rxint_nframe = STGE_RXINT_NFRAME_DEFAULT;
	sc->sc_rxint_dmawait = STGE_RXINT_DMAWAIT_DEFAULT;

	sysctl_ctx_init(&sc->sc_sysctl_ctx);
	sc->sc_sysctl_tree = SYSCTL_ADD_NODE(&sc->sc_sysctl_ctx,
	    SYSCTL_STATIC_CHILDREN(_hw), OID_AUTO,
	    device_get_nameunit(dev), CTLFLAG_RD, 0, "");
	if (sc->sc_sysctl_tree == NULL) {
		device_printf(dev, "can't add sysctl node\n");
		error = ENXIO;
		goto fail;
	}

	SYSCTL_ADD_PROC(&sc->sc_sysctl_ctx,
	    SYSCTL_CHILDREN(sc->sc_sysctl_tree), OID_AUTO,
	    "rxint_nframe", CTLTYPE_INT|CTLFLAG_RW, &sc->sc_rxint_nframe, 0,
	    sysctl_hw_stge_rxint_nframe, "I", "stge rx interrupt nframe");

	SYSCTL_ADD_PROC(&sc->sc_sysctl_ctx,
	    SYSCTL_CHILDREN(sc->sc_sysctl_tree), OID_AUTO,
	    "rxint_dmawait", CTLTYPE_INT|CTLFLAG_RW, &sc->sc_rxint_dmawait, 0,
	    sysctl_hw_stge_rxint_dmawait, "I", "stge rx interrupt dmawait");

	error = stge_dma_alloc(sc);
	if (error != 0)
		goto fail;

	/*
	 * Determine if we're copper or fiber.  It affects how we
	 * reset the card.
	 */
	if (CSR_READ_4(sc, STGE_AsicCtrl) & AC_PhyMedia)
		sc->sc_usefiber = 1;
	else
		sc->sc_usefiber = 0;

	/* Load LED configuration from EEPROM. */
	stge_read_eeprom(sc, STGE_EEPROM_LEDMode, &sc->sc_led);

	/*
	 * Reset the chip to a known state.
	 */
	stge_reset(sc, STGE_RESET_FULL);

	/*
	 * Reading the station address from the EEPROM doesn't seem
	 * to work, at least on my sample boards.  Instead, since
	 * the reset sequence does AutoInit, read it from the station
	 * address registers.  For Sundance 1023 you can only read it
	 * from EEPROM.
	 */
	if (pci_get_device(dev) != DEVICEID_SUNDANCETI_ST1023) {
		uint16_t v;

		v = CSR_READ_2(sc, STGE_StationAddress0);
		enaddr[0] = v & 0xff;
		enaddr[1] = v >> 8;
		v = CSR_READ_2(sc, STGE_StationAddress1);
		enaddr[2] = v & 0xff;
		enaddr[3] = v >> 8;
		v = CSR_READ_2(sc, STGE_StationAddress2);
		enaddr[4] = v & 0xff;
		enaddr[5] = v >> 8;
		sc->sc_stge1023 = 0;
	} else {
		uint16_t myaddr[ETHER_ADDR_LEN / 2];

		for (i = 0; i < ETHER_ADDR_LEN / 2; i++) {
			stge_read_eeprom(sc, STGE_EEPROM_StationAddress0 + i,
			    &myaddr[i]);
			myaddr[i] = le16toh(myaddr[i]);
		}
		bcopy(myaddr, enaddr, sizeof(enaddr));
		sc->sc_stge1023 = 1;
	}

	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = stge_ioctl;
	ifp->if_start = stge_start;
	ifp->if_watchdog = stge_watchdog;
	ifp->if_init = stge_init;
#ifdef DEVICE_POLLING
	ifp->if_poll = stge_poll;
#endif
	ifp->if_mtu = ETHERMTU;
	ifq_set_maxlen(&ifp->if_snd, STGE_TX_RING_CNT - 1);
	ifq_set_ready(&ifp->if_snd);
	/* Revision B3 and earlier chips have a checksum bug. */
	if (sc->sc_rev >= 0x0c) {
		ifp->if_hwassist = STGE_CSUM_FEATURES;
		ifp->if_capabilities = IFCAP_HWCSUM;
	} else {
		ifp->if_hwassist = 0;
		ifp->if_capabilities = 0;
	}
	ifp->if_capenable = ifp->if_capabilities;

	/*
	 * Read some important bits from the PhyCtrl register.
	 */
	sc->sc_PhyCtrl = CSR_READ_1(sc, STGE_PhyCtrl) &
	    (PC_PhyDuplexPolarity | PC_PhyLnkPolarity);

	/* Set up MII bus. */
	if ((error = mii_phy_probe(sc->sc_dev, &sc->sc_miibus, stge_mediachange,
	    stge_mediastatus)) != 0) {
		device_printf(sc->sc_dev, "no PHY found!\n");
		goto fail;
	}

	ether_ifattach(ifp, enaddr, NULL);

	/* VLAN capability setup */
	ifp->if_capabilities |= IFCAP_VLAN_MTU | IFCAP_VLAN_HWTAGGING;
#ifdef notyet
	if (sc->sc_rev >= 0x0c)
		ifp->if_capabilities |= IFCAP_VLAN_HWCSUM;
#endif
	ifp->if_capenable = ifp->if_capabilities;

	/*
	 * Tell the upper layer(s) we support long frames.
	 * Must appear after the call to ether_ifattach() because
	 * ether_ifattach() sets ifi_hdrlen to the default value.
	 */
	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);

	/*
	 * The manual recommends disabling early transmit, so we
	 * do.  It's disabled anyway, if using IP checksumming,
	 * since the entire packet must be in the FIFO in order
	 * for the chip to perform the checksum.
	 */
	sc->sc_txthresh = 0x0fff;

	/*
	 * Disable MWI if the PCI layer tells us to.
	 */
	sc->sc_DMACtrl = 0;
	if ((cmd & PCIM_CMD_MWRICEN) == 0)
		sc->sc_DMACtrl |= DMAC_MWIDisable;

	/*
	 * Hookup IRQ
	 */
	error = bus_setup_intr(dev, sc->sc_irq, INTR_MPSAFE, stge_intr, sc,
	    &sc->sc_ih, ifp->if_serializer);
	if (error != 0) {
		ether_ifdetach(ifp);
		device_printf(sc->sc_dev, "couldn't set up IRQ\n");
		goto fail;
	}

	ifp->if_cpuid = ithread_cpuid(rman_get_start(sc->sc_irq));
	KKASSERT(ifp->if_cpuid >= 0 && ifp->if_cpuid < ncpus);

fail:
	if (error != 0)
		stge_detach(dev);

	return (error);
}

static int
stge_detach(device_t dev)
{
	struct stge_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;

	if (device_is_attached(dev)) {
		lwkt_serialize_enter(ifp->if_serializer);
		/* XXX */
		sc->sc_detach = 1;
		stge_stop(sc);
		bus_teardown_intr(dev, sc->sc_irq, sc->sc_ih);
		lwkt_serialize_exit(ifp->if_serializer);

		ether_ifdetach(ifp);
	}

	if (sc->sc_sysctl_tree != NULL)
		sysctl_ctx_free(&sc->sc_sysctl_ctx);

	if (sc->sc_miibus != NULL)
		device_delete_child(dev, sc->sc_miibus);
	bus_generic_detach(dev);

	stge_dma_free(sc);

	if (sc->sc_irq != NULL) {
		bus_release_resource(dev, SYS_RES_IRQ, sc->sc_irq_rid,
		    sc->sc_irq);
	}
	if (sc->sc_res != NULL) {
		bus_release_resource(dev, sc->sc_res_type, sc->sc_res_rid,
		    sc->sc_res);
	}

	return (0);
}

struct stge_dmamap_arg {
	bus_addr_t	stge_busaddr;
};

static void
stge_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	struct stge_dmamap_arg *ctx;

	if (error != 0)
		return;

	KASSERT(nseg == 1, ("too many segments %d\n", nseg));

	ctx = (struct stge_dmamap_arg *)arg;
	ctx->stge_busaddr = segs[0].ds_addr;
}

struct stge_mbuf_dmamap_arg {
	int			nsegs;
	bus_dma_segment_t	*segs;
};

static void
stge_mbuf_dmamap_cb(void *xarg, bus_dma_segment_t *segs, int nsegs,
		    bus_size_t mapsz __unused, int error)
{
	struct stge_mbuf_dmamap_arg *arg = xarg;
	int i;

	if (error) {
		arg->nsegs = 0;
		return;
	}

	KASSERT(nsegs <= arg->nsegs,
	    ("too many segments(%d), should be <= %d\n",
	     nsegs, arg->nsegs));

	arg->nsegs = nsegs;
	for (i = 0; i < nsegs; ++i)
		arg->segs[i] = segs[i];
}

static int
stge_dma_alloc(struct stge_softc *sc)
{
	struct stge_dmamap_arg ctx;
	struct stge_txdesc *txd;
	struct stge_rxdesc *rxd;
	int error, i;

	/* create parent tag. */
	error = bus_dma_tag_create(NULL,	/* parent */
	    1, 0,			/* algnmnt, boundary */
	    STGE_DMA_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsize */
	    0,				/* nsegments */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
	    0,				/* flags */
	    &sc->sc_cdata.stge_parent_tag);
	if (error != 0) {
		device_printf(sc->sc_dev, "failed to create parent DMA tag\n");
		goto fail;
	}

	/* create tag for Tx ring. */
	error = bus_dma_tag_create(sc->sc_cdata.stge_parent_tag,/* parent */
	    STGE_RING_ALIGN, 0,		/* algnmnt, boundary */
	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    STGE_TX_RING_SZ,		/* maxsize */
	    1,				/* nsegments */
	    STGE_TX_RING_SZ,		/* maxsegsize */
	    0,				/* flags */
	    &sc->sc_cdata.stge_tx_ring_tag);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "failed to allocate Tx ring DMA tag\n");
		goto fail;
	}

	/* create tag for Rx ring. */
	error = bus_dma_tag_create(sc->sc_cdata.stge_parent_tag,/* parent */
	    STGE_RING_ALIGN, 0,		/* algnmnt, boundary */
	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    STGE_RX_RING_SZ,		/* maxsize */
	    1,				/* nsegments */
	    STGE_RX_RING_SZ,		/* maxsegsize */
	    0,				/* flags */
	    &sc->sc_cdata.stge_rx_ring_tag);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "failed to allocate Rx ring DMA tag\n");
		goto fail;
	}

	/* create tag for Tx buffers. */
	error = bus_dma_tag_create(sc->sc_cdata.stge_parent_tag,/* parent */
	    1, 0,			/* algnmnt, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    MCLBYTES * STGE_MAXTXSEGS,	/* maxsize */
	    STGE_MAXTXSEGS,		/* nsegments */
	    MCLBYTES,			/* maxsegsize */
	    0,				/* flags */
	    &sc->sc_cdata.stge_tx_tag);
	if (error != 0) {
		device_printf(sc->sc_dev, "failed to allocate Tx DMA tag\n");
		goto fail;
	}

	/* create tag for Rx buffers. */
	error = bus_dma_tag_create(sc->sc_cdata.stge_parent_tag,/* parent */
	    1, 0,			/* algnmnt, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    MCLBYTES,			/* maxsize */
	    1,				/* nsegments */
	    MCLBYTES,			/* maxsegsize */
	    0,				/* flags */
	    &sc->sc_cdata.stge_rx_tag);
	if (error != 0) {
		device_printf(sc->sc_dev, "failed to allocate Rx DMA tag\n");
		goto fail;
	}

	/* allocate DMA'able memory and load the DMA map for Tx ring. */
	error = bus_dmamem_alloc(sc->sc_cdata.stge_tx_ring_tag,
	    (void **)&sc->sc_rdata.stge_tx_ring, BUS_DMA_NOWAIT | BUS_DMA_ZERO,
	    &sc->sc_cdata.stge_tx_ring_map);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "failed to allocate DMA'able memory for Tx ring\n");
		goto fail;
	}

	ctx.stge_busaddr = 0;
	error = bus_dmamap_load(sc->sc_cdata.stge_tx_ring_tag,
	    sc->sc_cdata.stge_tx_ring_map, sc->sc_rdata.stge_tx_ring,
	    STGE_TX_RING_SZ, stge_dmamap_cb, &ctx, BUS_DMA_NOWAIT);
	if (error != 0 || ctx.stge_busaddr == 0) {
		device_printf(sc->sc_dev,
		    "failed to load DMA'able memory for Tx ring\n");
		goto fail;
	}
	sc->sc_rdata.stge_tx_ring_paddr = ctx.stge_busaddr;

	/* allocate DMA'able memory and load the DMA map for Rx ring. */
	error = bus_dmamem_alloc(sc->sc_cdata.stge_rx_ring_tag,
	    (void **)&sc->sc_rdata.stge_rx_ring, BUS_DMA_NOWAIT | BUS_DMA_ZERO,
	    &sc->sc_cdata.stge_rx_ring_map);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "failed to allocate DMA'able memory for Rx ring\n");
		goto fail;
	}

	ctx.stge_busaddr = 0;
	error = bus_dmamap_load(sc->sc_cdata.stge_rx_ring_tag,
	    sc->sc_cdata.stge_rx_ring_map, sc->sc_rdata.stge_rx_ring,
	    STGE_RX_RING_SZ, stge_dmamap_cb, &ctx, BUS_DMA_NOWAIT);
	if (error != 0 || ctx.stge_busaddr == 0) {
		device_printf(sc->sc_dev,
		    "failed to load DMA'able memory for Rx ring\n");
		goto fail;
	}
	sc->sc_rdata.stge_rx_ring_paddr = ctx.stge_busaddr;

	/* create DMA maps for Tx buffers. */
	for (i = 0; i < STGE_TX_RING_CNT; i++) {
		txd = &sc->sc_cdata.stge_txdesc[i];
		txd->tx_m = NULL;
		txd->tx_dmamap = 0;
		error = bus_dmamap_create(sc->sc_cdata.stge_tx_tag, 0,
		    &txd->tx_dmamap);
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "failed to create Tx dmamap\n");
			goto fail;
		}
	}

	/* create DMA maps for Rx buffers. */
	if ((error = bus_dmamap_create(sc->sc_cdata.stge_rx_tag, 0,
	    &sc->sc_cdata.stge_rx_sparemap)) != 0) {
		device_printf(sc->sc_dev, "failed to create spare Rx dmamap\n");
		goto fail;
	}
	for (i = 0; i < STGE_RX_RING_CNT; i++) {
		rxd = &sc->sc_cdata.stge_rxdesc[i];
		rxd->rx_m = NULL;
		rxd->rx_dmamap = 0;
		error = bus_dmamap_create(sc->sc_cdata.stge_rx_tag, 0,
		    &rxd->rx_dmamap);
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "failed to create Rx dmamap\n");
			goto fail;
		}
	}

fail:
	return (error);
}

static void
stge_dma_free(struct stge_softc *sc)
{
	struct stge_txdesc *txd;
	struct stge_rxdesc *rxd;
	int i;

	/* Tx ring */
	if (sc->sc_cdata.stge_tx_ring_tag) {
		if (sc->sc_cdata.stge_tx_ring_map)
			bus_dmamap_unload(sc->sc_cdata.stge_tx_ring_tag,
			    sc->sc_cdata.stge_tx_ring_map);
		if (sc->sc_cdata.stge_tx_ring_map &&
		    sc->sc_rdata.stge_tx_ring)
			bus_dmamem_free(sc->sc_cdata.stge_tx_ring_tag,
			    sc->sc_rdata.stge_tx_ring,
			    sc->sc_cdata.stge_tx_ring_map);
		sc->sc_rdata.stge_tx_ring = NULL;
		sc->sc_cdata.stge_tx_ring_map = 0;
		bus_dma_tag_destroy(sc->sc_cdata.stge_tx_ring_tag);
		sc->sc_cdata.stge_tx_ring_tag = NULL;
	}

	/* Rx ring */
	if (sc->sc_cdata.stge_rx_ring_tag) {
		if (sc->sc_cdata.stge_rx_ring_map)
			bus_dmamap_unload(sc->sc_cdata.stge_rx_ring_tag,
			    sc->sc_cdata.stge_rx_ring_map);
		if (sc->sc_cdata.stge_rx_ring_map &&
		    sc->sc_rdata.stge_rx_ring)
			bus_dmamem_free(sc->sc_cdata.stge_rx_ring_tag,
			    sc->sc_rdata.stge_rx_ring,
			    sc->sc_cdata.stge_rx_ring_map);
		sc->sc_rdata.stge_rx_ring = NULL;
		sc->sc_cdata.stge_rx_ring_map = 0;
		bus_dma_tag_destroy(sc->sc_cdata.stge_rx_ring_tag);
		sc->sc_cdata.stge_rx_ring_tag = NULL;
	}

	/* Tx buffers */
	if (sc->sc_cdata.stge_tx_tag) {
		for (i = 0; i < STGE_TX_RING_CNT; i++) {
			txd = &sc->sc_cdata.stge_txdesc[i];
			if (txd->tx_dmamap) {
				bus_dmamap_destroy(sc->sc_cdata.stge_tx_tag,
				    txd->tx_dmamap);
				txd->tx_dmamap = 0;
			}
		}
		bus_dma_tag_destroy(sc->sc_cdata.stge_tx_tag);
		sc->sc_cdata.stge_tx_tag = NULL;
	}

	/* Rx buffers */
	if (sc->sc_cdata.stge_rx_tag) {
		for (i = 0; i < STGE_RX_RING_CNT; i++) {
			rxd = &sc->sc_cdata.stge_rxdesc[i];
			if (rxd->rx_dmamap) {
				bus_dmamap_destroy(sc->sc_cdata.stge_rx_tag,
				    rxd->rx_dmamap);
				rxd->rx_dmamap = 0;
			}
		}
		if (sc->sc_cdata.stge_rx_sparemap) {
			bus_dmamap_destroy(sc->sc_cdata.stge_rx_tag,
			    sc->sc_cdata.stge_rx_sparemap);
			sc->sc_cdata.stge_rx_sparemap = 0;
		}
		bus_dma_tag_destroy(sc->sc_cdata.stge_rx_tag);
		sc->sc_cdata.stge_rx_tag = NULL;
	}

	if (sc->sc_cdata.stge_parent_tag) {
		bus_dma_tag_destroy(sc->sc_cdata.stge_parent_tag);
		sc->sc_cdata.stge_parent_tag = NULL;
	}
}

/*
 * stge_shutdown:
 *
 *	Make sure the interface is stopped at reboot time.
 */
static void
stge_shutdown(device_t dev)
{
	struct stge_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;

	lwkt_serialize_enter(ifp->if_serializer);
	stge_stop(sc);
	lwkt_serialize_exit(ifp->if_serializer);
}

static int
stge_suspend(device_t dev)
{
	struct stge_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;

	lwkt_serialize_enter(ifp->if_serializer);
	stge_stop(sc);
	sc->sc_suspended = 1;
	lwkt_serialize_exit(ifp->if_serializer);

	return (0);
}

static int
stge_resume(device_t dev)
{
	struct stge_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;

	lwkt_serialize_enter(ifp->if_serializer);
	if (ifp->if_flags & IFF_UP)
		stge_init(sc);
	sc->sc_suspended = 0;
	lwkt_serialize_exit(ifp->if_serializer);

	return (0);
}
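
/*
 * stge_dma_wait:
 *
 *	Spin until the Tx DMA engine reports idle; called from stge_stop()
 *	before the descriptor list pointers are torn down.
 */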
static void
stge_dma_wait(struct stge_softc *sc)
{
	int i;

	for (i = 0; i < STGE_TIMEOUT; i++) {
		DELAY(2);
		if ((CSR_READ_4(sc, STGE_DMACtrl) & DMAC_TxDMAInProg) == 0)
			break;
	}

	if (i == STGE_TIMEOUT)
		device_printf(sc->sc_dev, "DMA wait timed out\n");
}

static int
stge_encap(struct stge_softc *sc, struct mbuf **m_head)
{
	struct stge_txdesc *txd;
	struct stge_tfd *tfd;
	struct mbuf *m;
	struct stge_mbuf_dmamap_arg arg;
	bus_dma_segment_t txsegs[STGE_MAXTXSEGS];
	int error, i, si;
	uint64_t csum_flags, tfc;

	if ((txd = STAILQ_FIRST(&sc->sc_cdata.stge_txfreeq)) == NULL)
		return (ENOBUFS);

	arg.nsegs = STGE_MAXTXSEGS;
	arg.segs = txsegs;
	error = bus_dmamap_load_mbuf(sc->sc_cdata.stge_tx_tag,
	    txd->tx_dmamap, *m_head,
	    stge_mbuf_dmamap_cb, &arg, BUS_DMA_NOWAIT);
	if (error == EFBIG) {
		m = m_defrag(*m_head, MB_DONTWAIT);
		if (m == NULL) {
			m_freem(*m_head);
			*m_head = NULL;
			return (ENOMEM);
		}
		*m_head = m;
		error = bus_dmamap_load_mbuf(sc->sc_cdata.stge_tx_tag,
		    txd->tx_dmamap, *m_head,
		    stge_mbuf_dmamap_cb, &arg, BUS_DMA_NOWAIT);
		if (error != 0) {
			m_freem(*m_head);
			*m_head = NULL;
			return (error);
		}
	} else if (error != 0)
		return (error);
	if (arg.nsegs == 0) {
		m_freem(*m_head);
		*m_head = NULL;
		return (EIO);
	}

	m = *m_head;
	csum_flags = 0;
	if ((m->m_pkthdr.csum_flags & STGE_CSUM_FEATURES) != 0) {
		if (m->m_pkthdr.csum_flags & CSUM_IP)
			csum_flags |= TFD_IPChecksumEnable;
		if (m->m_pkthdr.csum_flags & CSUM_TCP)
			csum_flags |= TFD_TCPChecksumEnable;
		else if (m->m_pkthdr.csum_flags & CSUM_UDP)
			csum_flags |= TFD_UDPChecksumEnable;
	}

	si = sc->sc_cdata.stge_tx_prod;
	tfd = &sc->sc_rdata.stge_tx_ring[si];
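	/*
	 * Each 64-bit fragment word in the TFD packs a DMA segment's
	 * bus address and length for the chip's scatter/gather engine.
	 */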
	for (i = 0; i < arg.nsegs; i++) {
		tfd->tfd_frags[i].frag_word0 =
		    htole64(FRAG_ADDR(txsegs[i].ds_addr) |
			    FRAG_LEN(txsegs[i].ds_len));
	}
	sc->sc_cdata.stge_tx_cnt++;

	tfc = TFD_FrameId(si) | TFD_WordAlign(TFD_WordAlign_disable) |
	    TFD_FragCount(arg.nsegs) | csum_flags;
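	/*
	 * Once the ring is filling up, ask the chip for a Tx DMA
	 * indication on this frame so descriptors get reclaimed promptly.
	 */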
	if (sc->sc_cdata.stge_tx_cnt >= STGE_TX_HIWAT)
		tfc |= TFD_TxDMAIndicate;

	/* Update producer index. */
	sc->sc_cdata.stge_tx_prod = (si + 1) % STGE_TX_RING_CNT;

	/* Check if we have a VLAN tag to insert. */
	if (m->m_flags & M_VLANTAG)
		tfc |= TFD_VLANTagInsert | TFD_VID(m->m_pkthdr.ether_vlantag);
	tfd->tfd_control = htole64(tfc);

	/* Update Tx Queue. */
	STAILQ_REMOVE_HEAD(&sc->sc_cdata.stge_txfreeq, tx_q);
	STAILQ_INSERT_TAIL(&sc->sc_cdata.stge_txbusyq, txd, tx_q);
	txd->tx_m = m;

	/* Sync descriptors. */
	bus_dmamap_sync(sc->sc_cdata.stge_tx_tag, txd->tx_dmamap,
	    BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(sc->sc_cdata.stge_tx_ring_tag,
	    sc->sc_cdata.stge_tx_ring_map, BUS_DMASYNC_PREWRITE);

	return (0);
}

/*
 * stge_start:		[ifnet interface function]
 *
 *	Start packet transmission on the interface.
 */
static void
stge_start(struct ifnet *ifp)
{
	struct stge_softc *sc;
	struct mbuf *m_head;
	int enq;

	sc = ifp->if_softc;

	ASSERT_SERIALIZED(ifp->if_serializer);

	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) !=
	    IFF_RUNNING)
		return;

	for (enq = 0; !ifq_is_empty(&ifp->if_snd); ) {
		if (sc->sc_cdata.stge_tx_cnt >= STGE_TX_HIWAT) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		m_head = ifq_dequeue(&ifp->if_snd, NULL);
		if (m_head == NULL)
			break;

		/*
		 * Pack the data into the transmit ring.  If we
		 * don't have room, set the OACTIVE flag and wait
		 * for the NIC to drain the ring.
		 */
		if (stge_encap(sc, &m_head)) {
			if (m_head != NULL) {
				m_freem(m_head);
				ifp->if_flags |= IFF_OACTIVE;
			}
			break;
		}

		enq++;

		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		ETHER_BPF_MTAP(ifp, m_head);
	}

	if (enq > 0) {
		/* Transmit */
		CSR_WRITE_4(sc, STGE_DMACtrl, DMAC_TxDMAPollNow);

		/* Set a timeout in case the chip goes out to lunch. */
		ifp->if_timer = 5;
	}
}

/*
 * stge_watchdog:	[ifnet interface function]
 *
 *	Watchdog timer handler.
 */
static void
stge_watchdog(struct ifnet *ifp)
{
	ASSERT_SERIALIZED(ifp->if_serializer);

	if_printf(ifp, "device timeout\n");
	ifp->if_oerrors++;
	stge_init(ifp->if_softc);
}

/*
 * stge_ioctl:		[ifnet interface function]
 *
 *	Handle control requests from the operator.
 */
static int
stge_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data, struct ucred *cr)
{
	struct stge_softc *sc;
	struct ifreq *ifr;
	struct mii_data *mii;
	int error, mask;

	ASSERT_SERIALIZED(ifp->if_serializer);

	sc = ifp->if_softc;
	ifr = (struct ifreq *)data;
	error = 0;
	switch (cmd) {
	case SIOCSIFMTU:
		if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > STGE_JUMBO_MTU)
			error = EINVAL;
		else if (ifp->if_mtu != ifr->ifr_mtu) {
			ifp->if_mtu = ifr->ifr_mtu;
			stge_init(sc);
		}
		break;
	case SIOCSIFFLAGS:
		if ((ifp->if_flags & IFF_UP) != 0) {
			if ((ifp->if_flags & IFF_RUNNING) != 0) {
				if (((ifp->if_flags ^ sc->sc_if_flags)
				    & IFF_PROMISC) != 0)
					stge_set_filter(sc);
			} else {
				if (sc->sc_detach == 0)
					stge_init(sc);
			}
		} else {
			if ((ifp->if_flags & IFF_RUNNING) != 0)
				stge_stop(sc);
		}
		sc->sc_if_flags = ifp->if_flags;
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		if ((ifp->if_flags & IFF_RUNNING) != 0)
			stge_set_multi(sc);
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		mii = device_get_softc(sc->sc_miibus);
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
		break;
	case SIOCSIFCAP:
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
		if ((mask & IFCAP_HWCSUM) != 0) {
			ifp->if_capenable ^= IFCAP_HWCSUM;
			if ((IFCAP_HWCSUM & ifp->if_capenable) != 0 &&
			    (IFCAP_HWCSUM & ifp->if_capabilities) != 0)
				ifp->if_hwassist = STGE_CSUM_FEATURES;
			else
				ifp->if_hwassist = 0;
		}
		if ((mask & IFCAP_VLAN_HWTAGGING) != 0) {
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
			if (ifp->if_flags & IFF_RUNNING)
				stge_vlan_setup(sc);
		}
#if 0
		VLAN_CAPABILITIES(ifp);
#endif
		break;
	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}

	return (error);
}

static void
stge_link(struct stge_softc *sc)
{
	uint32_t v, ac;
	int i;

	/*
	 * Update STGE_MACCtrl register depending on link status.
	 * (duplex, flow control etc)
	 */
	v = ac = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK;
	v &= ~(MC_DuplexSelect|MC_RxFlowControlEnable|MC_TxFlowControlEnable);
	v |= sc->sc_MACCtrl;
	CSR_WRITE_4(sc, STGE_MACCtrl, v);
	if (((ac ^ sc->sc_MACCtrl) & MC_DuplexSelect) != 0) {
		/* Duplex setting changed, reset Tx/Rx functions. */
		ac = CSR_READ_4(sc, STGE_AsicCtrl);
		ac |= AC_TxReset | AC_RxReset;
		CSR_WRITE_4(sc, STGE_AsicCtrl, ac);
		for (i = 0; i < STGE_TIMEOUT; i++) {
			DELAY(100);
			if ((CSR_READ_4(sc, STGE_AsicCtrl) & AC_ResetBusy) == 0)
				break;
		}
		if (i == STGE_TIMEOUT)
			device_printf(sc->sc_dev, "reset failed to complete\n");
	}
}

static __inline int
stge_tx_error(struct stge_softc *sc)
{
	uint32_t txstat;
	int error;

	for (error = 0;;) {
		txstat = CSR_READ_4(sc, STGE_TxStatus);
		if ((txstat & TS_TxComplete) == 0)
			break;
		/* Tx underrun */
		if ((txstat & TS_TxUnderrun) != 0) {
			/*
			 * XXX
			 * There should be a better way to recover
			 * from Tx underrun instead of a full reset.
			 */
			if (sc->sc_nerr++ < STGE_MAXERR)
				device_printf(sc->sc_dev, "Tx underrun, "
				    "resetting...\n");
			if (sc->sc_nerr == STGE_MAXERR)
				device_printf(sc->sc_dev, "too many errors; "
				    "not reporting any more\n");
			error = -1;
			break;
		}
		/* Maximum/Late collisions, Re-enable Tx MAC. */
		if ((txstat & (TS_MaxCollisions|TS_LateCollision)) != 0)
			CSR_WRITE_4(sc, STGE_MACCtrl,
			    (CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK) |
			    MC_TxEnable);
	}

	return (error);
}

/*
 * stge_intr:
 *
 *	Interrupt service routine.
 */
static void
stge_intr(void *arg)
{
	struct stge_softc *sc = arg;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	int reinit;
	uint16_t status;

	ASSERT_SERIALIZED(ifp->if_serializer);

	status = CSR_READ_2(sc, STGE_IntStatus);
	if (sc->sc_suspended || (status & IS_InterruptStatus) == 0)
		return;

	/* Disable interrupts. */
	for (reinit = 0;;) {
		status = CSR_READ_2(sc, STGE_IntStatusAck);
		status &= sc->sc_IntEnable;
		if (status == 0)
			break;
		/* Host interface errors. */
		if ((status & IS_HostError) != 0) {
			device_printf(sc->sc_dev,
			    "Host interface error, resetting...\n");
			reinit = 1;
			goto force_init;
		}

		/* Receive interrupts. */
		if ((status & IS_RxDMAComplete) != 0) {
			stge_rxeof(sc, -1);
			if ((status & IS_RFDListEnd) != 0)
				CSR_WRITE_4(sc, STGE_DMACtrl,
				    DMAC_RxDMAPollNow);
		}

		/* Transmit interrupts. */
		if ((status & (IS_TxDMAComplete | IS_TxComplete)) != 0)
			stge_txeof(sc);

		/* Transmission errors. */
		if ((status & IS_TxComplete) != 0) {
			if ((reinit = stge_tx_error(sc)) != 0)
				break;
		}
	}

force_init:
	if (reinit != 0)
		stge_init(sc);

	/* Re-enable interrupts. */
	CSR_WRITE_2(sc, STGE_IntEnable, sc->sc_IntEnable);

	/* Try to get more packets going. */
	if (!ifq_is_empty(&ifp->if_snd))
		if_devstart(ifp);
}

/*
 * stge_txeof:
 *
 *	Helper; handle transmit interrupts.
 */
static void
stge_txeof(struct stge_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct stge_txdesc *txd;
	uint64_t control;
	int cons;

	txd = STAILQ_FIRST(&sc->sc_cdata.stge_txbusyq);
	if (txd == NULL)
		return;
	bus_dmamap_sync(sc->sc_cdata.stge_tx_ring_tag,
	    sc->sc_cdata.stge_tx_ring_map, BUS_DMASYNC_POSTREAD);

	/*
	 * Go through our Tx list and free mbufs for those
	 * frames which have been transmitted.
	 */
	for (cons = sc->sc_cdata.stge_tx_cons;;
	    cons = (cons + 1) % STGE_TX_RING_CNT) {
		if (sc->sc_cdata.stge_tx_cnt <= 0)
			break;
		control = le64toh(sc->sc_rdata.stge_tx_ring[cons].tfd_control);
		if ((control & TFD_TFDDone) == 0)
			break;
		sc->sc_cdata.stge_tx_cnt--;
		ifp->if_flags &= ~IFF_OACTIVE;

		bus_dmamap_sync(sc->sc_cdata.stge_tx_tag, txd->tx_dmamap,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_cdata.stge_tx_tag, txd->tx_dmamap);

		/* Output counter is updated with statistics register */
		m_freem(txd->tx_m);
		txd->tx_m = NULL;
		STAILQ_REMOVE_HEAD(&sc->sc_cdata.stge_txbusyq, tx_q);
		STAILQ_INSERT_TAIL(&sc->sc_cdata.stge_txfreeq, txd, tx_q);
		txd = STAILQ_FIRST(&sc->sc_cdata.stge_txbusyq);
	}
	sc->sc_cdata.stge_tx_cons = cons;
	if (sc->sc_cdata.stge_tx_cnt == 0)
		ifp->if_timer = 0;

	bus_dmamap_sync(sc->sc_cdata.stge_tx_ring_tag,
	    sc->sc_cdata.stge_tx_ring_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
}
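
/*
 * stge_discard_rxbuf:
 *
 *	Clear the RFD status word so the descriptor (and its current
 *	mbuf) is handed back to the chip for reuse.
 */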
static __inline void
stge_discard_rxbuf(struct stge_softc *sc, int idx)
{
	struct stge_rfd *rfd;

	rfd = &sc->sc_rdata.stge_rx_ring[idx];
	rfd->rfd_status = 0;
}

#ifndef __i386__
/*
 * It seems that TC9021's DMA engine has alignment restrictions in
 * DMA scatter operations.  The first DMA segment has no address
 * alignment restrictions but the rest should be aligned on 4(?) byte
 * boundaries.  Otherwise it would corrupt random memory.  Since we don't
 * know which one is used for the first segment in advance we simply
 * don't align at all.
 * To avoid copying over an entire frame to align, we allocate a new
 * mbuf and copy ethernet header to the new mbuf.  The new mbuf is
 * prepended into the existing mbuf chain.
 */
static __inline struct mbuf *
stge_fixup_rx(struct stge_softc *sc, struct mbuf *m)
{
	struct mbuf *n;

	n = NULL;
	if (m->m_len <= (MCLBYTES - ETHER_HDR_LEN)) {
		bcopy(m->m_data, m->m_data + ETHER_HDR_LEN, m->m_len);
		m->m_data += ETHER_HDR_LEN;
		n = m;
	} else {
		MGETHDR(n, MB_DONTWAIT, MT_DATA);
		if (n != NULL) {
			bcopy(m->m_data, n->m_data, ETHER_HDR_LEN);
			m->m_data += ETHER_HDR_LEN;
			m->m_len -= ETHER_HDR_LEN;
			n->m_len = ETHER_HDR_LEN;
			M_MOVE_PKTHDR(n, m);
			n->m_next = m;
		} else
			m_freem(m);
	}

	return (n);
}
#endif

/*
 * stge_rxeof:
 *
 *	Helper; handle receive interrupts.
 */
static void
stge_rxeof(struct stge_softc *sc, int count)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct stge_rxdesc *rxd;
	struct mbuf *mp, *m;
	uint64_t status64;
	uint32_t status;
	int cons, prog;

	bus_dmamap_sync(sc->sc_cdata.stge_rx_ring_tag,
	    sc->sc_cdata.stge_rx_ring_map, BUS_DMASYNC_POSTREAD);

	prog = 0;
	for (cons = sc->sc_cdata.stge_rx_cons; prog < STGE_RX_RING_CNT;
	    prog++, cons = (cons + 1) % STGE_RX_RING_CNT) {
#ifdef DEVICE_POLLING
		if (count >= 0 && count-- == 0)
			break;
#endif

		status64 = le64toh(sc->sc_rdata.stge_rx_ring[cons].rfd_status);
		status = RFD_RxStatus(status64);
		if ((status & RFD_RFDDone) == 0)
			break;

		prog++;
		rxd = &sc->sc_cdata.stge_rxdesc[cons];
		mp = rxd->rx_m;

		/*
		 * If the packet had an error, drop it.  Note we count
		 * the error later in the periodic stats update.
		 */
		if ((status & RFD_FrameEnd) != 0 && (status &
		    (RFD_RxFIFOOverrun | RFD_RxRuntFrame |
		    RFD_RxAlignmentError | RFD_RxFCSError |
		    RFD_RxLengthError)) != 0) {
			stge_discard_rxbuf(sc, cons);
			if (sc->sc_cdata.stge_rxhead != NULL) {
				m_freem(sc->sc_cdata.stge_rxhead);
				STGE_RXCHAIN_RESET(sc);
			}
			continue;
		}
		/*
		 * Add a new receive buffer to the ring.
		 */
		if (stge_newbuf(sc, cons, 0) != 0) {
			ifp->if_iqdrops++;
			stge_discard_rxbuf(sc, cons);
			if (sc->sc_cdata.stge_rxhead != NULL) {
				m_freem(sc->sc_cdata.stge_rxhead);
				STGE_RXCHAIN_RESET(sc);
			}
			continue;
		}

		if ((status & RFD_FrameEnd) != 0)
			mp->m_len = RFD_RxDMAFrameLen(status) -
			    sc->sc_cdata.stge_rxlen;
		sc->sc_cdata.stge_rxlen += mp->m_len;

		/* Chain mbufs. */
		if (sc->sc_cdata.stge_rxhead == NULL) {
			sc->sc_cdata.stge_rxhead = mp;
			sc->sc_cdata.stge_rxtail = mp;
		} else {
			mp->m_flags &= ~M_PKTHDR;
			sc->sc_cdata.stge_rxtail->m_next = mp;
			sc->sc_cdata.stge_rxtail = mp;
		}

		if ((status & RFD_FrameEnd) != 0) {
			m = sc->sc_cdata.stge_rxhead;
			m->m_pkthdr.rcvif = ifp;
			m->m_pkthdr.len = sc->sc_cdata.stge_rxlen;

			if (m->m_pkthdr.len > sc->sc_if_framesize) {
				m_freem(m);
				STGE_RXCHAIN_RESET(sc);
				continue;
			}
			/*
			 * Set the incoming checksum information for
			 * the packet.
			 */
			if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) {
				if ((status & RFD_IPDetected) != 0) {
					m->m_pkthdr.csum_flags |=
					    CSUM_IP_CHECKED;
					if ((status & RFD_IPError) == 0)
						m->m_pkthdr.csum_flags |=
						    CSUM_IP_VALID;
				}
				if (((status & RFD_TCPDetected) != 0 &&
				    (status & RFD_TCPError) == 0) ||
				    ((status & RFD_UDPDetected) != 0 &&
				    (status & RFD_UDPError) == 0)) {
					m->m_pkthdr.csum_flags |=
					    (CSUM_DATA_VALID |
					     CSUM_PSEUDO_HDR |
					     CSUM_FRAG_NOT_CHECKED);
					m->m_pkthdr.csum_data = 0xffff;
				}
			}

#ifndef __i386__
			if (sc->sc_if_framesize > (MCLBYTES - ETHER_ALIGN)) {
				if ((m = stge_fixup_rx(sc, m)) == NULL) {
					STGE_RXCHAIN_RESET(sc);
					continue;
				}
			}
#endif

			/* Check for VLAN tagged packets. */
			if ((status & RFD_VLANDetected) != 0 &&
			    (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) {
				m->m_flags |= M_VLANTAG;
				m->m_pkthdr.ether_vlantag = RFD_TCI(status64);
			}
			/* Pass it on. */
			ifp->if_input(ifp, m);

			STGE_RXCHAIN_RESET(sc);
		}
	}

	if (prog > 0) {
		/* Update the consumer index. */
		sc->sc_cdata.stge_rx_cons = cons;
		bus_dmamap_sync(sc->sc_cdata.stge_rx_ring_tag,
		    sc->sc_cdata.stge_rx_ring_map,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	}
}

#ifdef DEVICE_POLLING
static void
stge_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
{
	struct stge_softc *sc;
	uint16_t status;

	sc = ifp->if_softc;

	switch (cmd) {
	case POLL_REGISTER:
		CSR_WRITE_2(sc, STGE_IntEnable, 0);
		break;
	case POLL_DEREGISTER:
		CSR_WRITE_2(sc, STGE_IntEnable, sc->sc_IntEnable);
		break;
	case POLL_ONLY:
	case POLL_AND_CHECK_STATUS:
		sc->sc_cdata.stge_rxcycles = count;
		stge_rxeof(sc, count);
		stge_txeof(sc);

		if (cmd == POLL_AND_CHECK_STATUS) {
			status = CSR_READ_2(sc, STGE_IntStatus);
			status &= sc->sc_IntEnable;
			if (status != 0) {
				if (status & IS_HostError) {
					device_printf(sc->sc_dev,
					    "Host interface error, "
					    "resetting...\n");
					stge_init(sc);
				}
				if ((status & IS_TxComplete) != 0 &&
				    stge_tx_error(sc) != 0)
					stge_init(sc);
			}
		}

		if (!ifq_is_empty(&ifp->if_snd))
			if_devstart(ifp);
		break;
	}
}
#endif	/* DEVICE_POLLING */

/*
 * stge_tick:
 *
 *	One second timer, used to tick the MII.
 */
static void
stge_tick(void *arg)
{
	struct stge_softc *sc = arg;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct mii_data *mii;

	lwkt_serialize_enter(ifp->if_serializer);

	mii = device_get_softc(sc->sc_miibus);
	mii_tick(mii);

	/* Update statistics counters. */
	stge_stats_update(sc);

	/*
	 * Reclaim any pending Tx descriptors to release mbufs in a
	 * timely manner as we don't generate Tx completion interrupts
	 * for every frame.  This limits the delay to a maximum of one
	 * second.
	 */
	if (sc->sc_cdata.stge_tx_cnt != 0)
		stge_txeof(sc);

	callout_reset(&sc->sc_tick_ch, hz, stge_tick, sc);

	lwkt_serialize_exit(ifp->if_serializer);
}

/*
 * stge_stats_update:
 *
 *	Read the TC9021 statistics counters.
 */
static void
stge_stats_update(struct stge_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;

	CSR_READ_4(sc, STGE_OctetRcvOk);

	ifp->if_ipackets += CSR_READ_4(sc, STGE_FramesRcvdOk);

	ifp->if_ierrors += CSR_READ_2(sc, STGE_FramesLostRxErrors);

	CSR_READ_4(sc, STGE_OctetXmtdOk);

	ifp->if_opackets += CSR_READ_4(sc, STGE_FramesXmtdOk);

	ifp->if_collisions +=
	    CSR_READ_4(sc, STGE_LateCollisions) +
	    CSR_READ_4(sc, STGE_MultiColFrames) +
	    CSR_READ_4(sc, STGE_SingleColFrames);

	ifp->if_oerrors +=
	    CSR_READ_2(sc, STGE_FramesAbortXSColls) +
	    CSR_READ_2(sc, STGE_FramesWEXDeferal);
}

/*
 * stge_reset:
 *
 *	Perform a soft reset on the TC9021.
 */
static void
stge_reset(struct stge_softc *sc, uint32_t how)
{
	uint32_t ac;
	uint8_t v;
	int i, dv;

	dv = 5000;
	ac = CSR_READ_4(sc, STGE_AsicCtrl);
	switch (how) {
	case STGE_RESET_TX:
		ac |= AC_TxReset | AC_FIFO;
		dv = 100;
		break;
	case STGE_RESET_RX:
		ac |= AC_RxReset | AC_FIFO;
		dv = 100;
		break;
	case STGE_RESET_FULL:
	default:
		/*
		 * Only assert RstOut if we're fiber.  We need GMII clocks
		 * to be present in order for the reset to complete on fiber
		 * cards.
		 */
		ac |= AC_GlobalReset | AC_RxReset | AC_TxReset |
		    AC_DMA | AC_FIFO | AC_Network | AC_Host | AC_AutoInit |
		    (sc->sc_usefiber ? AC_RstOut : 0);
		break;
	}

	CSR_WRITE_4(sc, STGE_AsicCtrl, ac);

	/* Account for reset problem at 10Mbps. */
	DELAY(dv);

	for (i = 0; i < STGE_TIMEOUT; i++) {
		if ((CSR_READ_4(sc, STGE_AsicCtrl) & AC_ResetBusy) == 0)
			break;
		DELAY(dv);
	}

	if (i == STGE_TIMEOUT)
		device_printf(sc->sc_dev, "reset failed to complete\n");

	/* Set LED, from Linux IPG driver. */
	ac = CSR_READ_4(sc, STGE_AsicCtrl);
	ac &= ~(AC_LEDMode | AC_LEDSpeed | AC_LEDModeBit1);
	if ((sc->sc_led & 0x01) != 0)
		ac |= AC_LEDMode;
	if ((sc->sc_led & 0x03) != 0)
		ac |= AC_LEDModeBit1;
	if ((sc->sc_led & 0x08) != 0)
		ac |= AC_LEDSpeed;
	CSR_WRITE_4(sc, STGE_AsicCtrl, ac);

	/* Set PHY, from Linux IPG driver */
	v = CSR_READ_1(sc, STGE_PhySet);
	v &= ~(PS_MemLenb9b | PS_MemLen | PS_NonCompdet);
	v |= ((sc->sc_led & 0x70) >> 4);
	CSR_WRITE_1(sc, STGE_PhySet, v);
}

/*
 * stge_init:		[ ifnet interface function ]
 *
 *	Initialize the interface.
 */
static void
stge_init(void *xsc)
{
	struct stge_softc *sc = xsc;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct mii_data *mii;
	uint16_t eaddr[3];
	uint32_t v;
	int error;

	ASSERT_SERIALIZED(ifp->if_serializer);

	mii = device_get_softc(sc->sc_miibus);

	/*
	 * Cancel any pending I/O.
	 */
	stge_stop(sc);

	/* Init descriptors. */
	error = stge_init_rx_ring(sc);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "initialization failed: no memory for rx buffers\n");
		stge_stop(sc);
		goto out;
	}
	stge_init_tx_ring(sc);

	/* Set the station address. */
	bcopy(IF_LLADDR(ifp), eaddr, ETHER_ADDR_LEN);
	CSR_WRITE_2(sc, STGE_StationAddress0, htole16(eaddr[0]));
	CSR_WRITE_2(sc, STGE_StationAddress1, htole16(eaddr[1]));
	CSR_WRITE_2(sc, STGE_StationAddress2, htole16(eaddr[2]));

	/*
	 * Set the statistics masks.  Disable all the RMON stats,
	 * and disable selected stats in the non-RMON stats registers.
	 */
	CSR_WRITE_4(sc, STGE_RMONStatisticsMask, 0xffffffff);
	CSR_WRITE_4(sc, STGE_StatisticsMask,
	    (1U << 1) | (1U << 2) | (1U << 3) | (1U << 4) | (1U << 5) |
	    (1U << 6) | (1U << 7) | (1U << 8) | (1U << 9) | (1U << 10) |
	    (1U << 13) | (1U << 14) | (1U << 15) | (1U << 19) | (1U << 20) |
	    (1U << 21));

	/* Set up the receive filter. */
	stge_set_filter(sc);
	/* Program multicast filter. */
	stge_set_multi(sc);

	/*
	 * Give the transmit and receive ring to the chip.
	 */
	CSR_WRITE_4(sc, STGE_TFDListPtrHi,
	    STGE_ADDR_HI(STGE_TX_RING_ADDR(sc, 0)));
	CSR_WRITE_4(sc, STGE_TFDListPtrLo,
	    STGE_ADDR_LO(STGE_TX_RING_ADDR(sc, 0)));

	CSR_WRITE_4(sc, STGE_RFDListPtrHi,
	    STGE_ADDR_HI(STGE_RX_RING_ADDR(sc, 0)));
	CSR_WRITE_4(sc, STGE_RFDListPtrLo,
	    STGE_ADDR_LO(STGE_RX_RING_ADDR(sc, 0)));

	/*
	 * Initialize the Tx auto-poll period.  It's OK to make this number
	 * large (255 is the max, but we use 127) -- we explicitly kick the
	 * transmit engine when there's actually a packet.
	 */
	CSR_WRITE_1(sc, STGE_TxDMAPollPeriod, 127);

	/* ..and the Rx auto-poll period. */
	CSR_WRITE_1(sc, STGE_RxDMAPollPeriod, 1);

	/* Initialize the Tx start threshold. */
	CSR_WRITE_2(sc, STGE_TxStartThresh, sc->sc_txthresh);

	/* Rx DMA thresholds, from Linux */
	CSR_WRITE_1(sc, STGE_RxDMABurstThresh, 0x30);
	CSR_WRITE_1(sc, STGE_RxDMAUrgentThresh, 0x30);

	/* Rx early threshold, from Linux */
	CSR_WRITE_2(sc, STGE_RxEarlyThresh, 0x7ff);

	/* Tx DMA thresholds, from Linux */
	CSR_WRITE_1(sc, STGE_TxDMABurstThresh, 0x30);
	CSR_WRITE_1(sc, STGE_TxDMAUrgentThresh, 0x04);

	/*
	 * Initialize the Rx DMA interrupt control register.  We
	 * request an interrupt after every incoming packet, but
	 * defer it for sc_rxint_dmawait us.  When the number of
	 * interrupts pending reaches STGE_RXINT_NFRAME, we stop
	 * deferring the interrupt, and signal it immediately.
	 */
	CSR_WRITE_4(sc, STGE_RxDMAIntCtrl,
	    RDIC_RxFrameCount(sc->sc_rxint_nframe) |
	    RDIC_RxDMAWaitTime(STGE_RXINT_USECS2TICK(sc->sc_rxint_dmawait)));
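	/*
	 * sc_rxint_nframe and sc_rxint_dmawait are the interrupt
	 * coalescing knobs exposed through the hw.stgeN.rxint_nframe
	 * and hw.stgeN.rxint_dmawait sysctls created in stge_attach().
	 */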
2105 * Initialize the interrupt mask.
2107 sc->sc_IntEnable = IS_HostError | IS_TxComplete |
2108 IS_TxDMAComplete | IS_RxDMAComplete | IS_RFDListEnd;
2109 #ifdef DEVICE_POLLING
2110 /* Disable interrupts if we are polling. */
2111 if (ifp->if_flags & IFF_POLLING)
2112 CSR_WRITE_2(sc, STGE_IntEnable, 0);
2113 else
2114 #endif
2115 CSR_WRITE_2(sc, STGE_IntEnable, sc->sc_IntEnable);
2118 * Configure the DMA engine.
2119 * XXX Should auto-tune TxBurstLimit.
2121 CSR_WRITE_4(sc, STGE_DMACtrl, sc->sc_DMACtrl | DMAC_TxBurstLimit(3));
2124 * Send a PAUSE frame when we reach 29,696 bytes in the Rx
2125 * FIFO, and send an un-PAUSE frame when we reach 3056 bytes
2126 * in the Rx FIFO.
2128 CSR_WRITE_2(sc, STGE_FlowOnTresh, 29696 / 16);
2129 CSR_WRITE_2(sc, STGE_FlowOffThresh, 3056 / 16);
2132 * Set the maximum frame size.
2134 sc->sc_if_framesize = ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
2135 CSR_WRITE_2(sc, STGE_MaxFrameSize, sc->sc_if_framesize);

	/*
	 * Initialize MacCtrl -- do it before setting the media,
	 * as setting the media will actually program the register.
	 *
	 * Note: We have to poke the IFS value before poking
	 * anything else.
	 */
	/* Tx/Rx MAC should be disabled before programming IFS. */
	CSR_WRITE_4(sc, STGE_MACCtrl, MC_IFSSelect(MC_IFS96bit));

	stge_vlan_setup(sc);

	if (sc->sc_rev >= 6) {		/* >= B.2 */
		/* Multi-frag frame bug work-around. */
		CSR_WRITE_2(sc, STGE_DebugCtrl,
		    CSR_READ_2(sc, STGE_DebugCtrl) | 0x0200);

		/* Tx Poll Now bug work-around. */
		CSR_WRITE_2(sc, STGE_DebugCtrl,
		    CSR_READ_2(sc, STGE_DebugCtrl) | 0x0010);
		/* Tx Poll Now bug work-around. */
		CSR_WRITE_2(sc, STGE_DebugCtrl,
		    CSR_READ_2(sc, STGE_DebugCtrl) | 0x0020);
	}
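	/*
	 * The DebugCtrl bits above (0x0200, 0x0010, 0x0020) are
	 * undocumented; per the comments they work around multi-fragment
	 * frame and Tx Poll Now errata in B.2 and later silicon, and are
	 * presumably inherited from earlier drivers.
	 */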

	v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK;
	v |= MC_StatisticsEnable | MC_TxEnable | MC_RxEnable;
	CSR_WRITE_4(sc, STGE_MACCtrl, v);
	/*
	 * It seems that transmitting frames without checking the state of
	 * the Rx/Tx MAC wedges the hardware.
	 */
	stge_start_tx(sc);
	stge_start_rx(sc);

	/*
	 * Set the current media.
	 */
	mii_mediachg(mii);

	/*
	 * Start the one second MII clock.
	 */
	callout_reset(&sc->sc_tick_ch, hz, stge_tick, sc);
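	/*
	 * stge_tick() presumably re-arms this callout each time it runs,
	 * giving the MII layer its once-per-second heartbeat.
	 */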

	/*
	 * ...all done!
	 */
	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

out:
	if (error != 0)
		device_printf(sc->sc_dev, "interface not running\n");
}

static void
stge_vlan_setup(struct stge_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t v;

	/*
	 * The NIC always copies a VLAN tag regardless of the STGE_MACCtrl
	 * MC_AutoVLANuntagging bit.
	 * The MC_AutoVLANtagging bit selects which VLAN source to use,
	 * STGE_VLANTag or the TFC.  However, the TFC's TFD_VLANTagInsert
	 * bit has priority over MC_AutoVLANtagging, so we always use the
	 * TFC instead of the STGE_VLANTag register.
	 */
	v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK;
	if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0)
		v |= MC_AutoVLANuntagging;
	else
		v &= ~MC_AutoVLANuntagging;
	CSR_WRITE_4(sc, STGE_MACCtrl, v);
}

/*
 * Stop transmission on the interface.
 */
static void
stge_stop(struct stge_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct stge_txdesc *txd;
	struct stge_rxdesc *rxd;
	uint32_t v;
	int i;

	ASSERT_SERIALIZED(ifp->if_serializer);

	/*
	 * Stop the one second clock.
	 */
	callout_stop(&sc->sc_tick_ch);

	/*
	 * Reset the chip to a known state.
	 */
	stge_reset(sc, STGE_RESET_FULL);

	/*
	 * Disable interrupts.
	 */
	CSR_WRITE_2(sc, STGE_IntEnable, 0);

	/*
	 * Stop receiver, transmitter, and stats update.
	 */
	stge_stop_rx(sc);
	stge_stop_tx(sc);
	v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK;
	v |= MC_StatisticsDisable;
	CSR_WRITE_4(sc, STGE_MACCtrl, v);

	/*
	 * Stop the transmit and receive DMA.
	 */
	stge_dma_wait(sc);
	CSR_WRITE_4(sc, STGE_TFDListPtrHi, 0);
	CSR_WRITE_4(sc, STGE_TFDListPtrLo, 0);
	CSR_WRITE_4(sc, STGE_RFDListPtrHi, 0);
	CSR_WRITE_4(sc, STGE_RFDListPtrLo, 0);
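	/*
	 * The MAC and both DMA engines have been quiesced above, so the
	 * chip can no longer DMA into the mbufs that are unloaded and
	 * freed below.
	 */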

	/*
	 * Free RX and TX mbufs still in the queues.
	 */
	for (i = 0; i < STGE_RX_RING_CNT; i++) {
		rxd = &sc->sc_cdata.stge_rxdesc[i];
		if (rxd->rx_m != NULL) {
			bus_dmamap_sync(sc->sc_cdata.stge_rx_tag,
			    rxd->rx_dmamap, BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->sc_cdata.stge_rx_tag,
			    rxd->rx_dmamap);
			m_freem(rxd->rx_m);
			rxd->rx_m = NULL;
		}
	}
	for (i = 0; i < STGE_TX_RING_CNT; i++) {
		txd = &sc->sc_cdata.stge_txdesc[i];
		if (txd->tx_m != NULL) {
			bus_dmamap_sync(sc->sc_cdata.stge_tx_tag,
			    txd->tx_dmamap, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_cdata.stge_tx_tag,
			    txd->tx_dmamap);
			m_freem(txd->tx_m);
			txd->tx_m = NULL;
		}
	}

	/*
	 * Mark the interface down and cancel the watchdog timer.
	 */
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	ifp->if_timer = 0;
}

static void
stge_start_tx(struct stge_softc *sc)
{
	uint32_t v;
	int i;

	v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK;
	if ((v & MC_TxEnabled) != 0)
		return;
	v |= MC_TxEnable;
	CSR_WRITE_4(sc, STGE_MACCtrl, v);
	CSR_WRITE_1(sc, STGE_TxDMAPollPeriod, 127);
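	/*
	 * Poll for the Tx MAC to report enabled; each iteration waits
	 * 10us, so the overall timeout is STGE_TIMEOUT * 10 microseconds.
	 */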
	for (i = STGE_TIMEOUT; i > 0; i--) {
		DELAY(10);
		v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK;
		if ((v & MC_TxEnabled) != 0)
			break;
	}
	if (i == 0)
		device_printf(sc->sc_dev, "Starting Tx MAC timed out\n");
}

static void
stge_start_rx(struct stge_softc *sc)
{
	uint32_t v;
	int i;

	v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK;
	if ((v & MC_RxEnabled) != 0)
		return;
	v |= MC_RxEnable;
	CSR_WRITE_4(sc, STGE_MACCtrl, v);
	CSR_WRITE_1(sc, STGE_RxDMAPollPeriod, 1);
	for (i = STGE_TIMEOUT; i > 0; i--) {
		DELAY(10);
		v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK;
		if ((v & MC_RxEnabled) != 0)
			break;
	}
	if (i == 0)
		device_printf(sc->sc_dev, "Starting Rx MAC timed out\n");
}

static void
stge_stop_tx(struct stge_softc *sc)
{
	uint32_t v;
	int i;

	v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK;
	if ((v & MC_TxEnabled) == 0)
		return;
	v |= MC_TxDisable;
	CSR_WRITE_4(sc, STGE_MACCtrl, v);
	for (i = STGE_TIMEOUT; i > 0; i--) {
		DELAY(10);
		v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK;
		if ((v & MC_TxEnabled) == 0)
			break;
	}
	if (i == 0)
		device_printf(sc->sc_dev, "Stopping Tx MAC timed out\n");
}

static void
stge_stop_rx(struct stge_softc *sc)
{
	uint32_t v;
	int i;

	v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK;
	if ((v & MC_RxEnabled) == 0)
		return;
	v |= MC_RxDisable;
	CSR_WRITE_4(sc, STGE_MACCtrl, v);
	for (i = STGE_TIMEOUT; i > 0; i--) {
		DELAY(10);
		v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK;
		if ((v & MC_RxEnabled) == 0)
			break;
	}
	if (i == 0)
		device_printf(sc->sc_dev, "Stopping Rx MAC timed out\n");
}

static void
stge_init_tx_ring(struct stge_softc *sc)
{
	struct stge_ring_data *rd;
	struct stge_txdesc *txd;
	bus_addr_t addr;
	int i;

	STAILQ_INIT(&sc->sc_cdata.stge_txfreeq);
	STAILQ_INIT(&sc->sc_cdata.stge_txbusyq);

	sc->sc_cdata.stge_tx_prod = 0;
	sc->sc_cdata.stge_tx_cons = 0;
	sc->sc_cdata.stge_tx_cnt = 0;
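	/*
	 * Link the Tx descriptors into a circular list via tfd_next and
	 * pre-mark each one TFDDone, which the hardware appears to treat
	 * as "descriptor consumed", so nothing is transmitted until real
	 * frames are queued.
	 */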

	rd = &sc->sc_rdata;
	bzero(rd->stge_tx_ring, STGE_TX_RING_SZ);
	for (i = 0; i < STGE_TX_RING_CNT; i++) {
		if (i == (STGE_TX_RING_CNT - 1))
			addr = STGE_TX_RING_ADDR(sc, 0);
		else
			addr = STGE_TX_RING_ADDR(sc, i + 1);
		rd->stge_tx_ring[i].tfd_next = htole64(addr);
		rd->stge_tx_ring[i].tfd_control = htole64(TFD_TFDDone);
		txd = &sc->sc_cdata.stge_txdesc[i];
		STAILQ_INSERT_TAIL(&sc->sc_cdata.stge_txfreeq, txd, tx_q);
	}

	bus_dmamap_sync(sc->sc_cdata.stge_tx_ring_tag,
	    sc->sc_cdata.stge_tx_ring_map, BUS_DMASYNC_PREWRITE);
}

static int
stge_init_rx_ring(struct stge_softc *sc)
{
	struct stge_ring_data *rd;
	bus_addr_t addr;
	int i;

	sc->sc_cdata.stge_rx_cons = 0;
	STGE_RXCHAIN_RESET(sc);
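	/*
	 * Like the Tx ring, the Rx descriptors are chained into a circle
	 * via rfd_next; stge_newbuf() attaches a fresh mbuf cluster to
	 * each slot, and a failed allocation aborts with ENOBUFS.
	 */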

	rd = &sc->sc_rdata;
	bzero(rd->stge_rx_ring, STGE_RX_RING_SZ);
	for (i = 0; i < STGE_RX_RING_CNT; i++) {
		if (stge_newbuf(sc, i, 1) != 0)
			return (ENOBUFS);
		if (i == (STGE_RX_RING_CNT - 1))
			addr = STGE_RX_RING_ADDR(sc, 0);
		else
			addr = STGE_RX_RING_ADDR(sc, i + 1);
		rd->stge_rx_ring[i].rfd_next = htole64(addr);
		rd->stge_rx_ring[i].rfd_status = 0;
	}

	bus_dmamap_sync(sc->sc_cdata.stge_rx_ring_tag,
	    sc->sc_cdata.stge_rx_ring_map, BUS_DMASYNC_PREWRITE);

	return (0);
}

/*
 * stge_newbuf:
 *
 *	Add a receive buffer to the indicated descriptor.
 */
static int
stge_newbuf(struct stge_softc *sc, int idx, int waitok)
{
	struct stge_rxdesc *rxd;
	struct stge_rfd *rfd;
	struct mbuf *m;
	struct stge_mbuf_dmamap_arg arg;
	bus_dma_segment_t segs[1];
	bus_dmamap_t map;

	m = m_getcl(waitok ? MB_WAIT : MB_DONTWAIT, MT_DATA, M_PKTHDR);
	if (m == NULL)
		return (ENOBUFS);
	m->m_len = m->m_pkthdr.len = MCLBYTES;
	/*
	 * The hardware requires a 4-byte aligned DMA address when a
	 * jumbo frame is used.
	 */
	if (sc->sc_if_framesize <= (MCLBYTES - ETHER_ALIGN))
		m_adj(m, ETHER_ALIGN);
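	/*
	 * For normal-sized frames, m_adj() shifts the payload by
	 * ETHER_ALIGN (2) bytes so that the IP header following the
	 * 14-byte Ethernet header ends up 4-byte aligned; for jumbo
	 * frames the buffer is left untouched to keep the DMA address
	 * itself 4-byte aligned.
	 */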

	arg.segs = segs;
	arg.nsegs = 1;
	if (bus_dmamap_load_mbuf(sc->sc_cdata.stge_rx_tag,
	    sc->sc_cdata.stge_rx_sparemap, m, stge_mbuf_dmamap_cb, &arg,
	    waitok ? BUS_DMA_WAITOK : BUS_DMA_NOWAIT) != 0) {
		m_freem(m);
		return (ENOBUFS);
	}

	rxd = &sc->sc_cdata.stge_rxdesc[idx];
	if (rxd->rx_m != NULL) {
		bus_dmamap_sync(sc->sc_cdata.stge_rx_tag, rxd->rx_dmamap,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_cdata.stge_rx_tag, rxd->rx_dmamap);
	}
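	/*
	 * The new mbuf was loaded into the spare map above, so on failure
	 * the descriptor's old mbuf and map were still intact.  Now swap
	 * the spare map with the descriptor's map; the old map becomes
	 * the spare for the next replenish.
	 */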
	map = rxd->rx_dmamap;
	rxd->rx_dmamap = sc->sc_cdata.stge_rx_sparemap;
	sc->sc_cdata.stge_rx_sparemap = map;
	bus_dmamap_sync(sc->sc_cdata.stge_rx_tag, rxd->rx_dmamap,
	    BUS_DMASYNC_PREREAD);
	rxd->rx_m = m;

	rfd = &sc->sc_rdata.stge_rx_ring[idx];
	rfd->rfd_frag.frag_word0 =
	    htole64(FRAG_ADDR(segs[0].ds_addr) | FRAG_LEN(segs[0].ds_len));
	rfd->rfd_status = 0;

	return (0);
}

/*
 * stge_set_filter:
 *
 *	Set up the receive filter.
 */
static void
stge_set_filter(struct stge_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint16_t mode;

	mode = CSR_READ_2(sc, STGE_ReceiveMode);
	mode |= RM_ReceiveUnicast;
	if ((ifp->if_flags & IFF_BROADCAST) != 0)
		mode |= RM_ReceiveBroadcast;
	else
		mode &= ~RM_ReceiveBroadcast;
	if ((ifp->if_flags & IFF_PROMISC) != 0)
		mode |= RM_ReceiveAllFrames;
	else
		mode &= ~RM_ReceiveAllFrames;

	CSR_WRITE_2(sc, STGE_ReceiveMode, mode);
}

static void
stge_set_multi(struct stge_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct ifmultiaddr *ifma;
	uint32_t crc;
	uint32_t mchash[2];
	uint16_t mode;
	int count;

	mode = CSR_READ_2(sc, STGE_ReceiveMode);
	if ((ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) != 0) {
		if ((ifp->if_flags & IFF_PROMISC) != 0)
			mode |= RM_ReceiveAllFrames;
		else if ((ifp->if_flags & IFF_ALLMULTI) != 0)
			mode |= RM_ReceiveMulticast;
		CSR_WRITE_2(sc, STGE_ReceiveMode, mode);
		return;
	}

	/* Clear existing filters. */
	CSR_WRITE_4(sc, STGE_HashTable0, 0);
	CSR_WRITE_4(sc, STGE_HashTable1, 0);

	/*
	 * Set up the multicast address filter by passing all multicast
	 * addresses through a CRC generator, and then using the low-order
	 * 6 bits as an index into the 64-bit multicast hash table.  The
	 * high-order bit of those 6 selects the 32-bit hash register,
	 * while the low 5 bits select the bit within the register.
	 */
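	/*
	 * For example, if the big-endian CRC of an address ends in
	 * 0x2a (binary 101010), bit 5 selects mchash[1] and the low
	 * five bits select bit 10 within it.
	 */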

	bzero(mchash, sizeof(mchash));

	count = 0;
	LIST_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		crc = ether_crc32_be(LLADDR((struct sockaddr_dl *)
		    ifma->ifma_addr), ETHER_ADDR_LEN);

		/* Just want the 6 least significant bits. */
		crc &= 0x3f;

		/* Set the corresponding bit in the hash table. */
		mchash[crc >> 5] |= 1 << (crc & 0x1f);
		count++;
	}

	mode &= ~(RM_ReceiveMulticast | RM_ReceiveAllFrames);
	if (count > 0)
		mode |= RM_ReceiveMulticastHash;
	else
		mode &= ~RM_ReceiveMulticastHash;

	CSR_WRITE_4(sc, STGE_HashTable0, mchash[0]);
	CSR_WRITE_4(sc, STGE_HashTable1, mchash[1]);
	CSR_WRITE_2(sc, STGE_ReceiveMode, mode);
}
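
/*
 * sysctl_int_range:
 *
 *	Bounds-checking helper for the Rx interrupt moderation sysctls
 *	below: a write is accepted only when the new value lies within
 *	[low, high]; anything else is rejected with EINVAL.
 */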
static int
sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high)
{
	int error, value;

	if (!arg1)
		return (EINVAL);
	value = *(int *)arg1;
	error = sysctl_handle_int(oidp, &value, 0, req);
	if (error || !req->newptr)
		return (error);
	if (value < low || value > high)
		return (EINVAL);
	*(int *)arg1 = value;

	return (0);
}

static int
sysctl_hw_stge_rxint_nframe(SYSCTL_HANDLER_ARGS)
{
	return (sysctl_int_range(oidp, arg1, arg2, req,
	    STGE_RXINT_NFRAME_MIN, STGE_RXINT_NFRAME_MAX));
}

static int
sysctl_hw_stge_rxint_dmawait(SYSCTL_HANDLER_ARGS)
{
	return (sysctl_int_range(oidp, arg1, arg2, req,
	    STGE_RXINT_DMAWAIT_MIN, STGE_RXINT_DMAWAIT_MAX));
}