/*-
 * Copyright (c) 1999,2000,2001 Jonathan Lemon
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/dev/gx/if_gx.c,v 1.2.2.3 2001/12/14 19:51:39 jlemon Exp $
 * $DragonFly: src/sys/dev/netif/gx/Attic/if_gx.c,v 1.16 2005/05/27 15:36:09 joerg Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/queue.h>

#include <net/if.h>
#include <net/ifq_var.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>

#include <net/bpf.h>
#include <net/if_types.h>
#include <net/vlan/if_vlan_var.h>

#include <netinet/in_systm.h>
#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/tcp.h>
#include <netinet/udp.h>

#include <vm/vm.h>		/* for vtophys */
#include <vm/pmap.h>		/* for vtophys */
#include <machine/clock.h>	/* for DELAY */
#include <machine/bus_memio.h>
#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/bus.h>
#include <sys/rman.h>

#include <bus/pci/pcireg.h>
#include <bus/pci/pcivar.h>

#include "../mii_layer/mii.h"
#include "../mii_layer/miivar.h"

#include "if_gxreg.h"
#include "if_gxvar.h"

#include "miibus_if.h"

#define TUNABLE_TX_INTR_DELAY	100
#define TUNABLE_RX_INTR_DELAY	100

#define GX_CSUM_FEATURES	(CSUM_IP | CSUM_TCP | CSUM_UDP | CSUM_IP_FRAGS)

/*
 * Various supported device vendors/types and their names.
 */
struct gx_device {
	u_int16_t	vendor;
	u_int16_t	device;
	int		version_flags;
	u_int32_t	version_ipg;
	char		*name;
};
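
/*
 * Note: version_ipg packs the chip's three inter-packet-gap timings
 * into the layout of the GX_TX_IPG register (written in gx_init()
 * below): transmit IPG in the low bits, receive IPG1 shifted left by
 * 10, receive IPG2 shifted left by 20 -- which is how the shifted
 * constants in gx_devs[] are built.
 */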

static struct gx_device gx_devs[] = {
	{ INTEL_VENDORID, DEVICEID_WISEMAN,
	    GXF_FORCE_TBI | GXF_OLD_REGS,
	    10 | 2 << 10 | 10 << 20,
	    "Intel Gigabit Ethernet (82542)" },
	{ INTEL_VENDORID, DEVICEID_LIVINGOOD_FIBER,
	    GXF_DMA | GXF_ENABLE_MWI | GXF_CSUM,
	    6 | 8 << 10 | 6 << 20,
	    "Intel Gigabit Ethernet (82543GC-F)" },
	{ INTEL_VENDORID, DEVICEID_LIVINGOOD_COPPER,
	    GXF_DMA | GXF_ENABLE_MWI | GXF_CSUM,
	    8 | 8 << 10 | 6 << 20,
	    "Intel Gigabit Ethernet (82543GC-T)" },
#if 0
/* notyet.. */
	{ INTEL_VENDORID, DEVICEID_CORDOVA_FIBER,
	    GXF_DMA | GXF_ENABLE_MWI | GXF_CSUM,
	    6 | 8 << 10 | 6 << 20,
	    "Intel Gigabit Ethernet (82544EI-F)" },
	{ INTEL_VENDORID, DEVICEID_CORDOVA_COPPER,
	    GXF_DMA | GXF_ENABLE_MWI | GXF_CSUM,
	    8 | 8 << 10 | 6 << 20,
	    "Intel Gigabit Ethernet (82544EI-T)" },
	{ INTEL_VENDORID, DEVICEID_CORDOVA2_COPPER,
	    GXF_DMA | GXF_ENABLE_MWI | GXF_CSUM,
	    8 | 8 << 10 | 6 << 20,
	    "Intel Gigabit Ethernet (82544GC-T)" },
#endif
	{ 0, 0, 0, 0, NULL }
};

static struct gx_regs new_regs = {
	GX_RX_RING_BASE, GX_RX_RING_LEN,
	GX_RX_RING_HEAD, GX_RX_RING_TAIL,
	GX_RX_INTR_DELAY, GX_RX_DMA_CTRL,

	GX_TX_RING_BASE, GX_TX_RING_LEN,
	GX_TX_RING_HEAD, GX_TX_RING_TAIL,
	GX_TX_INTR_DELAY, GX_TX_DMA_CTRL,
};
static struct gx_regs old_regs = {
	GX_RX_OLD_RING_BASE, GX_RX_OLD_RING_LEN,
	GX_RX_OLD_RING_HEAD, GX_RX_OLD_RING_TAIL,
	GX_RX_OLD_INTR_DELAY, GX_RX_OLD_DMA_CTRL,

	GX_TX_OLD_RING_BASE, GX_TX_OLD_RING_LEN,
	GX_TX_OLD_RING_HEAD, GX_TX_OLD_RING_TAIL,
	GX_TX_OLD_INTR_DELAY, GX_TX_OLD_DMA_CTRL,
};

static int	gx_probe(device_t dev);
static int	gx_attach(device_t dev);
static int	gx_detach(device_t dev);
static void	gx_shutdown(device_t dev);

static void	gx_intr(void *xsc);
static void	gx_init(void *xsc);

static struct	gx_device *gx_match(device_t dev);
static void	gx_eeprom_getword(struct gx_softc *gx, int addr,
		    u_int16_t *dest);
static int	gx_read_eeprom(struct gx_softc *gx, caddr_t dest, int off,
		    int cnt);
static int	gx_ifmedia_upd(struct ifnet *ifp);
static void	gx_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr);
static int	gx_miibus_readreg(device_t dev, int phy, int reg);
static void	gx_miibus_writereg(device_t dev, int phy, int reg, int value);
static void	gx_miibus_statchg(device_t dev);
static int	gx_ioctl(struct ifnet *ifp, u_long command, caddr_t data,
		    struct ucred *);
static void	gx_setmulti(struct gx_softc *gx);
static void	gx_reset(struct gx_softc *gx);
static void	gx_phy_reset(struct gx_softc *gx);
static void	gx_release(struct gx_softc *gx);
static void	gx_stop(struct gx_softc *gx);
static void	gx_watchdog(struct ifnet *ifp);
static void	gx_start(struct ifnet *ifp);

static int	gx_init_rx_ring(struct gx_softc *gx);
static void	gx_free_rx_ring(struct gx_softc *gx);
static int	gx_init_tx_ring(struct gx_softc *gx);
static void	gx_free_tx_ring(struct gx_softc *gx);

static device_method_t gx_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		gx_probe),
	DEVMETHOD(device_attach,	gx_attach),
	DEVMETHOD(device_detach,	gx_detach),
	DEVMETHOD(device_shutdown,	gx_shutdown),

	/* MII interface */
	DEVMETHOD(miibus_readreg,	gx_miibus_readreg),
	DEVMETHOD(miibus_writereg,	gx_miibus_writereg),
	DEVMETHOD(miibus_statchg,	gx_miibus_statchg),

	{ 0, 0 }
};

static driver_t gx_driver = {
	"gx",
	gx_methods,
	sizeof(struct gx_softc)
};

static devclass_t gx_devclass;

DECLARE_DUMMY_MODULE(if_gx);
MODULE_DEPEND(if_gx, miibus, 1, 1, 1);
DRIVER_MODULE(if_gx, pci, gx_driver, gx_devclass, 0, 0);
DRIVER_MODULE(miibus, gx, miibus_driver, miibus_devclass, 0, 0);

static struct gx_device *
gx_match(device_t dev)
{
	int i;

	for (i = 0; gx_devs[i].name != NULL; i++) {
		if ((pci_get_vendor(dev) == gx_devs[i].vendor) &&
		    (pci_get_device(dev) == gx_devs[i].device))
			return (&gx_devs[i]);
	}
	return (NULL);
}

static int
gx_probe(device_t dev)
{
	struct gx_device *gx_dev;

	gx_dev = gx_match(dev);
	if (gx_dev == NULL)
		return (ENXIO);

	device_set_desc(dev, gx_dev->name);
	return (0);
}

static int
gx_attach(device_t dev)
{
	struct gx_softc *gx;
	struct gx_device *gx_dev;
	struct ifnet *ifp;
	u_int32_t command;
	int rid, s;
	int error = 0;

	s = splimp();

	gx = device_get_softc(dev);
	bzero(gx, sizeof(struct gx_softc));
	gx->gx_dev = dev;

	gx_dev = gx_match(dev);
	gx->gx_vflags = gx_dev->version_flags;
	gx->gx_ipg = gx_dev->version_ipg;

	mtx_init(&gx->gx_mtx, device_get_nameunit(dev), MTX_DEF | MTX_RECURSE);

	GX_LOCK(gx);

	/*
	 * Map control/status registers.
	 */
	command = pci_read_config(dev, PCIR_COMMAND, 4);
	command |= PCIM_CMD_MEMEN | PCIM_CMD_BUSMASTEREN;
	if (gx->gx_vflags & GXF_ENABLE_MWI)
		command |= PCIM_CMD_MWIEN;
	pci_write_config(dev, PCIR_COMMAND, command, 4);
	command = pci_read_config(dev, PCIR_COMMAND, 4);

	/* XXX check cache line size? */

	if ((command & PCIM_CMD_MEMEN) == 0) {
		device_printf(dev, "failed to enable memory mapping!\n");
		error = ENXIO;
		goto fail;
	}

	rid = GX_PCI_LOMEM;
	gx->gx_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
	    RF_ACTIVE);
#if 0
	/* support PIO mode */
	rid = PCI_LOIO;
	gx->gx_res = bus_alloc_resource_any(dev, SYS_RES_IOPORT, &rid,
	    RF_ACTIVE);
#endif

	if (gx->gx_res == NULL) {
		device_printf(dev, "couldn't map memory\n");
		error = ENXIO;
		goto fail;
	}

	gx->gx_btag = rman_get_bustag(gx->gx_res);
	gx->gx_bhandle = rman_get_bushandle(gx->gx_res);

	/* Allocate interrupt */
	rid = 0;
	gx->gx_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
	    RF_SHAREABLE | RF_ACTIVE);

	if (gx->gx_irq == NULL) {
		device_printf(dev, "couldn't map interrupt\n");
		error = ENXIO;
		goto fail;
	}

	error = bus_setup_intr(dev, gx->gx_irq, INTR_TYPE_NET,
	    gx_intr, gx, &gx->gx_intrhand, NULL);
	if (error) {
		device_printf(dev, "couldn't setup irq\n");
		goto fail;
	}

	/* compensate for different register mappings */
	if (gx->gx_vflags & GXF_OLD_REGS)
		gx->gx_reg = old_regs;
	else
		gx->gx_reg = new_regs;

	if (gx_read_eeprom(gx, (caddr_t)&gx->arpcom.ac_enaddr,
	    GX_EEMAP_MAC, 3)) {
		device_printf(dev, "failed to read station address\n");
		error = ENXIO;
		goto fail;
	}

	/* Allocate the ring buffers. */
	gx->gx_rdata = contigmalloc(sizeof(struct gx_ring_data), M_DEVBUF,
	    M_NOWAIT, 0, 0xffffffff, PAGE_SIZE, 0);

	if (gx->gx_rdata == NULL) {
		device_printf(dev, "no memory for list buffers!\n");
		error = ENXIO;
		goto fail;
	}
	bzero(gx->gx_rdata, sizeof(struct gx_ring_data));

	/* Set default tuneable values. */
	gx->gx_tx_intr_delay = TUNABLE_TX_INTR_DELAY;
	gx->gx_rx_intr_delay = TUNABLE_RX_INTR_DELAY;

	/* Set up ifnet structure */
	ifp = &gx->arpcom.ac_if;
	ifp->if_softc = gx;
	if_initname(ifp, "gx", device_get_unit(dev));
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = gx_ioctl;
	ifp->if_start = gx_start;
	ifp->if_watchdog = gx_watchdog;
	ifp->if_init = gx_init;
	ifp->if_mtu = ETHERMTU;
	ifq_set_maxlen(&ifp->if_snd, GX_TX_RING_CNT - 1);
	ifq_set_ready(&ifp->if_snd);

	/* see if we can enable hardware checksumming */
	if (gx->gx_vflags & GXF_CSUM) {
		ifp->if_capabilities = IFCAP_HWCSUM;
		ifp->if_capenable = ifp->if_capabilities;
	}

	/* figure out transceiver type */
	if (gx->gx_vflags & GXF_FORCE_TBI ||
	    CSR_READ_4(gx, GX_STATUS) & GX_STAT_TBIMODE)
		gx->gx_tbimode = 1;

	if (gx->gx_tbimode) {
		/* SERDES transceiver */
		ifmedia_init(&gx->gx_media, IFM_IMASK, gx_ifmedia_upd,
		    gx_ifmedia_sts);
		ifmedia_add(&gx->gx_media,
		    IFM_ETHER|IFM_1000_SX|IFM_FDX, 0, NULL);
		ifmedia_add(&gx->gx_media, IFM_ETHER|IFM_AUTO, 0, NULL);
		ifmedia_set(&gx->gx_media, IFM_ETHER|IFM_AUTO);
	} else {
		/* GMII/MII transceiver */
		gx_phy_reset(gx);
		if (mii_phy_probe(dev, &gx->gx_miibus, gx_ifmedia_upd,
		    gx_ifmedia_sts)) {
			device_printf(dev, "GMII/MII, PHY not detected\n");
			error = ENXIO;
			goto fail;
		}
	}

	/*
	 * Call MI attach routines.
	 */
	ether_ifattach(ifp, gx->arpcom.ac_enaddr);

	GX_UNLOCK(gx);
	splx(s);
	return (0);

fail:
	GX_UNLOCK(gx);
	gx_release(gx);
	splx(s);
	return (error);
}

static void
gx_release(struct gx_softc *gx)
{

	bus_generic_detach(gx->gx_dev);
	if (gx->gx_miibus)
		device_delete_child(gx->gx_dev, gx->gx_miibus);

	if (gx->gx_intrhand)
		bus_teardown_intr(gx->gx_dev, gx->gx_irq, gx->gx_intrhand);
	if (gx->gx_irq)
		bus_release_resource(gx->gx_dev, SYS_RES_IRQ, 0, gx->gx_irq);
	if (gx->gx_res)
		bus_release_resource(gx->gx_dev, SYS_RES_MEMORY,
		    GX_PCI_LOMEM, gx->gx_res);
}

static void
gx_init(void *xsc)
{
	struct gx_softc *gx = (struct gx_softc *)xsc;
	struct ifmedia *ifm;
	struct ifnet *ifp;
	device_t dev;
	u_int16_t *m;
	u_int32_t ctrl;
	int s, i, tmp;

	dev = gx->gx_dev;
	ifp = &gx->arpcom.ac_if;

	s = splimp();
	GX_LOCK(gx);

	/* Disable host interrupts, halt chip. */
	gx_reset(gx);

	/* disable I/O, flush RX/TX FIFOs, and free RX/TX buffers */
	gx_stop(gx);

	/* Load our MAC address, invalidate other 15 RX addresses. */
	m = (u_int16_t *)&gx->arpcom.ac_enaddr[0];
	CSR_WRITE_4(gx, GX_RX_ADDR_BASE, (m[1] << 16) | m[0]);
	CSR_WRITE_4(gx, GX_RX_ADDR_BASE + 4, m[2] | GX_RA_VALID);
	for (i = 1; i < 16; i++)
		CSR_WRITE_8(gx, GX_RX_ADDR_BASE + i * 8, (u_quad_t)0);

	/* Program multicast filter. */
	gx_setmulti(gx);

	/* Init RX ring. */
	gx_init_rx_ring(gx);

	/* Init TX ring. */
	gx_init_tx_ring(gx);

	if (gx->gx_vflags & GXF_DMA) {
		/* set up DMA control */
		CSR_WRITE_4(gx, gx->gx_reg.r_rx_dma_ctrl, 0x00010000);
		CSR_WRITE_4(gx, gx->gx_reg.r_tx_dma_ctrl, 0x00000000);
	}

	/* enable receiver */
	ctrl = GX_RXC_ENABLE | GX_RXC_RX_THOLD_EIGHTH | GX_RXC_RX_BSIZE_2K;
	ctrl |= GX_RXC_BCAST_ACCEPT;

	/* Enable or disable promiscuous mode as needed. */
	if (ifp->if_flags & IFF_PROMISC)
		ctrl |= GX_RXC_UNI_PROMISC;

	/* This is required if we want to accept jumbo frames */
	if (ifp->if_mtu > ETHERMTU)
		ctrl |= GX_RXC_LONG_PKT_ENABLE;

	/* setup receive checksum control */
	if (ifp->if_capenable & IFCAP_RXCSUM)
		CSR_WRITE_4(gx, GX_RX_CSUM_CONTROL,
		    GX_CSUM_TCP/* | GX_CSUM_IP*/);

	/* setup transmit checksum control */
	if (ifp->if_capenable & IFCAP_TXCSUM)
		ifp->if_hwassist = GX_CSUM_FEATURES;

	ctrl |= GX_RXC_STRIP_ETHERCRC;		/* not on 82542? */
	CSR_WRITE_4(gx, GX_RX_CONTROL, ctrl);

	/* enable transmitter */
	ctrl = GX_TXC_ENABLE | GX_TXC_PAD_SHORT_PKTS | GX_TXC_COLL_RETRY_16;

	/* XXX we should support half-duplex here too... */
	ctrl |= GX_TXC_COLL_TIME_FDX;

	CSR_WRITE_4(gx, GX_TX_CONTROL, ctrl);

	/*
	 * set up recommended IPG times, which vary depending on chip type:
	 *	IPG transmit time:  80ns
	 *	IPG receive time 1: 20ns
	 *	IPG receive time 2: 80ns
	 */
	CSR_WRITE_4(gx, GX_TX_IPG, gx->gx_ipg);

	/* set up 802.3x MAC flow control address -- 01:80:c2:00:00:01 */
	CSR_WRITE_4(gx, GX_FLOW_CTRL_BASE, 0x00C28001);
	CSR_WRITE_4(gx, GX_FLOW_CTRL_BASE+4, 0x00000100);
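	/*
	 * (The two words above spell out the address low byte first:
	 * 0x00C28001 carries 01:80:c2:00 and 0x00000100 supplies the
	 * trailing 00:01.)
	 */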

	/* set up 802.3x MAC flow control type -- 88:08 */
	CSR_WRITE_4(gx, GX_FLOW_CTRL_TYPE, 0x8808);

	/* Set up tuneables */
	CSR_WRITE_4(gx, gx->gx_reg.r_rx_delay, gx->gx_rx_intr_delay);
	CSR_WRITE_4(gx, gx->gx_reg.r_tx_delay, gx->gx_tx_intr_delay);

	/*
	 * Configure chip for correct operation.
	 */
	ctrl = GX_CTRL_DUPLEX;
#if BYTE_ORDER == BIG_ENDIAN
	ctrl |= GX_CTRL_BIGENDIAN;
#endif
	ctrl |= GX_CTRL_VLAN_ENABLE;

	if (gx->gx_tbimode) {
		/*
		 * It seems that TXCW must be initialized from the EEPROM
		 * manually.
		 *
		 * XXX
		 * should probably read the eeprom and re-insert the
		 * values here.
		 */
#define TXCONFIG_WORD	0x000001A0
		CSR_WRITE_4(gx, GX_TX_CONFIG, TXCONFIG_WORD);
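		/*
		 * (0x000001A0 matches an 802.3z tx_Config_Reg base page
		 * advertising full-duplex plus both PAUSE bits --
		 * presumably what the EEPROM default would contain.)
		 */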

		/* turn on hardware autonegotiate */
		GX_SETBIT(gx, GX_TX_CONFIG, GX_TXCFG_AUTONEG);
	} else {
		/*
		 * Auto-detect speed from PHY, instead of using direct
		 * indication.  The SLU bit doesn't force the link, but
		 * must be present for ASDE to work.
		 */
		gx_phy_reset(gx);
		ctrl |= GX_CTRL_SET_LINK_UP | GX_CTRL_AUTOSPEED;
	}

	/*
	 * Take chip out of reset and start it running.
	 */
	CSR_WRITE_4(gx, GX_CTRL, ctrl);

	/* Turn interrupts on. */
	CSR_WRITE_4(gx, GX_INT_MASK_SET, GX_INT_WANTED);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	/*
	 * Set the current media.
	 */
	if (gx->gx_miibus != NULL) {
		mii_mediachg(device_get_softc(gx->gx_miibus));
	} else {
		ifm = &gx->gx_media;
		tmp = ifm->ifm_media;
		ifm->ifm_media = ifm->ifm_cur->ifm_media;
		gx_ifmedia_upd(ifp);
		ifm->ifm_media = tmp;
	}

	/*
	 * XXX
	 * Have the LINK0 flag force the link in TBI mode.
	 */
	if (gx->gx_tbimode && ifp->if_flags & IFF_LINK0) {
		GX_CLRBIT(gx, GX_TX_CONFIG, GX_TXCFG_AUTONEG);
		GX_SETBIT(gx, GX_CTRL, GX_CTRL_SET_LINK_UP);
	}

#if 0
	printf("66mhz: %s  64bit: %s\n",
	    CSR_READ_4(gx, GX_STATUS) & GX_STAT_PCI66 ? "yes" : "no",
	    CSR_READ_4(gx, GX_STATUS) & GX_STAT_BUS64 ? "yes" : "no");
#endif

	GX_UNLOCK(gx);
	splx(s);
}

/*
 * Stop all chip I/O so that the kernel's probe routines don't
 * get confused by errant DMAs when rebooting.
 */
static void
gx_shutdown(device_t dev)
{
	struct gx_softc *gx;

	gx = device_get_softc(dev);
	gx_reset(gx);
	gx_stop(gx);
}

static int
gx_detach(device_t dev)
{
	struct gx_softc *gx;
	struct ifnet *ifp;
	int s;

	s = splimp();

	gx = device_get_softc(dev);
	ifp = &gx->arpcom.ac_if;
	GX_LOCK(gx);

	ether_ifdetach(ifp);
	gx_reset(gx);
	gx_stop(gx);
	ifmedia_removeall(&gx->gx_media);
	gx_release(gx);

	contigfree(gx->gx_rdata, sizeof(struct gx_ring_data), M_DEVBUF);

	GX_UNLOCK(gx);
	mtx_destroy(&gx->gx_mtx);
	splx(s);

	return (0);
}
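
/*
 * Bit-bang one 16-bit word out of the serial EEPROM: the read opcode
 * and word address are clocked in MSB-first on GX_EE_DATA_IN, then 16
 * result bits are clocked back out on GX_EE_DATA_OUT, with GX_EE_CLOCK
 * pulsed around every bit and the part selected via GX_EE_SELECT.
 */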
static void
gx_eeprom_getword(struct gx_softc *gx, int addr, u_int16_t *dest)
{
	u_int16_t word = 0;
	u_int32_t base, reg;
	int x;

	addr = (GX_EE_OPC_READ << GX_EE_ADDR_SIZE) |
	    (addr & ((1 << GX_EE_ADDR_SIZE) - 1));

	base = CSR_READ_4(gx, GX_EEPROM_CTRL);
	base &= ~(GX_EE_DATA_OUT | GX_EE_DATA_IN | GX_EE_CLOCK);
	base |= GX_EE_SELECT;

	CSR_WRITE_4(gx, GX_EEPROM_CTRL, base);

	for (x = 1 << ((GX_EE_OPC_SIZE + GX_EE_ADDR_SIZE) - 1); x; x >>= 1) {
		reg = base | (addr & x ? GX_EE_DATA_IN : 0);
		CSR_WRITE_4(gx, GX_EEPROM_CTRL, reg);
		DELAY(10);
		CSR_WRITE_4(gx, GX_EEPROM_CTRL, reg | GX_EE_CLOCK);
		DELAY(10);
		CSR_WRITE_4(gx, GX_EEPROM_CTRL, reg);
		DELAY(10);
	}

	for (x = 1 << 15; x; x >>= 1) {
		CSR_WRITE_4(gx, GX_EEPROM_CTRL, base | GX_EE_CLOCK);
		DELAY(10);
		reg = CSR_READ_4(gx, GX_EEPROM_CTRL);
		if (reg & GX_EE_DATA_OUT)
			word |= x;
		CSR_WRITE_4(gx, GX_EEPROM_CTRL, base);
		DELAY(10);
	}

	CSR_WRITE_4(gx, GX_EEPROM_CTRL, base & ~GX_EE_SELECT);
	DELAY(10);

	*dest = word;
}

static int
gx_read_eeprom(struct gx_softc *gx, caddr_t dest, int off, int cnt)
{
	u_int16_t *word;
	int i;

	word = (u_int16_t *)dest;
	for (i = 0; i < cnt; i ++) {
		gx_eeprom_getword(gx, off + i, word);
		word++;
	}
	return (0);
}

/*
 * Set media options.
 */
static int
gx_ifmedia_upd(struct ifnet *ifp)
{
	struct gx_softc *gx;
	struct ifmedia *ifm;
	struct mii_data *mii;

	gx = ifp->if_softc;

	if (gx->gx_tbimode) {
		ifm = &gx->gx_media;
		if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
			return (EINVAL);
		switch (IFM_SUBTYPE(ifm->ifm_media)) {
		case IFM_AUTO:
			GX_SETBIT(gx, GX_CTRL, GX_CTRL_LINK_RESET);
			GX_SETBIT(gx, GX_TX_CONFIG, GX_TXCFG_AUTONEG);
			GX_CLRBIT(gx, GX_CTRL, GX_CTRL_LINK_RESET);
			break;
		case IFM_1000_SX:
			device_printf(gx->gx_dev,
			    "manual config not supported yet.\n");
#if 0
			GX_CLRBIT(gx, GX_TX_CONFIG, GX_TXCFG_AUTONEG);
			config = /* bit symbols for 802.3z */0;
			ctrl |= GX_CTRL_SET_LINK_UP;
			if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
				ctrl |= GX_CTRL_DUPLEX;
#endif
			break;
		default:
			return (EINVAL);
		}
	} else {
		ifm = &gx->gx_media;

		/*
		 * 1000TX half duplex does not work.
		 */
		if (IFM_TYPE(ifm->ifm_media) == IFM_ETHER &&
		    IFM_SUBTYPE(ifm->ifm_media) == IFM_1000_T &&
		    (IFM_OPTIONS(ifm->ifm_media) & IFM_FDX) == 0)
			return (EINVAL);
		mii = device_get_softc(gx->gx_miibus);
		mii_mediachg(mii);
	}
	return (0);
}

/*
 * Report current media status.
 */
static void
gx_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct gx_softc *gx;
	struct mii_data *mii;
	u_int32_t status;

	gx = ifp->if_softc;

	if (gx->gx_tbimode) {
		ifmr->ifm_status = IFM_AVALID;
		ifmr->ifm_active = IFM_ETHER;

		status = CSR_READ_4(gx, GX_STATUS);
		if ((status & GX_STAT_LINKUP) == 0)
			return;

		ifmr->ifm_status |= IFM_ACTIVE;
		ifmr->ifm_active |= IFM_1000_SX | IFM_FDX;
	} else {
		mii = device_get_softc(gx->gx_miibus);
		mii_pollstat(mii);
		if ((mii->mii_media_active & (IFM_1000_T | IFM_HDX)) ==
		    (IFM_1000_T | IFM_HDX))
			mii->mii_media_active = IFM_ETHER | IFM_NONE;
		ifmr->ifm_active = mii->mii_media_active;
		ifmr->ifm_status = mii->mii_media_status;
	}
}
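
/*
 * Bit-bang helpers for the MII management interface: gx_mii_shiftin()
 * clocks `length' bits of `data' MSB-first out to the PHY through the
 * GPIO-mapped GX_CTRL_PHY_IO pin, and gx_mii_shiftout() clocks a
 * 16-bit result back in, pulsing GX_CTRL_PHY_CLK around each bit.
 */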
static void
gx_mii_shiftin(struct gx_softc *gx, int data, int length)
{
	u_int32_t reg, x;

	/*
	 * Set up default GPIO direction + PHY data out.
	 */
	reg = CSR_READ_4(gx, GX_CTRL);
	reg &= ~(GX_CTRL_GPIO_DIR_MASK | GX_CTRL_PHY_IO | GX_CTRL_PHY_CLK);
	reg |= GX_CTRL_GPIO_DIR | GX_CTRL_PHY_IO_DIR;

	/*
	 * Shift in data to PHY.
	 */
	for (x = 1 << (length - 1); x; x >>= 1) {
		if (data & x)
			reg |= GX_CTRL_PHY_IO;
		else
			reg &= ~GX_CTRL_PHY_IO;
		CSR_WRITE_4(gx, GX_CTRL, reg);
		DELAY(10);
		CSR_WRITE_4(gx, GX_CTRL, reg | GX_CTRL_PHY_CLK);
		DELAY(10);
		CSR_WRITE_4(gx, GX_CTRL, reg);
		DELAY(10);
	}
}

static u_int16_t
gx_mii_shiftout(struct gx_softc *gx)
{
	u_int32_t reg;
	u_int16_t data;
	int x;

	/*
	 * Set up default GPIO direction + PHY data in.
	 */
	reg = CSR_READ_4(gx, GX_CTRL);
	reg &= ~(GX_CTRL_GPIO_DIR_MASK | GX_CTRL_PHY_IO | GX_CTRL_PHY_CLK);
	reg |= GX_CTRL_GPIO_DIR;

	CSR_WRITE_4(gx, GX_CTRL, reg);
	DELAY(10);
	CSR_WRITE_4(gx, GX_CTRL, reg | GX_CTRL_PHY_CLK);
	DELAY(10);
	CSR_WRITE_4(gx, GX_CTRL, reg);
	DELAY(10);
	/*
	 * Shift out data from PHY.
	 */
	data = 0;
	for (x = 1 << 15; x; x >>= 1) {
		CSR_WRITE_4(gx, GX_CTRL, reg | GX_CTRL_PHY_CLK);
		DELAY(10);
		if (CSR_READ_4(gx, GX_CTRL) & GX_CTRL_PHY_IO)
			data |= x;
		CSR_WRITE_4(gx, GX_CTRL, reg);
		DELAY(10);
	}
	CSR_WRITE_4(gx, GX_CTRL, reg | GX_CTRL_PHY_CLK);
	DELAY(10);
	CSR_WRITE_4(gx, GX_CTRL, reg);
	DELAY(10);

	return (data);
}

static int
gx_miibus_readreg(device_t dev, int phy, int reg)
{
	struct gx_softc *gx;

	gx = device_get_softc(dev);
	if (gx->gx_tbimode)
		return (0);

	/*
	 * XXX
	 * Note: Cordova has a MDIC register. livingood and < have mii bits
	 */

	gx_mii_shiftin(gx, GX_PHY_PREAMBLE, GX_PHY_PREAMBLE_LEN);
	gx_mii_shiftin(gx, (GX_PHY_SOF << 12) | (GX_PHY_OP_READ << 10) |
	    (phy << 5) | reg, GX_PHY_READ_LEN);
	return (gx_mii_shiftout(gx));
}

static void
gx_miibus_writereg(device_t dev, int phy, int reg, int value)
{
	struct gx_softc *gx;

	gx = device_get_softc(dev);
	if (gx->gx_tbimode)
		return;

	gx_mii_shiftin(gx, GX_PHY_PREAMBLE, GX_PHY_PREAMBLE_LEN);
	gx_mii_shiftin(gx, (GX_PHY_SOF << 30) | (GX_PHY_OP_WRITE << 28) |
	    (phy << 23) | (reg << 18) | (GX_PHY_TURNAROUND << 16) |
	    (value & 0xffff), GX_PHY_WRITE_LEN);
}

static void
gx_miibus_statchg(device_t dev)
{
	struct gx_softc *gx;
	struct mii_data *mii;
	int reg, s;

	gx = device_get_softc(dev);
	if (gx->gx_tbimode)
		return;

	/*
	 * Set flow control behavior to mirror what PHY negotiated.
	 */
	mii = device_get_softc(gx->gx_miibus);

	s = splimp();
	GX_LOCK(gx);

	reg = CSR_READ_4(gx, GX_CTRL);
	if (mii->mii_media_active & IFM_FLAG0)
		reg |= GX_CTRL_RX_FLOWCTRL;
	else
		reg &= ~GX_CTRL_RX_FLOWCTRL;
	if (mii->mii_media_active & IFM_FLAG1)
		reg |= GX_CTRL_TX_FLOWCTRL;
	else
		reg &= ~GX_CTRL_TX_FLOWCTRL;
	CSR_WRITE_4(gx, GX_CTRL, reg);

	GX_UNLOCK(gx);
	splx(s);
}

static int
gx_ioctl(struct ifnet *ifp, u_long command, caddr_t data, struct ucred *cr)
{
	struct gx_softc *gx = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	struct mii_data *mii;
	int s, mask, error = 0;

	s = splimp();
	GX_LOCK(gx);

	switch (command) {
	case SIOCSIFMTU:
		if (ifr->ifr_mtu > GX_MAX_MTU) {
			error = EINVAL;
		} else {
			ifp->if_mtu = ifr->ifr_mtu;
			gx_init(gx);
		}
		break;
	case SIOCSIFFLAGS:
		if ((ifp->if_flags & IFF_UP) == 0) {
			gx_stop(gx);
		} else if (ifp->if_flags & IFF_RUNNING &&
		    ((ifp->if_flags & IFF_PROMISC) !=
		    (gx->gx_if_flags & IFF_PROMISC))) {
			if (ifp->if_flags & IFF_PROMISC)
				GX_SETBIT(gx, GX_RX_CONTROL, GX_RXC_UNI_PROMISC);
			else
				GX_CLRBIT(gx, GX_RX_CONTROL, GX_RXC_UNI_PROMISC);
		} else {
			gx_init(gx);
		}
		gx->gx_if_flags = ifp->if_flags;
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		if (ifp->if_flags & IFF_RUNNING)
			gx_setmulti(gx);
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		if (gx->gx_miibus != NULL) {
			mii = device_get_softc(gx->gx_miibus);
			error = ifmedia_ioctl(ifp, ifr,
			    &mii->mii_media, command);
		} else {
			error = ifmedia_ioctl(ifp, ifr, &gx->gx_media, command);
		}
		break;
	case SIOCSIFCAP:
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
		if (mask & IFCAP_HWCSUM) {
			if (IFCAP_HWCSUM & ifp->if_capenable)
				ifp->if_capenable &= ~IFCAP_HWCSUM;
			else
				ifp->if_capenable |= IFCAP_HWCSUM;
			if (ifp->if_flags & IFF_RUNNING)
				gx_init(gx);
		}
		break;
	default:
		error = ether_ioctl(ifp, command, data);
		break;
	}

	GX_UNLOCK(gx);
	splx(s);
	return (error);
}

static void
gx_phy_reset(struct gx_softc *gx)
{
	int reg;

	GX_SETBIT(gx, GX_CTRL, GX_CTRL_SET_LINK_UP);

	/*
	 * PHY reset is active low.
	 */
	reg = CSR_READ_4(gx, GX_CTRL_EXT);
	reg &= ~(GX_CTRLX_GPIO_DIR_MASK | GX_CTRLX_PHY_RESET);
	reg |= GX_CTRLX_GPIO_DIR;

	CSR_WRITE_4(gx, GX_CTRL_EXT, reg | GX_CTRLX_PHY_RESET);
	DELAY(10);
	CSR_WRITE_4(gx, GX_CTRL_EXT, reg);
	DELAY(10);
	CSR_WRITE_4(gx, GX_CTRL_EXT, reg | GX_CTRLX_PHY_RESET);
	DELAY(10);

#if 0
	/* post-livingood (cordova) only */
	GX_SETBIT(gx, GX_CTRL, 0x80000000);
	DELAY(1000);
	GX_CLRBIT(gx, GX_CTRL, 0x80000000);
#endif
}

static void
gx_reset(struct gx_softc *gx)
{

	/* Disable host interrupts. */
	CSR_WRITE_4(gx, GX_INT_MASK_CLR, GX_INT_ALL);

	/* reset chip (THWAP!) */
	GX_SETBIT(gx, GX_CTRL, GX_CTRL_DEVICE_RESET);
	DELAY(10);
}

static void
gx_stop(struct gx_softc *gx)
{
	struct ifnet *ifp;

	ifp = &gx->arpcom.ac_if;

	/* reset and flush transmitter */
	CSR_WRITE_4(gx, GX_TX_CONTROL, GX_TXC_RESET);

	/* reset and flush receiver */
	CSR_WRITE_4(gx, GX_RX_CONTROL, GX_RXC_RESET);

	/* reset link */
	if (gx->gx_tbimode)
		GX_SETBIT(gx, GX_CTRL, GX_CTRL_LINK_RESET);

	/* Free the RX lists. */
	gx_free_rx_ring(gx);

	/* Free TX buffers. */
	gx_free_tx_ring(gx);

	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
}

static void
gx_watchdog(struct ifnet *ifp)
{
	struct gx_softc *gx;

	gx = ifp->if_softc;

	device_printf(gx->gx_dev, "watchdog timeout -- resetting\n");
	gx_reset(gx);
	gx_init(gx);

	ifp->if_oerrors++;
}

/*
 * Initialize a receive ring descriptor.
 */
static int
gx_newbuf(struct gx_softc *gx, int idx, struct mbuf *m)
{
	struct mbuf *m_new = NULL;
	struct gx_rx_desc *r;

	if (m == NULL) {
		MGETHDR(m_new, MB_DONTWAIT, MT_DATA);
		if (m_new == NULL) {
			device_printf(gx->gx_dev,
			    "mbuf allocation failed -- packet dropped\n");
			return (ENOBUFS);
		}
		MCLGET(m_new, MB_DONTWAIT);
		if ((m_new->m_flags & M_EXT) == 0) {
			device_printf(gx->gx_dev,
			    "cluster allocation failed -- packet dropped\n");
			m_freem(m_new);
			return (ENOBUFS);
		}
		m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
	} else {
		m->m_len = m->m_pkthdr.len = MCLBYTES;
		m->m_data = m->m_ext.ext_buf;
		m->m_next = NULL;
		m_new = m;
	}

	/*
	 * XXX
	 * this will _NOT_ work for large MTU's; it will overwrite
	 * the end of the buffer.  E.g.: take this out for jumbograms,
	 * but then that breaks alignment.
	 */
	if (gx->arpcom.ac_if.if_mtu <= ETHERMTU)
		m_adj(m_new, ETHER_ALIGN);
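	/*
	 * (ETHER_ALIGN shifts the 14-byte Ethernet header so that the
	 * IP header behind it starts on a 32-bit boundary.)
	 */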

	gx->gx_cdata.gx_rx_chain[idx] = m_new;
	r = &gx->gx_rdata->gx_rx_ring[idx];
	r->rx_addr = vtophys(mtod(m_new, caddr_t));
	r->rx_staterr = 0;

	return (0);
}

/*
 * The receive ring can have up to 64K descriptors, which at 2K per mbuf
 * cluster, could add up to 128M of memory.  Due to alignment constraints,
 * the number of descriptors must be a multiple of 8.  For now, we
 * allocate 256 entries and hope that our CPU is fast enough to keep up
 * with the NIC.
 */
static int
gx_init_rx_ring(struct gx_softc *gx)
{
	int i, error;

	for (i = 0; i < GX_RX_RING_CNT; i++) {
		error = gx_newbuf(gx, i, NULL);
		if (error)
			return (error);
	}

	/* bring receiver out of reset state, leave disabled */
	CSR_WRITE_4(gx, GX_RX_CONTROL, 0);

	/* set up ring registers */
	CSR_WRITE_8(gx, gx->gx_reg.r_rx_base,
	    (u_quad_t)vtophys(gx->gx_rdata->gx_rx_ring));

	CSR_WRITE_4(gx, gx->gx_reg.r_rx_length,
	    GX_RX_RING_CNT * sizeof(struct gx_rx_desc));
	CSR_WRITE_4(gx, gx->gx_reg.r_rx_head, 0);
	CSR_WRITE_4(gx, gx->gx_reg.r_rx_tail, GX_RX_RING_CNT - 1);
	gx->gx_rx_tail_idx = 0;

	return (0);
}

static void
gx_free_rx_ring(struct gx_softc *gx)
{
	struct mbuf **mp;
	int i;

	mp = gx->gx_cdata.gx_rx_chain;
	for (i = 0; i < GX_RX_RING_CNT; i++, mp++) {
		if (*mp != NULL) {
			m_freem(*mp);
			*mp = NULL;
		}
	}
	bzero((void *)gx->gx_rdata->gx_rx_ring,
	    GX_RX_RING_CNT * sizeof(struct gx_rx_desc));

	/* release any partially-received packet chain */
	if (gx->gx_pkthdr != NULL) {
		m_freem(gx->gx_pkthdr);
		gx->gx_pkthdr = NULL;
	}
}

static int
gx_init_tx_ring(struct gx_softc *gx)
{

	/* bring transmitter out of reset state, leave disabled */
	CSR_WRITE_4(gx, GX_TX_CONTROL, 0);

	/* set up ring registers */
	CSR_WRITE_8(gx, gx->gx_reg.r_tx_base,
	    (u_quad_t)vtophys(gx->gx_rdata->gx_tx_ring));
	CSR_WRITE_4(gx, gx->gx_reg.r_tx_length,
	    GX_TX_RING_CNT * sizeof(struct gx_tx_desc));
	CSR_WRITE_4(gx, gx->gx_reg.r_tx_head, 0);
	CSR_WRITE_4(gx, gx->gx_reg.r_tx_tail, 0);
	gx->gx_tx_head_idx = 0;
	gx->gx_tx_tail_idx = 0;
	gx->gx_txcnt = 0;

	/* set up initial TX context */
	gx->gx_txcontext = GX_TXCONTEXT_NONE;

	return (0);
}

static void
gx_free_tx_ring(struct gx_softc *gx)
{
	struct mbuf **mp;
	int i;

	mp = gx->gx_cdata.gx_tx_chain;
	for (i = 0; i < GX_TX_RING_CNT; i++, mp++) {
		if (*mp != NULL) {
			m_freem(*mp);
			*mp = NULL;
		}
	}
	bzero((void *)&gx->gx_rdata->gx_tx_ring,
	    GX_TX_RING_CNT * sizeof(struct gx_tx_desc));
}

static void
gx_setmulti(struct gx_softc *gx)
{
	int i;

	/* wipe out the multicast table */
	for (i = 1; i < 128; i++)
		CSR_WRITE_4(gx, GX_MULTICAST_BASE + i * 4, 0);
}

static void
gx_rxeof(struct gx_softc *gx)
{
	struct gx_rx_desc *rx;
	struct ifnet *ifp;
	int idx, staterr, len;
	struct mbuf *m;

	gx->gx_rx_interrupts++;

	ifp = &gx->arpcom.ac_if;
	idx = gx->gx_rx_tail_idx;
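
	/*
	 * Walk the ring from the last-seen tail; a descriptor is ours
	 * once the chip sets GX_RXSTAT_COMPLETED in it.  Frames larger
	 * than one 2K cluster arrive as several consecutive descriptors
	 * and are reassembled below into a single chain hanging off
	 * gx_pkthdr/gx_pktnextp.
	 */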
	while (gx->gx_rdata->gx_rx_ring[idx].rx_staterr & GX_RXSTAT_COMPLETED) {

		rx = &gx->gx_rdata->gx_rx_ring[idx];
		m = gx->gx_cdata.gx_rx_chain[idx];
		/*
		 * gx_newbuf overwrites status and length bits, so we
		 * make a copy of them here.
		 */
		len = rx->rx_len;
		staterr = rx->rx_staterr;

		if (staterr & GX_INPUT_ERROR)
			goto ierror;

		if (gx_newbuf(gx, idx, NULL) == ENOBUFS)
			goto ierror;

		GX_INC(idx, GX_RX_RING_CNT);

		if (staterr & GX_RXSTAT_INEXACT_MATCH) {
			/*
			 * multicast packet, must verify against
			 * multicast address.
			 */
		}

		if ((staterr & GX_RXSTAT_END_OF_PACKET) == 0) {
			if (gx->gx_pkthdr == NULL) {
				m->m_len = len;
				m->m_pkthdr.len = len;
				gx->gx_pkthdr = m;
				gx->gx_pktnextp = &m->m_next;
			} else {
				m->m_len = len;
				m->m_flags &= ~M_PKTHDR;
				gx->gx_pkthdr->m_pkthdr.len += len;
				*(gx->gx_pktnextp) = m;
				gx->gx_pktnextp = &m->m_next;
			}
			continue;
		}

		if (gx->gx_pkthdr == NULL) {
			m->m_len = len;
			m->m_pkthdr.len = len;
		} else {
			m->m_len = len;
			m->m_flags &= ~M_PKTHDR;
			gx->gx_pkthdr->m_pkthdr.len += len;
			*(gx->gx_pktnextp) = m;
			m = gx->gx_pkthdr;
			gx->gx_pkthdr = NULL;
		}

		ifp->if_ipackets++;
		m->m_pkthdr.rcvif = ifp;

#define IP_CSMASK	(GX_RXSTAT_IGNORE_CSUM | GX_RXSTAT_HAS_IP_CSUM)
#define TCP_CSMASK \
    (GX_RXSTAT_IGNORE_CSUM | GX_RXSTAT_HAS_TCP_CSUM | GX_RXERR_TCP_CSUM)
		if (ifp->if_capenable & IFCAP_RXCSUM) {
#if 0
			/*
			 * Intel Erratum #23 indicates that the Receive IP
			 * Checksum offload feature has been completely
			 * disabled.
			 */
			if ((staterr & IP_CSUM_MASK) == GX_RXSTAT_HAS_IP_CSUM) {
				m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
				if ((staterr & GX_RXERR_IP_CSUM) == 0)
					m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
			}
#endif
			if ((staterr & TCP_CSMASK) == GX_RXSTAT_HAS_TCP_CSUM) {
				m->m_pkthdr.csum_flags |=
				    CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
				m->m_pkthdr.csum_data = 0xffff;
			}
		}
		/*
		 * If we received a packet with a vlan tag, pass it
		 * to vlan_input() instead of ether_input().
		 */
		if (staterr & GX_RXSTAT_VLAN_PKT)
			VLAN_INPUT_TAG(m, rx->rx_special);
		else
			(*ifp->if_input)(ifp, m);
		continue;

  ierror:
		ifp->if_ierrors++;
		gx_newbuf(gx, idx, m);

		/*
		 * XXX
		 * this isn't quite right.  Suppose we have a packet that
		 * spans 5 descriptors (9K split into 2K buffers).  If
		 * the 3rd descriptor sets an error, we need to ignore
		 * the last two.  The way things stand now, the last two
		 * will be accepted as a single packet.
		 *
		 * we don't worry about this -- the chip may not set an
		 * error in this case, and the checksum of the upper layers
		 * will catch the error.
		 */
		if (gx->gx_pkthdr != NULL) {
			m_freem(gx->gx_pkthdr);
			gx->gx_pkthdr = NULL;
		}
		GX_INC(idx, GX_RX_RING_CNT);
	}

	gx->gx_rx_tail_idx = idx;
	if (--idx < 0)
		idx = GX_RX_RING_CNT - 1;
	CSR_WRITE_4(gx, gx->gx_reg.r_rx_tail, idx);
}

static void
gx_txeof(struct gx_softc *gx)
{
	struct ifnet *ifp;
	int idx, cnt;

	gx->gx_tx_interrupts++;

	ifp = &gx->arpcom.ac_if;
	idx = gx->gx_tx_head_idx;
	cnt = gx->gx_txcnt;

	/*
	 * If the system chipset performs I/O write buffering, it is
	 * possible for the PIO read of the head descriptor to bypass the
	 * memory write of the descriptor, resulting in reading a descriptor
	 * which has not been updated yet.
	 */
	while (cnt) {
		struct gx_tx_desc_old *tx;

		tx = (struct gx_tx_desc_old *)&gx->gx_rdata->gx_tx_ring[idx];
		cnt--;

		if ((tx->tx_command & GX_TXOLD_END_OF_PKT) == 0) {
			GX_INC(idx, GX_TX_RING_CNT);
			continue;
		}

		if ((tx->tx_status & GX_TXSTAT_DONE) == 0)
			break;

		ifp->if_opackets++;

		m_freem(gx->gx_cdata.gx_tx_chain[idx]);
		gx->gx_cdata.gx_tx_chain[idx] = NULL;
		gx->gx_txcnt = cnt;
		ifp->if_timer = 0;

		GX_INC(idx, GX_TX_RING_CNT);
		gx->gx_tx_head_idx = idx;
	}

	if (gx->gx_txcnt == 0)
		ifp->if_flags &= ~IFF_OACTIVE;
}

static void
gx_intr(void *xsc)
{
	struct gx_softc *gx;
	struct ifnet *ifp;
	u_int32_t intr;
	int s;

	gx = xsc;
	ifp = &gx->arpcom.ac_if;

	s = splimp();

	gx->gx_interrupts++;

	/* Disable host interrupts. */
	CSR_WRITE_4(gx, GX_INT_MASK_CLR, GX_INT_ALL);

	/*
	 * find out why we're being bothered.
	 * reading this register automatically clears all bits.
	 */
	intr = CSR_READ_4(gx, GX_INT_READ);

	/* Check RX return ring producer/consumer */
	if (intr & (GX_INT_RCV_TIMER | GX_INT_RCV_THOLD | GX_INT_RCV_OVERRUN))
		gx_rxeof(gx);

	/* Check TX ring producer/consumer */
	if (intr & (GX_INT_XMIT_DONE | GX_INT_XMIT_EMPTY))
		gx_txeof(gx);

	/*
	 * handle other interrupts here.
	 */

	/*
	 * Link change interrupts are not reliable; the interrupt may
	 * not be generated if the link is lost.  However, the register
	 * read is reliable, so check that.  Use SEQ errors to possibly
	 * indicate that the link has changed.
	 */
	if (intr & GX_INT_LINK_CHANGE) {
		if ((CSR_READ_4(gx, GX_STATUS) & GX_STAT_LINKUP) == 0) {
			device_printf(gx->gx_dev, "link down\n");
		} else {
			device_printf(gx->gx_dev, "link up\n");
		}
	}

	/* Turn interrupts on. */
	CSR_WRITE_4(gx, GX_INT_MASK_SET, GX_INT_WANTED);

	if (ifp->if_flags & IFF_RUNNING && !ifq_is_empty(&ifp->if_snd))
		gx_start(ifp);

	splx(s);
}

/*
 * Encapsulate an mbuf chain in the tx ring by coupling the mbuf data
 * pointers to descriptors.
 */
static int
gx_encap(struct gx_softc *gx, struct mbuf *m_head)
{
	struct gx_tx_desc_data *tx = NULL;
	struct gx_tx_desc_ctx *tctx;
	struct mbuf *m;
	int idx, cnt, csumopts, txcontext;
	struct ifvlan *ifv = NULL;

	if ((m_head->m_flags & (M_PROTO1|M_PKTHDR)) == (M_PROTO1|M_PKTHDR) &&
	    m_head->m_pkthdr.rcvif != NULL &&
	    m_head->m_pkthdr.rcvif->if_type == IFT_L2VLAN)
		ifv = m_head->m_pkthdr.rcvif->if_softc;

	cnt = gx->gx_txcnt;
	idx = gx->gx_tx_tail_idx;
	txcontext = gx->gx_txcontext;

	/*
	 * Ensure we have at least 4 descriptors pre-allocated.
	 */
	if (cnt >= GX_TX_RING_CNT - 4)
		return (ENOBUFS);
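
	/*
	 * The chip takes its checksum offsets from a separate "context"
	 * descriptor; a loaded context stays valid for later packets,
	 * so a new one is only emitted when the protocol changes
	 * (tracked in gx_txcontext).
	 */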

	/*
	 * Set up the appropriate offload context if necessary.
	 */
	csumopts = 0;
	if (m_head->m_pkthdr.csum_flags) {
		if (m_head->m_pkthdr.csum_flags & CSUM_IP)
			csumopts |= GX_TXTCP_OPT_IP_CSUM;
		if (m_head->m_pkthdr.csum_flags & CSUM_TCP) {
			csumopts |= GX_TXTCP_OPT_TCP_CSUM;
			txcontext = GX_TXCONTEXT_TCPIP;
		} else if (m_head->m_pkthdr.csum_flags & CSUM_UDP) {
			csumopts |= GX_TXTCP_OPT_TCP_CSUM;
			txcontext = GX_TXCONTEXT_UDPIP;
		} else if (txcontext == GX_TXCONTEXT_NONE)
			txcontext = GX_TXCONTEXT_TCPIP;
		if (txcontext == gx->gx_txcontext)
			goto context_done;

		tctx = (struct gx_tx_desc_ctx *)&gx->gx_rdata->gx_tx_ring[idx];
		tctx->tx_ip_csum_start = ETHER_HDR_LEN;
		tctx->tx_ip_csum_end = ETHER_HDR_LEN + sizeof(struct ip) - 1;
		tctx->tx_ip_csum_offset =
		    ETHER_HDR_LEN + offsetof(struct ip, ip_sum);
		tctx->tx_tcp_csum_start = ETHER_HDR_LEN + sizeof(struct ip);
		tctx->tx_tcp_csum_end = 0;
		if (txcontext == GX_TXCONTEXT_TCPIP)
			tctx->tx_tcp_csum_offset = ETHER_HDR_LEN +
			    sizeof(struct ip) + offsetof(struct tcphdr, th_sum);
		else
			tctx->tx_tcp_csum_offset = ETHER_HDR_LEN +
			    sizeof(struct ip) + offsetof(struct udphdr, uh_sum);
		tctx->tx_command = GX_TXCTX_EXTENSION | GX_TXCTX_INT_DELAY;
		tctx->tx_type = 0;
		tctx->tx_status = 0;
		GX_INC(idx, GX_TX_RING_CNT);
		cnt++;
	}

context_done:
	/*
	 * Start packing the mbufs in this chain into the transmit
	 * descriptors.  Stop when we run out of descriptors or hit
	 * the end of the mbuf chain.
	 */
	for (m = m_head; m != NULL; m = m->m_next) {
		if (m->m_len == 0)
			continue;

		if (cnt == GX_TX_RING_CNT) {
			printf("overflow(2): %d, %d\n", cnt, GX_TX_RING_CNT);
			return (ENOBUFS);
		}

		tx = (struct gx_tx_desc_data *)&gx->gx_rdata->gx_tx_ring[idx];
		tx->tx_addr = vtophys(mtod(m, vm_offset_t));
		tx->tx_status = 0;
		tx->tx_len = m->m_len;
		if (gx->arpcom.ac_if.if_hwassist) {
			tx->tx_type = 1;
			tx->tx_command = GX_TXTCP_EXTENSION;
			tx->tx_options = csumopts;
		} else {
			/*
			 * This is really a struct gx_tx_desc_old.
			 */
			tx->tx_command = 0;
		}
		GX_INC(idx, GX_TX_RING_CNT);
		cnt++;
	}

	if (tx != NULL) {
		tx->tx_command |= GX_TXTCP_REPORT_STATUS | GX_TXTCP_INT_DELAY |
		    GX_TXTCP_ETHER_CRC | GX_TXTCP_END_OF_PKT;
		if (ifv != NULL) {
			tx->tx_command |= GX_TXTCP_VLAN_ENABLE;
			tx->tx_vlan = ifv->ifv_tag;
		}
		gx->gx_txcnt = cnt;
		gx->gx_tx_tail_idx = idx;
		gx->gx_txcontext = txcontext;
		idx = GX_PREV(idx, GX_TX_RING_CNT);
		gx->gx_cdata.gx_tx_chain[idx] = m_head;

		CSR_WRITE_4(gx, gx->gx_reg.r_tx_tail, gx->gx_tx_tail_idx);
	}

	return (0);
}

/*
 * Main transmit routine.  To avoid having to do mbuf copies, we put pointers
 * to the mbuf data regions directly in the transmit descriptors.
 */
static void
gx_start(struct ifnet *ifp)
{
	struct gx_softc *gx;
	struct mbuf *m_head;
	int s;

	s = splimp();

	gx = ifp->if_softc;
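
	/*
	 * Packets are only ifq_poll()ed here and ifq_dequeue()d after
	 * gx_encap() accepts them, so when the ring fills up the
	 * current packet stays on the send queue for the next call.
	 */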
	for (;;) {
		m_head = ifq_poll(&ifp->if_snd);
		if (m_head == NULL)
			break;

		/*
		 * Pack the data into the transmit ring.  If we
		 * don't have room, set the OACTIVE flag and wait
		 * for the NIC to drain the ring.
		 */
		if (gx_encap(gx, m_head) != 0) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}
		m_head = ifq_dequeue(&ifp->if_snd);

		BPF_MTAP(ifp, m_head);

		/*
		 * Set a timeout in case the chip goes out to lunch.
		 */
		ifp->if_timer = 5;
	}

	splx(s);
}