BGE_MBX_TX_HOST_PROD0_LO is write-only, avoid reading it.
/*
 * Copyright (c) 2001 Wind River Systems
 * Copyright (c) 1997, 1998, 1999, 2001
 *	Bill Paul <wpaul@windriver.com>.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Bill Paul.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/dev/bge/if_bge.c,v 1.3.2.39 2005/07/03 03:41:18 silby Exp $
 * $DragonFly: src/sys/dev/netif/bge/if_bge.c,v 1.65 2007/04/14 04:22:14 sephe Exp $
 */
/*
 * Broadcom BCM570x family gigabit ethernet driver for FreeBSD.
 *
 * Written by Bill Paul <wpaul@windriver.com>
 * Senior Engineer, Wind River Systems
 */

/*
 * The Broadcom BCM5700 is based on technology originally developed by
 * Alteon Networks as part of the Tigon I and Tigon II gigabit ethernet
 * MAC chips. The BCM5700, sometimes referred to as the Tigon III, has
 * two on-board MIPS R4000 CPUs and can have as much as 16MB of external
 * SSRAM. The BCM5700 supports TCP, UDP and IP checksum offload, jumbo
 * frames, highly configurable RX filtering, and 16 RX and TX queues
 * (which, along with RX filter rules, can be used for QOS applications).
 * Other features, such as TCP segmentation, may be available as part
 * of value-added firmware updates. Unlike the Tigon I and Tigon II,
 * firmware images can be stored in hardware and need not be compiled
 * into the driver.
 *
 * The BCM5700 supports the PCI v2.2 and PCI-X v1.0 standards, and will
 * function in a 32-bit/64-bit 33/66MHz bus, or a 64-bit/133MHz bus.
 *
 * The BCM5701 is a single-chip solution incorporating both the BCM5700
 * MAC and a BCM5401 10/100/1000 PHY. Unlike the BCM5700, the BCM5701
 * does not support external SSRAM.
 *
 * Broadcom also produces a variation of the BCM5700 under the "Altima"
 * brand name, which is functionally similar but lacks PCI-X support.
 *
 * Without external SSRAM, you can only have at most 4 TX rings,
 * and the use of the mini RX ring is disabled. This seems to imply
 * that these features are simply not available on the BCM5701. As a
 * result, this driver does not implement any support for the mini RX
 * ring.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/queue.h>
#include <sys/serialize.h>
#include <sys/thread2.h>

#include <net/if.h>
#include <net/ifq_var.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>

#include <net/bpf.h>

#include <net/if_types.h>
#include <net/vlan/if_vlan_var.h>

#include <netinet/in_systm.h>
#include <netinet/in.h>
#include <netinet/ip.h>

#include <vm/vm.h>		/* for vtophys */
#include <vm/pmap.h>		/* for vtophys */
#include <sys/bus.h>
#include <sys/rman.h>

#include <dev/netif/mii_layer/mii.h>
#include <dev/netif/mii_layer/miivar.h>
#include <dev/netif/mii_layer/miidevs.h>
#include <dev/netif/mii_layer/brgphyreg.h>

#include <bus/pci/pcidevs.h>
#include <bus/pci/pcireg.h>
#include <bus/pci/pcivar.h>

#include "if_bgereg.h"
#define BGE_CSUM_FEATURES	(CSUM_IP | CSUM_TCP | CSUM_UDP)

/* "controller miibus0" required.  See GENERIC if you get errors here. */
#include "miibus_if.h"
/*
 * Various supported device vendors/types and their names. Note: the
 * spec seems to indicate that the hardware still has Alteon's vendor
 * ID burned into it, though it will always be overridden by the vendor
 * ID in the EEPROM. Just to be safe, we cover all possibilities.
 */

#define BGE_DEVDESC_MAX		64	/* Maximum device description length */
static struct bge_type bge_devs[] = {
	{ PCI_VENDOR_ALTEON, PCI_PRODUCT_ALTEON_BCM5700,
		"Alteon BCM5700 Gigabit Ethernet" },
	{ PCI_VENDOR_ALTEON, PCI_PRODUCT_ALTEON_BCM5701,
		"Alteon BCM5701 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5700,
		"Broadcom BCM5700 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5701,
		"Broadcom BCM5701 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5702X,
		"Broadcom BCM5702X Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5702_ALT,
		"Broadcom BCM5702 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5703X,
		"Broadcom BCM5703X Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5703A3,
		"Broadcom BCM5703 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5704C,
		"Broadcom BCM5704C Dual Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5704S,
		"Broadcom BCM5704S Dual Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5705,
		"Broadcom BCM5705 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5705K,
		"Broadcom BCM5705K Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5705M,
		"Broadcom BCM5705M Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5705M_ALT,
		"Broadcom BCM5705M Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5714,
		"Broadcom BCM5714C Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5721,
		"Broadcom BCM5721 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5750,
		"Broadcom BCM5750 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5750M,
		"Broadcom BCM5750M Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5751,
		"Broadcom BCM5751 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5751M,
		"Broadcom BCM5751M Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5752,
		"Broadcom BCM5752 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5782,
		"Broadcom BCM5782 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5788,
		"Broadcom BCM5788 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5789,
		"Broadcom BCM5789 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5901,
		"Broadcom BCM5901 Fast Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5901A2,
		"Broadcom BCM5901A2 Fast Ethernet" },
	{ PCI_VENDOR_SCHNEIDERKOCH, PCI_PRODUCT_SCHNEIDERKOCH_SK_9DX1,
		"SysKonnect Gigabit Ethernet" },
	{ PCI_VENDOR_ALTIMA, PCI_PRODUCT_ALTIMA_AC1000,
		"Altima AC1000 Gigabit Ethernet" },
	{ PCI_VENDOR_ALTIMA, PCI_PRODUCT_ALTIMA_AC1001,
		"Altima AC1002 Gigabit Ethernet" },
	{ PCI_VENDOR_ALTIMA, PCI_PRODUCT_ALTIMA_AC9100,
		"Altima AC9100 Gigabit Ethernet" },
	{ 0, 0, NULL }
};
static int	bge_probe(device_t);
static int	bge_attach(device_t);
static int	bge_detach(device_t);
static void	bge_release_resources(struct bge_softc *);
static void	bge_txeof(struct bge_softc *);
static void	bge_rxeof(struct bge_softc *);

static void	bge_tick(void *);
static void	bge_tick_serialized(void *);
static void	bge_stats_update(struct bge_softc *);
static void	bge_stats_update_regs(struct bge_softc *);
static int	bge_encap(struct bge_softc *, struct mbuf *, uint32_t *);

static void	bge_intr(void *);
static void	bge_start(struct ifnet *);
static int	bge_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
static void	bge_init(void *);
static void	bge_stop(struct bge_softc *);
static void	bge_watchdog(struct ifnet *);
static void	bge_shutdown(device_t);
static int	bge_suspend(device_t);
static int	bge_resume(device_t);
static int	bge_ifmedia_upd(struct ifnet *);
static void	bge_ifmedia_sts(struct ifnet *, struct ifmediareq *);

static uint8_t	bge_eeprom_getbyte(struct bge_softc *, uint32_t, uint8_t *);
static int	bge_read_eeprom(struct bge_softc *, caddr_t, uint32_t, size_t);

static void	bge_setmulti(struct bge_softc *);
static void	bge_setpromisc(struct bge_softc *);

static void	bge_handle_events(struct bge_softc *);
static int	bge_alloc_jumbo_mem(struct bge_softc *);
static void	bge_free_jumbo_mem(struct bge_softc *);
static struct bge_jslot
		*bge_jalloc(struct bge_softc *);
static void	bge_jfree(void *);
static void	bge_jref(void *);
static int	bge_newbuf_std(struct bge_softc *, int, struct mbuf *);
static int	bge_newbuf_jumbo(struct bge_softc *, int, struct mbuf *);
static int	bge_init_rx_ring_std(struct bge_softc *);
static void	bge_free_rx_ring_std(struct bge_softc *);
static int	bge_init_rx_ring_jumbo(struct bge_softc *);
static void	bge_free_rx_ring_jumbo(struct bge_softc *);
static void	bge_free_tx_ring(struct bge_softc *);
static int	bge_init_tx_ring(struct bge_softc *);

static int	bge_chipinit(struct bge_softc *);
static int	bge_blockinit(struct bge_softc *);

#ifdef notdef
static uint8_t	bge_vpd_readbyte(struct bge_softc *, uint32_t);
static void	bge_vpd_read_res(struct bge_softc *, struct vpd_res *, uint32_t);
static void	bge_vpd_read(struct bge_softc *);
#endif

static uint32_t	bge_readmem_ind(struct bge_softc *, uint32_t);
static void	bge_writemem_ind(struct bge_softc *, uint32_t, uint32_t);
#ifdef notdef
static uint32_t	bge_readreg_ind(struct bge_softc *, uint32_t);
#endif
static void	bge_writereg_ind(struct bge_softc *, uint32_t, uint32_t);

static int	bge_miibus_readreg(device_t, int, int);
static int	bge_miibus_writereg(device_t, int, int, int);
static void	bge_miibus_statchg(device_t);

static void	bge_reset(struct bge_softc *);
/*
 * Set the following tunable to 1 for some IBM blade servers with the DNLK
 * switch module. Auto negotiation is broken for those configurations.
 */
static int	bge_fake_autoneg = 0;
TUNABLE_INT("hw.bge.fake_autoneg", &bge_fake_autoneg);
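/*
 * For example, the tunable can be set from the boot loader:
 *
 *	# /boot/loader.conf
 *	hw.bge.fake_autoneg="1"
 */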
static device_method_t bge_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		bge_probe),
	DEVMETHOD(device_attach,	bge_attach),
	DEVMETHOD(device_detach,	bge_detach),
	DEVMETHOD(device_shutdown,	bge_shutdown),
	DEVMETHOD(device_suspend,	bge_suspend),
	DEVMETHOD(device_resume,	bge_resume),

	/* bus interface */
	DEVMETHOD(bus_print_child,	bus_generic_print_child),
	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),

	/* MII interface */
	DEVMETHOD(miibus_readreg,	bge_miibus_readreg),
	DEVMETHOD(miibus_writereg,	bge_miibus_writereg),
	DEVMETHOD(miibus_statchg,	bge_miibus_statchg),

	{ 0, 0 }
};

static DEFINE_CLASS_0(bge, bge_driver, bge_methods, sizeof(struct bge_softc));
static devclass_t bge_devclass;

DECLARE_DUMMY_MODULE(if_bge);
DRIVER_MODULE(if_bge, pci, bge_driver, bge_devclass, 0, 0);
DRIVER_MODULE(miibus, bge, miibus_driver, miibus_devclass, 0, 0);
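/*
 * The accessors below reach NIC-internal memory and registers indirectly
 * through PCI configuration space: a write to a base-address config
 * register selects the target window and a companion data register
 * carries the payload. This works even while memory-mapped access to
 * the device is unavailable.
 */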
static uint32_t
bge_readmem_ind(struct bge_softc *sc, uint32_t off)
{
	device_t dev = sc->bge_dev;

	pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4);
	return(pci_read_config(dev, BGE_PCI_MEMWIN_DATA, 4));
}

static void
bge_writemem_ind(struct bge_softc *sc, uint32_t off, uint32_t val)
{
	device_t dev = sc->bge_dev;

	pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4);
	pci_write_config(dev, BGE_PCI_MEMWIN_DATA, val, 4);
}
#ifdef notdef
static uint32_t
bge_readreg_ind(struct bge_softc *sc, uint32_t off)
{
	device_t dev = sc->bge_dev;

	pci_write_config(dev, BGE_PCI_REG_BASEADDR, off, 4);
	return(pci_read_config(dev, BGE_PCI_REG_DATA, 4));
}
#endif

static void
bge_writereg_ind(struct bge_softc *sc, uint32_t off, uint32_t val)
{
	device_t dev = sc->bge_dev;

	pci_write_config(dev, BGE_PCI_REG_BASEADDR, off, 4);
	pci_write_config(dev, BGE_PCI_REG_DATA, val, 4);
}
#ifdef notdef
static uint8_t
bge_vpd_readbyte(struct bge_softc *sc, uint32_t addr)
{
	device_t dev = sc->bge_dev;
	uint32_t val;
	int i;

	pci_write_config(dev, BGE_PCI_VPD_ADDR, addr, 2);
	for (i = 0; i < BGE_TIMEOUT * 10; i++) {
		DELAY(10);
		if (pci_read_config(dev, BGE_PCI_VPD_ADDR, 2) & BGE_VPD_FLAG)
			break;
	}

	/* Compare against the same bound the loop used. */
	if (i == BGE_TIMEOUT * 10) {
		device_printf(sc->bge_dev, "VPD read timed out\n");
		return(0);
	}

	val = pci_read_config(dev, BGE_PCI_VPD_DATA, 4);

	return((val >> ((addr % 4) * 8)) & 0xFF);
}

static void
bge_vpd_read_res(struct bge_softc *sc, struct vpd_res *res, uint32_t addr)
{
	size_t i;
	uint8_t *ptr;

	ptr = (uint8_t *)res;
	for (i = 0; i < sizeof(struct vpd_res); i++)
		ptr[i] = bge_vpd_readbyte(sc, i + addr);

	return;
}

static void
bge_vpd_read(struct bge_softc *sc)
{
	int pos = 0, i;
	struct vpd_res res;

	if (sc->bge_vpd_prodname != NULL)
		kfree(sc->bge_vpd_prodname, M_DEVBUF);
	if (sc->bge_vpd_readonly != NULL)
		kfree(sc->bge_vpd_readonly, M_DEVBUF);
	sc->bge_vpd_prodname = NULL;
	sc->bge_vpd_readonly = NULL;

	bge_vpd_read_res(sc, &res, pos);

	if (res.vr_id != VPD_RES_ID) {
		device_printf(sc->bge_dev,
		    "bad VPD resource id: expected %x got %x\n",
		    VPD_RES_ID, res.vr_id);
		return;
	}

	pos += sizeof(res);
	sc->bge_vpd_prodname = kmalloc(res.vr_len + 1, M_DEVBUF, M_INTWAIT);
	for (i = 0; i < res.vr_len; i++)
		sc->bge_vpd_prodname[i] = bge_vpd_readbyte(sc, i + pos);
	sc->bge_vpd_prodname[i] = '\0';
	pos += i;

	bge_vpd_read_res(sc, &res, pos);

	if (res.vr_id != VPD_RES_READ) {
		device_printf(sc->bge_dev,
		    "bad VPD resource id: expected %x got %x\n",
		    VPD_RES_READ, res.vr_id);
		return;
	}

	pos += sizeof(res);
	/* Read only vr_len bytes; the buffer has no room for more. */
	sc->bge_vpd_readonly = kmalloc(res.vr_len, M_DEVBUF, M_INTWAIT);
	for (i = 0; i < res.vr_len; i++)
		sc->bge_vpd_readonly[i] = bge_vpd_readbyte(sc, i + pos);
}
#endif
/*
 * Read a byte of data stored in the EEPROM at address 'addr.' The
 * BCM570x supports both the traditional bitbang interface and an
 * auto access interface for reading the EEPROM. We use the auto
 * access method.
 */
static uint8_t
bge_eeprom_getbyte(struct bge_softc *sc, uint32_t addr, uint8_t *dest)
{
	int i;
	uint32_t byte = 0;

	/*
	 * Enable use of auto EEPROM access so we can avoid
	 * having to use the bitbang method.
	 */
	BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_AUTO_EEPROM);

	/* Reset the EEPROM, load the clock period. */
	CSR_WRITE_4(sc, BGE_EE_ADDR,
	    BGE_EEADDR_RESET|BGE_EEHALFCLK(BGE_HALFCLK_384SCL));
	DELAY(20);

	/* Issue the read EEPROM command. */
	CSR_WRITE_4(sc, BGE_EE_ADDR, BGE_EE_READCMD | addr);

	/* Wait for completion */
	for (i = 0; i < BGE_TIMEOUT * 10; i++) {
		DELAY(10);
		if (CSR_READ_4(sc, BGE_EE_ADDR) & BGE_EEADDR_DONE)
			break;
	}

	/* Report failure so callers can notice the error. */
	if (i == BGE_TIMEOUT * 10) {
		if_printf(&sc->arpcom.ac_if, "eeprom read timed out\n");
		return(1);
	}

	/* Get result; the data register holds an aligned 32-bit word. */
	byte = CSR_READ_4(sc, BGE_EE_DATA);

	*dest = (byte >> ((addr % 4) * 8)) & 0xFF;

	return(0);
}
/*
 * Read a sequence of bytes from the EEPROM.
 */
static int
bge_read_eeprom(struct bge_softc *sc, caddr_t dest, uint32_t off, size_t len)
{
	size_t i;
	int err;
	uint8_t byte;

	for (byte = 0, err = 0, i = 0; i < len; i++) {
		err = bge_eeprom_getbyte(sc, off + i, &byte);
		if (err)
			break;
		*(dest + i) = byte;
	}

	return(err ? 1 : 0);
}
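/*
 * Usage sketch (hypothetical caller): pull the station address out of
 * the EEPROM into a local buffer, as bge_attach() does further below.
 *
 *	uint8_t ea[ETHER_ADDR_LEN];
 *
 *	if (bge_read_eeprom(sc, (caddr_t)ea, BGE_EE_MAC_OFFSET + 2,
 *	    ETHER_ADDR_LEN) != 0)
 *		device_printf(sc->bge_dev, "EEPROM read failed\n");
 */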
static int
bge_miibus_readreg(device_t dev, int phy, int reg)
{
	struct bge_softc *sc;
	struct ifnet *ifp;
	uint32_t val, autopoll;
	int i;

	sc = device_get_softc(dev);
	ifp = &sc->arpcom.ac_if;

	/*
	 * Broadcom's own driver always assumes the internal
	 * PHY is at GMII address 1. On some chips, the PHY responds
	 * to accesses at all addresses, which could cause us to
	 * bogusly attach the PHY 32 times at probe time. Always
	 * restricting the lookup to address 1 is simpler than
	 * trying to figure out which chip revisions should be
	 * special-cased.
	 */
	if (phy != 1)
		return(0);

	/* Reading with autopolling on may trigger PCI errors */
	autopoll = CSR_READ_4(sc, BGE_MI_MODE);
	if (autopoll & BGE_MIMODE_AUTOPOLL) {
		BGE_CLRBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
		DELAY(40);
	}

	CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_READ|BGE_MICOMM_BUSY|
	    BGE_MIPHY(phy)|BGE_MIREG(reg));

	for (i = 0; i < BGE_TIMEOUT; i++) {
		val = CSR_READ_4(sc, BGE_MI_COMM);
		if (!(val & BGE_MICOMM_BUSY))
			break;
	}

	if (i == BGE_TIMEOUT) {
		if_printf(ifp, "PHY read timed out\n");
		val = 0;
		goto done;
	}

	val = CSR_READ_4(sc, BGE_MI_COMM);

done:
	if (autopoll & BGE_MIMODE_AUTOPOLL) {
		BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
		DELAY(40);
	}

	if (val & BGE_MICOMM_READFAIL)
		return(0);

	return(val & 0xFFFF);
}
static int
bge_miibus_writereg(device_t dev, int phy, int reg, int val)
{
	struct bge_softc *sc;
	uint32_t autopoll;
	int i;

	sc = device_get_softc(dev);

	/* Writing with autopolling on may trigger PCI errors */
	autopoll = CSR_READ_4(sc, BGE_MI_MODE);
	if (autopoll & BGE_MIMODE_AUTOPOLL) {
		BGE_CLRBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
		DELAY(40);
	}

	CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_WRITE|BGE_MICOMM_BUSY|
	    BGE_MIPHY(phy)|BGE_MIREG(reg)|val);

	for (i = 0; i < BGE_TIMEOUT; i++) {
		if (!(CSR_READ_4(sc, BGE_MI_COMM) & BGE_MICOMM_BUSY))
			break;
	}

	if (autopoll & BGE_MIMODE_AUTOPOLL) {
		BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
		DELAY(40);
	}

	if (i == BGE_TIMEOUT) {
		if_printf(&sc->arpcom.ac_if, "PHY write timed out\n");
		return(0);
	}

	return(0);
}
static void
bge_miibus_statchg(device_t dev)
{
	struct bge_softc *sc;
	struct mii_data *mii;

	sc = device_get_softc(dev);
	mii = device_get_softc(sc->bge_miibus);

	BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_PORTMODE);
	if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T) {
		BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_GMII);
	} else {
		BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_MII);
	}

	if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) {
		BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);
	} else {
		BGE_SETBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);
	}
}
/*
 * Handle events that have triggered interrupts.
 */
static void
bge_handle_events(struct bge_softc *sc)
{
}
/*
 * Memory management for jumbo frames.
 */
static int
bge_alloc_jumbo_mem(struct bge_softc *sc)
{
	struct bge_jslot *entry;
	caddr_t ptr;
	int i;

	/* Grab a big chunk o' storage. */
	sc->bge_cdata.bge_jumbo_buf = contigmalloc(BGE_JMEM, M_DEVBUF,
	    M_WAITOK, 0, 0xffffffff, PAGE_SIZE, 0);

	if (sc->bge_cdata.bge_jumbo_buf == NULL) {
		if_printf(&sc->arpcom.ac_if, "no memory for jumbo buffers!\n");
		return(ENOBUFS);
	}

	SLIST_INIT(&sc->bge_jfree_listhead);

	/*
	 * Now divide it up into 9K pieces and save the addresses
	 * in an array. Note that we play an evil trick here by using
	 * the first few bytes in the buffer to hold the address
	 * of the softc structure for this interface. This is because
	 * bge_jfree() needs it, but it is called by the mbuf management
	 * code which will not pass it to us explicitly.
	 */
	ptr = sc->bge_cdata.bge_jumbo_buf;
	for (i = 0; i < BGE_JSLOTS; i++) {
		entry = &sc->bge_cdata.bge_jslots[i];
		entry->bge_sc = sc;
		entry->bge_buf = ptr;
		entry->bge_inuse = 0;
		entry->bge_slot = i;
		SLIST_INSERT_HEAD(&sc->bge_jfree_listhead, entry, jslot_link);
		ptr += BGE_JLEN;
	}

	return(0);
}
static void
bge_free_jumbo_mem(struct bge_softc *sc)
{
	if (sc->bge_cdata.bge_jumbo_buf)
		contigfree(sc->bge_cdata.bge_jumbo_buf, BGE_JMEM, M_DEVBUF);
}
/*
 * Allocate a jumbo buffer.
 */
static struct bge_jslot *
bge_jalloc(struct bge_softc *sc)
{
	struct bge_jslot *entry;

	lwkt_serialize_enter(&sc->bge_jslot_serializer);
	entry = SLIST_FIRST(&sc->bge_jfree_listhead);
	if (entry) {
		SLIST_REMOVE_HEAD(&sc->bge_jfree_listhead, jslot_link);
		entry->bge_inuse = 1;
	} else {
		if_printf(&sc->arpcom.ac_if, "no free jumbo buffers\n");
	}
	lwkt_serialize_exit(&sc->bge_jslot_serializer);
	return(entry);
}
/*
 * Adjust usage count on a jumbo buffer.
 */
static void
bge_jref(void *arg)
{
	struct bge_jslot *entry = (struct bge_jslot *)arg;
	struct bge_softc *sc = entry->bge_sc;

	if (sc == NULL)
		panic("bge_jref: can't find softc pointer!");

	if (&sc->bge_cdata.bge_jslots[entry->bge_slot] != entry) {
		panic("bge_jref: asked to reference buffer "
		    "that we don't manage!");
	} else if (entry->bge_inuse == 0) {
		panic("bge_jref: buffer already free!");
	} else {
		atomic_add_int(&entry->bge_inuse, 1);
	}
}

/*
 * Release a jumbo buffer.
 */
static void
bge_jfree(void *arg)
{
	struct bge_jslot *entry = (struct bge_jslot *)arg;
	struct bge_softc *sc = entry->bge_sc;

	if (sc == NULL)
		panic("bge_jfree: can't find softc pointer!");

	if (&sc->bge_cdata.bge_jslots[entry->bge_slot] != entry) {
		panic("bge_jfree: asked to free buffer that we don't manage!");
	} else if (entry->bge_inuse == 0) {
		panic("bge_jfree: buffer already free!");
	} else {
		/*
		 * Possible MP race to 0, use the serializer. The atomic insn
		 * is still needed for races against bge_jref().
		 */
		lwkt_serialize_enter(&sc->bge_jslot_serializer);
		atomic_subtract_int(&entry->bge_inuse, 1);
		if (entry->bge_inuse == 0) {
			SLIST_INSERT_HEAD(&sc->bge_jfree_listhead,
			    entry, jslot_link);
		}
		lwkt_serialize_exit(&sc->bge_jslot_serializer);
	}
}
/*
 * Initialize a standard receive ring descriptor.
 */
static int
bge_newbuf_std(struct bge_softc *sc, int i, struct mbuf *m)
{
	struct mbuf *m_new = NULL;
	struct bge_rx_bd *r;

	if (m == NULL) {
		m_new = m_getcl(MB_DONTWAIT, MT_DATA, M_PKTHDR);
		if (m_new == NULL)
			return (ENOBUFS);
		m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
	} else {
		m_new = m;
		m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
		m_new->m_data = m_new->m_ext.ext_buf;
	}
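	/*
	 * m_adj() below offsets the payload by ETHER_ALIGN (2) bytes so
	 * the IP header ends up longword-aligned for the host. Chips
	 * with the PCI-X alignment bug need the DMA buffer itself to be
	 * aligned, so the offset is skipped there and misaligned
	 * payloads are instead fixed up by copying after reception.
	 */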
	if (!sc->bge_rx_alignment_bug)
		m_adj(m_new, ETHER_ALIGN);
	sc->bge_cdata.bge_rx_std_chain[i] = m_new;
	r = &sc->bge_rdata->bge_rx_std_ring[i];
	BGE_HOSTADDR(r->bge_addr, vtophys(mtod(m_new, caddr_t)));
	r->bge_flags = BGE_RXBDFLAG_END;
	r->bge_len = m_new->m_len;
	r->bge_idx = i;

	return(0);
}
/*
 * Initialize a jumbo receive ring descriptor. This allocates
 * a jumbo buffer from the pool managed internally by the driver.
 */
static int
bge_newbuf_jumbo(struct bge_softc *sc, int i, struct mbuf *m)
{
	struct mbuf *m_new = NULL;
	struct bge_rx_bd *r;

	if (m == NULL) {
		struct bge_jslot *buf;

		/* Allocate the mbuf. */
		MGETHDR(m_new, MB_DONTWAIT, MT_DATA);
		if (m_new == NULL)
			return(ENOBUFS);

		/* Allocate the jumbo buffer */
		buf = bge_jalloc(sc);
		if (buf == NULL) {
			m_freem(m_new);
			if_printf(&sc->arpcom.ac_if, "jumbo allocation failed "
			    "-- packet dropped!\n");
			return(ENOBUFS);
		}

		/* Attach the buffer to the mbuf. */
		m_new->m_ext.ext_arg = buf;
		m_new->m_ext.ext_buf = buf->bge_buf;
		m_new->m_ext.ext_free = bge_jfree;
		m_new->m_ext.ext_ref = bge_jref;
		m_new->m_ext.ext_size = BGE_JUMBO_FRAMELEN;

		m_new->m_data = m_new->m_ext.ext_buf;
		m_new->m_flags |= M_EXT;
		m_new->m_len = m_new->m_pkthdr.len = m_new->m_ext.ext_size;
	} else {
		m_new = m;
		m_new->m_data = m_new->m_ext.ext_buf;
		m_new->m_ext.ext_size = BGE_JUMBO_FRAMELEN;
	}

	if (!sc->bge_rx_alignment_bug)
		m_adj(m_new, ETHER_ALIGN);
	/* Set up the descriptor. */
	r = &sc->bge_rdata->bge_rx_jumbo_ring[i];
	sc->bge_cdata.bge_rx_jumbo_chain[i] = m_new;
	BGE_HOSTADDR(r->bge_addr, vtophys(mtod(m_new, caddr_t)));
	r->bge_flags = BGE_RXBDFLAG_END|BGE_RXBDFLAG_JUMBO_RING;
	r->bge_len = m_new->m_len;
	r->bge_idx = i;

	return(0);
}
/*
 * The standard receive ring has 512 entries in it. At 2K per mbuf cluster,
 * that's 1MB of memory, which is a lot. For now, we fill only the first
 * 256 ring entries and hope that our CPU is fast enough to keep up with
 * the NIC.
 */
static int
bge_init_rx_ring_std(struct bge_softc *sc)
{
	int i;

	for (i = 0; i < BGE_SSLOTS; i++) {
		if (bge_newbuf_std(sc, i, NULL) == ENOBUFS)
			return(ENOBUFS);
	}

	sc->bge_std = i - 1;
	CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std);

	return(0);
}
static void
bge_free_rx_ring_std(struct bge_softc *sc)
{
	int i;

	for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
		if (sc->bge_cdata.bge_rx_std_chain[i] != NULL) {
			m_freem(sc->bge_cdata.bge_rx_std_chain[i]);
			sc->bge_cdata.bge_rx_std_chain[i] = NULL;
		}
		bzero(&sc->bge_rdata->bge_rx_std_ring[i],
		    sizeof(struct bge_rx_bd));
	}
}
static int
bge_init_rx_ring_jumbo(struct bge_softc *sc)
{
	int i;
	struct bge_rcb *rcb;

	for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
		if (bge_newbuf_jumbo(sc, i, NULL) == ENOBUFS)
			return(ENOBUFS);
	}

	sc->bge_jumbo = i - 1;

	rcb = &sc->bge_rdata->bge_info.bge_jumbo_rx_rcb;
	rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(0, 0);
	CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);

	CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo);

	return(0);
}
static void
bge_free_rx_ring_jumbo(struct bge_softc *sc)
{
	int i;

	for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
		if (sc->bge_cdata.bge_rx_jumbo_chain[i] != NULL) {
			m_freem(sc->bge_cdata.bge_rx_jumbo_chain[i]);
			sc->bge_cdata.bge_rx_jumbo_chain[i] = NULL;
		}
		bzero(&sc->bge_rdata->bge_rx_jumbo_ring[i],
		    sizeof(struct bge_rx_bd));
	}
}
static void
bge_free_tx_ring(struct bge_softc *sc)
{
	int i;

	if (sc->bge_rdata->bge_tx_ring == NULL)
		return;

	for (i = 0; i < BGE_TX_RING_CNT; i++) {
		if (sc->bge_cdata.bge_tx_chain[i] != NULL) {
			m_freem(sc->bge_cdata.bge_tx_chain[i]);
			sc->bge_cdata.bge_tx_chain[i] = NULL;
		}
		bzero(&sc->bge_rdata->bge_tx_ring[i],
		    sizeof(struct bge_tx_bd));
	}
}
static int
bge_init_tx_ring(struct bge_softc *sc)
{
	sc->bge_txcnt = 0;
	sc->bge_tx_saved_considx = 0;
	sc->bge_tx_prodidx = 0;
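	/*
	 * BGE_MBX_TX_HOST_PROD0_LO is a write-only mailbox register;
	 * the driver keeps its own copy of the producer index in
	 * bge_tx_prodidx rather than ever reading the mailbox back.
	 */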
	/* Initialize transmit producer index for host-memory send ring. */
	CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bge_tx_prodidx);

	/* 5700 b2 errata */
	if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
		CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, 0);

	CSR_WRITE_4(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);
	/* 5700 b2 errata */
	if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
		CSR_WRITE_4(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);

	return(0);
}
static void
bge_setmulti(struct bge_softc *sc)
{
	struct ifnet *ifp;
	struct ifmultiaddr *ifma;
	uint32_t hashes[4] = { 0, 0, 0, 0 };
	int h, i;

	ifp = &sc->arpcom.ac_if;

	if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
		for (i = 0; i < 4; i++)
			CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0xFFFFFFFF);
		return;
	}

	/* First, zot all the existing filters. */
	for (i = 0; i < 4; i++)
		CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0);

	/* Now program new ones. */
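	/*
	 * Each address is hashed with a little-endian CRC32; the low
	 * 7 bits of the CRC select one of 128 filter bits, spread as
	 * 32 bits across each of the four BGE_MAR registers.
	 */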
	LIST_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		h = ether_crc32_le(
		    LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
		    ETHER_ADDR_LEN) & 0x7f;
		hashes[(h & 0x60) >> 5] |= 1 << (h & 0x1F);
	}

	for (i = 0; i < 4; i++)
		CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), hashes[i]);
}
/*
 * Do endian, PCI and DMA initialization. Also check the on-board ROM
 * self-test results.
 */
static int
bge_chipinit(struct bge_softc *sc)
{
	int i;
	uint32_t dma_rw_ctl;

	/* Set endianness before we access any non-PCI registers. */
#if BYTE_ORDER == BIG_ENDIAN
	pci_write_config(sc->bge_dev, BGE_PCI_MISC_CTL,
	    BGE_BIGENDIAN_INIT, 4);
#else
	pci_write_config(sc->bge_dev, BGE_PCI_MISC_CTL,
	    BGE_LITTLEENDIAN_INIT, 4);
#endif

	/*
	 * Check the 'ROM failed' bit on the RX CPU to see if
	 * self-tests passed.
	 */
	if (CSR_READ_4(sc, BGE_RXCPU_MODE) & BGE_RXCPUMODE_ROMFAIL) {
		if_printf(&sc->arpcom.ac_if,
			  "RX CPU self-diagnostics failed!\n");
		return(ENODEV);
	}

	/* Clear the MAC control register */
	CSR_WRITE_4(sc, BGE_MAC_MODE, 0);

	/*
	 * Clear the MAC statistics block in the NIC's
	 * internal memory.
	 */
	for (i = BGE_STATS_BLOCK;
	    i < BGE_STATS_BLOCK_END + 1; i += sizeof(uint32_t))
		BGE_MEMWIN_WRITE(sc, i, 0);

	for (i = BGE_STATUS_BLOCK;
	    i < BGE_STATUS_BLOCK_END + 1; i += sizeof(uint32_t))
		BGE_MEMWIN_WRITE(sc, i, 0);

	/* Set up the PCI DMA control register. */
	if (sc->bge_pcie) {
		/* PCI Express */
		dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD |
		    (0xf << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
		    (0x2 << BGE_PCIDMARWCTL_WR_WAT_SHIFT);
	} else if (pci_read_config(sc->bge_dev, BGE_PCI_PCISTATE, 4) &
		   BGE_PCISTATE_PCI_BUSMODE) {
		/* Conventional PCI bus */
		dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD |
		    (0x7 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
		    (0x7 << BGE_PCIDMARWCTL_WR_WAT_SHIFT) |
		    (0x0F);
	} else {
		/* PCI-X bus */
		/*
		 * The 5704 uses a different encoding of read/write
		 * watermarks.
		 */
		if (sc->bge_asicrev == BGE_ASICREV_BCM5704)
			dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD |
			    (0x7 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
			    (0x3 << BGE_PCIDMARWCTL_WR_WAT_SHIFT);
		else
			dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD |
			    (0x3 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
			    (0x3 << BGE_PCIDMARWCTL_WR_WAT_SHIFT) |
			    (0x0F);

		/*
		 * 5703 and 5704 need ONEDMA_AT_ONCE as a workaround
		 * for hardware bugs.
		 */
		if (sc->bge_asicrev == BGE_ASICREV_BCM5703 ||
		    sc->bge_asicrev == BGE_ASICREV_BCM5704) {
			uint32_t tmp;

			tmp = CSR_READ_4(sc, BGE_PCI_CLKCTL) & 0x1f;
			if (tmp == 0x6 || tmp == 0x7)
				dma_rw_ctl |= BGE_PCIDMARWCTL_ONEDMA_ATONCE;
		}
	}

	if (sc->bge_asicrev == BGE_ASICREV_BCM5703 ||
	    sc->bge_asicrev == BGE_ASICREV_BCM5704 ||
	    sc->bge_asicrev == BGE_ASICREV_BCM5705 ||
	    sc->bge_asicrev == BGE_ASICREV_BCM5750)
		dma_rw_ctl &= ~BGE_PCIDMARWCTL_MINDMA;
	pci_write_config(sc->bge_dev, BGE_PCI_DMA_RW_CTL, dma_rw_ctl, 4);

	/*
	 * Set up general mode register.
	 */
	CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_MODECTL_WORDSWAP_NONFRAME|
	    BGE_MODECTL_BYTESWAP_DATA|BGE_MODECTL_WORDSWAP_DATA|
	    BGE_MODECTL_MAC_ATTN_INTR|BGE_MODECTL_HOST_SEND_BDS|
	    BGE_MODECTL_TX_NO_PHDR_CSUM|BGE_MODECTL_RX_NO_PHDR_CSUM);

	/*
	 * Disable memory write invalidate. Apparently it is not supported
	 * properly by these devices.
	 */
	PCI_CLRBIT(sc->bge_dev, BGE_PCI_CMD, PCIM_CMD_MWIEN, 4);

	/* Set the timer prescaler (always 66MHz) */
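	/*
	 * The value written encodes (core clock in MHz - 1), i.e. 65
	 * for the fixed 66MHz core clock, shifted into the prescaler
	 * field so the chip can derive its 1MHz timer tick.
	 */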
	CSR_WRITE_4(sc, BGE_MISC_CFG, 65 << 1/*BGE_32BITTIME_66MHZ*/);

	return(0);
}
static int
bge_blockinit(struct bge_softc *sc)
{
	struct bge_rcb *rcb;
	volatile struct bge_rcb *vrcb;
	int i;

	/*
	 * Initialize the memory window pointer register so that
	 * we can access the first 32K of internal NIC RAM. This will
	 * allow us to set up the TX send ring RCBs and the RX return
	 * ring RCBs, plus other things which live in NIC memory.
	 */
	CSR_WRITE_4(sc, BGE_PCI_MEMWIN_BASEADDR, 0);

	/* Note: the BCM5704 has a smaller mbuf space than other chips. */

	if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
	    sc->bge_asicrev != BGE_ASICREV_BCM5750) {
		/* Configure mbuf memory pool */
		if (sc->bge_extram) {
			CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_BASEADDR,
			    BGE_EXT_SSRAM);
			if (sc->bge_asicrev == BGE_ASICREV_BCM5704)
				CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x10000);
			else
				CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x18000);
		} else {
			CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_BASEADDR,
			    BGE_BUFFPOOL_1);
			if (sc->bge_asicrev == BGE_ASICREV_BCM5704)
				CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x10000);
			else
				CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x18000);
		}

		/* Configure DMA resource pool */
		CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_BASEADDR,
		    BGE_DMA_DESCRIPTORS);
		CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LEN, 0x2000);
	}

	/* Configure mbuf pool watermarks */
	if (sc->bge_asicrev == BGE_ASICREV_BCM5705 ||
	    sc->bge_asicrev == BGE_ASICREV_BCM5750) {
		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0);
		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x10);
	} else {
		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x50);
		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x20);
	}
	CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60);

	/* Configure DMA resource watermarks */
	CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LOWAT, 5);
	CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_HIWAT, 10);

	/* Enable buffer manager */
	if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
	    sc->bge_asicrev != BGE_ASICREV_BCM5750) {
		CSR_WRITE_4(sc, BGE_BMAN_MODE,
		    BGE_BMANMODE_ENABLE|BGE_BMANMODE_LOMBUF_ATTN);

		/* Poll for buffer manager start indication */
		for (i = 0; i < BGE_TIMEOUT; i++) {
			if (CSR_READ_4(sc, BGE_BMAN_MODE) & BGE_BMANMODE_ENABLE)
				break;
			DELAY(10);
		}

		if (i == BGE_TIMEOUT) {
			if_printf(&sc->arpcom.ac_if,
				  "buffer manager failed to start\n");
			return(ENXIO);
		}
	}

	/* Enable flow-through queues */
	CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
	CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);

	/* Wait until queue initialization is complete */
	for (i = 0; i < BGE_TIMEOUT; i++) {
		if (CSR_READ_4(sc, BGE_FTQ_RESET) == 0)
			break;
		DELAY(10);
	}

	if (i == BGE_TIMEOUT) {
		if_printf(&sc->arpcom.ac_if,
			  "flow-through queue init failed\n");
		return(ENXIO);
	}
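	/*
	 * A ring control block (RCB) is the chip's per-ring descriptor:
	 * it holds the host DMA address of the ring, a combined maximum
	 * frame length/flags word, and the NIC-memory address of the
	 * ring.
	 */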
	/* Initialize the standard RX ring control block */
	rcb = &sc->bge_rdata->bge_info.bge_std_rx_rcb;
	BGE_HOSTADDR(rcb->bge_hostaddr,
	    vtophys(&sc->bge_rdata->bge_rx_std_ring));
	if (sc->bge_asicrev == BGE_ASICREV_BCM5705 ||
	    sc->bge_asicrev == BGE_ASICREV_BCM5750)
		rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(512, 0);
	else
		rcb->bge_maxlen_flags =
		    BGE_RCB_MAXLEN_FLAGS(BGE_MAX_FRAMELEN, 0);
	if (sc->bge_extram)
		rcb->bge_nicaddr = BGE_EXT_STD_RX_RINGS;
	else
		rcb->bge_nicaddr = BGE_STD_RX_RINGS;
	CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_HI, rcb->bge_hostaddr.bge_addr_hi);
	CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_LO, rcb->bge_hostaddr.bge_addr_lo);
	CSR_WRITE_4(sc, BGE_RX_STD_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);
	CSR_WRITE_4(sc, BGE_RX_STD_RCB_NICADDR, rcb->bge_nicaddr);

	/*
	 * Initialize the jumbo RX ring control block.
	 * We set the 'ring disabled' bit in the flags
	 * field until we're actually ready to start
	 * using this ring (i.e. once we set the MTU
	 * high enough to require it).
	 */
	if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
	    sc->bge_asicrev != BGE_ASICREV_BCM5750) {
		rcb = &sc->bge_rdata->bge_info.bge_jumbo_rx_rcb;
		BGE_HOSTADDR(rcb->bge_hostaddr,
		    vtophys(&sc->bge_rdata->bge_rx_jumbo_ring));
		rcb->bge_maxlen_flags =
		    BGE_RCB_MAXLEN_FLAGS(BGE_MAX_FRAMELEN,
		    BGE_RCB_FLAG_RING_DISABLED);
		if (sc->bge_extram)
			rcb->bge_nicaddr = BGE_EXT_JUMBO_RX_RINGS;
		else
			rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS;
		CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_HI,
		    rcb->bge_hostaddr.bge_addr_hi);
		CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_LO,
		    rcb->bge_hostaddr.bge_addr_lo);
		CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS,
		    rcb->bge_maxlen_flags);
		CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_NICADDR, rcb->bge_nicaddr);

		/* Set up dummy disabled mini ring RCB */
		rcb = &sc->bge_rdata->bge_info.bge_mini_rx_rcb;
		rcb->bge_maxlen_flags =
		    BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED);
		CSR_WRITE_4(sc, BGE_RX_MINI_RCB_MAXLEN_FLAGS,
		    rcb->bge_maxlen_flags);
	}

	/*
	 * Set the BD ring replenish thresholds. The recommended
	 * values are 1/8th the number of descriptors allocated to
	 * each ring.
	 */
	CSR_WRITE_4(sc, BGE_RBDI_STD_REPL_THRESH, BGE_STD_RX_RING_CNT/8);
	CSR_WRITE_4(sc, BGE_RBDI_JUMBO_REPL_THRESH, BGE_JUMBO_RX_RING_CNT/8);

	/*
	 * Disable all unused send rings by setting the 'ring disabled'
	 * bit in the flags field of all the TX send ring control blocks.
	 * These are located in NIC memory.
	 */
	vrcb = (volatile struct bge_rcb *)(sc->bge_vhandle + BGE_MEMWIN_START +
	    BGE_SEND_RING_RCB);
	for (i = 0; i < BGE_TX_RINGS_EXTSSRAM_MAX; i++) {
		vrcb->bge_maxlen_flags =
		    BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED);
		vrcb->bge_nicaddr = 0;
		vrcb++;
	}

	/* Configure TX RCB 0 (we use only the first ring) */
	vrcb = (volatile struct bge_rcb *)(sc->bge_vhandle + BGE_MEMWIN_START +
	    BGE_SEND_RING_RCB);
	vrcb->bge_hostaddr.bge_addr_hi = 0;
	BGE_HOSTADDR(vrcb->bge_hostaddr, vtophys(&sc->bge_rdata->bge_tx_ring));
	vrcb->bge_nicaddr = BGE_NIC_TXRING_ADDR(0, BGE_TX_RING_CNT);
	if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
	    sc->bge_asicrev != BGE_ASICREV_BCM5750)
		vrcb->bge_maxlen_flags =
		    BGE_RCB_MAXLEN_FLAGS(BGE_TX_RING_CNT, 0);

	/* Disable all unused RX return rings */
	vrcb = (volatile struct bge_rcb *)(sc->bge_vhandle + BGE_MEMWIN_START +
	    BGE_RX_RETURN_RING_RCB);
	for (i = 0; i < BGE_RX_RINGS_MAX; i++) {
		vrcb->bge_hostaddr.bge_addr_hi = 0;
		vrcb->bge_hostaddr.bge_addr_lo = 0;
		vrcb->bge_maxlen_flags =
		    BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt,
		    BGE_RCB_FLAG_RING_DISABLED);
		vrcb->bge_nicaddr = 0;
		CSR_WRITE_4(sc, BGE_MBX_RX_CONS0_LO +
		    (i * (sizeof(uint64_t))), 0);
		vrcb++;
	}

	/* Initialize RX ring indexes */
	CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, 0);
	CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, 0);
	CSR_WRITE_4(sc, BGE_MBX_RX_MINI_PROD_LO, 0);

	/*
	 * Set up RX return ring 0.
	 * Note that the NIC address for RX return rings is 0x00000000.
	 * The return rings live entirely within the host, so the
	 * nicaddr field in the RCB isn't used.
	 */
	vrcb = (volatile struct bge_rcb *)(sc->bge_vhandle + BGE_MEMWIN_START +
	    BGE_RX_RETURN_RING_RCB);
	vrcb->bge_hostaddr.bge_addr_hi = 0;
	BGE_HOSTADDR(vrcb->bge_hostaddr,
	    vtophys(&sc->bge_rdata->bge_rx_return_ring));
	vrcb->bge_nicaddr = 0x00000000;
	vrcb->bge_maxlen_flags =
	    BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt, 0);
	/* Set random backoff seed for TX */
	CSR_WRITE_4(sc, BGE_TX_RANDOM_BACKOFF,
	    sc->arpcom.ac_enaddr[0] + sc->arpcom.ac_enaddr[1] +
	    sc->arpcom.ac_enaddr[2] + sc->arpcom.ac_enaddr[3] +
	    sc->arpcom.ac_enaddr[4] + sc->arpcom.ac_enaddr[5] +
	    BGE_TX_BACKOFF_SEED_MASK);
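	/*
	 * Summing the station address bytes into the seed above makes
	 * it likely that stations sharing a half-duplex segment pick
	 * different collision backoff slots.
	 */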
	/* Set inter-packet gap */
	CSR_WRITE_4(sc, BGE_TX_LENGTHS, 0x2620);

	/*
	 * Specify which ring to use for packets that don't match
	 * any RX rules.
	 */
	CSR_WRITE_4(sc, BGE_RX_RULES_CFG, 0x08);

	/*
	 * Configure number of RX lists. One interrupt distribution
	 * list, sixteen active lists, one bad frames class.
	 */
	CSR_WRITE_4(sc, BGE_RXLP_CFG, 0x181);

	/* Initialize RX list placement stats mask. */
	CSR_WRITE_4(sc, BGE_RXLP_STATS_ENABLE_MASK, 0x007FFFFF);
	CSR_WRITE_4(sc, BGE_RXLP_STATS_CTL, 0x1);

	/* Disable host coalescing until we get it set up */
	CSR_WRITE_4(sc, BGE_HCC_MODE, 0x00000000);

	/* Poll to make sure it's shut down. */
	for (i = 0; i < BGE_TIMEOUT; i++) {
		if (!(CSR_READ_4(sc, BGE_HCC_MODE) & BGE_HCCMODE_ENABLE))
			break;
		DELAY(10);
	}

	if (i == BGE_TIMEOUT) {
		if_printf(&sc->arpcom.ac_if,
			  "host coalescing engine failed to idle\n");
		return(ENXIO);
	}

	/* Set up host coalescing defaults */
	CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS, sc->bge_rx_coal_ticks);
	CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS, sc->bge_tx_coal_ticks);
	CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS, sc->bge_rx_max_coal_bds);
	CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS, sc->bge_tx_max_coal_bds);
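	/*
	 * The coalescing engine delays status-block updates (and hence
	 * interrupts) until either the tick timer expires or the
	 * buffered-BD count threshold is reached, whichever happens
	 * first.
	 */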
	if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
	    sc->bge_asicrev != BGE_ASICREV_BCM5750) {
		CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS_INT, 0);
		CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS_INT, 0);
	}
	CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT, 0);
	CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT, 0);

	/* Set up address of statistics block */
	if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
	    sc->bge_asicrev != BGE_ASICREV_BCM5750) {
		CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_HI, 0);
		CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_LO,
		    vtophys(&sc->bge_rdata->bge_info.bge_stats));

		CSR_WRITE_4(sc, BGE_HCC_STATS_BASEADDR, BGE_STATS_BLOCK);
		CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_BASEADDR, BGE_STATUS_BLOCK);
		CSR_WRITE_4(sc, BGE_HCC_STATS_TICKS, sc->bge_stat_ticks);
	}

	/* Set up address of status block */
	CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_HI, 0);
	CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_LO,
	    vtophys(&sc->bge_rdata->bge_status_block));

	sc->bge_rdata->bge_status_block.bge_idx[0].bge_rx_prod_idx = 0;
	sc->bge_rdata->bge_status_block.bge_idx[0].bge_tx_cons_idx = 0;

	/* Turn on host coalescing state machine */
	CSR_WRITE_4(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE);

	/* Turn on RX BD completion state machine and enable attentions */
	CSR_WRITE_4(sc, BGE_RBDC_MODE,
	    BGE_RBDCMODE_ENABLE|BGE_RBDCMODE_ATTN);

	/* Turn on RX list placement state machine */
	CSR_WRITE_4(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);

	/* Turn on RX list selector state machine. */
	if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
	    sc->bge_asicrev != BGE_ASICREV_BCM5750)
		CSR_WRITE_4(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);

	/* Turn on DMA, clear stats */
	CSR_WRITE_4(sc, BGE_MAC_MODE, BGE_MACMODE_TXDMA_ENB|
	    BGE_MACMODE_RXDMA_ENB|BGE_MACMODE_RX_STATS_CLEAR|
	    BGE_MACMODE_TX_STATS_CLEAR|BGE_MACMODE_RX_STATS_ENB|
	    BGE_MACMODE_TX_STATS_ENB|BGE_MACMODE_FRMHDR_DMA_ENB|
	    (sc->bge_tbi ? BGE_PORTMODE_TBI : BGE_PORTMODE_MII));

	/* Set misc. local control, enable interrupts on attentions */
	CSR_WRITE_4(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_ONATTN);

#ifdef notdef
	/* Assert GPIO pins for PHY reset */
	BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUT0|
	    BGE_MLC_MISCIO_OUT1|BGE_MLC_MISCIO_OUT2);
	BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUTEN0|
	    BGE_MLC_MISCIO_OUTEN1|BGE_MLC_MISCIO_OUTEN2);
#endif

	/* Turn on DMA completion state machine */
	if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
	    sc->bge_asicrev != BGE_ASICREV_BCM5750)
		CSR_WRITE_4(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);

	/* Turn on write DMA state machine */
	CSR_WRITE_4(sc, BGE_WDMA_MODE,
	    BGE_WDMAMODE_ENABLE|BGE_WDMAMODE_ALL_ATTNS);

	/* Turn on read DMA state machine */
	CSR_WRITE_4(sc, BGE_RDMA_MODE,
	    BGE_RDMAMODE_ENABLE|BGE_RDMAMODE_ALL_ATTNS);

	/* Turn on RX data completion state machine */
	CSR_WRITE_4(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);

	/* Turn on RX BD initiator state machine */
	CSR_WRITE_4(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);

	/* Turn on RX data and RX BD initiator state machine */
	CSR_WRITE_4(sc, BGE_RDBDI_MODE, BGE_RDBDIMODE_ENABLE);

	/* Turn on Mbuf cluster free state machine */
	if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
	    sc->bge_asicrev != BGE_ASICREV_BCM5750)
		CSR_WRITE_4(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);

	/* Turn on send BD completion state machine */
	CSR_WRITE_4(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);

	/* Turn on send data completion state machine */
	CSR_WRITE_4(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE);

	/* Turn on send data initiator state machine */
	CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);

	/* Turn on send BD initiator state machine */
	CSR_WRITE_4(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);

	/* Turn on send BD selector state machine */
	CSR_WRITE_4(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);

	CSR_WRITE_4(sc, BGE_SDI_STATS_ENABLE_MASK, 0x007FFFFF);
	CSR_WRITE_4(sc, BGE_SDI_STATS_CTL,
	    BGE_SDISTATSCTL_ENABLE|BGE_SDISTATSCTL_FASTER);

	/* ack/clear link change events */
	CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED|
	    BGE_MACSTAT_CFG_CHANGED|BGE_MACSTAT_MI_COMPLETE|
	    BGE_MACSTAT_LINK_CHANGED);

	/* Enable PHY auto polling (for MII/GMII only) */
	if (sc->bge_tbi) {
		CSR_WRITE_4(sc, BGE_MI_STS, BGE_MISTS_LINK);
	} else {
		BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL|10<<16);
		if (sc->bge_asicrev == BGE_ASICREV_BCM5700)
			CSR_WRITE_4(sc, BGE_MAC_EVT_ENB,
			    BGE_EVTENB_MI_INTERRUPT);
	}

	/* Enable link state change attentions. */
	BGE_SETBIT(sc, BGE_MAC_EVT_ENB, BGE_EVTENB_LINK_CHANGED);

	return(0);
}
/*
 * Probe for a Broadcom chip. Check the PCI vendor and device IDs
 * against our list and return its name if we find a match. Note
 * that since the Broadcom controller contains VPD support, we
 * can get the device name string from the controller itself instead
 * of the compiled-in string. This is a little slow, but it guarantees
 * we'll always announce the right product name.
 */
static int
bge_probe(device_t dev)
{
	struct bge_softc *sc;
	struct bge_type *t;
	char *descbuf;
	uint16_t product, vendor;

	product = pci_get_device(dev);
	vendor = pci_get_vendor(dev);

	for (t = bge_devs; t->bge_name != NULL; t++) {
		if (vendor == t->bge_vid && product == t->bge_did)
			break;
	}

	if (t->bge_name == NULL)
		return(ENXIO);

	sc = device_get_softc(dev);
#ifdef notdef
	sc->bge_dev = dev;

	bge_vpd_read(sc);
	device_set_desc(dev, sc->bge_vpd_prodname);
#endif
	descbuf = kmalloc(BGE_DEVDESC_MAX, M_TEMP, M_WAITOK);
	ksnprintf(descbuf, BGE_DEVDESC_MAX, "%s, ASIC rev. %#04x", t->bge_name,
	    pci_read_config(dev, BGE_PCI_MISC_CTL, 4) >> 16);
	device_set_desc_copy(dev, descbuf);
	if (pci_get_subvendor(dev) == PCI_VENDOR_DELL)
		sc->bge_no_3_led = 1;
	kfree(descbuf, M_TEMP);
	return(0);
}
static int
bge_attach(device_t dev)
{
	struct ifnet *ifp;
	struct bge_softc *sc;
	uint32_t hwcfg = 0;
	uint32_t mac_addr = 0;
	int error = 0, rid;
	uint8_t ether_addr[ETHER_ADDR_LEN];

	sc = device_get_softc(dev);
	sc->bge_dev = dev;
	callout_init(&sc->bge_stat_timer);
	lwkt_serialize_init(&sc->bge_jslot_serializer);

	/*
	 * Map control/status registers.
	 */
	pci_enable_busmaster(dev);

	rid = BGE_PCI_BAR0;
	sc->bge_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
	    RF_ACTIVE);

	if (sc->bge_res == NULL) {
		device_printf(dev, "couldn't map memory\n");
		error = ENXIO;
		return(error);
	}

	sc->bge_btag = rman_get_bustag(sc->bge_res);
	sc->bge_bhandle = rman_get_bushandle(sc->bge_res);
	sc->bge_vhandle = (vm_offset_t)rman_get_virtual(sc->bge_res);

	/* Allocate interrupt */
	rid = 0;

	sc->bge_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
	    RF_SHAREABLE | RF_ACTIVE);

	if (sc->bge_irq == NULL) {
		device_printf(dev, "couldn't map interrupt\n");
		error = ENXIO;
		goto fail;
	}

	/* Save ASIC rev. */
	sc->bge_chipid =
	    pci_read_config(dev, BGE_PCI_MISC_CTL, 4) &
	    BGE_PCIMISCCTL_ASICREV;
	sc->bge_asicrev = BGE_ASICREV(sc->bge_chipid);
	sc->bge_chiprev = BGE_CHIPREV(sc->bge_chipid);

	/*
	 * Treat the 5714 and the 5752 like the 5750 until we have more info
	 * on this chip.
	 */
	if (sc->bge_asicrev == BGE_ASICREV_BCM5714 ||
	    sc->bge_asicrev == BGE_ASICREV_BCM5752)
		sc->bge_asicrev = BGE_ASICREV_BCM5750;

	/*
	 * XXX: Broadcom Linux driver.  Not in specs or errata.
	 * PCI-Express?
	 */
	if (sc->bge_asicrev == BGE_ASICREV_BCM5750) {
		uint32_t v;

		v = pci_read_config(dev, BGE_PCI_MSI_CAPID, 4);
		if (((v >> 8) & 0xff) == BGE_PCIE_MSI_CAPID) {
			v = pci_read_config(dev, BGE_PCIE_MSI_CAPID, 4);
			if ((v & 0xff) == BGE_PCIE_MSI_CAPID_VAL)
				sc->bge_pcie = 1;
		}
	}

	ifp = &sc->arpcom.ac_if;
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));

	/* Try to reset the chip. */
	bge_reset(sc);

	if (bge_chipinit(sc)) {
		device_printf(dev, "chip initialization failed\n");
		error = ENXIO;
		goto fail;
	}
	/*
	 * Get station address from the EEPROM.
	 */
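	/*
	 * The bootcode appears to stash a valid station address in NIC
	 * memory at 0x0c14/0x0c18, tagged with 0x484b in the upper half
	 * of the first word; only if that tag is missing do we fall
	 * back to the EEPROM.
	 */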
	mac_addr = bge_readmem_ind(sc, 0x0c14);
	if ((mac_addr >> 16) == 0x484b) {
		ether_addr[0] = (uint8_t)(mac_addr >> 8);
		ether_addr[1] = (uint8_t)mac_addr;
		mac_addr = bge_readmem_ind(sc, 0x0c18);
		ether_addr[2] = (uint8_t)(mac_addr >> 24);
		ether_addr[3] = (uint8_t)(mac_addr >> 16);
		ether_addr[4] = (uint8_t)(mac_addr >> 8);
		ether_addr[5] = (uint8_t)mac_addr;
	} else if (bge_read_eeprom(sc, ether_addr,
	    BGE_EE_MAC_OFFSET + 2, ETHER_ADDR_LEN)) {
		device_printf(dev, "failed to read station address\n");
		error = ENXIO;
		goto fail;
	}

	/* Allocate the general information block and ring buffers. */
	sc->bge_rdata = contigmalloc(sizeof(struct bge_ring_data), M_DEVBUF,
	    M_WAITOK, 0, 0xffffffff, PAGE_SIZE, 0);

	if (sc->bge_rdata == NULL) {
		error = ENXIO;
		device_printf(dev, "no memory for list buffers!\n");
		goto fail;
	}

	bzero(sc->bge_rdata, sizeof(struct bge_ring_data));
	/*
	 * Try to allocate memory for jumbo buffers.
	 * The 5705/5750 does not appear to support jumbo frames.
	 */
	if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
	    sc->bge_asicrev != BGE_ASICREV_BCM5750) {
		if (bge_alloc_jumbo_mem(sc)) {
			device_printf(dev, "jumbo buffer allocation failed\n");
			error = ENXIO;
			goto fail;
		}
	}

	/* Set default tuneable values. */
	sc->bge_stat_ticks = BGE_TICKS_PER_SEC;
	sc->bge_rx_coal_ticks = 150;
	sc->bge_tx_coal_ticks = 150;
	sc->bge_rx_max_coal_bds = 64;
	sc->bge_tx_max_coal_bds = 128;

	/* 5705/5750 limits RX return ring to 512 entries. */
	if (sc->bge_asicrev == BGE_ASICREV_BCM5705 ||
	    sc->bge_asicrev == BGE_ASICREV_BCM5750)
		sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT_5705;
	else
		sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT;

	/* Set up ifnet structure */
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = bge_ioctl;
	ifp->if_start = bge_start;
	ifp->if_watchdog = bge_watchdog;
	ifp->if_init = bge_init;
	ifp->if_mtu = ETHERMTU;
	ifq_set_maxlen(&ifp->if_snd, BGE_TX_RING_CNT - 1);
	ifq_set_ready(&ifp->if_snd);
	ifp->if_hwassist = BGE_CSUM_FEATURES;
	ifp->if_capabilities = IFCAP_HWCSUM | IFCAP_VLAN_HWTAGGING |
	    IFCAP_VLAN_MTU;
	ifp->if_capenable = ifp->if_capabilities;

	/*
	 * Figure out what sort of media we have by checking the
	 * hardware config word in the first 32k of NIC internal memory,
	 * or fall back to examining the EEPROM if necessary.
	 * Note: on some BCM5700 cards, this value appears to be unset.
	 * If that's the case, we have to rely on identifying the NIC
	 * by its PCI subsystem ID, as we do below for the SysKonnect
	 * SK-9D41.
	 */
	if (bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_SIG) == BGE_MAGIC_NUMBER) {
		hwcfg = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_NICCFG);
	} else {
		bge_read_eeprom(sc, (caddr_t)&hwcfg,
		    BGE_EE_HWCFG_OFFSET, sizeof(hwcfg));
		hwcfg = ntohl(hwcfg);
	}

	if ((hwcfg & BGE_HWCFG_MEDIA) == BGE_MEDIA_FIBER)
		sc->bge_tbi = 1;

	/* The SysKonnect SK-9D41 is a 1000baseSX card. */
	if (pci_get_subvendor(dev) == PCI_PRODUCT_SCHNEIDERKOCH_SK_9D41)
		sc->bge_tbi = 1;

	if (sc->bge_tbi) {
		ifmedia_init(&sc->bge_ifmedia, IFM_IMASK,
		    bge_ifmedia_upd, bge_ifmedia_sts);
		ifmedia_add(&sc->bge_ifmedia, IFM_ETHER|IFM_1000_SX, 0, NULL);
		ifmedia_add(&sc->bge_ifmedia,
		    IFM_ETHER|IFM_1000_SX|IFM_FDX, 0, NULL);
		ifmedia_add(&sc->bge_ifmedia, IFM_ETHER|IFM_AUTO, 0, NULL);
		ifmedia_set(&sc->bge_ifmedia, IFM_ETHER|IFM_AUTO);
		sc->bge_ifmedia.ifm_media = sc->bge_ifmedia.ifm_cur->ifm_media;
	} else {
		/*
		 * Do transceiver setup.
		 */
		if (mii_phy_probe(dev, &sc->bge_miibus,
		    bge_ifmedia_upd, bge_ifmedia_sts)) {
			device_printf(dev, "MII without any PHY!\n");
			error = ENXIO;
			goto fail;
		}
	}

	/*
	 * When using the BCM5701 in PCI-X mode, data corruption has
	 * been observed in the first few bytes of some received packets.
	 * Aligning the packet buffer in memory eliminates the corruption.
	 * Unfortunately, this misaligns the packet payloads. On platforms
	 * which do not support unaligned accesses, we will realign the
	 * payloads by copying the received packets.
	 */
	switch (sc->bge_chipid) {
	case BGE_CHIPID_BCM5701_A0:
	case BGE_CHIPID_BCM5701_B0:
	case BGE_CHIPID_BCM5701_B2:
	case BGE_CHIPID_BCM5701_B5:
		/* If in PCI-X mode, work around the alignment bug. */
		if ((pci_read_config(dev, BGE_PCI_PCISTATE, 4) &
		    (BGE_PCISTATE_PCI_BUSMODE | BGE_PCISTATE_PCI_BUSSPEED)) ==
		    BGE_PCISTATE_PCI_BUSSPEED)
			sc->bge_rx_alignment_bug = 1;
		break;
	}

	/*
	 * Call MI attach routine.
	 */
	ether_ifattach(ifp, ether_addr, NULL);

	error = bus_setup_intr(dev, sc->bge_irq, INTR_NETSAFE,
			       bge_intr, sc, &sc->bge_intrhand,
			       ifp->if_serializer);
	if (error) {
		ether_ifdetach(ifp);
		device_printf(dev, "couldn't set up irq\n");
		goto fail;
	}

	return(0);
fail:
	bge_detach(dev);
	return(error);
}
static int
bge_detach(device_t dev)
{
	struct bge_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;

	if (device_is_attached(dev)) {
		lwkt_serialize_enter(ifp->if_serializer);
		bge_stop(sc);
		bge_reset(sc);
		bus_teardown_intr(dev, sc->bge_irq, sc->bge_intrhand);
		lwkt_serialize_exit(ifp->if_serializer);

		ether_ifdetach(ifp);
	}
	if (sc->bge_tbi)
		ifmedia_removeall(&sc->bge_ifmedia);
	if (sc->bge_miibus)
		device_delete_child(dev, sc->bge_miibus);
	bus_generic_detach(dev);

	bge_release_resources(sc);

	if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
	    sc->bge_asicrev != BGE_ASICREV_BCM5750)
		bge_free_jumbo_mem(sc);

	return(0);
}
static void
bge_release_resources(struct bge_softc *sc)
{
	device_t dev;

	dev = sc->bge_dev;

	if (sc->bge_vpd_prodname != NULL)
		kfree(sc->bge_vpd_prodname, M_DEVBUF);

	if (sc->bge_vpd_readonly != NULL)
		kfree(sc->bge_vpd_readonly, M_DEVBUF);

	if (sc->bge_irq != NULL)
		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->bge_irq);

	if (sc->bge_res != NULL)
		bus_release_resource(dev, SYS_RES_MEMORY,
		    BGE_PCI_BAR0, sc->bge_res);

	if (sc->bge_rdata != NULL)
		contigfree(sc->bge_rdata, sizeof(struct bge_ring_data),
		    M_DEVBUF);
}
1828 static void
1829 bge_reset(struct bge_softc *sc)
1831 device_t dev;
1832 uint32_t cachesize, command, pcistate, reset;
1833 int i, val = 0;
1835 dev = sc->bge_dev;
1837 /* Save some important PCI state. */
1838 cachesize = pci_read_config(dev, BGE_PCI_CACHESZ, 4);
1839 command = pci_read_config(dev, BGE_PCI_CMD, 4);
1840 pcistate = pci_read_config(dev, BGE_PCI_PCISTATE, 4);
1842 pci_write_config(dev, BGE_PCI_MISC_CTL,
1843 BGE_PCIMISCCTL_INDIRECT_ACCESS|BGE_PCIMISCCTL_MASK_PCI_INTR|
1844 BGE_PCIMISCCTL_ENDIAN_WORDSWAP|BGE_PCIMISCCTL_PCISTATE_RW, 4);
1846 reset = BGE_MISCCFG_RESET_CORE_CLOCKS|(65<<1);
1848 /* XXX: Broadcom Linux driver. */
1849 if (sc->bge_pcie) {
1850 if (CSR_READ_4(sc, 0x7e2c) == 0x60) /* PCIE 1.0 */
1851 CSR_WRITE_4(sc, 0x7e2c, 0x20);
1852 if (sc->bge_chipid != BGE_CHIPID_BCM5750_A0) {
1853 /* Prevent PCIE link training during global reset */
1854 CSR_WRITE_4(sc, BGE_MISC_CFG, (1<<29));
1855 reset |= (1<<29);
1856 }
1857 }
1859 /* Issue global reset */
1860 bge_writereg_ind(sc, BGE_MISC_CFG, reset);
1862 DELAY(1000);
1864 /* XXX: Broadcom Linux driver. */
1865 if (sc->bge_pcie) {
1866 if (sc->bge_chipid == BGE_CHIPID_BCM5750_A0) {
1867 uint32_t v;
1869 DELAY(500000); /* wait for link training to complete */
1870 v = pci_read_config(dev, 0xc4, 4);
1871 pci_write_config(dev, 0xc4, v | (1<<15), 4);
1872 }
1873 /* Set PCIE max payload size and clear error status. */
1874 pci_write_config(dev, 0xd8, 0xf5000, 4);
1875 }
1877 /* Reset some of the PCI state that got zapped by reset */
1878 pci_write_config(dev, BGE_PCI_MISC_CTL,
1879 BGE_PCIMISCCTL_INDIRECT_ACCESS|BGE_PCIMISCCTL_MASK_PCI_INTR|
1880 BGE_PCIMISCCTL_ENDIAN_WORDSWAP|BGE_PCIMISCCTL_PCISTATE_RW, 4);
1881 pci_write_config(dev, BGE_PCI_CACHESZ, cachesize, 4);
1882 pci_write_config(dev, BGE_PCI_CMD, command, 4);
1883 bge_writereg_ind(sc, BGE_MISC_CFG, (65 << 1));
1885 /* Enable memory arbiter. */
1886 if (sc->bge_asicrev != BGE_ASICREV_BCM5705)
1887 CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);
1889 /*
1890 * Prevent PXE restart: write a magic number to the
1891 * general communications memory at 0xB50.
1892 */
1893 bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM, BGE_MAGIC_NUMBER);
1894 /*
1895 * Poll the value location we just wrote until
1896 * we see the 1's complement of the magic number.
1897 * This indicates that the firmware initialization
1898 * is complete.
1899 */
1900 for (i = 0; i < BGE_TIMEOUT; i++) {
1901 val = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM);
1902 if (val == ~BGE_MAGIC_NUMBER)
1903 break;
1904 DELAY(10);
1905 }
1907 if (i == BGE_TIMEOUT) {
1908 if_printf(&sc->arpcom.ac_if, "firmware handshake timed out\n");
1909 return;
1910 }
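/*
 * A worked example of the handshake above, assuming BGE_MAGIC_NUMBER
 * is 0x4B657654 as defined in if_bgereg.h: the host writes 0x4B657654
 * into the GENCOMM word, the bootcode overwrites it with the 1's
 * complement of that value once its initialization is finished, and
 * the poll loop exits when it reads back
 *
 *	~0x4B657654 == 0xB49A89AB
 *
 * or gives up after BGE_TIMEOUT iterations of 10us each.
 */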
1912 /*
1913 * XXX Wait for the value of the PCISTATE register to
1914 * return to its original pre-reset state. This is a
1915 * fairly good indicator of reset completion. If we don't
1916 * wait for the reset to fully complete, trying to read
1917 * from the device's non-PCI registers may yield garbage
1918 * results.
1919 */
1920 for (i = 0; i < BGE_TIMEOUT; i++) {
1921 if (pci_read_config(dev, BGE_PCI_PCISTATE, 4) == pcistate)
1922 break;
1923 DELAY(10);
1924 }
1926 /* Fix up byte swapping */
1927 CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_MODECTL_BYTESWAP_NONFRAME|
1928 BGE_MODECTL_BYTESWAP_DATA);
1930 CSR_WRITE_4(sc, BGE_MAC_MODE, 0);
1932 /*
1933 * The 5704 in TBI mode apparently needs some special
1934 * adjustment to ensure the SERDES drive level is set
1935 * to 1.2V.
1936 */
1937 if (sc->bge_asicrev == BGE_ASICREV_BCM5704 && sc->bge_tbi) {
1938 uint32_t serdescfg;
1940 serdescfg = CSR_READ_4(sc, BGE_SERDES_CFG);
1941 serdescfg = (serdescfg & ~0xFFF) | 0x880;
1942 CSR_WRITE_4(sc, BGE_SERDES_CFG, serdescfg);
1943 }
1945 /* XXX: Broadcom Linux driver. */
1946 if (sc->bge_pcie && sc->bge_chipid != BGE_CHIPID_BCM5750_A0) {
1947 uint32_t v;
1949 v = CSR_READ_4(sc, 0x7c00);
1950 CSR_WRITE_4(sc, 0x7c00, v | (1<<25));
1951 }
1953 DELAY(10000);
1954 }
1956 /*
1957 * Frame reception handling. This is called if there's a frame
1958 * on the receive return list.
1959 *
1960 * Note: we have to be able to handle two possibilities here:
1961 * 1) the frame is from the jumbo receive ring
1962 * 2) the frame is from the standard receive ring
1963 */
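/*
 * Ring indices below are advanced with BGE_INC(). A sketch of its
 * (assumed) definition from if_bgereg.h -- a plain modular increment:
 *
 *	#define BGE_INC(x, y)	(x) = ((x) + 1) % (y)
 *
 * so bge_rx_saved_considx, bge_std and bge_jumbo each wrap back to 0
 * when they reach their ring size.
 */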
1965 static void
1966 bge_rxeof(struct bge_softc *sc)
1967 {
1968 struct ifnet *ifp;
1969 int stdcnt = 0, jumbocnt = 0;
1971 ifp = &sc->arpcom.ac_if;
1973 while(sc->bge_rx_saved_considx !=
1974 sc->bge_rdata->bge_status_block.bge_idx[0].bge_rx_prod_idx) {
1975 struct bge_rx_bd *cur_rx;
1976 uint32_t rxidx;
1977 struct mbuf *m = NULL;
1978 uint16_t vlan_tag = 0;
1979 int have_tag = 0;
1981 cur_rx =
1982 &sc->bge_rdata->bge_rx_return_ring[sc->bge_rx_saved_considx];
1984 rxidx = cur_rx->bge_idx;
1985 BGE_INC(sc->bge_rx_saved_considx, sc->bge_return_ring_cnt);
1987 if (cur_rx->bge_flags & BGE_RXBDFLAG_VLAN_TAG) {
1988 have_tag = 1;
1989 vlan_tag = cur_rx->bge_vlan_tag;
1990 }
1992 if (cur_rx->bge_flags & BGE_RXBDFLAG_JUMBO_RING) {
1993 BGE_INC(sc->bge_jumbo, BGE_JUMBO_RX_RING_CNT);
1994 m = sc->bge_cdata.bge_rx_jumbo_chain[rxidx];
1995 sc->bge_cdata.bge_rx_jumbo_chain[rxidx] = NULL;
1996 jumbocnt++;
1997 if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
1998 ifp->if_ierrors++;
1999 bge_newbuf_jumbo(sc, sc->bge_jumbo, m);
2000 continue;
2001 }
2002 if (bge_newbuf_jumbo(sc,
2003 sc->bge_jumbo, NULL) == ENOBUFS) {
2004 ifp->if_ierrors++;
2005 bge_newbuf_jumbo(sc, sc->bge_jumbo, m);
2006 continue;
2007 }
2008 } else {
2009 BGE_INC(sc->bge_std, BGE_STD_RX_RING_CNT);
2010 m = sc->bge_cdata.bge_rx_std_chain[rxidx];
2011 sc->bge_cdata.bge_rx_std_chain[rxidx] = NULL;
2012 stdcnt++;
2013 if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
2014 ifp->if_ierrors++;
2015 bge_newbuf_std(sc, sc->bge_std, m);
2016 continue;
2017 }
2018 if (bge_newbuf_std(sc, sc->bge_std,
2019 NULL) == ENOBUFS) {
2020 ifp->if_ierrors++;
2021 bge_newbuf_std(sc, sc->bge_std, m);
2022 continue;
2023 }
2024 }
2026 ifp->if_ipackets++;
2027 #ifndef __i386__
2028 /*
2029 * The i386 allows unaligned accesses, but for other
2030 * platforms we must make sure the payload is aligned.
2031 */
2032 if (sc->bge_rx_alignment_bug) {
2033 bcopy(m->m_data, m->m_data + ETHER_ALIGN,
2034 cur_rx->bge_len);
2035 m->m_data += ETHER_ALIGN;
2036 }
2037 #endif
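/*
 * Why the 2-byte ETHER_ALIGN shift above restores alignment: with the
 * frame starting on a 4-byte boundary, the 14-byte Ethernet header
 * leaves the IP header at offset 14 (misaligned); copying the payload
 * up by 2 moves it to offset 16, a 4-byte boundary. The price is an
 * extra copy of every packet, which is why this is only done when the
 * 5701 PCI-X bug forces unaligned receive buffers.
 */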
2038 m->m_pkthdr.len = m->m_len = cur_rx->bge_len - ETHER_CRC_LEN;
2039 m->m_pkthdr.rcvif = ifp;
2041 #if 0 /* currently broken for some packets, possibly related to TCP options */
2042 if (ifp->if_hwassist) {
2043 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
2044 if ((cur_rx->bge_ip_csum ^ 0xffff) == 0)
2045 m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
2046 if (cur_rx->bge_flags & BGE_RXBDFLAG_TCP_UDP_CSUM) {
2047 m->m_pkthdr.csum_data =
2048 cur_rx->bge_tcp_udp_csum;
2049 m->m_pkthdr.csum_flags |= CSUM_DATA_VALID;
2050 }
2051 }
2052 #endif
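/*
 * For reference, the disabled test above uses 1's complement checksum
 * arithmetic: a correctly checksummed IP header sums to 0xffff, so
 *
 *	(cur_rx->bge_ip_csum ^ 0xffff) == 0
 *
 * means the hardware verified the header. The TCP/UDP case hands the
 * raw hardware sum up in csum_data for the stack to finish, hence
 * CSUM_DATA_VALID rather than a "checksum good" flag.
 */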
2054 /*
2055 * If we received a packet with a vlan tag, pass it
2056 * to vlan_input() instead of ether_input().
2057 */
2058 if (have_tag) {
2059 VLAN_INPUT_TAG(m, vlan_tag);
2060 have_tag = vlan_tag = 0;
2061 } else {
2062 ifp->if_input(ifp, m);
2063 }
2064 }
2066 CSR_WRITE_4(sc, BGE_MBX_RX_CONS0_LO, sc->bge_rx_saved_considx);
2067 if (stdcnt)
2068 CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std);
2069 if (jumbocnt)
2070 CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo);
2071 }
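/*
 * The mailbox stores above act as doorbells: the chip only re-reads a
 * ring after its mailbox is written. The return-ring consumer index
 * is always published, while the standard and jumbo producer indices
 * are rewritten only if buffers were actually recycled (stdcnt or
 * jumbocnt nonzero).
 */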
2073 static void
2074 bge_txeof(struct bge_softc *sc)
2075 {
2076 struct bge_tx_bd *cur_tx = NULL;
2077 struct ifnet *ifp;
2079 ifp = &sc->arpcom.ac_if;
2081 /*
2082 * Go through our tx ring and free mbufs for those
2083 * frames that have been sent.
2084 */
2085 while (sc->bge_tx_saved_considx !=
2086 sc->bge_rdata->bge_status_block.bge_idx[0].bge_tx_cons_idx) {
2087 uint32_t idx = 0;
2089 idx = sc->bge_tx_saved_considx;
2090 cur_tx = &sc->bge_rdata->bge_tx_ring[idx];
2091 if (cur_tx->bge_flags & BGE_TXBDFLAG_END)
2092 ifp->if_opackets++;
2093 if (sc->bge_cdata.bge_tx_chain[idx] != NULL) {
2094 m_freem(sc->bge_cdata.bge_tx_chain[idx]);
2095 sc->bge_cdata.bge_tx_chain[idx] = NULL;
2096 }
2097 sc->bge_txcnt--;
2098 BGE_INC(sc->bge_tx_saved_considx, BGE_TX_RING_CNT);
2099 ifp->if_timer = 0;
2100 }
2102 if (cur_tx != NULL)
2103 ifp->if_flags &= ~IFF_OACTIVE;
2104 }
2106 static void
2107 bge_intr(void *xsc)
2108 {
2109 struct bge_softc *sc = xsc;
2110 struct ifnet *ifp = &sc->arpcom.ac_if;
2111 uint32_t status, statusword, mimode;
2113 /* XXX */
2114 statusword = loadandclear(&sc->bge_rdata->bge_status_block.bge_status);
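/*
 * loadandclear() is an atomic fetch-and-zero (an xchg on x86): it
 * reads the status word and zeroes it in one indivisible step, so a
 * status update DMA'd in by the chip between a separate read and
 * clear cannot be lost.
 */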
2116 #ifdef notdef
2117 /* Avoid this for now -- checking this register is expensive. */
2118 /* Make sure this is really our interrupt. */
2119 if (!(CSR_READ_4(sc, BGE_MISC_LOCAL_CTL) & BGE_MLC_INTR_STATE))
2120 return;
2121 #endif
2122 /* Ack interrupt and stop others from occurring. */
2123 CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 1);
2125 /*
2126 * Process link state changes.
2127 * Grrr. The link status word in the status block does
2128 * not work correctly on the BCM5700 rev AX and BX chips,
2129 * according to all available information. Hence, we have
2130 * to enable MII interrupts in order to properly obtain
2131 * async link changes. Unfortunately, this also means that
2132 * we have to read the MAC status register to detect link
2133 * changes, thereby adding an additional register access to
2134 * the interrupt handler.
2135 */
2137 if (sc->bge_asicrev == BGE_ASICREV_BCM5700) {
2138 status = CSR_READ_4(sc, BGE_MAC_STS);
2139 if (status & BGE_MACSTAT_MI_INTERRUPT) {
2140 sc->bge_link = 0;
2141 callout_stop(&sc->bge_stat_timer);
2142 bge_tick_serialized(sc);
2143 /* Clear the interrupt */
2144 CSR_WRITE_4(sc, BGE_MAC_EVT_ENB,
2145 BGE_EVTENB_MI_INTERRUPT);
2146 bge_miibus_readreg(sc->bge_dev, 1, BRGPHY_MII_ISR);
2147 bge_miibus_writereg(sc->bge_dev, 1, BRGPHY_MII_IMR,
2148 BRGPHY_INTRS);
2149 }
2150 } else {
2151 if (statusword & BGE_STATFLAG_LINKSTATE_CHANGED) {
2152 /*
2153 * Sometimes PCS encoding errors are detected in
2154 * TBI mode (on fiber NICs), and for some reason
2155 * the chip will signal them as link changes.
2156 * If we get a link change event, but the 'PCS
2157 * encoding error' bit in the MAC status register
2158 * is set, don't bother doing a link check.
2159 * This avoids spurious "gigabit link up" messages
2160 * that sometimes appear on fiber NICs during
2161 * periods of heavy traffic. (There should be no
2162 * effect on copper NICs.)
2163 *
2164 * If we do have a copper NIC (bge_tbi == 0) then
2165 * check that the AUTOPOLL bit is set before
2166 * processing the event as a real link change.
2167 * Turning AUTOPOLL on and off in the MII read/write
2168 * functions will often trigger a link status
2169 * interrupt for no reason.
2170 */
2171 status = CSR_READ_4(sc, BGE_MAC_STS);
2172 mimode = CSR_READ_4(sc, BGE_MI_MODE);
2173 if (!(status & (BGE_MACSTAT_PORT_DECODE_ERROR |
2174 BGE_MACSTAT_MI_COMPLETE)) &&
2175 (!sc->bge_tbi && (mimode & BGE_MIMODE_AUTOPOLL))) {
2176 sc->bge_link = 0;
2177 callout_stop(&sc->bge_stat_timer);
2178 bge_tick_serialized(sc);
2179 }
2180 sc->bge_link = 0;
2181 callout_stop(&sc->bge_stat_timer);
2182 bge_tick_serialized(sc);
2183 /* Clear the interrupt */
2184 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED|
2185 BGE_MACSTAT_CFG_CHANGED|BGE_MACSTAT_MI_COMPLETE|
2186 BGE_MACSTAT_LINK_CHANGED);
2188 /* Force flush the status block cached by PCI bridge */
2189 CSR_READ_4(sc, BGE_MBX_IRQ0_LO);
2190 }
2191 }
2193 if (ifp->if_flags & IFF_RUNNING) {
2194 /* Check RX return ring producer/consumer */
2195 bge_rxeof(sc);
2197 /* Check TX ring producer/consumer */
2198 bge_txeof(sc);
2199 }
2201 bge_handle_events(sc);
2203 /* Re-enable interrupts. */
2204 CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 0);
2206 if ((ifp->if_flags & IFF_RUNNING) && !ifq_is_empty(&ifp->if_snd))
2207 (*ifp->if_start)(ifp);
2208 }
2210 static void
2211 bge_tick(void *xsc)
2212 {
2213 struct bge_softc *sc = xsc;
2214 struct ifnet *ifp = &sc->arpcom.ac_if;
2216 lwkt_serialize_enter(ifp->if_serializer);
2217 bge_tick_serialized(xsc);
2218 lwkt_serialize_exit(ifp->if_serializer);
2219 }
2221 static void
2222 bge_tick_serialized(void *xsc)
2223 {
2224 struct bge_softc *sc = xsc;
2225 struct ifnet *ifp = &sc->arpcom.ac_if;
2226 struct mii_data *mii = NULL;
2227 struct ifmedia *ifm = NULL;
2229 if (sc->bge_asicrev == BGE_ASICREV_BCM5705 ||
2230 sc->bge_asicrev == BGE_ASICREV_BCM5750)
2231 bge_stats_update_regs(sc);
2232 else
2233 bge_stats_update(sc);
2235 callout_reset(&sc->bge_stat_timer, hz, bge_tick, sc);
2237 if (sc->bge_link) {
2238 return;
2239 }
2241 if (sc->bge_tbi) {
2242 ifm = &sc->bge_ifmedia;
2243 if (CSR_READ_4(sc, BGE_MAC_STS) &
2244 BGE_MACSTAT_TBI_PCS_SYNCHED) {
2245 sc->bge_link++;
2246 if (sc->bge_asicrev == BGE_ASICREV_BCM5704) {
2247 BGE_CLRBIT(sc, BGE_MAC_MODE,
2248 BGE_MACMODE_TBI_SEND_CFGS);
2249 }
2250 CSR_WRITE_4(sc, BGE_MAC_STS, 0xFFFFFFFF);
2251 if_printf(ifp, "gigabit link up\n");
2252 if (!ifq_is_empty(&ifp->if_snd))
2253 (*ifp->if_start)(ifp);
2254 }
2255 return;
2256 }
2258 mii = device_get_softc(sc->bge_miibus);
2259 mii_tick(mii);
2261 if (!sc->bge_link) {
2262 mii_pollstat(mii);
2263 if (mii->mii_media_status & IFM_ACTIVE &&
2264 IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
2265 sc->bge_link++;
2266 if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T ||
2267 IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_SX)
2268 if_printf(ifp, "gigabit link up\n");
2269 if (!ifq_is_empty(&ifp->if_snd))
2270 (*ifp->if_start)(ifp);
2271 }
2272 }
2273 }
2275 static void
2276 bge_stats_update_regs(struct bge_softc *sc)
2277 {
2278 struct ifnet *ifp = &sc->arpcom.ac_if;
2279 struct bge_mac_stats_regs stats;
2280 uint32_t *s;
2281 int i;
2283 s = (uint32_t *)&stats;
2284 for (i = 0; i < sizeof(struct bge_mac_stats_regs); i += 4) {
2285 *s = CSR_READ_4(sc, BGE_RX_STATS + i);
2286 s++;
2287 }
2289 ifp->if_collisions +=
2290 (stats.dot3StatsSingleCollisionFrames +
2291 stats.dot3StatsMultipleCollisionFrames +
2292 stats.dot3StatsExcessiveCollisions +
2293 stats.dot3StatsLateCollisions) -
2294 ifp->if_collisions;
2295 }
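/*
 * Note the idiom just used: the dot3Stats registers are cumulative
 * totals, so "if_collisions += total - if_collisions" collapses to a
 * plain assignment. Absent concurrent writers it is equivalent to:
 *
 *	ifp->if_collisions = stats.dot3StatsSingleCollisionFrames +
 *	    stats.dot3StatsMultipleCollisionFrames +
 *	    stats.dot3StatsExcessiveCollisions +
 *	    stats.dot3StatsLateCollisions;
 */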
2297 static void
2298 bge_stats_update(struct bge_softc *sc)
2299 {
2300 struct ifnet *ifp = &sc->arpcom.ac_if;
2301 struct bge_stats *stats;
2303 stats = (struct bge_stats *)(sc->bge_vhandle +
2304 BGE_MEMWIN_START + BGE_STATS_BLOCK);
2306 ifp->if_collisions +=
2307 (stats->txstats.dot3StatsSingleCollisionFrames.bge_addr_lo +
2308 stats->txstats.dot3StatsMultipleCollisionFrames.bge_addr_lo +
2309 stats->txstats.dot3StatsExcessiveCollisions.bge_addr_lo +
2310 stats->txstats.dot3StatsLateCollisions.bge_addr_lo) -
2311 ifp->if_collisions;
2313 #ifdef notdef
2314 ifp->if_collisions +=
2315 (sc->bge_rdata->bge_info.bge_stats.dot3StatsSingleCollisionFrames +
2316 sc->bge_rdata->bge_info.bge_stats.dot3StatsMultipleCollisionFrames +
2317 sc->bge_rdata->bge_info.bge_stats.dot3StatsExcessiveCollisions +
2318 sc->bge_rdata->bge_info.bge_stats.dot3StatsLateCollisions) -
2319 ifp->if_collisions;
2320 #endif
2321 }
2323 /*
2324 * Encapsulate an mbuf chain in the tx ring by coupling the mbuf data
2325 * pointers to descriptors.
2326 */
2327 static int
2328 bge_encap(struct bge_softc *sc, struct mbuf *m_head, uint32_t *txidx)
2329 {
2330 struct bge_tx_bd *f = NULL;
2331 struct mbuf *m;
2332 uint32_t frag, cur, cnt = 0;
2333 uint16_t csum_flags = 0;
2334 struct ifvlan *ifv = NULL;
2336 if ((m_head->m_flags & (M_PROTO1|M_PKTHDR)) == (M_PROTO1|M_PKTHDR) &&
2337 m_head->m_pkthdr.rcvif != NULL &&
2338 m_head->m_pkthdr.rcvif->if_type == IFT_L2VLAN)
2339 ifv = m_head->m_pkthdr.rcvif->if_softc;
2341 m = m_head;
2342 cur = frag = *txidx;
2344 if (m_head->m_pkthdr.csum_flags) {
2345 if (m_head->m_pkthdr.csum_flags & CSUM_IP)
2346 csum_flags |= BGE_TXBDFLAG_IP_CSUM;
2347 if (m_head->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP))
2348 csum_flags |= BGE_TXBDFLAG_TCP_UDP_CSUM;
2349 if (m_head->m_flags & M_LASTFRAG)
2350 csum_flags |= BGE_TXBDFLAG_IP_FRAG_END;
2351 else if (m_head->m_flags & M_FRAG)
2352 csum_flags |= BGE_TXBDFLAG_IP_FRAG;
2353 }
2354 /*
2355 * Start packing the mbufs in this chain into
2356 * the fragment pointers. Stop when we run out
2357 * of fragments or hit the end of the mbuf chain.
2358 */
2359 for (m = m_head; m != NULL; m = m->m_next) {
2360 if (m->m_len != 0) {
2361 f = &sc->bge_rdata->bge_tx_ring[frag];
2362 if (sc->bge_cdata.bge_tx_chain[frag] != NULL)
2363 break;
2364 BGE_HOSTADDR(f->bge_addr,
2365 vtophys(mtod(m, vm_offset_t)));
2366 f->bge_len = m->m_len;
2367 f->bge_flags = csum_flags;
2368 if (ifv != NULL) {
2369 f->bge_flags |= BGE_TXBDFLAG_VLAN_TAG;
2370 f->bge_vlan_tag = ifv->ifv_tag;
2371 } else {
2372 f->bge_vlan_tag = 0;
2373 }
2374 /*
2375 * Sanity check: avoid coming within 16 descriptors
2376 * of the end of the ring.
2377 */
2378 if ((BGE_TX_RING_CNT - (sc->bge_txcnt + cnt)) < 16)
2379 return(ENOBUFS);
2380 cur = frag;
2381 BGE_INC(frag, BGE_TX_RING_CNT);
2382 cnt++;
2383 }
2384 }
2386 if (m != NULL)
2387 return(ENOBUFS);
2389 if (frag == sc->bge_tx_saved_considx)
2390 return(ENOBUFS);
2392 sc->bge_rdata->bge_tx_ring[cur].bge_flags |= BGE_TXBDFLAG_END;
2393 sc->bge_cdata.bge_tx_chain[cur] = m_head;
2394 sc->bge_txcnt += cnt;
2396 *txidx = frag;
2398 return(0);
2399 }
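/*
 * A minimal sketch of the calling convention (bge_start() below is
 * the real caller): the producer index is passed by reference and is
 * only advanced past the consumed descriptors on success.
 */
#if 0
	uint32_t prodidx = sc->bge_tx_prodidx;

	if (bge_encap(sc, m_head, &prodidx) != 0) {
		/* Ring is full; try again after bge_txeof() drains it. */
		ifp->if_flags |= IFF_OACTIVE;
	} else {
		/* Ring the doorbell with the new producer index. */
		CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx);
		sc->bge_tx_prodidx = prodidx;
	}
#endif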
2401 /*
2402 * Main transmit routine. To avoid having to do mbuf copies, we put pointers
2403 * to the mbuf data regions directly in the transmit descriptors.
2404 */
2405 static void
2406 bge_start(struct ifnet *ifp)
2407 {
2408 struct bge_softc *sc;
2409 struct mbuf *m_head = NULL;
2410 uint32_t prodidx = 0;
2411 int need_trans;
2413 sc = ifp->if_softc;
2415 if (!sc->bge_link)
2416 return;
2418 prodidx = sc->bge_tx_prodidx;
2420 need_trans = 0;
2421 while(sc->bge_cdata.bge_tx_chain[prodidx] == NULL) {
2422 m_head = ifq_poll(&ifp->if_snd);
2423 if (m_head == NULL)
2424 break;
2426 /*
2427 * XXX
2428 * safety overkill. If this is a fragmented packet chain
2429 * with delayed TCP/UDP checksums, then only encapsulate
2430 * it if we have enough descriptors to handle the entire
2431 * chain at once.
2432 * (paranoia -- may not actually be needed)
2433 */
2434 if (m_head->m_flags & M_FIRSTFRAG &&
2435 m_head->m_pkthdr.csum_flags & (CSUM_DELAY_DATA)) {
2436 if ((BGE_TX_RING_CNT - sc->bge_txcnt) <
2437 m_head->m_pkthdr.csum_data + 16) {
2438 ifp->if_flags |= IFF_OACTIVE;
2439 break;
2440 }
2441 }
2443 /*
2444 * Pack the data into the transmit ring. If we
2445 * don't have room, set the OACTIVE flag and wait
2446 * for the NIC to drain the ring.
2447 */
2448 if (bge_encap(sc, m_head, &prodidx)) {
2449 ifp->if_flags |= IFF_OACTIVE;
2450 break;
2451 }
2452 ifq_dequeue(&ifp->if_snd, m_head);
2453 need_trans = 1;
2455 BPF_MTAP(ifp, m_head);
2456 }
2458 if (!need_trans)
2459 return;
2461 /* Transmit */
2462 CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx);
2463 /* 5700 b2 errata */
2464 if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
2465 CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx);
2467 sc->bge_tx_prodidx = prodidx;
2469 /*
2470 * Set a timeout in case the chip goes out to lunch.
2471 */
2472 ifp->if_timer = 5;
2473 }
2475 static void
2476 bge_init(void *xsc)
2477 {
2478 struct bge_softc *sc = xsc;
2479 struct ifnet *ifp = &sc->arpcom.ac_if;
2480 uint16_t *m;
2482 ASSERT_SERIALIZED(ifp->if_serializer);
2484 if (ifp->if_flags & IFF_RUNNING)
2485 return;
2487 /* Cancel pending I/O and flush buffers. */
2488 bge_stop(sc);
2489 bge_reset(sc);
2490 bge_chipinit(sc);
2492 /*
2493 * Init the various state machines, ring
2494 * control blocks and firmware.
2495 */
2496 if (bge_blockinit(sc)) {
2497 if_printf(ifp, "initialization failure\n");
2498 return;
2499 }
2501 /* Specify MTU. */
2502 CSR_WRITE_4(sc, BGE_RX_MTU, ifp->if_mtu +
2503 ETHER_HDR_LEN + ETHER_CRC_LEN + EVL_ENCAPLEN);
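/*
 * For the default MTU of 1500 this programs 1500 + 14 (Ethernet
 * header) + 4 (CRC) + 4 (802.1Q tag, EVL_ENCAPLEN) = 1522 bytes,
 * the largest VLAN-tagged frame the receiver will accept.
 */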
2505 /* Load our MAC address. */
2506 m = (uint16_t *)&sc->arpcom.ac_enaddr[0];
2507 CSR_WRITE_4(sc, BGE_MAC_ADDR1_LO, htons(m[0]));
2508 CSR_WRITE_4(sc, BGE_MAC_ADDR1_HI, (htons(m[1]) << 16) | htons(m[2]));
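/*
 * The htons() packing above is endian-neutral. For the hypothetical
 * address 00:11:22:33:44:55 the registers end up as:
 *
 *	BGE_MAC_ADDR1_LO = 0x00000011	(first two octets)
 *	BGE_MAC_ADDR1_HI = 0x22334455	(remaining four octets)
 */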
2510 /* Enable or disable promiscuous mode as needed. */
2511 bge_setpromisc(sc);
2513 /* Program multicast filter. */
2514 bge_setmulti(sc);
2516 /* Init RX ring. */
2517 bge_init_rx_ring_std(sc);
2519 /*
2520 * Workaround for a bug in 5705 ASIC rev A0. Poll the NIC's
2521 * memory to ensure that the chip has in fact read the first
2522 * entry of the ring.
2523 */
2524 if (sc->bge_chipid == BGE_CHIPID_BCM5705_A0) {
2525 uint32_t v, i;
2526 for (i = 0; i < 10; i++) {
2527 DELAY(20);
2528 v = bge_readmem_ind(sc, BGE_STD_RX_RINGS + 8);
2529 if (v == (MCLBYTES - ETHER_ALIGN))
2530 break;
2531 }
2532 if (i == 10)
2533 if_printf(ifp, "5705 A0 chip failed to load RX ring\n");
2534 }
2536 /* Init jumbo RX ring. */
2537 if (ifp->if_mtu > (ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN))
2538 bge_init_rx_ring_jumbo(sc);
2540 /* Init our RX return ring index */
2541 sc->bge_rx_saved_considx = 0;
2543 /* Init TX ring. */
2544 bge_init_tx_ring(sc);
2546 /* Turn on transmitter */
2547 BGE_SETBIT(sc, BGE_TX_MODE, BGE_TXMODE_ENABLE);
2549 /* Turn on receiver */
2550 BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);
2552 /* Tell firmware we're alive. */
2553 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
2555 /* Enable host interrupts. */
2556 BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_CLEAR_INTA);
2557 BGE_CLRBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR);
2558 CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 0);
2560 bge_ifmedia_upd(ifp);
2562 ifp->if_flags |= IFF_RUNNING;
2563 ifp->if_flags &= ~IFF_OACTIVE;
2565 callout_reset(&sc->bge_stat_timer, hz, bge_tick, sc);
2566 }
2568 /*
2569 * Set media options.
2570 */
2571 static int
2572 bge_ifmedia_upd(struct ifnet *ifp)
2573 {
2574 struct bge_softc *sc = ifp->if_softc;
2575 struct ifmedia *ifm = &sc->bge_ifmedia;
2576 struct mii_data *mii;
2578 /* If this is a 1000baseX NIC, enable the TBI port. */
2579 if (sc->bge_tbi) {
2580 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
2581 return(EINVAL);
2582 switch(IFM_SUBTYPE(ifm->ifm_media)) {
2583 case IFM_AUTO:
2584 /*
2585 * The BCM5704 ASIC appears to have a special
2586 * mechanism for programming the autoneg
2587 * advertisement registers in TBI mode.
2588 */
2589 if (!bge_fake_autoneg &&
2590 sc->bge_asicrev == BGE_ASICREV_BCM5704) {
2591 uint32_t sgdig;
2593 CSR_WRITE_4(sc, BGE_TX_TBI_AUTONEG, 0);
2594 sgdig = CSR_READ_4(sc, BGE_SGDIG_CFG);
2595 sgdig |= BGE_SGDIGCFG_AUTO |
2596 BGE_SGDIGCFG_PAUSE_CAP |
2597 BGE_SGDIGCFG_ASYM_PAUSE;
2598 CSR_WRITE_4(sc, BGE_SGDIG_CFG,
2599 sgdig | BGE_SGDIGCFG_SEND);
2600 DELAY(5);
2601 CSR_WRITE_4(sc, BGE_SGDIG_CFG, sgdig);
2602 }
2603 break;
2604 case IFM_1000_SX:
2605 if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) {
2606 BGE_CLRBIT(sc, BGE_MAC_MODE,
2607 BGE_MACMODE_HALF_DUPLEX);
2608 } else {
2609 BGE_SETBIT(sc, BGE_MAC_MODE,
2610 BGE_MACMODE_HALF_DUPLEX);
2611 }
2612 break;
2613 default:
2614 return(EINVAL);
2615 }
2616 return(0);
2617 }
2619 mii = device_get_softc(sc->bge_miibus);
2620 sc->bge_link = 0;
2621 if (mii->mii_instance) {
2622 struct mii_softc *miisc;
2623 LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
2624 mii_phy_reset(miisc);
2625 }
2626 mii_mediachg(mii);
2628 return(0);
2629 }
2631 /*
2632 * Report current media status.
2633 */
2634 static void
2635 bge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
2636 {
2637 struct bge_softc *sc = ifp->if_softc;
2638 struct mii_data *mii;
2640 if (sc->bge_tbi) {
2641 ifmr->ifm_status = IFM_AVALID;
2642 ifmr->ifm_active = IFM_ETHER;
2643 if (CSR_READ_4(sc, BGE_MAC_STS) &
2644 BGE_MACSTAT_TBI_PCS_SYNCHED)
2645 ifmr->ifm_status |= IFM_ACTIVE;
2646 ifmr->ifm_active |= IFM_1000_SX;
2647 if (CSR_READ_4(sc, BGE_MAC_MODE) & BGE_MACMODE_HALF_DUPLEX)
2648 ifmr->ifm_active |= IFM_HDX;
2649 else
2650 ifmr->ifm_active |= IFM_FDX;
2651 return;
2652 }
2654 mii = device_get_softc(sc->bge_miibus);
2655 mii_pollstat(mii);
2656 ifmr->ifm_active = mii->mii_media_active;
2657 ifmr->ifm_status = mii->mii_media_status;
2658 }
2660 static int
2661 bge_ioctl(struct ifnet *ifp, u_long command, caddr_t data, struct ucred *cr)
2662 {
2663 struct bge_softc *sc = ifp->if_softc;
2664 struct ifreq *ifr = (struct ifreq *) data;
2665 int mask, error = 0;
2666 struct mii_data *mii;
2668 ASSERT_SERIALIZED(ifp->if_serializer);
2670 switch(command) {
2671 case SIOCSIFMTU:
2672 /* Disallow jumbo frames on 5705/5750. */
2673 if (((sc->bge_asicrev == BGE_ASICREV_BCM5705 ||
2674 sc->bge_asicrev == BGE_ASICREV_BCM5750) &&
2675 ifr->ifr_mtu > ETHERMTU) || ifr->ifr_mtu > BGE_JUMBO_MTU)
2676 error = EINVAL;
2677 else {
2678 ifp->if_mtu = ifr->ifr_mtu;
2679 ifp->if_flags &= ~IFF_RUNNING;
2680 bge_init(sc);
2681 }
2682 break;
2683 case SIOCSIFFLAGS:
2684 if (ifp->if_flags & IFF_UP) {
2685 if (ifp->if_flags & IFF_RUNNING) {
2686 int flags = ifp->if_flags ^ sc->bge_if_flags;
2688 /*
2689 * If only the state of the PROMISC flag
2690 * changed, then just use the 'set promisc
2691 * mode' command instead of reinitializing
2692 * the entire NIC. Doing a full re-init
2693 * means reloading the firmware and waiting
2694 * for it to start up, which may take a
2695 * second or two. Similarly for ALLMULTI.
2696 */
2697 if (flags & IFF_PROMISC)
2698 bge_setpromisc(sc);
2699 if (flags & IFF_ALLMULTI)
2700 bge_setmulti(sc);
2701 } else {
2702 bge_init(sc);
2704 } else {
2705 if (ifp->if_flags & IFF_RUNNING)
2706 bge_stop(sc);
2707 }
2708 sc->bge_if_flags = ifp->if_flags;
2709 error = 0;
2710 break;
2711 case SIOCADDMULTI:
2712 case SIOCDELMULTI:
2713 if (ifp->if_flags & IFF_RUNNING) {
2714 bge_setmulti(sc);
2715 error = 0;
2716 }
2717 break;
2718 case SIOCSIFMEDIA:
2719 case SIOCGIFMEDIA:
2720 if (sc->bge_tbi) {
2721 error = ifmedia_ioctl(ifp, ifr,
2722 &sc->bge_ifmedia, command);
2723 } else {
2724 mii = device_get_softc(sc->bge_miibus);
2725 error = ifmedia_ioctl(ifp, ifr,
2726 &mii->mii_media, command);
2727 }
2728 break;
2729 case SIOCSIFCAP:
2730 mask = ifr->ifr_reqcap ^ ifp->if_capenable;
2731 if (mask & IFCAP_HWCSUM) {
2732 if (IFCAP_HWCSUM & ifp->if_capenable)
2733 ifp->if_capenable &= ~IFCAP_HWCSUM;
2734 else
2735 ifp->if_capenable |= IFCAP_HWCSUM;
2736 }
2737 error = 0;
2738 break;
2739 default:
2740 error = ether_ioctl(ifp, command, data);
2741 break;
2742 }
2743 return(error);
2744 }
2746 static void
2747 bge_watchdog(struct ifnet *ifp)
2748 {
2749 struct bge_softc *sc = ifp->if_softc;
2751 if_printf(ifp, "watchdog timeout -- resetting\n");
2753 ifp->if_flags &= ~IFF_RUNNING;
2754 bge_init(sc);
2756 ifp->if_oerrors++;
2758 if (!ifq_is_empty(&ifp->if_snd))
2759 ifp->if_start(ifp);
2760 }
2762 /*
2763 * Stop the adapter and free any mbufs allocated to the
2764 * RX and TX lists.
2765 */
2766 static void
2767 bge_stop(struct bge_softc *sc)
2768 {
2769 struct ifnet *ifp = &sc->arpcom.ac_if;
2770 struct ifmedia_entry *ifm;
2771 struct mii_data *mii = NULL;
2772 int mtmp, itmp;
2774 ASSERT_SERIALIZED(ifp->if_serializer);
2776 if (!sc->bge_tbi)
2777 mii = device_get_softc(sc->bge_miibus);
2779 callout_stop(&sc->bge_stat_timer);
2781 /*
2782 * Disable all of the receiver blocks
2783 */
2784 BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);
2785 BGE_CLRBIT(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);
2786 BGE_CLRBIT(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);
2787 if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
2788 sc->bge_asicrev != BGE_ASICREV_BCM5750)
2789 BGE_CLRBIT(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);
2790 BGE_CLRBIT(sc, BGE_RDBDI_MODE, BGE_RBDIMODE_ENABLE);
2791 BGE_CLRBIT(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);
2792 BGE_CLRBIT(sc, BGE_RBDC_MODE, BGE_RBDCMODE_ENABLE);
2794 /*
2795 * Disable all of the transmit blocks
2796 */
2797 BGE_CLRBIT(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);
2798 BGE_CLRBIT(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);
2799 BGE_CLRBIT(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);
2800 BGE_CLRBIT(sc, BGE_RDMA_MODE, BGE_RDMAMODE_ENABLE);
2801 BGE_CLRBIT(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE);
2802 if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
2803 sc->bge_asicrev != BGE_ASICREV_BCM5750)
2804 BGE_CLRBIT(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);
2805 BGE_CLRBIT(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);
2807 /*
2808 * Shut down all of the memory managers and related
2809 * state machines.
2810 */
2811 BGE_CLRBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE);
2812 BGE_CLRBIT(sc, BGE_WDMA_MODE, BGE_WDMAMODE_ENABLE);
2813 if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
2814 sc->bge_asicrev != BGE_ASICREV_BCM5750)
2815 BGE_CLRBIT(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);
2816 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
2817 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);
2818 if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
2819 sc->bge_asicrev != BGE_ASICREV_BCM5750) {
2820 BGE_CLRBIT(sc, BGE_BMAN_MODE, BGE_BMANMODE_ENABLE);
2821 BGE_CLRBIT(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);
2822 }
2824 /* Disable host interrupts. */
2825 BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR);
2826 CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 1);
2828 /*
2829 * Tell firmware we're shutting down.
2830 */
2831 BGE_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
2833 /* Free the RX lists. */
2834 bge_free_rx_ring_std(sc);
2836 /* Free jumbo RX list. */
2837 if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
2838 sc->bge_asicrev != BGE_ASICREV_BCM5750)
2839 bge_free_rx_ring_jumbo(sc);
2841 /* Free TX buffers. */
2842 bge_free_tx_ring(sc);
2844 /*
2845 * Isolate/power down the PHY, but leave the media selection
2846 * unchanged so that things will be put back to normal when
2847 * we bring the interface back up.
2848 */
2849 if (!sc->bge_tbi) {
2850 itmp = ifp->if_flags;
2851 ifp->if_flags |= IFF_UP;
2852 ifm = mii->mii_media.ifm_cur;
2853 mtmp = ifm->ifm_media;
2854 ifm->ifm_media = IFM_ETHER|IFM_NONE;
2855 mii_mediachg(mii);
2856 ifm->ifm_media = mtmp;
2857 ifp->if_flags = itmp;
2858 }
2860 sc->bge_link = 0;
2862 sc->bge_tx_saved_considx = BGE_TXCONS_UNSET;
2864 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
2865 }
2867 /*
2868 * Stop all chip I/O so that the kernel's probe routines don't
2869 * get confused by errant DMAs when rebooting.
2870 */
2871 static void
2872 bge_shutdown(device_t dev)
2873 {
2874 struct bge_softc *sc = device_get_softc(dev);
2875 struct ifnet *ifp = &sc->arpcom.ac_if;
2877 lwkt_serialize_enter(ifp->if_serializer);
2878 bge_stop(sc);
2879 bge_reset(sc);
2880 lwkt_serialize_exit(ifp->if_serializer);
2881 }
2883 static int
2884 bge_suspend(device_t dev)
2885 {
2886 struct bge_softc *sc = device_get_softc(dev);
2887 struct ifnet *ifp = &sc->arpcom.ac_if;
2889 lwkt_serialize_enter(ifp->if_serializer);
2890 bge_stop(sc);
2891 lwkt_serialize_exit(ifp->if_serializer);
2893 return 0;
2894 }
2896 static int
2897 bge_resume(device_t dev)
2898 {
2899 struct bge_softc *sc = device_get_softc(dev);
2900 struct ifnet *ifp = &sc->arpcom.ac_if;
2902 lwkt_serialize_enter(ifp->if_serializer);
2904 if (ifp->if_flags & IFF_UP) {
2905 bge_init(sc);
2907 if (ifp->if_flags & IFF_RUNNING)
2908 ifp->if_start(ifp);
2909 }
2911 lwkt_serialize_exit(ifp->if_serializer);
2913 return 0;
2914 }
2916 static void
2917 bge_setpromisc(struct bge_softc *sc)
2918 {
2919 struct ifnet *ifp = &sc->arpcom.ac_if;
2921 if (ifp->if_flags & IFF_PROMISC)
2922 BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
2923 else
2924 BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
2925 }
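/*
 * BGE_SETBIT()/BGE_CLRBIT() are, per their (assumed) definitions in
 * if_bgereg.h, read-modify-write helpers:
 *
 *	#define BGE_SETBIT(sc, reg, x) \
 *		CSR_WRITE_4(sc, reg, (CSR_READ_4(sc, reg) | (x)))
 *	#define BGE_CLRBIT(sc, reg, x) \
 *		CSR_WRITE_4(sc, reg, (CSR_READ_4(sc, reg) & ~(x)))
 *
 * so toggling RX_PROMISC preserves the other BGE_RX_MODE bits.
 */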