/*
 * Copyright (c) 2001 Wind River Systems
 * Copyright (c) 1997, 1998, 1999, 2001
 *	Bill Paul <wpaul@windriver.com>.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Bill Paul.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/dev/bge/if_bge.c,v 1.3.2.39 2005/07/03 03:41:18 silby Exp $
 * $DragonFly: src/sys/dev/netif/bge/if_bge.c,v 1.74 2007/05/03 14:09:22 sephe Exp $
 */
/*
 * Broadcom BCM570x family gigabit ethernet driver for FreeBSD.
 *
 * Written by Bill Paul <wpaul@windriver.com>
 * Senior Engineer, Wind River Systems
 */

/*
 * The Broadcom BCM5700 is based on technology originally developed by
 * Alteon Networks as part of the Tigon I and Tigon II gigabit ethernet
 * MAC chips. The BCM5700, sometimes referred to as the Tigon III, has
 * two on-board MIPS R4000 CPUs and can have as much as 16MB of external
 * SSRAM. The BCM5700 supports TCP, UDP and IP checksum offload, jumbo
 * frames, highly configurable RX filtering, and 16 RX and TX queues
 * (which, along with RX filter rules, can be used for QOS applications).
 * Other features, such as TCP segmentation, may be available as part
 * of value-added firmware updates. Unlike the Tigon I and Tigon II,
 * firmware images can be stored in hardware and need not be compiled
 * into the driver.
 *
 * The BCM5700 supports the PCI v2.2 and PCI-X v1.0 standards, and will
 * function in a 32-bit/64-bit 33/66MHz bus, or a 64-bit/133MHz bus.
 *
 * The BCM5701 is a single-chip solution incorporating both the BCM5700
 * MAC and a BCM5401 10/100/1000 PHY. Unlike the BCM5700, the BCM5701
 * does not support external SSRAM.
 *
 * Broadcom also produces a variation of the BCM5700 under the "Altima"
 * brand name, which is functionally similar but lacks PCI-X support.
 *
 * Without external SSRAM, you can only have at most 4 TX rings,
 * and the use of the mini RX ring is disabled. This seems to imply
 * that these features are simply not available on the BCM5701. As a
 * result, this driver does not implement any support for the mini RX
 * ring.
 */
#include <sys/param.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/queue.h>
#include <sys/serialize.h>
#include <sys/socket.h>
#include <sys/sockio.h>

#include <net/ethernet.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/ifq_var.h>
#include <net/vlan/if_vlan_var.h>

#include <dev/netif/mii_layer/mii.h>
#include <dev/netif/mii_layer/miivar.h>
#include <dev/netif/mii_layer/brgphyreg.h>

#include <bus/pci/pcidevs.h>
#include <bus/pci/pcireg.h>
#include <bus/pci/pcivar.h>

#include <dev/netif/bge/if_bgereg.h>

/* "device miibus" required.  See GENERIC if you get errors here. */
#include "miibus_if.h"

#define BGE_CSUM_FEATURES	(CSUM_IP | CSUM_TCP | CSUM_UDP)
#define BGE_MIN_FRAME		60
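/*
 * Note: 60 bytes is the minimum Ethernet frame length excluding the 4-byte
 * FCS; frames shorter than BGE_MIN_FRAME are normally padded out before
 * being handed to the hardware.
 */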
/*
 * Various supported device vendors/types and their names. Note: the
 * spec seems to indicate that the hardware still has Alteon's vendor
 * ID burned into it, though it will always be overridden by the vendor
 * ID in the EEPROM. Just to be safe, we cover all possibilities.
 */

#define BGE_DEVDESC_MAX		64	/* Maximum device description length */
static struct bge_type bge_devs[] = {
    { PCI_VENDOR_ALTEON, PCI_PRODUCT_ALTEON_BCM5700,
        "Alteon BCM5700 Gigabit Ethernet" },
    { PCI_VENDOR_ALTEON, PCI_PRODUCT_ALTEON_BCM5701,
        "Alteon BCM5701 Gigabit Ethernet" },
    { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5700,
        "Broadcom BCM5700 Gigabit Ethernet" },
    { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5701,
        "Broadcom BCM5701 Gigabit Ethernet" },
    { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5702X,
        "Broadcom BCM5702X Gigabit Ethernet" },
    { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5702_ALT,
        "Broadcom BCM5702 Gigabit Ethernet" },
    { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5703X,
        "Broadcom BCM5703X Gigabit Ethernet" },
    { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5703A3,
        "Broadcom BCM5703 Gigabit Ethernet" },
    { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5704C,
        "Broadcom BCM5704C Dual Gigabit Ethernet" },
    { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5704S,
        "Broadcom BCM5704S Dual Gigabit Ethernet" },
    { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5705,
        "Broadcom BCM5705 Gigabit Ethernet" },
    { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5705K,
        "Broadcom BCM5705K Gigabit Ethernet" },
    { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5705M,
        "Broadcom BCM5705M Gigabit Ethernet" },
    { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5705M_ALT,
        "Broadcom BCM5705M Gigabit Ethernet" },
    { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5714,
        "Broadcom BCM5714C Gigabit Ethernet" },
    { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5721,
        "Broadcom BCM5721 Gigabit Ethernet" },
    { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5750,
        "Broadcom BCM5750 Gigabit Ethernet" },
    { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5750M,
        "Broadcom BCM5750M Gigabit Ethernet" },
    { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5751,
        "Broadcom BCM5751 Gigabit Ethernet" },
    { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5751M,
        "Broadcom BCM5751M Gigabit Ethernet" },
    { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5752,
        "Broadcom BCM5752 Gigabit Ethernet" },
    { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5782,
        "Broadcom BCM5782 Gigabit Ethernet" },
    { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5788,
        "Broadcom BCM5788 Gigabit Ethernet" },
    { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5789,
        "Broadcom BCM5789 Gigabit Ethernet" },
    { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5901,
        "Broadcom BCM5901 Fast Ethernet" },
    { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5901A2,
        "Broadcom BCM5901A2 Fast Ethernet" },
    { PCI_VENDOR_SCHNEIDERKOCH, PCI_PRODUCT_SCHNEIDERKOCH_SK_9DX1,
        "SysKonnect Gigabit Ethernet" },
    { PCI_VENDOR_ALTIMA, PCI_PRODUCT_ALTIMA_AC1000,
        "Altima AC1000 Gigabit Ethernet" },
    { PCI_VENDOR_ALTIMA, PCI_PRODUCT_ALTIMA_AC1001,
        "Altima AC1002 Gigabit Ethernet" },
    { PCI_VENDOR_ALTIMA, PCI_PRODUCT_ALTIMA_AC9100,
        "Altima AC9100 Gigabit Ethernet" },
    { 0, 0, NULL }
};
static int	bge_probe(device_t);
static int	bge_attach(device_t);
static int	bge_detach(device_t);
static void	bge_release_resources(struct bge_softc *);
static void	bge_txeof(struct bge_softc *);
static void	bge_rxeof(struct bge_softc *);

static void	bge_tick(void *);
static void	bge_stats_update(struct bge_softc *);
static void	bge_stats_update_regs(struct bge_softc *);
static int	bge_encap(struct bge_softc *, struct mbuf *, uint32_t *);

static void	bge_intr(void *);
static void	bge_start(struct ifnet *);
static int	bge_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
static void	bge_init(void *);
static void	bge_stop(struct bge_softc *);
static void	bge_watchdog(struct ifnet *);
static void	bge_shutdown(device_t);
static int	bge_suspend(device_t);
static int	bge_resume(device_t);
static int	bge_ifmedia_upd(struct ifnet *);
static void	bge_ifmedia_sts(struct ifnet *, struct ifmediareq *);

static uint8_t	bge_eeprom_getbyte(struct bge_softc *, uint32_t, uint8_t *);
static int	bge_read_eeprom(struct bge_softc *, caddr_t, uint32_t, size_t);

static void	bge_setmulti(struct bge_softc *);
static void	bge_setpromisc(struct bge_softc *);

static int	bge_alloc_jumbo_mem(struct bge_softc *);
static void	bge_free_jumbo_mem(struct bge_softc *);
static struct bge_jslot
		*bge_jalloc(struct bge_softc *);
static void	bge_jfree(void *);
static void	bge_jref(void *);
static int	bge_newbuf_std(struct bge_softc *, int, struct mbuf *);
static int	bge_newbuf_jumbo(struct bge_softc *, int, struct mbuf *);
static int	bge_init_rx_ring_std(struct bge_softc *);
static void	bge_free_rx_ring_std(struct bge_softc *);
static int	bge_init_rx_ring_jumbo(struct bge_softc *);
static void	bge_free_rx_ring_jumbo(struct bge_softc *);
static void	bge_free_tx_ring(struct bge_softc *);
static int	bge_init_tx_ring(struct bge_softc *);

static int	bge_chipinit(struct bge_softc *);
static int	bge_blockinit(struct bge_softc *);

static uint32_t	bge_readmem_ind(struct bge_softc *, uint32_t);
static void	bge_writemem_ind(struct bge_softc *, uint32_t, uint32_t);
static uint32_t	bge_readreg_ind(struct bge_softc *, uint32_t);
static void	bge_writereg_ind(struct bge_softc *, uint32_t, uint32_t);

static int	bge_miibus_readreg(device_t, int, int);
static int	bge_miibus_writereg(device_t, int, int, int);
static void	bge_miibus_statchg(device_t);
static void	bge_bcm5700_link_upd(struct bge_softc *, uint32_t);
static void	bge_tbi_link_upd(struct bge_softc *, uint32_t);
static void	bge_copper_link_upd(struct bge_softc *, uint32_t);

static void	bge_reset(struct bge_softc *);

static void	bge_dma_map_addr(void *, bus_dma_segment_t *, int, int);
static void	bge_dma_map_mbuf(void *, bus_dma_segment_t *, int,
				 bus_size_t, int);
static int	bge_dma_alloc(struct bge_softc *);
static void	bge_dma_free(struct bge_softc *);
static int	bge_dma_block_alloc(struct bge_softc *, bus_size_t,
				    bus_dma_tag_t *, bus_dmamap_t *,
				    void **, bus_addr_t *);
static void	bge_dma_block_free(bus_dma_tag_t, bus_dmamap_t, void *);
/*
 * Set following tunable to 1 for some IBM blade servers with the DNLK
 * switch module. Auto negotiation is broken for those configurations.
 */
static int	bge_fake_autoneg = 0;
TUNABLE_INT("hw.bge.fake_autoneg", &bge_fake_autoneg);
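/*
 * Like any TUNABLE_INT knob, the tunable above can be preset from the loader
 * before the driver attaches, e.g. with a line such as
 * hw.bge.fake_autoneg="1" in loader.conf (shown only as an illustration of
 * how such tunables are consumed).
 */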
static device_method_t bge_methods[] = {
    /* Device interface */
    DEVMETHOD(device_probe,	bge_probe),
    DEVMETHOD(device_attach,	bge_attach),
    DEVMETHOD(device_detach,	bge_detach),
    DEVMETHOD(device_shutdown,	bge_shutdown),
    DEVMETHOD(device_suspend,	bge_suspend),
    DEVMETHOD(device_resume,	bge_resume),

    DEVMETHOD(bus_print_child,	bus_generic_print_child),
    DEVMETHOD(bus_driver_added,	bus_generic_driver_added),

    DEVMETHOD(miibus_readreg,	bge_miibus_readreg),
    DEVMETHOD(miibus_writereg,	bge_miibus_writereg),
    DEVMETHOD(miibus_statchg,	bge_miibus_statchg),

    { 0, 0 }
};

static DEFINE_CLASS_0(bge, bge_driver, bge_methods, sizeof(struct bge_softc));
static devclass_t bge_devclass;
DECLARE_DUMMY_MODULE(if_bge);
DRIVER_MODULE(if_bge, pci, bge_driver, bge_devclass, 0, 0);
DRIVER_MODULE(miibus, bge, miibus_driver, miibus_devclass, 0, 0);
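/*
 * Register access helpers: the chip exposes indirect access windows in PCI
 * configuration space. Writing a target offset to BGE_PCI_MEMWIN_BASEADDR
 * (or BGE_PCI_REG_BASEADDR) selects the location, and the data is then
 * transferred through the matching _DATA register; the functions below wrap
 * that two-step access.
 */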
static uint32_t
bge_readmem_ind(struct bge_softc *sc, uint32_t off)
{
    device_t dev = sc->bge_dev;

    pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4);
    return(pci_read_config(dev, BGE_PCI_MEMWIN_DATA, 4));
}
static void
bge_writemem_ind(struct bge_softc *sc, uint32_t off, uint32_t val)
{
    device_t dev = sc->bge_dev;

    pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4);
    pci_write_config(dev, BGE_PCI_MEMWIN_DATA, val, 4);
}
static uint32_t
bge_readreg_ind(struct bge_softc *sc, uint32_t off)
{
    device_t dev = sc->bge_dev;

    pci_write_config(dev, BGE_PCI_REG_BASEADDR, off, 4);
    return(pci_read_config(dev, BGE_PCI_REG_DATA, 4));
}
static void
bge_writereg_ind(struct bge_softc *sc, uint32_t off, uint32_t val)
{
    device_t dev = sc->bge_dev;

    pci_write_config(dev, BGE_PCI_REG_BASEADDR, off, 4);
    pci_write_config(dev, BGE_PCI_REG_DATA, val, 4);
}
/*
 * Read a byte of data stored in the EEPROM at address 'addr.' The
 * BCM570x supports both the traditional bitbang interface and an
 * auto access interface for reading the EEPROM. We use the auto
 * access method.
 */
static uint8_t
bge_eeprom_getbyte(struct bge_softc *sc, uint32_t addr, uint8_t *dest)
{
    uint32_t byte;
    int i;

    /*
     * Enable use of auto EEPROM access so we can avoid
     * having to use the bitbang method.
     */
    BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_AUTO_EEPROM);

    /* Reset the EEPROM, load the clock period. */
    CSR_WRITE_4(sc, BGE_EE_ADDR,
        BGE_EEADDR_RESET|BGE_EEHALFCLK(BGE_HALFCLK_384SCL));

    /* Issue the read EEPROM command. */
    CSR_WRITE_4(sc, BGE_EE_ADDR, BGE_EE_READCMD | addr);

    /* Wait for completion */
    for (i = 0; i < BGE_TIMEOUT * 10; i++) {
        if (CSR_READ_4(sc, BGE_EE_ADDR) & BGE_EEADDR_DONE)
            break;
    }

    if (i == BGE_TIMEOUT) {
        if_printf(&sc->arpcom.ac_if, "eeprom read timed out\n");
        return(1);
    }
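    /*
     * The auto access interface always returns a whole 32-bit word from
     * BGE_EE_DATA; the low two bits of 'addr' select which byte of that
     * word the caller asked for, hence the shift and mask below.
     */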
    byte = CSR_READ_4(sc, BGE_EE_DATA);

    *dest = (byte >> ((addr % 4) * 8)) & 0xFF;

    return(0);
}
/*
 * Read a sequence of bytes from the EEPROM.
 */
static int
bge_read_eeprom(struct bge_softc *sc, caddr_t dest, uint32_t off, size_t len)
{
    uint8_t byte;
    size_t i;
    int err;

    for (byte = 0, err = 0, i = 0; i < len; i++) {
        err = bge_eeprom_getbyte(sc, off + i, &byte);
        if (err)
            break;
        dest[i] = byte;
    }

    return(err ? 1 : 0);
}
static int
bge_miibus_readreg(device_t dev, int phy, int reg)
{
    struct bge_softc *sc;
    struct ifnet *ifp;
    uint32_t val, autopoll;
    int i;

    sc = device_get_softc(dev);
    ifp = &sc->arpcom.ac_if;

    /*
     * Broadcom's own driver always assumes the internal
     * PHY is at GMII address 1. On some chips, the PHY responds
     * to accesses at all addresses, which could cause us to
     * bogusly attach the PHY 32 times at probe time. Always
     * restricting the lookup to address 1 is simpler than
     * trying to figure out which chips revisions should be
     * special-cased.
     */
    if (phy != 1)
        return(0);

    /* Reading with autopolling on may trigger PCI errors */
    autopoll = CSR_READ_4(sc, BGE_MI_MODE);
    if (autopoll & BGE_MIMODE_AUTOPOLL) {
        BGE_CLRBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
    }

    CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_READ|BGE_MICOMM_BUSY|
        BGE_MIPHY(phy)|BGE_MIREG(reg));

    for (i = 0; i < BGE_TIMEOUT; i++) {
        val = CSR_READ_4(sc, BGE_MI_COMM);
        if (!(val & BGE_MICOMM_BUSY))
            break;
    }

    if (i == BGE_TIMEOUT) {
        if_printf(ifp, "PHY read timed out\n");
        val = 0;
        goto done;
    }

    val = CSR_READ_4(sc, BGE_MI_COMM);

done:
    if (autopoll & BGE_MIMODE_AUTOPOLL) {
        BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
    }

    if (val & BGE_MICOMM_READFAIL)
        return(0);

    return(val & 0xFFFF);
}
static int
bge_miibus_writereg(device_t dev, int phy, int reg, int val)
{
    struct bge_softc *sc;
    uint32_t autopoll;
    int i;

    sc = device_get_softc(dev);

    /* Reading with autopolling on may trigger PCI errors */
    autopoll = CSR_READ_4(sc, BGE_MI_MODE);
    if (autopoll & BGE_MIMODE_AUTOPOLL) {
        BGE_CLRBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
    }

    CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_WRITE|BGE_MICOMM_BUSY|
        BGE_MIPHY(phy)|BGE_MIREG(reg)|val);

    for (i = 0; i < BGE_TIMEOUT; i++) {
        if (!(CSR_READ_4(sc, BGE_MI_COMM) & BGE_MICOMM_BUSY))
            break;
    }

    if (autopoll & BGE_MIMODE_AUTOPOLL) {
        BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
    }

    if (i == BGE_TIMEOUT) {
        if_printf(&sc->arpcom.ac_if, "PHY write timed out\n");
        return(0);
    }

    return(0);
}
static void
bge_miibus_statchg(device_t dev)
{
    struct bge_softc *sc;
    struct mii_data *mii;

    sc = device_get_softc(dev);
    mii = device_get_softc(sc->bge_miibus);

    BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_PORTMODE);
    if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T) {
        BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_GMII);
    } else {
        BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_MII);
    }

    if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) {
        BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);
    } else {
        BGE_SETBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);
    }
}
/*
 * Memory management for jumbo frames.
 */
static int
bge_alloc_jumbo_mem(struct bge_softc *sc)
{
    struct ifnet *ifp = &sc->arpcom.ac_if;
    struct bge_jslot *entry;
    uint8_t *ptr;
    bus_addr_t paddr;
    int i, error;

    /*
     * Create tag for jumbo mbufs.
     * This is really a bit of a kludge. We allocate a special
     * jumbo buffer pool which (thanks to the way our DMA
     * memory allocation works) will consist of contiguous
     * pages. This means that even though a jumbo buffer might
     * be larger than a page size, we don't really need to
     * map it into more than one DMA segment. However, the
     * default mbuf tag will result in multi-segment mappings,
     * so we have to create a special jumbo mbuf tag that
     * lets us get away with mapping the jumbo buffers as
     * a single segment. I think eventually the driver should
     * be changed so that it uses ordinary mbufs and cluster
     * buffers, i.e. jumbo frames can span multiple DMA
     * descriptors. But that's a project for another day.
     */

    /*
     * Create DMA stuffs for jumbo RX ring.
     */
    error = bge_dma_block_alloc(sc, BGE_JUMBO_RX_RING_SZ,
                                &sc->bge_cdata.bge_rx_jumbo_ring_tag,
                                &sc->bge_cdata.bge_rx_jumbo_ring_map,
                                (void **)&sc->bge_ldata.bge_rx_jumbo_ring,
                                &sc->bge_ldata.bge_rx_jumbo_ring_paddr);
    if (error) {
        if_printf(ifp, "could not create jumbo RX ring\n");
        return(error);
    }

    /*
     * Create DMA stuffs for jumbo buffer block.
     */
    error = bge_dma_block_alloc(sc, BGE_JMEM,
                                &sc->bge_cdata.bge_jumbo_tag,
                                &sc->bge_cdata.bge_jumbo_map,
                                (void **)&sc->bge_ldata.bge_jumbo_buf,
                                &paddr);
    if (error) {
        if_printf(ifp, "could not create jumbo buffer\n");
        return(error);
    }

    SLIST_INIT(&sc->bge_jfree_listhead);

    /*
     * Now divide it up into 9K pieces and save the addresses
     * in an array. Note that we play an evil trick here by using
     * the first few bytes in the buffer to hold the address
     * of the softc structure for this interface. This is because
     * bge_jfree() needs it, but it is called by the mbuf management
     * code which will not pass it to us explicitly.
     */
    for (i = 0, ptr = sc->bge_ldata.bge_jumbo_buf; i < BGE_JSLOTS; i++) {
        entry = &sc->bge_cdata.bge_jslots[i];
        entry->bge_sc = sc;
        entry->bge_buf = ptr;
        entry->bge_paddr = paddr;
        entry->bge_inuse = 0;
        entry->bge_slot = i;
        SLIST_INSERT_HEAD(&sc->bge_jfree_listhead, entry, jslot_link);
        ptr += BGE_JLEN;
        paddr += BGE_JLEN;
    }

    return(0);
}
static void
bge_free_jumbo_mem(struct bge_softc *sc)
{
    /* Destroy jumbo RX ring. */
    bge_dma_block_free(sc->bge_cdata.bge_rx_jumbo_ring_tag,
                       sc->bge_cdata.bge_rx_jumbo_ring_map,
                       sc->bge_ldata.bge_rx_jumbo_ring);

    /* Destroy jumbo buffer block. */
    bge_dma_block_free(sc->bge_cdata.bge_jumbo_tag,
                       sc->bge_cdata.bge_jumbo_map,
                       sc->bge_ldata.bge_jumbo_buf);
}
/*
 * Allocate a jumbo buffer.
 */
static struct bge_jslot *
bge_jalloc(struct bge_softc *sc)
{
    struct bge_jslot *entry;

    lwkt_serialize_enter(&sc->bge_jslot_serializer);
    entry = SLIST_FIRST(&sc->bge_jfree_listhead);
    if (entry != NULL) {
        SLIST_REMOVE_HEAD(&sc->bge_jfree_listhead, jslot_link);
        entry->bge_inuse = 1;
    } else {
        if_printf(&sc->arpcom.ac_if, "no free jumbo buffers\n");
    }
    lwkt_serialize_exit(&sc->bge_jslot_serializer);
    return(entry);
}
/*
 * Adjust usage count on a jumbo buffer.
 */
static void
bge_jref(void *arg)
{
    struct bge_jslot *entry = (struct bge_jslot *)arg;
    struct bge_softc *sc = entry->bge_sc;

    if (sc == NULL)
        panic("bge_jref: can't find softc pointer!");

    if (&sc->bge_cdata.bge_jslots[entry->bge_slot] != entry) {
        panic("bge_jref: asked to reference buffer "
              "that we don't manage!");
    } else if (entry->bge_inuse == 0) {
        panic("bge_jref: buffer already free!");
    } else {
        atomic_add_int(&entry->bge_inuse, 1);
    }
}
/*
 * Release a jumbo buffer.
 */
static void
bge_jfree(void *arg)
{
    struct bge_jslot *entry = (struct bge_jslot *)arg;
    struct bge_softc *sc = entry->bge_sc;

    if (sc == NULL)
        panic("bge_jfree: can't find softc pointer!");

    if (&sc->bge_cdata.bge_jslots[entry->bge_slot] != entry) {
        panic("bge_jfree: asked to free buffer that we don't manage!");
    } else if (entry->bge_inuse == 0) {
        panic("bge_jfree: buffer already free!");
    } else {
        /*
         * Possible MP race to 0, use the serializer. The atomic insn
         * is still needed for races against bge_jref().
         */
        lwkt_serialize_enter(&sc->bge_jslot_serializer);
        atomic_subtract_int(&entry->bge_inuse, 1);
        if (entry->bge_inuse == 0) {
            SLIST_INSERT_HEAD(&sc->bge_jfree_listhead,
                              entry, jslot_link);
        }
        lwkt_serialize_exit(&sc->bge_jslot_serializer);
    }
}
/*
 * Initialize a standard receive ring descriptor.
 */
static int
bge_newbuf_std(struct bge_softc *sc, int i, struct mbuf *m)
{
    struct mbuf *m_new = NULL;
    struct bge_dmamap_arg ctx;
    bus_dma_segment_t seg;
    struct bge_rx_bd *r;
    int error;

    if (m == NULL) {
        m_new = m_getcl(MB_DONTWAIT, MT_DATA, M_PKTHDR);
        if (m_new == NULL)
            return(ENOBUFS);
    } else {
        m_new = m;
        m_new->m_data = m_new->m_ext.ext_buf;
    }
    m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;

    if (!sc->bge_rx_alignment_bug)
        m_adj(m_new, ETHER_ALIGN);

    ctx.bge_maxsegs = 1;
    ctx.bge_segs = &seg;
    error = bus_dmamap_load_mbuf(sc->bge_cdata.bge_mtag,
                                 sc->bge_cdata.bge_rx_std_dmamap[i],
                                 m_new, bge_dma_map_mbuf, &ctx,
                                 BUS_DMA_NOWAIT);
    if (error || ctx.bge_maxsegs == 0) {
        if (m == NULL)
            m_freem(m_new);
        return(ENOMEM);
    }

    sc->bge_cdata.bge_rx_std_chain[i] = m_new;

    r = &sc->bge_ldata.bge_rx_std_ring[i];
    r->bge_addr.bge_addr_lo = BGE_ADDR_LO(ctx.bge_segs[0].ds_addr);
    r->bge_addr.bge_addr_hi = BGE_ADDR_HI(ctx.bge_segs[0].ds_addr);
    r->bge_flags = BGE_RXBDFLAG_END;
    r->bge_len = m_new->m_len;
    r->bge_idx = i;

    bus_dmamap_sync(sc->bge_cdata.bge_mtag,
                    sc->bge_cdata.bge_rx_std_dmamap[i],
                    BUS_DMASYNC_PREREAD);
    return(0);
}
/*
 * Initialize a jumbo receive ring descriptor. This allocates
 * a jumbo buffer from the pool managed internally by the driver.
 */
static int
bge_newbuf_jumbo(struct bge_softc *sc, int i, struct mbuf *m)
{
    struct mbuf *m_new = NULL;
    struct bge_jslot *buf;
    struct bge_rx_bd *r;
    bus_addr_t paddr;

    if (m == NULL) {
        /* Allocate the mbuf. */
        MGETHDR(m_new, MB_DONTWAIT, MT_DATA);
        if (m_new == NULL)
            return(ENOBUFS);

        /* Allocate the jumbo buffer */
        buf = bge_jalloc(sc);
        if (buf == NULL) {
            m_freem(m_new);
            if_printf(&sc->arpcom.ac_if, "jumbo allocation failed "
                      "-- packet dropped!\n");
            return(ENOBUFS);
        }

        /* Attach the buffer to the mbuf. */
        m_new->m_ext.ext_arg = buf;
        m_new->m_ext.ext_buf = buf->bge_buf;
        m_new->m_ext.ext_free = bge_jfree;
        m_new->m_ext.ext_ref = bge_jref;
        m_new->m_ext.ext_size = BGE_JUMBO_FRAMELEN;

        m_new->m_flags |= M_EXT;
    } else {
        KKASSERT(m->m_flags & M_EXT);
        m_new = m;
        buf = m_new->m_ext.ext_arg;
    }
    m_new->m_data = m_new->m_ext.ext_buf;
    m_new->m_len = m_new->m_pkthdr.len = m_new->m_ext.ext_size;

    paddr = buf->bge_paddr;
    if (!sc->bge_rx_alignment_bug) {
        m_adj(m_new, ETHER_ALIGN);
        paddr += ETHER_ALIGN;
    }

    /* Set up the descriptor. */
    sc->bge_cdata.bge_rx_jumbo_chain[i] = m_new;

    r = &sc->bge_ldata.bge_rx_jumbo_ring[i];
    r->bge_addr.bge_addr_lo = BGE_ADDR_LO(paddr);
    r->bge_addr.bge_addr_hi = BGE_ADDR_HI(paddr);
    r->bge_flags = BGE_RXBDFLAG_END|BGE_RXBDFLAG_JUMBO_RING;
    r->bge_len = m_new->m_len;
    r->bge_idx = i;

    return(0);
}
/*
 * The standard receive ring has 512 entries in it. At 2K per mbuf cluster,
 * that's 1MB of memory, which is a lot. For now, we fill only the first
 * 256 ring entries and hope that our CPU is fast enough to keep up with
 * the NIC.
 */
static int
bge_init_rx_ring_std(struct bge_softc *sc)
{
    int i;

    for (i = 0; i < BGE_SSLOTS; i++) {
        if (bge_newbuf_std(sc, i, NULL) == ENOBUFS)
            return(ENOBUFS);
    }

    bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
                    sc->bge_cdata.bge_rx_std_ring_map,
                    BUS_DMASYNC_PREWRITE);
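    /*
     * Hand the freshly filled descriptors to the chip: writing the
     * producer index to the standard RX mailbox below is what makes the
     * new buffers visible to the receive engine.
     */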
    sc->bge_std = i - 1;
    CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std);

    return(0);
}
static void
bge_free_rx_ring_std(struct bge_softc *sc)
{
    int i;

    for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
        if (sc->bge_cdata.bge_rx_std_chain[i] != NULL) {
            bus_dmamap_unload(sc->bge_cdata.bge_mtag,
                              sc->bge_cdata.bge_rx_std_dmamap[i]);
            m_freem(sc->bge_cdata.bge_rx_std_chain[i]);
            sc->bge_cdata.bge_rx_std_chain[i] = NULL;
        }
        bzero(&sc->bge_ldata.bge_rx_std_ring[i],
              sizeof(struct bge_rx_bd));
    }
}
static int
bge_init_rx_ring_jumbo(struct bge_softc *sc)
{
    struct bge_rcb *rcb;
    int i;

    for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
        if (bge_newbuf_jumbo(sc, i, NULL) == ENOBUFS)
            return(ENOBUFS);
    }

    bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
                    sc->bge_cdata.bge_rx_jumbo_ring_map,
                    BUS_DMASYNC_PREWRITE);

    sc->bge_jumbo = i - 1;

    rcb = &sc->bge_ldata.bge_info.bge_jumbo_rx_rcb;
    rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(0, 0);
    CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);

    CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo);

    return(0);
}
static void
bge_free_rx_ring_jumbo(struct bge_softc *sc)
{
    int i;

    for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
        if (sc->bge_cdata.bge_rx_jumbo_chain[i] != NULL) {
            m_freem(sc->bge_cdata.bge_rx_jumbo_chain[i]);
            sc->bge_cdata.bge_rx_jumbo_chain[i] = NULL;
        }
        bzero(&sc->bge_ldata.bge_rx_jumbo_ring[i],
              sizeof(struct bge_rx_bd));
    }
}
static void
bge_free_tx_ring(struct bge_softc *sc)
{
    int i;

    for (i = 0; i < BGE_TX_RING_CNT; i++) {
        if (sc->bge_cdata.bge_tx_chain[i] != NULL) {
            bus_dmamap_unload(sc->bge_cdata.bge_mtag,
                              sc->bge_cdata.bge_tx_dmamap[i]);
            m_freem(sc->bge_cdata.bge_tx_chain[i]);
            sc->bge_cdata.bge_tx_chain[i] = NULL;
        }
        bzero(&sc->bge_ldata.bge_tx_ring[i],
              sizeof(struct bge_tx_bd));
    }
}
static int
bge_init_tx_ring(struct bge_softc *sc)
{
    sc->bge_tx_saved_considx = 0;
    sc->bge_tx_prodidx = 0;

    /* Initialize transmit producer index for host-memory send ring. */
    CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bge_tx_prodidx);
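    /*
     * The duplicated mailbox writes below for BGE_CHIPREV_5700_BX parts
     * look like an errata workaround inherited from the vendor driver:
     * those revisions apparently want the producer index posted twice.
     */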
    if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
        CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, 0);

    CSR_WRITE_4(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);
    if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
        CSR_WRITE_4(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);

    return(0);
}
static void
bge_setmulti(struct bge_softc *sc)
{
    struct ifnet *ifp;
    struct ifmultiaddr *ifma;
    uint32_t hashes[4] = { 0, 0, 0, 0 };
    int h, i;

    ifp = &sc->arpcom.ac_if;

    if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
        for (i = 0; i < 4; i++)
            CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0xFFFFFFFF);
        return;
    }

    /* First, zot all the existing filters. */
    for (i = 0; i < 4; i++)
        CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0);

    /* Now program new ones. */
    LIST_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
        if (ifma->ifma_addr->sa_family != AF_LINK)
            continue;
        h = ether_crc32_le(
            LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
            ETHER_ADDR_LEN) & 0x7f;
        hashes[(h & 0x60) >> 5] |= 1 << (h & 0x1F);
    }
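    /*
     * The low 7 bits of the little-endian CRC pick one of 128 filter
     * bits: bits 5-6 select which of the four 32-bit BGE_MAR registers
     * to set and bits 0-4 select the bit within that register.
     */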
    for (i = 0; i < 4; i++)
        CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), hashes[i]);
}
/*
 * Do endian, PCI and DMA initialization. Also check the on-board ROM
 * to make sure the firmware image is valid.
 */
static int
bge_chipinit(struct bge_softc *sc)
{
    uint32_t dma_rw_ctl;
    int i;

    /* Set endian type before we access any non-PCI registers. */
    pci_write_config(sc->bge_dev, BGE_PCI_MISC_CTL, BGE_INIT, 4);

    /*
     * Check the 'ROM failed' bit on the RX CPU to see if
     * the self-diagnostics passed.
     */
    if (CSR_READ_4(sc, BGE_RXCPU_MODE) & BGE_RXCPUMODE_ROMFAIL) {
        if_printf(&sc->arpcom.ac_if,
                  "RX CPU self-diagnostics failed!\n");
        return(ENODEV);
    }

    /* Clear the MAC control register */
    CSR_WRITE_4(sc, BGE_MAC_MODE, 0);

    /*
     * Clear the MAC statistics block in the NIC's
     * internal memory.
     */
    for (i = BGE_STATS_BLOCK;
         i < BGE_STATS_BLOCK_END + 1; i += sizeof(uint32_t))
        BGE_MEMWIN_WRITE(sc, i, 0);

    for (i = BGE_STATUS_BLOCK;
         i < BGE_STATUS_BLOCK_END + 1; i += sizeof(uint32_t))
        BGE_MEMWIN_WRITE(sc, i, 0);

    /* Set up the PCI DMA control register. */
    if (sc->bge_pcie) {
        /* PCI Express */
        dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD |
            (0xf << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
            (0x2 << BGE_PCIDMARWCTL_WR_WAT_SHIFT);
    } else if (pci_read_config(sc->bge_dev, BGE_PCI_PCISTATE, 4) &
               BGE_PCISTATE_PCI_BUSMODE) {
        /* Conventional PCI bus */
        dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD |
            (0x7 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
            (0x7 << BGE_PCIDMARWCTL_WR_WAT_SHIFT) |
            0x0F;
    } else {
        /* PCI-X bus */
        /*
         * The 5704 uses a different encoding of read/write
         * watermarks.
         */
        if (sc->bge_asicrev == BGE_ASICREV_BCM5704)
            dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD |
                (0x7 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
                (0x3 << BGE_PCIDMARWCTL_WR_WAT_SHIFT);
        else
            dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD |
                (0x3 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
                (0x3 << BGE_PCIDMARWCTL_WR_WAT_SHIFT) |
                0x0F;

        /*
         * 5703 and 5704 need ONEDMA_AT_ONCE as a workaround
         * for hardware bugs.
         */
        if (sc->bge_asicrev == BGE_ASICREV_BCM5703 ||
            sc->bge_asicrev == BGE_ASICREV_BCM5704) {
            uint32_t tmp;

            tmp = CSR_READ_4(sc, BGE_PCI_CLKCTL) & 0x1f;
            if (tmp == 0x6 || tmp == 0x7)
                dma_rw_ctl |= BGE_PCIDMARWCTL_ONEDMA_ATONCE;
        }
    }

    if (sc->bge_asicrev == BGE_ASICREV_BCM5703 ||
        sc->bge_asicrev == BGE_ASICREV_BCM5704 ||
        sc->bge_asicrev == BGE_ASICREV_BCM5705 ||
        sc->bge_asicrev == BGE_ASICREV_BCM5750)
        dma_rw_ctl &= ~BGE_PCIDMARWCTL_MINDMA;
    pci_write_config(sc->bge_dev, BGE_PCI_DMA_RW_CTL, dma_rw_ctl, 4);

    /*
     * Set up general mode register.
     */
    CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_DMA_SWAP_OPTIONS|
        BGE_MODECTL_MAC_ATTN_INTR|BGE_MODECTL_HOST_SEND_BDS|
        BGE_MODECTL_TX_NO_PHDR_CSUM);

    /*
     * Disable memory write invalidate. Apparently it is not supported
     * properly by these devices.
     */
    PCI_CLRBIT(sc->bge_dev, BGE_PCI_CMD, PCIM_CMD_MWIEN, 4);

    /* Set the timer prescaler (always 66MHz) */
    CSR_WRITE_4(sc, BGE_MISC_CFG, 65 << 1/*BGE_32BITTIME_66MHZ*/);
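    /*
     * The 65 << 1 value matches the BGE_32BITTIME_66MHZ encoding noted
     * above: the prescaler presumably divides the 66MHz core clock down
     * to the fixed rate the chip's internal timers expect.
     */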
    return(0);
}

static int
bge_blockinit(struct bge_softc *sc)
{
    struct bge_rcb *rcb;
    bge_hostaddr taddr;
    uint32_t vrcb;
    int i;

    /*
     * Initialize the memory window pointer register so that
     * we can access the first 32K of internal NIC RAM. This will
     * allow us to set up the TX send ring RCBs and the RX return
     * ring RCBs, plus other things which live in NIC memory.
     */
    CSR_WRITE_4(sc, BGE_PCI_MEMWIN_BASEADDR, 0);

    /* Note: the BCM5704 has a smaller mbuf space than other chips. */

    if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
        sc->bge_asicrev != BGE_ASICREV_BCM5750) {
        /* Configure mbuf memory pool */
        if (sc->bge_extram) {
            CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_BASEADDR,
                        BGE_EXT_SSRAM);
            if (sc->bge_asicrev == BGE_ASICREV_BCM5704)
                CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x10000);
            else
                CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x18000);
        } else {
            CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_BASEADDR,
                        BGE_BUFFPOOL_1);
            if (sc->bge_asicrev == BGE_ASICREV_BCM5704)
                CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x10000);
            else
                CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x18000);
        }

        /* Configure DMA resource pool */
        CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_BASEADDR,
                    BGE_DMA_DESCRIPTORS);
        CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LEN, 0x2000);
    }

    /* Configure mbuf pool watermarks */
    if (sc->bge_asicrev == BGE_ASICREV_BCM5705 ||
        sc->bge_asicrev == BGE_ASICREV_BCM5750) {
        CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0);
        CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x10);
    } else {
        CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x50);
        CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x20);
    }
    CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60);

    /* Configure DMA resource watermarks */
    CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LOWAT, 5);
    CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_HIWAT, 10);

    /* Enable buffer manager */
    if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
        sc->bge_asicrev != BGE_ASICREV_BCM5750) {
        CSR_WRITE_4(sc, BGE_BMAN_MODE,
                    BGE_BMANMODE_ENABLE|BGE_BMANMODE_LOMBUF_ATTN);

        /* Poll for buffer manager start indication */
        for (i = 0; i < BGE_TIMEOUT; i++) {
            if (CSR_READ_4(sc, BGE_BMAN_MODE) & BGE_BMANMODE_ENABLE)
                break;
        }

        if (i == BGE_TIMEOUT) {
            if_printf(&sc->arpcom.ac_if,
                      "buffer manager failed to start\n");
            return(ENXIO);
        }
    }
    /* Enable flow-through queues */
    CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
    CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);

    /* Wait until queue initialization is complete */
    for (i = 0; i < BGE_TIMEOUT; i++) {
        if (CSR_READ_4(sc, BGE_FTQ_RESET) == 0)
            break;
    }

    if (i == BGE_TIMEOUT) {
        if_printf(&sc->arpcom.ac_if,
                  "flow-through queue init failed\n");
        return(ENXIO);
    }

    /* Initialize the standard RX ring control block */
    rcb = &sc->bge_ldata.bge_info.bge_std_rx_rcb;
    rcb->bge_hostaddr.bge_addr_lo =
        BGE_ADDR_LO(sc->bge_ldata.bge_rx_std_ring_paddr);
    rcb->bge_hostaddr.bge_addr_hi =
        BGE_ADDR_HI(sc->bge_ldata.bge_rx_std_ring_paddr);
    bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
                    sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_PREREAD);
    if (sc->bge_asicrev == BGE_ASICREV_BCM5705 ||
        sc->bge_asicrev == BGE_ASICREV_BCM5750)
        rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(512, 0);
    else
        rcb->bge_maxlen_flags =
            BGE_RCB_MAXLEN_FLAGS(BGE_MAX_FRAMELEN, 0);
    if (sc->bge_extram)
        rcb->bge_nicaddr = BGE_EXT_STD_RX_RINGS;
    else
        rcb->bge_nicaddr = BGE_STD_RX_RINGS;
    CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_HI, rcb->bge_hostaddr.bge_addr_hi);
    CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_LO, rcb->bge_hostaddr.bge_addr_lo);
    CSR_WRITE_4(sc, BGE_RX_STD_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);
    CSR_WRITE_4(sc, BGE_RX_STD_RCB_NICADDR, rcb->bge_nicaddr);

    /*
     * Initialize the jumbo RX ring control block
     * We set the 'ring disabled' bit in the flags
     * field until we're actually ready to start
     * using this ring (i.e. once we set the MTU
     * high enough to require it).
     */
    if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
        sc->bge_asicrev != BGE_ASICREV_BCM5750) {
        rcb = &sc->bge_ldata.bge_info.bge_jumbo_rx_rcb;

        rcb->bge_hostaddr.bge_addr_lo =
            BGE_ADDR_LO(sc->bge_ldata.bge_rx_jumbo_ring_paddr);
        rcb->bge_hostaddr.bge_addr_hi =
            BGE_ADDR_HI(sc->bge_ldata.bge_rx_jumbo_ring_paddr);
        bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
                        sc->bge_cdata.bge_rx_jumbo_ring_map,
                        BUS_DMASYNC_PREREAD);
        rcb->bge_maxlen_flags =
            BGE_RCB_MAXLEN_FLAGS(BGE_MAX_FRAMELEN,
                                 BGE_RCB_FLAG_RING_DISABLED);
        if (sc->bge_extram)
            rcb->bge_nicaddr = BGE_EXT_JUMBO_RX_RINGS;
        else
            rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS;
        CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_HI,
                    rcb->bge_hostaddr.bge_addr_hi);
        CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_LO,
                    rcb->bge_hostaddr.bge_addr_lo);
        CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS,
                    rcb->bge_maxlen_flags);
        CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_NICADDR, rcb->bge_nicaddr);

        /* Set up dummy disabled mini ring RCB */
        rcb = &sc->bge_ldata.bge_info.bge_mini_rx_rcb;
        rcb->bge_maxlen_flags =
            BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED);
        CSR_WRITE_4(sc, BGE_RX_MINI_RCB_MAXLEN_FLAGS,
                    rcb->bge_maxlen_flags);
    }
    /*
     * Set the BD ring replenish thresholds. The recommended
     * values are 1/8th the number of descriptors allocated to
     * each ring.
     */
    CSR_WRITE_4(sc, BGE_RBDI_STD_REPL_THRESH, BGE_STD_RX_RING_CNT/8);
    CSR_WRITE_4(sc, BGE_RBDI_JUMBO_REPL_THRESH, BGE_JUMBO_RX_RING_CNT/8);

    /*
     * Disable all unused send rings by setting the 'ring disabled'
     * bit in the flags field of all the TX send ring control blocks.
     * These are located in NIC memory.
     */
    vrcb = BGE_MEMWIN_START + BGE_SEND_RING_RCB;
    for (i = 0; i < BGE_TX_RINGS_EXTSSRAM_MAX; i++) {
        RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
                    BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED));
        RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0);
        vrcb += sizeof(struct bge_rcb);
    }

    /* Configure TX RCB 0 (we use only the first ring) */
    vrcb = BGE_MEMWIN_START + BGE_SEND_RING_RCB;
    BGE_HOSTADDR(taddr, sc->bge_ldata.bge_tx_ring_paddr);
    RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi);
    RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo);
    RCB_WRITE_4(sc, vrcb, bge_nicaddr,
                BGE_NIC_TXRING_ADDR(0, BGE_TX_RING_CNT));
    if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
        sc->bge_asicrev != BGE_ASICREV_BCM5750) {
        RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
                    BGE_RCB_MAXLEN_FLAGS(BGE_TX_RING_CNT, 0));
    }

    /* Disable all unused RX return rings */
    vrcb = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB;
    for (i = 0; i < BGE_RX_RINGS_MAX; i++) {
        RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, 0);
        RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, 0);
        RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
                    BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt,
                                         BGE_RCB_FLAG_RING_DISABLED));
        RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0);
        CSR_WRITE_4(sc, BGE_MBX_RX_CONS0_LO +
                    (i * (sizeof(uint64_t))), 0);
        vrcb += sizeof(struct bge_rcb);
    }

    /* Initialize RX ring indexes */
    CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, 0);
    CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, 0);
    CSR_WRITE_4(sc, BGE_MBX_RX_MINI_PROD_LO, 0);

    /*
     * Set up RX return ring 0
     * Note that the NIC address for RX return rings is 0x00000000.
     * The return rings live entirely within the host, so the
     * nicaddr field in the RCB isn't used.
     */
    vrcb = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB;
    BGE_HOSTADDR(taddr, sc->bge_ldata.bge_rx_return_ring_paddr);
    RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi);
    RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo);
    RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0x00000000);
    RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
                BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt, 0));

    /* Set random backoff seed for TX */
    CSR_WRITE_4(sc, BGE_TX_RANDOM_BACKOFF,
                sc->arpcom.ac_enaddr[0] + sc->arpcom.ac_enaddr[1] +
                sc->arpcom.ac_enaddr[2] + sc->arpcom.ac_enaddr[3] +
                sc->arpcom.ac_enaddr[4] + sc->arpcom.ac_enaddr[5] +
                BGE_TX_BACKOFF_SEED_MASK);
    /* Set inter-packet gap */
    CSR_WRITE_4(sc, BGE_TX_LENGTHS, 0x2620);

    /*
     * Specify which ring to use for packets that don't match
     * any RX rules.
     */
    CSR_WRITE_4(sc, BGE_RX_RULES_CFG, 0x08);

    /*
     * Configure number of RX lists. One interrupt distribution
     * list, sixteen active lists, one bad frames class.
     */
    CSR_WRITE_4(sc, BGE_RXLP_CFG, 0x181);

    /* Initialize RX list placement stats mask. */
    CSR_WRITE_4(sc, BGE_RXLP_STATS_ENABLE_MASK, 0x007FFFFF);
    CSR_WRITE_4(sc, BGE_RXLP_STATS_CTL, 0x1);

    /* Disable host coalescing until we get it set up */
    CSR_WRITE_4(sc, BGE_HCC_MODE, 0x00000000);

    /* Poll to make sure it's shut down. */
    for (i = 0; i < BGE_TIMEOUT; i++) {
        if (!(CSR_READ_4(sc, BGE_HCC_MODE) & BGE_HCCMODE_ENABLE))
            break;
    }

    if (i == BGE_TIMEOUT) {
        if_printf(&sc->arpcom.ac_if,
                  "host coalescing engine failed to idle\n");
        return(ENXIO);
    }

    /* Set up host coalescing defaults */
    CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS, sc->bge_rx_coal_ticks);
    CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS, sc->bge_tx_coal_ticks);
    CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS, sc->bge_rx_max_coal_bds);
    CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS, sc->bge_tx_max_coal_bds);
    if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
        sc->bge_asicrev != BGE_ASICREV_BCM5750) {
        CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS_INT, 0);
        CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS_INT, 0);
    }
    CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT, 0);
    CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT, 0);

    /* Set up address of statistics block */
    if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
        sc->bge_asicrev != BGE_ASICREV_BCM5750) {
        CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_HI,
                    BGE_ADDR_HI(sc->bge_ldata.bge_stats_paddr));
        CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_LO,
                    BGE_ADDR_LO(sc->bge_ldata.bge_stats_paddr));

        CSR_WRITE_4(sc, BGE_HCC_STATS_BASEADDR, BGE_STATS_BLOCK);
        CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_BASEADDR, BGE_STATUS_BLOCK);
        CSR_WRITE_4(sc, BGE_HCC_STATS_TICKS, sc->bge_stat_ticks);
    }

    /* Set up address of status block */
    CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_HI,
                BGE_ADDR_HI(sc->bge_ldata.bge_status_block_paddr));
    CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_LO,
                BGE_ADDR_LO(sc->bge_ldata.bge_status_block_paddr));
    sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx = 0;
    sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx = 0;

    /* Turn on host coalescing state machine */
    CSR_WRITE_4(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE);

    /* Turn on RX BD completion state machine and enable attentions */
    CSR_WRITE_4(sc, BGE_RBDC_MODE,
                BGE_RBDCMODE_ENABLE|BGE_RBDCMODE_ATTN);

    /* Turn on RX list placement state machine */
    CSR_WRITE_4(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);

    /* Turn on RX list selector state machine. */
    if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
        sc->bge_asicrev != BGE_ASICREV_BCM5750)
        CSR_WRITE_4(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);

    /* Turn on DMA, clear stats */
    CSR_WRITE_4(sc, BGE_MAC_MODE, BGE_MACMODE_TXDMA_ENB|
                BGE_MACMODE_RXDMA_ENB|BGE_MACMODE_RX_STATS_CLEAR|
                BGE_MACMODE_TX_STATS_CLEAR|BGE_MACMODE_RX_STATS_ENB|
                BGE_MACMODE_TX_STATS_ENB|BGE_MACMODE_FRMHDR_DMA_ENB|
                (sc->bge_tbi ? BGE_PORTMODE_TBI : BGE_PORTMODE_MII));

    /* Set misc. local control, enable interrupts on attentions */
    CSR_WRITE_4(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_ONATTN);
    /* Assert GPIO pins for PHY reset */
    BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUT0|
               BGE_MLC_MISCIO_OUT1|BGE_MLC_MISCIO_OUT2);
    BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUTEN0|
               BGE_MLC_MISCIO_OUTEN1|BGE_MLC_MISCIO_OUTEN2);

    /* Turn on DMA completion state machine */
    if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
        sc->bge_asicrev != BGE_ASICREV_BCM5750)
        CSR_WRITE_4(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);

    /* Turn on write DMA state machine */
    CSR_WRITE_4(sc, BGE_WDMA_MODE,
                BGE_WDMAMODE_ENABLE|BGE_WDMAMODE_ALL_ATTNS);

    /* Turn on read DMA state machine */
    CSR_WRITE_4(sc, BGE_RDMA_MODE,
                BGE_RDMAMODE_ENABLE|BGE_RDMAMODE_ALL_ATTNS);

    /* Turn on RX data completion state machine */
    CSR_WRITE_4(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);

    /* Turn on RX BD initiator state machine */
    CSR_WRITE_4(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);

    /* Turn on RX data and RX BD initiator state machine */
    CSR_WRITE_4(sc, BGE_RDBDI_MODE, BGE_RDBDIMODE_ENABLE);

    /* Turn on Mbuf cluster free state machine */
    if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
        sc->bge_asicrev != BGE_ASICREV_BCM5750)
        CSR_WRITE_4(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);

    /* Turn on send BD completion state machine */
    CSR_WRITE_4(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);

    /* Turn on send data completion state machine */
    CSR_WRITE_4(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE);

    /* Turn on send data initiator state machine */
    CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);

    /* Turn on send BD initiator state machine */
    CSR_WRITE_4(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);

    /* Turn on send BD selector state machine */
    CSR_WRITE_4(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);

    CSR_WRITE_4(sc, BGE_SDI_STATS_ENABLE_MASK, 0x007FFFFF);
    CSR_WRITE_4(sc, BGE_SDI_STATS_CTL,
                BGE_SDISTATSCTL_ENABLE|BGE_SDISTATSCTL_FASTER);

    /* ack/clear link change events */
    CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED|
                BGE_MACSTAT_CFG_CHANGED|BGE_MACSTAT_MI_COMPLETE|
                BGE_MACSTAT_LINK_CHANGED);
    CSR_WRITE_4(sc, BGE_MI_STS, 0);

    /* Enable PHY auto polling (for MII/GMII only) */
    if (sc->bge_tbi) {
        CSR_WRITE_4(sc, BGE_MI_STS, BGE_MISTS_LINK);
    } else {
        BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL|10<<16);
        if (sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
            sc->bge_chipid != BGE_CHIPID_BCM5700_B2) {
            CSR_WRITE_4(sc, BGE_MAC_EVT_ENB,
                        BGE_EVTENB_MI_INTERRUPT);
        }
    }

    /*
     * Clear any pending link state attention.
     * Otherwise some link state change events may be lost until attention
     * is cleared by bge_intr() -> bge_softc.bge_link_upd() sequence.
     * It's not necessary on newer BCM chips - perhaps enabling link
     * state change attentions implies clearing pending attention.
     */
    CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED|
                BGE_MACSTAT_CFG_CHANGED|BGE_MACSTAT_MI_COMPLETE|
                BGE_MACSTAT_LINK_CHANGED);

    /* Enable link state change attentions. */
    BGE_SETBIT(sc, BGE_MAC_EVT_ENB, BGE_EVTENB_LINK_CHANGED);

    return(0);
}
/*
 * Probe for a Broadcom chip. Check the PCI vendor and device IDs
 * against our list and return its name if we find a match. Note
 * that since the Broadcom controller contains VPD support, we
 * can get the device name string from the controller itself instead
 * of the compiled-in string. This is a little slow, but it guarantees
 * we'll always announce the right product name.
 */
static int
bge_probe(device_t dev)
{
    struct bge_softc *sc;
    struct bge_type *t;
    char *descbuf;
    uint16_t product, vendor;

    product = pci_get_device(dev);
    vendor = pci_get_vendor(dev);

    for (t = bge_devs; t->bge_name != NULL; t++) {
        if (vendor == t->bge_vid && product == t->bge_did)
            break;
    }

    if (t->bge_name == NULL)
        return(ENXIO);

    sc = device_get_softc(dev);
    descbuf = kmalloc(BGE_DEVDESC_MAX, M_TEMP, M_WAITOK);
    ksnprintf(descbuf, BGE_DEVDESC_MAX, "%s, ASIC rev. %#04x", t->bge_name,
              pci_read_config(dev, BGE_PCI_MISC_CTL, 4) >> 16);
    device_set_desc_copy(dev, descbuf);
    if (pci_get_subvendor(dev) == PCI_VENDOR_DELL)
        sc->bge_no_3_led = 1;
    kfree(descbuf, M_TEMP);
    return(0);
}
static int
bge_attach(device_t dev)
{
    struct ifnet *ifp;
    struct bge_softc *sc;
    uint32_t hwcfg = 0;
    uint32_t mac_addr = 0;
    int error = 0, rid;
    uint8_t ether_addr[ETHER_ADDR_LEN];

    sc = device_get_softc(dev);
    sc->bge_dev = dev;
    callout_init(&sc->bge_stat_timer);
    lwkt_serialize_init(&sc->bge_jslot_serializer);

    /*
     * Map control/status registers.
     */
    pci_enable_busmaster(dev);

    rid = BGE_PCI_BAR0;
    sc->bge_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
                                         RF_ACTIVE);
    if (sc->bge_res == NULL) {
        device_printf(dev, "couldn't map memory\n");
        error = ENXIO;
        goto fail;
    }

    sc->bge_btag = rman_get_bustag(sc->bge_res);
    sc->bge_bhandle = rman_get_bushandle(sc->bge_res);

    /* Allocate interrupt */
    rid = 0;
    sc->bge_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
                                         RF_SHAREABLE | RF_ACTIVE);
    if (sc->bge_irq == NULL) {
        device_printf(dev, "couldn't map interrupt\n");
        error = ENXIO;
        goto fail;
    }

    /* Save ASIC rev. */
    sc->bge_chipid =
        pci_read_config(dev, BGE_PCI_MISC_CTL, 4) &
        BGE_PCIMISCCTL_ASICREV;
    sc->bge_asicrev = BGE_ASICREV(sc->bge_chipid);
    sc->bge_chiprev = BGE_CHIPREV(sc->bge_chipid);

    /*
     * Treat the 5714 and the 5752 like the 5750 until we have more info
     * on this chip.
     */
    if (sc->bge_asicrev == BGE_ASICREV_BCM5714 ||
        sc->bge_asicrev == BGE_ASICREV_BCM5752)
        sc->bge_asicrev = BGE_ASICREV_BCM5750;

    /*
     * XXX: Broadcom Linux driver.  Not in specs or errata.
     */
    if (sc->bge_asicrev == BGE_ASICREV_BCM5750) {
        uint32_t v;

        v = pci_read_config(dev, BGE_PCI_MSI_CAPID, 4);
        if (((v >> 8) & 0xff) == BGE_PCIE_MSI_CAPID) {
            v = pci_read_config(dev, BGE_PCIE_MSI_CAPID, 4);
            if ((v & 0xff) == BGE_PCIE_MSI_CAPID_VAL)
                sc->bge_pcie = 1;
        }
    }

    ifp = &sc->arpcom.ac_if;
    if_initname(ifp, device_get_name(dev), device_get_unit(dev));

    /* Try to reset the chip. */
    bge_reset(sc);

    if (bge_chipinit(sc)) {
        device_printf(dev, "chip initialization failed\n");
        error = ENXIO;
        goto fail;
    }

    /*
     * Get station address from the EEPROM.
     */
    mac_addr = bge_readmem_ind(sc, 0x0c14);
    if ((mac_addr >> 16) == 0x484b) {
        ether_addr[0] = (uint8_t)(mac_addr >> 8);
        ether_addr[1] = (uint8_t)mac_addr;
        mac_addr = bge_readmem_ind(sc, 0x0c18);
        ether_addr[2] = (uint8_t)(mac_addr >> 24);
        ether_addr[3] = (uint8_t)(mac_addr >> 16);
        ether_addr[4] = (uint8_t)(mac_addr >> 8);
        ether_addr[5] = (uint8_t)mac_addr;
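        /*
         * The 0x484b signature checked above is the ASCII pair "HK"; it
         * presumably marks that the firmware left a valid station address
         * in NIC memory, which is why those bytes are trusted before
         * falling back to the EEPROM read below.
         */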
    } else if (bge_read_eeprom(sc, ether_addr,
                               BGE_EE_MAC_OFFSET + 2, ETHER_ADDR_LEN)) {
        device_printf(dev, "failed to read station address\n");
        error = ENXIO;
        goto fail;
    }

    /* 5705/5750 limits RX return ring to 512 entries. */
    if (sc->bge_asicrev == BGE_ASICREV_BCM5705 ||
        sc->bge_asicrev == BGE_ASICREV_BCM5750)
        sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT_5705;
    else
        sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT;

    error = bge_dma_alloc(sc);
    if (error)
        goto fail;

    /* Set default tuneable values. */
    sc->bge_stat_ticks = BGE_TICKS_PER_SEC;
    sc->bge_rx_coal_ticks = 150;
    sc->bge_tx_coal_ticks = 150;
    sc->bge_rx_max_coal_bds = 64;
    sc->bge_tx_max_coal_bds = 128;

    /* Set up ifnet structure */
    ifp->if_softc = sc;
    ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
    ifp->if_ioctl = bge_ioctl;
    ifp->if_start = bge_start;
    ifp->if_watchdog = bge_watchdog;
    ifp->if_init = bge_init;
    ifp->if_mtu = ETHERMTU;
    ifp->if_capabilities = IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU;
    ifq_set_maxlen(&ifp->if_snd, BGE_TX_RING_CNT - 1);
    ifq_set_ready(&ifp->if_snd);

    /*
     * 5700 B0 chips do not support checksumming correctly due
     * to hardware bugs.
     */
    if (sc->bge_chipid != BGE_CHIPID_BCM5700_B0) {
        ifp->if_capabilities |= IFCAP_HWCSUM;
        ifp->if_hwassist = BGE_CSUM_FEATURES;
    }
    ifp->if_capenable = ifp->if_capabilities;

    /*
     * Figure out what sort of media we have by checking the
     * hardware config word in the first 32k of NIC internal memory,
     * or fall back to examining the EEPROM if necessary.
     * Note: on some BCM5700 cards, this value appears to be unset.
     * If that's the case, we have to rely on identifying the NIC
     * by its PCI subsystem ID, as we do below for the SysKonnect
     * SK-9D41.
     */
    if (bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_SIG) == BGE_MAGIC_NUMBER) {
        hwcfg = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_NICCFG);
    } else {
        if (bge_read_eeprom(sc, (caddr_t)&hwcfg, BGE_EE_HWCFG_OFFSET,
                            sizeof(hwcfg))) {
            device_printf(dev, "failed to read EEPROM\n");
            error = ENXIO;
            goto fail;
        }
        hwcfg = ntohl(hwcfg);
    }

    if ((hwcfg & BGE_HWCFG_MEDIA) == BGE_MEDIA_FIBER)
        sc->bge_tbi = 1;

    /* The SysKonnect SK-9D41 is a 1000baseSX card. */
    if (pci_get_subvendor(dev) == PCI_PRODUCT_SCHNEIDERKOCH_SK_9D41)
        sc->bge_tbi = 1;

    if (sc->bge_tbi) {
        ifmedia_init(&sc->bge_ifmedia, IFM_IMASK,
                     bge_ifmedia_upd, bge_ifmedia_sts);
        ifmedia_add(&sc->bge_ifmedia, IFM_ETHER|IFM_1000_SX, 0, NULL);
        ifmedia_add(&sc->bge_ifmedia,
                    IFM_ETHER|IFM_1000_SX|IFM_FDX, 0, NULL);
        ifmedia_add(&sc->bge_ifmedia, IFM_ETHER|IFM_AUTO, 0, NULL);
        ifmedia_set(&sc->bge_ifmedia, IFM_ETHER|IFM_AUTO);
        sc->bge_ifmedia.ifm_media = sc->bge_ifmedia.ifm_cur->ifm_media;
    } else {
        /*
         * Do transceiver setup.
         */
        if (mii_phy_probe(dev, &sc->bge_miibus,
                          bge_ifmedia_upd, bge_ifmedia_sts)) {
            device_printf(dev, "MII without any PHY!\n");
            error = ENXIO;
            goto fail;
        }
    }

    /*
     * When using the BCM5701 in PCI-X mode, data corruption has
     * been observed in the first few bytes of some received packets.
     * Aligning the packet buffer in memory eliminates the corruption.
     * Unfortunately, this misaligns the packet payloads. On platforms
     * which do not support unaligned accesses, we will realign the
     * payloads by copying the received packets.
     */
    switch (sc->bge_chipid) {
    case BGE_CHIPID_BCM5701_A0:
    case BGE_CHIPID_BCM5701_B0:
    case BGE_CHIPID_BCM5701_B2:
    case BGE_CHIPID_BCM5701_B5:
        /* If in PCI-X mode, work around the alignment bug. */
        if ((pci_read_config(dev, BGE_PCI_PCISTATE, 4) &
             (BGE_PCISTATE_PCI_BUSMODE | BGE_PCISTATE_PCI_BUSSPEED)) ==
            BGE_PCISTATE_PCI_BUSSPEED)
            sc->bge_rx_alignment_bug = 1;
        break;
    }

    if (sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
        sc->bge_chipid != BGE_CHIPID_BCM5700_B2) {
        sc->bge_link_upd = bge_bcm5700_link_upd;
        sc->bge_link_chg = BGE_MACSTAT_MI_INTERRUPT;
    } else if (sc->bge_tbi) {
        sc->bge_link_upd = bge_tbi_link_upd;
        sc->bge_link_chg = BGE_MACSTAT_LINK_CHANGED;
    } else {
        sc->bge_link_upd = bge_copper_link_upd;
        sc->bge_link_chg = BGE_MACSTAT_LINK_CHANGED;
    }

    /*
     * Call MI attach routine.
     */
    ether_ifattach(ifp, ether_addr, NULL);

    error = bus_setup_intr(dev, sc->bge_irq, INTR_NETSAFE,
                           bge_intr, sc, &sc->bge_intrhand,
                           ifp->if_serializer);
    if (error) {
        ether_ifdetach(ifp);
        device_printf(dev, "couldn't set up irq\n");
        goto fail;
    }
    return(0);
fail:
    bge_detach(dev);
    return(error);
}
static int
bge_detach(device_t dev)
{
    struct bge_softc *sc = device_get_softc(dev);
    struct ifnet *ifp = &sc->arpcom.ac_if;

    if (device_is_attached(dev)) {
        lwkt_serialize_enter(ifp->if_serializer);
        bge_stop(sc);
        bge_reset(sc);
        bus_teardown_intr(dev, sc->bge_irq, sc->bge_intrhand);
        lwkt_serialize_exit(ifp->if_serializer);

        ether_ifdetach(ifp);
    }
    if (sc->bge_tbi)
        ifmedia_removeall(&sc->bge_ifmedia);
    if (sc->bge_miibus)
        device_delete_child(dev, sc->bge_miibus);
    bus_generic_detach(dev);

    bge_release_resources(sc);
    bge_dma_free(sc);

    return(0);
}

static void
bge_release_resources(struct bge_softc *sc)
{
    device_t dev = sc->bge_dev;

    if (sc->bge_irq != NULL)
        bus_release_resource(dev, SYS_RES_IRQ, 0, sc->bge_irq);

    if (sc->bge_res != NULL)
        bus_release_resource(dev, SYS_RES_MEMORY,
                             BGE_PCI_BAR0, sc->bge_res);
}
static void
bge_reset(struct bge_softc *sc)
{
	device_t dev = sc->bge_dev;
	uint32_t cachesize, command, pcistate, reset, val;
	int i;

	/* Save some important PCI state. */
	cachesize = pci_read_config(dev, BGE_PCI_CACHESZ, 4);
	command = pci_read_config(dev, BGE_PCI_CMD, 4);
	pcistate = pci_read_config(dev, BGE_PCI_PCISTATE, 4);

	pci_write_config(dev, BGE_PCI_MISC_CTL,
	    BGE_PCIMISCCTL_INDIRECT_ACCESS|BGE_PCIMISCCTL_MASK_PCI_INTR|
	    BGE_HIF_SWAP_OPTIONS|BGE_PCIMISCCTL_PCISTATE_RW, 4);

	reset = BGE_MISCCFG_RESET_CORE_CLOCKS|(65<<1);

	/* XXX: Broadcom Linux driver. */
	if (sc->bge_pcie) {
		if (CSR_READ_4(sc, 0x7e2c) == 0x60)	/* PCIE 1.0 */
			CSR_WRITE_4(sc, 0x7e2c, 0x20);
		if (sc->bge_chipid != BGE_CHIPID_BCM5750_A0) {
			/* Prevent PCIE link training during global reset */
			CSR_WRITE_4(sc, BGE_MISC_CFG, (1<<29));
			reset |= (1<<29);
		}
	}

	/* Issue global reset */
	bge_writereg_ind(sc, BGE_MISC_CFG, reset);
	DELAY(1000);

	/* XXX: Broadcom Linux driver. */
	if (sc->bge_pcie) {
		uint32_t v;

		if (sc->bge_chipid == BGE_CHIPID_BCM5750_A0) {
			DELAY(500000); /* wait for link training to complete */
			v = pci_read_config(dev, 0xc4, 4);
			pci_write_config(dev, 0xc4, v | (1<<15), 4);
		}
		/* Set PCIE max payload size and clear error status. */
		pci_write_config(dev, 0xd8, 0xf5000, 4);
	}

	/* Reset some of the PCI state that got zapped by reset */
	pci_write_config(dev, BGE_PCI_MISC_CTL,
	    BGE_PCIMISCCTL_INDIRECT_ACCESS|BGE_PCIMISCCTL_MASK_PCI_INTR|
	    BGE_HIF_SWAP_OPTIONS|BGE_PCIMISCCTL_PCISTATE_RW, 4);
	pci_write_config(dev, BGE_PCI_CACHESZ, cachesize, 4);
	pci_write_config(dev, BGE_PCI_CMD, command, 4);
	bge_writereg_ind(sc, BGE_MISC_CFG, (65 << 1));

	/* Enable memory arbiter. */
	if (sc->bge_asicrev != BGE_ASICREV_BCM5705)
		CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);

	/*
	 * Prevent PXE restart: write a magic number to the
	 * general communications memory at 0xB50.
	 */
	bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM, BGE_MAGIC_NUMBER);

	/*
	 * Poll the value location we just wrote until
	 * we see the 1's complement of the magic number.
	 * This indicates that the firmware initialization
	 * is complete.
	 */
	for (i = 0; i < BGE_TIMEOUT; i++) {
		val = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM);
		if (val == ~BGE_MAGIC_NUMBER)
			break;
		DELAY(10);
	}

	if (i == BGE_TIMEOUT) {
		if_printf(&sc->arpcom.ac_if, "firmware handshake timed out\n");
		return;
	}

	/*
	 * XXX Wait for the value of the PCISTATE register to
	 * return to its original pre-reset state. This is a
	 * fairly good indicator of reset completion. If we don't
	 * wait for the reset to fully complete, trying to read
	 * from the device's non-PCI registers may yield garbage
	 * results.
	 */
	for (i = 0; i < BGE_TIMEOUT; i++) {
		if (pci_read_config(dev, BGE_PCI_PCISTATE, 4) == pcistate)
			break;
		DELAY(10);
	}

	/* Fix up byte swapping */
	CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_DMA_SWAP_OPTIONS|
	    BGE_MODECTL_BYTESWAP_DATA);

	CSR_WRITE_4(sc, BGE_MAC_MODE, 0);

	/*
	 * The 5704 in TBI mode apparently needs some special
	 * adjustment to insure the SERDES drive level is set
	 * to 1.2V.
	 */
	if (sc->bge_asicrev == BGE_ASICREV_BCM5704 && sc->bge_tbi) {
		uint32_t serdescfg;

		serdescfg = CSR_READ_4(sc, BGE_SERDES_CFG);
		serdescfg = (serdescfg & ~0xFFF) | 0x880;
		CSR_WRITE_4(sc, BGE_SERDES_CFG, serdescfg);
	}

	/* XXX: Broadcom Linux driver. */
	if (sc->bge_pcie && sc->bge_chipid != BGE_CHIPID_BCM5750_A0) {
		uint32_t v;

		v = CSR_READ_4(sc, 0x7c00);
		CSR_WRITE_4(sc, 0x7c00, v | (1<<25));
	}
	DELAY(10000);
}
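/*
 * Illustrative sketch (not part of the driver build): the post-reset
 * firmware handshake above follows a simple "write magic, poll for its
 * one's complement" pattern against general communications memory.
 * The helper below restates that pattern in isolation; bge_writemem_ind(),
 * bge_readmem_ind(), BGE_SOFTWARE_GENCOMM, BGE_MAGIC_NUMBER and
 * BGE_TIMEOUT are the driver's own names, everything else here is
 * hypothetical.
 */
#if 0
static int
bge_fw_handshake_sketch(struct bge_softc *sc)
{
	uint32_t val;
	int i;

	/* Tell the on-chip firmware a reset just happened. */
	bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM, BGE_MAGIC_NUMBER);

	/* Firmware acknowledges by writing back ~BGE_MAGIC_NUMBER. */
	for (i = 0; i < BGE_TIMEOUT; i++) {
		val = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM);
		if (val == ~BGE_MAGIC_NUMBER)
			return 0;		/* firmware is up */
		DELAY(10);
	}
	return ETIMEDOUT;			/* handshake never completed */
}
#endif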
/*
 * Frame reception handling. This is called if there's a frame
 * on the receive return list.
 *
 * Note: we have to be able to handle two possibilities here:
 * 1) the frame is from the jumbo receive ring
 * 2) the frame is from the standard receive ring
 */
static void
bge_rxeof(struct bge_softc *sc)
{
	struct ifnet *ifp;
	int stdcnt = 0, jumbocnt = 0;

	if (sc->bge_rx_saved_considx ==
	    sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx)
		return;

	ifp = &sc->arpcom.ac_if;

	bus_dmamap_sync(sc->bge_cdata.bge_rx_return_ring_tag,
			sc->bge_cdata.bge_rx_return_ring_map,
			BUS_DMASYNC_POSTREAD);
	bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
			sc->bge_cdata.bge_rx_std_ring_map,
			BUS_DMASYNC_POSTREAD);
	if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
	    sc->bge_asicrev != BGE_ASICREV_BCM5750) {
		bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
				sc->bge_cdata.bge_rx_jumbo_ring_map,
				BUS_DMASYNC_POSTREAD);
	}

	while (sc->bge_rx_saved_considx !=
	       sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx) {
		struct bge_rx_bd *cur_rx;
		uint32_t rxidx;
		struct mbuf *m = NULL;
		uint16_t vlan_tag = 0;
		int have_tag = 0;

		cur_rx =
		    &sc->bge_ldata.bge_rx_return_ring[sc->bge_rx_saved_considx];

		rxidx = cur_rx->bge_idx;
		BGE_INC(sc->bge_rx_saved_considx, sc->bge_return_ring_cnt);

		if (cur_rx->bge_flags & BGE_RXBDFLAG_VLAN_TAG) {
			have_tag = 1;
			vlan_tag = cur_rx->bge_vlan_tag;
		}

		if (cur_rx->bge_flags & BGE_RXBDFLAG_JUMBO_RING) {
			BGE_INC(sc->bge_jumbo, BGE_JUMBO_RX_RING_CNT);
			m = sc->bge_cdata.bge_rx_jumbo_chain[rxidx];
			sc->bge_cdata.bge_rx_jumbo_chain[rxidx] = NULL;
			jumbocnt++;

			if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
				ifp->if_ierrors++;
				bge_newbuf_jumbo(sc, sc->bge_jumbo, m);
				continue;
			}
			if (bge_newbuf_jumbo(sc,
			    sc->bge_jumbo, NULL) == ENOBUFS) {
				ifp->if_ierrors++;
				bge_newbuf_jumbo(sc, sc->bge_jumbo, m);
				continue;
			}
		} else {
			BGE_INC(sc->bge_std, BGE_STD_RX_RING_CNT);
			bus_dmamap_sync(sc->bge_cdata.bge_mtag,
					sc->bge_cdata.bge_rx_std_dmamap[rxidx],
					BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->bge_cdata.bge_mtag,
					  sc->bge_cdata.bge_rx_std_dmamap[rxidx]);
			m = sc->bge_cdata.bge_rx_std_chain[rxidx];
			sc->bge_cdata.bge_rx_std_chain[rxidx] = NULL;
			stdcnt++;

			if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
				ifp->if_ierrors++;
				bge_newbuf_std(sc, sc->bge_std, m);
				continue;
			}
			if (bge_newbuf_std(sc, sc->bge_std,
			    NULL) == ENOBUFS) {
				ifp->if_ierrors++;
				bge_newbuf_std(sc, sc->bge_std, m);
				continue;
			}
		}

		/*
		 * The i386 allows unaligned accesses, but for other
		 * platforms we must make sure the payload is aligned.
		 */
		if (sc->bge_rx_alignment_bug) {
			bcopy(m->m_data, m->m_data + ETHER_ALIGN,
			      cur_rx->bge_len);
			m->m_data += ETHER_ALIGN;
		}

		m->m_pkthdr.len = m->m_len = cur_rx->bge_len - ETHER_CRC_LEN;
		m->m_pkthdr.rcvif = ifp;

		if (ifp->if_capenable & IFCAP_RXCSUM) {
			if (cur_rx->bge_flags & BGE_RXBDFLAG_IP_CSUM) {
				m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
				if ((cur_rx->bge_ip_csum ^ 0xffff) == 0)
					m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
			}
			if (cur_rx->bge_flags & BGE_RXBDFLAG_TCP_UDP_CSUM &&
			    m->m_pkthdr.len >= BGE_MIN_FRAME) {
				m->m_pkthdr.csum_data =
					cur_rx->bge_tcp_udp_csum;
				m->m_pkthdr.csum_flags |=
					CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
			}
		}

		/*
		 * If we received a packet with a vlan tag, pass it
		 * to vlan_input() instead of ether_input().
		 */
		if (have_tag) {
			VLAN_INPUT_TAG(m, vlan_tag);
			have_tag = vlan_tag = 0;
		} else {
			ifp->if_input(ifp, m);
		}
	}

	bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
			sc->bge_cdata.bge_rx_std_ring_map,
			BUS_DMASYNC_PREWRITE);
	if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
	    sc->bge_asicrev != BGE_ASICREV_BCM5750) {
		bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
				sc->bge_cdata.bge_rx_jumbo_ring_map,
				BUS_DMASYNC_PREWRITE);
	}

	CSR_WRITE_4(sc, BGE_MBX_RX_CONS0_LO, sc->bge_rx_saved_considx);
	if (stdcnt)
		CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std);
	if (jumbocnt)
		CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo);
}
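/*
 * Illustrative sketch (not compiled): on strict-alignment CPUs the RX
 * fix-up in bge_rxeof() above shifts the received frame forward by
 * ETHER_ALIGN (2 bytes) so that the IP header that follows the 14-byte
 * Ethernet header ends up 32-bit aligned. A minimal restatement,
 * assuming a single mbuf with ETHER_ALIGN bytes of leading slack; the
 * helper name is hypothetical.
 */
#if 0
static void
bge_fixup_rx_sketch(struct mbuf *m, int len)
{
	/* Regions overlap; the BSD kernel bcopy() handles that. */
	bcopy(m->m_data, m->m_data + ETHER_ALIGN, len);
	m->m_data += ETHER_ALIGN;
}
#endif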
static void
bge_txeof(struct bge_softc *sc)
{
	struct bge_tx_bd *cur_tx = NULL;
	struct ifnet *ifp;

	if (sc->bge_tx_saved_considx ==
	    sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx)
		return;

	ifp = &sc->arpcom.ac_if;

	bus_dmamap_sync(sc->bge_cdata.bge_tx_ring_tag,
			sc->bge_cdata.bge_tx_ring_map,
			BUS_DMASYNC_POSTREAD);

	/*
	 * Go through our tx ring and free mbufs for those
	 * frames that have been sent.
	 */
	while (sc->bge_tx_saved_considx !=
	       sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx) {
		uint32_t idx;

		idx = sc->bge_tx_saved_considx;
		cur_tx = &sc->bge_ldata.bge_tx_ring[idx];
		if (cur_tx->bge_flags & BGE_TXBDFLAG_END)
			ifp->if_opackets++;
		if (sc->bge_cdata.bge_tx_chain[idx] != NULL) {
			bus_dmamap_sync(sc->bge_cdata.bge_mtag,
					sc->bge_cdata.bge_tx_dmamap[idx],
					BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->bge_cdata.bge_mtag,
					  sc->bge_cdata.bge_tx_dmamap[idx]);
			m_freem(sc->bge_cdata.bge_tx_chain[idx]);
			sc->bge_cdata.bge_tx_chain[idx] = NULL;
		}
		sc->bge_txcnt--;
		BGE_INC(sc->bge_tx_saved_considx, BGE_TX_RING_CNT);
	}

	if (cur_tx != NULL &&
	    (BGE_TX_RING_CNT - sc->bge_txcnt) >=
	    (BGE_NSEG_RSVD + BGE_NSEG_SPARE))
		ifp->if_flags &= ~IFF_OACTIVE;

	if (sc->bge_txcnt == 0)
		ifp->if_timer = 0;

	if (!ifq_is_empty(&ifp->if_snd))
		ifp->if_start(ifp);
}
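/*
 * Illustrative sketch (not compiled): bge_txeof() above walks the TX ring
 * from the driver's saved consumer index up to the hardware consumer
 * index reported in the status block, reclaiming each completed mbuf.
 * The bare ring-walk skeleton, with the busdma bookkeeping stripped out
 * (hw_cons and the helper name are hypothetical):
 */
#if 0
static void
bge_tx_reclaim_sketch(struct bge_softc *sc, uint32_t hw_cons)
{
	while (sc->bge_tx_saved_considx != hw_cons) {
		uint32_t idx = sc->bge_tx_saved_considx;

		if (sc->bge_cdata.bge_tx_chain[idx] != NULL) {
			m_freem(sc->bge_cdata.bge_tx_chain[idx]);
			sc->bge_cdata.bge_tx_chain[idx] = NULL;
		}
		sc->bge_txcnt--;
		BGE_INC(sc->bge_tx_saved_considx, BGE_TX_RING_CNT);
	}
}
#endif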
static void
bge_intr(void *xsc)
{
	struct bge_softc *sc = xsc;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t status;

	/*
	 * Ack the interrupt by writing something to BGE_MBX_IRQ0_LO. Don't
	 * disable interrupts by writing nonzero like we used to, since with
	 * our current organization this just gives complications and
	 * pessimizations for re-enabling interrupts. We used to have races
	 * instead of the necessary complications. Disabling interrupts
	 * would just reduce the chance of a status update while we are
	 * running (by switching to the interrupt-mode coalescence
	 * parameters), but this chance is already very low so it is more
	 * efficient to get another interrupt than prevent it.
	 *
	 * We do the ack first to ensure another interrupt if there is a
	 * status update after the ack. We don't check for the status
	 * changing later because it is more efficient to get another
	 * interrupt than prevent it, not quite as above (not checking is
	 * a smaller optimization than not toggling the interrupt enable,
	 * since checking doesn't involve PCI accesses and toggling requires
	 * the status check). So toggling would probably be a pessimization
	 * even with MSI. It would only be needed for using a task queue.
	 */
	CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 0);

	bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
			sc->bge_cdata.bge_status_map,
			BUS_DMASYNC_POSTREAD);

	/*
	 * Process link state changes.
	 */
	status = CSR_READ_4(sc, BGE_MAC_STS);
	if ((status & sc->bge_link_chg) || sc->bge_link_evt) {
		sc->bge_link_evt = 0;
		sc->bge_link_upd(sc, status);
	}

	if (ifp->if_flags & IFF_RUNNING) {
		/* Check RX return ring producer/consumer */
		bge_rxeof(sc);

		/* Check TX ring producer/consumer */
		bge_txeof(sc);
	}
}
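/*
 * Illustrative sketch (not compiled): the comment in bge_intr() above
 * describes an "ack first, then process" strategy. Restated in
 * isolation, the ordering is the whole point -- a status update that
 * arrives after the mailbox write raises a fresh interrupt rather than
 * being lost. The helper name is hypothetical; everything it calls is
 * the driver's own.
 */
#if 0
static void
bge_intr_order_sketch(struct bge_softc *sc)
{
	/* Ack: re-arm the device for the next status update. */
	CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 0);

	/* Only then consume the status block and run RX/TX processing. */
	bge_rxeof(sc);
	bge_txeof(sc);
}
#endif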
static void
bge_tick(void *xsc)
{
	struct bge_softc *sc = xsc;
	struct ifnet *ifp = &sc->arpcom.ac_if;

	lwkt_serialize_enter(ifp->if_serializer);

	if (sc->bge_asicrev == BGE_ASICREV_BCM5705 ||
	    sc->bge_asicrev == BGE_ASICREV_BCM5750)
		bge_stats_update_regs(sc);
	else
		bge_stats_update(sc);

	if (sc->bge_tbi) {
		/*
		 * Since in TBI mode auto-polling can't be used we should poll
		 * link status manually. Here we register pending link event
		 * and trigger interrupt.
		 */
		sc->bge_link_evt++;
		BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_SET);
	} else if (!sc->bge_link) {
		mii_tick(device_get_softc(sc->bge_miibus));
	}

	callout_reset(&sc->bge_stat_timer, hz, bge_tick, sc);

	lwkt_serialize_exit(ifp->if_serializer);
}
static void
bge_stats_update_regs(struct bge_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct bge_mac_stats_regs stats;
	uint32_t *s;
	int i;

	s = (uint32_t *)&stats;
	for (i = 0; i < sizeof(struct bge_mac_stats_regs); i += 4) {
		*s = CSR_READ_4(sc, BGE_RX_STATS + i);
		s++;
	}

	ifp->if_collisions +=
	    (stats.dot3StatsSingleCollisionFrames +
	     stats.dot3StatsMultipleCollisionFrames +
	     stats.dot3StatsExcessiveCollisions +
	     stats.dot3StatsLateCollisions) -
	    ifp->if_collisions;
}
static void
bge_stats_update(struct bge_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	bus_size_t stats;

	stats = BGE_MEMWIN_START + BGE_STATS_BLOCK;

#define READ_STAT(sc, stats, stat)	\
	CSR_READ_4(sc, stats + offsetof(struct bge_stats, stat))

	ifp->if_collisions +=
	    (READ_STAT(sc, stats,
		txstats.dot3StatsSingleCollisionFrames.bge_addr_lo) +
	     READ_STAT(sc, stats,
		txstats.dot3StatsMultipleCollisionFrames.bge_addr_lo) +
	     READ_STAT(sc, stats,
		txstats.dot3StatsExcessiveCollisions.bge_addr_lo) +
	     READ_STAT(sc, stats,
		txstats.dot3StatsLateCollisions.bge_addr_lo)) -
	    ifp->if_collisions;

#undef READ_STAT

#ifdef notdef
	ifp->if_collisions +=
	    (sc->bge_rdata->bge_info.bge_stats.dot3StatsSingleCollisionFrames +
	     sc->bge_rdata->bge_info.bge_stats.dot3StatsMultipleCollisionFrames +
	     sc->bge_rdata->bge_info.bge_stats.dot3StatsExcessiveCollisions +
	     sc->bge_rdata->bge_info.bge_stats.dot3StatsLateCollisions) -
	    ifp->if_collisions;
#endif
}
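/*
 * Illustrative sketch (not compiled): READ_STAT above turns a member of
 * struct bge_stats into an offset within the chip's statistics window
 * and reads it with CSR_READ_4(). Expanded by hand for a single counter
 * (the helper name is hypothetical):
 */
#if 0
static uint32_t
bge_read_one_stat_sketch(struct bge_softc *sc)
{
	bus_size_t stats = BGE_MEMWIN_START + BGE_STATS_BLOCK;

	/* Low 32 bits of the single-collision-frames TX counter. */
	return CSR_READ_4(sc, stats +
	    offsetof(struct bge_stats,
		     txstats.dot3StatsSingleCollisionFrames.bge_addr_lo));
}
#endif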
/*
 * Encapsulate an mbuf chain in the tx ring by coupling the mbuf data
 * pointers to descriptors.
 */
static int
bge_encap(struct bge_softc *sc, struct mbuf *m_head, uint32_t *txidx)
{
	struct bge_tx_bd *d = NULL;
	uint16_t csum_flags = 0;
	struct ifvlan *ifv = NULL;
	struct bge_dmamap_arg ctx;
	bus_dma_segment_t segs[BGE_NSEG_NEW];
	bus_dmamap_t map;
	int error, maxsegs, idx, i;

	if ((m_head->m_flags & (M_PROTO1|M_PKTHDR)) == (M_PROTO1|M_PKTHDR) &&
	    m_head->m_pkthdr.rcvif != NULL &&
	    m_head->m_pkthdr.rcvif->if_type == IFT_L2VLAN)
		ifv = m_head->m_pkthdr.rcvif->if_softc;

	if (m_head->m_pkthdr.csum_flags) {
		if (m_head->m_pkthdr.csum_flags & CSUM_IP)
			csum_flags |= BGE_TXBDFLAG_IP_CSUM;
		if (m_head->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP))
			csum_flags |= BGE_TXBDFLAG_TCP_UDP_CSUM;
		if (m_head->m_flags & M_LASTFRAG)
			csum_flags |= BGE_TXBDFLAG_IP_FRAG_END;
		else if (m_head->m_flags & M_FRAG)
			csum_flags |= BGE_TXBDFLAG_IP_FRAG;
	}

	idx = *txidx;
	map = sc->bge_cdata.bge_tx_dmamap[idx];

	maxsegs = (BGE_TX_RING_CNT - sc->bge_txcnt) - BGE_NSEG_RSVD;
	KASSERT(maxsegs >= BGE_NSEG_SPARE,
		("not enough segments %d\n", maxsegs));

	if (maxsegs > BGE_NSEG_NEW)
		maxsegs = BGE_NSEG_NEW;

	/*
	 * Pad outbound frame to BGE_MIN_FRAME for an unusual reason.
	 * The bge hardware will pad out Tx runts to BGE_MIN_FRAME,
	 * but when such padded frames employ the bge IP/TCP checksum
	 * offload, the hardware checksum assist gives incorrect results
	 * (possibly from incorporating its own padding into the UDP/TCP
	 * checksum; who knows). If we pad such runts with zeros, the
	 * onboard checksum comes out correct. We do this by pretending
	 * the mbuf chain has too many fragments so the coalescing code
	 * below can assemble the packet into a single buffer that's
	 * padded out to the minimum frame size.
	 */
	if ((csum_flags & BGE_TXBDFLAG_TCP_UDP_CSUM) &&
	    m_head->m_pkthdr.len < BGE_MIN_FRAME) {
		error = E2BIG;
	} else {
		ctx.bge_segs = segs;
		ctx.bge_maxsegs = maxsegs;
		error = bus_dmamap_load_mbuf(sc->bge_cdata.bge_mtag, map,
					     m_head, bge_dma_map_mbuf, &ctx,
					     BUS_DMA_NOWAIT);
	}

	if (error == E2BIG || ctx.bge_maxsegs == 0) {
		struct mbuf *m_new;

		m_new = m_defrag(m_head, MB_DONTWAIT);
		if (m_new == NULL) {
			if_printf(&sc->arpcom.ac_if,
				  "could not defrag TX mbuf\n");
			error = ENOBUFS;
			goto back;
		}
		m_head = m_new;

		/*
		 * Manually pad short frames, and zero the pad space
		 * to avoid leaking data.
		 */
		if ((csum_flags & BGE_TXBDFLAG_TCP_UDP_CSUM) &&
		    m_head->m_pkthdr.len < BGE_MIN_FRAME) {
			int pad_len = BGE_MIN_FRAME - m_head->m_pkthdr.len;

			bzero(mtod(m_head, char *) + m_head->m_pkthdr.len,
			      pad_len);
			m_head->m_pkthdr.len += pad_len;
			m_head->m_len = m_head->m_pkthdr.len;
		}

		ctx.bge_segs = segs;
		ctx.bge_maxsegs = maxsegs;
		error = bus_dmamap_load_mbuf(sc->bge_cdata.bge_mtag, map,
					     m_head, bge_dma_map_mbuf, &ctx,
					     BUS_DMA_NOWAIT);
		if (error || ctx.bge_maxsegs == 0) {
			if_printf(&sc->arpcom.ac_if,
				  "could not defrag TX mbuf\n");
			if (error == 0)
				error = E2BIG;
			goto back;
		}
	} else if (error) {
		if_printf(&sc->arpcom.ac_if, "could not map TX mbuf\n");
		goto back;
	}

	bus_dmamap_sync(sc->bge_cdata.bge_mtag, map, BUS_DMASYNC_PREWRITE);

	for (i = 0; ; i++) {
		d = &sc->bge_ldata.bge_tx_ring[idx];

		d->bge_addr.bge_addr_lo = BGE_ADDR_LO(ctx.bge_segs[i].ds_addr);
		d->bge_addr.bge_addr_hi = BGE_ADDR_HI(ctx.bge_segs[i].ds_addr);
		d->bge_len = segs[i].ds_len;
		d->bge_flags = csum_flags;

		if (i == ctx.bge_maxsegs - 1)
			break;
		BGE_INC(idx, BGE_TX_RING_CNT);
	}
	/* Mark the last segment as end of packet... */
	d->bge_flags |= BGE_TXBDFLAG_END;

	/* Set vlan tag to the first segment of the packet. */
	d = &sc->bge_ldata.bge_tx_ring[*txidx];
	if (ifv != NULL) {
		d->bge_flags |= BGE_TXBDFLAG_VLAN_TAG;
		d->bge_vlan_tag = ifv->ifv_tag;
	} else {
		d->bge_vlan_tag = 0;
	}

	/*
	 * Insure that the map for this transmission is placed at
	 * the array index of the last descriptor in this chain.
	 */
	sc->bge_cdata.bge_tx_dmamap[*txidx] = sc->bge_cdata.bge_tx_dmamap[idx];
	sc->bge_cdata.bge_tx_dmamap[idx] = map;
	sc->bge_cdata.bge_tx_chain[idx] = m_head;
	sc->bge_txcnt += ctx.bge_maxsegs;

	BGE_INC(idx, BGE_TX_RING_CNT);
	*txidx = idx;
back:
	if (error)
		m_freem(m_head);
	return error;
}
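/*
 * Illustrative sketch (not compiled): the manual runt padding in
 * bge_encap() above appends zeroed bytes so a short frame reaches
 * BGE_MIN_FRAME before the hardware checksum engine sees it. Assuming
 * the mbuf is a single contiguous buffer with room past m_pkthdr.len
 * (which the m_defrag() path above arranges), the padding step alone is
 * (helper name hypothetical):
 */
#if 0
static void
bge_pad_runt_sketch(struct mbuf *m)
{
	int pad_len = BGE_MIN_FRAME - m->m_pkthdr.len;

	/* Zero the pad area so no stale kernel data leaks onto the wire. */
	bzero(mtod(m, char *) + m->m_pkthdr.len, pad_len);
	m->m_pkthdr.len += pad_len;
	m->m_len = m->m_pkthdr.len;
}
#endif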
/*
 * Main transmit routine. To avoid having to do mbuf copies, we put pointers
 * to the mbuf data regions directly in the transmit descriptors.
 */
static void
bge_start(struct ifnet *ifp)
{
	struct bge_softc *sc = ifp->if_softc;
	struct mbuf *m_head = NULL;
	uint32_t prodidx;

	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING ||
	    !sc->bge_link)
		return;

	prodidx = sc->bge_tx_prodidx;

	while (sc->bge_cdata.bge_tx_chain[prodidx] == NULL) {
		m_head = ifq_poll(&ifp->if_snd);
		if (m_head == NULL)
			break;

		/*
		 * XXX
		 * The code inside the if() block is never reached since we
		 * must mark CSUM_IP_FRAGS in our if_hwassist to start getting
		 * requests to checksum TCP/UDP in a fragmented packet.
		 *
		 * XXX
		 * safety overkill. If this is a fragmented packet chain
		 * with delayed TCP/UDP checksums, then only encapsulate
		 * it if we have enough descriptors to handle the entire
		 * chain at once.
		 * (paranoia -- may not actually be needed)
		 */
		if (m_head->m_flags & M_FIRSTFRAG &&
		    m_head->m_pkthdr.csum_flags & (CSUM_DELAY_DATA)) {
			if ((BGE_TX_RING_CNT - sc->bge_txcnt) <
			    m_head->m_pkthdr.csum_data + 16) {
				ifp->if_flags |= IFF_OACTIVE;
				break;
			}
		}

		/*
		 * Sanity check: avoid coming within BGE_NSEG_RSVD
		 * descriptors of the end of the ring. Also make
		 * sure there are BGE_NSEG_SPARE descriptors for
		 * jumbo buffers' defragmentation.
		 */
		if ((BGE_TX_RING_CNT - sc->bge_txcnt) <
		    (BGE_NSEG_RSVD + BGE_NSEG_SPARE)) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		/*
		 * Dequeue the packet before encapsulation, since
		 * bge_encap() may free the packet if error happens.
		 */
		ifq_dequeue(&ifp->if_snd, m_head);

		/*
		 * Pack the data into the transmit ring. If we
		 * don't have room, set the OACTIVE flag and wait
		 * for the NIC to drain the ring.
		 */
		if (bge_encap(sc, m_head, &prodidx)) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		BPF_MTAP(ifp, m_head);
	}

	/* Transmit */
	CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx);
	/* 5700 b2 errata */
	if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
		CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx);

	sc->bge_tx_prodidx = prodidx;

	/*
	 * Set a timeout in case the chip goes out to lunch.
	 */
	ifp->if_timer = 5;
}
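/*
 * Illustrative sketch (not compiled): the transmit kick in bge_start()
 * above hands the new producer index to the chip through the TX host
 * producer mailbox; on 5700_BX silicon the write is simply issued a
 * second time to work around the b2 errata. The kick in isolation
 * (helper name hypothetical):
 */
#if 0
static void
bge_tx_kick_sketch(struct bge_softc *sc, uint32_t prodidx)
{
	CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx);
	if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)	/* 5700 b2 errata */
		CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx);
}
#endif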
static void
bge_init(void *xsc)
{
	struct bge_softc *sc = xsc;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint16_t *m;

	ASSERT_SERIALIZED(ifp->if_serializer);

	if (ifp->if_flags & IFF_RUNNING)
		return;

	/* Cancel pending I/O and flush buffers. */
	bge_stop(sc);
	bge_reset(sc);

	/*
	 * Init the various state machines, ring
	 * control blocks and firmware.
	 */
	if (bge_blockinit(sc)) {
		if_printf(ifp, "initialization failure\n");
		return;
	}

	/* Specify MTU. */
	CSR_WRITE_4(sc, BGE_RX_MTU, ifp->if_mtu +
	    ETHER_HDR_LEN + ETHER_CRC_LEN + EVL_ENCAPLEN);

	/* Load our MAC address. */
	m = (uint16_t *)&sc->arpcom.ac_enaddr[0];
	CSR_WRITE_4(sc, BGE_MAC_ADDR1_LO, htons(m[0]));
	CSR_WRITE_4(sc, BGE_MAC_ADDR1_HI, (htons(m[1]) << 16) | htons(m[2]));

	/* Enable or disable promiscuous mode as needed. */
	bge_setpromisc(sc);

	/* Program multicast filter. */
	bge_setmulti(sc);

	/* Init RX ring. */
	bge_init_rx_ring_std(sc);

	/*
	 * Workaround for a bug in 5705 ASIC rev A0. Poll the NIC's
	 * memory to insure that the chip has in fact read the first
	 * entry of the ring.
	 */
	if (sc->bge_chipid == BGE_CHIPID_BCM5705_A0) {
		uint32_t v;
		int i;

		for (i = 0; i < 10; i++) {
			DELAY(20);
			v = bge_readmem_ind(sc, BGE_STD_RX_RINGS + 8);
			if (v == (MCLBYTES - ETHER_ALIGN))
				break;
		}
		if (i == 10)
			if_printf(ifp, "5705 A0 chip failed to load RX ring\n");
	}

	/* Init jumbo RX ring. */
	if (ifp->if_mtu > (ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN))
		bge_init_rx_ring_jumbo(sc);

	/* Init our RX return ring index */
	sc->bge_rx_saved_considx = 0;

	/* Init TX ring. */
	bge_init_tx_ring(sc);

	/* Turn on transmitter */
	BGE_SETBIT(sc, BGE_TX_MODE, BGE_TXMODE_ENABLE);

	/* Turn on receiver */
	BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);

	/* Tell firmware we're alive. */
	BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);

	/* Enable host interrupts. */
	BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_CLEAR_INTA);
	BGE_CLRBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR);
	CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 0);

	bge_ifmedia_upd(ifp);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	callout_reset(&sc->bge_stat_timer, hz, bge_tick, sc);
}
/*
 * Set media options.
 */
static int
bge_ifmedia_upd(struct ifnet *ifp)
{
	struct bge_softc *sc = ifp->if_softc;

	/* If this is a 1000baseX NIC, enable the TBI port. */
	if (sc->bge_tbi) {
		struct ifmedia *ifm = &sc->bge_ifmedia;

		if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
			return EINVAL;

		switch(IFM_SUBTYPE(ifm->ifm_media)) {
		case IFM_AUTO:
			/*
			 * The BCM5704 ASIC appears to have a special
			 * mechanism for programming the autoneg
			 * advertisement registers in TBI mode.
			 */
			if (!bge_fake_autoneg &&
			    sc->bge_asicrev == BGE_ASICREV_BCM5704) {
				uint32_t sgdig;

				CSR_WRITE_4(sc, BGE_TX_TBI_AUTONEG, 0);
				sgdig = CSR_READ_4(sc, BGE_SGDIG_CFG);
				sgdig |= BGE_SGDIGCFG_AUTO |
					 BGE_SGDIGCFG_PAUSE_CAP |
					 BGE_SGDIGCFG_ASYM_PAUSE;
				CSR_WRITE_4(sc, BGE_SGDIG_CFG,
					    sgdig | BGE_SGDIGCFG_SEND);
				DELAY(5);
				CSR_WRITE_4(sc, BGE_SGDIG_CFG, sgdig);
			}
			break;
		case IFM_1000_SX:
			if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) {
				BGE_CLRBIT(sc, BGE_MAC_MODE,
					   BGE_MACMODE_HALF_DUPLEX);
			} else {
				BGE_SETBIT(sc, BGE_MAC_MODE,
					   BGE_MACMODE_HALF_DUPLEX);
			}
			break;
		default:
			return EINVAL;
		}
	} else {
		struct mii_data *mii = device_get_softc(sc->bge_miibus);

		sc->bge_link_evt++;
		sc->bge_link = 0;
		if (mii->mii_instance) {
			struct mii_softc *miisc;

			LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
				mii_phy_reset(miisc);
		}
		mii_mediachg(mii);
	}
	return 0;
}
/*
 * Report current media status.
 */
static void
bge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct bge_softc *sc = ifp->if_softc;

	if (sc->bge_tbi) {
		ifmr->ifm_status = IFM_AVALID;
		ifmr->ifm_active = IFM_ETHER;
		if (CSR_READ_4(sc, BGE_MAC_STS) &
		    BGE_MACSTAT_TBI_PCS_SYNCHED) {
			ifmr->ifm_status |= IFM_ACTIVE;
		} else {
			ifmr->ifm_active |= IFM_NONE;
			return;
		}

		ifmr->ifm_active |= IFM_1000_SX;
		if (CSR_READ_4(sc, BGE_MAC_MODE) & BGE_MACMODE_HALF_DUPLEX)
			ifmr->ifm_active |= IFM_HDX;
		else
			ifmr->ifm_active |= IFM_FDX;
	} else {
		struct mii_data *mii = device_get_softc(sc->bge_miibus);

		mii_pollstat(mii);
		ifmr->ifm_active = mii->mii_media_active;
		ifmr->ifm_status = mii->mii_media_status;
	}
}
static int
bge_ioctl(struct ifnet *ifp, u_long command, caddr_t data, struct ucred *cr)
{
	struct bge_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	int mask, error = 0;
	struct mii_data *mii;

	ASSERT_SERIALIZED(ifp->if_serializer);

	switch (command) {
	case SIOCSIFMTU:
		/* Disallow jumbo frames on 5705/5750. */
		if (((sc->bge_asicrev == BGE_ASICREV_BCM5705 ||
		      sc->bge_asicrev == BGE_ASICREV_BCM5750) &&
		     ifr->ifr_mtu > ETHERMTU) || ifr->ifr_mtu > BGE_JUMBO_MTU) {
			error = EINVAL;
		} else {
			ifp->if_mtu = ifr->ifr_mtu;
			ifp->if_flags &= ~IFF_RUNNING;
			bge_init(sc);
		}
		break;
	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_flags & IFF_RUNNING) {
				int flags = ifp->if_flags & sc->bge_if_flags;

				/*
				 * If only the state of the PROMISC flag
				 * changed, then just use the 'set promisc
				 * mode' command instead of reinitializing
				 * the entire NIC. Doing a full re-init
				 * means reloading the firmware and waiting
				 * for it to start up, which may take a
				 * second or two. Similarly for ALLMULTI.
				 */
				if (flags & IFF_PROMISC)
					bge_setpromisc(sc);
				if (flags & IFF_ALLMULTI)
					bge_setmulti(sc);
			} else {
				bge_init(sc);
			}
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				bge_stop(sc);
		}
		sc->bge_if_flags = ifp->if_flags;
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		if (ifp->if_flags & IFF_RUNNING)
			bge_setmulti(sc);
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		if (sc->bge_tbi) {
			error = ifmedia_ioctl(ifp, ifr,
					      &sc->bge_ifmedia, command);
		} else {
			mii = device_get_softc(sc->bge_miibus);
			error = ifmedia_ioctl(ifp, ifr,
					      &mii->mii_media, command);
		}
		break;
	case SIOCSIFCAP:
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
		if (mask & IFCAP_HWCSUM) {
			ifp->if_capenable ^= IFCAP_HWCSUM;
			if (IFCAP_HWCSUM & ifp->if_capenable)
				ifp->if_hwassist = BGE_CSUM_FEATURES;
			else
				ifp->if_hwassist = 0;
		}
		break;
	default:
		error = ether_ioctl(ifp, command, data);
		break;
	}
	return error;
}
static void
bge_watchdog(struct ifnet *ifp)
{
	struct bge_softc *sc = ifp->if_softc;

	if_printf(ifp, "watchdog timeout -- resetting\n");

	ifp->if_flags &= ~IFF_RUNNING;
	bge_init(sc);

	ifp->if_oerrors++;

	if (!ifq_is_empty(&ifp->if_snd))
		ifp->if_start(ifp);
}
/*
 * Stop the adapter and free any mbufs allocated to the
 * RX and TX lists.
 */
static void
bge_stop(struct bge_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct ifmedia_entry *ifm;
	struct mii_data *mii = NULL;
	int mtmp, itmp;

	ASSERT_SERIALIZED(ifp->if_serializer);

	if (!sc->bge_tbi)
		mii = device_get_softc(sc->bge_miibus);

	callout_stop(&sc->bge_stat_timer);

	/*
	 * Disable all of the receiver blocks
	 */
	BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);
	BGE_CLRBIT(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);
	BGE_CLRBIT(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);
	if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
	    sc->bge_asicrev != BGE_ASICREV_BCM5750)
		BGE_CLRBIT(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);
	BGE_CLRBIT(sc, BGE_RDBDI_MODE, BGE_RBDIMODE_ENABLE);
	BGE_CLRBIT(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);
	BGE_CLRBIT(sc, BGE_RBDC_MODE, BGE_RBDCMODE_ENABLE);

	/*
	 * Disable all of the transmit blocks
	 */
	BGE_CLRBIT(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);
	BGE_CLRBIT(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);
	BGE_CLRBIT(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);
	BGE_CLRBIT(sc, BGE_RDMA_MODE, BGE_RDMAMODE_ENABLE);
	BGE_CLRBIT(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE);
	if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
	    sc->bge_asicrev != BGE_ASICREV_BCM5750)
		BGE_CLRBIT(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);
	BGE_CLRBIT(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);

	/*
	 * Shut down all of the memory managers and related
	 * state machines.
	 */
	BGE_CLRBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE);
	BGE_CLRBIT(sc, BGE_WDMA_MODE, BGE_WDMAMODE_ENABLE);
	if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
	    sc->bge_asicrev != BGE_ASICREV_BCM5750)
		BGE_CLRBIT(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);
	CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
	CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);
	if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
	    sc->bge_asicrev != BGE_ASICREV_BCM5750) {
		BGE_CLRBIT(sc, BGE_BMAN_MODE, BGE_BMANMODE_ENABLE);
		BGE_CLRBIT(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);
	}

	/* Disable host interrupts. */
	BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR);
	CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 1);

	/*
	 * Tell firmware we're shutting down.
	 */
	BGE_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);

	/* Free the RX lists. */
	bge_free_rx_ring_std(sc);

	/* Free jumbo RX list. */
	if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
	    sc->bge_asicrev != BGE_ASICREV_BCM5750)
		bge_free_rx_ring_jumbo(sc);

	/* Free TX buffers. */
	bge_free_tx_ring(sc);

	/*
	 * Isolate/power down the PHY, but leave the media selection
	 * unchanged so that things will be put back to normal when
	 * we bring the interface back up.
	 */
	if (!sc->bge_tbi) {
		itmp = ifp->if_flags;
		ifp->if_flags |= IFF_UP;
		ifm = mii->mii_media.ifm_cur;
		mtmp = ifm->ifm_media;
		ifm->ifm_media = IFM_ETHER|IFM_NONE;
		mii_mediachg(mii);
		ifm->ifm_media = mtmp;
		ifp->if_flags = itmp;
	}

	sc->bge_link = 0;

	sc->bge_tx_saved_considx = BGE_TXCONS_UNSET;

	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
}
/*
 * Stop all chip I/O so that the kernel's probe routines don't
 * get confused by errant DMAs when rebooting.
 */
static void
bge_shutdown(device_t dev)
{
	struct bge_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;

	lwkt_serialize_enter(ifp->if_serializer);
	bge_stop(sc);
	bge_reset(sc);
	lwkt_serialize_exit(ifp->if_serializer);
}
static int
bge_suspend(device_t dev)
{
	struct bge_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;

	lwkt_serialize_enter(ifp->if_serializer);
	bge_stop(sc);
	lwkt_serialize_exit(ifp->if_serializer);

	return 0;
}
static int
bge_resume(device_t dev)
{
	struct bge_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;

	lwkt_serialize_enter(ifp->if_serializer);

	if (ifp->if_flags & IFF_UP) {
		bge_init(sc);

		if (!ifq_is_empty(&ifp->if_snd))
			ifp->if_start(ifp);
	}

	lwkt_serialize_exit(ifp->if_serializer);

	return 0;
}
static void
bge_setpromisc(struct bge_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;

	if (ifp->if_flags & IFF_PROMISC)
		BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
	else
		BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
}
static void
bge_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
	struct bge_dmamap_arg *ctx = arg;

	if (error)
		return;

	KASSERT(nsegs == 1 && ctx->bge_maxsegs == 1,
		("only one segment is allowed\n"));

	ctx->bge_segs[0] = *segs;
}
static void
bge_dma_map_mbuf(void *arg, bus_dma_segment_t *segs, int nsegs,
		 bus_size_t mapsz __unused, int error)
{
	struct bge_dmamap_arg *ctx = arg;
	int i;

	if (error)
		return;

	if (nsegs > ctx->bge_maxsegs) {
		ctx->bge_maxsegs = 0;
		return;
	}

	ctx->bge_maxsegs = nsegs;
	for (i = 0; i < nsegs; ++i)
		ctx->bge_segs[i] = segs[i];
}
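/*
 * Illustrative sketch (not compiled): bus_dmamap_load_mbuf() reports the
 * scatter/gather list through a callback like bge_dma_map_mbuf() above.
 * The caller hands in a struct bge_dmamap_arg and inspects bge_maxsegs
 * afterwards: 0 means the mbuf needed more segments than allowed (the
 * defrag path in bge_encap()), otherwise bge_segs[] holds the list.
 * A minimal caller, with hypothetical names:
 */
#if 0
static int
bge_load_mbuf_sketch(struct bge_softc *sc, bus_dmamap_t map, struct mbuf *m,
		     bus_dma_segment_t *segs, int maxsegs)
{
	struct bge_dmamap_arg ctx;
	int error;

	ctx.bge_segs = segs;
	ctx.bge_maxsegs = maxsegs;
	error = bus_dmamap_load_mbuf(sc->bge_cdata.bge_mtag, map, m,
				     bge_dma_map_mbuf, &ctx, BUS_DMA_NOWAIT);
	if (error)
		return error;
	if (ctx.bge_maxsegs == 0)
		return E2BIG;		/* too fragmented; defrag and retry */
	return 0;
}
#endif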
static void
bge_dma_free(struct bge_softc *sc)
{
	int i;

	/* Destroy RX/TX mbuf DMA stuffs. */
	if (sc->bge_cdata.bge_mtag != NULL) {
		for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
			if (sc->bge_cdata.bge_rx_std_dmamap[i]) {
				bus_dmamap_destroy(sc->bge_cdata.bge_mtag,
				    sc->bge_cdata.bge_rx_std_dmamap[i]);
			}
		}

		for (i = 0; i < BGE_TX_RING_CNT; i++) {
			if (sc->bge_cdata.bge_tx_dmamap[i]) {
				bus_dmamap_destroy(sc->bge_cdata.bge_mtag,
				    sc->bge_cdata.bge_tx_dmamap[i]);
			}
		}
		bus_dma_tag_destroy(sc->bge_cdata.bge_mtag);
	}

	/* Destroy standard RX ring */
	bge_dma_block_free(sc->bge_cdata.bge_rx_std_ring_tag,
			   sc->bge_cdata.bge_rx_std_ring_map,
			   sc->bge_ldata.bge_rx_std_ring);

	if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
	    sc->bge_asicrev != BGE_ASICREV_BCM5750)
		bge_free_jumbo_mem(sc);

	/* Destroy RX return ring */
	bge_dma_block_free(sc->bge_cdata.bge_rx_return_ring_tag,
			   sc->bge_cdata.bge_rx_return_ring_map,
			   sc->bge_ldata.bge_rx_return_ring);

	/* Destroy TX ring */
	bge_dma_block_free(sc->bge_cdata.bge_tx_ring_tag,
			   sc->bge_cdata.bge_tx_ring_map,
			   sc->bge_ldata.bge_tx_ring);

	/* Destroy status block */
	bge_dma_block_free(sc->bge_cdata.bge_status_tag,
			   sc->bge_cdata.bge_status_map,
			   sc->bge_ldata.bge_status_block);

	/* Destroy statistics block */
	bge_dma_block_free(sc->bge_cdata.bge_stats_tag,
			   sc->bge_cdata.bge_stats_map,
			   sc->bge_ldata.bge_stats);

	/* Destroy the parent tag */
	if (sc->bge_cdata.bge_parent_tag != NULL)
		bus_dma_tag_destroy(sc->bge_cdata.bge_parent_tag);
}
static int
bge_dma_alloc(struct bge_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	int nseg, i, error;

	/*
	 * Allocate the parent bus DMA tag appropriate for PCI.
	 */
	error = bus_dma_tag_create(NULL, 1, 0,
				   BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
				   NULL, NULL,
				   MAXBSIZE, BGE_NSEG_NEW,
				   BUS_SPACE_MAXSIZE_32BIT,
				   0, &sc->bge_cdata.bge_parent_tag);
	if (error) {
		if_printf(ifp, "could not allocate parent dma tag\n");
		return error;
	}

	/*
	 * Create DMA tag for mbufs.
	 */
	nseg = BGE_NSEG_NEW;
	error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag, 1, 0,
				   BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
				   NULL, NULL,
				   MCLBYTES * nseg, nseg, MCLBYTES,
				   BUS_DMA_ALLOCNOW, &sc->bge_cdata.bge_mtag);
	if (error) {
		if_printf(ifp, "could not allocate mbuf dma tag\n");
		return error;
	}

	/*
	 * Create DMA maps for TX/RX mbufs.
	 */
	for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
		error = bus_dmamap_create(sc->bge_cdata.bge_mtag, 0,
					  &sc->bge_cdata.bge_rx_std_dmamap[i]);
		if (error) {
			int j;

			for (j = 0; j < i; ++j) {
				bus_dmamap_destroy(sc->bge_cdata.bge_mtag,
					sc->bge_cdata.bge_rx_std_dmamap[j]);
			}
			bus_dma_tag_destroy(sc->bge_cdata.bge_mtag);
			sc->bge_cdata.bge_mtag = NULL;

			if_printf(ifp, "could not create DMA map for RX\n");
			return error;
		}
	}

	for (i = 0; i < BGE_TX_RING_CNT; i++) {
		error = bus_dmamap_create(sc->bge_cdata.bge_mtag, 0,
					  &sc->bge_cdata.bge_tx_dmamap[i]);
		if (error) {
			int j;

			for (j = 0; j < BGE_STD_RX_RING_CNT; ++j) {
				bus_dmamap_destroy(sc->bge_cdata.bge_mtag,
					sc->bge_cdata.bge_rx_std_dmamap[j]);
			}
			for (j = 0; j < i; ++j) {
				bus_dmamap_destroy(sc->bge_cdata.bge_mtag,
					sc->bge_cdata.bge_tx_dmamap[j]);
			}
			bus_dma_tag_destroy(sc->bge_cdata.bge_mtag);
			sc->bge_cdata.bge_mtag = NULL;

			if_printf(ifp, "could not create DMA map for TX\n");
			return error;
		}
	}

	/*
	 * Create DMA stuffs for standard RX ring.
	 */
	error = bge_dma_block_alloc(sc, BGE_STD_RX_RING_SZ,
				    &sc->bge_cdata.bge_rx_std_ring_tag,
				    &sc->bge_cdata.bge_rx_std_ring_map,
				    (void **)&sc->bge_ldata.bge_rx_std_ring,
				    &sc->bge_ldata.bge_rx_std_ring_paddr);
	if (error) {
		if_printf(ifp, "could not create std RX ring\n");
		return error;
	}

	/*
	 * Create jumbo buffer pool.
	 */
	if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
	    sc->bge_asicrev != BGE_ASICREV_BCM5750) {
		error = bge_alloc_jumbo_mem(sc);
		if (error) {
			if_printf(ifp, "could not create jumbo buffer pool\n");
			return error;
		}
	}

	/*
	 * Create DMA stuffs for RX return ring.
	 */
	error = bge_dma_block_alloc(sc, BGE_RX_RTN_RING_SZ(sc),
				    &sc->bge_cdata.bge_rx_return_ring_tag,
				    &sc->bge_cdata.bge_rx_return_ring_map,
				    (void **)&sc->bge_ldata.bge_rx_return_ring,
				    &sc->bge_ldata.bge_rx_return_ring_paddr);
	if (error) {
		if_printf(ifp, "could not create RX ret ring\n");
		return error;
	}

	/*
	 * Create DMA stuffs for TX ring.
	 */
	error = bge_dma_block_alloc(sc, BGE_TX_RING_SZ,
				    &sc->bge_cdata.bge_tx_ring_tag,
				    &sc->bge_cdata.bge_tx_ring_map,
				    (void **)&sc->bge_ldata.bge_tx_ring,
				    &sc->bge_ldata.bge_tx_ring_paddr);
	if (error) {
		if_printf(ifp, "could not create TX ring\n");
		return error;
	}

	/*
	 * Create DMA stuffs for status block.
	 */
	error = bge_dma_block_alloc(sc, BGE_STATUS_BLK_SZ,
				    &sc->bge_cdata.bge_status_tag,
				    &sc->bge_cdata.bge_status_map,
				    (void **)&sc->bge_ldata.bge_status_block,
				    &sc->bge_ldata.bge_status_block_paddr);
	if (error) {
		if_printf(ifp, "could not create status block\n");
		return error;
	}

	/*
	 * Create DMA stuffs for statistics block.
	 */
	error = bge_dma_block_alloc(sc, BGE_STATS_SZ,
				    &sc->bge_cdata.bge_stats_tag,
				    &sc->bge_cdata.bge_stats_map,
				    (void **)&sc->bge_ldata.bge_stats,
				    &sc->bge_ldata.bge_stats_paddr);
	if (error) {
		if_printf(ifp, "could not create stats block\n");
		return error;
	}
	return 0;
}
static int
bge_dma_block_alloc(struct bge_softc *sc, bus_size_t size, bus_dma_tag_t *tag,
		    bus_dmamap_t *map, void **addr, bus_addr_t *paddr)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct bge_dmamap_arg ctx;
	bus_dma_segment_t seg;
	int error;

	/*
	 * Create DMA tag
	 */
	error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag, PAGE_SIZE, 0,
				   BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
				   NULL, NULL, size, 1, size, 0, tag);
	if (error) {
		if_printf(ifp, "could not allocate dma tag\n");
		return error;
	}

	/*
	 * Allocate DMA'able memory
	 */
	error = bus_dmamem_alloc(*tag, addr, BUS_DMA_WAITOK | BUS_DMA_ZERO,
				 map);
	if (error) {
		if_printf(ifp, "could not allocate dma memory\n");
		bus_dma_tag_destroy(*tag);
		*tag = NULL;
		return error;
	}

	/*
	 * Load the DMA'able memory
	 */
	ctx.bge_maxsegs = 1;
	ctx.bge_segs = &seg;
	error = bus_dmamap_load(*tag, *map, *addr, size, bge_dma_map_addr, &ctx,
				BUS_DMA_WAITOK);
	if (error) {
		if_printf(ifp, "could not load dma memory\n");
		bus_dmamem_free(*tag, *addr, *map);
		bus_dma_tag_destroy(*tag);
		*tag = NULL;
		return error;
	}

	*paddr = ctx.bge_segs[0].ds_addr;
	return 0;
}
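/*
 * Illustrative sketch (not compiled): every ring and block in this driver
 * is set up with the same three-step busdma sequence used above --
 * bus_dma_tag_create() for a one-segment tag of the requested size,
 * bus_dmamem_alloc() for zeroed DMA-safe memory, and bus_dmamap_load()
 * with bge_dma_map_addr() to learn the single device address. A caller
 * looks like this (all local names hypothetical); the teardown in
 * bge_dma_block_free() below mirrors the sequence in reverse.
 */
#if 0
static int
bge_dma_block_usage_sketch(struct bge_softc *sc)
{
	bus_dma_tag_t tag;
	bus_dmamap_t map;
	void *vaddr;
	bus_addr_t paddr;
	int error;

	error = bge_dma_block_alloc(sc, BGE_STATUS_BLK_SZ,
				    &tag, &map, &vaddr, &paddr);
	if (error)
		return error;

	/* ... use vaddr (kernel mapping) and paddr (device address) ... */

	bge_dma_block_free(tag, map, vaddr);
	return 0;
}
#endif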
static void
bge_dma_block_free(bus_dma_tag_t tag, bus_dmamap_t map, void *addr)
{
	if (tag != NULL) {
		bus_dmamap_unload(tag, map);
		bus_dmamem_free(tag, addr, map);
		bus_dma_tag_destroy(tag);
	}
}
/*
 * Grrr. The link status word in the status block does
 * not work correctly on the BCM5700 rev AX and BX chips,
 * according to all available information. Hence, we have
 * to enable MII interrupts in order to properly obtain
 * async link changes. Unfortunately, this also means that
 * we have to read the MAC status register to detect link
 * changes, thereby adding an additional register access to
 * the interrupt handler.
 *
 * XXX: perhaps the link state detection procedure used for
 * BGE_CHIPID_BCM5700_B2 can be used for other BCM5700 revisions.
 */
static void
bge_bcm5700_link_upd(struct bge_softc *sc, uint32_t status __unused)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct mii_data *mii = device_get_softc(sc->bge_miibus);

	mii_pollstat(mii);

	if (!sc->bge_link &&
	    (mii->mii_media_status & IFM_ACTIVE) &&
	    IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
		sc->bge_link++;
		if (bootverbose)
			if_printf(ifp, "link UP\n");
	} else if (sc->bge_link &&
	    (!(mii->mii_media_status & IFM_ACTIVE) ||
	    IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE)) {
		sc->bge_link = 0;
		if (bootverbose)
			if_printf(ifp, "link DOWN\n");
	}

	/* Clear the interrupt. */
	CSR_WRITE_4(sc, BGE_MAC_EVT_ENB, BGE_EVTENB_MI_INTERRUPT);
	bge_miibus_readreg(sc->bge_dev, 1, BRGPHY_MII_ISR);
	bge_miibus_writereg(sc->bge_dev, 1, BRGPHY_MII_IMR, BRGPHY_INTRS);
}
static void
bge_tbi_link_upd(struct bge_softc *sc, uint32_t status)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;

#define PCS_ENCODE_ERR	(BGE_MACSTAT_PORT_DECODE_ERROR|BGE_MACSTAT_MI_COMPLETE)

	/*
	 * Sometimes PCS encoding errors are detected in
	 * TBI mode (on fiber NICs), and for some reason
	 * the chip will signal them as link changes.
	 * If we get a link change event, but the 'PCS
	 * encoding error' bit in the MAC status register
	 * is set, don't bother doing a link check.
	 * This avoids spurious "gigabit link up" messages
	 * that sometimes appear on fiber NICs during
	 * periods of heavy traffic.
	 */
	if (status & BGE_MACSTAT_TBI_PCS_SYNCHED) {
		if (!sc->bge_link) {
			sc->bge_link++;
			if (sc->bge_asicrev == BGE_ASICREV_BCM5704) {
				BGE_CLRBIT(sc, BGE_MAC_MODE,
					   BGE_MACMODE_TBI_SEND_CFGS);
			}
			CSR_WRITE_4(sc, BGE_MAC_STS, 0xFFFFFFFF);

			if (bootverbose)
				if_printf(ifp, "link UP\n");

			ifp->if_link_state = LINK_STATE_UP;
			if_link_state_change(ifp);
		}
	} else if ((status & PCS_ENCODE_ERR) != PCS_ENCODE_ERR) {
		if (sc->bge_link) {
			sc->bge_link = 0;

			if (bootverbose)
				if_printf(ifp, "link DOWN\n");

			ifp->if_link_state = LINK_STATE_DOWN;
			if_link_state_change(ifp);
		}
	}

#undef PCS_ENCODE_ERR

	/* Clear the attention. */
	CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED |
	    BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE |
	    BGE_MACSTAT_LINK_CHANGED);
}
static void
bge_copper_link_upd(struct bge_softc *sc, uint32_t status __unused)
{
	/*
	 * Check that the AUTOPOLL bit is set before
	 * processing the event as a real link change.
	 * Turning AUTOPOLL on and off in the MII read/write
	 * functions will often trigger a link status
	 * interrupt for no reason.
	 */
	if (CSR_READ_4(sc, BGE_MI_MODE) & BGE_MIMODE_AUTOPOLL) {
		struct ifnet *ifp = &sc->arpcom.ac_if;
		struct mii_data *mii = device_get_softc(sc->bge_miibus);

		mii_pollstat(mii);

		if (!sc->bge_link &&
		    (mii->mii_media_status & IFM_ACTIVE) &&
		    IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
			sc->bge_link++;
			if (bootverbose)
				if_printf(ifp, "link UP\n");
		} else if (sc->bge_link &&
		    (!(mii->mii_media_status & IFM_ACTIVE) ||
		    IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE)) {
			sc->bge_link = 0;
			if (bootverbose)
				if_printf(ifp, "link DOWN\n");
		}
	}

	/* Clear the attention. */
	CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED |
	    BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE |
	    BGE_MACSTAT_LINK_CHANGED);
}
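/*
 * Illustrative sketch (not compiled): the AUTOPOLL check in
 * bge_copper_link_upd() above is what keeps the handler from treating
 * the register traffic generated by bge_miibus_readreg()/_writereg()
 * (which temporarily toggle auto-polling) as genuine link transitions.
 * The gate in isolation (helper name hypothetical):
 */
#if 0
static int
bge_link_event_trusted_sketch(struct bge_softc *sc)
{
	/* Nonzero when a link-change attention should be acted upon. */
	return (CSR_READ_4(sc, BGE_MI_MODE) & BGE_MIMODE_AUTOPOLL) != 0;
}
#endif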