sys/dev/netif/bge/if_bge.c
/*
 * Copyright (c) 2001 Wind River Systems
 * Copyright (c) 1997, 1998, 1999, 2001
 *	Bill Paul <wpaul@windriver.com>.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Bill Paul.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/dev/bge/if_bge.c,v 1.3.2.39 2005/07/03 03:41:18 silby Exp $
 */
/*
 * Broadcom BCM570x family gigabit ethernet driver for FreeBSD.
 *
 * Written by Bill Paul <wpaul@windriver.com>
 * Senior Engineer, Wind River Systems
 */

/*
 * The Broadcom BCM5700 is based on technology originally developed by
 * Alteon Networks as part of the Tigon I and Tigon II gigabit ethernet
 * MAC chips.  The BCM5700, sometimes referred to as the Tigon III, has
 * two on-board MIPS R4000 CPUs and can have as much as 16MB of external
 * SSRAM.  The BCM5700 supports TCP, UDP and IP checksum offload, jumbo
 * frames, highly configurable RX filtering, and 16 RX and TX queues
 * (which, along with RX filter rules, can be used for QOS applications).
 * Other features, such as TCP segmentation, may be available as part
 * of value-added firmware updates.  Unlike the Tigon I and Tigon II,
 * firmware images can be stored in hardware and need not be compiled
 * into the driver.
 *
 * The BCM5700 supports the PCI v2.2 and PCI-X v1.0 standards, and will
 * function in a 32-bit/64-bit 33/66MHz bus, or a 64-bit/133MHz bus.
 *
 * The BCM5701 is a single-chip solution incorporating both the BCM5700
 * MAC and a BCM5401 10/100/1000 PHY.  Unlike the BCM5700, the BCM5701
 * does not support external SSRAM.
 *
 * Broadcom also produces a variation of the BCM5700 under the "Altima"
 * brand name, which is functionally similar but lacks PCI-X support.
 *
 * Without external SSRAM, you can only have at most 4 TX rings,
 * and the use of the mini RX ring is disabled.  This seems to imply
 * that these features are simply not available on the BCM5701.  As a
 * result, this driver does not implement any support for the mini RX
 * ring.
 */
#include "opt_ifpoll.h"

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/interrupt.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/queue.h>
#include <sys/rman.h>
#include <sys/serialize.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>

#include <netinet/ip.h>
#include <netinet/tcp.h>

#include <net/bpf.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_poll.h>
#include <net/if_types.h>
#include <net/ifq_var.h>
#include <net/vlan/if_vlan_var.h>
#include <net/vlan/if_vlan_ether.h>

#include <dev/netif/mii_layer/mii.h>
#include <dev/netif/mii_layer/miivar.h>
#include <dev/netif/mii_layer/brgphyreg.h>

#include "pcidevs.h"
#include <bus/pci/pcireg.h>
#include <bus/pci/pcivar.h>

#include <dev/netif/bge/if_bgereg.h>
#include <dev/netif/bge/if_bgevar.h>

/* "device miibus" required.  See GENERIC if you get errors here. */
#include "miibus_if.h"
#define BGE_CSUM_FEATURES	(CSUM_IP | CSUM_TCP)

#define BGE_RESET_SHUTDOWN	0
#define BGE_RESET_START		1
#define BGE_RESET_SUSPEND	2
static const struct bge_type {
	uint16_t	bge_vid;
	uint16_t	bge_did;
	char		*bge_name;
} bge_devs[] = {
	{ PCI_VENDOR_3COM, PCI_PRODUCT_3COM_3C996,
		"3COM 3C996 Gigabit Ethernet" },

	{ PCI_VENDOR_ALTEON, PCI_PRODUCT_ALTEON_BCM5700,
		"Alteon BCM5700 Gigabit Ethernet" },
	{ PCI_VENDOR_ALTEON, PCI_PRODUCT_ALTEON_BCM5701,
		"Alteon BCM5701 Gigabit Ethernet" },

	{ PCI_VENDOR_ALTIMA, PCI_PRODUCT_ALTIMA_AC1000,
		"Altima AC1000 Gigabit Ethernet" },
	{ PCI_VENDOR_ALTIMA, PCI_PRODUCT_ALTIMA_AC1001,
		"Altima AC1002 Gigabit Ethernet" },
	{ PCI_VENDOR_ALTIMA, PCI_PRODUCT_ALTIMA_AC9100,
		"Altima AC9100 Gigabit Ethernet" },

	{ PCI_VENDOR_APPLE, PCI_PRODUCT_APPLE_BCM5701,
		"Apple BCM5701 Gigabit Ethernet" },

	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5700,
		"Broadcom BCM5700 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5701,
		"Broadcom BCM5701 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5702,
		"Broadcom BCM5702 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5702X,
		"Broadcom BCM5702X Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5702_ALT,
		"Broadcom BCM5702 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5703,
		"Broadcom BCM5703 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5703X,
		"Broadcom BCM5703X Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5703A3,
		"Broadcom BCM5703 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5704C,
		"Broadcom BCM5704C Dual Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5704S,
		"Broadcom BCM5704S Dual Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5704S_ALT,
		"Broadcom BCM5704S Dual Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5705,
		"Broadcom BCM5705 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5705F,
		"Broadcom BCM5705F Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5705K,
		"Broadcom BCM5705K Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5705M,
		"Broadcom BCM5705M Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5705M_ALT,
		"Broadcom BCM5705M Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5714,
		"Broadcom BCM5714C Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5714S,
		"Broadcom BCM5714S Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5715,
		"Broadcom BCM5715 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5715S,
		"Broadcom BCM5715S Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5720,
		"Broadcom BCM5720 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5721,
		"Broadcom BCM5721 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5722,
		"Broadcom BCM5722 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5723,
		"Broadcom BCM5723 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5750,
		"Broadcom BCM5750 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5750M,
		"Broadcom BCM5750M Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5751,
		"Broadcom BCM5751 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5751F,
		"Broadcom BCM5751F Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5751M,
		"Broadcom BCM5751M Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5752,
		"Broadcom BCM5752 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5752M,
		"Broadcom BCM5752M Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5753,
		"Broadcom BCM5753 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5753F,
		"Broadcom BCM5753F Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5753M,
		"Broadcom BCM5753M Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5754,
		"Broadcom BCM5754 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5754M,
		"Broadcom BCM5754M Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5755,
		"Broadcom BCM5755 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5755M,
		"Broadcom BCM5755M Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5756,
		"Broadcom BCM5756 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5761,
		"Broadcom BCM5761 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5761E,
		"Broadcom BCM5761E Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5761S,
		"Broadcom BCM5761S Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5761SE,
		"Broadcom BCM5761SE Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5764,
		"Broadcom BCM5764 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5780,
		"Broadcom BCM5780 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5780S,
		"Broadcom BCM5780S Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5781,
		"Broadcom BCM5781 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5782,
		"Broadcom BCM5782 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5784,
		"Broadcom BCM5784 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5785F,
		"Broadcom BCM5785F Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5785G,
		"Broadcom BCM5785G Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5786,
		"Broadcom BCM5786 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5787,
		"Broadcom BCM5787 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5787F,
		"Broadcom BCM5787F Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5787M,
		"Broadcom BCM5787M Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5788,
		"Broadcom BCM5788 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5789,
		"Broadcom BCM5789 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5901,
		"Broadcom BCM5901 Fast Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5901A2,
		"Broadcom BCM5901A2 Fast Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5903M,
		"Broadcom BCM5903M Fast Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5906,
		"Broadcom BCM5906 Fast Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5906M,
		"Broadcom BCM5906M Fast Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57760,
		"Broadcom BCM57760 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57780,
		"Broadcom BCM57780 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57788,
		"Broadcom BCM57788 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57790,
		"Broadcom BCM57790 Gigabit Ethernet" },

	{ PCI_VENDOR_SCHNEIDERKOCH, PCI_PRODUCT_SCHNEIDERKOCH_SK_9DX1,
		"SysKonnect Gigabit Ethernet" },

	{ 0, 0, NULL }
};
#define BGE_IS_JUMBO_CAPABLE(sc)	((sc)->bge_flags & BGE_FLAG_JUMBO)
#define BGE_IS_5700_FAMILY(sc)		((sc)->bge_flags & BGE_FLAG_5700_FAMILY)
#define BGE_IS_5705_PLUS(sc)		((sc)->bge_flags & BGE_FLAG_5705_PLUS)
#define BGE_IS_5714_FAMILY(sc)		((sc)->bge_flags & BGE_FLAG_5714_FAMILY)
#define BGE_IS_575X_PLUS(sc)		((sc)->bge_flags & BGE_FLAG_575X_PLUS)
#define BGE_IS_5755_PLUS(sc)		((sc)->bge_flags & BGE_FLAG_5755_PLUS)
#define BGE_IS_5788(sc)			((sc)->bge_flags & BGE_FLAG_5788)

#define BGE_IS_CRIPPLED(sc)	\
	(BGE_IS_5788((sc)) || (sc)->bge_asicrev == BGE_ASICREV_BCM5700)
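
/*
 * These convenience macros test chip-family flags that are set once at
 * attach time from the probed ASIC revision.  BGE_IS_CRIPPLED() in
 * particular matches the 5788 and the original 5700, which need the
 * more conservative interrupt path (bge_intr_crippled) below.
 */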
typedef int	(*bge_eaddr_fcn_t)(struct bge_softc *, uint8_t[]);

static int	bge_probe(device_t);
static int	bge_attach(device_t);
static int	bge_detach(device_t);
static void	bge_txeof(struct bge_softc *, uint16_t);
static void	bge_rxeof(struct bge_softc *, uint16_t, int);

static void	bge_tick(void *);
static void	bge_stats_update(struct bge_softc *);
static void	bge_stats_update_regs(struct bge_softc *);
static struct mbuf *
		bge_defrag_shortdma(struct mbuf *);
static int	bge_encap(struct bge_softc *, struct mbuf **,
		    uint32_t *, int *);
static void	bge_xmit(struct bge_softc *, uint32_t);
static int	bge_setup_tso(struct bge_softc *, struct mbuf **,
		    uint16_t *, uint16_t *);

#ifdef IFPOLL_ENABLE
static void	bge_npoll(struct ifnet *, struct ifpoll_info *);
static void	bge_npoll_compat(struct ifnet *, void *, int);
#endif
static void	bge_intr_crippled(void *);
static void	bge_intr_legacy(void *);
static void	bge_msi(void *);
static void	bge_msi_oneshot(void *);
static void	bge_intr(struct bge_softc *);
static void	bge_enable_intr(struct bge_softc *);
static void	bge_disable_intr(struct bge_softc *);
static void	bge_start(struct ifnet *, struct ifaltq_subque *);
static int	bge_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
static void	bge_init(void *);
static void	bge_stop(struct bge_softc *);
static void	bge_watchdog(struct ifnet *);
static void	bge_shutdown(device_t);
static int	bge_suspend(device_t);
static int	bge_resume(device_t);
static int	bge_ifmedia_upd(struct ifnet *);
static void	bge_ifmedia_sts(struct ifnet *, struct ifmediareq *);

static uint8_t	bge_nvram_getbyte(struct bge_softc *, int, uint8_t *);
static int	bge_read_nvram(struct bge_softc *, caddr_t, int, int);

static uint8_t	bge_eeprom_getbyte(struct bge_softc *, uint32_t, uint8_t *);
static int	bge_read_eeprom(struct bge_softc *, caddr_t, uint32_t, size_t);

static void	bge_setmulti(struct bge_softc *);
static void	bge_setpromisc(struct bge_softc *);
static void	bge_enable_msi(struct bge_softc *sc);

static int	bge_alloc_jumbo_mem(struct bge_softc *);
static void	bge_free_jumbo_mem(struct bge_softc *);
static struct bge_jslot
		*bge_jalloc(struct bge_softc *);
static void	bge_jfree(void *);
static void	bge_jref(void *);
static int	bge_newbuf_std(struct bge_softc *, int, int);
static int	bge_newbuf_jumbo(struct bge_softc *, int, int);
static void	bge_setup_rxdesc_std(struct bge_softc *, int);
static void	bge_setup_rxdesc_jumbo(struct bge_softc *, int);
static int	bge_init_rx_ring_std(struct bge_softc *);
static void	bge_free_rx_ring_std(struct bge_softc *);
static int	bge_init_rx_ring_jumbo(struct bge_softc *);
static void	bge_free_rx_ring_jumbo(struct bge_softc *);
static void	bge_free_tx_ring(struct bge_softc *);
static int	bge_init_tx_ring(struct bge_softc *);

static int	bge_chipinit(struct bge_softc *);
static int	bge_blockinit(struct bge_softc *);
static void	bge_stop_block(struct bge_softc *, bus_size_t, uint32_t);

static uint32_t	bge_readmem_ind(struct bge_softc *, uint32_t);
static void	bge_writemem_ind(struct bge_softc *, uint32_t, uint32_t);
#ifdef notdef
static uint32_t	bge_readreg_ind(struct bge_softc *, uint32_t);
#endif
static void	bge_writereg_ind(struct bge_softc *, uint32_t, uint32_t);
static void	bge_writemem_direct(struct bge_softc *, uint32_t, uint32_t);
static void	bge_writembx(struct bge_softc *, int, int);

static int	bge_miibus_readreg(device_t, int, int);
static int	bge_miibus_writereg(device_t, int, int, int);
static void	bge_miibus_statchg(device_t);
static void	bge_bcm5700_link_upd(struct bge_softc *, uint32_t);
static void	bge_tbi_link_upd(struct bge_softc *, uint32_t);
static void	bge_copper_link_upd(struct bge_softc *, uint32_t);
static void	bge_autopoll_link_upd(struct bge_softc *, uint32_t);
static void	bge_link_poll(struct bge_softc *);

static void	bge_reset(struct bge_softc *);

static int	bge_dma_alloc(struct bge_softc *);
static void	bge_dma_free(struct bge_softc *);
static int	bge_dma_block_alloc(struct bge_softc *, bus_size_t,
		    bus_dma_tag_t *, bus_dmamap_t *,
		    void **, bus_addr_t *);
static void	bge_dma_block_free(bus_dma_tag_t, bus_dmamap_t, void *);

static int	bge_get_eaddr_mem(struct bge_softc *, uint8_t[]);
static int	bge_get_eaddr_nvram(struct bge_softc *, uint8_t[]);
static int	bge_get_eaddr_eeprom(struct bge_softc *, uint8_t[]);
static int	bge_get_eaddr(struct bge_softc *, uint8_t[]);

static void	bge_coal_change(struct bge_softc *);
static int	bge_sysctl_rx_coal_ticks(SYSCTL_HANDLER_ARGS);
static int	bge_sysctl_tx_coal_ticks(SYSCTL_HANDLER_ARGS);
static int	bge_sysctl_rx_coal_bds(SYSCTL_HANDLER_ARGS);
static int	bge_sysctl_tx_coal_bds(SYSCTL_HANDLER_ARGS);
static int	bge_sysctl_rx_coal_ticks_int(SYSCTL_HANDLER_ARGS);
static int	bge_sysctl_tx_coal_ticks_int(SYSCTL_HANDLER_ARGS);
static int	bge_sysctl_rx_coal_bds_int(SYSCTL_HANDLER_ARGS);
static int	bge_sysctl_tx_coal_bds_int(SYSCTL_HANDLER_ARGS);
static int	bge_sysctl_coal_chg(SYSCTL_HANDLER_ARGS, uint32_t *,
		    int, int, uint32_t);

static void	bge_sig_post_reset(struct bge_softc *, int);
static void	bge_sig_legacy(struct bge_softc *, int);
static void	bge_sig_pre_reset(struct bge_softc *, int);
static void	bge_stop_fw(struct bge_softc *);
static void	bge_asf_driver_up(struct bge_softc *);

static void	bge_ape_lock_init(struct bge_softc *);
static void	bge_ape_read_fw_ver(struct bge_softc *);
static int	bge_ape_lock(struct bge_softc *, int);
static void	bge_ape_unlock(struct bge_softc *, int);
static void	bge_ape_send_event(struct bge_softc *, uint32_t);
static void	bge_ape_driver_state_change(struct bge_softc *, int);
/*
 * Set the following tunable to 1 for some IBM blade servers with the DNLK
 * switch module.  Auto negotiation is broken for those configurations.
 */
static int	bge_fake_autoneg = 0;
TUNABLE_INT("hw.bge.fake_autoneg", &bge_fake_autoneg);

static int	bge_msi_enable = 1;
TUNABLE_INT("hw.bge.msi.enable", &bge_msi_enable);

static int	bge_allow_asf = 1;
TUNABLE_INT("hw.bge.allow_asf", &bge_allow_asf);

#if !defined(KTR_IF_BGE)
#define KTR_IF_BGE	KTR_ALL
#endif
KTR_INFO_MASTER(if_bge);
KTR_INFO(KTR_IF_BGE, if_bge, intr, 0, "intr");
KTR_INFO(KTR_IF_BGE, if_bge, rx_pkt, 1, "rx_pkt");
KTR_INFO(KTR_IF_BGE, if_bge, tx_pkt, 2, "tx_pkt");
#define logif(name)	KTR_LOG(if_bge_ ## name)
static device_method_t bge_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		bge_probe),
	DEVMETHOD(device_attach,	bge_attach),
	DEVMETHOD(device_detach,	bge_detach),
	DEVMETHOD(device_shutdown,	bge_shutdown),
	DEVMETHOD(device_suspend,	bge_suspend),
	DEVMETHOD(device_resume,	bge_resume),

	/* bus interface */
	DEVMETHOD(bus_print_child,	bus_generic_print_child),
	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),

	/* MII interface */
	DEVMETHOD(miibus_readreg,	bge_miibus_readreg),
	DEVMETHOD(miibus_writereg,	bge_miibus_writereg),
	DEVMETHOD(miibus_statchg,	bge_miibus_statchg),

	DEVMETHOD_END
};

static DEFINE_CLASS_0(bge, bge_driver, bge_methods, sizeof(struct bge_softc));
static devclass_t bge_devclass;

DECLARE_DUMMY_MODULE(if_bge);
MODULE_DEPEND(if_bge, miibus, 1, 1, 1);
DRIVER_MODULE(if_bge, pci, bge_driver, bge_devclass, NULL, NULL);
DRIVER_MODULE(miibus, bge, miibus_driver, miibus_devclass, NULL, NULL);
static uint32_t
bge_readmem_ind(struct bge_softc *sc, uint32_t off)
{
	device_t dev = sc->bge_dev;
	uint32_t val;

	if (sc->bge_asicrev == BGE_ASICREV_BCM5906 &&
	    off >= BGE_STATS_BLOCK && off < BGE_SEND_RING_1_TO_4)
		return 0;

	pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4);
	val = pci_read_config(dev, BGE_PCI_MEMWIN_DATA, 4);
	pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, 0, 4);
	return (val);
}
static void
bge_writemem_ind(struct bge_softc *sc, uint32_t off, uint32_t val)
{
	device_t dev = sc->bge_dev;

	if (sc->bge_asicrev == BGE_ASICREV_BCM5906 &&
	    off >= BGE_STATS_BLOCK && off < BGE_SEND_RING_1_TO_4)
		return;

	pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4);
	pci_write_config(dev, BGE_PCI_MEMWIN_DATA, val, 4);
	pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, 0, 4);
}
#ifdef notdef
static uint32_t
bge_readreg_ind(struct bge_softc *sc, uint32_t off)
{
	device_t dev = sc->bge_dev;

	pci_write_config(dev, BGE_PCI_REG_BASEADDR, off, 4);
	return(pci_read_config(dev, BGE_PCI_REG_DATA, 4));
}
#endif
static void
bge_writereg_ind(struct bge_softc *sc, uint32_t off, uint32_t val)
{
	device_t dev = sc->bge_dev;

	pci_write_config(dev, BGE_PCI_REG_BASEADDR, off, 4);
	pci_write_config(dev, BGE_PCI_REG_DATA, val, 4);
}

static void
bge_writemem_direct(struct bge_softc *sc, uint32_t off, uint32_t val)
{
	CSR_WRITE_4(sc, off, val);
}
static void
bge_writembx(struct bge_softc *sc, int off, int val)
{
	if (sc->bge_asicrev == BGE_ASICREV_BCM5906)
		off += BGE_LPMBX_IRQ0_HI - BGE_MBX_IRQ0_HI;

	CSR_WRITE_4(sc, off, val);
	if (sc->bge_mbox_reorder)
		CSR_READ_4(sc, off);
}
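
/*
 * The read-back in bge_writembx() acts as a posted-write flush: on
 * host bridges flagged with bge_mbox_reorder, a mailbox write may
 * otherwise be reordered or delayed, so reading the same register
 * forces it out to the chip before the caller proceeds.
 */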
static uint8_t
bge_nvram_getbyte(struct bge_softc *sc, int addr, uint8_t *dest)
{
	uint32_t access, byte = 0;
	int i;

	/* Lock. */
	CSR_WRITE_4(sc, BGE_NVRAM_SWARB, BGE_NVRAMSWARB_SET1);
	for (i = 0; i < 8000; i++) {
		if (CSR_READ_4(sc, BGE_NVRAM_SWARB) & BGE_NVRAMSWARB_GNT1)
			break;
		DELAY(20);
	}
	if (i == 8000)
		return (1);

	/* Enable access. */
	access = CSR_READ_4(sc, BGE_NVRAM_ACCESS);
	CSR_WRITE_4(sc, BGE_NVRAM_ACCESS, access | BGE_NVRAMACC_ENABLE);

	CSR_WRITE_4(sc, BGE_NVRAM_ADDR, addr & 0xfffffffc);
	CSR_WRITE_4(sc, BGE_NVRAM_CMD, BGE_NVRAM_READCMD);
	for (i = 0; i < BGE_TIMEOUT * 10; i++) {
		DELAY(10);
		if (CSR_READ_4(sc, BGE_NVRAM_CMD) & BGE_NVRAMCMD_DONE) {
			DELAY(10);
			break;
		}
	}

	if (i == BGE_TIMEOUT * 10) {
		if_printf(&sc->arpcom.ac_if, "nvram read timed out\n");
		return (1);
	}

	/* Get result. */
	byte = CSR_READ_4(sc, BGE_NVRAM_RDDATA);
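
	/*
	 * The controller returns the 32-bit word containing the target
	 * byte, with the lowest NVRAM address in the most significant
	 * octet.  bswap32() reverses that ordering, so the shift below
	 * by (addr % 4) * 8 bits lands the requested byte in the low
	 * octet (e.g. addr = 6 fetches the word at 4 and shifts right
	 * by 16 bits).
	 */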
	*dest = (bswap32(byte) >> ((addr % 4) * 8)) & 0xFF;

	/* Disable access. */
	CSR_WRITE_4(sc, BGE_NVRAM_ACCESS, access);

	/* Unlock. */
	CSR_WRITE_4(sc, BGE_NVRAM_SWARB, BGE_NVRAMSWARB_CLR1);
	CSR_READ_4(sc, BGE_NVRAM_SWARB);

	return (0);
}
/*
 * Read a sequence of bytes from NVRAM.
 */
static int
bge_read_nvram(struct bge_softc *sc, caddr_t dest, int off, int cnt)
{
	int err = 0, i;
	uint8_t byte = 0;

	if (sc->bge_asicrev != BGE_ASICREV_BCM5906)
		return (1);

	for (i = 0; i < cnt; i++) {
		err = bge_nvram_getbyte(sc, off + i, &byte);
		if (err)
			break;
		*(dest + i) = byte;
	}

	return (err ? 1 : 0);
}
/*
 * Read a byte of data stored in the EEPROM at address 'addr.'  The
 * BCM570x supports both the traditional bitbang interface and an
 * auto access interface for reading the EEPROM.  We use the auto
 * access method.
 */
static uint8_t
bge_eeprom_getbyte(struct bge_softc *sc, uint32_t addr, uint8_t *dest)
{
	int i;
	uint32_t byte = 0;

	/*
	 * Enable use of auto EEPROM access so we can avoid
	 * having to use the bitbang method.
	 */
	BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_AUTO_EEPROM);

	/* Reset the EEPROM, load the clock period. */
	CSR_WRITE_4(sc, BGE_EE_ADDR,
	    BGE_EEADDR_RESET|BGE_EEHALFCLK(BGE_HALFCLK_384SCL));
	DELAY(20);

	/* Issue the read EEPROM command. */
	CSR_WRITE_4(sc, BGE_EE_ADDR, BGE_EE_READCMD | addr);

	/* Wait for completion. */
	for (i = 0; i < BGE_TIMEOUT * 10; i++) {
		DELAY(10);
		if (CSR_READ_4(sc, BGE_EE_ADDR) & BGE_EEADDR_DONE)
			break;
	}

	if (i == BGE_TIMEOUT * 10) {
		if_printf(&sc->arpcom.ac_if, "eeprom read timed out\n");
		return(1);
	}

	/* Get result. */
	byte = CSR_READ_4(sc, BGE_EE_DATA);

	*dest = (byte >> ((addr % 4) * 8)) & 0xFF;

	return(0);
}
/*
 * Read a sequence of bytes from the EEPROM.
 */
static int
bge_read_eeprom(struct bge_softc *sc, caddr_t dest, uint32_t off, size_t len)
{
	size_t i;
	int err;
	uint8_t byte;

	for (byte = 0, err = 0, i = 0; i < len; i++) {
		err = bge_eeprom_getbyte(sc, off + i, &byte);
		if (err)
			break;
		*(dest + i) = byte;
	}

	return(err ? 1 : 0);
}
static int
bge_miibus_readreg(device_t dev, int phy, int reg)
{
	struct bge_softc *sc = device_get_softc(dev);
	uint32_t val;
	int i;

	KASSERT(phy == sc->bge_phyno,
	    ("invalid phyno %d, should be %d", phy, sc->bge_phyno));

	if (bge_ape_lock(sc, sc->bge_phy_ape_lock) != 0)
		return 0;

	/* Clear the autopoll bit if set, otherwise may trigger PCI errors. */
	if (sc->bge_mi_mode & BGE_MIMODE_AUTOPOLL) {
		CSR_WRITE_4(sc, BGE_MI_MODE,
		    sc->bge_mi_mode & ~BGE_MIMODE_AUTOPOLL);
		DELAY(80);
	}

	CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_READ | BGE_MICOMM_BUSY |
	    BGE_MIPHY(phy) | BGE_MIREG(reg));

	/* Poll for the PHY register access to complete. */
	for (i = 0; i < BGE_TIMEOUT; i++) {
		DELAY(10);
		val = CSR_READ_4(sc, BGE_MI_COMM);
		if ((val & BGE_MICOMM_BUSY) == 0) {
			DELAY(5);
			val = CSR_READ_4(sc, BGE_MI_COMM);
			break;
		}
	}
	if (i == BGE_TIMEOUT) {
		if_printf(&sc->arpcom.ac_if, "PHY read timed out "
		    "(phy %d, reg %d, val 0x%08x)\n", phy, reg, val);
		val = 0;
	}

	/* Restore the autopoll bit if necessary. */
	if (sc->bge_mi_mode & BGE_MIMODE_AUTOPOLL) {
		CSR_WRITE_4(sc, BGE_MI_MODE, sc->bge_mi_mode);
		DELAY(80);
	}

	bge_ape_unlock(sc, sc->bge_phy_ape_lock);

	if (val & BGE_MICOMM_READFAIL)
		return 0;

	return (val & 0xFFFF);
}
static int
bge_miibus_writereg(device_t dev, int phy, int reg, int val)
{
	struct bge_softc *sc = device_get_softc(dev);
	int i;

	KASSERT(phy == sc->bge_phyno,
	    ("invalid phyno %d, should be %d", phy, sc->bge_phyno));

	if (sc->bge_asicrev == BGE_ASICREV_BCM5906 &&
	    (reg == BRGPHY_MII_1000CTL || reg == BRGPHY_MII_AUXCTL))
		return 0;

	if (bge_ape_lock(sc, sc->bge_phy_ape_lock) != 0)
		return 0;

	/* Clear the autopoll bit if set, otherwise may trigger PCI errors. */
	if (sc->bge_mi_mode & BGE_MIMODE_AUTOPOLL) {
		CSR_WRITE_4(sc, BGE_MI_MODE,
		    sc->bge_mi_mode & ~BGE_MIMODE_AUTOPOLL);
		DELAY(80);
	}

	CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_WRITE | BGE_MICOMM_BUSY |
	    BGE_MIPHY(phy) | BGE_MIREG(reg) | val);

	for (i = 0; i < BGE_TIMEOUT; i++) {
		DELAY(10);
		if (!(CSR_READ_4(sc, BGE_MI_COMM) & BGE_MICOMM_BUSY)) {
			DELAY(5);
			CSR_READ_4(sc, BGE_MI_COMM); /* dummy read */
			break;
		}
	}
	if (i == BGE_TIMEOUT) {
		if_printf(&sc->arpcom.ac_if, "PHY write timed out "
		    "(phy %d, reg %d, val %d)\n", phy, reg, val);
	}

	/* Restore the autopoll bit if necessary. */
	if (sc->bge_mi_mode & BGE_MIMODE_AUTOPOLL) {
		CSR_WRITE_4(sc, BGE_MI_MODE, sc->bge_mi_mode);
		DELAY(80);
	}

	bge_ape_unlock(sc, sc->bge_phy_ape_lock);

	return 0;
}
static void
bge_miibus_statchg(device_t dev)
{
	struct bge_softc *sc;
	struct mii_data *mii;
	uint32_t mac_mode;

	sc = device_get_softc(dev);
	if ((sc->arpcom.ac_if.if_flags & IFF_RUNNING) == 0)
		return;

	mii = device_get_softc(sc->bge_miibus);

	if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
	    (IFM_ACTIVE | IFM_AVALID)) {
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_10_T:
		case IFM_100_TX:
			sc->bge_link = 1;
			break;
		case IFM_1000_T:
		case IFM_1000_SX:
		case IFM_2500_SX:
			if (sc->bge_asicrev != BGE_ASICREV_BCM5906)
				sc->bge_link = 1;
			else
				sc->bge_link = 0;
			break;
		default:
			sc->bge_link = 0;
			break;
		}
	} else {
		sc->bge_link = 0;
	}
	if (sc->bge_link == 0)
		return;

	/*
	 * APE firmware touches these registers to keep the MAC
	 * connected to the outside world.  Try to keep the
	 * accesses atomic.
	 */
	mac_mode = CSR_READ_4(sc, BGE_MAC_MODE) &
	    ~(BGE_MACMODE_PORTMODE | BGE_MACMODE_HALF_DUPLEX);

	if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T ||
	    IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_SX)
		mac_mode |= BGE_PORTMODE_GMII;
	else
		mac_mode |= BGE_PORTMODE_MII;

	if ((mii->mii_media_active & IFM_GMASK) != IFM_FDX)
		mac_mode |= BGE_MACMODE_HALF_DUPLEX;

	CSR_WRITE_4(sc, BGE_MAC_MODE, mac_mode);
	DELAY(40);
}
/*
 * Memory management for jumbo frames.
 */
static int
bge_alloc_jumbo_mem(struct bge_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct bge_jslot *entry;
	uint8_t *ptr;
	bus_addr_t paddr;
	int i, error;

	/*
	 * Create tag for jumbo mbufs.
	 * This is really a bit of a kludge.  We allocate a special
	 * jumbo buffer pool which (thanks to the way our DMA
	 * memory allocation works) will consist of contiguous
	 * pages.  This means that even though a jumbo buffer might
	 * be larger than a page size, we don't really need to
	 * map it into more than one DMA segment.  However, the
	 * default mbuf tag will result in multi-segment mappings,
	 * so we have to create a special jumbo mbuf tag that
	 * lets us get away with mapping the jumbo buffers as
	 * a single segment.  I think eventually the driver should
	 * be changed so that it uses ordinary mbufs and cluster
	 * buffers, i.e. jumbo frames can span multiple DMA
	 * descriptors.  But that's a project for another day.
	 */

	/*
	 * Create DMA stuffs for jumbo RX ring.
	 */
	error = bge_dma_block_alloc(sc, BGE_JUMBO_RX_RING_SZ,
	    &sc->bge_cdata.bge_rx_jumbo_ring_tag,
	    &sc->bge_cdata.bge_rx_jumbo_ring_map,
	    (void *)&sc->bge_ldata.bge_rx_jumbo_ring,
	    &sc->bge_ldata.bge_rx_jumbo_ring_paddr);
	if (error) {
		if_printf(ifp, "could not create jumbo RX ring\n");
		return error;
	}

	/*
	 * Create DMA stuffs for jumbo buffer block.
	 */
	error = bge_dma_block_alloc(sc, BGE_JMEM,
	    &sc->bge_cdata.bge_jumbo_tag,
	    &sc->bge_cdata.bge_jumbo_map,
	    (void **)&sc->bge_ldata.bge_jumbo_buf,
	    &paddr);
	if (error) {
		if_printf(ifp, "could not create jumbo buffer\n");
		return error;
	}

	SLIST_INIT(&sc->bge_jfree_listhead);

	/*
	 * Now divide it up into 9K pieces and save the addresses
	 * in an array.  Note that we play an evil trick here by using
	 * the first few bytes in the buffer to hold the address
	 * of the softc structure for this interface.  This is because
	 * bge_jfree() needs it, but it is called by the mbuf management
	 * code which will not pass it to us explicitly.
	 */
	for (i = 0, ptr = sc->bge_ldata.bge_jumbo_buf; i < BGE_JSLOTS; i++) {
		entry = &sc->bge_cdata.bge_jslots[i];
		entry->bge_sc = sc;
		entry->bge_buf = ptr;
		entry->bge_paddr = paddr;
		entry->bge_inuse = 0;
		entry->bge_slot = i;
		SLIST_INSERT_HEAD(&sc->bge_jfree_listhead, entry, jslot_link);

		ptr += BGE_JLEN;
		paddr += BGE_JLEN;
	}
	return 0;
}
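
/*
 * A sketch of the pool built above: the BGE_JMEM bytes of physically
 * contiguous DMA memory are carved into BGE_JSLOTS fixed slots of
 * BGE_JLEN bytes, with bge_buf and bge_paddr advancing in lockstep:
 *
 *	bge_jumbo_buf: | slot 0 | slot 1 | ... | slot BGE_JSLOTS-1 |
 *
 * Every slot starts life on bge_jfree_listhead with bge_inuse == 0.
 */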
static void
bge_free_jumbo_mem(struct bge_softc *sc)
{
	/* Destroy jumbo RX ring. */
	bge_dma_block_free(sc->bge_cdata.bge_rx_jumbo_ring_tag,
	    sc->bge_cdata.bge_rx_jumbo_ring_map,
	    sc->bge_ldata.bge_rx_jumbo_ring);

	/* Destroy jumbo buffer block. */
	bge_dma_block_free(sc->bge_cdata.bge_jumbo_tag,
	    sc->bge_cdata.bge_jumbo_map,
	    sc->bge_ldata.bge_jumbo_buf);
}
/*
 * Allocate a jumbo buffer.
 */
static struct bge_jslot *
bge_jalloc(struct bge_softc *sc)
{
	struct bge_jslot *entry;

	lwkt_serialize_enter(&sc->bge_jslot_serializer);
	entry = SLIST_FIRST(&sc->bge_jfree_listhead);
	if (entry) {
		SLIST_REMOVE_HEAD(&sc->bge_jfree_listhead, jslot_link);
		entry->bge_inuse = 1;
	} else {
		if_printf(&sc->arpcom.ac_if, "no free jumbo buffers\n");
	}
	lwkt_serialize_exit(&sc->bge_jslot_serializer);
	return(entry);
}
/*
 * Adjust usage count on a jumbo buffer.
 */
static void
bge_jref(void *arg)
{
	struct bge_jslot *entry = (struct bge_jslot *)arg;
	struct bge_softc *sc = entry->bge_sc;

	if (sc == NULL)
		panic("bge_jref: can't find softc pointer!");

	if (&sc->bge_cdata.bge_jslots[entry->bge_slot] != entry) {
		panic("bge_jref: asked to reference buffer "
		    "that we don't manage!");
	} else if (entry->bge_inuse == 0) {
		panic("bge_jref: buffer already free!");
	} else {
		atomic_add_int(&entry->bge_inuse, 1);
	}
}
/*
 * Release a jumbo buffer.
 */
static void
bge_jfree(void *arg)
{
	struct bge_jslot *entry = (struct bge_jslot *)arg;
	struct bge_softc *sc = entry->bge_sc;

	if (sc == NULL)
		panic("bge_jfree: can't find softc pointer!");

	if (&sc->bge_cdata.bge_jslots[entry->bge_slot] != entry) {
		panic("bge_jfree: asked to free buffer that we don't manage!");
	} else if (entry->bge_inuse == 0) {
		panic("bge_jfree: buffer already free!");
	} else {
		/*
		 * Possible MP race to 0, use the serializer.  The atomic insn
		 * is still needed for races against bge_jref().
		 */
		lwkt_serialize_enter(&sc->bge_jslot_serializer);
		atomic_subtract_int(&entry->bge_inuse, 1);
		if (entry->bge_inuse == 0) {
			SLIST_INSERT_HEAD(&sc->bge_jfree_listhead,
			    entry, jslot_link);
		}
		lwkt_serialize_exit(&sc->bge_jslot_serializer);
	}
}
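
/*
 * Together, bge_jref() and bge_jfree() implement a small refcount
 * protocol: references may be added concurrently with just the atomic
 * increment, while the final drop to zero is taken under the jslot
 * serializer so that putting the slot back on the free list cannot
 * race another release.
 */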
/*
 * Initialize a standard receive ring descriptor.
 */
static int
bge_newbuf_std(struct bge_softc *sc, int i, int init)
{
	struct mbuf *m_new = NULL;
	bus_dma_segment_t seg;
	bus_dmamap_t map;
	int error, nsegs;

	m_new = m_getcl(init ? M_WAITOK : M_NOWAIT, MT_DATA, M_PKTHDR);
	if (m_new == NULL)
		return ENOBUFS;
	m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;

	if ((sc->bge_flags & BGE_FLAG_RX_ALIGNBUG) == 0)
		m_adj(m_new, ETHER_ALIGN);

	error = bus_dmamap_load_mbuf_segment(sc->bge_cdata.bge_rx_mtag,
	    sc->bge_cdata.bge_rx_tmpmap, m_new,
	    &seg, 1, &nsegs, BUS_DMA_NOWAIT);
	if (error) {
		m_freem(m_new);
		return error;
	}

	if (!init) {
		bus_dmamap_sync(sc->bge_cdata.bge_rx_mtag,
		    sc->bge_cdata.bge_rx_std_dmamap[i],
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->bge_cdata.bge_rx_mtag,
		    sc->bge_cdata.bge_rx_std_dmamap[i]);
	}

	map = sc->bge_cdata.bge_rx_tmpmap;
	sc->bge_cdata.bge_rx_tmpmap = sc->bge_cdata.bge_rx_std_dmamap[i];
	sc->bge_cdata.bge_rx_std_dmamap[i] = map;

	sc->bge_cdata.bge_rx_std_chain[i].bge_mbuf = m_new;
	sc->bge_cdata.bge_rx_std_chain[i].bge_paddr = seg.ds_addr;

	bge_setup_rxdesc_std(sc, i);
	return 0;
}
static void
bge_setup_rxdesc_std(struct bge_softc *sc, int i)
{
	struct bge_rxchain *rc;
	struct bge_rx_bd *r;

	rc = &sc->bge_cdata.bge_rx_std_chain[i];
	r = &sc->bge_ldata.bge_rx_std_ring[i];

	r->bge_addr.bge_addr_lo = BGE_ADDR_LO(rc->bge_paddr);
	r->bge_addr.bge_addr_hi = BGE_ADDR_HI(rc->bge_paddr);
	r->bge_len = rc->bge_mbuf->m_len;
	r->bge_idx = i;
	r->bge_flags = BGE_RXBDFLAG_END;
}
/*
 * Initialize a jumbo receive ring descriptor.  This allocates
 * a jumbo buffer from the pool managed internally by the driver.
 */
static int
bge_newbuf_jumbo(struct bge_softc *sc, int i, int init)
{
	struct mbuf *m_new = NULL;
	struct bge_jslot *buf;
	bus_addr_t paddr;

	/* Allocate the mbuf. */
	MGETHDR(m_new, init ? M_WAITOK : M_NOWAIT, MT_DATA);
	if (m_new == NULL)
		return ENOBUFS;

	/* Allocate the jumbo buffer. */
	buf = bge_jalloc(sc);
	if (buf == NULL) {
		m_freem(m_new);
		return ENOBUFS;
	}

	/* Attach the buffer to the mbuf. */
	m_new->m_ext.ext_arg = buf;
	m_new->m_ext.ext_buf = buf->bge_buf;
	m_new->m_ext.ext_free = bge_jfree;
	m_new->m_ext.ext_ref = bge_jref;
	m_new->m_ext.ext_size = BGE_JUMBO_FRAMELEN;

	m_new->m_flags |= M_EXT;

	m_new->m_data = m_new->m_ext.ext_buf;
	m_new->m_len = m_new->m_pkthdr.len = m_new->m_ext.ext_size;

	paddr = buf->bge_paddr;
	if ((sc->bge_flags & BGE_FLAG_RX_ALIGNBUG) == 0) {
		m_adj(m_new, ETHER_ALIGN);
		paddr += ETHER_ALIGN;
	}

	/* Save necessary information. */
	sc->bge_cdata.bge_rx_jumbo_chain[i].bge_mbuf = m_new;
	sc->bge_cdata.bge_rx_jumbo_chain[i].bge_paddr = paddr;

	/* Set up the descriptor. */
	bge_setup_rxdesc_jumbo(sc, i);
	return 0;
}
static void
bge_setup_rxdesc_jumbo(struct bge_softc *sc, int i)
{
	struct bge_rx_bd *r;
	struct bge_rxchain *rc;

	r = &sc->bge_ldata.bge_rx_jumbo_ring[i];
	rc = &sc->bge_cdata.bge_rx_jumbo_chain[i];

	r->bge_addr.bge_addr_lo = BGE_ADDR_LO(rc->bge_paddr);
	r->bge_addr.bge_addr_hi = BGE_ADDR_HI(rc->bge_paddr);
	r->bge_len = rc->bge_mbuf->m_len;
	r->bge_idx = i;
	r->bge_flags = BGE_RXBDFLAG_END|BGE_RXBDFLAG_JUMBO_RING;
}
static int
bge_init_rx_ring_std(struct bge_softc *sc)
{
	int i, error;

	for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
		error = bge_newbuf_std(sc, i, 1);
		if (error)
			return error;
	}

	sc->bge_std = BGE_STD_RX_RING_CNT - 1;
	bge_writembx(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std);

	return(0);
}
static void
bge_free_rx_ring_std(struct bge_softc *sc)
{
	int i;

	for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
		struct bge_rxchain *rc = &sc->bge_cdata.bge_rx_std_chain[i];

		if (rc->bge_mbuf != NULL) {
			bus_dmamap_unload(sc->bge_cdata.bge_rx_mtag,
			    sc->bge_cdata.bge_rx_std_dmamap[i]);
			m_freem(rc->bge_mbuf);
			rc->bge_mbuf = NULL;
		}
		bzero(&sc->bge_ldata.bge_rx_std_ring[i],
		    sizeof(struct bge_rx_bd));
	}
}
static int
bge_init_rx_ring_jumbo(struct bge_softc *sc)
{
	struct bge_rcb *rcb;
	int i, error;

	for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
		error = bge_newbuf_jumbo(sc, i, 1);
		if (error)
			return error;
	}

	sc->bge_jumbo = BGE_JUMBO_RX_RING_CNT - 1;

	rcb = &sc->bge_ldata.bge_info.bge_jumbo_rx_rcb;
	rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(0, 0);
	CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);

	bge_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo);

	return(0);
}
static void
bge_free_rx_ring_jumbo(struct bge_softc *sc)
{
	int i;

	for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
		struct bge_rxchain *rc = &sc->bge_cdata.bge_rx_jumbo_chain[i];

		if (rc->bge_mbuf != NULL) {
			m_freem(rc->bge_mbuf);
			rc->bge_mbuf = NULL;
		}
		bzero(&sc->bge_ldata.bge_rx_jumbo_ring[i],
		    sizeof(struct bge_rx_bd));
	}
}
static void
bge_free_tx_ring(struct bge_softc *sc)
{
	int i;

	for (i = 0; i < BGE_TX_RING_CNT; i++) {
		if (sc->bge_cdata.bge_tx_chain[i] != NULL) {
			bus_dmamap_unload(sc->bge_cdata.bge_tx_mtag,
			    sc->bge_cdata.bge_tx_dmamap[i]);
			m_freem(sc->bge_cdata.bge_tx_chain[i]);
			sc->bge_cdata.bge_tx_chain[i] = NULL;
		}
		bzero(&sc->bge_ldata.bge_tx_ring[i],
		    sizeof(struct bge_tx_bd));
	}
}
static int
bge_init_tx_ring(struct bge_softc *sc)
{
	sc->bge_txcnt = 0;
	sc->bge_tx_saved_considx = 0;
	sc->bge_tx_prodidx = 0;

	/* Initialize transmit producer index for host-memory send ring. */
	bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bge_tx_prodidx);

	/* 5700 b2 errata */
	if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
		bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bge_tx_prodidx);

	bge_writembx(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);
	/* 5700 b2 errata */
	if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
		bge_writembx(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);

	return(0);
}
static void
bge_setmulti(struct bge_softc *sc)
{
	struct ifnet *ifp;
	struct ifmultiaddr *ifma;
	uint32_t hashes[4] = { 0, 0, 0, 0 };
	int h, i;

	ifp = &sc->arpcom.ac_if;

	if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
		for (i = 0; i < 4; i++)
			CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0xFFFFFFFF);
		return;
	}

	/* First, zot all the existing filters. */
	for (i = 0; i < 4; i++)
		CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0);

	/* Now program new ones. */
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		h = ether_crc32_le(
		    LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
		    ETHER_ADDR_LEN) & 0x7f;
		hashes[(h & 0x60) >> 5] |= 1 << (h & 0x1F);
	}

	for (i = 0; i < 4; i++)
		CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), hashes[i]);
}
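
/*
 * The 7-bit hash above spreads multicast addresses over a 128-bit
 * filter held in the four 32-bit BGE_MAR registers: bits 6-5 of the
 * little-endian CRC select the register ((h & 0x60) >> 5) and bits
 * 4-0 select the bit within it, so a frame is accepted whenever the
 * bit its hash maps to is set.
 */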
/*
 * Do endian, PCI and DMA initialization.  Also check the on-board ROM
 * self-test results.
 */
static int
bge_chipinit(struct bge_softc *sc)
{
	int i;
	uint32_t dma_rw_ctl, mode_ctl;
	uint16_t val;

	/* Set endian type before we access any non-PCI registers. */
	pci_write_config(sc->bge_dev, BGE_PCI_MISC_CTL,
	    BGE_INIT | sc->bge_pci_miscctl, 4);

	/*
	 * Clear the MAC statistics block in the NIC's
	 * internal memory.
	 */
	for (i = BGE_STATS_BLOCK;
	    i < BGE_STATS_BLOCK_END + 1; i += sizeof(uint32_t))
		BGE_MEMWIN_WRITE(sc, i, 0);

	for (i = BGE_STATUS_BLOCK;
	    i < BGE_STATUS_BLOCK_END + 1; i += sizeof(uint32_t))
		BGE_MEMWIN_WRITE(sc, i, 0);

	if (sc->bge_chiprev == BGE_CHIPREV_5704_BX) {
		/*
		 * Fix data corruption caused by non-qword write with WB.
		 * Fix master abort in PCI mode.
		 * Fix PCI latency timer.
		 */
		val = pci_read_config(sc->bge_dev, BGE_PCI_MSI_DATA + 2, 2);
		val |= (1 << 10) | (1 << 12) | (1 << 13);
		pci_write_config(sc->bge_dev, BGE_PCI_MSI_DATA + 2, val, 2);
	}

	/* Set up the PCI DMA control register. */
	dma_rw_ctl = BGE_PCI_READ_CMD | BGE_PCI_WRITE_CMD;
	if (sc->bge_flags & BGE_FLAG_PCIE) {
		/* PCI-E bus */
		/* DMA read watermark not used on PCI-E */
		dma_rw_ctl |= (0x3 << BGE_PCIDMARWCTL_WR_WAT_SHIFT);
	} else if (sc->bge_flags & BGE_FLAG_PCIX) {
		/* PCI-X bus */
		if (sc->bge_asicrev == BGE_ASICREV_BCM5780) {
			dma_rw_ctl |= (0x4 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
			    (0x2 << BGE_PCIDMARWCTL_WR_WAT_SHIFT);
			dma_rw_ctl |= BGE_PCIDMARWCTL_ONEDMA_ATONCE_GLOBAL;
		} else if (sc->bge_asicrev == BGE_ASICREV_BCM5714) {
			dma_rw_ctl |= (0x4 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
			    (0x2 << BGE_PCIDMARWCTL_WR_WAT_SHIFT);
			dma_rw_ctl |= BGE_PCIDMARWCTL_ONEDMA_ATONCE_LOCAL;
		} else if (sc->bge_asicrev == BGE_ASICREV_BCM5703 ||
		    sc->bge_asicrev == BGE_ASICREV_BCM5704) {
			uint32_t rd_wat = 0x7;
			uint32_t clkctl;

			clkctl = CSR_READ_4(sc, BGE_PCI_CLKCTL) & 0x1f;
			if ((sc->bge_flags & BGE_FLAG_MAXADDR_40BIT) &&
			    sc->bge_asicrev == BGE_ASICREV_BCM5704) {
				dma_rw_ctl |=
				    BGE_PCIDMARWCTL_ONEDMA_ATONCE_LOCAL;
			} else if (clkctl == 0x6 || clkctl == 0x7) {
				dma_rw_ctl |=
				    BGE_PCIDMARWCTL_ONEDMA_ATONCE_GLOBAL;
			}
			if (sc->bge_asicrev == BGE_ASICREV_BCM5703)
				rd_wat = 0x4;

			dma_rw_ctl |= (rd_wat << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
			    (3 << BGE_PCIDMARWCTL_WR_WAT_SHIFT);
			dma_rw_ctl |= BGE_PCIDMARWCTL_ASRT_ALL_BE;
		} else {
			dma_rw_ctl |= (0x3 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
			    (0x3 << BGE_PCIDMARWCTL_WR_WAT_SHIFT);
			dma_rw_ctl |= 0xf;
		}
	} else {
		/* Conventional PCI bus */
		dma_rw_ctl |= (0x7 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
		    (0x7 << BGE_PCIDMARWCTL_WR_WAT_SHIFT);
		if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
		    sc->bge_asicrev != BGE_ASICREV_BCM5750)
			dma_rw_ctl |= 0xf;
	}

	if (sc->bge_asicrev == BGE_ASICREV_BCM5703 ||
	    sc->bge_asicrev == BGE_ASICREV_BCM5704) {
		dma_rw_ctl &= ~BGE_PCIDMARWCTL_MINDMA;
	} else if (sc->bge_asicrev == BGE_ASICREV_BCM5700 ||
	    sc->bge_asicrev == BGE_ASICREV_BCM5701) {
		dma_rw_ctl |= BGE_PCIDMARWCTL_USE_MRM |
		    BGE_PCIDMARWCTL_ASRT_ALL_BE;
	}
	pci_write_config(sc->bge_dev, BGE_PCI_DMA_RW_CTL, dma_rw_ctl, 4);

	/*
	 * Set up general mode register.
	 */
	mode_ctl = BGE_DMA_SWAP_OPTIONS|
	    BGE_MODECTL_MAC_ATTN_INTR|BGE_MODECTL_HOST_SEND_BDS|
	    BGE_MODECTL_TX_NO_PHDR_CSUM;

	/*
	 * The BCM5701 B5 has a bug causing data corruption when using
	 * 64-bit DMA reads, which can be terminated early and then
	 * completed later as 32-bit accesses, in combination with
	 * certain bridges.
	 */
	if (sc->bge_asicrev == BGE_ASICREV_BCM5701 &&
	    sc->bge_chipid == BGE_CHIPID_BCM5701_B5)
		mode_ctl |= BGE_MODECTL_FORCE_PCI32;

	/*
	 * Tell the firmware the driver is running.
	 */
	if (sc->bge_asf_mode & ASF_STACKUP)
		mode_ctl |= BGE_MODECTL_STACKUP;

	CSR_WRITE_4(sc, BGE_MODE_CTL, mode_ctl);

	/*
	 * Disable memory write invalidate.  Apparently it is not supported
	 * properly by these devices.  Also ensure that INTx isn't disabled,
	 * as these chips need it even when using MSI.
	 */
	PCI_CLRBIT(sc->bge_dev, BGE_PCI_CMD,
	    (PCIM_CMD_MWRICEN | PCIM_CMD_INTxDIS), 4);

	/* Set the timer prescaler (always 66MHz). */
	CSR_WRITE_4(sc, BGE_MISC_CFG, 65 << 1/*BGE_32BITTIME_66MHZ*/);

	if (sc->bge_asicrev == BGE_ASICREV_BCM5906) {
		DELAY(40);	/* XXX */

		/* Put PHY into ready state */
		BGE_CLRBIT(sc, BGE_MISC_CFG, BGE_MISCCFG_EPHY_IDDQ);
		CSR_READ_4(sc, BGE_MISC_CFG);	/* Flush */
		DELAY(40);
	}

	return(0);
}
static int
bge_blockinit(struct bge_softc *sc)
{
	struct bge_rcb *rcb;
	bus_size_t vrcb;
	bge_hostaddr taddr;
	uint32_t val;
	int i, limit;

	/*
	 * Initialize the memory window pointer register so that
	 * we can access the first 32K of internal NIC RAM.  This will
	 * allow us to set up the TX send ring RCBs and the RX return
	 * ring RCBs, plus other things which live in NIC memory.
	 */
	CSR_WRITE_4(sc, BGE_PCI_MEMWIN_BASEADDR, 0);

	/* Note: the BCM5704 has a smaller mbuf space than other chips. */

	if (!BGE_IS_5705_PLUS(sc)) {
		/* Configure mbuf memory pool */
		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_BASEADDR, BGE_BUFFPOOL_1);
		if (sc->bge_asicrev == BGE_ASICREV_BCM5704)
			CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x10000);
		else
			CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x18000);

		/* Configure DMA resource pool */
		CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_BASEADDR,
		    BGE_DMA_DESCRIPTORS);
		CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LEN, 0x2000);
	}

	/* Configure mbuf pool watermarks */
	if (!BGE_IS_5705_PLUS(sc)) {
		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x50);
		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x20);
		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60);
	} else if (sc->bge_asicrev == BGE_ASICREV_BCM5906) {
		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0);
		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x04);
		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x10);
	} else {
		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0);
		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x10);
		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60);
	}

	/* Configure DMA resource watermarks */
	CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LOWAT, 5);
	CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_HIWAT, 10);

	/* Enable buffer manager */
	CSR_WRITE_4(sc, BGE_BMAN_MODE,
	    BGE_BMANMODE_ENABLE|BGE_BMANMODE_LOMBUF_ATTN);

	/* Poll for buffer manager start indication */
	for (i = 0; i < BGE_TIMEOUT; i++) {
		if (CSR_READ_4(sc, BGE_BMAN_MODE) & BGE_BMANMODE_ENABLE)
			break;
		DELAY(10);
	}

	if (i == BGE_TIMEOUT) {
		if_printf(&sc->arpcom.ac_if,
		    "buffer manager failed to start\n");
		return(ENXIO);
	}

	/* Enable flow-through queues */
	CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
	CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);

	/* Wait until queue initialization is complete */
	for (i = 0; i < BGE_TIMEOUT; i++) {
		if (CSR_READ_4(sc, BGE_FTQ_RESET) == 0)
			break;
		DELAY(10);
	}

	if (i == BGE_TIMEOUT) {
		if_printf(&sc->arpcom.ac_if,
		    "flow-through queue init failed\n");
		return(ENXIO);
	}

	/*
	 * Summary of rings supported by the controller:
	 *
	 * Standard Receive Producer Ring
	 * - This ring is used to feed receive buffers for "standard"
	 *   sized frames (typically 1536 bytes) to the controller.
	 *
	 * Jumbo Receive Producer Ring
	 * - This ring is used to feed receive buffers for jumbo sized
	 *   frames (i.e. anything bigger than the "standard" frames)
	 *   to the controller.
	 *
	 * Mini Receive Producer Ring
	 * - This ring is used to feed receive buffers for "mini"
	 *   sized frames to the controller.
	 * - This feature required external memory for the controller
	 *   but was never used in a production system.  Should always
	 *   be disabled.
	 *
	 * Receive Return Ring
	 * - After the controller has placed an incoming frame into a
	 *   receive buffer that buffer is moved into a receive return
	 *   ring.  The driver is then responsible for passing the
	 *   buffer up to the stack.  Many versions of the controller
	 *   support multiple RR rings.
	 *
	 * Send Ring
	 * - This ring is used for outgoing frames.  Many versions of
	 *   the controller support multiple send rings.
	 */
	/* Initialize the standard receive producer ring control block. */
	rcb = &sc->bge_ldata.bge_info.bge_std_rx_rcb;
	rcb->bge_hostaddr.bge_addr_lo =
	    BGE_ADDR_LO(sc->bge_ldata.bge_rx_std_ring_paddr);
	rcb->bge_hostaddr.bge_addr_hi =
	    BGE_ADDR_HI(sc->bge_ldata.bge_rx_std_ring_paddr);
	if (BGE_IS_5705_PLUS(sc)) {
		/*
		 * Bits 31-16: Programmable ring size (512, 256, 128, 64, 32)
		 * Bits 15-2 : Reserved (should be 0)
		 * Bit 1     : 1 = Ring Disabled, 0 = Ring Enabled
		 * Bit 0     : Reserved
		 */
		rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(512, 0);
	} else {
		/*
		 * Ring size is always XXX entries
		 * Bits 31-16: Maximum RX frame size
		 * Bits 15-2 : Reserved (should be 0)
		 * Bit 1     : 1 = Ring Disabled, 0 = Ring Enabled
		 * Bit 0     : Reserved
		 */
		rcb->bge_maxlen_flags =
		    BGE_RCB_MAXLEN_FLAGS(BGE_MAX_FRAMELEN, 0);
	}
	rcb->bge_nicaddr = BGE_STD_RX_RINGS;
	/* Write the standard receive producer ring control block. */
	CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_HI, rcb->bge_hostaddr.bge_addr_hi);
	CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_LO, rcb->bge_hostaddr.bge_addr_lo);
	CSR_WRITE_4(sc, BGE_RX_STD_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);
	CSR_WRITE_4(sc, BGE_RX_STD_RCB_NICADDR, rcb->bge_nicaddr);
	/* Reset the standard receive producer ring producer index. */
	bge_writembx(sc, BGE_MBX_RX_STD_PROD_LO, 0);

	/*
	 * Initialize the jumbo RX producer ring control
	 * block.  We set the 'ring disabled' bit in the
	 * flags field until we're actually ready to start
	 * using this ring (i.e. once we set the MTU
	 * high enough to require it).
	 */
	if (BGE_IS_JUMBO_CAPABLE(sc)) {
		rcb = &sc->bge_ldata.bge_info.bge_jumbo_rx_rcb;
		/* Get the jumbo receive producer ring RCB parameters. */
		rcb->bge_hostaddr.bge_addr_lo =
		    BGE_ADDR_LO(sc->bge_ldata.bge_rx_jumbo_ring_paddr);
		rcb->bge_hostaddr.bge_addr_hi =
		    BGE_ADDR_HI(sc->bge_ldata.bge_rx_jumbo_ring_paddr);
		rcb->bge_maxlen_flags =
		    BGE_RCB_MAXLEN_FLAGS(BGE_MAX_FRAMELEN,
		    BGE_RCB_FLAG_RING_DISABLED);
		rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS;
		CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_HI,
		    rcb->bge_hostaddr.bge_addr_hi);
		CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_LO,
		    rcb->bge_hostaddr.bge_addr_lo);
		/* Program the jumbo receive producer ring RCB parameters. */
		CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS,
		    rcb->bge_maxlen_flags);
		CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_NICADDR, rcb->bge_nicaddr);
		/* Reset the jumbo receive producer ring producer index. */
		bge_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, 0);
	}

	/* Disable the mini receive producer ring RCB. */
	if (BGE_IS_5700_FAMILY(sc)) {
		rcb = &sc->bge_ldata.bge_info.bge_mini_rx_rcb;
		rcb->bge_maxlen_flags =
		    BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED);
		CSR_WRITE_4(sc, BGE_RX_MINI_RCB_MAXLEN_FLAGS,
		    rcb->bge_maxlen_flags);
		/* Reset the mini receive producer ring producer index. */
		bge_writembx(sc, BGE_MBX_RX_MINI_PROD_LO, 0);
	}

	/* Choose de-pipeline mode for BCM5906 A0, A1 and A2. */
	if (sc->bge_asicrev == BGE_ASICREV_BCM5906 &&
	    (sc->bge_chipid == BGE_CHIPID_BCM5906_A0 ||
	    sc->bge_chipid == BGE_CHIPID_BCM5906_A1 ||
	    sc->bge_chipid == BGE_CHIPID_BCM5906_A2)) {
		CSR_WRITE_4(sc, BGE_ISO_PKT_TX,
		    (CSR_READ_4(sc, BGE_ISO_PKT_TX) & ~3) | 2);
	}

	/*
	 * The BD ring replenish thresholds control how often the
	 * hardware fetches new BD's from the producer rings in host
	 * memory.  Setting the value too low on a busy system can
	 * starve the hardware and reduce the throughput.
	 *
	 * Set the BD ring replenish thresholds.  The recommended
	 * values are 1/8th the number of descriptors allocated to
	 * each ring.
	 */
	if (BGE_IS_5705_PLUS(sc))
		val = 8;
	else
		val = BGE_STD_RX_RING_CNT / 8;
	CSR_WRITE_4(sc, BGE_RBDI_STD_REPL_THRESH, val);
	if (BGE_IS_JUMBO_CAPABLE(sc)) {
		CSR_WRITE_4(sc, BGE_RBDI_JUMBO_REPL_THRESH,
		    BGE_JUMBO_RX_RING_CNT/8);
	}

	/*
	 * Disable all send rings by setting the 'ring disabled' bit
	 * in the flags field of all the TX send ring control blocks,
	 * located in NIC memory.
	 */
	if (!BGE_IS_5705_PLUS(sc)) {
		/* 5700 to 5704 had 16 send rings. */
		limit = BGE_TX_RINGS_EXTSSRAM_MAX;
	} else {
		limit = 1;
	}
	vrcb = BGE_MEMWIN_START + BGE_SEND_RING_RCB;
	for (i = 0; i < limit; i++) {
		RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
		    BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED));
		RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0);
		vrcb += sizeof(struct bge_rcb);
	}

	/* Configure send ring RCB 0 (we use only the first ring) */
	vrcb = BGE_MEMWIN_START + BGE_SEND_RING_RCB;
	BGE_HOSTADDR(taddr, sc->bge_ldata.bge_tx_ring_paddr);
	RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi);
	RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo);
	RCB_WRITE_4(sc, vrcb, bge_nicaddr,
	    BGE_NIC_TXRING_ADDR(0, BGE_TX_RING_CNT));
	RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
	    BGE_RCB_MAXLEN_FLAGS(BGE_TX_RING_CNT, 0));

	/*
	 * Disable all receive return rings by setting the
	 * 'ring disabled' bit in the flags field of all the receive
	 * return ring control blocks, located in NIC memory.
	 */
	if (!BGE_IS_5705_PLUS(sc))
		limit = BGE_RX_RINGS_MAX;
	else if (sc->bge_asicrev == BGE_ASICREV_BCM5755)
		limit = 4;
	else
		limit = 1;
	/* Disable all receive return rings. */
	vrcb = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB;
	for (i = 0; i < limit; i++) {
		RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, 0);
		RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, 0);
		RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
		    BGE_RCB_FLAG_RING_DISABLED);
		RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0);
		bge_writembx(sc, BGE_MBX_RX_CONS0_LO +
		    (i * (sizeof(uint64_t))), 0);
		vrcb += sizeof(struct bge_rcb);
	}

	/*
	 * Set up receive return ring 0.  Note that the NIC address
	 * for RX return rings is 0x0.  The return rings live entirely
	 * within the host, so the nicaddr field in the RCB isn't used.
	 */
	vrcb = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB;
	BGE_HOSTADDR(taddr, sc->bge_ldata.bge_rx_return_ring_paddr);
	RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi);
	RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo);
	RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0);
	RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
	    BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt, 0));

	/* Set random backoff seed for TX */
	CSR_WRITE_4(sc, BGE_TX_RANDOM_BACKOFF,
	    (sc->arpcom.ac_enaddr[0] + sc->arpcom.ac_enaddr[1] +
	    sc->arpcom.ac_enaddr[2] + sc->arpcom.ac_enaddr[3] +
	    sc->arpcom.ac_enaddr[4] + sc->arpcom.ac_enaddr[5]) &
	    BGE_TX_BACKOFF_SEED_MASK);
1730 /* Set inter-packet gap */
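	/* 0x2620: slot time 0x20, IPG 6, IPG CRS 2 (standard 802.3 timings). */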
1731 CSR_WRITE_4(sc, BGE_TX_LENGTHS, 0x2620);
1734 * Specify which ring to use for packets that don't match
1735 * any RX rules.
1737 CSR_WRITE_4(sc, BGE_RX_RULES_CFG, 0x08);
1740 * Configure number of RX lists. One interrupt distribution
1741 * list, sixteen active lists, one bad frames class.
1743 CSR_WRITE_4(sc, BGE_RXLP_CFG, 0x181);
1745 /* Initialize RX list placement stats mask. */
1746 CSR_WRITE_4(sc, BGE_RXLP_STATS_ENABLE_MASK, 0x007FFFFF);
1747 CSR_WRITE_4(sc, BGE_RXLP_STATS_CTL, 0x1);
1749 /* Disable host coalescing until we get it set up */
1750 CSR_WRITE_4(sc, BGE_HCC_MODE, 0x00000000);
1752 /* Poll to make sure it's shut down. */
1753 for (i = 0; i < BGE_TIMEOUT; i++) {
1754 if (!(CSR_READ_4(sc, BGE_HCC_MODE) & BGE_HCCMODE_ENABLE))
1755 break;
1756 DELAY(10);
1759 if (i == BGE_TIMEOUT) {
1760 if_printf(&sc->arpcom.ac_if,
1761 "host coalescing engine failed to idle\n");
1762 return(ENXIO);
1765 /* Set up host coalescing defaults */
1766 CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS, sc->bge_rx_coal_ticks);
1767 CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS, sc->bge_tx_coal_ticks);
1768 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS, sc->bge_rx_coal_bds);
1769 CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS, sc->bge_tx_coal_bds);
1770 if (!BGE_IS_5705_PLUS(sc)) {
1771 CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS_INT,
1772 sc->bge_rx_coal_ticks_int);
1773 CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS_INT,
1774 sc->bge_tx_coal_ticks_int);
1777 * NOTE:
1778 * The datasheet (57XX-PG105-R) says BCM5705+ do not
1779 * have the following two registers; this is obviously wrong.
1781 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT, sc->bge_rx_coal_bds_int);
1782 CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT, sc->bge_tx_coal_bds_int);
1784 /* Set up address of statistics block */
1785 if (!BGE_IS_5705_PLUS(sc)) {
1786 CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_HI,
1787 BGE_ADDR_HI(sc->bge_ldata.bge_stats_paddr));
1788 CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_LO,
1789 BGE_ADDR_LO(sc->bge_ldata.bge_stats_paddr));
1791 CSR_WRITE_4(sc, BGE_HCC_STATS_BASEADDR, BGE_STATS_BLOCK);
1792 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_BASEADDR, BGE_STATUS_BLOCK);
1793 CSR_WRITE_4(sc, BGE_HCC_STATS_TICKS, sc->bge_stat_ticks);
1796 /* Set up address of status block */
1797 bzero(sc->bge_ldata.bge_status_block, BGE_STATUS_BLK_SZ);
1798 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_HI,
1799 BGE_ADDR_HI(sc->bge_ldata.bge_status_block_paddr));
1800 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_LO,
1801 BGE_ADDR_LO(sc->bge_ldata.bge_status_block_paddr));
1804 * Set up status block partial update size.
1806 * Because only a single TX ring, RX producer ring and RX return
1807 * ring are used, ask the device to update only the minimum part
1808 * of the status block, except for BCM5700 AX/BX, whose status
1809 * block partial update size can't be configured.
1811 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
1812 sc->bge_chipid != BGE_CHIPID_BCM5700_C0) {
1813 /* XXX Actually reserved on BCM5700 AX/BX */
1814 val = BGE_STATBLKSZ_FULL;
1815 } else {
1816 val = BGE_STATBLKSZ_32BYTE;
1818 #if 0
1820 * Does not seem to have a visible effect in either
1821 * bulk data (1472B UDP datagram) or tiny data
1822 * (18B UDP datagram) TX tests.
1824 if (!BGE_IS_CRIPPLED(sc))
1825 val |= BGE_HCCMODE_CLRTICK_TX;
1826 #endif
1828 /* Turn on host coalescing state machine */
1829 CSR_WRITE_4(sc, BGE_HCC_MODE, val | BGE_HCCMODE_ENABLE);
1831 /* Turn on RX BD completion state machine and enable attentions */
1832 CSR_WRITE_4(sc, BGE_RBDC_MODE,
1833 BGE_RBDCMODE_ENABLE|BGE_RBDCMODE_ATTN);
1835 /* Turn on RX list placement state machine */
1836 CSR_WRITE_4(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);
1838 /* Turn on RX list selector state machine. */
1839 if (!BGE_IS_5705_PLUS(sc))
1840 CSR_WRITE_4(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);
1842 val = BGE_MACMODE_TXDMA_ENB | BGE_MACMODE_RXDMA_ENB |
1843 BGE_MACMODE_RX_STATS_CLEAR | BGE_MACMODE_TX_STATS_CLEAR |
1844 BGE_MACMODE_RX_STATS_ENB | BGE_MACMODE_TX_STATS_ENB |
1845 BGE_MACMODE_FRMHDR_DMA_ENB;
1847 if (sc->bge_flags & BGE_FLAG_TBI)
1848 val |= BGE_PORTMODE_TBI;
1849 else if (sc->bge_flags & BGE_FLAG_MII_SERDES)
1850 val |= BGE_PORTMODE_GMII;
1851 else
1852 val |= BGE_PORTMODE_MII;
1854 /* Allow APE to send/receive frames. */
1855 if (sc->bge_mfw_flags & BGE_MFW_ON_APE)
1856 val |= BGE_MACMODE_APE_RX_EN | BGE_MACMODE_APE_TX_EN;
1858 /* Turn on DMA, clear stats */
1859 CSR_WRITE_4(sc, BGE_MAC_MODE, val);
1860 DELAY(40);
1862 /* Set misc. local control, enable interrupts on attentions */
1863 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_ONATTN);
1865 #ifdef notdef
1866 /* Assert GPIO pins for PHY reset */
1867 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUT0|
1868 BGE_MLC_MISCIO_OUT1|BGE_MLC_MISCIO_OUT2);
1869 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUTEN0|
1870 BGE_MLC_MISCIO_OUTEN1|BGE_MLC_MISCIO_OUTEN2);
1871 #endif
1873 /* Turn on DMA completion state machine */
1874 if (!BGE_IS_5705_PLUS(sc))
1875 CSR_WRITE_4(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);
1877 /* Turn on write DMA state machine */
1878 val = BGE_WDMAMODE_ENABLE|BGE_WDMAMODE_ALL_ATTNS;
1879 if (BGE_IS_5755_PLUS(sc)) {
1880 /* Enable host coalescing bug fix. */
1881 val |= BGE_WDMAMODE_STATUS_TAG_FIX;
1883 if (sc->bge_asicrev == BGE_ASICREV_BCM5785) {
1884 /* Request larger DMA burst size to get better performance. */
1885 val |= BGE_WDMAMODE_BURST_ALL_DATA;
1887 CSR_WRITE_4(sc, BGE_WDMA_MODE, val);
1888 DELAY(40);
1890 if (sc->bge_asicrev == BGE_ASICREV_BCM5761 ||
1891 sc->bge_asicrev == BGE_ASICREV_BCM5784 ||
1892 sc->bge_asicrev == BGE_ASICREV_BCM5785 ||
1893 sc->bge_asicrev == BGE_ASICREV_BCM57780) {
1895 * Enable fix for read DMA FIFO overruns.
1896 * The fix is to limit the number of RX BDs
1897 * the hardware would fetch at a time.
1899 val = CSR_READ_4(sc, BGE_RDMA_RSRVCTRL);
1900 CSR_WRITE_4(sc, BGE_RDMA_RSRVCTRL,
1901 val | BGE_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
1904 /* Turn on read DMA state machine */
1905 val = BGE_RDMAMODE_ENABLE | BGE_RDMAMODE_ALL_ATTNS;
1906 if (sc->bge_asicrev == BGE_ASICREV_BCM5784 ||
1907 sc->bge_asicrev == BGE_ASICREV_BCM5785 ||
1908 sc->bge_asicrev == BGE_ASICREV_BCM57780)
1909 val |= BGE_RDMAMODE_BD_SBD_CRPT_ATTN |
1910 BGE_RDMAMODE_MBUF_RBD_CRPT_ATTN |
1911 BGE_RDMAMODE_MBUF_SBD_CRPT_ATTN;
1912 if (sc->bge_flags & BGE_FLAG_PCIE)
1913 val |= BGE_RDMAMODE_FIFO_LONG_BURST;
1914 if (sc->bge_flags & BGE_FLAG_TSO)
1915 val |= BGE_RDMAMODE_TSO4_ENABLE;
1916 CSR_WRITE_4(sc, BGE_RDMA_MODE, val);
1917 DELAY(40);
1919 /* Turn on RX data completion state machine */
1920 CSR_WRITE_4(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);
1922 /* Turn on RX BD initiator state machine */
1923 CSR_WRITE_4(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);
1925 /* Turn on RX data and RX BD initiator state machine */
1926 CSR_WRITE_4(sc, BGE_RDBDI_MODE, BGE_RDBDIMODE_ENABLE);
1928 /* Turn on Mbuf cluster free state machine */
1929 if (!BGE_IS_5705_PLUS(sc))
1930 CSR_WRITE_4(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);
1932 /* Turn on send BD completion state machine */
1933 CSR_WRITE_4(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);
1935 /* Turn on send data completion state machine */
1936 val = BGE_SDCMODE_ENABLE;
1937 if (sc->bge_asicrev == BGE_ASICREV_BCM5761)
1938 val |= BGE_SDCMODE_CDELAY;
1939 CSR_WRITE_4(sc, BGE_SDC_MODE, val);
1941 /* Turn on send data initiator state machine */
1942 if (sc->bge_flags & BGE_FLAG_TSO)
1943 CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE |
1944 BGE_SDIMODE_HW_LSO_PRE_DMA);
1945 else
1946 CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);
1948 /* Turn on send BD initiator state machine */
1949 CSR_WRITE_4(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);
1951 /* Turn on send BD selector state machine */
1952 CSR_WRITE_4(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);
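	/* Enable send data initiator statistics, with a faster update rate. */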
1954 CSR_WRITE_4(sc, BGE_SDI_STATS_ENABLE_MASK, 0x007FFFFF);
1955 CSR_WRITE_4(sc, BGE_SDI_STATS_CTL,
1956 BGE_SDISTATSCTL_ENABLE|BGE_SDISTATSCTL_FASTER);
1958 /* ack/clear link change events */
1959 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED|
1960 BGE_MACSTAT_CFG_CHANGED|BGE_MACSTAT_MI_COMPLETE|
1961 BGE_MACSTAT_LINK_CHANGED);
1962 CSR_WRITE_4(sc, BGE_MI_STS, 0);
1965 * Enable attention when the link has changed state for
1966 * devices that use auto polling.
1968 if (sc->bge_flags & BGE_FLAG_TBI) {
1969 CSR_WRITE_4(sc, BGE_MI_STS, BGE_MISTS_LINK);
1970 } else {
1971 if (sc->bge_mi_mode & BGE_MIMODE_AUTOPOLL) {
1972 CSR_WRITE_4(sc, BGE_MI_MODE, sc->bge_mi_mode);
1973 DELAY(80);
1975 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
1976 sc->bge_chipid != BGE_CHIPID_BCM5700_B2) {
1977 CSR_WRITE_4(sc, BGE_MAC_EVT_ENB,
1978 BGE_EVTENB_MI_INTERRUPT);
1983 * Clear any pending link state attention.
1984 * Otherwise some link state change events may be lost until attention
1985 * is cleared by the bge_intr() -> bge_softc.bge_link_upd() sequence.
1986 * This is not necessary on newer BCM chips - perhaps enabling link
1987 * state change attentions implies clearing pending attention.
1989 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED|
1990 BGE_MACSTAT_CFG_CHANGED|BGE_MACSTAT_MI_COMPLETE|
1991 BGE_MACSTAT_LINK_CHANGED);
1993 /* Enable link state change attentions. */
1994 BGE_SETBIT(sc, BGE_MAC_EVT_ENB, BGE_EVTENB_LINK_CHANGED);
1996 return(0);
2000 * Probe for a Broadcom chip. Check the PCI vendor and device IDs
2001 * against our list and return its name if we find a match. Note
2002 * that since the Broadcom controller contains VPD support, we
2003 * can get the device name string from the controller itself instead
2004 * of the compiled-in string. This is a little slow, but it guarantees
2005 * we'll always announce the right product name.
2007 static int
2008 bge_probe(device_t dev)
2010 const struct bge_type *t;
2011 uint16_t product, vendor;
2013 product = pci_get_device(dev);
2014 vendor = pci_get_vendor(dev);
2016 for (t = bge_devs; t->bge_name != NULL; t++) {
2017 if (vendor == t->bge_vid && product == t->bge_did)
2018 break;
2020 if (t->bge_name == NULL)
2021 return(ENXIO);
2023 device_set_desc(dev, t->bge_name);
2024 return(0);
2027 static int
2028 bge_attach(device_t dev)
2030 struct ifnet *ifp;
2031 struct bge_softc *sc;
2032 struct sysctl_ctx_list *ctx;
2033 struct sysctl_oid *tree;
2034 uint32_t hwcfg = 0, misccfg;
2035 int error = 0, rid, capmask;
2036 uint8_t ether_addr[ETHER_ADDR_LEN];
2037 uint16_t product, vendor;
2038 driver_intr_t *intr_func;
2039 uintptr_t mii_priv = 0;
2040 u_int intr_flags;
2041 int msi_enable;
2043 sc = device_get_softc(dev);
2044 sc->bge_dev = dev;
2045 callout_init_mp(&sc->bge_stat_timer);
2046 lwkt_serialize_init(&sc->bge_jslot_serializer);
2048 sc->bge_func_addr = pci_get_function(dev);
2049 product = pci_get_device(dev);
2050 vendor = pci_get_vendor(dev);
2052 #ifndef BURN_BRIDGES
2053 if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0) {
2054 uint32_t irq, mem;
2056 irq = pci_read_config(dev, PCIR_INTLINE, 4);
2057 mem = pci_read_config(dev, BGE_PCI_BAR0, 4);
2059 device_printf(dev, "chip is in D%d power mode "
2060 "-- setting to D0\n", pci_get_powerstate(dev));
2062 pci_set_powerstate(dev, PCI_POWERSTATE_D0);
2064 pci_write_config(dev, PCIR_INTLINE, irq, 4);
2065 pci_write_config(dev, BGE_PCI_BAR0, mem, 4);
2067 #endif /* !BURN_BRIDGES */
2070 * Map control/status registers.
2072 pci_enable_busmaster(dev);
2074 rid = BGE_PCI_BAR0;
2075 sc->bge_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
2076 RF_ACTIVE);
2078 if (sc->bge_res == NULL) {
2079 device_printf(dev, "couldn't map memory\n");
2080 return ENXIO;
2083 sc->bge_btag = rman_get_bustag(sc->bge_res);
2084 sc->bge_bhandle = rman_get_bushandle(sc->bge_res);
2086 /* Save various chip information */
2087 sc->bge_chipid =
2088 pci_read_config(dev, BGE_PCI_MISC_CTL, 4) >>
2089 BGE_PCIMISCCTL_ASICREV_SHIFT;
2090 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_USE_PRODID_REG) {
2091 /* All chips, which use BGE_PCI_PRODID_ASICREV, have CPMU */
2092 sc->bge_flags |= BGE_FLAG_CPMU;
2093 sc->bge_chipid = pci_read_config(dev, BGE_PCI_PRODID_ASICREV, 4);
2095 sc->bge_asicrev = BGE_ASICREV(sc->bge_chipid);
2096 sc->bge_chiprev = BGE_CHIPREV(sc->bge_chipid);
2098 /* Save chipset family. */
2099 switch (sc->bge_asicrev) {
2100 case BGE_ASICREV_BCM5755:
2101 case BGE_ASICREV_BCM5761:
2102 case BGE_ASICREV_BCM5784:
2103 case BGE_ASICREV_BCM5785:
2104 case BGE_ASICREV_BCM5787:
2105 case BGE_ASICREV_BCM57780:
2106 sc->bge_flags |= BGE_FLAG_5755_PLUS | BGE_FLAG_575X_PLUS |
2107 BGE_FLAG_5705_PLUS;
2108 break;
2110 case BGE_ASICREV_BCM5700:
2111 case BGE_ASICREV_BCM5701:
2112 case BGE_ASICREV_BCM5703:
2113 case BGE_ASICREV_BCM5704:
2114 sc->bge_flags |= BGE_FLAG_5700_FAMILY | BGE_FLAG_JUMBO;
2115 break;
2117 case BGE_ASICREV_BCM5714_A0:
2118 case BGE_ASICREV_BCM5780:
2119 case BGE_ASICREV_BCM5714:
2120 sc->bge_flags |= BGE_FLAG_5714_FAMILY;
2121 /* Fall through */
2123 case BGE_ASICREV_BCM5750:
2124 case BGE_ASICREV_BCM5752:
2125 case BGE_ASICREV_BCM5906:
2126 sc->bge_flags |= BGE_FLAG_575X_PLUS;
2127 /* Fall through */
2129 case BGE_ASICREV_BCM5705:
2130 sc->bge_flags |= BGE_FLAG_5705_PLUS;
2131 break;
2134 if (sc->bge_asicrev == BGE_ASICREV_BCM5906)
2135 sc->bge_flags |= BGE_FLAG_NO_EEPROM;
2137 if (sc->bge_asicrev == BGE_ASICREV_BCM5761)
2138 sc->bge_flags |= BGE_FLAG_APE;
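	/*
	 * The BCM5788 shares the BCM5705 ASIC revision; identify it
	 * by the board ID in the misc. configuration register.
	 */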
2140 misccfg = CSR_READ_4(sc, BGE_MISC_CFG) & BGE_MISCCFG_BOARD_ID_MASK;
2141 if (sc->bge_asicrev == BGE_ASICREV_BCM5705 &&
2142 (misccfg == BGE_MISCCFG_BOARD_ID_5788 ||
2143 misccfg == BGE_MISCCFG_BOARD_ID_5788M))
2144 sc->bge_flags |= BGE_FLAG_5788;
2146 /* BCM5755 or higher and BCM5906 have short DMA bug. */
2147 if (BGE_IS_5755_PLUS(sc) || sc->bge_asicrev == BGE_ASICREV_BCM5906)
2148 sc->bge_flags |= BGE_FLAG_SHORTDMA;
2151 * Increase STD RX ring prod index by at most 8 for BCM5750,
2152 * BCM5752 and BCM5755 to work around hardware errata.
2154 if (sc->bge_asicrev == BGE_ASICREV_BCM5750 ||
2155 sc->bge_asicrev == BGE_ASICREV_BCM5752 ||
2156 sc->bge_asicrev == BGE_ASICREV_BCM5755)
2157 sc->bge_rx_wreg = 8;
2160 * Check if this is a PCI-X or PCI Express device.
2162 if (BGE_IS_5705_PLUS(sc)) {
2163 if (pci_is_pcie(dev)) {
2164 sc->bge_flags |= BGE_FLAG_PCIE;
2165 sc->bge_pciecap = pci_get_pciecap_ptr(sc->bge_dev);
2166 pcie_set_max_readrq(dev, PCIEM_DEVCTL_MAX_READRQ_4096);
2168 } else {
2170 * Check if the device is in PCI-X Mode.
2171 * (This bit is not valid on PCI Express controllers.)
2173 if ((pci_read_config(sc->bge_dev, BGE_PCI_PCISTATE, 4) &
2174 BGE_PCISTATE_PCI_BUSMODE) == 0) {
2175 sc->bge_flags |= BGE_FLAG_PCIX;
2176 sc->bge_pcixcap = pci_get_pcixcap_ptr(sc->bge_dev);
2177 sc->bge_mbox_reorder = device_getenv_int(sc->bge_dev,
2178 "mbox_reorder", 0);
2181 device_printf(dev, "CHIP ID 0x%08x; "
2182 "ASIC REV 0x%02x; CHIP REV 0x%02x; %s\n",
2183 sc->bge_chipid, sc->bge_asicrev, sc->bge_chiprev,
2184 (sc->bge_flags & BGE_FLAG_PCIX) ? "PCI-X"
2185 : ((sc->bge_flags & BGE_FLAG_PCIE) ?
2186 "PCI-E" : "PCI"));
2189 * The 40-bit DMA bug applies to the 5714/5715 controllers and is
2190 * not actually a MAC controller bug but an issue with the embedded
2191 * PCIe to PCI-X bridge in the device. Use the 40-bit DMA workaround.
2193 if ((sc->bge_flags & BGE_FLAG_PCIX) &&
2194 (BGE_IS_5714_FAMILY(sc) || device_getenv_int(dev, "dma40b", 0)))
2195 sc->bge_flags |= BGE_FLAG_MAXADDR_40BIT;
2198 * When using the BCM5701 in PCI-X mode, data corruption has
2199 * been observed in the first few bytes of some received packets.
2200 * Aligning the packet buffer in memory eliminates the corruption.
2201 * Unfortunately, this misaligns the packet payloads. On platforms
2202 * which do not support unaligned accesses, we will realign the
2203 * payloads by copying the received packets.
2205 if (sc->bge_asicrev == BGE_ASICREV_BCM5701 &&
2206 (sc->bge_flags & BGE_FLAG_PCIX))
2207 sc->bge_flags |= BGE_FLAG_RX_ALIGNBUG;
2209 if (!BGE_IS_CRIPPLED(sc)) {
2210 if (device_getenv_int(dev, "status_tag", 1)) {
2211 sc->bge_flags |= BGE_FLAG_STATUS_TAG;
2212 sc->bge_pci_miscctl = BGE_PCIMISCCTL_TAGGED_STATUS;
2213 if (bootverbose)
2214 device_printf(dev, "enable status tag\n");
2218 if (BGE_IS_5755_PLUS(sc)) {
2220 * BCM5754 and BCM5787 share the same ASIC ID, so an
2221 * explicit device ID check is required.
2222 * For unknown reasons, TSO does not work on the BCM5755M.
2224 if (product != PCI_PRODUCT_BROADCOM_BCM5754 &&
2225 product != PCI_PRODUCT_BROADCOM_BCM5754M &&
2226 product != PCI_PRODUCT_BROADCOM_BCM5755M)
2227 sc->bge_flags |= BGE_FLAG_TSO;
2231 * Set various PHY quirk flags.
2234 if ((sc->bge_asicrev == BGE_ASICREV_BCM5700 ||
2235 sc->bge_asicrev == BGE_ASICREV_BCM5701) &&
2236 pci_get_subvendor(dev) == PCI_VENDOR_DELL)
2237 mii_priv |= BRGPHY_FLAG_NO_3LED;
2239 capmask = MII_CAPMASK_DEFAULT;
2240 if ((sc->bge_asicrev == BGE_ASICREV_BCM5703 &&
2241 (misccfg == 0x4000 || misccfg == 0x8000)) ||
2242 (sc->bge_asicrev == BGE_ASICREV_BCM5705 &&
2243 vendor == PCI_VENDOR_BROADCOM &&
2244 (product == PCI_PRODUCT_BROADCOM_BCM5901 ||
2245 product == PCI_PRODUCT_BROADCOM_BCM5901A2 ||
2246 product == PCI_PRODUCT_BROADCOM_BCM5705F)) ||
2247 (vendor == PCI_VENDOR_BROADCOM &&
2248 (product == PCI_PRODUCT_BROADCOM_BCM5751F ||
2249 product == PCI_PRODUCT_BROADCOM_BCM5753F ||
2250 product == PCI_PRODUCT_BROADCOM_BCM5787F)) ||
2251 product == PCI_PRODUCT_BROADCOM_BCM57790 ||
2252 sc->bge_asicrev == BGE_ASICREV_BCM5906) {
2253 /* 10/100 only */
2254 capmask &= ~BMSR_EXTSTAT;
2257 mii_priv |= BRGPHY_FLAG_WIRESPEED;
2258 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 ||
2259 (sc->bge_asicrev == BGE_ASICREV_BCM5705 &&
2260 (sc->bge_chipid != BGE_CHIPID_BCM5705_A0 &&
2261 sc->bge_chipid != BGE_CHIPID_BCM5705_A1)) ||
2262 sc->bge_asicrev == BGE_ASICREV_BCM5906)
2263 mii_priv &= ~BRGPHY_FLAG_WIRESPEED;
2265 if (sc->bge_chipid == BGE_CHIPID_BCM5701_A0 ||
2266 sc->bge_chipid == BGE_CHIPID_BCM5701_B0)
2267 mii_priv |= BRGPHY_FLAG_CRC_BUG;
2269 if (sc->bge_chiprev == BGE_CHIPREV_5703_AX ||
2270 sc->bge_chiprev == BGE_CHIPREV_5704_AX)
2271 mii_priv |= BRGPHY_FLAG_ADC_BUG;
2273 if (sc->bge_chipid == BGE_CHIPID_BCM5704_A0)
2274 mii_priv |= BRGPHY_FLAG_5704_A0;
2276 if (sc->bge_asicrev == BGE_ASICREV_BCM5906)
2277 mii_priv |= BRGPHY_FLAG_5906;
2279 if (BGE_IS_5705_PLUS(sc) &&
2280 sc->bge_asicrev != BGE_ASICREV_BCM5906 &&
2281 /* sc->bge_asicrev != BGE_ASICREV_BCM5717 && */
2282 sc->bge_asicrev != BGE_ASICREV_BCM5785 &&
2283 /* sc->bge_asicrev != BGE_ASICREV_BCM57765 && */
2284 sc->bge_asicrev != BGE_ASICREV_BCM57780) {
2285 if (sc->bge_asicrev == BGE_ASICREV_BCM5755 ||
2286 sc->bge_asicrev == BGE_ASICREV_BCM5761 ||
2287 sc->bge_asicrev == BGE_ASICREV_BCM5784 ||
2288 sc->bge_asicrev == BGE_ASICREV_BCM5787) {
2289 if (product != PCI_PRODUCT_BROADCOM_BCM5722 &&
2290 product != PCI_PRODUCT_BROADCOM_BCM5756)
2291 mii_priv |= BRGPHY_FLAG_JITTER_BUG;
2292 if (product == PCI_PRODUCT_BROADCOM_BCM5755M)
2293 mii_priv |= BRGPHY_FLAG_ADJUST_TRIM;
2294 } else {
2295 mii_priv |= BRGPHY_FLAG_BER_BUG;
2300 * Chips with APE need BAR2 access for APE registers/memory.
2302 if (sc->bge_flags & BGE_FLAG_APE) {
2303 uint32_t pcistate;
2305 rid = PCIR_BAR(2);
2306 sc->bge_res2 = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
2307 RF_ACTIVE);
2308 if (sc->bge_res2 == NULL) {
2309 device_printf(dev, "couldn't map BAR2 memory\n");
2310 error = ENXIO;
2311 goto fail;
2314 /* Enable APE register/memory access by host driver. */
2315 pcistate = pci_read_config(dev, BGE_PCI_PCISTATE, 4);
2316 pcistate |= BGE_PCISTATE_ALLOW_APE_CTLSPC_WR |
2317 BGE_PCISTATE_ALLOW_APE_SHMEM_WR |
2318 BGE_PCISTATE_ALLOW_APE_PSPACE_WR;
2319 pci_write_config(dev, BGE_PCI_PCISTATE, pcistate, 4);
2321 bge_ape_lock_init(sc);
2322 bge_ape_read_fw_ver(sc);
2326 * Allocate interrupt
2328 msi_enable = bge_msi_enable;
2329 if ((sc->bge_flags & BGE_FLAG_STATUS_TAG) == 0) {
2330 /* If "tagged status" is disabled, don't enable MSI */
2331 msi_enable = 0;
2332 } else if (msi_enable) {
2333 msi_enable = 0; /* Disable by default */
2334 if (BGE_IS_575X_PLUS(sc)) {
2335 msi_enable = 1;
2336 /* XXX we filter all 5714 chips */
2337 if (sc->bge_asicrev == BGE_ASICREV_BCM5714 ||
2338 (sc->bge_asicrev == BGE_ASICREV_BCM5750 &&
2339 (sc->bge_chiprev == BGE_CHIPREV_5750_AX ||
2340 sc->bge_chiprev == BGE_CHIPREV_5750_BX)))
2341 msi_enable = 0;
2342 else if (BGE_IS_5755_PLUS(sc) ||
2343 sc->bge_asicrev == BGE_ASICREV_BCM5906)
2344 sc->bge_flags |= BGE_FLAG_ONESHOT_MSI;
2347 if (msi_enable) {
2348 if (pci_find_extcap(dev, PCIY_MSI, &sc->bge_msicap)) {
2349 device_printf(dev, "no MSI capability\n");
2350 msi_enable = 0;
2354 sc->bge_irq_type = pci_alloc_1intr(dev, msi_enable, &sc->bge_irq_rid,
2355 &intr_flags);
2357 sc->bge_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &sc->bge_irq_rid,
2358 intr_flags);
2359 if (sc->bge_irq == NULL) {
2360 device_printf(dev, "couldn't map interrupt\n");
2361 error = ENXIO;
2362 goto fail;
2365 if (sc->bge_irq_type == PCI_INTR_TYPE_MSI)
2366 bge_enable_msi(sc);
2367 else
2368 sc->bge_flags &= ~BGE_FLAG_ONESHOT_MSI;
2370 /* Initialize if_name early, so that if_printf() can be used */
2371 ifp = &sc->arpcom.ac_if;
2372 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
2374 sc->bge_asf_mode = 0;
2375 /* No ASF if APE present. */
2376 if ((sc->bge_flags & BGE_FLAG_APE) == 0) {
2377 if (bge_allow_asf && (bge_readmem_ind(sc, BGE_SRAM_DATA_SIG) ==
2378 BGE_SRAM_DATA_SIG_MAGIC)) {
2379 if (bge_readmem_ind(sc, BGE_SRAM_DATA_CFG) &
2380 BGE_HWCFG_ASF) {
2381 sc->bge_asf_mode |= ASF_ENABLE;
2382 sc->bge_asf_mode |= ASF_STACKUP;
2383 if (BGE_IS_575X_PLUS(sc))
2384 sc->bge_asf_mode |= ASF_NEW_HANDSHAKE;
2390 * Try to reset the chip.
2392 bge_stop_fw(sc);
2393 bge_sig_pre_reset(sc, BGE_RESET_SHUTDOWN);
2394 bge_reset(sc);
2395 bge_sig_legacy(sc, BGE_RESET_SHUTDOWN);
2396 bge_sig_post_reset(sc, BGE_RESET_SHUTDOWN);
2398 if (bge_chipinit(sc)) {
2399 device_printf(dev, "chip initialization failed\n");
2400 error = ENXIO;
2401 goto fail;
2405 * Get station address
2407 error = bge_get_eaddr(sc, ether_addr);
2408 if (error) {
2409 device_printf(dev, "failed to read station address\n");
2410 goto fail;
2413 /* 5705/5750 limits RX return ring to 512 entries. */
2414 if (BGE_IS_5705_PLUS(sc))
2415 sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT_5705;
2416 else
2417 sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT;
2419 error = bge_dma_alloc(sc);
2420 if (error)
2421 goto fail;
2423 /* Set default tuneable values. */
2424 sc->bge_stat_ticks = BGE_TICKS_PER_SEC;
2425 sc->bge_rx_coal_ticks = BGE_RX_COAL_TICKS_DEF;
2426 sc->bge_tx_coal_ticks = BGE_TX_COAL_TICKS_DEF;
2427 sc->bge_rx_coal_bds = BGE_RX_COAL_BDS_DEF;
2428 sc->bge_tx_coal_bds = BGE_TX_COAL_BDS_DEF;
2429 if (sc->bge_flags & BGE_FLAG_STATUS_TAG) {
2430 sc->bge_rx_coal_ticks_int = BGE_RX_COAL_TICKS_DEF;
2431 sc->bge_tx_coal_ticks_int = BGE_TX_COAL_TICKS_DEF;
2432 sc->bge_rx_coal_bds_int = BGE_RX_COAL_BDS_DEF;
2433 sc->bge_tx_coal_bds_int = BGE_TX_COAL_BDS_DEF;
2434 } else {
2435 sc->bge_rx_coal_ticks_int = BGE_RX_COAL_TICKS_MIN;
2436 sc->bge_tx_coal_ticks_int = BGE_TX_COAL_TICKS_MIN;
2437 sc->bge_rx_coal_bds_int = BGE_RX_COAL_BDS_MIN;
2438 sc->bge_tx_coal_bds_int = BGE_TX_COAL_BDS_MIN;
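	/* Number of TX segments to batch before writing the doorbell register. */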
2440 sc->bge_tx_wreg = BGE_TX_WREG_NSEGS;
2442 /* Set up TX spare and reserved descriptor count */
2443 if (sc->bge_flags & BGE_FLAG_TSO) {
2444 sc->bge_txspare = BGE_NSEG_SPARE_TSO;
2445 sc->bge_txrsvd = BGE_NSEG_RSVD_TSO;
2446 } else {
2447 sc->bge_txspare = BGE_NSEG_SPARE;
2448 sc->bge_txrsvd = BGE_NSEG_RSVD;
2451 /* Set up ifnet structure */
2452 ifp->if_softc = sc;
2453 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
2454 ifp->if_ioctl = bge_ioctl;
2455 ifp->if_start = bge_start;
2456 #ifdef IFPOLL_ENABLE
2457 ifp->if_npoll = bge_npoll;
2458 #endif
2459 ifp->if_watchdog = bge_watchdog;
2460 ifp->if_init = bge_init;
2461 ifp->if_mtu = ETHERMTU;
2462 ifp->if_capabilities = IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU;
2463 ifp->if_nmbclusters = BGE_STD_RX_RING_CNT;
2464 ifq_set_maxlen(&ifp->if_snd, BGE_TX_RING_CNT - 1);
2465 ifq_set_ready(&ifp->if_snd);
2468 * 5700 B0 chips do not support checksumming correctly due
2469 * to hardware bugs.
2471 if (sc->bge_chipid != BGE_CHIPID_BCM5700_B0) {
2472 ifp->if_capabilities |= IFCAP_HWCSUM;
2473 ifp->if_hwassist |= BGE_CSUM_FEATURES;
2475 if (sc->bge_flags & BGE_FLAG_TSO) {
2476 ifp->if_capabilities |= IFCAP_TSO;
2477 ifp->if_hwassist |= CSUM_TSO;
2479 ifp->if_capenable = ifp->if_capabilities;
2482 * Figure out what sort of media we have by checking the
2483 * hardware config word in the first 32k of NIC internal memory,
2484 * or fall back to examining the EEPROM if necessary.
2485 * Note: on some BCM5700 cards, this value appears to be unset.
2486 * If that's the case, we have to rely on identifying the NIC
2487 * by its PCI subsystem ID, as we do below for the SysKonnect
2488 * SK-9D41.
2490 if (bge_readmem_ind(sc, BGE_SRAM_DATA_SIG) == BGE_SRAM_DATA_SIG_MAGIC) {
2491 hwcfg = bge_readmem_ind(sc, BGE_SRAM_DATA_CFG);
2492 } else {
2493 if (bge_read_eeprom(sc, (caddr_t)&hwcfg, BGE_EE_HWCFG_OFFSET,
2494 sizeof(hwcfg))) {
2495 device_printf(dev, "failed to read EEPROM\n");
2496 error = ENXIO;
2497 goto fail;
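		/* The EEPROM copy of the hardware config word is big-endian. */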
2499 hwcfg = ntohl(hwcfg);
2502 /* The SysKonnect SK-9D41 is a 1000baseSX card. */
2503 if (pci_get_subvendor(dev) == PCI_PRODUCT_SCHNEIDERKOCH_SK_9D41 ||
2504 (hwcfg & BGE_HWCFG_MEDIA) == BGE_MEDIA_FIBER) {
2505 if (BGE_IS_5714_FAMILY(sc))
2506 sc->bge_flags |= BGE_FLAG_MII_SERDES;
2507 else
2508 sc->bge_flags |= BGE_FLAG_TBI;
2511 /* Setup MI MODE */
2512 if (sc->bge_flags & BGE_FLAG_CPMU)
2513 sc->bge_mi_mode = BGE_MIMODE_500KHZ_CONST;
2514 else
2515 sc->bge_mi_mode = BGE_MIMODE_BASE;
2516 if (BGE_IS_5700_FAMILY(sc) || sc->bge_asicrev == BGE_ASICREV_BCM5705) {
2517 /* Enable auto polling for BCM570[0-5]. */
2518 sc->bge_mi_mode |= BGE_MIMODE_AUTOPOLL;
2521 /* Set up the link status update handling */
2522 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
2523 sc->bge_chipid != BGE_CHIPID_BCM5700_B2) {
2524 sc->bge_link_upd = bge_bcm5700_link_upd;
2525 sc->bge_link_chg = BGE_MACSTAT_MI_INTERRUPT;
2526 } else if (sc->bge_flags & BGE_FLAG_TBI) {
2527 sc->bge_link_upd = bge_tbi_link_upd;
2528 sc->bge_link_chg = BGE_MACSTAT_LINK_CHANGED;
2529 } else if (sc->bge_mi_mode & BGE_MIMODE_AUTOPOLL) {
2530 sc->bge_link_upd = bge_autopoll_link_upd;
2531 sc->bge_link_chg = BGE_MACSTAT_LINK_CHANGED;
2532 } else {
2533 sc->bge_link_upd = bge_copper_link_upd;
2534 sc->bge_link_chg = BGE_MACSTAT_LINK_CHANGED;
2538 * Broadcom's own driver always assumes the internal
2539 * PHY is at GMII address 1. On some chips, the PHY responds
2540 * to accesses at all addresses, which could cause us to
2541 * bogusly attach the PHY 32 times at probe time. Always
2542 * restricting the lookup to address 1 is simpler than
2543 * trying to figure out which chip revisions should be
2544 * special-cased.
2546 sc->bge_phyno = 1;
2548 if (sc->bge_flags & BGE_FLAG_TBI) {
2549 ifmedia_init(&sc->bge_ifmedia, IFM_IMASK,
2550 bge_ifmedia_upd, bge_ifmedia_sts);
2551 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER|IFM_1000_SX, 0, NULL);
2552 ifmedia_add(&sc->bge_ifmedia,
2553 IFM_ETHER|IFM_1000_SX|IFM_FDX, 0, NULL);
2554 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER|IFM_AUTO, 0, NULL);
2555 ifmedia_set(&sc->bge_ifmedia, IFM_ETHER|IFM_AUTO);
2556 sc->bge_ifmedia.ifm_media = sc->bge_ifmedia.ifm_cur->ifm_media;
2557 } else {
2558 struct mii_probe_args mii_args;
2559 int tries;
2562 * Do transceiver setup and tell the firmware the driver
2563 * is down, so we can try to get access to the PHY during
2564 * the probe if ASF is running. Retry a couple of times
2565 * if we get a conflict with the ASF firmware accessing
2566 * the PHY.
2568 tries = 0;
2569 BGE_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
2570 again:
2571 bge_asf_driver_up(sc);
2573 mii_probe_args_init(&mii_args, bge_ifmedia_upd, bge_ifmedia_sts);
2574 mii_args.mii_probemask = 1 << sc->bge_phyno;
2575 mii_args.mii_capmask = capmask;
2576 mii_args.mii_privtag = MII_PRIVTAG_BRGPHY;
2577 mii_args.mii_priv = mii_priv;
2579 error = mii_probe(dev, &sc->bge_miibus, &mii_args);
2580 if (error) {
2581 if (tries++ < 4) {
2582 device_printf(sc->bge_dev, "Probe MII again\n");
2583 bge_miibus_writereg(sc->bge_dev,
2584 sc->bge_phyno, MII_BMCR, BMCR_RESET);
2585 goto again;
2587 device_printf(dev, "MII without any PHY!\n");
2588 goto fail;
2592 * Now tell the firmware we are going up after probing the PHY
2594 if (sc->bge_asf_mode & ASF_STACKUP)
2595 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
2598 ctx = device_get_sysctl_ctx(sc->bge_dev);
2599 tree = device_get_sysctl_tree(sc->bge_dev);
2601 SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "rx_coal_ticks",
2602 CTLTYPE_INT | CTLFLAG_RW,
2603 sc, 0, bge_sysctl_rx_coal_ticks, "I",
2604 "Receive coalescing ticks (usec).");
2605 SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "tx_coal_ticks",
2606 CTLTYPE_INT | CTLFLAG_RW,
2607 sc, 0, bge_sysctl_tx_coal_ticks, "I",
2608 "Transmit coalescing ticks (usec).");
2609 SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "rx_coal_bds",
2610 CTLTYPE_INT | CTLFLAG_RW,
2611 sc, 0, bge_sysctl_rx_coal_bds, "I",
2612 "Receive max coalesced BD count.");
2613 SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "tx_coal_bds",
2614 CTLTYPE_INT | CTLFLAG_RW,
2615 sc, 0, bge_sysctl_tx_coal_bds, "I",
2616 "Transmit max coalesced BD count.");
2618 SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "tx_wreg", CTLFLAG_RW,
2619 &sc->bge_tx_wreg, 0,
2620 "# of segments before writing to hardware register");
2622 if (sc->bge_flags & BGE_FLAG_PCIE) {
2624 * A common design characteristic for many Broadcom
2625 * client controllers is that they only support a
2626 * single outstanding DMA read operation on the PCIe
2627 * bus. This means that it will take twice as long to
2628 * fetch a TX frame that is split into header and
2629 * payload buffers as it does to fetch a single,
2630 * contiguous TX frame (2 reads vs. 1 read). For these
2631 * controllers, coalescing buffers to reduce the number
2632 * of memory reads is an effective way to get maximum
2633 * performance (about 940Mbps). Without collapsing TX
2634 * buffers the maximum TCP bulk transfer performance
2635 * is about 850Mbps. However, forcibly coalescing mbufs
2636 * consumes a lot of CPU cycles, so leave it off by
2637 * default.
2639 SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
2640 "force_defrag", CTLFLAG_RW,
2641 &sc->bge_force_defrag, 0,
2642 "Force defragment on TX path");
2644 if (sc->bge_flags & BGE_FLAG_STATUS_TAG) {
2645 if (!BGE_IS_5705_PLUS(sc)) {
2646 SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
2647 "rx_coal_ticks_int", CTLTYPE_INT | CTLFLAG_RW,
2648 sc, 0, bge_sysctl_rx_coal_ticks_int, "I",
2649 "Receive coalescing ticks "
2650 "during interrupt (usec).");
2651 SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
2652 "tx_coal_ticks_int", CTLTYPE_INT | CTLFLAG_RW,
2653 sc, 0, bge_sysctl_tx_coal_ticks_int, "I",
2654 "Transmit coalescing ticks "
2655 "during interrupt (usec).");
2657 SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
2658 "rx_coal_bds_int", CTLTYPE_INT | CTLFLAG_RW,
2659 sc, 0, bge_sysctl_rx_coal_bds_int, "I",
2660 "Receive max coalesced BD count during interrupt.");
2661 SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
2662 "tx_coal_bds_int", CTLTYPE_INT | CTLFLAG_RW,
2663 sc, 0, bge_sysctl_tx_coal_bds_int, "I",
2664 "Transmit max coalesced BD count during interrupt.");
2668 * Call MI attach routine.
2670 ether_ifattach(ifp, ether_addr, NULL);
2672 ifq_set_cpuid(&ifp->if_snd, rman_get_cpuid(sc->bge_irq));
2674 #ifdef IFPOLL_ENABLE
2675 /* Polling setup */
2676 ifpoll_compat_setup(&sc->bge_npoll, ctx, tree,
2677 device_get_unit(dev), ifp->if_serializer);
2678 #endif
2680 if (sc->bge_irq_type == PCI_INTR_TYPE_MSI) {
2681 if (sc->bge_flags & BGE_FLAG_ONESHOT_MSI) {
2682 intr_func = bge_msi_oneshot;
2683 if (bootverbose)
2684 device_printf(dev, "oneshot MSI\n");
2685 } else {
2686 intr_func = bge_msi;
2688 } else if (sc->bge_flags & BGE_FLAG_STATUS_TAG) {
2689 intr_func = bge_intr_legacy;
2690 } else {
2691 intr_func = bge_intr_crippled;
2693 error = bus_setup_intr(dev, sc->bge_irq, INTR_MPSAFE, intr_func, sc,
2694 &sc->bge_intrhand, ifp->if_serializer);
2695 if (error) {
2696 ether_ifdetach(ifp);
2697 device_printf(dev, "couldn't set up irq\n");
2698 goto fail;
2701 return(0);
2702 fail:
2703 bge_detach(dev);
2704 return(error);
2707 static int
2708 bge_detach(device_t dev)
2710 struct bge_softc *sc = device_get_softc(dev);
2712 if (device_is_attached(dev)) {
2713 struct ifnet *ifp = &sc->arpcom.ac_if;
2715 lwkt_serialize_enter(ifp->if_serializer);
2716 bge_stop(sc);
2717 bus_teardown_intr(dev, sc->bge_irq, sc->bge_intrhand);
2718 lwkt_serialize_exit(ifp->if_serializer);
2720 ether_ifdetach(ifp);
2723 if (sc->bge_flags & BGE_FLAG_TBI)
2724 ifmedia_removeall(&sc->bge_ifmedia);
2725 if (sc->bge_miibus)
2726 device_delete_child(dev, sc->bge_miibus);
2727 bus_generic_detach(dev);
2729 if (sc->bge_irq != NULL) {
2730 bus_release_resource(dev, SYS_RES_IRQ, sc->bge_irq_rid,
2731 sc->bge_irq);
2733 if (sc->bge_irq_type == PCI_INTR_TYPE_MSI)
2734 pci_release_msi(dev);
2736 if (sc->bge_res != NULL) {
2737 bus_release_resource(dev, SYS_RES_MEMORY,
2738 BGE_PCI_BAR0, sc->bge_res);
2740 if (sc->bge_res2 != NULL) {
2741 bus_release_resource(dev, SYS_RES_MEMORY,
2742 PCIR_BAR(2), sc->bge_res2);
2745 bge_dma_free(sc);
2747 return 0;
2750 static void
2751 bge_reset(struct bge_softc *sc)
2753 device_t dev = sc->bge_dev;
2754 uint32_t cachesize, command, reset, mac_mode, mac_mode_mask;
2755 void (*write_op)(struct bge_softc *, uint32_t, uint32_t);
2756 int i, val = 0;
2758 mac_mode_mask = BGE_MACMODE_HALF_DUPLEX | BGE_MACMODE_PORTMODE;
2759 if (sc->bge_mfw_flags & BGE_MFW_ON_APE)
2760 mac_mode_mask |= BGE_MACMODE_APE_RX_EN | BGE_MACMODE_APE_TX_EN;
2761 mac_mode = CSR_READ_4(sc, BGE_MAC_MODE) & mac_mode_mask;
2763 if (BGE_IS_575X_PLUS(sc) && !BGE_IS_5714_FAMILY(sc) &&
2764 sc->bge_asicrev != BGE_ASICREV_BCM5906) {
2765 if (sc->bge_flags & BGE_FLAG_PCIE)
2766 write_op = bge_writemem_direct;
2767 else
2768 write_op = bge_writemem_ind;
2769 } else {
2770 write_op = bge_writereg_ind;
2773 if (sc->bge_asicrev != BGE_ASICREV_BCM5700 &&
2774 sc->bge_asicrev != BGE_ASICREV_BCM5701) {
2775 CSR_WRITE_4(sc, BGE_NVRAM_SWARB, BGE_NVRAMSWARB_SET1);
2776 for (i = 0; i < 8000; i++) {
2777 if (CSR_READ_4(sc, BGE_NVRAM_SWARB) &
2778 BGE_NVRAMSWARB_GNT1)
2779 break;
2780 DELAY(20);
2782 if (i == 8000) {
2783 if (bootverbose) {
2784 if_printf(&sc->arpcom.ac_if,
2785 "NVRAM lock timedout!\n");
2789 /* Take APE lock when performing reset. */
2790 bge_ape_lock(sc, BGE_APE_LOCK_GRC);
2792 /* Save some important PCI state. */
2793 cachesize = pci_read_config(dev, BGE_PCI_CACHESZ, 4);
2794 command = pci_read_config(dev, BGE_PCI_CMD, 4);
2796 pci_write_config(dev, BGE_PCI_MISC_CTL,
2797 BGE_PCIMISCCTL_INDIRECT_ACCESS|BGE_PCIMISCCTL_MASK_PCI_INTR|
2798 BGE_HIF_SWAP_OPTIONS|BGE_PCIMISCCTL_PCISTATE_RW|
2799 sc->bge_pci_miscctl, 4);
2801 /* Disable fastboot on controllers that support it. */
2802 if (sc->bge_asicrev == BGE_ASICREV_BCM5752 ||
2803 BGE_IS_5755_PLUS(sc)) {
2804 if (bootverbose)
2805 if_printf(&sc->arpcom.ac_if, "Disabling fastboot\n");
2806 CSR_WRITE_4(sc, BGE_FASTBOOT_PC, 0x0);
2810 * Write the magic number to SRAM at offset 0xB50.
2811 * When firmware finishes its initialization it will
2812 * write ~BGE_SRAM_FW_MB_MAGIC to the same location.
2814 bge_writemem_ind(sc, BGE_SRAM_FW_MB, BGE_SRAM_FW_MB_MAGIC);
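	/*
	 * Bits 7:1 of BGE_MISC_CFG are the clock prescaler; a value of
	 * 65 gives a 1us timer tick with a 66MHz core clock.
	 */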
2816 reset = BGE_MISCCFG_RESET_CORE_CLOCKS|(65<<1);
2818 /* XXX: Broadcom Linux driver. */
2819 if (sc->bge_flags & BGE_FLAG_PCIE) {
2820 /* Force PCI-E 1.0a mode */
2821 if (sc->bge_asicrev != BGE_ASICREV_BCM5785 &&
2822 CSR_READ_4(sc, BGE_PCIE_PHY_TSTCTL) ==
2823 (BGE_PCIE_PHY_TSTCTL_PSCRAM |
2824 BGE_PCIE_PHY_TSTCTL_PCIE10)) {
2825 CSR_WRITE_4(sc, BGE_PCIE_PHY_TSTCTL,
2826 BGE_PCIE_PHY_TSTCTL_PSCRAM);
2828 if (sc->bge_chipid != BGE_CHIPID_BCM5750_A0) {
2829 /* Prevent PCIE link training during global reset */
2830 CSR_WRITE_4(sc, BGE_MISC_CFG, (1<<29));
2831 reset |= (1<<29);
2835 if (sc->bge_asicrev == BGE_ASICREV_BCM5906) {
2836 uint32_t status, ctrl;
2838 status = CSR_READ_4(sc, BGE_VCPU_STATUS);
2839 CSR_WRITE_4(sc, BGE_VCPU_STATUS,
2840 status | BGE_VCPU_STATUS_DRV_RESET);
2841 ctrl = CSR_READ_4(sc, BGE_VCPU_EXT_CTRL);
2842 CSR_WRITE_4(sc, BGE_VCPU_EXT_CTRL,
2843 ctrl & ~BGE_VCPU_EXT_CTRL_HALT_CPU);
2847 * Set GPHY Power Down Override to leave GPHY
2848 * powered up in D0 uninitialized.
2850 if (BGE_IS_5705_PLUS(sc) && (sc->bge_flags & BGE_FLAG_CPMU) == 0)
2851 reset |= BGE_MISCCFG_GPHY_PD_OVERRIDE;
2853 /* Issue global reset */
2854 write_op(sc, BGE_MISC_CFG, reset);
2856 if (sc->bge_flags & BGE_FLAG_PCIE)
2857 DELAY(100 * 1000);
2858 else
2859 DELAY(1000);
2861 /* XXX: Broadcom Linux driver. */
2862 if (sc->bge_flags & BGE_FLAG_PCIE) {
2863 uint16_t devctl;
2865 if (sc->bge_chipid == BGE_CHIPID_BCM5750_A0) {
2866 uint32_t v;
2868 DELAY(500000); /* wait for link training to complete */
2869 v = pci_read_config(dev, 0xc4, 4);
2870 pci_write_config(dev, 0xc4, v | (1<<15), 4);
2873 devctl = pci_read_config(dev,
2874 sc->bge_pciecap + PCIER_DEVCTRL, 2);
2876 /* Disable no snoop and disable relaxed ordering. */
2877 devctl &= ~(PCIEM_DEVCTL_RELAX_ORDER | PCIEM_DEVCTL_NOSNOOP);
2879 /* Old PCI-E chips only support a 128-byte Max Payload Size. */
2880 if ((sc->bge_flags & BGE_FLAG_CPMU) == 0) {
2881 devctl &= ~PCIEM_DEVCTL_MAX_PAYLOAD_MASK;
2882 devctl |= PCIEM_DEVCTL_MAX_PAYLOAD_128;
2885 pci_write_config(dev, sc->bge_pciecap + PCIER_DEVCTRL,
2886 devctl, 2);
2888 /* Clear error status. */
2889 pci_write_config(dev, sc->bge_pciecap + PCIER_DEVSTS,
2890 PCIEM_DEVSTS_CORR_ERR |
2891 PCIEM_DEVSTS_NFATAL_ERR |
2892 PCIEM_DEVSTS_FATAL_ERR |
2893 PCIEM_DEVSTS_UNSUPP_REQ, 2);
2896 /* Reset some of the PCI state that got zapped by reset */
2897 pci_write_config(dev, BGE_PCI_MISC_CTL,
2898 BGE_PCIMISCCTL_INDIRECT_ACCESS|BGE_PCIMISCCTL_MASK_PCI_INTR|
2899 BGE_HIF_SWAP_OPTIONS|BGE_PCIMISCCTL_PCISTATE_RW|
2900 sc->bge_pci_miscctl, 4);
2901 val = BGE_PCISTATE_ROM_ENABLE | BGE_PCISTATE_ROM_RETRY_ENABLE;
2902 if (sc->bge_chipid == BGE_CHIPID_BCM5704_A0 &&
2903 (sc->bge_flags & BGE_FLAG_PCIX))
2904 val |= BGE_PCISTATE_RETRY_SAME_DMA;
2905 if (sc->bge_mfw_flags & BGE_MFW_ON_APE) {
2906 val |= BGE_PCISTATE_ALLOW_APE_CTLSPC_WR |
2907 BGE_PCISTATE_ALLOW_APE_SHMEM_WR |
2908 BGE_PCISTATE_ALLOW_APE_PSPACE_WR;
2910 pci_write_config(dev, BGE_PCI_PCISTATE, val, 4);
2911 pci_write_config(dev, BGE_PCI_CACHESZ, cachesize, 4);
2912 pci_write_config(dev, BGE_PCI_CMD, command, 4);
2915 * Disable PCI-X relaxed ordering to ensure the status block update
2916 * comes before packet buffer DMA. Otherwise the driver may
2917 * read a stale status block.
2919 if (sc->bge_flags & BGE_FLAG_PCIX) {
2920 uint16_t devctl;
2922 devctl = pci_read_config(dev,
2923 sc->bge_pcixcap + PCIXR_COMMAND, 2);
2924 devctl &= ~PCIXM_COMMAND_ERO;
2925 if (sc->bge_asicrev == BGE_ASICREV_BCM5703) {
2926 devctl &= ~PCIXM_COMMAND_MAX_READ;
2927 devctl |= PCIXM_COMMAND_MAX_READ_2048;
2928 } else if (sc->bge_asicrev == BGE_ASICREV_BCM5704) {
2929 devctl &= ~(PCIXM_COMMAND_MAX_SPLITS |
2930 PCIXM_COMMAND_MAX_READ);
2931 devctl |= PCIXM_COMMAND_MAX_READ_2048;
2933 pci_write_config(dev, sc->bge_pcixcap + PCIXR_COMMAND,
2934 devctl, 2);
2938 * Enable memory arbiter and re-enable MSI if necessary.
2940 if (BGE_IS_5714_FAMILY(sc)) {
2941 uint32_t val;
2943 if (sc->bge_irq_type == PCI_INTR_TYPE_MSI) {
2945 * Resetting the BCM5714 family will clear the MSI
2946 * enable bit; restore it after resetting.
2948 PCI_SETBIT(sc->bge_dev, sc->bge_msicap + PCIR_MSI_CTRL,
2949 PCIM_MSICTRL_MSI_ENABLE, 2);
2950 BGE_SETBIT(sc, BGE_MSI_MODE, BGE_MSIMODE_ENABLE);
2952 val = CSR_READ_4(sc, BGE_MARB_MODE);
2953 CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE | val);
2954 } else {
2955 CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);
2958 /* Fix up byte swapping. */
2959 CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_DMA_SWAP_OPTIONS |
2960 BGE_MODECTL_BYTESWAP_DATA);
2962 val = CSR_READ_4(sc, BGE_MAC_MODE);
2963 val = (val & ~mac_mode_mask) | mac_mode;
2964 CSR_WRITE_4(sc, BGE_MAC_MODE, val);
2965 DELAY(40);
2967 bge_ape_unlock(sc, BGE_APE_LOCK_GRC);
2969 if (sc->bge_asicrev == BGE_ASICREV_BCM5906) {
2970 for (i = 0; i < BGE_TIMEOUT; i++) {
2971 val = CSR_READ_4(sc, BGE_VCPU_STATUS);
2972 if (val & BGE_VCPU_STATUS_INIT_DONE)
2973 break;
2974 DELAY(100);
2976 if (i == BGE_TIMEOUT) {
2977 if_printf(&sc->arpcom.ac_if, "reset timed out\n");
2978 return;
2980 } else {
2981 int delay_us = 10;
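		/*
		 * The BCM5761 bootcode apparently needs more time
		 * between polls.
		 */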
2983 if (sc->bge_asicrev == BGE_ASICREV_BCM5761)
2984 delay_us = 100;
2987 * Poll until we see the 1's complement of the magic number.
2988 * This indicates that the firmware initialization
2989 * is complete.
2991 for (i = 0; i < BGE_FIRMWARE_TIMEOUT; i++) {
2992 val = bge_readmem_ind(sc, BGE_SRAM_FW_MB);
2993 if (val == ~BGE_SRAM_FW_MB_MAGIC)
2994 break;
2995 DELAY(delay_us);
2997 if (i == BGE_FIRMWARE_TIMEOUT) {
2998 if_printf(&sc->arpcom.ac_if, "firmware handshake "
2999 "timed out, found 0x%08x\n", val);
3004 * The 5704 in TBI mode apparently needs some special
3005 * adjustment to ensure the SERDES drive level is set
3006 * to 1.2V.
3008 if (sc->bge_asicrev == BGE_ASICREV_BCM5704 &&
3009 (sc->bge_flags & BGE_FLAG_TBI)) {
3010 uint32_t serdescfg;
3012 serdescfg = CSR_READ_4(sc, BGE_SERDES_CFG);
3013 serdescfg = (serdescfg & ~0xFFF) | 0x880;
3014 CSR_WRITE_4(sc, BGE_SERDES_CFG, serdescfg);
3017 /* XXX: Broadcom Linux driver. */
3018 if ((sc->bge_flags & BGE_FLAG_PCIE) &&
3019 sc->bge_chipid != BGE_CHIPID_BCM5750_A0 &&
3020 sc->bge_asicrev != BGE_ASICREV_BCM5785) {
3021 uint32_t v;
3023 /* Enable Data FIFO protection. */
3024 v = CSR_READ_4(sc, BGE_PCIE_TLDLPL_PORT);
3025 CSR_WRITE_4(sc, BGE_PCIE_TLDLPL_PORT, v | (1 << 25));
3028 DELAY(10000);
3032 * Frame reception handling. This is called if there's a frame
3033 * on the receive return list.
3035 * Note: we have to be able to handle two possibilities here:
3036 * 1) the frame is from the jumbo receive ring
3037 * 2) the frame is from the standard receive ring
3040 static void
3041 bge_rxeof(struct bge_softc *sc, uint16_t rx_prod, int count)
3043 struct ifnet *ifp;
3044 int stdcnt = 0, jumbocnt = 0;
3046 ifp = &sc->arpcom.ac_if;
3048 while (sc->bge_rx_saved_considx != rx_prod && count != 0) {
3049 struct bge_rx_bd *cur_rx;
3050 uint32_t rxidx;
3051 struct mbuf *m = NULL;
3052 uint16_t vlan_tag = 0;
3053 int have_tag = 0;
3055 --count;
3057 cur_rx =
3058 &sc->bge_ldata.bge_rx_return_ring[sc->bge_rx_saved_considx];
3060 rxidx = cur_rx->bge_idx;
3061 BGE_INC(sc->bge_rx_saved_considx, sc->bge_return_ring_cnt);
3062 logif(rx_pkt);
3064 if (cur_rx->bge_flags & BGE_RXBDFLAG_VLAN_TAG) {
3065 have_tag = 1;
3066 vlan_tag = cur_rx->bge_vlan_tag;
3069 if (cur_rx->bge_flags & BGE_RXBDFLAG_JUMBO_RING) {
3070 BGE_INC(sc->bge_jumbo, BGE_JUMBO_RX_RING_CNT);
3071 jumbocnt++;
3073 if (rxidx != sc->bge_jumbo) {
3074 IFNET_STAT_INC(ifp, ierrors, 1);
3075 if_printf(ifp, "sw jumbo index(%d) "
3076 "and hw jumbo index(%d) mismatch, drop!\n",
3077 sc->bge_jumbo, rxidx);
3078 bge_setup_rxdesc_jumbo(sc, rxidx);
3079 continue;
3082 m = sc->bge_cdata.bge_rx_jumbo_chain[rxidx].bge_mbuf;
3083 if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
3084 IFNET_STAT_INC(ifp, ierrors, 1);
3085 bge_setup_rxdesc_jumbo(sc, sc->bge_jumbo);
3086 continue;
3088 if (bge_newbuf_jumbo(sc, sc->bge_jumbo, 0)) {
3089 IFNET_STAT_INC(ifp, ierrors, 1);
3090 bge_setup_rxdesc_jumbo(sc, sc->bge_jumbo);
3091 continue;
3093 } else {
3094 int discard = 0;
3096 BGE_INC(sc->bge_std, BGE_STD_RX_RING_CNT);
3097 stdcnt++;
3099 if (rxidx != sc->bge_std) {
3100 IFNET_STAT_INC(ifp, ierrors, 1);
3101 if_printf(ifp, "sw std index(%d) "
3102 "and hw std index(%d) mismatch, drop!\n",
3103 sc->bge_std, rxidx);
3104 bge_setup_rxdesc_std(sc, rxidx);
3105 discard = 1;
3106 goto refresh_rx;
3109 m = sc->bge_cdata.bge_rx_std_chain[rxidx].bge_mbuf;
3110 if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
3111 IFNET_STAT_INC(ifp, ierrors, 1);
3112 bge_setup_rxdesc_std(sc, sc->bge_std);
3113 discard = 1;
3114 goto refresh_rx;
3116 if (bge_newbuf_std(sc, sc->bge_std, 0)) {
3117 IFNET_STAT_INC(ifp, ierrors, 1);
3118 bge_setup_rxdesc_std(sc, sc->bge_std);
3119 discard = 1;
3121 refresh_rx:
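		/*
		 * Kick the standard ring producer register early, every
		 * bge_rx_wreg descriptors at most; see the errata note
		 * in bge_attach().
		 */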
3122 if (sc->bge_rx_wreg > 0 && stdcnt >= sc->bge_rx_wreg) {
3123 bge_writembx(sc, BGE_MBX_RX_STD_PROD_LO,
3124 sc->bge_std);
3125 stdcnt = 0;
3127 if (discard)
3128 continue;
3131 IFNET_STAT_INC(ifp, ipackets, 1);
3132 #if !defined(__x86_64__)
3134 * The x86 allows unaligned accesses, but for other
3135 * platforms we must make sure the payload is aligned.
3137 if (sc->bge_flags & BGE_FLAG_RX_ALIGNBUG) {
3138 bcopy(m->m_data, m->m_data + ETHER_ALIGN,
3139 cur_rx->bge_len);
3140 m->m_data += ETHER_ALIGN;
3142 #endif
3143 m->m_pkthdr.len = m->m_len = cur_rx->bge_len - ETHER_CRC_LEN;
3144 m->m_pkthdr.rcvif = ifp;
3146 if (ifp->if_capenable & IFCAP_RXCSUM) {
3147 if (cur_rx->bge_flags & BGE_RXBDFLAG_IP_CSUM) {
3148 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
3149 if ((cur_rx->bge_ip_csum ^ 0xffff) == 0)
3150 m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
3152 if ((cur_rx->bge_flags & BGE_RXBDFLAG_TCP_UDP_CSUM) &&
3153 m->m_pkthdr.len >= BGE_MIN_FRAMELEN) {
3154 m->m_pkthdr.csum_data =
3155 cur_rx->bge_tcp_udp_csum;
3156 m->m_pkthdr.csum_flags |=
3157 CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
3162 * If we received a packet with a vlan tag, pass it
3163 * to vlan_input() instead of ether_input().
3165 if (have_tag) {
3166 m->m_flags |= M_VLANTAG;
3167 m->m_pkthdr.ether_vlantag = vlan_tag;
3169 ifp->if_input(ifp, m, NULL, -1);
3172 bge_writembx(sc, BGE_MBX_RX_CONS0_LO, sc->bge_rx_saved_considx);
3173 if (stdcnt)
3174 bge_writembx(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std);
3175 if (jumbocnt)
3176 bge_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo);
3179 static void
3180 bge_txeof(struct bge_softc *sc, uint16_t tx_cons)
3182 struct ifnet *ifp;
3184 ifp = &sc->arpcom.ac_if;
3187 * Go through our tx ring and free mbufs for those
3188 * frames that have been sent.
3190 while (sc->bge_tx_saved_considx != tx_cons) {
3191 uint32_t idx = 0;
3193 idx = sc->bge_tx_saved_considx;
3194 if (sc->bge_cdata.bge_tx_chain[idx] != NULL) {
3195 IFNET_STAT_INC(ifp, opackets, 1);
3196 bus_dmamap_unload(sc->bge_cdata.bge_tx_mtag,
3197 sc->bge_cdata.bge_tx_dmamap[idx]);
3198 m_freem(sc->bge_cdata.bge_tx_chain[idx]);
3199 sc->bge_cdata.bge_tx_chain[idx] = NULL;
3201 sc->bge_txcnt--;
3202 BGE_INC(sc->bge_tx_saved_considx, BGE_TX_RING_CNT);
3203 logif(tx_pkt);
3206 if ((BGE_TX_RING_CNT - sc->bge_txcnt) >=
3207 (sc->bge_txrsvd + sc->bge_txspare))
3208 ifq_clr_oactive(&ifp->if_snd);
3210 if (sc->bge_txcnt == 0)
3211 ifp->if_timer = 0;
3213 if (!ifq_is_empty(&ifp->if_snd))
3214 if_devstart(ifp);
3217 #ifdef IFPOLL_ENABLE
3219 static void
3220 bge_npoll_compat(struct ifnet *ifp, void *arg __unused, int cycles)
3222 struct bge_softc *sc = ifp->if_softc;
3223 struct bge_status_block *sblk = sc->bge_ldata.bge_status_block;
3224 uint16_t rx_prod, tx_cons;
3226 ASSERT_SERIALIZED(ifp->if_serializer);
3228 if (sc->bge_npoll.ifpc_stcount-- == 0) {
3229 sc->bge_npoll.ifpc_stcount = sc->bge_npoll.ifpc_stfrac;
3231 * Process link state changes.
3233 bge_link_poll(sc);
3236 if (sc->bge_flags & BGE_FLAG_STATUS_TAG) {
3237 sc->bge_status_tag = sblk->bge_status_tag;
3239 * Use a load fence to ensure that status_tag
3240 * is saved before rx_prod and tx_cons.
3242 cpu_lfence();
3245 rx_prod = sblk->bge_idx[0].bge_rx_prod_idx;
3246 if (sc->bge_rx_saved_considx != rx_prod)
3247 bge_rxeof(sc, rx_prod, cycles);
3249 tx_cons = sblk->bge_idx[0].bge_tx_cons_idx;
3250 if (sc->bge_tx_saved_considx != tx_cons)
3251 bge_txeof(sc, tx_cons);
3253 if (sc->bge_flags & BGE_FLAG_STATUS_TAG)
3254 bge_writembx(sc, BGE_MBX_IRQ0_LO, sc->bge_status_tag << 24);
3256 if (sc->bge_coal_chg)
3257 bge_coal_change(sc);
3260 static void
3261 bge_npoll(struct ifnet *ifp, struct ifpoll_info *info)
3263 struct bge_softc *sc = ifp->if_softc;
3265 ASSERT_SERIALIZED(ifp->if_serializer);
3267 if (info != NULL) {
3268 int cpuid = sc->bge_npoll.ifpc_cpuid;
3270 info->ifpi_rx[cpuid].poll_func = bge_npoll_compat;
3271 info->ifpi_rx[cpuid].arg = NULL;
3272 info->ifpi_rx[cpuid].serializer = ifp->if_serializer;
3274 if (ifp->if_flags & IFF_RUNNING)
3275 bge_disable_intr(sc);
3276 ifq_set_cpuid(&ifp->if_snd, cpuid);
3277 } else {
3278 if (ifp->if_flags & IFF_RUNNING)
3279 bge_enable_intr(sc);
3280 ifq_set_cpuid(&ifp->if_snd, rman_get_cpuid(sc->bge_irq));
3284 #endif /* IFPOLL_ENABLE */
3286 static void
3287 bge_intr_crippled(void *xsc)
3289 struct bge_softc *sc = xsc;
3290 struct ifnet *ifp = &sc->arpcom.ac_if;
3292 logif(intr);
3295 * Ack the interrupt by writing something to BGE_MBX_IRQ0_LO. Don't
3296 * disable interrupts by writing nonzero like we used to, since with
3297 * our current organization this just gives complications and
3298 * pessimizations for re-enabling interrupts. We used to have races
3299 * instead of the necessary complications. Disabling interrupts
3300 * would just reduce the chance of a status update while we are
3301 * running (by switching to the interrupt-mode coalescence
3302 * parameters), but this chance is already very low so it is more
3303 * efficient to get another interrupt than prevent it.
3305 * We do the ack first to ensure another interrupt if there is a
3306 * status update after the ack. We don't check for the status
3307 * changing later because it is more efficient to get another
3308 * interrupt than prevent it, not quite as above (not checking is
3309 * a smaller optimization than not toggling the interrupt enable,
3310 * since checking doesn't involve PCI accesses and toggling requires
3311 * the status check). So toggling would probably be a pessimization
3312 * even with MSI. It would only be needed for using a task queue.
3314 bge_writembx(sc, BGE_MBX_IRQ0_LO, 0);
3317 * Process link state changes.
3319 bge_link_poll(sc);
3321 if (ifp->if_flags & IFF_RUNNING) {
3322 struct bge_status_block *sblk = sc->bge_ldata.bge_status_block;
3323 uint16_t rx_prod, tx_cons;
3325 rx_prod = sblk->bge_idx[0].bge_rx_prod_idx;
3326 if (sc->bge_rx_saved_considx != rx_prod)
3327 bge_rxeof(sc, rx_prod, -1);
3329 tx_cons = sblk->bge_idx[0].bge_tx_cons_idx;
3330 if (sc->bge_tx_saved_considx != tx_cons)
3331 bge_txeof(sc, tx_cons);
3334 if (sc->bge_coal_chg)
3335 bge_coal_change(sc);
3338 static void
3339 bge_intr_legacy(void *xsc)
3341 struct bge_softc *sc = xsc;
3342 struct bge_status_block *sblk = sc->bge_ldata.bge_status_block;
3344 if (sc->bge_status_tag == sblk->bge_status_tag) {
3345 uint32_t val;
3347 val = pci_read_config(sc->bge_dev, BGE_PCI_PCISTATE, 4);
3348 if (val & BGE_PCISTAT_INTR_NOTACT)
3349 return;
3353 * NOTE:
3354 * Interrupt will have to be disabled if tagged status
3355 * is used, else interrupt will always be asserted on
3356 * certain chips (at least on BCM5750 AX/BX).
3358 bge_writembx(sc, BGE_MBX_IRQ0_LO, 1);
3360 bge_intr(sc);
3363 static void
3364 bge_msi(void *xsc)
3366 struct bge_softc *sc = xsc;
3368 /* Disable interrupt first */
3369 bge_writembx(sc, BGE_MBX_IRQ0_LO, 1);
3370 bge_intr(sc);
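/*
 * With one-shot MSI the chip masks further interrupts until the status
 * tag is written back to the IRQ mailbox in bge_intr(), so no explicit
 * ack is needed on entry.
 */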
3373 static void
3374 bge_msi_oneshot(void *xsc)
3376 bge_intr(xsc);
3379 static void
3380 bge_intr(struct bge_softc *sc)
3382 struct ifnet *ifp = &sc->arpcom.ac_if;
3383 struct bge_status_block *sblk = sc->bge_ldata.bge_status_block;
3384 uint16_t rx_prod, tx_cons;
3385 uint32_t status;
3387 sc->bge_status_tag = sblk->bge_status_tag;
3389 * Use a load fence to ensure that status_tag is saved
3390 * before rx_prod, tx_cons and status.
3392 cpu_lfence();
3394 rx_prod = sblk->bge_idx[0].bge_rx_prod_idx;
3395 tx_cons = sblk->bge_idx[0].bge_tx_cons_idx;
3396 status = sblk->bge_status;
3398 if ((status & BGE_STATFLAG_LINKSTATE_CHANGED) || sc->bge_link_evt)
3399 bge_link_poll(sc);
3401 if (ifp->if_flags & IFF_RUNNING) {
3402 if (sc->bge_rx_saved_considx != rx_prod)
3403 bge_rxeof(sc, rx_prod, -1);
3405 if (sc->bge_tx_saved_considx != tx_cons)
3406 bge_txeof(sc, tx_cons);
3409 bge_writembx(sc, BGE_MBX_IRQ0_LO, sc->bge_status_tag << 24);
3411 if (sc->bge_coal_chg)
3412 bge_coal_change(sc);
3415 static void
3416 bge_tick(void *xsc)
3418 struct bge_softc *sc = xsc;
3419 struct ifnet *ifp = &sc->arpcom.ac_if;
3421 lwkt_serialize_enter(ifp->if_serializer);
3423 if (BGE_IS_5705_PLUS(sc))
3424 bge_stats_update_regs(sc);
3425 else
3426 bge_stats_update(sc);
3428 if (sc->bge_flags & BGE_FLAG_TBI) {
3430 * Since auto-polling can't be used in TBI mode, we have to
3431 * poll the link status manually. Here we register a pending
3432 * link event and trigger an interrupt.
3434 sc->bge_link_evt++;
3435 if (BGE_IS_CRIPPLED(sc))
3436 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_SET);
3437 else
3438 BGE_SETBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_COAL_NOW);
3439 } else if (!sc->bge_link) {
3440 mii_tick(device_get_softc(sc->bge_miibus));
3443 bge_asf_driver_up(sc);
3445 callout_reset(&sc->bge_stat_timer, hz, bge_tick, sc);
3447 lwkt_serialize_exit(ifp->if_serializer);
3450 static void
3451 bge_stats_update_regs(struct bge_softc *sc)
3453 struct ifnet *ifp = &sc->arpcom.ac_if;
3454 struct bge_mac_stats_regs stats;
3455 uint32_t *s;
3456 int i;
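	/* Copy the entire MAC statistics register block into a local struct. */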
3458 s = (uint32_t *)&stats;
3459 for (i = 0; i < sizeof(struct bge_mac_stats_regs); i += 4) {
3460 *s = CSR_READ_4(sc, BGE_RX_STATS + i);
3461 s++;
3464 IFNET_STAT_SET(ifp, collisions,
3465 (stats.dot3StatsSingleCollisionFrames +
3466 stats.dot3StatsMultipleCollisionFrames +
3467 stats.dot3StatsExcessiveCollisions +
3468 stats.dot3StatsLateCollisions));
3471 static void
3472 bge_stats_update(struct bge_softc *sc)
3474 struct ifnet *ifp = &sc->arpcom.ac_if;
3475 bus_size_t stats;
3477 stats = BGE_MEMWIN_START + BGE_STATS_BLOCK;
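	/* Read a counter from the statistics block in NIC memory via the memory window. */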
3479 #define READ_STAT(sc, stats, stat) \
3480 CSR_READ_4(sc, stats + offsetof(struct bge_stats, stat))
3482 IFNET_STAT_SET(ifp, collisions,
3483 (READ_STAT(sc, stats,
3484 txstats.dot3StatsSingleCollisionFrames.bge_addr_lo) +
3485 READ_STAT(sc, stats,
3486 txstats.dot3StatsMultipleCollisionFrames.bge_addr_lo) +
3487 READ_STAT(sc, stats,
3488 txstats.dot3StatsExcessiveCollisions.bge_addr_lo) +
3489 READ_STAT(sc, stats,
3490 txstats.dot3StatsLateCollisions.bge_addr_lo)));
3492 #undef READ_STAT
3494 #ifdef notdef
3495 IFNET_STAT_SET(ifp, collisions,
3496 (sc->bge_rdata->bge_info.bge_stats.dot3StatsSingleCollisionFrames +
3497 sc->bge_rdata->bge_info.bge_stats.dot3StatsMultipleCollisionFrames +
3498 sc->bge_rdata->bge_info.bge_stats.dot3StatsExcessiveCollisions +
3499 sc->bge_rdata->bge_info.bge_stats.dot3StatsLateCollisions));
3500 #endif
3504 * Encapsulate an mbuf chain in the tx ring by coupling the mbuf data
3505 * pointers to descriptors.
3507 static int
3508 bge_encap(struct bge_softc *sc, struct mbuf **m_head0, uint32_t *txidx,
3509 int *segs_used)
3511 struct bge_tx_bd *d = NULL, *last_d;
3512 uint16_t csum_flags = 0, mss = 0;
3513 bus_dma_segment_t segs[BGE_NSEG_NEW];
3514 bus_dmamap_t map;
3515 int error, maxsegs, nsegs, idx, i;
3516 struct mbuf *m_head = *m_head0, *m_new;
3518 if (m_head->m_pkthdr.csum_flags & CSUM_TSO) {
3519 error = bge_setup_tso(sc, m_head0, &mss, &csum_flags);
3520 if (error)
3521 return ENOBUFS;
3522 m_head = *m_head0;
3523 } else if (m_head->m_pkthdr.csum_flags & BGE_CSUM_FEATURES) {
3524 if (m_head->m_pkthdr.csum_flags & CSUM_IP)
3525 csum_flags |= BGE_TXBDFLAG_IP_CSUM;
3526 if (m_head->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP))
3527 csum_flags |= BGE_TXBDFLAG_TCP_UDP_CSUM;
3528 if (m_head->m_flags & M_LASTFRAG)
3529 csum_flags |= BGE_TXBDFLAG_IP_FRAG_END;
3530 else if (m_head->m_flags & M_FRAG)
3531 csum_flags |= BGE_TXBDFLAG_IP_FRAG;
3534 idx = *txidx;
3535 map = sc->bge_cdata.bge_tx_dmamap[idx];
3537 maxsegs = (BGE_TX_RING_CNT - sc->bge_txcnt) - sc->bge_txrsvd;
3538 KASSERT(maxsegs >= sc->bge_txspare,
3539 ("not enough segments %d", maxsegs));
3541 if (maxsegs > BGE_NSEG_NEW)
3542 maxsegs = BGE_NSEG_NEW;
3545 * Pad outbound frame to BGE_MIN_FRAMELEN for an unusual reason.
3546 * The bge hardware will pad out Tx runts to BGE_MIN_FRAMELEN,
3547 * but when such padded frames employ the bge IP/TCP checksum
3548 * offload, the hardware checksum assist gives incorrect results
3549 * (possibly from incorporating its own padding into the UDP/TCP
3550 * checksum; who knows). If we pad such runts with zeros, the
3551 * onboard checksum comes out correct.
3553 if ((csum_flags & BGE_TXBDFLAG_TCP_UDP_CSUM) &&
3554 m_head->m_pkthdr.len < BGE_MIN_FRAMELEN) {
3555 error = m_devpad(m_head, BGE_MIN_FRAMELEN);
3556 if (error)
3557 goto back;
3560 if ((sc->bge_flags & BGE_FLAG_SHORTDMA) && m_head->m_next != NULL) {
3561 m_new = bge_defrag_shortdma(m_head);
3562 if (m_new == NULL) {
3563 error = ENOBUFS;
3564 goto back;
3566 *m_head0 = m_head = m_new;
3568 if ((m_head->m_pkthdr.csum_flags & CSUM_TSO) == 0 &&
3569 sc->bge_force_defrag && (sc->bge_flags & BGE_FLAG_PCIE) &&
3570 m_head->m_next != NULL) {
3572 * Forcefully defragment the mbuf chain to overcome the
3573 * hardware limitation of a single outstanding
3574 * DMA read operation. If it fails, keep moving on using
3575 * the original mbuf chain.
3577 m_new = m_defrag(m_head, M_NOWAIT);
3578 if (m_new != NULL)
3579 *m_head0 = m_head = m_new;
3582 error = bus_dmamap_load_mbuf_defrag(sc->bge_cdata.bge_tx_mtag, map,
3583 m_head0, segs, maxsegs, &nsegs, BUS_DMA_NOWAIT);
3584 if (error)
3585 goto back;
3586 *segs_used += nsegs;
3588 m_head = *m_head0;
3589 bus_dmamap_sync(sc->bge_cdata.bge_tx_mtag, map, BUS_DMASYNC_PREWRITE);
3591 for (i = 0; ; i++) {
3592 d = &sc->bge_ldata.bge_tx_ring[idx];
3594 d->bge_addr.bge_addr_lo = BGE_ADDR_LO(segs[i].ds_addr);
3595 d->bge_addr.bge_addr_hi = BGE_ADDR_HI(segs[i].ds_addr);
3596 d->bge_len = segs[i].ds_len;
3597 d->bge_flags = csum_flags;
3598 d->bge_mss = mss;
3600 if (i == nsegs - 1)
3601 break;
3602 BGE_INC(idx, BGE_TX_RING_CNT);
3604 last_d = d;
3606	/* Set the VLAN tag on the first segment of the packet. */
3607 d = &sc->bge_ldata.bge_tx_ring[*txidx];
3608 if (m_head->m_flags & M_VLANTAG) {
3609 d->bge_flags |= BGE_TXBDFLAG_VLAN_TAG;
3610 d->bge_vlan_tag = m_head->m_pkthdr.ether_vlantag;
3611 } else {
3612 d->bge_vlan_tag = 0;
3615 /* Mark the last segment as end of packet... */
3616 last_d->bge_flags |= BGE_TXBDFLAG_END;
3619	 * Ensure that the map for this transmission is placed at
3620	 * the array index of the last descriptor in this chain.
3622 sc->bge_cdata.bge_tx_dmamap[*txidx] = sc->bge_cdata.bge_tx_dmamap[idx];
3623 sc->bge_cdata.bge_tx_dmamap[idx] = map;
3624 sc->bge_cdata.bge_tx_chain[idx] = m_head;
3625 sc->bge_txcnt += nsegs;
3627 BGE_INC(idx, BGE_TX_RING_CNT);
3628 *txidx = idx;
3629 back:
3630 if (error) {
3631 m_freem(*m_head0);
3632 *m_head0 = NULL;
3634 return error;
3637 static void
3638 bge_xmit(struct bge_softc *sc, uint32_t prodidx)
3640 /* Transmit */
3641 bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx);
3642 /* 5700 b2 errata */
3643 if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
3644 bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx);
3648 * Main transmit routine. To avoid having to do mbuf copies, we put pointers
3649 * to the mbuf data regions directly in the transmit descriptors.
3651 static void
3652 bge_start(struct ifnet *ifp, struct ifaltq_subque *ifsq)
3654 struct bge_softc *sc = ifp->if_softc;
3655 struct mbuf *m_head = NULL;
3656 uint32_t prodidx;
3657 int nsegs = 0;
3659 ASSERT_ALTQ_SQ_DEFAULT(ifp, ifsq);
3661 if ((ifp->if_flags & IFF_RUNNING) == 0 || ifq_is_oactive(&ifp->if_snd))
3662 return;
3664 prodidx = sc->bge_tx_prodidx;
3666 while (sc->bge_cdata.bge_tx_chain[prodidx] == NULL) {
3667 m_head = ifq_dequeue(&ifp->if_snd);
3668 if (m_head == NULL)
3669 break;
3672 * XXX
3673 * The code inside the if() block is never reached since we
3674 * must mark CSUM_IP_FRAGS in our if_hwassist to start getting
3675 * requests to checksum TCP/UDP in a fragmented packet.
3677 * XXX
3678 * safety overkill. If this is a fragmented packet chain
3679 * with delayed TCP/UDP checksums, then only encapsulate
3680 * it if we have enough descriptors to handle the entire
3681 * chain at once.
3682 * (paranoia -- may not actually be needed)
3684 if ((m_head->m_flags & M_FIRSTFRAG) &&
3685 (m_head->m_pkthdr.csum_flags & CSUM_DELAY_DATA)) {
3686 if ((BGE_TX_RING_CNT - sc->bge_txcnt) <
3687 m_head->m_pkthdr.csum_data + sc->bge_txrsvd) {
3688 ifq_set_oactive(&ifp->if_snd);
3689 ifq_prepend(&ifp->if_snd, m_head);
3690 break;
3695 * Sanity check: avoid coming within bge_txrsvd
3696 * descriptors of the end of the ring. Also make
3697 * sure there are bge_txspare descriptors for
3698 * jumbo buffers' defragmentation.
3700 if ((BGE_TX_RING_CNT - sc->bge_txcnt) <
3701 (sc->bge_txrsvd + sc->bge_txspare)) {
3702 ifq_set_oactive(&ifp->if_snd);
3703 ifq_prepend(&ifp->if_snd, m_head);
3704 break;
3708 * Pack the data into the transmit ring. If we
3709 * don't have room, set the OACTIVE flag and wait
3710 * for the NIC to drain the ring.
3712 if (bge_encap(sc, &m_head, &prodidx, &nsegs)) {
3713 ifq_set_oactive(&ifp->if_snd);
3714 IFNET_STAT_INC(ifp, oerrors, 1);
3715 break;
3718 if (nsegs >= sc->bge_tx_wreg) {
3719 bge_xmit(sc, prodidx);
3720 nsegs = 0;
3723 ETHER_BPF_MTAP(ifp, m_head);
3726 * Set a timeout in case the chip goes out to lunch.
3728 ifp->if_timer = 5;
3731 if (nsegs > 0)
3732 bge_xmit(sc, prodidx);
3733 sc->bge_tx_prodidx = prodidx;
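/*
 * (Re)initialize the chip: reset it, program the MTU, station
 * address and RX filters, set up the RX/TX rings, enable the
 * transmitter, receiver and interrupts, and mark the interface
 * as running.
 */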
3736 static void
3737 bge_init(void *xsc)
3739 struct bge_softc *sc = xsc;
3740 struct ifnet *ifp = &sc->arpcom.ac_if;
3741 uint16_t *m;
3742 uint32_t mode;
3744 ASSERT_SERIALIZED(ifp->if_serializer);
3746 /* Cancel pending I/O and flush buffers. */
3747 bge_stop(sc);
3749 bge_stop_fw(sc);
3750 bge_sig_pre_reset(sc, BGE_RESET_START);
3751 bge_reset(sc);
3752 bge_sig_legacy(sc, BGE_RESET_START);
3753 bge_sig_post_reset(sc, BGE_RESET_START);
3755 bge_chipinit(sc);
3758 * Init the various state machines, ring
3759 * control blocks and firmware.
3761 if (bge_blockinit(sc)) {
3762 if_printf(ifp, "initialization failure\n");
3763 bge_stop(sc);
3764 return;
3767 /* Specify MTU. */
3768 CSR_WRITE_4(sc, BGE_RX_MTU, ifp->if_mtu +
3769 ETHER_HDR_LEN + ETHER_CRC_LEN + EVL_ENCAPLEN);
3771 /* Load our MAC address. */
3772 m = (uint16_t *)&sc->arpcom.ac_enaddr[0];
3773 CSR_WRITE_4(sc, BGE_MAC_ADDR1_LO, htons(m[0]));
3774 CSR_WRITE_4(sc, BGE_MAC_ADDR1_HI, (htons(m[1]) << 16) | htons(m[2]));
3776 /* Enable or disable promiscuous mode as needed. */
3777 bge_setpromisc(sc);
3779 /* Program multicast filter. */
3780 bge_setmulti(sc);
3782 /* Init RX ring. */
3783 if (bge_init_rx_ring_std(sc)) {
3784 if_printf(ifp, "RX ring initialization failed\n");
3785 bge_stop(sc);
3786 return;
3790	 * Workaround for a bug in 5705 ASIC rev A0. Poll the NIC's
3791	 * memory to ensure that the chip has in fact read the first
3792	 * entry of the ring.
3794 if (sc->bge_chipid == BGE_CHIPID_BCM5705_A0) {
3795 uint32_t v, i;
3796 for (i = 0; i < 10; i++) {
3797 DELAY(20);
3798 v = bge_readmem_ind(sc, BGE_STD_RX_RINGS + 8);
3799 if (v == (MCLBYTES - ETHER_ALIGN))
3800 break;
3802 if (i == 10)
3803 if_printf(ifp, "5705 A0 chip failed to load RX ring\n");
3806 /* Init jumbo RX ring. */
3807 if (ifp->if_mtu > (ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN)) {
3808 if (bge_init_rx_ring_jumbo(sc)) {
3809 if_printf(ifp, "Jumbo RX ring initialization failed\n");
3810 bge_stop(sc);
3811 return;
3815 /* Init our RX return ring index */
3816 sc->bge_rx_saved_considx = 0;
3818 /* Init TX ring. */
3819 bge_init_tx_ring(sc);
3821 /* Enable TX MAC state machine lockup fix. */
3822 mode = CSR_READ_4(sc, BGE_TX_MODE);
3823 if (BGE_IS_5755_PLUS(sc) || sc->bge_asicrev == BGE_ASICREV_BCM5906)
3824 mode |= BGE_TXMODE_MBUF_LOCKUP_FIX;
3825 /* Turn on transmitter */
3826 CSR_WRITE_4(sc, BGE_TX_MODE, mode | BGE_TXMODE_ENABLE);
3827 DELAY(100);
3829 /* Turn on receiver */
3830 mode = CSR_READ_4(sc, BGE_RX_MODE);
3831 if (BGE_IS_5755_PLUS(sc))
3832 mode |= BGE_RXMODE_IPV6_ENABLE;
3833 CSR_WRITE_4(sc, BGE_RX_MODE, mode | BGE_RXMODE_ENABLE);
3834 DELAY(10);
3837 * Set the number of good frames to receive after RX MBUF
3838 * Low Watermark has been reached. After the RX MAC receives
3839 * this number of frames, it will drop subsequent incoming
3840 * frames until the MBUF High Watermark is reached.
3842 CSR_WRITE_4(sc, BGE_MAX_RX_FRAME_LOWAT, 2);
3844 if (sc->bge_irq_type == PCI_INTR_TYPE_MSI) {
3845 if (bootverbose) {
3846 if_printf(ifp, "MSI_MODE: %#x\n",
3847 CSR_READ_4(sc, BGE_MSI_MODE));
3851 * XXX
3852 * Linux driver turns it on for all chips supporting MSI?!
3854 if (sc->bge_flags & BGE_FLAG_ONESHOT_MSI) {
3856 * XXX
3857 * According to 5722-PG101-R,
3858 * BGE_PCIE_TRANSACT_ONESHOT_MSI applies only to
3859 * BCM5906.
3861 BGE_SETBIT(sc, BGE_PCIE_TRANSACT,
3862 BGE_PCIE_TRANSACT_ONESHOT_MSI);
3866 /* Tell firmware we're alive. */
3867 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
3869 /* Enable host interrupts if polling(4) is not enabled. */
3870 PCI_SETBIT(sc->bge_dev, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_CLEAR_INTA, 4);
3871 #ifdef IFPOLL_ENABLE
3872 if (ifp->if_flags & IFF_NPOLLING)
3873 bge_disable_intr(sc);
3874 else
3875 #endif
3876 bge_enable_intr(sc);
3878 ifp->if_flags |= IFF_RUNNING;
3879 ifq_clr_oactive(&ifp->if_snd);
3881 bge_ifmedia_upd(ifp);
3883 callout_reset(&sc->bge_stat_timer, hz, bge_tick, sc);
3887 * Set media options.
3889 static int
3890 bge_ifmedia_upd(struct ifnet *ifp)
3892 struct bge_softc *sc = ifp->if_softc;
3894 /* If this is a 1000baseX NIC, enable the TBI port. */
3895 if (sc->bge_flags & BGE_FLAG_TBI) {
3896 struct ifmedia *ifm = &sc->bge_ifmedia;
3898 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
3899 return(EINVAL);
3901 switch(IFM_SUBTYPE(ifm->ifm_media)) {
3902 case IFM_AUTO:
3904 * The BCM5704 ASIC appears to have a special
3905 * mechanism for programming the autoneg
3906 * advertisement registers in TBI mode.
3908 if (!bge_fake_autoneg &&
3909 sc->bge_asicrev == BGE_ASICREV_BCM5704) {
3910 uint32_t sgdig;
3912 CSR_WRITE_4(sc, BGE_TX_TBI_AUTONEG, 0);
3913 sgdig = CSR_READ_4(sc, BGE_SGDIG_CFG);
3914 sgdig |= BGE_SGDIGCFG_AUTO |
3915 BGE_SGDIGCFG_PAUSE_CAP |
3916 BGE_SGDIGCFG_ASYM_PAUSE;
3917 CSR_WRITE_4(sc, BGE_SGDIG_CFG,
3918 sgdig | BGE_SGDIGCFG_SEND);
3919 DELAY(5);
3920 CSR_WRITE_4(sc, BGE_SGDIG_CFG, sgdig);
3922 break;
3923 case IFM_1000_SX:
3924 if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) {
3925 BGE_CLRBIT(sc, BGE_MAC_MODE,
3926 BGE_MACMODE_HALF_DUPLEX);
3927 } else {
3928 BGE_SETBIT(sc, BGE_MAC_MODE,
3929 BGE_MACMODE_HALF_DUPLEX);
3931 DELAY(40);
3932 break;
3933 default:
3934 return(EINVAL);
3936 } else {
3937 struct mii_data *mii = device_get_softc(sc->bge_miibus);
3939 sc->bge_link_evt++;
3940 sc->bge_link = 0;
3941 if (mii->mii_instance) {
3942 struct mii_softc *miisc;
3944 LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
3945 mii_phy_reset(miisc);
3947 mii_mediachg(mii);
3950	 * Force an interrupt so that we will call bge_link_upd
3951	 * if needed and clear any pending link state attention.
3952	 * Without this we would not get any further interrupts
3953	 * for link state changes and thus would never mark the
3954	 * link UP or be able to send in bge_start. The only way
3955	 * to get things working was to receive a packet and get
3956	 * an RX intr.
3958	 * bge_tick should help for fiber cards, and we might not
3959	 * need to do this here if BGE_FLAG_TBI is set, but as we
3960	 * poll for fiber anyway it should not harm.
3962 if (BGE_IS_CRIPPLED(sc))
3963 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_SET);
3964 else
3965 BGE_SETBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_COAL_NOW);
3967 return(0);
3971 * Report current media status.
3973 static void
3974 bge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
3976 struct bge_softc *sc = ifp->if_softc;
3978 if ((ifp->if_flags & IFF_RUNNING) == 0)
3979 return;
3981 if (sc->bge_flags & BGE_FLAG_TBI) {
3982 ifmr->ifm_status = IFM_AVALID;
3983 ifmr->ifm_active = IFM_ETHER;
3984 if (CSR_READ_4(sc, BGE_MAC_STS) &
3985 BGE_MACSTAT_TBI_PCS_SYNCHED) {
3986 ifmr->ifm_status |= IFM_ACTIVE;
3987 } else {
3988 ifmr->ifm_active |= IFM_NONE;
3989 return;
3992 ifmr->ifm_active |= IFM_1000_SX;
3993 if (CSR_READ_4(sc, BGE_MAC_MODE) & BGE_MACMODE_HALF_DUPLEX)
3994 ifmr->ifm_active |= IFM_HDX;
3995 else
3996 ifmr->ifm_active |= IFM_FDX;
3997 } else {
3998 struct mii_data *mii = device_get_softc(sc->bge_miibus);
4000 mii_pollstat(mii);
4001 ifmr->ifm_active = mii->mii_media_active;
4002 ifmr->ifm_status = mii->mii_media_status;
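/*
 * Handle socket ioctls: MTU and interface flag changes, multicast
 * filter updates, media selection, and checksum/TSO capability
 * toggles.
 */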
4006 static int
4007 bge_ioctl(struct ifnet *ifp, u_long command, caddr_t data, struct ucred *cr)
4009 struct bge_softc *sc = ifp->if_softc;
4010 struct ifreq *ifr = (struct ifreq *)data;
4011 int mask, error = 0;
4013 ASSERT_SERIALIZED(ifp->if_serializer);
4015 switch (command) {
4016 case SIOCSIFMTU:
4017 if ((!BGE_IS_JUMBO_CAPABLE(sc) && ifr->ifr_mtu > ETHERMTU) ||
4018 (BGE_IS_JUMBO_CAPABLE(sc) &&
4019 ifr->ifr_mtu > BGE_JUMBO_MTU)) {
4020 error = EINVAL;
4021 } else if (ifp->if_mtu != ifr->ifr_mtu) {
4022 ifp->if_mtu = ifr->ifr_mtu;
4023 if (ifp->if_flags & IFF_RUNNING)
4024 bge_init(sc);
4026 break;
4027 case SIOCSIFFLAGS:
4028 if (ifp->if_flags & IFF_UP) {
4029 if (ifp->if_flags & IFF_RUNNING) {
4030 mask = ifp->if_flags ^ sc->bge_if_flags;
4033 * If only the state of the PROMISC flag
4034 * changed, then just use the 'set promisc
4035 * mode' command instead of reinitializing
4036 * the entire NIC. Doing a full re-init
4037 * means reloading the firmware and waiting
4038 * for it to start up, which may take a
4039 * second or two. Similarly for ALLMULTI.
4041 if (mask & IFF_PROMISC)
4042 bge_setpromisc(sc);
4043 if (mask & IFF_ALLMULTI)
4044 bge_setmulti(sc);
4045 } else {
4046 bge_init(sc);
4048 } else if (ifp->if_flags & IFF_RUNNING) {
4049 bge_stop(sc);
4051 sc->bge_if_flags = ifp->if_flags;
4052 break;
4053 case SIOCADDMULTI:
4054 case SIOCDELMULTI:
4055 if (ifp->if_flags & IFF_RUNNING)
4056 bge_setmulti(sc);
4057 break;
4058 case SIOCSIFMEDIA:
4059 case SIOCGIFMEDIA:
4060 if (sc->bge_flags & BGE_FLAG_TBI) {
4061 error = ifmedia_ioctl(ifp, ifr,
4062 &sc->bge_ifmedia, command);
4063 } else {
4064 struct mii_data *mii;
4066 mii = device_get_softc(sc->bge_miibus);
4067 error = ifmedia_ioctl(ifp, ifr,
4068 &mii->mii_media, command);
4070 break;
4071 case SIOCSIFCAP:
4072 mask = ifr->ifr_reqcap ^ ifp->if_capenable;
4073 if (mask & IFCAP_HWCSUM) {
4074 ifp->if_capenable ^= (mask & IFCAP_HWCSUM);
4075 if (ifp->if_capenable & IFCAP_TXCSUM)
4076 ifp->if_hwassist |= BGE_CSUM_FEATURES;
4077 else
4078 ifp->if_hwassist &= ~BGE_CSUM_FEATURES;
4080 if (mask & IFCAP_TSO) {
4081 ifp->if_capenable ^= IFCAP_TSO;
4082 if (ifp->if_capenable & IFCAP_TSO)
4083 ifp->if_hwassist |= CSUM_TSO;
4084 else
4085 ifp->if_hwassist &= ~CSUM_TSO;
4087 break;
4088 default:
4089 error = ether_ioctl(ifp, command, data);
4090 break;
4092 return error;
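/*
 * The transmitter appears to be wedged: log the timeout, reset and
 * reinitialize the chip, then kick the send queue if it is not empty.
 */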
4095 static void
4096 bge_watchdog(struct ifnet *ifp)
4098 struct bge_softc *sc = ifp->if_softc;
4100 if_printf(ifp, "watchdog timeout -- resetting\n");
4102 bge_init(sc);
4104 IFNET_STAT_INC(ifp, oerrors, 1);
4106 if (!ifq_is_empty(&ifp->if_snd))
4107 if_devstart(ifp);
4111 * Stop the adapter and free any mbufs allocated to the
4112 * RX and TX lists.
4114 static void
4115 bge_stop(struct bge_softc *sc)
4117 struct ifnet *ifp = &sc->arpcom.ac_if;
4119 ASSERT_SERIALIZED(ifp->if_serializer);
4121 callout_stop(&sc->bge_stat_timer);
4123 /* Disable host interrupts. */
4124 bge_disable_intr(sc);
4127 * Tell firmware we're shutting down.
4129 bge_stop_fw(sc);
4130 bge_sig_pre_reset(sc, BGE_RESET_SHUTDOWN);
4133 * Disable all of the receiver blocks
4135 bge_stop_block(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);
4136 bge_stop_block(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);
4137 bge_stop_block(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);
4138 if (BGE_IS_5700_FAMILY(sc))
4139 bge_stop_block(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);
4140 bge_stop_block(sc, BGE_RDBDI_MODE, BGE_RBDIMODE_ENABLE);
4141 bge_stop_block(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);
4142 bge_stop_block(sc, BGE_RBDC_MODE, BGE_RBDCMODE_ENABLE);
4145 * Disable all of the transmit blocks
4147 bge_stop_block(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);
4148 bge_stop_block(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);
4149 bge_stop_block(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);
4150 bge_stop_block(sc, BGE_RDMA_MODE, BGE_RDMAMODE_ENABLE);
4151 bge_stop_block(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE);
4152 if (BGE_IS_5700_FAMILY(sc))
4153 bge_stop_block(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);
4154 bge_stop_block(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);
4157 * Shut down all of the memory managers and related
4158 * state machines.
4160 bge_stop_block(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE);
4161 bge_stop_block(sc, BGE_WDMA_MODE, BGE_WDMAMODE_ENABLE);
4162 if (BGE_IS_5700_FAMILY(sc))
4163 bge_stop_block(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);
4164 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
4165 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);
4166 if (!BGE_IS_5705_PLUS(sc)) {
4167 BGE_CLRBIT(sc, BGE_BMAN_MODE, BGE_BMANMODE_ENABLE);
4168 BGE_CLRBIT(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);
4171 bge_reset(sc);
4172 bge_sig_legacy(sc, BGE_RESET_SHUTDOWN);
4173 bge_sig_post_reset(sc, BGE_RESET_SHUTDOWN);
4176	 * Keep the ASF firmware running if it is up.
4178 if (sc->bge_asf_mode & ASF_STACKUP)
4179 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
4180 else
4181 BGE_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
4183 /* Free the RX lists. */
4184 bge_free_rx_ring_std(sc);
4186 /* Free jumbo RX list. */
4187 if (BGE_IS_JUMBO_CAPABLE(sc))
4188 bge_free_rx_ring_jumbo(sc);
4190 /* Free TX buffers. */
4191 bge_free_tx_ring(sc);
4193 sc->bge_status_tag = 0;
4194 sc->bge_link = 0;
4195 sc->bge_coal_chg = 0;
4197 sc->bge_tx_saved_considx = BGE_TXCONS_UNSET;
4199 ifp->if_flags &= ~IFF_RUNNING;
4200 ifq_clr_oactive(&ifp->if_snd);
4201 ifp->if_timer = 0;
4205 * Stop all chip I/O so that the kernel's probe routines don't
4206 * get confused by errant DMAs when rebooting.
4208 static void
4209 bge_shutdown(device_t dev)
4211 struct bge_softc *sc = device_get_softc(dev);
4212 struct ifnet *ifp = &sc->arpcom.ac_if;
4214 lwkt_serialize_enter(ifp->if_serializer);
4215 bge_stop(sc);
4216 lwkt_serialize_exit(ifp->if_serializer);
4219 static int
4220 bge_suspend(device_t dev)
4222 struct bge_softc *sc = device_get_softc(dev);
4223 struct ifnet *ifp = &sc->arpcom.ac_if;
4225 lwkt_serialize_enter(ifp->if_serializer);
4226 bge_stop(sc);
4227 lwkt_serialize_exit(ifp->if_serializer);
4229 return 0;
4232 static int
4233 bge_resume(device_t dev)
4235 struct bge_softc *sc = device_get_softc(dev);
4236 struct ifnet *ifp = &sc->arpcom.ac_if;
4238 lwkt_serialize_enter(ifp->if_serializer);
4240 if (ifp->if_flags & IFF_UP) {
4241 bge_init(sc);
4243 if (!ifq_is_empty(&ifp->if_snd))
4244 if_devstart(ifp);
4247 lwkt_serialize_exit(ifp->if_serializer);
4249 return 0;
4252 static void
4253 bge_setpromisc(struct bge_softc *sc)
4255 struct ifnet *ifp = &sc->arpcom.ac_if;
4257 if (ifp->if_flags & IFF_PROMISC)
4258 BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
4259 else
4260 BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
4263 static void
4264 bge_dma_free(struct bge_softc *sc)
4266 int i;
4268	/* Destroy RX mbuf DMA resources. */
4269 if (sc->bge_cdata.bge_rx_mtag != NULL) {
4270 for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
4271 bus_dmamap_destroy(sc->bge_cdata.bge_rx_mtag,
4272 sc->bge_cdata.bge_rx_std_dmamap[i]);
4274 bus_dmamap_destroy(sc->bge_cdata.bge_rx_mtag,
4275 sc->bge_cdata.bge_rx_tmpmap);
4276 bus_dma_tag_destroy(sc->bge_cdata.bge_rx_mtag);
4279	/* Destroy TX mbuf DMA resources. */
4280 if (sc->bge_cdata.bge_tx_mtag != NULL) {
4281 for (i = 0; i < BGE_TX_RING_CNT; i++) {
4282 bus_dmamap_destroy(sc->bge_cdata.bge_tx_mtag,
4283 sc->bge_cdata.bge_tx_dmamap[i]);
4285 bus_dma_tag_destroy(sc->bge_cdata.bge_tx_mtag);
4288 /* Destroy standard RX ring */
4289 bge_dma_block_free(sc->bge_cdata.bge_rx_std_ring_tag,
4290 sc->bge_cdata.bge_rx_std_ring_map,
4291 sc->bge_ldata.bge_rx_std_ring);
4293 if (BGE_IS_JUMBO_CAPABLE(sc))
4294 bge_free_jumbo_mem(sc);
4296 /* Destroy RX return ring */
4297 bge_dma_block_free(sc->bge_cdata.bge_rx_return_ring_tag,
4298 sc->bge_cdata.bge_rx_return_ring_map,
4299 sc->bge_ldata.bge_rx_return_ring);
4301 /* Destroy TX ring */
4302 bge_dma_block_free(sc->bge_cdata.bge_tx_ring_tag,
4303 sc->bge_cdata.bge_tx_ring_map,
4304 sc->bge_ldata.bge_tx_ring);
4306 /* Destroy status block */
4307 bge_dma_block_free(sc->bge_cdata.bge_status_tag,
4308 sc->bge_cdata.bge_status_map,
4309 sc->bge_ldata.bge_status_block);
4311 /* Destroy statistics block */
4312 bge_dma_block_free(sc->bge_cdata.bge_stats_tag,
4313 sc->bge_cdata.bge_stats_map,
4314 sc->bge_ldata.bge_stats);
4316 /* Destroy the parent tag */
4317 if (sc->bge_cdata.bge_parent_tag != NULL)
4318 bus_dma_tag_destroy(sc->bge_cdata.bge_parent_tag);
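/*
 * Allocate all DMA resources: the parent tag, the RX/TX mbuf tags
 * and maps, and the coherent blocks backing the rings, the status
 * block and the statistics block.
 */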
4321 static int
4322 bge_dma_alloc(struct bge_softc *sc)
4324 struct ifnet *ifp = &sc->arpcom.ac_if;
4325 int i, error;
4326 bus_addr_t lowaddr;
4327 bus_size_t txmaxsz;
4329 lowaddr = BUS_SPACE_MAXADDR;
4330 if (sc->bge_flags & BGE_FLAG_MAXADDR_40BIT)
4331 lowaddr = BGE_DMA_MAXADDR_40BIT;
4334	 * Allocate the parent bus DMA tag appropriate for PCI.
4336	 * All of the NetXtreme/NetLink controllers have a 4GB boundary
4337	 * DMA bug.
4338	 * Whenever an address crosses a multiple of the 4GB boundary
4339	 * (including 4GB, 8GB, 12GB, etc.) and makes the transition
4340	 * from 0xX_FFFF_FFFF to 0x(X+1)_0000_0000, an internal DMA
4341	 * state machine will lock up and cause the device to hang.
4343 error = bus_dma_tag_create(NULL, 1, BGE_DMA_BOUNDARY_4G,
4344 lowaddr, BUS_SPACE_MAXADDR,
4345 BUS_SPACE_MAXSIZE_32BIT, 0,
4346 BUS_SPACE_MAXSIZE_32BIT,
4347 0, &sc->bge_cdata.bge_parent_tag);
4348 if (error) {
4349 if_printf(ifp, "could not allocate parent dma tag\n");
4350 return error;
4354 * Create DMA tag and maps for RX mbufs.
4356 error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag, 1, 0,
4357 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
4358 MCLBYTES, 1, MCLBYTES,
4359 BUS_DMA_ALLOCNOW | BUS_DMA_WAITOK,
4360 &sc->bge_cdata.bge_rx_mtag);
4361 if (error) {
4362 if_printf(ifp, "could not allocate RX mbuf dma tag\n");
4363 return error;
4366 error = bus_dmamap_create(sc->bge_cdata.bge_rx_mtag,
4367 BUS_DMA_WAITOK, &sc->bge_cdata.bge_rx_tmpmap);
4368 if (error) {
4369 bus_dma_tag_destroy(sc->bge_cdata.bge_rx_mtag);
4370 sc->bge_cdata.bge_rx_mtag = NULL;
4371 return error;
4374 for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
4375 error = bus_dmamap_create(sc->bge_cdata.bge_rx_mtag,
4376 BUS_DMA_WAITOK,
4377 &sc->bge_cdata.bge_rx_std_dmamap[i]);
4378 if (error) {
4379 int j;
4381 for (j = 0; j < i; ++j) {
4382 bus_dmamap_destroy(sc->bge_cdata.bge_rx_mtag,
4383 sc->bge_cdata.bge_rx_std_dmamap[j]);
4385 bus_dma_tag_destroy(sc->bge_cdata.bge_rx_mtag);
4386 sc->bge_cdata.bge_rx_mtag = NULL;
4388 if_printf(ifp, "could not create DMA map for RX\n");
4389 return error;
4394 * Create DMA tag and maps for TX mbufs.
4396 if (sc->bge_flags & BGE_FLAG_TSO)
4397 txmaxsz = IP_MAXPACKET + sizeof(struct ether_vlan_header);
4398 else
4399 txmaxsz = BGE_JUMBO_FRAMELEN;
4400 error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag, 1, 0,
4401 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
4402 txmaxsz, BGE_NSEG_NEW, PAGE_SIZE,
4403 BUS_DMA_ALLOCNOW | BUS_DMA_WAITOK |
4404 BUS_DMA_ONEBPAGE,
4405 &sc->bge_cdata.bge_tx_mtag);
4406 if (error) {
4407 if_printf(ifp, "could not allocate TX mbuf dma tag\n");
4408 return error;
4411 for (i = 0; i < BGE_TX_RING_CNT; i++) {
4412 error = bus_dmamap_create(sc->bge_cdata.bge_tx_mtag,
4413 BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE,
4414 &sc->bge_cdata.bge_tx_dmamap[i]);
4415 if (error) {
4416 int j;
4418 for (j = 0; j < i; ++j) {
4419 bus_dmamap_destroy(sc->bge_cdata.bge_tx_mtag,
4420 sc->bge_cdata.bge_tx_dmamap[j]);
4422 bus_dma_tag_destroy(sc->bge_cdata.bge_tx_mtag);
4423 sc->bge_cdata.bge_tx_mtag = NULL;
4425 if_printf(ifp, "could not create DMA map for TX\n");
4426 return error;
4431	 * Create DMA resources for the standard RX ring.
4433 error = bge_dma_block_alloc(sc, BGE_STD_RX_RING_SZ,
4434 &sc->bge_cdata.bge_rx_std_ring_tag,
4435 &sc->bge_cdata.bge_rx_std_ring_map,
4436 (void *)&sc->bge_ldata.bge_rx_std_ring,
4437 &sc->bge_ldata.bge_rx_std_ring_paddr);
4438 if (error) {
4439 if_printf(ifp, "could not create std RX ring\n");
4440 return error;
4444 * Create jumbo buffer pool.
4446 if (BGE_IS_JUMBO_CAPABLE(sc)) {
4447 error = bge_alloc_jumbo_mem(sc);
4448 if (error) {
4449 if_printf(ifp, "could not create jumbo buffer pool\n");
4450 return error;
4455	 * Create DMA resources for the RX return ring.
4457 error = bge_dma_block_alloc(sc,
4458 BGE_RX_RTN_RING_SZ(sc->bge_return_ring_cnt),
4459 &sc->bge_cdata.bge_rx_return_ring_tag,
4460 &sc->bge_cdata.bge_rx_return_ring_map,
4461 (void *)&sc->bge_ldata.bge_rx_return_ring,
4462 &sc->bge_ldata.bge_rx_return_ring_paddr);
4463 if (error) {
4464 if_printf(ifp, "could not create RX ret ring\n");
4465 return error;
4469	 * Create DMA resources for the TX ring.
4471 error = bge_dma_block_alloc(sc, BGE_TX_RING_SZ,
4472 &sc->bge_cdata.bge_tx_ring_tag,
4473 &sc->bge_cdata.bge_tx_ring_map,
4474 (void *)&sc->bge_ldata.bge_tx_ring,
4475 &sc->bge_ldata.bge_tx_ring_paddr);
4476 if (error) {
4477 if_printf(ifp, "could not create TX ring\n");
4478 return error;
4482	 * Create DMA resources for the status block.
4484 error = bge_dma_block_alloc(sc, BGE_STATUS_BLK_SZ,
4485 &sc->bge_cdata.bge_status_tag,
4486 &sc->bge_cdata.bge_status_map,
4487 (void *)&sc->bge_ldata.bge_status_block,
4488 &sc->bge_ldata.bge_status_block_paddr);
4489 if (error) {
4490 if_printf(ifp, "could not create status block\n");
4491 return error;
4495	 * Create DMA resources for the statistics block.
4497 error = bge_dma_block_alloc(sc, BGE_STATS_SZ,
4498 &sc->bge_cdata.bge_stats_tag,
4499 &sc->bge_cdata.bge_stats_map,
4500 (void *)&sc->bge_ldata.bge_stats,
4501 &sc->bge_ldata.bge_stats_paddr);
4502 if (error) {
4503 if_printf(ifp, "could not create stats block\n");
4504 return error;
4506 return 0;
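/*
 * Allocate a zeroed, coherent DMA memory block and hand back its
 * tag, map, kernel virtual address and bus address.
 */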
4509 static int
4510 bge_dma_block_alloc(struct bge_softc *sc, bus_size_t size, bus_dma_tag_t *tag,
4511 bus_dmamap_t *map, void **addr, bus_addr_t *paddr)
4513 bus_dmamem_t dmem;
4514 int error;
4516 error = bus_dmamem_coherent(sc->bge_cdata.bge_parent_tag, PAGE_SIZE, 0,
4517 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
4518 size, BUS_DMA_WAITOK | BUS_DMA_ZERO, &dmem);
4519 if (error)
4520 return error;
4522 *tag = dmem.dmem_tag;
4523 *map = dmem.dmem_map;
4524 *addr = dmem.dmem_addr;
4525 *paddr = dmem.dmem_busaddr;
4527 return 0;
4530 static void
4531 bge_dma_block_free(bus_dma_tag_t tag, bus_dmamap_t map, void *addr)
4533 if (tag != NULL) {
4534 bus_dmamap_unload(tag, map);
4535 bus_dmamem_free(tag, addr, map);
4536 bus_dma_tag_destroy(tag);
4541 * Grrr. The link status word in the status block does
4542 * not work correctly on the BCM5700 rev AX and BX chips,
4543 * according to all available information. Hence, we have
4544 * to enable MII interrupts in order to properly obtain
4545 * async link changes. Unfortunately, this also means that
4546 * we have to read the MAC status register to detect link
4547 * changes, thereby adding an additional register access to
4548 * the interrupt handler.
4550	 * XXX: perhaps the link state detection procedure used for
4551	 * BGE_CHIPID_BCM5700_B2 can be used for other BCM5700 revisions.
4553 static void
4554 bge_bcm5700_link_upd(struct bge_softc *sc, uint32_t status __unused)
4556 struct ifnet *ifp = &sc->arpcom.ac_if;
4557 struct mii_data *mii = device_get_softc(sc->bge_miibus);
4559 mii_pollstat(mii);
4561 if (!sc->bge_link &&
4562 (mii->mii_media_status & IFM_ACTIVE) &&
4563 IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
4564 sc->bge_link++;
4565 if (bootverbose)
4566 if_printf(ifp, "link UP\n");
4567 } else if (sc->bge_link &&
4568 (!(mii->mii_media_status & IFM_ACTIVE) ||
4569 IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE)) {
4570 sc->bge_link = 0;
4571 if (bootverbose)
4572 if_printf(ifp, "link DOWN\n");
4575 /* Clear the interrupt. */
4576 CSR_WRITE_4(sc, BGE_MAC_EVT_ENB, BGE_EVTENB_MI_INTERRUPT);
4577 bge_miibus_readreg(sc->bge_dev, 1, BRGPHY_MII_ISR);
4578 bge_miibus_writereg(sc->bge_dev, 1, BRGPHY_MII_IMR, BRGPHY_INTRS);
4581 static void
4582 bge_tbi_link_upd(struct bge_softc *sc, uint32_t status)
4584 struct ifnet *ifp = &sc->arpcom.ac_if;
4586 #define PCS_ENCODE_ERR (BGE_MACSTAT_PORT_DECODE_ERROR|BGE_MACSTAT_MI_COMPLETE)
4589 * Sometimes PCS encoding errors are detected in
4590 * TBI mode (on fiber NICs), and for some reason
4591 * the chip will signal them as link changes.
4592 * If we get a link change event, but the 'PCS
4593 * encoding error' bit in the MAC status register
4594 * is set, don't bother doing a link check.
4595 * This avoids spurious "gigabit link up" messages
4596 * that sometimes appear on fiber NICs during
4597 * periods of heavy traffic.
4599 if (status & BGE_MACSTAT_TBI_PCS_SYNCHED) {
4600 if (!sc->bge_link) {
4601 sc->bge_link++;
4602 if (sc->bge_asicrev == BGE_ASICREV_BCM5704) {
4603 BGE_CLRBIT(sc, BGE_MAC_MODE,
4604 BGE_MACMODE_TBI_SEND_CFGS);
4605 DELAY(40);
4607 CSR_WRITE_4(sc, BGE_MAC_STS, 0xFFFFFFFF);
4609 if (bootverbose)
4610 if_printf(ifp, "link UP\n");
4612 ifp->if_link_state = LINK_STATE_UP;
4613 if_link_state_change(ifp);
4615 } else if ((status & PCS_ENCODE_ERR) != PCS_ENCODE_ERR) {
4616 if (sc->bge_link) {
4617 sc->bge_link = 0;
4619 if (bootverbose)
4620 if_printf(ifp, "link DOWN\n");
4622 ifp->if_link_state = LINK_STATE_DOWN;
4623 if_link_state_change(ifp);
4627 #undef PCS_ENCODE_ERR
4629 /* Clear the attention. */
4630 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED |
4631 BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE |
4632 BGE_MACSTAT_LINK_CHANGED);
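/*
 * Copper link update: poll the PHY through the MII layer and let
 * bge_miibus_statchg() record the new link state.
 */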
4635 static void
4636 bge_copper_link_upd(struct bge_softc *sc, uint32_t status __unused)
4638 struct ifnet *ifp = &sc->arpcom.ac_if;
4639 struct mii_data *mii = device_get_softc(sc->bge_miibus);
4641 mii_pollstat(mii);
4642 bge_miibus_statchg(sc->bge_dev);
4644 if (bootverbose) {
4645 if (sc->bge_link)
4646 if_printf(ifp, "link UP\n");
4647 else
4648 if_printf(ifp, "link DOWN\n");
4651 /* Clear the attention. */
4652 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED |
4653 BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE |
4654 BGE_MACSTAT_LINK_CHANGED);
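/*
 * Link update for chips using PHY auto-polling: the link state is
 * derived from the polled MII status rather than the MAC status word.
 */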
4657 static void
4658 bge_autopoll_link_upd(struct bge_softc *sc, uint32_t status __unused)
4660 struct ifnet *ifp = &sc->arpcom.ac_if;
4661 struct mii_data *mii = device_get_softc(sc->bge_miibus);
4663 mii_pollstat(mii);
4665 if (!sc->bge_link &&
4666 (mii->mii_media_status & IFM_ACTIVE) &&
4667 IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
4668 sc->bge_link++;
4669 if (bootverbose)
4670 if_printf(ifp, "link UP\n");
4671 } else if (sc->bge_link &&
4672 (!(mii->mii_media_status & IFM_ACTIVE) ||
4673 IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE)) {
4674 sc->bge_link = 0;
4675 if (bootverbose)
4676 if_printf(ifp, "link DOWN\n");
4679 /* Clear the attention. */
4680 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED |
4681 BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE |
4682 BGE_MACSTAT_LINK_CHANGED);
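/*
 * sysctl handlers for the interrupt coalescing knobs. Each handler
 * funnels into bge_sysctl_coal_chg() with its own valid range and
 * change mask; bge_coal_change() later pushes the new values to
 * the chip.
 */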
4685 static int
4686 bge_sysctl_rx_coal_ticks(SYSCTL_HANDLER_ARGS)
4688 struct bge_softc *sc = arg1;
4690 return bge_sysctl_coal_chg(oidp, arg1, arg2, req,
4691 &sc->bge_rx_coal_ticks,
4692 BGE_RX_COAL_TICKS_MIN, BGE_RX_COAL_TICKS_MAX,
4693 BGE_RX_COAL_TICKS_CHG);
4696 static int
4697 bge_sysctl_tx_coal_ticks(SYSCTL_HANDLER_ARGS)
4699 struct bge_softc *sc = arg1;
4701 return bge_sysctl_coal_chg(oidp, arg1, arg2, req,
4702 &sc->bge_tx_coal_ticks,
4703 BGE_TX_COAL_TICKS_MIN, BGE_TX_COAL_TICKS_MAX,
4704 BGE_TX_COAL_TICKS_CHG);
4707 static int
4708 bge_sysctl_rx_coal_bds(SYSCTL_HANDLER_ARGS)
4710 struct bge_softc *sc = arg1;
4712 return bge_sysctl_coal_chg(oidp, arg1, arg2, req,
4713 &sc->bge_rx_coal_bds,
4714 BGE_RX_COAL_BDS_MIN, BGE_RX_COAL_BDS_MAX,
4715 BGE_RX_COAL_BDS_CHG);
4718 static int
4719 bge_sysctl_tx_coal_bds(SYSCTL_HANDLER_ARGS)
4721 struct bge_softc *sc = arg1;
4723 return bge_sysctl_coal_chg(oidp, arg1, arg2, req,
4724 &sc->bge_tx_coal_bds,
4725 BGE_TX_COAL_BDS_MIN, BGE_TX_COAL_BDS_MAX,
4726 BGE_TX_COAL_BDS_CHG);
4729 static int
4730 bge_sysctl_rx_coal_ticks_int(SYSCTL_HANDLER_ARGS)
4732 struct bge_softc *sc = arg1;
4734 return bge_sysctl_coal_chg(oidp, arg1, arg2, req,
4735 &sc->bge_rx_coal_ticks_int,
4736 BGE_RX_COAL_TICKS_MIN, BGE_RX_COAL_TICKS_MAX,
4737 BGE_RX_COAL_TICKS_INT_CHG);
4740 static int
4741 bge_sysctl_tx_coal_ticks_int(SYSCTL_HANDLER_ARGS)
4743 struct bge_softc *sc = arg1;
4745 return bge_sysctl_coal_chg(oidp, arg1, arg2, req,
4746 &sc->bge_tx_coal_ticks_int,
4747 BGE_TX_COAL_TICKS_MIN, BGE_TX_COAL_TICKS_MAX,
4748 BGE_TX_COAL_TICKS_INT_CHG);
4751 static int
4752 bge_sysctl_rx_coal_bds_int(SYSCTL_HANDLER_ARGS)
4754 struct bge_softc *sc = arg1;
4756 return bge_sysctl_coal_chg(oidp, arg1, arg2, req,
4757 &sc->bge_rx_coal_bds_int,
4758 BGE_RX_COAL_BDS_MIN, BGE_RX_COAL_BDS_MAX,
4759 BGE_RX_COAL_BDS_INT_CHG);
4762 static int
4763 bge_sysctl_tx_coal_bds_int(SYSCTL_HANDLER_ARGS)
4765 struct bge_softc *sc = arg1;
4767 return bge_sysctl_coal_chg(oidp, arg1, arg2, req,
4768 &sc->bge_tx_coal_bds_int,
4769 BGE_TX_COAL_BDS_MIN, BGE_TX_COAL_BDS_MAX,
4770 BGE_TX_COAL_BDS_INT_CHG);
4773 static int
4774 bge_sysctl_coal_chg(SYSCTL_HANDLER_ARGS, uint32_t *coal,
4775 int coal_min, int coal_max, uint32_t coal_chg_mask)
4777 struct bge_softc *sc = arg1;
4778 struct ifnet *ifp = &sc->arpcom.ac_if;
4779 int error = 0, v;
4781 lwkt_serialize_enter(ifp->if_serializer);
4783 v = *coal;
4784 error = sysctl_handle_int(oidp, &v, 0, req);
4785 if (!error && req->newptr != NULL) {
4786 if (v < coal_min || v > coal_max) {
4787 error = EINVAL;
4788 } else {
4789 *coal = v;
4790 sc->bge_coal_chg |= coal_chg_mask;
4794 lwkt_serialize_exit(ifp->if_serializer);
4795 return error;
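/*
 * Flush any pending coalescing parameter changes to the host
 * coalescing engine, one register per changed knob.
 */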
4798 static void
4799 bge_coal_change(struct bge_softc *sc)
4801 struct ifnet *ifp = &sc->arpcom.ac_if;
4803 ASSERT_SERIALIZED(ifp->if_serializer);
4805 if (sc->bge_coal_chg & BGE_RX_COAL_TICKS_CHG) {
4806 CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS,
4807 sc->bge_rx_coal_ticks);
4808 DELAY(10);
4809 CSR_READ_4(sc, BGE_HCC_RX_COAL_TICKS);
4811 if (bootverbose) {
4812 if_printf(ifp, "rx_coal_ticks -> %u\n",
4813 sc->bge_rx_coal_ticks);
4817 if (sc->bge_coal_chg & BGE_TX_COAL_TICKS_CHG) {
4818 CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS,
4819 sc->bge_tx_coal_ticks);
4820 DELAY(10);
4821 CSR_READ_4(sc, BGE_HCC_TX_COAL_TICKS);
4823 if (bootverbose) {
4824 if_printf(ifp, "tx_coal_ticks -> %u\n",
4825 sc->bge_tx_coal_ticks);
4829 if (sc->bge_coal_chg & BGE_RX_COAL_BDS_CHG) {
4830 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS,
4831 sc->bge_rx_coal_bds);
4832 DELAY(10);
4833 CSR_READ_4(sc, BGE_HCC_RX_MAX_COAL_BDS);
4835 if (bootverbose) {
4836 if_printf(ifp, "rx_coal_bds -> %u\n",
4837 sc->bge_rx_coal_bds);
4841 if (sc->bge_coal_chg & BGE_TX_COAL_BDS_CHG) {
4842 CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS,
4843 sc->bge_tx_coal_bds);
4844 DELAY(10);
4845 CSR_READ_4(sc, BGE_HCC_TX_MAX_COAL_BDS);
4847 if (bootverbose) {
4848 if_printf(ifp, "tx_max_coal_bds -> %u\n",
4849 sc->bge_tx_coal_bds);
4853 if (sc->bge_coal_chg & BGE_RX_COAL_TICKS_INT_CHG) {
4854 CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS_INT,
4855 sc->bge_rx_coal_ticks_int);
4856 DELAY(10);
4857 CSR_READ_4(sc, BGE_HCC_RX_COAL_TICKS_INT);
4859 if (bootverbose) {
4860 if_printf(ifp, "rx_coal_ticks_int -> %u\n",
4861 sc->bge_rx_coal_ticks_int);
4865 if (sc->bge_coal_chg & BGE_TX_COAL_TICKS_INT_CHG) {
4866 CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS_INT,
4867 sc->bge_tx_coal_ticks_int);
4868 DELAY(10);
4869 CSR_READ_4(sc, BGE_HCC_TX_COAL_TICKS_INT);
4871 if (bootverbose) {
4872 if_printf(ifp, "tx_coal_ticks_int -> %u\n",
4873 sc->bge_tx_coal_ticks_int);
4877 if (sc->bge_coal_chg & BGE_RX_COAL_BDS_INT_CHG) {
4878 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT,
4879 sc->bge_rx_coal_bds_int);
4880 DELAY(10);
4881 CSR_READ_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT);
4883 if (bootverbose) {
4884 if_printf(ifp, "rx_coal_bds_int -> %u\n",
4885 sc->bge_rx_coal_bds_int);
4889 if (sc->bge_coal_chg & BGE_TX_COAL_BDS_INT_CHG) {
4890 CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT,
4891 sc->bge_tx_coal_bds_int);
4892 DELAY(10);
4893 CSR_READ_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT);
4895 if (bootverbose) {
4896 if_printf(ifp, "tx_coal_bds_int -> %u\n",
4897 sc->bge_tx_coal_bds_int);
4901 sc->bge_coal_chg = 0;
4904 static void
4905 bge_enable_intr(struct bge_softc *sc)
4907 struct ifnet *ifp = &sc->arpcom.ac_if;
4909 lwkt_serialize_handler_enable(ifp->if_serializer);
4912 * Enable interrupt.
4914 bge_writembx(sc, BGE_MBX_IRQ0_LO, sc->bge_status_tag << 24);
4915 if (sc->bge_flags & BGE_FLAG_ONESHOT_MSI) {
4916 /* XXX Linux driver */
4917 bge_writembx(sc, BGE_MBX_IRQ0_LO, sc->bge_status_tag << 24);
4921 * Unmask the interrupt when we stop polling.
4923 PCI_CLRBIT(sc->bge_dev, BGE_PCI_MISC_CTL,
4924 BGE_PCIMISCCTL_MASK_PCI_INTR, 4);
4927	 * Trigger another interrupt, since the above write
4928	 * to interrupt mailbox 0 may have acknowledged a
4929	 * pending interrupt.
4931 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_SET);
4934 static void
4935 bge_disable_intr(struct bge_softc *sc)
4937 struct ifnet *ifp = &sc->arpcom.ac_if;
4940 * Mask the interrupt when we start polling.
4942 PCI_SETBIT(sc->bge_dev, BGE_PCI_MISC_CTL,
4943 BGE_PCIMISCCTL_MASK_PCI_INTR, 4);
4946	 * Acknowledge a possibly asserted interrupt.
4948 bge_writembx(sc, BGE_MBX_IRQ0_LO, 1);
4950 sc->bge_npoll.ifpc_stcount = 0;
4952 lwkt_serialize_handler_disable(ifp->if_serializer);
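/*
 * Try to read the station address from the chip's on-board memory;
 * the 0x484b signature in the upper 16 bits appears to mark a
 * validly stashed address.
 */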
4955 static int
4956 bge_get_eaddr_mem(struct bge_softc *sc, uint8_t ether_addr[])
4958 uint32_t mac_addr;
4959 int ret = 1;
4961 mac_addr = bge_readmem_ind(sc, 0x0c14);
4962 if ((mac_addr >> 16) == 0x484b) {
4963 ether_addr[0] = (uint8_t)(mac_addr >> 8);
4964 ether_addr[1] = (uint8_t)mac_addr;
4965 mac_addr = bge_readmem_ind(sc, 0x0c18);
4966 ether_addr[2] = (uint8_t)(mac_addr >> 24);
4967 ether_addr[3] = (uint8_t)(mac_addr >> 16);
4968 ether_addr[4] = (uint8_t)(mac_addr >> 8);
4969 ether_addr[5] = (uint8_t)mac_addr;
4970 ret = 0;
4972 return ret;
4975 static int
4976 bge_get_eaddr_nvram(struct bge_softc *sc, uint8_t ether_addr[])
4978 int mac_offset = BGE_EE_MAC_OFFSET;
4980 if (sc->bge_asicrev == BGE_ASICREV_BCM5906)
4981 mac_offset = BGE_EE_MAC_OFFSET_5906;
4983 return bge_read_nvram(sc, ether_addr, mac_offset + 2, ETHER_ADDR_LEN);
4986 static int
4987 bge_get_eaddr_eeprom(struct bge_softc *sc, uint8_t ether_addr[])
4989 if (sc->bge_flags & BGE_FLAG_NO_EEPROM)
4990 return 1;
4992 return bge_read_eeprom(sc, ether_addr, BGE_EE_MAC_OFFSET + 2,
4993 ETHER_ADDR_LEN);
4996 static int
4997 bge_get_eaddr(struct bge_softc *sc, uint8_t eaddr[])
4999 static const bge_eaddr_fcn_t bge_eaddr_funcs[] = {
5000 /* NOTE: Order is critical */
5001 bge_get_eaddr_mem,
5002 bge_get_eaddr_nvram,
5003 bge_get_eaddr_eeprom,
5004 NULL
5006 const bge_eaddr_fcn_t *func;
5008 for (func = bge_eaddr_funcs; *func != NULL; ++func) {
5009 if ((*func)(sc, eaddr) == 0)
5010 break;
5012 return (*func == NULL ? ENXIO : 0);
5016 * NOTE: 'm' is not freed upon failure
5018 static struct mbuf *
5019 bge_defrag_shortdma(struct mbuf *m)
5021 struct mbuf *n;
5022 int found;
5025	 * If the device receives two back-to-back send BDs with less
5026	 * than or equal to 8 total bytes then the device may hang. The
5027	 * two back-to-back send BDs must be in the same frame for this
5028	 * failure to occur. Scan the mbuf chain and see whether two
5029	 * back-to-back send BDs are there. If this is the case, allocate
5030	 * a new mbuf and copy the frame to work around the silicon bug.
5032 for (n = m, found = 0; n != NULL; n = n->m_next) {
5033 if (n->m_len < 8) {
5034 found++;
5035 if (found > 1)
5036 break;
5037 continue;
5039 found = 0;
5042 if (found > 1)
5043 n = m_defrag(m, M_NOWAIT);
5044 else
5045 n = m;
5046 return n;
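/*
 * Clear a block's enable bit and busy-wait until the block reports
 * that it has stopped or the timeout expires.
 */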
5049 static void
5050 bge_stop_block(struct bge_softc *sc, bus_size_t reg, uint32_t bit)
5052 int i;
5054 BGE_CLRBIT(sc, reg, bit);
5055 for (i = 0; i < BGE_TIMEOUT; i++) {
5056 if ((CSR_READ_4(sc, reg) & bit) == 0)
5057 return;
5058 DELAY(100);
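/*
 * Check the MAC status register for a link change (or a software-
 * requested link event) and dispatch the chip-specific link update
 * handler.
 */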
5062 static void
5063 bge_link_poll(struct bge_softc *sc)
5065 uint32_t status;
5067 status = CSR_READ_4(sc, BGE_MAC_STS);
5068 if ((status & sc->bge_link_chg) || sc->bge_link_evt) {
5069 sc->bge_link_evt = 0;
5070 sc->bge_link_upd(sc, status);
5074 static void
5075 bge_enable_msi(struct bge_softc *sc)
5077 uint32_t msi_mode;
5079 msi_mode = CSR_READ_4(sc, BGE_MSI_MODE);
5080 msi_mode |= BGE_MSIMODE_ENABLE;
5081 if (sc->bge_flags & BGE_FLAG_ONESHOT_MSI) {
5083 * According to all of the datasheets that are publicly
5084 * available, bit 5 of the MSI_MODE is defined to be
5085 * "MSI FIFO Underrun Attn" for BCM5755+ and BCM5906, on
5086 * which "oneshot MSI" is enabled. However, it is always
5087 * safe to clear it here.
5089 msi_mode &= ~BGE_MSIMODE_ONESHOT_DISABLE;
5091 CSR_WRITE_4(sc, BGE_MSI_MODE, msi_mode);
5094 static int
5095 bge_setup_tso(struct bge_softc *sc, struct mbuf **mp,
5096 uint16_t *mss0, uint16_t *flags0)
5098 struct mbuf *m;
5099 struct ip *ip;
5100 struct tcphdr *th;
5101 int thoff, iphlen, hoff, hlen;
5102 uint16_t flags, mss;
5104 m = *mp;
5105 KASSERT(M_WRITABLE(m), ("TSO mbuf not writable"));
5107 hoff = m->m_pkthdr.csum_lhlen;
5108 iphlen = m->m_pkthdr.csum_iphlen;
5109 thoff = m->m_pkthdr.csum_thlen;
5111 KASSERT(hoff > 0, ("invalid ether header len"));
5112 KASSERT(iphlen > 0, ("invalid ip header len"));
5113 KASSERT(thoff > 0, ("invalid tcp header len"));
5115 if (__predict_false(m->m_len < hoff + iphlen + thoff)) {
5116 m = m_pullup(m, hoff + iphlen + thoff);
5117 if (m == NULL) {
5118 *mp = NULL;
5119 return ENOBUFS;
5121 *mp = m;
5123 ip = mtodoff(m, struct ip *, hoff);
5124 th = mtodoff(m, struct tcphdr *, hoff + iphlen);
5126 mss = m->m_pkthdr.tso_segsz;
5127 flags = BGE_TXBDFLAG_CPU_PRE_DMA | BGE_TXBDFLAG_CPU_POST_DMA;
5129 ip->ip_len = htons(mss + iphlen + thoff);
5130 th->th_sum = 0;
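/*
 * Encode the combined IP + TCP header length, in 32-bit words,
 * into the upper bits of the MSS field of the send BD.
 */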
5132 hlen = (iphlen + thoff) >> 2;
5133 mss |= (hlen << 11);
5135 *mss0 = mss;
5136 *flags0 = flags;
5138 return 0;
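/*
 * If ASF firmware is running, ask it to pause and wait briefly for
 * the command to be accepted.
 */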
5141 static void
5142 bge_stop_fw(struct bge_softc *sc)
5144 int i;
5146 if (sc->bge_asf_mode) {
5147 bge_writemem_ind(sc, BGE_SRAM_FW_CMD_MB, BGE_FW_CMD_PAUSE);
5148 CSR_WRITE_4(sc, BGE_RX_CPU_EVENT,
5149 CSR_READ_4(sc, BGE_RX_CPU_EVENT) | BGE_RX_CPU_DRV_EVENT);
5151		for (i = 0; i < 100; i++) {
5152 if (!(CSR_READ_4(sc, BGE_RX_CPU_EVENT) &
5153 BGE_RX_CPU_DRV_EVENT))
5154 break;
5155 DELAY(10);
5160 static void
5161 bge_sig_pre_reset(struct bge_softc *sc, int type)
5164	 * Some chips don't like this, so only do it if ASF is enabled.
5166 if (sc->bge_asf_mode)
5167 bge_writemem_ind(sc, BGE_SRAM_FW_MB, BGE_SRAM_FW_MB_MAGIC);
5169 if (sc->bge_asf_mode & ASF_NEW_HANDSHAKE) {
5170 switch (type) {
5171 case BGE_RESET_START:
5172 bge_writemem_ind(sc, BGE_SRAM_FW_DRV_STATE_MB,
5173 BGE_FW_DRV_STATE_START);
5174 break;
5175 case BGE_RESET_SHUTDOWN:
5176 bge_writemem_ind(sc, BGE_SRAM_FW_DRV_STATE_MB,
5177 BGE_FW_DRV_STATE_UNLOAD);
5178 break;
5179 case BGE_RESET_SUSPEND:
5180 bge_writemem_ind(sc, BGE_SRAM_FW_DRV_STATE_MB,
5181 BGE_FW_DRV_STATE_SUSPEND);
5182 break;
5186 if (type == BGE_RESET_START || type == BGE_RESET_SUSPEND)
5187 bge_ape_driver_state_change(sc, type);
5190 static void
5191 bge_sig_legacy(struct bge_softc *sc, int type)
5193 if (sc->bge_asf_mode) {
5194 switch (type) {
5195 case BGE_RESET_START:
5196 bge_writemem_ind(sc, BGE_SRAM_FW_DRV_STATE_MB,
5197 BGE_FW_DRV_STATE_START);
5198 break;
5199 case BGE_RESET_SHUTDOWN:
5200 bge_writemem_ind(sc, BGE_SRAM_FW_DRV_STATE_MB,
5201 BGE_FW_DRV_STATE_UNLOAD);
5202 break;
5207 static void
5208 bge_sig_post_reset(struct bge_softc *sc, int type)
5210 if (sc->bge_asf_mode & ASF_NEW_HANDSHAKE) {
5211 switch (type) {
5212 case BGE_RESET_START:
5213 bge_writemem_ind(sc, BGE_SRAM_FW_DRV_STATE_MB,
5214 BGE_FW_DRV_STATE_START_DONE);
5215 /* START DONE */
5216 break;
5217 case BGE_RESET_SHUTDOWN:
5218 bge_writemem_ind(sc, BGE_SRAM_FW_DRV_STATE_MB,
5219 BGE_FW_DRV_STATE_UNLOAD_DONE);
5220 break;
5223 if (type == BGE_RESET_SHUTDOWN)
5224 bge_ape_driver_state_change(sc, type);
5227 static void
5228 bge_asf_driver_up(struct bge_softc *sc)
5230 if (sc->bge_asf_mode & ASF_STACKUP) {
5231		/* Send an ASF heartbeat approx. every 2s */
5232 if (sc->bge_asf_count)
5233			sc->bge_asf_count--;
5234 else {
5235 sc->bge_asf_count = 2;
5236 bge_writemem_ind(sc, BGE_SRAM_FW_CMD_MB,
5237 BGE_FW_CMD_DRV_ALIVE);
5238 bge_writemem_ind(sc, BGE_SRAM_FW_CMD_LEN_MB, 4);
5239 bge_writemem_ind(sc, BGE_SRAM_FW_CMD_DATA_MB,
5240 BGE_FW_HB_TIMEOUT_SEC);
5241 CSR_WRITE_4(sc, BGE_RX_CPU_EVENT,
5242 CSR_READ_4(sc, BGE_RX_CPU_EVENT) |
5243 BGE_RX_CPU_DRV_EVENT);
5249 * Clear all stale locks and select the lock for this driver instance.
5251 static void
5252 bge_ape_lock_init(struct bge_softc *sc)
5254 uint32_t bit, regbase;
5255 int i;
5257 if (sc->bge_asicrev == BGE_ASICREV_BCM5761)
5258 regbase = BGE_APE_LOCK_GRANT;
5259 else
5260 regbase = BGE_APE_PER_LOCK_GRANT;
5262 /* Clear any stale locks. */
5263 for (i = BGE_APE_LOCK_PHY0; i <= BGE_APE_LOCK_GPIO; i++) {
5264 switch (i) {
5265 case BGE_APE_LOCK_PHY0:
5266 case BGE_APE_LOCK_PHY1:
5267 case BGE_APE_LOCK_PHY2:
5268 case BGE_APE_LOCK_PHY3:
5269 bit = BGE_APE_LOCK_GRANT_DRIVER0;
5270 break;
5271 default:
5272 if (sc->bge_func_addr == 0)
5273 bit = BGE_APE_LOCK_GRANT_DRIVER0;
5274 else
5275 bit = (1 << sc->bge_func_addr);
5277 APE_WRITE_4(sc, regbase + 4 * i, bit);
5280 /* Select the PHY lock based on the device's function number. */
5281 switch (sc->bge_func_addr) {
5282 case 0:
5283 sc->bge_phy_ape_lock = BGE_APE_LOCK_PHY0;
5284 break;
5285 case 1:
5286 sc->bge_phy_ape_lock = BGE_APE_LOCK_PHY1;
5287 break;
5288 case 2:
5289 sc->bge_phy_ape_lock = BGE_APE_LOCK_PHY2;
5290 break;
5291 case 3:
5292 sc->bge_phy_ape_lock = BGE_APE_LOCK_PHY3;
5293 break;
5294 default:
5295 device_printf(sc->bge_dev,
5296 "PHY lock not supported on this function\n");
5301 * Check for APE firmware, set flags, and print version info.
5303 static void
5304 bge_ape_read_fw_ver(struct bge_softc *sc)
5306 const char *fwtype;
5307 uint32_t apedata, features;
5309 /* Check for a valid APE signature in shared memory. */
5310 apedata = APE_READ_4(sc, BGE_APE_SEG_SIG);
5311 if (apedata != BGE_APE_SEG_SIG_MAGIC) {
5312 device_printf(sc->bge_dev, "no APE signature\n");
5313 sc->bge_mfw_flags &= ~BGE_MFW_ON_APE;
5314 return;
5317 /* Check if APE firmware is running. */
5318 apedata = APE_READ_4(sc, BGE_APE_FW_STATUS);
5319 if ((apedata & BGE_APE_FW_STATUS_READY) == 0) {
5320 device_printf(sc->bge_dev, "APE signature found "
5321 "but FW status not ready! 0x%08x\n", apedata);
5322 return;
5325 sc->bge_mfw_flags |= BGE_MFW_ON_APE;
5327	/* Fetch the APE firmware type and version. */
5328 apedata = APE_READ_4(sc, BGE_APE_FW_VERSION);
5329 features = APE_READ_4(sc, BGE_APE_FW_FEATURES);
5330 if ((features & BGE_APE_FW_FEATURE_NCSI) != 0) {
5331 sc->bge_mfw_flags |= BGE_MFW_TYPE_NCSI;
5332 fwtype = "NCSI";
5333 } else if ((features & BGE_APE_FW_FEATURE_DASH) != 0) {
5334 sc->bge_mfw_flags |= BGE_MFW_TYPE_DASH;
5335 fwtype = "DASH";
5336 } else
5337 fwtype = "UNKN";
5339 /* Print the APE firmware version. */
5340 device_printf(sc->bge_dev, "APE FW version: %s v%d.%d.%d.%d\n",
5341 fwtype,
5342 (apedata & BGE_APE_FW_VERSION_MAJMSK) >> BGE_APE_FW_VERSION_MAJSFT,
5343 (apedata & BGE_APE_FW_VERSION_MINMSK) >> BGE_APE_FW_VERSION_MINSFT,
5344 (apedata & BGE_APE_FW_VERSION_REVMSK) >> BGE_APE_FW_VERSION_REVSFT,
5345 (apedata & BGE_APE_FW_VERSION_BLDMSK));
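/*
 * Acquire the requested APE lock on behalf of this PCI function,
 * waiting up to one second for the grant.
 */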
5348 static int
5349 bge_ape_lock(struct bge_softc *sc, int locknum)
5351 uint32_t bit, gnt, req, status;
5352 int i, off;
5354 if ((sc->bge_mfw_flags & BGE_MFW_ON_APE) == 0)
5355 return (0);
5357 /* Lock request/grant registers have different bases. */
5358 if (sc->bge_asicrev == BGE_ASICREV_BCM5761) {
5359 req = BGE_APE_LOCK_REQ;
5360 gnt = BGE_APE_LOCK_GRANT;
5361 } else {
5362 req = BGE_APE_PER_LOCK_REQ;
5363 gnt = BGE_APE_PER_LOCK_GRANT;
5366 off = 4 * locknum;
5368 switch (locknum) {
5369 case BGE_APE_LOCK_GPIO:
5370 /* Lock required when using GPIO. */
5371 if (sc->bge_asicrev == BGE_ASICREV_BCM5761)
5372 return (0);
5373 if (sc->bge_func_addr == 0)
5374 bit = BGE_APE_LOCK_REQ_DRIVER0;
5375 else
5376 bit = (1 << sc->bge_func_addr);
5377 break;
5378 case BGE_APE_LOCK_GRC:
5379 /* Lock required to reset the device. */
5380 if (sc->bge_func_addr == 0)
5381 bit = BGE_APE_LOCK_REQ_DRIVER0;
5382 else
5383 bit = (1 << sc->bge_func_addr);
5384 break;
5385 case BGE_APE_LOCK_MEM:
5386 /* Lock required when accessing certain APE memory. */
5387 if (sc->bge_func_addr == 0)
5388 bit = BGE_APE_LOCK_REQ_DRIVER0;
5389 else
5390 bit = (1 << sc->bge_func_addr);
5391 break;
5392 case BGE_APE_LOCK_PHY0:
5393 case BGE_APE_LOCK_PHY1:
5394 case BGE_APE_LOCK_PHY2:
5395 case BGE_APE_LOCK_PHY3:
5396 /* Lock required when accessing PHYs. */
5397 bit = BGE_APE_LOCK_REQ_DRIVER0;
5398 break;
5399 default:
5400 return (EINVAL);
5403 /* Request a lock. */
5404 APE_WRITE_4(sc, req + off, bit);
5406 /* Wait up to 1 second to acquire lock. */
5407 for (i = 0; i < 20000; i++) {
5408 status = APE_READ_4(sc, gnt + off);
5409 if (status == bit)
5410 break;
5411 DELAY(50);
5414 /* Handle any errors. */
5415 if (status != bit) {
5416 device_printf(sc->bge_dev, "APE lock %d request failed! "
5417 "request = 0x%04x[0x%04x], status = 0x%04x[0x%04x]\n",
5418 locknum, req + off, bit & 0xFFFF, gnt + off,
5419 status & 0xFFFF);
5420 /* Revoke the lock request. */
5421 APE_WRITE_4(sc, gnt + off, bit);
5422 return (EBUSY);
5425 return (0);
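/*
 * Release an APE lock previously acquired with bge_ape_lock().
 */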
5428 static void
5429 bge_ape_unlock(struct bge_softc *sc, int locknum)
5431 uint32_t bit, gnt;
5432 int off;
5434 if ((sc->bge_mfw_flags & BGE_MFW_ON_APE) == 0)
5435 return;
5437 if (sc->bge_asicrev == BGE_ASICREV_BCM5761)
5438 gnt = BGE_APE_LOCK_GRANT;
5439 else
5440 gnt = BGE_APE_PER_LOCK_GRANT;
5442 off = 4 * locknum;
5444 switch (locknum) {
5445 case BGE_APE_LOCK_GPIO:
5446 if (sc->bge_asicrev == BGE_ASICREV_BCM5761)
5447 return;
5448 if (sc->bge_func_addr == 0)
5449 bit = BGE_APE_LOCK_GRANT_DRIVER0;
5450 else
5451 bit = (1 << sc->bge_func_addr);
5452 break;
5453 case BGE_APE_LOCK_GRC:
5454 if (sc->bge_func_addr == 0)
5455 bit = BGE_APE_LOCK_GRANT_DRIVER0;
5456 else
5457 bit = (1 << sc->bge_func_addr);
5458 break;
5459 case BGE_APE_LOCK_MEM:
5460 if (sc->bge_func_addr == 0)
5461 bit = BGE_APE_LOCK_GRANT_DRIVER0;
5462 else
5463 bit = (1 << sc->bge_func_addr);
5464 break;
5465 case BGE_APE_LOCK_PHY0:
5466 case BGE_APE_LOCK_PHY1:
5467 case BGE_APE_LOCK_PHY2:
5468 case BGE_APE_LOCK_PHY3:
5469 bit = BGE_APE_LOCK_GRANT_DRIVER0;
5470 break;
5471 default:
5472 return;
5475 APE_WRITE_4(sc, gnt + off, bit);
5479 * Send an event to the APE firmware.
5481 static void
5482 bge_ape_send_event(struct bge_softc *sc, uint32_t event)
5484 uint32_t apedata;
5485 int i;
5487 /* NCSI does not support APE events. */
5488 if ((sc->bge_mfw_flags & BGE_MFW_ON_APE) == 0)
5489 return;
5491 /* Wait up to 1ms for APE to service previous event. */
5492 for (i = 10; i > 0; i--) {
5493 if (bge_ape_lock(sc, BGE_APE_LOCK_MEM) != 0)
5494 break;
5495 apedata = APE_READ_4(sc, BGE_APE_EVENT_STATUS);
5496 if ((apedata & BGE_APE_EVENT_STATUS_EVENT_PENDING) == 0) {
5497 APE_WRITE_4(sc, BGE_APE_EVENT_STATUS, event |
5498 BGE_APE_EVENT_STATUS_EVENT_PENDING);
5499 bge_ape_unlock(sc, BGE_APE_LOCK_MEM);
5500 APE_WRITE_4(sc, BGE_APE_EVENT, BGE_APE_EVENT_1);
5501 break;
5503 bge_ape_unlock(sc, BGE_APE_LOCK_MEM);
5504 DELAY(100);
5506 if (i == 0)
5507 device_printf(sc->bge_dev, "APE event 0x%08x send timed out\n",
5508 event);
5511 static void
5512 bge_ape_driver_state_change(struct bge_softc *sc, int kind)
5514 uint32_t apedata, event;
5516 if ((sc->bge_mfw_flags & BGE_MFW_ON_APE) == 0)
5517 return;
5519 switch (kind) {
5520 case BGE_RESET_START:
5521 /* If this is the first load, clear the load counter. */
5522 apedata = APE_READ_4(sc, BGE_APE_HOST_SEG_SIG);
5523 if (apedata != BGE_APE_HOST_SEG_SIG_MAGIC)
5524 APE_WRITE_4(sc, BGE_APE_HOST_INIT_COUNT, 0);
5525 else {
5526 apedata = APE_READ_4(sc, BGE_APE_HOST_INIT_COUNT);
5527 APE_WRITE_4(sc, BGE_APE_HOST_INIT_COUNT, ++apedata);
5529 APE_WRITE_4(sc, BGE_APE_HOST_SEG_SIG,
5530 BGE_APE_HOST_SEG_SIG_MAGIC);
5531 APE_WRITE_4(sc, BGE_APE_HOST_SEG_LEN,
5532 BGE_APE_HOST_SEG_LEN_MAGIC);
5534 /* Add some version info if bge(4) supports it. */
5535 APE_WRITE_4(sc, BGE_APE_HOST_DRIVER_ID,
5536 BGE_APE_HOST_DRIVER_ID_MAGIC(1, 0));
5537 APE_WRITE_4(sc, BGE_APE_HOST_BEHAVIOR,
5538 BGE_APE_HOST_BEHAV_NO_PHYLOCK);
5539 APE_WRITE_4(sc, BGE_APE_HOST_HEARTBEAT_INT_MS,
5540 BGE_APE_HOST_HEARTBEAT_INT_DISABLE);
5541 APE_WRITE_4(sc, BGE_APE_HOST_DRVR_STATE,
5542 BGE_APE_HOST_DRVR_STATE_START);
5543 event = BGE_APE_EVENT_STATUS_STATE_START;
5544 break;
5545 case BGE_RESET_SHUTDOWN:
5546 APE_WRITE_4(sc, BGE_APE_HOST_DRVR_STATE,
5547 BGE_APE_HOST_DRVR_STATE_UNLOAD);
5548 event = BGE_APE_EVENT_STATUS_STATE_UNLOAD;
5549 break;
5550 case BGE_RESET_SUSPEND:
5551 event = BGE_APE_EVENT_STATUS_STATE_SUSPEND;
5552 break;
5553 default:
5554 return;
5557 bge_ape_send_event(sc, event | BGE_APE_EVENT_STATUS_DRIVER_EVNT |
5558 BGE_APE_EVENT_STATUS_STATE_CHNGE);