/*
 * Copyright (c) 2001 Wind River Systems
 * Copyright (c) 1997, 1998, 1999, 2001
 *	Bill Paul <wpaul@windriver.com>.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Bill Paul.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/dev/bge/if_bge.c,v 1.3.2.39 2005/07/03 03:41:18 silby Exp $
 * $DragonFly: src/sys/dev/netif/bge/if_bge.c,v 1.95 2008/07/05 13:37:17 sephe Exp $
 */
/*
 * Broadcom BCM570x family gigabit ethernet driver for FreeBSD.
 *
 * Written by Bill Paul <wpaul@windriver.com>
 * Senior Engineer, Wind River Systems
 */

/*
 * The Broadcom BCM5700 is based on technology originally developed by
 * Alteon Networks as part of the Tigon I and Tigon II gigabit ethernet
 * MAC chips. The BCM5700, sometimes referred to as the Tigon III, has
 * two on-board MIPS R4000 CPUs and can have as much as 16MB of external
 * SSRAM. The BCM5700 supports TCP, UDP and IP checksum offload, jumbo
 * frames, highly configurable RX filtering, and 16 RX and TX queues
 * (which, along with RX filter rules, can be used for QOS applications).
 * Other features, such as TCP segmentation, may be available as part
 * of value-added firmware updates. Unlike the Tigon I and Tigon II,
 * firmware images can be stored in hardware and need not be compiled
 * into the driver.
 *
 * The BCM5700 supports the PCI v2.2 and PCI-X v1.0 standards, and will
 * function in a 32-bit/64-bit 33/66MHz bus, or a 64-bit/133MHz bus.
 *
 * The BCM5701 is a single-chip solution incorporating both the BCM5700
 * MAC and a BCM5401 10/100/1000 PHY. Unlike the BCM5700, the BCM5701
 * does not support external SSRAM.
 *
 * Broadcom also produces a variation of the BCM5700 under the "Altima"
 * brand name, which is functionally similar but lacks PCI-X support.
 *
 * Without external SSRAM, you can only have at most 4 TX rings,
 * and the use of the mini RX ring is disabled. This seems to imply
 * that these features are simply not available on the BCM5701. As a
 * result, this driver does not implement any support for the mini RX
 * ring.
 */
#include "opt_polling.h"
#include "opt_ethernet.h"

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/interrupt.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/queue.h>
#include <sys/rman.h>
#include <sys/serialize.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>

#include <net/bpf.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/ifq_var.h>
#include <net/vlan/if_vlan_var.h>
#include <net/vlan/if_vlan_ether.h>

#include <dev/netif/mii_layer/mii.h>
#include <dev/netif/mii_layer/miivar.h>
#include <dev/netif/mii_layer/brgphyreg.h>

#include <bus/pci/pcidevs.h>
#include <bus/pci/pcireg.h>
#include <bus/pci/pcivar.h>

#include <dev/netif/bge/if_bgereg.h>

/* "device miibus" required.  See GENERIC if you get errors here. */
#include "miibus_if.h"

#define BGE_CSUM_FEATURES	(CSUM_IP | CSUM_TCP | CSUM_UDP)
#define BGE_MIN_FRAME		60
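/*
 * A note on BGE_MIN_FRAME: the minimum legal Ethernet frame is 64 bytes
 * including the 4-byte FCS, and the controller appends the FCS itself,
 * so the driver only has to pad outgoing runts up to 60 bytes.
 */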
/*
 * Various supported device vendors/types and their names. Note: the
 * spec seems to indicate that the hardware still has Alteon's vendor
 * ID burned into it, though it will always be overridden by the vendor
 * ID in the EEPROM. Just to be safe, we cover all possibilities.
 */
#define BGE_DEVDESC_MAX		64	/* Maximum device description length */
static struct bge_type bge_devs[] = {
	{ PCI_VENDOR_3COM, PCI_PRODUCT_3COM_3C996,
		"3COM 3C996 Gigabit Ethernet" },

	{ PCI_VENDOR_ALTEON, PCI_PRODUCT_ALTEON_BCM5700,
		"Alteon BCM5700 Gigabit Ethernet" },
	{ PCI_VENDOR_ALTEON, PCI_PRODUCT_ALTEON_BCM5701,
		"Alteon BCM5701 Gigabit Ethernet" },

	{ PCI_VENDOR_ALTIMA, PCI_PRODUCT_ALTIMA_AC1000,
		"Altima AC1000 Gigabit Ethernet" },
	{ PCI_VENDOR_ALTIMA, PCI_PRODUCT_ALTIMA_AC1001,
		"Altima AC1002 Gigabit Ethernet" },
	{ PCI_VENDOR_ALTIMA, PCI_PRODUCT_ALTIMA_AC9100,
		"Altima AC9100 Gigabit Ethernet" },

	{ PCI_VENDOR_APPLE, PCI_PRODUCT_APPLE_BCM5701,
		"Apple BCM5701 Gigabit Ethernet" },

	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5700,
		"Broadcom BCM5700 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5701,
		"Broadcom BCM5701 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5702,
		"Broadcom BCM5702 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5702X,
		"Broadcom BCM5702X Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5702_ALT,
		"Broadcom BCM5702 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5703,
		"Broadcom BCM5703 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5703X,
		"Broadcom BCM5703X Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5703A3,
		"Broadcom BCM5703 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5704C,
		"Broadcom BCM5704C Dual Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5704S,
		"Broadcom BCM5704S Dual Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5704S_ALT,
		"Broadcom BCM5704S Dual Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5705,
		"Broadcom BCM5705 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5705F,
		"Broadcom BCM5705F Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5705K,
		"Broadcom BCM5705K Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5705M,
		"Broadcom BCM5705M Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5705M_ALT,
		"Broadcom BCM5705M Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5714,
		"Broadcom BCM5714C Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5714S,
		"Broadcom BCM5714S Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5715,
		"Broadcom BCM5715 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5715S,
		"Broadcom BCM5715S Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5720,
		"Broadcom BCM5720 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5721,
		"Broadcom BCM5721 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5722,
		"Broadcom BCM5722 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5750,
		"Broadcom BCM5750 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5750M,
		"Broadcom BCM5750M Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5751,
		"Broadcom BCM5751 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5751F,
		"Broadcom BCM5751F Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5751M,
		"Broadcom BCM5751M Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5752,
		"Broadcom BCM5752 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5752M,
		"Broadcom BCM5752M Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5753,
		"Broadcom BCM5753 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5753F,
		"Broadcom BCM5753F Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5753M,
		"Broadcom BCM5753M Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5754,
		"Broadcom BCM5754 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5754M,
		"Broadcom BCM5754M Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5755,
		"Broadcom BCM5755 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5755M,
		"Broadcom BCM5755M Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5756,
		"Broadcom BCM5756 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5780,
		"Broadcom BCM5780 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5780S,
		"Broadcom BCM5780S Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5781,
		"Broadcom BCM5781 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5782,
		"Broadcom BCM5782 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5786,
		"Broadcom BCM5786 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5787,
		"Broadcom BCM5787 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5787F,
		"Broadcom BCM5787F Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5787M,
		"Broadcom BCM5787M Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5788,
		"Broadcom BCM5788 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5789,
		"Broadcom BCM5789 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5901,
		"Broadcom BCM5901 Fast Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5901A2,
		"Broadcom BCM5901A2 Fast Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5903M,
		"Broadcom BCM5903M Fast Ethernet" },

	{ PCI_VENDOR_SCHNEIDERKOCH, PCI_PRODUCT_SCHNEIDERKOCH_SK_9DX1,
		"SysKonnect Gigabit Ethernet" },

	{ 0, 0, NULL }
};
#define BGE_IS_JUMBO_CAPABLE(sc)	((sc)->bge_flags & BGE_FLAG_JUMBO)
#define BGE_IS_5700_FAMILY(sc)		((sc)->bge_flags & BGE_FLAG_5700_FAMILY)
#define BGE_IS_5705_PLUS(sc)		((sc)->bge_flags & BGE_FLAG_5705_PLUS)
#define BGE_IS_5714_FAMILY(sc)		((sc)->bge_flags & BGE_FLAG_5714_FAMILY)
#define BGE_IS_575X_PLUS(sc)		((sc)->bge_flags & BGE_FLAG_575X_PLUS)
static int	bge_probe(device_t);
static int	bge_attach(device_t);
static int	bge_detach(device_t);
static void	bge_txeof(struct bge_softc *);
static void	bge_rxeof(struct bge_softc *);

static void	bge_tick(void *);
static void	bge_stats_update(struct bge_softc *);
static void	bge_stats_update_regs(struct bge_softc *);
static int	bge_encap(struct bge_softc *, struct mbuf **, uint32_t *);

#ifdef DEVICE_POLLING
static void	bge_poll(struct ifnet *ifp, enum poll_cmd cmd, int count);
#endif
static void	bge_intr(void *);
static void	bge_enable_intr(struct bge_softc *);
static void	bge_disable_intr(struct bge_softc *);
static void	bge_start(struct ifnet *);
static int	bge_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
static void	bge_init(void *);
static void	bge_stop(struct bge_softc *);
static void	bge_watchdog(struct ifnet *);
static void	bge_shutdown(device_t);
static int	bge_suspend(device_t);
static int	bge_resume(device_t);
static int	bge_ifmedia_upd(struct ifnet *);
static void	bge_ifmedia_sts(struct ifnet *, struct ifmediareq *);

static uint8_t	bge_eeprom_getbyte(struct bge_softc *, uint32_t, uint8_t *);
static int	bge_read_eeprom(struct bge_softc *, caddr_t, uint32_t, size_t);

static void	bge_setmulti(struct bge_softc *);
static void	bge_setpromisc(struct bge_softc *);

static int	bge_alloc_jumbo_mem(struct bge_softc *);
static void	bge_free_jumbo_mem(struct bge_softc *);
static struct bge_jslot
		*bge_jalloc(struct bge_softc *);
static void	bge_jfree(void *);
static void	bge_jref(void *);
static int	bge_newbuf_std(struct bge_softc *, int, struct mbuf *);
static int	bge_newbuf_jumbo(struct bge_softc *, int, struct mbuf *);
static int	bge_init_rx_ring_std(struct bge_softc *);
static void	bge_free_rx_ring_std(struct bge_softc *);
static int	bge_init_rx_ring_jumbo(struct bge_softc *);
static void	bge_free_rx_ring_jumbo(struct bge_softc *);
static void	bge_free_tx_ring(struct bge_softc *);
static int	bge_init_tx_ring(struct bge_softc *);

static int	bge_chipinit(struct bge_softc *);
static int	bge_blockinit(struct bge_softc *);

static uint32_t	bge_readmem_ind(struct bge_softc *, uint32_t);
static void	bge_writemem_ind(struct bge_softc *, uint32_t, uint32_t);

static uint32_t	bge_readreg_ind(struct bge_softc *, uint32_t);

static void	bge_writereg_ind(struct bge_softc *, uint32_t, uint32_t);
static void	bge_writemem_direct(struct bge_softc *, uint32_t, uint32_t);

static int	bge_miibus_readreg(device_t, int, int);
static int	bge_miibus_writereg(device_t, int, int, int);
static void	bge_miibus_statchg(device_t);
static void	bge_bcm5700_link_upd(struct bge_softc *, uint32_t);
static void	bge_tbi_link_upd(struct bge_softc *, uint32_t);
static void	bge_copper_link_upd(struct bge_softc *, uint32_t);

static void	bge_reset(struct bge_softc *);

static void	bge_dma_map_addr(void *, bus_dma_segment_t *, int, int);
static void	bge_dma_map_mbuf(void *, bus_dma_segment_t *, int,
				 bus_size_t, int);
static int	bge_dma_alloc(struct bge_softc *);
static void	bge_dma_free(struct bge_softc *);
static int	bge_dma_block_alloc(struct bge_softc *, bus_size_t,
				    bus_dma_tag_t *, bus_dmamap_t *,
				    void **, bus_addr_t *);
static void	bge_dma_block_free(bus_dma_tag_t, bus_dmamap_t, void *);

static void	bge_coal_change(struct bge_softc *);
static int	bge_sysctl_rx_coal_ticks(SYSCTL_HANDLER_ARGS);
static int	bge_sysctl_tx_coal_ticks(SYSCTL_HANDLER_ARGS);
static int	bge_sysctl_rx_max_coal_bds(SYSCTL_HANDLER_ARGS);
static int	bge_sysctl_tx_max_coal_bds(SYSCTL_HANDLER_ARGS);
static int	bge_sysctl_coal_chg(SYSCTL_HANDLER_ARGS, uint32_t *, uint32_t);
/*
 * Set following tunable to 1 for some IBM blade servers with the DNLK
 * switch module. Auto negotiation is broken for those configurations.
 */
static int	bge_fake_autoneg = 0;
TUNABLE_INT("hw.bge.fake_autoneg", &bge_fake_autoneg);

/* Interrupt moderation control variables. */
static int	bge_rx_coal_ticks = 150;	/* usec */
static int	bge_tx_coal_ticks = 1000000;	/* usec */
static int	bge_rx_max_coal_bds = 16;
static int	bge_tx_max_coal_bds = 32;

TUNABLE_INT("hw.bge.rx_coal_ticks", &bge_rx_coal_ticks);
TUNABLE_INT("hw.bge.tx_coal_ticks", &bge_tx_coal_ticks);
TUNABLE_INT("hw.bge.rx_max_coal_bds", &bge_rx_max_coal_bds);
TUNABLE_INT("hw.bge.tx_max_coal_bds", &bge_tx_max_coal_bds);
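/*
 * A rough feel for these defaults (back-of-envelope, not from the
 * original source): the host coalescing engine raises an RX interrupt
 * when either rx_max_coal_bds descriptors have completed or
 * rx_coal_ticks microseconds have passed, whichever comes first.
 * At gigabit line rate a 64-byte frame occupies ~0.67us on the wire,
 * so under small-packet load 16 BDs arrive in ~11us and the BD count
 * dominates; with full-size frames (~12.3us each on the wire) the
 * 150us timer fires first, batching roughly a dozen packets per
 * interrupt.  On the TX side the one-second tx_coal_ticks value is
 * effectively "never", so TX interrupts are driven almost entirely
 * by the 32-BD threshold.
 */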
#if !defined(KTR_IF_BGE)
#define KTR_IF_BGE	KTR_ALL
#endif
KTR_INFO_MASTER(if_bge);
KTR_INFO(KTR_IF_BGE, if_bge, intr, 0, "intr", 0);
KTR_INFO(KTR_IF_BGE, if_bge, rx_pkt, 1, "rx_pkt", 0);
KTR_INFO(KTR_IF_BGE, if_bge, tx_pkt, 2, "tx_pkt", 0);
#define logif(name)	KTR_LOG(if_bge_ ## name)
static device_method_t bge_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		bge_probe),
	DEVMETHOD(device_attach,	bge_attach),
	DEVMETHOD(device_detach,	bge_detach),
	DEVMETHOD(device_shutdown,	bge_shutdown),
	DEVMETHOD(device_suspend,	bge_suspend),
	DEVMETHOD(device_resume,	bge_resume),

	/* Bus interface */
	DEVMETHOD(bus_print_child,	bus_generic_print_child),
	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),

	/* MII interface */
	DEVMETHOD(miibus_readreg,	bge_miibus_readreg),
	DEVMETHOD(miibus_writereg,	bge_miibus_writereg),
	DEVMETHOD(miibus_statchg,	bge_miibus_statchg),

	{ 0, 0 }
};

static DEFINE_CLASS_0(bge, bge_driver, bge_methods, sizeof(struct bge_softc));
static devclass_t bge_devclass;

DECLARE_DUMMY_MODULE(if_bge);
DRIVER_MODULE(if_bge, pci, bge_driver, bge_devclass, 0, 0);
DRIVER_MODULE(miibus, bge, miibus_driver, miibus_devclass, 0, 0);
static uint32_t
bge_readmem_ind(struct bge_softc *sc, uint32_t off)
{
	device_t dev = sc->bge_dev;
	uint32_t val;

	pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4);
	val = pci_read_config(dev, BGE_PCI_MEMWIN_DATA, 4);
	pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, 0, 4);
	return(val);
}
static void
bge_writemem_ind(struct bge_softc *sc, uint32_t off, uint32_t val)
{
	device_t dev = sc->bge_dev;

	pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4);
	pci_write_config(dev, BGE_PCI_MEMWIN_DATA, val, 4);
	pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, 0, 4);
}
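/*
 * The MEMWIN registers used above form a classic indirect-access pair:
 * BGE_PCI_MEMWIN_BASEADDR selects which word of the NIC's internal
 * memory is visible through BGE_PCI_MEMWIN_DATA.  The window is pointed
 * back at offset 0 after each access so any other code that touches the
 * window starts from a known state.
 */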
static uint32_t
bge_readreg_ind(struct bge_softc *sc, uint32_t off)
{
	device_t dev = sc->bge_dev;

	pci_write_config(dev, BGE_PCI_REG_BASEADDR, off, 4);
	return(pci_read_config(dev, BGE_PCI_REG_DATA, 4));
}
static void
bge_writereg_ind(struct bge_softc *sc, uint32_t off, uint32_t val)
{
	device_t dev = sc->bge_dev;

	pci_write_config(dev, BGE_PCI_REG_BASEADDR, off, 4);
	pci_write_config(dev, BGE_PCI_REG_DATA, val, 4);
}
static void
bge_writemem_direct(struct bge_softc *sc, uint32_t off, uint32_t val)
{
	CSR_WRITE_4(sc, off, val);
}
/*
 * Read a byte of data stored in the EEPROM at address 'addr.' The
 * BCM570x supports both the traditional bitbang interface and an
 * auto access interface for reading the EEPROM. We use the auto
 * access method.
 */
static uint8_t
bge_eeprom_getbyte(struct bge_softc *sc, uint32_t addr, uint8_t *dest)
{
	int i;
	uint32_t byte = 0;

	/*
	 * Enable use of auto EEPROM access so we can avoid
	 * having to use the bitbang method.
	 */
	BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_AUTO_EEPROM);

	/* Reset the EEPROM, load the clock period. */
	CSR_WRITE_4(sc, BGE_EE_ADDR,
	    BGE_EEADDR_RESET|BGE_EEHALFCLK(BGE_HALFCLK_384SCL));
	DELAY(20);

	/* Issue the read EEPROM command. */
	CSR_WRITE_4(sc, BGE_EE_ADDR, BGE_EE_READCMD | addr);

	/* Wait for completion */
	for (i = 0; i < BGE_TIMEOUT * 10; i++) {
		DELAY(10);
		if (CSR_READ_4(sc, BGE_EE_ADDR) & BGE_EEADDR_DONE)
			break;
	}

	if (i == BGE_TIMEOUT * 10) {
		if_printf(&sc->arpcom.ac_if, "eeprom read timed out\n");
		return(1);
	}

	/* Get result. */
	byte = CSR_READ_4(sc, BGE_EE_DATA);

	*dest = (byte >> ((addr % 4) * 8)) & 0xFF;

	return(0);
}
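/*
 * Note on bge_eeprom_getbyte(): BGE_EE_DATA hands back a full 32-bit
 * word of EEPROM contents, so the final shift selects the byte lane
 * for 'addr' within that word.  For example, addr = 6 gives
 * (addr % 4) * 8 = 16, returning the third byte of the word.
 */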
/*
 * Read a sequence of bytes from the EEPROM.
 */
static int
bge_read_eeprom(struct bge_softc *sc, caddr_t dest, uint32_t off, size_t len)
{
	size_t i;
	int err;
	uint8_t byte;

	for (byte = 0, err = 0, i = 0; i < len; i++) {
		err = bge_eeprom_getbyte(sc, off + i, &byte);
		if (err)
			break;
		*(dest + i) = byte;
	}

	return(err ? 1 : 0);
}
static int
bge_miibus_readreg(device_t dev, int phy, int reg)
{
	struct bge_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t val, autopoll;
	int i;

	/*
	 * Broadcom's own driver always assumes the internal
	 * PHY is at GMII address 1. On some chips, the PHY responds
	 * to accesses at all addresses, which could cause us to
	 * bogusly attach the PHY 32 times at probe time. Always
	 * restricting the lookup to address 1 is simpler than
	 * trying to figure out which chip revisions should be
	 * special-cased.
	 */
	if (phy != 1)
		return(0);

	/* Reading with autopolling on may trigger PCI errors */
	autopoll = CSR_READ_4(sc, BGE_MI_MODE);
	if (autopoll & BGE_MIMODE_AUTOPOLL) {
		BGE_CLRBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
		DELAY(40);
	}

	CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_READ|BGE_MICOMM_BUSY|
	    BGE_MIPHY(phy)|BGE_MIREG(reg));

	for (i = 0; i < BGE_TIMEOUT; i++) {
		DELAY(10);
		val = CSR_READ_4(sc, BGE_MI_COMM);
		if (!(val & BGE_MICOMM_BUSY))
			break;
	}

	if (i == BGE_TIMEOUT) {
		if_printf(ifp, "PHY read timed out "
			  "(phy %d, reg %d, val 0x%08x)\n", phy, reg, val);
		val = 0;
		goto done;
	}

	val = CSR_READ_4(sc, BGE_MI_COMM);

done:
	if (autopoll & BGE_MIMODE_AUTOPOLL) {
		BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
		DELAY(40);
	}

	if (val & BGE_MICOMM_READFAIL)
		return(0);

	return(val & 0xFFFF);
}
static int
bge_miibus_writereg(device_t dev, int phy, int reg, int val)
{
	struct bge_softc *sc = device_get_softc(dev);
	uint32_t autopoll;
	int i;

	/*
	 * See the related comment in bge_miibus_readreg()
	 */
	if (phy != 1)
		return(0);

	/* Reading with autopolling on may trigger PCI errors */
	autopoll = CSR_READ_4(sc, BGE_MI_MODE);
	if (autopoll & BGE_MIMODE_AUTOPOLL) {
		BGE_CLRBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
		DELAY(40);
	}

	CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_WRITE|BGE_MICOMM_BUSY|
	    BGE_MIPHY(phy)|BGE_MIREG(reg)|val);

	for (i = 0; i < BGE_TIMEOUT; i++) {
		DELAY(10);
		if (!(CSR_READ_4(sc, BGE_MI_COMM) & BGE_MICOMM_BUSY)) {
			DELAY(5);
			CSR_READ_4(sc, BGE_MI_COMM); /* dummy read */
			break;
		}
	}

	if (autopoll & BGE_MIMODE_AUTOPOLL) {
		BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
		DELAY(40);
	}

	if (i == BGE_TIMEOUT) {
		if_printf(&sc->arpcom.ac_if, "PHY write timed out "
			  "(phy %d, reg %d, val %d)\n", phy, reg, val);
		return(0);
	}

	return(0);
}
static void
bge_miibus_statchg(device_t dev)
{
	struct bge_softc *sc;
	struct mii_data *mii;

	sc = device_get_softc(dev);
	mii = device_get_softc(sc->bge_miibus);

	BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_PORTMODE);
	if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T) {
		BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_GMII);
	} else {
		BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_MII);
	}

	if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) {
		BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);
	} else {
		BGE_SETBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);
	}
}
/*
 * Memory management for jumbo frames.
 */
static int
bge_alloc_jumbo_mem(struct bge_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct bge_jslot *entry;
	uint8_t *ptr;
	bus_addr_t paddr;
	int i, error;

	/*
	 * Create tag for jumbo mbufs.
	 * This is really a bit of a kludge. We allocate a special
	 * jumbo buffer pool which (thanks to the way our DMA
	 * memory allocation works) will consist of contiguous
	 * pages. This means that even though a jumbo buffer might
	 * be larger than a page size, we don't really need to
	 * map it into more than one DMA segment. However, the
	 * default mbuf tag will result in multi-segment mappings,
	 * so we have to create a special jumbo mbuf tag that
	 * lets us get away with mapping the jumbo buffers as
	 * a single segment. I think eventually the driver should
	 * be changed so that it uses ordinary mbufs and cluster
	 * buffers, i.e. jumbo frames can span multiple DMA
	 * descriptors. But that's a project for another day.
	 */

	/*
	 * Create DMA stuffs for jumbo RX ring.
	 */
	error = bge_dma_block_alloc(sc, BGE_JUMBO_RX_RING_SZ,
				    &sc->bge_cdata.bge_rx_jumbo_ring_tag,
				    &sc->bge_cdata.bge_rx_jumbo_ring_map,
				    (void **)&sc->bge_ldata.bge_rx_jumbo_ring,
				    &sc->bge_ldata.bge_rx_jumbo_ring_paddr);
	if (error) {
		if_printf(ifp, "could not create jumbo RX ring\n");
		return error;
	}

	/*
	 * Create DMA stuffs for jumbo buffer block.
	 */
	error = bge_dma_block_alloc(sc, BGE_JMEM,
				    &sc->bge_cdata.bge_jumbo_tag,
				    &sc->bge_cdata.bge_jumbo_map,
				    (void **)&sc->bge_ldata.bge_jumbo_buf,
				    &paddr);
	if (error) {
		if_printf(ifp, "could not create jumbo buffer\n");
		return error;
	}

	SLIST_INIT(&sc->bge_jfree_listhead);

	/*
	 * Now divide it up into 9K pieces and save the addresses
	 * in an array. Note that we play an evil trick here by using
	 * the first few bytes in the buffer to hold the address
	 * of the softc structure for this interface. This is because
	 * bge_jfree() needs it, but it is called by the mbuf management
	 * code which will not pass it to us explicitly.
	 */
	for (i = 0, ptr = sc->bge_ldata.bge_jumbo_buf; i < BGE_JSLOTS; i++) {
		entry = &sc->bge_cdata.bge_jslots[i];
		entry->bge_sc = sc;
		entry->bge_buf = ptr;
		entry->bge_paddr = paddr;
		entry->bge_inuse = 0;
		entry->bge_slot = i;
		SLIST_INSERT_HEAD(&sc->bge_jfree_listhead, entry, jslot_link);
		ptr += BGE_JLEN;
		paddr += BGE_JLEN;
	}
	return 0;
}
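/*
 * A word on the loop in bge_alloc_jumbo_mem() above: it carves the one
 * contiguous jumbo block into BGE_JSLOTS fixed-size pieces (BGE_JLEN
 * bytes apiece, sized for one jumbo frame plus alignment slop),
 * recording both the KVA and the bus address of each slot up front, so
 * bge_newbuf_jumbo() never has to go through busdma mapping on the
 * hot path.
 */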
static void
bge_free_jumbo_mem(struct bge_softc *sc)
{
	/* Destroy jumbo RX ring. */
	bge_dma_block_free(sc->bge_cdata.bge_rx_jumbo_ring_tag,
			   sc->bge_cdata.bge_rx_jumbo_ring_map,
			   sc->bge_ldata.bge_rx_jumbo_ring);

	/* Destroy jumbo buffer block. */
	bge_dma_block_free(sc->bge_cdata.bge_jumbo_tag,
			   sc->bge_cdata.bge_jumbo_map,
			   sc->bge_ldata.bge_jumbo_buf);
}
/*
 * Allocate a jumbo buffer.
 */
static struct bge_jslot *
bge_jalloc(struct bge_softc *sc)
{
	struct bge_jslot *entry;

	lwkt_serialize_enter(&sc->bge_jslot_serializer);
	entry = SLIST_FIRST(&sc->bge_jfree_listhead);
	if (entry != NULL) {
		SLIST_REMOVE_HEAD(&sc->bge_jfree_listhead, jslot_link);
		entry->bge_inuse = 1;
	} else {
		if_printf(&sc->arpcom.ac_if, "no free jumbo buffers\n");
	}
	lwkt_serialize_exit(&sc->bge_jslot_serializer);
	return(entry);
}
/*
 * Adjust usage count on a jumbo buffer.
 */
static void
bge_jref(void *arg)
{
	struct bge_jslot *entry = (struct bge_jslot *)arg;
	struct bge_softc *sc = entry->bge_sc;

	if (sc == NULL)
		panic("bge_jref: can't find softc pointer!");

	if (&sc->bge_cdata.bge_jslots[entry->bge_slot] != entry) {
		panic("bge_jref: asked to reference buffer "
		      "that we don't manage!");
	} else if (entry->bge_inuse == 0) {
		panic("bge_jref: buffer already free!");
	} else {
		atomic_add_int(&entry->bge_inuse, 1);
	}
}
/*
 * Release a jumbo buffer.
 */
static void
bge_jfree(void *arg)
{
	struct bge_jslot *entry = (struct bge_jslot *)arg;
	struct bge_softc *sc = entry->bge_sc;

	if (sc == NULL)
		panic("bge_jfree: can't find softc pointer!");

	if (&sc->bge_cdata.bge_jslots[entry->bge_slot] != entry) {
		panic("bge_jfree: asked to free buffer that we don't manage!");
	} else if (entry->bge_inuse == 0) {
		panic("bge_jfree: buffer already free!");
	} else {
		/*
		 * Possible MP race to 0, use the serializer. The atomic insn
		 * is still needed for races against bge_jref().
		 */
		lwkt_serialize_enter(&sc->bge_jslot_serializer);
		atomic_subtract_int(&entry->bge_inuse, 1);
		if (entry->bge_inuse == 0) {
			SLIST_INSERT_HEAD(&sc->bge_jfree_listhead,
					  entry, jslot_link);
		}
		lwkt_serialize_exit(&sc->bge_jslot_serializer);
	}
}
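/*
 * Lifecycle note: bge_jref()/bge_jfree() back the mbuf external-storage
 * hooks (ext_ref/ext_free) set up in bge_newbuf_jumbo(), so bge_inuse
 * behaves like a cluster reference count.  Only the final bge_jfree()
 * returns the slot to bge_jfree_listhead; the serializer closes the
 * window in which a free racing another free (or a ref) could both
 * observe the count reaching zero.
 */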
/*
 * Initialize a standard receive ring descriptor.
 */
static int
bge_newbuf_std(struct bge_softc *sc, int i, struct mbuf *m)
{
	struct mbuf *m_new = NULL;
	struct bge_dmamap_arg ctx;
	bus_dma_segment_t seg;
	struct bge_rx_bd *r;
	int error;

	if (m == NULL) {
		m_new = m_getcl(MB_DONTWAIT, MT_DATA, M_PKTHDR);
		if (m_new == NULL)
			return(ENOBUFS);
	} else {
		m_new = m;
		m_new->m_data = m_new->m_ext.ext_buf;
	}
	m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;

	if ((sc->bge_flags & BGE_FLAG_RX_ALIGNBUG) == 0)
		m_adj(m_new, ETHER_ALIGN);

	ctx.bge_maxsegs = 1;
	ctx.bge_segs = &seg;
	error = bus_dmamap_load_mbuf(sc->bge_cdata.bge_mtag,
				     sc->bge_cdata.bge_rx_std_dmamap[i],
				     m_new, bge_dma_map_mbuf, &ctx,
				     BUS_DMA_NOWAIT);
	if (error || ctx.bge_maxsegs == 0) {
		if (m == NULL)
			m_freem(m_new);
		return(ENOMEM);
	}

	sc->bge_cdata.bge_rx_std_chain[i] = m_new;

	r = &sc->bge_ldata.bge_rx_std_ring[i];
	r->bge_addr.bge_addr_lo = BGE_ADDR_LO(ctx.bge_segs[0].ds_addr);
	r->bge_addr.bge_addr_hi = BGE_ADDR_HI(ctx.bge_segs[0].ds_addr);
	r->bge_flags = BGE_RXBDFLAG_END;
	r->bge_len = m_new->m_len;

	bus_dmamap_sync(sc->bge_cdata.bge_mtag,
			sc->bge_cdata.bge_rx_std_dmamap[i],
			BUS_DMASYNC_PREREAD);
	return(0);
}
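/*
 * The m_adj() in bge_newbuf_std() is the usual ETHER_ALIGN trick:
 * eating 2 bytes at the front of the cluster puts the 14-byte Ethernet
 * header at an offset that leaves the IP header 32-bit aligned.  Chips
 * with BGE_FLAG_RX_ALIGNBUG must receive into the unshifted buffer, and
 * the payload gets realigned later by copying (see the comment in
 * bge_attach() about BCM5701 PCI-X corruption).
 */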
/*
 * Initialize a jumbo receive ring descriptor. This allocates
 * a jumbo buffer from the pool managed internally by the driver.
 */
static int
bge_newbuf_jumbo(struct bge_softc *sc, int i, struct mbuf *m)
{
	struct mbuf *m_new = NULL;
	struct bge_jslot *buf;
	struct bge_rx_bd *r;
	bus_addr_t paddr;

	if (m == NULL) {
		/* Allocate the mbuf. */
		MGETHDR(m_new, MB_DONTWAIT, MT_DATA);
		if (m_new == NULL)
			return(ENOBUFS);

		/* Allocate the jumbo buffer */
		buf = bge_jalloc(sc);
		if (buf == NULL) {
			m_freem(m_new);
			if_printf(&sc->arpcom.ac_if, "jumbo allocation failed "
				  "-- packet dropped!\n");
			return(ENOBUFS);
		}

		/* Attach the buffer to the mbuf. */
		m_new->m_ext.ext_arg = buf;
		m_new->m_ext.ext_buf = buf->bge_buf;
		m_new->m_ext.ext_free = bge_jfree;
		m_new->m_ext.ext_ref = bge_jref;
		m_new->m_ext.ext_size = BGE_JUMBO_FRAMELEN;

		m_new->m_flags |= M_EXT;
	} else {
		KKASSERT(m->m_flags & M_EXT);
		m_new = m;
		buf = m_new->m_ext.ext_arg;
	}
	m_new->m_data = m_new->m_ext.ext_buf;
	m_new->m_len = m_new->m_pkthdr.len = m_new->m_ext.ext_size;

	paddr = buf->bge_paddr;
	if ((sc->bge_flags & BGE_FLAG_RX_ALIGNBUG) == 0) {
		m_adj(m_new, ETHER_ALIGN);
		paddr += ETHER_ALIGN;
	}

	/* Set up the descriptor. */
	sc->bge_cdata.bge_rx_jumbo_chain[i] = m_new;

	r = &sc->bge_ldata.bge_rx_jumbo_ring[i];
	r->bge_addr.bge_addr_lo = BGE_ADDR_LO(paddr);
	r->bge_addr.bge_addr_hi = BGE_ADDR_HI(paddr);
	r->bge_flags = BGE_RXBDFLAG_END|BGE_RXBDFLAG_JUMBO_RING;
	r->bge_len = m_new->m_len;

	return(0);
}
/*
 * The standard receive ring has 512 entries in it. At 2K per mbuf cluster,
 * that's 1MB of memory, which is a lot. For now, we fill only the first
 * 256 ring entries and hope that our CPU is fast enough to keep up with
 * the NIC.
 */
static int
bge_init_rx_ring_std(struct bge_softc *sc)
{
	int i;

	for (i = 0; i < BGE_SSLOTS; i++) {
		if (bge_newbuf_std(sc, i, NULL) == ENOBUFS)
			return(ENOBUFS);
	}

	bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
			sc->bge_cdata.bge_rx_std_ring_map,
			BUS_DMASYNC_PREWRITE);

	sc->bge_std = i - 1;
	CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std);

	return(0);
}
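/*
 * Writing BGE_MBX_RX_STD_PROD_LO above publishes the new producer
 * index: the NIC owns descriptors from its consumer index up to this
 * value, so the chip may begin filling the freshly initialized buffers
 * as soon as the mailbox write lands.
 */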
static void
bge_free_rx_ring_std(struct bge_softc *sc)
{
	int i;

	for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
		if (sc->bge_cdata.bge_rx_std_chain[i] != NULL) {
			bus_dmamap_unload(sc->bge_cdata.bge_mtag,
					  sc->bge_cdata.bge_rx_std_dmamap[i]);
			m_freem(sc->bge_cdata.bge_rx_std_chain[i]);
			sc->bge_cdata.bge_rx_std_chain[i] = NULL;
		}
		bzero(&sc->bge_ldata.bge_rx_std_ring[i],
		      sizeof(struct bge_rx_bd));
	}
}
static int
bge_init_rx_ring_jumbo(struct bge_softc *sc)
{
	struct bge_rcb *rcb;
	int i;

	for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
		if (bge_newbuf_jumbo(sc, i, NULL) == ENOBUFS)
			return(ENOBUFS);
	}

	bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
			sc->bge_cdata.bge_rx_jumbo_ring_map,
			BUS_DMASYNC_PREWRITE);

	sc->bge_jumbo = i - 1;

	rcb = &sc->bge_ldata.bge_info.bge_jumbo_rx_rcb;
	rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(0, 0);
	CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);

	CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo);

	return(0);
}
static void
bge_free_rx_ring_jumbo(struct bge_softc *sc)
{
	int i;

	for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
		if (sc->bge_cdata.bge_rx_jumbo_chain[i] != NULL) {
			m_freem(sc->bge_cdata.bge_rx_jumbo_chain[i]);
			sc->bge_cdata.bge_rx_jumbo_chain[i] = NULL;
		}
		bzero(&sc->bge_ldata.bge_rx_jumbo_ring[i],
		      sizeof(struct bge_rx_bd));
	}
}
static void
bge_free_tx_ring(struct bge_softc *sc)
{
	int i;

	for (i = 0; i < BGE_TX_RING_CNT; i++) {
		if (sc->bge_cdata.bge_tx_chain[i] != NULL) {
			bus_dmamap_unload(sc->bge_cdata.bge_mtag,
					  sc->bge_cdata.bge_tx_dmamap[i]);
			m_freem(sc->bge_cdata.bge_tx_chain[i]);
			sc->bge_cdata.bge_tx_chain[i] = NULL;
		}
		bzero(&sc->bge_ldata.bge_tx_ring[i],
		      sizeof(struct bge_tx_bd));
	}
}
static int
bge_init_tx_ring(struct bge_softc *sc)
{
	sc->bge_txcnt = 0;
	sc->bge_tx_saved_considx = 0;
	sc->bge_tx_prodidx = 0;

	/* Initialize transmit producer index for host-memory send ring. */
	CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bge_tx_prodidx);

	/* 5700 b2 errata */
	if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
		CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, 0);

	CSR_WRITE_4(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);
	/* 5700 b2 errata */
	if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
		CSR_WRITE_4(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);

	return(0);
}
static void
bge_setmulti(struct bge_softc *sc)
{
	struct ifnet *ifp;
	struct ifmultiaddr *ifma;
	uint32_t hashes[4] = { 0, 0, 0, 0 };
	int h, i;

	ifp = &sc->arpcom.ac_if;

	if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
		for (i = 0; i < 4; i++)
			CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0xFFFFFFFF);
		return;
	}

	/* First, zot all the existing filters. */
	for (i = 0; i < 4; i++)
		CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0);

	/* Now program new ones. */
	LIST_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		h = ether_crc32_le(
		    LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
		    ETHER_ADDR_LEN) & 0x7f;
		hashes[(h & 0x60) >> 5] |= 1 << (h & 0x1F);
	}

	for (i = 0; i < 4; i++)
		CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), hashes[i]);
}
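/*
 * Filter layout: the CRC32 of each multicast address is folded down to
 * 7 bits, indexing one of 128 bits spread across the four 32-bit
 * BGE_MAR registers.  Bits 6:5 of the hash pick the register and bits
 * 4:0 pick the bit within it; e.g. h = 0x4b sets bit 11 of hashes[2].
 */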
/*
 * Do endian, PCI and DMA initialization. Also check the on-board ROM
 * self-test results.
 */
static int
bge_chipinit(struct bge_softc *sc)
{
	int i;
	uint32_t dma_rw_ctl;

	/* Set endian type before we access any non-PCI registers. */
	pci_write_config(sc->bge_dev, BGE_PCI_MISC_CTL, BGE_INIT, 4);

	/*
	 * Check the 'ROM failed' bit on the RX CPU to see if
	 * self-tests passed.
	 */
	if (CSR_READ_4(sc, BGE_RXCPU_MODE) & BGE_RXCPUMODE_ROMFAIL) {
		if_printf(&sc->arpcom.ac_if,
			  "RX CPU self-diagnostics failed!\n");
		return(ENODEV);
	}

	/* Clear the MAC control register */
	CSR_WRITE_4(sc, BGE_MAC_MODE, 0);

	/*
	 * Clear the MAC statistics block in the NIC's
	 * internal memory.
	 */
	for (i = BGE_STATS_BLOCK;
	     i < BGE_STATS_BLOCK_END + 1; i += sizeof(uint32_t))
		BGE_MEMWIN_WRITE(sc, i, 0);

	for (i = BGE_STATUS_BLOCK;
	     i < BGE_STATUS_BLOCK_END + 1; i += sizeof(uint32_t))
		BGE_MEMWIN_WRITE(sc, i, 0);

	/* Set up the PCI DMA control register. */
	if (sc->bge_flags & BGE_FLAG_PCIE) {
		/* PCI Express */
		dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD |
		    (0xf << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
		    (0x2 << BGE_PCIDMARWCTL_WR_WAT_SHIFT);
	} else if (sc->bge_flags & BGE_FLAG_PCIX) {
		/* PCI-X bus */
		if (BGE_IS_5714_FAMILY(sc)) {
			dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD;
			dma_rw_ctl &= ~BGE_PCIDMARWCTL_ONEDMA_ATONCE; /* XXX */
			/* XXX magic values, Broadcom-supplied Linux driver */
			if (sc->bge_asicrev == BGE_ASICREV_BCM5780) {
				dma_rw_ctl |= (1 << 20) | (1 << 18) |
				    BGE_PCIDMARWCTL_ONEDMA_ATONCE;
			} else {
				dma_rw_ctl |= (1 << 20) | (1 << 18) | (1 << 15);
			}
		} else if (sc->bge_asicrev == BGE_ASICREV_BCM5704) {
			/*
			 * The 5704 uses a different encoding of read/write
			 * watermarks.
			 */
			dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD |
			    (0x7 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
			    (0x3 << BGE_PCIDMARWCTL_WR_WAT_SHIFT);
		} else {
			dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD |
			    (0x3 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
			    (0x3 << BGE_PCIDMARWCTL_WR_WAT_SHIFT) |
			    0x0F;
		}

		/*
		 * 5703 and 5704 need ONEDMA_AT_ONCE as a workaround
		 * for hardware bugs.
		 */
		if (sc->bge_asicrev == BGE_ASICREV_BCM5703 ||
		    sc->bge_asicrev == BGE_ASICREV_BCM5704) {
			uint32_t tmp;

			tmp = CSR_READ_4(sc, BGE_PCI_CLKCTL) & 0x1f;
			if (tmp == 0x6 || tmp == 0x7)
				dma_rw_ctl |= BGE_PCIDMARWCTL_ONEDMA_ATONCE;
		}
	} else {
		/* Conventional PCI bus */
		dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD |
		    (0x7 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
		    (0x7 << BGE_PCIDMARWCTL_WR_WAT_SHIFT) |
		    0x0F;
	}

	if (sc->bge_asicrev == BGE_ASICREV_BCM5703 ||
	    sc->bge_asicrev == BGE_ASICREV_BCM5704 ||
	    sc->bge_asicrev == BGE_ASICREV_BCM5705)
		dma_rw_ctl &= ~BGE_PCIDMARWCTL_MINDMA;
	pci_write_config(sc->bge_dev, BGE_PCI_DMA_RW_CTL, dma_rw_ctl, 4);

	/*
	 * Set up general mode register.
	 */
	CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_DMA_SWAP_OPTIONS|
	    BGE_MODECTL_MAC_ATTN_INTR|BGE_MODECTL_HOST_SEND_BDS|
	    BGE_MODECTL_TX_NO_PHDR_CSUM);

	/*
	 * Disable memory write invalidate. Apparently it is not supported
	 * properly by these devices.
	 */
	PCI_CLRBIT(sc->bge_dev, BGE_PCI_CMD, PCIM_CMD_MWIEN, 4);

	/* Set the timer prescaler (always 66MHz) */
	CSR_WRITE_4(sc, BGE_MISC_CFG, 65 << 1/*BGE_32BITTIME_66MHZ*/);

	return(0);
}
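/*
 * The prescaler value of 65 appears to divide the 66MHz core clock so
 * the internal timer advances once per microsecond (presumably a
 * divide-by-N+1 scheme; the BGE_32BITTIME_66MHZ name in the register
 * header encodes the same assumption).  That is why the coalescing
 * tick tunables earlier in this file are expressed in usec.
 */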
static int
bge_blockinit(struct bge_softc *sc)
{
	struct bge_rcb *rcb;
	bus_size_t vrcb;
	bge_hostaddr taddr;
	uint32_t val;
	int i;

	/*
	 * Initialize the memory window pointer register so that
	 * we can access the first 32K of internal NIC RAM. This will
	 * allow us to set up the TX send ring RCBs and the RX return
	 * ring RCBs, plus other things which live in NIC memory.
	 */
	CSR_WRITE_4(sc, BGE_PCI_MEMWIN_BASEADDR, 0);

	/* Note: the BCM5704 has a smaller mbuf space than other chips. */
	if (!BGE_IS_5705_PLUS(sc)) {
		/* Configure mbuf memory pool */
		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_BASEADDR, BGE_BUFFPOOL_1);
		if (sc->bge_asicrev == BGE_ASICREV_BCM5704)
			CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x10000);
		else
			CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x18000);

		/* Configure DMA resource pool */
		CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_BASEADDR,
			    BGE_DMA_DESCRIPTORS);
		CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LEN, 0x2000);
	}

	/* Configure mbuf pool watermarks */
	if (BGE_IS_5705_PLUS(sc)) {
		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0);
		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x10);
	} else {
		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x50);
		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x20);
	}
	CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60);

	/* Configure DMA resource watermarks */
	CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LOWAT, 5);
	CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_HIWAT, 10);

	/* Enable buffer manager */
	if (!BGE_IS_5705_PLUS(sc)) {
		CSR_WRITE_4(sc, BGE_BMAN_MODE,
			    BGE_BMANMODE_ENABLE|BGE_BMANMODE_LOMBUF_ATTN);

		/* Poll for buffer manager start indication */
		for (i = 0; i < BGE_TIMEOUT; i++) {
			if (CSR_READ_4(sc, BGE_BMAN_MODE) & BGE_BMANMODE_ENABLE)
				break;
			DELAY(10);
		}

		if (i == BGE_TIMEOUT) {
			if_printf(&sc->arpcom.ac_if,
				  "buffer manager failed to start\n");
			return(ENXIO);
		}
	}

	/* Enable flow-through queues */
	CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
	CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);

	/* Wait until queue initialization is complete */
	for (i = 0; i < BGE_TIMEOUT; i++) {
		if (CSR_READ_4(sc, BGE_FTQ_RESET) == 0)
			break;
		DELAY(10);
	}

	if (i == BGE_TIMEOUT) {
		if_printf(&sc->arpcom.ac_if,
			  "flow-through queue init failed\n");
		return(ENXIO);
	}

	/* Initialize the standard RX ring control block */
	rcb = &sc->bge_ldata.bge_info.bge_std_rx_rcb;
	rcb->bge_hostaddr.bge_addr_lo =
	    BGE_ADDR_LO(sc->bge_ldata.bge_rx_std_ring_paddr);
	rcb->bge_hostaddr.bge_addr_hi =
	    BGE_ADDR_HI(sc->bge_ldata.bge_rx_std_ring_paddr);
	bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
			sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_PREREAD);
	if (BGE_IS_5705_PLUS(sc))
		rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(512, 0);
	else
		rcb->bge_maxlen_flags =
			BGE_RCB_MAXLEN_FLAGS(BGE_MAX_FRAMELEN, 0);
	rcb->bge_nicaddr = BGE_STD_RX_RINGS;
	CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_HI, rcb->bge_hostaddr.bge_addr_hi);
	CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_LO, rcb->bge_hostaddr.bge_addr_lo);
	CSR_WRITE_4(sc, BGE_RX_STD_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);
	CSR_WRITE_4(sc, BGE_RX_STD_RCB_NICADDR, rcb->bge_nicaddr);

	/*
	 * Initialize the jumbo RX ring control block
	 * We set the 'ring disabled' bit in the flags
	 * field until we're actually ready to start
	 * using this ring (i.e. once we set the MTU
	 * high enough to require it).
	 */
	if (BGE_IS_JUMBO_CAPABLE(sc)) {
		rcb = &sc->bge_ldata.bge_info.bge_jumbo_rx_rcb;

		rcb->bge_hostaddr.bge_addr_lo =
		    BGE_ADDR_LO(sc->bge_ldata.bge_rx_jumbo_ring_paddr);
		rcb->bge_hostaddr.bge_addr_hi =
		    BGE_ADDR_HI(sc->bge_ldata.bge_rx_jumbo_ring_paddr);
		bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
				sc->bge_cdata.bge_rx_jumbo_ring_map,
				BUS_DMASYNC_PREREAD);
		rcb->bge_maxlen_flags =
		    BGE_RCB_MAXLEN_FLAGS(BGE_MAX_FRAMELEN,
					 BGE_RCB_FLAG_RING_DISABLED);
		rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS;
		CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_HI,
			    rcb->bge_hostaddr.bge_addr_hi);
		CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_LO,
			    rcb->bge_hostaddr.bge_addr_lo);
		CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS,
			    rcb->bge_maxlen_flags);
		CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_NICADDR, rcb->bge_nicaddr);

		/* Set up dummy disabled mini ring RCB */
		rcb = &sc->bge_ldata.bge_info.bge_mini_rx_rcb;
		rcb->bge_maxlen_flags =
		    BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED);
		CSR_WRITE_4(sc, BGE_RX_MINI_RCB_MAXLEN_FLAGS,
			    rcb->bge_maxlen_flags);
	}

	/*
	 * Set the BD ring replenish thresholds. The recommended
	 * values are 1/8th the number of descriptors allocated to
	 * each ring.
	 */
	if (BGE_IS_5705_PLUS(sc))
		val = 8;
	else
		val = BGE_STD_RX_RING_CNT / 8;
	CSR_WRITE_4(sc, BGE_RBDI_STD_REPL_THRESH, val);
	CSR_WRITE_4(sc, BGE_RBDI_JUMBO_REPL_THRESH, BGE_JUMBO_RX_RING_CNT/8);

	/*
	 * Disable all unused send rings by setting the 'ring disabled'
	 * bit in the flags field of all the TX send ring control blocks.
	 * These are located in NIC memory.
	 */
	vrcb = BGE_MEMWIN_START + BGE_SEND_RING_RCB;
	for (i = 0; i < BGE_TX_RINGS_EXTSSRAM_MAX; i++) {
		RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
			    BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED));
		RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0);
		vrcb += sizeof(struct bge_rcb);
	}

	/* Configure TX RCB 0 (we use only the first ring) */
	vrcb = BGE_MEMWIN_START + BGE_SEND_RING_RCB;
	BGE_HOSTADDR(taddr, sc->bge_ldata.bge_tx_ring_paddr);
	RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi);
	RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo);
	RCB_WRITE_4(sc, vrcb, bge_nicaddr,
		    BGE_NIC_TXRING_ADDR(0, BGE_TX_RING_CNT));
	if (!BGE_IS_5705_PLUS(sc)) {
		RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
			    BGE_RCB_MAXLEN_FLAGS(BGE_TX_RING_CNT, 0));
	}

	/* Disable all unused RX return rings */
	vrcb = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB;
	for (i = 0; i < BGE_RX_RINGS_MAX; i++) {
		RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, 0);
		RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, 0);
		RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
			    BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt,
						 BGE_RCB_FLAG_RING_DISABLED));
		RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0);
		CSR_WRITE_4(sc, BGE_MBX_RX_CONS0_LO +
			    (i * (sizeof(uint64_t))), 0);
		vrcb += sizeof(struct bge_rcb);
	}

	/* Initialize RX ring indexes */
	CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, 0);
	CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, 0);
	CSR_WRITE_4(sc, BGE_MBX_RX_MINI_PROD_LO, 0);

	/*
	 * Set up RX return ring 0
	 * Note that the NIC address for RX return rings is 0x00000000.
	 * The return rings live entirely within the host, so the
	 * nicaddr field in the RCB isn't used.
	 */
	vrcb = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB;
	BGE_HOSTADDR(taddr, sc->bge_ldata.bge_rx_return_ring_paddr);
	RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi);
	RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo);
	RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0x00000000);
	RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
		    BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt, 0));

	/* Set random backoff seed for TX */
	CSR_WRITE_4(sc, BGE_TX_RANDOM_BACKOFF,
		    sc->arpcom.ac_enaddr[0] + sc->arpcom.ac_enaddr[1] +
		    sc->arpcom.ac_enaddr[2] + sc->arpcom.ac_enaddr[3] +
		    sc->arpcom.ac_enaddr[4] + sc->arpcom.ac_enaddr[5] +
		    BGE_TX_BACKOFF_SEED_MASK);

	/* Set inter-packet gap */
	CSR_WRITE_4(sc, BGE_TX_LENGTHS, 0x2620);

	/*
	 * Specify which ring to use for packets that don't match
	 * any RX rules.
	 */
	CSR_WRITE_4(sc, BGE_RX_RULES_CFG, 0x08);

	/*
	 * Configure number of RX lists. One interrupt distribution
	 * list, sixteen active lists, one bad frames class.
	 */
	CSR_WRITE_4(sc, BGE_RXLP_CFG, 0x181);

	/* Initialize RX list placement stats mask. */
	CSR_WRITE_4(sc, BGE_RXLP_STATS_ENABLE_MASK, 0x007FFFFF);
	CSR_WRITE_4(sc, BGE_RXLP_STATS_CTL, 0x1);

	/* Disable host coalescing until we get it set up */
	CSR_WRITE_4(sc, BGE_HCC_MODE, 0x00000000);

	/* Poll to make sure it's shut down. */
	for (i = 0; i < BGE_TIMEOUT; i++) {
		if (!(CSR_READ_4(sc, BGE_HCC_MODE) & BGE_HCCMODE_ENABLE))
			break;
		DELAY(10);
	}

	if (i == BGE_TIMEOUT) {
		if_printf(&sc->arpcom.ac_if,
			  "host coalescing engine failed to idle\n");
		return(ENXIO);
	}

	/* Set up host coalescing defaults */
	CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS, sc->bge_rx_coal_ticks);
	CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS, sc->bge_tx_coal_ticks);
	CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS, sc->bge_rx_max_coal_bds);
	CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS, sc->bge_tx_max_coal_bds);
	if (!BGE_IS_5705_PLUS(sc)) {
		CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS_INT, 0);
		CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS_INT, 0);
	}
	CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT, 1);
	CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT, 1);

	/* Set up address of statistics block */
	if (!BGE_IS_5705_PLUS(sc)) {
		CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_HI,
			    BGE_ADDR_HI(sc->bge_ldata.bge_stats_paddr));
		CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_LO,
			    BGE_ADDR_LO(sc->bge_ldata.bge_stats_paddr));

		CSR_WRITE_4(sc, BGE_HCC_STATS_BASEADDR, BGE_STATS_BLOCK);
		CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_BASEADDR, BGE_STATUS_BLOCK);
		CSR_WRITE_4(sc, BGE_HCC_STATS_TICKS, sc->bge_stat_ticks);
	}

	/* Set up address of status block */
	CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_HI,
		    BGE_ADDR_HI(sc->bge_ldata.bge_status_block_paddr));
	CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_LO,
		    BGE_ADDR_LO(sc->bge_ldata.bge_status_block_paddr));
	sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx = 0;
	sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx = 0;

	/* Turn on host coalescing state machine */
	CSR_WRITE_4(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE);

	/* Turn on RX BD completion state machine and enable attentions */
	CSR_WRITE_4(sc, BGE_RBDC_MODE,
		    BGE_RBDCMODE_ENABLE|BGE_RBDCMODE_ATTN);

	/* Turn on RX list placement state machine */
	CSR_WRITE_4(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);

	/* Turn on RX list selector state machine. */
	if (!BGE_IS_5705_PLUS(sc))
		CSR_WRITE_4(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);

	/* Turn on DMA, clear stats */
	CSR_WRITE_4(sc, BGE_MAC_MODE, BGE_MACMODE_TXDMA_ENB|
		    BGE_MACMODE_RXDMA_ENB|BGE_MACMODE_RX_STATS_CLEAR|
		    BGE_MACMODE_TX_STATS_CLEAR|BGE_MACMODE_RX_STATS_ENB|
		    BGE_MACMODE_TX_STATS_ENB|BGE_MACMODE_FRMHDR_DMA_ENB|
		    ((sc->bge_flags & BGE_FLAG_TBI) ?
		     BGE_PORTMODE_TBI : BGE_PORTMODE_MII));

	/* Set misc. local control, enable interrupts on attentions */
	CSR_WRITE_4(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_ONATTN);

	/* Assert GPIO pins for PHY reset */
	BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUT0|
		   BGE_MLC_MISCIO_OUT1|BGE_MLC_MISCIO_OUT2);
	BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUTEN0|
		   BGE_MLC_MISCIO_OUTEN1|BGE_MLC_MISCIO_OUTEN2);

	/* Turn on DMA completion state machine */
	if (!BGE_IS_5705_PLUS(sc))
		CSR_WRITE_4(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);

	/* Turn on write DMA state machine */
	val = BGE_WDMAMODE_ENABLE|BGE_WDMAMODE_ALL_ATTNS;
	if (sc->bge_asicrev == BGE_ASICREV_BCM5755 ||
	    sc->bge_asicrev == BGE_ASICREV_BCM5787)
		val |= (1 << 29);	/* Enable host coalescing bug fix. */
	CSR_WRITE_4(sc, BGE_WDMA_MODE, val);

	/* Turn on read DMA state machine */
	CSR_WRITE_4(sc, BGE_RDMA_MODE,
		    BGE_RDMAMODE_ENABLE|BGE_RDMAMODE_ALL_ATTNS);

	/* Turn on RX data completion state machine */
	CSR_WRITE_4(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);

	/* Turn on RX BD initiator state machine */
	CSR_WRITE_4(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);

	/* Turn on RX data and RX BD initiator state machine */
	CSR_WRITE_4(sc, BGE_RDBDI_MODE, BGE_RDBDIMODE_ENABLE);

	/* Turn on Mbuf cluster free state machine */
	if (!BGE_IS_5705_PLUS(sc))
		CSR_WRITE_4(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);

	/* Turn on send BD completion state machine */
	CSR_WRITE_4(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);

	/* Turn on send data completion state machine */
	CSR_WRITE_4(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE);

	/* Turn on send data initiator state machine */
	CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);

	/* Turn on send BD initiator state machine */
	CSR_WRITE_4(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);

	/* Turn on send BD selector state machine */
	CSR_WRITE_4(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);

	CSR_WRITE_4(sc, BGE_SDI_STATS_ENABLE_MASK, 0x007FFFFF);
	CSR_WRITE_4(sc, BGE_SDI_STATS_CTL,
		    BGE_SDISTATSCTL_ENABLE|BGE_SDISTATSCTL_FASTER);

	/* ack/clear link change events */
	CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED|
		    BGE_MACSTAT_CFG_CHANGED|BGE_MACSTAT_MI_COMPLETE|
		    BGE_MACSTAT_LINK_CHANGED);
	CSR_WRITE_4(sc, BGE_MI_STS, 0);

	/* Enable PHY auto polling (for MII/GMII only) */
	if (sc->bge_flags & BGE_FLAG_TBI) {
		CSR_WRITE_4(sc, BGE_MI_STS, BGE_MISTS_LINK);
	} else {
		BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL|10<<16);
		if (sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
		    sc->bge_chipid != BGE_CHIPID_BCM5700_B2) {
			CSR_WRITE_4(sc, BGE_MAC_EVT_ENB,
				    BGE_EVTENB_MI_INTERRUPT);
		}
	}

	/*
	 * Clear any pending link state attention.
	 * Otherwise some link state change events may be lost until attention
	 * is cleared by bge_intr() -> bge_softc.bge_link_upd() sequence.
	 * It's not necessary on newer BCM chips - perhaps enabling link
	 * state change attentions implies clearing pending attention.
	 */
	CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED|
		    BGE_MACSTAT_CFG_CHANGED|BGE_MACSTAT_MI_COMPLETE|
		    BGE_MACSTAT_LINK_CHANGED);

	/* Enable link state change attentions. */
	BGE_SETBIT(sc, BGE_MAC_EVT_ENB, BGE_EVTENB_LINK_CHANGED);

	return(0);
}
/*
 * Probe for a Broadcom chip. Check the PCI vendor and device IDs
 * against our list and return its name if we find a match. Note
 * that since the Broadcom controller contains VPD support, we
 * can get the device name string from the controller itself instead
 * of the compiled-in string. This is a little slow, but it guarantees
 * we'll always announce the right product name.
 */
static int
bge_probe(device_t dev)
{
	struct bge_softc *sc;
	struct bge_type *t;
	char *descbuf;
	uint16_t product, vendor;

	product = pci_get_device(dev);
	vendor = pci_get_vendor(dev);

	for (t = bge_devs; t->bge_name != NULL; t++) {
		if (vendor == t->bge_vid && product == t->bge_did)
			break;
	}

	if (t->bge_name == NULL)
		return(ENXIO);

	sc = device_get_softc(dev);
	descbuf = kmalloc(BGE_DEVDESC_MAX, M_TEMP, M_WAITOK);
	ksnprintf(descbuf, BGE_DEVDESC_MAX, "%s, ASIC rev. %#04x", t->bge_name,
	    pci_read_config(dev, BGE_PCI_MISC_CTL, 4) >> 16);
	device_set_desc_copy(dev, descbuf);
	if (pci_get_subvendor(dev) == PCI_VENDOR_DELL)
		sc->bge_flags |= BGE_FLAG_NO_3LED;
	kfree(descbuf, M_TEMP);
	return(0);
}
1637 bge_attach(device_t dev
)
1640 struct bge_softc
*sc
;
1642 uint32_t mac_addr
= 0;
1644 uint8_t ether_addr
[ETHER_ADDR_LEN
];
1646 sc
= device_get_softc(dev
);
1648 callout_init(&sc
->bge_stat_timer
);
1649 lwkt_serialize_init(&sc
->bge_jslot_serializer
);
1652 * Map control/status registers.
1654 pci_enable_busmaster(dev
);
1657 sc
->bge_res
= bus_alloc_resource_any(dev
, SYS_RES_MEMORY
, &rid
,
1660 if (sc
->bge_res
== NULL
) {
1661 device_printf(dev
, "couldn't map memory\n");
1665 sc
->bge_btag
= rman_get_bustag(sc
->bge_res
);
1666 sc
->bge_bhandle
= rman_get_bushandle(sc
->bge_res
);
1668 /* Save ASIC rev. */
1670 pci_read_config(dev
, BGE_PCI_MISC_CTL
, 4) &
1671 BGE_PCIMISCCTL_ASICREV
;
1672 sc
->bge_asicrev
= BGE_ASICREV(sc
->bge_chipid
);
1673 sc
->bge_chiprev
= BGE_CHIPREV(sc
->bge_chipid
);
1675 /* Save chipset family. */
1676 switch (sc
->bge_asicrev
) {
1677 case BGE_ASICREV_BCM5700
:
1678 case BGE_ASICREV_BCM5701
:
1679 case BGE_ASICREV_BCM5703
:
1680 case BGE_ASICREV_BCM5704
:
1681 sc
->bge_flags
|= BGE_FLAG_5700_FAMILY
| BGE_FLAG_JUMBO
;
1684 case BGE_ASICREV_BCM5714_A0
:
1685 case BGE_ASICREV_BCM5780
:
1686 case BGE_ASICREV_BCM5714
:
1687 sc
->bge_flags
|= BGE_FLAG_5714_FAMILY
;
1690 case BGE_ASICREV_BCM5750
:
1691 case BGE_ASICREV_BCM5752
:
1692 case BGE_ASICREV_BCM5755
:
1693 case BGE_ASICREV_BCM5787
:
1694 sc
->bge_flags
|= BGE_FLAG_575X_PLUS
;
1697 case BGE_ASICREV_BCM5705
:
1698 sc
->bge_flags
|= BGE_FLAG_5705_PLUS
;
1703 * Set various quirk flags.
1706 sc
->bge_flags
|= BGE_FLAG_ETH_WIRESPEED
;
1707 if (sc
->bge_asicrev
== BGE_ASICREV_BCM5700
||
1708 (sc
->bge_asicrev
== BGE_ASICREV_BCM5705
&&
1709 (sc
->bge_chipid
!= BGE_CHIPID_BCM5705_A0
&&
1710 sc
->bge_chipid
!= BGE_CHIPID_BCM5705_A1
)) ||
1711 sc
->bge_asicrev
== BGE_ASICREV_BCM5906
)
1712 sc
->bge_flags
&= ~BGE_FLAG_ETH_WIRESPEED
;
1714 if (sc
->bge_chipid
== BGE_CHIPID_BCM5701_A0
||
1715 sc
->bge_chipid
== BGE_CHIPID_BCM5701_B0
)
1716 sc
->bge_flags
|= BGE_FLAG_CRC_BUG
;
1718 if (sc
->bge_chiprev
== BGE_CHIPREV_5703_AX
||
1719 sc
->bge_chiprev
== BGE_CHIPREV_5704_AX
)
1720 sc
->bge_flags
|= BGE_FLAG_ADC_BUG
;
1722 if (sc
->bge_chipid
== BGE_CHIPID_BCM5704_A0
)
1723 sc
->bge_flags
|= BGE_FLAG_5704_A0_BUG
;
1725 if (BGE_IS_5705_PLUS(sc
)) {
1726 if (sc
->bge_asicrev
== BGE_ASICREV_BCM5755
||
1727 sc
->bge_asicrev
== BGE_ASICREV_BCM5787
) {
1728 uint32_t product
= pci_get_device(dev
);
1730 if (product
!= PCI_PRODUCT_BROADCOM_BCM5722
&&
1731 product
!= PCI_PRODUCT_BROADCOM_BCM5756
)
1732 sc
->bge_flags
|= BGE_FLAG_JITTER_BUG
;
1733 if (product
== PCI_PRODUCT_BROADCOM_BCM5755M
)
1734 sc
->bge_flags
|= BGE_FLAG_ADJUST_TRIM
;
1735 } else if (sc
->bge_asicrev
!= BGE_ASICREV_BCM5906
) {
1736 sc
->bge_flags
|= BGE_FLAG_BER_BUG
;
1740 /* Allocate interrupt */
1743 sc
->bge_irq
= bus_alloc_resource_any(dev
, SYS_RES_IRQ
, &rid
,
1744 RF_SHAREABLE
| RF_ACTIVE
);
1746 if (sc
->bge_irq
== NULL
) {
1747 device_printf(dev
, "couldn't map interrupt\n");
1753 * Check if this is a PCI-X or PCI Express device.
1755 if (BGE_IS_5705_PLUS(sc
)) {
1758 reg
= pci_read_config(dev
, BGE_PCIE_CAPID_REG
, 4);
1759 if ((reg
& 0xff) == BGE_PCIE_CAPID
)
1760 sc
->bge_flags
|= BGE_FLAG_PCIE
;
1763 * Check if the device is in PCI-X Mode.
1764 * (This bit is not valid on PCI Express controllers.)
1766 if ((pci_read_config(sc
->bge_dev
, BGE_PCI_PCISTATE
, 4) &
1767 BGE_PCISTATE_PCI_BUSMODE
) == 0)
1768 sc
->bge_flags
|= BGE_FLAG_PCIX
;
1772 device_printf(dev
, "asic 0x%04x, chip 0x%04x, %s\n",
1773 sc
->bge_asicrev
, sc
->bge_chiprev
,
1774 (sc
->bge_flags
& BGE_FLAG_PCIX
) ? "PCI-X"
1775 : ((sc
->bge_flags
& BGE_FLAG_PCIE
) ? "PCI-E" : "PCI"));
1778 ifp
= &sc
->arpcom
.ac_if
;
1779 if_initname(ifp
, device_get_name(dev
), device_get_unit(dev
));
1781 /* Try to reset the chip. */
1784 if (bge_chipinit(sc
)) {
1785 device_printf(dev
, "chip initialization failed\n");
	/*
	 * Get station address from the EEPROM.
	 */
	mac_addr = bge_readmem_ind(sc, 0x0c14);
	if ((mac_addr >> 16) == 0x484b) {
		ether_addr[0] = (uint8_t)(mac_addr >> 8);
		ether_addr[1] = (uint8_t)mac_addr;
		mac_addr = bge_readmem_ind(sc, 0x0c18);
		ether_addr[2] = (uint8_t)(mac_addr >> 24);
		ether_addr[3] = (uint8_t)(mac_addr >> 16);
		ether_addr[4] = (uint8_t)(mac_addr >> 8);
		ether_addr[5] = (uint8_t)mac_addr;
	} else if (bge_read_eeprom(sc, ether_addr,
	    BGE_EE_MAC_OFFSET + 2, ETHER_ADDR_LEN)) {
		device_printf(dev, "failed to read station address\n");
		error = ENXIO;
		goto fail;
	}

	/* 5705/5750 limits RX return ring to 512 entries. */
	if (BGE_IS_5705_PLUS(sc))
		sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT_5705;
	else
		sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT;

	error = bge_dma_alloc(sc);
	if (error)
		goto fail;

	/* Set default tuneable values. */
	sc->bge_stat_ticks = BGE_TICKS_PER_SEC;
	sc->bge_rx_coal_ticks = bge_rx_coal_ticks;
	sc->bge_tx_coal_ticks = bge_tx_coal_ticks;
	sc->bge_rx_max_coal_bds = bge_rx_max_coal_bds;
	sc->bge_tx_max_coal_bds = bge_tx_max_coal_bds;
	/* Set up ifnet structure */
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = bge_ioctl;
	ifp->if_start = bge_start;
#ifdef DEVICE_POLLING
	ifp->if_poll = bge_poll;
#endif
	ifp->if_watchdog = bge_watchdog;
	ifp->if_init = bge_init;
	ifp->if_mtu = ETHERMTU;
	ifp->if_capabilities = IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU;
	ifq_set_maxlen(&ifp->if_snd, BGE_TX_RING_CNT - 1);
	ifq_set_ready(&ifp->if_snd);

	/*
	 * 5700 B0 chips do not support checksumming correctly due
	 * to hardware bugs.
	 */
	if (sc->bge_chipid != BGE_CHIPID_BCM5700_B0) {
		ifp->if_capabilities |= IFCAP_HWCSUM;
		ifp->if_hwassist = BGE_CSUM_FEATURES;
	}
	ifp->if_capenable = ifp->if_capabilities;
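	/*
	 * Note: if_capabilities advertises what the hardware can do;
	 * if_hwassist tells the stack which checksums to actually hand
	 * to the hardware (BGE_CSUM_FEATURES is presumably the
	 * CSUM_IP/CSUM_TCP/CSUM_UDP mask from the header).  Toggling
	 * IFCAP_HWCSUM via SIOCSIFCAP in bge_ioctl() below updates
	 * if_hwassist to match.
	 */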
	/*
	 * Figure out what sort of media we have by checking the
	 * hardware config word in the first 32k of NIC internal memory,
	 * or fall back to examining the EEPROM if necessary.
	 * Note: on some BCM5700 cards, this value appears to be unset.
	 * If that's the case, we have to rely on identifying the NIC
	 * by its PCI subsystem ID, as we do below for the SysKonnect
	 * SK-9D41.
	 */
	if (bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_SIG) == BGE_MAGIC_NUMBER) {
		hwcfg = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_NICCFG);
	} else {
		if (bge_read_eeprom(sc, (caddr_t)&hwcfg, BGE_EE_HWCFG_OFFSET,
				    sizeof(hwcfg))) {
			device_printf(dev, "failed to read EEPROM\n");
			error = ENXIO;
			goto fail;
		}
		hwcfg = ntohl(hwcfg);
	}

	if ((hwcfg & BGE_HWCFG_MEDIA) == BGE_MEDIA_FIBER)
		sc->bge_flags |= BGE_FLAG_TBI;

	/* The SysKonnect SK-9D41 is a 1000baseSX card. */
	if (pci_get_subvendor(dev) == PCI_PRODUCT_SCHNEIDERKOCH_SK_9D41)
		sc->bge_flags |= BGE_FLAG_TBI;

	if (sc->bge_flags & BGE_FLAG_TBI) {
		ifmedia_init(&sc->bge_ifmedia, IFM_IMASK,
		    bge_ifmedia_upd, bge_ifmedia_sts);
		ifmedia_add(&sc->bge_ifmedia, IFM_ETHER|IFM_1000_SX, 0, NULL);
		ifmedia_add(&sc->bge_ifmedia,
		    IFM_ETHER|IFM_1000_SX|IFM_FDX, 0, NULL);
		ifmedia_add(&sc->bge_ifmedia, IFM_ETHER|IFM_AUTO, 0, NULL);
		ifmedia_set(&sc->bge_ifmedia, IFM_ETHER|IFM_AUTO);
		sc->bge_ifmedia.ifm_media = sc->bge_ifmedia.ifm_cur->ifm_media;
	} else {
		/*
		 * Do transceiver setup.
		 */
		if (mii_phy_probe(dev, &sc->bge_miibus,
		    bge_ifmedia_upd, bge_ifmedia_sts)) {
			device_printf(dev, "MII without any PHY!\n");
			error = ENXIO;
			goto fail;
		}
	}
	/*
	 * When using the BCM5701 in PCI-X mode, data corruption has
	 * been observed in the first few bytes of some received packets.
	 * Aligning the packet buffer in memory eliminates the corruption.
	 * Unfortunately, this misaligns the packet payloads.  On platforms
	 * which do not support unaligned accesses, we will realign the
	 * payloads by copying the received packets.
	 */
	if (sc->bge_asicrev == BGE_ASICREV_BCM5701 &&
	    (sc->bge_flags & BGE_FLAG_PCIX))
		sc->bge_flags |= BGE_FLAG_RX_ALIGNBUG;

	if (sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
	    sc->bge_chipid != BGE_CHIPID_BCM5700_B2) {
		sc->bge_link_upd = bge_bcm5700_link_upd;
		sc->bge_link_chg = BGE_MACSTAT_MI_INTERRUPT;
	} else if (sc->bge_flags & BGE_FLAG_TBI) {
		sc->bge_link_upd = bge_tbi_link_upd;
		sc->bge_link_chg = BGE_MACSTAT_LINK_CHANGED;
	} else {
		sc->bge_link_upd = bge_copper_link_upd;
		sc->bge_link_chg = BGE_MACSTAT_LINK_CHANGED;
	}
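	/*
	 * From here on, interrupt-time link handling is one indirect
	 * call: bge_intr() and bge_poll() read BGE_MAC_STS and, when a
	 * bge_link_chg bit (or a pending bge_link_evt) is seen, dispatch
	 * to whichever bge_*_link_upd() method was chosen above.
	 */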
	/*
	 * Create sysctl nodes.
	 */
	sysctl_ctx_init(&sc->bge_sysctl_ctx);
	sc->bge_sysctl_tree = SYSCTL_ADD_NODE(&sc->bge_sysctl_ctx,
					      SYSCTL_STATIC_CHILDREN(_hw),
					      OID_AUTO,
					      device_get_nameunit(dev),
					      CTLFLAG_RD, 0, "");
	if (sc->bge_sysctl_tree == NULL) {
		device_printf(dev, "can't add sysctl node\n");
		error = ENXIO;
		goto fail;
	}

	SYSCTL_ADD_PROC(&sc->bge_sysctl_ctx,
			SYSCTL_CHILDREN(sc->bge_sysctl_tree),
			OID_AUTO, "rx_coal_ticks",
			CTLTYPE_INT | CTLFLAG_RW,
			sc, 0, bge_sysctl_rx_coal_ticks, "I",
			"Receive coalescing ticks (usec).");
	SYSCTL_ADD_PROC(&sc->bge_sysctl_ctx,
			SYSCTL_CHILDREN(sc->bge_sysctl_tree),
			OID_AUTO, "tx_coal_ticks",
			CTLTYPE_INT | CTLFLAG_RW,
			sc, 0, bge_sysctl_tx_coal_ticks, "I",
			"Transmit coalescing ticks (usec).");
	SYSCTL_ADD_PROC(&sc->bge_sysctl_ctx,
			SYSCTL_CHILDREN(sc->bge_sysctl_tree),
			OID_AUTO, "rx_max_coal_bds",
			CTLTYPE_INT | CTLFLAG_RW,
			sc, 0, bge_sysctl_rx_max_coal_bds, "I",
			"Receive max coalesced BD count.");
	SYSCTL_ADD_PROC(&sc->bge_sysctl_ctx,
			SYSCTL_CHILDREN(sc->bge_sysctl_tree),
			OID_AUTO, "tx_max_coal_bds",
			CTLTYPE_INT | CTLFLAG_RW,
			sc, 0, bge_sysctl_tx_max_coal_bds, "I",
			"Transmit max coalesced BD count.");
	/*
	 * Call MI attach routine.
	 */
	ether_ifattach(ifp, ether_addr, NULL);

	error = bus_setup_intr(dev, sc->bge_irq, INTR_NETSAFE,
			       bge_intr, sc, &sc->bge_intrhand,
			       ifp->if_serializer);
	if (error) {
		ether_ifdetach(ifp);
		device_printf(dev, "couldn't set up irq\n");
		goto fail;
	}

	ifp->if_cpuid = ithread_cpuid(rman_get_start(sc->bge_irq));
	KKASSERT(ifp->if_cpuid >= 0 && ifp->if_cpuid < ncpus);

	return(0);
fail:
	bge_detach(dev);
	return(error);
}
static int
bge_detach(device_t dev)
{
	struct bge_softc *sc = device_get_softc(dev);

	if (device_is_attached(dev)) {
		struct ifnet *ifp = &sc->arpcom.ac_if;

		lwkt_serialize_enter(ifp->if_serializer);
		bge_stop(sc);
		bge_reset(sc);
		bus_teardown_intr(dev, sc->bge_irq, sc->bge_intrhand);
		lwkt_serialize_exit(ifp->if_serializer);

		ether_ifdetach(ifp);
	}

	if (sc->bge_flags & BGE_FLAG_TBI)
		ifmedia_removeall(&sc->bge_ifmedia);
	if (sc->bge_miibus)
		device_delete_child(dev, sc->bge_miibus);
	bus_generic_detach(dev);

	if (sc->bge_irq != NULL)
		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->bge_irq);

	if (sc->bge_res != NULL)
		bus_release_resource(dev, SYS_RES_MEMORY,
		    BGE_PCI_BAR0, sc->bge_res);

	if (sc->bge_sysctl_tree != NULL)
		sysctl_ctx_free(&sc->bge_sysctl_ctx);

	bge_dma_free(sc);

	return 0;
}
static void
bge_reset(struct bge_softc *sc)
{
	device_t dev = sc->bge_dev;
	uint32_t cachesize, command, pcistate, reset;
	void (*write_op)(struct bge_softc *, uint32_t, uint32_t);
	int i, val = 0;

	if (BGE_IS_575X_PLUS(sc) && !BGE_IS_5714_FAMILY(sc)) {
		if (sc->bge_flags & BGE_FLAG_PCIE)
			write_op = bge_writemem_direct;
		else
			write_op = bge_writemem_ind;
	} else {
		write_op = bge_writereg_ind;
	}

	/* Save some important PCI state. */
	cachesize = pci_read_config(dev, BGE_PCI_CACHESZ, 4);
	command = pci_read_config(dev, BGE_PCI_CMD, 4);
	pcistate = pci_read_config(dev, BGE_PCI_PCISTATE, 4);

	pci_write_config(dev, BGE_PCI_MISC_CTL,
	    BGE_PCIMISCCTL_INDIRECT_ACCESS|BGE_PCIMISCCTL_MASK_PCI_INTR|
	    BGE_HIF_SWAP_OPTIONS|BGE_PCIMISCCTL_PCISTATE_RW, 4);

	/* Disable fastboot on controllers that support it. */
	if (sc->bge_asicrev == BGE_ASICREV_BCM5752 ||
	    sc->bge_asicrev == BGE_ASICREV_BCM5755 ||
	    sc->bge_asicrev == BGE_ASICREV_BCM5787) {
		if (bootverbose)
			if_printf(&sc->arpcom.ac_if, "Disabling fastboot\n");
		CSR_WRITE_4(sc, BGE_FASTBOOT_PC, 0x0);
	}

	/*
	 * Write the magic number to SRAM at offset 0xB50.
	 * When firmware finishes its initialization it will
	 * write ~BGE_MAGIC_NUMBER to the same location.
	 */
	bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM, BGE_MAGIC_NUMBER);

	reset = BGE_MISCCFG_RESET_CORE_CLOCKS|(65<<1);
	/* XXX: Broadcom Linux driver. */
	if (sc->bge_flags & BGE_FLAG_PCIE) {
		if (CSR_READ_4(sc, 0x7e2c) == 0x60)	/* PCIE 1.0 */
			CSR_WRITE_4(sc, 0x7e2c, 0x20);
		if (sc->bge_chipid != BGE_CHIPID_BCM5750_A0) {
			/* Prevent PCIE link training during global reset */
			CSR_WRITE_4(sc, BGE_MISC_CFG, (1<<29));
			reset |= (1<<29);
		}
	}

	/*
	 * Set GPHY Power Down Override to leave GPHY
	 * powered up in D0 uninitialized.
	 */
	if (BGE_IS_5705_PLUS(sc))
		reset |= 0x04000000;

	/* Issue global reset */
	write_op(sc, BGE_MISC_CFG, reset);

	DELAY(1000);

	/* XXX: Broadcom Linux driver. */
	if (sc->bge_flags & BGE_FLAG_PCIE) {
		if (sc->bge_chipid == BGE_CHIPID_BCM5750_A0) {
			uint32_t v;

			DELAY(500000); /* wait for link training to complete */
			v = pci_read_config(dev, 0xc4, 4);
			pci_write_config(dev, 0xc4, v | (1<<15), 4);
		}
		/*
		 * Set PCIE max payload size to 128 bytes and
		 * clear error status.
		 */
		pci_write_config(dev, 0xd8, 0xf5000, 4);
	}

	/* Reset some of the PCI state that got zapped by reset */
	pci_write_config(dev, BGE_PCI_MISC_CTL,
	    BGE_PCIMISCCTL_INDIRECT_ACCESS|BGE_PCIMISCCTL_MASK_PCI_INTR|
	    BGE_HIF_SWAP_OPTIONS|BGE_PCIMISCCTL_PCISTATE_RW, 4);
	pci_write_config(dev, BGE_PCI_CACHESZ, cachesize, 4);
	pci_write_config(dev, BGE_PCI_CMD, command, 4);
	write_op(sc, BGE_MISC_CFG, (65 << 1));
	/* Enable memory arbiter. */
	if (BGE_IS_5714_FAMILY(sc)) {
		val = CSR_READ_4(sc, BGE_MARB_MODE);
		CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE | val);
	} else {
		CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);
	}

	/*
	 * Poll until we see the 1's complement of the magic number.
	 * This indicates that the firmware initialization
	 * is complete.
	 */
	for (i = 0; i < BGE_TIMEOUT; i++) {
		val = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM);
		if (val == ~BGE_MAGIC_NUMBER)
			break;
		DELAY(10);
	}

	if (i == BGE_TIMEOUT) {
		if_printf(&sc->arpcom.ac_if, "firmware handshake timed out, "
			  "found 0x%08x\n", val);
	}

	/*
	 * XXX Wait for the value of the PCISTATE register to
	 * return to its original pre-reset state.  This is a
	 * fairly good indicator of reset completion.  If we don't
	 * wait for the reset to fully complete, trying to read
	 * from the device's non-PCI registers may yield garbage
	 * results.
	 */
	for (i = 0; i < BGE_TIMEOUT; i++) {
		if (pci_read_config(dev, BGE_PCI_PCISTATE, 4) == pcistate)
			break;
		DELAY(10);
	}

	if (sc->bge_flags & BGE_FLAG_PCIE) {
		reset = bge_readmem_ind(sc, 0x7c00);
		bge_writemem_ind(sc, 0x7c00, reset | (1 << 25));
	}

	/* Fix up byte swapping */
	CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_DMA_SWAP_OPTIONS |
	    BGE_MODECTL_BYTESWAP_DATA);

	CSR_WRITE_4(sc, BGE_MAC_MODE, 0);
	/*
	 * The 5704 in TBI mode apparently needs some special
	 * adjustment to ensure the SERDES drive level is set
	 * to 1.2V.
	 */
	if (sc->bge_asicrev == BGE_ASICREV_BCM5704 &&
	    (sc->bge_flags & BGE_FLAG_TBI)) {
		uint32_t serdescfg;

		serdescfg = CSR_READ_4(sc, BGE_SERDES_CFG);
		serdescfg = (serdescfg & ~0xFFF) | 0x880;
		CSR_WRITE_4(sc, BGE_SERDES_CFG, serdescfg);
	}

	/* XXX: Broadcom Linux driver. */
	if ((sc->bge_flags & BGE_FLAG_PCIE) &&
	    sc->bge_chipid != BGE_CHIPID_BCM5750_A0) {
		uint32_t v;

		v = CSR_READ_4(sc, 0x7c00);
		CSR_WRITE_4(sc, 0x7c00, v | (1<<25));
	}

	DELAY(10000);
}
/*
 * Frame reception handling. This is called if there's a frame
 * on the receive return list.
 *
 * Note: we have to be able to handle two possibilities here:
 * 1) the frame is from the jumbo receive ring
 * 2) the frame is from the standard receive ring
 */
static void
bge_rxeof(struct bge_softc *sc)
{
	struct ifnet *ifp;
	int stdcnt = 0, jumbocnt = 0;
#ifdef ETHER_INPUT_CHAIN
	struct mbuf_chain chain[MAXCPU];
#endif

	if (sc->bge_rx_saved_considx ==
	    sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx)
		return;

#ifdef ETHER_INPUT_CHAIN
	ether_input_chain_init(chain);
#endif

	ifp = &sc->arpcom.ac_if;

	bus_dmamap_sync(sc->bge_cdata.bge_rx_return_ring_tag,
			sc->bge_cdata.bge_rx_return_ring_map,
			BUS_DMASYNC_POSTREAD);
	bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
			sc->bge_cdata.bge_rx_std_ring_map,
			BUS_DMASYNC_POSTREAD);
	if (BGE_IS_JUMBO_CAPABLE(sc)) {
		bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
				sc->bge_cdata.bge_rx_jumbo_ring_map,
				BUS_DMASYNC_POSTREAD);
	}
	while (sc->bge_rx_saved_considx !=
	       sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx) {
		struct bge_rx_bd	*cur_rx;
		uint32_t		rxidx;
		struct mbuf		*m = NULL;
		uint16_t		vlan_tag = 0;
		int			have_tag = 0;

		cur_rx =
		    &sc->bge_ldata.bge_rx_return_ring[sc->bge_rx_saved_considx];

		rxidx = cur_rx->bge_idx;
		BGE_INC(sc->bge_rx_saved_considx, sc->bge_return_ring_cnt);

		if (cur_rx->bge_flags & BGE_RXBDFLAG_VLAN_TAG) {
			have_tag = 1;
			vlan_tag = cur_rx->bge_vlan_tag;
		}

		if (cur_rx->bge_flags & BGE_RXBDFLAG_JUMBO_RING) {
			BGE_INC(sc->bge_jumbo, BGE_JUMBO_RX_RING_CNT);
			m = sc->bge_cdata.bge_rx_jumbo_chain[rxidx];
			sc->bge_cdata.bge_rx_jumbo_chain[rxidx] = NULL;
			jumbocnt++;
			if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
				ifp->if_ierrors++;
				bge_newbuf_jumbo(sc, sc->bge_jumbo, m);
				continue;
			}
			if (bge_newbuf_jumbo(sc,
			    sc->bge_jumbo, NULL) == ENOBUFS) {
				ifp->if_ierrors++;
				bge_newbuf_jumbo(sc, sc->bge_jumbo, m);
				continue;
			}
		} else {
			BGE_INC(sc->bge_std, BGE_STD_RX_RING_CNT);
			bus_dmamap_sync(sc->bge_cdata.bge_mtag,
					sc->bge_cdata.bge_rx_std_dmamap[rxidx],
					BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->bge_cdata.bge_mtag,
					  sc->bge_cdata.bge_rx_std_dmamap[rxidx]);
			m = sc->bge_cdata.bge_rx_std_chain[rxidx];
			sc->bge_cdata.bge_rx_std_chain[rxidx] = NULL;
			stdcnt++;
			if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
				ifp->if_ierrors++;
				bge_newbuf_std(sc, sc->bge_std, m);
				continue;
			}
			if (bge_newbuf_std(sc, sc->bge_std,
			    NULL) == ENOBUFS) {
				ifp->if_ierrors++;
				bge_newbuf_std(sc, sc->bge_std, m);
				continue;
			}
		}

		ifp->if_ipackets++;
#ifndef __i386__
		/*
		 * The i386 allows unaligned accesses, but for other
		 * platforms we must make sure the payload is aligned.
		 */
		if (sc->bge_flags & BGE_FLAG_RX_ALIGNBUG) {
			bcopy(m->m_data, m->m_data + ETHER_ALIGN,
			      cur_rx->bge_len);
			m->m_data += ETHER_ALIGN;
		}
#endif
		m->m_pkthdr.len = m->m_len = cur_rx->bge_len - ETHER_CRC_LEN;
		m->m_pkthdr.rcvif = ifp;

		if (ifp->if_capenable & IFCAP_RXCSUM) {
			if (cur_rx->bge_flags & BGE_RXBDFLAG_IP_CSUM) {
				m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
				if ((cur_rx->bge_ip_csum ^ 0xffff) == 0)
					m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
			}
			if ((cur_rx->bge_flags & BGE_RXBDFLAG_TCP_UDP_CSUM) &&
			    m->m_pkthdr.len >= BGE_MIN_FRAME) {
				m->m_pkthdr.csum_data =
					cur_rx->bge_tcp_udp_csum;
				m->m_pkthdr.csum_flags |=
					CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
			}
		}

		/*
		 * If we received a packet with a vlan tag, pass it
		 * to vlan_input() instead of ether_input().
		 */
		if (have_tag) {
			m->m_flags |= M_VLANTAG;
			m->m_pkthdr.ether_vlantag = vlan_tag;
			have_tag = vlan_tag = 0;
		}
#ifdef ETHER_INPUT_CHAIN
#ifdef ETHER_INPUT2
		ether_input_chain2(ifp, m, chain);
#else
		ether_input_chain(ifp, m, chain);
#endif
#else
		ifp->if_input(ifp, m);
#endif
	}
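	/*
	 * Note the replenish policy in the loop above: on a receive
	 * error, or when no replacement mbuf can be allocated (ENOBUFS),
	 * the old mbuf is recycled back into the ring rather than passed
	 * up the stack, so the ring never develops holes.
	 */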
#ifdef ETHER_INPUT_CHAIN
	ether_input_dispatch(chain);
#endif

	if (stdcnt > 0) {
		bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
				sc->bge_cdata.bge_rx_std_ring_map,
				BUS_DMASYNC_PREWRITE);
	}

	if (BGE_IS_JUMBO_CAPABLE(sc) && jumbocnt > 0) {
		bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
				sc->bge_cdata.bge_rx_jumbo_ring_map,
				BUS_DMASYNC_PREWRITE);
	}

	CSR_WRITE_4(sc, BGE_MBX_RX_CONS0_LO, sc->bge_rx_saved_considx);
	if (stdcnt)
		CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std);
	if (jumbocnt)
		CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo);
}
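/*
 * The mailbox writes at the end of bge_rxeof() publish the new consumer
 * index for the return ring and the producer indices for the standard
 * and jumbo rings; this is how the host tells the chip that the
 * recycled buffers are available again.
 */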
static void
bge_txeof(struct bge_softc *sc)
{
	struct bge_tx_bd *cur_tx = NULL;
	struct ifnet *ifp;

	if (sc->bge_tx_saved_considx ==
	    sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx)
		return;

	ifp = &sc->arpcom.ac_if;

	bus_dmamap_sync(sc->bge_cdata.bge_tx_ring_tag,
			sc->bge_cdata.bge_tx_ring_map,
			BUS_DMASYNC_POSTREAD);

	/*
	 * Go through our tx ring and free mbufs for those
	 * frames that have been sent.
	 */
	while (sc->bge_tx_saved_considx !=
	       sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx) {
		uint32_t idx;

		idx = sc->bge_tx_saved_considx;
		cur_tx = &sc->bge_ldata.bge_tx_ring[idx];
		if (cur_tx->bge_flags & BGE_TXBDFLAG_END)
			ifp->if_opackets++;
		if (sc->bge_cdata.bge_tx_chain[idx] != NULL) {
			bus_dmamap_sync(sc->bge_cdata.bge_mtag,
					sc->bge_cdata.bge_tx_dmamap[idx],
					BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->bge_cdata.bge_mtag,
					  sc->bge_cdata.bge_tx_dmamap[idx]);
			m_freem(sc->bge_cdata.bge_tx_chain[idx]);
			sc->bge_cdata.bge_tx_chain[idx] = NULL;
		}
		sc->bge_txcnt--;
		BGE_INC(sc->bge_tx_saved_considx, BGE_TX_RING_CNT);
	}

	if (cur_tx != NULL &&
	    (BGE_TX_RING_CNT - sc->bge_txcnt) >=
	    (BGE_NSEG_RSVD + BGE_NSEG_SPARE))
		ifp->if_flags &= ~IFF_OACTIVE;

	if (sc->bge_txcnt == 0)
		ifp->if_timer = 0;

	if (!ifq_is_empty(&ifp->if_snd))
		if_devstart(ifp);
}
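/*
 * Note that bge_txeof() only clears IFF_OACTIVE once at least
 * BGE_NSEG_RSVD + BGE_NSEG_SPARE descriptors are free again, mirroring
 * the headroom check in bge_start(); this hysteresis keeps the driver
 * from bouncing in and out of the OACTIVE state on every reclaimed
 * descriptor.
 */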
#ifdef DEVICE_POLLING

static void
bge_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
{
	struct bge_softc *sc = ifp->if_softc;
	uint32_t status;

	switch(cmd) {
	case POLL_REGISTER:
		bge_disable_intr(sc);
		break;
	case POLL_DEREGISTER:
		bge_enable_intr(sc);
		break;
	case POLL_AND_CHECK_STATUS:
		bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
				sc->bge_cdata.bge_status_map,
				BUS_DMASYNC_POSTREAD);

		/*
		 * Process link state changes.
		 */
		status = CSR_READ_4(sc, BGE_MAC_STS);
		if ((status & sc->bge_link_chg) || sc->bge_link_evt) {
			sc->bge_link_evt = 0;
			sc->bge_link_upd(sc, status);
		}
		/* FALL THROUGH */
	case POLL_ONLY:
		if (ifp->if_flags & IFF_RUNNING) {
			bge_rxeof(sc);
			bge_txeof(sc);
		}
		break;
	}
}

#endif
static void
bge_intr(void *xsc)
{
	struct bge_softc *sc = xsc;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t status;

	/*
	 * Ack the interrupt by writing something to BGE_MBX_IRQ0_LO.  Don't
	 * disable interrupts by writing nonzero like we used to, since with
	 * our current organization this just gives complications and
	 * pessimizations for re-enabling interrupts.  We used to have races
	 * instead of the necessary complications.  Disabling interrupts
	 * would just reduce the chance of a status update while we are
	 * running (by switching to the interrupt-mode coalescence
	 * parameters), but this chance is already very low so it is more
	 * efficient to get another interrupt than prevent it.
	 *
	 * We do the ack first to ensure another interrupt if there is a
	 * status update after the ack.  We don't check for the status
	 * changing later because it is more efficient to get another
	 * interrupt than prevent it, not quite as above (not checking is
	 * a smaller optimization than not toggling the interrupt enable,
	 * since checking doesn't involve PCI accesses and toggling requires
	 * the status check).  So toggling would probably be a pessimization
	 * even with MSI.  It would only be needed for using a task queue.
	 */
	CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 0);

	bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
			sc->bge_cdata.bge_status_map,
			BUS_DMASYNC_POSTREAD);

	/*
	 * Process link state changes.
	 */
	status = CSR_READ_4(sc, BGE_MAC_STS);
	if ((status & sc->bge_link_chg) || sc->bge_link_evt) {
		sc->bge_link_evt = 0;
		sc->bge_link_upd(sc, status);
	}

	if (ifp->if_flags & IFF_RUNNING) {
		/* Check RX return ring producer/consumer */
		bge_rxeof(sc);

		/* Check TX ring producer/consumer */
		bge_txeof(sc);
	}

	if (sc->bge_coal_chg)
		bge_coal_change(sc);
}
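/*
 * bge_intr() runs with the ifnet serializer held -- it is registered
 * in bge_attach() through bus_setup_intr() with ifp->if_serializer --
 * so it needs no additional locking against bge_ioctl(), bge_tick()
 * or the if_start path.
 */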
static void
bge_tick(void *xsc)
{
	struct bge_softc *sc = xsc;
	struct ifnet *ifp = &sc->arpcom.ac_if;

	lwkt_serialize_enter(ifp->if_serializer);

	if (BGE_IS_5705_PLUS(sc))
		bge_stats_update_regs(sc);
	else
		bge_stats_update(sc);

	if (sc->bge_flags & BGE_FLAG_TBI) {
		/*
		 * Since in TBI mode auto-polling can't be used we should poll
		 * link status manually. Here we register pending link event
		 * and trigger interrupt.
		 */
		sc->bge_link_evt++;
		BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_SET);
	} else if (!sc->bge_link) {
		mii_tick(device_get_softc(sc->bge_miibus));
	}

	callout_reset(&sc->bge_stat_timer, hz, bge_tick, sc);

	lwkt_serialize_exit(ifp->if_serializer);
}
static void
bge_stats_update_regs(struct bge_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct bge_mac_stats_regs stats;
	uint32_t *s;
	int i;

	s = (uint32_t *)&stats;
	for (i = 0; i < sizeof(struct bge_mac_stats_regs); i += 4) {
		*s = CSR_READ_4(sc, BGE_RX_STATS + i);
		s++;
	}

	ifp->if_collisions +=
	   (stats.dot3StatsSingleCollisionFrames +
	   stats.dot3StatsMultipleCollisionFrames +
	   stats.dot3StatsExcessiveCollisions +
	   stats.dot3StatsLateCollisions) -
	   ifp->if_collisions;
}
static void
bge_stats_update(struct bge_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	bus_size_t stats;

	stats = BGE_MEMWIN_START + BGE_STATS_BLOCK;

#define READ_STAT(sc, stats, stat)	\
	CSR_READ_4(sc, stats + offsetof(struct bge_stats, stat))

	ifp->if_collisions +=
	   (READ_STAT(sc, stats,
		txstats.dot3StatsSingleCollisionFrames.bge_addr_lo) +
	    READ_STAT(sc, stats,
		txstats.dot3StatsMultipleCollisionFrames.bge_addr_lo) +
	    READ_STAT(sc, stats,
		txstats.dot3StatsExcessiveCollisions.bge_addr_lo) +
	    READ_STAT(sc, stats,
		txstats.dot3StatsLateCollisions.bge_addr_lo)) -
	   ifp->if_collisions;

#undef READ_STAT

#ifdef notdef
	ifp->if_collisions +=
	   (sc->bge_rdata->bge_info.bge_stats.dot3StatsSingleCollisionFrames +
	   sc->bge_rdata->bge_info.bge_stats.dot3StatsMultipleCollisionFrames +
	   sc->bge_rdata->bge_info.bge_stats.dot3StatsExcessiveCollisions +
	   sc->bge_rdata->bge_info.bge_stats.dot3StatsLateCollisions) -
	   ifp->if_collisions;
#endif
}
/*
 * Encapsulate an mbuf chain in the tx ring by coupling the mbuf data
 * pointers to descriptors.
 */
static int
bge_encap(struct bge_softc *sc, struct mbuf **m_head0, uint32_t *txidx)
{
	struct bge_tx_bd *d = NULL;
	uint16_t csum_flags = 0;
	struct bge_dmamap_arg ctx;
	bus_dma_segment_t segs[BGE_NSEG_NEW];
	bus_dmamap_t map;
	int error, maxsegs, idx, i;
	struct mbuf *m_head = *m_head0;

	if (m_head->m_pkthdr.csum_flags) {
		if (m_head->m_pkthdr.csum_flags & CSUM_IP)
			csum_flags |= BGE_TXBDFLAG_IP_CSUM;
		if (m_head->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP))
			csum_flags |= BGE_TXBDFLAG_TCP_UDP_CSUM;
		if (m_head->m_flags & M_LASTFRAG)
			csum_flags |= BGE_TXBDFLAG_IP_FRAG_END;
		else if (m_head->m_flags & M_FRAG)
			csum_flags |= BGE_TXBDFLAG_IP_FRAG;
	}

	idx = *txidx;
	map = sc->bge_cdata.bge_tx_dmamap[idx];

	maxsegs = (BGE_TX_RING_CNT - sc->bge_txcnt) - BGE_NSEG_RSVD;
	KASSERT(maxsegs >= BGE_NSEG_SPARE,
		("not enough segments %d\n", maxsegs));

	if (maxsegs > BGE_NSEG_NEW)
		maxsegs = BGE_NSEG_NEW;

	/*
	 * Pad outbound frame to BGE_MIN_FRAME for an unusual reason.
	 * The bge hardware will pad out Tx runts to BGE_MIN_FRAME,
	 * but when such padded frames employ the bge IP/TCP checksum
	 * offload, the hardware checksum assist gives incorrect results
	 * (possibly from incorporating its own padding into the UDP/TCP
	 * checksum; who knows).  If we pad such runts with zeros, the
	 * onboard checksum comes out correct.  We do this by pretending
	 * the mbuf chain has too many fragments so the coalescing code
	 * below can assemble the packet into a single buffer that's
	 * padded out to the minimum frame size.
	 */
	if ((csum_flags & BGE_TXBDFLAG_TCP_UDP_CSUM) &&
	    m_head->m_pkthdr.len < BGE_MIN_FRAME) {
		error = EFBIG;
	} else {
		ctx.bge_segs = segs;
		ctx.bge_maxsegs = maxsegs;
		error = bus_dmamap_load_mbuf(sc->bge_cdata.bge_mtag, map,
					     m_head, bge_dma_map_mbuf, &ctx,
					     BUS_DMA_NOWAIT);
	}

	if (error == EFBIG || ctx.bge_maxsegs == 0) {
		struct mbuf *m_new;

		m_new = m_defrag(m_head, MB_DONTWAIT);
		if (m_new == NULL) {
			if_printf(&sc->arpcom.ac_if,
				  "could not defrag TX mbuf\n");
			error = ENOBUFS;
			goto back;
		}
		*m_head0 = m_head = m_new;

		/*
		 * Manually pad short frames, and zero the pad space
		 * to avoid leaking data.
		 */
		if ((csum_flags & BGE_TXBDFLAG_TCP_UDP_CSUM) &&
		    m_head->m_pkthdr.len < BGE_MIN_FRAME) {
			int pad_len = BGE_MIN_FRAME - m_head->m_pkthdr.len;

			bzero(mtod(m_head, char *) + m_head->m_pkthdr.len,
			      pad_len);
			m_head->m_pkthdr.len += pad_len;
			m_head->m_len = m_head->m_pkthdr.len;
		}

		ctx.bge_segs = segs;
		ctx.bge_maxsegs = maxsegs;
		error = bus_dmamap_load_mbuf(sc->bge_cdata.bge_mtag, map,
					     m_head, bge_dma_map_mbuf, &ctx,
					     BUS_DMA_NOWAIT);
		if (error || ctx.bge_maxsegs == 0) {
			if_printf(&sc->arpcom.ac_if,
				  "could not defrag TX mbuf\n");
			if (error == 0)
				error = EFBIG;
			goto back;
		}
	} else if (error) {
		if_printf(&sc->arpcom.ac_if, "could not map TX mbuf\n");
		goto back;
	}

	bus_dmamap_sync(sc->bge_cdata.bge_mtag, map, BUS_DMASYNC_PREWRITE);

	for (i = 0; ; i++) {
		d = &sc->bge_ldata.bge_tx_ring[idx];

		d->bge_addr.bge_addr_lo = BGE_ADDR_LO(ctx.bge_segs[i].ds_addr);
		d->bge_addr.bge_addr_hi = BGE_ADDR_HI(ctx.bge_segs[i].ds_addr);
		d->bge_len = segs[i].ds_len;
		d->bge_flags = csum_flags;

		if (i == ctx.bge_maxsegs - 1)
			break;
		BGE_INC(idx, BGE_TX_RING_CNT);
	}
	/* Mark the last segment as end of packet... */
	d->bge_flags |= BGE_TXBDFLAG_END;

	/* Set vlan tag to the first segment of the packet. */
	d = &sc->bge_ldata.bge_tx_ring[*txidx];
	if (m_head->m_flags & M_VLANTAG) {
		d->bge_flags |= BGE_TXBDFLAG_VLAN_TAG;
		d->bge_vlan_tag = m_head->m_pkthdr.ether_vlantag;
	} else {
		d->bge_vlan_tag = 0;
	}

	/*
	 * Ensure that the map for this transmission is placed at
	 * the array index of the last descriptor in this chain.
	 */
	sc->bge_cdata.bge_tx_dmamap[*txidx] = sc->bge_cdata.bge_tx_dmamap[idx];
	sc->bge_cdata.bge_tx_dmamap[idx] = map;
	sc->bge_cdata.bge_tx_chain[idx] = m_head;
	sc->bge_txcnt += ctx.bge_maxsegs;

	BGE_INC(idx, BGE_TX_RING_CNT);
	*txidx = idx;
back:
	if (error) {
		m_freem(*m_head0);
		*m_head0 = NULL;
	}
	return error;
}
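/*
 * Note the map bookkeeping at the end of bge_encap(): the dmamap that
 * was just loaded for this packet is parked at the index of the *last*
 * descriptor of the chain, where bge_txeof() will find it when that
 * descriptor is reclaimed, while the idle map that used to sit there
 * moves to *txidx.  BGE_INC() is presumably the usual modular
 * ring-index increment from the header, i.e. something like
 *
 *	#define BGE_INC(x, y)	(x) = ((x) + 1) % (y)
 */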
/*
 * Main transmit routine. To avoid having to do mbuf copies, we put pointers
 * to the mbuf data regions directly in the transmit descriptors.
 */
static void
bge_start(struct ifnet *ifp)
{
	struct bge_softc *sc = ifp->if_softc;
	struct mbuf *m_head = NULL;
	uint32_t prodidx;
	int need_trans;

	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
		return;

	prodidx = sc->bge_tx_prodidx;

	need_trans = 0;
	while (sc->bge_cdata.bge_tx_chain[prodidx] == NULL) {
		m_head = ifq_dequeue(&ifp->if_snd, NULL);
		if (m_head == NULL)
			break;

		/*
		 * XXX
		 * The code inside the if() block is never reached since we
		 * must mark CSUM_IP_FRAGS in our if_hwassist to start getting
		 * requests to checksum TCP/UDP in a fragmented packet.
		 *
		 * XXX
		 * Safety overkill.  If this is a fragmented packet chain
		 * with delayed TCP/UDP checksums, then only encapsulate
		 * it if we have enough descriptors to handle the entire
		 * chain at once.
		 * (paranoia -- may not actually be needed)
		 */
		if ((m_head->m_flags & M_FIRSTFRAG) &&
		    (m_head->m_pkthdr.csum_flags & CSUM_DELAY_DATA)) {
			if ((BGE_TX_RING_CNT - sc->bge_txcnt) <
			    m_head->m_pkthdr.csum_data + BGE_NSEG_RSVD) {
				ifp->if_flags |= IFF_OACTIVE;
				ifq_prepend(&ifp->if_snd, m_head);
				break;
			}
		}

		/*
		 * Sanity check: avoid coming within BGE_NSEG_RSVD
		 * descriptors of the end of the ring.  Also make
		 * sure there are BGE_NSEG_SPARE descriptors for
		 * jumbo buffers' defragmentation.
		 */
		if ((BGE_TX_RING_CNT - sc->bge_txcnt) <
		    (BGE_NSEG_RSVD + BGE_NSEG_SPARE)) {
			ifp->if_flags |= IFF_OACTIVE;
			ifq_prepend(&ifp->if_snd, m_head);
			break;
		}

		/*
		 * Pack the data into the transmit ring. If we
		 * don't have room, set the OACTIVE flag and wait
		 * for the NIC to drain the ring.
		 */
		if (bge_encap(sc, &m_head, &prodidx)) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}
		need_trans = 1;

		ETHER_BPF_MTAP(ifp, m_head);
	}

	if (!need_trans)
		return;

	/* Transmit */
	CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx);
	/* 5700 b2 errata */
	if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
		CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx);

	sc->bge_tx_prodidx = prodidx;

	/*
	 * Set a timeout in case the chip goes out to lunch.
	 */
	ifp->if_timer = 5;
}
static void
bge_init(void *xsc)
{
	struct bge_softc *sc = xsc;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint16_t *m;

	ASSERT_SERIALIZED(ifp->if_serializer);

	if (ifp->if_flags & IFF_RUNNING)
		return;

	/* Cancel pending I/O and flush buffers. */
	bge_stop(sc);
	bge_reset(sc);
	bge_chipinit(sc);

	/*
	 * Init the various state machines, ring
	 * control blocks and firmware.
	 */
	if (bge_blockinit(sc)) {
		if_printf(ifp, "initialization failure\n");
		bge_stop(sc);
		return;
	}

	/* Specify MTU. */
	CSR_WRITE_4(sc, BGE_RX_MTU, ifp->if_mtu +
	    ETHER_HDR_LEN + ETHER_CRC_LEN + EVL_ENCAPLEN);

	/* Load our MAC address. */
	m = (uint16_t *)&sc->arpcom.ac_enaddr[0];
	CSR_WRITE_4(sc, BGE_MAC_ADDR1_LO, htons(m[0]));
	CSR_WRITE_4(sc, BGE_MAC_ADDR1_HI, (htons(m[1]) << 16) | htons(m[2]));

	/* Enable or disable promiscuous mode as needed. */
	bge_setpromisc(sc);

	/* Program multicast filter. */
	bge_setmulti(sc);

	/* Init RX ring. */
	bge_init_rx_ring_std(sc);

	/*
	 * Workaround for a bug in 5705 ASIC rev A0. Poll the NIC's
	 * memory to ensure that the chip has in fact read the first
	 * entry of the ring.
	 */
	if (sc->bge_chipid == BGE_CHIPID_BCM5705_A0) {
		uint32_t v, i;

		for (i = 0; i < 10; i++) {
			DELAY(20);
			v = bge_readmem_ind(sc, BGE_STD_RX_RINGS + 8);
			if (v == (MCLBYTES - ETHER_ALIGN))
				break;
		}
		if (i == 10)
			if_printf(ifp, "5705 A0 chip failed to load RX ring\n");
	}

	/* Init jumbo RX ring. */
	if (ifp->if_mtu > (ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN))
		bge_init_rx_ring_jumbo(sc);

	/* Init our RX return ring index */
	sc->bge_rx_saved_considx = 0;

	/* Init TX ring. */
	bge_init_tx_ring(sc);

	/* Turn on transmitter */
	BGE_SETBIT(sc, BGE_TX_MODE, BGE_TXMODE_ENABLE);

	/* Turn on receiver */
	BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);

	/* Tell firmware we're alive. */
	BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);

	/* Enable host interrupts if polling(4) is not enabled. */
	BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_CLEAR_INTA);
#ifdef DEVICE_POLLING
	if (ifp->if_flags & IFF_POLLING)
		bge_disable_intr(sc);
	else
#endif
	bge_enable_intr(sc);

	bge_ifmedia_upd(ifp);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	callout_reset(&sc->bge_stat_timer, hz, bge_tick, sc);
}
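/*
 * The ordering in bge_init() matters: the rings are populated and the
 * RX/TX engines enabled before interrupts are unmasked (or left masked
 * when polling(4) is active), and only then is IFF_RUNNING set and the
 * statistics callout armed.
 */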
/*
 * Set media options.
 */
static int
bge_ifmedia_upd(struct ifnet *ifp)
{
	struct bge_softc *sc = ifp->if_softc;

	/* If this is a 1000baseX NIC, enable the TBI port. */
	if (sc->bge_flags & BGE_FLAG_TBI) {
		struct ifmedia *ifm = &sc->bge_ifmedia;

		if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
			return(EINVAL);

		switch(IFM_SUBTYPE(ifm->ifm_media)) {
		case IFM_AUTO:
			/*
			 * The BCM5704 ASIC appears to have a special
			 * mechanism for programming the autoneg
			 * advertisement registers in TBI mode.
			 */
			if (!bge_fake_autoneg &&
			    sc->bge_asicrev == BGE_ASICREV_BCM5704) {
				uint32_t sgdig;

				CSR_WRITE_4(sc, BGE_TX_TBI_AUTONEG, 0);
				sgdig = CSR_READ_4(sc, BGE_SGDIG_CFG);
				sgdig |= BGE_SGDIGCFG_AUTO |
					 BGE_SGDIGCFG_PAUSE_CAP |
					 BGE_SGDIGCFG_ASYM_PAUSE;
				CSR_WRITE_4(sc, BGE_SGDIG_CFG,
					    sgdig | BGE_SGDIGCFG_SEND);
				DELAY(5);
				CSR_WRITE_4(sc, BGE_SGDIG_CFG, sgdig);
			}
			break;
		case IFM_1000_SX:
			if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) {
				BGE_CLRBIT(sc, BGE_MAC_MODE,
				    BGE_MACMODE_HALF_DUPLEX);
			} else {
				BGE_SETBIT(sc, BGE_MAC_MODE,
				    BGE_MACMODE_HALF_DUPLEX);
			}
			break;
		default:
			return(EINVAL);
		}
	} else {
		struct mii_data *mii = device_get_softc(sc->bge_miibus);

		sc->bge_link_evt++;
		sc->bge_link = 0;
		if (mii->mii_instance) {
			struct mii_softc *miisc;

			LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
				mii_phy_reset(miisc);
		}
		mii_mediachg(mii);
	}
	return(0);
}
/*
 * Report current media status.
 */
static void
bge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct bge_softc *sc = ifp->if_softc;

	if (sc->bge_flags & BGE_FLAG_TBI) {
		ifmr->ifm_status = IFM_AVALID;
		ifmr->ifm_active = IFM_ETHER;
		if (CSR_READ_4(sc, BGE_MAC_STS) &
		    BGE_MACSTAT_TBI_PCS_SYNCHED) {
			ifmr->ifm_status |= IFM_ACTIVE;
		} else {
			ifmr->ifm_active |= IFM_NONE;
			return;
		}

		ifmr->ifm_active |= IFM_1000_SX;
		if (CSR_READ_4(sc, BGE_MAC_MODE) & BGE_MACMODE_HALF_DUPLEX)
			ifmr->ifm_active |= IFM_HDX;
		else
			ifmr->ifm_active |= IFM_FDX;
	} else {
		struct mii_data *mii = device_get_softc(sc->bge_miibus);

		mii_pollstat(mii);
		ifmr->ifm_active = mii->mii_media_active;
		ifmr->ifm_status = mii->mii_media_status;
	}
}
static int
bge_ioctl(struct ifnet *ifp, u_long command, caddr_t data, struct ucred *cr)
{
	struct bge_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	int mask, error = 0;

	ASSERT_SERIALIZED(ifp->if_serializer);

	switch (command) {
	case SIOCSIFMTU:
		if ((!BGE_IS_JUMBO_CAPABLE(sc) && ifr->ifr_mtu > ETHERMTU) ||
		    (BGE_IS_JUMBO_CAPABLE(sc) &&
		     ifr->ifr_mtu > BGE_JUMBO_MTU)) {
			error = EINVAL;
		} else if (ifp->if_mtu != ifr->ifr_mtu) {
			ifp->if_mtu = ifr->ifr_mtu;
			ifp->if_flags &= ~IFF_RUNNING;
			bge_init(sc);
		}
		break;
	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_flags & IFF_RUNNING) {
				mask = ifp->if_flags ^ sc->bge_if_flags;

				/*
				 * If only the state of the PROMISC flag
				 * changed, then just use the 'set promisc
				 * mode' command instead of reinitializing
				 * the entire NIC. Doing a full re-init
				 * means reloading the firmware and waiting
				 * for it to start up, which may take a
				 * second or two.  Similarly for ALLMULTI.
				 */
				if (mask & IFF_PROMISC)
					bge_setpromisc(sc);
				if (mask & IFF_ALLMULTI)
					bge_setmulti(sc);
			} else {
				bge_init(sc);
			}
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				bge_stop(sc);
		}
		sc->bge_if_flags = ifp->if_flags;
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		if (ifp->if_flags & IFF_RUNNING)
			bge_setmulti(sc);
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		if (sc->bge_flags & BGE_FLAG_TBI) {
			error = ifmedia_ioctl(ifp, ifr,
			    &sc->bge_ifmedia, command);
		} else {
			struct mii_data *mii;

			mii = device_get_softc(sc->bge_miibus);
			error = ifmedia_ioctl(ifp, ifr,
					      &mii->mii_media, command);
		}
		break;
	case SIOCSIFCAP:
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
		if (mask & IFCAP_HWCSUM) {
			ifp->if_capenable ^= IFCAP_HWCSUM;
			if (IFCAP_HWCSUM & ifp->if_capenable)
				ifp->if_hwassist = BGE_CSUM_FEATURES;
			else
				ifp->if_hwassist = 0;
		}
		break;
	default:
		error = ether_ioctl(ifp, command, data);
		break;
	}
	return error;
}
static void
bge_watchdog(struct ifnet *ifp)
{
	struct bge_softc *sc = ifp->if_softc;

	if_printf(ifp, "watchdog timeout -- resetting\n");

	ifp->if_flags &= ~IFF_RUNNING;
	bge_init(sc);

	ifp->if_oerrors++;

	if (!ifq_is_empty(&ifp->if_snd))
		if_devstart(ifp);
}
/*
 * Stop the adapter and free any mbufs allocated to the
 * RX and TX lists.
 */
static void
bge_stop(struct bge_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct ifmedia_entry *ifm;
	struct mii_data *mii = NULL;
	int mtmp, itmp;

	ASSERT_SERIALIZED(ifp->if_serializer);

	if ((sc->bge_flags & BGE_FLAG_TBI) == 0)
		mii = device_get_softc(sc->bge_miibus);

	callout_stop(&sc->bge_stat_timer);

	/*
	 * Disable all of the receiver blocks
	 */
	BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);
	BGE_CLRBIT(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);
	BGE_CLRBIT(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);
	if (!BGE_IS_5705_PLUS(sc))
		BGE_CLRBIT(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);
	BGE_CLRBIT(sc, BGE_RDBDI_MODE, BGE_RBDIMODE_ENABLE);
	BGE_CLRBIT(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);
	BGE_CLRBIT(sc, BGE_RBDC_MODE, BGE_RBDCMODE_ENABLE);

	/*
	 * Disable all of the transmit blocks
	 */
	BGE_CLRBIT(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);
	BGE_CLRBIT(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);
	BGE_CLRBIT(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);
	BGE_CLRBIT(sc, BGE_RDMA_MODE, BGE_RDMAMODE_ENABLE);
	BGE_CLRBIT(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE);
	if (!BGE_IS_5705_PLUS(sc))
		BGE_CLRBIT(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);
	BGE_CLRBIT(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);

	/*
	 * Shut down all of the memory managers and related
	 * state machines.
	 */
	BGE_CLRBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE);
	BGE_CLRBIT(sc, BGE_WDMA_MODE, BGE_WDMAMODE_ENABLE);
	if (!BGE_IS_5705_PLUS(sc))
		BGE_CLRBIT(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);
	CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
	CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);
	if (!BGE_IS_5705_PLUS(sc)) {
		BGE_CLRBIT(sc, BGE_BMAN_MODE, BGE_BMANMODE_ENABLE);
		BGE_CLRBIT(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);
	}

	/* Disable host interrupts. */
	bge_disable_intr(sc);

	/*
	 * Tell firmware we're shutting down.
	 */
	BGE_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);

	/* Free the RX lists. */
	bge_free_rx_ring_std(sc);

	/* Free jumbo RX list. */
	if (BGE_IS_JUMBO_CAPABLE(sc))
		bge_free_rx_ring_jumbo(sc);

	/* Free TX buffers. */
	bge_free_tx_ring(sc);

	/*
	 * Isolate/power down the PHY, but leave the media selection
	 * unchanged so that things will be put back to normal when
	 * we bring the interface back up.
	 *
	 * 'mii' may be NULL in the following cases:
	 * - The device uses TBI.
	 * - bge_stop() is called by bge_detach().
	 */
	if (mii != NULL) {
		itmp = ifp->if_flags;
		ifp->if_flags |= IFF_UP;
		ifm = mii->mii_media.ifm_cur;
		mtmp = ifm->ifm_media;
		ifm->ifm_media = IFM_ETHER|IFM_NONE;
		mii_mediachg(mii);
		ifm->ifm_media = mtmp;
		ifp->if_flags = itmp;
	}

	sc->bge_link = 0;
	sc->bge_coal_chg = 0;

	sc->bge_tx_saved_considx = BGE_TXCONS_UNSET;

	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	ifp->if_timer = 0;
}
/*
 * Stop all chip I/O so that the kernel's probe routines don't
 * get confused by errant DMAs when rebooting.
 */
static void
bge_shutdown(device_t dev)
{
	struct bge_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;

	lwkt_serialize_enter(ifp->if_serializer);
	bge_stop(sc);
	bge_reset(sc);
	lwkt_serialize_exit(ifp->if_serializer);
}

static int
bge_suspend(device_t dev)
{
	struct bge_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;

	lwkt_serialize_enter(ifp->if_serializer);
	bge_stop(sc);
	lwkt_serialize_exit(ifp->if_serializer);

	return 0;
}

static int
bge_resume(device_t dev)
{
	struct bge_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;

	lwkt_serialize_enter(ifp->if_serializer);

	if (ifp->if_flags & IFF_UP) {
		bge_init(sc);

		if (!ifq_is_empty(&ifp->if_snd))
			if_devstart(ifp);
	}

	lwkt_serialize_exit(ifp->if_serializer);

	return 0;
}
static void
bge_setpromisc(struct bge_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;

	if (ifp->if_flags & IFF_PROMISC)
		BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
	else
		BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
}

static void
bge_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
	struct bge_dmamap_arg *ctx = arg;

	if (error)
		return;

	KASSERT(nsegs == 1 && ctx->bge_maxsegs == 1,
		("only one segment is allowed\n"));

	ctx->bge_segs[0] = *segs;
}

static void
bge_dma_map_mbuf(void *arg, bus_dma_segment_t *segs, int nsegs,
		 bus_size_t mapsz __unused, int error)
{
	struct bge_dmamap_arg *ctx = arg;
	int i;

	if (error)
		return;

	if (nsegs > ctx->bge_maxsegs) {
		ctx->bge_maxsegs = 0;
		return;
	}

	ctx->bge_maxsegs = nsegs;
	for (i = 0; i < nsegs; ++i)
		ctx->bge_segs[i] = segs[i];
}
static void
bge_dma_free(struct bge_softc *sc)
{
	int i;

	/* Destroy RX/TX mbuf DMA stuffs. */
	if (sc->bge_cdata.bge_mtag != NULL) {
		for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
			if (sc->bge_cdata.bge_rx_std_dmamap[i]) {
				bus_dmamap_destroy(sc->bge_cdata.bge_mtag,
				    sc->bge_cdata.bge_rx_std_dmamap[i]);
			}
		}

		for (i = 0; i < BGE_TX_RING_CNT; i++) {
			if (sc->bge_cdata.bge_tx_dmamap[i]) {
				bus_dmamap_destroy(sc->bge_cdata.bge_mtag,
				    sc->bge_cdata.bge_tx_dmamap[i]);
			}
		}
		bus_dma_tag_destroy(sc->bge_cdata.bge_mtag);
	}

	/* Destroy standard RX ring */
	bge_dma_block_free(sc->bge_cdata.bge_rx_std_ring_tag,
			   sc->bge_cdata.bge_rx_std_ring_map,
			   sc->bge_ldata.bge_rx_std_ring);

	if (BGE_IS_JUMBO_CAPABLE(sc))
		bge_free_jumbo_mem(sc);

	/* Destroy RX return ring */
	bge_dma_block_free(sc->bge_cdata.bge_rx_return_ring_tag,
			   sc->bge_cdata.bge_rx_return_ring_map,
			   sc->bge_ldata.bge_rx_return_ring);

	/* Destroy TX ring */
	bge_dma_block_free(sc->bge_cdata.bge_tx_ring_tag,
			   sc->bge_cdata.bge_tx_ring_map,
			   sc->bge_ldata.bge_tx_ring);

	/* Destroy status block */
	bge_dma_block_free(sc->bge_cdata.bge_status_tag,
			   sc->bge_cdata.bge_status_map,
			   sc->bge_ldata.bge_status_block);

	/* Destroy statistics block */
	bge_dma_block_free(sc->bge_cdata.bge_stats_tag,
			   sc->bge_cdata.bge_stats_map,
			   sc->bge_ldata.bge_stats);

	/* Destroy the parent tag */
	if (sc->bge_cdata.bge_parent_tag != NULL)
		bus_dma_tag_destroy(sc->bge_cdata.bge_parent_tag);
}
static int
bge_dma_alloc(struct bge_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	int i, j, nseg, error;

	/*
	 * Allocate the parent bus DMA tag appropriate for PCI.
	 */
	error = bus_dma_tag_create(NULL, 1, 0,
				   BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
				   NULL, NULL,
				   MAXBSIZE, BGE_NSEG_NEW,
				   BUS_SPACE_MAXSIZE_32BIT,
				   0, &sc->bge_cdata.bge_parent_tag);
	if (error) {
		if_printf(ifp, "could not allocate parent dma tag\n");
		return error;
	}

	/*
	 * Create DMA tag for mbufs.
	 */
	nseg = BGE_NSEG_NEW;
	error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag, 1, 0,
				   BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
				   NULL, NULL,
				   MCLBYTES * nseg, nseg, MCLBYTES,
				   BUS_DMA_ALLOCNOW, &sc->bge_cdata.bge_mtag);
	if (error) {
		if_printf(ifp, "could not allocate mbuf dma tag\n");
		return error;
	}

	/*
	 * Create DMA maps for TX/RX mbufs.
	 */
	for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
		error = bus_dmamap_create(sc->bge_cdata.bge_mtag, 0,
					  &sc->bge_cdata.bge_rx_std_dmamap[i]);
		if (error) {
			for (j = 0; j < i; ++j) {
				bus_dmamap_destroy(sc->bge_cdata.bge_mtag,
					sc->bge_cdata.bge_rx_std_dmamap[j]);
			}
			bus_dma_tag_destroy(sc->bge_cdata.bge_mtag);
			sc->bge_cdata.bge_mtag = NULL;

			if_printf(ifp, "could not create DMA map for RX\n");
			return error;
		}
	}

	for (i = 0; i < BGE_TX_RING_CNT; i++) {
		error = bus_dmamap_create(sc->bge_cdata.bge_mtag, 0,
					  &sc->bge_cdata.bge_tx_dmamap[i]);
		if (error) {
			for (j = 0; j < BGE_STD_RX_RING_CNT; ++j) {
				bus_dmamap_destroy(sc->bge_cdata.bge_mtag,
					sc->bge_cdata.bge_rx_std_dmamap[j]);
			}
			for (j = 0; j < i; ++j) {
				bus_dmamap_destroy(sc->bge_cdata.bge_mtag,
					sc->bge_cdata.bge_tx_dmamap[j]);
			}
			bus_dma_tag_destroy(sc->bge_cdata.bge_mtag);
			sc->bge_cdata.bge_mtag = NULL;

			if_printf(ifp, "could not create DMA map for TX\n");
			return error;
		}
	}

	/*
	 * Create DMA stuffs for standard RX ring.
	 */
	error = bge_dma_block_alloc(sc, BGE_STD_RX_RING_SZ,
				    &sc->bge_cdata.bge_rx_std_ring_tag,
				    &sc->bge_cdata.bge_rx_std_ring_map,
				    (void **)&sc->bge_ldata.bge_rx_std_ring,
				    &sc->bge_ldata.bge_rx_std_ring_paddr);
	if (error) {
		if_printf(ifp, "could not create std RX ring\n");
		return error;
	}

	/*
	 * Create jumbo buffer pool.
	 */
	if (BGE_IS_JUMBO_CAPABLE(sc)) {
		error = bge_alloc_jumbo_mem(sc);
		if (error) {
			if_printf(ifp, "could not create jumbo buffer pool\n");
			return error;
		}
	}

	/*
	 * Create DMA stuffs for RX return ring.
	 */
	error = bge_dma_block_alloc(sc, BGE_RX_RTN_RING_SZ(sc),
				    &sc->bge_cdata.bge_rx_return_ring_tag,
				    &sc->bge_cdata.bge_rx_return_ring_map,
				    (void **)&sc->bge_ldata.bge_rx_return_ring,
				    &sc->bge_ldata.bge_rx_return_ring_paddr);
	if (error) {
		if_printf(ifp, "could not create RX ret ring\n");
		return error;
	}

	/*
	 * Create DMA stuffs for TX ring.
	 */
	error = bge_dma_block_alloc(sc, BGE_TX_RING_SZ,
				    &sc->bge_cdata.bge_tx_ring_tag,
				    &sc->bge_cdata.bge_tx_ring_map,
				    (void **)&sc->bge_ldata.bge_tx_ring,
				    &sc->bge_ldata.bge_tx_ring_paddr);
	if (error) {
		if_printf(ifp, "could not create TX ring\n");
		return error;
	}

	/*
	 * Create DMA stuffs for status block.
	 */
	error = bge_dma_block_alloc(sc, BGE_STATUS_BLK_SZ,
				    &sc->bge_cdata.bge_status_tag,
				    &sc->bge_cdata.bge_status_map,
				    (void **)&sc->bge_ldata.bge_status_block,
				    &sc->bge_ldata.bge_status_block_paddr);
	if (error) {
		if_printf(ifp, "could not create status block\n");
		return error;
	}

	/*
	 * Create DMA stuffs for statistics block.
	 */
	error = bge_dma_block_alloc(sc, BGE_STATS_SZ,
				    &sc->bge_cdata.bge_stats_tag,
				    &sc->bge_cdata.bge_stats_map,
				    (void **)&sc->bge_ldata.bge_stats,
				    &sc->bge_ldata.bge_stats_paddr);
	if (error) {
		if_printf(ifp, "could not create stats block\n");
		return error;
	}
	return 0;
}
static int
bge_dma_block_alloc(struct bge_softc *sc, bus_size_t size, bus_dma_tag_t *tag,
		    bus_dmamap_t *map, void **addr, bus_addr_t *paddr)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct bge_dmamap_arg ctx;
	bus_dma_segment_t seg;
	int error;

	/*
	 * Create DMA tag
	 */
	error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag, PAGE_SIZE, 0,
				   BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
				   NULL, NULL, size, 1, size, 0, tag);
	if (error) {
		if_printf(ifp, "could not allocate dma tag\n");
		return error;
	}

	/*
	 * Allocate DMA'able memory
	 */
	error = bus_dmamem_alloc(*tag, addr, BUS_DMA_WAITOK | BUS_DMA_ZERO,
				 map);
	if (error) {
		if_printf(ifp, "could not allocate dma memory\n");
		bus_dma_tag_destroy(*tag);
		*tag = NULL;
		return error;
	}

	/*
	 * Load the DMA'able memory
	 */
	ctx.bge_maxsegs = 1;
	ctx.bge_segs = &seg;
	error = bus_dmamap_load(*tag, *map, *addr, size, bge_dma_map_addr,
				&ctx, BUS_DMA_WAITOK);
	if (error) {
		if_printf(ifp, "could not load dma memory\n");
		bus_dmamem_free(*tag, *addr, *map);
		bus_dma_tag_destroy(*tag);
		*tag = NULL;
		return error;
	}

	*paddr = ctx.bge_segs[0].ds_addr;
	return 0;
}

static void
bge_dma_block_free(bus_dma_tag_t tag, bus_dmamap_t map, void *addr)
{
	if (tag != NULL) {
		bus_dmamap_unload(tag, map);
		bus_dmamem_free(tag, addr, map);
		bus_dma_tag_destroy(tag);
	}
}
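/*
 * The alloc/load pattern above relies on bus_dmamap_load() invoking
 * bge_dma_map_addr() synchronously (BUS_DMA_WAITOK, single segment):
 * the callback copies the segment descriptor into the bge_dmamap_arg,
 * and the caller then hands the resulting bus address to the chip.
 */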
/*
 * Grrr. The link status word in the status block does
 * not work correctly on the BCM5700 rev AX and BX chips,
 * according to all available information. Hence, we have
 * to enable MII interrupts in order to properly obtain
 * async link changes. Unfortunately, this also means that
 * we have to read the MAC status register to detect link
 * changes, thereby adding an additional register access to
 * the interrupt handler.
 *
 * XXX: perhaps the link state detection procedure used for
 * BGE_CHIPID_BCM5700_B2 can be used for other BCM5700 revisions.
 */
static void
bge_bcm5700_link_upd(struct bge_softc *sc, uint32_t status __unused)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct mii_data *mii = device_get_softc(sc->bge_miibus);

	mii_pollstat(mii);

	if (!sc->bge_link &&
	    (mii->mii_media_status & IFM_ACTIVE) &&
	    IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
		sc->bge_link++;
		if (bootverbose)
			if_printf(ifp, "link UP\n");
	} else if (sc->bge_link &&
	    (!(mii->mii_media_status & IFM_ACTIVE) ||
	    IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE)) {
		sc->bge_link = 0;
		if (bootverbose)
			if_printf(ifp, "link DOWN\n");
	}

	/* Clear the interrupt. */
	CSR_WRITE_4(sc, BGE_MAC_EVT_ENB, BGE_EVTENB_MI_INTERRUPT);
	bge_miibus_readreg(sc->bge_dev, 1, BRGPHY_MII_ISR);
	bge_miibus_writereg(sc->bge_dev, 1, BRGPHY_MII_IMR, BRGPHY_INTRS);
}
static void
bge_tbi_link_upd(struct bge_softc *sc, uint32_t status)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;

#define PCS_ENCODE_ERR	(BGE_MACSTAT_PORT_DECODE_ERROR|BGE_MACSTAT_MI_COMPLETE)

	/*
	 * Sometimes PCS encoding errors are detected in
	 * TBI mode (on fiber NICs), and for some reason
	 * the chip will signal them as link changes.
	 * If we get a link change event, but the 'PCS
	 * encoding error' bit in the MAC status register
	 * is set, don't bother doing a link check.
	 * This avoids spurious "gigabit link up" messages
	 * that sometimes appear on fiber NICs during
	 * periods of heavy traffic.
	 */
	if (status & BGE_MACSTAT_TBI_PCS_SYNCHED) {
		if (!sc->bge_link) {
			sc->bge_link++;
			if (sc->bge_asicrev == BGE_ASICREV_BCM5704) {
				BGE_CLRBIT(sc, BGE_MAC_MODE,
					   BGE_MACMODE_TBI_SEND_CFGS);
			}
			CSR_WRITE_4(sc, BGE_MAC_STS, 0xFFFFFFFF);

			if (bootverbose)
				if_printf(ifp, "link UP\n");

			ifp->if_link_state = LINK_STATE_UP;
			if_link_state_change(ifp);
		}
	} else if ((status & PCS_ENCODE_ERR) != PCS_ENCODE_ERR) {
		if (sc->bge_link) {
			sc->bge_link = 0;

			if (bootverbose)
				if_printf(ifp, "link DOWN\n");

			ifp->if_link_state = LINK_STATE_DOWN;
			if_link_state_change(ifp);
		}
	}

#undef PCS_ENCODE_ERR

	/* Clear the attention. */
	CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED |
	    BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE |
	    BGE_MACSTAT_LINK_CHANGED);
}
static void
bge_copper_link_upd(struct bge_softc *sc, uint32_t status __unused)
{
	/*
	 * Check that the AUTOPOLL bit is set before
	 * processing the event as a real link change.
	 * Turning AUTOPOLL on and off in the MII read/write
	 * functions will often trigger a link status
	 * interrupt for no reason.
	 */
	if (CSR_READ_4(sc, BGE_MI_MODE) & BGE_MIMODE_AUTOPOLL) {
		struct ifnet *ifp = &sc->arpcom.ac_if;
		struct mii_data *mii = device_get_softc(sc->bge_miibus);

		mii_pollstat(mii);

		if (!sc->bge_link &&
		    (mii->mii_media_status & IFM_ACTIVE) &&
		    IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
			sc->bge_link++;
			if (bootverbose)
				if_printf(ifp, "link UP\n");
		} else if (sc->bge_link &&
		    (!(mii->mii_media_status & IFM_ACTIVE) ||
		    IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE)) {
			sc->bge_link = 0;
			if (bootverbose)
				if_printf(ifp, "link DOWN\n");
		}
	}

	/* Clear the attention. */
	CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED |
	    BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE |
	    BGE_MACSTAT_LINK_CHANGED);
}
static int
bge_sysctl_rx_coal_ticks(SYSCTL_HANDLER_ARGS)
{
	struct bge_softc *sc = arg1;

	return bge_sysctl_coal_chg(oidp, arg1, arg2, req,
				   &sc->bge_rx_coal_ticks,
				   BGE_RX_COAL_TICKS_CHG);
}

static int
bge_sysctl_tx_coal_ticks(SYSCTL_HANDLER_ARGS)
{
	struct bge_softc *sc = arg1;

	return bge_sysctl_coal_chg(oidp, arg1, arg2, req,
				   &sc->bge_tx_coal_ticks,
				   BGE_TX_COAL_TICKS_CHG);
}

static int
bge_sysctl_rx_max_coal_bds(SYSCTL_HANDLER_ARGS)
{
	struct bge_softc *sc = arg1;

	return bge_sysctl_coal_chg(oidp, arg1, arg2, req,
				   &sc->bge_rx_max_coal_bds,
				   BGE_RX_MAX_COAL_BDS_CHG);
}

static int
bge_sysctl_tx_max_coal_bds(SYSCTL_HANDLER_ARGS)
{
	struct bge_softc *sc = arg1;

	return bge_sysctl_coal_chg(oidp, arg1, arg2, req,
				   &sc->bge_tx_max_coal_bds,
				   BGE_TX_MAX_COAL_BDS_CHG);
}
static int
bge_sysctl_coal_chg(SYSCTL_HANDLER_ARGS, uint32_t *coal,
		    uint32_t coal_chg_mask)
{
	struct bge_softc *sc = arg1;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	int error = 0, v;

	lwkt_serialize_enter(ifp->if_serializer);

	v = *coal;
	error = sysctl_handle_int(oidp, &v, 0, req);
	if (!error && req->newptr != NULL) {
		if (v < 0) {
			error = EINVAL;
		} else {
			*coal = v;
			sc->bge_coal_chg |= coal_chg_mask;
		}
	}

	lwkt_serialize_exit(ifp->if_serializer);
	return error;
}
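/*
 * Note that the new coalescing value is not written to the chip here;
 * only sc->bge_coal_chg is marked.  The register update itself happens
 * in bge_coal_change(), called from bge_intr(), so the hardware is
 * always touched from the same serialized context.
 */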
static void
bge_coal_change(struct bge_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t val;

	ASSERT_SERIALIZED(ifp->if_serializer);

	if (sc->bge_coal_chg & BGE_RX_COAL_TICKS_CHG) {
		CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS,
			    sc->bge_rx_coal_ticks);
		DELAY(10);
		val = CSR_READ_4(sc, BGE_HCC_RX_COAL_TICKS);

		if (bootverbose) {
			if_printf(ifp, "rx_coal_ticks -> %u\n",
				  sc->bge_rx_coal_ticks);
		}
	}

	if (sc->bge_coal_chg & BGE_TX_COAL_TICKS_CHG) {
		CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS,
			    sc->bge_tx_coal_ticks);
		DELAY(10);
		val = CSR_READ_4(sc, BGE_HCC_TX_COAL_TICKS);

		if (bootverbose) {
			if_printf(ifp, "tx_coal_ticks -> %u\n",
				  sc->bge_tx_coal_ticks);
		}
	}

	if (sc->bge_coal_chg & BGE_RX_MAX_COAL_BDS_CHG) {
		CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS,
			    sc->bge_rx_max_coal_bds);
		DELAY(10);
		val = CSR_READ_4(sc, BGE_HCC_RX_MAX_COAL_BDS);

		if (bootverbose) {
			if_printf(ifp, "rx_max_coal_bds -> %u\n",
				  sc->bge_rx_max_coal_bds);
		}
	}

	if (sc->bge_coal_chg & BGE_TX_MAX_COAL_BDS_CHG) {
		CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS,
			    sc->bge_tx_max_coal_bds);
		DELAY(10);
		val = CSR_READ_4(sc, BGE_HCC_TX_MAX_COAL_BDS);

		if (bootverbose) {
			if_printf(ifp, "tx_max_coal_bds -> %u\n",
				  sc->bge_tx_max_coal_bds);
		}
	}

	sc->bge_coal_chg = 0;
}
static void
bge_enable_intr(struct bge_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;

	lwkt_serialize_handler_enable(ifp->if_serializer);

	/*
	 * Enable interrupt.
	 */
	CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 0);

	/*
	 * Unmask the interrupt when we stop polling.
	 */
	BGE_CLRBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR);

	/*
	 * Trigger another interrupt, since above writing
	 * to interrupt mailbox0 may acknowledge pending
	 * interrupt.
	 */
	BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_SET);
}

static void
bge_disable_intr(struct bge_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;

	/*
	 * Mask the interrupt when we start polling.
	 */
	BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR);

	/*
	 * Acknowledge possible asserted interrupt.
	 */
	CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 1);

	lwkt_serialize_handler_disable(ifp->if_serializer);
}