/*-
 * Copyright (c) 2006-2007 Broadcom Corporation
 *	David Christensen <davidch@broadcom.com>.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of Broadcom Corporation nor the name of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written consent.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/dev/bce/if_bce.c,v 1.31 2007/05/16 23:34:11 davidch Exp $
 */
/*
 * The following controllers are supported by this driver:
 *   BCM5706C A2, A3
 *   BCM5706S A2, A3
 *   BCM5708C B1, B2
 *   BCM5708S B1, B2
 *   BCM5709C A1, B2, C0
 *   BCM5716  C0
 *
 * The following controllers are not supported by this driver:
 *   BCM5706C A0, A1
 *   BCM5706S A0, A1
 *   BCM5708C A0, B0
 *   BCM5708S A0, B0
 *   BCM5709C A0, B0, B1
 *   BCM5709S A0, A1, B0, B1, B2, C0
 *
 * Note about MSI-X on 5709/5716:
 * - 9 MSI-X vectors are supported.
 * - The association between MSI-X vectors, RX/TX rings and status
 *   blocks is fixed:
 *   o  The first RX ring and the first TX ring use the first
 *      status block.
 *   o  The first MSI-X vector is associated with the first
 *      status block.
 *   o  The second RX ring and the second TX ring use the second
 *      status block.
 *   o  The second MSI-X vector is associated with the second
 *      status block.
 *   ... and so forth.
 * - Status blocks must reside in physically contiguous memory
 *   and each status block consumes 128 bytes.  In addition, the
 *   memory for the status blocks is aligned on a 128-byte boundary
 *   in this driver.  (see bce_dma_alloc() and HC_CONFIG)
 * - Each status block has its own coalescing parameters, which also
 *   serve as the related MSI-X vector's interrupt moderation
 *   parameters.  (see bce_coal_change())
 */
#include "opt_bce.h"
#include "opt_ifpoll.h"

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/interrupt.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/queue.h>
#include <sys/rman.h>
#include <sys/serialize.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>

#include <netinet/ip.h>
#include <netinet/tcp.h>

#include <net/bpf.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_poll.h>
#include <net/if_types.h>
#include <net/ifq_var.h>
#include <net/toeplitz.h>
#include <net/toeplitz2.h>
#include <net/vlan/if_vlan_var.h>
#include <net/vlan/if_vlan_ether.h>

#include <dev/netif/mii_layer/mii.h>
#include <dev/netif/mii_layer/miivar.h>
#include <dev/netif/mii_layer/brgphyreg.h>

#include <bus/pci/pcireg.h>
#include <bus/pci/pcivar.h>

#include "miibus_if.h"

#include <dev/netif/bce/if_bcereg.h>
#include <dev/netif/bce/if_bcefw.h>
#define BCE_MSI_CKINTVL		((10 * hz) / 1000)	/* 10ms */

#ifdef BCE_RSS_DEBUG
#define BCE_RSS_DPRINTF(sc, lvl, fmt, ...) \
do { \
	if (sc->rss_debug >= lvl) \
		if_printf(&sc->arpcom.ac_if, fmt, __VA_ARGS__); \
} while (0)
#else	/* !BCE_RSS_DEBUG */
#define BCE_RSS_DPRINTF(sc, lvl, fmt, ...)	((void)0)
#endif	/* BCE_RSS_DEBUG */
/****************************************************************************/
/* PCI Device ID Table                                                      */
/*                                                                          */
/* Used by bce_probe() to identify the devices supported by this driver.   */
/****************************************************************************/
#define BCE_DEVDESC_MAX		64
static struct bce_type bce_devs[] = {
	/* BCM5706C Controllers and OEM boards. */
	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5706, HP_VENDORID, 0x3101,
		"HP NC370T Multifunction Gigabit Server Adapter" },
	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5706, HP_VENDORID, 0x3106,
		"HP NC370i Multifunction Gigabit Server Adapter" },
	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5706, HP_VENDORID, 0x3070,
		"HP NC380T PCIe DP Multifunc Gig Server Adapter" },
	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5706, HP_VENDORID, 0x1709,
		"HP NC371i Multifunction Gigabit Server Adapter" },
	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5706, PCI_ANY_ID, PCI_ANY_ID,
		"Broadcom NetXtreme II BCM5706 1000Base-T" },

	/* BCM5706S controllers and OEM boards. */
	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5706S, HP_VENDORID, 0x3102,
		"HP NC370F Multifunction Gigabit Server Adapter" },
	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5706S, PCI_ANY_ID, PCI_ANY_ID,
		"Broadcom NetXtreme II BCM5706 1000Base-SX" },

	/* BCM5708C controllers and OEM boards. */
	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5708, HP_VENDORID, 0x7037,
		"HP NC373T PCIe Multifunction Gig Server Adapter" },
	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5708, HP_VENDORID, 0x7038,
		"HP NC373i Multifunction Gigabit Server Adapter" },
	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5708, HP_VENDORID, 0x7045,
		"HP NC374m PCIe Multifunction Adapter" },
	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5708, PCI_ANY_ID, PCI_ANY_ID,
		"Broadcom NetXtreme II BCM5708 1000Base-T" },

	/* BCM5708S controllers and OEM boards. */
	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5708S, HP_VENDORID, 0x1706,
		"HP NC373m Multifunction Gigabit Server Adapter" },
	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5708S, HP_VENDORID, 0x703b,
		"HP NC373i Multifunction Gigabit Server Adapter" },
	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5708S, HP_VENDORID, 0x703d,
		"HP NC373F PCIe Multifunc Giga Server Adapter" },
	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5708S, PCI_ANY_ID, PCI_ANY_ID,
		"Broadcom NetXtreme II BCM5708S 1000Base-SX" },

	/* BCM5709C controllers and OEM boards. */
	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5709, HP_VENDORID, 0x7055,
		"HP NC382i DP Multifunction Gigabit Server Adapter" },
	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5709, HP_VENDORID, 0x7059,
		"HP NC382T PCIe DP Multifunction Gigabit Server Adapter" },
	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5709, PCI_ANY_ID, PCI_ANY_ID,
		"Broadcom NetXtreme II BCM5709 1000Base-T" },

	/* BCM5709S controllers and OEM boards. */
	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5709S, HP_VENDORID, 0x171d,
		"HP NC382m DP 1GbE Multifunction BL-c Adapter" },
	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5709S, HP_VENDORID, 0x7056,
		"HP NC382i DP Multifunction Gigabit Server Adapter" },
	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5709S, PCI_ANY_ID, PCI_ANY_ID,
		"Broadcom NetXtreme II BCM5709 1000Base-SX" },

	/* BCM5716 controllers and OEM boards. */
	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5716, PCI_ANY_ID, PCI_ANY_ID,
		"Broadcom NetXtreme II BCM5716 1000Base-T" },

	{ 0, 0, 0, 0, NULL }
};
/****************************************************************************/
/* Supported Flash NVRAM device data.                                       */
/****************************************************************************/
static const struct flash_spec flash_table[] =
{
#define BUFFERED_FLAGS		(BCE_NV_BUFFERED | BCE_NV_TRANSLATE)
#define NONBUFFERED_FLAGS	(BCE_NV_WREN)

	/* Slow EEPROM */
	{0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
	 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - slow"},
	/* Expansion entry 0001 */
	{0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0001"},
	/* Saifun SA25F010 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
	 "Non-buffered flash (128kB)"},
	/* Saifun SA25F020 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
	 "Non-buffered flash (256kB)"},
	/* Expansion entry 0100 */
	{0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0100"},
	/* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
	{0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
	 "Entry 0101: ST M45PE10 (128kB non-buffered)"},
	/* Entry 0110: ST M45PE20 (non-buffered flash) */
	{0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
	 "Entry 0110: ST M45PE20 (256kB non-buffered)"},
	/* Saifun SA25F005 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
	 "Non-buffered flash (64kB)"},
	/* Fast EEPROM */
	{0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
	 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - fast"},
	/* Expansion entry 1001 */
	{0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1001"},
	/* Expansion entry 1010 */
	{0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1010"},
	/* ATMEL AT45DB011B (buffered flash) */
	{0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
	 "Buffered flash (128kB)"},
	/* Expansion entry 1100 */
	{0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1100"},
	/* Expansion entry 1101 */
	{0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1101"},
	/* Atmel Expansion entry 1110 */
	{0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1110 (Atmel)"},
	/* ATMEL AT45DB021B (buffered flash) */
	{0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
	 "Buffered flash (256kB)"},
};
/*
 * The BCM5709 controllers transparently handle the
 * differences between Atmel 264 byte pages and all
 * flash devices which use 256 byte pages, so no
 * logical-to-physical mapping is required in the
 * driver.
 */
static struct flash_spec flash_5709 = {
	.flags		= BCE_NV_BUFFERED,
	.page_bits	= BCM5709_FLASH_PAGE_BITS,
	.page_size	= BCM5709_FLASH_PAGE_SIZE,
	.addr_mask	= BCM5709_FLASH_BYTE_ADDR_MASK,
	.total_size	= BUFFERED_FLASH_TOTAL_SIZE * 2,
	.name		= "5709/5716 buffered flash (256kB)",
};
/****************************************************************************/
/* DragonFly device entry points.                                           */
/****************************************************************************/
static int	bce_probe(device_t);
static int	bce_attach(device_t);
static int	bce_detach(device_t);
static void	bce_shutdown(device_t);
static int	bce_miibus_read_reg(device_t, int, int);
static int	bce_miibus_write_reg(device_t, int, int, int);
static void	bce_miibus_statchg(device_t);
/****************************************************************************/
/* BCE Register/Memory Access Routines                                      */
/****************************************************************************/
static uint32_t	bce_reg_rd_ind(struct bce_softc *, uint32_t);
static void	bce_reg_wr_ind(struct bce_softc *, uint32_t, uint32_t);
static void	bce_shmem_wr(struct bce_softc *, uint32_t, uint32_t);
static uint32_t	bce_shmem_rd(struct bce_softc *, uint32_t);
static void	bce_ctx_wr(struct bce_softc *, uint32_t, uint32_t, uint32_t);

/****************************************************************************/
/* BCE NVRAM Access Routines                                                */
/****************************************************************************/
static int	bce_acquire_nvram_lock(struct bce_softc *);
static int	bce_release_nvram_lock(struct bce_softc *);
static void	bce_enable_nvram_access(struct bce_softc *);
static void	bce_disable_nvram_access(struct bce_softc *);
static int	bce_nvram_read_dword(struct bce_softc *, uint32_t, uint8_t *,
		    uint32_t);
static int	bce_init_nvram(struct bce_softc *);
static int	bce_nvram_read(struct bce_softc *, uint32_t, uint8_t *, int);
static int	bce_nvram_test(struct bce_softc *);
/****************************************************************************/
/* BCE DMA Allocate/Free Routines                                           */
/****************************************************************************/
static int	bce_dma_alloc(struct bce_softc *);
static void	bce_dma_free(struct bce_softc *);
static void	bce_dma_map_addr(void *, bus_dma_segment_t *, int, int);

/****************************************************************************/
/* BCE Firmware Synchronization and Load                                    */
/****************************************************************************/
static int	bce_fw_sync(struct bce_softc *, uint32_t);
static void	bce_load_rv2p_fw(struct bce_softc *, uint32_t *,
		    uint32_t, uint32_t);
static void	bce_load_cpu_fw(struct bce_softc *, struct cpu_reg *,
		    struct fw_info *);
static void	bce_start_cpu(struct bce_softc *, struct cpu_reg *);
static void	bce_halt_cpu(struct bce_softc *, struct cpu_reg *);
static void	bce_start_rxp_cpu(struct bce_softc *);
static void	bce_init_rxp_cpu(struct bce_softc *);
static void	bce_init_txp_cpu(struct bce_softc *);
static void	bce_init_tpat_cpu(struct bce_softc *);
static void	bce_init_cp_cpu(struct bce_softc *);
static void	bce_init_com_cpu(struct bce_softc *);
static void	bce_init_cpus(struct bce_softc *);
static void	bce_setup_msix_table(struct bce_softc *);
static void	bce_init_rss(struct bce_softc *);

static void	bce_stop(struct bce_softc *);
static int	bce_reset(struct bce_softc *, uint32_t);
static int	bce_chipinit(struct bce_softc *);
static int	bce_blockinit(struct bce_softc *);
static void	bce_probe_pci_caps(struct bce_softc *);
static void	bce_print_adapter_info(struct bce_softc *);
static void	bce_get_media(struct bce_softc *);
static void	bce_mgmt_init(struct bce_softc *);
static int	bce_init_ctx(struct bce_softc *);
static void	bce_get_mac_addr(struct bce_softc *);
static void	bce_set_mac_addr(struct bce_softc *);
static void	bce_set_rx_mode(struct bce_softc *);
static void	bce_coal_change(struct bce_softc *);
static void	bce_npoll_coal_change(struct bce_softc *);
static void	bce_setup_serialize(struct bce_softc *);
static void	bce_serialize_skipmain(struct bce_softc *);
static void	bce_deserialize_skipmain(struct bce_softc *);
static void	bce_set_timer_cpuid(struct bce_softc *, boolean_t);
static int	bce_alloc_intr(struct bce_softc *);
static void	bce_free_intr(struct bce_softc *);
static void	bce_try_alloc_msix(struct bce_softc *);
static void	bce_free_msix(struct bce_softc *, boolean_t);
static void	bce_setup_ring_cnt(struct bce_softc *);
static int	bce_setup_intr(struct bce_softc *);
static void	bce_teardown_intr(struct bce_softc *);
static int	bce_setup_msix(struct bce_softc *);
static void	bce_teardown_msix(struct bce_softc *, int);
static int	bce_create_tx_ring(struct bce_tx_ring *);
static void	bce_destroy_tx_ring(struct bce_tx_ring *);
static void	bce_init_tx_context(struct bce_tx_ring *);
static int	bce_init_tx_chain(struct bce_tx_ring *);
static void	bce_free_tx_chain(struct bce_tx_ring *);
static void	bce_xmit(struct bce_tx_ring *);
static int	bce_encap(struct bce_tx_ring *, struct mbuf **, int *);
static int	bce_tso_setup(struct bce_tx_ring *, struct mbuf **,
		    uint16_t *, uint16_t *);

static int	bce_create_rx_ring(struct bce_rx_ring *);
static void	bce_destroy_rx_ring(struct bce_rx_ring *);
static void	bce_init_rx_context(struct bce_rx_ring *);
static int	bce_init_rx_chain(struct bce_rx_ring *);
static void	bce_free_rx_chain(struct bce_rx_ring *);
static int	bce_newbuf_std(struct bce_rx_ring *, uint16_t *, uint16_t,
		    uint32_t *, int);
static void	bce_setup_rxdesc_std(struct bce_rx_ring *, uint16_t,
		    uint32_t *);
static struct pktinfo *bce_rss_pktinfo(struct pktinfo *, uint32_t,
		    const struct l2_fhdr *);
static void	bce_start(struct ifnet *, struct ifaltq_subque *);
static int	bce_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
static void	bce_watchdog(struct ifaltq_subque *);
static int	bce_ifmedia_upd(struct ifnet *);
static void	bce_ifmedia_sts(struct ifnet *, struct ifmediareq *);
static void	bce_init(void *);
#ifdef IFPOLL_ENABLE
static void	bce_npoll(struct ifnet *, struct ifpoll_info *);
static void	bce_npoll_rx(struct ifnet *, void *, int);
static void	bce_npoll_tx(struct ifnet *, void *, int);
static void	bce_npoll_status(struct ifnet *);
static void	bce_npoll_rx_pack(struct ifnet *, void *, int);
#endif
static void	bce_serialize(struct ifnet *, enum ifnet_serialize);
static void	bce_deserialize(struct ifnet *, enum ifnet_serialize);
static int	bce_tryserialize(struct ifnet *, enum ifnet_serialize);
#ifdef INVARIANTS
static void	bce_serialize_assert(struct ifnet *, enum ifnet_serialize,
		    boolean_t);
#endif
static void	bce_intr(struct bce_softc *);
static void	bce_intr_legacy(void *);
static void	bce_intr_msi(void *);
static void	bce_intr_msi_oneshot(void *);
static void	bce_intr_msix_rxtx(void *);
static void	bce_intr_msix_rx(void *);
static void	bce_tx_intr(struct bce_tx_ring *, uint16_t);
static void	bce_rx_intr(struct bce_rx_ring *, int, uint16_t);
static void	bce_phy_intr(struct bce_softc *);
static void	bce_disable_intr(struct bce_softc *);
static void	bce_enable_intr(struct bce_softc *);
static void	bce_reenable_intr(struct bce_rx_ring *);
static void	bce_check_msi(void *);

static void	bce_stats_update(struct bce_softc *);
static void	bce_tick(void *);
static void	bce_tick_serialized(struct bce_softc *);
static void	bce_pulse(void *);

static void	bce_add_sysctls(struct bce_softc *);
static int	bce_sysctl_tx_bds_int(SYSCTL_HANDLER_ARGS);
static int	bce_sysctl_tx_bds(SYSCTL_HANDLER_ARGS);
static int	bce_sysctl_tx_ticks_int(SYSCTL_HANDLER_ARGS);
static int	bce_sysctl_tx_ticks(SYSCTL_HANDLER_ARGS);
static int	bce_sysctl_rx_bds_int(SYSCTL_HANDLER_ARGS);
static int	bce_sysctl_rx_bds(SYSCTL_HANDLER_ARGS);
static int	bce_sysctl_rx_ticks_int(SYSCTL_HANDLER_ARGS);
static int	bce_sysctl_rx_ticks(SYSCTL_HANDLER_ARGS);
#ifdef IFPOLL_ENABLE
static int	bce_sysctl_npoll_offset(SYSCTL_HANDLER_ARGS);
#endif
static int	bce_sysctl_coal_change(SYSCTL_HANDLER_ARGS,
		    uint32_t *, uint32_t);
/*
 * NOTE:
 * Don't set bce_tx_ticks_int/bce_tx_ticks to 1023.  Linux's bnx2
 * takes 1023 as the TX ticks limit.  However, using 1023 will
 * cause the 5708 (B2) to generate extra interrupts (~2000/s) even
 * when there is _no_ network activity on the NIC.
 */
static uint32_t	bce_tx_bds_int = 255;		/* bcm: 20 */
static uint32_t	bce_tx_bds = 255;		/* bcm: 20 */
static uint32_t	bce_tx_ticks_int = 1022;	/* bcm: 80 */
static uint32_t	bce_tx_ticks = 1022;		/* bcm: 80 */
static uint32_t	bce_rx_bds_int = 128;		/* bcm: 6 */
static uint32_t	bce_rx_bds = 0;			/* bcm: 6 */
static uint32_t	bce_rx_ticks_int = 150;		/* bcm: 18 */
static uint32_t	bce_rx_ticks = 150;		/* bcm: 18 */
static int	bce_tx_wreg = 8;

static int	bce_msi_enable = 1;
static int	bce_msix_enable = 1;

static int	bce_rx_pages = RX_PAGES_DEFAULT;
static int	bce_tx_pages = TX_PAGES_DEFAULT;

static int	bce_rx_rings = 0;	/* auto */
static int	bce_tx_rings = 0;	/* auto */

TUNABLE_INT("hw.bce.tx_bds_int", &bce_tx_bds_int);
TUNABLE_INT("hw.bce.tx_bds", &bce_tx_bds);
TUNABLE_INT("hw.bce.tx_ticks_int", &bce_tx_ticks_int);
TUNABLE_INT("hw.bce.tx_ticks", &bce_tx_ticks);
TUNABLE_INT("hw.bce.rx_bds_int", &bce_rx_bds_int);
TUNABLE_INT("hw.bce.rx_bds", &bce_rx_bds);
TUNABLE_INT("hw.bce.rx_ticks_int", &bce_rx_ticks_int);
TUNABLE_INT("hw.bce.rx_ticks", &bce_rx_ticks);
TUNABLE_INT("hw.bce.msi.enable", &bce_msi_enable);
TUNABLE_INT("hw.bce.msix.enable", &bce_msix_enable);
TUNABLE_INT("hw.bce.rx_pages", &bce_rx_pages);
TUNABLE_INT("hw.bce.tx_pages", &bce_tx_pages);
TUNABLE_INT("hw.bce.tx_wreg", &bce_tx_wreg);
TUNABLE_INT("hw.bce.tx_rings", &bce_tx_rings);
TUNABLE_INT("hw.bce.rx_rings", &bce_rx_rings);
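
/*
 * The tunables above are read from the kernel environment once at
 * module load time, e.g. via /boot/loader.conf (the values below are
 * only illustrative):
 *
 *	hw.bce.msix.enable="0"	# fall back to MSI/legacy interrupts
 *	hw.bce.rx_rings="2"	# request two RX rings instead of auto
 *
 * The coalescing parameters can also be adjusted at runtime through
 * the corresponding sysctl handlers (see bce_add_sysctls()).
 */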
/****************************************************************************/
/* DragonFly device dispatch table.                                         */
/****************************************************************************/
static device_method_t bce_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		bce_probe),
	DEVMETHOD(device_attach,	bce_attach),
	DEVMETHOD(device_detach,	bce_detach),
	DEVMETHOD(device_shutdown,	bce_shutdown),

	/* bus interface */
	DEVMETHOD(bus_print_child,	bus_generic_print_child),
	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),

	/* MII interface */
	DEVMETHOD(miibus_readreg,	bce_miibus_read_reg),
	DEVMETHOD(miibus_writereg,	bce_miibus_write_reg),
	DEVMETHOD(miibus_statchg,	bce_miibus_statchg),

	DEVMETHOD_END
};

static driver_t bce_driver = {
	"bce",
	bce_methods,
	sizeof(struct bce_softc)
};

static devclass_t bce_devclass;

DECLARE_DUMMY_MODULE(if_bce);
MODULE_DEPEND(bce, miibus, 1, 1, 1);
DRIVER_MODULE(if_bce, pci, bce_driver, bce_devclass, NULL, NULL);
DRIVER_MODULE(miibus, bce, miibus_driver, miibus_devclass, NULL, NULL);
/****************************************************************************/
/* Device probe function.                                                   */
/*                                                                          */
/* Compares the device to the driver's list of supported devices and       */
/* reports back to the OS whether this is the right driver for the device. */
/*                                                                          */
/* Returns:                                                                 */
/*   BUS_PROBE_DEFAULT on success, positive value on failure.              */
/****************************************************************************/
static int
bce_probe(device_t dev)
{
	struct bce_type *t;
	uint16_t vid, did, svid, sdid;

	/* Get the data for the device to be probed. */
	vid  = pci_get_vendor(dev);
	did  = pci_get_device(dev);
	svid = pci_get_subvendor(dev);
	sdid = pci_get_subdevice(dev);

	/* Look through the list of known devices for a match. */
	for (t = bce_devs; t->bce_name != NULL; ++t) {
		if (vid == t->bce_vid && did == t->bce_did &&
		    (svid == t->bce_svid || t->bce_svid == PCI_ANY_ID) &&
		    (sdid == t->bce_sdid || t->bce_sdid == PCI_ANY_ID)) {
			uint32_t revid = pci_read_config(dev, PCIR_REVID, 4);
			char *descbuf;

			descbuf = kmalloc(BCE_DEVDESC_MAX, M_TEMP, M_WAITOK);

			/* Print out the device identity. */
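			/*
			 * The high nibble of the PCI revision ID selects
			 * the revision letter and the low nibble the metal
			 * stepping, e.g. revid 0x12 yields "B2".
			 */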
			ksnprintf(descbuf, BCE_DEVDESC_MAX, "%s (%c%d)",
			    t->bce_name,
			    ((revid & 0xf0) >> 4) + 'A', revid & 0xf);

			device_set_desc_copy(dev, descbuf);
			kfree(descbuf, M_TEMP);
			return 0;
		}
	}
	return ENXIO;
}
/****************************************************************************/
/* Adapter information print function.                                      */
/*                                                                          */
/* Prints the adapter's ASIC revision, bus type/speed, bootcode version    */
/* and device features.                                                     */
/*                                                                          */
/* Returns:                                                                 */
/*   None.                                                                  */
/****************************************************************************/
static void
bce_print_adapter_info(struct bce_softc *sc)
{
	device_printf(sc->bce_dev, "ASIC (0x%08X); ", sc->bce_chipid);

	kprintf("Rev (%c%d); ", ((BCE_CHIP_ID(sc) & 0xf000) >> 12) + 'A',
	    ((BCE_CHIP_ID(sc) & 0x0ff0) >> 4));

	/* Bus info. */
	if (sc->bce_flags & BCE_PCIE_FLAG) {
		kprintf("Bus (PCIe x%d, ", sc->link_width);
		switch (sc->link_speed) {
		case 1:
			kprintf("2.5Gbps); ");
			break;
		case 2:
			kprintf("5Gbps); ");
			break;
		default:
			kprintf("Unknown link speed); ");
			break;
		}
	} else {
		kprintf("Bus (PCI%s, %s, %dMHz); ",
		    ((sc->bce_flags & BCE_PCIX_FLAG) ? "-X" : ""),
		    ((sc->bce_flags & BCE_PCI_32BIT_FLAG) ? "32-bit" : "64-bit"),
		    sc->bus_speed_mhz);
	}

	/* Firmware version and device features. */
	kprintf("B/C (%s)", sc->bce_bc_ver);

	if ((sc->bce_flags & BCE_MFW_ENABLE_FLAG) ||
	    (sc->bce_phy_flags & BCE_PHY_2_5G_CAPABLE_FLAG)) {
		kprintf("; Flags(");
		if (sc->bce_flags & BCE_MFW_ENABLE_FLAG)
			kprintf("MFW[%s]", sc->bce_mfw_ver);
		if (sc->bce_phy_flags & BCE_PHY_2_5G_CAPABLE_FLAG)
			kprintf(" 2.5G");
		kprintf(")");
	}
	kprintf("\n");
}
/****************************************************************************/
/* PCI Capabilities Probe Function.                                         */
/*                                                                          */
/* Walks the PCI capabilities list for the device to find what features    */
/* are supported.                                                           */
/*                                                                          */
/* Returns:                                                                 */
/*   None.                                                                  */
/****************************************************************************/
static void
bce_probe_pci_caps(struct bce_softc *sc)
{
	device_t dev = sc->bce_dev;
	uint8_t ptr;

	if (pci_is_pcix(dev))
		sc->bce_cap_flags |= BCE_PCIX_CAPABLE_FLAG;

	ptr = pci_get_pciecap_ptr(dev);
	if (ptr) {
		uint16_t link_status = pci_read_config(dev, ptr + 0x12, 2);

		sc->link_speed = link_status & 0xf;
		sc->link_width = (link_status >> 4) & 0x3f;
		sc->bce_cap_flags |= BCE_PCIE_CAPABLE_FLAG;
		sc->bce_flags |= BCE_PCIE_FLAG;
	}
}
/****************************************************************************/
/* Device attach function.                                                  */
/*                                                                          */
/* Allocates device resources, performs secondary chip identification,     */
/* resets and initializes the hardware, and initializes driver instance    */
/* variables.                                                               */
/*                                                                          */
/* Returns:                                                                 */
/*   0 on success, positive value on failure.                              */
/****************************************************************************/
static int
bce_attach(device_t dev)
{
	struct bce_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t val;
	int rid, rc = 0;
	int i, j;
	struct mii_probe_args mii_args;
	uintptr_t mii_priv = 0;
#ifdef IFPOLL_ENABLE
	int offset, offset_def;
#endif

	sc->bce_dev = dev;
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));

	lwkt_serialize_init(&sc->main_serialize);
	for (i = 0; i < BCE_MSIX_MAX; ++i) {
		struct bce_msix_data *msix = &sc->bce_msix[i];

		msix->msix_cpuid = -1;
		msix->msix_rid = -1;
	}

	pci_enable_busmaster(dev);

	bce_probe_pci_caps(sc);

	/* Allocate PCI memory resources. */
	rid = PCIR_BAR(0);
	sc->bce_res_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
	    RF_ACTIVE | PCI_RF_DENSE);
	if (sc->bce_res_mem == NULL) {
		device_printf(dev, "PCI memory allocation failed\n");
		return ENXIO;
	}
	sc->bce_btag = rman_get_bustag(sc->bce_res_mem);
	sc->bce_bhandle = rman_get_bushandle(sc->bce_res_mem);

	/*
	 * Configure byte swap and enable indirect register access.
	 * Rely on CPU to do target byte swapping on big endian systems.
	 * Access to registers outside of PCI configuration space is not
	 * valid until this is done.
	 */
	pci_write_config(dev, BCE_PCICFG_MISC_CONFIG,
	    BCE_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
	    BCE_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP, 4);

	/* Save ASIC revision info. */
	sc->bce_chipid = REG_RD(sc, BCE_MISC_ID);

	/* Weed out any non-production controller revisions. */
	switch (BCE_CHIP_ID(sc)) {
	case BCE_CHIP_ID_5706_A0:
	case BCE_CHIP_ID_5706_A1:
	case BCE_CHIP_ID_5708_A0:
	case BCE_CHIP_ID_5708_B0:
	case BCE_CHIP_ID_5709_A0:
	case BCE_CHIP_ID_5709_B0:
	case BCE_CHIP_ID_5709_B1:
#ifdef foo
	/* 5709C B2 seems to work fine */
	case BCE_CHIP_ID_5709_B2:
#endif
		device_printf(dev, "Unsupported chip id 0x%08x!\n",
		    BCE_CHIP_ID(sc));
		rc = ENODEV;
		goto fail;
	}

	mii_priv |= BRGPHY_FLAG_WIRESPEED;
	if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) {
		if (BCE_CHIP_REV(sc) == BCE_CHIP_REV_Ax ||
		    BCE_CHIP_REV(sc) == BCE_CHIP_REV_Bx)
			mii_priv |= BRGPHY_FLAG_NO_EARLYDAC;
	} else {
		mii_priv |= BRGPHY_FLAG_BER_BUG;
	}

	/*
	 * Find the base address for shared memory access.
	 * Newer versions of bootcode use a signature and offset
	 * while older versions use a fixed address.
	 */
	val = REG_RD_IND(sc, BCE_SHM_HDR_SIGNATURE);
	if ((val & BCE_SHM_HDR_SIGNATURE_SIG_MASK) ==
	    BCE_SHM_HDR_SIGNATURE_SIG) {
		/* Multi-port devices use different offsets in shared memory. */
		sc->bce_shmem_base = REG_RD_IND(sc,
		    BCE_SHM_HDR_ADDR_0 + (pci_get_function(sc->bce_dev) << 2));
	} else {
		sc->bce_shmem_base = HOST_VIEW_SHMEM_BASE;
	}

	/* Fetch the bootcode revision. */
	val = bce_shmem_rd(sc, BCE_DEV_INFO_BC_REV);
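	/*
	 * The three version fields are packed into the upper three
	 * bytes of the word read above; convert each byte to decimal
	 * ASCII (suppressing leading zeros) and separate the fields
	 * with dots, e.g. 0x01020300 yields "1.2.3".
	 */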
	for (i = 0, j = 0; i < 3; i++) {
		uint8_t num;
		int k, skip0;

		num = (uint8_t)(val >> (24 - (i * 8)));
		for (k = 100, skip0 = 1; k >= 1; num %= k, k /= 10) {
			if (num >= k || !skip0 || k == 1) {
				sc->bce_bc_ver[j++] = (num / k) + '0';
				skip0 = 0;
			}
		}
		if (i != 2)
			sc->bce_bc_ver[j++] = '.';
	}
	/* Check if any management firmware is running. */
	val = bce_shmem_rd(sc, BCE_PORT_FEATURE);
	if (val & BCE_PORT_FEATURE_ASF_ENABLED) {
		sc->bce_flags |= BCE_MFW_ENABLE_FLAG;

		/* Allow time for firmware to enter the running state. */
		for (i = 0; i < 30; i++) {
			val = bce_shmem_rd(sc, BCE_BC_STATE_CONDITION);
			if (val & BCE_CONDITION_MFW_RUN_MASK)
				break;
			DELAY(10000);
		}
	}

	/* Check the current bootcode state. */
	val = bce_shmem_rd(sc, BCE_BC_STATE_CONDITION) &
	    BCE_CONDITION_MFW_RUN_MASK;
	if (val != BCE_CONDITION_MFW_RUN_UNKNOWN &&
	    val != BCE_CONDITION_MFW_RUN_NONE) {
		uint32_t addr = bce_shmem_rd(sc, BCE_MFW_VER_PTR);
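		/*
		 * The management firmware version is stored as ASCII
		 * text in shared memory; swap each word to host order
		 * so the string assembles correctly byte by byte.
		 */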
		for (i = 0, j = 0; j < 3; j++) {
			val = bce_reg_rd_ind(sc, addr + j * 4);
			val = bswap32(val);
			memcpy(&sc->bce_mfw_ver[i], &val, 4);
			i += 4;
		}
	}
	/* Get PCI bus information (speed and type). */
	val = REG_RD(sc, BCE_PCICFG_MISC_STATUS);
	if (val & BCE_PCICFG_MISC_STATUS_PCIX_DET) {
		uint32_t clkreg;

		sc->bce_flags |= BCE_PCIX_FLAG;

		clkreg = REG_RD(sc, BCE_PCICFG_PCI_CLOCK_CONTROL_BITS) &
		    BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
		switch (clkreg) {
		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
			sc->bus_speed_mhz = 133;
			break;

		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
			sc->bus_speed_mhz = 100;
			break;

		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
			sc->bus_speed_mhz = 66;
			break;

		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
			sc->bus_speed_mhz = 50;
			break;

		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
			sc->bus_speed_mhz = 33;
			break;
		}
	} else {
		if (val & BCE_PCICFG_MISC_STATUS_M66EN)
			sc->bus_speed_mhz = 66;
		else
			sc->bus_speed_mhz = 33;
	}

	if (val & BCE_PCICFG_MISC_STATUS_32BIT_DET)
		sc->bce_flags |= BCE_PCI_32BIT_FLAG;
	/* Reset the controller. */
	rc = bce_reset(sc, BCE_DRV_MSG_CODE_RESET);
	if (rc != 0)
		goto fail;

	/* Initialize the controller. */
	rc = bce_chipinit(sc);
	if (rc != 0) {
		device_printf(dev, "Controller initialization failed!\n");
		goto fail;
	}

	/* Perform NVRAM test. */
	rc = bce_nvram_test(sc);
	if (rc != 0) {
		device_printf(dev, "NVRAM test failed!\n");
		goto fail;
	}

	/* Fetch the permanent Ethernet MAC address. */
	bce_get_mac_addr(sc);

	/*
	 * Trip points control how many BDs should be ready before
	 * generating an interrupt while ticks control how long a BD
	 * can sit in the chain before generating an interrupt.  Set
	 * the default values for the RX and TX rings.
	 */
#ifdef BCE_DEBUG
	/* Force more frequent interrupts. */
	sc->bce_tx_quick_cons_trip_int = 1;
	sc->bce_tx_quick_cons_trip = 1;
	sc->bce_tx_ticks_int = 0;
	sc->bce_tx_ticks = 0;

	sc->bce_rx_quick_cons_trip_int = 1;
	sc->bce_rx_quick_cons_trip = 1;
	sc->bce_rx_ticks_int = 0;
	sc->bce_rx_ticks = 0;
#else
	sc->bce_tx_quick_cons_trip_int = bce_tx_bds_int;
	sc->bce_tx_quick_cons_trip = bce_tx_bds;
	sc->bce_tx_ticks_int = bce_tx_ticks_int;
	sc->bce_tx_ticks = bce_tx_ticks;

	sc->bce_rx_quick_cons_trip_int = bce_rx_bds_int;
	sc->bce_rx_quick_cons_trip = bce_rx_bds;
	sc->bce_rx_ticks_int = bce_rx_ticks_int;
	sc->bce_rx_ticks = bce_rx_ticks;
#endif

	/* Update statistics once every second. */
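	/*
	 * The ticks value is in microseconds; the low 8 bits of the
	 * register are not used, hence the 0xffff00 mask (1,000,000us
	 * between statistics updates).
	 */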
	sc->bce_stats_ticks = 1000000 & 0xffff00;

	/* Find the media type for the adapter. */
	bce_get_media(sc);

	/* Find out RX/TX ring count */
	bce_setup_ring_cnt(sc);

	/* Allocate DMA memory resources. */
	rc = bce_dma_alloc(sc);
	if (rc != 0) {
		device_printf(dev, "DMA resource allocation failed!\n");
		goto fail;
	}

#ifdef IFPOLL_ENABLE
	/*
	 * NPOLLING RX/TX CPU offset
	 */
	if (sc->rx_ring_cnt2 == ncpus2) {
		offset = 0;
	} else {
		offset_def = (sc->rx_ring_cnt2 * device_get_unit(dev)) % ncpus2;
		offset = device_getenv_int(dev, "npoll.offset", offset_def);
		if (offset >= ncpus2 ||
		    offset % sc->rx_ring_cnt2 != 0) {
			device_printf(dev, "invalid npoll.offset %d, use %d\n",
			    offset, offset_def);
			offset = offset_def;
		}
	}
	sc->npoll_ofs = offset;
#endif
	/* Allocate PCI IRQ resources. */
	rc = bce_alloc_intr(sc);
	if (rc != 0)
		goto fail;

	/* Setup serializer */
	bce_setup_serialize(sc);

	/* Initialize the ifnet interface. */
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = bce_ioctl;
	ifp->if_start = bce_start;
	ifp->if_init = bce_init;
	ifp->if_serialize = bce_serialize;
	ifp->if_deserialize = bce_deserialize;
	ifp->if_tryserialize = bce_tryserialize;
#ifdef INVARIANTS
	ifp->if_serialize_assert = bce_serialize_assert;
#endif
#ifdef IFPOLL_ENABLE
	ifp->if_npoll = bce_npoll;
#endif

	ifp->if_mtu = ETHERMTU;
	ifp->if_hwassist = BCE_CSUM_FEATURES | CSUM_TSO;
	ifp->if_capabilities = BCE_IF_CAPABILITIES;
	if (sc->rx_ring_cnt > 1)
		ifp->if_capabilities |= IFCAP_RSS;
	ifp->if_capenable = ifp->if_capabilities;

	if (sc->bce_phy_flags & BCE_PHY_2_5G_CAPABLE_FLAG)
		ifp->if_baudrate = IF_Gbps(2.5);
	else
		ifp->if_baudrate = IF_Gbps(1);

	ifp->if_nmbclusters = sc->rx_ring_cnt * USABLE_RX_BD(&sc->rx_rings[0]);

	ifq_set_maxlen(&ifp->if_snd, USABLE_TX_BD(&sc->tx_rings[0]));
	ifq_set_ready(&ifp->if_snd);
	ifq_set_subq_cnt(&ifp->if_snd, sc->tx_ring_cnt);

	if (sc->tx_ring_cnt > 1) {
		ifp->if_mapsubq = ifq_mapsubq_mask;
		ifq_set_subq_mask(&ifp->if_snd, sc->tx_ring_cnt - 1);
	}

	/*
	 * Look for our PHY.
	 */
	mii_probe_args_init(&mii_args, bce_ifmedia_upd, bce_ifmedia_sts);
	mii_args.mii_probemask = 1 << sc->bce_phy_addr;
	mii_args.mii_privtag = MII_PRIVTAG_BRGPHY;
	mii_args.mii_priv = mii_priv;

	rc = mii_probe(dev, &sc->bce_miibus, &mii_args);
	if (rc != 0) {
		device_printf(dev, "PHY probe failed!\n");
		goto fail;
	}

	/* Attach to the Ethernet interface list. */
	ether_ifattach(ifp, sc->eaddr, NULL);

	/* Setup TX rings and subqueues */
	for (i = 0; i < sc->tx_ring_cnt; ++i) {
		struct ifaltq_subque *ifsq = ifq_get_subq(&ifp->if_snd, i);
		struct bce_tx_ring *txr = &sc->tx_rings[i];

		ifsq_set_cpuid(ifsq, sc->bce_msix[i].msix_cpuid);
		ifsq_set_priv(ifsq, txr);
		ifsq_set_hw_serialize(ifsq, &txr->tx_serialize);
		txr->ifsq = ifsq;

		ifsq_watchdog_init(&txr->tx_watchdog, ifsq, bce_watchdog);
	}

	callout_init_mp(&sc->bce_tick_callout);
	callout_init_mp(&sc->bce_pulse_callout);
	callout_init_mp(&sc->bce_ckmsi_callout);

	rc = bce_setup_intr(sc);
	if (rc != 0) {
		device_printf(dev, "Failed to setup IRQ!\n");
		ether_ifdetach(ifp);
		goto fail;
	}

	/* Set timer CPUID */
	bce_set_timer_cpuid(sc, FALSE);

	/* Add the supported sysctls to the kernel. */
	bce_add_sysctls(sc);

	/*
	 * The chip reset earlier notified the bootcode that
	 * a driver is present.  We now need to start our pulse
	 * routine so that the bootcode is reminded that we're
	 * still running.
	 */
	bce_pulse(sc);

	/* Get the firmware running so IPMI still works */
	bce_mgmt_init(sc);

	if (bootverbose)
		bce_print_adapter_info(sc);

	return 0;
fail:
	bce_detach(dev);
	return rc;
}
/****************************************************************************/
/* Device detach function.                                                  */
/*                                                                          */
/* Stops the controller, resets the controller, and releases resources.    */
/*                                                                          */
/* Returns:                                                                 */
/*   0 on success, positive value on failure.                              */
/****************************************************************************/
static int
bce_detach(device_t dev)
{
	struct bce_softc *sc = device_get_softc(dev);

	if (device_is_attached(dev)) {
		struct ifnet *ifp = &sc->arpcom.ac_if;
		uint32_t msg;

		ifnet_serialize_all(ifp);

		/* Stop and reset the controller. */
		callout_stop(&sc->bce_pulse_callout);
		bce_stop(sc);
		if (sc->bce_flags & BCE_NO_WOL_FLAG)
			msg = BCE_DRV_MSG_CODE_UNLOAD_LNK_DN;
		else
			msg = BCE_DRV_MSG_CODE_UNLOAD;
		bce_reset(sc, msg);

		bce_teardown_intr(sc);

		ifnet_deserialize_all(ifp);

		ether_ifdetach(ifp);
	}

	/* If we have a child device on the MII bus remove it too. */
	if (sc->bce_miibus)
		device_delete_child(dev, sc->bce_miibus);
	bus_generic_detach(dev);

	bce_free_intr(sc);

	if (sc->bce_res_mem != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(0),
		    sc->bce_res_mem);
	}

	bce_dma_free(sc);

	if (sc->serializes != NULL)
		kfree(sc->serializes, M_DEVBUF);

	return 0;
}
/****************************************************************************/
/* Device shutdown function.                                                */
/*                                                                          */
/* Stops and resets the controller.                                         */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing                                                                */
/****************************************************************************/
static void
bce_shutdown(device_t dev)
{
	struct bce_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t msg;

	ifnet_serialize_all(ifp);

	bce_stop(sc);
	if (sc->bce_flags & BCE_NO_WOL_FLAG)
		msg = BCE_DRV_MSG_CODE_UNLOAD_LNK_DN;
	else
		msg = BCE_DRV_MSG_CODE_UNLOAD;
	bce_reset(sc, msg);

	ifnet_deserialize_all(ifp);
}
/****************************************************************************/
/* Indirect register read.                                                  */
/*                                                                          */
/* Reads NetXtreme II registers using an index/data register pair in PCI   */
/* configuration space.  Using this mechanism avoids issues with posted    */
/* reads but is much slower than memory-mapped I/O.                        */
/*                                                                          */
/* Returns:                                                                 */
/*   The value of the register.                                             */
/****************************************************************************/
static uint32_t
bce_reg_rd_ind(struct bce_softc *sc, uint32_t offset)
{
	device_t dev = sc->bce_dev;

	pci_write_config(dev, BCE_PCICFG_REG_WINDOW_ADDRESS, offset, 4);
	return pci_read_config(dev, BCE_PCICFG_REG_WINDOW, 4);
}
/****************************************************************************/
/* Indirect register write.                                                 */
/*                                                                          */
/* Writes NetXtreme II registers using an index/data register pair in PCI  */
/* configuration space.  Using this mechanism avoids issues with posted    */
/* writes but is much slower than memory-mapped I/O.                       */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
static void
bce_reg_wr_ind(struct bce_softc *sc, uint32_t offset, uint32_t val)
{
	device_t dev = sc->bce_dev;

	pci_write_config(dev, BCE_PCICFG_REG_WINDOW_ADDRESS, offset, 4);
	pci_write_config(dev, BCE_PCICFG_REG_WINDOW, val, 4);
}
/****************************************************************************/
/* Shared memory write.                                                     */
/*                                                                          */
/* Writes NetXtreme II shared memory region.                                */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
static void
bce_shmem_wr(struct bce_softc *sc, uint32_t offset, uint32_t val)
{
	bce_reg_wr_ind(sc, sc->bce_shmem_base + offset, val);
}
/****************************************************************************/
/* Shared memory read.                                                      */
/*                                                                          */
/* Reads NetXtreme II shared memory region.                                 */
/*                                                                          */
/* Returns:                                                                 */
/*   The 32 bit value read.                                                 */
/****************************************************************************/
static uint32_t
bce_shmem_rd(struct bce_softc *sc, uint32_t offset)
{
	return bce_reg_rd_ind(sc, sc->bce_shmem_base + offset);
}
/****************************************************************************/
/* Context memory write.                                                    */
/*                                                                          */
/* The NetXtreme II controller uses context memory to track connection     */
/* information for L2 and higher network protocols.                        */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
static void
bce_ctx_wr(struct bce_softc *sc, uint32_t cid_addr, uint32_t ctx_offset,
    uint32_t ctx_val)
{
	uint32_t idx, offset = ctx_offset + cid_addr;
	uint32_t val, retry_cnt = 5;
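
	/*
	 * On the 5709/5716 context memory is written through a
	 * data/control register pair that must be polled until the
	 * WRITE_REQ bit clears; older controllers accept a direct
	 * address/data write.
	 */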
	if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 ||
	    BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) {
		REG_WR(sc, BCE_CTX_CTX_DATA, ctx_val);
		REG_WR(sc, BCE_CTX_CTX_CTRL, (offset | BCE_CTX_CTX_CTRL_WRITE_REQ));

		for (idx = 0; idx < retry_cnt; idx++) {
			val = REG_RD(sc, BCE_CTX_CTX_CTRL);
			if ((val & BCE_CTX_CTX_CTRL_WRITE_REQ) == 0)
				break;
			DELAY(5);
		}

		if (val & BCE_CTX_CTX_CTRL_WRITE_REQ) {
			device_printf(sc->bce_dev,
			    "Unable to write CTX memory: "
			    "cid_addr = 0x%08X, offset = 0x%08X!\n",
			    cid_addr, ctx_offset);
		}
	} else {
		REG_WR(sc, BCE_CTX_DATA_ADR, offset);
		REG_WR(sc, BCE_CTX_DATA, ctx_val);
	}
}
/****************************************************************************/
/* PHY register read.                                                       */
/*                                                                          */
/* Implements register reads on the MII bus.                                */
/*                                                                          */
/* Returns:                                                                 */
/*   The value of the register.                                             */
/****************************************************************************/
static int
bce_miibus_read_reg(device_t dev, int phy, int reg)
{
	struct bce_softc *sc = device_get_softc(dev);
	uint32_t val;
	int i;

	/* Make sure we are accessing the correct PHY address. */
	KASSERT(phy == sc->bce_phy_addr,
	    ("invalid phyno %d, should be %d\n", phy, sc->bce_phy_addr));
	if (sc->bce_phy_flags & BCE_PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val = REG_RD(sc, BCE_EMAC_MDIO_MODE);
		val &= ~BCE_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(sc, BCE_EMAC_MDIO_MODE, val);
		REG_RD(sc, BCE_EMAC_MDIO_MODE);

		DELAY(40);
	}

	val = BCE_MIPHY(phy) | BCE_MIREG(reg) |
	    BCE_EMAC_MDIO_COMM_COMMAND_READ | BCE_EMAC_MDIO_COMM_DISEXT |
	    BCE_EMAC_MDIO_COMM_START_BUSY;
	REG_WR(sc, BCE_EMAC_MDIO_COMM, val);

	for (i = 0; i < BCE_PHY_TIMEOUT; i++) {
		DELAY(10);

		val = REG_RD(sc, BCE_EMAC_MDIO_COMM);
		if (!(val & BCE_EMAC_MDIO_COMM_START_BUSY)) {
			DELAY(5);

			val = REG_RD(sc, BCE_EMAC_MDIO_COMM);
			val &= BCE_EMAC_MDIO_COMM_DATA;
			break;
		}
	}

	if (val & BCE_EMAC_MDIO_COMM_START_BUSY) {
		if_printf(&sc->arpcom.ac_if,
		    "Error: PHY read timeout! phy = %d, reg = 0x%04X\n",
		    phy, reg);
		val = 0x0;
	} else {
		val = REG_RD(sc, BCE_EMAC_MDIO_COMM);
	}

	if (sc->bce_phy_flags & BCE_PHY_INT_MODE_AUTO_POLLING_FLAG) {
		/*
		 * Use a separate variable here so the PHY data held
		 * in val is not clobbered before it is returned.
		 */
		uint32_t mode;

		mode = REG_RD(sc, BCE_EMAC_MDIO_MODE);
		mode |= BCE_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(sc, BCE_EMAC_MDIO_MODE, mode);
		REG_RD(sc, BCE_EMAC_MDIO_MODE);

		DELAY(40);
	}

	return (val & 0xffff);
}
/****************************************************************************/
/* PHY register write.                                                      */
/*                                                                          */
/* Implements register writes on the MII bus.                               */
/*                                                                          */
/* Returns:                                                                 */
/*   0 on success.                                                          */
/****************************************************************************/
static int
bce_miibus_write_reg(device_t dev, int phy, int reg, int val)
{
	struct bce_softc *sc = device_get_softc(dev);
	uint32_t val1;
	int i;

	/* Make sure we are accessing the correct PHY address. */
	KASSERT(phy == sc->bce_phy_addr,
	    ("invalid phyno %d, should be %d\n", phy, sc->bce_phy_addr));

	if (sc->bce_phy_flags & BCE_PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val1 = REG_RD(sc, BCE_EMAC_MDIO_MODE);
		val1 &= ~BCE_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(sc, BCE_EMAC_MDIO_MODE, val1);
		REG_RD(sc, BCE_EMAC_MDIO_MODE);

		DELAY(40);
	}

	val1 = BCE_MIPHY(phy) | BCE_MIREG(reg) | val |
	    BCE_EMAC_MDIO_COMM_COMMAND_WRITE |
	    BCE_EMAC_MDIO_COMM_START_BUSY | BCE_EMAC_MDIO_COMM_DISEXT;
	REG_WR(sc, BCE_EMAC_MDIO_COMM, val1);

	for (i = 0; i < BCE_PHY_TIMEOUT; i++) {
		DELAY(10);

		val1 = REG_RD(sc, BCE_EMAC_MDIO_COMM);
		if (!(val1 & BCE_EMAC_MDIO_COMM_START_BUSY)) {
			DELAY(5);
			break;
		}
	}

	if (val1 & BCE_EMAC_MDIO_COMM_START_BUSY)
		if_printf(&sc->arpcom.ac_if, "PHY write timeout!\n");

	if (sc->bce_phy_flags & BCE_PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val1 = REG_RD(sc, BCE_EMAC_MDIO_MODE);
		val1 |= BCE_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(sc, BCE_EMAC_MDIO_MODE, val1);
		REG_RD(sc, BCE_EMAC_MDIO_MODE);

		DELAY(40);
	}
	return 0;
}
/****************************************************************************/
/* MII bus status change.                                                   */
/*                                                                          */
/* Called by the MII bus driver when the PHY establishes link to set the   */
/* MAC interface registers.                                                 */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
static void
bce_miibus_statchg(device_t dev)
{
	struct bce_softc *sc = device_get_softc(dev);
	struct mii_data *mii = device_get_softc(sc->bce_miibus);

	BCE_CLRBIT(sc, BCE_EMAC_MODE, BCE_EMAC_MODE_PORT);

	/*
	 * Set MII or GMII interface based on the speed negotiated
	 * by the PHY.
	 */
	if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T ||
	    IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_SX) {
		BCE_SETBIT(sc, BCE_EMAC_MODE, BCE_EMAC_MODE_PORT_GMII);
	} else {
		BCE_SETBIT(sc, BCE_EMAC_MODE, BCE_EMAC_MODE_PORT_MII);
	}

	/*
	 * Set half or full duplex based on the duplex mode negotiated
	 * by the PHY.
	 */
	if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) {
		BCE_CLRBIT(sc, BCE_EMAC_MODE, BCE_EMAC_MODE_HALF_DUPLEX);
	} else {
		BCE_SETBIT(sc, BCE_EMAC_MODE, BCE_EMAC_MODE_HALF_DUPLEX);
	}
}
/****************************************************************************/
/* Acquire NVRAM lock.                                                      */
/*                                                                          */
/* Before the NVRAM can be accessed the caller must acquire an NVRAM lock. */
/* Lock 0 is reserved, lock 1 is used by firmware and lock 2 is for use    */
/* by the driver.                                                           */
/*                                                                          */
/* Returns:                                                                 */
/*   0 on success, positive value on failure.                              */
/****************************************************************************/
static int
bce_acquire_nvram_lock(struct bce_softc *sc)
{
	uint32_t val;
	int j;

	/* Request access to the flash interface. */
	REG_WR(sc, BCE_NVM_SW_ARB, BCE_NVM_SW_ARB_ARB_REQ_SET2);
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		val = REG_RD(sc, BCE_NVM_SW_ARB);
		if (val & BCE_NVM_SW_ARB_ARB_ARB2)
			break;

		DELAY(5);
	}

	if (j >= NVRAM_TIMEOUT_COUNT) {
		return EBUSY;
	}
	return 0;
}
/****************************************************************************/
/* Release NVRAM lock.                                                      */
/*                                                                          */
/* When the caller is finished accessing NVRAM the lock must be released.  */
/* Lock 0 is reserved, lock 1 is used by firmware and lock 2 is for use    */
/* by the driver.                                                           */
/*                                                                          */
/* Returns:                                                                 */
/*   0 on success, positive value on failure.                              */
/****************************************************************************/
static int
bce_release_nvram_lock(struct bce_softc *sc)
{
	int j;
	uint32_t val;

	/*
	 * Relinquish nvram interface.
	 */
	REG_WR(sc, BCE_NVM_SW_ARB, BCE_NVM_SW_ARB_ARB_REQ_CLR2);

	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		val = REG_RD(sc, BCE_NVM_SW_ARB);
		if (!(val & BCE_NVM_SW_ARB_ARB_ARB2))
			break;

		DELAY(5);
	}

	if (j >= NVRAM_TIMEOUT_COUNT) {
		return EBUSY;
	}
	return 0;
}
/****************************************************************************/
/* Enable NVRAM access.                                                     */
/*                                                                          */
/* Before accessing NVRAM for read or write operations the caller must     */
/* enable NVRAM access.                                                     */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
static void
bce_enable_nvram_access(struct bce_softc *sc)
{
	uint32_t val;

	val = REG_RD(sc, BCE_NVM_ACCESS_ENABLE);
	/* Enable both bits, even on read. */
	REG_WR(sc, BCE_NVM_ACCESS_ENABLE,
	    val | BCE_NVM_ACCESS_ENABLE_EN | BCE_NVM_ACCESS_ENABLE_WR_EN);
}
/****************************************************************************/
/* Disable NVRAM access.                                                    */
/*                                                                          */
/* When the caller is finished accessing NVRAM access must be disabled.    */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
static void
bce_disable_nvram_access(struct bce_softc *sc)
{
	uint32_t val;

	val = REG_RD(sc, BCE_NVM_ACCESS_ENABLE);

	/* Disable both bits, even after read. */
	REG_WR(sc, BCE_NVM_ACCESS_ENABLE,
	    val & ~(BCE_NVM_ACCESS_ENABLE_EN | BCE_NVM_ACCESS_ENABLE_WR_EN));
}
/****************************************************************************/
/* Read a dword (32 bits) from NVRAM.                                       */
/*                                                                          */
/* Read a 32 bit word from NVRAM.  The caller is assumed to have already   */
/* obtained the NVRAM lock and enabled the controller for NVRAM access.    */
/*                                                                          */
/* Returns:                                                                 */
/*   0 on success and the 32 bit value read, positive value on failure.    */
/****************************************************************************/
static int
bce_nvram_read_dword(struct bce_softc *sc, uint32_t offset, uint8_t *ret_val,
    uint32_t cmd_flags)
{
	uint32_t cmd;
	int i, rc = 0;

	/* Build the command word. */
	cmd = BCE_NVM_COMMAND_DOIT | cmd_flags;

	/* Calculate the offset for buffered flash. */
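	/*
	 * Translated (buffered) devices are addressed by page rather
	 * than by a flat byte offset: the linear offset is split into
	 * a page index, shifted into position by page_bits, plus the
	 * byte offset within that page.
	 */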
	if (sc->bce_flash_info->flags & BCE_NV_TRANSLATE) {
		offset = ((offset / sc->bce_flash_info->page_size) <<
		    sc->bce_flash_info->page_bits) +
		    (offset % sc->bce_flash_info->page_size);
	}

	/*
	 * Clear the DONE bit separately, set the address to read,
	 * and issue the read.
	 */
	REG_WR(sc, BCE_NVM_COMMAND, BCE_NVM_COMMAND_DONE);
	REG_WR(sc, BCE_NVM_ADDR, offset & BCE_NVM_ADDR_NVM_ADDR_VALUE);
	REG_WR(sc, BCE_NVM_COMMAND, cmd);

	/* Wait for completion. */
	for (i = 0; i < NVRAM_TIMEOUT_COUNT; i++) {
		uint32_t val;

		DELAY(5);

		val = REG_RD(sc, BCE_NVM_COMMAND);
		if (val & BCE_NVM_COMMAND_DONE) {
			val = REG_RD(sc, BCE_NVM_READ);

			val = be32toh(val);
			memcpy(ret_val, &val, 4);
			break;
		}
	}

	/* Check for errors. */
	if (i >= NVRAM_TIMEOUT_COUNT) {
		if_printf(&sc->arpcom.ac_if,
		    "Timeout error reading NVRAM at offset 0x%08X!\n",
		    offset);
		rc = EBUSY;
	}
	return rc;
}
/****************************************************************************/
/* Initialize NVRAM access.                                                 */
/*                                                                          */
/* Identify the NVRAM device in use and prepare the NVRAM interface to     */
/* access that device.                                                      */
/*                                                                          */
/* Returns:                                                                 */
/*   0 on success, positive value on failure.                              */
/****************************************************************************/
1608 static int
1609 bce_init_nvram(struct bce_softc *sc)
1611 uint32_t val;
1612 int j, entry_count, rc = 0;
1613 const struct flash_spec *flash;
1615 if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 ||
1616 BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) {
1617 sc->bce_flash_info = &flash_5709;
1618 goto bce_init_nvram_get_flash_size;
1621 /* Determine the selected interface. */
1622 val = REG_RD(sc, BCE_NVM_CFG1);
1624 entry_count = sizeof(flash_table) / sizeof(struct flash_spec);
1627 * Flash reconfiguration is required to support additional
1628 * NVRAM devices not directly supported in hardware.
1629 * Check if the flash interface was reconfigured
1630 * by the bootcode.
1633 if (val & 0x40000000) {
1634 /* Flash interface reconfigured by bootcode. */
1635 for (j = 0, flash = flash_table; j < entry_count;
1636 j++, flash++) {
1637 if ((val & FLASH_BACKUP_STRAP_MASK) ==
1638 (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
1639 sc->bce_flash_info = flash;
1640 break;
1643 } else {
1644 /* Flash interface not yet reconfigured. */
1645 uint32_t mask;
1647 if (val & (1 << 23))
1648 mask = FLASH_BACKUP_STRAP_MASK;
1649 else
1650 mask = FLASH_STRAP_MASK;
1652 /* Look for the matching NVRAM device configuration data. */
1653 for (j = 0, flash = flash_table; j < entry_count;
1654 j++, flash++) {
1655 /* Check if the device matches any of the known devices. */
1656 if ((val & mask) == (flash->strapping & mask)) {
1657 /* Found a device match. */
1658 sc->bce_flash_info = flash;
1660 /* Request access to the flash interface. */
1661 rc = bce_acquire_nvram_lock(sc);
1662 if (rc != 0)
1663 return rc;
1665 /* Reconfigure the flash interface. */
1666 bce_enable_nvram_access(sc);
1667 REG_WR(sc, BCE_NVM_CFG1, flash->config1);
1668 REG_WR(sc, BCE_NVM_CFG2, flash->config2);
1669 REG_WR(sc, BCE_NVM_CFG3, flash->config3);
1670 REG_WR(sc, BCE_NVM_WRITE1, flash->write1);
1671 bce_disable_nvram_access(sc);
1672 bce_release_nvram_lock(sc);
1673 break;
1678 /* Check if a matching device was found. */
1679 if (j == entry_count) {
1680 sc->bce_flash_info = NULL;
1681 if_printf(&sc->arpcom.ac_if, "Unknown Flash NVRAM found!\n");
1682 return ENODEV;
1685 bce_init_nvram_get_flash_size:
1686 /* Read the NVRAM size from the shared memory interface. */
1687 val = bce_shmem_rd(sc, BCE_SHARED_HW_CFG_CONFIG2) &
1688 BCE_SHARED_HW_CFG2_NVM_SIZE_MASK;
1689 if (val)
1690 sc->bce_flash_size = val;
1691 else
1692 sc->bce_flash_size = sc->bce_flash_info->total_size;
1694 return rc;
1697 /****************************************************************************/
1698 /* Read an arbitrary range of data from NVRAM. */
1699 /* */
1700 /* Prepares the NVRAM interface for access and reads the requested data */
1701 /* into the supplied buffer. */
1702 /* */
1703 /* Returns: */
1704 /* 0 on success and the data read, positive value on failure. */
1705 /****************************************************************************/
1706 static int
1707 bce_nvram_read(struct bce_softc *sc, uint32_t offset, uint8_t *ret_buf,
1708 int buf_size)
1710 uint32_t cmd_flags, offset32, len32, extra;
1711 int rc = 0;
1713 if (buf_size == 0)
1714 return 0;
1716 /* Request access to the flash interface. */
1717 rc = bce_acquire_nvram_lock(sc);
1718 if (rc != 0)
1719 return rc;
1721 /* Enable access to flash interface */
1722 bce_enable_nvram_access(sc);
1724 len32 = buf_size;
1725 offset32 = offset;
1726 extra = 0;
1728 cmd_flags = 0;
1730 /* XXX should we release nvram lock if read_dword() fails? */
1731 if (offset32 & 3) {
1732 uint8_t buf[4];
1733 uint32_t pre_len;
1735 offset32 &= ~3;
1736 pre_len = 4 - (offset & 3);
1738 if (pre_len >= len32) {
1739 pre_len = len32;
1740 cmd_flags = BCE_NVM_COMMAND_FIRST | BCE_NVM_COMMAND_LAST;
1741 } else {
1742 cmd_flags = BCE_NVM_COMMAND_FIRST;
1745 rc = bce_nvram_read_dword(sc, offset32, buf, cmd_flags);
1746 if (rc)
1747 return rc;
1749 memcpy(ret_buf, buf + (offset & 3), pre_len);
1751 offset32 += 4;
1752 ret_buf += pre_len;
1753 len32 -= pre_len;
1756 if (len32 & 3) {
1757 extra = 4 - (len32 & 3);
1758 len32 = (len32 + 4) & ~3;
1761 if (len32 == 4) {
1762 uint8_t buf[4];
1764 if (cmd_flags)
1765 cmd_flags = BCE_NVM_COMMAND_LAST;
1766 else
1767 cmd_flags = BCE_NVM_COMMAND_FIRST |
1768 BCE_NVM_COMMAND_LAST;
1770 rc = bce_nvram_read_dword(sc, offset32, buf, cmd_flags);
1772 memcpy(ret_buf, buf, 4 - extra);
1773 } else if (len32 > 0) {
1774 uint8_t buf[4];
1776 /* Read the first word. */
1777 if (cmd_flags)
1778 cmd_flags = 0;
1779 else
1780 cmd_flags = BCE_NVM_COMMAND_FIRST;
1782 rc = bce_nvram_read_dword(sc, offset32, ret_buf, cmd_flags);
1784 /* Advance to the next dword. */
1785 offset32 += 4;
1786 ret_buf += 4;
1787 len32 -= 4;
1789 while (len32 > 4 && rc == 0) {
1790 rc = bce_nvram_read_dword(sc, offset32, ret_buf, 0);
1792 /* Advance to the next dword. */
1793 offset32 += 4;
1794 ret_buf += 4;
1795 len32 -= 4;
1798 if (rc)
1799 goto bce_nvram_read_locked_exit;
1801 cmd_flags = BCE_NVM_COMMAND_LAST;
1802 rc = bce_nvram_read_dword(sc, offset32, buf, cmd_flags);
1804 memcpy(ret_buf, buf, 4 - extra);
1807 bce_nvram_read_locked_exit:
1808 /* Disable access to flash interface and release the lock. */
1809 bce_disable_nvram_access(sc);
1810 bce_release_nvram_lock(sc);
1812 return rc;
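/*
 * Illustrative sketch (not compiled): bce_nvram_read() hides the dword
 * granularity of the flash interface, so arbitrary offsets and lengths are
 * acceptable; the head/tail handling above supplies the padding reads.
 * The offset and length here are arbitrary example values.
 */
#if 0
uint8_t buf[7];
int error;

error = bce_nvram_read(sc, 0x32, buf, sizeof(buf));
#endif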
1815 /****************************************************************************/
1816 /* Verifies that NVRAM is accessible and contains valid data. */
1817 /* */
1818 /* Reads the configuration data from NVRAM and verifies that the CRC is */
1819 /* correct. */
1820 /* */
1821 /* Returns: */
1822 /* 0 on success, positive value on failure. */
1823 /****************************************************************************/
1824 static int
1825 bce_nvram_test(struct bce_softc *sc)
1827 uint32_t buf[BCE_NVRAM_SIZE / 4];
1828 uint32_t magic, csum;
1829 uint8_t *data = (uint8_t *)buf;
1830 int rc = 0;
1833 * Check that the device NVRAM is valid by reading
1834 * the magic value at offset 0.
1836 rc = bce_nvram_read(sc, 0, data, 4);
1837 if (rc != 0)
1838 return rc;
1840 magic = be32toh(buf[0]);
1841 if (magic != BCE_NVRAM_MAGIC) {
1842 if_printf(&sc->arpcom.ac_if,
1843 "Invalid NVRAM magic value! Expected: 0x%08X, "
1844 "Found: 0x%08X\n", BCE_NVRAM_MAGIC, magic);
1845 return ENODEV;
1849 * Verify that the device NVRAM includes valid
1850 * configuration data.
1852 rc = bce_nvram_read(sc, 0x100, data, BCE_NVRAM_SIZE);
1853 if (rc != 0)
1854 return rc;
1856 csum = ether_crc32_le(data, 0x100);
1857 if (csum != BCE_CRC32_RESIDUAL) {
1858 if_printf(&sc->arpcom.ac_if,
1859 "Invalid Manufacturing Information NVRAM CRC! "
1860 "Expected: 0x%08X, Found: 0x%08X\n",
1861 BCE_CRC32_RESIDUAL, csum);
1862 return ENODEV;
1865 csum = ether_crc32_le(data + 0x100, 0x100);
1866 if (csum != BCE_CRC32_RESIDUAL) {
1867 if_printf(&sc->arpcom.ac_if,
1868 "Invalid Feature Configuration Information "
1869 "NVRAM CRC! Expected: 0x%08X, Found: 08%08X\n",
1870 BCE_CRC32_RESIDUAL, csum);
1871 rc = ENODEV;
1873 return rc;
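/*
 * Note on the checks above: each 0x100 byte configuration region carries
 * its own little-endian CRC32, and running ether_crc32_le() over the data
 * plus the stored CRC yields the fixed residual BCE_CRC32_RESIDUAL when
 * the region is intact.  A minimal sketch of the property; 'region' is a
 * hypothetical pointer to one such 0x100 byte region:
 */
#if 0
KKASSERT(ether_crc32_le(region, 0x100) == BCE_CRC32_RESIDUAL);
#endif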
1876 /****************************************************************************/
1877 /* Identifies the current media type of the controller and sets the PHY */
1878 /* address. */
1879 /* */
1880 /* Returns: */
1881 /* Nothing. */
1882 /****************************************************************************/
1883 static void
1884 bce_get_media(struct bce_softc *sc)
1886 uint32_t val;
1888 sc->bce_phy_addr = 1;
1890 if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 ||
1891 BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) {
1892 uint32_t val = REG_RD(sc, BCE_MISC_DUAL_MEDIA_CTRL);
1893 uint32_t bond_id = val & BCE_MISC_DUAL_MEDIA_CTRL_BOND_ID;
1894 uint32_t strap;
1897 * The BCM5709S is software configurable
1898 * for Copper or SerDes operation.
1900 if (bond_id == BCE_MISC_DUAL_MEDIA_CTRL_BOND_ID_C) {
1901 return;
1902 } else if (bond_id == BCE_MISC_DUAL_MEDIA_CTRL_BOND_ID_S) {
1903 sc->bce_phy_flags |= BCE_PHY_SERDES_FLAG;
1904 return;
1907 if (val & BCE_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE) {
1908 strap = (val & BCE_MISC_DUAL_MEDIA_CTRL_PHY_CTRL) >> 21;
1909 } else {
1910 strap =
1911 (val & BCE_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP) >> 8;
1914 if (pci_get_function(sc->bce_dev) == 0) {
1915 switch (strap) {
1916 case 0x4:
1917 case 0x5:
1918 case 0x6:
1919 sc->bce_phy_flags |= BCE_PHY_SERDES_FLAG;
1920 break;
1922 } else {
1923 switch (strap) {
1924 case 0x1:
1925 case 0x2:
1926 case 0x4:
1927 sc->bce_phy_flags |= BCE_PHY_SERDES_FLAG;
1928 break;
1931 } else if (BCE_CHIP_BOND_ID(sc) & BCE_CHIP_BOND_ID_SERDES_BIT) {
1932 sc->bce_phy_flags |= BCE_PHY_SERDES_FLAG;
1935 if (sc->bce_phy_flags & BCE_PHY_SERDES_FLAG) {
1936 sc->bce_flags |= BCE_NO_WOL_FLAG;
1937 if (BCE_CHIP_NUM(sc) != BCE_CHIP_NUM_5706) {
1938 sc->bce_phy_addr = 2;
1939 val = bce_shmem_rd(sc, BCE_SHARED_HW_CFG_CONFIG);
1940 if (val & BCE_SHARED_HW_CFG_PHY_2_5G)
1941 sc->bce_phy_flags |= BCE_PHY_2_5G_CAPABLE_FLAG;
1943 } else if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5706) ||
1944 (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5708)) {
1945 sc->bce_phy_flags |= BCE_PHY_CRC_FIX_FLAG;
1949 static void
1950 bce_destroy_tx_ring(struct bce_tx_ring *txr)
1952 int i;
1954 /* Destroy the TX buffer descriptor DMA resources. */
1955 if (txr->tx_bd_chain_tag != NULL) {
1956 for (i = 0; i < txr->tx_pages; i++) {
1957 if (txr->tx_bd_chain[i] != NULL) {
1958 bus_dmamap_unload(txr->tx_bd_chain_tag,
1959 txr->tx_bd_chain_map[i]);
1960 bus_dmamem_free(txr->tx_bd_chain_tag,
1961 txr->tx_bd_chain[i],
1962 txr->tx_bd_chain_map[i]);
1965 bus_dma_tag_destroy(txr->tx_bd_chain_tag);
1968 /* Destroy the TX mbuf DMA resources. */
1969 if (txr->tx_mbuf_tag != NULL) {
1970 for (i = 0; i < TOTAL_TX_BD(txr); i++) {
1971 /* Must have been unloaded in bce_stop() */
1972 KKASSERT(txr->tx_bufs[i].tx_mbuf_ptr == NULL);
1973 bus_dmamap_destroy(txr->tx_mbuf_tag,
1974 txr->tx_bufs[i].tx_mbuf_map);
1976 bus_dma_tag_destroy(txr->tx_mbuf_tag);
1979 if (txr->tx_bd_chain_map != NULL)
1980 kfree(txr->tx_bd_chain_map, M_DEVBUF);
1981 if (txr->tx_bd_chain != NULL)
1982 kfree(txr->tx_bd_chain, M_DEVBUF);
1983 if (txr->tx_bd_chain_paddr != NULL)
1984 kfree(txr->tx_bd_chain_paddr, M_DEVBUF);
1986 if (txr->tx_bufs != NULL)
1987 kfree(txr->tx_bufs, M_DEVBUF);
1990 static void
1991 bce_destroy_rx_ring(struct bce_rx_ring *rxr)
1993 int i;
1995 /* Destroy the RX buffer descriptor DMA resources. */
1996 if (rxr->rx_bd_chain_tag != NULL) {
1997 for (i = 0; i < rxr->rx_pages; i++) {
1998 if (rxr->rx_bd_chain[i] != NULL) {
1999 bus_dmamap_unload(rxr->rx_bd_chain_tag,
2000 rxr->rx_bd_chain_map[i]);
2001 bus_dmamem_free(rxr->rx_bd_chain_tag,
2002 rxr->rx_bd_chain[i],
2003 rxr->rx_bd_chain_map[i]);
2006 bus_dma_tag_destroy(rxr->rx_bd_chain_tag);
2009 /* Destroy the RX mbuf DMA resources. */
2010 if (rxr->rx_mbuf_tag != NULL) {
2011 for (i = 0; i < TOTAL_RX_BD(rxr); i++) {
2012 /* Must have been unloaded in bce_stop() */
2013 KKASSERT(rxr->rx_bufs[i].rx_mbuf_ptr == NULL);
2014 bus_dmamap_destroy(rxr->rx_mbuf_tag,
2015 rxr->rx_bufs[i].rx_mbuf_map);
2017 bus_dmamap_destroy(rxr->rx_mbuf_tag, rxr->rx_mbuf_tmpmap);
2018 bus_dma_tag_destroy(rxr->rx_mbuf_tag);
2021 if (rxr->rx_bd_chain_map != NULL)
2022 kfree(rxr->rx_bd_chain_map, M_DEVBUF);
2023 if (rxr->rx_bd_chain != NULL)
2024 kfree(rxr->rx_bd_chain, M_DEVBUF);
2025 if (rxr->rx_bd_chain_paddr != NULL)
2026 kfree(rxr->rx_bd_chain_paddr, M_DEVBUF);
2028 if (rxr->rx_bufs != NULL)
2029 kfree(rxr->rx_bufs, M_DEVBUF);
2032 /****************************************************************************/
2033 /* Free any DMA memory owned by the driver. */
2034 /* */
2035 /* Scans through each data structure that requires DMA memory and frees */
2036 /* the memory if allocated. */
2037 /* */
2038 /* Returns: */
2039 /* Nothing. */
2040 /****************************************************************************/
2041 static void
2042 bce_dma_free(struct bce_softc *sc)
2044 int i;
2046 /* Destroy the status block. */
2047 if (sc->status_tag != NULL) {
2048 if (sc->status_block != NULL) {
2049 bus_dmamap_unload(sc->status_tag, sc->status_map);
2050 bus_dmamem_free(sc->status_tag, sc->status_block,
2051 sc->status_map);
2053 bus_dma_tag_destroy(sc->status_tag);
2056 /* Destroy the statistics block. */
2057 if (sc->stats_tag != NULL) {
2058 if (sc->stats_block != NULL) {
2059 bus_dmamap_unload(sc->stats_tag, sc->stats_map);
2060 bus_dmamem_free(sc->stats_tag, sc->stats_block,
2061 sc->stats_map);
2063 bus_dma_tag_destroy(sc->stats_tag);
2066 /* Destroy the CTX DMA resources. */
2067 if (sc->ctx_tag != NULL) {
2068 for (i = 0; i < sc->ctx_pages; i++) {
2069 if (sc->ctx_block[i] != NULL) {
2070 bus_dmamap_unload(sc->ctx_tag, sc->ctx_map[i]);
2071 bus_dmamem_free(sc->ctx_tag, sc->ctx_block[i],
2072 sc->ctx_map[i]);
2075 bus_dma_tag_destroy(sc->ctx_tag);
2078 /* Free TX rings */
2079 if (sc->tx_rings != NULL) {
2080 for (i = 0; i < sc->tx_ring_cnt; ++i)
2081 bce_destroy_tx_ring(&sc->tx_rings[i]);
2082 kfree(sc->tx_rings, M_DEVBUF);
2085 /* Free RX rings */
2086 if (sc->rx_rings != NULL) {
2087 for (i = 0; i < sc->rx_ring_cnt; ++i)
2088 bce_destroy_rx_ring(&sc->rx_rings[i]);
2089 kfree(sc->rx_rings, M_DEVBUF);
2092 /* Destroy the parent tag */
2093 if (sc->parent_tag != NULL)
2094 bus_dma_tag_destroy(sc->parent_tag);
2097 /****************************************************************************/
2098 /* Get DMA memory from the OS. */
2099 /* */
2100 /* Validates that the OS has provided DMA buffers in response to a */
2101 /* bus_dmamap_load() call and saves the physical address of those buffers. */
2102 /* The callback saves the single segment's physical address through the */
2103 /* supplied bus_addr_t pointer; on error it returns without writing it, */
2104 /* so callers detect failures from bus_dmamap_load()'s return value. */
2105 /* */
2106 /* Returns: */
2107 /* Nothing. */
2108 /****************************************************************************/
2109 static void
2110 bce_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
2112 bus_addr_t *busaddr = arg;
2114 /* If an error occurred, bail out and leave the address unmodified. */
2115 if (error)
2116 return;
2118 KASSERT(nseg == 1, ("only one segment is allowed"));
2119 *busaddr = segs->ds_addr;
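/*
 * Illustrative sketch (not compiled): the callback is handed to
 * bus_dmamap_load() along with a bus_addr_t to fill in, exactly as the
 * ring setup code below does.  Since the callback writes nothing on
 * error, failures surface through the load's return value.
 */
#if 0
bus_addr_t busaddr;
int error;

error = bus_dmamap_load(tag, map, vaddr, size,
    bce_dma_map_addr, &busaddr, BUS_DMA_WAITOK);
if (error == 0) {
	/* busaddr now holds the single segment's physical address. */
}
#endif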
2122 static int
2123 bce_create_tx_ring(struct bce_tx_ring *txr)
2125 int pages, rc, i;
2127 lwkt_serialize_init(&txr->tx_serialize);
2128 txr->tx_wreg = bce_tx_wreg;
2130 pages = device_getenv_int(txr->sc->bce_dev, "tx_pages", bce_tx_pages);
2131 if (pages <= 0 || pages > TX_PAGES_MAX || !powerof2(pages)) {
2132 device_printf(txr->sc->bce_dev, "invalid # of TX pages\n");
2133 pages = TX_PAGES_DEFAULT;
2135 txr->tx_pages = pages;
2137 txr->tx_bd_chain_map = kmalloc(sizeof(bus_dmamap_t) * txr->tx_pages,
2138 M_DEVBUF, M_WAITOK | M_ZERO);
2139 txr->tx_bd_chain = kmalloc(sizeof(struct tx_bd *) * txr->tx_pages,
2140 M_DEVBUF, M_WAITOK | M_ZERO);
2141 txr->tx_bd_chain_paddr = kmalloc(sizeof(bus_addr_t) * txr->tx_pages,
2142 M_DEVBUF, M_WAITOK | M_ZERO);
2144 txr->tx_bufs = kmalloc_cachealign(
2145 sizeof(struct bce_tx_buf) * TOTAL_TX_BD(txr),
2146 M_DEVBUF, M_WAITOK | M_ZERO);
2149 * Create a DMA tag for the TX buffer descriptor chain,
2150 * allocate and clear the memory, and fetch the
2151 * physical address of the block.
2153 rc = bus_dma_tag_create(txr->sc->parent_tag, BCM_PAGE_SIZE, 0,
2154 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
2155 BCE_TX_CHAIN_PAGE_SZ, 1, BCE_TX_CHAIN_PAGE_SZ,
2156 0, &txr->tx_bd_chain_tag);
2157 if (rc != 0) {
2158 device_printf(txr->sc->bce_dev, "Could not allocate "
2159 "TX descriptor chain DMA tag!\n");
2160 return rc;
2163 for (i = 0; i < txr->tx_pages; i++) {
2164 bus_addr_t busaddr;
2166 rc = bus_dmamem_alloc(txr->tx_bd_chain_tag,
2167 (void **)&txr->tx_bd_chain[i],
2168 BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
2169 &txr->tx_bd_chain_map[i]);
2170 if (rc != 0) {
2171 device_printf(txr->sc->bce_dev,
2172 "Could not allocate %dth TX descriptor "
2173 "chain DMA memory!\n", i);
2174 return rc;
2177 rc = bus_dmamap_load(txr->tx_bd_chain_tag,
2178 txr->tx_bd_chain_map[i],
2179 txr->tx_bd_chain[i],
2180 BCE_TX_CHAIN_PAGE_SZ,
2181 bce_dma_map_addr, &busaddr,
2182 BUS_DMA_WAITOK);
2183 if (rc != 0) {
2184 if (rc == EINPROGRESS) {
2185 panic("%s coherent memory loading "
2186 "is still in progress!",
2187 txr->sc->arpcom.ac_if.if_xname);
2189 device_printf(txr->sc->bce_dev, "Could not map %dth "
2190 "TX descriptor chain DMA memory!\n", i);
2191 bus_dmamem_free(txr->tx_bd_chain_tag,
2192 txr->tx_bd_chain[i],
2193 txr->tx_bd_chain_map[i]);
2194 txr->tx_bd_chain[i] = NULL;
2195 return rc;
2198 txr->tx_bd_chain_paddr[i] = busaddr;
2201 /* Create a DMA tag for TX mbufs. */
2202 rc = bus_dma_tag_create(txr->sc->parent_tag, 1, 0,
2203 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
2204 IP_MAXPACKET + sizeof(struct ether_vlan_header),
2205 BCE_MAX_SEGMENTS, PAGE_SIZE,
2206 BUS_DMA_ALLOCNOW | BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE,
2207 &txr->tx_mbuf_tag);
2208 if (rc != 0) {
2209 device_printf(txr->sc->bce_dev,
2210 "Could not allocate TX mbuf DMA tag!\n");
2211 return rc;
2214 /* Create DMA maps for the TX mbufs clusters. */
2215 for (i = 0; i < TOTAL_TX_BD(txr); i++) {
2216 rc = bus_dmamap_create(txr->tx_mbuf_tag,
2217 BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE,
2218 &txr->tx_bufs[i].tx_mbuf_map);
2219 if (rc != 0) {
2220 int j;
2222 for (j = 0; j < i; ++j) {
2223 bus_dmamap_destroy(txr->tx_mbuf_tag,
2224 txr->tx_bufs[j].tx_mbuf_map);
2226 bus_dma_tag_destroy(txr->tx_mbuf_tag);
2227 txr->tx_mbuf_tag = NULL;
2229 device_printf(txr->sc->bce_dev, "Unable to create "
2230 "%dth TX mbuf DMA map!\n", i);
2231 return rc;
2234 return 0;
2237 static int
2238 bce_create_rx_ring(struct bce_rx_ring *rxr)
2240 int pages, rc, i;
2242 lwkt_serialize_init(&rxr->rx_serialize);
2244 pages = device_getenv_int(rxr->sc->bce_dev, "rx_pages", bce_rx_pages);
2245 if (pages <= 0 || pages > RX_PAGES_MAX || !powerof2(pages)) {
2246 device_printf(rxr->sc->bce_dev, "invalid # of RX pages\n");
2247 pages = RX_PAGES_DEFAULT;
2249 rxr->rx_pages = pages;
2251 rxr->rx_bd_chain_map = kmalloc(sizeof(bus_dmamap_t) * rxr->rx_pages,
2252 M_DEVBUF, M_WAITOK | M_ZERO);
2253 rxr->rx_bd_chain = kmalloc(sizeof(struct rx_bd *) * rxr->rx_pages,
2254 M_DEVBUF, M_WAITOK | M_ZERO);
2255 rxr->rx_bd_chain_paddr = kmalloc(sizeof(bus_addr_t) * rxr->rx_pages,
2256 M_DEVBUF, M_WAITOK | M_ZERO);
2258 rxr->rx_bufs = kmalloc_cachealign(
2259 sizeof(struct bce_rx_buf) * TOTAL_RX_BD(rxr),
2260 M_DEVBUF, M_WAITOK | M_ZERO);
2263 * Create a DMA tag for the RX buffer descriptor chain,
2264 * allocate and clear the memory, and fetch the physical
2265 * address of the blocks.
2267 rc = bus_dma_tag_create(rxr->sc->parent_tag, BCM_PAGE_SIZE, 0,
2268 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
2269 BCE_RX_CHAIN_PAGE_SZ, 1, BCE_RX_CHAIN_PAGE_SZ,
2270 0, &rxr->rx_bd_chain_tag);
2271 if (rc != 0) {
2272 device_printf(rxr->sc->bce_dev, "Could not allocate "
2273 "RX descriptor chain DMA tag!\n");
2274 return rc;
2277 for (i = 0; i < rxr->rx_pages; i++) {
2278 bus_addr_t busaddr;
2280 rc = bus_dmamem_alloc(rxr->rx_bd_chain_tag,
2281 (void **)&rxr->rx_bd_chain[i],
2282 BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
2283 &rxr->rx_bd_chain_map[i]);
2284 if (rc != 0) {
2285 device_printf(rxr->sc->bce_dev,
2286 "Could not allocate %dth RX descriptor "
2287 "chain DMA memory!\n", i);
2288 return rc;
2291 rc = bus_dmamap_load(rxr->rx_bd_chain_tag,
2292 rxr->rx_bd_chain_map[i],
2293 rxr->rx_bd_chain[i],
2294 BCE_RX_CHAIN_PAGE_SZ,
2295 bce_dma_map_addr, &busaddr,
2296 BUS_DMA_WAITOK);
2297 if (rc != 0) {
2298 if (rc == EINPROGRESS) {
2299 panic("%s coherent memory loading "
2300 "is still in progress!",
2301 rxr->sc->arpcom.ac_if.if_xname);
2303 device_printf(rxr->sc->bce_dev,
2304 "Could not map %dth RX descriptor "
2305 "chain DMA memory!\n", i);
2306 bus_dmamem_free(rxr->rx_bd_chain_tag,
2307 rxr->rx_bd_chain[i],
2308 rxr->rx_bd_chain_map[i]);
2309 rxr->rx_bd_chain[i] = NULL;
2310 return rc;
2313 rxr->rx_bd_chain_paddr[i] = busaddr;
2316 /* Create a DMA tag for RX mbufs. */
2317 rc = bus_dma_tag_create(rxr->sc->parent_tag, BCE_DMA_RX_ALIGN, 0,
2318 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
2319 MCLBYTES, 1, MCLBYTES,
2320 BUS_DMA_ALLOCNOW | BUS_DMA_ALIGNED | BUS_DMA_WAITOK,
2321 &rxr->rx_mbuf_tag);
2322 if (rc != 0) {
2323 device_printf(rxr->sc->bce_dev,
2324 "Could not allocate RX mbuf DMA tag!\n");
2325 return rc;
2328 /* Create tmp DMA map for RX mbuf clusters. */
2329 rc = bus_dmamap_create(rxr->rx_mbuf_tag, BUS_DMA_WAITOK,
2330 &rxr->rx_mbuf_tmpmap);
2331 if (rc != 0) {
2332 bus_dma_tag_destroy(rxr->rx_mbuf_tag);
2333 rxr->rx_mbuf_tag = NULL;
2335 device_printf(rxr->sc->bce_dev,
2336 "Could not create RX mbuf tmp DMA map!\n");
2337 return rc;
2340 /* Create DMA maps for the RX mbuf clusters. */
2341 for (i = 0; i < TOTAL_RX_BD(rxr); i++) {
2342 rc = bus_dmamap_create(rxr->rx_mbuf_tag, BUS_DMA_WAITOK,
2343 &rxr->rx_bufs[i].rx_mbuf_map);
2344 if (rc != 0) {
2345 int j;
2347 for (j = 0; j < i; ++j) {
2348 bus_dmamap_destroy(rxr->rx_mbuf_tag,
2349 rxr->rx_bufs[j].rx_mbuf_map);
2351 bus_dma_tag_destroy(rxr->rx_mbuf_tag);
2352 rxr->rx_mbuf_tag = NULL;
2354 device_printf(rxr->sc->bce_dev, "Unable to create "
2355 "%dth RX mbuf DMA map!\n", i);
2356 return rc;
2359 return 0;
2362 /****************************************************************************/
2363 /* Allocate any DMA memory needed by the driver. */
2364 /* */
2365 /* Allocates DMA memory needed for the various global structures needed by */
2366 /* hardware. */
2367 /* */
2368 /* Memory alignment requirements: */
2369 /* -----------------+----------+----------+----------+----------+ */
2370 /* Data Structure | 5706 | 5708 | 5709 | 5716 | */
2371 /* -----------------+----------+----------+----------+----------+ */
2372 /* Status Block | 8 bytes | 8 bytes | 16 bytes | 16 bytes | */
2373 /* Statistics Block | 8 bytes | 8 bytes | 16 bytes | 16 bytes | */
2374 /* RX Buffers | 16 bytes | 16 bytes | 16 bytes | 16 bytes | */
2375 /* PG Buffers | none | none | none | none | */
2376 /* TX Buffers | none | none | none | none | */
2377 /* Chain Pages(1) | 4KiB | 4KiB | 4KiB | 4KiB | */
2378 /* Context Pages(1) | N/A | N/A | 4KiB | 4KiB | */
2379 /* -----------------+----------+----------+----------+----------+ */
2380 /* */
2381 /* (1) Must align with CPU page size (BCM_PAGE_SIZE). */
2382 /* */
2383 /* Returns: */
2384 /* 0 for success, positive value for failure. */
2385 /****************************************************************************/
2386 static int
2387 bce_dma_alloc(struct bce_softc *sc)
2389 struct ifnet *ifp = &sc->arpcom.ac_if;
2390 int i, rc = 0;
2391 bus_addr_t busaddr, max_busaddr;
2392 bus_size_t status_align, stats_align, status_size;
2395 * The embedded PCIe to PCI-X bridge (EPB)
2396 * in the 5708 cannot address memory above
2397 * 40 bits (E7_5708CB1_23043 & E6_5708SB1_23043).
2399 if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5708)
2400 max_busaddr = BCE_BUS_SPACE_MAXADDR;
2401 else
2402 max_busaddr = BUS_SPACE_MAXADDR;
2405 * BCM5709 and BCM5716 use host memory as a cache for context memory.
2407 if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 ||
2408 BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) {
2409 sc->ctx_pages = BCE_CTX_BLK_SZ / BCM_PAGE_SIZE;
2410 if (sc->ctx_pages == 0)
2411 sc->ctx_pages = 1;
2412 if (sc->ctx_pages > BCE_CTX_PAGES) {
2413 device_printf(sc->bce_dev, "excessive ctx pages %d\n",
2414 sc->ctx_pages);
2415 return ENOMEM;
2417 status_align = 16;
2418 stats_align = 16;
2419 } else {
2420 status_align = 8;
2421 stats_align = 8;
2425 * Each MSI-X vector needs a status block; each status block
2426 * consumes 128 bytes and is aligned on a 128 byte boundary.
2428 if (sc->rx_ring_cnt > 1) {
2429 status_size = BCE_MSIX_MAX * BCE_STATUS_BLK_MSIX_ALIGN;
2430 status_align = BCE_STATUS_BLK_MSIX_ALIGN;
2431 } else {
2432 status_size = BCE_STATUS_BLK_SZ;
2436 * Allocate the parent bus DMA tag appropriate for PCI.
2438 rc = bus_dma_tag_create(NULL, 1, BCE_DMA_BOUNDARY,
2439 max_busaddr, BUS_SPACE_MAXADDR,
2440 NULL, NULL,
2441 BUS_SPACE_MAXSIZE_32BIT, 0,
2442 BUS_SPACE_MAXSIZE_32BIT,
2443 0, &sc->parent_tag);
2444 if (rc != 0) {
2445 if_printf(ifp, "Could not allocate parent DMA tag!\n");
2446 return rc;
2450 * Allocate status block.
2452 sc->status_block = bus_dmamem_coherent_any(sc->parent_tag,
2453 status_align, status_size,
2454 BUS_DMA_WAITOK | BUS_DMA_ZERO,
2455 &sc->status_tag, &sc->status_map,
2456 &sc->status_block_paddr);
2457 if (sc->status_block == NULL) {
2458 if_printf(ifp, "Could not allocate status block!\n");
2459 return ENOMEM;
2463 * Allocate statistics block.
2465 sc->stats_block = bus_dmamem_coherent_any(sc->parent_tag,
2466 stats_align, BCE_STATS_BLK_SZ,
2467 BUS_DMA_WAITOK | BUS_DMA_ZERO,
2468 &sc->stats_tag, &sc->stats_map,
2469 &sc->stats_block_paddr);
2470 if (sc->stats_block == NULL) {
2471 if_printf(ifp, "Could not allocate statistics block!\n");
2472 return ENOMEM;
2476 * Allocate context block, if needed
2478 if (sc->ctx_pages != 0) {
2479 rc = bus_dma_tag_create(sc->parent_tag, BCM_PAGE_SIZE, 0,
2480 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
2481 NULL, NULL,
2482 BCM_PAGE_SIZE, 1, BCM_PAGE_SIZE,
2483 0, &sc->ctx_tag);
2484 if (rc != 0) {
2485 if_printf(ifp, "Could not allocate "
2486 "context block DMA tag!\n");
2487 return rc;
2490 for (i = 0; i < sc->ctx_pages; i++) {
2491 rc = bus_dmamem_alloc(sc->ctx_tag,
2492 (void **)&sc->ctx_block[i],
2493 BUS_DMA_WAITOK | BUS_DMA_ZERO |
2494 BUS_DMA_COHERENT,
2495 &sc->ctx_map[i]);
2496 if (rc != 0) {
2497 if_printf(ifp, "Could not allocate %dth context "
2498 "DMA memory!\n", i);
2499 return rc;
2502 rc = bus_dmamap_load(sc->ctx_tag, sc->ctx_map[i],
2503 sc->ctx_block[i], BCM_PAGE_SIZE,
2504 bce_dma_map_addr, &busaddr,
2505 BUS_DMA_WAITOK);
2506 if (rc != 0) {
2507 if (rc == EINPROGRESS) {
2508 panic("%s coherent memory loading "
2509 "is still in progress!", ifp->if_xname);
2511 if_printf(ifp, "Could not map %dth context "
2512 "DMA memory!\n", i);
2513 bus_dmamem_free(sc->ctx_tag, sc->ctx_block[i],
2514 sc->ctx_map[i]);
2515 sc->ctx_block[i] = NULL;
2516 return rc;
2518 sc->ctx_paddr[i] = busaddr;
2522 sc->tx_rings = kmalloc_cachealign(
2523 sizeof(struct bce_tx_ring) * sc->tx_ring_cnt, M_DEVBUF,
2524 M_WAITOK | M_ZERO);
2525 for (i = 0; i < sc->tx_ring_cnt; ++i) {
2526 sc->tx_rings[i].sc = sc;
2527 if (i == 0) {
2528 sc->tx_rings[i].tx_cid = TX_CID;
2529 sc->tx_rings[i].tx_hw_cons =
2530 &sc->status_block->status_tx_quick_consumer_index0;
2531 } else {
2532 struct status_block_msix *sblk =
2533 (struct status_block_msix *)
2534 (((uint8_t *)(sc->status_block)) +
2535 (i * BCE_STATUS_BLK_MSIX_ALIGN));
2537 sc->tx_rings[i].tx_cid = TX_TSS_CID + i - 1;
2538 sc->tx_rings[i].tx_hw_cons =
2539 &sblk->status_tx_quick_consumer_index;
2542 rc = bce_create_tx_ring(&sc->tx_rings[i]);
2543 if (rc != 0) {
2544 device_printf(sc->bce_dev,
2545 "can't create %dth tx ring\n", i);
2546 return rc;
2550 sc->rx_rings = kmalloc_cachealign(
2551 sizeof(struct bce_rx_ring) * sc->rx_ring_cnt, M_DEVBUF,
2552 M_WAITOK | M_ZERO);
2553 for (i = 0; i < sc->rx_ring_cnt; ++i) {
2554 sc->rx_rings[i].sc = sc;
2555 sc->rx_rings[i].idx = i;
2556 if (i == 0) {
2557 sc->rx_rings[i].rx_cid = RX_CID;
2558 sc->rx_rings[i].rx_hw_cons =
2559 &sc->status_block->status_rx_quick_consumer_index0;
2560 sc->rx_rings[i].hw_status_idx =
2561 &sc->status_block->status_idx;
2562 } else {
2563 struct status_block_msix *sblk =
2564 (struct status_block_msix *)
2565 (((uint8_t *)(sc->status_block)) +
2566 (i * BCE_STATUS_BLK_MSIX_ALIGN));
2568 sc->rx_rings[i].rx_cid = RX_RSS_CID + i - 1;
2569 sc->rx_rings[i].rx_hw_cons =
2570 &sblk->status_rx_quick_consumer_index;
2571 sc->rx_rings[i].hw_status_idx = &sblk->status_idx;
2574 rc = bce_create_rx_ring(&sc->rx_rings[i]);
2575 if (rc != 0) {
2576 device_printf(sc->bce_dev,
2577 "can't create %dth rx ring\n", i);
2578 return rc;
2582 return 0;
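/*
 * Worked example of the MSI-X status block layout used above: the blocks
 * are laid out at a 128 byte stride from the base, so ring i (i > 0)
 * locates its block as in the sketch below (shown here for ring 1).
 */
#if 0
struct status_block_msix *sblk = (struct status_block_msix *)
    ((uint8_t *)sc->status_block + 1 * BCE_STATUS_BLK_MSIX_ALIGN);
#endif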
2585 /****************************************************************************/
2586 /* Firmware synchronization. */
2587 /* */
2588 /* Before performing certain operations, such as a chip reset, first */
2589 /* synchronize with the firmware. */
2590 /* */
2591 /* Returns: */
2592 /* 0 for success, positive value for failure. */
2593 /****************************************************************************/
2594 static int
2595 bce_fw_sync(struct bce_softc *sc, uint32_t msg_data)
2597 int i, rc = 0;
2598 uint32_t val;
2600 /* Don't waste any time if we've timed out before. */
2601 if (sc->bce_fw_timed_out)
2602 return EBUSY;
2604 /* Increment the message sequence number. */
2605 sc->bce_fw_wr_seq++;
2606 msg_data |= sc->bce_fw_wr_seq;
2608 /* Send the message to the bootcode driver mailbox. */
2609 bce_shmem_wr(sc, BCE_DRV_MB, msg_data);
2611 /* Wait for the bootcode to acknowledge the message. */
2612 for (i = 0; i < FW_ACK_TIME_OUT_MS; i++) {
2613 /* Check for a response in the bootcode firmware mailbox. */
2614 val = bce_shmem_rd(sc, BCE_FW_MB);
2615 if ((val & BCE_FW_MSG_ACK) == (msg_data & BCE_DRV_MSG_SEQ))
2616 break;
2617 DELAY(1000);
2620 /* If we've timed out, tell the bootcode that we've stopped waiting. */
2621 if ((val & BCE_FW_MSG_ACK) != (msg_data & BCE_DRV_MSG_SEQ) &&
2622 (msg_data & BCE_DRV_MSG_DATA) != BCE_DRV_MSG_DATA_WAIT0) {
2623 if_printf(&sc->arpcom.ac_if,
2624 "Firmware synchronization timeout! "
2625 "msg_data = 0x%08X\n", msg_data);
2627 msg_data &= ~BCE_DRV_MSG_CODE;
2628 msg_data |= BCE_DRV_MSG_CODE_FW_TIMEOUT;
2630 bce_shmem_wr(sc, BCE_DRV_MB, msg_data);
2632 sc->bce_fw_timed_out = 1;
2633 rc = EBUSY;
2635 return rc;
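/*
 * Illustrative sketch (not compiled): the reset path pairs two
 * bce_fw_sync() calls, BCE_DRV_MSG_DATA_WAIT0 before the reset so the
 * bootcode can quiesce and BCE_DRV_MSG_DATA_WAIT1 afterwards to wait for
 * it to re-initialize; see bce_reset() below.
 */
#if 0
rc = bce_fw_sync(sc, BCE_DRV_MSG_DATA_WAIT0 | reset_code);
/* ... perform the chip reset ... */
rc = bce_fw_sync(sc, BCE_DRV_MSG_DATA_WAIT1 | reset_code);
#endif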
2638 /****************************************************************************/
2639 /* Load Receive Virtual 2 Physical (RV2P) processor firmware. */
2640 /* */
2641 /* Returns: */
2642 /* Nothing. */
2643 /****************************************************************************/
2644 static void
2645 bce_load_rv2p_fw(struct bce_softc *sc, uint32_t *rv2p_code,
2646 uint32_t rv2p_code_len, uint32_t rv2p_proc)
2648 int i;
2649 uint32_t val;
2651 for (i = 0; i < rv2p_code_len; i += 8) {
2652 REG_WR(sc, BCE_RV2P_INSTR_HIGH, *rv2p_code);
2653 rv2p_code++;
2654 REG_WR(sc, BCE_RV2P_INSTR_LOW, *rv2p_code);
2655 rv2p_code++;
2657 if (rv2p_proc == RV2P_PROC1) {
2658 val = (i / 8) | BCE_RV2P_PROC1_ADDR_CMD_RDWR;
2659 REG_WR(sc, BCE_RV2P_PROC1_ADDR_CMD, val);
2660 } else {
2661 val = (i / 8) | BCE_RV2P_PROC2_ADDR_CMD_RDWR;
2662 REG_WR(sc, BCE_RV2P_PROC2_ADDR_CMD, val);
2666 /* Reset the processor, un-stall is done later. */
2667 if (rv2p_proc == RV2P_PROC1)
2668 REG_WR(sc, BCE_RV2P_COMMAND, BCE_RV2P_COMMAND_PROC1_RESET);
2669 else
2670 REG_WR(sc, BCE_RV2P_COMMAND, BCE_RV2P_COMMAND_PROC2_RESET);
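/*
 * Note on the loop above: each RV2P instruction is 64 bits wide and is
 * written as a HIGH/LOW register pair, so instruction n of the image sits
 * at byte offset i = 8 * n and is addressed in the processor as i / 8.
 * For example, the third instruction (n == 2) of the PROC1 image:
 */
#if 0
uint32_t val = (16 / 8) | BCE_RV2P_PROC1_ADDR_CMD_RDWR;	/* address 2 */
REG_WR(sc, BCE_RV2P_PROC1_ADDR_CMD, val);
#endif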
2673 /****************************************************************************/
2674 /* Load RISC processor firmware. */
2675 /* */
2676 /* Loads firmware from the file if_bcefw.h into the scratchpad memory */
2677 /* associated with a particular processor. */
2678 /* */
2679 /* Returns: */
2680 /* Nothing. */
2681 /****************************************************************************/
2682 static void
2683 bce_load_cpu_fw(struct bce_softc *sc, struct cpu_reg *cpu_reg,
2684 struct fw_info *fw)
2686 uint32_t offset;
2687 int j;
2689 bce_halt_cpu(sc, cpu_reg);
2691 /* Load the Text area. */
2692 offset = cpu_reg->spad_base + (fw->text_addr - cpu_reg->mips_view_base);
2693 if (fw->text) {
2694 for (j = 0; j < (fw->text_len / 4); j++, offset += 4)
2695 REG_WR_IND(sc, offset, fw->text[j]);
2698 /* Load the Data area. */
2699 offset = cpu_reg->spad_base + (fw->data_addr - cpu_reg->mips_view_base);
2700 if (fw->data) {
2701 for (j = 0; j < (fw->data_len / 4); j++, offset += 4)
2702 REG_WR_IND(sc, offset, fw->data[j]);
2705 /* Load the SBSS area. */
2706 offset = cpu_reg->spad_base + (fw->sbss_addr - cpu_reg->mips_view_base);
2707 if (fw->sbss) {
2708 for (j = 0; j < (fw->sbss_len / 4); j++, offset += 4)
2709 REG_WR_IND(sc, offset, fw->sbss[j]);
2712 /* Load the BSS area. */
2713 offset = cpu_reg->spad_base + (fw->bss_addr - cpu_reg->mips_view_base);
2714 if (fw->bss) {
2715 for (j = 0; j < (fw->bss_len/4); j++, offset += 4)
2716 REG_WR_IND(sc, offset, fw->bss[j]);
2719 /* Load the Read-Only area. */
2720 offset = cpu_reg->spad_base +
2721 (fw->rodata_addr - cpu_reg->mips_view_base);
2722 if (fw->rodata) {
2723 for (j = 0; j < (fw->rodata_len / 4); j++, offset += 4)
2724 REG_WR_IND(sc, offset, fw->rodata[j]);
2727 /* Clear the pre-fetch instruction and set the FW start address. */
2728 REG_WR_IND(sc, cpu_reg->inst, 0);
2729 REG_WR_IND(sc, cpu_reg->pc, fw->start_addr);
2732 /****************************************************************************/
2733 /* Starts the RISC processor. */
2734 /* */
2735 /* Assumes the CPU starting address has already been set. */
2736 /* */
2737 /* Returns: */
2738 /* Nothing. */
2739 /****************************************************************************/
2740 static void
2741 bce_start_cpu(struct bce_softc *sc, struct cpu_reg *cpu_reg)
2743 uint32_t val;
2745 /* Start the CPU. */
2746 val = REG_RD_IND(sc, cpu_reg->mode);
2747 val &= ~cpu_reg->mode_value_halt;
2748 REG_WR_IND(sc, cpu_reg->state, cpu_reg->state_value_clear);
2749 REG_WR_IND(sc, cpu_reg->mode, val);
2752 /****************************************************************************/
2753 /* Halts the RISC processor. */
2754 /* */
2755 /* Returns: */
2756 /* Nothing. */
2757 /****************************************************************************/
2758 static void
2759 bce_halt_cpu(struct bce_softc *sc, struct cpu_reg *cpu_reg)
2761 uint32_t val;
2763 /* Halt the CPU. */
2764 val = REG_RD_IND(sc, cpu_reg->mode);
2765 val |= cpu_reg->mode_value_halt;
2766 REG_WR_IND(sc, cpu_reg->mode, val);
2767 REG_WR_IND(sc, cpu_reg->state, cpu_reg->state_value_clear);
2770 /****************************************************************************/
2771 /* Start the RX CPU. */
2772 /* */
2773 /* Returns: */
2774 /* Nothing. */
2775 /****************************************************************************/
2776 static void
2777 bce_start_rxp_cpu(struct bce_softc *sc)
2779 struct cpu_reg cpu_reg;
2781 cpu_reg.mode = BCE_RXP_CPU_MODE;
2782 cpu_reg.mode_value_halt = BCE_RXP_CPU_MODE_SOFT_HALT;
2783 cpu_reg.mode_value_sstep = BCE_RXP_CPU_MODE_STEP_ENA;
2784 cpu_reg.state = BCE_RXP_CPU_STATE;
2785 cpu_reg.state_value_clear = 0xffffff;
2786 cpu_reg.gpr0 = BCE_RXP_CPU_REG_FILE;
2787 cpu_reg.evmask = BCE_RXP_CPU_EVENT_MASK;
2788 cpu_reg.pc = BCE_RXP_CPU_PROGRAM_COUNTER;
2789 cpu_reg.inst = BCE_RXP_CPU_INSTRUCTION;
2790 cpu_reg.bp = BCE_RXP_CPU_HW_BREAKPOINT;
2791 cpu_reg.spad_base = BCE_RXP_SCRATCH;
2792 cpu_reg.mips_view_base = 0x8000000;
2794 bce_start_cpu(sc, &cpu_reg);
2797 /****************************************************************************/
2798 /* Initialize the RX CPU. */
2799 /* */
2800 /* Returns: */
2801 /* Nothing. */
2802 /****************************************************************************/
2803 static void
2804 bce_init_rxp_cpu(struct bce_softc *sc)
2806 struct cpu_reg cpu_reg;
2807 struct fw_info fw;
2809 cpu_reg.mode = BCE_RXP_CPU_MODE;
2810 cpu_reg.mode_value_halt = BCE_RXP_CPU_MODE_SOFT_HALT;
2811 cpu_reg.mode_value_sstep = BCE_RXP_CPU_MODE_STEP_ENA;
2812 cpu_reg.state = BCE_RXP_CPU_STATE;
2813 cpu_reg.state_value_clear = 0xffffff;
2814 cpu_reg.gpr0 = BCE_RXP_CPU_REG_FILE;
2815 cpu_reg.evmask = BCE_RXP_CPU_EVENT_MASK;
2816 cpu_reg.pc = BCE_RXP_CPU_PROGRAM_COUNTER;
2817 cpu_reg.inst = BCE_RXP_CPU_INSTRUCTION;
2818 cpu_reg.bp = BCE_RXP_CPU_HW_BREAKPOINT;
2819 cpu_reg.spad_base = BCE_RXP_SCRATCH;
2820 cpu_reg.mips_view_base = 0x8000000;
2822 if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 ||
2823 BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) {
2824 fw.ver_major = bce_RXP_b09FwReleaseMajor;
2825 fw.ver_minor = bce_RXP_b09FwReleaseMinor;
2826 fw.ver_fix = bce_RXP_b09FwReleaseFix;
2827 fw.start_addr = bce_RXP_b09FwStartAddr;
2829 fw.text_addr = bce_RXP_b09FwTextAddr;
2830 fw.text_len = bce_RXP_b09FwTextLen;
2831 fw.text_index = 0;
2832 fw.text = bce_RXP_b09FwText;
2834 fw.data_addr = bce_RXP_b09FwDataAddr;
2835 fw.data_len = bce_RXP_b09FwDataLen;
2836 fw.data_index = 0;
2837 fw.data = bce_RXP_b09FwData;
2839 fw.sbss_addr = bce_RXP_b09FwSbssAddr;
2840 fw.sbss_len = bce_RXP_b09FwSbssLen;
2841 fw.sbss_index = 0;
2842 fw.sbss = bce_RXP_b09FwSbss;
2844 fw.bss_addr = bce_RXP_b09FwBssAddr;
2845 fw.bss_len = bce_RXP_b09FwBssLen;
2846 fw.bss_index = 0;
2847 fw.bss = bce_RXP_b09FwBss;
2849 fw.rodata_addr = bce_RXP_b09FwRodataAddr;
2850 fw.rodata_len = bce_RXP_b09FwRodataLen;
2851 fw.rodata_index = 0;
2852 fw.rodata = bce_RXP_b09FwRodata;
2853 } else {
2854 fw.ver_major = bce_RXP_b06FwReleaseMajor;
2855 fw.ver_minor = bce_RXP_b06FwReleaseMinor;
2856 fw.ver_fix = bce_RXP_b06FwReleaseFix;
2857 fw.start_addr = bce_RXP_b06FwStartAddr;
2859 fw.text_addr = bce_RXP_b06FwTextAddr;
2860 fw.text_len = bce_RXP_b06FwTextLen;
2861 fw.text_index = 0;
2862 fw.text = bce_RXP_b06FwText;
2864 fw.data_addr = bce_RXP_b06FwDataAddr;
2865 fw.data_len = bce_RXP_b06FwDataLen;
2866 fw.data_index = 0;
2867 fw.data = bce_RXP_b06FwData;
2869 fw.sbss_addr = bce_RXP_b06FwSbssAddr;
2870 fw.sbss_len = bce_RXP_b06FwSbssLen;
2871 fw.sbss_index = 0;
2872 fw.sbss = bce_RXP_b06FwSbss;
2874 fw.bss_addr = bce_RXP_b06FwBssAddr;
2875 fw.bss_len = bce_RXP_b06FwBssLen;
2876 fw.bss_index = 0;
2877 fw.bss = bce_RXP_b06FwBss;
2879 fw.rodata_addr = bce_RXP_b06FwRodataAddr;
2880 fw.rodata_len = bce_RXP_b06FwRodataLen;
2881 fw.rodata_index = 0;
2882 fw.rodata = bce_RXP_b06FwRodata;
2885 bce_load_cpu_fw(sc, &cpu_reg, &fw);
2886 /* Delay RXP start until initialization is complete. */
2889 /****************************************************************************/
2890 /* Initialize the TX CPU. */
2891 /* */
2892 /* Returns: */
2893 /* Nothing. */
2894 /****************************************************************************/
2895 static void
2896 bce_init_txp_cpu(struct bce_softc *sc)
2898 struct cpu_reg cpu_reg;
2899 struct fw_info fw;
2901 cpu_reg.mode = BCE_TXP_CPU_MODE;
2902 cpu_reg.mode_value_halt = BCE_TXP_CPU_MODE_SOFT_HALT;
2903 cpu_reg.mode_value_sstep = BCE_TXP_CPU_MODE_STEP_ENA;
2904 cpu_reg.state = BCE_TXP_CPU_STATE;
2905 cpu_reg.state_value_clear = 0xffffff;
2906 cpu_reg.gpr0 = BCE_TXP_CPU_REG_FILE;
2907 cpu_reg.evmask = BCE_TXP_CPU_EVENT_MASK;
2908 cpu_reg.pc = BCE_TXP_CPU_PROGRAM_COUNTER;
2909 cpu_reg.inst = BCE_TXP_CPU_INSTRUCTION;
2910 cpu_reg.bp = BCE_TXP_CPU_HW_BREAKPOINT;
2911 cpu_reg.spad_base = BCE_TXP_SCRATCH;
2912 cpu_reg.mips_view_base = 0x8000000;
2914 if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 ||
2915 BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) {
2916 fw.ver_major = bce_TXP_b09FwReleaseMajor;
2917 fw.ver_minor = bce_TXP_b09FwReleaseMinor;
2918 fw.ver_fix = bce_TXP_b09FwReleaseFix;
2919 fw.start_addr = bce_TXP_b09FwStartAddr;
2921 fw.text_addr = bce_TXP_b09FwTextAddr;
2922 fw.text_len = bce_TXP_b09FwTextLen;
2923 fw.text_index = 0;
2924 fw.text = bce_TXP_b09FwText;
2926 fw.data_addr = bce_TXP_b09FwDataAddr;
2927 fw.data_len = bce_TXP_b09FwDataLen;
2928 fw.data_index = 0;
2929 fw.data = bce_TXP_b09FwData;
2931 fw.sbss_addr = bce_TXP_b09FwSbssAddr;
2932 fw.sbss_len = bce_TXP_b09FwSbssLen;
2933 fw.sbss_index = 0;
2934 fw.sbss = bce_TXP_b09FwSbss;
2936 fw.bss_addr = bce_TXP_b09FwBssAddr;
2937 fw.bss_len = bce_TXP_b09FwBssLen;
2938 fw.bss_index = 0;
2939 fw.bss = bce_TXP_b09FwBss;
2941 fw.rodata_addr = bce_TXP_b09FwRodataAddr;
2942 fw.rodata_len = bce_TXP_b09FwRodataLen;
2943 fw.rodata_index = 0;
2944 fw.rodata = bce_TXP_b09FwRodata;
2945 } else {
2946 fw.ver_major = bce_TXP_b06FwReleaseMajor;
2947 fw.ver_minor = bce_TXP_b06FwReleaseMinor;
2948 fw.ver_fix = bce_TXP_b06FwReleaseFix;
2949 fw.start_addr = bce_TXP_b06FwStartAddr;
2951 fw.text_addr = bce_TXP_b06FwTextAddr;
2952 fw.text_len = bce_TXP_b06FwTextLen;
2953 fw.text_index = 0;
2954 fw.text = bce_TXP_b06FwText;
2956 fw.data_addr = bce_TXP_b06FwDataAddr;
2957 fw.data_len = bce_TXP_b06FwDataLen;
2958 fw.data_index = 0;
2959 fw.data = bce_TXP_b06FwData;
2961 fw.sbss_addr = bce_TXP_b06FwSbssAddr;
2962 fw.sbss_len = bce_TXP_b06FwSbssLen;
2963 fw.sbss_index = 0;
2964 fw.sbss = bce_TXP_b06FwSbss;
2966 fw.bss_addr = bce_TXP_b06FwBssAddr;
2967 fw.bss_len = bce_TXP_b06FwBssLen;
2968 fw.bss_index = 0;
2969 fw.bss = bce_TXP_b06FwBss;
2971 fw.rodata_addr = bce_TXP_b06FwRodataAddr;
2972 fw.rodata_len = bce_TXP_b06FwRodataLen;
2973 fw.rodata_index = 0;
2974 fw.rodata = bce_TXP_b06FwRodata;
2977 bce_load_cpu_fw(sc, &cpu_reg, &fw);
2978 bce_start_cpu(sc, &cpu_reg);
2981 /****************************************************************************/
2982 /* Initialize the TPAT CPU. */
2983 /* */
2984 /* Returns: */
2985 /* Nothing. */
2986 /****************************************************************************/
2987 static void
2988 bce_init_tpat_cpu(struct bce_softc *sc)
2990 struct cpu_reg cpu_reg;
2991 struct fw_info fw;
2993 cpu_reg.mode = BCE_TPAT_CPU_MODE;
2994 cpu_reg.mode_value_halt = BCE_TPAT_CPU_MODE_SOFT_HALT;
2995 cpu_reg.mode_value_sstep = BCE_TPAT_CPU_MODE_STEP_ENA;
2996 cpu_reg.state = BCE_TPAT_CPU_STATE;
2997 cpu_reg.state_value_clear = 0xffffff;
2998 cpu_reg.gpr0 = BCE_TPAT_CPU_REG_FILE;
2999 cpu_reg.evmask = BCE_TPAT_CPU_EVENT_MASK;
3000 cpu_reg.pc = BCE_TPAT_CPU_PROGRAM_COUNTER;
3001 cpu_reg.inst = BCE_TPAT_CPU_INSTRUCTION;
3002 cpu_reg.bp = BCE_TPAT_CPU_HW_BREAKPOINT;
3003 cpu_reg.spad_base = BCE_TPAT_SCRATCH;
3004 cpu_reg.mips_view_base = 0x8000000;
3006 if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 ||
3007 BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) {
3008 fw.ver_major = bce_TPAT_b09FwReleaseMajor;
3009 fw.ver_minor = bce_TPAT_b09FwReleaseMinor;
3010 fw.ver_fix = bce_TPAT_b09FwReleaseFix;
3011 fw.start_addr = bce_TPAT_b09FwStartAddr;
3013 fw.text_addr = bce_TPAT_b09FwTextAddr;
3014 fw.text_len = bce_TPAT_b09FwTextLen;
3015 fw.text_index = 0;
3016 fw.text = bce_TPAT_b09FwText;
3018 fw.data_addr = bce_TPAT_b09FwDataAddr;
3019 fw.data_len = bce_TPAT_b09FwDataLen;
3020 fw.data_index = 0;
3021 fw.data = bce_TPAT_b09FwData;
3023 fw.sbss_addr = bce_TPAT_b09FwSbssAddr;
3024 fw.sbss_len = bce_TPAT_b09FwSbssLen;
3025 fw.sbss_index = 0;
3026 fw.sbss = bce_TPAT_b09FwSbss;
3028 fw.bss_addr = bce_TPAT_b09FwBssAddr;
3029 fw.bss_len = bce_TPAT_b09FwBssLen;
3030 fw.bss_index = 0;
3031 fw.bss = bce_TPAT_b09FwBss;
3033 fw.rodata_addr = bce_TPAT_b09FwRodataAddr;
3034 fw.rodata_len = bce_TPAT_b09FwRodataLen;
3035 fw.rodata_index = 0;
3036 fw.rodata = bce_TPAT_b09FwRodata;
3037 } else {
3038 fw.ver_major = bce_TPAT_b06FwReleaseMajor;
3039 fw.ver_minor = bce_TPAT_b06FwReleaseMinor;
3040 fw.ver_fix = bce_TPAT_b06FwReleaseFix;
3041 fw.start_addr = bce_TPAT_b06FwStartAddr;
3043 fw.text_addr = bce_TPAT_b06FwTextAddr;
3044 fw.text_len = bce_TPAT_b06FwTextLen;
3045 fw.text_index = 0;
3046 fw.text = bce_TPAT_b06FwText;
3048 fw.data_addr = bce_TPAT_b06FwDataAddr;
3049 fw.data_len = bce_TPAT_b06FwDataLen;
3050 fw.data_index = 0;
3051 fw.data = bce_TPAT_b06FwData;
3053 fw.sbss_addr = bce_TPAT_b06FwSbssAddr;
3054 fw.sbss_len = bce_TPAT_b06FwSbssLen;
3055 fw.sbss_index = 0;
3056 fw.sbss = bce_TPAT_b06FwSbss;
3058 fw.bss_addr = bce_TPAT_b06FwBssAddr;
3059 fw.bss_len = bce_TPAT_b06FwBssLen;
3060 fw.bss_index = 0;
3061 fw.bss = bce_TPAT_b06FwBss;
3063 fw.rodata_addr = bce_TPAT_b06FwRodataAddr;
3064 fw.rodata_len = bce_TPAT_b06FwRodataLen;
3065 fw.rodata_index = 0;
3066 fw.rodata = bce_TPAT_b06FwRodata;
3069 bce_load_cpu_fw(sc, &cpu_reg, &fw);
3070 bce_start_cpu(sc, &cpu_reg);
3073 /****************************************************************************/
3074 /* Initialize the CP CPU. */
3075 /* */
3076 /* Returns: */
3077 /* Nothing. */
3078 /****************************************************************************/
3079 static void
3080 bce_init_cp_cpu(struct bce_softc *sc)
3082 struct cpu_reg cpu_reg;
3083 struct fw_info fw;
3085 cpu_reg.mode = BCE_CP_CPU_MODE;
3086 cpu_reg.mode_value_halt = BCE_CP_CPU_MODE_SOFT_HALT;
3087 cpu_reg.mode_value_sstep = BCE_CP_CPU_MODE_STEP_ENA;
3088 cpu_reg.state = BCE_CP_CPU_STATE;
3089 cpu_reg.state_value_clear = 0xffffff;
3090 cpu_reg.gpr0 = BCE_CP_CPU_REG_FILE;
3091 cpu_reg.evmask = BCE_CP_CPU_EVENT_MASK;
3092 cpu_reg.pc = BCE_CP_CPU_PROGRAM_COUNTER;
3093 cpu_reg.inst = BCE_CP_CPU_INSTRUCTION;
3094 cpu_reg.bp = BCE_CP_CPU_HW_BREAKPOINT;
3095 cpu_reg.spad_base = BCE_CP_SCRATCH;
3096 cpu_reg.mips_view_base = 0x8000000;
3098 if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 ||
3099 BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) {
3100 fw.ver_major = bce_CP_b09FwReleaseMajor;
3101 fw.ver_minor = bce_CP_b09FwReleaseMinor;
3102 fw.ver_fix = bce_CP_b09FwReleaseFix;
3103 fw.start_addr = bce_CP_b09FwStartAddr;
3105 fw.text_addr = bce_CP_b09FwTextAddr;
3106 fw.text_len = bce_CP_b09FwTextLen;
3107 fw.text_index = 0;
3108 fw.text = bce_CP_b09FwText;
3110 fw.data_addr = bce_CP_b09FwDataAddr;
3111 fw.data_len = bce_CP_b09FwDataLen;
3112 fw.data_index = 0;
3113 fw.data = bce_CP_b09FwData;
3115 fw.sbss_addr = bce_CP_b09FwSbssAddr;
3116 fw.sbss_len = bce_CP_b09FwSbssLen;
3117 fw.sbss_index = 0;
3118 fw.sbss = bce_CP_b09FwSbss;
3120 fw.bss_addr = bce_CP_b09FwBssAddr;
3121 fw.bss_len = bce_CP_b09FwBssLen;
3122 fw.bss_index = 0;
3123 fw.bss = bce_CP_b09FwBss;
3125 fw.rodata_addr = bce_CP_b09FwRodataAddr;
3126 fw.rodata_len = bce_CP_b09FwRodataLen;
3127 fw.rodata_index = 0;
3128 fw.rodata = bce_CP_b09FwRodata;
3129 } else {
3130 fw.ver_major = bce_CP_b06FwReleaseMajor;
3131 fw.ver_minor = bce_CP_b06FwReleaseMinor;
3132 fw.ver_fix = bce_CP_b06FwReleaseFix;
3133 fw.start_addr = bce_CP_b06FwStartAddr;
3135 fw.text_addr = bce_CP_b06FwTextAddr;
3136 fw.text_len = bce_CP_b06FwTextLen;
3137 fw.text_index = 0;
3138 fw.text = bce_CP_b06FwText;
3140 fw.data_addr = bce_CP_b06FwDataAddr;
3141 fw.data_len = bce_CP_b06FwDataLen;
3142 fw.data_index = 0;
3143 fw.data = bce_CP_b06FwData;
3145 fw.sbss_addr = bce_CP_b06FwSbssAddr;
3146 fw.sbss_len = bce_CP_b06FwSbssLen;
3147 fw.sbss_index = 0;
3148 fw.sbss = bce_CP_b06FwSbss;
3150 fw.bss_addr = bce_CP_b06FwBssAddr;
3151 fw.bss_len = bce_CP_b06FwBssLen;
3152 fw.bss_index = 0;
3153 fw.bss = bce_CP_b06FwBss;
3155 fw.rodata_addr = bce_CP_b06FwRodataAddr;
3156 fw.rodata_len = bce_CP_b06FwRodataLen;
3157 fw.rodata_index = 0;
3158 fw.rodata = bce_CP_b06FwRodata;
3161 bce_load_cpu_fw(sc, &cpu_reg, &fw);
3162 bce_start_cpu(sc, &cpu_reg);
3165 /****************************************************************************/
3166 /* Initialize the COM CPU. */
3167 /* */
3168 /* Returns: */
3169 /* Nothing. */
3170 /****************************************************************************/
3171 static void
3172 bce_init_com_cpu(struct bce_softc *sc)
3174 struct cpu_reg cpu_reg;
3175 struct fw_info fw;
3177 cpu_reg.mode = BCE_COM_CPU_MODE;
3178 cpu_reg.mode_value_halt = BCE_COM_CPU_MODE_SOFT_HALT;
3179 cpu_reg.mode_value_sstep = BCE_COM_CPU_MODE_STEP_ENA;
3180 cpu_reg.state = BCE_COM_CPU_STATE;
3181 cpu_reg.state_value_clear = 0xffffff;
3182 cpu_reg.gpr0 = BCE_COM_CPU_REG_FILE;
3183 cpu_reg.evmask = BCE_COM_CPU_EVENT_MASK;
3184 cpu_reg.pc = BCE_COM_CPU_PROGRAM_COUNTER;
3185 cpu_reg.inst = BCE_COM_CPU_INSTRUCTION;
3186 cpu_reg.bp = BCE_COM_CPU_HW_BREAKPOINT;
3187 cpu_reg.spad_base = BCE_COM_SCRATCH;
3188 cpu_reg.mips_view_base = 0x8000000;
3190 if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 ||
3191 BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) {
3192 fw.ver_major = bce_COM_b09FwReleaseMajor;
3193 fw.ver_minor = bce_COM_b09FwReleaseMinor;
3194 fw.ver_fix = bce_COM_b09FwReleaseFix;
3195 fw.start_addr = bce_COM_b09FwStartAddr;
3197 fw.text_addr = bce_COM_b09FwTextAddr;
3198 fw.text_len = bce_COM_b09FwTextLen;
3199 fw.text_index = 0;
3200 fw.text = bce_COM_b09FwText;
3202 fw.data_addr = bce_COM_b09FwDataAddr;
3203 fw.data_len = bce_COM_b09FwDataLen;
3204 fw.data_index = 0;
3205 fw.data = bce_COM_b09FwData;
3207 fw.sbss_addr = bce_COM_b09FwSbssAddr;
3208 fw.sbss_len = bce_COM_b09FwSbssLen;
3209 fw.sbss_index = 0;
3210 fw.sbss = bce_COM_b09FwSbss;
3212 fw.bss_addr = bce_COM_b09FwBssAddr;
3213 fw.bss_len = bce_COM_b09FwBssLen;
3214 fw.bss_index = 0;
3215 fw.bss = bce_COM_b09FwBss;
3217 fw.rodata_addr = bce_COM_b09FwRodataAddr;
3218 fw.rodata_len = bce_COM_b09FwRodataLen;
3219 fw.rodata_index = 0;
3220 fw.rodata = bce_COM_b09FwRodata;
3221 } else {
3222 fw.ver_major = bce_COM_b06FwReleaseMajor;
3223 fw.ver_minor = bce_COM_b06FwReleaseMinor;
3224 fw.ver_fix = bce_COM_b06FwReleaseFix;
3225 fw.start_addr = bce_COM_b06FwStartAddr;
3227 fw.text_addr = bce_COM_b06FwTextAddr;
3228 fw.text_len = bce_COM_b06FwTextLen;
3229 fw.text_index = 0;
3230 fw.text = bce_COM_b06FwText;
3232 fw.data_addr = bce_COM_b06FwDataAddr;
3233 fw.data_len = bce_COM_b06FwDataLen;
3234 fw.data_index = 0;
3235 fw.data = bce_COM_b06FwData;
3237 fw.sbss_addr = bce_COM_b06FwSbssAddr;
3238 fw.sbss_len = bce_COM_b06FwSbssLen;
3239 fw.sbss_index = 0;
3240 fw.sbss = bce_COM_b06FwSbss;
3242 fw.bss_addr = bce_COM_b06FwBssAddr;
3243 fw.bss_len = bce_COM_b06FwBssLen;
3244 fw.bss_index = 0;
3245 fw.bss = bce_COM_b06FwBss;
3247 fw.rodata_addr = bce_COM_b06FwRodataAddr;
3248 fw.rodata_len = bce_COM_b06FwRodataLen;
3249 fw.rodata_index = 0;
3250 fw.rodata = bce_COM_b06FwRodata;
3253 bce_load_cpu_fw(sc, &cpu_reg, &fw);
3254 bce_start_cpu(sc, &cpu_reg);
3257 /****************************************************************************/
3258 /* Initialize the RV2P, RX, TX, TPAT, COM, and CP CPUs. */
3259 /* */
3260 /* Loads the firmware for each CPU and starts the CPU. */
3261 /* */
3262 /* Returns: */
3263 /* Nothing. */
3264 /****************************************************************************/
3265 static void
3266 bce_init_cpus(struct bce_softc *sc)
3268 if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 ||
3269 BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) {
3270 if (BCE_CHIP_REV(sc) == BCE_CHIP_REV_Ax) {
3271 bce_load_rv2p_fw(sc, bce_xi90_rv2p_proc1,
3272 sizeof(bce_xi90_rv2p_proc1), RV2P_PROC1);
3273 bce_load_rv2p_fw(sc, bce_xi90_rv2p_proc2,
3274 sizeof(bce_xi90_rv2p_proc2), RV2P_PROC2);
3275 } else {
3276 bce_load_rv2p_fw(sc, bce_xi_rv2p_proc1,
3277 sizeof(bce_xi_rv2p_proc1), RV2P_PROC1);
3278 bce_load_rv2p_fw(sc, bce_xi_rv2p_proc2,
3279 sizeof(bce_xi_rv2p_proc2), RV2P_PROC2);
3281 } else {
3282 bce_load_rv2p_fw(sc, bce_rv2p_proc1,
3283 sizeof(bce_rv2p_proc1), RV2P_PROC1);
3284 bce_load_rv2p_fw(sc, bce_rv2p_proc2,
3285 sizeof(bce_rv2p_proc2), RV2P_PROC2);
3288 bce_init_rxp_cpu(sc);
3289 bce_init_txp_cpu(sc);
3290 bce_init_tpat_cpu(sc);
3291 bce_init_com_cpu(sc);
3292 bce_init_cp_cpu(sc);
3295 /****************************************************************************/
3296 /* Initialize context memory. */
3297 /* */
3298 /* Clears the memory associated with each Context ID (CID). */
3299 /* */
3300 /* Returns: */
3301 /* 0 on success, positive value on failure. */
3302 /****************************************************************************/
3303 static int
3304 bce_init_ctx(struct bce_softc *sc)
3306 if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 ||
3307 BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) {
3308 /* DRC: Replace this constant value with a #define. */
3309 int i, retry_cnt = 10;
3310 uint32_t val;
3313 * BCM5709 context memory may be cached
3314 * in host memory so prepare the host memory
3315 * for access.
3317 val = BCE_CTX_COMMAND_ENABLED | BCE_CTX_COMMAND_MEM_INIT |
3318 (1 << 12);
3319 val |= (BCM_PAGE_BITS - 8) << 16;
3320 REG_WR(sc, BCE_CTX_COMMAND, val);
3322 /* Wait for mem init command to complete. */
3323 for (i = 0; i < retry_cnt; i++) {
3324 val = REG_RD(sc, BCE_CTX_COMMAND);
3325 if (!(val & BCE_CTX_COMMAND_MEM_INIT))
3326 break;
3327 DELAY(2);
3329 if (i == retry_cnt) {
3330 device_printf(sc->bce_dev,
3331 "Context memory initialization failed!\n");
3332 return ETIMEDOUT;
3335 for (i = 0; i < sc->ctx_pages; i++) {
3336 int j;
3339 * Set the physical address of the context
3340 * memory cache.
3342 REG_WR(sc, BCE_CTX_HOST_PAGE_TBL_DATA0,
3343 BCE_ADDR_LO(sc->ctx_paddr[i] & 0xfffffff0) |
3344 BCE_CTX_HOST_PAGE_TBL_DATA0_VALID);
3345 REG_WR(sc, BCE_CTX_HOST_PAGE_TBL_DATA1,
3346 BCE_ADDR_HI(sc->ctx_paddr[i]));
3347 REG_WR(sc, BCE_CTX_HOST_PAGE_TBL_CTRL,
3348 i | BCE_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
3351 * Verify that the context memory write was successful.
3353 for (j = 0; j < retry_cnt; j++) {
3354 val = REG_RD(sc, BCE_CTX_HOST_PAGE_TBL_CTRL);
3355 if ((val &
3356 BCE_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) == 0)
3357 break;
3358 DELAY(5);
3360 if (j == retry_cnt) {
3361 device_printf(sc->bce_dev,
3362 "Failed to initialize context page!\n");
3363 return ETIMEDOUT;
3366 } else {
3367 uint32_t vcid_addr, offset;
3370 * For the 5706/5708, context memory is local to
3371 * the controller, so initialize the controller
3372 * context memory.
3375 vcid_addr = GET_CID_ADDR(96);
3376 while (vcid_addr) {
3377 vcid_addr -= PHY_CTX_SIZE;
3379 REG_WR(sc, BCE_CTX_VIRT_ADDR, 0);
3380 REG_WR(sc, BCE_CTX_PAGE_TBL, vcid_addr);
3382 for (offset = 0; offset < PHY_CTX_SIZE; offset += 4)
3383 CTX_WR(sc, 0x00, offset, 0);
3385 REG_WR(sc, BCE_CTX_VIRT_ADDR, vcid_addr);
3386 REG_WR(sc, BCE_CTX_PAGE_TBL, vcid_addr);
3389 return 0;
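/*
 * Worked example for the page size encoding above: assuming 4KiB pages,
 * BCM_PAGE_BITS is 12, so (BCM_PAGE_BITS - 8) << 16 places the value 4 in
 * the CTX command's page size field.
 */
#if 0
val |= (12 - 8) << 16;	/* 4KiB pages -> field value 4 */
#endif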
3392 /****************************************************************************/
3393 /* Fetch the permanent MAC address of the controller. */
3394 /* */
3395 /* Returns: */
3396 /* Nothing. */
3397 /****************************************************************************/
3398 static void
3399 bce_get_mac_addr(struct bce_softc *sc)
3401 uint32_t mac_lo = 0, mac_hi = 0;
3404 * The NetXtreme II bootcode populates various NIC
3405 * power-on and runtime configuration items in a
3406 * shared memory area. The factory configured MAC
3407 * address is available from both NVRAM and the
3408 * shared memory area so we'll read the value from
3409 * shared memory for speed.
3412 mac_hi = bce_shmem_rd(sc, BCE_PORT_HW_CFG_MAC_UPPER);
3413 mac_lo = bce_shmem_rd(sc, BCE_PORT_HW_CFG_MAC_LOWER);
3415 if (mac_lo == 0 && mac_hi == 0) {
3416 if_printf(&sc->arpcom.ac_if, "Invalid Ethernet address!\n");
3417 } else {
3418 sc->eaddr[0] = (u_char)(mac_hi >> 8);
3419 sc->eaddr[1] = (u_char)(mac_hi >> 0);
3420 sc->eaddr[2] = (u_char)(mac_lo >> 24);
3421 sc->eaddr[3] = (u_char)(mac_lo >> 16);
3422 sc->eaddr[4] = (u_char)(mac_lo >> 8);
3423 sc->eaddr[5] = (u_char)(mac_lo >> 0);
3427 /****************************************************************************/
3428 /* Program the MAC address. */
3429 /* */
3430 /* Returns: */
3431 /* Nothing. */
3432 /****************************************************************************/
3433 static void
3434 bce_set_mac_addr(struct bce_softc *sc)
3436 const uint8_t *mac_addr = sc->eaddr;
3437 uint32_t val;
3439 val = (mac_addr[0] << 8) | mac_addr[1];
3440 REG_WR(sc, BCE_EMAC_MAC_MATCH0, val);
3442 val = (mac_addr[2] << 24) |
3443 (mac_addr[3] << 16) |
3444 (mac_addr[4] << 8) |
3445 mac_addr[5];
3446 REG_WR(sc, BCE_EMAC_MAC_MATCH1, val);
3449 /****************************************************************************/
3450 /* Stop the controller. */
3451 /* */
3452 /* Returns: */
3453 /* Nothing. */
3454 /****************************************************************************/
3455 static void
3456 bce_stop(struct bce_softc *sc)
3458 struct ifnet *ifp = &sc->arpcom.ac_if;
3459 int i;
3461 ASSERT_IFNET_SERIALIZED_ALL(ifp);
3463 callout_stop(&sc->bce_tick_callout);
3465 /* Disable the transmit/receive blocks. */
3466 REG_WR(sc, BCE_MISC_ENABLE_CLR_BITS, BCE_MISC_ENABLE_CLR_DEFAULT);
3467 REG_RD(sc, BCE_MISC_ENABLE_CLR_BITS);
3468 DELAY(20);
3470 bce_disable_intr(sc);
3472 ifp->if_flags &= ~IFF_RUNNING;
3473 for (i = 0; i < sc->tx_ring_cnt; ++i) {
3474 ifsq_clr_oactive(sc->tx_rings[i].ifsq);
3475 ifsq_watchdog_stop(&sc->tx_rings[i].tx_watchdog);
3478 /* Free the RX lists. */
3479 for (i = 0; i < sc->rx_ring_cnt; ++i)
3480 bce_free_rx_chain(&sc->rx_rings[i]);
3482 /* Free TX buffers. */
3483 for (i = 0; i < sc->tx_ring_cnt; ++i)
3484 bce_free_tx_chain(&sc->tx_rings[i]);
3486 sc->bce_link = 0;
3487 sc->bce_coalchg_mask = 0;
3490 static int
3491 bce_reset(struct bce_softc *sc, uint32_t reset_code)
3493 uint32_t val;
3494 int i, rc = 0;
3496 /* Wait for pending PCI transactions to complete. */
3497 REG_WR(sc, BCE_MISC_ENABLE_CLR_BITS,
3498 BCE_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
3499 BCE_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
3500 BCE_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
3501 BCE_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
3502 val = REG_RD(sc, BCE_MISC_ENABLE_CLR_BITS);
3503 DELAY(5);
3505 /* Disable DMA */
3506 if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 ||
3507 BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) {
3508 val = REG_RD(sc, BCE_MISC_NEW_CORE_CTL);
3509 val &= ~BCE_MISC_NEW_CORE_CTL_DMA_ENABLE;
3510 REG_WR(sc, BCE_MISC_NEW_CORE_CTL, val);
3513 /* Assume bootcode is running. */
3514 sc->bce_fw_timed_out = 0;
3515 sc->bce_drv_cardiac_arrest = 0;
3517 /* Give the firmware a chance to prepare for the reset. */
3518 rc = bce_fw_sync(sc, BCE_DRV_MSG_DATA_WAIT0 | reset_code);
3519 if (rc) {
3520 if_printf(&sc->arpcom.ac_if,
3521 "Firmware is not ready for reset\n");
3522 return rc;
3525 /* Set a firmware reminder that this is a soft reset. */
3526 bce_shmem_wr(sc, BCE_DRV_RESET_SIGNATURE,
3527 BCE_DRV_RESET_SIGNATURE_MAGIC);
3529 /* Dummy read to force the chip to complete all current transactions. */
3530 val = REG_RD(sc, BCE_MISC_ID);
3532 /* Chip reset. */
3533 if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 ||
3534 BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) {
3535 REG_WR(sc, BCE_MISC_COMMAND, BCE_MISC_COMMAND_SW_RESET);
3536 REG_RD(sc, BCE_MISC_COMMAND);
3537 DELAY(5);
3539 val = BCE_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
3540 BCE_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
3542 pci_write_config(sc->bce_dev, BCE_PCICFG_MISC_CONFIG, val, 4);
3543 } else {
3544 val = BCE_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3545 BCE_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
3546 BCE_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
3547 REG_WR(sc, BCE_PCICFG_MISC_CONFIG, val);
3549 /* Allow up to 100us for the reset to complete. */
3550 for (i = 0; i < 10; i++) {
3551 val = REG_RD(sc, BCE_PCICFG_MISC_CONFIG);
3552 if ((val & (BCE_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3553 BCE_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0)
3554 break;
3555 DELAY(10);
3558 /* Check that reset completed successfully. */
3559 if (val & (BCE_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3560 BCE_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
3561 if_printf(&sc->arpcom.ac_if, "Reset failed!\n");
3562 return EBUSY;
3566 /* Make sure byte swapping is properly configured. */
3567 val = REG_RD(sc, BCE_PCI_SWAP_DIAG0);
3568 if (val != 0x01020304) {
3569 if_printf(&sc->arpcom.ac_if, "Byte swap is incorrect!\n");
3570 return ENODEV;
3573 /* Just completed a reset, assume that firmware is running again. */
3574 sc->bce_fw_timed_out = 0;
3575 sc->bce_drv_cardiac_arrest = 0;
3577 /* Wait for the firmware to finish its initialization. */
3578 rc = bce_fw_sync(sc, BCE_DRV_MSG_DATA_WAIT1 | reset_code);
3579 if (rc) {
3580 if_printf(&sc->arpcom.ac_if,
3581 "Firmware did not complete initialization!\n");
3584 if (sc->bce_irq_type == PCI_INTR_TYPE_MSIX) {
3585 bce_setup_msix_table(sc);
3586 /* Prevent MSI-X table reads and writes from timing out */
3587 REG_WR(sc, BCE_MISC_ECO_HW_CTL,
3588 BCE_MISC_ECO_HW_CTL_LARGE_GRC_TMOUT_EN);
3591 return rc;
3594 static int
3595 bce_chipinit(struct bce_softc *sc)
3597 uint32_t val;
3598 int rc = 0;
3600 /* Make sure the interrupt is not active. */
3601 REG_WR(sc, BCE_PCICFG_INT_ACK_CMD, BCE_PCICFG_INT_ACK_CMD_MASK_INT);
3602 REG_RD(sc, BCE_PCICFG_INT_ACK_CMD);
3605 * Initialize DMA byte/word swapping, configure the number of DMA
3606 * channels and PCI clock compensation delay.
3608 val = BCE_DMA_CONFIG_DATA_BYTE_SWAP |
3609 BCE_DMA_CONFIG_DATA_WORD_SWAP |
3610 #if BYTE_ORDER == BIG_ENDIAN
3611 BCE_DMA_CONFIG_CNTL_BYTE_SWAP |
3612 #endif
3613 BCE_DMA_CONFIG_CNTL_WORD_SWAP |
3614 DMA_READ_CHANS << 12 |
3615 DMA_WRITE_CHANS << 16;
3617 val |= (0x2 << 20) | BCE_DMA_CONFIG_CNTL_PCI_COMP_DLY;
3619 if ((sc->bce_flags & BCE_PCIX_FLAG) && sc->bus_speed_mhz == 133)
3620 val |= BCE_DMA_CONFIG_PCI_FAST_CLK_CMP;
3623 * This setting resolves a problem observed on certain Intel PCI
3624 * chipsets that cannot handle multiple outstanding DMA operations.
3625 * See errata E9_5706A1_65.
3627 if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5706 &&
3628 BCE_CHIP_ID(sc) != BCE_CHIP_ID_5706_A0 &&
3629 !(sc->bce_flags & BCE_PCIX_FLAG))
3630 val |= BCE_DMA_CONFIG_CNTL_PING_PONG_DMA;
3632 REG_WR(sc, BCE_DMA_CONFIG, val);
3634 /* Enable the RX_V2P and Context state machines before access. */
3635 REG_WR(sc, BCE_MISC_ENABLE_SET_BITS,
3636 BCE_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
3637 BCE_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
3638 BCE_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);
3640 /* Initialize context mapping and zero out the quick contexts. */
3641 rc = bce_init_ctx(sc);
3642 if (rc != 0)
3643 return rc;
3645 /* Initialize the on-board CPUs */
3646 bce_init_cpus(sc);
3648 /* Enable management frames (NC-SI) to flow to the MCP. */
3649 if (sc->bce_flags & BCE_MFW_ENABLE_FLAG) {
3650 val = REG_RD(sc, BCE_RPM_MGMT_PKT_CTRL) |
3651 BCE_RPM_MGMT_PKT_CTRL_MGMT_EN;
3652 REG_WR(sc, BCE_RPM_MGMT_PKT_CTRL, val);
3655 /* Prepare NVRAM for access. */
3656 rc = bce_init_nvram(sc);
3657 if (rc != 0)
3658 return rc;
3660 /* Set the kernel bypass block size */
3661 val = REG_RD(sc, BCE_MQ_CONFIG);
3662 val &= ~BCE_MQ_CONFIG_KNL_BYP_BLK_SIZE;
3663 val |= BCE_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
3665 /* Enable bins used on the 5709/5716. */
3666 if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 ||
3667 BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) {
3668 val |= BCE_MQ_CONFIG_BIN_MQ_MODE;
3669 if (BCE_CHIP_ID(sc) == BCE_CHIP_ID_5709_A1)
3670 val |= BCE_MQ_CONFIG_HALT_DIS;
3673 REG_WR(sc, BCE_MQ_CONFIG, val);
3675 val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
3676 REG_WR(sc, BCE_MQ_KNL_BYP_WIND_START, val);
3677 REG_WR(sc, BCE_MQ_KNL_WIND_END, val);
3679 /* Set the page size and clear the RV2P processor stall bits. */
3680 val = (BCM_PAGE_BITS - 8) << 24;
3681 REG_WR(sc, BCE_RV2P_CONFIG, val);
3683 /* Configure page size. */
3684 val = REG_RD(sc, BCE_TBDR_CONFIG);
3685 val &= ~BCE_TBDR_CONFIG_PAGE_SIZE;
3686 val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
3687 REG_WR(sc, BCE_TBDR_CONFIG, val);
3689 /* Set the perfect match control register to default. */
3690 REG_WR_IND(sc, BCE_RXP_PM_CTRL, 0);
3692 return 0;
3695 /****************************************************************************/
3696 /* Initialize the controller in preparation to send/receive traffic. */
3697 /* */
3698 /* Returns: */
3699 /* 0 for success, positive value for failure. */
3700 /****************************************************************************/
3701 static int
3702 bce_blockinit(struct bce_softc *sc)
3704 uint32_t reg, val;
3705 int i;
3707 /* Load the hardware default MAC address. */
3708 bce_set_mac_addr(sc);
3710 /* Set the Ethernet backoff seed value */
3711 val = sc->eaddr[0] + (sc->eaddr[1] << 8) + (sc->eaddr[2] << 16) +
3712 sc->eaddr[3] + (sc->eaddr[4] << 8) + (sc->eaddr[5] << 16);
3713 REG_WR(sc, BCE_EMAC_BACKOFF_SEED, val);
3715 sc->rx_mode = BCE_EMAC_RX_MODE_SORT_MODE;
3717 /* Set up link change interrupt generation. */
3718 REG_WR(sc, BCE_EMAC_ATTENTION_ENA, BCE_EMAC_ATTENTION_ENA_LINK);
3720 /* Program the physical address of the status block. */
3721 REG_WR(sc, BCE_HC_STATUS_ADDR_L, BCE_ADDR_LO(sc->status_block_paddr));
3722 REG_WR(sc, BCE_HC_STATUS_ADDR_H, BCE_ADDR_HI(sc->status_block_paddr));
3724 /* Program the physical address of the statistics block. */
3725 REG_WR(sc, BCE_HC_STATISTICS_ADDR_L,
3726 BCE_ADDR_LO(sc->stats_block_paddr));
3727 REG_WR(sc, BCE_HC_STATISTICS_ADDR_H,
3728 BCE_ADDR_HI(sc->stats_block_paddr));
3730 /* Program various host coalescing parameters. */
3731 REG_WR(sc, BCE_HC_TX_QUICK_CONS_TRIP,
3732 (sc->bce_tx_quick_cons_trip_int << 16) |
3733 sc->bce_tx_quick_cons_trip);
3734 REG_WR(sc, BCE_HC_RX_QUICK_CONS_TRIP,
3735 (sc->bce_rx_quick_cons_trip_int << 16) |
3736 sc->bce_rx_quick_cons_trip);
3737 REG_WR(sc, BCE_HC_COMP_PROD_TRIP,
3738 (sc->bce_comp_prod_trip_int << 16) | sc->bce_comp_prod_trip);
3739 REG_WR(sc, BCE_HC_TX_TICKS,
3740 (sc->bce_tx_ticks_int << 16) | sc->bce_tx_ticks);
3741 REG_WR(sc, BCE_HC_RX_TICKS,
3742 (sc->bce_rx_ticks_int << 16) | sc->bce_rx_ticks);
3743 REG_WR(sc, BCE_HC_COM_TICKS,
3744 (sc->bce_com_ticks_int << 16) | sc->bce_com_ticks);
3745 REG_WR(sc, BCE_HC_CMD_TICKS,
3746 (sc->bce_cmd_ticks_int << 16) | sc->bce_cmd_ticks);
3747 REG_WR(sc, BCE_HC_STATS_TICKS, (sc->bce_stats_ticks & 0xffff00));
3748 REG_WR(sc, BCE_HC_STAT_COLLECT_TICKS, 0xbb8); /* 3ms */
3750 if (sc->bce_irq_type == PCI_INTR_TYPE_MSIX)
3751 REG_WR(sc, BCE_HC_MSIX_BIT_VECTOR, BCE_HC_MSIX_BIT_VECTOR_VAL);
3753 val = BCE_HC_CONFIG_TX_TMR_MODE | BCE_HC_CONFIG_COLLECT_STATS;
3754 if ((sc->bce_flags & BCE_ONESHOT_MSI_FLAG) ||
3755 sc->bce_irq_type == PCI_INTR_TYPE_MSIX) {
3756 if (bootverbose) {
3757 if (sc->bce_irq_type == PCI_INTR_TYPE_MSIX) {
3758 if_printf(&sc->arpcom.ac_if,
3759 "using MSI-X\n");
3760 } else {
3761 if_printf(&sc->arpcom.ac_if,
3762 "using oneshot MSI\n");
3765 val |= BCE_HC_CONFIG_ONE_SHOT | BCE_HC_CONFIG_USE_INT_PARAM;
3766 if (sc->bce_irq_type == PCI_INTR_TYPE_MSIX)
3767 val |= BCE_HC_CONFIG_SB_ADDR_INC_128B;
3769 REG_WR(sc, BCE_HC_CONFIG, val);
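3770 /* Program the per-status-block coalescing parameters used by the additional rings. */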
3771 for (i = 1; i < sc->rx_ring_cnt; ++i) {
3772 uint32_t base;
3774 base = ((i - 1) * BCE_HC_SB_CONFIG_SIZE) + BCE_HC_SB_CONFIG_1;
3775 KKASSERT(base <= BCE_HC_SB_CONFIG_8);
3777 REG_WR(sc, base,
3778 BCE_HC_SB_CONFIG_1_TX_TMR_MODE |
3779 /* BCE_HC_SB_CONFIG_1_RX_TMR_MODE | */
3780 BCE_HC_SB_CONFIG_1_ONE_SHOT);
3782 REG_WR(sc, base + BCE_HC_TX_QUICK_CONS_TRIP_OFF,
3783 (sc->bce_tx_quick_cons_trip_int << 16) |
3784 sc->bce_tx_quick_cons_trip);
3785 REG_WR(sc, base + BCE_HC_RX_QUICK_CONS_TRIP_OFF,
3786 (sc->bce_rx_quick_cons_trip_int << 16) |
3787 sc->bce_rx_quick_cons_trip);
3788 REG_WR(sc, base + BCE_HC_TX_TICKS_OFF,
3789 (sc->bce_tx_ticks_int << 16) | sc->bce_tx_ticks);
3790 REG_WR(sc, base + BCE_HC_RX_TICKS_OFF,
3791 (sc->bce_rx_ticks_int << 16) | sc->bce_rx_ticks);
3794 /* Clear the internal statistics counters. */
3795 REG_WR(sc, BCE_HC_COMMAND, BCE_HC_COMMAND_CLR_STAT_NOW);
3797 /* Verify that bootcode is running. */
3798 reg = bce_shmem_rd(sc, BCE_DEV_INFO_SIGNATURE);
3800 if ((reg & BCE_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
3801 BCE_DEV_INFO_SIGNATURE_MAGIC) {
3802 if_printf(&sc->arpcom.ac_if,
3803 "Bootcode not running! Found: 0x%08X, "
3804 "Expected: 08%08X\n",
3805 reg & BCE_DEV_INFO_SIGNATURE_MAGIC_MASK,
3806 BCE_DEV_INFO_SIGNATURE_MAGIC);
3807 return ENODEV;
3810 /* Enable DMA */
3811 if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 ||
3812 BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) {
3813 val = REG_RD(sc, BCE_MISC_NEW_CORE_CTL);
3814 val |= BCE_MISC_NEW_CORE_CTL_DMA_ENABLE;
3815 REG_WR(sc, BCE_MISC_NEW_CORE_CTL, val);
3818 /* Allow bootcode to apply any additional fixes before enabling MAC. */
3819 bce_fw_sync(sc, BCE_DRV_MSG_DATA_WAIT2 | BCE_DRV_MSG_CODE_RESET);
3821 /* Enable link state change interrupt generation. */
3822 REG_WR(sc, BCE_HC_ATTN_BITS_ENABLE, STATUS_ATTN_BITS_LINK_STATE);
3824 /* Enable the RXP. */
3825 bce_start_rxp_cpu(sc);
3827 /* Disable management frames (NC-SI) from flowing to the MCP. */
3828 if (sc->bce_flags & BCE_MFW_ENABLE_FLAG) {
3829 val = REG_RD(sc, BCE_RPM_MGMT_PKT_CTRL) &
3830 ~BCE_RPM_MGMT_PKT_CTRL_MGMT_EN;
3831 REG_WR(sc, BCE_RPM_MGMT_PKT_CTRL, val);
3834 /* Enable all remaining blocks in the MAC. */
3835 if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 ||
3836 BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) {
3837 REG_WR(sc, BCE_MISC_ENABLE_SET_BITS,
3838 BCE_MISC_ENABLE_DEFAULT_XI);
3839 } else {
3840 REG_WR(sc, BCE_MISC_ENABLE_SET_BITS, BCE_MISC_ENABLE_DEFAULT);
3842 REG_RD(sc, BCE_MISC_ENABLE_SET_BITS);
3843 DELAY(20);
3845 /* Save the current host coalescing block settings. */
3846 sc->hc_command = REG_RD(sc, BCE_HC_COMMAND);
3848 return 0;
3851 /****************************************************************************/
3852 /* Encapsulate an mbuf cluster into the rx_bd chain. */
3853 /* */
3854 /* The NetXtreme II can support Jumbo frames by using multiple rx_bd's. */
3855 /* This routine will map an mbuf cluster into 1 or more rx_bd's as */
3856 /* necessary. */
3857 /* */
3858 /* Returns: */
3859 /* 0 for success, positive value for failure. */
3860 /****************************************************************************/
3861 static int
3862 bce_newbuf_std(struct bce_rx_ring *rxr, uint16_t *prod, uint16_t chain_prod,
3863 uint32_t *prod_bseq, int init)
3865 struct bce_rx_buf *rx_buf;
3866 bus_dmamap_t map;
3867 bus_dma_segment_t seg;
3868 struct mbuf *m_new;
3869 int error, nseg;
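3870 /* Sleeping for an mbuf is acceptable only during initialization; otherwise fail fast. */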
3871 /* This is a new mbuf allocation. */
3872 m_new = m_getcl(init ? M_WAITOK : M_NOWAIT, MT_DATA, M_PKTHDR);
3873 if (m_new == NULL)
3874 return ENOBUFS;
3876 m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
3878 /* Map the mbuf cluster into device memory. */
3879 error = bus_dmamap_load_mbuf_segment(rxr->rx_mbuf_tag,
3880 rxr->rx_mbuf_tmpmap, m_new, &seg, 1, &nseg, BUS_DMA_NOWAIT);
3881 if (error) {
3882 m_freem(m_new);
3883 if (init) {
3884 if_printf(&rxr->sc->arpcom.ac_if,
3885 "Error mapping mbuf into RX chain!\n");
3887 return error;
3890 rx_buf = &rxr->rx_bufs[chain_prod];
3891 if (rx_buf->rx_mbuf_ptr != NULL)
3892 bus_dmamap_unload(rxr->rx_mbuf_tag, rx_buf->rx_mbuf_map);
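3893 /* Swap the just-loaded temporary map into this slot; the old map becomes the spare. */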
3894 map = rx_buf->rx_mbuf_map;
3895 rx_buf->rx_mbuf_map = rxr->rx_mbuf_tmpmap;
3896 rxr->rx_mbuf_tmpmap = map;
3898 /* Save the mbuf and update our counter. */
3899 rx_buf->rx_mbuf_ptr = m_new;
3900 rx_buf->rx_mbuf_paddr = seg.ds_addr;
3901 rxr->free_rx_bd--;
3903 bce_setup_rxdesc_std(rxr, chain_prod, prod_bseq);
3905 return 0;
3908 static void
3909 bce_setup_rxdesc_std(struct bce_rx_ring *rxr, uint16_t chain_prod,
3910 uint32_t *prod_bseq)
3912 const struct bce_rx_buf *rx_buf;
3913 struct rx_bd *rxbd;
3914 bus_addr_t paddr;
3915 int len;
3917 rx_buf = &rxr->rx_bufs[chain_prod];
3918 paddr = rx_buf->rx_mbuf_paddr;
3919 len = rx_buf->rx_mbuf_ptr->m_len;
3921 /* Setup the rx_bd for the first segment. */
3922 rxbd = &rxr->rx_bd_chain[RX_PAGE(chain_prod)][RX_IDX(chain_prod)];
3924 rxbd->rx_bd_haddr_lo = htole32(BCE_ADDR_LO(paddr));
3925 rxbd->rx_bd_haddr_hi = htole32(BCE_ADDR_HI(paddr));
3926 rxbd->rx_bd_len = htole32(len);
3927 rxbd->rx_bd_flags = htole32(RX_BD_FLAGS_START);
3928 *prod_bseq += len;
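3929 /* A standard mbuf cluster fits in a single rx_bd, so this descriptor is both START and END. */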
3930 rxbd->rx_bd_flags |= htole32(RX_BD_FLAGS_END);
3933 /****************************************************************************/
3934 /* Initialize the TX context memory. */
3935 /* */
3936 /* Returns: */
3937 /* Nothing */
3938 /****************************************************************************/
3939 static void
3940 bce_init_tx_context(struct bce_tx_ring *txr)
3942 uint32_t val;
3944 /* Initialize the context ID for an L2 TX chain. */
3945 if (BCE_CHIP_NUM(txr->sc) == BCE_CHIP_NUM_5709 ||
3946 BCE_CHIP_NUM(txr->sc) == BCE_CHIP_NUM_5716) {
3947 /* Set the CID type to support an L2 connection. */
3948 val = BCE_L2CTX_TX_TYPE_TYPE_L2 | BCE_L2CTX_TX_TYPE_SIZE_L2;
3949 CTX_WR(txr->sc, GET_CID_ADDR(txr->tx_cid),
3950 BCE_L2CTX_TX_TYPE_XI, val);
3951 val = BCE_L2CTX_TX_CMD_TYPE_TYPE_L2 | (8 << 16);
3952 CTX_WR(txr->sc, GET_CID_ADDR(txr->tx_cid),
3953 BCE_L2CTX_TX_CMD_TYPE_XI, val);
3955 /* Point the hardware to the first page in the chain. */
3956 val = BCE_ADDR_HI(txr->tx_bd_chain_paddr[0]);
3957 CTX_WR(txr->sc, GET_CID_ADDR(txr->tx_cid),
3958 BCE_L2CTX_TX_TBDR_BHADDR_HI_XI, val);
3959 val = BCE_ADDR_LO(txr->tx_bd_chain_paddr[0]);
3960 CTX_WR(txr->sc, GET_CID_ADDR(txr->tx_cid),
3961 BCE_L2CTX_TX_TBDR_BHADDR_LO_XI, val);
3962 } else {
3963 /* Set the CID type to support an L2 connection. */
3964 val = BCE_L2CTX_TX_TYPE_TYPE_L2 | BCE_L2CTX_TX_TYPE_SIZE_L2;
3965 CTX_WR(txr->sc, GET_CID_ADDR(txr->tx_cid),
3966 BCE_L2CTX_TX_TYPE, val);
3967 val = BCE_L2CTX_TX_CMD_TYPE_TYPE_L2 | (8 << 16);
3968 CTX_WR(txr->sc, GET_CID_ADDR(txr->tx_cid),
3969 BCE_L2CTX_TX_CMD_TYPE, val);
3971 /* Point the hardware to the first page in the chain. */
3972 val = BCE_ADDR_HI(txr->tx_bd_chain_paddr[0]);
3973 CTX_WR(txr->sc, GET_CID_ADDR(txr->tx_cid),
3974 BCE_L2CTX_TX_TBDR_BHADDR_HI, val);
3975 val = BCE_ADDR_LO(txr->tx_bd_chain_paddr[0]);
3976 CTX_WR(txr->sc, GET_CID_ADDR(txr->tx_cid),
3977 BCE_L2CTX_TX_TBDR_BHADDR_LO, val);
3981 /****************************************************************************/
3982 /* Allocate memory and initialize the TX data structures. */
3983 /* */
3984 /* Returns: */
3985 /* 0 for success, positive value for failure. */
3986 /****************************************************************************/
3987 static int
3988 bce_init_tx_chain(struct bce_tx_ring *txr)
3990 struct tx_bd *txbd;
3991 int i, rc = 0;
3993 /* Set the initial TX producer/consumer indices. */
3994 txr->tx_prod = 0;
3995 txr->tx_cons = 0;
3996 txr->tx_prod_bseq = 0;
3997 txr->used_tx_bd = 0;
3998 txr->max_tx_bd = USABLE_TX_BD(txr);
4001 * The NetXtreme II supports a linked-list structure called
4002 * a Buffer Descriptor Chain (or BD chain). A BD chain
4003 * consists of one or more chain pages, each of which
4004 * consists of a fixed number of BD entries.
4005 * The last BD entry on each page is a pointer to the next page
4006 * in the chain, and the last pointer in the BD chain
4007 * points back to the beginning of the chain.
4010 /* Set the TX next pointer chain entries. */
4011 for (i = 0; i < txr->tx_pages; i++) {
4012 int j;
4014 txbd = &txr->tx_bd_chain[i][USABLE_TX_BD_PER_PAGE];
4016 /* Check if we've reached the last page. */
4017 if (i == (txr->tx_pages - 1))
4018 j = 0;
4019 else
4020 j = i + 1;
4022 txbd->tx_bd_haddr_hi =
4023 htole32(BCE_ADDR_HI(txr->tx_bd_chain_paddr[j]));
4024 txbd->tx_bd_haddr_lo =
4025 htole32(BCE_ADDR_LO(txr->tx_bd_chain_paddr[j]));
4027 bce_init_tx_context(txr);
4029 return(rc);
4032 /****************************************************************************/
4033 /* Free memory and clear the TX data structures. */
4034 /* */
4035 /* Returns: */
4036 /* Nothing. */
4037 /****************************************************************************/
4038 static void
4039 bce_free_tx_chain(struct bce_tx_ring *txr)
4041 int i;
4043 /* Unmap, unload, and free any mbufs still in the TX mbuf chain. */
4044 for (i = 0; i < TOTAL_TX_BD(txr); i++) {
4045 struct bce_tx_buf *tx_buf = &txr->tx_bufs[i];
4047 if (tx_buf->tx_mbuf_ptr != NULL) {
4048 bus_dmamap_unload(txr->tx_mbuf_tag,
4049 tx_buf->tx_mbuf_map);
4050 m_freem(tx_buf->tx_mbuf_ptr);
4051 tx_buf->tx_mbuf_ptr = NULL;
4055 /* Clear each TX chain page. */
4056 for (i = 0; i < txr->tx_pages; i++)
4057 bzero(txr->tx_bd_chain[i], BCE_TX_CHAIN_PAGE_SZ);
4058 txr->used_tx_bd = 0;
4061 /****************************************************************************/
4062 /* Initialize the RX context memory. */
4063 /* */
4064 /* Returns: */
4065 /* Nothing */
4066 /****************************************************************************/
4067 static void
4068 bce_init_rx_context(struct bce_rx_ring *rxr)
4070 uint32_t val;
4072 /* Initialize the context ID for an L2 RX chain. */
4073 val = BCE_L2CTX_RX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE |
4074 BCE_L2CTX_RX_CTX_TYPE_SIZE_L2 | (0x02 << 8);
4077 * Set the level at which pause frames are
4078 * generated when the number of available
4079 * rx_bd's gets too low (the low watermark)
4080 * and the level at which pause frames stop
4081 * (the high watermark).
4083 if (BCE_CHIP_NUM(rxr->sc) == BCE_CHIP_NUM_5709 ||
4084 BCE_CHIP_NUM(rxr->sc) == BCE_CHIP_NUM_5716) {
4085 uint32_t lo_water, hi_water;
4087 lo_water = BCE_L2CTX_RX_LO_WATER_MARK_DEFAULT;
4088 hi_water = USABLE_RX_BD(rxr) / 4;
4090 lo_water /= BCE_L2CTX_RX_LO_WATER_MARK_SCALE;
4091 hi_water /= BCE_L2CTX_RX_HI_WATER_MARK_SCALE;
4093 if (hi_water > 0xf)
4094 hi_water = 0xf;
4095 else if (hi_water == 0)
4096 lo_water = 0;
4097 val |= lo_water |
4098 (hi_water << BCE_L2CTX_RX_HI_WATER_MARK_SHIFT);
4101 CTX_WR(rxr->sc, GET_CID_ADDR(rxr->rx_cid),
4102 BCE_L2CTX_RX_CTX_TYPE, val);
4104 /* Setup the MQ BIN mapping for l2_ctx_host_bseq. */
4105 if (BCE_CHIP_NUM(rxr->sc) == BCE_CHIP_NUM_5709 ||
4106 BCE_CHIP_NUM(rxr->sc) == BCE_CHIP_NUM_5716) {
4107 val = REG_RD(rxr->sc, BCE_MQ_MAP_L2_5);
4108 REG_WR(rxr->sc, BCE_MQ_MAP_L2_5, val | BCE_MQ_MAP_L2_5_ARM);
4111 /* Point the hardware to the first page in the chain. */
4112 val = BCE_ADDR_HI(rxr->rx_bd_chain_paddr[0]);
4113 CTX_WR(rxr->sc, GET_CID_ADDR(rxr->rx_cid),
4114 BCE_L2CTX_RX_NX_BDHADDR_HI, val);
4115 val = BCE_ADDR_LO(rxr->rx_bd_chain_paddr[0]);
4116 CTX_WR(rxr->sc, GET_CID_ADDR(rxr->rx_cid),
4117 BCE_L2CTX_RX_NX_BDHADDR_LO, val);
4120 /****************************************************************************/
4121 /* Allocate memory and initialize the RX data structures. */
4122 /* */
4123 /* Returns: */
4124 /* 0 for success, positive value for failure. */
4125 /****************************************************************************/
4126 static int
4127 bce_init_rx_chain(struct bce_rx_ring *rxr)
4129 struct rx_bd *rxbd;
4130 int i, rc = 0;
4131 uint16_t prod, chain_prod;
4132 uint32_t prod_bseq;
4134 /* Initialize the RX producer and consumer indices. */
4135 rxr->rx_prod = 0;
4136 rxr->rx_cons = 0;
4137 rxr->rx_prod_bseq = 0;
4138 rxr->free_rx_bd = USABLE_RX_BD(rxr);
4139 rxr->max_rx_bd = USABLE_RX_BD(rxr);
4141 /* Clear the cached status block index */
4142 rxr->last_status_idx = 0;
4144 /* Initialize the RX next pointer chain entries. */
4145 for (i = 0; i < rxr->rx_pages; i++) {
4146 int j;
4148 rxbd = &rxr->rx_bd_chain[i][USABLE_RX_BD_PER_PAGE];
4150 /* Check if we've reached the last page. */
4151 if (i == (rxr->rx_pages - 1))
4152 j = 0;
4153 else
4154 j = i + 1;
4156 /* Setup the chain page pointers. */
4157 rxbd->rx_bd_haddr_hi =
4158 htole32(BCE_ADDR_HI(rxr->rx_bd_chain_paddr[j]));
4159 rxbd->rx_bd_haddr_lo =
4160 htole32(BCE_ADDR_LO(rxr->rx_bd_chain_paddr[j]));
4163 /* Allocate mbuf clusters for the rx_bd chain. */
4164 prod = prod_bseq = 0;
4165 while (prod < TOTAL_RX_BD(rxr)) {
4166 chain_prod = RX_CHAIN_IDX(rxr, prod);
4167 if (bce_newbuf_std(rxr, &prod, chain_prod, &prod_bseq, 1)) {
4168 if_printf(&rxr->sc->arpcom.ac_if,
4169 "Error filling RX chain: rx_bd[0x%04X]!\n",
4170 chain_prod);
4171 rc = ENOBUFS;
4172 break;
4174 prod = NEXT_RX_BD(prod);
4177 /* Save the RX chain producer index. */
4178 rxr->rx_prod = prod;
4179 rxr->rx_prod_bseq = prod_bseq;
4181 /* Tell the chip about the waiting rx_bd's. */
4182 REG_WR16(rxr->sc, MB_GET_CID_ADDR(rxr->rx_cid) + BCE_L2MQ_RX_HOST_BDIDX,
4183 rxr->rx_prod);
4184 REG_WR(rxr->sc, MB_GET_CID_ADDR(rxr->rx_cid) + BCE_L2MQ_RX_HOST_BSEQ,
4185 rxr->rx_prod_bseq);
4187 bce_init_rx_context(rxr);
4189 return(rc);
4192 /****************************************************************************/
4193 /* Free memory and clear the RX data structures. */
4194 /* */
4195 /* Returns: */
4196 /* Nothing. */
4197 /****************************************************************************/
4198 static void
4199 bce_free_rx_chain(struct bce_rx_ring *rxr)
4201 int i;
4203 /* Free any mbufs still in the RX mbuf chain. */
4204 for (i = 0; i < TOTAL_RX_BD(rxr); i++) {
4205 struct bce_rx_buf *rx_buf = &rxr->rx_bufs[i];
4207 if (rx_buf->rx_mbuf_ptr != NULL) {
4208 bus_dmamap_unload(rxr->rx_mbuf_tag,
4209 rx_buf->rx_mbuf_map);
4210 m_freem(rx_buf->rx_mbuf_ptr);
4211 rx_buf->rx_mbuf_ptr = NULL;
4215 /* Clear each RX chain page. */
4216 for (i = 0; i < rxr->rx_pages; i++)
4217 bzero(rxr->rx_bd_chain[i], BCE_RX_CHAIN_PAGE_SZ);
4220 /****************************************************************************/
4221 /* Set media options. */
4222 /* */
4223 /* Returns: */
4224 /* 0 for success, positive value for failure. */
4225 /****************************************************************************/
4226 static int
4227 bce_ifmedia_upd(struct ifnet *ifp)
4229 struct bce_softc *sc = ifp->if_softc;
4230 struct mii_data *mii = device_get_softc(sc->bce_miibus);
4231 int error = 0;
4234 * 'mii' will be NULL when this function is called on the
4235 * following code path: bce_attach() -> bce_mgmt_init()
4237 if (mii != NULL) {
4238 /* Make sure the MII bus has been enumerated. */
4239 sc->bce_link = 0;
4240 if (mii->mii_instance) {
4241 struct mii_softc *miisc;
4243 LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
4244 mii_phy_reset(miisc);
4246 error = mii_mediachg(mii);
4248 return error;
4251 /****************************************************************************/
4252 /* Reports current media status. */
4253 /* */
4254 /* Returns: */
4255 /* Nothing. */
4256 /****************************************************************************/
4257 static void
4258 bce_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
4260 struct bce_softc *sc = ifp->if_softc;
4261 struct mii_data *mii = device_get_softc(sc->bce_miibus);
4263 mii_pollstat(mii);
4264 ifmr->ifm_active = mii->mii_media_active;
4265 ifmr->ifm_status = mii->mii_media_status;
4268 /****************************************************************************/
4269 /* Handles PHY generated interrupt events. */
4270 /* */
4271 /* Returns: */
4272 /* Nothing. */
4273 /****************************************************************************/
4274 static void
4275 bce_phy_intr(struct bce_softc *sc)
4277 uint32_t new_link_state, old_link_state;
4278 struct ifnet *ifp = &sc->arpcom.ac_if;
4280 ASSERT_SERIALIZED(&sc->main_serialize);
4282 new_link_state = sc->status_block->status_attn_bits &
4283 STATUS_ATTN_BITS_LINK_STATE;
4284 old_link_state = sc->status_block->status_attn_bits_ack &
4285 STATUS_ATTN_BITS_LINK_STATE;
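4286 /* The ack bits hold the last link state reported to the driver, so a mismatch means a change. */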
4287 /* Handle any changes if the link state has changed. */
4288 if (new_link_state != old_link_state) { /* XXX redundant? */
4289 /* Update the status_attn_bits_ack field in the status block. */
4290 if (new_link_state) {
4291 REG_WR(sc, BCE_PCICFG_STATUS_BIT_SET_CMD,
4292 STATUS_ATTN_BITS_LINK_STATE);
4293 if (bootverbose)
4294 if_printf(ifp, "Link is now UP.\n");
4295 } else {
4296 REG_WR(sc, BCE_PCICFG_STATUS_BIT_CLEAR_CMD,
4297 STATUS_ATTN_BITS_LINK_STATE);
4298 if (bootverbose)
4299 if_printf(ifp, "Link is now DOWN.\n");
4303 * Assume link is down and allow tick routine to
4304 * update the state based on the actual media state.
4306 sc->bce_link = 0;
4307 callout_stop(&sc->bce_tick_callout);
4308 bce_tick_serialized(sc);
4311 /* Acknowledge the link change interrupt. */
4312 REG_WR(sc, BCE_EMAC_STATUS, BCE_EMAC_STATUS_LINK_CHANGE);
4315 /****************************************************************************/
4316 /* Reads the receive consumer value from the status block (skipping over */
4317 /* chain page pointer if necessary). */
4318 /* */
4319 /* Returns: */
4320 /* hw_cons */
4321 /****************************************************************************/
4322 static __inline uint16_t
4323 bce_get_hw_rx_cons(struct bce_rx_ring *rxr)
4325 uint16_t hw_cons = *rxr->rx_hw_cons;
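4326 /* The last BD of each chain page is a next-page pointer, not a packet descriptor; skip it. */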
4327 if ((hw_cons & USABLE_RX_BD_PER_PAGE) == USABLE_RX_BD_PER_PAGE)
4328 hw_cons++;
4329 return hw_cons;
4332 /****************************************************************************/
4333 /* Handles received frame interrupt events. */
4334 /* */
4335 /* Returns: */
4336 /* Nothing. */
4337 /****************************************************************************/
4338 static void
4339 bce_rx_intr(struct bce_rx_ring *rxr, int count, uint16_t hw_cons)
4341 struct ifnet *ifp = &rxr->sc->arpcom.ac_if;
4342 uint16_t sw_cons, sw_chain_cons, sw_prod, sw_chain_prod;
4343 uint32_t sw_prod_bseq;
4344 int cpuid = mycpuid;
4346 ASSERT_SERIALIZED(&rxr->rx_serialize);
4348 /* Get working copies of the driver's view of the RX indices. */
4349 sw_cons = rxr->rx_cons;
4350 sw_prod = rxr->rx_prod;
4351 sw_prod_bseq = rxr->rx_prod_bseq;
4353 /* Scan through the receive chain as long as there is work to do. */
4354 while (sw_cons != hw_cons) {
4355 struct pktinfo pi0, *pi = NULL;
4356 struct bce_rx_buf *rx_buf;
4357 struct mbuf *m = NULL;
4358 struct l2_fhdr *l2fhdr = NULL;
4359 unsigned int len;
4360 uint32_t status = 0;
4362 #ifdef IFPOLL_ENABLE
4363 if (count >= 0 && count-- == 0)
4364 break;
4365 #endif
4368 * Convert the producer/consumer indices
4369 * to an actual rx_bd index.
4371 sw_chain_cons = RX_CHAIN_IDX(rxr, sw_cons);
4372 sw_chain_prod = RX_CHAIN_IDX(rxr, sw_prod);
4373 rx_buf = &rxr->rx_bufs[sw_chain_cons];
4375 rxr->free_rx_bd++;
4377 /* The mbuf is stored with the last rx_bd entry of a packet. */
4378 if (rx_buf->rx_mbuf_ptr != NULL) {
4379 if (sw_chain_cons != sw_chain_prod) {
4380 if_printf(ifp, "RX cons(%d) != prod(%d), "
4381 "drop!\n", sw_chain_cons, sw_chain_prod);
4382 IFNET_STAT_INC(ifp, ierrors, 1);
4384 bce_setup_rxdesc_std(rxr, sw_chain_cons,
4385 &sw_prod_bseq);
4386 m = NULL;
4387 goto bce_rx_int_next_rx;
4390 /* Sync the mbuf's DMA'd data for CPU access. */
4391 bus_dmamap_sync(rxr->rx_mbuf_tag, rx_buf->rx_mbuf_map,
4392 BUS_DMASYNC_POSTREAD);
4394 /* Save the mbuf from the driver's chain. */
4395 m = rx_buf->rx_mbuf_ptr;
4398 * Frames received on the NetXtreme II are prepended
4399 * with an l2_fhdr structure which provides status
4400 * information about the received frame (including
4401 * VLAN tags and checksum info). The frames are also
4402 * automatically adjusted to align the IP header
4403 * (i.e. two null bytes are inserted before the
4404 * Ethernet header). As a result the data DMA'd by
4405 * the controller into the mbuf is as follows:
4407 * +---------+-----+---------------------+-----+
4408 * | l2_fhdr | pad | packet data | FCS |
4409 * +---------+-----+---------------------+-----+
4411 * The l2_fhdr needs to be checked and skipped and the
4412 * FCS needs to be stripped before sending the packet
4413 * up the stack.
4415 l2fhdr = mtod(m, struct l2_fhdr *);
4417 len = l2fhdr->l2_fhdr_pkt_len;
4418 status = l2fhdr->l2_fhdr_status;
4420 len -= ETHER_CRC_LEN;
4422 /* Check the received frame for errors. */
4423 if (status & (L2_FHDR_ERRORS_BAD_CRC |
4424 L2_FHDR_ERRORS_PHY_DECODE |
4425 L2_FHDR_ERRORS_ALIGNMENT |
4426 L2_FHDR_ERRORS_TOO_SHORT |
4427 L2_FHDR_ERRORS_GIANT_FRAME)) {
4428 IFNET_STAT_INC(ifp, ierrors, 1);
4430 /* Reuse the mbuf for a new frame. */
4431 bce_setup_rxdesc_std(rxr, sw_chain_prod,
4432 &sw_prod_bseq);
4433 m = NULL;
4434 goto bce_rx_int_next_rx;
4438 * Get a new mbuf for the rx_bd. If no new
4439 * mbufs are available then reuse the current
4440 * mbuf and record an input error on the
4441 * interface.
4443 if (bce_newbuf_std(rxr, &sw_prod, sw_chain_prod,
4444 &sw_prod_bseq, 0)) {
4445 IFNET_STAT_INC(ifp, ierrors, 1);
4447 /* Try to reuse the existing mbuf. */
4448 bce_setup_rxdesc_std(rxr, sw_chain_prod,
4449 &sw_prod_bseq);
4450 m = NULL;
4451 goto bce_rx_int_next_rx;
4455 * Skip over the l2_fhdr when passing
4456 * the data up the stack.
4458 m_adj(m, sizeof(struct l2_fhdr) + ETHER_ALIGN);
4460 m->m_pkthdr.len = m->m_len = len;
4461 m->m_pkthdr.rcvif = ifp;
4463 /* Validate the checksum if offload enabled. */
4464 if (ifp->if_capenable & IFCAP_RXCSUM) {
4465 /* Check for an IP datagram. */
4466 if (status & L2_FHDR_STATUS_IP_DATAGRAM) {
4467 m->m_pkthdr.csum_flags |=
4468 CSUM_IP_CHECKED;
4470 /* Check if the IP checksum is valid. */
4471 if ((l2fhdr->l2_fhdr_ip_xsum ^
4472 0xffff) == 0) {
4473 m->m_pkthdr.csum_flags |=
4474 CSUM_IP_VALID;
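4477 /* The controller's TCP/UDP checksum covers the pseudo-header, hence CSUM_PSEUDO_HDR below. */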
4478 /* Check for a valid TCP/UDP frame. */
4479 if (status & (L2_FHDR_STATUS_TCP_SEGMENT |
4480 L2_FHDR_STATUS_UDP_DATAGRAM)) {
4482 /* Check for a good TCP/UDP checksum. */
4483 if ((status &
4484 (L2_FHDR_ERRORS_TCP_XSUM |
4485 L2_FHDR_ERRORS_UDP_XSUM)) == 0) {
4486 m->m_pkthdr.csum_data =
4487 l2fhdr->l2_fhdr_tcp_udp_xsum;
4488 m->m_pkthdr.csum_flags |=
4489 CSUM_DATA_VALID |
4490 CSUM_PSEUDO_HDR;
4494 if (ifp->if_capenable & IFCAP_RSS) {
4495 pi = bce_rss_pktinfo(&pi0, status, l2fhdr);
4496 if (pi != NULL &&
4497 (status & L2_FHDR_STATUS_RSS_HASH)) {
4498 m_sethash(m,
4499 toeplitz_hash(l2fhdr->l2_fhdr_hash));
4503 IFNET_STAT_INC(ifp, ipackets, 1);
4504 bce_rx_int_next_rx:
4505 sw_prod = NEXT_RX_BD(sw_prod);
4508 sw_cons = NEXT_RX_BD(sw_cons);
4510 /* If we have a packet, pass it up the stack */
4511 if (m) {
4512 if (status & L2_FHDR_STATUS_L2_VLAN_TAG) {
4513 m->m_flags |= M_VLANTAG;
4514 m->m_pkthdr.ether_vlantag =
4515 l2fhdr->l2_fhdr_vlan_tag;
4517 ifp->if_input(ifp, m, pi, cpuid);
4518 #ifdef BCE_RSS_DEBUG
4519 rxr->rx_pkts++;
4520 #endif
4524 rxr->rx_cons = sw_cons;
4525 rxr->rx_prod = sw_prod;
4526 rxr->rx_prod_bseq = sw_prod_bseq;
4528 REG_WR16(rxr->sc, MB_GET_CID_ADDR(rxr->rx_cid) + BCE_L2MQ_RX_HOST_BDIDX,
4529 rxr->rx_prod);
4530 REG_WR(rxr->sc, MB_GET_CID_ADDR(rxr->rx_cid) + BCE_L2MQ_RX_HOST_BSEQ,
4531 rxr->rx_prod_bseq);
4534 /****************************************************************************/
4535 /* Reads the transmit consumer value from the status block (skipping over */
4536 /* chain page pointer if necessary). */
4537 /* */
4538 /* Returns: */
4539 /* hw_cons */
4540 /****************************************************************************/
4541 static __inline uint16_t
4542 bce_get_hw_tx_cons(struct bce_tx_ring *txr)
4544 uint16_t hw_cons = *txr->tx_hw_cons;
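4545 /* As on the RX side, skip the next-page pointer BD at the end of each page. */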
4546 if ((hw_cons & USABLE_TX_BD_PER_PAGE) == USABLE_TX_BD_PER_PAGE)
4547 hw_cons++;
4548 return hw_cons;
4551 /****************************************************************************/
4552 /* Handles transmit completion interrupt events. */
4553 /* */
4554 /* Returns: */
4555 /* Nothing. */
4556 /****************************************************************************/
4557 static void
4558 bce_tx_intr(struct bce_tx_ring *txr, uint16_t hw_tx_cons)
4560 struct ifnet *ifp = &txr->sc->arpcom.ac_if;
4561 uint16_t sw_tx_cons, sw_tx_chain_cons;
4563 ASSERT_SERIALIZED(&txr->tx_serialize);
4565 /* Get the driver's view of the TX consumer index. */
4566 sw_tx_cons = txr->tx_cons;
4568 /* Cycle through any completed TX chain page entries. */
4569 while (sw_tx_cons != hw_tx_cons) {
4570 struct bce_tx_buf *tx_buf;
4572 sw_tx_chain_cons = TX_CHAIN_IDX(txr, sw_tx_cons);
4573 tx_buf = &txr->tx_bufs[sw_tx_chain_cons];
4576 * Free the associated mbuf. Remember
4577 * that only the last tx_bd of a packet
4578 * has an mbuf pointer and DMA map.
4580 if (tx_buf->tx_mbuf_ptr != NULL) {
4581 /* Unmap the mbuf. */
4582 bus_dmamap_unload(txr->tx_mbuf_tag,
4583 tx_buf->tx_mbuf_map);
4585 /* Free the mbuf. */
4586 m_freem(tx_buf->tx_mbuf_ptr);
4587 tx_buf->tx_mbuf_ptr = NULL;
4589 IFNET_STAT_INC(ifp, opackets, 1);
4590 #ifdef BCE_TSS_DEBUG
4591 txr->tx_pkts++;
4592 #endif
4595 txr->used_tx_bd--;
4596 sw_tx_cons = NEXT_TX_BD(sw_tx_cons);
4599 if (txr->used_tx_bd == 0) {
4600 /* Clear the TX timeout timer. */
4601 txr->tx_watchdog.wd_timer = 0;
4604 /* Clear the tx hardware queue full flag. */
4605 if (txr->max_tx_bd - txr->used_tx_bd >= BCE_TX_SPARE_SPACE)
4606 ifsq_clr_oactive(txr->ifsq);
4607 txr->tx_cons = sw_tx_cons;
4610 /****************************************************************************/
4611 /* Disables interrupt generation. */
4612 /* */
4613 /* Returns: */
4614 /* Nothing. */
4615 /****************************************************************************/
4616 static void
4617 bce_disable_intr(struct bce_softc *sc)
4619 int i;
4621 for (i = 0; i < sc->rx_ring_cnt; ++i) {
4622 REG_WR(sc, BCE_PCICFG_INT_ACK_CMD,
4623 (sc->rx_rings[i].idx << 24) |
4624 BCE_PCICFG_INT_ACK_CMD_MASK_INT);
4626 REG_RD(sc, BCE_PCICFG_INT_ACK_CMD);
4628 callout_stop(&sc->bce_ckmsi_callout);
4629 sc->bce_msi_maylose = FALSE;
4630 sc->bce_check_rx_cons = 0;
4631 sc->bce_check_tx_cons = 0;
4632 sc->bce_check_status_idx = 0xffff;
4634 for (i = 0; i < sc->rx_ring_cnt; ++i)
4635 lwkt_serialize_handler_disable(sc->bce_msix[i].msix_serialize);
4638 /****************************************************************************/
4639 /* Enables interrupt generation. */
4640 /* */
4641 /* Returns: */
4642 /* Nothing. */
4643 /****************************************************************************/
4644 static void
4645 bce_enable_intr(struct bce_softc *sc)
4647 int i;
4649 for (i = 0; i < sc->rx_ring_cnt; ++i)
4650 lwkt_serialize_handler_enable(sc->bce_msix[i].msix_serialize);
4652 for (i = 0; i < sc->rx_ring_cnt; ++i) {
4653 struct bce_rx_ring *rxr = &sc->rx_rings[i];
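4654 /* The first write acks at the last status index with interrupts still masked; the second unmasks. */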
4655 REG_WR(sc, BCE_PCICFG_INT_ACK_CMD, (rxr->idx << 24) |
4656 BCE_PCICFG_INT_ACK_CMD_INDEX_VALID |
4657 BCE_PCICFG_INT_ACK_CMD_MASK_INT |
4658 rxr->last_status_idx);
4659 REG_WR(sc, BCE_PCICFG_INT_ACK_CMD, (rxr->idx << 24) |
4660 BCE_PCICFG_INT_ACK_CMD_INDEX_VALID |
4661 rxr->last_status_idx);
4663 REG_WR(sc, BCE_HC_COMMAND, sc->hc_command | BCE_HC_COMMAND_COAL_NOW);
4665 if (sc->bce_flags & BCE_CHECK_MSI_FLAG) {
4666 sc->bce_msi_maylose = FALSE;
4667 sc->bce_check_rx_cons = 0;
4668 sc->bce_check_tx_cons = 0;
4669 sc->bce_check_status_idx = 0xffff;
4671 if (bootverbose)
4672 if_printf(&sc->arpcom.ac_if, "check msi\n");
4674 callout_reset_bycpu(&sc->bce_ckmsi_callout, BCE_MSI_CKINTVL,
4675 bce_check_msi, sc, sc->bce_msix[0].msix_cpuid);
4679 /****************************************************************************/
4680 /* Reenables interrupt generation during interrupt handling. */
4681 /* */
4682 /* Returns: */
4683 /* Nothing. */
4684 /****************************************************************************/
4685 static void
4686 bce_reenable_intr(struct bce_rx_ring *rxr)
4688 REG_WR(rxr->sc, BCE_PCICFG_INT_ACK_CMD, (rxr->idx << 24) |
4689 BCE_PCICFG_INT_ACK_CMD_INDEX_VALID | rxr->last_status_idx);
4692 /****************************************************************************/
4693 /* Handles controller initialization. */
4694 /* */
4695 /* Returns: */
4696 /* Nothing. */
4697 /****************************************************************************/
4698 static void
4699 bce_init(void *xsc)
4701 struct bce_softc *sc = xsc;
4702 struct ifnet *ifp = &sc->arpcom.ac_if;
4703 uint32_t ether_mtu;
4704 int error, i;
4705 boolean_t polling;
4707 ASSERT_IFNET_SERIALIZED_ALL(ifp);
4709 /* Check if the driver is still running and bail out if it is. */
4710 if (ifp->if_flags & IFF_RUNNING)
4711 return;
4713 bce_stop(sc);
4715 error = bce_reset(sc, BCE_DRV_MSG_CODE_RESET);
4716 if (error) {
4717 if_printf(ifp, "Controller reset failed!\n");
4718 goto back;
4721 error = bce_chipinit(sc);
4722 if (error) {
4723 if_printf(ifp, "Controller initialization failed!\n");
4724 goto back;
4727 error = bce_blockinit(sc);
4728 if (error) {
4729 if_printf(ifp, "Block initialization failed!\n");
4730 goto back;
4733 /* Load our MAC address. */
4734 bcopy(IF_LLADDR(ifp), sc->eaddr, ETHER_ADDR_LEN);
4735 bce_set_mac_addr(sc);
4737 /* Calculate and program the Ethernet MTU size. */
4738 ether_mtu = ETHER_HDR_LEN + EVL_ENCAPLEN + ifp->if_mtu + ETHER_CRC_LEN;
4741 * Program the MTU, enabling jumbo frame
4742 * support if necessary.
4745 if (ether_mtu > ETHER_MAX_LEN + EVL_ENCAPLEN) {
4746 #ifdef notyet
4747 REG_WR(sc, BCE_EMAC_RX_MTU_SIZE,
4748 min(ether_mtu, BCE_MAX_JUMBO_ETHER_MTU) |
4749 BCE_EMAC_RX_MTU_SIZE_JUMBO_ENA);
4750 #else
4751 panic("jumbo buffer is not supported yet");
4752 #endif
4753 } else {
4754 REG_WR(sc, BCE_EMAC_RX_MTU_SIZE, ether_mtu);
4757 /* Program appropriate promiscuous/multicast filtering. */
4758 bce_set_rx_mode(sc);
4761 * Init RX buffer descriptor chain.
4763 REG_WR(sc, BCE_RLUP_RSS_CONFIG, 0);
4764 bce_reg_wr_ind(sc, BCE_RXP_SCRATCH_RSS_TBL_SZ, 0);
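4765 /* RSS stays disabled while the chains are refilled; bce_init_rss() reconfigures it below. */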
4766 for (i = 0; i < sc->rx_ring_cnt; ++i)
4767 bce_init_rx_chain(&sc->rx_rings[i]); /* XXX return value */
4769 if (sc->rx_ring_cnt > 1)
4770 bce_init_rss(sc);
4773 * Init TX buffer descriptor chain.
4775 REG_WR(sc, BCE_TSCH_TSS_CFG, 0);
4777 for (i = 0; i < sc->tx_ring_cnt; ++i)
4778 bce_init_tx_chain(&sc->tx_rings[i]);
4780 if (sc->tx_ring_cnt > 1) {
4781 REG_WR(sc, BCE_TSCH_TSS_CFG,
4782 ((sc->tx_ring_cnt - 1) << 24) | (TX_TSS_CID << 7));
4785 polling = FALSE;
4786 #ifdef IFPOLL_ENABLE
4787 if (ifp->if_flags & IFF_NPOLLING)
4788 polling = TRUE;
4789 #endif
4791 if (polling) {
4792 /* Disable interrupts if we are polling. */
4793 bce_disable_intr(sc);
4795 /* Change coalesce parameters */
4796 bce_npoll_coal_change(sc);
4797 } else {
4798 /* Enable host interrupts. */
4799 bce_enable_intr(sc);
4801 bce_set_timer_cpuid(sc, polling);
4803 bce_ifmedia_upd(ifp);
4805 ifp->if_flags |= IFF_RUNNING;
4806 for (i = 0; i < sc->tx_ring_cnt; ++i) {
4807 ifsq_clr_oactive(sc->tx_rings[i].ifsq);
4808 ifsq_watchdog_start(&sc->tx_rings[i].tx_watchdog);
4811 callout_reset_bycpu(&sc->bce_tick_callout, hz, bce_tick, sc,
4812 sc->bce_timer_cpuid);
4813 back:
4814 if (error)
4815 bce_stop(sc);
4818 /****************************************************************************/
4819 /* Initialize the controller just enough so that any management firmware */
4820 /* running on the device will continue to operate correctly. */
4821 /* */
4822 /* Returns: */
4823 /* Nothing. */
4824 /****************************************************************************/
4825 static void
4826 bce_mgmt_init(struct bce_softc *sc)
4828 struct ifnet *ifp = &sc->arpcom.ac_if;
4830 /* Bail out if management firmware is not running. */
4831 if (!(sc->bce_flags & BCE_MFW_ENABLE_FLAG))
4832 return;
4834 /* Enable all critical blocks in the MAC. */
4835 if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 ||
4836 BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) {
4837 REG_WR(sc, BCE_MISC_ENABLE_SET_BITS,
4838 BCE_MISC_ENABLE_DEFAULT_XI);
4839 } else {
4840 REG_WR(sc, BCE_MISC_ENABLE_SET_BITS, BCE_MISC_ENABLE_DEFAULT);
4842 REG_RD(sc, BCE_MISC_ENABLE_SET_BITS);
4843 DELAY(20);
4845 bce_ifmedia_upd(ifp);
4848 /****************************************************************************/
4849 /* Encapsulates an mbuf cluster into the tx_bd chain structure and makes the */
4850 /* memory visible to the controller. */
4851 /* */
4852 /* Returns: */
4853 /* 0 for success, positive value for failure. */
4854 /****************************************************************************/
4855 static int
4856 bce_encap(struct bce_tx_ring *txr, struct mbuf **m_head, int *nsegs_used)
4858 bus_dma_segment_t segs[BCE_MAX_SEGMENTS];
4859 bus_dmamap_t map, tmp_map;
4860 struct mbuf *m0 = *m_head;
4861 struct tx_bd *txbd = NULL;
4862 uint16_t vlan_tag = 0, flags = 0, mss = 0;
4863 uint16_t chain_prod, chain_prod_start, prod;
4864 uint32_t prod_bseq;
4865 int i, error, maxsegs, nsegs;
4867 /* Transfer any checksum offload flags to the bd. */
4868 if (m0->m_pkthdr.csum_flags & CSUM_TSO) {
4869 error = bce_tso_setup(txr, m_head, &flags, &mss);
4870 if (error)
4871 return ENOBUFS;
4872 m0 = *m_head;
4873 } else if (m0->m_pkthdr.csum_flags & BCE_CSUM_FEATURES) {
4874 if (m0->m_pkthdr.csum_flags & CSUM_IP)
4875 flags |= TX_BD_FLAGS_IP_CKSUM;
4876 if (m0->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP))
4877 flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
4880 /* Transfer any VLAN tags to the bd. */
4881 if (m0->m_flags & M_VLANTAG) {
4882 flags |= TX_BD_FLAGS_VLAN_TAG;
4883 vlan_tag = m0->m_pkthdr.ether_vlantag;
4886 prod = txr->tx_prod;
4887 chain_prod_start = chain_prod = TX_CHAIN_IDX(txr, prod);
4889 /* Map the mbuf into DMAable memory. */
4890 map = txr->tx_bufs[chain_prod_start].tx_mbuf_map;
4892 maxsegs = txr->max_tx_bd - txr->used_tx_bd;
4893 KASSERT(maxsegs >= BCE_TX_SPARE_SPACE,
4894 ("not enough segments %d", maxsegs));
4895 if (maxsegs > BCE_MAX_SEGMENTS)
4896 maxsegs = BCE_MAX_SEGMENTS;
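4897 /* bus_dmamap_load_mbuf_defrag() will defragment the chain if it needs more than maxsegs segments. */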
4898 /* Map the mbuf into our DMA address space. */
4899 error = bus_dmamap_load_mbuf_defrag(txr->tx_mbuf_tag, map, m_head,
4900 segs, maxsegs, &nsegs, BUS_DMA_NOWAIT);
4901 if (error)
4902 goto back;
4903 bus_dmamap_sync(txr->tx_mbuf_tag, map, BUS_DMASYNC_PREWRITE);
4905 *nsegs_used += nsegs;
4907 /* Reset m0 */
4908 m0 = *m_head;
4910 /* prod points to an empty tx_bd at this point. */
4911 prod_bseq = txr->tx_prod_bseq;
4914 * Cycle through each mbuf segment that makes up
4915 * the outgoing frame, gathering the mapping info
4916 * for that segment and creating a tx_bd to
4917 * describe it.
4919 for (i = 0; i < nsegs; i++) {
4920 chain_prod = TX_CHAIN_IDX(txr, prod);
4921 txbd =
4922 &txr->tx_bd_chain[TX_PAGE(chain_prod)][TX_IDX(chain_prod)];
4924 txbd->tx_bd_haddr_lo = htole32(BCE_ADDR_LO(segs[i].ds_addr));
4925 txbd->tx_bd_haddr_hi = htole32(BCE_ADDR_HI(segs[i].ds_addr));
4926 txbd->tx_bd_mss_nbytes = htole32(mss << 16) |
4927 htole16(segs[i].ds_len);
4928 txbd->tx_bd_vlan_tag = htole16(vlan_tag);
4929 txbd->tx_bd_flags = htole16(flags);
4931 prod_bseq += segs[i].ds_len;
4932 if (i == 0)
4933 txbd->tx_bd_flags |= htole16(TX_BD_FLAGS_START);
4934 prod = NEXT_TX_BD(prod);
4937 /* Set the END flag on the last TX buffer descriptor. */
4938 txbd->tx_bd_flags |= htole16(TX_BD_FLAGS_END);
4941 * Ensure that the mbuf pointer for this transmission
4942 * is placed at the array index of the last
4943 * descriptor in this chain. This is done
4944 * because a single map is used for all
4945 * segments of the mbuf and we don't want to
4946 * unload the map before all of the segments
4947 * have been freed.
4949 txr->tx_bufs[chain_prod].tx_mbuf_ptr = m0;
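4950 /* Keep the loaded DMA map with the last descriptor's slot, giving the start slot the spare map. */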
4951 tmp_map = txr->tx_bufs[chain_prod].tx_mbuf_map;
4952 txr->tx_bufs[chain_prod].tx_mbuf_map = map;
4953 txr->tx_bufs[chain_prod_start].tx_mbuf_map = tmp_map;
4955 txr->used_tx_bd += nsegs;
4957 /* prod points to the next free tx_bd at this point. */
4958 txr->tx_prod = prod;
4959 txr->tx_prod_bseq = prod_bseq;
4960 back:
4961 if (error) {
4962 m_freem(*m_head);
4963 *m_head = NULL;
4965 return error;
4968 static void
4969 bce_xmit(struct bce_tx_ring *txr)
4971 /* Start the transmit. */
4972 REG_WR16(txr->sc, MB_GET_CID_ADDR(txr->tx_cid) + BCE_L2CTX_TX_HOST_BIDX,
4973 txr->tx_prod);
4974 REG_WR(txr->sc, MB_GET_CID_ADDR(txr->tx_cid) + BCE_L2CTX_TX_HOST_BSEQ,
4975 txr->tx_prod_bseq);
4978 /****************************************************************************/
4979 /* Main transmit routine when called from another routine with a lock. */
4980 /* */
4981 /* Returns: */
4982 /* Nothing. */
4983 /****************************************************************************/
4984 static void
4985 bce_start(struct ifnet *ifp, struct ifaltq_subque *ifsq)
4987 struct bce_softc *sc = ifp->if_softc;
4988 struct bce_tx_ring *txr = ifsq_get_priv(ifsq);
4989 int count = 0;
4991 KKASSERT(txr->ifsq == ifsq);
4992 ASSERT_SERIALIZED(&txr->tx_serialize);
4994 /* If there's no link, purge any queued frames and exit. */
4995 if (!sc->bce_link) {
4996 ifsq_purge(ifsq);
4997 return;
5000 if ((ifp->if_flags & IFF_RUNNING) == 0 || ifsq_is_oactive(ifsq))
5001 return;
5003 for (;;) {
5004 struct mbuf *m_head;
5007 * We keep BCE_TX_SPARE_SPACE entries, so bce_encap() is
5008 * unlikely to fail.
5010 if (txr->max_tx_bd - txr->used_tx_bd < BCE_TX_SPARE_SPACE) {
5011 ifsq_set_oactive(ifsq);
5012 break;
5015 /* Check for any frames to send. */
5016 m_head = ifsq_dequeue(ifsq);
5017 if (m_head == NULL)
5018 break;
5021 * Pack the data into the transmit ring. If we
5022 * don't have room, place the mbuf back at the
5023 * head of the queue and set the OACTIVE flag
5024 * to wait for the NIC to drain the chain.
5026 if (bce_encap(txr, &m_head, &count)) {
5027 IFNET_STAT_INC(ifp, oerrors, 1);
5028 if (txr->used_tx_bd == 0) {
5029 continue;
5030 } else {
5031 ifsq_set_oactive(ifsq);
5032 break;
5036 if (count >= txr->tx_wreg) {
5037 bce_xmit(txr);
5038 count = 0;
5041 /* Send a copy of the frame to any BPF listeners. */
5042 ETHER_BPF_MTAP(ifp, m_head);
5044 /* Set the tx timeout. */
5045 txr->tx_watchdog.wd_timer = BCE_TX_TIMEOUT;
5047 if (count > 0)
5048 bce_xmit(txr);
5051 /****************************************************************************/
5052 /* Handles any IOCTL calls from the operating system. */
5053 /* */
5054 /* Returns: */
5055 /* 0 for success, positive value for failure. */
5056 /****************************************************************************/
5057 static int
5058 bce_ioctl(struct ifnet *ifp, u_long command, caddr_t data, struct ucred *cr)
5060 struct bce_softc *sc = ifp->if_softc;
5061 struct ifreq *ifr = (struct ifreq *)data;
5062 struct mii_data *mii;
5063 int mask, error = 0;
5065 ASSERT_IFNET_SERIALIZED_ALL(ifp);
5067 switch(command) {
5068 case SIOCSIFMTU:
5069 /* Check that the MTU setting is supported. */
5070 if (ifr->ifr_mtu < BCE_MIN_MTU ||
5071 #ifdef notyet
5072 ifr->ifr_mtu > BCE_MAX_JUMBO_MTU
5073 #else
5074 ifr->ifr_mtu > ETHERMTU
5075 #endif
5077 error = EINVAL;
5078 break;
5081 ifp->if_mtu = ifr->ifr_mtu;
5082 ifp->if_flags &= ~IFF_RUNNING; /* Force reinitialize */
5083 bce_init(sc);
5084 break;
5086 case SIOCSIFFLAGS:
5087 if (ifp->if_flags & IFF_UP) {
5088 if (ifp->if_flags & IFF_RUNNING) {
5089 mask = ifp->if_flags ^ sc->bce_if_flags;
5091 if (mask & (IFF_PROMISC | IFF_ALLMULTI))
5092 bce_set_rx_mode(sc);
5093 } else {
5094 bce_init(sc);
5096 } else if (ifp->if_flags & IFF_RUNNING) {
5097 bce_stop(sc);
5099 /* If MFW is running, partially re-initialize the controller so it continues to operate. */
5100 if (sc->bce_flags & BCE_MFW_ENABLE_FLAG) {
5101 bce_reset(sc, BCE_DRV_MSG_CODE_RESET);
5102 bce_chipinit(sc);
5103 bce_mgmt_init(sc);
5106 sc->bce_if_flags = ifp->if_flags;
5107 break;
5109 case SIOCADDMULTI:
5110 case SIOCDELMULTI:
5111 if (ifp->if_flags & IFF_RUNNING)
5112 bce_set_rx_mode(sc);
5113 break;
5115 case SIOCSIFMEDIA:
5116 case SIOCGIFMEDIA:
5117 mii = device_get_softc(sc->bce_miibus);
5118 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
5119 break;
5121 case SIOCSIFCAP:
5122 mask = ifr->ifr_reqcap ^ ifp->if_capenable;
5123 if (mask & IFCAP_HWCSUM) {
5124 ifp->if_capenable ^= (mask & IFCAP_HWCSUM);
5125 if (ifp->if_capenable & IFCAP_TXCSUM)
5126 ifp->if_hwassist |= BCE_CSUM_FEATURES;
5127 else
5128 ifp->if_hwassist &= ~BCE_CSUM_FEATURES;
5130 if (mask & IFCAP_TSO) {
5131 ifp->if_capenable ^= IFCAP_TSO;
5132 if (ifp->if_capenable & IFCAP_TSO)
5133 ifp->if_hwassist |= CSUM_TSO;
5134 else
5135 ifp->if_hwassist &= ~CSUM_TSO;
5137 if (mask & IFCAP_RSS)
5138 ifp->if_capenable ^= IFCAP_RSS;
5139 break;
5141 default:
5142 error = ether_ioctl(ifp, command, data);
5143 break;
5145 return error;
5148 /****************************************************************************/
5149 /* Transmit timeout handler. */
5150 /* */
5151 /* Returns: */
5152 /* Nothing. */
5153 /****************************************************************************/
5154 static void
5155 bce_watchdog(struct ifaltq_subque *ifsq)
5157 struct ifnet *ifp = ifsq_get_ifp(ifsq);
5158 struct bce_softc *sc = ifp->if_softc;
5159 int i;
5161 ASSERT_IFNET_SERIALIZED_ALL(ifp);
5164 * If we are in this routine because of pause frames, then
5165 * don't reset the hardware.
5167 if (REG_RD(sc, BCE_EMAC_TX_STATUS) & BCE_EMAC_TX_STATUS_XOFFED)
5168 return;
5170 if_printf(ifp, "Watchdog timeout occurred, resetting!\n");
5172 ifp->if_flags &= ~IFF_RUNNING; /* Force reinitialize */
5173 bce_init(sc);
5175 IFNET_STAT_INC(ifp, oerrors, 1);
5177 for (i = 0; i < sc->tx_ring_cnt; ++i)
5178 ifsq_devstart_sched(sc->tx_rings[i].ifsq);
5181 #ifdef IFPOLL_ENABLE
5183 static void
5184 bce_npoll_status(struct ifnet *ifp)
5186 struct bce_softc *sc = ifp->if_softc;
5187 struct status_block *sblk = sc->status_block;
5188 uint32_t status_attn_bits;
5190 ASSERT_SERIALIZED(&sc->main_serialize);
5192 status_attn_bits = sblk->status_attn_bits;
5194 /* Was it a link change interrupt? */
5195 if ((status_attn_bits & STATUS_ATTN_BITS_LINK_STATE) !=
5196 (sblk->status_attn_bits_ack & STATUS_ATTN_BITS_LINK_STATE)) {
5197 bce_phy_intr(sc);
5200 * Clear any transient status updates during link state change.
5202 REG_WR(sc, BCE_HC_COMMAND,
5203 sc->hc_command | BCE_HC_COMMAND_COAL_NOW_WO_INT);
5204 REG_RD(sc, BCE_HC_COMMAND);
5208 * If any other attention is asserted then the chip is toast.
5210 if ((status_attn_bits & ~STATUS_ATTN_BITS_LINK_STATE) !=
5211 (sblk->status_attn_bits_ack & ~STATUS_ATTN_BITS_LINK_STATE)) {
5212 if_printf(ifp, "Fatal attention detected: 0x%08X\n",
5213 sblk->status_attn_bits);
5214 bce_serialize_skipmain(sc);
5215 bce_init(sc);
5216 bce_deserialize_skipmain(sc);
5220 static void
5221 bce_npoll_rx(struct ifnet *ifp, void *arg, int count)
5223 struct bce_rx_ring *rxr = arg;
5224 uint16_t hw_rx_cons;
5226 ASSERT_SERIALIZED(&rxr->rx_serialize);
5229 * Save the status block index value for use when enabling
5230 * the interrupt.
5232 rxr->last_status_idx = *rxr->hw_status_idx;
5234 /* Make sure status index is extracted before RX cons */
5235 cpu_lfence();
5237 hw_rx_cons = bce_get_hw_rx_cons(rxr);
5239 /* Check for any completed RX frames. */
5240 if (hw_rx_cons != rxr->rx_cons)
5241 bce_rx_intr(rxr, count, hw_rx_cons);
5244 static void
5245 bce_npoll_rx_pack(struct ifnet *ifp, void *arg, int count)
5247 struct bce_rx_ring *rxr = arg;
5249 KASSERT(rxr->idx == 0, ("not the first RX ring, but %d", rxr->idx));
5250 bce_npoll_rx(ifp, rxr, count);
5252 KASSERT(rxr->sc->rx_ring_cnt != rxr->sc->rx_ring_cnt2,
5253 ("RX ring count %d, count2 %d", rxr->sc->rx_ring_cnt,
5254 rxr->sc->rx_ring_cnt2));
5256 /* Last ring carries packets whose masked hash is 0 */
5257 rxr = &rxr->sc->rx_rings[rxr->sc->rx_ring_cnt - 1];
5259 lwkt_serialize_enter(&rxr->rx_serialize);
5260 bce_npoll_rx(ifp, rxr, count);
5261 lwkt_serialize_exit(&rxr->rx_serialize);
5264 static void
5265 bce_npoll_tx(struct ifnet *ifp, void *arg, int count __unused)
5267 struct bce_tx_ring *txr = arg;
5268 uint16_t hw_tx_cons;
5270 ASSERT_SERIALIZED(&txr->tx_serialize);
5272 hw_tx_cons = bce_get_hw_tx_cons(txr);
5274 /* Check for any completed TX frames. */
5275 if (hw_tx_cons != txr->tx_cons) {
5276 bce_tx_intr(txr, hw_tx_cons);
5277 if (!ifsq_is_empty(txr->ifsq))
5278 ifsq_devstart(txr->ifsq);
5282 static void
5283 bce_npoll(struct ifnet *ifp, struct ifpoll_info *info)
5285 struct bce_softc *sc = ifp->if_softc;
5286 int i;
5288 ASSERT_IFNET_SERIALIZED_ALL(ifp);
5290 if (info != NULL) {
5291 info->ifpi_status.status_func = bce_npoll_status;
5292 info->ifpi_status.serializer = &sc->main_serialize;
5294 for (i = 0; i < sc->tx_ring_cnt; ++i) {
5295 struct bce_tx_ring *txr = &sc->tx_rings[i];
5296 int idx = i + sc->npoll_ofs;
5298 KKASSERT(idx < ncpus2);
5299 info->ifpi_tx[idx].poll_func = bce_npoll_tx;
5300 info->ifpi_tx[idx].arg = txr;
5301 info->ifpi_tx[idx].serializer = &txr->tx_serialize;
5302 ifsq_set_cpuid(txr->ifsq, idx);
5305 for (i = 0; i < sc->rx_ring_cnt2; ++i) {
5306 struct bce_rx_ring *rxr = &sc->rx_rings[i];
5307 int idx = i + sc->npoll_ofs;
5309 KKASSERT(idx < ncpus2);
5310 if (i == 0 && sc->rx_ring_cnt2 != sc->rx_ring_cnt) {
5312 * If RSS is enabled, the packets whose
5313 * masked hash are 0 are queued to the
5314 * last RX ring; piggyback the last RX
5315 * ring's processing in the first RX
5316 * polling handler. (see also: comment
5317 * in bce_setup_ring_cnt())
5319 if (bootverbose) {
5320 if_printf(ifp, "npoll pack last "
5321 "RX ring on cpu%d\n", idx);
5323 info->ifpi_rx[idx].poll_func =
5324 bce_npoll_rx_pack;
5325 } else {
5326 info->ifpi_rx[idx].poll_func = bce_npoll_rx;
5328 info->ifpi_rx[idx].arg = rxr;
5329 info->ifpi_rx[idx].serializer = &rxr->rx_serialize;
5332 if (ifp->if_flags & IFF_RUNNING) {
5333 bce_set_timer_cpuid(sc, TRUE);
5334 bce_disable_intr(sc);
5335 bce_npoll_coal_change(sc);
5337 } else {
5338 for (i = 0; i < sc->tx_ring_cnt; ++i) {
5339 ifsq_set_cpuid(sc->tx_rings[i].ifsq,
5340 sc->bce_msix[i].msix_cpuid);
5343 if (ifp->if_flags & IFF_RUNNING) {
5344 bce_set_timer_cpuid(sc, FALSE);
5345 bce_enable_intr(sc);
5347 sc->bce_coalchg_mask |= BCE_COALMASK_TX_BDS_INT |
5348 BCE_COALMASK_RX_BDS_INT;
5349 bce_coal_change(sc);
5354 #endif /* IFPOLL_ENABLE */
5357 * Interrupt handler.
5359 /****************************************************************************/
5360 /* Main interrupt entry point. Verifies that the controller generated the */
5361 /* interrupt and then calls a separate routine to handle the various */
5362 /* interrupt causes (PHY, TX, RX). */
5363 /* */
5364 /* Returns: */
5365 /* Nothing. */
5366 /****************************************************************************/
5367 static void
5368 bce_intr(struct bce_softc *sc)
5370 struct ifnet *ifp = &sc->arpcom.ac_if;
5371 struct status_block *sblk;
5372 uint16_t hw_rx_cons, hw_tx_cons;
5373 uint32_t status_attn_bits;
5374 struct bce_tx_ring *txr = &sc->tx_rings[0];
5375 struct bce_rx_ring *rxr = &sc->rx_rings[0];
5377 ASSERT_SERIALIZED(&sc->main_serialize);
5379 sblk = sc->status_block;
5382 * Save the status block index value for use during
5383 * the next interrupt.
5385 rxr->last_status_idx = *rxr->hw_status_idx;
5387 /* Make sure status index is extracted before RX/TX cons */
5388 cpu_lfence();
5390 /* Check if the hardware has finished any work. */
5391 hw_rx_cons = bce_get_hw_rx_cons(rxr);
5392 hw_tx_cons = bce_get_hw_tx_cons(txr);
5394 status_attn_bits = sblk->status_attn_bits;
5396 /* Was it a link change interrupt? */
5397 if ((status_attn_bits & STATUS_ATTN_BITS_LINK_STATE) !=
5398 (sblk->status_attn_bits_ack & STATUS_ATTN_BITS_LINK_STATE)) {
5399 bce_phy_intr(sc);
5402 * Clear any transient status updates during link state
5403 * change.
5405 REG_WR(sc, BCE_HC_COMMAND,
5406 sc->hc_command | BCE_HC_COMMAND_COAL_NOW_WO_INT);
5407 REG_RD(sc, BCE_HC_COMMAND);
5411 * If any other attention is asserted then
5412 * the chip is toast.
5414 if ((status_attn_bits & ~STATUS_ATTN_BITS_LINK_STATE) !=
5415 (sblk->status_attn_bits_ack & ~STATUS_ATTN_BITS_LINK_STATE)) {
5416 if_printf(ifp, "Fatal attention detected: 0x%08X\n",
5417 sblk->status_attn_bits);
5418 bce_serialize_skipmain(sc);
5419 bce_init(sc);
5420 bce_deserialize_skipmain(sc);
5421 return;
5424 /* Check for any completed RX frames. */
5425 lwkt_serialize_enter(&rxr->rx_serialize);
5426 if (hw_rx_cons != rxr->rx_cons)
5427 bce_rx_intr(rxr, -1, hw_rx_cons);
5428 lwkt_serialize_exit(&rxr->rx_serialize);
5430 /* Check for any completed TX frames. */
5431 lwkt_serialize_enter(&txr->tx_serialize);
5432 if (hw_tx_cons != txr->tx_cons) {
5433 bce_tx_intr(txr, hw_tx_cons);
5434 if (!ifsq_is_empty(txr->ifsq))
5435 ifsq_devstart(txr->ifsq);
5437 lwkt_serialize_exit(&txr->tx_serialize);
5440 static void
5441 bce_intr_legacy(void *xsc)
5443 struct bce_softc *sc = xsc;
5444 struct bce_rx_ring *rxr = &sc->rx_rings[0];
5445 struct status_block *sblk;
5447 sblk = sc->status_block;
5450 * If the hardware status block index matches the last value
5451 * read by the driver and we haven't asserted our interrupt
5452 * then there's nothing to do.
5454 if (sblk->status_idx == rxr->last_status_idx &&
5455 (REG_RD(sc, BCE_PCICFG_MISC_STATUS) &
5456 BCE_PCICFG_MISC_STATUS_INTA_VALUE))
5457 return;
5459 /* Ack the interrupt and stop others from occurring. */
5460 REG_WR(sc, BCE_PCICFG_INT_ACK_CMD,
5461 BCE_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
5462 BCE_PCICFG_INT_ACK_CMD_MASK_INT);
5465 * Read back to deassert IRQ immediately to avoid too
5466 * many spurious interrupts.
5468 REG_RD(sc, BCE_PCICFG_INT_ACK_CMD);
5470 bce_intr(sc);
5472 /* Re-enable interrupts. */
5473 REG_WR(sc, BCE_PCICFG_INT_ACK_CMD,
5474 BCE_PCICFG_INT_ACK_CMD_INDEX_VALID |
5475 BCE_PCICFG_INT_ACK_CMD_MASK_INT | rxr->last_status_idx);
5476 bce_reenable_intr(rxr);
5479 static void
5480 bce_intr_msi(void *xsc)
5482 struct bce_softc *sc = xsc;
5484 /* Ack the interrupt and stop others from occurring. */
5485 REG_WR(sc, BCE_PCICFG_INT_ACK_CMD,
5486 BCE_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
5487 BCE_PCICFG_INT_ACK_CMD_MASK_INT);
5489 bce_intr(sc);
5491 /* Re-enable interrupts */
5492 bce_reenable_intr(&sc->rx_rings[0]);
5495 static void
5496 bce_intr_msi_oneshot(void *xsc)
5498 struct bce_softc *sc = xsc;
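5499 /* One-shot interrupts are masked by the hardware automatically, so no explicit ack/mask is needed. */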
5500 bce_intr(sc);
5502 /* Re-enable interrupts */
5503 bce_reenable_intr(&sc->rx_rings[0]);
5504 }
5506 static void
5507 bce_intr_msix_rxtx(void *xrxr)
5508 {
5509 struct bce_rx_ring *rxr = xrxr;
5510 struct bce_tx_ring *txr;
5511 uint16_t hw_rx_cons, hw_tx_cons;
5513 ASSERT_SERIALIZED(&rxr->rx_serialize);
5515 KKASSERT(rxr->idx < rxr->sc->tx_ring_cnt);
5516 txr = &rxr->sc->tx_rings[rxr->idx];
5518 /*
5519 * Save the status block index value for use during
5520 * the next interrupt.
5521 */
5522 rxr->last_status_idx = *rxr->hw_status_idx;
5524 /* Make sure status index is extracted before RX/TX cons */
5525 cpu_lfence();
5527 /* Check if the hardware has finished any work. */
5528 hw_rx_cons = bce_get_hw_rx_cons(rxr);
5529 if (hw_rx_cons != rxr->rx_cons)
5530 bce_rx_intr(rxr, -1, hw_rx_cons);
5532 /* Check for any completed TX frames. */
5533 hw_tx_cons = bce_get_hw_tx_cons(txr);
5534 lwkt_serialize_enter(&txr->tx_serialize);
5535 if (hw_tx_cons != txr->tx_cons) {
5536 bce_tx_intr(txr, hw_tx_cons);
5537 if (!ifsq_is_empty(txr->ifsq))
5538 ifsq_devstart(txr->ifsq);
5539 }
5540 lwkt_serialize_exit(&txr->tx_serialize);
5542 /* Re-enable interrupts */
5543 bce_reenable_intr(rxr);
5544 }
5546 static void
5547 bce_intr_msix_rx(void *xrxr)
5548 {
5549 struct bce_rx_ring *rxr = xrxr;
5550 uint16_t hw_rx_cons;
5552 ASSERT_SERIALIZED(&rxr->rx_serialize);
5554 /*
5555 * Save the status block index value for use during
5556 * the next interrupt.
5557 */
5558 rxr->last_status_idx = *rxr->hw_status_idx;
5560 /* Make sure status index is extracted before RX cons */
5561 cpu_lfence();
5563 /* Check if the hardware has finished any work. */
5564 hw_rx_cons = bce_get_hw_rx_cons(rxr);
5565 if (hw_rx_cons != rxr->rx_cons)
5566 bce_rx_intr(rxr, -1, hw_rx_cons);
5568 /* Re-enable interrupts */
5569 bce_reenable_intr(rxr);
5570 }
5572 /****************************************************************************/
5573 /* Programs the various packet receive modes (broadcast and multicast). */
5574 /* */
5575 /* Returns: */
5576 /* Nothing. */
5577 /****************************************************************************/
5578 static void
5579 bce_set_rx_mode(struct bce_softc *sc)
5580 {
5581 struct ifnet *ifp = &sc->arpcom.ac_if;
5582 struct ifmultiaddr *ifma;
5583 uint32_t hashes[NUM_MC_HASH_REGISTERS] = { 0, 0, 0, 0, 0, 0, 0, 0 };
5584 uint32_t rx_mode, sort_mode;
5585 int h, i;
5587 ASSERT_IFNET_SERIALIZED_ALL(ifp);
5589 /* Initialize receive mode default settings. */
5590 rx_mode = sc->rx_mode &
5591 ~(BCE_EMAC_RX_MODE_PROMISCUOUS |
5592 BCE_EMAC_RX_MODE_KEEP_VLAN_TAG);
5593 sort_mode = 1 | BCE_RPM_SORT_USER0_BC_EN;
5595 /*
5596 * ASF/IPMI/UMP firmware requires that VLAN tag stripping
5597 * be enabled.
5598 */
5599 if (!(BCE_IF_CAPABILITIES & IFCAP_VLAN_HWTAGGING) &&
5600 !(sc->bce_flags & BCE_MFW_ENABLE_FLAG))
5601 rx_mode |= BCE_EMAC_RX_MODE_KEEP_VLAN_TAG;
5603 /*
5604 * Check for promiscuous, all multicast, or selected
5605 * multicast address filtering.
5606 */
5607 if (ifp->if_flags & IFF_PROMISC) {
5608 /* Enable promiscuous mode. */
5609 rx_mode |= BCE_EMAC_RX_MODE_PROMISCUOUS;
5610 sort_mode |= BCE_RPM_SORT_USER0_PROM_EN;
5611 } else if (ifp->if_flags & IFF_ALLMULTI) {
5612 /* Enable all multicast addresses. */
5613 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
5614 REG_WR(sc, BCE_EMAC_MULTICAST_HASH0 + (i * 4),
5615 0xffffffff);
5616 }
5617 sort_mode |= BCE_RPM_SORT_USER0_MC_EN;
5618 } else {
5619 /* Accept one or more multicast(s). */
5620 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
5621 if (ifma->ifma_addr->sa_family != AF_LINK)
5622 continue;
5623 h = ether_crc32_le(
5624 LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
5625 ETHER_ADDR_LEN) & 0xFF;
5626 hashes[(h & 0xE0) >> 5] |= 1 << (h & 0x1F);
5627 }
5629 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
5630 REG_WR(sc, BCE_EMAC_MULTICAST_HASH0 + (i * 4),
5631 hashes[i]);
5632 }
5633 sort_mode |= BCE_RPM_SORT_USER0_MC_HSH_EN;
5634 }
5636 /* Only make changes if the receive mode has actually changed. */
5637 if (rx_mode != sc->rx_mode) {
5638 sc->rx_mode = rx_mode;
5639 REG_WR(sc, BCE_EMAC_RX_MODE, rx_mode);
5640 }
5642 /* Disable and clear the existing sort before enabling a new sort. */
5643 REG_WR(sc, BCE_RPM_SORT_USER0, 0x0);
5644 REG_WR(sc, BCE_RPM_SORT_USER0, sort_mode);
5645 REG_WR(sc, BCE_RPM_SORT_USER0, sort_mode | BCE_RPM_SORT_USER0_ENA);
5646 }
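/*
 * Worked example (illustrative sketch, not part of the original
 * driver): how one multicast address lands in the 256-bit hash
 * table programmed above.  The low byte of the little-endian
 * CRC32 of the MAC address selects one of the 8 hash registers
 * (top 3 bits) and a bit within it (low 5 bits).
 */
#if 0
static void
example_mc_hash(uint32_t hashes[NUM_MC_HASH_REGISTERS], uint32_t crc)
{
	int h = crc & 0xFF;

	/* h = 0x5A: register (0x5A & 0xE0) >> 5 = 2, bit 0x5A & 0x1F = 26 */
	hashes[(h & 0xE0) >> 5] |= 1 << (h & 0x1F);
}
#endif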
5648 /****************************************************************************/
5649 /* Called periodically to update statistics from the controller's */
5650 /* statistics block. */
5651 /* */
5652 /* Returns: */
5653 /* Nothing. */
5654 /****************************************************************************/
5655 static void
5656 bce_stats_update(struct bce_softc *sc)
5657 {
5658 struct ifnet *ifp = &sc->arpcom.ac_if;
5659 struct statistics_block *stats = sc->stats_block;
5661 ASSERT_SERIALIZED(&sc->main_serialize);
5663 /*
5664 * Certain controllers don't report carrier sense errors correctly.
5665 * See errata E11_5708CA0_1165.
5666 */
5667 if (!(BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5706) &&
5668 !(BCE_CHIP_ID(sc) == BCE_CHIP_ID_5708_A0)) {
5669 IFNET_STAT_INC(ifp, oerrors,
5670 (u_long)stats->stat_Dot3StatsCarrierSenseErrors);
5671 }
5673 /*
5674 * Update the sysctl statistics from the hardware statistics.
5675 */
5676 sc->stat_IfHCInOctets =
5677 ((uint64_t)stats->stat_IfHCInOctets_hi << 32) +
5678 (uint64_t)stats->stat_IfHCInOctets_lo;
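/*
 * Worked example (illustrative): the chip exports each 64-bit
 * counter as two 32-bit halves, combined as above.  For
 * hi = 0x00000001 and lo = 0x00000010:
 *
 *   ((uint64_t)0x00000001 << 32) + 0x00000010 = 0x0000000100000010
 */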
5680 sc->stat_IfHCInBadOctets =
5681 ((uint64_t)stats->stat_IfHCInBadOctets_hi << 32) +
5682 (uint64_t)stats->stat_IfHCInBadOctets_lo;
5684 sc->stat_IfHCOutOctets =
5685 ((uint64_t)stats->stat_IfHCOutOctets_hi << 32) +
5686 (uint64_t)stats->stat_IfHCOutOctets_lo;
5688 sc->stat_IfHCOutBadOctets =
5689 ((uint64_t)stats->stat_IfHCOutBadOctets_hi << 32) +
5690 (uint64_t)stats->stat_IfHCOutBadOctets_lo;
5692 sc->stat_IfHCInUcastPkts =
5693 ((uint64_t)stats->stat_IfHCInUcastPkts_hi << 32) +
5694 (uint64_t)stats->stat_IfHCInUcastPkts_lo;
5696 sc->stat_IfHCInMulticastPkts =
5697 ((uint64_t)stats->stat_IfHCInMulticastPkts_hi << 32) +
5698 (uint64_t)stats->stat_IfHCInMulticastPkts_lo;
5700 sc->stat_IfHCInBroadcastPkts =
5701 ((uint64_t)stats->stat_IfHCInBroadcastPkts_hi << 32) +
5702 (uint64_t)stats->stat_IfHCInBroadcastPkts_lo;
5704 sc->stat_IfHCOutUcastPkts =
5705 ((uint64_t)stats->stat_IfHCOutUcastPkts_hi << 32) +
5706 (uint64_t)stats->stat_IfHCOutUcastPkts_lo;
5708 sc->stat_IfHCOutMulticastPkts =
5709 ((uint64_t)stats->stat_IfHCOutMulticastPkts_hi << 32) +
5710 (uint64_t)stats->stat_IfHCOutMulticastPkts_lo;
5712 sc->stat_IfHCOutBroadcastPkts =
5713 ((uint64_t)stats->stat_IfHCOutBroadcastPkts_hi << 32) +
5714 (uint64_t)stats->stat_IfHCOutBroadcastPkts_lo;
5716 sc->stat_emac_tx_stat_dot3statsinternalmactransmiterrors =
5717 stats->stat_emac_tx_stat_dot3statsinternalmactransmiterrors;
5719 sc->stat_Dot3StatsCarrierSenseErrors =
5720 stats->stat_Dot3StatsCarrierSenseErrors;
5722 sc->stat_Dot3StatsFCSErrors =
5723 stats->stat_Dot3StatsFCSErrors;
5725 sc->stat_Dot3StatsAlignmentErrors =
5726 stats->stat_Dot3StatsAlignmentErrors;
5728 sc->stat_Dot3StatsSingleCollisionFrames =
5729 stats->stat_Dot3StatsSingleCollisionFrames;
5731 sc->stat_Dot3StatsMultipleCollisionFrames =
5732 stats->stat_Dot3StatsMultipleCollisionFrames;
5734 sc->stat_Dot3StatsDeferredTransmissions =
5735 stats->stat_Dot3StatsDeferredTransmissions;
5737 sc->stat_Dot3StatsExcessiveCollisions =
5738 stats->stat_Dot3StatsExcessiveCollisions;
5740 sc->stat_Dot3StatsLateCollisions =
5741 stats->stat_Dot3StatsLateCollisions;
5743 sc->stat_EtherStatsCollisions =
5744 stats->stat_EtherStatsCollisions;
5746 sc->stat_EtherStatsFragments =
5747 stats->stat_EtherStatsFragments;
5749 sc->stat_EtherStatsJabbers =
5750 stats->stat_EtherStatsJabbers;
5752 sc->stat_EtherStatsUndersizePkts =
5753 stats->stat_EtherStatsUndersizePkts;
5755 sc->stat_EtherStatsOverrsizePkts =
5756 stats->stat_EtherStatsOverrsizePkts;
5758 sc->stat_EtherStatsPktsRx64Octets =
5759 stats->stat_EtherStatsPktsRx64Octets;
5761 sc->stat_EtherStatsPktsRx65Octetsto127Octets =
5762 stats->stat_EtherStatsPktsRx65Octetsto127Octets;
5764 sc->stat_EtherStatsPktsRx128Octetsto255Octets =
5765 stats->stat_EtherStatsPktsRx128Octetsto255Octets;
5767 sc->stat_EtherStatsPktsRx256Octetsto511Octets =
5768 stats->stat_EtherStatsPktsRx256Octetsto511Octets;
5770 sc->stat_EtherStatsPktsRx512Octetsto1023Octets =
5771 stats->stat_EtherStatsPktsRx512Octetsto1023Octets;
5773 sc->stat_EtherStatsPktsRx1024Octetsto1522Octets =
5774 stats->stat_EtherStatsPktsRx1024Octetsto1522Octets;
5776 sc->stat_EtherStatsPktsRx1523Octetsto9022Octets =
5777 stats->stat_EtherStatsPktsRx1523Octetsto9022Octets;
5779 sc->stat_EtherStatsPktsTx64Octets =
5780 stats->stat_EtherStatsPktsTx64Octets;
5782 sc->stat_EtherStatsPktsTx65Octetsto127Octets =
5783 stats->stat_EtherStatsPktsTx65Octetsto127Octets;
5785 sc->stat_EtherStatsPktsTx128Octetsto255Octets =
5786 stats->stat_EtherStatsPktsTx128Octetsto255Octets;
5788 sc->stat_EtherStatsPktsTx256Octetsto511Octets =
5789 stats->stat_EtherStatsPktsTx256Octetsto511Octets;
5791 sc->stat_EtherStatsPktsTx512Octetsto1023Octets =
5792 stats->stat_EtherStatsPktsTx512Octetsto1023Octets;
5794 sc->stat_EtherStatsPktsTx1024Octetsto1522Octets =
5795 stats->stat_EtherStatsPktsTx1024Octetsto1522Octets;
5797 sc->stat_EtherStatsPktsTx1523Octetsto9022Octets =
5798 stats->stat_EtherStatsPktsTx1523Octetsto9022Octets;
5800 sc->stat_XonPauseFramesReceived =
5801 stats->stat_XonPauseFramesReceived;
5803 sc->stat_XoffPauseFramesReceived =
5804 stats->stat_XoffPauseFramesReceived;
5806 sc->stat_OutXonSent =
5807 stats->stat_OutXonSent;
5809 sc->stat_OutXoffSent =
5810 stats->stat_OutXoffSent;
5812 sc->stat_FlowControlDone =
5813 stats->stat_FlowControlDone;
5815 sc->stat_MacControlFramesReceived =
5816 stats->stat_MacControlFramesReceived;
5818 sc->stat_XoffStateEntered =
5819 stats->stat_XoffStateEntered;
5821 sc->stat_IfInFramesL2FilterDiscards =
5822 stats->stat_IfInFramesL2FilterDiscards;
5824 sc->stat_IfInRuleCheckerDiscards =
5825 stats->stat_IfInRuleCheckerDiscards;
5827 sc->stat_IfInFTQDiscards =
5828 stats->stat_IfInFTQDiscards;
5830 sc->stat_IfInMBUFDiscards =
5831 stats->stat_IfInMBUFDiscards;
5833 sc->stat_IfInRuleCheckerP4Hit =
5834 stats->stat_IfInRuleCheckerP4Hit;
5836 sc->stat_CatchupInRuleCheckerDiscards =
5837 stats->stat_CatchupInRuleCheckerDiscards;
5839 sc->stat_CatchupInFTQDiscards =
5840 stats->stat_CatchupInFTQDiscards;
5842 sc->stat_CatchupInMBUFDiscards =
5843 stats->stat_CatchupInMBUFDiscards;
5845 sc->stat_CatchupInRuleCheckerP4Hit =
5846 stats->stat_CatchupInRuleCheckerP4Hit;
5848 sc->com_no_buffers = REG_RD_IND(sc, 0x120084);
5850 /*
5851 * Update the interface statistics from the
5852 * hardware statistics.
5853 */
5854 IFNET_STAT_SET(ifp, collisions, (u_long)sc->stat_EtherStatsCollisions);
5856 IFNET_STAT_SET(ifp, ierrors, (u_long)sc->stat_EtherStatsUndersizePkts +
5857 (u_long)sc->stat_EtherStatsOverrsizePkts +
5858 (u_long)sc->stat_IfInMBUFDiscards +
5859 (u_long)sc->stat_Dot3StatsAlignmentErrors +
5860 (u_long)sc->stat_Dot3StatsFCSErrors +
5861 (u_long)sc->stat_IfInRuleCheckerDiscards +
5862 (u_long)sc->stat_IfInFTQDiscards +
5863 (u_long)sc->com_no_buffers);
5865 IFNET_STAT_SET(ifp, oerrors,
5866 (u_long)sc->stat_emac_tx_stat_dot3statsinternalmactransmiterrors +
5867 (u_long)sc->stat_Dot3StatsExcessiveCollisions +
5868 (u_long)sc->stat_Dot3StatsLateCollisions);
5869 }
5871 /****************************************************************************/
5872 /* Periodic function to notify the bootcode that the driver is still */
5873 /* present. */
5874 /* */
5875 /* Returns: */
5876 /* Nothing. */
5877 /****************************************************************************/
5878 static void
5879 bce_pulse(void *xsc)
5880 {
5881 struct bce_softc *sc = xsc;
5882 struct ifnet *ifp = &sc->arpcom.ac_if;
5883 uint32_t msg;
5885 lwkt_serialize_enter(&sc->main_serialize);
5887 /* Tell the firmware that the driver is still running. */
5888 msg = (uint32_t)++sc->bce_fw_drv_pulse_wr_seq;
5889 bce_shmem_wr(sc, BCE_DRV_PULSE_MB, msg);
5891 /* Update the bootcode condition. */
5892 sc->bc_state = bce_shmem_rd(sc, BCE_BC_STATE_CONDITION);
5894 /* Report whether the bootcode still knows the driver is running. */
5895 if (!sc->bce_drv_cardiac_arrest) {
5896 if (!(sc->bc_state & BCE_CONDITION_DRV_PRESENT)) {
5897 sc->bce_drv_cardiac_arrest = 1;
5898 if_printf(ifp, "Bootcode lost the driver pulse! "
5899 "(bc_state = 0x%08X)\n", sc->bc_state);
5901 } else {
5902 /*
5903 * Not supported by all bootcode versions
5904 * (only v5.0.11+ and v5.2.1+). Older bootcode
5905 * will require the driver to reset the
5906 * controller to clear this condition.
5907 */
5908 if (sc->bc_state & BCE_CONDITION_DRV_PRESENT) {
5909 sc->bce_drv_cardiac_arrest = 0;
5910 if_printf(ifp, "Bootcode found the driver pulse! "
5911 "(bc_state = 0x%08X)\n", sc->bc_state);
5915 /* Schedule the next pulse. */
5916 callout_reset_bycpu(&sc->bce_pulse_callout, hz, bce_pulse, sc,
5917 sc->bce_timer_cpuid);
5919 lwkt_serialize_exit(&sc->main_serialize);
5920 }
5922 /****************************************************************************/
5923 /* Periodic function to check whether MSI is lost */
5924 /* */
5925 /* Returns: */
5926 /* Nothing. */
5927 /****************************************************************************/
5928 static void
5929 bce_check_msi(void *xsc)
5930 {
5931 struct bce_softc *sc = xsc;
5932 struct ifnet *ifp = &sc->arpcom.ac_if;
5933 struct status_block *sblk = sc->status_block;
5934 struct bce_tx_ring *txr = &sc->tx_rings[0];
5935 struct bce_rx_ring *rxr = &sc->rx_rings[0];
5937 lwkt_serialize_enter(&sc->main_serialize);
5939 KKASSERT(mycpuid == sc->bce_msix[0].msix_cpuid);
5941 if ((ifp->if_flags & (IFF_RUNNING | IFF_NPOLLING)) != IFF_RUNNING) {
5942 lwkt_serialize_exit(&sc->main_serialize);
5943 return;
5944 }
5946 if (bce_get_hw_rx_cons(rxr) != rxr->rx_cons ||
5947 bce_get_hw_tx_cons(txr) != txr->tx_cons ||
5948 (sblk->status_attn_bits & STATUS_ATTN_BITS_LINK_STATE) !=
5949 (sblk->status_attn_bits_ack & STATUS_ATTN_BITS_LINK_STATE)) {
5950 if (sc->bce_check_rx_cons == rxr->rx_cons &&
5951 sc->bce_check_tx_cons == txr->tx_cons &&
5952 sc->bce_check_status_idx == rxr->last_status_idx) {
5953 uint32_t msi_ctrl;
5955 if (!sc->bce_msi_maylose) {
5956 sc->bce_msi_maylose = TRUE;
5957 goto done;
5958 }
5960 msi_ctrl = REG_RD(sc, BCE_PCICFG_MSI_CONTROL);
5961 if (msi_ctrl & BCE_PCICFG_MSI_CONTROL_ENABLE) {
5962 if (bootverbose)
5963 if_printf(ifp, "lost MSI\n");
5965 REG_WR(sc, BCE_PCICFG_MSI_CONTROL,
5966 msi_ctrl & ~BCE_PCICFG_MSI_CONTROL_ENABLE);
5967 REG_WR(sc, BCE_PCICFG_MSI_CONTROL, msi_ctrl);
5969 bce_intr_msi(sc);
5970 } else if (bootverbose) {
5971 if_printf(ifp, "MSI may be lost\n");
5972 }
5973 }
5974 }
5975 sc->bce_msi_maylose = FALSE;
5976 sc->bce_check_rx_cons = rxr->rx_cons;
5977 sc->bce_check_tx_cons = txr->tx_cons;
5978 sc->bce_check_status_idx = rxr->last_status_idx;
5980 done:
5981 callout_reset(&sc->bce_ckmsi_callout, BCE_MSI_CKINTVL,
5982 bce_check_msi, sc);
5983 lwkt_serialize_exit(&sc->main_serialize);
5984 }
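/*
 * Simplified sketch (illustrative) of the lost-MSI heuristic
 * above.  It is a two-strike check driven by the callout: a poll
 * that sees pending hardware work with unmoved consumer indices
 * only arms bce_msi_maylose; a second such poll in a row is
 * treated as a lost MSI, and the driver recovers by toggling the
 * MSI enable bit in PCI config space and running the interrupt
 * handler by hand.  The helper names below are hypothetical.
 */
#if 0
if (hw_work_pending(sc) && !indices_moved(sc)) {
	if (!sc->bce_msi_maylose) {
		sc->bce_msi_maylose = TRUE;	/* strike one: remember */
		goto done;			/* re-check on next poll */
	}
	recover_lost_msi(sc);			/* strike two: recover */
}
sc->bce_msi_maylose = FALSE;
#endif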
5986 /****************************************************************************/
5987 /* Periodic function to perform maintenance tasks. */
5988 /* */
5989 /* Returns: */
5990 /* Nothing. */
5991 /****************************************************************************/
5992 static void
5993 bce_tick_serialized(struct bce_softc *sc)
5994 {
5995 struct mii_data *mii;
5997 ASSERT_SERIALIZED(&sc->main_serialize);
5999 /* Update the statistics from the hardware statistics block. */
6000 bce_stats_update(sc);
6002 /* Schedule the next tick. */
6003 callout_reset_bycpu(&sc->bce_tick_callout, hz, bce_tick, sc,
6004 sc->bce_timer_cpuid);
6006 /* If the link is already up then we're done. */
6007 if (sc->bce_link)
6008 return;
6010 mii = device_get_softc(sc->bce_miibus);
6011 mii_tick(mii);
6013 /* Check if the link has come up. */
6014 if ((mii->mii_media_status & IFM_ACTIVE) &&
6015 IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
6016 int i;
6018 sc->bce_link++;
6019 /* Now that link is up, handle any outstanding TX traffic. */
6020 for (i = 0; i < sc->tx_ring_cnt; ++i)
6021 ifsq_devstart_sched(sc->tx_rings[i].ifsq);
6022 }
6023 }
6025 static void
6026 bce_tick(void *xsc)
6027 {
6028 struct bce_softc *sc = xsc;
6030 lwkt_serialize_enter(&sc->main_serialize);
6031 bce_tick_serialized(sc);
6032 lwkt_serialize_exit(&sc->main_serialize);
6033 }
6035 /****************************************************************************/
6036 /* Adds any sysctl parameters for tuning or debugging purposes. */
6037 /* */
6038 /* Returns: */
6039 /* 0 for success, positive value for failure. */
6040 /****************************************************************************/
6041 static void
6042 bce_add_sysctls(struct bce_softc *sc)
6043 {
6044 struct sysctl_ctx_list *ctx;
6045 struct sysctl_oid_list *children;
6046 #if defined(BCE_TSS_DEBUG) || defined(BCE_RSS_DEBUG)
6047 char node[32];
6048 int i;
6049 #endif
6051 ctx = device_get_sysctl_ctx(sc->bce_dev);
6052 children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->bce_dev));
6054 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tx_bds_int",
6055 CTLTYPE_INT | CTLFLAG_RW,
6056 sc, 0, bce_sysctl_tx_bds_int, "I",
6057 "Send max coalesced BD count during interrupt");
6058 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tx_bds",
6059 CTLTYPE_INT | CTLFLAG_RW,
6060 sc, 0, bce_sysctl_tx_bds, "I",
6061 "Send max coalesced BD count");
6062 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tx_ticks_int",
6063 CTLTYPE_INT | CTLFLAG_RW,
6064 sc, 0, bce_sysctl_tx_ticks_int, "I",
6065 "Send coalescing ticks during interrupt");
6066 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tx_ticks",
6067 CTLTYPE_INT | CTLFLAG_RW,
6068 sc, 0, bce_sysctl_tx_ticks, "I",
6069 "Send coalescing ticks");
6071 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rx_bds_int",
6072 CTLTYPE_INT | CTLFLAG_RW,
6073 sc, 0, bce_sysctl_rx_bds_int, "I",
6074 "Receive max coalesced BD count during interrupt");
6075 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rx_bds",
6076 CTLTYPE_INT | CTLFLAG_RW,
6077 sc, 0, bce_sysctl_rx_bds, "I",
6078 "Receive max coalesced BD count");
6079 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rx_ticks_int",
6080 CTLTYPE_INT | CTLFLAG_RW,
6081 sc, 0, bce_sysctl_rx_ticks_int, "I",
6082 "Receive coalescing ticks during interrupt");
6083 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rx_ticks",
6084 CTLTYPE_INT | CTLFLAG_RW,
6085 sc, 0, bce_sysctl_rx_ticks, "I",
6086 "Receive coalescing ticks");
6088 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "rx_rings",
6089 CTLFLAG_RD, &sc->rx_ring_cnt, 0, "# of RX rings");
6090 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "rx_pages",
6091 CTLFLAG_RD, &sc->rx_rings[0].rx_pages, 0, "# of RX pages");
6093 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "tx_rings",
6094 CTLFLAG_RD, &sc->tx_ring_cnt, 0, "# of TX rings");
6095 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "tx_pages",
6096 CTLFLAG_RD, &sc->tx_rings[0].tx_pages, 0, "# of TX pages");
6098 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "tx_wreg",
6099 CTLFLAG_RW, &sc->tx_rings[0].tx_wreg, 0,
6100 "# segments before write to hardware registers");
6102 #ifdef IFPOLL_ENABLE
6103 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "npoll_offset",
6104 CTLTYPE_INT|CTLFLAG_RW, sc, 0, bce_sysctl_npoll_offset,
6105 "I", "NPOLLING cpu offset");
6106 #endif
6108 #ifdef BCE_RSS_DEBUG
6109 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "rss_debug",
6110 CTLFLAG_RW, &sc->rss_debug, 0, "RSS debug level");
6111 for (i = 0; i < sc->rx_ring_cnt; ++i) {
6112 ksnprintf(node, sizeof(node), "rx%d_pkt", i);
6113 SYSCTL_ADD_ULONG(ctx, children, OID_AUTO, node,
6114 CTLFLAG_RW, &sc->rx_rings[i].rx_pkts,
6115 "RXed packets");
6117 #endif
6119 #ifdef BCE_TSS_DEBUG
6120 for (i = 0; i < sc->tx_ring_cnt; ++i) {
6121 ksnprintf(node, sizeof(node), "tx%d_pkt", i);
6122 SYSCTL_ADD_ULONG(ctx, children, OID_AUTO, node,
6123 CTLFLAG_RW, &sc->tx_rings[i].tx_pkts,
6124 "TXed packets");
6126 #endif
6128 SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
6129 "stat_IfHCInOctets",
6130 CTLFLAG_RD, &sc->stat_IfHCInOctets,
6131 "Bytes received");
6133 SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
6134 "stat_IfHCInBadOctets",
6135 CTLFLAG_RD, &sc->stat_IfHCInBadOctets,
6136 "Bad bytes received");
6138 SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
6139 "stat_IfHCOutOctets",
6140 CTLFLAG_RD, &sc->stat_IfHCOutOctets,
6141 "Bytes sent");
6143 SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
6144 "stat_IfHCOutBadOctets",
6145 CTLFLAG_RD, &sc->stat_IfHCOutBadOctets,
6146 "Bad bytes sent");
6148 SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
6149 "stat_IfHCInUcastPkts",
6150 CTLFLAG_RD, &sc->stat_IfHCInUcastPkts,
6151 "Unicast packets received");
6153 SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
6154 "stat_IfHCInMulticastPkts",
6155 CTLFLAG_RD, &sc->stat_IfHCInMulticastPkts,
6156 "Multicast packets received");
6158 SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
6159 "stat_IfHCInBroadcastPkts",
6160 CTLFLAG_RD, &sc->stat_IfHCInBroadcastPkts,
6161 "Broadcast packets received");
6163 SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
6164 "stat_IfHCOutUcastPkts",
6165 CTLFLAG_RD, &sc->stat_IfHCOutUcastPkts,
6166 "Unicast packets sent");
6168 SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
6169 "stat_IfHCOutMulticastPkts",
6170 CTLFLAG_RD, &sc->stat_IfHCOutMulticastPkts,
6171 "Multicast packets sent");
6173 SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
6174 "stat_IfHCOutBroadcastPkts",
6175 CTLFLAG_RD, &sc->stat_IfHCOutBroadcastPkts,
6176 "Broadcast packets sent");
6178 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6179 "stat_emac_tx_stat_dot3statsinternalmactransmiterrors",
6180 CTLFLAG_RD, &sc->stat_emac_tx_stat_dot3statsinternalmactransmiterrors,
6181 0, "Internal MAC transmit errors");
6183 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6184 "stat_Dot3StatsCarrierSenseErrors",
6185 CTLFLAG_RD, &sc->stat_Dot3StatsCarrierSenseErrors,
6186 0, "Carrier sense errors");
6188 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6189 "stat_Dot3StatsFCSErrors",
6190 CTLFLAG_RD, &sc->stat_Dot3StatsFCSErrors,
6191 0, "Frame check sequence errors");
6193 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6194 "stat_Dot3StatsAlignmentErrors",
6195 CTLFLAG_RD, &sc->stat_Dot3StatsAlignmentErrors,
6196 0, "Alignment errors");
6198 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6199 "stat_Dot3StatsSingleCollisionFrames",
6200 CTLFLAG_RD, &sc->stat_Dot3StatsSingleCollisionFrames,
6201 0, "Single Collision Frames");
6203 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6204 "stat_Dot3StatsMultipleCollisionFrames",
6205 CTLFLAG_RD, &sc->stat_Dot3StatsMultipleCollisionFrames,
6206 0, "Multiple Collision Frames");
6208 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6209 "stat_Dot3StatsDeferredTransmissions",
6210 CTLFLAG_RD, &sc->stat_Dot3StatsDeferredTransmissions,
6211 0, "Deferred Transmissions");
6213 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6214 "stat_Dot3StatsExcessiveCollisions",
6215 CTLFLAG_RD, &sc->stat_Dot3StatsExcessiveCollisions,
6216 0, "Excessive Collisions");
6218 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6219 "stat_Dot3StatsLateCollisions",
6220 CTLFLAG_RD, &sc->stat_Dot3StatsLateCollisions,
6221 0, "Late Collisions");
6223 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6224 "stat_EtherStatsCollisions",
6225 CTLFLAG_RD, &sc->stat_EtherStatsCollisions,
6226 0, "Collisions");
6228 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6229 "stat_EtherStatsFragments",
6230 CTLFLAG_RD, &sc->stat_EtherStatsFragments,
6231 0, "Fragments");
6233 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6234 "stat_EtherStatsJabbers",
6235 CTLFLAG_RD, &sc->stat_EtherStatsJabbers,
6236 0, "Jabbers");
6238 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6239 "stat_EtherStatsUndersizePkts",
6240 CTLFLAG_RD, &sc->stat_EtherStatsUndersizePkts,
6241 0, "Undersize packets");
6243 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6244 "stat_EtherStatsOverrsizePkts",
6245 CTLFLAG_RD, &sc->stat_EtherStatsOverrsizePkts,
6246 0, "stat_EtherStatsOverrsizePkts");
6248 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6249 "stat_EtherStatsPktsRx64Octets",
6250 CTLFLAG_RD, &sc->stat_EtherStatsPktsRx64Octets,
6251 0, "Bytes received in 64 byte packets");
6253 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6254 "stat_EtherStatsPktsRx65Octetsto127Octets",
6255 CTLFLAG_RD, &sc->stat_EtherStatsPktsRx65Octetsto127Octets,
6256 0, "Bytes received in 65 to 127 byte packets");
6258 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6259 "stat_EtherStatsPktsRx128Octetsto255Octets",
6260 CTLFLAG_RD, &sc->stat_EtherStatsPktsRx128Octetsto255Octets,
6261 0, "Bytes received in 128 to 255 byte packets");
6263 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6264 "stat_EtherStatsPktsRx256Octetsto511Octets",
6265 CTLFLAG_RD, &sc->stat_EtherStatsPktsRx256Octetsto511Octets,
6266 0, "Bytes received in 256 to 511 byte packets");
6268 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6269 "stat_EtherStatsPktsRx512Octetsto1023Octets",
6270 CTLFLAG_RD, &sc->stat_EtherStatsPktsRx512Octetsto1023Octets,
6271 0, "Bytes received in 512 to 1023 byte packets");
6273 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6274 "stat_EtherStatsPktsRx1024Octetsto1522Octets",
6275 CTLFLAG_RD, &sc->stat_EtherStatsPktsRx1024Octetsto1522Octets,
6276 0, "Bytes received in 1024 t0 1522 byte packets");
6278 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6279 "stat_EtherStatsPktsRx1523Octetsto9022Octets",
6280 CTLFLAG_RD, &sc->stat_EtherStatsPktsRx1523Octetsto9022Octets,
6281 0, "Bytes received in 1523 to 9022 byte packets");
6283 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6284 "stat_EtherStatsPktsTx64Octets",
6285 CTLFLAG_RD, &sc->stat_EtherStatsPktsTx64Octets,
6286 0, "Bytes sent in 64 byte packets");
6288 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6289 "stat_EtherStatsPktsTx65Octetsto127Octets",
6290 CTLFLAG_RD, &sc->stat_EtherStatsPktsTx65Octetsto127Octets,
6291 0, "Bytes sent in 65 to 127 byte packets");
6293 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6294 "stat_EtherStatsPktsTx128Octetsto255Octets",
6295 CTLFLAG_RD, &sc->stat_EtherStatsPktsTx128Octetsto255Octets,
6296 0, "Bytes sent in 128 to 255 byte packets");
6298 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6299 "stat_EtherStatsPktsTx256Octetsto511Octets",
6300 CTLFLAG_RD, &sc->stat_EtherStatsPktsTx256Octetsto511Octets,
6301 0, "Bytes sent in 256 to 511 byte packets");
6303 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6304 "stat_EtherStatsPktsTx512Octetsto1023Octets",
6305 CTLFLAG_RD, &sc->stat_EtherStatsPktsTx512Octetsto1023Octets,
6306 0, "Bytes sent in 512 to 1023 byte packets");
6308 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6309 "stat_EtherStatsPktsTx1024Octetsto1522Octets",
6310 CTLFLAG_RD, &sc->stat_EtherStatsPktsTx1024Octetsto1522Octets,
6311 0, "Bytes sent in 1024 to 1522 byte packets");
6313 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6314 "stat_EtherStatsPktsTx1523Octetsto9022Octets",
6315 CTLFLAG_RD, &sc->stat_EtherStatsPktsTx1523Octetsto9022Octets,
6316 0, "Bytes sent in 1523 to 9022 byte packets");
6318 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6319 "stat_XonPauseFramesReceived",
6320 CTLFLAG_RD, &sc->stat_XonPauseFramesReceived,
6321 0, "XON pause frames receved");
6323 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6324 "stat_XoffPauseFramesReceived",
6325 CTLFLAG_RD, &sc->stat_XoffPauseFramesReceived,
6326 0, "XOFF pause frames received");
6328 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6329 "stat_OutXonSent",
6330 CTLFLAG_RD, &sc->stat_OutXonSent,
6331 0, "XON pause frames sent");
6333 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6334 "stat_OutXoffSent",
6335 CTLFLAG_RD, &sc->stat_OutXoffSent,
6336 0, "XOFF pause frames sent");
6338 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6339 "stat_FlowControlDone",
6340 CTLFLAG_RD, &sc->stat_FlowControlDone,
6341 0, "Flow control done");
6343 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6344 "stat_MacControlFramesReceived",
6345 CTLFLAG_RD, &sc->stat_MacControlFramesReceived,
6346 0, "MAC control frames received");
6348 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6349 "stat_XoffStateEntered",
6350 CTLFLAG_RD, &sc->stat_XoffStateEntered,
6351 0, "XOFF state entered");
6353 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6354 "stat_IfInFramesL2FilterDiscards",
6355 CTLFLAG_RD, &sc->stat_IfInFramesL2FilterDiscards,
6356 0, "Received L2 packets discarded");
6358 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6359 "stat_IfInRuleCheckerDiscards",
6360 CTLFLAG_RD, &sc->stat_IfInRuleCheckerDiscards,
6361 0, "Received packets discarded by rule");
6363 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6364 "stat_IfInFTQDiscards",
6365 CTLFLAG_RD, &sc->stat_IfInFTQDiscards,
6366 0, "Received packet FTQ discards");
6368 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6369 "stat_IfInMBUFDiscards",
6370 CTLFLAG_RD, &sc->stat_IfInMBUFDiscards,
6371 0, "Received packets discarded due to lack of controller buffer memory");
6373 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6374 "stat_IfInRuleCheckerP4Hit",
6375 CTLFLAG_RD, &sc->stat_IfInRuleCheckerP4Hit,
6376 0, "Received packets rule checker hits");
6378 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6379 "stat_CatchupInRuleCheckerDiscards",
6380 CTLFLAG_RD, &sc->stat_CatchupInRuleCheckerDiscards,
6381 0, "Received packets discarded in Catchup path");
6383 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6384 "stat_CatchupInFTQDiscards",
6385 CTLFLAG_RD, &sc->stat_CatchupInFTQDiscards,
6386 0, "Received packets discarded in FTQ in Catchup path");
6388 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6389 "stat_CatchupInMBUFDiscards",
6390 CTLFLAG_RD, &sc->stat_CatchupInMBUFDiscards,
6391 0, "Received packets discarded in controller buffer memory in Catchup path");
6393 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6394 "stat_CatchupInRuleCheckerP4Hit",
6395 CTLFLAG_RD, &sc->stat_CatchupInRuleCheckerP4Hit,
6396 0, "Received packets rule checker hits in Catchup path");
6398 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6399 "com_no_buffers",
6400 CTLFLAG_RD, &sc->com_no_buffers,
6401 0, "Valid packets received but no RX buffers available");
6404 static int
6405 bce_sysctl_tx_bds_int(SYSCTL_HANDLER_ARGS)
6406 {
6407 struct bce_softc *sc = arg1;
6409 return bce_sysctl_coal_change(oidp, arg1, arg2, req,
6410 &sc->bce_tx_quick_cons_trip_int,
6411 BCE_COALMASK_TX_BDS_INT);
6412 }
6414 static int
6415 bce_sysctl_tx_bds(SYSCTL_HANDLER_ARGS)
6416 {
6417 struct bce_softc *sc = arg1;
6419 return bce_sysctl_coal_change(oidp, arg1, arg2, req,
6420 &sc->bce_tx_quick_cons_trip,
6421 BCE_COALMASK_TX_BDS);
6422 }
6424 static int
6425 bce_sysctl_tx_ticks_int(SYSCTL_HANDLER_ARGS)
6426 {
6427 struct bce_softc *sc = arg1;
6429 return bce_sysctl_coal_change(oidp, arg1, arg2, req,
6430 &sc->bce_tx_ticks_int,
6431 BCE_COALMASK_TX_TICKS_INT);
6432 }
6434 static int
6435 bce_sysctl_tx_ticks(SYSCTL_HANDLER_ARGS)
6436 {
6437 struct bce_softc *sc = arg1;
6439 return bce_sysctl_coal_change(oidp, arg1, arg2, req,
6440 &sc->bce_tx_ticks,
6441 BCE_COALMASK_TX_TICKS);
6442 }
6444 static int
6445 bce_sysctl_rx_bds_int(SYSCTL_HANDLER_ARGS)
6446 {
6447 struct bce_softc *sc = arg1;
6449 return bce_sysctl_coal_change(oidp, arg1, arg2, req,
6450 &sc->bce_rx_quick_cons_trip_int,
6451 BCE_COALMASK_RX_BDS_INT);
6452 }
6454 static int
6455 bce_sysctl_rx_bds(SYSCTL_HANDLER_ARGS)
6456 {
6457 struct bce_softc *sc = arg1;
6459 return bce_sysctl_coal_change(oidp, arg1, arg2, req,
6460 &sc->bce_rx_quick_cons_trip,
6461 BCE_COALMASK_RX_BDS);
6462 }
6464 static int
6465 bce_sysctl_rx_ticks_int(SYSCTL_HANDLER_ARGS)
6466 {
6467 struct bce_softc *sc = arg1;
6469 return bce_sysctl_coal_change(oidp, arg1, arg2, req,
6470 &sc->bce_rx_ticks_int,
6471 BCE_COALMASK_RX_TICKS_INT);
6472 }
6474 static int
6475 bce_sysctl_rx_ticks(SYSCTL_HANDLER_ARGS)
6476 {
6477 struct bce_softc *sc = arg1;
6479 return bce_sysctl_coal_change(oidp, arg1, arg2, req,
6480 &sc->bce_rx_ticks,
6481 BCE_COALMASK_RX_TICKS);
6482 }
6484 static int
6485 bce_sysctl_coal_change(SYSCTL_HANDLER_ARGS, uint32_t *coal,
6486 uint32_t coalchg_mask)
6487 {
6488 struct bce_softc *sc = arg1;
6489 struct ifnet *ifp = &sc->arpcom.ac_if;
6490 int error = 0, v;
6492 ifnet_serialize_all(ifp);
6494 v = *coal;
6495 error = sysctl_handle_int(oidp, &v, 0, req);
6496 if (!error && req->newptr != NULL) {
6497 if (v < 0) {
6498 error = EINVAL;
6499 } else {
6500 *coal = v;
6501 sc->bce_coalchg_mask |= coalchg_mask;
6503 /* Commit changes */
6504 bce_coal_change(sc);
6505 }
6506 }
6508 ifnet_deserialize_all(ifp);
6509 return error;
6510 }
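/*
 * Usage sketch (illustrative): the coalescing knobs registered in
 * bce_add_sysctls() all funnel through the handler above.  From
 * userland they would be tuned with sysctl(8); the exact node
 * path depends on the device unit, e.g. for unit 0 something like:
 *
 *   sysctl dev.bce.0.tx_ticks=80
 *   sysctl dev.bce.0.rx_bds_int=6
 *
 * Negative values are rejected with EINVAL; a successful write
 * sets the matching BCE_COALMASK_* bit and commits the new value
 * to the chip via bce_coal_change().
 */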
6512 static void
6513 bce_coal_change(struct bce_softc *sc)
6514 {
6515 struct ifnet *ifp = &sc->arpcom.ac_if;
6516 int i;
6518 ASSERT_SERIALIZED(&sc->main_serialize);
6520 if ((ifp->if_flags & IFF_RUNNING) == 0) {
6521 sc->bce_coalchg_mask = 0;
6522 return;
6523 }
6525 if (sc->bce_coalchg_mask &
6526 (BCE_COALMASK_TX_BDS | BCE_COALMASK_TX_BDS_INT)) {
6527 REG_WR(sc, BCE_HC_TX_QUICK_CONS_TRIP,
6528 (sc->bce_tx_quick_cons_trip_int << 16) |
6529 sc->bce_tx_quick_cons_trip);
6530 for (i = 1; i < sc->rx_ring_cnt; ++i) {
6531 uint32_t base;
6533 base = ((i - 1) * BCE_HC_SB_CONFIG_SIZE) +
6534 BCE_HC_SB_CONFIG_1;
6535 REG_WR(sc, base + BCE_HC_TX_QUICK_CONS_TRIP_OFF,
6536 (sc->bce_tx_quick_cons_trip_int << 16) |
6537 sc->bce_tx_quick_cons_trip);
6538 }
6539 if (bootverbose) {
6540 if_printf(ifp, "tx_bds %u, tx_bds_int %u\n",
6541 sc->bce_tx_quick_cons_trip,
6542 sc->bce_tx_quick_cons_trip_int);
6543 }
6544 }
6546 if (sc->bce_coalchg_mask &
6547 (BCE_COALMASK_TX_TICKS | BCE_COALMASK_TX_TICKS_INT)) {
6548 REG_WR(sc, BCE_HC_TX_TICKS,
6549 (sc->bce_tx_ticks_int << 16) | sc->bce_tx_ticks);
6550 for (i = 1; i < sc->rx_ring_cnt; ++i) {
6551 uint32_t base;
6553 base = ((i - 1) * BCE_HC_SB_CONFIG_SIZE) +
6554 BCE_HC_SB_CONFIG_1;
6555 REG_WR(sc, base + BCE_HC_TX_TICKS_OFF,
6556 (sc->bce_tx_ticks_int << 16) | sc->bce_tx_ticks);
6557 }
6558 if (bootverbose) {
6559 if_printf(ifp, "tx_ticks %u, tx_ticks_int %u\n",
6560 sc->bce_tx_ticks, sc->bce_tx_ticks_int);
6561 }
6562 }
6564 if (sc->bce_coalchg_mask &
6565 (BCE_COALMASK_RX_BDS | BCE_COALMASK_RX_BDS_INT)) {
6566 REG_WR(sc, BCE_HC_RX_QUICK_CONS_TRIP,
6567 (sc->bce_rx_quick_cons_trip_int << 16) |
6568 sc->bce_rx_quick_cons_trip);
6569 for (i = 1; i < sc->rx_ring_cnt; ++i) {
6570 uint32_t base;
6572 base = ((i - 1) * BCE_HC_SB_CONFIG_SIZE) +
6573 BCE_HC_SB_CONFIG_1;
6574 REG_WR(sc, base + BCE_HC_RX_QUICK_CONS_TRIP_OFF,
6575 (sc->bce_rx_quick_cons_trip_int << 16) |
6576 sc->bce_rx_quick_cons_trip);
6577 }
6578 if (bootverbose) {
6579 if_printf(ifp, "rx_bds %u, rx_bds_int %u\n",
6580 sc->bce_rx_quick_cons_trip,
6581 sc->bce_rx_quick_cons_trip_int);
6582 }
6583 }
6585 if (sc->bce_coalchg_mask &
6586 (BCE_COALMASK_RX_TICKS | BCE_COALMASK_RX_TICKS_INT)) {
6587 REG_WR(sc, BCE_HC_RX_TICKS,
6588 (sc->bce_rx_ticks_int << 16) | sc->bce_rx_ticks);
6589 for (i = 1; i < sc->rx_ring_cnt; ++i) {
6590 uint32_t base;
6592 base = ((i - 1) * BCE_HC_SB_CONFIG_SIZE) +
6593 BCE_HC_SB_CONFIG_1;
6594 REG_WR(sc, base + BCE_HC_RX_TICKS_OFF,
6595 (sc->bce_rx_ticks_int << 16) | sc->bce_rx_ticks);
6596 }
6597 if (bootverbose) {
6598 if_printf(ifp, "rx_ticks %u, rx_ticks_int %u\n",
6599 sc->bce_rx_ticks, sc->bce_rx_ticks_int);
6600 }
6601 }
6603 sc->bce_coalchg_mask = 0;
6604 }
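/*
 * Worked example (illustrative): each BCE_HC_*_TICKS and
 * BCE_HC_*_QUICK_CONS_TRIP register written above packs the
 * "during interrupt" value into the upper 16 bits and the normal
 * value into the lower 16 bits.
 */
#if 0
static uint32_t
example_coal_pack(uint16_t val_int, uint16_t val)
{
	/* val_int = 80 (0x50), val = 160 (0xA0) -> 0x005000A0 */
	return ((uint32_t)val_int << 16) | val;
}
#endif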
6606 static int
6607 bce_tso_setup(struct bce_tx_ring *txr, struct mbuf **mp,
6608 uint16_t *flags0, uint16_t *mss0)
6609 {
6610 struct mbuf *m;
6611 uint16_t flags;
6612 int thoff, iphlen, hoff;
6614 m = *mp;
6615 KASSERT(M_WRITABLE(m), ("TSO mbuf not writable"));
6617 hoff = m->m_pkthdr.csum_lhlen;
6618 iphlen = m->m_pkthdr.csum_iphlen;
6619 thoff = m->m_pkthdr.csum_thlen;
6621 KASSERT(hoff >= sizeof(struct ether_header),
6622 ("invalid ether header len %d", hoff));
6623 KASSERT(iphlen >= sizeof(struct ip),
6624 ("invalid ip header len %d", iphlen));
6625 KASSERT(thoff >= sizeof(struct tcphdr),
6626 ("invalid tcp header len %d", thoff));
6628 if (__predict_false(m->m_len < hoff + iphlen + thoff)) {
6629 m = m_pullup(m, hoff + iphlen + thoff);
6630 if (m == NULL) {
6631 *mp = NULL;
6632 return ENOBUFS;
6633 }
6634 *mp = m;
6635 }
6637 /* Set the LSO flag in the TX BD */
6638 flags = TX_BD_FLAGS_SW_LSO;
6640 /* Set the length of IP + TCP options (in 32 bit words) */
6641 flags |= (((iphlen + thoff -
6642 sizeof(struct ip) - sizeof(struct tcphdr)) >> 2) << 8);
6644 *mss0 = htole16(m->m_pkthdr.tso_segsz);
6645 *flags0 = flags;
6647 return 0;
6648 }
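/*
 * Worked example (illustrative): the option-length field encoded
 * into the TX BD flags above counts only the IP and TCP *option*
 * bytes, expressed in 32-bit words and shifted into bits 8-11.
 * For iphlen = 24 (20 + 4 option bytes) and thoff = 32 (20 + 12
 * option bytes):
 *
 *   (24 + 32 - 20 - 20) >> 2 = 4 option words
 *   flags |= 4 << 8
 */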
6650 static void
6651 bce_setup_serialize(struct bce_softc *sc)
6652 {
6653 int i, j;
6655 /*
6656 * Allocate serializer array
6657 */
6659 /* Main + TX + RX */
6660 sc->serialize_cnt = 1 + sc->tx_ring_cnt + sc->rx_ring_cnt;
6662 sc->serializes =
6663 kmalloc(sc->serialize_cnt * sizeof(struct lwkt_serialize *),
6664 M_DEVBUF, M_WAITOK | M_ZERO);
6666 /*
6667 * Setup serializers
6668 *
6669 * NOTE: Order is critical
6670 */
6672 i = 0;
6674 KKASSERT(i < sc->serialize_cnt);
6675 sc->serializes[i++] = &sc->main_serialize;
6677 for (j = 0; j < sc->rx_ring_cnt; ++j) {
6678 KKASSERT(i < sc->serialize_cnt);
6679 sc->serializes[i++] = &sc->rx_rings[j].rx_serialize;
6680 }
6682 for (j = 0; j < sc->tx_ring_cnt; ++j) {
6683 KKASSERT(i < sc->serialize_cnt);
6684 sc->serializes[i++] = &sc->tx_rings[j].tx_serialize;
6685 }
6687 KKASSERT(i == sc->serialize_cnt);
6688 }
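/*
 * Layout sketch (illustrative): with rx_ring_cnt = 5 and
 * tx_ring_cnt = 4 the array built above is ordered
 *
 *   serializes[0]    = &sc->main_serialize
 *   serializes[1..5] = rx_rings[0..4].rx_serialize
 *   serializes[6..9] = tx_rings[0..3].tx_serialize
 *
 * bce_serialize_skipmain()/bce_deserialize_skipmain() rely on
 * this order: entering the array at offset 1 takes every
 * serializer except the main one.
 */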
6690 static void
6691 bce_serialize(struct ifnet *ifp, enum ifnet_serialize slz)
6692 {
6693 struct bce_softc *sc = ifp->if_softc;
6695 ifnet_serialize_array_enter(sc->serializes, sc->serialize_cnt, slz);
6696 }
6698 static void
6699 bce_deserialize(struct ifnet *ifp, enum ifnet_serialize slz)
6700 {
6701 struct bce_softc *sc = ifp->if_softc;
6703 ifnet_serialize_array_exit(sc->serializes, sc->serialize_cnt, slz);
6704 }
6706 static int
6707 bce_tryserialize(struct ifnet *ifp, enum ifnet_serialize slz)
6708 {
6709 struct bce_softc *sc = ifp->if_softc;
6711 return ifnet_serialize_array_try(sc->serializes, sc->serialize_cnt,
6712 slz);
6713 }
6715 #ifdef INVARIANTS
6717 static void
6718 bce_serialize_assert(struct ifnet *ifp, enum ifnet_serialize slz,
6719 boolean_t serialized)
6720 {
6721 struct bce_softc *sc = ifp->if_softc;
6723 ifnet_serialize_array_assert(sc->serializes, sc->serialize_cnt,
6724 slz, serialized);
6725 }
6727 #endif /* INVARIANTS */
6729 static void
6730 bce_serialize_skipmain(struct bce_softc *sc)
6731 {
6732 lwkt_serialize_array_enter(sc->serializes, sc->serialize_cnt, 1);
6733 }
6735 static void
6736 bce_deserialize_skipmain(struct bce_softc *sc)
6737 {
6738 lwkt_serialize_array_exit(sc->serializes, sc->serialize_cnt, 1);
6739 }
6741 #ifdef IFPOLL_ENABLE
6743 static int
6744 bce_sysctl_npoll_offset(SYSCTL_HANDLER_ARGS)
6745 {
6746 struct bce_softc *sc = (void *)arg1;
6747 struct ifnet *ifp = &sc->arpcom.ac_if;
6748 int error, off;
6750 off = sc->npoll_ofs;
6751 error = sysctl_handle_int(oidp, &off, 0, req);
6752 if (error || req->newptr == NULL)
6753 return error;
6754 if (off < 0)
6755 return EINVAL;
6757 ifnet_serialize_all(ifp);
6758 if (off >= ncpus2 || off % sc->rx_ring_cnt2 != 0) {
6759 error = EINVAL;
6760 } else {
6761 error = 0;
6762 sc->npoll_ofs = off;
6763 }
6764 ifnet_deserialize_all(ifp);
6766 return error;
6767 }
6769 #endif /* IFPOLL_ENABLE */
6771 static void
6772 bce_set_timer_cpuid(struct bce_softc *sc, boolean_t polling)
6773 {
6774 if (polling)
6775 sc->bce_timer_cpuid = 0; /* XXX */
6776 else
6777 sc->bce_timer_cpuid = sc->bce_msix[0].msix_cpuid;
6778 }
6780 static int
6781 bce_alloc_intr(struct bce_softc *sc)
6782 {
6783 u_int irq_flags;
6785 bce_try_alloc_msix(sc);
6786 if (sc->bce_irq_type == PCI_INTR_TYPE_MSIX)
6787 return 0;
6789 sc->bce_irq_type = pci_alloc_1intr(sc->bce_dev, bce_msi_enable,
6790 &sc->bce_irq_rid, &irq_flags);
6792 sc->bce_res_irq = bus_alloc_resource_any(sc->bce_dev, SYS_RES_IRQ,
6793 &sc->bce_irq_rid, irq_flags);
6794 if (sc->bce_res_irq == NULL) {
6795 device_printf(sc->bce_dev, "PCI map interrupt failed\n");
6796 return ENXIO;
6797 }
6798 sc->bce_msix[0].msix_cpuid = rman_get_cpuid(sc->bce_res_irq);
6799 sc->bce_msix[0].msix_serialize = &sc->main_serialize;
6801 return 0;
6802 }
6804 static void
6805 bce_try_alloc_msix(struct bce_softc *sc)
6806 {
6807 struct bce_msix_data *msix;
6808 int offset, i, error;
6809 boolean_t setup = FALSE;
6811 if (sc->rx_ring_cnt == 1)
6812 return;
6814 if (sc->rx_ring_cnt2 == ncpus2) {
6815 offset = 0;
6816 } else {
6817 int offset_def =
6818 (sc->rx_ring_cnt2 * device_get_unit(sc->bce_dev)) % ncpus2;
6820 offset = device_getenv_int(sc->bce_dev,
6821 "msix.offset", offset_def);
6822 if (offset >= ncpus2 || offset % sc->rx_ring_cnt2 != 0) {
6823 device_printf(sc->bce_dev,
6824 "invalid msix.offset %d, use %d\n",
6825 offset, offset_def);
6826 offset = offset_def;
6827 }
6828 }
6830 msix = &sc->bce_msix[0];
6831 msix->msix_serialize = &sc->main_serialize;
6832 msix->msix_func = bce_intr_msi_oneshot;
6833 msix->msix_arg = sc;
6834 KKASSERT(offset < ncpus2);
6835 msix->msix_cpuid = offset;
6836 ksnprintf(msix->msix_desc, sizeof(msix->msix_desc), "%s combo",
6837 device_get_nameunit(sc->bce_dev));
6839 for (i = 1; i < sc->rx_ring_cnt; ++i) {
6840 struct bce_rx_ring *rxr = &sc->rx_rings[i];
6842 msix = &sc->bce_msix[i];
6844 msix->msix_serialize = &rxr->rx_serialize;
6845 msix->msix_arg = rxr;
6846 msix->msix_cpuid = offset + (i % sc->rx_ring_cnt2);
6847 KKASSERT(msix->msix_cpuid < ncpus2);
6849 if (i < sc->tx_ring_cnt) {
6850 msix->msix_func = bce_intr_msix_rxtx;
6851 ksnprintf(msix->msix_desc, sizeof(msix->msix_desc),
6852 "%s rxtx%d", device_get_nameunit(sc->bce_dev), i);
6853 } else {
6854 msix->msix_func = bce_intr_msix_rx;
6855 ksnprintf(msix->msix_desc, sizeof(msix->msix_desc),
6856 "%s rx%d", device_get_nameunit(sc->bce_dev), i);
6861 * Setup MSI-X table
6863 bce_setup_msix_table(sc);
6864 REG_WR(sc, BCE_PCI_MSIX_CONTROL, BCE_MSIX_MAX - 1);
6865 REG_WR(sc, BCE_PCI_MSIX_TBL_OFF_BIR, BCE_PCI_GRC_WINDOW2_BASE);
6866 REG_WR(sc, BCE_PCI_MSIX_PBA_OFF_BIT, BCE_PCI_GRC_WINDOW3_BASE);
6867 /* Flush */
6868 REG_RD(sc, BCE_PCI_MSIX_CONTROL);
6870 error = pci_setup_msix(sc->bce_dev);
6871 if (error) {
6872 device_printf(sc->bce_dev, "Setup MSI-X failed\n");
6873 goto back;
6875 setup = TRUE;
6877 for (i = 0; i < sc->rx_ring_cnt; ++i) {
6878 msix = &sc->bce_msix[i];
6880 error = pci_alloc_msix_vector(sc->bce_dev, i, &msix->msix_rid,
6881 msix->msix_cpuid);
6882 if (error) {
6883 device_printf(sc->bce_dev,
6884 "Unable to allocate MSI-X %d on cpu%d\n",
6885 i, msix->msix_cpuid);
6886 goto back;
6887 }
6889 msix->msix_res = bus_alloc_resource_any(sc->bce_dev,
6890 SYS_RES_IRQ, &msix->msix_rid, RF_ACTIVE);
6891 if (msix->msix_res == NULL) {
6892 device_printf(sc->bce_dev,
6893 "Unable to allocate MSI-X %d resource\n", i);
6894 error = ENOMEM;
6895 goto back;
6896 }
6897 }
6899 pci_enable_msix(sc->bce_dev);
6900 sc->bce_irq_type = PCI_INTR_TYPE_MSIX;
6901 back:
6902 if (error)
6903 bce_free_msix(sc, setup);
6904 }
6906 static void
6907 bce_setup_ring_cnt(struct bce_softc *sc)
6908 {
6909 int msix_enable, ring_max, msix_cnt2, msix_cnt, i;
6911 sc->rx_ring_cnt = 1;
6912 sc->rx_ring_cnt2 = 1;
6913 sc->tx_ring_cnt = 1;
6915 if (BCE_CHIP_NUM(sc) != BCE_CHIP_NUM_5709 &&
6916 BCE_CHIP_NUM(sc) != BCE_CHIP_NUM_5716)
6917 return;
6919 msix_enable = device_getenv_int(sc->bce_dev, "msix.enable",
6920 bce_msix_enable);
6921 if (!msix_enable)
6922 return;
6924 if (ncpus2 == 1)
6925 return;
6927 msix_cnt = pci_msix_count(sc->bce_dev);
6928 if (msix_cnt <= 1)
6929 return;
6931 i = 0;
6932 while ((1 << (i + 1)) <= msix_cnt)
6933 ++i;
6934 msix_cnt2 = 1 << i;
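/*
 * Worked example (illustrative): the loop above rounds msix_cnt
 * down to a power of 2.  With the 9 vectors a 5709/5716 provides,
 * the loop stops at i = 3 (since 1 << 4 = 16 > 9), so
 * msix_cnt2 = 1 << 3 = 8.
 */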
6936 /*
6937 * One extra RX ring will be needed (see below), so make sure
6938 * that there are enough MSI-X vectors.
6939 */
6940 if (msix_cnt == msix_cnt2) {
6941 /*
6942 * XXX
6943 * This probably will not happen; 5709/5716
6944 * come with 9 MSI-X vectors.
6945 */
6946 msix_cnt2 >>= 1;
6947 if (msix_cnt2 <= 1) {
6948 device_printf(sc->bce_dev,
6949 "MSI-X count %d could not be used\n", msix_cnt);
6950 return;
6951 }
6952 device_printf(sc->bce_dev, "MSI-X count %d is power of 2\n",
6953 msix_cnt);
6954 }
6956 /*
6957 * Setup RX ring count
6958 */
6959 ring_max = BCE_RX_RING_MAX;
6960 if (ring_max > msix_cnt2)
6961 ring_max = msix_cnt2;
6962 sc->rx_ring_cnt2 = device_getenv_int(sc->bce_dev, "rx_rings",
6963 bce_rx_rings);
6964 sc->rx_ring_cnt2 = if_ring_count2(sc->rx_ring_cnt2, ring_max);
6966 /*
6967 * Don't use MSI-X if the effective RX ring count is 1.
6968 * If the effective RX ring count is 1, the TX ring count
6969 * will also be 1, and this RX ring and the TX ring must be
6970 * bundled into one MSI-X vector, so the hot path would be
6971 * exactly the same as using MSI. Besides, the first RX ring,
6972 * which only accepts packets whose RSS hash can't be
6973 * calculated (e.g. ARP packets), would still have to be
6974 * fully populated; a waste of resources at the least.
6975 */
6976 if (sc->rx_ring_cnt2 == 1)
6977 return;
6979 /*
6980 * One extra RX ring is allocated, since the first RX ring
6981 * could not be used for RSS hashed packets whose masked
6982 * hash is 0. The first RX ring is only used for packets
6983 * whose RSS hash could not be calculated, e.g. ARP packets.
6984 * This extra RX ring will be used for packets whose masked
6985 * hash is 0. The effective RX ring count involved in RSS
6986 * is still sc->rx_ring_cnt2.
6987 */
6988 KKASSERT(sc->rx_ring_cnt2 + 1 <= msix_cnt);
6989 sc->rx_ring_cnt = sc->rx_ring_cnt2 + 1;
6991 /*
6992 * Setup TX ring count
6993 *
6994 * NOTE:
6995 * TX ring count must be less than the effective RSS RX ring
6996 * count, since we use the RX ring software data struct to save
6997 * the status index and various other MSI-X related state.
6998 */
6999 ring_max = BCE_TX_RING_MAX;
7000 if (ring_max > msix_cnt2)
7001 ring_max = msix_cnt2;
7002 if (ring_max > sc->rx_ring_cnt2)
7003 ring_max = sc->rx_ring_cnt2;
7004 sc->tx_ring_cnt = device_getenv_int(sc->bce_dev, "tx_rings",
7005 bce_tx_rings);
7006 sc->tx_ring_cnt = if_ring_count2(sc->tx_ring_cnt, ring_max);
7007 }
7009 static void
7010 bce_free_msix(struct bce_softc *sc, boolean_t setup)
7011 {
7012 int i;
7014 KKASSERT(sc->rx_ring_cnt > 1);
7016 for (i = 0; i < sc->rx_ring_cnt; ++i) {
7017 struct bce_msix_data *msix = &sc->bce_msix[i];
7019 if (msix->msix_res != NULL) {
7020 bus_release_resource(sc->bce_dev, SYS_RES_IRQ,
7021 msix->msix_rid, msix->msix_res);
7022 }
7023 if (msix->msix_rid >= 0)
7024 pci_release_msix_vector(sc->bce_dev, msix->msix_rid);
7025 }
7026 if (setup)
7027 pci_teardown_msix(sc->bce_dev);
7028 }
7030 static void
7031 bce_free_intr(struct bce_softc *sc)
7032 {
7033 if (sc->bce_irq_type != PCI_INTR_TYPE_MSIX) {
7034 if (sc->bce_res_irq != NULL) {
7035 bus_release_resource(sc->bce_dev, SYS_RES_IRQ,
7036 sc->bce_irq_rid, sc->bce_res_irq);
7037 }
7038 if (sc->bce_irq_type == PCI_INTR_TYPE_MSI)
7039 pci_release_msi(sc->bce_dev);
7040 } else {
7041 bce_free_msix(sc, TRUE);
7042 }
7043 }
7045 static void
7046 bce_setup_msix_table(struct bce_softc *sc)
7047 {
7048 REG_WR(sc, BCE_PCI_GRC_WINDOW_ADDR, BCE_PCI_GRC_WINDOW_ADDR_SEP_WIN);
7049 REG_WR(sc, BCE_PCI_GRC_WINDOW2_ADDR, BCE_MSIX_TABLE_ADDR);
7050 REG_WR(sc, BCE_PCI_GRC_WINDOW3_ADDR, BCE_MSIX_PBA_ADDR);
7051 }
7053 static int
7054 bce_setup_intr(struct bce_softc *sc)
7055 {
7056 void (*irq_handle)(void *);
7057 int error;
7059 if (sc->bce_irq_type == PCI_INTR_TYPE_MSIX)
7060 return bce_setup_msix(sc);
7062 if (sc->bce_irq_type == PCI_INTR_TYPE_LEGACY) {
7063 irq_handle = bce_intr_legacy;
7064 } else if (sc->bce_irq_type == PCI_INTR_TYPE_MSI) {
7065 if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 ||
7066 BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) {
7067 irq_handle = bce_intr_msi_oneshot;
7068 sc->bce_flags |= BCE_ONESHOT_MSI_FLAG;
7069 } else {
7070 irq_handle = bce_intr_msi;
7071 sc->bce_flags |= BCE_CHECK_MSI_FLAG;
7072 }
7073 } else {
7074 panic("%s: unsupported intr type %d",
7075 device_get_nameunit(sc->bce_dev), sc->bce_irq_type);
7076 }
7078 error = bus_setup_intr(sc->bce_dev, sc->bce_res_irq, INTR_MPSAFE,
7079 irq_handle, sc, &sc->bce_intrhand, &sc->main_serialize);
7080 if (error != 0) {
7081 device_printf(sc->bce_dev, "Failed to setup IRQ!\n");
7082 return error;
7083 }
7085 return 0;
7086 }
7088 static void
7089 bce_teardown_intr(struct bce_softc *sc)
7090 {
7091 if (sc->bce_irq_type != PCI_INTR_TYPE_MSIX)
7092 bus_teardown_intr(sc->bce_dev, sc->bce_res_irq, sc->bce_intrhand);
7093 else
7094 bce_teardown_msix(sc, sc->rx_ring_cnt);
7095 }
7097 static int
7098 bce_setup_msix(struct bce_softc *sc)
7099 {
7100 int i;
7102 for (i = 0; i < sc->rx_ring_cnt; ++i) {
7103 struct bce_msix_data *msix = &sc->bce_msix[i];
7104 int error;
7106 error = bus_setup_intr_descr(sc->bce_dev, msix->msix_res,
7107 INTR_MPSAFE, msix->msix_func, msix->msix_arg,
7108 &msix->msix_handle, msix->msix_serialize, msix->msix_desc);
7109 if (error) {
7110 device_printf(sc->bce_dev, "could not set up %s "
7111 "interrupt handler.\n", msix->msix_desc);
7112 bce_teardown_msix(sc, i);
7113 return error;
7114 }
7115 }
7116 return 0;
7117 }
7119 static void
7120 bce_teardown_msix(struct bce_softc *sc, int msix_cnt)
7121 {
7122 int i;
7124 for (i = 0; i < msix_cnt; ++i) {
7125 struct bce_msix_data *msix = &sc->bce_msix[i];
7127 bus_teardown_intr(sc->bce_dev, msix->msix_res,
7128 msix->msix_handle);
7129 }
7130 }
7132 static void
7133 bce_init_rss(struct bce_softc *sc)
7134 {
7135 uint8_t key[BCE_RLUP_RSS_KEY_CNT * BCE_RLUP_RSS_KEY_SIZE];
7136 uint32_t tbl = 0;
7137 int i;
7139 KKASSERT(sc->rx_ring_cnt > 2);
7141 /*
7142 * Configure RSS keys
7143 */
7144 toeplitz_get_key(key, sizeof(key));
7145 for (i = 0; i < BCE_RLUP_RSS_KEY_CNT; ++i) {
7146 uint32_t rss_key;
7148 rss_key = BCE_RLUP_RSS_KEYVAL(key, i);
7149 BCE_RSS_DPRINTF(sc, 1, "rss_key%d 0x%08x\n", i, rss_key);
7151 REG_WR(sc, BCE_RLUP_RSS_KEY(i), rss_key);
7152 }
7154 /*
7155 * Configure the redirect table
7157 * NOTE:
7158 * - The "queue ID" in redirect table is the software RX ring's
7159 * index _minus_ one.
7160 * - The last RX ring, whose "queue ID" is (sc->rx_ring_cnt - 2)
7161 * will be used for packets whose masked hash is 0.
7162 * (see also: comment in bce_setup_ring_cnt())
7164 * The redirect table is configured in the following fashion, except
7165 * for the masked hash 0, which is noted above:
7166 * (hash & ring_cnt_mask) == rdr_table[(hash & rdr_table_mask)]
7167 */
7168 for (i = 0; i < BCE_RXP_SCRATCH_RSS_TBL_MAX_ENTRIES; i++) {
7169 int shift = (i % 8) << 2, qid;
7171 qid = i % sc->rx_ring_cnt2;
7172 if (qid > 0)
7173 --qid;
7174 else
7175 qid = sc->rx_ring_cnt - 2;
7176 KKASSERT(qid < (sc->rx_ring_cnt - 1));
7178 tbl |= qid << shift;
7179 if (i % 8 == 7) {
7180 BCE_RSS_DPRINTF(sc, 1, "tbl 0x%08x\n", tbl);
7181 REG_WR(sc, BCE_RLUP_RSS_DATA, tbl);
7182 REG_WR(sc, BCE_RLUP_RSS_COMMAND, (i >> 3) |
7183 BCE_RLUP_RSS_COMMAND_RSS_WRITE_MASK |
7184 BCE_RLUP_RSS_COMMAND_WRITE |
7185 BCE_RLUP_RSS_COMMAND_HASH_MASK);
7186 tbl = 0;
7187 }
7188 }
7189 REG_WR(sc, BCE_RLUP_RSS_CONFIG,
7190 BCE_RLUP_RSS_CONFIG_IPV4_RSS_TYPE_ALL_XI);
7191 }
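/*
 * Worked example (illustrative): with rx_ring_cnt2 = 4 RSS rings
 * and rx_ring_cnt = 5 software rings, the loop above fills the
 * redirect table as
 *
 *   i % 4 == 1 -> qid 0 (software ring 1)
 *   i % 4 == 2 -> qid 1 (software ring 2)
 *   i % 4 == 3 -> qid 2 (software ring 3)
 *   i % 4 == 0 -> qid 3 (software ring 4, the extra ring standing
 *                        in for masked hash 0)
 *
 * No entry ever points at software ring 0, which is reserved for
 * packets without a usable RSS hash (e.g. ARP).
 */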
7193 static void
7194 bce_npoll_coal_change(struct bce_softc *sc)
7195 {
7196 uint32_t old_rx_cons, old_tx_cons;
7198 old_rx_cons = sc->bce_rx_quick_cons_trip_int;
7199 old_tx_cons = sc->bce_tx_quick_cons_trip_int;
7200 sc->bce_rx_quick_cons_trip_int = 1;
7201 sc->bce_tx_quick_cons_trip_int = 1;
7203 sc->bce_coalchg_mask |= BCE_COALMASK_TX_BDS_INT |
7204 BCE_COALMASK_RX_BDS_INT;
7205 bce_coal_change(sc);
7207 sc->bce_rx_quick_cons_trip_int = old_rx_cons;
7208 sc->bce_tx_quick_cons_trip_int = old_tx_cons;
7209 }
7211 static struct pktinfo *
7212 bce_rss_pktinfo(struct pktinfo *pi, uint32_t status,
7213 const struct l2_fhdr *l2fhdr)
7214 {
7215 /* Check for an IP datagram. */
7216 if ((status & L2_FHDR_STATUS_IP_DATAGRAM) == 0)
7217 return NULL;
7219 /* Check if the IP checksum is valid. */
7220 if (l2fhdr->l2_fhdr_ip_xsum != 0xffff)
7221 return NULL;
7223 /* Check for a valid TCP/UDP frame. */
7224 if (status & L2_FHDR_STATUS_TCP_SEGMENT) {
7225 if (status & L2_FHDR_ERRORS_TCP_XSUM)
7226 return NULL;
7227 if (l2fhdr->l2_fhdr_tcp_udp_xsum != 0xffff)
7228 return NULL;
7229 pi->pi_l3proto = IPPROTO_TCP;
7230 } else if (status & L2_FHDR_STATUS_UDP_DATAGRAM) {
7231 if (status & L2_FHDR_ERRORS_UDP_XSUM)
7232 return NULL;
7233 if (l2fhdr->l2_fhdr_tcp_udp_xsum != 0xffff)
7234 return NULL;
7235 pi->pi_l3proto = IPPROTO_UDP;
7236 } else {
7237 return NULL;
7238 }
7239 pi->pi_netisr = NETISR_IP;
7240 pi->pi_flags = 0;
7242 return pi;