2 * Copyright (c) 2001 Wind River Systems
3 * Copyright (c) 1997, 1998, 1999, 2001
4 * Bill Paul <wpaul@windriver.com>. All rights reserved.
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * 3. All advertising materials mentioning features or use of this software
15 * must display the following acknowledgement:
16 * This product includes software developed by Bill Paul.
17 * 4. Neither the name of the author nor the names of any co-contributors
18 * may be used to endorse or promote products derived from this software
19 * without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
22 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
25 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
26 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
28 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
29 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
31 * THE POSSIBILITY OF SUCH DAMAGE.
33 * $FreeBSD: src/sys/dev/bge/if_bge.c,v 1.3.2.39 2005/07/03 03:41:18 silby Exp $
37 #include "opt_ifpoll.h"
39 #include <sys/param.h>
41 #include <sys/endian.h>
42 #include <sys/kernel.h>
43 #include <sys/interrupt.h>
45 #include <sys/malloc.h>
46 #include <sys/queue.h>
48 #include <sys/serialize.h>
49 #include <sys/socket.h>
50 #include <sys/sockio.h>
51 #include <sys/sysctl.h>
53 #include <netinet/ip.h>
54 #include <netinet/tcp.h>
57 #include <net/ethernet.h>
59 #include <net/if_arp.h>
60 #include <net/if_dl.h>
61 #include <net/if_media.h>
62 #include <net/if_poll.h>
63 #include <net/if_types.h>
64 #include <net/ifq_var.h>
65 #include <net/toeplitz.h>
66 #include <net/toeplitz2.h>
67 #include <net/vlan/if_vlan_var.h>
68 #include <net/vlan/if_vlan_ether.h>
70 #include <dev/netif/mii_layer/mii.h>
71 #include <dev/netif/mii_layer/miivar.h>
72 #include <dev/netif/mii_layer/brgphyreg.h>
75 #include <bus/pci/pcireg.h>
76 #include <bus/pci/pcivar.h>
78 #include <dev/netif/bge/if_bgereg.h>
79 #include <dev/netif/bnx/if_bnxvar.h>
81 /* "device miibus" required. See GENERIC if you get errors here. */
82 #include "miibus_if.h"
84 #define BNX_CSUM_FEATURES (CSUM_IP | CSUM_TCP | CSUM_UDP)
86 #define BNX_RESET_SHUTDOWN 0
87 #define BNX_RESET_START 1
88 #define BNX_RESET_SUSPEND 2
90 #define BNX_INTR_CKINTVL ((10 * hz) / 1000) /* 10ms */
93 #define BNX_RSS_DPRINTF(sc, lvl, fmt, ...) \
95 if (sc->bnx_rss_debug >= lvl) \
96 if_printf(&sc->arpcom.ac_if, fmt, __VA_ARGS__); \
98 #else /* !BNX_RSS_DEBUG */
99 #define BNX_RSS_DPRINTF(sc, lvl, fmt, ...) ((void)0)
100 #endif /* BNX_RSS_DEBUG */
102 static const struct bnx_type
{
107 { PCI_VENDOR_BROADCOM
, PCI_PRODUCT_BROADCOM_BCM5717
,
108 "Broadcom BCM5717 Gigabit Ethernet" },
109 { PCI_VENDOR_BROADCOM
, PCI_PRODUCT_BROADCOM_BCM5717C
,
110 "Broadcom BCM5717C Gigabit Ethernet" },
111 { PCI_VENDOR_BROADCOM
, PCI_PRODUCT_BROADCOM_BCM5718
,
112 "Broadcom BCM5718 Gigabit Ethernet" },
113 { PCI_VENDOR_BROADCOM
, PCI_PRODUCT_BROADCOM_BCM5719
,
114 "Broadcom BCM5719 Gigabit Ethernet" },
115 { PCI_VENDOR_BROADCOM
, PCI_PRODUCT_BROADCOM_BCM5720_ALT
,
116 "Broadcom BCM5720 Gigabit Ethernet" },
118 { PCI_VENDOR_BROADCOM
, PCI_PRODUCT_BROADCOM_BCM5725
,
119 "Broadcom BCM5725 Gigabit Ethernet" },
120 { PCI_VENDOR_BROADCOM
, PCI_PRODUCT_BROADCOM_BCM5727
,
121 "Broadcom BCM5727 Gigabit Ethernet" },
122 { PCI_VENDOR_BROADCOM
, PCI_PRODUCT_BROADCOM_BCM5762
,
123 "Broadcom BCM5762 Gigabit Ethernet" },
125 { PCI_VENDOR_BROADCOM
, PCI_PRODUCT_BROADCOM_BCM57761
,
126 "Broadcom BCM57761 Gigabit Ethernet" },
127 { PCI_VENDOR_BROADCOM
, PCI_PRODUCT_BROADCOM_BCM57762
,
128 "Broadcom BCM57762 Gigabit Ethernet" },
129 { PCI_VENDOR_BROADCOM
, PCI_PRODUCT_BROADCOM_BCM57765
,
130 "Broadcom BCM57765 Gigabit Ethernet" },
131 { PCI_VENDOR_BROADCOM
, PCI_PRODUCT_BROADCOM_BCM57766
,
132 "Broadcom BCM57766 Gigabit Ethernet" },
133 { PCI_VENDOR_BROADCOM
, PCI_PRODUCT_BROADCOM_BCM57781
,
134 "Broadcom BCM57781 Gigabit Ethernet" },
135 { PCI_VENDOR_BROADCOM
, PCI_PRODUCT_BROADCOM_BCM57782
,
136 "Broadcom BCM57782 Gigabit Ethernet" },
137 { PCI_VENDOR_BROADCOM
, PCI_PRODUCT_BROADCOM_BCM57785
,
138 "Broadcom BCM57785 Gigabit Ethernet" },
139 { PCI_VENDOR_BROADCOM
, PCI_PRODUCT_BROADCOM_BCM57786
,
140 "Broadcom BCM57786 Gigabit Ethernet" },
141 { PCI_VENDOR_BROADCOM
, PCI_PRODUCT_BROADCOM_BCM57791
,
142 "Broadcom BCM57791 Fast Ethernet" },
143 { PCI_VENDOR_BROADCOM
, PCI_PRODUCT_BROADCOM_BCM57795
,
144 "Broadcom BCM57795 Fast Ethernet" },
149 static const int bnx_tx_mailbox
[BNX_TX_RING_MAX
] = {
150 BGE_MBX_TX_HOST_PROD0_LO
,
151 BGE_MBX_TX_HOST_PROD0_HI
,
152 BGE_MBX_TX_HOST_PROD1_LO
,
153 BGE_MBX_TX_HOST_PROD1_HI
156 #define BNX_IS_JUMBO_CAPABLE(sc) ((sc)->bnx_flags & BNX_FLAG_JUMBO)
157 #define BNX_IS_5717_PLUS(sc) ((sc)->bnx_flags & BNX_FLAG_5717_PLUS)
158 #define BNX_IS_57765_PLUS(sc) ((sc)->bnx_flags & BNX_FLAG_57765_PLUS)
159 #define BNX_IS_57765_FAMILY(sc) \
160 ((sc)->bnx_flags & BNX_FLAG_57765_FAMILY)
162 typedef int (*bnx_eaddr_fcn_t
)(struct bnx_softc
*, uint8_t[]);
164 static int bnx_probe(device_t
);
165 static int bnx_attach(device_t
);
166 static int bnx_detach(device_t
);
167 static void bnx_shutdown(device_t
);
168 static int bnx_suspend(device_t
);
169 static int bnx_resume(device_t
);
170 static int bnx_miibus_readreg(device_t
, int, int);
171 static int bnx_miibus_writereg(device_t
, int, int, int);
172 static void bnx_miibus_statchg(device_t
);
174 static int bnx_handle_status(struct bnx_softc
*);
176 static void bnx_npoll(struct ifnet
*, struct ifpoll_info
*);
177 static void bnx_npoll_rx(struct ifnet
*, void *, int);
178 static void bnx_npoll_tx(struct ifnet
*, void *, int);
179 static void bnx_npoll_tx_notag(struct ifnet
*, void *, int);
180 static void bnx_npoll_status(struct ifnet
*);
181 static void bnx_npoll_status_notag(struct ifnet
*);
183 static void bnx_intr_legacy(void *);
184 static void bnx_msi(void *);
185 static void bnx_intr(struct bnx_softc
*);
186 static void bnx_msix_status(void *);
187 static void bnx_msix_tx_status(void *);
188 static void bnx_msix_rx(void *);
189 static void bnx_msix_rxtx(void *);
190 static void bnx_enable_intr(struct bnx_softc
*);
191 static void bnx_disable_intr(struct bnx_softc
*);
192 static void bnx_txeof(struct bnx_tx_ring
*, uint16_t);
193 static void bnx_rxeof(struct bnx_rx_ret_ring
*, uint16_t, int);
194 static int bnx_alloc_intr(struct bnx_softc
*);
195 static int bnx_setup_intr(struct bnx_softc
*);
196 static void bnx_free_intr(struct bnx_softc
*);
197 static void bnx_teardown_intr(struct bnx_softc
*, int);
198 static int bnx_alloc_msix(struct bnx_softc
*);
199 static void bnx_free_msix(struct bnx_softc
*, boolean_t
);
200 static void bnx_check_intr_rxtx(void *);
201 static void bnx_check_intr_rx(void *);
202 static void bnx_check_intr_tx(void *);
203 static void bnx_rx_std_refill_ithread(void *);
204 static void bnx_rx_std_refill(void *, void *);
205 static void bnx_rx_std_refill_sched_ipi(void *);
206 static void bnx_rx_std_refill_stop(void *);
207 static void bnx_rx_std_refill_sched(struct bnx_rx_ret_ring
*,
208 struct bnx_rx_std_ring
*);
210 static void bnx_start(struct ifnet
*, struct ifaltq_subque
*);
211 static int bnx_ioctl(struct ifnet
*, u_long
, caddr_t
, struct ucred
*);
212 static void bnx_init(void *);
213 static void bnx_stop(struct bnx_softc
*);
214 static void bnx_watchdog(struct ifaltq_subque
*);
215 static int bnx_ifmedia_upd(struct ifnet
*);
216 static void bnx_ifmedia_sts(struct ifnet
*, struct ifmediareq
*);
217 static void bnx_tick(void *);
218 static void bnx_serialize(struct ifnet
*, enum ifnet_serialize
);
219 static void bnx_deserialize(struct ifnet
*, enum ifnet_serialize
);
220 static int bnx_tryserialize(struct ifnet
*, enum ifnet_serialize
);
222 static void bnx_serialize_assert(struct ifnet
*, enum ifnet_serialize
,
225 static void bnx_serialize_skipmain(struct bnx_softc
*);
226 static void bnx_deserialize_skipmain(struct bnx_softc
*sc
);
228 static int bnx_alloc_jumbo_mem(struct bnx_softc
*);
229 static void bnx_free_jumbo_mem(struct bnx_softc
*);
230 static struct bnx_jslot
231 *bnx_jalloc(struct bnx_softc
*);
232 static void bnx_jfree(void *);
233 static void bnx_jref(void *);
234 static int bnx_newbuf_std(struct bnx_rx_ret_ring
*, int, int);
235 static int bnx_newbuf_jumbo(struct bnx_softc
*, int, int);
236 static void bnx_setup_rxdesc_std(struct bnx_rx_std_ring
*, int);
237 static void bnx_setup_rxdesc_jumbo(struct bnx_softc
*, int);
238 static int bnx_init_rx_ring_std(struct bnx_rx_std_ring
*);
239 static void bnx_free_rx_ring_std(struct bnx_rx_std_ring
*);
240 static int bnx_init_rx_ring_jumbo(struct bnx_softc
*);
241 static void bnx_free_rx_ring_jumbo(struct bnx_softc
*);
242 static void bnx_free_tx_ring(struct bnx_tx_ring
*);
243 static int bnx_init_tx_ring(struct bnx_tx_ring
*);
244 static int bnx_create_tx_ring(struct bnx_tx_ring
*);
245 static void bnx_destroy_tx_ring(struct bnx_tx_ring
*);
246 static int bnx_create_rx_ret_ring(struct bnx_rx_ret_ring
*);
247 static void bnx_destroy_rx_ret_ring(struct bnx_rx_ret_ring
*);
248 static int bnx_dma_alloc(device_t
);
249 static void bnx_dma_free(struct bnx_softc
*);
250 static int bnx_dma_block_alloc(struct bnx_softc
*, bus_size_t
,
251 bus_dma_tag_t
*, bus_dmamap_t
*, void **, bus_addr_t
*);
252 static void bnx_dma_block_free(bus_dma_tag_t
, bus_dmamap_t
, void *);
254 bnx_defrag_shortdma(struct mbuf
*);
255 static int bnx_encap(struct bnx_tx_ring
*, struct mbuf
**,
257 static int bnx_setup_tso(struct bnx_tx_ring
*, struct mbuf
**,
258 uint16_t *, uint16_t *);
259 static void bnx_setup_serialize(struct bnx_softc
*);
260 static void bnx_set_tick_cpuid(struct bnx_softc
*, boolean_t
);
261 static void bnx_setup_ring_cnt(struct bnx_softc
*);
263 static struct pktinfo
*bnx_rss_info(struct pktinfo
*,
264 const struct bge_rx_bd
*);
265 static void bnx_init_rss(struct bnx_softc
*);
266 static void bnx_reset(struct bnx_softc
*);
267 static int bnx_chipinit(struct bnx_softc
*);
268 static int bnx_blockinit(struct bnx_softc
*);
269 static void bnx_stop_block(struct bnx_softc
*, bus_size_t
, uint32_t);
270 static void bnx_enable_msi(struct bnx_softc
*, boolean_t
);
271 static void bnx_setmulti(struct bnx_softc
*);
272 static void bnx_setpromisc(struct bnx_softc
*);
273 static void bnx_stats_update_regs(struct bnx_softc
*);
274 static uint32_t bnx_dma_swap_options(struct bnx_softc
*);
276 static uint32_t bnx_readmem_ind(struct bnx_softc
*, uint32_t);
277 static void bnx_writemem_ind(struct bnx_softc
*, uint32_t, uint32_t);
279 static uint32_t bnx_readreg_ind(struct bnx_softc
*, uint32_t);
281 static void bnx_writemem_direct(struct bnx_softc
*, uint32_t, uint32_t);
282 static void bnx_writembx(struct bnx_softc
*, int, int);
283 static int bnx_read_nvram(struct bnx_softc
*, caddr_t
, int, int);
284 static uint8_t bnx_eeprom_getbyte(struct bnx_softc
*, uint32_t, uint8_t *);
285 static int bnx_read_eeprom(struct bnx_softc
*, caddr_t
, uint32_t, size_t);
287 static void bnx_tbi_link_upd(struct bnx_softc
*, uint32_t);
288 static void bnx_copper_link_upd(struct bnx_softc
*, uint32_t);
289 static void bnx_autopoll_link_upd(struct bnx_softc
*, uint32_t);
290 static void bnx_link_poll(struct bnx_softc
*);
292 static int bnx_get_eaddr_mem(struct bnx_softc
*, uint8_t[]);
293 static int bnx_get_eaddr_nvram(struct bnx_softc
*, uint8_t[]);
294 static int bnx_get_eaddr_eeprom(struct bnx_softc
*, uint8_t[]);
295 static int bnx_get_eaddr(struct bnx_softc
*, uint8_t[]);
297 static void bnx_coal_change(struct bnx_softc
*);
298 static int bnx_sysctl_force_defrag(SYSCTL_HANDLER_ARGS
);
299 static int bnx_sysctl_tx_wreg(SYSCTL_HANDLER_ARGS
);
300 static int bnx_sysctl_rx_coal_ticks(SYSCTL_HANDLER_ARGS
);
301 static int bnx_sysctl_tx_coal_ticks(SYSCTL_HANDLER_ARGS
);
302 static int bnx_sysctl_rx_coal_bds(SYSCTL_HANDLER_ARGS
);
303 static int bnx_sysctl_rx_coal_bds_poll(SYSCTL_HANDLER_ARGS
);
304 static int bnx_sysctl_tx_coal_bds(SYSCTL_HANDLER_ARGS
);
305 static int bnx_sysctl_tx_coal_bds_poll(SYSCTL_HANDLER_ARGS
);
306 static int bnx_sysctl_rx_coal_bds_int(SYSCTL_HANDLER_ARGS
);
307 static int bnx_sysctl_tx_coal_bds_int(SYSCTL_HANDLER_ARGS
);
308 static int bnx_sysctl_coal_chg(SYSCTL_HANDLER_ARGS
, uint32_t *,
310 static int bnx_sysctl_std_refill(SYSCTL_HANDLER_ARGS
);
312 static void bnx_sig_post_reset(struct bnx_softc
*, int);
313 static void bnx_sig_pre_reset(struct bnx_softc
*, int);
314 static void bnx_ape_lock_init(struct bnx_softc
*);
315 static void bnx_ape_read_fw_ver(struct bnx_softc
*);
316 static int bnx_ape_lock(struct bnx_softc
*, int);
317 static void bnx_ape_unlock(struct bnx_softc
*, int);
318 static void bnx_ape_send_event(struct bnx_softc
*, uint32_t);
319 static void bnx_ape_driver_state_change(struct bnx_softc
*, int);
321 static int bnx_msi_enable
= 1;
322 static int bnx_msix_enable
= 1;
324 static int bnx_rx_rings
= 0; /* auto */
325 static int bnx_tx_rings
= 0; /* auto */
327 TUNABLE_INT("hw.bnx.msi.enable", &bnx_msi_enable
);
328 TUNABLE_INT("hw.bnx.msix.enable", &bnx_msix_enable
);
329 TUNABLE_INT("hw.bnx.rx_rings", &bnx_rx_rings
);
330 TUNABLE_INT("hw.bnx.tx_rings", &bnx_tx_rings
);
332 static device_method_t bnx_methods
[] = {
333 /* Device interface */
334 DEVMETHOD(device_probe
, bnx_probe
),
335 DEVMETHOD(device_attach
, bnx_attach
),
336 DEVMETHOD(device_detach
, bnx_detach
),
337 DEVMETHOD(device_shutdown
, bnx_shutdown
),
338 DEVMETHOD(device_suspend
, bnx_suspend
),
339 DEVMETHOD(device_resume
, bnx_resume
),
342 DEVMETHOD(bus_print_child
, bus_generic_print_child
),
343 DEVMETHOD(bus_driver_added
, bus_generic_driver_added
),
346 DEVMETHOD(miibus_readreg
, bnx_miibus_readreg
),
347 DEVMETHOD(miibus_writereg
, bnx_miibus_writereg
),
348 DEVMETHOD(miibus_statchg
, bnx_miibus_statchg
),
353 static DEFINE_CLASS_0(bnx
, bnx_driver
, bnx_methods
, sizeof(struct bnx_softc
));
354 static devclass_t bnx_devclass
;
356 DECLARE_DUMMY_MODULE(if_bnx
);
357 MODULE_DEPEND(if_bnx
, miibus
, 1, 1, 1);
358 DRIVER_MODULE(if_bnx
, pci
, bnx_driver
, bnx_devclass
, NULL
, NULL
);
359 DRIVER_MODULE(miibus
, bnx
, miibus_driver
, miibus_devclass
, NULL
, NULL
);
362 bnx_readmem_ind(struct bnx_softc
*sc
, uint32_t off
)
364 device_t dev
= sc
->bnx_dev
;
367 pci_write_config(dev
, BGE_PCI_MEMWIN_BASEADDR
, off
, 4);
368 val
= pci_read_config(dev
, BGE_PCI_MEMWIN_DATA
, 4);
369 pci_write_config(dev
, BGE_PCI_MEMWIN_BASEADDR
, 0, 4);
374 bnx_writemem_ind(struct bnx_softc
*sc
, uint32_t off
, uint32_t val
)
376 device_t dev
= sc
->bnx_dev
;
378 pci_write_config(dev
, BGE_PCI_MEMWIN_BASEADDR
, off
, 4);
379 pci_write_config(dev
, BGE_PCI_MEMWIN_DATA
, val
, 4);
380 pci_write_config(dev
, BGE_PCI_MEMWIN_BASEADDR
, 0, 4);
384 bnx_writemem_direct(struct bnx_softc
*sc
, uint32_t off
, uint32_t val
)
386 CSR_WRITE_4(sc
, off
, val
);
/* Write a mailbox register (producer/consumer index kick) at offset 'off'. */
static void
bnx_writembx(struct bnx_softc *sc, int off, int val)
{
	CSR_WRITE_4(sc, off, val);
}
396 * Read a sequence of bytes from NVRAM.
399 bnx_read_nvram(struct bnx_softc
*sc
, caddr_t dest
, int off
, int cnt
)
405 * Read a byte of data stored in the EEPROM at address 'addr.' The
406 * BCM570x supports both the traditional bitbang interface and an
407 * auto access interface for reading the EEPROM. We use the auto
411 bnx_eeprom_getbyte(struct bnx_softc
*sc
, uint32_t addr
, uint8_t *dest
)
417 * Enable use of auto EEPROM access so we can avoid
418 * having to use the bitbang method.
420 BNX_SETBIT(sc
, BGE_MISC_LOCAL_CTL
, BGE_MLC_AUTO_EEPROM
);
422 /* Reset the EEPROM, load the clock period. */
423 CSR_WRITE_4(sc
, BGE_EE_ADDR
,
424 BGE_EEADDR_RESET
|BGE_EEHALFCLK(BGE_HALFCLK_384SCL
));
427 /* Issue the read EEPROM command. */
428 CSR_WRITE_4(sc
, BGE_EE_ADDR
, BGE_EE_READCMD
| addr
);
430 /* Wait for completion */
431 for(i
= 0; i
< BNX_TIMEOUT
* 10; i
++) {
433 if (CSR_READ_4(sc
, BGE_EE_ADDR
) & BGE_EEADDR_DONE
)
437 if (i
== BNX_TIMEOUT
) {
438 if_printf(&sc
->arpcom
.ac_if
, "eeprom read timed out\n");
443 byte
= CSR_READ_4(sc
, BGE_EE_DATA
);
445 *dest
= (byte
>> ((addr
% 4) * 8)) & 0xFF;
451 * Read a sequence of bytes from the EEPROM.
454 bnx_read_eeprom(struct bnx_softc
*sc
, caddr_t dest
, uint32_t off
, size_t len
)
460 for (byte
= 0, err
= 0, i
= 0; i
< len
; i
++) {
461 err
= bnx_eeprom_getbyte(sc
, off
+ i
, &byte
);
471 bnx_miibus_readreg(device_t dev
, int phy
, int reg
)
473 struct bnx_softc
*sc
= device_get_softc(dev
);
477 KASSERT(phy
== sc
->bnx_phyno
,
478 ("invalid phyno %d, should be %d", phy
, sc
->bnx_phyno
));
480 if (bnx_ape_lock(sc
, sc
->bnx_phy_ape_lock
) != 0)
483 /* Clear the autopoll bit if set, otherwise may trigger PCI errors. */
484 if (sc
->bnx_mi_mode
& BGE_MIMODE_AUTOPOLL
) {
485 CSR_WRITE_4(sc
, BGE_MI_MODE
,
486 sc
->bnx_mi_mode
& ~BGE_MIMODE_AUTOPOLL
);
490 CSR_WRITE_4(sc
, BGE_MI_COMM
, BGE_MICMD_READ
| BGE_MICOMM_BUSY
|
491 BGE_MIPHY(phy
) | BGE_MIREG(reg
));
493 /* Poll for the PHY register access to complete. */
494 for (i
= 0; i
< BNX_TIMEOUT
; i
++) {
496 val
= CSR_READ_4(sc
, BGE_MI_COMM
);
497 if ((val
& BGE_MICOMM_BUSY
) == 0) {
499 val
= CSR_READ_4(sc
, BGE_MI_COMM
);
503 if (i
== BNX_TIMEOUT
) {
504 if_printf(&sc
->arpcom
.ac_if
, "PHY read timed out "
505 "(phy %d, reg %d, val 0x%08x)\n", phy
, reg
, val
);
509 /* Restore the autopoll bit if necessary. */
510 if (sc
->bnx_mi_mode
& BGE_MIMODE_AUTOPOLL
) {
511 CSR_WRITE_4(sc
, BGE_MI_MODE
, sc
->bnx_mi_mode
);
515 bnx_ape_unlock(sc
, sc
->bnx_phy_ape_lock
);
517 if (val
& BGE_MICOMM_READFAIL
)
520 return (val
& 0xFFFF);
524 bnx_miibus_writereg(device_t dev
, int phy
, int reg
, int val
)
526 struct bnx_softc
*sc
= device_get_softc(dev
);
529 KASSERT(phy
== sc
->bnx_phyno
,
530 ("invalid phyno %d, should be %d", phy
, sc
->bnx_phyno
));
532 if (bnx_ape_lock(sc
, sc
->bnx_phy_ape_lock
) != 0)
535 /* Clear the autopoll bit if set, otherwise may trigger PCI errors. */
536 if (sc
->bnx_mi_mode
& BGE_MIMODE_AUTOPOLL
) {
537 CSR_WRITE_4(sc
, BGE_MI_MODE
,
538 sc
->bnx_mi_mode
& ~BGE_MIMODE_AUTOPOLL
);
542 CSR_WRITE_4(sc
, BGE_MI_COMM
, BGE_MICMD_WRITE
| BGE_MICOMM_BUSY
|
543 BGE_MIPHY(phy
) | BGE_MIREG(reg
) | val
);
545 for (i
= 0; i
< BNX_TIMEOUT
; i
++) {
547 if (!(CSR_READ_4(sc
, BGE_MI_COMM
) & BGE_MICOMM_BUSY
)) {
549 CSR_READ_4(sc
, BGE_MI_COMM
); /* dummy read */
553 if (i
== BNX_TIMEOUT
) {
554 if_printf(&sc
->arpcom
.ac_if
, "PHY write timed out "
555 "(phy %d, reg %d, val %d)\n", phy
, reg
, val
);
558 /* Restore the autopoll bit if necessary. */
559 if (sc
->bnx_mi_mode
& BGE_MIMODE_AUTOPOLL
) {
560 CSR_WRITE_4(sc
, BGE_MI_MODE
, sc
->bnx_mi_mode
);
564 bnx_ape_unlock(sc
, sc
->bnx_phy_ape_lock
);
570 bnx_miibus_statchg(device_t dev
)
572 struct bnx_softc
*sc
;
573 struct mii_data
*mii
;
576 sc
= device_get_softc(dev
);
577 if ((sc
->arpcom
.ac_if
.if_flags
& IFF_RUNNING
) == 0)
580 mii
= device_get_softc(sc
->bnx_miibus
);
582 if ((mii
->mii_media_status
& (IFM_ACTIVE
| IFM_AVALID
)) ==
583 (IFM_ACTIVE
| IFM_AVALID
)) {
584 switch (IFM_SUBTYPE(mii
->mii_media_active
)) {
601 if (sc
->bnx_link
== 0)
605 * APE firmware touches these registers to keep the MAC
606 * connected to the outside world. Try to keep the
610 mac_mode
= CSR_READ_4(sc
, BGE_MAC_MODE
) &
611 ~(BGE_MACMODE_PORTMODE
| BGE_MACMODE_HALF_DUPLEX
);
613 if (IFM_SUBTYPE(mii
->mii_media_active
) == IFM_1000_T
||
614 IFM_SUBTYPE(mii
->mii_media_active
) == IFM_1000_SX
)
615 mac_mode
|= BGE_PORTMODE_GMII
;
617 mac_mode
|= BGE_PORTMODE_MII
;
619 if ((mii
->mii_media_active
& IFM_GMASK
) != IFM_FDX
)
620 mac_mode
|= BGE_MACMODE_HALF_DUPLEX
;
622 CSR_WRITE_4(sc
, BGE_MAC_MODE
, mac_mode
);
627 * Memory management for jumbo frames.
630 bnx_alloc_jumbo_mem(struct bnx_softc
*sc
)
632 struct ifnet
*ifp
= &sc
->arpcom
.ac_if
;
633 struct bnx_jslot
*entry
;
639 * Create tag for jumbo mbufs.
640 * This is really a bit of a kludge. We allocate a special
641 * jumbo buffer pool which (thanks to the way our DMA
642 * memory allocation works) will consist of contiguous
643 * pages. This means that even though a jumbo buffer might
644 * be larger than a page size, we don't really need to
645 * map it into more than one DMA segment. However, the
646 * default mbuf tag will result in multi-segment mappings,
647 * so we have to create a special jumbo mbuf tag that
648 * lets us get away with mapping the jumbo buffers as
649 * a single segment. I think eventually the driver should
650 * be changed so that it uses ordinary mbufs and cluster
651 * buffers, i.e. jumbo frames can span multiple DMA
652 * descriptors. But that's a project for another day.
656 * Create DMA stuffs for jumbo RX ring.
658 error
= bnx_dma_block_alloc(sc
, BGE_JUMBO_RX_RING_SZ
,
659 &sc
->bnx_cdata
.bnx_rx_jumbo_ring_tag
,
660 &sc
->bnx_cdata
.bnx_rx_jumbo_ring_map
,
661 (void *)&sc
->bnx_ldata
.bnx_rx_jumbo_ring
,
662 &sc
->bnx_ldata
.bnx_rx_jumbo_ring_paddr
);
664 if_printf(ifp
, "could not create jumbo RX ring\n");
669 * Create DMA stuffs for jumbo buffer block.
671 error
= bnx_dma_block_alloc(sc
, BNX_JMEM
,
672 &sc
->bnx_cdata
.bnx_jumbo_tag
,
673 &sc
->bnx_cdata
.bnx_jumbo_map
,
674 (void **)&sc
->bnx_ldata
.bnx_jumbo_buf
,
677 if_printf(ifp
, "could not create jumbo buffer\n");
681 SLIST_INIT(&sc
->bnx_jfree_listhead
);
684 * Now divide it up into 9K pieces and save the addresses
685 * in an array. Note that we play an evil trick here by using
686 * the first few bytes in the buffer to hold the address
687 * of the softc structure for this interface. This is because
688 * bnx_jfree() needs it, but it is called by the mbuf management
689 * code which will not pass it to us explicitly.
691 for (i
= 0, ptr
= sc
->bnx_ldata
.bnx_jumbo_buf
; i
< BNX_JSLOTS
; i
++) {
692 entry
= &sc
->bnx_cdata
.bnx_jslots
[i
];
694 entry
->bnx_buf
= ptr
;
695 entry
->bnx_paddr
= paddr
;
696 entry
->bnx_inuse
= 0;
698 SLIST_INSERT_HEAD(&sc
->bnx_jfree_listhead
, entry
, jslot_link
);
707 bnx_free_jumbo_mem(struct bnx_softc
*sc
)
709 /* Destroy jumbo RX ring. */
710 bnx_dma_block_free(sc
->bnx_cdata
.bnx_rx_jumbo_ring_tag
,
711 sc
->bnx_cdata
.bnx_rx_jumbo_ring_map
,
712 sc
->bnx_ldata
.bnx_rx_jumbo_ring
);
714 /* Destroy jumbo buffer block. */
715 bnx_dma_block_free(sc
->bnx_cdata
.bnx_jumbo_tag
,
716 sc
->bnx_cdata
.bnx_jumbo_map
,
717 sc
->bnx_ldata
.bnx_jumbo_buf
);
721 * Allocate a jumbo buffer.
723 static struct bnx_jslot
*
724 bnx_jalloc(struct bnx_softc
*sc
)
726 struct bnx_jslot
*entry
;
728 lwkt_serialize_enter(&sc
->bnx_jslot_serializer
);
729 entry
= SLIST_FIRST(&sc
->bnx_jfree_listhead
);
731 SLIST_REMOVE_HEAD(&sc
->bnx_jfree_listhead
, jslot_link
);
732 entry
->bnx_inuse
= 1;
734 if_printf(&sc
->arpcom
.ac_if
, "no free jumbo buffers\n");
736 lwkt_serialize_exit(&sc
->bnx_jslot_serializer
);
741 * Adjust usage count on a jumbo buffer.
746 struct bnx_jslot
*entry
= (struct bnx_jslot
*)arg
;
747 struct bnx_softc
*sc
= entry
->bnx_sc
;
750 panic("bnx_jref: can't find softc pointer!");
752 if (&sc
->bnx_cdata
.bnx_jslots
[entry
->bnx_slot
] != entry
) {
753 panic("bnx_jref: asked to reference buffer "
754 "that we don't manage!");
755 } else if (entry
->bnx_inuse
== 0) {
756 panic("bnx_jref: buffer already free!");
758 atomic_add_int(&entry
->bnx_inuse
, 1);
763 * Release a jumbo buffer.
768 struct bnx_jslot
*entry
= (struct bnx_jslot
*)arg
;
769 struct bnx_softc
*sc
= entry
->bnx_sc
;
772 panic("bnx_jfree: can't find softc pointer!");
774 if (&sc
->bnx_cdata
.bnx_jslots
[entry
->bnx_slot
] != entry
) {
775 panic("bnx_jfree: asked to free buffer that we don't manage!");
776 } else if (entry
->bnx_inuse
== 0) {
777 panic("bnx_jfree: buffer already free!");
780 * Possible MP race to 0, use the serializer. The atomic insn
781 * is still needed for races against bnx_jref().
783 lwkt_serialize_enter(&sc
->bnx_jslot_serializer
);
784 atomic_subtract_int(&entry
->bnx_inuse
, 1);
785 if (entry
->bnx_inuse
== 0) {
786 SLIST_INSERT_HEAD(&sc
->bnx_jfree_listhead
,
789 lwkt_serialize_exit(&sc
->bnx_jslot_serializer
);
795 * Intialize a standard receive ring descriptor.
798 bnx_newbuf_std(struct bnx_rx_ret_ring
*ret
, int i
, int init
)
800 struct mbuf
*m_new
= NULL
;
801 bus_dma_segment_t seg
;
804 struct bnx_rx_buf
*rb
;
806 rb
= &ret
->bnx_std
->bnx_rx_std_buf
[i
];
807 KASSERT(!rb
->bnx_rx_refilled
, ("RX buf %dth has been refilled", i
));
809 m_new
= m_getcl(init
? M_WAITOK
: M_NOWAIT
, MT_DATA
, M_PKTHDR
);
814 m_new
->m_len
= m_new
->m_pkthdr
.len
= MCLBYTES
;
815 m_adj(m_new
, ETHER_ALIGN
);
817 error
= bus_dmamap_load_mbuf_segment(ret
->bnx_rx_mtag
,
818 ret
->bnx_rx_tmpmap
, m_new
, &seg
, 1, &nsegs
, BUS_DMA_NOWAIT
);
825 bus_dmamap_sync(ret
->bnx_rx_mtag
, rb
->bnx_rx_dmamap
,
826 BUS_DMASYNC_POSTREAD
);
827 bus_dmamap_unload(ret
->bnx_rx_mtag
, rb
->bnx_rx_dmamap
);
830 map
= ret
->bnx_rx_tmpmap
;
831 ret
->bnx_rx_tmpmap
= rb
->bnx_rx_dmamap
;
833 rb
->bnx_rx_dmamap
= map
;
834 rb
->bnx_rx_mbuf
= m_new
;
835 rb
->bnx_rx_paddr
= seg
.ds_addr
;
836 rb
->bnx_rx_len
= m_new
->m_len
;
839 rb
->bnx_rx_refilled
= 1;
844 bnx_setup_rxdesc_std(struct bnx_rx_std_ring
*std
, int i
)
846 struct bnx_rx_buf
*rb
;
851 rb
= &std
->bnx_rx_std_buf
[i
];
852 KASSERT(rb
->bnx_rx_refilled
, ("RX buf %dth is not refilled", i
));
854 paddr
= rb
->bnx_rx_paddr
;
855 len
= rb
->bnx_rx_len
;
859 rb
->bnx_rx_refilled
= 0;
861 r
= &std
->bnx_rx_std_ring
[i
];
862 r
->bge_addr
.bge_addr_lo
= BGE_ADDR_LO(paddr
);
863 r
->bge_addr
.bge_addr_hi
= BGE_ADDR_HI(paddr
);
866 r
->bge_flags
= BGE_RXBDFLAG_END
;
870 * Initialize a jumbo receive ring descriptor. This allocates
871 * a jumbo buffer from the pool managed internally by the driver.
874 bnx_newbuf_jumbo(struct bnx_softc
*sc
, int i
, int init
)
876 struct mbuf
*m_new
= NULL
;
877 struct bnx_jslot
*buf
;
880 /* Allocate the mbuf. */
881 MGETHDR(m_new
, init
? M_WAITOK
: M_NOWAIT
, MT_DATA
);
885 /* Allocate the jumbo buffer */
886 buf
= bnx_jalloc(sc
);
892 /* Attach the buffer to the mbuf. */
893 m_new
->m_ext
.ext_arg
= buf
;
894 m_new
->m_ext
.ext_buf
= buf
->bnx_buf
;
895 m_new
->m_ext
.ext_free
= bnx_jfree
;
896 m_new
->m_ext
.ext_ref
= bnx_jref
;
897 m_new
->m_ext
.ext_size
= BNX_JUMBO_FRAMELEN
;
899 m_new
->m_flags
|= M_EXT
;
901 m_new
->m_data
= m_new
->m_ext
.ext_buf
;
902 m_new
->m_len
= m_new
->m_pkthdr
.len
= m_new
->m_ext
.ext_size
;
904 paddr
= buf
->bnx_paddr
;
905 m_adj(m_new
, ETHER_ALIGN
);
906 paddr
+= ETHER_ALIGN
;
908 /* Save necessary information */
909 sc
->bnx_cdata
.bnx_rx_jumbo_chain
[i
].bnx_rx_mbuf
= m_new
;
910 sc
->bnx_cdata
.bnx_rx_jumbo_chain
[i
].bnx_rx_paddr
= paddr
;
912 /* Set up the descriptor. */
913 bnx_setup_rxdesc_jumbo(sc
, i
);
918 bnx_setup_rxdesc_jumbo(struct bnx_softc
*sc
, int i
)
921 struct bnx_rx_buf
*rc
;
923 r
= &sc
->bnx_ldata
.bnx_rx_jumbo_ring
[i
];
924 rc
= &sc
->bnx_cdata
.bnx_rx_jumbo_chain
[i
];
926 r
->bge_addr
.bge_addr_lo
= BGE_ADDR_LO(rc
->bnx_rx_paddr
);
927 r
->bge_addr
.bge_addr_hi
= BGE_ADDR_HI(rc
->bnx_rx_paddr
);
928 r
->bge_len
= rc
->bnx_rx_mbuf
->m_len
;
930 r
->bge_flags
= BGE_RXBDFLAG_END
|BGE_RXBDFLAG_JUMBO_RING
;
934 bnx_init_rx_ring_std(struct bnx_rx_std_ring
*std
)
938 for (i
= 0; i
< BGE_STD_RX_RING_CNT
; i
++) {
939 /* Use the first RX return ring's tmp RX mbuf DMA map */
940 error
= bnx_newbuf_std(&std
->bnx_sc
->bnx_rx_ret_ring
[0], i
, 1);
943 bnx_setup_rxdesc_std(std
, i
);
946 std
->bnx_rx_std_used
= 0;
947 std
->bnx_rx_std_refill
= 0;
948 std
->bnx_rx_std_running
= 0;
950 lwkt_serialize_handler_enable(&std
->bnx_rx_std_serialize
);
952 std
->bnx_rx_std
= BGE_STD_RX_RING_CNT
- 1;
953 bnx_writembx(std
->bnx_sc
, BGE_MBX_RX_STD_PROD_LO
, std
->bnx_rx_std
);
959 bnx_free_rx_ring_std(struct bnx_rx_std_ring
*std
)
963 lwkt_serialize_handler_disable(&std
->bnx_rx_std_serialize
);
965 for (i
= 0; i
< BGE_STD_RX_RING_CNT
; i
++) {
966 struct bnx_rx_buf
*rb
= &std
->bnx_rx_std_buf
[i
];
968 rb
->bnx_rx_refilled
= 0;
969 if (rb
->bnx_rx_mbuf
!= NULL
) {
970 bus_dmamap_unload(std
->bnx_rx_mtag
, rb
->bnx_rx_dmamap
);
971 m_freem(rb
->bnx_rx_mbuf
);
972 rb
->bnx_rx_mbuf
= NULL
;
974 bzero(&std
->bnx_rx_std_ring
[i
], sizeof(struct bge_rx_bd
));
979 bnx_init_rx_ring_jumbo(struct bnx_softc
*sc
)
984 for (i
= 0; i
< BGE_JUMBO_RX_RING_CNT
; i
++) {
985 error
= bnx_newbuf_jumbo(sc
, i
, 1);
990 sc
->bnx_jumbo
= BGE_JUMBO_RX_RING_CNT
- 1;
992 rcb
= &sc
->bnx_ldata
.bnx_info
.bnx_jumbo_rx_rcb
;
993 rcb
->bge_maxlen_flags
= BGE_RCB_MAXLEN_FLAGS(0, 0);
994 CSR_WRITE_4(sc
, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS
, rcb
->bge_maxlen_flags
);
996 bnx_writembx(sc
, BGE_MBX_RX_JUMBO_PROD_LO
, sc
->bnx_jumbo
);
1002 bnx_free_rx_ring_jumbo(struct bnx_softc
*sc
)
1006 for (i
= 0; i
< BGE_JUMBO_RX_RING_CNT
; i
++) {
1007 struct bnx_rx_buf
*rc
= &sc
->bnx_cdata
.bnx_rx_jumbo_chain
[i
];
1009 if (rc
->bnx_rx_mbuf
!= NULL
) {
1010 m_freem(rc
->bnx_rx_mbuf
);
1011 rc
->bnx_rx_mbuf
= NULL
;
1013 bzero(&sc
->bnx_ldata
.bnx_rx_jumbo_ring
[i
],
1014 sizeof(struct bge_rx_bd
));
1019 bnx_free_tx_ring(struct bnx_tx_ring
*txr
)
1023 for (i
= 0; i
< BGE_TX_RING_CNT
; i
++) {
1024 struct bnx_tx_buf
*buf
= &txr
->bnx_tx_buf
[i
];
1026 if (buf
->bnx_tx_mbuf
!= NULL
) {
1027 bus_dmamap_unload(txr
->bnx_tx_mtag
,
1028 buf
->bnx_tx_dmamap
);
1029 m_freem(buf
->bnx_tx_mbuf
);
1030 buf
->bnx_tx_mbuf
= NULL
;
1032 bzero(&txr
->bnx_tx_ring
[i
], sizeof(struct bge_tx_bd
));
1034 txr
->bnx_tx_saved_considx
= BNX_TXCONS_UNSET
;
1038 bnx_init_tx_ring(struct bnx_tx_ring
*txr
)
1040 txr
->bnx_tx_cnt
= 0;
1041 txr
->bnx_tx_saved_considx
= 0;
1042 txr
->bnx_tx_prodidx
= 0;
1044 /* Initialize transmit producer index for host-memory send ring. */
1045 bnx_writembx(txr
->bnx_sc
, txr
->bnx_tx_mbx
, txr
->bnx_tx_prodidx
);
1051 bnx_setmulti(struct bnx_softc
*sc
)
1054 struct ifmultiaddr
*ifma
;
1055 uint32_t hashes
[4] = { 0, 0, 0, 0 };
1058 ifp
= &sc
->arpcom
.ac_if
;
1060 if (ifp
->if_flags
& IFF_ALLMULTI
|| ifp
->if_flags
& IFF_PROMISC
) {
1061 for (i
= 0; i
< 4; i
++)
1062 CSR_WRITE_4(sc
, BGE_MAR0
+ (i
* 4), 0xFFFFFFFF);
1066 /* First, zot all the existing filters. */
1067 for (i
= 0; i
< 4; i
++)
1068 CSR_WRITE_4(sc
, BGE_MAR0
+ (i
* 4), 0);
1070 /* Now program new ones. */
1071 TAILQ_FOREACH(ifma
, &ifp
->if_multiaddrs
, ifma_link
) {
1072 if (ifma
->ifma_addr
->sa_family
!= AF_LINK
)
1075 LLADDR((struct sockaddr_dl
*)ifma
->ifma_addr
),
1076 ETHER_ADDR_LEN
) & 0x7f;
1077 hashes
[(h
& 0x60) >> 5] |= 1 << (h
& 0x1F);
1080 for (i
= 0; i
< 4; i
++)
1081 CSR_WRITE_4(sc
, BGE_MAR0
+ (i
* 4), hashes
[i
]);
/*
 * bnx_chipinit: early chip setup — byte-swap/endian configuration,
 * internal statistics/status memory clearing, 57765-family PCIe errata
 * workarounds, PCI DMA read/write control tuning, general mode register
 * programming, and PCI command-register fixups.  Must run before most
 * other register access (the endian setting at 1095 gates everything).
 *
 * NOTE(review): extraction dropped the return type/braces and the
 * declarations of 'val' and 'i' (gap 1091 -> 1094); text is byte-identical.
 */
1085 * Do endian, PCI and DMA initialization. Also check the on-board ROM
1086 * self-test results.
1089 bnx_chipinit(struct bnx_softc
*sc
)
1091 uint32_t dma_rw_ctl
, mode_ctl
;
1094 /* Set endian type before we access any non-PCI registers. */
1095 pci_write_config(sc
->bnx_dev
, BGE_PCI_MISC_CTL
,
1096 BGE_INIT
| BGE_PCIMISCCTL_TAGGED_STATUS
, 4);
1099 * Clear the MAC statistics block in the NIC's
/* Zero the on-chip statistics block one 32-bit word at a time. */
1102 for (i
= BGE_STATS_BLOCK
;
1103 i
< BGE_STATS_BLOCK_END
+ 1; i
+= sizeof(uint32_t))
1104 BNX_MEMWIN_WRITE(sc
, i
, 0);
/* Likewise zero the on-chip status block. */
1106 for (i
= BGE_STATUS_BLOCK
;
1107 i
< BGE_STATUS_BLOCK_END
+ 1; i
+= sizeof(uint32_t))
1108 BNX_MEMWIN_WRITE(sc
, i
, 0);
/* 57765-family PCIe errata workarounds follow. */
1110 if (BNX_IS_57765_FAMILY(sc
)) {
1113 if (sc
->bnx_chipid
== BGE_CHIPID_BCM57765_A0
) {
/* Save MODE_CTL so it can be restored after the windowed access below. */
1114 mode_ctl
= CSR_READ_4(sc
, BGE_MODE_CTL
);
1115 val
= mode_ctl
& ~BGE_MODECTL_PCIE_PORTS
;
1117 /* Access the lower 1K of PL PCI-E block registers. */
1118 CSR_WRITE_4(sc
, BGE_MODE_CTL
,
1119 val
| BGE_MODECTL_PCIE_PL_SEL
);
1121 val
= CSR_READ_4(sc
, BGE_PCIE_PL_LO_PHYCTL5
);
1122 val
|= BGE_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ
;
1123 CSR_WRITE_4(sc
, BGE_PCIE_PL_LO_PHYCTL5
, val
);
/* Restore the saved MODE_CTL (closes the PL register window). */
1125 CSR_WRITE_4(sc
, BGE_MODE_CTL
, mode_ctl
);
1127 if (sc
->bnx_chiprev
!= BGE_CHIPREV_57765_AX
) {
1128 /* Fix transmit hangs */
1129 val
= CSR_READ_4(sc
, BGE_CPMU_PADRNG_CTL
);
1130 val
|= BGE_CPMU_PADRNG_CTL_RDIV2
;
1131 CSR_WRITE_4(sc
, BGE_CPMU_PADRNG_CTL
, val
);
1133 mode_ctl
= CSR_READ_4(sc
, BGE_MODE_CTL
);
1134 val
= mode_ctl
& ~BGE_MODECTL_PCIE_PORTS
;
1136 /* Access the lower 1K of DL PCI-E block registers. */
1137 CSR_WRITE_4(sc
, BGE_MODE_CTL
,
1138 val
| BGE_MODECTL_PCIE_DL_SEL
);
/* Force the FTSMAX field to the recommended value. */
1140 val
= CSR_READ_4(sc
, BGE_PCIE_DL_LO_FTSMAX
);
1141 val
&= ~BGE_PCIE_DL_LO_FTSMAX_MASK
;
1142 val
|= BGE_PCIE_DL_LO_FTSMAX_VAL
;
1143 CSR_WRITE_4(sc
, BGE_PCIE_DL_LO_FTSMAX
, val
);
/* Restore MODE_CTL (closes the DL register window). */
1145 CSR_WRITE_4(sc
, BGE_MODE_CTL
, mode_ctl
);
/* Set the 10Mbps MAC clock to 6.25MHz. */
1148 val
= CSR_READ_4(sc
, BGE_CPMU_LSPD_10MB_CLK
);
1149 val
&= ~BGE_CPMU_LSPD_10MB_MACCLK_MASK
;
1150 val
|= BGE_CPMU_LSPD_10MB_MACCLK_6_25
;
1151 CSR_WRITE_4(sc
, BGE_CPMU_LSPD_10MB_CLK
, val
);
1155 * Set up the PCI DMA control register.
1157 dma_rw_ctl
= pci_read_config(sc
->bnx_dev
, BGE_PCI_DMA_RW_CTL
, 4);
1159 * Disable 32bytes cache alignment for DMA write to host memory
1162 * 64bytes cache alignment for DMA write to host memory is still
1165 dma_rw_ctl
|= BGE_PCIDMARWCTL_DIS_CACHE_ALIGNMENT
;
1166 if (sc
->bnx_chipid
== BGE_CHIPID_BCM57765_A0
)
1167 dma_rw_ctl
&= ~BGE_PCIDMARWCTL_CRDRDR_RDMA_MRRS_MSK
;
1169 * Enable HW workaround for controllers that misinterpret
1170 * a status tag update and leave interrupts permanently
/* 5717/5762/57765-family either don't need or don't support this WA. */
1173 if (sc
->bnx_asicrev
!= BGE_ASICREV_BCM5717
&&
1174 sc
->bnx_asicrev
!= BGE_ASICREV_BCM5762
&&
1175 !BNX_IS_57765_FAMILY(sc
))
1176 dma_rw_ctl
|= BGE_PCIDMARWCTL_TAGGED_STATUS_WA
;
/* NOTE(review): this if_printf is likely under a bootverbose-style
 * conditional whose line was dropped by extraction (gap 1176 -> 1178). */
1178 if_printf(&sc
->arpcom
.ac_if
, "DMA read/write %#x\n",
1181 pci_write_config(sc
->bnx_dev
, BGE_PCI_DMA_RW_CTL
, dma_rw_ctl
, 4);
1184 * Set up general mode register.
/* Start from the chip's required byte/word-swap options. */
1186 mode_ctl
= bnx_dma_swap_options(sc
);
1187 if (sc
->bnx_asicrev
== BGE_ASICREV_BCM5720
||
1188 sc
->bnx_asicrev
== BGE_ASICREV_BCM5762
) {
1189 /* Retain Host-2-BMC settings written by APE firmware. */
1190 mode_ctl
|= CSR_READ_4(sc
, BGE_MODE_CTL
) &
1191 (BGE_MODECTL_BYTESWAP_B2HRX_DATA
|
1192 BGE_MODECTL_WORDSWAP_B2HRX_DATA
|
1193 BGE_MODECTL_B2HRX_ENABLE
| BGE_MODECTL_HTX2B_ENABLE
);
1195 mode_ctl
|= BGE_MODECTL_MAC_ATTN_INTR
|
1196 BGE_MODECTL_HOST_SEND_BDS
| BGE_MODECTL_TX_NO_PHDR_CSUM
;
1197 CSR_WRITE_4(sc
, BGE_MODE_CTL
, mode_ctl
);
1200 * Disable memory write invalidate. Apparently it is not supported
1201 * properly by these devices. Also ensure that INTx isn't disabled,
1202 * as these chips need it even when using MSI.
1204 PCI_CLRBIT(sc
->bnx_dev
, BGE_PCI_CMD
,
1205 (PCIM_CMD_MWRICEN
| PCIM_CMD_INTxDIS
), 4);
1207 /* Set the timer prescaler (always 66Mhz) */
1208 CSR_WRITE_4(sc
, BGE_MISC_CFG
, 65 << 1/*BGE_32BITTIME_66MHZ*/);
/*
 * bnx_blockinit: full datapath bring-up.  Programs, in hardware-mandated
 * order: buffer manager watermarks, FTQ reset, RX producer/return and TX
 * send ring control blocks (RCBs), replenish thresholds, host coalescing
 * (including per-vector status block addresses), then enables each DMA
 * and state-machine block, and finally link-attention plumbing.
 *
 * Returns 0 on success; the timeout branches at 1274-1276 and 1291-1293
 * presumably return an errno (their return lines were dropped).
 *
 * NOTE(review): extraction dropped many original lines throughout
 * (declarations of val/i/limit/vrcb/taddr/hwcfg, closing braces,
 * return statements, and several argument lines such as the taddr
 * hi/lo arguments after 1446/1448 and 1489/1491).  The remaining text
 * is kept byte-identical; confirm dropped pieces against upstream.
 */
1214 bnx_blockinit(struct bnx_softc
*sc
)
1216 struct bnx_intr_data
*intr
;
1217 struct bge_rcb
*rcb
;
1224 * Initialize the memory window pointer register so that
1225 * we can access the first 32K of internal NIC RAM. This will
1226 * allow us to set up the TX send ring RCBs and the RX return
1227 * ring RCBs, plus other things which live in NIC memory.
1229 CSR_WRITE_4(sc
, BGE_PCI_MEMWIN_BASEADDR
, 0);
1231 /* Configure mbuf pool watermarks */
1232 if (BNX_IS_57765_PLUS(sc
)) {
1233 CSR_WRITE_4(sc
, BGE_BMAN_MBUFPOOL_READDMA_LOWAT
, 0x0);
/* Jumbo-MTU configurations get higher MACRX/HIWAT watermarks. */
1234 if (sc
->arpcom
.ac_if
.if_mtu
> ETHERMTU
) {
1235 CSR_WRITE_4(sc
, BGE_BMAN_MBUFPOOL_MACRX_LOWAT
, 0x7e);
1236 CSR_WRITE_4(sc
, BGE_BMAN_MBUFPOOL_HIWAT
, 0xea);
1238 CSR_WRITE_4(sc
, BGE_BMAN_MBUFPOOL_MACRX_LOWAT
, 0x2a);
1239 CSR_WRITE_4(sc
, BGE_BMAN_MBUFPOOL_HIWAT
, 0xa0);
1242 CSR_WRITE_4(sc
, BGE_BMAN_MBUFPOOL_READDMA_LOWAT
, 0x0);
1243 CSR_WRITE_4(sc
, BGE_BMAN_MBUFPOOL_MACRX_LOWAT
, 0x10);
1244 CSR_WRITE_4(sc
, BGE_BMAN_MBUFPOOL_HIWAT
, 0x60);
1247 /* Configure DMA resource watermarks */
1248 CSR_WRITE_4(sc
, BGE_BMAN_DMA_DESCPOOL_LOWAT
, 5);
1249 CSR_WRITE_4(sc
, BGE_BMAN_DMA_DESCPOOL_HIWAT
, 10);
1251 /* Enable buffer manager */
1252 val
= BGE_BMANMODE_ENABLE
| BGE_BMANMODE_LOMBUF_ATTN
;
1254 * Change the arbitration algorithm of TXMBUF read request to
1255 * round-robin instead of priority based for BCM5719. When
1256 * TXFIFO is almost empty, RDMA will hold its request until
1257 * TXFIFO is not almost empty.
1259 if (sc
->bnx_asicrev
== BGE_ASICREV_BCM5719
)
1260 val
|= BGE_BMANMODE_NO_TX_UNDERRUN
;
1261 if (sc
->bnx_asicrev
== BGE_ASICREV_BCM5717
||
1262 sc
->bnx_chipid
== BGE_CHIPID_BCM5719_A0
||
1263 sc
->bnx_chipid
== BGE_CHIPID_BCM5720_A0
)
1264 val
|= BGE_BMANMODE_LOMBUF_ATTN
;
1265 CSR_WRITE_4(sc
, BGE_BMAN_MODE
, val
);
1267 /* Poll for buffer manager start indication */
1268 for (i
= 0; i
< BNX_TIMEOUT
; i
++) {
1269 if (CSR_READ_4(sc
, BGE_BMAN_MODE
) & BGE_BMANMODE_ENABLE
)
1274 if (i
== BNX_TIMEOUT
) {
1275 if_printf(&sc
->arpcom
.ac_if
,
1276 "buffer manager failed to start\n");
1280 /* Enable flow-through queues */
1281 CSR_WRITE_4(sc
, BGE_FTQ_RESET
, 0xFFFFFFFF);
1282 CSR_WRITE_4(sc
, BGE_FTQ_RESET
, 0);
1284 /* Wait until queue initialization is complete */
1285 for (i
= 0; i
< BNX_TIMEOUT
; i
++) {
1286 if (CSR_READ_4(sc
, BGE_FTQ_RESET
) == 0)
1291 if (i
== BNX_TIMEOUT
) {
1292 if_printf(&sc
->arpcom
.ac_if
,
1293 "flow-through queue init failed\n");
1298 * Summary of rings supported by the controller:
1300 * Standard Receive Producer Ring
1301 * - This ring is used to feed receive buffers for "standard"
1302 * sized frames (typically 1536 bytes) to the controller.
1304 * Jumbo Receive Producer Ring
1305 * - This ring is used to feed receive buffers for jumbo sized
1306 * frames (i.e. anything bigger than the "standard" frames)
1307 * to the controller.
1309 * Mini Receive Producer Ring
1310 * - This ring is used to feed receive buffers for "mini"
1311 * sized frames to the controller.
1312 * - This feature required external memory for the controller
1313 * but was never used in a production system. Should always
1316 * Receive Return Ring
1317 * - After the controller has placed an incoming frame into a
1318 * receive buffer that buffer is moved into a receive return
1319 * ring. The driver is then responsible to passing the
1320 * buffer up to the stack. BCM5718/BCM57785 families support
1321 * multiple receive return rings.
1324 * - This ring is used for outgoing frames. BCM5719/BCM5720
1325 * support multiple send rings.
1328 /* Initialize the standard receive producer ring control block. */
1329 rcb
= &sc
->bnx_ldata
.bnx_info
.bnx_std_rx_rcb
;
1330 rcb
->bge_hostaddr
.bge_addr_lo
=
1331 BGE_ADDR_LO(sc
->bnx_rx_std_ring
.bnx_rx_std_ring_paddr
);
1332 rcb
->bge_hostaddr
.bge_addr_hi
=
1333 BGE_ADDR_HI(sc
->bnx_rx_std_ring
.bnx_rx_std_ring_paddr
);
1334 if (BNX_IS_57765_PLUS(sc
)) {
1336 * Bits 31-16: Programmable ring size (2048, 1024, 512, .., 32)
1337 * Bits 15-2 : Maximum RX frame size
1338 * Bit 1 : 1 = Ring Disabled, 0 = Ring ENabled
1341 rcb
->bge_maxlen_flags
=
1342 BGE_RCB_MAXLEN_FLAGS(512, BNX_MAX_FRAMELEN
<< 2);
1345 * Bits 31-16: Programmable ring size (512, 256, 128, 64, 32)
1346 * Bits 15-2 : Reserved (should be 0)
1347 * Bit 1 : 1 = Ring Disabled, 0 = Ring Enabled
1350 rcb
->bge_maxlen_flags
= BGE_RCB_MAXLEN_FLAGS(512, 0);
1352 if (BNX_IS_5717_PLUS(sc
))
1353 rcb
->bge_nicaddr
= BGE_STD_RX_RINGS_5717
;
1355 rcb
->bge_nicaddr
= BGE_STD_RX_RINGS
;
1356 /* Write the standard receive producer ring control block. */
1357 CSR_WRITE_4(sc
, BGE_RX_STD_RCB_HADDR_HI
, rcb
->bge_hostaddr
.bge_addr_hi
);
1358 CSR_WRITE_4(sc
, BGE_RX_STD_RCB_HADDR_LO
, rcb
->bge_hostaddr
.bge_addr_lo
);
1359 CSR_WRITE_4(sc
, BGE_RX_STD_RCB_MAXLEN_FLAGS
, rcb
->bge_maxlen_flags
);
1360 if (!BNX_IS_5717_PLUS(sc
))
1361 CSR_WRITE_4(sc
, BGE_RX_STD_RCB_NICADDR
, rcb
->bge_nicaddr
);
1362 /* Reset the standard receive producer ring producer index. */
1363 bnx_writembx(sc
, BGE_MBX_RX_STD_PROD_LO
, 0);
1366 * Initialize the jumbo RX producer ring control
1367 * block. We set the 'ring disabled' bit in the
1368 * flags field until we're actually ready to start
1369 * using this ring (i.e. once we set the MTU
1370 * high enough to require it).
1372 if (BNX_IS_JUMBO_CAPABLE(sc
)) {
1373 rcb
= &sc
->bnx_ldata
.bnx_info
.bnx_jumbo_rx_rcb
;
1374 /* Get the jumbo receive producer ring RCB parameters. */
1375 rcb
->bge_hostaddr
.bge_addr_lo
=
1376 BGE_ADDR_LO(sc
->bnx_ldata
.bnx_rx_jumbo_ring_paddr
);
1377 rcb
->bge_hostaddr
.bge_addr_hi
=
1378 BGE_ADDR_HI(sc
->bnx_ldata
.bnx_rx_jumbo_ring_paddr
);
1379 rcb
->bge_maxlen_flags
=
1380 BGE_RCB_MAXLEN_FLAGS(BNX_MAX_FRAMELEN
,
1381 BGE_RCB_FLAG_RING_DISABLED
);
1382 if (BNX_IS_5717_PLUS(sc
))
1383 rcb
->bge_nicaddr
= BGE_JUMBO_RX_RINGS_5717
;
1385 rcb
->bge_nicaddr
= BGE_JUMBO_RX_RINGS
;
1386 CSR_WRITE_4(sc
, BGE_RX_JUMBO_RCB_HADDR_HI
,
1387 rcb
->bge_hostaddr
.bge_addr_hi
);
1388 CSR_WRITE_4(sc
, BGE_RX_JUMBO_RCB_HADDR_LO
,
1389 rcb
->bge_hostaddr
.bge_addr_lo
);
1390 /* Program the jumbo receive producer ring RCB parameters. */
1391 CSR_WRITE_4(sc
, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS
,
1392 rcb
->bge_maxlen_flags
);
1393 CSR_WRITE_4(sc
, BGE_RX_JUMBO_RCB_NICADDR
, rcb
->bge_nicaddr
);
1394 /* Reset the jumbo receive producer ring producer index. */
1395 bnx_writembx(sc
, BGE_MBX_RX_JUMBO_PROD_LO
, 0);
1399 * The BD ring replenish thresholds control how often the
1400 * hardware fetches new BD's from the producer rings in host
1401 * memory. Setting the value too low on a busy system can
1402 * starve the hardware and recue the throughpout.
1404 * Set the BD ring replentish thresholds. The recommended
1405 * values are 1/8th the number of descriptors allocated to
/* NOTE(review): the line assigning 'val' (std threshold) before this
 * write was dropped by extraction (gap 1405 -> 1409). */
1409 CSR_WRITE_4(sc
, BGE_RBDI_STD_REPL_THRESH
, val
);
1410 if (BNX_IS_JUMBO_CAPABLE(sc
)) {
1411 CSR_WRITE_4(sc
, BGE_RBDI_JUMBO_REPL_THRESH
,
1412 BGE_JUMBO_RX_RING_CNT
/8);
1414 if (BNX_IS_57765_PLUS(sc
)) {
1415 CSR_WRITE_4(sc
, BGE_STD_REPLENISH_LWM
, 32);
1416 CSR_WRITE_4(sc
, BGE_JMB_REPLENISH_LWM
, 16);
1420 * Disable all send rings by setting the 'ring disabled' bit
1421 * in the flags field of all the TX send ring control blocks,
1422 * located in NIC memory.
/* Chip family determines how many send-ring RCBs exist ('limit'). */
1424 if (BNX_IS_5717_PLUS(sc
))
1426 else if (BNX_IS_57765_FAMILY(sc
) ||
1427 sc
->bnx_asicrev
== BGE_ASICREV_BCM5762
)
1431 vrcb
= BGE_MEMWIN_START
+ BGE_SEND_RING_RCB
;
1432 for (i
= 0; i
< limit
; i
++) {
1433 RCB_WRITE_4(sc
, vrcb
, bge_maxlen_flags
,
1434 BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED
));
1435 vrcb
+= sizeof(struct bge_rcb
);
1439 * Configure send ring RCBs
1441 vrcb
= BGE_MEMWIN_START
+ BGE_SEND_RING_RCB
;
1442 for (i
= 0; i
< sc
->bnx_tx_ringcnt
; ++i
) {
1443 struct bnx_tx_ring
*txr
= &sc
->bnx_tx_ring
[i
];
1445 BGE_HOSTADDR(taddr
, txr
->bnx_tx_ring_paddr
);
1446 RCB_WRITE_4(sc
, vrcb
, bge_hostaddr
.bge_addr_hi
,
1448 RCB_WRITE_4(sc
, vrcb
, bge_hostaddr
.bge_addr_lo
,
1450 RCB_WRITE_4(sc
, vrcb
, bge_maxlen_flags
,
1451 BGE_RCB_MAXLEN_FLAGS(BGE_TX_RING_CNT
, 0));
1452 vrcb
+= sizeof(struct bge_rcb
);
1456 * Disable all receive return rings by setting the
1457 * 'ring disabled' bit in the flags field of all the receive
1458 * return ring control blocks, located in NIC memory.
1460 if (BNX_IS_5717_PLUS(sc
)) {
1461 /* Should be 17, use 16 until we get an SRAM map. */
1463 } else if (BNX_IS_57765_FAMILY(sc
) ||
1464 sc
->bnx_asicrev
== BGE_ASICREV_BCM5762
) {
1469 /* Disable all receive return rings. */
1470 vrcb
= BGE_MEMWIN_START
+ BGE_RX_RETURN_RING_RCB
;
1471 for (i
= 0; i
< limit
; i
++) {
1472 RCB_WRITE_4(sc
, vrcb
, bge_hostaddr
.bge_addr_hi
, 0);
1473 RCB_WRITE_4(sc
, vrcb
, bge_hostaddr
.bge_addr_lo
, 0);
1474 RCB_WRITE_4(sc
, vrcb
, bge_maxlen_flags
,
1475 BGE_RCB_FLAG_RING_DISABLED
);
/* Also zero each return ring's consumer mailbox (8 bytes apart). */
1476 bnx_writembx(sc
, BGE_MBX_RX_CONS0_LO
+
1477 (i
* (sizeof(uint64_t))), 0);
1478 vrcb
+= sizeof(struct bge_rcb
);
1482 * Set up receive return rings.
1484 vrcb
= BGE_MEMWIN_START
+ BGE_RX_RETURN_RING_RCB
;
1485 for (i
= 0; i
< sc
->bnx_rx_retcnt
; ++i
) {
1486 struct bnx_rx_ret_ring
*ret
= &sc
->bnx_rx_ret_ring
[i
];
1488 BGE_HOSTADDR(taddr
, ret
->bnx_rx_ret_ring_paddr
);
1489 RCB_WRITE_4(sc
, vrcb
, bge_hostaddr
.bge_addr_hi
,
1491 RCB_WRITE_4(sc
, vrcb
, bge_hostaddr
.bge_addr_lo
,
1493 RCB_WRITE_4(sc
, vrcb
, bge_maxlen_flags
,
1494 BGE_RCB_MAXLEN_FLAGS(BNX_RETURN_RING_CNT
, 0));
1495 vrcb
+= sizeof(struct bge_rcb
);
1498 /* Set random backoff seed for TX */
/* Seed is the masked sum of all six MAC address bytes. */
1499 CSR_WRITE_4(sc
, BGE_TX_RANDOM_BACKOFF
,
1500 (sc
->arpcom
.ac_enaddr
[0] + sc
->arpcom
.ac_enaddr
[1] +
1501 sc
->arpcom
.ac_enaddr
[2] + sc
->arpcom
.ac_enaddr
[3] +
1502 sc
->arpcom
.ac_enaddr
[4] + sc
->arpcom
.ac_enaddr
[5]) &
1503 BGE_TX_BACKOFF_SEED_MASK
);
1505 /* Set inter-packet gap */
/* NOTE(review): the base assignment to 'val' (line 1506) was dropped. */
1507 if (sc
->bnx_asicrev
== BGE_ASICREV_BCM5720
||
1508 sc
->bnx_asicrev
== BGE_ASICREV_BCM5762
) {
1509 val
|= CSR_READ_4(sc
, BGE_TX_LENGTHS
) &
1510 (BGE_TXLEN_JMB_FRM_LEN_MSK
| BGE_TXLEN_CNT_DN_VAL_MSK
);
1512 CSR_WRITE_4(sc
, BGE_TX_LENGTHS
, val
);
1515 * Specify which ring to use for packets that don't match
1518 CSR_WRITE_4(sc
, BGE_RX_RULES_CFG
, 0x08);
1521 * Configure number of RX lists. One interrupt distribution
1522 * list, sixteen active lists, one bad frames class.
1524 CSR_WRITE_4(sc
, BGE_RXLP_CFG
, 0x181);
1526 /* Inialize RX list placement stats mask. */
1527 CSR_WRITE_4(sc
, BGE_RXLP_STATS_ENABLE_MASK
, 0x007FFFFF);
1528 CSR_WRITE_4(sc
, BGE_RXLP_STATS_CTL
, 0x1);
1530 /* Disable host coalescing until we get it set up */
1531 CSR_WRITE_4(sc
, BGE_HCC_MODE
, 0x00000000);
1533 /* Poll to make sure it's shut down. */
1534 for (i
= 0; i
< BNX_TIMEOUT
; i
++) {
1535 if (!(CSR_READ_4(sc
, BGE_HCC_MODE
) & BGE_HCCMODE_ENABLE
))
1540 if (i
== BNX_TIMEOUT
) {
1541 if_printf(&sc
->arpcom
.ac_if
,
1542 "host coalescing engine failed to idle\n");
1546 /* Set up host coalescing defaults */
/* Mark every coalescing parameter dirty so bnx_coal_change() programs all. */
1547 sc
->bnx_coal_chg
= BNX_RX_COAL_TICKS_CHG
|
1548 BNX_TX_COAL_TICKS_CHG
|
1549 BNX_RX_COAL_BDS_CHG
|
1550 BNX_TX_COAL_BDS_CHG
|
1551 BNX_RX_COAL_BDS_INT_CHG
|
1552 BNX_TX_COAL_BDS_INT_CHG
;
1553 bnx_coal_change(sc
);
1556 * Set up addresses of status blocks
/* Vector 0's status block uses the legacy HCC address registers... */
1558 intr
= &sc
->bnx_intr_data
[0];
1559 bzero(intr
->bnx_status_block
, BGE_STATUS_BLK_SZ
);
1560 CSR_WRITE_4(sc
, BGE_HCC_STATUSBLK_ADDR_HI
,
1561 BGE_ADDR_HI(intr
->bnx_status_block_paddr
));
1562 CSR_WRITE_4(sc
, BGE_HCC_STATUSBLK_ADDR_LO
,
1563 BGE_ADDR_LO(intr
->bnx_status_block_paddr
));
/* ...remaining vectors use the VEC1+ register pairs, 8 bytes apart. */
1564 for (i
= 1; i
< sc
->bnx_intr_cnt
; ++i
) {
1565 intr
= &sc
->bnx_intr_data
[i
];
1566 bzero(intr
->bnx_status_block
, BGE_STATUS_BLK_SZ
);
1567 CSR_WRITE_4(sc
, BGE_VEC1_STATUSBLK_ADDR_HI
+ ((i
- 1) * 8),
1568 BGE_ADDR_HI(intr
->bnx_status_block_paddr
));
1569 CSR_WRITE_4(sc
, BGE_VEC1_STATUSBLK_ADDR_LO
+ ((i
- 1) * 8),
1570 BGE_ADDR_LO(intr
->bnx_status_block_paddr
));
1573 /* Set up status block partail update size. */
1574 val
= BGE_STATBLKSZ_32BYTE
;
1577 * Does not seem to have visible effect in both
1578 * bulk data (1472B UDP datagram) and tiny data
1579 * (18B UDP datagram) TX tests.
1581 val
|= BGE_HCCMODE_CLRTICK_TX
;
1583 /* Turn on host coalescing state machine */
1584 CSR_WRITE_4(sc
, BGE_HCC_MODE
, val
| BGE_HCCMODE_ENABLE
);
1586 /* Turn on RX BD completion state machine and enable attentions */
1587 CSR_WRITE_4(sc
, BGE_RBDC_MODE
,
1588 BGE_RBDCMODE_ENABLE
|BGE_RBDCMODE_ATTN
);
1590 /* Turn on RX list placement state machine */
1591 CSR_WRITE_4(sc
, BGE_RXLP_MODE
, BGE_RXLPMODE_ENABLE
);
/* Build the MAC mode word: DMA engines, stats, and port mode. */
1593 val
= BGE_MACMODE_TXDMA_ENB
| BGE_MACMODE_RXDMA_ENB
|
1594 BGE_MACMODE_RX_STATS_CLEAR
| BGE_MACMODE_TX_STATS_CLEAR
|
1595 BGE_MACMODE_RX_STATS_ENB
| BGE_MACMODE_TX_STATS_ENB
|
1596 BGE_MACMODE_FRMHDR_DMA_ENB
;
1598 if (sc
->bnx_flags
& BNX_FLAG_TBI
)
1599 val
|= BGE_PORTMODE_TBI
;
1600 else if (sc
->bnx_flags
& BNX_FLAG_MII_SERDES
)
1601 val
|= BGE_PORTMODE_GMII
;
1603 val
|= BGE_PORTMODE_MII
;
1605 /* Allow APE to send/receive frames. */
1606 if (sc
->bnx_mfw_flags
& BNX_MFW_ON_APE
)
1607 val
|= BGE_MACMODE_APE_RX_EN
| BGE_MACMODE_APE_TX_EN
;
1609 /* Turn on DMA, clear stats */
1610 CSR_WRITE_4(sc
, BGE_MAC_MODE
, val
);
1613 /* Set misc. local control, enable interrupts on attentions */
1614 BNX_SETBIT(sc
, BGE_MISC_LOCAL_CTL
, BGE_MLC_INTR_ONATTN
);
1617 /* Assert GPIO pins for PHY reset */
/* NOTE(review): upstream guards this GPIO code with an #ifdef (gap
 * 1614 -> 1617) — the conditional lines were dropped by extraction. */
1618 BNX_SETBIT(sc
, BGE_MISC_LOCAL_CTL
, BGE_MLC_MISCIO_OUT0
|
1619 BGE_MLC_MISCIO_OUT1
|BGE_MLC_MISCIO_OUT2
);
1620 BNX_SETBIT(sc
, BGE_MISC_LOCAL_CTL
, BGE_MLC_MISCIO_OUTEN0
|
1621 BGE_MLC_MISCIO_OUTEN1
|BGE_MLC_MISCIO_OUTEN2
);
1624 if (sc
->bnx_intr_type
== PCI_INTR_TYPE_MSIX
)
1625 bnx_enable_msi(sc
, TRUE
);
1627 /* Turn on write DMA state machine */
1628 val
= BGE_WDMAMODE_ENABLE
|BGE_WDMAMODE_ALL_ATTNS
;
1629 /* Enable host coalescing bug fix. */
1630 val
|= BGE_WDMAMODE_STATUS_TAG_FIX
;
1631 if (sc
->bnx_asicrev
== BGE_ASICREV_BCM5785
) {
1632 /* Request larger DMA burst size to get better performance. */
1633 val
|= BGE_WDMAMODE_BURST_ALL_DATA
;
1635 CSR_WRITE_4(sc
, BGE_WDMA_MODE
, val
);
1638 if (BNX_IS_57765_PLUS(sc
)) {
1639 uint32_t dmactl
, dmactl_reg
;
/* BCM5762 uses the second RSRVCTRL register. */
1641 if (sc
->bnx_asicrev
== BGE_ASICREV_BCM5762
)
1642 dmactl_reg
= BGE_RDMA_RSRVCTRL2
;
1644 dmactl_reg
= BGE_RDMA_RSRVCTRL
;
1646 dmactl
= CSR_READ_4(sc
, dmactl_reg
);
1648 * Adjust tx margin to prevent TX data corruption and
1649 * fix internal FIFO overflow.
1651 if (sc
->bnx_asicrev
== BGE_ASICREV_BCM5719
||
1652 sc
->bnx_asicrev
== BGE_ASICREV_BCM5720
||
1653 sc
->bnx_asicrev
== BGE_ASICREV_BCM5762
) {
1654 dmactl
&= ~(BGE_RDMA_RSRVCTRL_FIFO_LWM_MASK
|
1655 BGE_RDMA_RSRVCTRL_FIFO_HWM_MASK
|
1656 BGE_RDMA_RSRVCTRL_TXMRGN_MASK
);
1657 dmactl
|= BGE_RDMA_RSRVCTRL_FIFO_LWM_1_5K
|
1658 BGE_RDMA_RSRVCTRL_FIFO_HWM_1_5K
|
1659 BGE_RDMA_RSRVCTRL_TXMRGN_320B
;
1662 * Enable fix for read DMA FIFO overruns.
1663 * The fix is to limit the number of RX BDs
1664 * the hardware would fetch at a fime.
1666 CSR_WRITE_4(sc
, dmactl_reg
,
1667 dmactl
| BGE_RDMA_RSRVCTRL_FIFO_OFLW_FIX
);
1670 if (sc
->bnx_asicrev
== BGE_ASICREV_BCM5719
) {
1671 CSR_WRITE_4(sc
, BGE_RDMA_LSO_CRPTEN_CTRL
,
1672 CSR_READ_4(sc
, BGE_RDMA_LSO_CRPTEN_CTRL
) |
1673 BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_BD_4K
|
1674 BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_LSO_4K
);
1675 } else if (sc
->bnx_asicrev
== BGE_ASICREV_BCM5720
||
1676 sc
->bnx_asicrev
== BGE_ASICREV_BCM5762
) {
/* NOTE(review): declaration of 'ctrl_reg' (gap 1676 -> 1679) dropped. */
1679 if (sc
->bnx_asicrev
== BGE_ASICREV_BCM5762
)
1680 ctrl_reg
= BGE_RDMA_LSO_CRPTEN_CTRL2
;
1682 ctrl_reg
= BGE_RDMA_LSO_CRPTEN_CTRL
;
1685 * Allow 4KB burst length reads for non-LSO frames.
1686 * Enable 512B burst length reads for buffer descriptors.
1688 CSR_WRITE_4(sc
, ctrl_reg
,
1689 CSR_READ_4(sc
, ctrl_reg
) |
1690 BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_BD_512
|
1691 BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_LSO_4K
);
1694 /* Turn on read DMA state machine */
1695 val
= BGE_RDMAMODE_ENABLE
| BGE_RDMAMODE_ALL_ATTNS
;
1696 if (sc
->bnx_asicrev
== BGE_ASICREV_BCM5717
)
1697 val
|= BGE_RDMAMODE_MULT_DMA_RD_DIS
;
1698 if (sc
->bnx_asicrev
== BGE_ASICREV_BCM5784
||
1699 sc
->bnx_asicrev
== BGE_ASICREV_BCM5785
||
1700 sc
->bnx_asicrev
== BGE_ASICREV_BCM57780
) {
1701 val
|= BGE_RDMAMODE_BD_SBD_CRPT_ATTN
|
1702 BGE_RDMAMODE_MBUF_RBD_CRPT_ATTN
|
1703 BGE_RDMAMODE_MBUF_SBD_CRPT_ATTN
;
1705 if (sc
->bnx_asicrev
== BGE_ASICREV_BCM5720
||
1706 sc
->bnx_asicrev
== BGE_ASICREV_BCM5762
) {
1707 val
|= CSR_READ_4(sc
, BGE_RDMA_MODE
) &
1708 BGE_RDMAMODE_H2BNC_VLAN_DET
;
1710 * Allow multiple outstanding read requests from
1711 * non-LSO read DMA engine.
1713 val
&= ~BGE_RDMAMODE_MULT_DMA_RD_DIS
;
1715 if (sc
->bnx_asicrev
== BGE_ASICREV_BCM57766
)
1716 val
|= BGE_RDMAMODE_JMB_2K_MMRR
;
1717 if (sc
->bnx_flags
& BNX_FLAG_TSO
)
1718 val
|= BGE_RDMAMODE_TSO4_ENABLE
;
1719 val
|= BGE_RDMAMODE_FIFO_LONG_BURST
;
1720 CSR_WRITE_4(sc
, BGE_RDMA_MODE
, val
);
1723 /* Turn on RX data completion state machine */
1724 CSR_WRITE_4(sc
, BGE_RDC_MODE
, BGE_RDCMODE_ENABLE
);
1726 /* Turn on RX BD initiator state machine */
1727 CSR_WRITE_4(sc
, BGE_RBDI_MODE
, BGE_RBDIMODE_ENABLE
);
1729 /* Turn on RX data and RX BD initiator state machine */
1730 CSR_WRITE_4(sc
, BGE_RDBDI_MODE
, BGE_RDBDIMODE_ENABLE
);
1732 /* Turn on send BD completion state machine */
1733 CSR_WRITE_4(sc
, BGE_SBDC_MODE
, BGE_SBDCMODE_ENABLE
);
1735 /* Turn on send data completion state machine */
1736 val
= BGE_SDCMODE_ENABLE
;
1737 if (sc
->bnx_asicrev
== BGE_ASICREV_BCM5761
)
1738 val
|= BGE_SDCMODE_CDELAY
;
1739 CSR_WRITE_4(sc
, BGE_SDC_MODE
, val
);
1741 /* Turn on send data initiator state machine */
1742 if (sc
->bnx_flags
& BNX_FLAG_TSO
) {
1743 CSR_WRITE_4(sc
, BGE_SDI_MODE
, BGE_SDIMODE_ENABLE
|
1744 BGE_SDIMODE_HW_LSO_PRE_DMA
);
1746 CSR_WRITE_4(sc
, BGE_SDI_MODE
, BGE_SDIMODE_ENABLE
);
1749 /* Turn on send BD initiator state machine */
1750 val
= BGE_SBDIMODE_ENABLE
;
1751 if (sc
->bnx_tx_ringcnt
> 1)
1752 val
|= BGE_SBDIMODE_MULTI_TXR
;
1753 CSR_WRITE_4(sc
, BGE_SBDI_MODE
, val
);
1755 /* Turn on send BD selector state machine */
1756 CSR_WRITE_4(sc
, BGE_SRS_MODE
, BGE_SRSMODE_ENABLE
);
1758 CSR_WRITE_4(sc
, BGE_SDI_STATS_ENABLE_MASK
, 0x007FFFFF);
1759 CSR_WRITE_4(sc
, BGE_SDI_STATS_CTL
,
1760 BGE_SDISTATSCTL_ENABLE
|BGE_SDISTATSCTL_FASTER
);
1762 /* ack/clear link change events */
1763 CSR_WRITE_4(sc
, BGE_MAC_STS
, BGE_MACSTAT_SYNC_CHANGED
|
1764 BGE_MACSTAT_CFG_CHANGED
|BGE_MACSTAT_MI_COMPLETE
|
1765 BGE_MACSTAT_LINK_CHANGED
);
1766 CSR_WRITE_4(sc
, BGE_MI_STS
, 0);
1769 * Enable attention when the link has changed state for
1770 * devices that use auto polling.
1772 if (sc
->bnx_flags
& BNX_FLAG_TBI
) {
1773 CSR_WRITE_4(sc
, BGE_MI_STS
, BGE_MISTS_LINK
);
1775 if (sc
->bnx_mi_mode
& BGE_MIMODE_AUTOPOLL
) {
1776 CSR_WRITE_4(sc
, BGE_MI_MODE
, sc
->bnx_mi_mode
);
1782 * Clear any pending link state attention.
1783 * Otherwise some link state change events may be lost until attention
1784 * is cleared by bnx_intr() -> bnx_softc.bnx_link_upd() sequence.
1785 * It's not necessary on newer BCM chips - perhaps enabling link
1786 * state change attentions implies clearing pending attention.
1788 CSR_WRITE_4(sc
, BGE_MAC_STS
, BGE_MACSTAT_SYNC_CHANGED
|
1789 BGE_MACSTAT_CFG_CHANGED
|BGE_MACSTAT_MI_COMPLETE
|
1790 BGE_MACSTAT_LINK_CHANGED
);
1792 /* Enable link state change attentions. */
1793 BNX_SETBIT(sc
, BGE_MAC_EVT_ENB
, BGE_EVTENB_LINK_CHANGED
);
/*
 * bnx_probe: newbus probe method.  Matches this PCIe device's
 * vendor/device ID pair against the bnx_devs table and, on a hit,
 * sets the device description to the table's name string.
 *
 * NOTE(review): extraction dropped this function's braces and the
 * return/break statements (e.g. after 1812, 1819 and 1822 — presumably
 * "return ENXIO;" on the failure paths and "break;" on a table match,
 * with "return 0;" at the end, per the standard probe contract).
 */
1799 * Probe for a Broadcom chip. Check the PCI vendor and device IDs
1800 * against our list and return its name if we find a match. Note
1801 * that since the Broadcom controller contains VPD support, we
1802 * can get the device name string from the controller itself instead
1803 * of the compiled-in string. This is a little slow, but it guarantees
1804 * we'll always announce the right product name.
1807 bnx_probe(device_t dev
)
1809 const struct bnx_type
*t
;
1810 uint16_t product
, vendor
;
/* This driver only handles PCIe parts; PCI/PCI-X Broadcoms go to bge(4). */
1812 if (!pci_is_pcie(dev
))
1815 product
= pci_get_device(dev
);
1816 vendor
= pci_get_vendor(dev
);
/* Linear scan of the NULL-name-terminated device table. */
1818 for (t
= bnx_devs
; t
->bnx_name
!= NULL
; t
++) {
1819 if (vendor
== t
->bnx_vid
&& product
== t
->bnx_did
)
/* No match: t stopped on the terminator entry. */
1822 if (t
->bnx_name
== NULL
)
1825 device_set_desc(dev
, t
->bnx_name
);
1830 bnx_attach(device_t dev
)
1833 struct bnx_softc
*sc
;
1834 struct bnx_rx_std_ring
*std
;
1835 struct sysctl_ctx_list
*ctx
;
1836 struct sysctl_oid_list
*tree
;
1838 int error
= 0, rid
, capmask
, i
, std_cpuid
, std_cpuid_def
;
1839 uint8_t ether_addr
[ETHER_ADDR_LEN
];
1841 uintptr_t mii_priv
= 0;
1842 #if defined(BNX_TSO_DEBUG) || defined(BNX_RSS_DEBUG) || defined(BNX_TSS_DEBUG)
1846 sc
= device_get_softc(dev
);
1848 callout_init_mp(&sc
->bnx_tick_timer
);
1849 lwkt_serialize_init(&sc
->bnx_jslot_serializer
);
1850 lwkt_serialize_init(&sc
->bnx_main_serialize
);
1852 /* Always setup interrupt mailboxes */
1853 for (i
= 0; i
< BNX_INTR_MAX
; ++i
) {
1854 callout_init_mp(&sc
->bnx_intr_data
[i
].bnx_intr_timer
);
1855 sc
->bnx_intr_data
[i
].bnx_sc
= sc
;
1856 sc
->bnx_intr_data
[i
].bnx_intr_mbx
= BGE_MBX_IRQ0_LO
+ (i
* 8);
1857 sc
->bnx_intr_data
[i
].bnx_intr_rid
= -1;
1858 sc
->bnx_intr_data
[i
].bnx_intr_cpuid
= -1;
1861 sc
->bnx_func_addr
= pci_get_function(dev
);
1862 product
= pci_get_device(dev
);
1864 #ifndef BURN_BRIDGES
1865 if (pci_get_powerstate(dev
) != PCI_POWERSTATE_D0
) {
1868 irq
= pci_read_config(dev
, PCIR_INTLINE
, 4);
1869 mem
= pci_read_config(dev
, BGE_PCI_BAR0
, 4);
1871 device_printf(dev
, "chip is in D%d power mode "
1872 "-- setting to D0\n", pci_get_powerstate(dev
));
1874 pci_set_powerstate(dev
, PCI_POWERSTATE_D0
);
1876 pci_write_config(dev
, PCIR_INTLINE
, irq
, 4);
1877 pci_write_config(dev
, BGE_PCI_BAR0
, mem
, 4);
1879 #endif /* !BURN_BRIDGE */
1882 * Map control/status registers.
1884 pci_enable_busmaster(dev
);
1887 sc
->bnx_res
= bus_alloc_resource_any(dev
, SYS_RES_MEMORY
, &rid
,
1890 if (sc
->bnx_res
== NULL
) {
1891 device_printf(dev
, "couldn't map memory\n");
1895 sc
->bnx_btag
= rman_get_bustag(sc
->bnx_res
);
1896 sc
->bnx_bhandle
= rman_get_bushandle(sc
->bnx_res
);
1898 /* Save various chip information */
1900 pci_read_config(dev
, BGE_PCI_MISC_CTL
, 4) >>
1901 BGE_PCIMISCCTL_ASICREV_SHIFT
;
1902 if (BGE_ASICREV(sc
->bnx_chipid
) == BGE_ASICREV_USE_PRODID_REG
) {
1903 /* All chips having dedicated ASICREV register have CPMU */
1904 sc
->bnx_flags
|= BNX_FLAG_CPMU
;
1907 case PCI_PRODUCT_BROADCOM_BCM5717
:
1908 case PCI_PRODUCT_BROADCOM_BCM5717C
:
1909 case PCI_PRODUCT_BROADCOM_BCM5718
:
1910 case PCI_PRODUCT_BROADCOM_BCM5719
:
1911 case PCI_PRODUCT_BROADCOM_BCM5720_ALT
:
1912 case PCI_PRODUCT_BROADCOM_BCM5725
:
1913 case PCI_PRODUCT_BROADCOM_BCM5727
:
1914 case PCI_PRODUCT_BROADCOM_BCM5762
:
1915 sc
->bnx_chipid
= pci_read_config(dev
,
1916 BGE_PCI_GEN2_PRODID_ASICREV
, 4);
1919 case PCI_PRODUCT_BROADCOM_BCM57761
:
1920 case PCI_PRODUCT_BROADCOM_BCM57762
:
1921 case PCI_PRODUCT_BROADCOM_BCM57765
:
1922 case PCI_PRODUCT_BROADCOM_BCM57766
:
1923 case PCI_PRODUCT_BROADCOM_BCM57781
:
1924 case PCI_PRODUCT_BROADCOM_BCM57782
:
1925 case PCI_PRODUCT_BROADCOM_BCM57785
:
1926 case PCI_PRODUCT_BROADCOM_BCM57786
:
1927 case PCI_PRODUCT_BROADCOM_BCM57791
:
1928 case PCI_PRODUCT_BROADCOM_BCM57795
:
1929 sc
->bnx_chipid
= pci_read_config(dev
,
1930 BGE_PCI_GEN15_PRODID_ASICREV
, 4);
1934 sc
->bnx_chipid
= pci_read_config(dev
,
1935 BGE_PCI_PRODID_ASICREV
, 4);
1939 if (sc
->bnx_chipid
== BGE_CHIPID_BCM5717_C0
)
1940 sc
->bnx_chipid
= BGE_CHIPID_BCM5720_A0
;
1942 sc
->bnx_asicrev
= BGE_ASICREV(sc
->bnx_chipid
);
1943 sc
->bnx_chiprev
= BGE_CHIPREV(sc
->bnx_chipid
);
1945 switch (sc
->bnx_asicrev
) {
1946 case BGE_ASICREV_BCM5717
:
1947 case BGE_ASICREV_BCM5719
:
1948 case BGE_ASICREV_BCM5720
:
1949 sc
->bnx_flags
|= BNX_FLAG_5717_PLUS
| BNX_FLAG_57765_PLUS
;
1952 case BGE_ASICREV_BCM5762
:
1953 sc
->bnx_flags
|= BNX_FLAG_57765_PLUS
;
1956 case BGE_ASICREV_BCM57765
:
1957 case BGE_ASICREV_BCM57766
:
1958 sc
->bnx_flags
|= BNX_FLAG_57765_FAMILY
| BNX_FLAG_57765_PLUS
;
1962 if (sc
->bnx_asicrev
== BGE_ASICREV_BCM5717
||
1963 sc
->bnx_asicrev
== BGE_ASICREV_BCM5719
||
1964 sc
->bnx_asicrev
== BGE_ASICREV_BCM5720
||
1965 sc
->bnx_asicrev
== BGE_ASICREV_BCM5762
)
1966 sc
->bnx_flags
|= BNX_FLAG_APE
;
1968 sc
->bnx_flags
|= BNX_FLAG_TSO
;
1969 if (sc
->bnx_asicrev
== BGE_ASICREV_BCM5719
&&
1970 sc
->bnx_chipid
== BGE_CHIPID_BCM5719_A0
)
1971 sc
->bnx_flags
&= ~BNX_FLAG_TSO
;
1973 if (sc
->bnx_asicrev
== BGE_ASICREV_BCM5717
||
1974 BNX_IS_57765_FAMILY(sc
)) {
1976 * All BCM57785 and BCM5718 families chips have a bug that
1977 * under certain situation interrupt will not be enabled
1978 * even if status tag is written to interrupt mailbox.
1980 * While BCM5719 and BCM5720 have a hardware workaround
1981 * which could fix the above bug.
1982 * See the comment near BGE_PCIDMARWCTL_TAGGED_STATUS_WA in
1985 * For the rest of the chips in these two families, we will
1986 * have to poll the status block at high rate (10ms currently)
1987 * to check whether the interrupt is hosed or not.
1988 * See bnx_check_intr_*() for details.
1990 sc
->bnx_flags
|= BNX_FLAG_STATUSTAG_BUG
;
1993 sc
->bnx_pciecap
= pci_get_pciecap_ptr(sc
->bnx_dev
);
1994 if (sc
->bnx_asicrev
== BGE_ASICREV_BCM5719
||
1995 sc
->bnx_asicrev
== BGE_ASICREV_BCM5720
)
1996 pcie_set_max_readrq(dev
, PCIEM_DEVCTL_MAX_READRQ_2048
);
1998 pcie_set_max_readrq(dev
, PCIEM_DEVCTL_MAX_READRQ_4096
);
1999 device_printf(dev
, "CHIP ID 0x%08x; "
2000 "ASIC REV 0x%02x; CHIP REV 0x%02x\n",
2001 sc
->bnx_chipid
, sc
->bnx_asicrev
, sc
->bnx_chiprev
);
2004 * Set various PHY quirk flags.
2007 capmask
= MII_CAPMASK_DEFAULT
;
2008 if (product
== PCI_PRODUCT_BROADCOM_BCM57791
||
2009 product
== PCI_PRODUCT_BROADCOM_BCM57795
) {
2011 capmask
&= ~BMSR_EXTSTAT
;
2014 mii_priv
|= BRGPHY_FLAG_WIRESPEED
;
2015 if (sc
->bnx_chipid
== BGE_CHIPID_BCM5762_A0
)
2016 mii_priv
|= BRGPHY_FLAG_5762_A0
;
2019 * Chips with APE need BAR2 access for APE registers/memory.
2021 if (sc
->bnx_flags
& BNX_FLAG_APE
) {
2025 sc
->bnx_res2
= bus_alloc_resource_any(dev
, SYS_RES_MEMORY
, &rid
,
2027 if (sc
->bnx_res2
== NULL
) {
2028 device_printf(dev
, "couldn't map BAR2 memory\n");
2033 /* Enable APE register/memory access by host driver. */
2034 pcistate
= pci_read_config(dev
, BGE_PCI_PCISTATE
, 4);
2035 pcistate
|= BGE_PCISTATE_ALLOW_APE_CTLSPC_WR
|
2036 BGE_PCISTATE_ALLOW_APE_SHMEM_WR
|
2037 BGE_PCISTATE_ALLOW_APE_PSPACE_WR
;
2038 pci_write_config(dev
, BGE_PCI_PCISTATE
, pcistate
, 4);
2040 bnx_ape_lock_init(sc
);
2041 bnx_ape_read_fw_ver(sc
);
2044 /* Initialize if_name earlier, so if_printf could be used */
2045 ifp
= &sc
->arpcom
.ac_if
;
2046 if_initname(ifp
, device_get_name(dev
), device_get_unit(dev
));
2049 * Try to reset the chip.
2051 bnx_sig_pre_reset(sc
, BNX_RESET_SHUTDOWN
);
2053 bnx_sig_post_reset(sc
, BNX_RESET_SHUTDOWN
);
2055 if (bnx_chipinit(sc
)) {
2056 device_printf(dev
, "chip initialization failed\n");
2062 * Get station address
2064 error
= bnx_get_eaddr(sc
, ether_addr
);
2066 device_printf(dev
, "failed to read station address\n");
2070 /* Setup RX/TX and interrupt count */
2071 bnx_setup_ring_cnt(sc
);
2073 if ((sc
->bnx_rx_retcnt
== 1 && sc
->bnx_tx_ringcnt
== 1) ||
2074 (sc
->bnx_rx_retcnt
> 1 && sc
->bnx_tx_ringcnt
> 1)) {
2076 * The RX ring and the corresponding TX ring processing
2077 * should be on the same CPU, since they share the same
2080 sc
->bnx_flags
|= BNX_FLAG_RXTX_BUNDLE
;
2082 device_printf(dev
, "RX/TX bundle\n");
2083 if (sc
->bnx_tx_ringcnt
> 1) {
2085 * Multiple TX rings do not share status block
2086 * with link status, so link status will have
2087 * to save its own status_tag.
2089 sc
->bnx_flags
|= BNX_FLAG_STATUS_HASTAG
;
2091 device_printf(dev
, "status needs tag\n");
2094 KKASSERT(sc
->bnx_rx_retcnt
> 1 && sc
->bnx_tx_ringcnt
== 1);
2096 device_printf(dev
, "RX/TX not bundled\n");
2099 error
= bnx_dma_alloc(dev
);
2104 * Allocate interrupt
2106 error
= bnx_alloc_intr(sc
);
2110 /* Setup serializers */
2111 bnx_setup_serialize(sc
);
2113 /* Set default tuneable values. */
2114 sc
->bnx_rx_coal_ticks
= BNX_RX_COAL_TICKS_DEF
;
2115 sc
->bnx_tx_coal_ticks
= BNX_TX_COAL_TICKS_DEF
;
2116 sc
->bnx_rx_coal_bds
= BNX_RX_COAL_BDS_DEF
;
2117 sc
->bnx_rx_coal_bds_poll
= sc
->bnx_rx_ret_ring
[0].bnx_rx_cntmax
;
2118 sc
->bnx_tx_coal_bds
= BNX_TX_COAL_BDS_DEF
;
2119 sc
->bnx_tx_coal_bds_poll
= BNX_TX_COAL_BDS_POLL_DEF
;
2120 sc
->bnx_rx_coal_bds_int
= BNX_RX_COAL_BDS_INT_DEF
;
2121 sc
->bnx_tx_coal_bds_int
= BNX_TX_COAL_BDS_INT_DEF
;
2123 /* Set up ifnet structure */
2125 ifp
->if_flags
= IFF_BROADCAST
| IFF_SIMPLEX
| IFF_MULTICAST
;
2126 ifp
->if_ioctl
= bnx_ioctl
;
2127 ifp
->if_start
= bnx_start
;
2128 #ifdef IFPOLL_ENABLE
2129 ifp
->if_npoll
= bnx_npoll
;
2131 ifp
->if_init
= bnx_init
;
2132 ifp
->if_serialize
= bnx_serialize
;
2133 ifp
->if_deserialize
= bnx_deserialize
;
2134 ifp
->if_tryserialize
= bnx_tryserialize
;
2136 ifp
->if_serialize_assert
= bnx_serialize_assert
;
2138 ifp
->if_mtu
= ETHERMTU
;
2139 ifp
->if_capabilities
= IFCAP_VLAN_HWTAGGING
| IFCAP_VLAN_MTU
;
2141 ifp
->if_capabilities
|= IFCAP_HWCSUM
;
2142 ifp
->if_hwassist
= BNX_CSUM_FEATURES
;
2143 if (sc
->bnx_flags
& BNX_FLAG_TSO
) {
2144 ifp
->if_capabilities
|= IFCAP_TSO
;
2145 ifp
->if_hwassist
|= CSUM_TSO
;
2147 if (BNX_RSS_ENABLED(sc
))
2148 ifp
->if_capabilities
|= IFCAP_RSS
;
2149 ifp
->if_capenable
= ifp
->if_capabilities
;
2151 ifp
->if_nmbclusters
= BGE_STD_RX_RING_CNT
;
2153 ifq_set_maxlen(&ifp
->if_snd
, BGE_TX_RING_CNT
- 1);
2154 ifq_set_ready(&ifp
->if_snd
);
2155 ifq_set_subq_cnt(&ifp
->if_snd
, sc
->bnx_tx_ringcnt
);
2157 if (sc
->bnx_tx_ringcnt
> 1) {
2158 ifp
->if_mapsubq
= ifq_mapsubq_modulo
;
2159 ifq_set_subq_divisor(&ifp
->if_snd
, sc
->bnx_tx_ringcnt
);
2163 * Figure out what sort of media we have by checking the
2164 * hardware config word in the first 32k of NIC internal memory,
2165 * or fall back to examining the EEPROM if necessary.
2166 * Note: on some BCM5700 cards, this value appears to be unset.
2167 * If that's the case, we have to rely on identifying the NIC
2168 * by its PCI subsystem ID, as we do below for the SysKonnect
2171 if (bnx_readmem_ind(sc
, BGE_SRAM_DATA_SIG
) == BGE_SRAM_DATA_SIG_MAGIC
) {
2172 hwcfg
= bnx_readmem_ind(sc
, BGE_SRAM_DATA_CFG
);
2174 if (bnx_read_eeprom(sc
, (caddr_t
)&hwcfg
, BGE_EE_HWCFG_OFFSET
,
2176 device_printf(dev
, "failed to read EEPROM\n");
2180 hwcfg
= ntohl(hwcfg
);
2183 /* The SysKonnect SK-9D41 is a 1000baseSX card. */
2184 if (pci_get_subvendor(dev
) == PCI_PRODUCT_SCHNEIDERKOCH_SK_9D41
||
2185 (hwcfg
& BGE_HWCFG_MEDIA
) == BGE_MEDIA_FIBER
)
2186 sc
->bnx_flags
|= BNX_FLAG_TBI
;
2189 if (sc
->bnx_flags
& BNX_FLAG_CPMU
)
2190 sc
->bnx_mi_mode
= BGE_MIMODE_500KHZ_CONST
;
2192 sc
->bnx_mi_mode
= BGE_MIMODE_BASE
;
2194 /* Setup link status update stuffs */
2195 if (sc
->bnx_flags
& BNX_FLAG_TBI
) {
2196 sc
->bnx_link_upd
= bnx_tbi_link_upd
;
2197 sc
->bnx_link_chg
= BGE_MACSTAT_LINK_CHANGED
;
2198 } else if (sc
->bnx_mi_mode
& BGE_MIMODE_AUTOPOLL
) {
2199 sc
->bnx_link_upd
= bnx_autopoll_link_upd
;
2200 sc
->bnx_link_chg
= BGE_MACSTAT_LINK_CHANGED
;
2202 sc
->bnx_link_upd
= bnx_copper_link_upd
;
2203 sc
->bnx_link_chg
= BGE_MACSTAT_LINK_CHANGED
;
2206 /* Set default PHY address */
2210 * PHY address mapping for various devices.
2212 * | F0 Cu | F0 Sr | F1 Cu | F1 Sr |
2213 * ---------+-------+-------+-------+-------+
2214 * BCM57XX | 1 | X | X | X |
2215 * BCM5717 | 1 | 8 | 2 | 9 |
2216 * BCM5719 | 1 | 8 | 2 | 9 |
2217 * BCM5720 | 1 | 8 | 2 | 9 |
2219 * | F2 Cu | F2 Sr | F3 Cu | F3 Sr |
2220 * ---------+-------+-------+-------+-------+
2221 * BCM57XX | X | X | X | X |
2222 * BCM5717 | X | X | X | X |
2223 * BCM5719 | 3 | 10 | 4 | 11 |
2224 * BCM5720 | X | X | X | X |
2226 * Other addresses may respond but they are not
2227 * IEEE compliant PHYs and should be ignored.
2229 if (BNX_IS_5717_PLUS(sc
)) {
2230 if (sc
->bnx_chipid
== BGE_CHIPID_BCM5717_A0
) {
2231 if (CSR_READ_4(sc
, BGE_SGDIG_STS
) &
2232 BGE_SGDIGSTS_IS_SERDES
)
2233 sc
->bnx_phyno
= sc
->bnx_func_addr
+ 8;
2235 sc
->bnx_phyno
= sc
->bnx_func_addr
+ 1;
2237 if (CSR_READ_4(sc
, BGE_CPMU_PHY_STRAP
) &
2238 BGE_CPMU_PHY_STRAP_IS_SERDES
)
2239 sc
->bnx_phyno
= sc
->bnx_func_addr
+ 8;
2241 sc
->bnx_phyno
= sc
->bnx_func_addr
+ 1;
2245 if (sc
->bnx_flags
& BNX_FLAG_TBI
) {
2246 ifmedia_init(&sc
->bnx_ifmedia
, IFM_IMASK
,
2247 bnx_ifmedia_upd
, bnx_ifmedia_sts
);
2248 ifmedia_add(&sc
->bnx_ifmedia
, IFM_ETHER
|IFM_1000_SX
, 0, NULL
);
2249 ifmedia_add(&sc
->bnx_ifmedia
,
2250 IFM_ETHER
|IFM_1000_SX
|IFM_FDX
, 0, NULL
);
2251 ifmedia_add(&sc
->bnx_ifmedia
, IFM_ETHER
|IFM_AUTO
, 0, NULL
);
2252 ifmedia_set(&sc
->bnx_ifmedia
, IFM_ETHER
|IFM_AUTO
);
2253 sc
->bnx_ifmedia
.ifm_media
= sc
->bnx_ifmedia
.ifm_cur
->ifm_media
;
2255 struct mii_probe_args mii_args
;
2257 mii_probe_args_init(&mii_args
, bnx_ifmedia_upd
, bnx_ifmedia_sts
);
2258 mii_args
.mii_probemask
= 1 << sc
->bnx_phyno
;
2259 mii_args
.mii_capmask
= capmask
;
2260 mii_args
.mii_privtag
= MII_PRIVTAG_BRGPHY
;
2261 mii_args
.mii_priv
= mii_priv
;
2263 error
= mii_probe(dev
, &sc
->bnx_miibus
, &mii_args
);
2265 device_printf(dev
, "MII without any PHY!\n");
2270 ctx
= device_get_sysctl_ctx(sc
->bnx_dev
);
2271 tree
= SYSCTL_CHILDREN(device_get_sysctl_tree(sc
->bnx_dev
));
2273 SYSCTL_ADD_INT(ctx
, tree
, OID_AUTO
,
2274 "rx_rings", CTLFLAG_RD
, &sc
->bnx_rx_retcnt
, 0, "# of RX rings");
2275 SYSCTL_ADD_INT(ctx
, tree
, OID_AUTO
,
2276 "tx_rings", CTLFLAG_RD
, &sc
->bnx_tx_ringcnt
, 0, "# of TX rings");
2278 SYSCTL_ADD_PROC(ctx
, tree
, OID_AUTO
, "rx_coal_ticks",
2279 CTLTYPE_INT
| CTLFLAG_RW
,
2280 sc
, 0, bnx_sysctl_rx_coal_ticks
, "I",
2281 "Receive coalescing ticks (usec).");
2282 SYSCTL_ADD_PROC(ctx
, tree
, OID_AUTO
, "tx_coal_ticks",
2283 CTLTYPE_INT
| CTLFLAG_RW
,
2284 sc
, 0, bnx_sysctl_tx_coal_ticks
, "I",
2285 "Transmit coalescing ticks (usec).");
2286 SYSCTL_ADD_PROC(ctx
, tree
, OID_AUTO
, "rx_coal_bds",
2287 CTLTYPE_INT
| CTLFLAG_RW
,
2288 sc
, 0, bnx_sysctl_rx_coal_bds
, "I",
2289 "Receive max coalesced BD count.");
2290 SYSCTL_ADD_PROC(ctx
, tree
, OID_AUTO
, "rx_coal_bds_poll",
2291 CTLTYPE_INT
| CTLFLAG_RW
,
2292 sc
, 0, bnx_sysctl_rx_coal_bds_poll
, "I",
2293 "Receive max coalesced BD count in polling.");
2294 SYSCTL_ADD_PROC(ctx
, tree
, OID_AUTO
, "tx_coal_bds",
2295 CTLTYPE_INT
| CTLFLAG_RW
,
2296 sc
, 0, bnx_sysctl_tx_coal_bds
, "I",
2297 "Transmit max coalesced BD count.");
2298 SYSCTL_ADD_PROC(ctx
, tree
, OID_AUTO
, "tx_coal_bds_poll",
2299 CTLTYPE_INT
| CTLFLAG_RW
,
2300 sc
, 0, bnx_sysctl_tx_coal_bds_poll
, "I",
2301 "Transmit max coalesced BD count in polling.");
2303 * A common design characteristic for many Broadcom
2304 * client controllers is that they only support a
2305 * single outstanding DMA read operation on the PCIe
2306 * bus. This means that it will take twice as long to
2307 * fetch a TX frame that is split into header and
2308 * payload buffers as it does to fetch a single,
2309 * contiguous TX frame (2 reads vs. 1 read). For these
2310 * controllers, coalescing buffers to reduce the number
2311 * of memory reads is effective way to get maximum
2312 * performance(about 940Mbps). Without collapsing TX
2313 * buffers the maximum TCP bulk transfer performance
2314 * is about 850Mbps. However forcing coalescing mbufs
2315 * consumes a lot of CPU cycles, so leave it off by
2318 SYSCTL_ADD_PROC(ctx
, tree
, OID_AUTO
,
2319 "force_defrag", CTLTYPE_INT
| CTLFLAG_RW
,
2320 sc
, 0, bnx_sysctl_force_defrag
, "I",
2321 "Force defragment on TX path");
2323 SYSCTL_ADD_PROC(ctx
, tree
, OID_AUTO
,
2324 "tx_wreg", CTLTYPE_INT
| CTLFLAG_RW
,
2325 sc
, 0, bnx_sysctl_tx_wreg
, "I",
2326 "# of segments before writing to hardware register");
2328 SYSCTL_ADD_PROC(ctx
, tree
, OID_AUTO
,
2329 "std_refill", CTLTYPE_INT
| CTLFLAG_RW
,
2330 sc
, 0, bnx_sysctl_std_refill
, "I",
2331 "# of packets received before scheduling standard refilling");
2333 SYSCTL_ADD_PROC(ctx
, tree
, OID_AUTO
,
2334 "rx_coal_bds_int", CTLTYPE_INT
| CTLFLAG_RW
,
2335 sc
, 0, bnx_sysctl_rx_coal_bds_int
, "I",
2336 "Receive max coalesced BD count during interrupt.");
2337 SYSCTL_ADD_PROC(ctx
, tree
, OID_AUTO
,
2338 "tx_coal_bds_int", CTLTYPE_INT
| CTLFLAG_RW
,
2339 sc
, 0, bnx_sysctl_tx_coal_bds_int
, "I",
2340 "Transmit max coalesced BD count during interrupt.");
2342 if (sc
->bnx_intr_type
== PCI_INTR_TYPE_MSIX
) {
2343 SYSCTL_ADD_PROC(ctx
, tree
, OID_AUTO
, "tx_cpumap",
2344 CTLTYPE_OPAQUE
| CTLFLAG_RD
,
2345 sc
->bnx_tx_rmap
, 0, if_ringmap_cpumap_sysctl
, "I",
2347 SYSCTL_ADD_PROC(ctx
, tree
, OID_AUTO
, "rx_cpumap",
2348 CTLTYPE_OPAQUE
| CTLFLAG_RD
,
2349 sc
->bnx_rx_rmap
, 0, if_ringmap_cpumap_sysctl
, "I",
2352 #ifdef IFPOLL_ENABLE
2353 SYSCTL_ADD_PROC(ctx
, tree
, OID_AUTO
, "tx_poll_cpumap",
2354 CTLTYPE_OPAQUE
| CTLFLAG_RD
,
2355 sc
->bnx_tx_rmap
, 0, if_ringmap_cpumap_sysctl
, "I",
2357 SYSCTL_ADD_PROC(ctx
, tree
, OID_AUTO
, "rx_poll_cpumap",
2358 CTLTYPE_OPAQUE
| CTLFLAG_RD
,
2359 sc
->bnx_rx_rmap
, 0, if_ringmap_cpumap_sysctl
, "I",
2364 #ifdef BNX_RSS_DEBUG
2365 SYSCTL_ADD_INT(ctx
, tree
, OID_AUTO
,
2366 "std_refill_mask", CTLFLAG_RD
,
2367 &sc
->bnx_rx_std_ring
.bnx_rx_std_refill
, 0, "");
2368 SYSCTL_ADD_INT(ctx
, tree
, OID_AUTO
,
2369 "std_used", CTLFLAG_RD
,
2370 &sc
->bnx_rx_std_ring
.bnx_rx_std_used
, 0, "");
2371 SYSCTL_ADD_INT(ctx
, tree
, OID_AUTO
,
2372 "rss_debug", CTLFLAG_RW
, &sc
->bnx_rss_debug
, 0, "");
2373 for (i
= 0; i
< sc
->bnx_rx_retcnt
; ++i
) {
2374 ksnprintf(desc
, sizeof(desc
), "rx_pkt%d", i
);
2375 SYSCTL_ADD_ULONG(ctx
, tree
, OID_AUTO
,
2376 desc
, CTLFLAG_RW
, &sc
->bnx_rx_ret_ring
[i
].bnx_rx_pkt
, "");
2378 ksnprintf(desc
, sizeof(desc
), "rx_force_sched%d", i
);
2379 SYSCTL_ADD_ULONG(ctx
, tree
, OID_AUTO
,
2381 &sc
->bnx_rx_ret_ring
[i
].bnx_rx_force_sched
, "");
2384 #ifdef BNX_TSS_DEBUG
2385 for (i
= 0; i
< sc
->bnx_tx_ringcnt
; ++i
) {
2386 ksnprintf(desc
, sizeof(desc
), "tx_pkt%d", i
);
2387 SYSCTL_ADD_ULONG(ctx
, tree
, OID_AUTO
,
2388 desc
, CTLFLAG_RW
, &sc
->bnx_tx_ring
[i
].bnx_tx_pkt
, "");
2392 SYSCTL_ADD_ULONG(ctx
, tree
, OID_AUTO
,
2393 "norxbds", CTLFLAG_RW
, &sc
->bnx_norxbds
, "");
2395 SYSCTL_ADD_ULONG(ctx
, tree
, OID_AUTO
,
2396 "errors", CTLFLAG_RW
, &sc
->bnx_errors
, "");
2398 #ifdef BNX_TSO_DEBUG
2399 for (i
= 0; i
< BNX_TSO_NSTATS
; ++i
) {
2400 ksnprintf(desc
, sizeof(desc
), "tso%d", i
+ 1);
2401 SYSCTL_ADD_ULONG(ctx
, tree
, OID_AUTO
,
2402 desc
, CTLFLAG_RW
, &sc
->bnx_tsosegs
[i
], "");
2407 * Call MI attach routine.
2409 ether_ifattach(ifp
, ether_addr
, NULL
);
2411 /* Setup TX rings and subqueues */
2412 for (i
= 0; i
< sc
->bnx_tx_ringcnt
; ++i
) {
2413 struct ifaltq_subque
*ifsq
= ifq_get_subq(&ifp
->if_snd
, i
);
2414 struct bnx_tx_ring
*txr
= &sc
->bnx_tx_ring
[i
];
2416 ifsq_set_cpuid(ifsq
, txr
->bnx_tx_cpuid
);
2417 ifsq_set_hw_serialize(ifsq
, &txr
->bnx_tx_serialize
);
2418 ifsq_set_priv(ifsq
, txr
);
2419 txr
->bnx_ifsq
= ifsq
;
2421 ifsq_watchdog_init(&txr
->bnx_tx_watchdog
, ifsq
, bnx_watchdog
);
2424 device_printf(dev
, "txr %d -> cpu%d\n", i
,
2429 error
= bnx_setup_intr(sc
);
2431 ether_ifdetach(ifp
);
2434 bnx_set_tick_cpuid(sc
, FALSE
);
2437 * Create RX standard ring refilling thread
2439 std_cpuid_def
= if_ringmap_cpumap(sc
->bnx_rx_rmap
, 0);
2440 std_cpuid
= device_getenv_int(dev
, "std.cpuid", std_cpuid_def
);
2441 if (std_cpuid
< 0 || std_cpuid
>= ncpus
) {
2442 device_printf(dev
, "invalid std.cpuid %d, use %d\n",
2443 std_cpuid
, std_cpuid_def
);
2444 std_cpuid
= std_cpuid_def
;
2447 std
= &sc
->bnx_rx_std_ring
;
2448 lwkt_create(bnx_rx_std_refill_ithread
, std
, &std
->bnx_rx_std_ithread
,
2449 NULL
, TDF_NOSTART
| TDF_INTTHREAD
, std_cpuid
,
2450 "%s std", device_get_nameunit(dev
));
2451 lwkt_setpri(std
->bnx_rx_std_ithread
, TDPRI_INT_MED
);
2452 std
->bnx_rx_std_ithread
->td_preemptable
= lwkt_preempt
;
2461 bnx_detach(device_t dev
)
2463 struct bnx_softc
*sc
= device_get_softc(dev
);
2464 struct bnx_rx_std_ring
*std
= &sc
->bnx_rx_std_ring
;
2466 if (device_is_attached(dev
)) {
2467 struct ifnet
*ifp
= &sc
->arpcom
.ac_if
;
2469 ifnet_serialize_all(ifp
);
2471 bnx_teardown_intr(sc
, sc
->bnx_intr_cnt
);
2472 ifnet_deserialize_all(ifp
);
2474 ether_ifdetach(ifp
);
2477 if (std
->bnx_rx_std_ithread
!= NULL
) {
2478 tsleep_interlock(std
, 0);
2480 if (std
->bnx_rx_std_ithread
->td_gd
== mycpu
) {
2481 bnx_rx_std_refill_stop(std
);
2483 lwkt_send_ipiq(std
->bnx_rx_std_ithread
->td_gd
,
2484 bnx_rx_std_refill_stop
, std
);
2487 tsleep(std
, PINTERLOCKED
, "bnx_detach", 0);
2489 device_printf(dev
, "RX std ithread exited\n");
2491 lwkt_synchronize_ipiqs("bnx_detach_ipiq");
2494 if (sc
->bnx_flags
& BNX_FLAG_TBI
)
2495 ifmedia_removeall(&sc
->bnx_ifmedia
);
2497 device_delete_child(dev
, sc
->bnx_miibus
);
2498 bus_generic_detach(dev
);
2502 if (sc
->bnx_msix_mem_res
!= NULL
) {
2503 bus_release_resource(dev
, SYS_RES_MEMORY
, sc
->bnx_msix_mem_rid
,
2504 sc
->bnx_msix_mem_res
);
2506 if (sc
->bnx_res
!= NULL
) {
2507 bus_release_resource(dev
, SYS_RES_MEMORY
,
2508 BGE_PCI_BAR0
, sc
->bnx_res
);
2510 if (sc
->bnx_res2
!= NULL
) {
2511 bus_release_resource(dev
, SYS_RES_MEMORY
,
2512 PCIR_BAR(2), sc
->bnx_res2
);
2517 if (sc
->bnx_serialize
!= NULL
)
2518 kfree(sc
->bnx_serialize
, M_DEVBUF
);
2520 if (sc
->bnx_rx_rmap
!= NULL
)
2521 if_ringmap_free(sc
->bnx_rx_rmap
);
2522 if (sc
->bnx_tx_rmap
!= NULL
)
2523 if_ringmap_free(sc
->bnx_tx_rmap
);
2529 bnx_reset(struct bnx_softc
*sc
)
2531 device_t dev
= sc
->bnx_dev
;
2532 uint32_t cachesize
, command
, reset
, mac_mode
, mac_mode_mask
;
2533 void (*write_op
)(struct bnx_softc
*, uint32_t, uint32_t);
2537 mac_mode_mask
= BGE_MACMODE_HALF_DUPLEX
| BGE_MACMODE_PORTMODE
;
2538 if (sc
->bnx_mfw_flags
& BNX_MFW_ON_APE
)
2539 mac_mode_mask
|= BGE_MACMODE_APE_RX_EN
| BGE_MACMODE_APE_TX_EN
;
2540 mac_mode
= CSR_READ_4(sc
, BGE_MAC_MODE
) & mac_mode_mask
;
2542 write_op
= bnx_writemem_direct
;
2544 CSR_WRITE_4(sc
, BGE_NVRAM_SWARB
, BGE_NVRAMSWARB_SET1
);
2545 for (i
= 0; i
< 8000; i
++) {
2546 if (CSR_READ_4(sc
, BGE_NVRAM_SWARB
) & BGE_NVRAMSWARB_GNT1
)
2551 if_printf(&sc
->arpcom
.ac_if
, "NVRAM lock timedout!\n");
2553 /* Take APE lock when performing reset. */
2554 bnx_ape_lock(sc
, BGE_APE_LOCK_GRC
);
2556 /* Save some important PCI state. */
2557 cachesize
= pci_read_config(dev
, BGE_PCI_CACHESZ
, 4);
2558 command
= pci_read_config(dev
, BGE_PCI_CMD
, 4);
2560 pci_write_config(dev
, BGE_PCI_MISC_CTL
,
2561 BGE_PCIMISCCTL_INDIRECT_ACCESS
|BGE_PCIMISCCTL_MASK_PCI_INTR
|
2562 BGE_HIF_SWAP_OPTIONS
|BGE_PCIMISCCTL_PCISTATE_RW
|
2563 BGE_PCIMISCCTL_TAGGED_STATUS
, 4);
2565 /* Disable fastboot on controllers that support it. */
2567 if_printf(&sc
->arpcom
.ac_if
, "Disabling fastboot\n");
2568 CSR_WRITE_4(sc
, BGE_FASTBOOT_PC
, 0x0);
2571 * Write the magic number to SRAM at offset 0xB50.
2572 * When firmware finishes its initialization it will
2573 * write ~BGE_SRAM_FW_MB_MAGIC to the same location.
2575 bnx_writemem_ind(sc
, BGE_SRAM_FW_MB
, BGE_SRAM_FW_MB_MAGIC
);
2577 reset
= BGE_MISCCFG_RESET_CORE_CLOCKS
|(65<<1);
2579 /* XXX: Broadcom Linux driver. */
2580 /* Force PCI-E 1.0a mode */
2581 if (!BNX_IS_57765_PLUS(sc
) &&
2582 CSR_READ_4(sc
, BGE_PCIE_PHY_TSTCTL
) ==
2583 (BGE_PCIE_PHY_TSTCTL_PSCRAM
|
2584 BGE_PCIE_PHY_TSTCTL_PCIE10
)) {
2585 CSR_WRITE_4(sc
, BGE_PCIE_PHY_TSTCTL
,
2586 BGE_PCIE_PHY_TSTCTL_PSCRAM
);
2588 if (sc
->bnx_chipid
!= BGE_CHIPID_BCM5750_A0
) {
2589 /* Prevent PCIE link training during global reset */
2590 CSR_WRITE_4(sc
, BGE_MISC_CFG
, (1<<29));
2595 * Set GPHY Power Down Override to leave GPHY
2596 * powered up in D0 uninitialized.
2598 if ((sc
->bnx_flags
& BNX_FLAG_CPMU
) == 0)
2599 reset
|= BGE_MISCCFG_GPHY_PD_OVERRIDE
;
2601 /* Issue global reset */
2602 write_op(sc
, BGE_MISC_CFG
, reset
);
2606 /* XXX: Broadcom Linux driver. */
2607 if (sc
->bnx_chipid
== BGE_CHIPID_BCM5750_A0
) {
2610 DELAY(500000); /* wait for link training to complete */
2611 v
= pci_read_config(dev
, 0xc4, 4);
2612 pci_write_config(dev
, 0xc4, v
| (1<<15), 4);
2615 devctl
= pci_read_config(dev
, sc
->bnx_pciecap
+ PCIER_DEVCTRL
, 2);
2617 /* Disable no snoop and disable relaxed ordering. */
2618 devctl
&= ~(PCIEM_DEVCTL_RELAX_ORDER
| PCIEM_DEVCTL_NOSNOOP
);
2620 /* Old PCI-E chips only support 128 bytes Max PayLoad Size. */
2621 if ((sc
->bnx_flags
& BNX_FLAG_CPMU
) == 0) {
2622 devctl
&= ~PCIEM_DEVCTL_MAX_PAYLOAD_MASK
;
2623 devctl
|= PCIEM_DEVCTL_MAX_PAYLOAD_128
;
2626 pci_write_config(dev
, sc
->bnx_pciecap
+ PCIER_DEVCTRL
,
2629 /* Clear error status. */
2630 pci_write_config(dev
, sc
->bnx_pciecap
+ PCIER_DEVSTS
,
2631 PCIEM_DEVSTS_CORR_ERR
|
2632 PCIEM_DEVSTS_NFATAL_ERR
|
2633 PCIEM_DEVSTS_FATAL_ERR
|
2634 PCIEM_DEVSTS_UNSUPP_REQ
, 2);
2636 /* Reset some of the PCI state that got zapped by reset */
2637 pci_write_config(dev
, BGE_PCI_MISC_CTL
,
2638 BGE_PCIMISCCTL_INDIRECT_ACCESS
|BGE_PCIMISCCTL_MASK_PCI_INTR
|
2639 BGE_HIF_SWAP_OPTIONS
|BGE_PCIMISCCTL_PCISTATE_RW
|
2640 BGE_PCIMISCCTL_TAGGED_STATUS
, 4);
2641 val
= BGE_PCISTATE_ROM_ENABLE
| BGE_PCISTATE_ROM_RETRY_ENABLE
;
2642 if (sc
->bnx_mfw_flags
& BNX_MFW_ON_APE
) {
2643 val
|= BGE_PCISTATE_ALLOW_APE_CTLSPC_WR
|
2644 BGE_PCISTATE_ALLOW_APE_SHMEM_WR
|
2645 BGE_PCISTATE_ALLOW_APE_PSPACE_WR
;
2647 pci_write_config(dev
, BGE_PCI_PCISTATE
, val
, 4);
2648 pci_write_config(dev
, BGE_PCI_CACHESZ
, cachesize
, 4);
2649 pci_write_config(dev
, BGE_PCI_CMD
, command
, 4);
2651 /* Enable memory arbiter */
2652 CSR_WRITE_4(sc
, BGE_MARB_MODE
, BGE_MARBMODE_ENABLE
);
2654 /* Fix up byte swapping */
2655 CSR_WRITE_4(sc
, BGE_MODE_CTL
, bnx_dma_swap_options(sc
));
2657 val
= CSR_READ_4(sc
, BGE_MAC_MODE
);
2658 val
= (val
& ~mac_mode_mask
) | mac_mode
;
2659 CSR_WRITE_4(sc
, BGE_MAC_MODE
, val
);
2662 bnx_ape_unlock(sc
, BGE_APE_LOCK_GRC
);
2665 * Poll until we see the 1's complement of the magic number.
2666 * This indicates that the firmware initialization is complete.
2668 for (i
= 0; i
< BNX_FIRMWARE_TIMEOUT
; i
++) {
2669 val
= bnx_readmem_ind(sc
, BGE_SRAM_FW_MB
);
2670 if (val
== ~BGE_SRAM_FW_MB_MAGIC
)
2674 if (i
== BNX_FIRMWARE_TIMEOUT
) {
2675 if_printf(&sc
->arpcom
.ac_if
, "firmware handshake "
2676 "timed out, found 0x%08x\n", val
);
2679 /* BCM57765 A0 needs additional time before accessing. */
2680 if (sc
->bnx_chipid
== BGE_CHIPID_BCM57765_A0
)
2684 * The 5704 in TBI mode apparently needs some special
2685 * adjustment to insure the SERDES drive level is set
2688 if (sc
->bnx_asicrev
== BGE_ASICREV_BCM5704
&&
2689 (sc
->bnx_flags
& BNX_FLAG_TBI
)) {
2692 serdescfg
= CSR_READ_4(sc
, BGE_SERDES_CFG
);
2693 serdescfg
= (serdescfg
& ~0xFFF) | 0x880;
2694 CSR_WRITE_4(sc
, BGE_SERDES_CFG
, serdescfg
);
2697 CSR_WRITE_4(sc
, BGE_MI_MODE
,
2698 sc
->bnx_mi_mode
& ~BGE_MIMODE_AUTOPOLL
);
2701 /* XXX: Broadcom Linux driver. */
2702 if (!BNX_IS_57765_PLUS(sc
)) {
2705 /* Enable Data FIFO protection. */
2706 v
= CSR_READ_4(sc
, BGE_PCIE_TLDLPL_PORT
);
2707 CSR_WRITE_4(sc
, BGE_PCIE_TLDLPL_PORT
, v
| (1 << 25));
2712 if (sc
->bnx_asicrev
== BGE_ASICREV_BCM5720
) {
2713 BNX_CLRBIT(sc
, BGE_CPMU_CLCK_ORIDE
,
2714 CPMU_CLCK_ORIDE_MAC_ORIDE_EN
);
2719 * Frame reception handling. This is called if there's a frame
2720 * on the receive return list.
2722 * Note: we have to be able to handle two possibilities here:
2723 * 1) the frame is from the jumbo recieve ring
2724 * 2) the frame is from the standard receive ring
2728 bnx_rxeof(struct bnx_rx_ret_ring
*ret
, uint16_t rx_prod
, int count
)
2730 struct bnx_softc
*sc
= ret
->bnx_sc
;
2731 struct bnx_rx_std_ring
*std
= ret
->bnx_std
;
2732 struct ifnet
*ifp
= &sc
->arpcom
.ac_if
;
2733 int std_used
= 0, cpuid
= mycpuid
;
2735 while (ret
->bnx_rx_saved_considx
!= rx_prod
&& count
!= 0) {
2736 struct pktinfo pi0
, *pi
= NULL
;
2737 struct bge_rx_bd
*cur_rx
;
2738 struct bnx_rx_buf
*rb
;
2740 struct mbuf
*m
= NULL
;
2741 uint16_t vlan_tag
= 0;
2746 cur_rx
= &ret
->bnx_rx_ret_ring
[ret
->bnx_rx_saved_considx
];
2748 rxidx
= cur_rx
->bge_idx
;
2749 KKASSERT(rxidx
< BGE_STD_RX_RING_CNT
);
2751 BNX_INC(ret
->bnx_rx_saved_considx
, BNX_RETURN_RING_CNT
);
2752 #ifdef BNX_RSS_DEBUG
2756 if (cur_rx
->bge_flags
& BGE_RXBDFLAG_VLAN_TAG
) {
2758 vlan_tag
= cur_rx
->bge_vlan_tag
;
2761 if (ret
->bnx_rx_cnt
>= ret
->bnx_rx_cntmax
) {
2762 atomic_add_int(&std
->bnx_rx_std_used
, std_used
);
2765 bnx_rx_std_refill_sched(ret
, std
);
2770 rb
= &std
->bnx_rx_std_buf
[rxidx
];
2771 m
= rb
->bnx_rx_mbuf
;
2772 if (cur_rx
->bge_flags
& BGE_RXBDFLAG_ERROR
) {
2773 IFNET_STAT_INC(ifp
, ierrors
, 1);
2775 rb
->bnx_rx_refilled
= 1;
2778 if (bnx_newbuf_std(ret
, rxidx
, 0)) {
2779 IFNET_STAT_INC(ifp
, ierrors
, 1);
2783 IFNET_STAT_INC(ifp
, ipackets
, 1);
2784 m
->m_pkthdr
.len
= m
->m_len
= cur_rx
->bge_len
- ETHER_CRC_LEN
;
2785 m
->m_pkthdr
.rcvif
= ifp
;
2787 if ((ifp
->if_capenable
& IFCAP_RXCSUM
) &&
2788 (cur_rx
->bge_flags
& BGE_RXBDFLAG_IPV6
) == 0) {
2789 if (cur_rx
->bge_flags
& BGE_RXBDFLAG_IP_CSUM
) {
2790 m
->m_pkthdr
.csum_flags
|= CSUM_IP_CHECKED
;
2791 if ((cur_rx
->bge_error_flag
&
2792 BGE_RXERRFLAG_IP_CSUM_NOK
) == 0)
2793 m
->m_pkthdr
.csum_flags
|= CSUM_IP_VALID
;
2795 if (cur_rx
->bge_flags
& BGE_RXBDFLAG_TCP_UDP_CSUM
) {
2796 m
->m_pkthdr
.csum_data
=
2797 cur_rx
->bge_tcp_udp_csum
;
2798 m
->m_pkthdr
.csum_flags
|= CSUM_DATA_VALID
|
2802 if (ifp
->if_capenable
& IFCAP_RSS
) {
2803 pi
= bnx_rss_info(&pi0
, cur_rx
);
2805 (cur_rx
->bge_flags
& BGE_RXBDFLAG_RSS_HASH
))
2806 m_sethash(m
, toeplitz_hash(cur_rx
->bge_hash
));
2810 * If we received a packet with a vlan tag, pass it
2811 * to vlan_input() instead of ether_input().
2814 m
->m_flags
|= M_VLANTAG
;
2815 m
->m_pkthdr
.ether_vlantag
= vlan_tag
;
2817 ifp
->if_input(ifp
, m
, pi
, cpuid
);
2819 bnx_writembx(sc
, ret
->bnx_rx_mbx
, ret
->bnx_rx_saved_considx
);
2824 cur_std_used
= atomic_fetchadd_int(&std
->bnx_rx_std_used
,
2826 if (cur_std_used
+ std_used
>= (BGE_STD_RX_RING_CNT
/ 2)) {
2827 #ifdef BNX_RSS_DEBUG
2828 ret
->bnx_rx_force_sched
++;
2830 bnx_rx_std_refill_sched(ret
, std
);
2836 bnx_txeof(struct bnx_tx_ring
*txr
, uint16_t tx_cons
)
2838 struct ifnet
*ifp
= &txr
->bnx_sc
->arpcom
.ac_if
;
2841 * Go through our tx ring and free mbufs for those
2842 * frames that have been sent.
2844 while (txr
->bnx_tx_saved_considx
!= tx_cons
) {
2845 struct bnx_tx_buf
*buf
;
2848 idx
= txr
->bnx_tx_saved_considx
;
2849 buf
= &txr
->bnx_tx_buf
[idx
];
2850 if (buf
->bnx_tx_mbuf
!= NULL
) {
2851 IFNET_STAT_INC(ifp
, opackets
, 1);
2852 #ifdef BNX_TSS_DEBUG
2855 bus_dmamap_unload(txr
->bnx_tx_mtag
,
2856 buf
->bnx_tx_dmamap
);
2857 m_freem(buf
->bnx_tx_mbuf
);
2858 buf
->bnx_tx_mbuf
= NULL
;
2861 BNX_INC(txr
->bnx_tx_saved_considx
, BGE_TX_RING_CNT
);
2864 if ((BGE_TX_RING_CNT
- txr
->bnx_tx_cnt
) >=
2865 (BNX_NSEG_RSVD
+ BNX_NSEG_SPARE
))
2866 ifsq_clr_oactive(txr
->bnx_ifsq
);
2868 if (txr
->bnx_tx_cnt
== 0)
2869 txr
->bnx_tx_watchdog
.wd_timer
= 0;
2871 if (!ifsq_is_empty(txr
->bnx_ifsq
))
2872 ifsq_devstart(txr
->bnx_ifsq
);
2876 bnx_handle_status(struct bnx_softc
*sc
)
2881 status
= *sc
->bnx_hw_status
;
2883 if (status
& BGE_STATFLAG_ERROR
) {
2889 val
= CSR_READ_4(sc
, BGE_FLOW_ATTN
);
2890 if (val
& ~BGE_FLOWATTN_MB_LOWAT
) {
2891 if_printf(&sc
->arpcom
.ac_if
,
2892 "flow attn 0x%08x\n", val
);
2896 val
= CSR_READ_4(sc
, BGE_MSI_STATUS
);
2897 if (val
& ~BGE_MSISTAT_MSI_PCI_REQ
) {
2898 if_printf(&sc
->arpcom
.ac_if
,
2899 "msi status 0x%08x\n", val
);
2903 val
= CSR_READ_4(sc
, BGE_RDMA_STATUS
);
2905 if_printf(&sc
->arpcom
.ac_if
,
2906 "rmda status 0x%08x\n", val
);
2910 val
= CSR_READ_4(sc
, BGE_WDMA_STATUS
);
2912 if_printf(&sc
->arpcom
.ac_if
,
2913 "wdma status 0x%08x\n", val
);
2918 bnx_serialize_skipmain(sc
);
2920 bnx_deserialize_skipmain(sc
);
2925 if ((status
& BGE_STATFLAG_LINKSTATE_CHANGED
) || sc
->bnx_link_evt
) {
2927 if_printf(&sc
->arpcom
.ac_if
, "link change, "
2928 "link_evt %d\n", sc
->bnx_link_evt
);
2937 #ifdef IFPOLL_ENABLE
2940 bnx_npoll_rx(struct ifnet
*ifp __unused
, void *xret
, int cycle
)
2942 struct bnx_rx_ret_ring
*ret
= xret
;
2945 ASSERT_SERIALIZED(&ret
->bnx_rx_ret_serialize
);
2947 ret
->bnx_saved_status_tag
= *ret
->bnx_hw_status_tag
;
2950 rx_prod
= *ret
->bnx_rx_considx
;
2951 if (ret
->bnx_rx_saved_considx
!= rx_prod
)
2952 bnx_rxeof(ret
, rx_prod
, cycle
);
2956 bnx_npoll_tx_notag(struct ifnet
*ifp __unused
, void *xtxr
, int cycle __unused
)
2958 struct bnx_tx_ring
*txr
= xtxr
;
2961 ASSERT_SERIALIZED(&txr
->bnx_tx_serialize
);
2963 tx_cons
= *txr
->bnx_tx_considx
;
2964 if (txr
->bnx_tx_saved_considx
!= tx_cons
)
2965 bnx_txeof(txr
, tx_cons
);
2969 bnx_npoll_tx(struct ifnet
*ifp
, void *xtxr
, int cycle
)
2971 struct bnx_tx_ring
*txr
= xtxr
;
2973 ASSERT_SERIALIZED(&txr
->bnx_tx_serialize
);
2975 txr
->bnx_saved_status_tag
= *txr
->bnx_hw_status_tag
;
2977 bnx_npoll_tx_notag(ifp
, txr
, cycle
);
2981 bnx_npoll_status_notag(struct ifnet
*ifp
)
2983 struct bnx_softc
*sc
= ifp
->if_softc
;
2985 ASSERT_SERIALIZED(&sc
->bnx_main_serialize
);
2987 if (bnx_handle_status(sc
)) {
2989 * Status changes are handled; force the chip to
2990 * update the status block to reflect whether there
2991 * are more status changes or not, else staled status
2992 * changes are always seen.
2994 BNX_SETBIT(sc
, BGE_HCC_MODE
, BGE_HCCMODE_COAL_NOW
);
2999 bnx_npoll_status(struct ifnet
*ifp
)
3001 struct bnx_softc
*sc
= ifp
->if_softc
;
3003 ASSERT_SERIALIZED(&sc
->bnx_main_serialize
);
3005 sc
->bnx_saved_status_tag
= *sc
->bnx_hw_status_tag
;
3007 bnx_npoll_status_notag(ifp
);
3011 bnx_npoll(struct ifnet
*ifp
, struct ifpoll_info
*info
)
3013 struct bnx_softc
*sc
= ifp
->if_softc
;
3016 ASSERT_IFNET_SERIALIZED_ALL(ifp
);
3019 if (sc
->bnx_flags
& BNX_FLAG_STATUS_HASTAG
)
3020 info
->ifpi_status
.status_func
= bnx_npoll_status
;
3022 info
->ifpi_status
.status_func
= bnx_npoll_status_notag
;
3023 info
->ifpi_status
.serializer
= &sc
->bnx_main_serialize
;
3025 for (i
= 0; i
< sc
->bnx_tx_ringcnt
; ++i
) {
3026 struct bnx_tx_ring
*txr
= &sc
->bnx_tx_ring
[i
];
3027 int cpu
= if_ringmap_cpumap(sc
->bnx_tx_rmap
, i
);
3029 KKASSERT(cpu
< netisr_ncpus
);
3030 if (sc
->bnx_flags
& BNX_FLAG_RXTX_BUNDLE
) {
3031 info
->ifpi_tx
[cpu
].poll_func
=
3034 info
->ifpi_tx
[cpu
].poll_func
= bnx_npoll_tx
;
3036 info
->ifpi_tx
[cpu
].arg
= txr
;
3037 info
->ifpi_tx
[cpu
].serializer
= &txr
->bnx_tx_serialize
;
3038 ifsq_set_cpuid(txr
->bnx_ifsq
, cpu
);
3041 for (i
= 0; i
< sc
->bnx_rx_retcnt
; ++i
) {
3042 struct bnx_rx_ret_ring
*ret
= &sc
->bnx_rx_ret_ring
[i
];
3043 int cpu
= if_ringmap_cpumap(sc
->bnx_rx_rmap
, i
);
3045 KKASSERT(cpu
< netisr_ncpus
);
3046 info
->ifpi_rx
[cpu
].poll_func
= bnx_npoll_rx
;
3047 info
->ifpi_rx
[cpu
].arg
= ret
;
3048 info
->ifpi_rx
[cpu
].serializer
=
3049 &ret
->bnx_rx_ret_serialize
;
3052 if (ifp
->if_flags
& IFF_RUNNING
) {
3053 bnx_disable_intr(sc
);
3054 bnx_set_tick_cpuid(sc
, TRUE
);
3056 sc
->bnx_coal_chg
= BNX_TX_COAL_BDS_CHG
|
3057 BNX_RX_COAL_BDS_CHG
;
3058 bnx_coal_change(sc
);
3061 for (i
= 0; i
< sc
->bnx_tx_ringcnt
; ++i
) {
3062 ifsq_set_cpuid(sc
->bnx_tx_ring
[i
].bnx_ifsq
,
3063 sc
->bnx_tx_ring
[i
].bnx_tx_cpuid
);
3065 if (ifp
->if_flags
& IFF_RUNNING
) {
3066 sc
->bnx_coal_chg
= BNX_TX_COAL_BDS_CHG
|
3067 BNX_RX_COAL_BDS_CHG
;
3068 bnx_coal_change(sc
);
3070 bnx_enable_intr(sc
);
3071 bnx_set_tick_cpuid(sc
, FALSE
);
3076 #endif /* IFPOLL_ENABLE */
3079 bnx_intr_legacy(void *xsc
)
3081 struct bnx_softc
*sc
= xsc
;
3082 struct bnx_rx_ret_ring
*ret
= &sc
->bnx_rx_ret_ring
[0];
3084 if (ret
->bnx_saved_status_tag
== *ret
->bnx_hw_status_tag
) {
3087 val
= pci_read_config(sc
->bnx_dev
, BGE_PCI_PCISTATE
, 4);
3088 if (val
& BGE_PCISTAT_INTR_NOTACT
)
3094 * Interrupt will have to be disabled if tagged status
3095 * is used, else interrupt will always be asserted on
3096 * certain chips (at least on BCM5750 AX/BX).
3098 bnx_writembx(sc
, BGE_MBX_IRQ0_LO
, 1);
3110 bnx_intr(struct bnx_softc
*sc
)
3112 struct ifnet
*ifp
= &sc
->arpcom
.ac_if
;
3113 struct bnx_rx_ret_ring
*ret
= &sc
->bnx_rx_ret_ring
[0];
3115 ASSERT_SERIALIZED(&sc
->bnx_main_serialize
);
3117 ret
->bnx_saved_status_tag
= *ret
->bnx_hw_status_tag
;
3119 * Use a load fence to ensure that status_tag is saved
3120 * before rx_prod, tx_cons and status.
3124 bnx_handle_status(sc
);
3126 if (ifp
->if_flags
& IFF_RUNNING
) {
3127 struct bnx_tx_ring
*txr
= &sc
->bnx_tx_ring
[0];
3128 uint16_t rx_prod
, tx_cons
;
3130 lwkt_serialize_enter(&ret
->bnx_rx_ret_serialize
);
3131 rx_prod
= *ret
->bnx_rx_considx
;
3132 if (ret
->bnx_rx_saved_considx
!= rx_prod
)
3133 bnx_rxeof(ret
, rx_prod
, -1);
3134 lwkt_serialize_exit(&ret
->bnx_rx_ret_serialize
);
3136 lwkt_serialize_enter(&txr
->bnx_tx_serialize
);
3137 tx_cons
= *txr
->bnx_tx_considx
;
3138 if (txr
->bnx_tx_saved_considx
!= tx_cons
)
3139 bnx_txeof(txr
, tx_cons
);
3140 lwkt_serialize_exit(&txr
->bnx_tx_serialize
);
3143 bnx_writembx(sc
, BGE_MBX_IRQ0_LO
, ret
->bnx_saved_status_tag
<< 24);
3147 bnx_msix_tx_status(void *xtxr
)
3149 struct bnx_tx_ring
*txr
= xtxr
;
3150 struct bnx_softc
*sc
= txr
->bnx_sc
;
3151 struct ifnet
*ifp
= &sc
->arpcom
.ac_if
;
3153 ASSERT_SERIALIZED(&sc
->bnx_main_serialize
);
3155 txr
->bnx_saved_status_tag
= *txr
->bnx_hw_status_tag
;
3157 * Use a load fence to ensure that status_tag is saved
3158 * before tx_cons and status.
3162 bnx_handle_status(sc
);
3164 if (ifp
->if_flags
& IFF_RUNNING
) {
3167 lwkt_serialize_enter(&txr
->bnx_tx_serialize
);
3168 tx_cons
= *txr
->bnx_tx_considx
;
3169 if (txr
->bnx_tx_saved_considx
!= tx_cons
)
3170 bnx_txeof(txr
, tx_cons
);
3171 lwkt_serialize_exit(&txr
->bnx_tx_serialize
);
3174 bnx_writembx(sc
, BGE_MBX_IRQ0_LO
, txr
->bnx_saved_status_tag
<< 24);
3178 bnx_msix_rx(void *xret
)
3180 struct bnx_rx_ret_ring
*ret
= xret
;
3183 ASSERT_SERIALIZED(&ret
->bnx_rx_ret_serialize
);
3185 ret
->bnx_saved_status_tag
= *ret
->bnx_hw_status_tag
;
3187 * Use a load fence to ensure that status_tag is saved
3192 rx_prod
= *ret
->bnx_rx_considx
;
3193 if (ret
->bnx_rx_saved_considx
!= rx_prod
)
3194 bnx_rxeof(ret
, rx_prod
, -1);
3196 bnx_writembx(ret
->bnx_sc
, ret
->bnx_msix_mbx
,
3197 ret
->bnx_saved_status_tag
<< 24);
3201 bnx_msix_rxtx(void *xret
)
3203 struct bnx_rx_ret_ring
*ret
= xret
;
3204 struct bnx_tx_ring
*txr
= ret
->bnx_txr
;
3205 uint16_t rx_prod
, tx_cons
;
3207 ASSERT_SERIALIZED(&ret
->bnx_rx_ret_serialize
);
3209 ret
->bnx_saved_status_tag
= *ret
->bnx_hw_status_tag
;
3211 * Use a load fence to ensure that status_tag is saved
3212 * before rx_prod and tx_cons.
3216 rx_prod
= *ret
->bnx_rx_considx
;
3217 if (ret
->bnx_rx_saved_considx
!= rx_prod
)
3218 bnx_rxeof(ret
, rx_prod
, -1);
3220 lwkt_serialize_enter(&txr
->bnx_tx_serialize
);
3221 tx_cons
= *txr
->bnx_tx_considx
;
3222 if (txr
->bnx_tx_saved_considx
!= tx_cons
)
3223 bnx_txeof(txr
, tx_cons
);
3224 lwkt_serialize_exit(&txr
->bnx_tx_serialize
);
3226 bnx_writembx(ret
->bnx_sc
, ret
->bnx_msix_mbx
,
3227 ret
->bnx_saved_status_tag
<< 24);
3231 bnx_msix_status(void *xsc
)
3233 struct bnx_softc
*sc
= xsc
;
3235 ASSERT_SERIALIZED(&sc
->bnx_main_serialize
);
3237 sc
->bnx_saved_status_tag
= *sc
->bnx_hw_status_tag
;
3239 * Use a load fence to ensure that status_tag is saved
3244 bnx_handle_status(sc
);
3246 bnx_writembx(sc
, BGE_MBX_IRQ0_LO
, sc
->bnx_saved_status_tag
<< 24);
3252 struct bnx_softc
*sc
= xsc
;
3254 lwkt_serialize_enter(&sc
->bnx_main_serialize
);
3256 bnx_stats_update_regs(sc
);
3258 if (sc
->bnx_flags
& BNX_FLAG_TBI
) {
3260 * Since in TBI mode auto-polling can't be used we should poll
3261 * link status manually. Here we register pending link event
3262 * and trigger interrupt.
3265 BNX_SETBIT(sc
, BGE_HCC_MODE
, BGE_HCCMODE_COAL_NOW
);
3266 } else if (!sc
->bnx_link
) {
3267 mii_tick(device_get_softc(sc
->bnx_miibus
));
3270 callout_reset_bycpu(&sc
->bnx_tick_timer
, hz
, bnx_tick
, sc
,
3271 sc
->bnx_tick_cpuid
);
3273 lwkt_serialize_exit(&sc
->bnx_main_serialize
);
3277 bnx_stats_update_regs(struct bnx_softc
*sc
)
3279 struct ifnet
*ifp
= &sc
->arpcom
.ac_if
;
3280 struct bge_mac_stats_regs stats
;
3284 s
= (uint32_t *)&stats
;
3285 for (i
= 0; i
< sizeof(struct bge_mac_stats_regs
); i
+= 4) {
3286 *s
= CSR_READ_4(sc
, BGE_RX_STATS
+ i
);
3290 IFNET_STAT_SET(ifp
, collisions
,
3291 (stats
.dot3StatsSingleCollisionFrames
+
3292 stats
.dot3StatsMultipleCollisionFrames
+
3293 stats
.dot3StatsExcessiveCollisions
+
3294 stats
.dot3StatsLateCollisions
));
3296 val
= CSR_READ_4(sc
, BGE_RXLP_LOCSTAT_OUT_OF_BDS
);
3297 sc
->bnx_norxbds
+= val
;
3301 * Encapsulate an mbuf chain in the tx ring by coupling the mbuf data
3302 * pointers to descriptors.
3305 bnx_encap(struct bnx_tx_ring
*txr
, struct mbuf
**m_head0
, uint32_t *txidx
,
3308 struct bge_tx_bd
*d
= NULL
;
3309 uint16_t csum_flags
= 0, vlan_tag
= 0, mss
= 0;
3310 bus_dma_segment_t segs
[BNX_NSEG_NEW
];
3312 int error
, maxsegs
, nsegs
, idx
, i
;
3313 struct mbuf
*m_head
= *m_head0
, *m_new
;
3315 if (m_head
->m_pkthdr
.csum_flags
& CSUM_TSO
) {
3316 #ifdef BNX_TSO_DEBUG
3320 error
= bnx_setup_tso(txr
, m_head0
, &mss
, &csum_flags
);
3325 #ifdef BNX_TSO_DEBUG
3326 tso_nsegs
= (m_head
->m_pkthdr
.len
/
3327 m_head
->m_pkthdr
.tso_segsz
) - 1;
3328 if (tso_nsegs
> (BNX_TSO_NSTATS
- 1))
3329 tso_nsegs
= BNX_TSO_NSTATS
- 1;
3330 else if (tso_nsegs
< 0)
3332 txr
->bnx_sc
->bnx_tsosegs
[tso_nsegs
]++;
3334 } else if (m_head
->m_pkthdr
.csum_flags
& BNX_CSUM_FEATURES
) {
3335 if (m_head
->m_pkthdr
.csum_flags
& CSUM_IP
)
3336 csum_flags
|= BGE_TXBDFLAG_IP_CSUM
;
3337 if (m_head
->m_pkthdr
.csum_flags
& (CSUM_TCP
| CSUM_UDP
))
3338 csum_flags
|= BGE_TXBDFLAG_TCP_UDP_CSUM
;
3339 if (m_head
->m_flags
& M_LASTFRAG
)
3340 csum_flags
|= BGE_TXBDFLAG_IP_FRAG_END
;
3341 else if (m_head
->m_flags
& M_FRAG
)
3342 csum_flags
|= BGE_TXBDFLAG_IP_FRAG
;
3344 if (m_head
->m_flags
& M_VLANTAG
) {
3345 csum_flags
|= BGE_TXBDFLAG_VLAN_TAG
;
3346 vlan_tag
= m_head
->m_pkthdr
.ether_vlantag
;
3350 map
= txr
->bnx_tx_buf
[idx
].bnx_tx_dmamap
;
3352 maxsegs
= (BGE_TX_RING_CNT
- txr
->bnx_tx_cnt
) - BNX_NSEG_RSVD
;
3353 KASSERT(maxsegs
>= BNX_NSEG_SPARE
,
3354 ("not enough segments %d", maxsegs
));
3356 if (maxsegs
> BNX_NSEG_NEW
)
3357 maxsegs
= BNX_NSEG_NEW
;
3360 * Pad outbound frame to BGE_MIN_FRAMELEN for an unusual reason.
3361 * The bge hardware will pad out Tx runts to BGE_MIN_FRAMELEN,
3362 * but when such padded frames employ the bge IP/TCP checksum
3363 * offload, the hardware checksum assist gives incorrect results
3364 * (possibly from incorporating its own padding into the UDP/TCP
3365 * checksum; who knows). If we pad such runts with zeros, the
3366 * onboard checksum comes out correct.
3368 if ((csum_flags
& BGE_TXBDFLAG_TCP_UDP_CSUM
) &&
3369 m_head
->m_pkthdr
.len
< BNX_MIN_FRAMELEN
) {
3370 error
= m_devpad(m_head
, BNX_MIN_FRAMELEN
);
3375 if ((txr
->bnx_tx_flags
& BNX_TX_FLAG_SHORTDMA
) &&
3376 m_head
->m_next
!= NULL
) {
3377 m_new
= bnx_defrag_shortdma(m_head
);
3378 if (m_new
== NULL
) {
3382 *m_head0
= m_head
= m_new
;
3384 if ((m_head
->m_pkthdr
.csum_flags
& CSUM_TSO
) == 0 &&
3385 (txr
->bnx_tx_flags
& BNX_TX_FLAG_FORCE_DEFRAG
) &&
3386 m_head
->m_next
!= NULL
) {
3388 * Forcefully defragment mbuf chain to overcome hardware
3389 * limitation which only support a single outstanding
3390 * DMA read operation. If it fails, keep moving on using
3391 * the original mbuf chain.
3393 m_new
= m_defrag(m_head
, M_NOWAIT
);
3395 *m_head0
= m_head
= m_new
;
3398 error
= bus_dmamap_load_mbuf_defrag(txr
->bnx_tx_mtag
, map
,
3399 m_head0
, segs
, maxsegs
, &nsegs
, BUS_DMA_NOWAIT
);
3402 *segs_used
+= nsegs
;
3405 bus_dmamap_sync(txr
->bnx_tx_mtag
, map
, BUS_DMASYNC_PREWRITE
);
3407 for (i
= 0; ; i
++) {
3408 d
= &txr
->bnx_tx_ring
[idx
];
3410 d
->bge_addr
.bge_addr_lo
= BGE_ADDR_LO(segs
[i
].ds_addr
);
3411 d
->bge_addr
.bge_addr_hi
= BGE_ADDR_HI(segs
[i
].ds_addr
);
3412 d
->bge_len
= segs
[i
].ds_len
;
3413 d
->bge_flags
= csum_flags
;
3414 d
->bge_vlan_tag
= vlan_tag
;
3419 BNX_INC(idx
, BGE_TX_RING_CNT
);
3421 /* Mark the last segment as end of packet... */
3422 d
->bge_flags
|= BGE_TXBDFLAG_END
;
3425 * Insure that the map for this transmission is placed at
3426 * the array index of the last descriptor in this chain.
3428 txr
->bnx_tx_buf
[*txidx
].bnx_tx_dmamap
= txr
->bnx_tx_buf
[idx
].bnx_tx_dmamap
;
3429 txr
->bnx_tx_buf
[idx
].bnx_tx_dmamap
= map
;
3430 txr
->bnx_tx_buf
[idx
].bnx_tx_mbuf
= m_head
;
3431 txr
->bnx_tx_cnt
+= nsegs
;
3433 BNX_INC(idx
, BGE_TX_RING_CNT
);
3444 * Main transmit routine. To avoid having to do mbuf copies, we put pointers
3445 * to the mbuf data regions directly in the transmit descriptors.
3448 bnx_start(struct ifnet
*ifp
, struct ifaltq_subque
*ifsq
)
3450 struct bnx_tx_ring
*txr
= ifsq_get_priv(ifsq
);
3451 struct mbuf
*m_head
= NULL
;
3455 KKASSERT(txr
->bnx_ifsq
== ifsq
);
3456 ASSERT_SERIALIZED(&txr
->bnx_tx_serialize
);
3458 if ((ifp
->if_flags
& IFF_RUNNING
) == 0 || ifsq_is_oactive(ifsq
))
3461 prodidx
= txr
->bnx_tx_prodidx
;
3463 while (txr
->bnx_tx_buf
[prodidx
].bnx_tx_mbuf
== NULL
) {
3465 * Sanity check: avoid coming within BGE_NSEG_RSVD
3466 * descriptors of the end of the ring. Also make
3467 * sure there are BGE_NSEG_SPARE descriptors for
3468 * jumbo buffers' or TSO segments' defragmentation.
3470 if ((BGE_TX_RING_CNT
- txr
->bnx_tx_cnt
) <
3471 (BNX_NSEG_RSVD
+ BNX_NSEG_SPARE
)) {
3472 ifsq_set_oactive(ifsq
);
3476 m_head
= ifsq_dequeue(ifsq
);
3481 * Pack the data into the transmit ring. If we
3482 * don't have room, set the OACTIVE flag and wait
3483 * for the NIC to drain the ring.
3485 if (bnx_encap(txr
, &m_head
, &prodidx
, &nsegs
)) {
3486 ifsq_set_oactive(ifsq
);
3487 IFNET_STAT_INC(ifp
, oerrors
, 1);
3491 if (nsegs
>= txr
->bnx_tx_wreg
) {
3493 bnx_writembx(txr
->bnx_sc
, txr
->bnx_tx_mbx
, prodidx
);
3497 ETHER_BPF_MTAP(ifp
, m_head
);
3500 * Set a timeout in case the chip goes out to lunch.
3502 txr
->bnx_tx_watchdog
.wd_timer
= 5;
3507 bnx_writembx(txr
->bnx_sc
, txr
->bnx_tx_mbx
, prodidx
);
3509 txr
->bnx_tx_prodidx
= prodidx
;
3515 struct bnx_softc
*sc
= xsc
;
3516 struct ifnet
*ifp
= &sc
->arpcom
.ac_if
;
3522 ASSERT_IFNET_SERIALIZED_ALL(ifp
);
3524 /* Cancel pending I/O and flush buffers. */
3527 bnx_sig_pre_reset(sc
, BNX_RESET_START
);
3529 bnx_sig_post_reset(sc
, BNX_RESET_START
);
3534 * Init the various state machines, ring
3535 * control blocks and firmware.
3537 if (bnx_blockinit(sc
)) {
3538 if_printf(ifp
, "initialization failure\n");
3544 CSR_WRITE_4(sc
, BGE_RX_MTU
, ifp
->if_mtu
+
3545 ETHER_HDR_LEN
+ ETHER_CRC_LEN
+ EVL_ENCAPLEN
);
3547 /* Load our MAC address. */
3548 m
= (uint16_t *)&sc
->arpcom
.ac_enaddr
[0];
3549 CSR_WRITE_4(sc
, BGE_MAC_ADDR1_LO
, htons(m
[0]));
3550 CSR_WRITE_4(sc
, BGE_MAC_ADDR1_HI
, (htons(m
[1]) << 16) | htons(m
[2]));
3552 /* Enable or disable promiscuous mode as needed. */
3555 /* Program multicast filter. */
3559 if (bnx_init_rx_ring_std(&sc
->bnx_rx_std_ring
)) {
3560 if_printf(ifp
, "RX ring initialization failed\n");
3565 /* Init jumbo RX ring. */
3566 if (ifp
->if_mtu
> (ETHERMTU
+ ETHER_HDR_LEN
+ ETHER_CRC_LEN
)) {
3567 if (bnx_init_rx_ring_jumbo(sc
)) {
3568 if_printf(ifp
, "Jumbo RX ring initialization failed\n");
3574 /* Init our RX return ring index */
3575 for (i
= 0; i
< sc
->bnx_rx_retcnt
; ++i
) {
3576 struct bnx_rx_ret_ring
*ret
= &sc
->bnx_rx_ret_ring
[i
];
3578 ret
->bnx_rx_saved_considx
= 0;
3579 ret
->bnx_rx_cnt
= 0;
3583 for (i
= 0; i
< sc
->bnx_tx_ringcnt
; ++i
)
3584 bnx_init_tx_ring(&sc
->bnx_tx_ring
[i
]);
3586 /* Enable TX MAC state machine lockup fix. */
3587 mode
= CSR_READ_4(sc
, BGE_TX_MODE
);
3588 mode
|= BGE_TXMODE_MBUF_LOCKUP_FIX
;
3589 if (sc
->bnx_asicrev
== BGE_ASICREV_BCM5720
||
3590 sc
->bnx_asicrev
== BGE_ASICREV_BCM5762
) {
3591 mode
&= ~(BGE_TXMODE_JMB_FRM_LEN
| BGE_TXMODE_CNT_DN_MODE
);
3592 mode
|= CSR_READ_4(sc
, BGE_TX_MODE
) &
3593 (BGE_TXMODE_JMB_FRM_LEN
| BGE_TXMODE_CNT_DN_MODE
);
3595 /* Turn on transmitter */
3596 CSR_WRITE_4(sc
, BGE_TX_MODE
, mode
| BGE_TXMODE_ENABLE
);
3599 /* Initialize RSS */
3600 mode
= BGE_RXMODE_ENABLE
| BGE_RXMODE_IPV6_ENABLE
;
3601 if (BNX_RSS_ENABLED(sc
)) {
3603 mode
|= BGE_RXMODE_RSS_ENABLE
|
3604 BGE_RXMODE_RSS_HASH_MASK_BITS
|
3605 BGE_RXMODE_RSS_IPV4_HASH
|
3606 BGE_RXMODE_RSS_TCP_IPV4_HASH
;
3608 /* Turn on receiver */
3609 BNX_SETBIT(sc
, BGE_RX_MODE
, mode
);
3613 * Set the number of good frames to receive after RX MBUF
3614 * Low Watermark has been reached. After the RX MAC receives
3615 * this number of frames, it will drop subsequent incoming
3616 * frames until the MBUF High Watermark is reached.
3618 if (BNX_IS_57765_FAMILY(sc
))
3619 CSR_WRITE_4(sc
, BGE_MAX_RX_FRAME_LOWAT
, 1);
3621 CSR_WRITE_4(sc
, BGE_MAX_RX_FRAME_LOWAT
, 2);
3623 if (sc
->bnx_intr_type
== PCI_INTR_TYPE_MSI
||
3624 sc
->bnx_intr_type
== PCI_INTR_TYPE_MSIX
) {
3626 if_printf(ifp
, "MSI_MODE: %#x\n",
3627 CSR_READ_4(sc
, BGE_MSI_MODE
));
3631 /* Tell firmware we're alive. */
3632 BNX_SETBIT(sc
, BGE_MODE_CTL
, BGE_MODECTL_STACKUP
);
3634 /* Enable host interrupts if polling(4) is not enabled. */
3635 PCI_SETBIT(sc
->bnx_dev
, BGE_PCI_MISC_CTL
, BGE_PCIMISCCTL_CLEAR_INTA
, 4);
3638 #ifdef IFPOLL_ENABLE
3639 if (ifp
->if_flags
& IFF_NPOLLING
)
3643 bnx_disable_intr(sc
);
3645 bnx_enable_intr(sc
);
3646 bnx_set_tick_cpuid(sc
, polling
);
3648 ifp
->if_flags
|= IFF_RUNNING
;
3649 for (i
= 0; i
< sc
->bnx_tx_ringcnt
; ++i
) {
3650 struct bnx_tx_ring
*txr
= &sc
->bnx_tx_ring
[i
];
3652 ifsq_clr_oactive(txr
->bnx_ifsq
);
3653 ifsq_watchdog_start(&txr
->bnx_tx_watchdog
);
3656 bnx_ifmedia_upd(ifp
);
3658 callout_reset_bycpu(&sc
->bnx_tick_timer
, hz
, bnx_tick
, sc
,
3659 sc
->bnx_tick_cpuid
);
3663 * Set media options.
3666 bnx_ifmedia_upd(struct ifnet
*ifp
)
3668 struct bnx_softc
*sc
= ifp
->if_softc
;
3670 /* If this is a 1000baseX NIC, enable the TBI port. */
3671 if (sc
->bnx_flags
& BNX_FLAG_TBI
) {
3672 struct ifmedia
*ifm
= &sc
->bnx_ifmedia
;
3674 if (IFM_TYPE(ifm
->ifm_media
) != IFM_ETHER
)
3677 switch(IFM_SUBTYPE(ifm
->ifm_media
)) {
3682 if ((ifm
->ifm_media
& IFM_GMASK
) == IFM_FDX
) {
3683 BNX_CLRBIT(sc
, BGE_MAC_MODE
,
3684 BGE_MACMODE_HALF_DUPLEX
);
3686 BNX_SETBIT(sc
, BGE_MAC_MODE
,
3687 BGE_MACMODE_HALF_DUPLEX
);
3695 struct mii_data
*mii
= device_get_softc(sc
->bnx_miibus
);
3699 if (mii
->mii_instance
) {
3700 struct mii_softc
*miisc
;
3702 LIST_FOREACH(miisc
, &mii
->mii_phys
, mii_list
)
3703 mii_phy_reset(miisc
);
3708 * Force an interrupt so that we will call bnx_link_upd
3709 * if needed and clear any pending link state attention.
3710 * Without this we are not getting any further interrupts
3711 * for link state changes and thus will not UP the link and
3712 * not be able to send in bnx_start. The only way to get
3713 * things working was to receive a packet and get an RX
3716 * bnx_tick should help for fiber cards and we might not
3717 * need to do this here if BNX_FLAG_TBI is set but as
3718 * we poll for fiber anyway it should not harm.
3720 BNX_SETBIT(sc
, BGE_HCC_MODE
, BGE_HCCMODE_COAL_NOW
);
3726 * Report current media status.
3729 bnx_ifmedia_sts(struct ifnet
*ifp
, struct ifmediareq
*ifmr
)
3731 struct bnx_softc
*sc
= ifp
->if_softc
;
3733 if ((ifp
->if_flags
& IFF_RUNNING
) == 0)
3736 if (sc
->bnx_flags
& BNX_FLAG_TBI
) {
3737 ifmr
->ifm_status
= IFM_AVALID
;
3738 ifmr
->ifm_active
= IFM_ETHER
;
3739 if (CSR_READ_4(sc
, BGE_MAC_STS
) &
3740 BGE_MACSTAT_TBI_PCS_SYNCHED
) {
3741 ifmr
->ifm_status
|= IFM_ACTIVE
;
3743 ifmr
->ifm_active
|= IFM_NONE
;
3747 ifmr
->ifm_active
|= IFM_1000_SX
;
3748 if (CSR_READ_4(sc
, BGE_MAC_MODE
) & BGE_MACMODE_HALF_DUPLEX
)
3749 ifmr
->ifm_active
|= IFM_HDX
;
3751 ifmr
->ifm_active
|= IFM_FDX
;
3753 struct mii_data
*mii
= device_get_softc(sc
->bnx_miibus
);
3756 ifmr
->ifm_active
= mii
->mii_media_active
;
3757 ifmr
->ifm_status
= mii
->mii_media_status
;
3762 bnx_ioctl(struct ifnet
*ifp
, u_long command
, caddr_t data
, struct ucred
*cr
)
3764 struct bnx_softc
*sc
= ifp
->if_softc
;
3765 struct ifreq
*ifr
= (struct ifreq
*)data
;
3766 int mask
, error
= 0;
3768 ASSERT_IFNET_SERIALIZED_ALL(ifp
);
3772 if ((!BNX_IS_JUMBO_CAPABLE(sc
) && ifr
->ifr_mtu
> ETHERMTU
) ||
3773 (BNX_IS_JUMBO_CAPABLE(sc
) &&
3774 ifr
->ifr_mtu
> BNX_JUMBO_MTU
)) {
3776 } else if (ifp
->if_mtu
!= ifr
->ifr_mtu
) {
3777 ifp
->if_mtu
= ifr
->ifr_mtu
;
3778 if (ifp
->if_flags
& IFF_RUNNING
)
3783 if (ifp
->if_flags
& IFF_UP
) {
3784 if (ifp
->if_flags
& IFF_RUNNING
) {
3785 mask
= ifp
->if_flags
^ sc
->bnx_if_flags
;
3788 * If only the state of the PROMISC flag
3789 * changed, then just use the 'set promisc
3790 * mode' command instead of reinitializing
3791 * the entire NIC. Doing a full re-init
3792 * means reloading the firmware and waiting
3793 * for it to start up, which may take a
3794 * second or two. Similarly for ALLMULTI.
3796 if (mask
& IFF_PROMISC
)
3798 if (mask
& IFF_ALLMULTI
)
3803 } else if (ifp
->if_flags
& IFF_RUNNING
) {
3806 sc
->bnx_if_flags
= ifp
->if_flags
;
3810 if (ifp
->if_flags
& IFF_RUNNING
)
3815 if (sc
->bnx_flags
& BNX_FLAG_TBI
) {
3816 error
= ifmedia_ioctl(ifp
, ifr
,
3817 &sc
->bnx_ifmedia
, command
);
3819 struct mii_data
*mii
;
3821 mii
= device_get_softc(sc
->bnx_miibus
);
3822 error
= ifmedia_ioctl(ifp
, ifr
,
3823 &mii
->mii_media
, command
);
3827 mask
= ifr
->ifr_reqcap
^ ifp
->if_capenable
;
3828 if (mask
& IFCAP_HWCSUM
) {
3829 ifp
->if_capenable
^= (mask
& IFCAP_HWCSUM
);
3830 if (ifp
->if_capenable
& IFCAP_TXCSUM
)
3831 ifp
->if_hwassist
|= BNX_CSUM_FEATURES
;
3833 ifp
->if_hwassist
&= ~BNX_CSUM_FEATURES
;
3835 if (mask
& IFCAP_TSO
) {
3836 ifp
->if_capenable
^= (mask
& IFCAP_TSO
);
3837 if (ifp
->if_capenable
& IFCAP_TSO
)
3838 ifp
->if_hwassist
|= CSUM_TSO
;
3840 ifp
->if_hwassist
&= ~CSUM_TSO
;
3842 if (mask
& IFCAP_RSS
)
3843 ifp
->if_capenable
^= IFCAP_RSS
;
3846 error
= ether_ioctl(ifp
, command
, data
);
3853 bnx_watchdog(struct ifaltq_subque
*ifsq
)
3855 struct ifnet
*ifp
= ifsq_get_ifp(ifsq
);
3856 struct bnx_softc
*sc
= ifp
->if_softc
;
3859 ASSERT_IFNET_SERIALIZED_ALL(ifp
);
3861 if_printf(ifp
, "watchdog timeout -- resetting\n");
3865 IFNET_STAT_INC(ifp
, oerrors
, 1);
3867 for (i
= 0; i
< sc
->bnx_tx_ringcnt
; ++i
)
3868 ifsq_devstart_sched(sc
->bnx_tx_ring
[i
].bnx_ifsq
);
3872 * Stop the adapter and free any mbufs allocated to the
3876 bnx_stop(struct bnx_softc
*sc
)
3878 struct ifnet
*ifp
= &sc
->arpcom
.ac_if
;
3881 ASSERT_IFNET_SERIALIZED_ALL(ifp
);
3883 callout_stop(&sc
->bnx_tick_timer
);
3885 /* Disable host interrupts. */
3886 bnx_disable_intr(sc
);
3889 * Tell firmware we're shutting down.
3891 bnx_sig_pre_reset(sc
, BNX_RESET_SHUTDOWN
);
3894 * Disable all of the receiver blocks
3896 bnx_stop_block(sc
, BGE_RX_MODE
, BGE_RXMODE_ENABLE
);
3897 bnx_stop_block(sc
, BGE_RBDI_MODE
, BGE_RBDIMODE_ENABLE
);
3898 bnx_stop_block(sc
, BGE_RXLP_MODE
, BGE_RXLPMODE_ENABLE
);
3899 bnx_stop_block(sc
, BGE_RDBDI_MODE
, BGE_RBDIMODE_ENABLE
);
3900 bnx_stop_block(sc
, BGE_RDC_MODE
, BGE_RDCMODE_ENABLE
);
3901 bnx_stop_block(sc
, BGE_RBDC_MODE
, BGE_RBDCMODE_ENABLE
);
3904 * Disable all of the transmit blocks
3906 bnx_stop_block(sc
, BGE_SRS_MODE
, BGE_SRSMODE_ENABLE
);
3907 bnx_stop_block(sc
, BGE_SBDI_MODE
, BGE_SBDIMODE_ENABLE
);
3908 bnx_stop_block(sc
, BGE_SDI_MODE
, BGE_SDIMODE_ENABLE
);
3909 bnx_stop_block(sc
, BGE_RDMA_MODE
, BGE_RDMAMODE_ENABLE
);
3910 bnx_stop_block(sc
, BGE_SDC_MODE
, BGE_SDCMODE_ENABLE
);
3911 bnx_stop_block(sc
, BGE_SBDC_MODE
, BGE_SBDCMODE_ENABLE
);
3914 * Shut down all of the memory managers and related
3917 bnx_stop_block(sc
, BGE_HCC_MODE
, BGE_HCCMODE_ENABLE
);
3918 bnx_stop_block(sc
, BGE_WDMA_MODE
, BGE_WDMAMODE_ENABLE
);
3919 CSR_WRITE_4(sc
, BGE_FTQ_RESET
, 0xFFFFFFFF);
3920 CSR_WRITE_4(sc
, BGE_FTQ_RESET
, 0);
3923 bnx_sig_post_reset(sc
, BNX_RESET_SHUTDOWN
);
3926 * Tell firmware we're shutting down.
3928 BNX_CLRBIT(sc
, BGE_MODE_CTL
, BGE_MODECTL_STACKUP
);
3930 /* Free the RX lists. */
3931 bnx_free_rx_ring_std(&sc
->bnx_rx_std_ring
);
3933 /* Free jumbo RX list. */
3934 if (BNX_IS_JUMBO_CAPABLE(sc
))
3935 bnx_free_rx_ring_jumbo(sc
);
3937 /* Free TX buffers. */
3938 for (i
= 0; i
< sc
->bnx_tx_ringcnt
; ++i
) {
3939 struct bnx_tx_ring
*txr
= &sc
->bnx_tx_ring
[i
];
3941 txr
->bnx_saved_status_tag
= 0;
3942 bnx_free_tx_ring(txr
);
3945 /* Clear saved status tag */
3946 for (i
= 0; i
< sc
->bnx_rx_retcnt
; ++i
)
3947 sc
->bnx_rx_ret_ring
[i
].bnx_saved_status_tag
= 0;
3950 sc
->bnx_coal_chg
= 0;
3952 ifp
->if_flags
&= ~IFF_RUNNING
;
3953 for (i
= 0; i
< sc
->bnx_tx_ringcnt
; ++i
) {
3954 struct bnx_tx_ring
*txr
= &sc
->bnx_tx_ring
[i
];
3956 ifsq_clr_oactive(txr
->bnx_ifsq
);
3957 ifsq_watchdog_stop(&txr
->bnx_tx_watchdog
);
3962 * Stop all chip I/O so that the kernel's probe routines don't
3963 * get confused by errant DMAs when rebooting.
3966 bnx_shutdown(device_t dev
)
3968 struct bnx_softc
*sc
= device_get_softc(dev
);
3969 struct ifnet
*ifp
= &sc
->arpcom
.ac_if
;
3971 ifnet_serialize_all(ifp
);
3973 ifnet_deserialize_all(ifp
);
3977 bnx_suspend(device_t dev
)
3979 struct bnx_softc
*sc
= device_get_softc(dev
);
3980 struct ifnet
*ifp
= &sc
->arpcom
.ac_if
;
3982 ifnet_serialize_all(ifp
);
3984 ifnet_deserialize_all(ifp
);
3990 bnx_resume(device_t dev
)
3992 struct bnx_softc
*sc
= device_get_softc(dev
);
3993 struct ifnet
*ifp
= &sc
->arpcom
.ac_if
;
3995 ifnet_serialize_all(ifp
);
3997 if (ifp
->if_flags
& IFF_UP
) {
4001 for (i
= 0; i
< sc
->bnx_tx_ringcnt
; ++i
)
4002 ifsq_devstart_sched(sc
->bnx_tx_ring
[i
].bnx_ifsq
);
4005 ifnet_deserialize_all(ifp
);
4011 bnx_setpromisc(struct bnx_softc
*sc
)
4013 struct ifnet
*ifp
= &sc
->arpcom
.ac_if
;
4015 if (ifp
->if_flags
& IFF_PROMISC
)
4016 BNX_SETBIT(sc
, BGE_RX_MODE
, BGE_RXMODE_RX_PROMISC
);
4018 BNX_CLRBIT(sc
, BGE_RX_MODE
, BGE_RXMODE_RX_PROMISC
);
4022 bnx_dma_free(struct bnx_softc
*sc
)
4024 struct bnx_rx_std_ring
*std
= &sc
->bnx_rx_std_ring
;
4027 /* Destroy RX return rings */
4028 if (sc
->bnx_rx_ret_ring
!= NULL
) {
4029 for (i
= 0; i
< sc
->bnx_rx_retcnt
; ++i
)
4030 bnx_destroy_rx_ret_ring(&sc
->bnx_rx_ret_ring
[i
]);
4031 kfree(sc
->bnx_rx_ret_ring
, M_DEVBUF
);
4034 /* Destroy RX mbuf DMA stuffs. */
4035 if (std
->bnx_rx_mtag
!= NULL
) {
4036 for (i
= 0; i
< BGE_STD_RX_RING_CNT
; i
++) {
4037 KKASSERT(std
->bnx_rx_std_buf
[i
].bnx_rx_mbuf
== NULL
);
4038 bus_dmamap_destroy(std
->bnx_rx_mtag
,
4039 std
->bnx_rx_std_buf
[i
].bnx_rx_dmamap
);
4041 bus_dma_tag_destroy(std
->bnx_rx_mtag
);
4044 /* Destroy standard RX ring */
4045 bnx_dma_block_free(std
->bnx_rx_std_ring_tag
,
4046 std
->bnx_rx_std_ring_map
, std
->bnx_rx_std_ring
);
4048 /* Destroy TX rings */
4049 if (sc
->bnx_tx_ring
!= NULL
) {
4050 for (i
= 0; i
< sc
->bnx_tx_ringcnt
; ++i
)
4051 bnx_destroy_tx_ring(&sc
->bnx_tx_ring
[i
]);
4052 kfree(sc
->bnx_tx_ring
, M_DEVBUF
);
4055 if (BNX_IS_JUMBO_CAPABLE(sc
))
4056 bnx_free_jumbo_mem(sc
);
4058 /* Destroy status blocks */
4059 for (i
= 0; i
< sc
->bnx_intr_cnt
; ++i
) {
4060 struct bnx_intr_data
*intr
= &sc
->bnx_intr_data
[i
];
4062 bnx_dma_block_free(intr
->bnx_status_tag
,
4063 intr
->bnx_status_map
, intr
->bnx_status_block
);
4066 /* Destroy the parent tag */
4067 if (sc
->bnx_cdata
.bnx_parent_tag
!= NULL
)
4068 bus_dma_tag_destroy(sc
->bnx_cdata
.bnx_parent_tag
);
4072 bnx_dma_alloc(device_t dev
)
4074 struct bnx_softc
*sc
= device_get_softc(dev
);
4075 struct bnx_rx_std_ring
*std
= &sc
->bnx_rx_std_ring
;
4079 * Allocate the parent bus DMA tag appropriate for PCI.
4081 * All of the NetExtreme/NetLink controllers have 4GB boundary
4083 * Whenever an address crosses a multiple of the 4GB boundary
4084 * (including 4GB, 8Gb, 12Gb, etc.) and makes the transition
4085 * from 0xX_FFFF_FFFF to 0x(X+1)_0000_0000 an internal DMA
4086 * state machine will lockup and cause the device to hang.
4088 error
= bus_dma_tag_create(NULL
, 1, BGE_DMA_BOUNDARY_4G
,
4089 BUS_SPACE_MAXADDR
, BUS_SPACE_MAXADDR
, NULL
, NULL
,
4090 BUS_SPACE_MAXSIZE_32BIT
, 0, BUS_SPACE_MAXSIZE_32BIT
,
4091 0, &sc
->bnx_cdata
.bnx_parent_tag
);
4093 device_printf(dev
, "could not create parent DMA tag\n");
4098 * Create DMA stuffs for status blocks.
4100 for (i
= 0; i
< sc
->bnx_intr_cnt
; ++i
) {
4101 struct bnx_intr_data
*intr
= &sc
->bnx_intr_data
[i
];
4103 error
= bnx_dma_block_alloc(sc
,
4104 __VM_CACHELINE_ALIGN(BGE_STATUS_BLK_SZ
),
4105 &intr
->bnx_status_tag
, &intr
->bnx_status_map
,
4106 (void *)&intr
->bnx_status_block
,
4107 &intr
->bnx_status_block_paddr
);
4110 "could not create %dth status block\n", i
);
4114 sc
->bnx_hw_status
= &sc
->bnx_intr_data
[0].bnx_status_block
->bge_status
;
4115 if (sc
->bnx_flags
& BNX_FLAG_STATUS_HASTAG
) {
4116 sc
->bnx_hw_status_tag
=
4117 &sc
->bnx_intr_data
[0].bnx_status_block
->bge_status_tag
;
4121 * Create DMA tag and maps for RX mbufs.
4124 lwkt_serialize_init(&std
->bnx_rx_std_serialize
);
4125 error
= bus_dma_tag_create(sc
->bnx_cdata
.bnx_parent_tag
, 1, 0,
4126 BUS_SPACE_MAXADDR
, BUS_SPACE_MAXADDR
,
4127 NULL
, NULL
, MCLBYTES
, 1, MCLBYTES
,
4128 BUS_DMA_ALLOCNOW
| BUS_DMA_WAITOK
, &std
->bnx_rx_mtag
);
4130 device_printf(dev
, "could not create RX mbuf DMA tag\n");
4134 for (i
= 0; i
< BGE_STD_RX_RING_CNT
; ++i
) {
4135 error
= bus_dmamap_create(std
->bnx_rx_mtag
, BUS_DMA_WAITOK
,
4136 &std
->bnx_rx_std_buf
[i
].bnx_rx_dmamap
);
4140 for (j
= 0; j
< i
; ++j
) {
4141 bus_dmamap_destroy(std
->bnx_rx_mtag
,
4142 std
->bnx_rx_std_buf
[j
].bnx_rx_dmamap
);
4144 bus_dma_tag_destroy(std
->bnx_rx_mtag
);
4145 std
->bnx_rx_mtag
= NULL
;
4148 "could not create %dth RX mbuf DMA map\n", i
);
4154 * Create DMA stuffs for standard RX ring.
4156 error
= bnx_dma_block_alloc(sc
, BGE_STD_RX_RING_SZ
,
4157 &std
->bnx_rx_std_ring_tag
,
4158 &std
->bnx_rx_std_ring_map
,
4159 (void *)&std
->bnx_rx_std_ring
,
4160 &std
->bnx_rx_std_ring_paddr
);
4162 device_printf(dev
, "could not create std RX ring\n");
4167 * Create RX return rings
4169 mbx
= BGE_MBX_RX_CONS0_LO
;
4170 sc
->bnx_rx_ret_ring
= kmalloc_cachealign(
4171 sizeof(struct bnx_rx_ret_ring
) * sc
->bnx_rx_retcnt
, M_DEVBUF
,
4173 for (i
= 0; i
< sc
->bnx_rx_retcnt
; ++i
) {
4174 struct bnx_rx_ret_ring
*ret
= &sc
->bnx_rx_ret_ring
[i
];
4175 struct bnx_intr_data
*intr
;
4179 ret
->bnx_rx_mbx
= mbx
;
4180 ret
->bnx_rx_cntmax
= (BGE_STD_RX_RING_CNT
/ 4) /
4182 ret
->bnx_rx_mask
= 1 << i
;
4184 if (!BNX_RSS_ENABLED(sc
)) {
4185 intr
= &sc
->bnx_intr_data
[0];
4187 KKASSERT(i
+ 1 < sc
->bnx_intr_cnt
);
4188 intr
= &sc
->bnx_intr_data
[i
+ 1];
4192 ret
->bnx_rx_considx
=
4193 &intr
->bnx_status_block
->bge_idx
[0].bge_rx_prod_idx
;
4194 } else if (i
== 1) {
4195 ret
->bnx_rx_considx
=
4196 &intr
->bnx_status_block
->bge_rx_jumbo_cons_idx
;
4197 } else if (i
== 2) {
4198 ret
->bnx_rx_considx
=
4199 &intr
->bnx_status_block
->bge_rsvd1
;
4200 } else if (i
== 3) {
4201 ret
->bnx_rx_considx
=
4202 &intr
->bnx_status_block
->bge_rx_mini_cons_idx
;
4204 panic("unknown RX return ring %d\n", i
);
4206 ret
->bnx_hw_status_tag
=
4207 &intr
->bnx_status_block
->bge_status_tag
;
4209 error
= bnx_create_rx_ret_ring(ret
);
4212 "could not create %dth RX ret ring\n", i
);
4221 sc
->bnx_tx_ring
= kmalloc_cachealign(
4222 sizeof(struct bnx_tx_ring
) * sc
->bnx_tx_ringcnt
, M_DEVBUF
,
4224 for (i
= 0; i
< sc
->bnx_tx_ringcnt
; ++i
) {
4225 struct bnx_tx_ring
*txr
= &sc
->bnx_tx_ring
[i
];
4226 struct bnx_intr_data
*intr
;
4229 txr
->bnx_tx_mbx
= bnx_tx_mailbox
[i
];
4231 if (sc
->bnx_tx_ringcnt
== 1) {
4232 intr
= &sc
->bnx_intr_data
[0];
4234 KKASSERT(i
+ 1 < sc
->bnx_intr_cnt
);
4235 intr
= &sc
->bnx_intr_data
[i
+ 1];
4238 if ((sc
->bnx_flags
& BNX_FLAG_RXTX_BUNDLE
) == 0) {
4239 txr
->bnx_hw_status_tag
=
4240 &intr
->bnx_status_block
->bge_status_tag
;
4242 txr
->bnx_tx_considx
=
4243 &intr
->bnx_status_block
->bge_idx
[0].bge_tx_cons_idx
;
4245 error
= bnx_create_tx_ring(txr
);
4248 "could not create %dth TX ring\n", i
);
4254 * Create jumbo buffer pool.
4256 if (BNX_IS_JUMBO_CAPABLE(sc
)) {
4257 error
= bnx_alloc_jumbo_mem(sc
);
4260 "could not create jumbo buffer pool\n");
4269 bnx_dma_block_alloc(struct bnx_softc
*sc
, bus_size_t size
, bus_dma_tag_t
*tag
,
4270 bus_dmamap_t
*map
, void **addr
, bus_addr_t
*paddr
)
4275 error
= bus_dmamem_coherent(sc
->bnx_cdata
.bnx_parent_tag
, PAGE_SIZE
, 0,
4276 BUS_SPACE_MAXADDR
, BUS_SPACE_MAXADDR
,
4277 size
, BUS_DMA_WAITOK
| BUS_DMA_ZERO
, &dmem
);
4281 *tag
= dmem
.dmem_tag
;
4282 *map
= dmem
.dmem_map
;
4283 *addr
= dmem
.dmem_addr
;
4284 *paddr
= dmem
.dmem_busaddr
;
4290 bnx_dma_block_free(bus_dma_tag_t tag
, bus_dmamap_t map
, void *addr
)
4293 bus_dmamap_unload(tag
, map
);
4294 bus_dmamem_free(tag
, addr
, map
);
4295 bus_dma_tag_destroy(tag
);
4300 bnx_tbi_link_upd(struct bnx_softc
*sc
, uint32_t status
)
4302 struct ifnet
*ifp
= &sc
->arpcom
.ac_if
;
4304 #define PCS_ENCODE_ERR (BGE_MACSTAT_PORT_DECODE_ERROR|BGE_MACSTAT_MI_COMPLETE)
4307 * Sometimes PCS encoding errors are detected in
4308 * TBI mode (on fiber NICs), and for some reason
4309 * the chip will signal them as link changes.
4310 * If we get a link change event, but the 'PCS
4311 * encoding error' bit in the MAC status register
4312 * is set, don't bother doing a link check.
4313 * This avoids spurious "gigabit link up" messages
4314 * that sometimes appear on fiber NICs during
4315 * periods of heavy traffic.
4317 if (status
& BGE_MACSTAT_TBI_PCS_SYNCHED
) {
4318 if (!sc
->bnx_link
) {
4320 if (sc
->bnx_asicrev
== BGE_ASICREV_BCM5704
) {
4321 BNX_CLRBIT(sc
, BGE_MAC_MODE
,
4322 BGE_MACMODE_TBI_SEND_CFGS
);
4325 CSR_WRITE_4(sc
, BGE_MAC_STS
, 0xFFFFFFFF);
4328 if_printf(ifp
, "link UP\n");
4330 ifp
->if_link_state
= LINK_STATE_UP
;
4331 if_link_state_change(ifp
);
4333 } else if ((status
& PCS_ENCODE_ERR
) != PCS_ENCODE_ERR
) {
4338 if_printf(ifp
, "link DOWN\n");
4340 ifp
->if_link_state
= LINK_STATE_DOWN
;
4341 if_link_state_change(ifp
);
4345 #undef PCS_ENCODE_ERR
4347 /* Clear the attention. */
4348 CSR_WRITE_4(sc
, BGE_MAC_STS
, BGE_MACSTAT_SYNC_CHANGED
|
4349 BGE_MACSTAT_CFG_CHANGED
| BGE_MACSTAT_MI_COMPLETE
|
4350 BGE_MACSTAT_LINK_CHANGED
);
4354 bnx_copper_link_upd(struct bnx_softc
*sc
, uint32_t status __unused
)
4356 struct ifnet
*ifp
= &sc
->arpcom
.ac_if
;
4357 struct mii_data
*mii
= device_get_softc(sc
->bnx_miibus
);
4360 bnx_miibus_statchg(sc
->bnx_dev
);
4364 if_printf(ifp
, "link UP\n");
4366 if_printf(ifp
, "link DOWN\n");
4369 /* Clear the attention. */
4370 CSR_WRITE_4(sc
, BGE_MAC_STS
, BGE_MACSTAT_SYNC_CHANGED
|
4371 BGE_MACSTAT_CFG_CHANGED
| BGE_MACSTAT_MI_COMPLETE
|
4372 BGE_MACSTAT_LINK_CHANGED
);
4376 bnx_autopoll_link_upd(struct bnx_softc
*sc
, uint32_t status __unused
)
4378 struct ifnet
*ifp
= &sc
->arpcom
.ac_if
;
4379 struct mii_data
*mii
= device_get_softc(sc
->bnx_miibus
);
4383 if (!sc
->bnx_link
&&
4384 (mii
->mii_media_status
& IFM_ACTIVE
) &&
4385 IFM_SUBTYPE(mii
->mii_media_active
) != IFM_NONE
) {
4388 if_printf(ifp
, "link UP\n");
4389 } else if (sc
->bnx_link
&&
4390 (!(mii
->mii_media_status
& IFM_ACTIVE
) ||
4391 IFM_SUBTYPE(mii
->mii_media_active
) == IFM_NONE
)) {
4394 if_printf(ifp
, "link DOWN\n");
4397 /* Clear the attention. */
4398 CSR_WRITE_4(sc
, BGE_MAC_STS
, BGE_MACSTAT_SYNC_CHANGED
|
4399 BGE_MACSTAT_CFG_CHANGED
| BGE_MACSTAT_MI_COMPLETE
|
4400 BGE_MACSTAT_LINK_CHANGED
);
4404 bnx_sysctl_rx_coal_ticks(SYSCTL_HANDLER_ARGS
)
4406 struct bnx_softc
*sc
= arg1
;
4408 return bnx_sysctl_coal_chg(oidp
, arg1
, arg2
, req
,
4409 &sc
->bnx_rx_coal_ticks
,
4410 BNX_RX_COAL_TICKS_MIN
, BNX_RX_COAL_TICKS_MAX
,
4411 BNX_RX_COAL_TICKS_CHG
);
4415 bnx_sysctl_tx_coal_ticks(SYSCTL_HANDLER_ARGS
)
4417 struct bnx_softc
*sc
= arg1
;
4419 return bnx_sysctl_coal_chg(oidp
, arg1
, arg2
, req
,
4420 &sc
->bnx_tx_coal_ticks
,
4421 BNX_TX_COAL_TICKS_MIN
, BNX_TX_COAL_TICKS_MAX
,
4422 BNX_TX_COAL_TICKS_CHG
);
4426 bnx_sysctl_rx_coal_bds(SYSCTL_HANDLER_ARGS
)
4428 struct bnx_softc
*sc
= arg1
;
4430 return bnx_sysctl_coal_chg(oidp
, arg1
, arg2
, req
,
4431 &sc
->bnx_rx_coal_bds
,
4432 BNX_RX_COAL_BDS_MIN
, BNX_RX_COAL_BDS_MAX
,
4433 BNX_RX_COAL_BDS_CHG
);
4437 bnx_sysctl_rx_coal_bds_poll(SYSCTL_HANDLER_ARGS
)
4439 struct bnx_softc
*sc
= arg1
;
4441 return bnx_sysctl_coal_chg(oidp
, arg1
, arg2
, req
,
4442 &sc
->bnx_rx_coal_bds_poll
,
4443 BNX_RX_COAL_BDS_MIN
, BNX_RX_COAL_BDS_MAX
,
4444 BNX_RX_COAL_BDS_CHG
);
4448 bnx_sysctl_tx_coal_bds(SYSCTL_HANDLER_ARGS
)
4450 struct bnx_softc
*sc
= arg1
;
4452 return bnx_sysctl_coal_chg(oidp
, arg1
, arg2
, req
,
4453 &sc
->bnx_tx_coal_bds
,
4454 BNX_TX_COAL_BDS_MIN
, BNX_TX_COAL_BDS_MAX
,
4455 BNX_TX_COAL_BDS_CHG
);
4459 bnx_sysctl_tx_coal_bds_poll(SYSCTL_HANDLER_ARGS
)
4461 struct bnx_softc
*sc
= arg1
;
4463 return bnx_sysctl_coal_chg(oidp
, arg1
, arg2
, req
,
4464 &sc
->bnx_tx_coal_bds_poll
,
4465 BNX_TX_COAL_BDS_MIN
, BNX_TX_COAL_BDS_MAX
,
4466 BNX_TX_COAL_BDS_CHG
);
4470 bnx_sysctl_rx_coal_bds_int(SYSCTL_HANDLER_ARGS
)
4472 struct bnx_softc
*sc
= arg1
;
4474 return bnx_sysctl_coal_chg(oidp
, arg1
, arg2
, req
,
4475 &sc
->bnx_rx_coal_bds_int
,
4476 BNX_RX_COAL_BDS_MIN
, BNX_RX_COAL_BDS_MAX
,
4477 BNX_RX_COAL_BDS_INT_CHG
);
4481 bnx_sysctl_tx_coal_bds_int(SYSCTL_HANDLER_ARGS
)
4483 struct bnx_softc
*sc
= arg1
;
4485 return bnx_sysctl_coal_chg(oidp
, arg1
, arg2
, req
,
4486 &sc
->bnx_tx_coal_bds_int
,
4487 BNX_TX_COAL_BDS_MIN
, BNX_TX_COAL_BDS_MAX
,
4488 BNX_TX_COAL_BDS_INT_CHG
);
4492 bnx_sysctl_coal_chg(SYSCTL_HANDLER_ARGS
, uint32_t *coal
,
4493 int coal_min
, int coal_max
, uint32_t coal_chg_mask
)
4495 struct bnx_softc
*sc
= arg1
;
4496 struct ifnet
*ifp
= &sc
->arpcom
.ac_if
;
4499 ifnet_serialize_all(ifp
);
4502 error
= sysctl_handle_int(oidp
, &v
, 0, req
);
4503 if (!error
&& req
->newptr
!= NULL
) {
4504 if (v
< coal_min
|| v
> coal_max
) {
4508 sc
->bnx_coal_chg
|= coal_chg_mask
;
4510 /* Commit changes */
4511 bnx_coal_change(sc
);
4515 ifnet_deserialize_all(ifp
);
4520 bnx_coal_change(struct bnx_softc
*sc
)
4522 struct ifnet
*ifp
= &sc
->arpcom
.ac_if
;
4525 ASSERT_IFNET_SERIALIZED_ALL(ifp
);
4527 if (sc
->bnx_coal_chg
& BNX_RX_COAL_TICKS_CHG
) {
4528 if (sc
->bnx_rx_retcnt
== 1) {
4529 CSR_WRITE_4(sc
, BGE_HCC_RX_COAL_TICKS
,
4530 sc
->bnx_rx_coal_ticks
);
4533 CSR_WRITE_4(sc
, BGE_HCC_RX_COAL_TICKS
, 0);
4534 for (i
= 0; i
< sc
->bnx_rx_retcnt
; ++i
) {
4535 CSR_WRITE_4(sc
, BGE_VEC1_RX_COAL_TICKS
+
4536 (i
* BGE_VEC_COALSET_SIZE
),
4537 sc
->bnx_rx_coal_ticks
);
4540 for (; i
< BNX_INTR_MAX
- 1; ++i
) {
4541 CSR_WRITE_4(sc
, BGE_VEC1_RX_COAL_TICKS
+
4542 (i
* BGE_VEC_COALSET_SIZE
), 0);
4545 if_printf(ifp
, "rx_coal_ticks -> %u\n",
4546 sc
->bnx_rx_coal_ticks
);
4550 if (sc
->bnx_coal_chg
& BNX_TX_COAL_TICKS_CHG
) {
4551 if (sc
->bnx_tx_ringcnt
== 1) {
4552 CSR_WRITE_4(sc
, BGE_HCC_TX_COAL_TICKS
,
4553 sc
->bnx_tx_coal_ticks
);
4556 CSR_WRITE_4(sc
, BGE_HCC_TX_COAL_TICKS
, 0);
4557 for (i
= 0; i
< sc
->bnx_tx_ringcnt
; ++i
) {
4558 CSR_WRITE_4(sc
, BGE_VEC1_TX_COAL_TICKS
+
4559 (i
* BGE_VEC_COALSET_SIZE
),
4560 sc
->bnx_tx_coal_ticks
);
4563 for (; i
< BNX_INTR_MAX
- 1; ++i
) {
4564 CSR_WRITE_4(sc
, BGE_VEC1_TX_COAL_TICKS
+
4565 (i
* BGE_VEC_COALSET_SIZE
), 0);
4568 if_printf(ifp
, "tx_coal_ticks -> %u\n",
4569 sc
->bnx_tx_coal_ticks
);
4573 if (sc
->bnx_coal_chg
& BNX_RX_COAL_BDS_CHG
) {
4574 uint32_t rx_coal_bds
;
4576 if (ifp
->if_flags
& IFF_NPOLLING
)
4577 rx_coal_bds
= sc
->bnx_rx_coal_bds_poll
;
4579 rx_coal_bds
= sc
->bnx_rx_coal_bds
;
4581 if (sc
->bnx_rx_retcnt
== 1) {
4582 CSR_WRITE_4(sc
, BGE_HCC_RX_MAX_COAL_BDS
, rx_coal_bds
);
4585 CSR_WRITE_4(sc
, BGE_HCC_RX_MAX_COAL_BDS
, 0);
4586 for (i
= 0; i
< sc
->bnx_rx_retcnt
; ++i
) {
4587 CSR_WRITE_4(sc
, BGE_VEC1_RX_MAX_COAL_BDS
+
4588 (i
* BGE_VEC_COALSET_SIZE
), rx_coal_bds
);
4591 for (; i
< BNX_INTR_MAX
- 1; ++i
) {
4592 CSR_WRITE_4(sc
, BGE_VEC1_RX_MAX_COAL_BDS
+
4593 (i
* BGE_VEC_COALSET_SIZE
), 0);
4596 if_printf(ifp
, "%srx_coal_bds -> %u\n",
4597 (ifp
->if_flags
& IFF_NPOLLING
) ? "polling " : "",
4602 if (sc
->bnx_coal_chg
& BNX_TX_COAL_BDS_CHG
) {
4603 uint32_t tx_coal_bds
;
4605 if (ifp
->if_flags
& IFF_NPOLLING
)
4606 tx_coal_bds
= sc
->bnx_tx_coal_bds_poll
;
4608 tx_coal_bds
= sc
->bnx_tx_coal_bds
;
4610 if (sc
->bnx_tx_ringcnt
== 1) {
4611 CSR_WRITE_4(sc
, BGE_HCC_TX_MAX_COAL_BDS
, tx_coal_bds
);
4614 CSR_WRITE_4(sc
, BGE_HCC_TX_MAX_COAL_BDS
, 0);
4615 for (i
= 0; i
< sc
->bnx_tx_ringcnt
; ++i
) {
4616 CSR_WRITE_4(sc
, BGE_VEC1_TX_MAX_COAL_BDS
+
4617 (i
* BGE_VEC_COALSET_SIZE
), tx_coal_bds
);
4620 for (; i
< BNX_INTR_MAX
- 1; ++i
) {
4621 CSR_WRITE_4(sc
, BGE_VEC1_TX_MAX_COAL_BDS
+
4622 (i
* BGE_VEC_COALSET_SIZE
), 0);
4625 if_printf(ifp
, "%stx_coal_bds -> %u\n",
4626 (ifp
->if_flags
& IFF_NPOLLING
) ? "polling " : "",
4631 if (sc
->bnx_coal_chg
& BNX_RX_COAL_BDS_INT_CHG
) {
4632 if (sc
->bnx_rx_retcnt
== 1) {
4633 CSR_WRITE_4(sc
, BGE_HCC_RX_MAX_COAL_BDS_INT
,
4634 sc
->bnx_rx_coal_bds_int
);
4637 CSR_WRITE_4(sc
, BGE_HCC_RX_MAX_COAL_BDS_INT
, 0);
4638 for (i
= 0; i
< sc
->bnx_rx_retcnt
; ++i
) {
4639 CSR_WRITE_4(sc
, BGE_VEC1_RX_MAX_COAL_BDS_INT
+
4640 (i
* BGE_VEC_COALSET_SIZE
),
4641 sc
->bnx_rx_coal_bds_int
);
4644 for (; i
< BNX_INTR_MAX
- 1; ++i
) {
4645 CSR_WRITE_4(sc
, BGE_VEC1_RX_MAX_COAL_BDS_INT
+
4646 (i
* BGE_VEC_COALSET_SIZE
), 0);
4649 if_printf(ifp
, "rx_coal_bds_int -> %u\n",
4650 sc
->bnx_rx_coal_bds_int
);
4654 if (sc
->bnx_coal_chg
& BNX_TX_COAL_BDS_INT_CHG
) {
4655 if (sc
->bnx_tx_ringcnt
== 1) {
4656 CSR_WRITE_4(sc
, BGE_HCC_TX_MAX_COAL_BDS_INT
,
4657 sc
->bnx_tx_coal_bds_int
);
4660 CSR_WRITE_4(sc
, BGE_HCC_TX_MAX_COAL_BDS_INT
, 0);
4661 for (i
= 0; i
< sc
->bnx_tx_ringcnt
; ++i
) {
4662 CSR_WRITE_4(sc
, BGE_VEC1_TX_MAX_COAL_BDS_INT
+
4663 (i
* BGE_VEC_COALSET_SIZE
),
4664 sc
->bnx_tx_coal_bds_int
);
4667 for (; i
< BNX_INTR_MAX
- 1; ++i
) {
4668 CSR_WRITE_4(sc
, BGE_VEC1_TX_MAX_COAL_BDS_INT
+
4669 (i
* BGE_VEC_COALSET_SIZE
), 0);
4672 if_printf(ifp
, "tx_coal_bds_int -> %u\n",
4673 sc
->bnx_tx_coal_bds_int
);
4677 sc
->bnx_coal_chg
= 0;
4681 bnx_check_intr_rxtx(void *xintr
)
4683 struct bnx_intr_data
*intr
= xintr
;
4684 struct bnx_rx_ret_ring
*ret
;
4685 struct bnx_tx_ring
*txr
;
4688 lwkt_serialize_enter(intr
->bnx_intr_serialize
);
4690 KKASSERT(mycpuid
== intr
->bnx_intr_cpuid
);
4692 ifp
= &intr
->bnx_sc
->arpcom
.ac_if
;
4693 if ((ifp
->if_flags
& (IFF_RUNNING
| IFF_NPOLLING
)) != IFF_RUNNING
) {
4694 lwkt_serialize_exit(intr
->bnx_intr_serialize
);
4698 txr
= intr
->bnx_txr
;
4699 ret
= intr
->bnx_ret
;
4701 if (*ret
->bnx_rx_considx
!= ret
->bnx_rx_saved_considx
||
4702 *txr
->bnx_tx_considx
!= txr
->bnx_tx_saved_considx
) {
4703 if (intr
->bnx_rx_check_considx
== ret
->bnx_rx_saved_considx
&&
4704 intr
->bnx_tx_check_considx
== txr
->bnx_tx_saved_considx
) {
4705 if (!intr
->bnx_intr_maylose
) {
4706 intr
->bnx_intr_maylose
= TRUE
;
4710 if_printf(ifp
, "lost interrupt\n");
4711 intr
->bnx_intr_func(intr
->bnx_intr_arg
);
4714 intr
->bnx_intr_maylose
= FALSE
;
4715 intr
->bnx_rx_check_considx
= ret
->bnx_rx_saved_considx
;
4716 intr
->bnx_tx_check_considx
= txr
->bnx_tx_saved_considx
;
4719 callout_reset(&intr
->bnx_intr_timer
, BNX_INTR_CKINTVL
,
4720 intr
->bnx_intr_check
, intr
);
4721 lwkt_serialize_exit(intr
->bnx_intr_serialize
);
4725 bnx_check_intr_tx(void *xintr
)
4727 struct bnx_intr_data
*intr
= xintr
;
4728 struct bnx_tx_ring
*txr
;
4731 lwkt_serialize_enter(intr
->bnx_intr_serialize
);
4733 KKASSERT(mycpuid
== intr
->bnx_intr_cpuid
);
4735 ifp
= &intr
->bnx_sc
->arpcom
.ac_if
;
4736 if ((ifp
->if_flags
& (IFF_RUNNING
| IFF_NPOLLING
)) != IFF_RUNNING
) {
4737 lwkt_serialize_exit(intr
->bnx_intr_serialize
);
4741 txr
= intr
->bnx_txr
;
4743 if (*txr
->bnx_tx_considx
!= txr
->bnx_tx_saved_considx
) {
4744 if (intr
->bnx_tx_check_considx
== txr
->bnx_tx_saved_considx
) {
4745 if (!intr
->bnx_intr_maylose
) {
4746 intr
->bnx_intr_maylose
= TRUE
;
4750 if_printf(ifp
, "lost interrupt\n");
4751 intr
->bnx_intr_func(intr
->bnx_intr_arg
);
4754 intr
->bnx_intr_maylose
= FALSE
;
4755 intr
->bnx_tx_check_considx
= txr
->bnx_tx_saved_considx
;
4758 callout_reset(&intr
->bnx_intr_timer
, BNX_INTR_CKINTVL
,
4759 intr
->bnx_intr_check
, intr
);
4760 lwkt_serialize_exit(intr
->bnx_intr_serialize
);
4764 bnx_check_intr_rx(void *xintr
)
4766 struct bnx_intr_data
*intr
= xintr
;
4767 struct bnx_rx_ret_ring
*ret
;
4770 lwkt_serialize_enter(intr
->bnx_intr_serialize
);
4772 KKASSERT(mycpuid
== intr
->bnx_intr_cpuid
);
4774 ifp
= &intr
->bnx_sc
->arpcom
.ac_if
;
4775 if ((ifp
->if_flags
& (IFF_RUNNING
| IFF_NPOLLING
)) != IFF_RUNNING
) {
4776 lwkt_serialize_exit(intr
->bnx_intr_serialize
);
4780 ret
= intr
->bnx_ret
;
4782 if (*ret
->bnx_rx_considx
!= ret
->bnx_rx_saved_considx
) {
4783 if (intr
->bnx_rx_check_considx
== ret
->bnx_rx_saved_considx
) {
4784 if (!intr
->bnx_intr_maylose
) {
4785 intr
->bnx_intr_maylose
= TRUE
;
4789 if_printf(ifp
, "lost interrupt\n");
4790 intr
->bnx_intr_func(intr
->bnx_intr_arg
);
4793 intr
->bnx_intr_maylose
= FALSE
;
4794 intr
->bnx_rx_check_considx
= ret
->bnx_rx_saved_considx
;
4797 callout_reset(&intr
->bnx_intr_timer
, BNX_INTR_CKINTVL
,
4798 intr
->bnx_intr_check
, intr
);
4799 lwkt_serialize_exit(intr
->bnx_intr_serialize
);
4803 bnx_enable_intr(struct bnx_softc
*sc
)
4805 struct ifnet
*ifp
= &sc
->arpcom
.ac_if
;
4808 for (i
= 0; i
< sc
->bnx_intr_cnt
; ++i
) {
4809 lwkt_serialize_handler_enable(
4810 sc
->bnx_intr_data
[i
].bnx_intr_serialize
);
4816 for (i
= 0; i
< sc
->bnx_intr_cnt
; ++i
) {
4817 struct bnx_intr_data
*intr
= &sc
->bnx_intr_data
[i
];
4819 bnx_writembx(sc
, intr
->bnx_intr_mbx
,
4820 (*intr
->bnx_saved_status_tag
) << 24);
4821 /* XXX Linux driver */
4822 bnx_writembx(sc
, intr
->bnx_intr_mbx
,
4823 (*intr
->bnx_saved_status_tag
) << 24);
4827 * Unmask the interrupt when we stop polling.
4829 PCI_CLRBIT(sc
->bnx_dev
, BGE_PCI_MISC_CTL
,
4830 BGE_PCIMISCCTL_MASK_PCI_INTR
, 4);
4833 * Trigger another interrupt, since above writing
4834 * to interrupt mailbox0 may acknowledge pending
4837 BNX_SETBIT(sc
, BGE_MISC_LOCAL_CTL
, BGE_MLC_INTR_SET
);
4839 if (sc
->bnx_flags
& BNX_FLAG_STATUSTAG_BUG
) {
4841 if_printf(ifp
, "status tag bug workaround\n");
4843 for (i
= 0; i
< sc
->bnx_intr_cnt
; ++i
) {
4844 struct bnx_intr_data
*intr
= &sc
->bnx_intr_data
[i
];
4846 if (intr
->bnx_intr_check
== NULL
)
4848 intr
->bnx_intr_maylose
= FALSE
;
4849 intr
->bnx_rx_check_considx
= 0;
4850 intr
->bnx_tx_check_considx
= 0;
4851 callout_reset_bycpu(&intr
->bnx_intr_timer
,
4852 BNX_INTR_CKINTVL
, intr
->bnx_intr_check
, intr
,
4853 intr
->bnx_intr_cpuid
);
4859 bnx_disable_intr(struct bnx_softc
*sc
)
4863 for (i
= 0; i
< sc
->bnx_intr_cnt
; ++i
) {
4864 struct bnx_intr_data
*intr
= &sc
->bnx_intr_data
[i
];
4866 callout_stop(&intr
->bnx_intr_timer
);
4867 intr
->bnx_intr_maylose
= FALSE
;
4868 intr
->bnx_rx_check_considx
= 0;
4869 intr
->bnx_tx_check_considx
= 0;
4873 * Mask the interrupt when we start polling.
4875 PCI_SETBIT(sc
->bnx_dev
, BGE_PCI_MISC_CTL
,
4876 BGE_PCIMISCCTL_MASK_PCI_INTR
, 4);
4879 * Acknowledge possible asserted interrupt.
4881 for (i
= 0; i
< BNX_INTR_MAX
; ++i
)
4882 bnx_writembx(sc
, sc
->bnx_intr_data
[i
].bnx_intr_mbx
, 1);
4884 for (i
= 0; i
< sc
->bnx_intr_cnt
; ++i
) {
4885 lwkt_serialize_handler_disable(
4886 sc
->bnx_intr_data
[i
].bnx_intr_serialize
);
4891 bnx_get_eaddr_mem(struct bnx_softc
*sc
, uint8_t ether_addr
[])
4896 mac_addr
= bnx_readmem_ind(sc
, 0x0c14);
4897 if ((mac_addr
>> 16) == 0x484b) {
4898 ether_addr
[0] = (uint8_t)(mac_addr
>> 8);
4899 ether_addr
[1] = (uint8_t)mac_addr
;
4900 mac_addr
= bnx_readmem_ind(sc
, 0x0c18);
4901 ether_addr
[2] = (uint8_t)(mac_addr
>> 24);
4902 ether_addr
[3] = (uint8_t)(mac_addr
>> 16);
4903 ether_addr
[4] = (uint8_t)(mac_addr
>> 8);
4904 ether_addr
[5] = (uint8_t)mac_addr
;
4911 bnx_get_eaddr_nvram(struct bnx_softc
*sc
, uint8_t ether_addr
[])
4913 int mac_offset
= BGE_EE_MAC_OFFSET
;
4915 if (BNX_IS_5717_PLUS(sc
)) {
4918 f
= pci_get_function(sc
->bnx_dev
);
4920 mac_offset
= BGE_EE_MAC_OFFSET_5717
;
4922 mac_offset
+= BGE_EE_MAC_OFFSET_5717_OFF
;
4925 return bnx_read_nvram(sc
, ether_addr
, mac_offset
+ 2, ETHER_ADDR_LEN
);
4929 bnx_get_eaddr_eeprom(struct bnx_softc
*sc
, uint8_t ether_addr
[])
4931 if (sc
->bnx_flags
& BNX_FLAG_NO_EEPROM
)
4934 return bnx_read_eeprom(sc
, ether_addr
, BGE_EE_MAC_OFFSET
+ 2,
4939 bnx_get_eaddr(struct bnx_softc
*sc
, uint8_t eaddr
[])
4941 static const bnx_eaddr_fcn_t bnx_eaddr_funcs
[] = {
4942 /* NOTE: Order is critical */
4944 bnx_get_eaddr_nvram
,
4945 bnx_get_eaddr_eeprom
,
4948 const bnx_eaddr_fcn_t
*func
;
4950 for (func
= bnx_eaddr_funcs
; *func
!= NULL
; ++func
) {
4951 if ((*func
)(sc
, eaddr
) == 0)
4954 return (*func
== NULL
? ENXIO
: 0);
4958 * NOTE: 'm' is not freed upon failure
4961 bnx_defrag_shortdma(struct mbuf
*m
)
4967 * If device receive two back-to-back send BDs with less than
4968 * or equal to 8 total bytes then the device may hang. The two
4969 * back-to-back send BDs must in the same frame for this failure
4970 * to occur. Scan mbuf chains and see whether two back-to-back
4971 * send BDs are there. If this is the case, allocate new mbuf
4972 * and copy the frame to workaround the silicon bug.
4974 for (n
= m
, found
= 0; n
!= NULL
; n
= n
->m_next
) {
4985 n
= m_defrag(m
, M_NOWAIT
);
4992 bnx_stop_block(struct bnx_softc
*sc
, bus_size_t reg
, uint32_t bit
)
4996 BNX_CLRBIT(sc
, reg
, bit
);
4997 for (i
= 0; i
< BNX_TIMEOUT
; i
++) {
4998 if ((CSR_READ_4(sc
, reg
) & bit
) == 0)
5005 bnx_link_poll(struct bnx_softc
*sc
)
5009 status
= CSR_READ_4(sc
, BGE_MAC_STS
);
5010 if ((status
& sc
->bnx_link_chg
) || sc
->bnx_link_evt
) {
5011 sc
->bnx_link_evt
= 0;
5012 sc
->bnx_link_upd(sc
, status
);
5017 bnx_enable_msi(struct bnx_softc
*sc
, boolean_t is_msix
)
5021 msi_mode
= CSR_READ_4(sc
, BGE_MSI_MODE
);
5022 msi_mode
|= BGE_MSIMODE_ENABLE
;
5025 * 5718-PG105-R says that "one shot" mode does not work
5026 * if MSI is used, however, it obviously works.
5028 msi_mode
&= ~BGE_MSIMODE_ONESHOT_DISABLE
;
5030 msi_mode
|= BGE_MSIMODE_MSIX_MULTIMODE
;
5032 msi_mode
&= ~BGE_MSIMODE_MSIX_MULTIMODE
;
5033 CSR_WRITE_4(sc
, BGE_MSI_MODE
, msi_mode
);
5037 bnx_dma_swap_options(struct bnx_softc
*sc
)
5039 uint32_t dma_options
;
5041 dma_options
= BGE_MODECTL_WORDSWAP_NONFRAME
|
5042 BGE_MODECTL_BYTESWAP_DATA
| BGE_MODECTL_WORDSWAP_DATA
;
5043 #if BYTE_ORDER == BIG_ENDIAN
5044 dma_options
|= BGE_MODECTL_BYTESWAP_NONFRAME
;
5050 bnx_setup_tso(struct bnx_tx_ring
*txr
, struct mbuf
**mp
,
5051 uint16_t *mss0
, uint16_t *flags0
)
5056 int thoff
, iphlen
, hoff
, hlen
;
5057 uint16_t flags
, mss
;
5060 KASSERT(M_WRITABLE(m
), ("TSO mbuf not writable"));
5062 hoff
= m
->m_pkthdr
.csum_lhlen
;
5063 iphlen
= m
->m_pkthdr
.csum_iphlen
;
5064 thoff
= m
->m_pkthdr
.csum_thlen
;
5066 KASSERT(hoff
> 0, ("invalid ether header len"));
5067 KASSERT(iphlen
> 0, ("invalid ip header len"));
5068 KASSERT(thoff
> 0, ("invalid tcp header len"));
5070 if (__predict_false(m
->m_len
< hoff
+ iphlen
+ thoff
)) {
5071 m
= m_pullup(m
, hoff
+ iphlen
+ thoff
);
5078 ip
= mtodoff(m
, struct ip
*, hoff
);
5079 th
= mtodoff(m
, struct tcphdr
*, hoff
+ iphlen
);
5081 mss
= m
->m_pkthdr
.tso_segsz
;
5082 flags
= BGE_TXBDFLAG_CPU_PRE_DMA
| BGE_TXBDFLAG_CPU_POST_DMA
;
5084 ip
->ip_len
= htons(mss
+ iphlen
+ thoff
);
5087 hlen
= (iphlen
+ thoff
) >> 2;
5088 mss
|= ((hlen
& 0x3) << 14);
5089 flags
|= ((hlen
& 0xf8) << 7) | ((hlen
& 0x4) << 2);
5098 bnx_create_tx_ring(struct bnx_tx_ring
*txr
)
5100 bus_size_t txmaxsz
, txmaxsegsz
;
5103 lwkt_serialize_init(&txr
->bnx_tx_serialize
);
5106 * Create DMA tag and maps for TX mbufs.
5108 if (txr
->bnx_sc
->bnx_flags
& BNX_FLAG_TSO
)
5109 txmaxsz
= IP_MAXPACKET
+ sizeof(struct ether_vlan_header
);
5111 txmaxsz
= BNX_JUMBO_FRAMELEN
;
5112 if (txr
->bnx_sc
->bnx_asicrev
== BGE_ASICREV_BCM57766
)
5113 txmaxsegsz
= MCLBYTES
;
5115 txmaxsegsz
= PAGE_SIZE
;
5116 error
= bus_dma_tag_create(txr
->bnx_sc
->bnx_cdata
.bnx_parent_tag
,
5117 1, 0, BUS_SPACE_MAXADDR
, BUS_SPACE_MAXADDR
, NULL
, NULL
,
5118 txmaxsz
, BNX_NSEG_NEW
, txmaxsegsz
,
5119 BUS_DMA_ALLOCNOW
| BUS_DMA_WAITOK
| BUS_DMA_ONEBPAGE
,
5122 device_printf(txr
->bnx_sc
->bnx_dev
,
5123 "could not create TX mbuf DMA tag\n");
5127 for (i
= 0; i
< BGE_TX_RING_CNT
; i
++) {
5128 error
= bus_dmamap_create(txr
->bnx_tx_mtag
,
5129 BUS_DMA_WAITOK
| BUS_DMA_ONEBPAGE
,
5130 &txr
->bnx_tx_buf
[i
].bnx_tx_dmamap
);
5134 for (j
= 0; j
< i
; ++j
) {
5135 bus_dmamap_destroy(txr
->bnx_tx_mtag
,
5136 txr
->bnx_tx_buf
[j
].bnx_tx_dmamap
);
5138 bus_dma_tag_destroy(txr
->bnx_tx_mtag
);
5139 txr
->bnx_tx_mtag
= NULL
;
5141 device_printf(txr
->bnx_sc
->bnx_dev
,
5142 "could not create TX mbuf DMA map\n");
5148 * Create DMA stuffs for TX ring.
5150 error
= bnx_dma_block_alloc(txr
->bnx_sc
, BGE_TX_RING_SZ
,
5151 &txr
->bnx_tx_ring_tag
,
5152 &txr
->bnx_tx_ring_map
,
5153 (void *)&txr
->bnx_tx_ring
,
5154 &txr
->bnx_tx_ring_paddr
);
5156 device_printf(txr
->bnx_sc
->bnx_dev
,
5157 "could not create TX ring\n");
5161 txr
->bnx_tx_flags
|= BNX_TX_FLAG_SHORTDMA
;
5162 txr
->bnx_tx_wreg
= BNX_TX_WREG_NSEGS
;
5168 bnx_destroy_tx_ring(struct bnx_tx_ring
*txr
)
5170 /* Destroy TX mbuf DMA stuffs. */
5171 if (txr
->bnx_tx_mtag
!= NULL
) {
5174 for (i
= 0; i
< BGE_TX_RING_CNT
; i
++) {
5175 KKASSERT(txr
->bnx_tx_buf
[i
].bnx_tx_mbuf
== NULL
);
5176 bus_dmamap_destroy(txr
->bnx_tx_mtag
,
5177 txr
->bnx_tx_buf
[i
].bnx_tx_dmamap
);
5179 bus_dma_tag_destroy(txr
->bnx_tx_mtag
);
5182 /* Destroy TX ring */
5183 bnx_dma_block_free(txr
->bnx_tx_ring_tag
,
5184 txr
->bnx_tx_ring_map
, txr
->bnx_tx_ring
);
5188 bnx_sysctl_force_defrag(SYSCTL_HANDLER_ARGS
)
5190 struct bnx_softc
*sc
= (void *)arg1
;
5191 struct ifnet
*ifp
= &sc
->arpcom
.ac_if
;
5192 struct bnx_tx_ring
*txr
= &sc
->bnx_tx_ring
[0];
5193 int error
, defrag
, i
;
5195 if (txr
->bnx_tx_flags
& BNX_TX_FLAG_FORCE_DEFRAG
)
5200 error
= sysctl_handle_int(oidp
, &defrag
, 0, req
);
5201 if (error
|| req
->newptr
== NULL
)
5204 ifnet_serialize_all(ifp
);
5205 for (i
= 0; i
< sc
->bnx_tx_ringcnt
; ++i
) {
5206 txr
= &sc
->bnx_tx_ring
[i
];
5208 txr
->bnx_tx_flags
|= BNX_TX_FLAG_FORCE_DEFRAG
;
5210 txr
->bnx_tx_flags
&= ~BNX_TX_FLAG_FORCE_DEFRAG
;
5212 ifnet_deserialize_all(ifp
);
5218 bnx_sysctl_tx_wreg(SYSCTL_HANDLER_ARGS
)
5220 struct bnx_softc
*sc
= (void *)arg1
;
5221 struct ifnet
*ifp
= &sc
->arpcom
.ac_if
;
5222 struct bnx_tx_ring
*txr
= &sc
->bnx_tx_ring
[0];
5223 int error
, tx_wreg
, i
;
5225 tx_wreg
= txr
->bnx_tx_wreg
;
5226 error
= sysctl_handle_int(oidp
, &tx_wreg
, 0, req
);
5227 if (error
|| req
->newptr
== NULL
)
5230 ifnet_serialize_all(ifp
);
5231 for (i
= 0; i
< sc
->bnx_tx_ringcnt
; ++i
)
5232 sc
->bnx_tx_ring
[i
].bnx_tx_wreg
= tx_wreg
;
5233 ifnet_deserialize_all(ifp
);
5239 bnx_create_rx_ret_ring(struct bnx_rx_ret_ring
*ret
)
5243 lwkt_serialize_init(&ret
->bnx_rx_ret_serialize
);
5246 * Create DMA stuffs for RX return ring.
5248 error
= bnx_dma_block_alloc(ret
->bnx_sc
,
5249 BGE_RX_RTN_RING_SZ(BNX_RETURN_RING_CNT
),
5250 &ret
->bnx_rx_ret_ring_tag
,
5251 &ret
->bnx_rx_ret_ring_map
,
5252 (void *)&ret
->bnx_rx_ret_ring
,
5253 &ret
->bnx_rx_ret_ring_paddr
);
5255 device_printf(ret
->bnx_sc
->bnx_dev
,
5256 "could not create RX ret ring\n");
5260 /* Shadow standard ring's RX mbuf DMA tag */
5261 ret
->bnx_rx_mtag
= ret
->bnx_std
->bnx_rx_mtag
;
5264 * Create tmp DMA map for RX mbufs.
5266 error
= bus_dmamap_create(ret
->bnx_rx_mtag
, BUS_DMA_WAITOK
,
5267 &ret
->bnx_rx_tmpmap
);
5269 device_printf(ret
->bnx_sc
->bnx_dev
,
5270 "could not create tmp RX mbuf DMA map\n");
5271 ret
->bnx_rx_mtag
= NULL
;
5278 bnx_destroy_rx_ret_ring(struct bnx_rx_ret_ring
*ret
)
5280 /* Destroy tmp RX mbuf DMA map */
5281 if (ret
->bnx_rx_mtag
!= NULL
)
5282 bus_dmamap_destroy(ret
->bnx_rx_mtag
, ret
->bnx_rx_tmpmap
);
5284 /* Destroy RX return ring */
5285 bnx_dma_block_free(ret
->bnx_rx_ret_ring_tag
,
5286 ret
->bnx_rx_ret_ring_map
, ret
->bnx_rx_ret_ring
);
5290 bnx_alloc_intr(struct bnx_softc
*sc
)
5292 struct bnx_intr_data
*intr
;
5296 if (sc
->bnx_intr_cnt
> 1) {
5297 error
= bnx_alloc_msix(sc
);
5300 KKASSERT(sc
->bnx_intr_type
== PCI_INTR_TYPE_MSIX
);
5304 KKASSERT(sc
->bnx_intr_cnt
== 1);
5306 intr
= &sc
->bnx_intr_data
[0];
5307 intr
->bnx_ret
= &sc
->bnx_rx_ret_ring
[0];
5308 intr
->bnx_txr
= &sc
->bnx_tx_ring
[0];
5309 intr
->bnx_intr_serialize
= &sc
->bnx_main_serialize
;
5310 intr
->bnx_intr_check
= bnx_check_intr_rxtx
;
5311 intr
->bnx_saved_status_tag
= &intr
->bnx_ret
->bnx_saved_status_tag
;
5313 sc
->bnx_intr_type
= pci_alloc_1intr(sc
->bnx_dev
, bnx_msi_enable
,
5314 &intr
->bnx_intr_rid
, &intr_flags
);
5316 intr
->bnx_intr_res
= bus_alloc_resource_any(sc
->bnx_dev
, SYS_RES_IRQ
,
5317 &intr
->bnx_intr_rid
, intr_flags
);
5318 if (intr
->bnx_intr_res
== NULL
) {
5319 device_printf(sc
->bnx_dev
, "could not alloc interrupt\n");
5323 if (sc
->bnx_intr_type
== PCI_INTR_TYPE_MSI
) {
5324 bnx_enable_msi(sc
, FALSE
);
5325 intr
->bnx_intr_func
= bnx_msi
;
5327 device_printf(sc
->bnx_dev
, "oneshot MSI\n");
5329 intr
->bnx_intr_func
= bnx_intr_legacy
;
5331 intr
->bnx_intr_arg
= sc
;
5332 intr
->bnx_intr_cpuid
= rman_get_cpuid(intr
->bnx_intr_res
);
5334 intr
->bnx_txr
->bnx_tx_cpuid
= intr
->bnx_intr_cpuid
;
5340 bnx_setup_intr(struct bnx_softc
*sc
)
5344 for (i
= 0; i
< sc
->bnx_intr_cnt
; ++i
) {
5345 struct bnx_intr_data
*intr
= &sc
->bnx_intr_data
[i
];
5347 error
= bus_setup_intr_descr(sc
->bnx_dev
, intr
->bnx_intr_res
,
5348 INTR_MPSAFE
, intr
->bnx_intr_func
, intr
->bnx_intr_arg
,
5349 &intr
->bnx_intr_hand
, intr
->bnx_intr_serialize
,
5350 intr
->bnx_intr_desc
);
5352 device_printf(sc
->bnx_dev
,
5353 "could not set up %dth intr\n", i
);
5354 bnx_teardown_intr(sc
, i
);
5362 bnx_teardown_intr(struct bnx_softc
*sc
, int cnt
)
5366 for (i
= 0; i
< cnt
; ++i
) {
5367 struct bnx_intr_data
*intr
= &sc
->bnx_intr_data
[i
];
5369 bus_teardown_intr(sc
->bnx_dev
, intr
->bnx_intr_res
,
5370 intr
->bnx_intr_hand
);
5375 bnx_free_intr(struct bnx_softc
*sc
)
5377 if (sc
->bnx_intr_type
!= PCI_INTR_TYPE_MSIX
) {
5378 struct bnx_intr_data
*intr
;
5380 KKASSERT(sc
->bnx_intr_cnt
<= 1);
5381 intr
= &sc
->bnx_intr_data
[0];
5383 if (intr
->bnx_intr_res
!= NULL
) {
5384 bus_release_resource(sc
->bnx_dev
, SYS_RES_IRQ
,
5385 intr
->bnx_intr_rid
, intr
->bnx_intr_res
);
5387 if (sc
->bnx_intr_type
== PCI_INTR_TYPE_MSI
)
5388 pci_release_msi(sc
->bnx_dev
);
5390 bnx_free_msix(sc
, TRUE
);
5395 bnx_setup_serialize(struct bnx_softc
*sc
)
5400 * Allocate serializer array
5403 /* Main + RX STD + TX + RX RET */
5404 sc
->bnx_serialize_cnt
= 1 + 1 + sc
->bnx_tx_ringcnt
+ sc
->bnx_rx_retcnt
;
5407 kmalloc(sc
->bnx_serialize_cnt
* sizeof(struct lwkt_serialize
*),
5408 M_DEVBUF
, M_WAITOK
| M_ZERO
);
5413 * NOTE: Order is critical
5418 KKASSERT(i
< sc
->bnx_serialize_cnt
);
5419 sc
->bnx_serialize
[i
++] = &sc
->bnx_main_serialize
;
5421 KKASSERT(i
< sc
->bnx_serialize_cnt
);
5422 sc
->bnx_serialize
[i
++] = &sc
->bnx_rx_std_ring
.bnx_rx_std_serialize
;
5424 for (j
= 0; j
< sc
->bnx_rx_retcnt
; ++j
) {
5425 KKASSERT(i
< sc
->bnx_serialize_cnt
);
5426 sc
->bnx_serialize
[i
++] =
5427 &sc
->bnx_rx_ret_ring
[j
].bnx_rx_ret_serialize
;
5430 for (j
= 0; j
< sc
->bnx_tx_ringcnt
; ++j
) {
5431 KKASSERT(i
< sc
->bnx_serialize_cnt
);
5432 sc
->bnx_serialize
[i
++] =
5433 &sc
->bnx_tx_ring
[j
].bnx_tx_serialize
;
5436 KKASSERT(i
== sc
->bnx_serialize_cnt
);
5440 bnx_serialize(struct ifnet
*ifp
, enum ifnet_serialize slz
)
5442 struct bnx_softc
*sc
= ifp
->if_softc
;
5444 ifnet_serialize_array_enter(sc
->bnx_serialize
,
5445 sc
->bnx_serialize_cnt
, slz
);
5449 bnx_deserialize(struct ifnet
*ifp
, enum ifnet_serialize slz
)
5451 struct bnx_softc
*sc
= ifp
->if_softc
;
5453 ifnet_serialize_array_exit(sc
->bnx_serialize
,
5454 sc
->bnx_serialize_cnt
, slz
);
5458 bnx_tryserialize(struct ifnet
*ifp
, enum ifnet_serialize slz
)
5460 struct bnx_softc
*sc
= ifp
->if_softc
;
5462 return ifnet_serialize_array_try(sc
->bnx_serialize
,
5463 sc
->bnx_serialize_cnt
, slz
);
#ifdef INVARIANTS

/*
 * ifnet serialize-assert method (INVARIANTS only): verify the
 * serializer array holds/does not hold as claimed.
 * NOTE(review): the opening #ifdef INVARIANTS was a dropped line,
 * restored to balance the visible #endif.
 */
static void
bnx_serialize_assert(struct ifnet *ifp, enum ifnet_serialize slz,
    boolean_t serialized)
{
        struct bnx_softc *sc = ifp->if_softc;

        ifnet_serialize_array_assert(sc->bnx_serialize, sc->bnx_serialize_cnt,
            slz, serialized);
}

#endif  /* INVARIANTS */
5481 bnx_set_tick_cpuid(struct bnx_softc
*sc
, boolean_t polling
)
5484 sc
->bnx_tick_cpuid
= 0; /* XXX */
5486 sc
->bnx_tick_cpuid
= sc
->bnx_intr_data
[0].bnx_intr_cpuid
;
5490 bnx_rx_std_refill_ithread(void *xstd
)
5492 struct bnx_rx_std_ring
*std
= xstd
;
5493 struct globaldata
*gd
= mycpu
;
5497 while (!std
->bnx_rx_std_stop
) {
5498 if (std
->bnx_rx_std_refill
) {
5499 lwkt_serialize_handler_call(
5500 &std
->bnx_rx_std_serialize
,
5501 bnx_rx_std_refill
, std
, NULL
);
5507 atomic_poll_release_int(&std
->bnx_rx_std_running
);
5510 if (!std
->bnx_rx_std_refill
&& !std
->bnx_rx_std_stop
) {
5511 lwkt_deschedule_self(gd
->gd_curthread
);
5524 bnx_rx_std_refill(void *xstd
, void *frame __unused
)
5526 struct bnx_rx_std_ring
*std
= xstd
;
5527 int cnt
, refill_mask
;
5533 refill_mask
= std
->bnx_rx_std_refill
;
5534 atomic_clear_int(&std
->bnx_rx_std_refill
, refill_mask
);
5536 while (refill_mask
) {
5537 uint16_t check_idx
= std
->bnx_rx_std
;
5540 ret_idx
= bsfl(refill_mask
);
5542 struct bnx_rx_buf
*rb
;
5545 BNX_INC(check_idx
, BGE_STD_RX_RING_CNT
);
5546 rb
= &std
->bnx_rx_std_buf
[check_idx
];
5547 refilled
= rb
->bnx_rx_refilled
;
5550 bnx_setup_rxdesc_std(std
, check_idx
);
5551 std
->bnx_rx_std
= check_idx
;
5554 atomic_subtract_int(
5555 &std
->bnx_rx_std_used
, cnt
);
5556 bnx_writembx(std
->bnx_sc
,
5557 BGE_MBX_RX_STD_PROD_LO
,
5565 refill_mask
&= ~(1 << ret_idx
);
5569 atomic_subtract_int(&std
->bnx_rx_std_used
, cnt
);
5570 bnx_writembx(std
->bnx_sc
, BGE_MBX_RX_STD_PROD_LO
,
5574 if (std
->bnx_rx_std_refill
)
5577 atomic_poll_release_int(&std
->bnx_rx_std_running
);
5580 if (std
->bnx_rx_std_refill
)
5585 bnx_sysctl_std_refill(SYSCTL_HANDLER_ARGS
)
5587 struct bnx_softc
*sc
= (void *)arg1
;
5588 struct ifnet
*ifp
= &sc
->arpcom
.ac_if
;
5589 struct bnx_rx_ret_ring
*ret
= &sc
->bnx_rx_ret_ring
[0];
5590 int error
, cntmax
, i
;
5592 cntmax
= ret
->bnx_rx_cntmax
;
5593 error
= sysctl_handle_int(oidp
, &cntmax
, 0, req
);
5594 if (error
|| req
->newptr
== NULL
)
5597 ifnet_serialize_all(ifp
);
5599 if ((cntmax
* sc
->bnx_rx_retcnt
) >= BGE_STD_RX_RING_CNT
/ 2) {
5604 for (i
= 0; i
< sc
->bnx_tx_ringcnt
; ++i
)
5605 sc
->bnx_rx_ret_ring
[i
].bnx_rx_cntmax
= cntmax
;
5609 ifnet_deserialize_all(ifp
);
5615 bnx_init_rss(struct bnx_softc
*sc
)
5617 uint8_t key
[BGE_RSS_KEYREG_CNT
* BGE_RSS_KEYREG_SIZE
];
5620 KKASSERT(BNX_RSS_ENABLED(sc
));
5623 * Configure RSS redirect table.
5625 if_ringmap_rdrtable(sc
->bnx_rx_rmap
, sc
->bnx_rdr_table
,
5628 for (j
= 0; j
< BGE_RSS_INDIR_TBL_CNT
; ++j
) {
5631 for (i
= 0; i
< BGE_RSS_INDIR_TBLENT_CNT
; ++i
) {
5634 q
= sc
->bnx_rdr_table
[r
];
5635 tbl
|= q
<< (BGE_RSS_INDIR_TBLENT_SHIFT
*
5636 (BGE_RSS_INDIR_TBLENT_CNT
- i
- 1));
5640 BNX_RSS_DPRINTF(sc
, 1, "tbl%d %08x\n", j
, tbl
);
5641 CSR_WRITE_4(sc
, BGE_RSS_INDIR_TBL(j
), tbl
);
5644 toeplitz_get_key(key
, sizeof(key
));
5645 for (i
= 0; i
< BGE_RSS_KEYREG_CNT
; ++i
) {
5648 keyreg
= BGE_RSS_KEYREG_VAL(key
, i
);
5650 BNX_RSS_DPRINTF(sc
, 1, "key%d %08x\n", i
, keyreg
);
5651 CSR_WRITE_4(sc
, BGE_RSS_KEYREG(i
), keyreg
);
5656 bnx_setup_ring_cnt(struct bnx_softc
*sc
)
5658 int msix_enable
, msix_cnt
, msix_ring
, ring_max
, ring_cnt
;
5661 sc
->bnx_rx_rmap
= if_ringmap_alloc(sc
->bnx_dev
, 1, 1);
5663 if (netisr_ncpus
== 1)
5666 msix_enable
= device_getenv_int(sc
->bnx_dev
, "msix.enable",
5672 * One MSI-X vector is dedicated to status or single TX queue,
5673 * so make sure that there are enough MSI-X vectors.
5675 msix_cnt
= pci_msix_count(sc
->bnx_dev
);
5679 device_printf(sc
->bnx_dev
, "MSI-X count %d\n", msix_cnt
);
5680 msix_ring
= msix_cnt
- 1;
5683 * Setup RX ring count
5685 ring_max
= BNX_RX_RING_MAX
;
5686 if (ring_max
> msix_ring
)
5687 ring_max
= msix_ring
;
5688 ring_cnt
= device_getenv_int(sc
->bnx_dev
, "rx_rings", bnx_rx_rings
);
5690 if_ringmap_free(sc
->bnx_rx_rmap
);
5691 sc
->bnx_rx_rmap
= if_ringmap_alloc(sc
->bnx_dev
, ring_cnt
, ring_max
);
5694 sc
->bnx_rx_retcnt
= if_ringmap_count(sc
->bnx_rx_rmap
);
5697 * Setup TX ring count
5699 * Currently only BCM5719 and BCM5720 support multiple TX rings
5700 * and the TX ring count must be less than the RX ring count.
5702 if (sc
->bnx_asicrev
== BGE_ASICREV_BCM5719
||
5703 sc
->bnx_asicrev
== BGE_ASICREV_BCM5720
) {
5704 ring_max
= BNX_TX_RING_MAX
;
5705 if (ring_max
> sc
->bnx_rx_retcnt
)
5706 ring_max
= sc
->bnx_rx_retcnt
;
5707 ring_cnt
= device_getenv_int(sc
->bnx_dev
, "tx_rings",
5713 sc
->bnx_tx_rmap
= if_ringmap_alloc(sc
->bnx_dev
, ring_cnt
, ring_max
);
5714 if_ringmap_align(sc
->bnx_dev
, sc
->bnx_rx_rmap
, sc
->bnx_tx_rmap
);
5716 sc
->bnx_tx_ringcnt
= if_ringmap_count(sc
->bnx_tx_rmap
);
5717 KASSERT(sc
->bnx_tx_ringcnt
<= sc
->bnx_rx_retcnt
,
5718 ("invalid TX ring count %d and RX ring count %d",
5719 sc
->bnx_tx_ringcnt
, sc
->bnx_rx_retcnt
));
5722 * Setup interrupt count.
5724 if (sc
->bnx_rx_retcnt
== 1) {
5725 sc
->bnx_intr_cnt
= 1;
5728 * We need one extra MSI-X vector for link status or
5729 * TX ring (if only one TX ring is enabled).
5731 sc
->bnx_intr_cnt
= sc
->bnx_rx_retcnt
+ 1;
5733 KKASSERT(sc
->bnx_intr_cnt
<= BNX_INTR_MAX
);
5736 device_printf(sc
->bnx_dev
, "intr count %d, "
5737 "RX ring %d, TX ring %d\n", sc
->bnx_intr_cnt
,
5738 sc
->bnx_rx_retcnt
, sc
->bnx_tx_ringcnt
);
5743 bnx_alloc_msix(struct bnx_softc
*sc
)
5745 struct bnx_intr_data
*intr
;
5746 boolean_t setup
= FALSE
;
5749 KKASSERT(sc
->bnx_intr_cnt
> 1);
5750 KKASSERT(sc
->bnx_intr_cnt
== sc
->bnx_rx_retcnt
+ 1);
5752 if (sc
->bnx_flags
& BNX_FLAG_RXTX_BUNDLE
) {
5756 intr
= &sc
->bnx_intr_data
[0];
5758 intr
->bnx_intr_serialize
= &sc
->bnx_main_serialize
;
5759 intr
->bnx_saved_status_tag
= &sc
->bnx_saved_status_tag
;
5761 intr
->bnx_intr_func
= bnx_msix_status
;
5762 intr
->bnx_intr_arg
= sc
;
5763 intr
->bnx_intr_cpuid
= 0; /* XXX */
5765 ksnprintf(intr
->bnx_intr_desc0
, sizeof(intr
->bnx_intr_desc0
),
5766 "%s sts", device_get_nameunit(sc
->bnx_dev
));
5767 intr
->bnx_intr_desc
= intr
->bnx_intr_desc0
;
5772 for (i
= 1; i
< sc
->bnx_intr_cnt
; ++i
) {
5775 intr
= &sc
->bnx_intr_data
[i
];
5777 KKASSERT(idx
< sc
->bnx_rx_retcnt
);
5778 intr
->bnx_ret
= &sc
->bnx_rx_ret_ring
[idx
];
5779 if (idx
< sc
->bnx_tx_ringcnt
) {
5780 intr
->bnx_txr
= &sc
->bnx_tx_ring
[idx
];
5781 intr
->bnx_ret
->bnx_txr
= intr
->bnx_txr
;
5784 intr
->bnx_intr_serialize
=
5785 &intr
->bnx_ret
->bnx_rx_ret_serialize
;
5786 intr
->bnx_saved_status_tag
=
5787 &intr
->bnx_ret
->bnx_saved_status_tag
;
5789 intr
->bnx_intr_arg
= intr
->bnx_ret
;
5790 intr
->bnx_intr_cpuid
=
5791 if_ringmap_cpumap(sc
->bnx_rx_rmap
, idx
);
5792 KKASSERT(intr
->bnx_intr_cpuid
< netisr_ncpus
);
5794 if (intr
->bnx_txr
== NULL
) {
5795 intr
->bnx_intr_check
= bnx_check_intr_rx
;
5796 intr
->bnx_intr_func
= bnx_msix_rx
;
5797 ksnprintf(intr
->bnx_intr_desc0
,
5798 sizeof(intr
->bnx_intr_desc0
), "%s rx%d",
5799 device_get_nameunit(sc
->bnx_dev
), idx
);
5805 intr
->bnx_intr_check
= bnx_check_intr_rxtx
;
5806 intr
->bnx_intr_func
= bnx_msix_rxtx
;
5807 ksnprintf(intr
->bnx_intr_desc0
,
5808 sizeof(intr
->bnx_intr_desc0
), "%s rxtx%d",
5809 device_get_nameunit(sc
->bnx_dev
), idx
);
5812 tx_cpuid
= if_ringmap_cpumap(sc
->bnx_tx_rmap
,
5814 KASSERT(intr
->bnx_intr_cpuid
== tx_cpuid
,
5815 ("RX intr cpu%d, TX intr cpu%d, mismatch",
5816 intr
->bnx_intr_cpuid
, tx_cpuid
));
5818 intr
->bnx_txr
->bnx_tx_cpuid
=
5819 intr
->bnx_intr_cpuid
;
5821 intr
->bnx_intr_desc
= intr
->bnx_intr_desc0
;
5823 intr
->bnx_ret
->bnx_msix_mbx
= intr
->bnx_intr_mbx
;
5827 * TX ring0 and link status
5829 intr
= &sc
->bnx_intr_data
[0];
5831 intr
->bnx_txr
= &sc
->bnx_tx_ring
[0];
5832 intr
->bnx_intr_serialize
= &sc
->bnx_main_serialize
;
5833 intr
->bnx_intr_check
= bnx_check_intr_tx
;
5834 intr
->bnx_saved_status_tag
=
5835 &intr
->bnx_txr
->bnx_saved_status_tag
;
5837 intr
->bnx_intr_func
= bnx_msix_tx_status
;
5838 intr
->bnx_intr_arg
= intr
->bnx_txr
;
5839 intr
->bnx_intr_cpuid
= if_ringmap_cpumap(sc
->bnx_tx_rmap
, 0);
5840 KKASSERT(intr
->bnx_intr_cpuid
< netisr_ncpus
);
5842 ksnprintf(intr
->bnx_intr_desc0
, sizeof(intr
->bnx_intr_desc0
),
5843 "%s ststx", device_get_nameunit(sc
->bnx_dev
));
5844 intr
->bnx_intr_desc
= intr
->bnx_intr_desc0
;
5846 intr
->bnx_txr
->bnx_tx_cpuid
= intr
->bnx_intr_cpuid
;
5851 for (i
= 1; i
< sc
->bnx_intr_cnt
; ++i
) {
5854 intr
= &sc
->bnx_intr_data
[i
];
5856 KKASSERT(idx
< sc
->bnx_rx_retcnt
);
5857 intr
->bnx_ret
= &sc
->bnx_rx_ret_ring
[idx
];
5858 intr
->bnx_intr_serialize
=
5859 &intr
->bnx_ret
->bnx_rx_ret_serialize
;
5860 intr
->bnx_intr_check
= bnx_check_intr_rx
;
5861 intr
->bnx_saved_status_tag
=
5862 &intr
->bnx_ret
->bnx_saved_status_tag
;
5864 intr
->bnx_intr_func
= bnx_msix_rx
;
5865 intr
->bnx_intr_arg
= intr
->bnx_ret
;
5866 intr
->bnx_intr_cpuid
=
5867 if_ringmap_cpumap(sc
->bnx_rx_rmap
, idx
);
5868 KKASSERT(intr
->bnx_intr_cpuid
< netisr_ncpus
);
5870 ksnprintf(intr
->bnx_intr_desc0
,
5871 sizeof(intr
->bnx_intr_desc0
), "%s rx%d",
5872 device_get_nameunit(sc
->bnx_dev
), idx
);
5873 intr
->bnx_intr_desc
= intr
->bnx_intr_desc0
;
5875 intr
->bnx_ret
->bnx_msix_mbx
= intr
->bnx_intr_mbx
;
5879 if (BNX_IS_5717_PLUS(sc
)) {
5880 sc
->bnx_msix_mem_rid
= PCIR_BAR(4);
5882 if (sc
->bnx_res2
== NULL
)
5883 sc
->bnx_msix_mem_rid
= PCIR_BAR(2);
5885 if (sc
->bnx_msix_mem_rid
!= 0) {
5886 sc
->bnx_msix_mem_res
= bus_alloc_resource_any(sc
->bnx_dev
,
5887 SYS_RES_MEMORY
, &sc
->bnx_msix_mem_rid
, RF_ACTIVE
);
5888 if (sc
->bnx_msix_mem_res
== NULL
) {
5889 device_printf(sc
->bnx_dev
,
5890 "could not alloc MSI-X table\n");
5895 bnx_enable_msi(sc
, TRUE
);
5897 error
= pci_setup_msix(sc
->bnx_dev
);
5899 device_printf(sc
->bnx_dev
, "could not setup MSI-X\n");
5904 for (i
= 0; i
< sc
->bnx_intr_cnt
; ++i
) {
5905 intr
= &sc
->bnx_intr_data
[i
];
5907 error
= pci_alloc_msix_vector(sc
->bnx_dev
, i
,
5908 &intr
->bnx_intr_rid
, intr
->bnx_intr_cpuid
);
5910 device_printf(sc
->bnx_dev
,
5911 "could not alloc MSI-X %d on cpu%d\n",
5912 i
, intr
->bnx_intr_cpuid
);
5916 intr
->bnx_intr_res
= bus_alloc_resource_any(sc
->bnx_dev
,
5917 SYS_RES_IRQ
, &intr
->bnx_intr_rid
, RF_ACTIVE
);
5918 if (intr
->bnx_intr_res
== NULL
) {
5919 device_printf(sc
->bnx_dev
,
5920 "could not alloc MSI-X %d resource\n", i
);
5926 pci_enable_msix(sc
->bnx_dev
);
5927 sc
->bnx_intr_type
= PCI_INTR_TYPE_MSIX
;
5930 bnx_free_msix(sc
, setup
);
5935 bnx_free_msix(struct bnx_softc
*sc
, boolean_t setup
)
5939 KKASSERT(sc
->bnx_intr_cnt
> 1);
5941 for (i
= 0; i
< sc
->bnx_intr_cnt
; ++i
) {
5942 struct bnx_intr_data
*intr
= &sc
->bnx_intr_data
[i
];
5944 if (intr
->bnx_intr_res
!= NULL
) {
5945 bus_release_resource(sc
->bnx_dev
, SYS_RES_IRQ
,
5946 intr
->bnx_intr_rid
, intr
->bnx_intr_res
);
5948 if (intr
->bnx_intr_rid
>= 0) {
5949 pci_release_msix_vector(sc
->bnx_dev
,
5950 intr
->bnx_intr_rid
);
5954 pci_teardown_msix(sc
->bnx_dev
);
5958 bnx_rx_std_refill_sched_ipi(void *xret
)
5960 struct bnx_rx_ret_ring
*ret
= xret
;
5961 struct bnx_rx_std_ring
*std
= ret
->bnx_std
;
5962 struct globaldata
*gd
= mycpu
;
5966 atomic_set_int(&std
->bnx_rx_std_refill
, ret
->bnx_rx_mask
);
5969 KKASSERT(std
->bnx_rx_std_ithread
->td_gd
== gd
);
5970 lwkt_schedule(std
->bnx_rx_std_ithread
);
5976 bnx_rx_std_refill_stop(void *xstd
)
5978 struct bnx_rx_std_ring
*std
= xstd
;
5979 struct globaldata
*gd
= mycpu
;
5983 std
->bnx_rx_std_stop
= 1;
5986 KKASSERT(std
->bnx_rx_std_ithread
->td_gd
== gd
);
5987 lwkt_schedule(std
->bnx_rx_std_ithread
);
5993 bnx_serialize_skipmain(struct bnx_softc
*sc
)
5995 lwkt_serialize_array_enter(sc
->bnx_serialize
,
5996 sc
->bnx_serialize_cnt
, 1);
6000 bnx_deserialize_skipmain(struct bnx_softc
*sc
)
6002 lwkt_serialize_array_exit(sc
->bnx_serialize
,
6003 sc
->bnx_serialize_cnt
, 1);
6007 bnx_rx_std_refill_sched(struct bnx_rx_ret_ring
*ret
,
6008 struct bnx_rx_std_ring
*std
)
6010 struct globaldata
*gd
= mycpu
;
6012 ret
->bnx_rx_cnt
= 0;
6017 atomic_set_int(&std
->bnx_rx_std_refill
, ret
->bnx_rx_mask
);
6019 if (atomic_poll_acquire_int(&std
->bnx_rx_std_running
)) {
6020 if (std
->bnx_rx_std_ithread
->td_gd
== gd
) {
6021 lwkt_schedule(std
->bnx_rx_std_ithread
);
6023 lwkt_send_ipiq(std
->bnx_rx_std_ithread
->td_gd
,
6024 bnx_rx_std_refill_sched_ipi
, ret
);
6031 static struct pktinfo
*
6032 bnx_rss_info(struct pktinfo
*pi
, const struct bge_rx_bd
*cur_rx
)
6034 /* Don't pick up IPv6 packet */
6035 if (cur_rx
->bge_flags
& BGE_RXBDFLAG_IPV6
)
6038 /* Don't pick up IP packet w/o IP checksum */
6039 if ((cur_rx
->bge_flags
& BGE_RXBDFLAG_IP_CSUM
) == 0 ||
6040 (cur_rx
->bge_error_flag
& BGE_RXERRFLAG_IP_CSUM_NOK
))
6043 /* Don't pick up IP packet w/o TCP/UDP checksum */
6044 if ((cur_rx
->bge_flags
& BGE_RXBDFLAG_TCP_UDP_CSUM
) == 0)
6047 /* May be IP fragment */
6048 if (cur_rx
->bge_tcp_udp_csum
!= 0xffff)
6051 if (cur_rx
->bge_flags
& BGE_RXBDFLAG_TCP_UDP_IS_TCP
)
6052 pi
->pi_l3proto
= IPPROTO_TCP
;
6054 pi
->pi_l3proto
= IPPROTO_UDP
;
6055 pi
->pi_netisr
= NETISR_IP
;
6062 bnx_sig_pre_reset(struct bnx_softc
*sc
, int type
)
6064 if (type
== BNX_RESET_START
|| type
== BNX_RESET_SUSPEND
)
6065 bnx_ape_driver_state_change(sc
, type
);
6069 bnx_sig_post_reset(struct bnx_softc
*sc
, int type
)
6071 if (type
== BNX_RESET_SHUTDOWN
)
6072 bnx_ape_driver_state_change(sc
, type
);
6076 * Clear all stale locks and select the lock for this driver instance.
6079 bnx_ape_lock_init(struct bnx_softc
*sc
)
6081 uint32_t bit
, regbase
;
6084 regbase
= BGE_APE_PER_LOCK_GRANT
;
6086 /* Clear any stale locks. */
6087 for (i
= BGE_APE_LOCK_PHY0
; i
<= BGE_APE_LOCK_GPIO
; i
++) {
6089 case BGE_APE_LOCK_PHY0
:
6090 case BGE_APE_LOCK_PHY1
:
6091 case BGE_APE_LOCK_PHY2
:
6092 case BGE_APE_LOCK_PHY3
:
6093 bit
= BGE_APE_LOCK_GRANT_DRIVER0
;
6097 if (sc
->bnx_func_addr
== 0)
6098 bit
= BGE_APE_LOCK_GRANT_DRIVER0
;
6100 bit
= 1 << sc
->bnx_func_addr
;
6103 APE_WRITE_4(sc
, regbase
+ 4 * i
, bit
);
6106 /* Select the PHY lock based on the device's function number. */
6107 switch (sc
->bnx_func_addr
) {
6109 sc
->bnx_phy_ape_lock
= BGE_APE_LOCK_PHY0
;
6113 sc
->bnx_phy_ape_lock
= BGE_APE_LOCK_PHY1
;
6117 sc
->bnx_phy_ape_lock
= BGE_APE_LOCK_PHY2
;
6121 sc
->bnx_phy_ape_lock
= BGE_APE_LOCK_PHY3
;
6125 device_printf(sc
->bnx_dev
,
6126 "PHY lock not supported on this function\n");
6132 * Check for APE firmware, set flags, and print version info.
6135 bnx_ape_read_fw_ver(struct bnx_softc
*sc
)
6138 uint32_t apedata
, features
;
6140 /* Check for a valid APE signature in shared memory. */
6141 apedata
= APE_READ_4(sc
, BGE_APE_SEG_SIG
);
6142 if (apedata
!= BGE_APE_SEG_SIG_MAGIC
) {
6143 device_printf(sc
->bnx_dev
, "no APE signature\n");
6144 sc
->bnx_mfw_flags
&= ~BNX_MFW_ON_APE
;
6148 /* Check if APE firmware is running. */
6149 apedata
= APE_READ_4(sc
, BGE_APE_FW_STATUS
);
6150 if ((apedata
& BGE_APE_FW_STATUS_READY
) == 0) {
6151 device_printf(sc
->bnx_dev
, "APE signature found "
6152 "but FW status not ready! 0x%08x\n", apedata
);
6156 sc
->bnx_mfw_flags
|= BNX_MFW_ON_APE
;
6158 /* Fetch the APE firwmare type and version. */
6159 apedata
= APE_READ_4(sc
, BGE_APE_FW_VERSION
);
6160 features
= APE_READ_4(sc
, BGE_APE_FW_FEATURES
);
6161 if (features
& BGE_APE_FW_FEATURE_NCSI
) {
6162 sc
->bnx_mfw_flags
|= BNX_MFW_TYPE_NCSI
;
6164 } else if (features
& BGE_APE_FW_FEATURE_DASH
) {
6165 sc
->bnx_mfw_flags
|= BNX_MFW_TYPE_DASH
;
6171 /* Print the APE firmware version. */
6172 device_printf(sc
->bnx_dev
, "APE FW version: %s v%d.%d.%d.%d\n",
6174 (apedata
& BGE_APE_FW_VERSION_MAJMSK
) >> BGE_APE_FW_VERSION_MAJSFT
,
6175 (apedata
& BGE_APE_FW_VERSION_MINMSK
) >> BGE_APE_FW_VERSION_MINSFT
,
6176 (apedata
& BGE_APE_FW_VERSION_REVMSK
) >> BGE_APE_FW_VERSION_REVSFT
,
6177 (apedata
& BGE_APE_FW_VERSION_BLDMSK
));
6181 bnx_ape_lock(struct bnx_softc
*sc
, int locknum
)
6183 uint32_t bit
, gnt
, req
, status
;
6186 if ((sc
->bnx_mfw_flags
& BNX_MFW_ON_APE
) == 0)
6189 /* Lock request/grant registers have different bases. */
6190 req
= BGE_APE_PER_LOCK_REQ
;
6191 gnt
= BGE_APE_PER_LOCK_GRANT
;
6196 case BGE_APE_LOCK_GPIO
:
6197 /* Lock required when using GPIO. */
6198 if (sc
->bnx_func_addr
== 0)
6199 bit
= BGE_APE_LOCK_REQ_DRIVER0
;
6201 bit
= 1 << sc
->bnx_func_addr
;
6204 case BGE_APE_LOCK_GRC
:
6205 /* Lock required to reset the device. */
6206 if (sc
->bnx_func_addr
== 0)
6207 bit
= BGE_APE_LOCK_REQ_DRIVER0
;
6209 bit
= 1 << sc
->bnx_func_addr
;
6212 case BGE_APE_LOCK_MEM
:
6213 /* Lock required when accessing certain APE memory. */
6214 if (sc
->bnx_func_addr
== 0)
6215 bit
= BGE_APE_LOCK_REQ_DRIVER0
;
6217 bit
= 1 << sc
->bnx_func_addr
;
6220 case BGE_APE_LOCK_PHY0
:
6221 case BGE_APE_LOCK_PHY1
:
6222 case BGE_APE_LOCK_PHY2
:
6223 case BGE_APE_LOCK_PHY3
:
6224 /* Lock required when accessing PHYs. */
6225 bit
= BGE_APE_LOCK_REQ_DRIVER0
;
6232 /* Request a lock. */
6233 APE_WRITE_4(sc
, req
+ off
, bit
);
6235 /* Wait up to 1 second to acquire lock. */
6236 for (i
= 0; i
< 20000; i
++) {
6237 status
= APE_READ_4(sc
, gnt
+ off
);
6243 /* Handle any errors. */
6244 if (status
!= bit
) {
6245 if_printf(&sc
->arpcom
.ac_if
, "APE lock %d request failed! "
6246 "request = 0x%04x[0x%04x], status = 0x%04x[0x%04x]\n",
6247 locknum
, req
+ off
, bit
& 0xFFFF, gnt
+ off
,
6249 /* Revoke the lock request. */
6250 APE_WRITE_4(sc
, gnt
+ off
, bit
);
6258 bnx_ape_unlock(struct bnx_softc
*sc
, int locknum
)
6263 if ((sc
->bnx_mfw_flags
& BNX_MFW_ON_APE
) == 0)
6266 gnt
= BGE_APE_PER_LOCK_GRANT
;
6271 case BGE_APE_LOCK_GPIO
:
6272 if (sc
->bnx_func_addr
== 0)
6273 bit
= BGE_APE_LOCK_GRANT_DRIVER0
;
6275 bit
= 1 << sc
->bnx_func_addr
;
6278 case BGE_APE_LOCK_GRC
:
6279 if (sc
->bnx_func_addr
== 0)
6280 bit
= BGE_APE_LOCK_GRANT_DRIVER0
;
6282 bit
= 1 << sc
->bnx_func_addr
;
6285 case BGE_APE_LOCK_MEM
:
6286 if (sc
->bnx_func_addr
== 0)
6287 bit
= BGE_APE_LOCK_GRANT_DRIVER0
;
6289 bit
= 1 << sc
->bnx_func_addr
;
6292 case BGE_APE_LOCK_PHY0
:
6293 case BGE_APE_LOCK_PHY1
:
6294 case BGE_APE_LOCK_PHY2
:
6295 case BGE_APE_LOCK_PHY3
:
6296 bit
= BGE_APE_LOCK_GRANT_DRIVER0
;
6303 APE_WRITE_4(sc
, gnt
+ off
, bit
);
6307 * Send an event to the APE firmware.
6310 bnx_ape_send_event(struct bnx_softc
*sc
, uint32_t event
)
6315 /* NCSI does not support APE events. */
6316 if ((sc
->bnx_mfw_flags
& BNX_MFW_ON_APE
) == 0)
6319 /* Wait up to 1ms for APE to service previous event. */
6320 for (i
= 10; i
> 0; i
--) {
6321 if (bnx_ape_lock(sc
, BGE_APE_LOCK_MEM
) != 0)
6323 apedata
= APE_READ_4(sc
, BGE_APE_EVENT_STATUS
);
6324 if ((apedata
& BGE_APE_EVENT_STATUS_EVENT_PENDING
) == 0) {
6325 APE_WRITE_4(sc
, BGE_APE_EVENT_STATUS
, event
|
6326 BGE_APE_EVENT_STATUS_EVENT_PENDING
);
6327 bnx_ape_unlock(sc
, BGE_APE_LOCK_MEM
);
6328 APE_WRITE_4(sc
, BGE_APE_EVENT
, BGE_APE_EVENT_1
);
6331 bnx_ape_unlock(sc
, BGE_APE_LOCK_MEM
);
6335 if_printf(&sc
->arpcom
.ac_if
,
6336 "APE event 0x%08x send timed out\n", event
);
6341 bnx_ape_driver_state_change(struct bnx_softc
*sc
, int kind
)
6343 uint32_t apedata
, event
;
6345 if ((sc
->bnx_mfw_flags
& BNX_MFW_ON_APE
) == 0)
6349 case BNX_RESET_START
:
6350 /* If this is the first load, clear the load counter. */
6351 apedata
= APE_READ_4(sc
, BGE_APE_HOST_SEG_SIG
);
6352 if (apedata
!= BGE_APE_HOST_SEG_SIG_MAGIC
) {
6353 APE_WRITE_4(sc
, BGE_APE_HOST_INIT_COUNT
, 0);
6355 apedata
= APE_READ_4(sc
, BGE_APE_HOST_INIT_COUNT
);
6356 APE_WRITE_4(sc
, BGE_APE_HOST_INIT_COUNT
, ++apedata
);
6358 APE_WRITE_4(sc
, BGE_APE_HOST_SEG_SIG
,
6359 BGE_APE_HOST_SEG_SIG_MAGIC
);
6360 APE_WRITE_4(sc
, BGE_APE_HOST_SEG_LEN
,
6361 BGE_APE_HOST_SEG_LEN_MAGIC
);
6363 /* Add some version info if bnx(4) supports it. */
6364 APE_WRITE_4(sc
, BGE_APE_HOST_DRIVER_ID
,
6365 BGE_APE_HOST_DRIVER_ID_MAGIC(1, 0));
6366 APE_WRITE_4(sc
, BGE_APE_HOST_BEHAVIOR
,
6367 BGE_APE_HOST_BEHAV_NO_PHYLOCK
);
6368 APE_WRITE_4(sc
, BGE_APE_HOST_HEARTBEAT_INT_MS
,
6369 BGE_APE_HOST_HEARTBEAT_INT_DISABLE
);
6370 APE_WRITE_4(sc
, BGE_APE_HOST_DRVR_STATE
,
6371 BGE_APE_HOST_DRVR_STATE_START
);
6372 event
= BGE_APE_EVENT_STATUS_STATE_START
;
6375 case BNX_RESET_SHUTDOWN
:
6376 APE_WRITE_4(sc
, BGE_APE_HOST_DRVR_STATE
,
6377 BGE_APE_HOST_DRVR_STATE_UNLOAD
);
6378 event
= BGE_APE_EVENT_STATUS_STATE_UNLOAD
;
6381 case BNX_RESET_SUSPEND
:
6382 event
= BGE_APE_EVENT_STATUS_STATE_SUSPEND
;
6389 bnx_ape_send_event(sc
, event
| BGE_APE_EVENT_STATUS_DRIVER_EVNT
|
6390 BGE_APE_EVENT_STATUS_STATE_CHNGE
);