2 * Copyright (c) 2001 Wind River Systems
3 * Copyright (c) 1997, 1998, 1999, 2001
4 * Bill Paul <wpaul@windriver.com>. All rights reserved.
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * 3. All advertising materials mentioning features or use of this software
15 * must display the following acknowledgement:
16 * This product includes software developed by Bill Paul.
17 * 4. Neither the name of the author nor the names of any co-contributors
18 * may be used to endorse or promote products derived from this software
19 * without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
22 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
25 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
26 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
28 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
29 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
31 * THE POSSIBILITY OF SUCH DAMAGE.
33 * $FreeBSD: src/sys/dev/bge/if_bge.c,v 1.3.2.39 2005/07/03 03:41:18 silby Exp $
37 #include "opt_ifpoll.h"
39 #include <sys/param.h>
41 #include <sys/endian.h>
42 #include <sys/kernel.h>
43 #include <sys/interrupt.h>
45 #include <sys/malloc.h>
46 #include <sys/queue.h>
48 #include <sys/serialize.h>
49 #include <sys/socket.h>
50 #include <sys/sockio.h>
51 #include <sys/sysctl.h>
53 #include <netinet/ip.h>
54 #include <netinet/tcp.h>
57 #include <net/ethernet.h>
59 #include <net/if_arp.h>
60 #include <net/if_dl.h>
61 #include <net/if_media.h>
62 #include <net/if_poll.h>
63 #include <net/if_types.h>
64 #include <net/ifq_var.h>
65 #include <net/toeplitz.h>
66 #include <net/toeplitz2.h>
67 #include <net/vlan/if_vlan_var.h>
68 #include <net/vlan/if_vlan_ether.h>
70 #include <dev/netif/mii_layer/mii.h>
71 #include <dev/netif/mii_layer/miivar.h>
72 #include <dev/netif/mii_layer/brgphyreg.h>
75 #include <bus/pci/pcireg.h>
76 #include <bus/pci/pcivar.h>
78 #include <dev/netif/bge/if_bgereg.h>
79 #include <dev/netif/bnx/if_bnxvar.h>
81 /* "device miibus" required. See GENERIC if you get errors here. */
82 #include "miibus_if.h"
84 #define BNX_CSUM_FEATURES (CSUM_IP | CSUM_TCP | CSUM_UDP)
86 #define BNX_RESET_SHUTDOWN 0
87 #define BNX_RESET_START 1
88 #define BNX_RESET_SUSPEND 2
90 #define BNX_INTR_CKINTVL ((10 * hz) / 1000) /* 10ms */
93 #define BNX_RSS_DPRINTF(sc, lvl, fmt, ...) \
95 if (sc->bnx_rss_debug >= lvl) \
96 if_printf(&sc->arpcom.ac_if, fmt, __VA_ARGS__); \
98 #else /* !BNX_RSS_DEBUG */
99 #define BNX_RSS_DPRINTF(sc, lvl, fmt, ...) ((void)0)
100 #endif /* BNX_RSS_DEBUG */
102 static const struct bnx_type
{
107 { PCI_VENDOR_BROADCOM
, PCI_PRODUCT_BROADCOM_BCM5717
,
108 "Broadcom BCM5717 Gigabit Ethernet" },
109 { PCI_VENDOR_BROADCOM
, PCI_PRODUCT_BROADCOM_BCM5717C
,
110 "Broadcom BCM5717C Gigabit Ethernet" },
111 { PCI_VENDOR_BROADCOM
, PCI_PRODUCT_BROADCOM_BCM5718
,
112 "Broadcom BCM5718 Gigabit Ethernet" },
113 { PCI_VENDOR_BROADCOM
, PCI_PRODUCT_BROADCOM_BCM5719
,
114 "Broadcom BCM5719 Gigabit Ethernet" },
115 { PCI_VENDOR_BROADCOM
, PCI_PRODUCT_BROADCOM_BCM5720_ALT
,
116 "Broadcom BCM5720 Gigabit Ethernet" },
118 { PCI_VENDOR_BROADCOM
, PCI_PRODUCT_BROADCOM_BCM5725
,
119 "Broadcom BCM5725 Gigabit Ethernet" },
120 { PCI_VENDOR_BROADCOM
, PCI_PRODUCT_BROADCOM_BCM5727
,
121 "Broadcom BCM5727 Gigabit Ethernet" },
122 { PCI_VENDOR_BROADCOM
, PCI_PRODUCT_BROADCOM_BCM5762
,
123 "Broadcom BCM5762 Gigabit Ethernet" },
125 { PCI_VENDOR_BROADCOM
, PCI_PRODUCT_BROADCOM_BCM57761
,
126 "Broadcom BCM57761 Gigabit Ethernet" },
127 { PCI_VENDOR_BROADCOM
, PCI_PRODUCT_BROADCOM_BCM57762
,
128 "Broadcom BCM57762 Gigabit Ethernet" },
129 { PCI_VENDOR_BROADCOM
, PCI_PRODUCT_BROADCOM_BCM57765
,
130 "Broadcom BCM57765 Gigabit Ethernet" },
131 { PCI_VENDOR_BROADCOM
, PCI_PRODUCT_BROADCOM_BCM57766
,
132 "Broadcom BCM57766 Gigabit Ethernet" },
133 { PCI_VENDOR_BROADCOM
, PCI_PRODUCT_BROADCOM_BCM57781
,
134 "Broadcom BCM57781 Gigabit Ethernet" },
135 { PCI_VENDOR_BROADCOM
, PCI_PRODUCT_BROADCOM_BCM57782
,
136 "Broadcom BCM57782 Gigabit Ethernet" },
137 { PCI_VENDOR_BROADCOM
, PCI_PRODUCT_BROADCOM_BCM57785
,
138 "Broadcom BCM57785 Gigabit Ethernet" },
139 { PCI_VENDOR_BROADCOM
, PCI_PRODUCT_BROADCOM_BCM57786
,
140 "Broadcom BCM57786 Gigabit Ethernet" },
141 { PCI_VENDOR_BROADCOM
, PCI_PRODUCT_BROADCOM_BCM57791
,
142 "Broadcom BCM57791 Fast Ethernet" },
143 { PCI_VENDOR_BROADCOM
, PCI_PRODUCT_BROADCOM_BCM57795
,
144 "Broadcom BCM57795 Fast Ethernet" },
149 static const int bnx_tx_mailbox
[BNX_TX_RING_MAX
] = {
150 BGE_MBX_TX_HOST_PROD0_LO
,
151 BGE_MBX_TX_HOST_PROD0_HI
,
152 BGE_MBX_TX_HOST_PROD1_LO
,
153 BGE_MBX_TX_HOST_PROD1_HI
156 #define BNX_IS_JUMBO_CAPABLE(sc) ((sc)->bnx_flags & BNX_FLAG_JUMBO)
157 #define BNX_IS_5717_PLUS(sc) ((sc)->bnx_flags & BNX_FLAG_5717_PLUS)
158 #define BNX_IS_57765_PLUS(sc) ((sc)->bnx_flags & BNX_FLAG_57765_PLUS)
159 #define BNX_IS_57765_FAMILY(sc) \
160 ((sc)->bnx_flags & BNX_FLAG_57765_FAMILY)
162 typedef int (*bnx_eaddr_fcn_t
)(struct bnx_softc
*, uint8_t[]);
164 static int bnx_probe(device_t
);
165 static int bnx_attach(device_t
);
166 static int bnx_detach(device_t
);
167 static void bnx_shutdown(device_t
);
168 static int bnx_suspend(device_t
);
169 static int bnx_resume(device_t
);
170 static int bnx_miibus_readreg(device_t
, int, int);
171 static int bnx_miibus_writereg(device_t
, int, int, int);
172 static void bnx_miibus_statchg(device_t
);
174 static int bnx_handle_status(struct bnx_softc
*);
176 static void bnx_npoll(struct ifnet
*, struct ifpoll_info
*);
177 static void bnx_npoll_rx(struct ifnet
*, void *, int);
178 static void bnx_npoll_tx(struct ifnet
*, void *, int);
179 static void bnx_npoll_tx_notag(struct ifnet
*, void *, int);
180 static void bnx_npoll_status(struct ifnet
*);
181 static void bnx_npoll_status_notag(struct ifnet
*);
183 static void bnx_intr_legacy(void *);
184 static void bnx_msi(void *);
185 static void bnx_intr(struct bnx_softc
*);
186 static void bnx_msix_status(void *);
187 static void bnx_msix_tx_status(void *);
188 static void bnx_msix_rx(void *);
189 static void bnx_msix_rxtx(void *);
190 static void bnx_enable_intr(struct bnx_softc
*);
191 static void bnx_disable_intr(struct bnx_softc
*);
192 static void bnx_txeof(struct bnx_tx_ring
*, uint16_t);
193 static void bnx_rxeof(struct bnx_rx_ret_ring
*, uint16_t, int);
194 static int bnx_alloc_intr(struct bnx_softc
*);
195 static int bnx_setup_intr(struct bnx_softc
*);
196 static void bnx_free_intr(struct bnx_softc
*);
197 static void bnx_teardown_intr(struct bnx_softc
*, int);
198 static int bnx_alloc_msix(struct bnx_softc
*);
199 static void bnx_free_msix(struct bnx_softc
*, boolean_t
);
200 static void bnx_check_intr_rxtx(void *);
201 static void bnx_check_intr_rx(void *);
202 static void bnx_check_intr_tx(void *);
203 static void bnx_rx_std_refill_ithread(void *);
204 static void bnx_rx_std_refill(void *, void *);
205 static void bnx_rx_std_refill_sched_ipi(void *);
206 static void bnx_rx_std_refill_stop(void *);
207 static void bnx_rx_std_refill_sched(struct bnx_rx_ret_ring
*,
208 struct bnx_rx_std_ring
*);
210 static void bnx_start(struct ifnet
*, struct ifaltq_subque
*);
211 static int bnx_ioctl(struct ifnet
*, u_long
, caddr_t
, struct ucred
*);
212 static void bnx_init(void *);
213 static void bnx_stop(struct bnx_softc
*);
214 static void bnx_watchdog(struct ifaltq_subque
*);
215 static int bnx_ifmedia_upd(struct ifnet
*);
216 static void bnx_ifmedia_sts(struct ifnet
*, struct ifmediareq
*);
217 static void bnx_tick(void *);
218 static void bnx_serialize(struct ifnet
*, enum ifnet_serialize
);
219 static void bnx_deserialize(struct ifnet
*, enum ifnet_serialize
);
220 static int bnx_tryserialize(struct ifnet
*, enum ifnet_serialize
);
222 static void bnx_serialize_assert(struct ifnet
*, enum ifnet_serialize
,
225 static void bnx_serialize_skipmain(struct bnx_softc
*);
226 static void bnx_deserialize_skipmain(struct bnx_softc
*sc
);
228 static int bnx_alloc_jumbo_mem(struct bnx_softc
*);
229 static void bnx_free_jumbo_mem(struct bnx_softc
*);
230 static struct bnx_jslot
231 *bnx_jalloc(struct bnx_softc
*);
232 static void bnx_jfree(void *);
233 static void bnx_jref(void *);
234 static int bnx_newbuf_std(struct bnx_rx_ret_ring
*, int, int);
235 static int bnx_newbuf_jumbo(struct bnx_softc
*, int, int);
236 static void bnx_setup_rxdesc_std(struct bnx_rx_std_ring
*, int);
237 static void bnx_setup_rxdesc_jumbo(struct bnx_softc
*, int);
238 static int bnx_init_rx_ring_std(struct bnx_rx_std_ring
*);
239 static void bnx_free_rx_ring_std(struct bnx_rx_std_ring
*);
240 static int bnx_init_rx_ring_jumbo(struct bnx_softc
*);
241 static void bnx_free_rx_ring_jumbo(struct bnx_softc
*);
242 static void bnx_free_tx_ring(struct bnx_tx_ring
*);
243 static int bnx_init_tx_ring(struct bnx_tx_ring
*);
244 static int bnx_create_tx_ring(struct bnx_tx_ring
*);
245 static void bnx_destroy_tx_ring(struct bnx_tx_ring
*);
246 static int bnx_create_rx_ret_ring(struct bnx_rx_ret_ring
*);
247 static void bnx_destroy_rx_ret_ring(struct bnx_rx_ret_ring
*);
248 static int bnx_dma_alloc(device_t
);
249 static void bnx_dma_free(struct bnx_softc
*);
250 static int bnx_dma_block_alloc(struct bnx_softc
*, bus_size_t
,
251 bus_dma_tag_t
*, bus_dmamap_t
*, void **, bus_addr_t
*);
252 static void bnx_dma_block_free(bus_dma_tag_t
, bus_dmamap_t
, void *);
254 bnx_defrag_shortdma(struct mbuf
*);
255 static int bnx_encap(struct bnx_tx_ring
*, struct mbuf
**,
257 static int bnx_setup_tso(struct bnx_tx_ring
*, struct mbuf
**,
258 uint16_t *, uint16_t *);
259 static void bnx_setup_serialize(struct bnx_softc
*);
260 static void bnx_set_tick_cpuid(struct bnx_softc
*, boolean_t
);
261 static void bnx_setup_ring_cnt(struct bnx_softc
*);
263 static struct pktinfo
*bnx_rss_info(struct pktinfo
*,
264 const struct bge_rx_bd
*);
265 static void bnx_init_rss(struct bnx_softc
*);
266 static void bnx_reset(struct bnx_softc
*);
267 static int bnx_chipinit(struct bnx_softc
*);
268 static int bnx_blockinit(struct bnx_softc
*);
269 static void bnx_stop_block(struct bnx_softc
*, bus_size_t
, uint32_t);
270 static void bnx_enable_msi(struct bnx_softc
*, boolean_t
);
271 static void bnx_setmulti(struct bnx_softc
*);
272 static void bnx_setpromisc(struct bnx_softc
*);
273 static void bnx_stats_update_regs(struct bnx_softc
*);
274 static uint32_t bnx_dma_swap_options(struct bnx_softc
*);
276 static uint32_t bnx_readmem_ind(struct bnx_softc
*, uint32_t);
277 static void bnx_writemem_ind(struct bnx_softc
*, uint32_t, uint32_t);
279 static uint32_t bnx_readreg_ind(struct bnx_softc
*, uint32_t);
281 static void bnx_writemem_direct(struct bnx_softc
*, uint32_t, uint32_t);
282 static void bnx_writembx(struct bnx_softc
*, int, int);
283 static int bnx_read_nvram(struct bnx_softc
*, caddr_t
, int, int);
284 static uint8_t bnx_eeprom_getbyte(struct bnx_softc
*, uint32_t, uint8_t *);
285 static int bnx_read_eeprom(struct bnx_softc
*, caddr_t
, uint32_t, size_t);
287 static void bnx_tbi_link_upd(struct bnx_softc
*, uint32_t);
288 static void bnx_copper_link_upd(struct bnx_softc
*, uint32_t);
289 static void bnx_autopoll_link_upd(struct bnx_softc
*, uint32_t);
290 static void bnx_link_poll(struct bnx_softc
*);
292 static int bnx_get_eaddr_mem(struct bnx_softc
*, uint8_t[]);
293 static int bnx_get_eaddr_nvram(struct bnx_softc
*, uint8_t[]);
294 static int bnx_get_eaddr_eeprom(struct bnx_softc
*, uint8_t[]);
295 static int bnx_get_eaddr(struct bnx_softc
*, uint8_t[]);
297 static void bnx_coal_change(struct bnx_softc
*);
298 static int bnx_sysctl_force_defrag(SYSCTL_HANDLER_ARGS
);
299 static int bnx_sysctl_tx_wreg(SYSCTL_HANDLER_ARGS
);
300 static int bnx_sysctl_rx_coal_ticks(SYSCTL_HANDLER_ARGS
);
301 static int bnx_sysctl_tx_coal_ticks(SYSCTL_HANDLER_ARGS
);
302 static int bnx_sysctl_rx_coal_bds(SYSCTL_HANDLER_ARGS
);
303 static int bnx_sysctl_rx_coal_bds_poll(SYSCTL_HANDLER_ARGS
);
304 static int bnx_sysctl_tx_coal_bds(SYSCTL_HANDLER_ARGS
);
305 static int bnx_sysctl_tx_coal_bds_poll(SYSCTL_HANDLER_ARGS
);
306 static int bnx_sysctl_rx_coal_bds_int(SYSCTL_HANDLER_ARGS
);
307 static int bnx_sysctl_tx_coal_bds_int(SYSCTL_HANDLER_ARGS
);
308 static int bnx_sysctl_coal_chg(SYSCTL_HANDLER_ARGS
, uint32_t *,
311 static int bnx_sysctl_npoll_offset(SYSCTL_HANDLER_ARGS
);
312 static int bnx_sysctl_npoll_rxoff(SYSCTL_HANDLER_ARGS
);
313 static int bnx_sysctl_npoll_txoff(SYSCTL_HANDLER_ARGS
);
315 static int bnx_sysctl_std_refill(SYSCTL_HANDLER_ARGS
);
317 static void bnx_sig_post_reset(struct bnx_softc
*, int);
318 static void bnx_sig_pre_reset(struct bnx_softc
*, int);
319 static void bnx_ape_lock_init(struct bnx_softc
*);
320 static void bnx_ape_read_fw_ver(struct bnx_softc
*);
321 static int bnx_ape_lock(struct bnx_softc
*, int);
322 static void bnx_ape_unlock(struct bnx_softc
*, int);
323 static void bnx_ape_send_event(struct bnx_softc
*, uint32_t);
324 static void bnx_ape_driver_state_change(struct bnx_softc
*, int);
326 static int bnx_msi_enable
= 1;
327 static int bnx_msix_enable
= 1;
329 static int bnx_rx_rings
= 0; /* auto */
330 static int bnx_tx_rings
= 0; /* auto */
332 TUNABLE_INT("hw.bnx.msi.enable", &bnx_msi_enable
);
333 TUNABLE_INT("hw.bnx.msix.enable", &bnx_msix_enable
);
334 TUNABLE_INT("hw.bnx.rx_rings", &bnx_rx_rings
);
335 TUNABLE_INT("hw.bnx.tx_rings", &bnx_tx_rings
);
337 static device_method_t bnx_methods
[] = {
338 /* Device interface */
339 DEVMETHOD(device_probe
, bnx_probe
),
340 DEVMETHOD(device_attach
, bnx_attach
),
341 DEVMETHOD(device_detach
, bnx_detach
),
342 DEVMETHOD(device_shutdown
, bnx_shutdown
),
343 DEVMETHOD(device_suspend
, bnx_suspend
),
344 DEVMETHOD(device_resume
, bnx_resume
),
347 DEVMETHOD(bus_print_child
, bus_generic_print_child
),
348 DEVMETHOD(bus_driver_added
, bus_generic_driver_added
),
351 DEVMETHOD(miibus_readreg
, bnx_miibus_readreg
),
352 DEVMETHOD(miibus_writereg
, bnx_miibus_writereg
),
353 DEVMETHOD(miibus_statchg
, bnx_miibus_statchg
),
358 static DEFINE_CLASS_0(bnx
, bnx_driver
, bnx_methods
, sizeof(struct bnx_softc
));
359 static devclass_t bnx_devclass
;
361 DECLARE_DUMMY_MODULE(if_bnx
);
362 MODULE_DEPEND(if_bnx
, miibus
, 1, 1, 1);
363 DRIVER_MODULE(if_bnx
, pci
, bnx_driver
, bnx_devclass
, NULL
, NULL
);
364 DRIVER_MODULE(miibus
, bnx
, miibus_driver
, miibus_devclass
, NULL
, NULL
);
367 bnx_readmem_ind(struct bnx_softc
*sc
, uint32_t off
)
369 device_t dev
= sc
->bnx_dev
;
372 pci_write_config(dev
, BGE_PCI_MEMWIN_BASEADDR
, off
, 4);
373 val
= pci_read_config(dev
, BGE_PCI_MEMWIN_DATA
, 4);
374 pci_write_config(dev
, BGE_PCI_MEMWIN_BASEADDR
, 0, 4);
379 bnx_writemem_ind(struct bnx_softc
*sc
, uint32_t off
, uint32_t val
)
381 device_t dev
= sc
->bnx_dev
;
383 pci_write_config(dev
, BGE_PCI_MEMWIN_BASEADDR
, off
, 4);
384 pci_write_config(dev
, BGE_PCI_MEMWIN_DATA
, val
, 4);
385 pci_write_config(dev
, BGE_PCI_MEMWIN_BASEADDR
, 0, 4);
389 bnx_writemem_direct(struct bnx_softc
*sc
, uint32_t off
, uint32_t val
)
391 CSR_WRITE_4(sc
, off
, val
);
/*
 * Write a value to a device mailbox register.
 */
static void
bnx_writembx(struct bnx_softc *sc, int off, int val)
{
	CSR_WRITE_4(sc, off, val);
}
401 * Read a sequence of bytes from NVRAM.
404 bnx_read_nvram(struct bnx_softc
*sc
, caddr_t dest
, int off
, int cnt
)
410 * Read a byte of data stored in the EEPROM at address 'addr.' The
411 * BCM570x supports both the traditional bitbang interface and an
412 * auto access interface for reading the EEPROM. We use the auto
416 bnx_eeprom_getbyte(struct bnx_softc
*sc
, uint32_t addr
, uint8_t *dest
)
422 * Enable use of auto EEPROM access so we can avoid
423 * having to use the bitbang method.
425 BNX_SETBIT(sc
, BGE_MISC_LOCAL_CTL
, BGE_MLC_AUTO_EEPROM
);
427 /* Reset the EEPROM, load the clock period. */
428 CSR_WRITE_4(sc
, BGE_EE_ADDR
,
429 BGE_EEADDR_RESET
|BGE_EEHALFCLK(BGE_HALFCLK_384SCL
));
432 /* Issue the read EEPROM command. */
433 CSR_WRITE_4(sc
, BGE_EE_ADDR
, BGE_EE_READCMD
| addr
);
435 /* Wait for completion */
436 for(i
= 0; i
< BNX_TIMEOUT
* 10; i
++) {
438 if (CSR_READ_4(sc
, BGE_EE_ADDR
) & BGE_EEADDR_DONE
)
442 if (i
== BNX_TIMEOUT
) {
443 if_printf(&sc
->arpcom
.ac_if
, "eeprom read timed out\n");
448 byte
= CSR_READ_4(sc
, BGE_EE_DATA
);
450 *dest
= (byte
>> ((addr
% 4) * 8)) & 0xFF;
456 * Read a sequence of bytes from the EEPROM.
459 bnx_read_eeprom(struct bnx_softc
*sc
, caddr_t dest
, uint32_t off
, size_t len
)
465 for (byte
= 0, err
= 0, i
= 0; i
< len
; i
++) {
466 err
= bnx_eeprom_getbyte(sc
, off
+ i
, &byte
);
476 bnx_miibus_readreg(device_t dev
, int phy
, int reg
)
478 struct bnx_softc
*sc
= device_get_softc(dev
);
482 KASSERT(phy
== sc
->bnx_phyno
,
483 ("invalid phyno %d, should be %d", phy
, sc
->bnx_phyno
));
485 if (bnx_ape_lock(sc
, sc
->bnx_phy_ape_lock
) != 0)
488 /* Clear the autopoll bit if set, otherwise may trigger PCI errors. */
489 if (sc
->bnx_mi_mode
& BGE_MIMODE_AUTOPOLL
) {
490 CSR_WRITE_4(sc
, BGE_MI_MODE
,
491 sc
->bnx_mi_mode
& ~BGE_MIMODE_AUTOPOLL
);
495 CSR_WRITE_4(sc
, BGE_MI_COMM
, BGE_MICMD_READ
| BGE_MICOMM_BUSY
|
496 BGE_MIPHY(phy
) | BGE_MIREG(reg
));
498 /* Poll for the PHY register access to complete. */
499 for (i
= 0; i
< BNX_TIMEOUT
; i
++) {
501 val
= CSR_READ_4(sc
, BGE_MI_COMM
);
502 if ((val
& BGE_MICOMM_BUSY
) == 0) {
504 val
= CSR_READ_4(sc
, BGE_MI_COMM
);
508 if (i
== BNX_TIMEOUT
) {
509 if_printf(&sc
->arpcom
.ac_if
, "PHY read timed out "
510 "(phy %d, reg %d, val 0x%08x)\n", phy
, reg
, val
);
514 /* Restore the autopoll bit if necessary. */
515 if (sc
->bnx_mi_mode
& BGE_MIMODE_AUTOPOLL
) {
516 CSR_WRITE_4(sc
, BGE_MI_MODE
, sc
->bnx_mi_mode
);
520 bnx_ape_unlock(sc
, sc
->bnx_phy_ape_lock
);
522 if (val
& BGE_MICOMM_READFAIL
)
525 return (val
& 0xFFFF);
529 bnx_miibus_writereg(device_t dev
, int phy
, int reg
, int val
)
531 struct bnx_softc
*sc
= device_get_softc(dev
);
534 KASSERT(phy
== sc
->bnx_phyno
,
535 ("invalid phyno %d, should be %d", phy
, sc
->bnx_phyno
));
537 if (bnx_ape_lock(sc
, sc
->bnx_phy_ape_lock
) != 0)
540 /* Clear the autopoll bit if set, otherwise may trigger PCI errors. */
541 if (sc
->bnx_mi_mode
& BGE_MIMODE_AUTOPOLL
) {
542 CSR_WRITE_4(sc
, BGE_MI_MODE
,
543 sc
->bnx_mi_mode
& ~BGE_MIMODE_AUTOPOLL
);
547 CSR_WRITE_4(sc
, BGE_MI_COMM
, BGE_MICMD_WRITE
| BGE_MICOMM_BUSY
|
548 BGE_MIPHY(phy
) | BGE_MIREG(reg
) | val
);
550 for (i
= 0; i
< BNX_TIMEOUT
; i
++) {
552 if (!(CSR_READ_4(sc
, BGE_MI_COMM
) & BGE_MICOMM_BUSY
)) {
554 CSR_READ_4(sc
, BGE_MI_COMM
); /* dummy read */
558 if (i
== BNX_TIMEOUT
) {
559 if_printf(&sc
->arpcom
.ac_if
, "PHY write timed out "
560 "(phy %d, reg %d, val %d)\n", phy
, reg
, val
);
563 /* Restore the autopoll bit if necessary. */
564 if (sc
->bnx_mi_mode
& BGE_MIMODE_AUTOPOLL
) {
565 CSR_WRITE_4(sc
, BGE_MI_MODE
, sc
->bnx_mi_mode
);
569 bnx_ape_unlock(sc
, sc
->bnx_phy_ape_lock
);
575 bnx_miibus_statchg(device_t dev
)
577 struct bnx_softc
*sc
;
578 struct mii_data
*mii
;
581 sc
= device_get_softc(dev
);
582 if ((sc
->arpcom
.ac_if
.if_flags
& IFF_RUNNING
) == 0)
585 mii
= device_get_softc(sc
->bnx_miibus
);
587 if ((mii
->mii_media_status
& (IFM_ACTIVE
| IFM_AVALID
)) ==
588 (IFM_ACTIVE
| IFM_AVALID
)) {
589 switch (IFM_SUBTYPE(mii
->mii_media_active
)) {
606 if (sc
->bnx_link
== 0)
610 * APE firmware touches these registers to keep the MAC
611 * connected to the outside world. Try to keep the
615 mac_mode
= CSR_READ_4(sc
, BGE_MAC_MODE
) &
616 ~(BGE_MACMODE_PORTMODE
| BGE_MACMODE_HALF_DUPLEX
);
618 if (IFM_SUBTYPE(mii
->mii_media_active
) == IFM_1000_T
||
619 IFM_SUBTYPE(mii
->mii_media_active
) == IFM_1000_SX
)
620 mac_mode
|= BGE_PORTMODE_GMII
;
622 mac_mode
|= BGE_PORTMODE_MII
;
624 if ((mii
->mii_media_active
& IFM_GMASK
) != IFM_FDX
)
625 mac_mode
|= BGE_MACMODE_HALF_DUPLEX
;
627 CSR_WRITE_4(sc
, BGE_MAC_MODE
, mac_mode
);
632 * Memory management for jumbo frames.
635 bnx_alloc_jumbo_mem(struct bnx_softc
*sc
)
637 struct ifnet
*ifp
= &sc
->arpcom
.ac_if
;
638 struct bnx_jslot
*entry
;
644 * Create tag for jumbo mbufs.
645 * This is really a bit of a kludge. We allocate a special
646 * jumbo buffer pool which (thanks to the way our DMA
647 * memory allocation works) will consist of contiguous
648 * pages. This means that even though a jumbo buffer might
649 * be larger than a page size, we don't really need to
650 * map it into more than one DMA segment. However, the
651 * default mbuf tag will result in multi-segment mappings,
652 * so we have to create a special jumbo mbuf tag that
653 * lets us get away with mapping the jumbo buffers as
654 * a single segment. I think eventually the driver should
655 * be changed so that it uses ordinary mbufs and cluster
656 * buffers, i.e. jumbo frames can span multiple DMA
657 * descriptors. But that's a project for another day.
661 * Create DMA stuffs for jumbo RX ring.
663 error
= bnx_dma_block_alloc(sc
, BGE_JUMBO_RX_RING_SZ
,
664 &sc
->bnx_cdata
.bnx_rx_jumbo_ring_tag
,
665 &sc
->bnx_cdata
.bnx_rx_jumbo_ring_map
,
666 (void *)&sc
->bnx_ldata
.bnx_rx_jumbo_ring
,
667 &sc
->bnx_ldata
.bnx_rx_jumbo_ring_paddr
);
669 if_printf(ifp
, "could not create jumbo RX ring\n");
674 * Create DMA stuffs for jumbo buffer block.
676 error
= bnx_dma_block_alloc(sc
, BNX_JMEM
,
677 &sc
->bnx_cdata
.bnx_jumbo_tag
,
678 &sc
->bnx_cdata
.bnx_jumbo_map
,
679 (void **)&sc
->bnx_ldata
.bnx_jumbo_buf
,
682 if_printf(ifp
, "could not create jumbo buffer\n");
686 SLIST_INIT(&sc
->bnx_jfree_listhead
);
689 * Now divide it up into 9K pieces and save the addresses
690 * in an array. Note that we play an evil trick here by using
691 * the first few bytes in the buffer to hold the the address
692 * of the softc structure for this interface. This is because
693 * bnx_jfree() needs it, but it is called by the mbuf management
694 * code which will not pass it to us explicitly.
696 for (i
= 0, ptr
= sc
->bnx_ldata
.bnx_jumbo_buf
; i
< BNX_JSLOTS
; i
++) {
697 entry
= &sc
->bnx_cdata
.bnx_jslots
[i
];
699 entry
->bnx_buf
= ptr
;
700 entry
->bnx_paddr
= paddr
;
701 entry
->bnx_inuse
= 0;
703 SLIST_INSERT_HEAD(&sc
->bnx_jfree_listhead
, entry
, jslot_link
);
712 bnx_free_jumbo_mem(struct bnx_softc
*sc
)
714 /* Destroy jumbo RX ring. */
715 bnx_dma_block_free(sc
->bnx_cdata
.bnx_rx_jumbo_ring_tag
,
716 sc
->bnx_cdata
.bnx_rx_jumbo_ring_map
,
717 sc
->bnx_ldata
.bnx_rx_jumbo_ring
);
719 /* Destroy jumbo buffer block. */
720 bnx_dma_block_free(sc
->bnx_cdata
.bnx_jumbo_tag
,
721 sc
->bnx_cdata
.bnx_jumbo_map
,
722 sc
->bnx_ldata
.bnx_jumbo_buf
);
726 * Allocate a jumbo buffer.
728 static struct bnx_jslot
*
729 bnx_jalloc(struct bnx_softc
*sc
)
731 struct bnx_jslot
*entry
;
733 lwkt_serialize_enter(&sc
->bnx_jslot_serializer
);
734 entry
= SLIST_FIRST(&sc
->bnx_jfree_listhead
);
736 SLIST_REMOVE_HEAD(&sc
->bnx_jfree_listhead
, jslot_link
);
737 entry
->bnx_inuse
= 1;
739 if_printf(&sc
->arpcom
.ac_if
, "no free jumbo buffers\n");
741 lwkt_serialize_exit(&sc
->bnx_jslot_serializer
);
746 * Adjust usage count on a jumbo buffer.
751 struct bnx_jslot
*entry
= (struct bnx_jslot
*)arg
;
752 struct bnx_softc
*sc
= entry
->bnx_sc
;
755 panic("bnx_jref: can't find softc pointer!");
757 if (&sc
->bnx_cdata
.bnx_jslots
[entry
->bnx_slot
] != entry
) {
758 panic("bnx_jref: asked to reference buffer "
759 "that we don't manage!");
760 } else if (entry
->bnx_inuse
== 0) {
761 panic("bnx_jref: buffer already free!");
763 atomic_add_int(&entry
->bnx_inuse
, 1);
768 * Release a jumbo buffer.
773 struct bnx_jslot
*entry
= (struct bnx_jslot
*)arg
;
774 struct bnx_softc
*sc
= entry
->bnx_sc
;
777 panic("bnx_jfree: can't find softc pointer!");
779 if (&sc
->bnx_cdata
.bnx_jslots
[entry
->bnx_slot
] != entry
) {
780 panic("bnx_jfree: asked to free buffer that we don't manage!");
781 } else if (entry
->bnx_inuse
== 0) {
782 panic("bnx_jfree: buffer already free!");
785 * Possible MP race to 0, use the serializer. The atomic insn
786 * is still needed for races against bnx_jref().
788 lwkt_serialize_enter(&sc
->bnx_jslot_serializer
);
789 atomic_subtract_int(&entry
->bnx_inuse
, 1);
790 if (entry
->bnx_inuse
== 0) {
791 SLIST_INSERT_HEAD(&sc
->bnx_jfree_listhead
,
794 lwkt_serialize_exit(&sc
->bnx_jslot_serializer
);
800 * Intialize a standard receive ring descriptor.
803 bnx_newbuf_std(struct bnx_rx_ret_ring
*ret
, int i
, int init
)
805 struct mbuf
*m_new
= NULL
;
806 bus_dma_segment_t seg
;
809 struct bnx_rx_buf
*rb
;
811 rb
= &ret
->bnx_std
->bnx_rx_std_buf
[i
];
812 KASSERT(!rb
->bnx_rx_refilled
, ("RX buf %dth has been refilled", i
));
814 m_new
= m_getcl(init
? M_WAITOK
: M_NOWAIT
, MT_DATA
, M_PKTHDR
);
819 m_new
->m_len
= m_new
->m_pkthdr
.len
= MCLBYTES
;
820 m_adj(m_new
, ETHER_ALIGN
);
822 error
= bus_dmamap_load_mbuf_segment(ret
->bnx_rx_mtag
,
823 ret
->bnx_rx_tmpmap
, m_new
, &seg
, 1, &nsegs
, BUS_DMA_NOWAIT
);
830 bus_dmamap_sync(ret
->bnx_rx_mtag
, rb
->bnx_rx_dmamap
,
831 BUS_DMASYNC_POSTREAD
);
832 bus_dmamap_unload(ret
->bnx_rx_mtag
, rb
->bnx_rx_dmamap
);
835 map
= ret
->bnx_rx_tmpmap
;
836 ret
->bnx_rx_tmpmap
= rb
->bnx_rx_dmamap
;
838 rb
->bnx_rx_dmamap
= map
;
839 rb
->bnx_rx_mbuf
= m_new
;
840 rb
->bnx_rx_paddr
= seg
.ds_addr
;
841 rb
->bnx_rx_len
= m_new
->m_len
;
844 rb
->bnx_rx_refilled
= 1;
849 bnx_setup_rxdesc_std(struct bnx_rx_std_ring
*std
, int i
)
851 struct bnx_rx_buf
*rb
;
856 rb
= &std
->bnx_rx_std_buf
[i
];
857 KASSERT(rb
->bnx_rx_refilled
, ("RX buf %dth is not refilled", i
));
859 paddr
= rb
->bnx_rx_paddr
;
860 len
= rb
->bnx_rx_len
;
864 rb
->bnx_rx_refilled
= 0;
866 r
= &std
->bnx_rx_std_ring
[i
];
867 r
->bge_addr
.bge_addr_lo
= BGE_ADDR_LO(paddr
);
868 r
->bge_addr
.bge_addr_hi
= BGE_ADDR_HI(paddr
);
871 r
->bge_flags
= BGE_RXBDFLAG_END
;
875 * Initialize a jumbo receive ring descriptor. This allocates
876 * a jumbo buffer from the pool managed internally by the driver.
879 bnx_newbuf_jumbo(struct bnx_softc
*sc
, int i
, int init
)
881 struct mbuf
*m_new
= NULL
;
882 struct bnx_jslot
*buf
;
885 /* Allocate the mbuf. */
886 MGETHDR(m_new
, init
? M_WAITOK
: M_NOWAIT
, MT_DATA
);
890 /* Allocate the jumbo buffer */
891 buf
= bnx_jalloc(sc
);
897 /* Attach the buffer to the mbuf. */
898 m_new
->m_ext
.ext_arg
= buf
;
899 m_new
->m_ext
.ext_buf
= buf
->bnx_buf
;
900 m_new
->m_ext
.ext_free
= bnx_jfree
;
901 m_new
->m_ext
.ext_ref
= bnx_jref
;
902 m_new
->m_ext
.ext_size
= BNX_JUMBO_FRAMELEN
;
904 m_new
->m_flags
|= M_EXT
;
906 m_new
->m_data
= m_new
->m_ext
.ext_buf
;
907 m_new
->m_len
= m_new
->m_pkthdr
.len
= m_new
->m_ext
.ext_size
;
909 paddr
= buf
->bnx_paddr
;
910 m_adj(m_new
, ETHER_ALIGN
);
911 paddr
+= ETHER_ALIGN
;
913 /* Save necessary information */
914 sc
->bnx_cdata
.bnx_rx_jumbo_chain
[i
].bnx_rx_mbuf
= m_new
;
915 sc
->bnx_cdata
.bnx_rx_jumbo_chain
[i
].bnx_rx_paddr
= paddr
;
917 /* Set up the descriptor. */
918 bnx_setup_rxdesc_jumbo(sc
, i
);
923 bnx_setup_rxdesc_jumbo(struct bnx_softc
*sc
, int i
)
926 struct bnx_rx_buf
*rc
;
928 r
= &sc
->bnx_ldata
.bnx_rx_jumbo_ring
[i
];
929 rc
= &sc
->bnx_cdata
.bnx_rx_jumbo_chain
[i
];
931 r
->bge_addr
.bge_addr_lo
= BGE_ADDR_LO(rc
->bnx_rx_paddr
);
932 r
->bge_addr
.bge_addr_hi
= BGE_ADDR_HI(rc
->bnx_rx_paddr
);
933 r
->bge_len
= rc
->bnx_rx_mbuf
->m_len
;
935 r
->bge_flags
= BGE_RXBDFLAG_END
|BGE_RXBDFLAG_JUMBO_RING
;
939 bnx_init_rx_ring_std(struct bnx_rx_std_ring
*std
)
943 for (i
= 0; i
< BGE_STD_RX_RING_CNT
; i
++) {
944 /* Use the first RX return ring's tmp RX mbuf DMA map */
945 error
= bnx_newbuf_std(&std
->bnx_sc
->bnx_rx_ret_ring
[0], i
, 1);
948 bnx_setup_rxdesc_std(std
, i
);
951 std
->bnx_rx_std_used
= 0;
952 std
->bnx_rx_std_refill
= 0;
953 std
->bnx_rx_std_running
= 0;
955 lwkt_serialize_handler_enable(&std
->bnx_rx_std_serialize
);
957 std
->bnx_rx_std
= BGE_STD_RX_RING_CNT
- 1;
958 bnx_writembx(std
->bnx_sc
, BGE_MBX_RX_STD_PROD_LO
, std
->bnx_rx_std
);
964 bnx_free_rx_ring_std(struct bnx_rx_std_ring
*std
)
968 lwkt_serialize_handler_disable(&std
->bnx_rx_std_serialize
);
970 for (i
= 0; i
< BGE_STD_RX_RING_CNT
; i
++) {
971 struct bnx_rx_buf
*rb
= &std
->bnx_rx_std_buf
[i
];
973 rb
->bnx_rx_refilled
= 0;
974 if (rb
->bnx_rx_mbuf
!= NULL
) {
975 bus_dmamap_unload(std
->bnx_rx_mtag
, rb
->bnx_rx_dmamap
);
976 m_freem(rb
->bnx_rx_mbuf
);
977 rb
->bnx_rx_mbuf
= NULL
;
979 bzero(&std
->bnx_rx_std_ring
[i
], sizeof(struct bge_rx_bd
));
984 bnx_init_rx_ring_jumbo(struct bnx_softc
*sc
)
989 for (i
= 0; i
< BGE_JUMBO_RX_RING_CNT
; i
++) {
990 error
= bnx_newbuf_jumbo(sc
, i
, 1);
995 sc
->bnx_jumbo
= BGE_JUMBO_RX_RING_CNT
- 1;
997 rcb
= &sc
->bnx_ldata
.bnx_info
.bnx_jumbo_rx_rcb
;
998 rcb
->bge_maxlen_flags
= BGE_RCB_MAXLEN_FLAGS(0, 0);
999 CSR_WRITE_4(sc
, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS
, rcb
->bge_maxlen_flags
);
1001 bnx_writembx(sc
, BGE_MBX_RX_JUMBO_PROD_LO
, sc
->bnx_jumbo
);
1007 bnx_free_rx_ring_jumbo(struct bnx_softc
*sc
)
1011 for (i
= 0; i
< BGE_JUMBO_RX_RING_CNT
; i
++) {
1012 struct bnx_rx_buf
*rc
= &sc
->bnx_cdata
.bnx_rx_jumbo_chain
[i
];
1014 if (rc
->bnx_rx_mbuf
!= NULL
) {
1015 m_freem(rc
->bnx_rx_mbuf
);
1016 rc
->bnx_rx_mbuf
= NULL
;
1018 bzero(&sc
->bnx_ldata
.bnx_rx_jumbo_ring
[i
],
1019 sizeof(struct bge_rx_bd
));
1024 bnx_free_tx_ring(struct bnx_tx_ring
*txr
)
1028 for (i
= 0; i
< BGE_TX_RING_CNT
; i
++) {
1029 struct bnx_tx_buf
*buf
= &txr
->bnx_tx_buf
[i
];
1031 if (buf
->bnx_tx_mbuf
!= NULL
) {
1032 bus_dmamap_unload(txr
->bnx_tx_mtag
,
1033 buf
->bnx_tx_dmamap
);
1034 m_freem(buf
->bnx_tx_mbuf
);
1035 buf
->bnx_tx_mbuf
= NULL
;
1037 bzero(&txr
->bnx_tx_ring
[i
], sizeof(struct bge_tx_bd
));
1039 txr
->bnx_tx_saved_considx
= BNX_TXCONS_UNSET
;
1043 bnx_init_tx_ring(struct bnx_tx_ring
*txr
)
1045 txr
->bnx_tx_cnt
= 0;
1046 txr
->bnx_tx_saved_considx
= 0;
1047 txr
->bnx_tx_prodidx
= 0;
1049 /* Initialize transmit producer index for host-memory send ring. */
1050 bnx_writembx(txr
->bnx_sc
, txr
->bnx_tx_mbx
, txr
->bnx_tx_prodidx
);
1056 bnx_setmulti(struct bnx_softc
*sc
)
1059 struct ifmultiaddr
*ifma
;
1060 uint32_t hashes
[4] = { 0, 0, 0, 0 };
1063 ifp
= &sc
->arpcom
.ac_if
;
1065 if (ifp
->if_flags
& IFF_ALLMULTI
|| ifp
->if_flags
& IFF_PROMISC
) {
1066 for (i
= 0; i
< 4; i
++)
1067 CSR_WRITE_4(sc
, BGE_MAR0
+ (i
* 4), 0xFFFFFFFF);
1071 /* First, zot all the existing filters. */
1072 for (i
= 0; i
< 4; i
++)
1073 CSR_WRITE_4(sc
, BGE_MAR0
+ (i
* 4), 0);
1075 /* Now program new ones. */
1076 TAILQ_FOREACH(ifma
, &ifp
->if_multiaddrs
, ifma_link
) {
1077 if (ifma
->ifma_addr
->sa_family
!= AF_LINK
)
1080 LLADDR((struct sockaddr_dl
*)ifma
->ifma_addr
),
1081 ETHER_ADDR_LEN
) & 0x7f;
1082 hashes
[(h
& 0x60) >> 5] |= 1 << (h
& 0x1F);
1085 for (i
= 0; i
< 4; i
++)
1086 CSR_WRITE_4(sc
, BGE_MAR0
+ (i
* 4), hashes
[i
]);
1090 * Do endian, PCI and DMA initialization. Also check the on-board ROM
1091 * self-test results.
1094 bnx_chipinit(struct bnx_softc
*sc
)
1096 uint32_t dma_rw_ctl
, mode_ctl
;
1099 /* Set endian type before we access any non-PCI registers. */
1100 pci_write_config(sc
->bnx_dev
, BGE_PCI_MISC_CTL
,
1101 BGE_INIT
| BGE_PCIMISCCTL_TAGGED_STATUS
, 4);
1104 * Clear the MAC statistics block in the NIC's
1107 for (i
= BGE_STATS_BLOCK
;
1108 i
< BGE_STATS_BLOCK_END
+ 1; i
+= sizeof(uint32_t))
1109 BNX_MEMWIN_WRITE(sc
, i
, 0);
1111 for (i
= BGE_STATUS_BLOCK
;
1112 i
< BGE_STATUS_BLOCK_END
+ 1; i
+= sizeof(uint32_t))
1113 BNX_MEMWIN_WRITE(sc
, i
, 0);
1115 if (BNX_IS_57765_FAMILY(sc
)) {
1118 if (sc
->bnx_chipid
== BGE_CHIPID_BCM57765_A0
) {
1119 mode_ctl
= CSR_READ_4(sc
, BGE_MODE_CTL
);
1120 val
= mode_ctl
& ~BGE_MODECTL_PCIE_PORTS
;
1122 /* Access the lower 1K of PL PCI-E block registers. */
1123 CSR_WRITE_4(sc
, BGE_MODE_CTL
,
1124 val
| BGE_MODECTL_PCIE_PL_SEL
);
1126 val
= CSR_READ_4(sc
, BGE_PCIE_PL_LO_PHYCTL5
);
1127 val
|= BGE_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ
;
1128 CSR_WRITE_4(sc
, BGE_PCIE_PL_LO_PHYCTL5
, val
);
1130 CSR_WRITE_4(sc
, BGE_MODE_CTL
, mode_ctl
);
1132 if (sc
->bnx_chiprev
!= BGE_CHIPREV_57765_AX
) {
1133 /* Fix transmit hangs */
1134 val
= CSR_READ_4(sc
, BGE_CPMU_PADRNG_CTL
);
1135 val
|= BGE_CPMU_PADRNG_CTL_RDIV2
;
1136 CSR_WRITE_4(sc
, BGE_CPMU_PADRNG_CTL
, val
);
1138 mode_ctl
= CSR_READ_4(sc
, BGE_MODE_CTL
);
1139 val
= mode_ctl
& ~BGE_MODECTL_PCIE_PORTS
;
1141 /* Access the lower 1K of DL PCI-E block registers. */
1142 CSR_WRITE_4(sc
, BGE_MODE_CTL
,
1143 val
| BGE_MODECTL_PCIE_DL_SEL
);
1145 val
= CSR_READ_4(sc
, BGE_PCIE_DL_LO_FTSMAX
);
1146 val
&= ~BGE_PCIE_DL_LO_FTSMAX_MASK
;
1147 val
|= BGE_PCIE_DL_LO_FTSMAX_VAL
;
1148 CSR_WRITE_4(sc
, BGE_PCIE_DL_LO_FTSMAX
, val
);
1150 CSR_WRITE_4(sc
, BGE_MODE_CTL
, mode_ctl
);
1153 val
= CSR_READ_4(sc
, BGE_CPMU_LSPD_10MB_CLK
);
1154 val
&= ~BGE_CPMU_LSPD_10MB_MACCLK_MASK
;
1155 val
|= BGE_CPMU_LSPD_10MB_MACCLK_6_25
;
1156 CSR_WRITE_4(sc
, BGE_CPMU_LSPD_10MB_CLK
, val
);
1160 * Set up the PCI DMA control register.
1162 dma_rw_ctl
= pci_read_config(sc
->bnx_dev
, BGE_PCI_DMA_RW_CTL
, 4);
1164 * Disable 32bytes cache alignment for DMA write to host memory
1167 * 64bytes cache alignment for DMA write to host memory is still
1170 dma_rw_ctl
|= BGE_PCIDMARWCTL_DIS_CACHE_ALIGNMENT
;
1171 if (sc
->bnx_chipid
== BGE_CHIPID_BCM57765_A0
)
1172 dma_rw_ctl
&= ~BGE_PCIDMARWCTL_CRDRDR_RDMA_MRRS_MSK
;
1174 * Enable HW workaround for controllers that misinterpret
1175 * a status tag update and leave interrupts permanently
1178 if (sc
->bnx_asicrev
!= BGE_ASICREV_BCM5717
&&
1179 sc
->bnx_asicrev
!= BGE_ASICREV_BCM5762
&&
1180 !BNX_IS_57765_FAMILY(sc
))
1181 dma_rw_ctl
|= BGE_PCIDMARWCTL_TAGGED_STATUS_WA
;
1183 if_printf(&sc
->arpcom
.ac_if
, "DMA read/write %#x\n",
1186 pci_write_config(sc
->bnx_dev
, BGE_PCI_DMA_RW_CTL
, dma_rw_ctl
, 4);
1189 * Set up general mode register.
1191 mode_ctl
= bnx_dma_swap_options(sc
);
1192 if (sc
->bnx_asicrev
== BGE_ASICREV_BCM5720
||
1193 sc
->bnx_asicrev
== BGE_ASICREV_BCM5762
) {
1194 /* Retain Host-2-BMC settings written by APE firmware. */
1195 mode_ctl
|= CSR_READ_4(sc
, BGE_MODE_CTL
) &
1196 (BGE_MODECTL_BYTESWAP_B2HRX_DATA
|
1197 BGE_MODECTL_WORDSWAP_B2HRX_DATA
|
1198 BGE_MODECTL_B2HRX_ENABLE
| BGE_MODECTL_HTX2B_ENABLE
);
1200 mode_ctl
|= BGE_MODECTL_MAC_ATTN_INTR
|
1201 BGE_MODECTL_HOST_SEND_BDS
| BGE_MODECTL_TX_NO_PHDR_CSUM
;
1202 CSR_WRITE_4(sc
, BGE_MODE_CTL
, mode_ctl
);
1205 * Disable memory write invalidate. Apparently it is not supported
1206 * properly by these devices. Also ensure that INTx isn't disabled,
1207 * as these chips need it even when using MSI.
1209 PCI_CLRBIT(sc
->bnx_dev
, BGE_PCI_CMD
,
1210 (PCIM_CMD_MWRICEN
| PCIM_CMD_INTxDIS
), 4);
1212 /* Set the timer prescaler (always 66Mhz) */
1213 CSR_WRITE_4(sc
, BGE_MISC_CFG
, 65 << 1/*BGE_32BITTIME_66MHZ*/);
1219 bnx_blockinit(struct bnx_softc
*sc
)
1221 struct bnx_intr_data
*intr
;
1222 struct bge_rcb
*rcb
;
1229 * Initialize the memory window pointer register so that
1230 * we can access the first 32K of internal NIC RAM. This will
1231 * allow us to set up the TX send ring RCBs and the RX return
1232 * ring RCBs, plus other things which live in NIC memory.
1234 CSR_WRITE_4(sc
, BGE_PCI_MEMWIN_BASEADDR
, 0);
1236 /* Configure mbuf pool watermarks */
1237 if (BNX_IS_57765_PLUS(sc
)) {
1238 CSR_WRITE_4(sc
, BGE_BMAN_MBUFPOOL_READDMA_LOWAT
, 0x0);
1239 if (sc
->arpcom
.ac_if
.if_mtu
> ETHERMTU
) {
1240 CSR_WRITE_4(sc
, BGE_BMAN_MBUFPOOL_MACRX_LOWAT
, 0x7e);
1241 CSR_WRITE_4(sc
, BGE_BMAN_MBUFPOOL_HIWAT
, 0xea);
1243 CSR_WRITE_4(sc
, BGE_BMAN_MBUFPOOL_MACRX_LOWAT
, 0x2a);
1244 CSR_WRITE_4(sc
, BGE_BMAN_MBUFPOOL_HIWAT
, 0xa0);
1247 CSR_WRITE_4(sc
, BGE_BMAN_MBUFPOOL_READDMA_LOWAT
, 0x0);
1248 CSR_WRITE_4(sc
, BGE_BMAN_MBUFPOOL_MACRX_LOWAT
, 0x10);
1249 CSR_WRITE_4(sc
, BGE_BMAN_MBUFPOOL_HIWAT
, 0x60);
1252 /* Configure DMA resource watermarks */
1253 CSR_WRITE_4(sc
, BGE_BMAN_DMA_DESCPOOL_LOWAT
, 5);
1254 CSR_WRITE_4(sc
, BGE_BMAN_DMA_DESCPOOL_HIWAT
, 10);
1256 /* Enable buffer manager */
1257 val
= BGE_BMANMODE_ENABLE
| BGE_BMANMODE_LOMBUF_ATTN
;
1259 * Change the arbitration algorithm of TXMBUF read request to
1260 * round-robin instead of priority based for BCM5719. When
1261 * TXFIFO is almost empty, RDMA will hold its request until
1262 * TXFIFO is not almost empty.
1264 if (sc
->bnx_asicrev
== BGE_ASICREV_BCM5719
)
1265 val
|= BGE_BMANMODE_NO_TX_UNDERRUN
;
1266 if (sc
->bnx_asicrev
== BGE_ASICREV_BCM5717
||
1267 sc
->bnx_chipid
== BGE_CHIPID_BCM5719_A0
||
1268 sc
->bnx_chipid
== BGE_CHIPID_BCM5720_A0
)
1269 val
|= BGE_BMANMODE_LOMBUF_ATTN
;
1270 CSR_WRITE_4(sc
, BGE_BMAN_MODE
, val
);
1272 /* Poll for buffer manager start indication */
1273 for (i
= 0; i
< BNX_TIMEOUT
; i
++) {
1274 if (CSR_READ_4(sc
, BGE_BMAN_MODE
) & BGE_BMANMODE_ENABLE
)
1279 if (i
== BNX_TIMEOUT
) {
1280 if_printf(&sc
->arpcom
.ac_if
,
1281 "buffer manager failed to start\n");
1285 /* Enable flow-through queues */
1286 CSR_WRITE_4(sc
, BGE_FTQ_RESET
, 0xFFFFFFFF);
1287 CSR_WRITE_4(sc
, BGE_FTQ_RESET
, 0);
1289 /* Wait until queue initialization is complete */
1290 for (i
= 0; i
< BNX_TIMEOUT
; i
++) {
1291 if (CSR_READ_4(sc
, BGE_FTQ_RESET
) == 0)
1296 if (i
== BNX_TIMEOUT
) {
1297 if_printf(&sc
->arpcom
.ac_if
,
1298 "flow-through queue init failed\n");
1303 * Summary of rings supported by the controller:
1305 * Standard Receive Producer Ring
1306 * - This ring is used to feed receive buffers for "standard"
1307 * sized frames (typically 1536 bytes) to the controller.
1309 * Jumbo Receive Producer Ring
1310 * - This ring is used to feed receive buffers for jumbo sized
1311 * frames (i.e. anything bigger than the "standard" frames)
1312 * to the controller.
1314 * Mini Receive Producer Ring
1315 * - This ring is used to feed receive buffers for "mini"
1316 * sized frames to the controller.
1317 * - This feature required external memory for the controller
1318 * but was never used in a production system. Should always
1321 * Receive Return Ring
1322 * - After the controller has placed an incoming frame into a
1323 * receive buffer that buffer is moved into a receive return
1324 * ring. The driver is then responsible to passing the
1325 * buffer up to the stack. BCM5718/BCM57785 families support
1326 * multiple receive return rings.
1329 * - This ring is used for outgoing frames. BCM5719/BCM5720
1330 * support multiple send rings.
1333 /* Initialize the standard receive producer ring control block. */
1334 rcb
= &sc
->bnx_ldata
.bnx_info
.bnx_std_rx_rcb
;
1335 rcb
->bge_hostaddr
.bge_addr_lo
=
1336 BGE_ADDR_LO(sc
->bnx_rx_std_ring
.bnx_rx_std_ring_paddr
);
1337 rcb
->bge_hostaddr
.bge_addr_hi
=
1338 BGE_ADDR_HI(sc
->bnx_rx_std_ring
.bnx_rx_std_ring_paddr
);
1339 if (BNX_IS_57765_PLUS(sc
)) {
1341 * Bits 31-16: Programmable ring size (2048, 1024, 512, .., 32)
1342 * Bits 15-2 : Maximum RX frame size
1343 * Bit 1 : 1 = Ring Disabled, 0 = Ring ENabled
1346 rcb
->bge_maxlen_flags
=
1347 BGE_RCB_MAXLEN_FLAGS(512, BNX_MAX_FRAMELEN
<< 2);
1350 * Bits 31-16: Programmable ring size (512, 256, 128, 64, 32)
1351 * Bits 15-2 : Reserved (should be 0)
1352 * Bit 1 : 1 = Ring Disabled, 0 = Ring Enabled
1355 rcb
->bge_maxlen_flags
= BGE_RCB_MAXLEN_FLAGS(512, 0);
1357 if (BNX_IS_5717_PLUS(sc
))
1358 rcb
->bge_nicaddr
= BGE_STD_RX_RINGS_5717
;
1360 rcb
->bge_nicaddr
= BGE_STD_RX_RINGS
;
1361 /* Write the standard receive producer ring control block. */
1362 CSR_WRITE_4(sc
, BGE_RX_STD_RCB_HADDR_HI
, rcb
->bge_hostaddr
.bge_addr_hi
);
1363 CSR_WRITE_4(sc
, BGE_RX_STD_RCB_HADDR_LO
, rcb
->bge_hostaddr
.bge_addr_lo
);
1364 CSR_WRITE_4(sc
, BGE_RX_STD_RCB_MAXLEN_FLAGS
, rcb
->bge_maxlen_flags
);
1365 if (!BNX_IS_5717_PLUS(sc
))
1366 CSR_WRITE_4(sc
, BGE_RX_STD_RCB_NICADDR
, rcb
->bge_nicaddr
);
1367 /* Reset the standard receive producer ring producer index. */
1368 bnx_writembx(sc
, BGE_MBX_RX_STD_PROD_LO
, 0);
1371 * Initialize the jumbo RX producer ring control
1372 * block. We set the 'ring disabled' bit in the
1373 * flags field until we're actually ready to start
1374 * using this ring (i.e. once we set the MTU
1375 * high enough to require it).
1377 if (BNX_IS_JUMBO_CAPABLE(sc
)) {
1378 rcb
= &sc
->bnx_ldata
.bnx_info
.bnx_jumbo_rx_rcb
;
1379 /* Get the jumbo receive producer ring RCB parameters. */
1380 rcb
->bge_hostaddr
.bge_addr_lo
=
1381 BGE_ADDR_LO(sc
->bnx_ldata
.bnx_rx_jumbo_ring_paddr
);
1382 rcb
->bge_hostaddr
.bge_addr_hi
=
1383 BGE_ADDR_HI(sc
->bnx_ldata
.bnx_rx_jumbo_ring_paddr
);
1384 rcb
->bge_maxlen_flags
=
1385 BGE_RCB_MAXLEN_FLAGS(BNX_MAX_FRAMELEN
,
1386 BGE_RCB_FLAG_RING_DISABLED
);
1387 if (BNX_IS_5717_PLUS(sc
))
1388 rcb
->bge_nicaddr
= BGE_JUMBO_RX_RINGS_5717
;
1390 rcb
->bge_nicaddr
= BGE_JUMBO_RX_RINGS
;
1391 CSR_WRITE_4(sc
, BGE_RX_JUMBO_RCB_HADDR_HI
,
1392 rcb
->bge_hostaddr
.bge_addr_hi
);
1393 CSR_WRITE_4(sc
, BGE_RX_JUMBO_RCB_HADDR_LO
,
1394 rcb
->bge_hostaddr
.bge_addr_lo
);
1395 /* Program the jumbo receive producer ring RCB parameters. */
1396 CSR_WRITE_4(sc
, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS
,
1397 rcb
->bge_maxlen_flags
);
1398 CSR_WRITE_4(sc
, BGE_RX_JUMBO_RCB_NICADDR
, rcb
->bge_nicaddr
);
1399 /* Reset the jumbo receive producer ring producer index. */
1400 bnx_writembx(sc
, BGE_MBX_RX_JUMBO_PROD_LO
, 0);
1404 * The BD ring replenish thresholds control how often the
1405 * hardware fetches new BD's from the producer rings in host
1406 * memory. Setting the value too low on a busy system can
1407 * starve the hardware and recue the throughpout.
1409 * Set the BD ring replentish thresholds. The recommended
1410 * values are 1/8th the number of descriptors allocated to
1414 CSR_WRITE_4(sc
, BGE_RBDI_STD_REPL_THRESH
, val
);
1415 if (BNX_IS_JUMBO_CAPABLE(sc
)) {
1416 CSR_WRITE_4(sc
, BGE_RBDI_JUMBO_REPL_THRESH
,
1417 BGE_JUMBO_RX_RING_CNT
/8);
1419 if (BNX_IS_57765_PLUS(sc
)) {
1420 CSR_WRITE_4(sc
, BGE_STD_REPLENISH_LWM
, 32);
1421 CSR_WRITE_4(sc
, BGE_JMB_REPLENISH_LWM
, 16);
1425 * Disable all send rings by setting the 'ring disabled' bit
1426 * in the flags field of all the TX send ring control blocks,
1427 * located in NIC memory.
1429 if (BNX_IS_5717_PLUS(sc
))
1431 else if (BNX_IS_57765_FAMILY(sc
) ||
1432 sc
->bnx_asicrev
== BGE_ASICREV_BCM5762
)
1436 vrcb
= BGE_MEMWIN_START
+ BGE_SEND_RING_RCB
;
1437 for (i
= 0; i
< limit
; i
++) {
1438 RCB_WRITE_4(sc
, vrcb
, bge_maxlen_flags
,
1439 BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED
));
1440 vrcb
+= sizeof(struct bge_rcb
);
1444 * Configure send ring RCBs
1446 vrcb
= BGE_MEMWIN_START
+ BGE_SEND_RING_RCB
;
1447 for (i
= 0; i
< sc
->bnx_tx_ringcnt
; ++i
) {
1448 struct bnx_tx_ring
*txr
= &sc
->bnx_tx_ring
[i
];
1450 BGE_HOSTADDR(taddr
, txr
->bnx_tx_ring_paddr
);
1451 RCB_WRITE_4(sc
, vrcb
, bge_hostaddr
.bge_addr_hi
,
1453 RCB_WRITE_4(sc
, vrcb
, bge_hostaddr
.bge_addr_lo
,
1455 RCB_WRITE_4(sc
, vrcb
, bge_maxlen_flags
,
1456 BGE_RCB_MAXLEN_FLAGS(BGE_TX_RING_CNT
, 0));
1457 vrcb
+= sizeof(struct bge_rcb
);
1461 * Disable all receive return rings by setting the
1462 * 'ring disabled' bit in the flags field of all the receive
1463 * return ring control blocks, located in NIC memory.
1465 if (BNX_IS_5717_PLUS(sc
)) {
1466 /* Should be 17, use 16 until we get an SRAM map. */
1468 } else if (BNX_IS_57765_FAMILY(sc
) ||
1469 sc
->bnx_asicrev
== BGE_ASICREV_BCM5762
) {
1474 /* Disable all receive return rings. */
1475 vrcb
= BGE_MEMWIN_START
+ BGE_RX_RETURN_RING_RCB
;
1476 for (i
= 0; i
< limit
; i
++) {
1477 RCB_WRITE_4(sc
, vrcb
, bge_hostaddr
.bge_addr_hi
, 0);
1478 RCB_WRITE_4(sc
, vrcb
, bge_hostaddr
.bge_addr_lo
, 0);
1479 RCB_WRITE_4(sc
, vrcb
, bge_maxlen_flags
,
1480 BGE_RCB_FLAG_RING_DISABLED
);
1481 bnx_writembx(sc
, BGE_MBX_RX_CONS0_LO
+
1482 (i
* (sizeof(uint64_t))), 0);
1483 vrcb
+= sizeof(struct bge_rcb
);
1487 * Set up receive return rings.
1489 vrcb
= BGE_MEMWIN_START
+ BGE_RX_RETURN_RING_RCB
;
1490 for (i
= 0; i
< sc
->bnx_rx_retcnt
; ++i
) {
1491 struct bnx_rx_ret_ring
*ret
= &sc
->bnx_rx_ret_ring
[i
];
1493 BGE_HOSTADDR(taddr
, ret
->bnx_rx_ret_ring_paddr
);
1494 RCB_WRITE_4(sc
, vrcb
, bge_hostaddr
.bge_addr_hi
,
1496 RCB_WRITE_4(sc
, vrcb
, bge_hostaddr
.bge_addr_lo
,
1498 RCB_WRITE_4(sc
, vrcb
, bge_maxlen_flags
,
1499 BGE_RCB_MAXLEN_FLAGS(BNX_RETURN_RING_CNT
, 0));
1500 vrcb
+= sizeof(struct bge_rcb
);
1503 /* Set random backoff seed for TX */
1504 CSR_WRITE_4(sc
, BGE_TX_RANDOM_BACKOFF
,
1505 (sc
->arpcom
.ac_enaddr
[0] + sc
->arpcom
.ac_enaddr
[1] +
1506 sc
->arpcom
.ac_enaddr
[2] + sc
->arpcom
.ac_enaddr
[3] +
1507 sc
->arpcom
.ac_enaddr
[4] + sc
->arpcom
.ac_enaddr
[5]) &
1508 BGE_TX_BACKOFF_SEED_MASK
);
1510 /* Set inter-packet gap */
1512 if (sc
->bnx_asicrev
== BGE_ASICREV_BCM5720
||
1513 sc
->bnx_asicrev
== BGE_ASICREV_BCM5762
) {
1514 val
|= CSR_READ_4(sc
, BGE_TX_LENGTHS
) &
1515 (BGE_TXLEN_JMB_FRM_LEN_MSK
| BGE_TXLEN_CNT_DN_VAL_MSK
);
1517 CSR_WRITE_4(sc
, BGE_TX_LENGTHS
, val
);
1520 * Specify which ring to use for packets that don't match
1523 CSR_WRITE_4(sc
, BGE_RX_RULES_CFG
, 0x08);
1526 * Configure number of RX lists. One interrupt distribution
1527 * list, sixteen active lists, one bad frames class.
1529 CSR_WRITE_4(sc
, BGE_RXLP_CFG
, 0x181);
1531 /* Inialize RX list placement stats mask. */
1532 CSR_WRITE_4(sc
, BGE_RXLP_STATS_ENABLE_MASK
, 0x007FFFFF);
1533 CSR_WRITE_4(sc
, BGE_RXLP_STATS_CTL
, 0x1);
1535 /* Disable host coalescing until we get it set up */
1536 CSR_WRITE_4(sc
, BGE_HCC_MODE
, 0x00000000);
1538 /* Poll to make sure it's shut down. */
1539 for (i
= 0; i
< BNX_TIMEOUT
; i
++) {
1540 if (!(CSR_READ_4(sc
, BGE_HCC_MODE
) & BGE_HCCMODE_ENABLE
))
1545 if (i
== BNX_TIMEOUT
) {
1546 if_printf(&sc
->arpcom
.ac_if
,
1547 "host coalescing engine failed to idle\n");
1551 /* Set up host coalescing defaults */
1552 sc
->bnx_coal_chg
= BNX_RX_COAL_TICKS_CHG
|
1553 BNX_TX_COAL_TICKS_CHG
|
1554 BNX_RX_COAL_BDS_CHG
|
1555 BNX_TX_COAL_BDS_CHG
|
1556 BNX_RX_COAL_BDS_INT_CHG
|
1557 BNX_TX_COAL_BDS_INT_CHG
;
1558 bnx_coal_change(sc
);
1561 * Set up addresses of status blocks
1563 intr
= &sc
->bnx_intr_data
[0];
1564 bzero(intr
->bnx_status_block
, BGE_STATUS_BLK_SZ
);
1565 CSR_WRITE_4(sc
, BGE_HCC_STATUSBLK_ADDR_HI
,
1566 BGE_ADDR_HI(intr
->bnx_status_block_paddr
));
1567 CSR_WRITE_4(sc
, BGE_HCC_STATUSBLK_ADDR_LO
,
1568 BGE_ADDR_LO(intr
->bnx_status_block_paddr
));
1569 for (i
= 1; i
< sc
->bnx_intr_cnt
; ++i
) {
1570 intr
= &sc
->bnx_intr_data
[i
];
1571 bzero(intr
->bnx_status_block
, BGE_STATUS_BLK_SZ
);
1572 CSR_WRITE_4(sc
, BGE_VEC1_STATUSBLK_ADDR_HI
+ ((i
- 1) * 8),
1573 BGE_ADDR_HI(intr
->bnx_status_block_paddr
));
1574 CSR_WRITE_4(sc
, BGE_VEC1_STATUSBLK_ADDR_LO
+ ((i
- 1) * 8),
1575 BGE_ADDR_LO(intr
->bnx_status_block_paddr
));
1578 /* Set up status block partail update size. */
1579 val
= BGE_STATBLKSZ_32BYTE
;
1582 * Does not seem to have visible effect in both
1583 * bulk data (1472B UDP datagram) and tiny data
1584 * (18B UDP datagram) TX tests.
1586 val
|= BGE_HCCMODE_CLRTICK_TX
;
1588 /* Turn on host coalescing state machine */
1589 CSR_WRITE_4(sc
, BGE_HCC_MODE
, val
| BGE_HCCMODE_ENABLE
);
1591 /* Turn on RX BD completion state machine and enable attentions */
1592 CSR_WRITE_4(sc
, BGE_RBDC_MODE
,
1593 BGE_RBDCMODE_ENABLE
|BGE_RBDCMODE_ATTN
);
1595 /* Turn on RX list placement state machine */
1596 CSR_WRITE_4(sc
, BGE_RXLP_MODE
, BGE_RXLPMODE_ENABLE
);
1598 val
= BGE_MACMODE_TXDMA_ENB
| BGE_MACMODE_RXDMA_ENB
|
1599 BGE_MACMODE_RX_STATS_CLEAR
| BGE_MACMODE_TX_STATS_CLEAR
|
1600 BGE_MACMODE_RX_STATS_ENB
| BGE_MACMODE_TX_STATS_ENB
|
1601 BGE_MACMODE_FRMHDR_DMA_ENB
;
1603 if (sc
->bnx_flags
& BNX_FLAG_TBI
)
1604 val
|= BGE_PORTMODE_TBI
;
1605 else if (sc
->bnx_flags
& BNX_FLAG_MII_SERDES
)
1606 val
|= BGE_PORTMODE_GMII
;
1608 val
|= BGE_PORTMODE_MII
;
1610 /* Allow APE to send/receive frames. */
1611 if (sc
->bnx_mfw_flags
& BNX_MFW_ON_APE
)
1612 val
|= BGE_MACMODE_APE_RX_EN
| BGE_MACMODE_APE_TX_EN
;
1614 /* Turn on DMA, clear stats */
1615 CSR_WRITE_4(sc
, BGE_MAC_MODE
, val
);
1618 /* Set misc. local control, enable interrupts on attentions */
1619 BNX_SETBIT(sc
, BGE_MISC_LOCAL_CTL
, BGE_MLC_INTR_ONATTN
);
1622 /* Assert GPIO pins for PHY reset */
1623 BNX_SETBIT(sc
, BGE_MISC_LOCAL_CTL
, BGE_MLC_MISCIO_OUT0
|
1624 BGE_MLC_MISCIO_OUT1
|BGE_MLC_MISCIO_OUT2
);
1625 BNX_SETBIT(sc
, BGE_MISC_LOCAL_CTL
, BGE_MLC_MISCIO_OUTEN0
|
1626 BGE_MLC_MISCIO_OUTEN1
|BGE_MLC_MISCIO_OUTEN2
);
1629 if (sc
->bnx_intr_type
== PCI_INTR_TYPE_MSIX
)
1630 bnx_enable_msi(sc
, TRUE
);
1632 /* Turn on write DMA state machine */
1633 val
= BGE_WDMAMODE_ENABLE
|BGE_WDMAMODE_ALL_ATTNS
;
1634 /* Enable host coalescing bug fix. */
1635 val
|= BGE_WDMAMODE_STATUS_TAG_FIX
;
1636 if (sc
->bnx_asicrev
== BGE_ASICREV_BCM5785
) {
1637 /* Request larger DMA burst size to get better performance. */
1638 val
|= BGE_WDMAMODE_BURST_ALL_DATA
;
1640 CSR_WRITE_4(sc
, BGE_WDMA_MODE
, val
);
1643 if (BNX_IS_57765_PLUS(sc
)) {
1644 uint32_t dmactl
, dmactl_reg
;
1646 if (sc
->bnx_asicrev
== BGE_ASICREV_BCM5762
)
1647 dmactl_reg
= BGE_RDMA_RSRVCTRL2
;
1649 dmactl_reg
= BGE_RDMA_RSRVCTRL
;
1651 dmactl
= CSR_READ_4(sc
, dmactl_reg
);
1653 * Adjust tx margin to prevent TX data corruption and
1654 * fix internal FIFO overflow.
1656 if (sc
->bnx_asicrev
== BGE_ASICREV_BCM5719
||
1657 sc
->bnx_asicrev
== BGE_ASICREV_BCM5720
||
1658 sc
->bnx_asicrev
== BGE_ASICREV_BCM5762
) {
1659 dmactl
&= ~(BGE_RDMA_RSRVCTRL_FIFO_LWM_MASK
|
1660 BGE_RDMA_RSRVCTRL_FIFO_HWM_MASK
|
1661 BGE_RDMA_RSRVCTRL_TXMRGN_MASK
);
1662 dmactl
|= BGE_RDMA_RSRVCTRL_FIFO_LWM_1_5K
|
1663 BGE_RDMA_RSRVCTRL_FIFO_HWM_1_5K
|
1664 BGE_RDMA_RSRVCTRL_TXMRGN_320B
;
1667 * Enable fix for read DMA FIFO overruns.
1668 * The fix is to limit the number of RX BDs
1669 * the hardware would fetch at a fime.
1671 CSR_WRITE_4(sc
, dmactl_reg
,
1672 dmactl
| BGE_RDMA_RSRVCTRL_FIFO_OFLW_FIX
);
1675 if (sc
->bnx_asicrev
== BGE_ASICREV_BCM5719
) {
1676 CSR_WRITE_4(sc
, BGE_RDMA_LSO_CRPTEN_CTRL
,
1677 CSR_READ_4(sc
, BGE_RDMA_LSO_CRPTEN_CTRL
) |
1678 BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_BD_4K
|
1679 BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_LSO_4K
);
1680 } else if (sc
->bnx_asicrev
== BGE_ASICREV_BCM5720
||
1681 sc
->bnx_asicrev
== BGE_ASICREV_BCM5762
) {
1684 if (sc
->bnx_asicrev
== BGE_ASICREV_BCM5762
)
1685 ctrl_reg
= BGE_RDMA_LSO_CRPTEN_CTRL2
;
1687 ctrl_reg
= BGE_RDMA_LSO_CRPTEN_CTRL
;
1690 * Allow 4KB burst length reads for non-LSO frames.
1691 * Enable 512B burst length reads for buffer descriptors.
1693 CSR_WRITE_4(sc
, ctrl_reg
,
1694 CSR_READ_4(sc
, ctrl_reg
) |
1695 BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_BD_512
|
1696 BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_LSO_4K
);
1699 /* Turn on read DMA state machine */
1700 val
= BGE_RDMAMODE_ENABLE
| BGE_RDMAMODE_ALL_ATTNS
;
1701 if (sc
->bnx_asicrev
== BGE_ASICREV_BCM5717
)
1702 val
|= BGE_RDMAMODE_MULT_DMA_RD_DIS
;
1703 if (sc
->bnx_asicrev
== BGE_ASICREV_BCM5784
||
1704 sc
->bnx_asicrev
== BGE_ASICREV_BCM5785
||
1705 sc
->bnx_asicrev
== BGE_ASICREV_BCM57780
) {
1706 val
|= BGE_RDMAMODE_BD_SBD_CRPT_ATTN
|
1707 BGE_RDMAMODE_MBUF_RBD_CRPT_ATTN
|
1708 BGE_RDMAMODE_MBUF_SBD_CRPT_ATTN
;
1710 if (sc
->bnx_asicrev
== BGE_ASICREV_BCM5720
||
1711 sc
->bnx_asicrev
== BGE_ASICREV_BCM5762
) {
1712 val
|= CSR_READ_4(sc
, BGE_RDMA_MODE
) &
1713 BGE_RDMAMODE_H2BNC_VLAN_DET
;
1715 * Allow multiple outstanding read requests from
1716 * non-LSO read DMA engine.
1718 val
&= ~BGE_RDMAMODE_MULT_DMA_RD_DIS
;
1720 if (sc
->bnx_asicrev
== BGE_ASICREV_BCM57766
)
1721 val
|= BGE_RDMAMODE_JMB_2K_MMRR
;
1722 if (sc
->bnx_flags
& BNX_FLAG_TSO
)
1723 val
|= BGE_RDMAMODE_TSO4_ENABLE
;
1724 val
|= BGE_RDMAMODE_FIFO_LONG_BURST
;
1725 CSR_WRITE_4(sc
, BGE_RDMA_MODE
, val
);
1728 /* Turn on RX data completion state machine */
1729 CSR_WRITE_4(sc
, BGE_RDC_MODE
, BGE_RDCMODE_ENABLE
);
1731 /* Turn on RX BD initiator state machine */
1732 CSR_WRITE_4(sc
, BGE_RBDI_MODE
, BGE_RBDIMODE_ENABLE
);
1734 /* Turn on RX data and RX BD initiator state machine */
1735 CSR_WRITE_4(sc
, BGE_RDBDI_MODE
, BGE_RDBDIMODE_ENABLE
);
1737 /* Turn on send BD completion state machine */
1738 CSR_WRITE_4(sc
, BGE_SBDC_MODE
, BGE_SBDCMODE_ENABLE
);
1740 /* Turn on send data completion state machine */
1741 val
= BGE_SDCMODE_ENABLE
;
1742 if (sc
->bnx_asicrev
== BGE_ASICREV_BCM5761
)
1743 val
|= BGE_SDCMODE_CDELAY
;
1744 CSR_WRITE_4(sc
, BGE_SDC_MODE
, val
);
1746 /* Turn on send data initiator state machine */
1747 if (sc
->bnx_flags
& BNX_FLAG_TSO
) {
1748 CSR_WRITE_4(sc
, BGE_SDI_MODE
, BGE_SDIMODE_ENABLE
|
1749 BGE_SDIMODE_HW_LSO_PRE_DMA
);
1751 CSR_WRITE_4(sc
, BGE_SDI_MODE
, BGE_SDIMODE_ENABLE
);
1754 /* Turn on send BD initiator state machine */
1755 val
= BGE_SBDIMODE_ENABLE
;
1756 if (sc
->bnx_tx_ringcnt
> 1)
1757 val
|= BGE_SBDIMODE_MULTI_TXR
;
1758 CSR_WRITE_4(sc
, BGE_SBDI_MODE
, val
);
1760 /* Turn on send BD selector state machine */
1761 CSR_WRITE_4(sc
, BGE_SRS_MODE
, BGE_SRSMODE_ENABLE
);
1763 CSR_WRITE_4(sc
, BGE_SDI_STATS_ENABLE_MASK
, 0x007FFFFF);
1764 CSR_WRITE_4(sc
, BGE_SDI_STATS_CTL
,
1765 BGE_SDISTATSCTL_ENABLE
|BGE_SDISTATSCTL_FASTER
);
1767 /* ack/clear link change events */
1768 CSR_WRITE_4(sc
, BGE_MAC_STS
, BGE_MACSTAT_SYNC_CHANGED
|
1769 BGE_MACSTAT_CFG_CHANGED
|BGE_MACSTAT_MI_COMPLETE
|
1770 BGE_MACSTAT_LINK_CHANGED
);
1771 CSR_WRITE_4(sc
, BGE_MI_STS
, 0);
1774 * Enable attention when the link has changed state for
1775 * devices that use auto polling.
1777 if (sc
->bnx_flags
& BNX_FLAG_TBI
) {
1778 CSR_WRITE_4(sc
, BGE_MI_STS
, BGE_MISTS_LINK
);
1780 if (sc
->bnx_mi_mode
& BGE_MIMODE_AUTOPOLL
) {
1781 CSR_WRITE_4(sc
, BGE_MI_MODE
, sc
->bnx_mi_mode
);
1787 * Clear any pending link state attention.
1788 * Otherwise some link state change events may be lost until attention
1789 * is cleared by bnx_intr() -> bnx_softc.bnx_link_upd() sequence.
1790 * It's not necessary on newer BCM chips - perhaps enabling link
1791 * state change attentions implies clearing pending attention.
1793 CSR_WRITE_4(sc
, BGE_MAC_STS
, BGE_MACSTAT_SYNC_CHANGED
|
1794 BGE_MACSTAT_CFG_CHANGED
|BGE_MACSTAT_MI_COMPLETE
|
1795 BGE_MACSTAT_LINK_CHANGED
);
1797 /* Enable link state change attentions. */
1798 BNX_SETBIT(sc
, BGE_MAC_EVT_ENB
, BGE_EVTENB_LINK_CHANGED
);
1804 * Probe for a Broadcom chip. Check the PCI vendor and device IDs
1805 * against our list and return its name if we find a match. Note
1806 * that since the Broadcom controller contains VPD support, we
1807 * can get the device name string from the controller itself instead
1808 * of the compiled-in string. This is a little slow, but it guarantees
1809 * we'll always announce the right product name.
1812 bnx_probe(device_t dev
)
1814 const struct bnx_type
*t
;
1815 uint16_t product
, vendor
;
1817 if (!pci_is_pcie(dev
))
1820 product
= pci_get_device(dev
);
1821 vendor
= pci_get_vendor(dev
);
1823 for (t
= bnx_devs
; t
->bnx_name
!= NULL
; t
++) {
1824 if (vendor
== t
->bnx_vid
&& product
== t
->bnx_did
)
1827 if (t
->bnx_name
== NULL
)
1830 device_set_desc(dev
, t
->bnx_name
);
1835 bnx_attach(device_t dev
)
1838 struct bnx_softc
*sc
;
1839 struct bnx_rx_std_ring
*std
;
1840 struct sysctl_ctx_list
*ctx
;
1841 struct sysctl_oid_list
*tree
;
1843 int error
= 0, rid
, capmask
, i
, std_cpuid
, std_cpuid_def
;
1844 uint8_t ether_addr
[ETHER_ADDR_LEN
];
1846 uintptr_t mii_priv
= 0;
1847 #if defined(BNX_TSO_DEBUG) || defined(BNX_RSS_DEBUG) || defined(BNX_TSS_DEBUG)
1850 #ifdef IFPOLL_ENABLE
1851 int offset
, offset_def
;
1854 sc
= device_get_softc(dev
);
1856 callout_init_mp(&sc
->bnx_tick_timer
);
1857 lwkt_serialize_init(&sc
->bnx_jslot_serializer
);
1858 lwkt_serialize_init(&sc
->bnx_main_serialize
);
1860 /* Always setup interrupt mailboxes */
1861 for (i
= 0; i
< BNX_INTR_MAX
; ++i
) {
1862 callout_init_mp(&sc
->bnx_intr_data
[i
].bnx_intr_timer
);
1863 sc
->bnx_intr_data
[i
].bnx_sc
= sc
;
1864 sc
->bnx_intr_data
[i
].bnx_intr_mbx
= BGE_MBX_IRQ0_LO
+ (i
* 8);
1865 sc
->bnx_intr_data
[i
].bnx_intr_rid
= -1;
1866 sc
->bnx_intr_data
[i
].bnx_intr_cpuid
= -1;
1869 sc
->bnx_func_addr
= pci_get_function(dev
);
1870 product
= pci_get_device(dev
);
1872 #ifndef BURN_BRIDGES
1873 if (pci_get_powerstate(dev
) != PCI_POWERSTATE_D0
) {
1876 irq
= pci_read_config(dev
, PCIR_INTLINE
, 4);
1877 mem
= pci_read_config(dev
, BGE_PCI_BAR0
, 4);
1879 device_printf(dev
, "chip is in D%d power mode "
1880 "-- setting to D0\n", pci_get_powerstate(dev
));
1882 pci_set_powerstate(dev
, PCI_POWERSTATE_D0
);
1884 pci_write_config(dev
, PCIR_INTLINE
, irq
, 4);
1885 pci_write_config(dev
, BGE_PCI_BAR0
, mem
, 4);
1887 #endif /* !BURN_BRIDGE */
1890 * Map control/status registers.
1892 pci_enable_busmaster(dev
);
1895 sc
->bnx_res
= bus_alloc_resource_any(dev
, SYS_RES_MEMORY
, &rid
,
1898 if (sc
->bnx_res
== NULL
) {
1899 device_printf(dev
, "couldn't map memory\n");
1903 sc
->bnx_btag
= rman_get_bustag(sc
->bnx_res
);
1904 sc
->bnx_bhandle
= rman_get_bushandle(sc
->bnx_res
);
1906 /* Save various chip information */
1908 pci_read_config(dev
, BGE_PCI_MISC_CTL
, 4) >>
1909 BGE_PCIMISCCTL_ASICREV_SHIFT
;
1910 if (BGE_ASICREV(sc
->bnx_chipid
) == BGE_ASICREV_USE_PRODID_REG
) {
1911 /* All chips having dedicated ASICREV register have CPMU */
1912 sc
->bnx_flags
|= BNX_FLAG_CPMU
;
1915 case PCI_PRODUCT_BROADCOM_BCM5717
:
1916 case PCI_PRODUCT_BROADCOM_BCM5717C
:
1917 case PCI_PRODUCT_BROADCOM_BCM5718
:
1918 case PCI_PRODUCT_BROADCOM_BCM5719
:
1919 case PCI_PRODUCT_BROADCOM_BCM5720_ALT
:
1920 case PCI_PRODUCT_BROADCOM_BCM5725
:
1921 case PCI_PRODUCT_BROADCOM_BCM5727
:
1922 case PCI_PRODUCT_BROADCOM_BCM5762
:
1923 sc
->bnx_chipid
= pci_read_config(dev
,
1924 BGE_PCI_GEN2_PRODID_ASICREV
, 4);
1927 case PCI_PRODUCT_BROADCOM_BCM57761
:
1928 case PCI_PRODUCT_BROADCOM_BCM57762
:
1929 case PCI_PRODUCT_BROADCOM_BCM57765
:
1930 case PCI_PRODUCT_BROADCOM_BCM57766
:
1931 case PCI_PRODUCT_BROADCOM_BCM57781
:
1932 case PCI_PRODUCT_BROADCOM_BCM57782
:
1933 case PCI_PRODUCT_BROADCOM_BCM57785
:
1934 case PCI_PRODUCT_BROADCOM_BCM57786
:
1935 case PCI_PRODUCT_BROADCOM_BCM57791
:
1936 case PCI_PRODUCT_BROADCOM_BCM57795
:
1937 sc
->bnx_chipid
= pci_read_config(dev
,
1938 BGE_PCI_GEN15_PRODID_ASICREV
, 4);
1942 sc
->bnx_chipid
= pci_read_config(dev
,
1943 BGE_PCI_PRODID_ASICREV
, 4);
1947 if (sc
->bnx_chipid
== BGE_CHIPID_BCM5717_C0
)
1948 sc
->bnx_chipid
= BGE_CHIPID_BCM5720_A0
;
1950 sc
->bnx_asicrev
= BGE_ASICREV(sc
->bnx_chipid
);
1951 sc
->bnx_chiprev
= BGE_CHIPREV(sc
->bnx_chipid
);
1953 switch (sc
->bnx_asicrev
) {
1954 case BGE_ASICREV_BCM5717
:
1955 case BGE_ASICREV_BCM5719
:
1956 case BGE_ASICREV_BCM5720
:
1957 sc
->bnx_flags
|= BNX_FLAG_5717_PLUS
| BNX_FLAG_57765_PLUS
;
1960 case BGE_ASICREV_BCM5762
:
1961 sc
->bnx_flags
|= BNX_FLAG_57765_PLUS
;
1964 case BGE_ASICREV_BCM57765
:
1965 case BGE_ASICREV_BCM57766
:
1966 sc
->bnx_flags
|= BNX_FLAG_57765_FAMILY
| BNX_FLAG_57765_PLUS
;
1970 if (sc
->bnx_asicrev
== BGE_ASICREV_BCM5717
||
1971 sc
->bnx_asicrev
== BGE_ASICREV_BCM5719
||
1972 sc
->bnx_asicrev
== BGE_ASICREV_BCM5720
||
1973 sc
->bnx_asicrev
== BGE_ASICREV_BCM5762
)
1974 sc
->bnx_flags
|= BNX_FLAG_APE
;
1976 sc
->bnx_flags
|= BNX_FLAG_TSO
;
1977 if (sc
->bnx_asicrev
== BGE_ASICREV_BCM5719
&&
1978 sc
->bnx_chipid
== BGE_CHIPID_BCM5719_A0
)
1979 sc
->bnx_flags
&= ~BNX_FLAG_TSO
;
1981 if (sc
->bnx_asicrev
== BGE_ASICREV_BCM5717
||
1982 BNX_IS_57765_FAMILY(sc
)) {
1984 * All BCM57785 and BCM5718 families chips have a bug that
1985 * under certain situation interrupt will not be enabled
1986 * even if status tag is written to interrupt mailbox.
1988 * While BCM5719 and BCM5720 have a hardware workaround
1989 * which could fix the above bug.
1990 * See the comment near BGE_PCIDMARWCTL_TAGGED_STATUS_WA in
1993 * For the rest of the chips in these two families, we will
1994 * have to poll the status block at high rate (10ms currently)
1995 * to check whether the interrupt is hosed or not.
1996 * See bnx_check_intr_*() for details.
1998 sc
->bnx_flags
|= BNX_FLAG_STATUSTAG_BUG
;
2001 sc
->bnx_pciecap
= pci_get_pciecap_ptr(sc
->bnx_dev
);
2002 if (sc
->bnx_asicrev
== BGE_ASICREV_BCM5719
||
2003 sc
->bnx_asicrev
== BGE_ASICREV_BCM5720
)
2004 pcie_set_max_readrq(dev
, PCIEM_DEVCTL_MAX_READRQ_2048
);
2006 pcie_set_max_readrq(dev
, PCIEM_DEVCTL_MAX_READRQ_4096
);
2007 device_printf(dev
, "CHIP ID 0x%08x; "
2008 "ASIC REV 0x%02x; CHIP REV 0x%02x\n",
2009 sc
->bnx_chipid
, sc
->bnx_asicrev
, sc
->bnx_chiprev
);
2012 * Set various PHY quirk flags.
2015 capmask
= MII_CAPMASK_DEFAULT
;
2016 if (product
== PCI_PRODUCT_BROADCOM_BCM57791
||
2017 product
== PCI_PRODUCT_BROADCOM_BCM57795
) {
2019 capmask
&= ~BMSR_EXTSTAT
;
2022 mii_priv
|= BRGPHY_FLAG_WIRESPEED
;
2023 if (sc
->bnx_chipid
== BGE_CHIPID_BCM5762_A0
)
2024 mii_priv
|= BRGPHY_FLAG_5762_A0
;
2027 * Chips with APE need BAR2 access for APE registers/memory.
2029 if (sc
->bnx_flags
& BNX_FLAG_APE
) {
2033 sc
->bnx_res2
= bus_alloc_resource_any(dev
, SYS_RES_MEMORY
, &rid
,
2035 if (sc
->bnx_res2
== NULL
) {
2036 device_printf(dev
, "couldn't map BAR2 memory\n");
2041 /* Enable APE register/memory access by host driver. */
2042 pcistate
= pci_read_config(dev
, BGE_PCI_PCISTATE
, 4);
2043 pcistate
|= BGE_PCISTATE_ALLOW_APE_CTLSPC_WR
|
2044 BGE_PCISTATE_ALLOW_APE_SHMEM_WR
|
2045 BGE_PCISTATE_ALLOW_APE_PSPACE_WR
;
2046 pci_write_config(dev
, BGE_PCI_PCISTATE
, pcistate
, 4);
2048 bnx_ape_lock_init(sc
);
2049 bnx_ape_read_fw_ver(sc
);
2052 /* Initialize if_name earlier, so if_printf could be used */
2053 ifp
= &sc
->arpcom
.ac_if
;
2054 if_initname(ifp
, device_get_name(dev
), device_get_unit(dev
));
2057 * Try to reset the chip.
2059 bnx_sig_pre_reset(sc
, BNX_RESET_SHUTDOWN
);
2061 bnx_sig_post_reset(sc
, BNX_RESET_SHUTDOWN
);
2063 if (bnx_chipinit(sc
)) {
2064 device_printf(dev
, "chip initialization failed\n");
2070 * Get station address
2072 error
= bnx_get_eaddr(sc
, ether_addr
);
2074 device_printf(dev
, "failed to read station address\n");
2078 /* Setup RX/TX and interrupt count */
2079 bnx_setup_ring_cnt(sc
);
2081 if ((sc
->bnx_rx_retcnt
== 1 && sc
->bnx_tx_ringcnt
== 1) ||
2082 (sc
->bnx_rx_retcnt
> 1 && sc
->bnx_tx_ringcnt
> 1)) {
2084 * The RX ring and the corresponding TX ring processing
2085 * should be on the same CPU, since they share the same
2088 sc
->bnx_flags
|= BNX_FLAG_RXTX_BUNDLE
;
2090 device_printf(dev
, "RX/TX bundle\n");
2091 if (sc
->bnx_tx_ringcnt
> 1) {
2093 * Multiple TX rings do not share status block
2094 * with link status, so link status will have
2095 * to save its own status_tag.
2097 sc
->bnx_flags
|= BNX_FLAG_STATUS_HASTAG
;
2099 device_printf(dev
, "status needs tag\n");
2102 KKASSERT(sc
->bnx_rx_retcnt
> 1 && sc
->bnx_tx_ringcnt
== 1);
2104 device_printf(dev
, "RX/TX not bundled\n");
2107 error
= bnx_dma_alloc(dev
);
2111 #ifdef IFPOLL_ENABLE
2112 if (sc
->bnx_flags
& BNX_FLAG_RXTX_BUNDLE
) {
2114 * NPOLLING RX/TX CPU offset
2116 if (sc
->bnx_rx_retcnt
== ncpus2
) {
2120 (sc
->bnx_rx_retcnt
* device_get_unit(dev
)) % ncpus2
;
2121 offset
= device_getenv_int(dev
, "npoll.offset",
2123 if (offset
>= ncpus2
||
2124 offset
% sc
->bnx_rx_retcnt
!= 0) {
2125 device_printf(dev
, "invalid npoll.offset %d, "
2126 "use %d\n", offset
, offset_def
);
2127 offset
= offset_def
;
2130 sc
->bnx_npoll_rxoff
= offset
;
2131 sc
->bnx_npoll_txoff
= offset
;
2134 * NPOLLING RX CPU offset
2136 if (sc
->bnx_rx_retcnt
== ncpus2
) {
2140 (sc
->bnx_rx_retcnt
* device_get_unit(dev
)) % ncpus2
;
2141 offset
= device_getenv_int(dev
, "npoll.rxoff",
2143 if (offset
>= ncpus2
||
2144 offset
% sc
->bnx_rx_retcnt
!= 0) {
2145 device_printf(dev
, "invalid npoll.rxoff %d, "
2146 "use %d\n", offset
, offset_def
);
2147 offset
= offset_def
;
2150 sc
->bnx_npoll_rxoff
= offset
;
2153 * NPOLLING TX CPU offset
2155 offset_def
= device_get_unit(dev
) % ncpus2
;
2156 offset
= device_getenv_int(dev
, "npoll.txoff", offset_def
);
2157 if (offset
>= ncpus2
) {
2158 device_printf(dev
, "invalid npoll.txoff %d, use %d\n",
2159 offset
, offset_def
);
2160 offset
= offset_def
;
2162 sc
->bnx_npoll_txoff
= offset
;
2164 #endif /* IFPOLL_ENABLE */
2167 * Allocate interrupt
2169 error
= bnx_alloc_intr(sc
);
2173 /* Setup serializers */
2174 bnx_setup_serialize(sc
);
2176 /* Set default tuneable values. */
2177 sc
->bnx_rx_coal_ticks
= BNX_RX_COAL_TICKS_DEF
;
2178 sc
->bnx_tx_coal_ticks
= BNX_TX_COAL_TICKS_DEF
;
2179 sc
->bnx_rx_coal_bds
= BNX_RX_COAL_BDS_DEF
;
2180 sc
->bnx_rx_coal_bds_poll
= sc
->bnx_rx_ret_ring
[0].bnx_rx_cntmax
;
2181 sc
->bnx_tx_coal_bds
= BNX_TX_COAL_BDS_DEF
;
2182 sc
->bnx_tx_coal_bds_poll
= BNX_TX_COAL_BDS_POLL_DEF
;
2183 sc
->bnx_rx_coal_bds_int
= BNX_RX_COAL_BDS_INT_DEF
;
2184 sc
->bnx_tx_coal_bds_int
= BNX_TX_COAL_BDS_INT_DEF
;
2186 /* Set up ifnet structure */
2188 ifp
->if_flags
= IFF_BROADCAST
| IFF_SIMPLEX
| IFF_MULTICAST
;
2189 ifp
->if_ioctl
= bnx_ioctl
;
2190 ifp
->if_start
= bnx_start
;
2191 #ifdef IFPOLL_ENABLE
2192 ifp
->if_npoll
= bnx_npoll
;
2194 ifp
->if_init
= bnx_init
;
2195 ifp
->if_serialize
= bnx_serialize
;
2196 ifp
->if_deserialize
= bnx_deserialize
;
2197 ifp
->if_tryserialize
= bnx_tryserialize
;
2199 ifp
->if_serialize_assert
= bnx_serialize_assert
;
2201 ifp
->if_mtu
= ETHERMTU
;
2202 ifp
->if_capabilities
= IFCAP_VLAN_HWTAGGING
| IFCAP_VLAN_MTU
;
2204 ifp
->if_capabilities
|= IFCAP_HWCSUM
;
2205 ifp
->if_hwassist
= BNX_CSUM_FEATURES
;
2206 if (sc
->bnx_flags
& BNX_FLAG_TSO
) {
2207 ifp
->if_capabilities
|= IFCAP_TSO
;
2208 ifp
->if_hwassist
|= CSUM_TSO
;
2210 if (BNX_RSS_ENABLED(sc
))
2211 ifp
->if_capabilities
|= IFCAP_RSS
;
2212 ifp
->if_capenable
= ifp
->if_capabilities
;
2214 ifp
->if_nmbclusters
= BGE_STD_RX_RING_CNT
;
2216 ifq_set_maxlen(&ifp
->if_snd
, BGE_TX_RING_CNT
- 1);
2217 ifq_set_ready(&ifp
->if_snd
);
2218 ifq_set_subq_cnt(&ifp
->if_snd
, sc
->bnx_tx_ringcnt
);
2220 if (sc
->bnx_tx_ringcnt
> 1) {
2221 ifp
->if_mapsubq
= ifq_mapsubq_mask
;
2222 ifq_set_subq_mask(&ifp
->if_snd
, sc
->bnx_tx_ringcnt
- 1);
2226 * Figure out what sort of media we have by checking the
2227 * hardware config word in the first 32k of NIC internal memory,
2228 * or fall back to examining the EEPROM if necessary.
2229 * Note: on some BCM5700 cards, this value appears to be unset.
2230 * If that's the case, we have to rely on identifying the NIC
2231 * by its PCI subsystem ID, as we do below for the SysKonnect
2234 if (bnx_readmem_ind(sc
, BGE_SRAM_DATA_SIG
) == BGE_SRAM_DATA_SIG_MAGIC
) {
2235 hwcfg
= bnx_readmem_ind(sc
, BGE_SRAM_DATA_CFG
);
2237 if (bnx_read_eeprom(sc
, (caddr_t
)&hwcfg
, BGE_EE_HWCFG_OFFSET
,
2239 device_printf(dev
, "failed to read EEPROM\n");
2243 hwcfg
= ntohl(hwcfg
);
2246 /* The SysKonnect SK-9D41 is a 1000baseSX card. */
2247 if (pci_get_subvendor(dev
) == PCI_PRODUCT_SCHNEIDERKOCH_SK_9D41
||
2248 (hwcfg
& BGE_HWCFG_MEDIA
) == BGE_MEDIA_FIBER
)
2249 sc
->bnx_flags
|= BNX_FLAG_TBI
;
2252 if (sc
->bnx_flags
& BNX_FLAG_CPMU
)
2253 sc
->bnx_mi_mode
= BGE_MIMODE_500KHZ_CONST
;
2255 sc
->bnx_mi_mode
= BGE_MIMODE_BASE
;
2257 /* Setup link status update stuffs */
2258 if (sc
->bnx_flags
& BNX_FLAG_TBI
) {
2259 sc
->bnx_link_upd
= bnx_tbi_link_upd
;
2260 sc
->bnx_link_chg
= BGE_MACSTAT_LINK_CHANGED
;
2261 } else if (sc
->bnx_mi_mode
& BGE_MIMODE_AUTOPOLL
) {
2262 sc
->bnx_link_upd
= bnx_autopoll_link_upd
;
2263 sc
->bnx_link_chg
= BGE_MACSTAT_LINK_CHANGED
;
2265 sc
->bnx_link_upd
= bnx_copper_link_upd
;
2266 sc
->bnx_link_chg
= BGE_MACSTAT_LINK_CHANGED
;
2269 /* Set default PHY address */
2273 * PHY address mapping for various devices.
2275 * | F0 Cu | F0 Sr | F1 Cu | F1 Sr |
2276 * ---------+-------+-------+-------+-------+
2277 * BCM57XX | 1 | X | X | X |
2278 * BCM5717 | 1 | 8 | 2 | 9 |
2279 * BCM5719 | 1 | 8 | 2 | 9 |
2280 * BCM5720 | 1 | 8 | 2 | 9 |
2282 * | F2 Cu | F2 Sr | F3 Cu | F3 Sr |
2283 * ---------+-------+-------+-------+-------+
2284 * BCM57XX | X | X | X | X |
2285 * BCM5717 | X | X | X | X |
2286 * BCM5719 | 3 | 10 | 4 | 11 |
2287 * BCM5720 | X | X | X | X |
2289 * Other addresses may respond but they are not
2290 * IEEE compliant PHYs and should be ignored.
2292 if (BNX_IS_5717_PLUS(sc
)) {
2293 if (sc
->bnx_chipid
== BGE_CHIPID_BCM5717_A0
) {
2294 if (CSR_READ_4(sc
, BGE_SGDIG_STS
) &
2295 BGE_SGDIGSTS_IS_SERDES
)
2296 sc
->bnx_phyno
= sc
->bnx_func_addr
+ 8;
2298 sc
->bnx_phyno
= sc
->bnx_func_addr
+ 1;
2300 if (CSR_READ_4(sc
, BGE_CPMU_PHY_STRAP
) &
2301 BGE_CPMU_PHY_STRAP_IS_SERDES
)
2302 sc
->bnx_phyno
= sc
->bnx_func_addr
+ 8;
2304 sc
->bnx_phyno
= sc
->bnx_func_addr
+ 1;
2308 if (sc
->bnx_flags
& BNX_FLAG_TBI
) {
2309 ifmedia_init(&sc
->bnx_ifmedia
, IFM_IMASK
,
2310 bnx_ifmedia_upd
, bnx_ifmedia_sts
);
2311 ifmedia_add(&sc
->bnx_ifmedia
, IFM_ETHER
|IFM_1000_SX
, 0, NULL
);
2312 ifmedia_add(&sc
->bnx_ifmedia
,
2313 IFM_ETHER
|IFM_1000_SX
|IFM_FDX
, 0, NULL
);
2314 ifmedia_add(&sc
->bnx_ifmedia
, IFM_ETHER
|IFM_AUTO
, 0, NULL
);
2315 ifmedia_set(&sc
->bnx_ifmedia
, IFM_ETHER
|IFM_AUTO
);
2316 sc
->bnx_ifmedia
.ifm_media
= sc
->bnx_ifmedia
.ifm_cur
->ifm_media
;
2318 struct mii_probe_args mii_args
;
2320 mii_probe_args_init(&mii_args
, bnx_ifmedia_upd
, bnx_ifmedia_sts
);
2321 mii_args
.mii_probemask
= 1 << sc
->bnx_phyno
;
2322 mii_args
.mii_capmask
= capmask
;
2323 mii_args
.mii_privtag
= MII_PRIVTAG_BRGPHY
;
2324 mii_args
.mii_priv
= mii_priv
;
2326 error
= mii_probe(dev
, &sc
->bnx_miibus
, &mii_args
);
2328 device_printf(dev
, "MII without any PHY!\n");
2333 ctx
= device_get_sysctl_ctx(sc
->bnx_dev
);
2334 tree
= SYSCTL_CHILDREN(device_get_sysctl_tree(sc
->bnx_dev
));
2336 SYSCTL_ADD_INT(ctx
, tree
, OID_AUTO
,
2337 "rx_rings", CTLFLAG_RD
, &sc
->bnx_rx_retcnt
, 0, "# of RX rings");
2338 SYSCTL_ADD_INT(ctx
, tree
, OID_AUTO
,
2339 "tx_rings", CTLFLAG_RD
, &sc
->bnx_tx_ringcnt
, 0, "# of TX rings");
2341 SYSCTL_ADD_PROC(ctx
, tree
, OID_AUTO
, "rx_coal_ticks",
2342 CTLTYPE_INT
| CTLFLAG_RW
,
2343 sc
, 0, bnx_sysctl_rx_coal_ticks
, "I",
2344 "Receive coalescing ticks (usec).");
2345 SYSCTL_ADD_PROC(ctx
, tree
, OID_AUTO
, "tx_coal_ticks",
2346 CTLTYPE_INT
| CTLFLAG_RW
,
2347 sc
, 0, bnx_sysctl_tx_coal_ticks
, "I",
2348 "Transmit coalescing ticks (usec).");
2349 SYSCTL_ADD_PROC(ctx
, tree
, OID_AUTO
, "rx_coal_bds",
2350 CTLTYPE_INT
| CTLFLAG_RW
,
2351 sc
, 0, bnx_sysctl_rx_coal_bds
, "I",
2352 "Receive max coalesced BD count.");
2353 SYSCTL_ADD_PROC(ctx
, tree
, OID_AUTO
, "rx_coal_bds_poll",
2354 CTLTYPE_INT
| CTLFLAG_RW
,
2355 sc
, 0, bnx_sysctl_rx_coal_bds_poll
, "I",
2356 "Receive max coalesced BD count in polling.");
2357 SYSCTL_ADD_PROC(ctx
, tree
, OID_AUTO
, "tx_coal_bds",
2358 CTLTYPE_INT
| CTLFLAG_RW
,
2359 sc
, 0, bnx_sysctl_tx_coal_bds
, "I",
2360 "Transmit max coalesced BD count.");
2361 SYSCTL_ADD_PROC(ctx
, tree
, OID_AUTO
, "tx_coal_bds_poll",
2362 CTLTYPE_INT
| CTLFLAG_RW
,
2363 sc
, 0, bnx_sysctl_tx_coal_bds_poll
, "I",
2364 "Transmit max coalesced BD count in polling.");
2366 * A common design characteristic for many Broadcom
2367 * client controllers is that they only support a
2368 * single outstanding DMA read operation on the PCIe
2369 * bus. This means that it will take twice as long to
2370 * fetch a TX frame that is split into header and
2371 * payload buffers as it does to fetch a single,
2372 * contiguous TX frame (2 reads vs. 1 read). For these
2373 * controllers, coalescing buffers to reduce the number
2374 * of memory reads is effective way to get maximum
2375 * performance(about 940Mbps). Without collapsing TX
2376 * buffers the maximum TCP bulk transfer performance
2377 * is about 850Mbps. However forcing coalescing mbufs
2378 * consumes a lot of CPU cycles, so leave it off by
2381 SYSCTL_ADD_PROC(ctx
, tree
, OID_AUTO
,
2382 "force_defrag", CTLTYPE_INT
| CTLFLAG_RW
,
2383 sc
, 0, bnx_sysctl_force_defrag
, "I",
2384 "Force defragment on TX path");
2386 SYSCTL_ADD_PROC(ctx
, tree
, OID_AUTO
,
2387 "tx_wreg", CTLTYPE_INT
| CTLFLAG_RW
,
2388 sc
, 0, bnx_sysctl_tx_wreg
, "I",
2389 "# of segments before writing to hardware register");
2391 SYSCTL_ADD_PROC(ctx
, tree
, OID_AUTO
,
2392 "std_refill", CTLTYPE_INT
| CTLFLAG_RW
,
2393 sc
, 0, bnx_sysctl_std_refill
, "I",
2394 "# of packets received before scheduling standard refilling");
2396 SYSCTL_ADD_PROC(ctx
, tree
, OID_AUTO
,
2397 "rx_coal_bds_int", CTLTYPE_INT
| CTLFLAG_RW
,
2398 sc
, 0, bnx_sysctl_rx_coal_bds_int
, "I",
2399 "Receive max coalesced BD count during interrupt.");
2400 SYSCTL_ADD_PROC(ctx
, tree
, OID_AUTO
,
2401 "tx_coal_bds_int", CTLTYPE_INT
| CTLFLAG_RW
,
2402 sc
, 0, bnx_sysctl_tx_coal_bds_int
, "I",
2403 "Transmit max coalesced BD count during interrupt.");
2405 #ifdef IFPOLL_ENABLE
2406 if (sc
->bnx_flags
& BNX_FLAG_RXTX_BUNDLE
) {
2407 SYSCTL_ADD_PROC(ctx
, tree
, OID_AUTO
,
2408 "npoll_offset", CTLTYPE_INT
| CTLFLAG_RW
,
2409 sc
, 0, bnx_sysctl_npoll_offset
, "I",
2410 "NPOLLING cpu offset");
2412 SYSCTL_ADD_PROC(ctx
, tree
, OID_AUTO
,
2413 "npoll_rxoff", CTLTYPE_INT
| CTLFLAG_RW
,
2414 sc
, 0, bnx_sysctl_npoll_rxoff
, "I",
2415 "NPOLLING RX cpu offset");
2416 SYSCTL_ADD_PROC(ctx
, tree
, OID_AUTO
,
2417 "npoll_txoff", CTLTYPE_INT
| CTLFLAG_RW
,
2418 sc
, 0, bnx_sysctl_npoll_txoff
, "I",
2419 "NPOLLING TX cpu offset");
2423 #ifdef BNX_RSS_DEBUG
2424 SYSCTL_ADD_INT(ctx
, tree
, OID_AUTO
,
2425 "std_refill_mask", CTLFLAG_RD
,
2426 &sc
->bnx_rx_std_ring
.bnx_rx_std_refill
, 0, "");
2427 SYSCTL_ADD_INT(ctx
, tree
, OID_AUTO
,
2428 "std_used", CTLFLAG_RD
,
2429 &sc
->bnx_rx_std_ring
.bnx_rx_std_used
, 0, "");
2430 SYSCTL_ADD_INT(ctx
, tree
, OID_AUTO
,
2431 "rss_debug", CTLFLAG_RW
, &sc
->bnx_rss_debug
, 0, "");
2432 for (i
= 0; i
< sc
->bnx_rx_retcnt
; ++i
) {
2433 ksnprintf(desc
, sizeof(desc
), "rx_pkt%d", i
);
2434 SYSCTL_ADD_ULONG(ctx
, tree
, OID_AUTO
,
2435 desc
, CTLFLAG_RW
, &sc
->bnx_rx_ret_ring
[i
].bnx_rx_pkt
, "");
2437 ksnprintf(desc
, sizeof(desc
), "rx_force_sched%d", i
);
2438 SYSCTL_ADD_ULONG(ctx
, tree
, OID_AUTO
,
2440 &sc
->bnx_rx_ret_ring
[i
].bnx_rx_force_sched
, "");
2443 #ifdef BNX_TSS_DEBUG
2444 for (i
= 0; i
< sc
->bnx_tx_ringcnt
; ++i
) {
2445 ksnprintf(desc
, sizeof(desc
), "tx_pkt%d", i
);
2446 SYSCTL_ADD_ULONG(ctx
, tree
, OID_AUTO
,
2447 desc
, CTLFLAG_RW
, &sc
->bnx_tx_ring
[i
].bnx_tx_pkt
, "");
2451 SYSCTL_ADD_ULONG(ctx
, tree
, OID_AUTO
,
2452 "norxbds", CTLFLAG_RW
, &sc
->bnx_norxbds
, "");
2454 SYSCTL_ADD_ULONG(ctx
, tree
, OID_AUTO
,
2455 "errors", CTLFLAG_RW
, &sc
->bnx_errors
, "");
2457 #ifdef BNX_TSO_DEBUG
2458 for (i
= 0; i
< BNX_TSO_NSTATS
; ++i
) {
2459 ksnprintf(desc
, sizeof(desc
), "tso%d", i
+ 1);
2460 SYSCTL_ADD_ULONG(ctx
, tree
, OID_AUTO
,
2461 desc
, CTLFLAG_RW
, &sc
->bnx_tsosegs
[i
], "");
2466 * Call MI attach routine.
2468 ether_ifattach(ifp
, ether_addr
, NULL
);
2470 /* Setup TX rings and subqueues */
2471 for (i
= 0; i
< sc
->bnx_tx_ringcnt
; ++i
) {
2472 struct ifaltq_subque
*ifsq
= ifq_get_subq(&ifp
->if_snd
, i
);
2473 struct bnx_tx_ring
*txr
= &sc
->bnx_tx_ring
[i
];
2475 ifsq_set_cpuid(ifsq
, txr
->bnx_tx_cpuid
);
2476 ifsq_set_hw_serialize(ifsq
, &txr
->bnx_tx_serialize
);
2477 ifsq_set_priv(ifsq
, txr
);
2478 txr
->bnx_ifsq
= ifsq
;
2480 ifsq_watchdog_init(&txr
->bnx_tx_watchdog
, ifsq
, bnx_watchdog
);
2483 device_printf(dev
, "txr %d -> cpu%d\n", i
,
2488 error
= bnx_setup_intr(sc
);
2490 ether_ifdetach(ifp
);
2493 bnx_set_tick_cpuid(sc
, FALSE
);
2496 * Create RX standard ring refilling thread
2498 std_cpuid_def
= device_get_unit(dev
) % ncpus
;
2499 std_cpuid
= device_getenv_int(dev
, "std.cpuid", std_cpuid_def
);
2500 if (std_cpuid
< 0 || std_cpuid
>= ncpus
) {
2501 device_printf(dev
, "invalid std.cpuid %d, use %d\n",
2502 std_cpuid
, std_cpuid_def
);
2503 std_cpuid
= std_cpuid_def
;
2506 std
= &sc
->bnx_rx_std_ring
;
2507 lwkt_create(bnx_rx_std_refill_ithread
, std
, NULL
,
2508 &std
->bnx_rx_std_ithread
, TDF_NOSTART
| TDF_INTTHREAD
, std_cpuid
,
2509 "%s std", device_get_nameunit(dev
));
2510 lwkt_setpri(&std
->bnx_rx_std_ithread
, TDPRI_INT_MED
);
2511 std
->bnx_rx_std_ithread
.td_preemptable
= lwkt_preempt
;
2512 sc
->bnx_flags
|= BNX_FLAG_STD_THREAD
;
2521 bnx_detach(device_t dev
)
2523 struct bnx_softc
*sc
= device_get_softc(dev
);
2525 if (device_is_attached(dev
)) {
2526 struct ifnet
*ifp
= &sc
->arpcom
.ac_if
;
2528 ifnet_serialize_all(ifp
);
2530 bnx_teardown_intr(sc
, sc
->bnx_intr_cnt
);
2531 ifnet_deserialize_all(ifp
);
2533 ether_ifdetach(ifp
);
2536 if (sc
->bnx_flags
& BNX_FLAG_STD_THREAD
) {
2537 struct bnx_rx_std_ring
*std
= &sc
->bnx_rx_std_ring
;
2539 tsleep_interlock(std
, 0);
2541 if (std
->bnx_rx_std_ithread
.td_gd
== mycpu
) {
2542 bnx_rx_std_refill_stop(std
);
2544 lwkt_send_ipiq(std
->bnx_rx_std_ithread
.td_gd
,
2545 bnx_rx_std_refill_stop
, std
);
2548 tsleep(std
, PINTERLOCKED
, "bnx_detach", 0);
2550 device_printf(dev
, "RX std ithread exited\n");
2552 lwkt_synchronize_ipiqs("bnx_detach_ipiq");
2555 if (sc
->bnx_flags
& BNX_FLAG_TBI
)
2556 ifmedia_removeall(&sc
->bnx_ifmedia
);
2558 device_delete_child(dev
, sc
->bnx_miibus
);
2559 bus_generic_detach(dev
);
2563 if (sc
->bnx_msix_mem_res
!= NULL
) {
2564 bus_release_resource(dev
, SYS_RES_MEMORY
, sc
->bnx_msix_mem_rid
,
2565 sc
->bnx_msix_mem_res
);
2567 if (sc
->bnx_res
!= NULL
) {
2568 bus_release_resource(dev
, SYS_RES_MEMORY
,
2569 BGE_PCI_BAR0
, sc
->bnx_res
);
2571 if (sc
->bnx_res2
!= NULL
) {
2572 bus_release_resource(dev
, SYS_RES_MEMORY
,
2573 PCIR_BAR(2), sc
->bnx_res2
);
2578 if (sc
->bnx_serialize
!= NULL
)
2579 kfree(sc
->bnx_serialize
, M_DEVBUF
);
2585 bnx_reset(struct bnx_softc
*sc
)
2587 device_t dev
= sc
->bnx_dev
;
2588 uint32_t cachesize
, command
, reset
, mac_mode
, mac_mode_mask
;
2589 void (*write_op
)(struct bnx_softc
*, uint32_t, uint32_t);
2593 mac_mode_mask
= BGE_MACMODE_HALF_DUPLEX
| BGE_MACMODE_PORTMODE
;
2594 if (sc
->bnx_mfw_flags
& BNX_MFW_ON_APE
)
2595 mac_mode_mask
|= BGE_MACMODE_APE_RX_EN
| BGE_MACMODE_APE_TX_EN
;
2596 mac_mode
= CSR_READ_4(sc
, BGE_MAC_MODE
) & mac_mode_mask
;
2598 write_op
= bnx_writemem_direct
;
2600 CSR_WRITE_4(sc
, BGE_NVRAM_SWARB
, BGE_NVRAMSWARB_SET1
);
2601 for (i
= 0; i
< 8000; i
++) {
2602 if (CSR_READ_4(sc
, BGE_NVRAM_SWARB
) & BGE_NVRAMSWARB_GNT1
)
2607 if_printf(&sc
->arpcom
.ac_if
, "NVRAM lock timedout!\n");
2609 /* Take APE lock when performing reset. */
2610 bnx_ape_lock(sc
, BGE_APE_LOCK_GRC
);
2612 /* Save some important PCI state. */
2613 cachesize
= pci_read_config(dev
, BGE_PCI_CACHESZ
, 4);
2614 command
= pci_read_config(dev
, BGE_PCI_CMD
, 4);
2616 pci_write_config(dev
, BGE_PCI_MISC_CTL
,
2617 BGE_PCIMISCCTL_INDIRECT_ACCESS
|BGE_PCIMISCCTL_MASK_PCI_INTR
|
2618 BGE_HIF_SWAP_OPTIONS
|BGE_PCIMISCCTL_PCISTATE_RW
|
2619 BGE_PCIMISCCTL_TAGGED_STATUS
, 4);
2621 /* Disable fastboot on controllers that support it. */
2623 if_printf(&sc
->arpcom
.ac_if
, "Disabling fastboot\n");
2624 CSR_WRITE_4(sc
, BGE_FASTBOOT_PC
, 0x0);
2627 * Write the magic number to SRAM at offset 0xB50.
2628 * When firmware finishes its initialization it will
2629 * write ~BGE_SRAM_FW_MB_MAGIC to the same location.
2631 bnx_writemem_ind(sc
, BGE_SRAM_FW_MB
, BGE_SRAM_FW_MB_MAGIC
);
2633 reset
= BGE_MISCCFG_RESET_CORE_CLOCKS
|(65<<1);
2635 /* XXX: Broadcom Linux driver. */
2636 /* Force PCI-E 1.0a mode */
2637 if (!BNX_IS_57765_PLUS(sc
) &&
2638 CSR_READ_4(sc
, BGE_PCIE_PHY_TSTCTL
) ==
2639 (BGE_PCIE_PHY_TSTCTL_PSCRAM
|
2640 BGE_PCIE_PHY_TSTCTL_PCIE10
)) {
2641 CSR_WRITE_4(sc
, BGE_PCIE_PHY_TSTCTL
,
2642 BGE_PCIE_PHY_TSTCTL_PSCRAM
);
2644 if (sc
->bnx_chipid
!= BGE_CHIPID_BCM5750_A0
) {
2645 /* Prevent PCIE link training during global reset */
2646 CSR_WRITE_4(sc
, BGE_MISC_CFG
, (1<<29));
2651 * Set GPHY Power Down Override to leave GPHY
2652 * powered up in D0 uninitialized.
2654 if ((sc
->bnx_flags
& BNX_FLAG_CPMU
) == 0)
2655 reset
|= BGE_MISCCFG_GPHY_PD_OVERRIDE
;
2657 /* Issue global reset */
2658 write_op(sc
, BGE_MISC_CFG
, reset
);
2662 /* XXX: Broadcom Linux driver. */
2663 if (sc
->bnx_chipid
== BGE_CHIPID_BCM5750_A0
) {
2666 DELAY(500000); /* wait for link training to complete */
2667 v
= pci_read_config(dev
, 0xc4, 4);
2668 pci_write_config(dev
, 0xc4, v
| (1<<15), 4);
2671 devctl
= pci_read_config(dev
, sc
->bnx_pciecap
+ PCIER_DEVCTRL
, 2);
2673 /* Disable no snoop and disable relaxed ordering. */
2674 devctl
&= ~(PCIEM_DEVCTL_RELAX_ORDER
| PCIEM_DEVCTL_NOSNOOP
);
2676 /* Old PCI-E chips only support 128 bytes Max PayLoad Size. */
2677 if ((sc
->bnx_flags
& BNX_FLAG_CPMU
) == 0) {
2678 devctl
&= ~PCIEM_DEVCTL_MAX_PAYLOAD_MASK
;
2679 devctl
|= PCIEM_DEVCTL_MAX_PAYLOAD_128
;
2682 pci_write_config(dev
, sc
->bnx_pciecap
+ PCIER_DEVCTRL
,
2685 /* Clear error status. */
2686 pci_write_config(dev
, sc
->bnx_pciecap
+ PCIER_DEVSTS
,
2687 PCIEM_DEVSTS_CORR_ERR
|
2688 PCIEM_DEVSTS_NFATAL_ERR
|
2689 PCIEM_DEVSTS_FATAL_ERR
|
2690 PCIEM_DEVSTS_UNSUPP_REQ
, 2);
2692 /* Reset some of the PCI state that got zapped by reset */
2693 pci_write_config(dev
, BGE_PCI_MISC_CTL
,
2694 BGE_PCIMISCCTL_INDIRECT_ACCESS
|BGE_PCIMISCCTL_MASK_PCI_INTR
|
2695 BGE_HIF_SWAP_OPTIONS
|BGE_PCIMISCCTL_PCISTATE_RW
|
2696 BGE_PCIMISCCTL_TAGGED_STATUS
, 4);
2697 val
= BGE_PCISTATE_ROM_ENABLE
| BGE_PCISTATE_ROM_RETRY_ENABLE
;
2698 if (sc
->bnx_mfw_flags
& BNX_MFW_ON_APE
) {
2699 val
|= BGE_PCISTATE_ALLOW_APE_CTLSPC_WR
|
2700 BGE_PCISTATE_ALLOW_APE_SHMEM_WR
|
2701 BGE_PCISTATE_ALLOW_APE_PSPACE_WR
;
2703 pci_write_config(dev
, BGE_PCI_PCISTATE
, val
, 4);
2704 pci_write_config(dev
, BGE_PCI_CACHESZ
, cachesize
, 4);
2705 pci_write_config(dev
, BGE_PCI_CMD
, command
, 4);
2707 /* Enable memory arbiter */
2708 CSR_WRITE_4(sc
, BGE_MARB_MODE
, BGE_MARBMODE_ENABLE
);
2710 /* Fix up byte swapping */
2711 CSR_WRITE_4(sc
, BGE_MODE_CTL
, bnx_dma_swap_options(sc
));
2713 val
= CSR_READ_4(sc
, BGE_MAC_MODE
);
2714 val
= (val
& ~mac_mode_mask
) | mac_mode
;
2715 CSR_WRITE_4(sc
, BGE_MAC_MODE
, val
);
2718 bnx_ape_unlock(sc
, BGE_APE_LOCK_GRC
);
2721 * Poll until we see the 1's complement of the magic number.
2722 * This indicates that the firmware initialization is complete.
2724 for (i
= 0; i
< BNX_FIRMWARE_TIMEOUT
; i
++) {
2725 val
= bnx_readmem_ind(sc
, BGE_SRAM_FW_MB
);
2726 if (val
== ~BGE_SRAM_FW_MB_MAGIC
)
2730 if (i
== BNX_FIRMWARE_TIMEOUT
) {
2731 if_printf(&sc
->arpcom
.ac_if
, "firmware handshake "
2732 "timed out, found 0x%08x\n", val
);
2735 /* BCM57765 A0 needs additional time before accessing. */
2736 if (sc
->bnx_chipid
== BGE_CHIPID_BCM57765_A0
)
2740 * The 5704 in TBI mode apparently needs some special
2741 * adjustment to insure the SERDES drive level is set
2744 if (sc
->bnx_asicrev
== BGE_ASICREV_BCM5704
&&
2745 (sc
->bnx_flags
& BNX_FLAG_TBI
)) {
2748 serdescfg
= CSR_READ_4(sc
, BGE_SERDES_CFG
);
2749 serdescfg
= (serdescfg
& ~0xFFF) | 0x880;
2750 CSR_WRITE_4(sc
, BGE_SERDES_CFG
, serdescfg
);
2753 CSR_WRITE_4(sc
, BGE_MI_MODE
,
2754 sc
->bnx_mi_mode
& ~BGE_MIMODE_AUTOPOLL
);
2757 /* XXX: Broadcom Linux driver. */
2758 if (!BNX_IS_57765_PLUS(sc
)) {
2761 /* Enable Data FIFO protection. */
2762 v
= CSR_READ_4(sc
, BGE_PCIE_TLDLPL_PORT
);
2763 CSR_WRITE_4(sc
, BGE_PCIE_TLDLPL_PORT
, v
| (1 << 25));
2768 if (sc
->bnx_asicrev
== BGE_ASICREV_BCM5720
) {
2769 BNX_CLRBIT(sc
, BGE_CPMU_CLCK_ORIDE
,
2770 CPMU_CLCK_ORIDE_MAC_ORIDE_EN
);
2775 * Frame reception handling. This is called if there's a frame
2776 * on the receive return list.
2778 * Note: we have to be able to handle two possibilities here:
2779 * 1) the frame is from the jumbo recieve ring
2780 * 2) the frame is from the standard receive ring
2784 bnx_rxeof(struct bnx_rx_ret_ring
*ret
, uint16_t rx_prod
, int count
)
2786 struct bnx_softc
*sc
= ret
->bnx_sc
;
2787 struct bnx_rx_std_ring
*std
= ret
->bnx_std
;
2788 struct ifnet
*ifp
= &sc
->arpcom
.ac_if
;
2789 int std_used
= 0, cpuid
= mycpuid
;
2791 while (ret
->bnx_rx_saved_considx
!= rx_prod
&& count
!= 0) {
2792 struct pktinfo pi0
, *pi
= NULL
;
2793 struct bge_rx_bd
*cur_rx
;
2794 struct bnx_rx_buf
*rb
;
2796 struct mbuf
*m
= NULL
;
2797 uint16_t vlan_tag
= 0;
2802 cur_rx
= &ret
->bnx_rx_ret_ring
[ret
->bnx_rx_saved_considx
];
2804 rxidx
= cur_rx
->bge_idx
;
2805 KKASSERT(rxidx
< BGE_STD_RX_RING_CNT
);
2807 BNX_INC(ret
->bnx_rx_saved_considx
, BNX_RETURN_RING_CNT
);
2808 #ifdef BNX_RSS_DEBUG
2812 if (cur_rx
->bge_flags
& BGE_RXBDFLAG_VLAN_TAG
) {
2814 vlan_tag
= cur_rx
->bge_vlan_tag
;
2817 if (ret
->bnx_rx_cnt
>= ret
->bnx_rx_cntmax
) {
2818 atomic_add_int(&std
->bnx_rx_std_used
, std_used
);
2821 bnx_rx_std_refill_sched(ret
, std
);
2826 rb
= &std
->bnx_rx_std_buf
[rxidx
];
2827 m
= rb
->bnx_rx_mbuf
;
2828 if (cur_rx
->bge_flags
& BGE_RXBDFLAG_ERROR
) {
2829 IFNET_STAT_INC(ifp
, ierrors
, 1);
2831 rb
->bnx_rx_refilled
= 1;
2834 if (bnx_newbuf_std(ret
, rxidx
, 0)) {
2835 IFNET_STAT_INC(ifp
, ierrors
, 1);
2839 IFNET_STAT_INC(ifp
, ipackets
, 1);
2840 m
->m_pkthdr
.len
= m
->m_len
= cur_rx
->bge_len
- ETHER_CRC_LEN
;
2841 m
->m_pkthdr
.rcvif
= ifp
;
2843 if ((ifp
->if_capenable
& IFCAP_RXCSUM
) &&
2844 (cur_rx
->bge_flags
& BGE_RXBDFLAG_IPV6
) == 0) {
2845 if (cur_rx
->bge_flags
& BGE_RXBDFLAG_IP_CSUM
) {
2846 m
->m_pkthdr
.csum_flags
|= CSUM_IP_CHECKED
;
2847 if ((cur_rx
->bge_error_flag
&
2848 BGE_RXERRFLAG_IP_CSUM_NOK
) == 0)
2849 m
->m_pkthdr
.csum_flags
|= CSUM_IP_VALID
;
2851 if (cur_rx
->bge_flags
& BGE_RXBDFLAG_TCP_UDP_CSUM
) {
2852 m
->m_pkthdr
.csum_data
=
2853 cur_rx
->bge_tcp_udp_csum
;
2854 m
->m_pkthdr
.csum_flags
|= CSUM_DATA_VALID
|
2858 if (ifp
->if_capenable
& IFCAP_RSS
) {
2859 pi
= bnx_rss_info(&pi0
, cur_rx
);
2861 (cur_rx
->bge_flags
& BGE_RXBDFLAG_RSS_HASH
)) {
2862 m
->m_flags
|= M_HASH
;
2864 toeplitz_hash(cur_rx
->bge_hash
);
2869 * If we received a packet with a vlan tag, pass it
2870 * to vlan_input() instead of ether_input().
2873 m
->m_flags
|= M_VLANTAG
;
2874 m
->m_pkthdr
.ether_vlantag
= vlan_tag
;
2876 ifp
->if_input(ifp
, m
, pi
, cpuid
);
2878 bnx_writembx(sc
, ret
->bnx_rx_mbx
, ret
->bnx_rx_saved_considx
);
2883 cur_std_used
= atomic_fetchadd_int(&std
->bnx_rx_std_used
,
2885 if (cur_std_used
+ std_used
>= (BGE_STD_RX_RING_CNT
/ 2)) {
2886 #ifdef BNX_RSS_DEBUG
2887 ret
->bnx_rx_force_sched
++;
2889 bnx_rx_std_refill_sched(ret
, std
);
2895 bnx_txeof(struct bnx_tx_ring
*txr
, uint16_t tx_cons
)
2897 struct ifnet
*ifp
= &txr
->bnx_sc
->arpcom
.ac_if
;
2900 * Go through our tx ring and free mbufs for those
2901 * frames that have been sent.
2903 while (txr
->bnx_tx_saved_considx
!= tx_cons
) {
2904 struct bnx_tx_buf
*buf
;
2907 idx
= txr
->bnx_tx_saved_considx
;
2908 buf
= &txr
->bnx_tx_buf
[idx
];
2909 if (buf
->bnx_tx_mbuf
!= NULL
) {
2910 IFNET_STAT_INC(ifp
, opackets
, 1);
2911 #ifdef BNX_TSS_DEBUG
2914 bus_dmamap_unload(txr
->bnx_tx_mtag
,
2915 buf
->bnx_tx_dmamap
);
2916 m_freem(buf
->bnx_tx_mbuf
);
2917 buf
->bnx_tx_mbuf
= NULL
;
2920 BNX_INC(txr
->bnx_tx_saved_considx
, BGE_TX_RING_CNT
);
2923 if ((BGE_TX_RING_CNT
- txr
->bnx_tx_cnt
) >=
2924 (BNX_NSEG_RSVD
+ BNX_NSEG_SPARE
))
2925 ifsq_clr_oactive(txr
->bnx_ifsq
);
2927 if (txr
->bnx_tx_cnt
== 0)
2928 txr
->bnx_tx_watchdog
.wd_timer
= 0;
2930 if (!ifsq_is_empty(txr
->bnx_ifsq
))
2931 ifsq_devstart(txr
->bnx_ifsq
);
2935 bnx_handle_status(struct bnx_softc
*sc
)
2940 status
= *sc
->bnx_hw_status
;
2942 if (status
& BGE_STATFLAG_ERROR
) {
2948 val
= CSR_READ_4(sc
, BGE_FLOW_ATTN
);
2949 if (val
& ~BGE_FLOWATTN_MB_LOWAT
) {
2950 if_printf(&sc
->arpcom
.ac_if
,
2951 "flow attn 0x%08x\n", val
);
2955 val
= CSR_READ_4(sc
, BGE_MSI_STATUS
);
2956 if (val
& ~BGE_MSISTAT_MSI_PCI_REQ
) {
2957 if_printf(&sc
->arpcom
.ac_if
,
2958 "msi status 0x%08x\n", val
);
2962 val
= CSR_READ_4(sc
, BGE_RDMA_STATUS
);
2964 if_printf(&sc
->arpcom
.ac_if
,
2965 "rmda status 0x%08x\n", val
);
2969 val
= CSR_READ_4(sc
, BGE_WDMA_STATUS
);
2971 if_printf(&sc
->arpcom
.ac_if
,
2972 "wdma status 0x%08x\n", val
);
2977 bnx_serialize_skipmain(sc
);
2979 bnx_deserialize_skipmain(sc
);
2984 if ((status
& BGE_STATFLAG_LINKSTATE_CHANGED
) || sc
->bnx_link_evt
) {
2986 if_printf(&sc
->arpcom
.ac_if
, "link change, "
2987 "link_evt %d\n", sc
->bnx_link_evt
);
2996 #ifdef IFPOLL_ENABLE
2999 bnx_npoll_rx(struct ifnet
*ifp __unused
, void *xret
, int cycle
)
3001 struct bnx_rx_ret_ring
*ret
= xret
;
3004 ASSERT_SERIALIZED(&ret
->bnx_rx_ret_serialize
);
3006 ret
->bnx_saved_status_tag
= *ret
->bnx_hw_status_tag
;
3009 rx_prod
= *ret
->bnx_rx_considx
;
3010 if (ret
->bnx_rx_saved_considx
!= rx_prod
)
3011 bnx_rxeof(ret
, rx_prod
, cycle
);
3015 bnx_npoll_tx_notag(struct ifnet
*ifp __unused
, void *xtxr
, int cycle __unused
)
3017 struct bnx_tx_ring
*txr
= xtxr
;
3020 ASSERT_SERIALIZED(&txr
->bnx_tx_serialize
);
3022 tx_cons
= *txr
->bnx_tx_considx
;
3023 if (txr
->bnx_tx_saved_considx
!= tx_cons
)
3024 bnx_txeof(txr
, tx_cons
);
3028 bnx_npoll_tx(struct ifnet
*ifp
, void *xtxr
, int cycle
)
3030 struct bnx_tx_ring
*txr
= xtxr
;
3032 ASSERT_SERIALIZED(&txr
->bnx_tx_serialize
);
3034 txr
->bnx_saved_status_tag
= *txr
->bnx_hw_status_tag
;
3036 bnx_npoll_tx_notag(ifp
, txr
, cycle
);
3040 bnx_npoll_status_notag(struct ifnet
*ifp
)
3042 struct bnx_softc
*sc
= ifp
->if_softc
;
3044 ASSERT_SERIALIZED(&sc
->bnx_main_serialize
);
3046 if (bnx_handle_status(sc
)) {
3048 * Status changes are handled; force the chip to
3049 * update the status block to reflect whether there
3050 * are more status changes or not, else staled status
3051 * changes are always seen.
3053 BNX_SETBIT(sc
, BGE_HCC_MODE
, BGE_HCCMODE_COAL_NOW
);
3058 bnx_npoll_status(struct ifnet
*ifp
)
3060 struct bnx_softc
*sc
= ifp
->if_softc
;
3062 ASSERT_SERIALIZED(&sc
->bnx_main_serialize
);
3064 sc
->bnx_saved_status_tag
= *sc
->bnx_hw_status_tag
;
3066 bnx_npoll_status_notag(ifp
);
3070 bnx_npoll(struct ifnet
*ifp
, struct ifpoll_info
*info
)
3072 struct bnx_softc
*sc
= ifp
->if_softc
;
3075 ASSERT_IFNET_SERIALIZED_ALL(ifp
);
3078 if (sc
->bnx_flags
& BNX_FLAG_STATUS_HASTAG
)
3079 info
->ifpi_status
.status_func
= bnx_npoll_status
;
3081 info
->ifpi_status
.status_func
= bnx_npoll_status_notag
;
3082 info
->ifpi_status
.serializer
= &sc
->bnx_main_serialize
;
3084 for (i
= 0; i
< sc
->bnx_tx_ringcnt
; ++i
) {
3085 struct bnx_tx_ring
*txr
= &sc
->bnx_tx_ring
[i
];
3086 int idx
= i
+ sc
->bnx_npoll_txoff
;
3088 KKASSERT(idx
< ncpus2
);
3089 if (sc
->bnx_flags
& BNX_FLAG_RXTX_BUNDLE
) {
3090 info
->ifpi_tx
[idx
].poll_func
=
3093 info
->ifpi_tx
[idx
].poll_func
= bnx_npoll_tx
;
3095 info
->ifpi_tx
[idx
].arg
= txr
;
3096 info
->ifpi_tx
[idx
].serializer
= &txr
->bnx_tx_serialize
;
3097 ifsq_set_cpuid(txr
->bnx_ifsq
, idx
);
3100 for (i
= 0; i
< sc
->bnx_rx_retcnt
; ++i
) {
3101 struct bnx_rx_ret_ring
*ret
= &sc
->bnx_rx_ret_ring
[i
];
3102 int idx
= i
+ sc
->bnx_npoll_rxoff
;
3104 KKASSERT(idx
< ncpus2
);
3105 info
->ifpi_rx
[idx
].poll_func
= bnx_npoll_rx
;
3106 info
->ifpi_rx
[idx
].arg
= ret
;
3107 info
->ifpi_rx
[idx
].serializer
=
3108 &ret
->bnx_rx_ret_serialize
;
3111 if (ifp
->if_flags
& IFF_RUNNING
) {
3112 bnx_disable_intr(sc
);
3113 bnx_set_tick_cpuid(sc
, TRUE
);
3115 sc
->bnx_coal_chg
= BNX_TX_COAL_BDS_CHG
|
3116 BNX_RX_COAL_BDS_CHG
;
3117 bnx_coal_change(sc
);
3120 for (i
= 0; i
< sc
->bnx_tx_ringcnt
; ++i
) {
3121 ifsq_set_cpuid(sc
->bnx_tx_ring
[i
].bnx_ifsq
,
3122 sc
->bnx_tx_ring
[i
].bnx_tx_cpuid
);
3124 if (ifp
->if_flags
& IFF_RUNNING
) {
3125 sc
->bnx_coal_chg
= BNX_TX_COAL_BDS_CHG
|
3126 BNX_RX_COAL_BDS_CHG
;
3127 bnx_coal_change(sc
);
3129 bnx_enable_intr(sc
);
3130 bnx_set_tick_cpuid(sc
, FALSE
);
3135 #endif /* IFPOLL_ENABLE */
3138 bnx_intr_legacy(void *xsc
)
3140 struct bnx_softc
*sc
= xsc
;
3141 struct bnx_rx_ret_ring
*ret
= &sc
->bnx_rx_ret_ring
[0];
3143 if (ret
->bnx_saved_status_tag
== *ret
->bnx_hw_status_tag
) {
3146 val
= pci_read_config(sc
->bnx_dev
, BGE_PCI_PCISTATE
, 4);
3147 if (val
& BGE_PCISTAT_INTR_NOTACT
)
3153 * Interrupt will have to be disabled if tagged status
3154 * is used, else interrupt will always be asserted on
3155 * certain chips (at least on BCM5750 AX/BX).
3157 bnx_writembx(sc
, BGE_MBX_IRQ0_LO
, 1);
3169 bnx_intr(struct bnx_softc
*sc
)
3171 struct ifnet
*ifp
= &sc
->arpcom
.ac_if
;
3172 struct bnx_rx_ret_ring
*ret
= &sc
->bnx_rx_ret_ring
[0];
3174 ASSERT_SERIALIZED(&sc
->bnx_main_serialize
);
3176 ret
->bnx_saved_status_tag
= *ret
->bnx_hw_status_tag
;
3178 * Use a load fence to ensure that status_tag is saved
3179 * before rx_prod, tx_cons and status.
3183 bnx_handle_status(sc
);
3185 if (ifp
->if_flags
& IFF_RUNNING
) {
3186 struct bnx_tx_ring
*txr
= &sc
->bnx_tx_ring
[0];
3187 uint16_t rx_prod
, tx_cons
;
3189 lwkt_serialize_enter(&ret
->bnx_rx_ret_serialize
);
3190 rx_prod
= *ret
->bnx_rx_considx
;
3191 if (ret
->bnx_rx_saved_considx
!= rx_prod
)
3192 bnx_rxeof(ret
, rx_prod
, -1);
3193 lwkt_serialize_exit(&ret
->bnx_rx_ret_serialize
);
3195 lwkt_serialize_enter(&txr
->bnx_tx_serialize
);
3196 tx_cons
= *txr
->bnx_tx_considx
;
3197 if (txr
->bnx_tx_saved_considx
!= tx_cons
)
3198 bnx_txeof(txr
, tx_cons
);
3199 lwkt_serialize_exit(&txr
->bnx_tx_serialize
);
3202 bnx_writembx(sc
, BGE_MBX_IRQ0_LO
, ret
->bnx_saved_status_tag
<< 24);
3206 bnx_msix_tx_status(void *xtxr
)
3208 struct bnx_tx_ring
*txr
= xtxr
;
3209 struct bnx_softc
*sc
= txr
->bnx_sc
;
3210 struct ifnet
*ifp
= &sc
->arpcom
.ac_if
;
3212 ASSERT_SERIALIZED(&sc
->bnx_main_serialize
);
3214 txr
->bnx_saved_status_tag
= *txr
->bnx_hw_status_tag
;
3216 * Use a load fence to ensure that status_tag is saved
3217 * before tx_cons and status.
3221 bnx_handle_status(sc
);
3223 if (ifp
->if_flags
& IFF_RUNNING
) {
3226 lwkt_serialize_enter(&txr
->bnx_tx_serialize
);
3227 tx_cons
= *txr
->bnx_tx_considx
;
3228 if (txr
->bnx_tx_saved_considx
!= tx_cons
)
3229 bnx_txeof(txr
, tx_cons
);
3230 lwkt_serialize_exit(&txr
->bnx_tx_serialize
);
3233 bnx_writembx(sc
, BGE_MBX_IRQ0_LO
, txr
->bnx_saved_status_tag
<< 24);
3237 bnx_msix_rx(void *xret
)
3239 struct bnx_rx_ret_ring
*ret
= xret
;
3242 ASSERT_SERIALIZED(&ret
->bnx_rx_ret_serialize
);
3244 ret
->bnx_saved_status_tag
= *ret
->bnx_hw_status_tag
;
3246 * Use a load fence to ensure that status_tag is saved
3251 rx_prod
= *ret
->bnx_rx_considx
;
3252 if (ret
->bnx_rx_saved_considx
!= rx_prod
)
3253 bnx_rxeof(ret
, rx_prod
, -1);
3255 bnx_writembx(ret
->bnx_sc
, ret
->bnx_msix_mbx
,
3256 ret
->bnx_saved_status_tag
<< 24);
3260 bnx_msix_rxtx(void *xret
)
3262 struct bnx_rx_ret_ring
*ret
= xret
;
3263 struct bnx_tx_ring
*txr
= ret
->bnx_txr
;
3264 uint16_t rx_prod
, tx_cons
;
3266 ASSERT_SERIALIZED(&ret
->bnx_rx_ret_serialize
);
3268 ret
->bnx_saved_status_tag
= *ret
->bnx_hw_status_tag
;
3270 * Use a load fence to ensure that status_tag is saved
3271 * before rx_prod and tx_cons.
3275 rx_prod
= *ret
->bnx_rx_considx
;
3276 if (ret
->bnx_rx_saved_considx
!= rx_prod
)
3277 bnx_rxeof(ret
, rx_prod
, -1);
3279 lwkt_serialize_enter(&txr
->bnx_tx_serialize
);
3280 tx_cons
= *txr
->bnx_tx_considx
;
3281 if (txr
->bnx_tx_saved_considx
!= tx_cons
)
3282 bnx_txeof(txr
, tx_cons
);
3283 lwkt_serialize_exit(&txr
->bnx_tx_serialize
);
3285 bnx_writembx(ret
->bnx_sc
, ret
->bnx_msix_mbx
,
3286 ret
->bnx_saved_status_tag
<< 24);
3290 bnx_msix_status(void *xsc
)
3292 struct bnx_softc
*sc
= xsc
;
3294 ASSERT_SERIALIZED(&sc
->bnx_main_serialize
);
3296 sc
->bnx_saved_status_tag
= *sc
->bnx_hw_status_tag
;
3298 * Use a load fence to ensure that status_tag is saved
3303 bnx_handle_status(sc
);
3305 bnx_writembx(sc
, BGE_MBX_IRQ0_LO
, sc
->bnx_saved_status_tag
<< 24);
3311 struct bnx_softc
*sc
= xsc
;
3313 lwkt_serialize_enter(&sc
->bnx_main_serialize
);
3315 bnx_stats_update_regs(sc
);
3317 if (sc
->bnx_flags
& BNX_FLAG_TBI
) {
3319 * Since in TBI mode auto-polling can't be used we should poll
3320 * link status manually. Here we register pending link event
3321 * and trigger interrupt.
3324 BNX_SETBIT(sc
, BGE_HCC_MODE
, BGE_HCCMODE_COAL_NOW
);
3325 } else if (!sc
->bnx_link
) {
3326 mii_tick(device_get_softc(sc
->bnx_miibus
));
3329 callout_reset_bycpu(&sc
->bnx_tick_timer
, hz
, bnx_tick
, sc
,
3330 sc
->bnx_tick_cpuid
);
3332 lwkt_serialize_exit(&sc
->bnx_main_serialize
);
3336 bnx_stats_update_regs(struct bnx_softc
*sc
)
3338 struct ifnet
*ifp
= &sc
->arpcom
.ac_if
;
3339 struct bge_mac_stats_regs stats
;
3343 s
= (uint32_t *)&stats
;
3344 for (i
= 0; i
< sizeof(struct bge_mac_stats_regs
); i
+= 4) {
3345 *s
= CSR_READ_4(sc
, BGE_RX_STATS
+ i
);
3349 IFNET_STAT_SET(ifp
, collisions
,
3350 (stats
.dot3StatsSingleCollisionFrames
+
3351 stats
.dot3StatsMultipleCollisionFrames
+
3352 stats
.dot3StatsExcessiveCollisions
+
3353 stats
.dot3StatsLateCollisions
));
3355 val
= CSR_READ_4(sc
, BGE_RXLP_LOCSTAT_OUT_OF_BDS
);
3356 sc
->bnx_norxbds
+= val
;
3360 * Encapsulate an mbuf chain in the tx ring by coupling the mbuf data
3361 * pointers to descriptors.
3364 bnx_encap(struct bnx_tx_ring
*txr
, struct mbuf
**m_head0
, uint32_t *txidx
,
3367 struct bge_tx_bd
*d
= NULL
;
3368 uint16_t csum_flags
= 0, vlan_tag
= 0, mss
= 0;
3369 bus_dma_segment_t segs
[BNX_NSEG_NEW
];
3371 int error
, maxsegs
, nsegs
, idx
, i
;
3372 struct mbuf
*m_head
= *m_head0
, *m_new
;
3374 if (m_head
->m_pkthdr
.csum_flags
& CSUM_TSO
) {
3375 #ifdef BNX_TSO_DEBUG
3379 error
= bnx_setup_tso(txr
, m_head0
, &mss
, &csum_flags
);
3384 #ifdef BNX_TSO_DEBUG
3385 tso_nsegs
= (m_head
->m_pkthdr
.len
/
3386 m_head
->m_pkthdr
.tso_segsz
) - 1;
3387 if (tso_nsegs
> (BNX_TSO_NSTATS
- 1))
3388 tso_nsegs
= BNX_TSO_NSTATS
- 1;
3389 else if (tso_nsegs
< 0)
3391 txr
->bnx_sc
->bnx_tsosegs
[tso_nsegs
]++;
3393 } else if (m_head
->m_pkthdr
.csum_flags
& BNX_CSUM_FEATURES
) {
3394 if (m_head
->m_pkthdr
.csum_flags
& CSUM_IP
)
3395 csum_flags
|= BGE_TXBDFLAG_IP_CSUM
;
3396 if (m_head
->m_pkthdr
.csum_flags
& (CSUM_TCP
| CSUM_UDP
))
3397 csum_flags
|= BGE_TXBDFLAG_TCP_UDP_CSUM
;
3398 if (m_head
->m_flags
& M_LASTFRAG
)
3399 csum_flags
|= BGE_TXBDFLAG_IP_FRAG_END
;
3400 else if (m_head
->m_flags
& M_FRAG
)
3401 csum_flags
|= BGE_TXBDFLAG_IP_FRAG
;
3403 if (m_head
->m_flags
& M_VLANTAG
) {
3404 csum_flags
|= BGE_TXBDFLAG_VLAN_TAG
;
3405 vlan_tag
= m_head
->m_pkthdr
.ether_vlantag
;
3409 map
= txr
->bnx_tx_buf
[idx
].bnx_tx_dmamap
;
3411 maxsegs
= (BGE_TX_RING_CNT
- txr
->bnx_tx_cnt
) - BNX_NSEG_RSVD
;
3412 KASSERT(maxsegs
>= BNX_NSEG_SPARE
,
3413 ("not enough segments %d", maxsegs
));
3415 if (maxsegs
> BNX_NSEG_NEW
)
3416 maxsegs
= BNX_NSEG_NEW
;
3419 * Pad outbound frame to BGE_MIN_FRAMELEN for an unusual reason.
3420 * The bge hardware will pad out Tx runts to BGE_MIN_FRAMELEN,
3421 * but when such padded frames employ the bge IP/TCP checksum
3422 * offload, the hardware checksum assist gives incorrect results
3423 * (possibly from incorporating its own padding into the UDP/TCP
3424 * checksum; who knows). If we pad such runts with zeros, the
3425 * onboard checksum comes out correct.
3427 if ((csum_flags
& BGE_TXBDFLAG_TCP_UDP_CSUM
) &&
3428 m_head
->m_pkthdr
.len
< BNX_MIN_FRAMELEN
) {
3429 error
= m_devpad(m_head
, BNX_MIN_FRAMELEN
);
3434 if ((txr
->bnx_tx_flags
& BNX_TX_FLAG_SHORTDMA
) &&
3435 m_head
->m_next
!= NULL
) {
3436 m_new
= bnx_defrag_shortdma(m_head
);
3437 if (m_new
== NULL
) {
3441 *m_head0
= m_head
= m_new
;
3443 if ((m_head
->m_pkthdr
.csum_flags
& CSUM_TSO
) == 0 &&
3444 (txr
->bnx_tx_flags
& BNX_TX_FLAG_FORCE_DEFRAG
) &&
3445 m_head
->m_next
!= NULL
) {
3447 * Forcefully defragment mbuf chain to overcome hardware
3448 * limitation which only support a single outstanding
3449 * DMA read operation. If it fails, keep moving on using
3450 * the original mbuf chain.
3452 m_new
= m_defrag(m_head
, M_NOWAIT
);
3454 *m_head0
= m_head
= m_new
;
3457 error
= bus_dmamap_load_mbuf_defrag(txr
->bnx_tx_mtag
, map
,
3458 m_head0
, segs
, maxsegs
, &nsegs
, BUS_DMA_NOWAIT
);
3461 *segs_used
+= nsegs
;
3464 bus_dmamap_sync(txr
->bnx_tx_mtag
, map
, BUS_DMASYNC_PREWRITE
);
3466 for (i
= 0; ; i
++) {
3467 d
= &txr
->bnx_tx_ring
[idx
];
3469 d
->bge_addr
.bge_addr_lo
= BGE_ADDR_LO(segs
[i
].ds_addr
);
3470 d
->bge_addr
.bge_addr_hi
= BGE_ADDR_HI(segs
[i
].ds_addr
);
3471 d
->bge_len
= segs
[i
].ds_len
;
3472 d
->bge_flags
= csum_flags
;
3473 d
->bge_vlan_tag
= vlan_tag
;
3478 BNX_INC(idx
, BGE_TX_RING_CNT
);
3480 /* Mark the last segment as end of packet... */
3481 d
->bge_flags
|= BGE_TXBDFLAG_END
;
3484 * Insure that the map for this transmission is placed at
3485 * the array index of the last descriptor in this chain.
3487 txr
->bnx_tx_buf
[*txidx
].bnx_tx_dmamap
= txr
->bnx_tx_buf
[idx
].bnx_tx_dmamap
;
3488 txr
->bnx_tx_buf
[idx
].bnx_tx_dmamap
= map
;
3489 txr
->bnx_tx_buf
[idx
].bnx_tx_mbuf
= m_head
;
3490 txr
->bnx_tx_cnt
+= nsegs
;
3492 BNX_INC(idx
, BGE_TX_RING_CNT
);
3503 * Main transmit routine. To avoid having to do mbuf copies, we put pointers
3504 * to the mbuf data regions directly in the transmit descriptors.
3507 bnx_start(struct ifnet
*ifp
, struct ifaltq_subque
*ifsq
)
3509 struct bnx_tx_ring
*txr
= ifsq_get_priv(ifsq
);
3510 struct mbuf
*m_head
= NULL
;
3514 KKASSERT(txr
->bnx_ifsq
== ifsq
);
3515 ASSERT_SERIALIZED(&txr
->bnx_tx_serialize
);
3517 if ((ifp
->if_flags
& IFF_RUNNING
) == 0 || ifsq_is_oactive(ifsq
))
3520 prodidx
= txr
->bnx_tx_prodidx
;
3522 while (txr
->bnx_tx_buf
[prodidx
].bnx_tx_mbuf
== NULL
) {
3524 * Sanity check: avoid coming within BGE_NSEG_RSVD
3525 * descriptors of the end of the ring. Also make
3526 * sure there are BGE_NSEG_SPARE descriptors for
3527 * jumbo buffers' or TSO segments' defragmentation.
3529 if ((BGE_TX_RING_CNT
- txr
->bnx_tx_cnt
) <
3530 (BNX_NSEG_RSVD
+ BNX_NSEG_SPARE
)) {
3531 ifsq_set_oactive(ifsq
);
3535 m_head
= ifsq_dequeue(ifsq
);
3540 * Pack the data into the transmit ring. If we
3541 * don't have room, set the OACTIVE flag and wait
3542 * for the NIC to drain the ring.
3544 if (bnx_encap(txr
, &m_head
, &prodidx
, &nsegs
)) {
3545 ifsq_set_oactive(ifsq
);
3546 IFNET_STAT_INC(ifp
, oerrors
, 1);
3550 if (nsegs
>= txr
->bnx_tx_wreg
) {
3552 bnx_writembx(txr
->bnx_sc
, txr
->bnx_tx_mbx
, prodidx
);
3556 ETHER_BPF_MTAP(ifp
, m_head
);
3559 * Set a timeout in case the chip goes out to lunch.
3561 txr
->bnx_tx_watchdog
.wd_timer
= 5;
3566 bnx_writembx(txr
->bnx_sc
, txr
->bnx_tx_mbx
, prodidx
);
3568 txr
->bnx_tx_prodidx
= prodidx
;
3574 struct bnx_softc
*sc
= xsc
;
3575 struct ifnet
*ifp
= &sc
->arpcom
.ac_if
;
3581 ASSERT_IFNET_SERIALIZED_ALL(ifp
);
3583 /* Cancel pending I/O and flush buffers. */
3586 bnx_sig_pre_reset(sc
, BNX_RESET_START
);
3588 bnx_sig_post_reset(sc
, BNX_RESET_START
);
3593 * Init the various state machines, ring
3594 * control blocks and firmware.
3596 if (bnx_blockinit(sc
)) {
3597 if_printf(ifp
, "initialization failure\n");
3603 CSR_WRITE_4(sc
, BGE_RX_MTU
, ifp
->if_mtu
+
3604 ETHER_HDR_LEN
+ ETHER_CRC_LEN
+ EVL_ENCAPLEN
);
3606 /* Load our MAC address. */
3607 m
= (uint16_t *)&sc
->arpcom
.ac_enaddr
[0];
3608 CSR_WRITE_4(sc
, BGE_MAC_ADDR1_LO
, htons(m
[0]));
3609 CSR_WRITE_4(sc
, BGE_MAC_ADDR1_HI
, (htons(m
[1]) << 16) | htons(m
[2]));
3611 /* Enable or disable promiscuous mode as needed. */
3614 /* Program multicast filter. */
3618 if (bnx_init_rx_ring_std(&sc
->bnx_rx_std_ring
)) {
3619 if_printf(ifp
, "RX ring initialization failed\n");
3624 /* Init jumbo RX ring. */
3625 if (ifp
->if_mtu
> (ETHERMTU
+ ETHER_HDR_LEN
+ ETHER_CRC_LEN
)) {
3626 if (bnx_init_rx_ring_jumbo(sc
)) {
3627 if_printf(ifp
, "Jumbo RX ring initialization failed\n");
3633 /* Init our RX return ring index */
3634 for (i
= 0; i
< sc
->bnx_rx_retcnt
; ++i
) {
3635 struct bnx_rx_ret_ring
*ret
= &sc
->bnx_rx_ret_ring
[i
];
3637 ret
->bnx_rx_saved_considx
= 0;
3638 ret
->bnx_rx_cnt
= 0;
3642 for (i
= 0; i
< sc
->bnx_tx_ringcnt
; ++i
)
3643 bnx_init_tx_ring(&sc
->bnx_tx_ring
[i
]);
3645 /* Enable TX MAC state machine lockup fix. */
3646 mode
= CSR_READ_4(sc
, BGE_TX_MODE
);
3647 mode
|= BGE_TXMODE_MBUF_LOCKUP_FIX
;
3648 if (sc
->bnx_asicrev
== BGE_ASICREV_BCM5720
||
3649 sc
->bnx_asicrev
== BGE_ASICREV_BCM5762
) {
3650 mode
&= ~(BGE_TXMODE_JMB_FRM_LEN
| BGE_TXMODE_CNT_DN_MODE
);
3651 mode
|= CSR_READ_4(sc
, BGE_TX_MODE
) &
3652 (BGE_TXMODE_JMB_FRM_LEN
| BGE_TXMODE_CNT_DN_MODE
);
3654 /* Turn on transmitter */
3655 CSR_WRITE_4(sc
, BGE_TX_MODE
, mode
| BGE_TXMODE_ENABLE
);
3658 /* Initialize RSS */
3659 mode
= BGE_RXMODE_ENABLE
| BGE_RXMODE_IPV6_ENABLE
;
3660 if (BNX_RSS_ENABLED(sc
)) {
3662 mode
|= BGE_RXMODE_RSS_ENABLE
|
3663 BGE_RXMODE_RSS_HASH_MASK_BITS
|
3664 BGE_RXMODE_RSS_IPV4_HASH
|
3665 BGE_RXMODE_RSS_TCP_IPV4_HASH
;
3667 /* Turn on receiver */
3668 BNX_SETBIT(sc
, BGE_RX_MODE
, mode
);
3672 * Set the number of good frames to receive after RX MBUF
3673 * Low Watermark has been reached. After the RX MAC receives
3674 * this number of frames, it will drop subsequent incoming
3675 * frames until the MBUF High Watermark is reached.
3677 if (BNX_IS_57765_FAMILY(sc
))
3678 CSR_WRITE_4(sc
, BGE_MAX_RX_FRAME_LOWAT
, 1);
3680 CSR_WRITE_4(sc
, BGE_MAX_RX_FRAME_LOWAT
, 2);
3682 if (sc
->bnx_intr_type
== PCI_INTR_TYPE_MSI
||
3683 sc
->bnx_intr_type
== PCI_INTR_TYPE_MSIX
) {
3685 if_printf(ifp
, "MSI_MODE: %#x\n",
3686 CSR_READ_4(sc
, BGE_MSI_MODE
));
3690 /* Tell firmware we're alive. */
3691 BNX_SETBIT(sc
, BGE_MODE_CTL
, BGE_MODECTL_STACKUP
);
3693 /* Enable host interrupts if polling(4) is not enabled. */
3694 PCI_SETBIT(sc
->bnx_dev
, BGE_PCI_MISC_CTL
, BGE_PCIMISCCTL_CLEAR_INTA
, 4);
3697 #ifdef IFPOLL_ENABLE
3698 if (ifp
->if_flags
& IFF_NPOLLING
)
3702 bnx_disable_intr(sc
);
3704 bnx_enable_intr(sc
);
3705 bnx_set_tick_cpuid(sc
, polling
);
3707 ifp
->if_flags
|= IFF_RUNNING
;
3708 for (i
= 0; i
< sc
->bnx_tx_ringcnt
; ++i
) {
3709 struct bnx_tx_ring
*txr
= &sc
->bnx_tx_ring
[i
];
3711 ifsq_clr_oactive(txr
->bnx_ifsq
);
3712 ifsq_watchdog_start(&txr
->bnx_tx_watchdog
);
3715 bnx_ifmedia_upd(ifp
);
3717 callout_reset_bycpu(&sc
->bnx_tick_timer
, hz
, bnx_tick
, sc
,
3718 sc
->bnx_tick_cpuid
);
3722 * Set media options.
3725 bnx_ifmedia_upd(struct ifnet
*ifp
)
3727 struct bnx_softc
*sc
= ifp
->if_softc
;
3729 /* If this is a 1000baseX NIC, enable the TBI port. */
3730 if (sc
->bnx_flags
& BNX_FLAG_TBI
) {
3731 struct ifmedia
*ifm
= &sc
->bnx_ifmedia
;
3733 if (IFM_TYPE(ifm
->ifm_media
) != IFM_ETHER
)
3736 switch(IFM_SUBTYPE(ifm
->ifm_media
)) {
3741 if ((ifm
->ifm_media
& IFM_GMASK
) == IFM_FDX
) {
3742 BNX_CLRBIT(sc
, BGE_MAC_MODE
,
3743 BGE_MACMODE_HALF_DUPLEX
);
3745 BNX_SETBIT(sc
, BGE_MAC_MODE
,
3746 BGE_MACMODE_HALF_DUPLEX
);
3754 struct mii_data
*mii
= device_get_softc(sc
->bnx_miibus
);
3758 if (mii
->mii_instance
) {
3759 struct mii_softc
*miisc
;
3761 LIST_FOREACH(miisc
, &mii
->mii_phys
, mii_list
)
3762 mii_phy_reset(miisc
);
3767 * Force an interrupt so that we will call bnx_link_upd
3768 * if needed and clear any pending link state attention.
3769 * Without this we are not getting any further interrupts
3770 * for link state changes and thus will not UP the link and
3771 * not be able to send in bnx_start. The only way to get
3772 * things working was to receive a packet and get an RX
3775 * bnx_tick should help for fiber cards and we might not
3776 * need to do this here if BNX_FLAG_TBI is set but as
3777 * we poll for fiber anyway it should not harm.
3779 BNX_SETBIT(sc
, BGE_HCC_MODE
, BGE_HCCMODE_COAL_NOW
);
3785 * Report current media status.
3788 bnx_ifmedia_sts(struct ifnet
*ifp
, struct ifmediareq
*ifmr
)
3790 struct bnx_softc
*sc
= ifp
->if_softc
;
3792 if ((ifp
->if_flags
& IFF_RUNNING
) == 0)
3795 if (sc
->bnx_flags
& BNX_FLAG_TBI
) {
3796 ifmr
->ifm_status
= IFM_AVALID
;
3797 ifmr
->ifm_active
= IFM_ETHER
;
3798 if (CSR_READ_4(sc
, BGE_MAC_STS
) &
3799 BGE_MACSTAT_TBI_PCS_SYNCHED
) {
3800 ifmr
->ifm_status
|= IFM_ACTIVE
;
3802 ifmr
->ifm_active
|= IFM_NONE
;
3806 ifmr
->ifm_active
|= IFM_1000_SX
;
3807 if (CSR_READ_4(sc
, BGE_MAC_MODE
) & BGE_MACMODE_HALF_DUPLEX
)
3808 ifmr
->ifm_active
|= IFM_HDX
;
3810 ifmr
->ifm_active
|= IFM_FDX
;
3812 struct mii_data
*mii
= device_get_softc(sc
->bnx_miibus
);
3815 ifmr
->ifm_active
= mii
->mii_media_active
;
3816 ifmr
->ifm_status
= mii
->mii_media_status
;
3821 bnx_ioctl(struct ifnet
*ifp
, u_long command
, caddr_t data
, struct ucred
*cr
)
3823 struct bnx_softc
*sc
= ifp
->if_softc
;
3824 struct ifreq
*ifr
= (struct ifreq
*)data
;
3825 int mask
, error
= 0;
3827 ASSERT_IFNET_SERIALIZED_ALL(ifp
);
3831 if ((!BNX_IS_JUMBO_CAPABLE(sc
) && ifr
->ifr_mtu
> ETHERMTU
) ||
3832 (BNX_IS_JUMBO_CAPABLE(sc
) &&
3833 ifr
->ifr_mtu
> BNX_JUMBO_MTU
)) {
3835 } else if (ifp
->if_mtu
!= ifr
->ifr_mtu
) {
3836 ifp
->if_mtu
= ifr
->ifr_mtu
;
3837 if (ifp
->if_flags
& IFF_RUNNING
)
3842 if (ifp
->if_flags
& IFF_UP
) {
3843 if (ifp
->if_flags
& IFF_RUNNING
) {
3844 mask
= ifp
->if_flags
^ sc
->bnx_if_flags
;
3847 * If only the state of the PROMISC flag
3848 * changed, then just use the 'set promisc
3849 * mode' command instead of reinitializing
3850 * the entire NIC. Doing a full re-init
3851 * means reloading the firmware and waiting
3852 * for it to start up, which may take a
3853 * second or two. Similarly for ALLMULTI.
3855 if (mask
& IFF_PROMISC
)
3857 if (mask
& IFF_ALLMULTI
)
3862 } else if (ifp
->if_flags
& IFF_RUNNING
) {
3865 sc
->bnx_if_flags
= ifp
->if_flags
;
3869 if (ifp
->if_flags
& IFF_RUNNING
)
3874 if (sc
->bnx_flags
& BNX_FLAG_TBI
) {
3875 error
= ifmedia_ioctl(ifp
, ifr
,
3876 &sc
->bnx_ifmedia
, command
);
3878 struct mii_data
*mii
;
3880 mii
= device_get_softc(sc
->bnx_miibus
);
3881 error
= ifmedia_ioctl(ifp
, ifr
,
3882 &mii
->mii_media
, command
);
3886 mask
= ifr
->ifr_reqcap
^ ifp
->if_capenable
;
3887 if (mask
& IFCAP_HWCSUM
) {
3888 ifp
->if_capenable
^= (mask
& IFCAP_HWCSUM
);
3889 if (ifp
->if_capenable
& IFCAP_TXCSUM
)
3890 ifp
->if_hwassist
|= BNX_CSUM_FEATURES
;
3892 ifp
->if_hwassist
&= ~BNX_CSUM_FEATURES
;
3894 if (mask
& IFCAP_TSO
) {
3895 ifp
->if_capenable
^= (mask
& IFCAP_TSO
);
3896 if (ifp
->if_capenable
& IFCAP_TSO
)
3897 ifp
->if_hwassist
|= CSUM_TSO
;
3899 ifp
->if_hwassist
&= ~CSUM_TSO
;
3901 if (mask
& IFCAP_RSS
)
3902 ifp
->if_capenable
^= IFCAP_RSS
;
3905 error
= ether_ioctl(ifp
, command
, data
);
3912 bnx_watchdog(struct ifaltq_subque
*ifsq
)
3914 struct ifnet
*ifp
= ifsq_get_ifp(ifsq
);
3915 struct bnx_softc
*sc
= ifp
->if_softc
;
3918 ASSERT_IFNET_SERIALIZED_ALL(ifp
);
3920 if_printf(ifp
, "watchdog timeout -- resetting\n");
3924 IFNET_STAT_INC(ifp
, oerrors
, 1);
3926 for (i
= 0; i
< sc
->bnx_tx_ringcnt
; ++i
)
3927 ifsq_devstart_sched(sc
->bnx_tx_ring
[i
].bnx_ifsq
);
3931 * Stop the adapter and free any mbufs allocated to the
3935 bnx_stop(struct bnx_softc
*sc
)
3937 struct ifnet
*ifp
= &sc
->arpcom
.ac_if
;
3940 ASSERT_IFNET_SERIALIZED_ALL(ifp
);
3942 callout_stop(&sc
->bnx_tick_timer
);
3944 /* Disable host interrupts. */
3945 bnx_disable_intr(sc
);
3948 * Tell firmware we're shutting down.
3950 bnx_sig_pre_reset(sc
, BNX_RESET_SHUTDOWN
);
3953 * Disable all of the receiver blocks
3955 bnx_stop_block(sc
, BGE_RX_MODE
, BGE_RXMODE_ENABLE
);
3956 bnx_stop_block(sc
, BGE_RBDI_MODE
, BGE_RBDIMODE_ENABLE
);
3957 bnx_stop_block(sc
, BGE_RXLP_MODE
, BGE_RXLPMODE_ENABLE
);
3958 bnx_stop_block(sc
, BGE_RDBDI_MODE
, BGE_RBDIMODE_ENABLE
);
3959 bnx_stop_block(sc
, BGE_RDC_MODE
, BGE_RDCMODE_ENABLE
);
3960 bnx_stop_block(sc
, BGE_RBDC_MODE
, BGE_RBDCMODE_ENABLE
);
3963 * Disable all of the transmit blocks
3965 bnx_stop_block(sc
, BGE_SRS_MODE
, BGE_SRSMODE_ENABLE
);
3966 bnx_stop_block(sc
, BGE_SBDI_MODE
, BGE_SBDIMODE_ENABLE
);
3967 bnx_stop_block(sc
, BGE_SDI_MODE
, BGE_SDIMODE_ENABLE
);
3968 bnx_stop_block(sc
, BGE_RDMA_MODE
, BGE_RDMAMODE_ENABLE
);
3969 bnx_stop_block(sc
, BGE_SDC_MODE
, BGE_SDCMODE_ENABLE
);
3970 bnx_stop_block(sc
, BGE_SBDC_MODE
, BGE_SBDCMODE_ENABLE
);
3973 * Shut down all of the memory managers and related
3976 bnx_stop_block(sc
, BGE_HCC_MODE
, BGE_HCCMODE_ENABLE
);
3977 bnx_stop_block(sc
, BGE_WDMA_MODE
, BGE_WDMAMODE_ENABLE
);
3978 CSR_WRITE_4(sc
, BGE_FTQ_RESET
, 0xFFFFFFFF);
3979 CSR_WRITE_4(sc
, BGE_FTQ_RESET
, 0);
3982 bnx_sig_post_reset(sc
, BNX_RESET_SHUTDOWN
);
3985 * Tell firmware we're shutting down.
3987 BNX_CLRBIT(sc
, BGE_MODE_CTL
, BGE_MODECTL_STACKUP
);
3989 /* Free the RX lists. */
3990 bnx_free_rx_ring_std(&sc
->bnx_rx_std_ring
);
3992 /* Free jumbo RX list. */
3993 if (BNX_IS_JUMBO_CAPABLE(sc
))
3994 bnx_free_rx_ring_jumbo(sc
);
3996 /* Free TX buffers. */
3997 for (i
= 0; i
< sc
->bnx_tx_ringcnt
; ++i
) {
3998 struct bnx_tx_ring
*txr
= &sc
->bnx_tx_ring
[i
];
4000 txr
->bnx_saved_status_tag
= 0;
4001 bnx_free_tx_ring(txr
);
4004 /* Clear saved status tag */
4005 for (i
= 0; i
< sc
->bnx_rx_retcnt
; ++i
)
4006 sc
->bnx_rx_ret_ring
[i
].bnx_saved_status_tag
= 0;
4009 sc
->bnx_coal_chg
= 0;
4011 ifp
->if_flags
&= ~IFF_RUNNING
;
4012 for (i
= 0; i
< sc
->bnx_tx_ringcnt
; ++i
) {
4013 struct bnx_tx_ring
*txr
= &sc
->bnx_tx_ring
[i
];
4015 ifsq_clr_oactive(txr
->bnx_ifsq
);
4016 ifsq_watchdog_stop(&txr
->bnx_tx_watchdog
);
4021 * Stop all chip I/O so that the kernel's probe routines don't
4022 * get confused by errant DMAs when rebooting.
4025 bnx_shutdown(device_t dev
)
4027 struct bnx_softc
*sc
= device_get_softc(dev
);
4028 struct ifnet
*ifp
= &sc
->arpcom
.ac_if
;
4030 ifnet_serialize_all(ifp
);
4032 ifnet_deserialize_all(ifp
);
4036 bnx_suspend(device_t dev
)
4038 struct bnx_softc
*sc
= device_get_softc(dev
);
4039 struct ifnet
*ifp
= &sc
->arpcom
.ac_if
;
4041 ifnet_serialize_all(ifp
);
4043 ifnet_deserialize_all(ifp
);
4049 bnx_resume(device_t dev
)
4051 struct bnx_softc
*sc
= device_get_softc(dev
);
4052 struct ifnet
*ifp
= &sc
->arpcom
.ac_if
;
4054 ifnet_serialize_all(ifp
);
4056 if (ifp
->if_flags
& IFF_UP
) {
4060 for (i
= 0; i
< sc
->bnx_tx_ringcnt
; ++i
)
4061 ifsq_devstart_sched(sc
->bnx_tx_ring
[i
].bnx_ifsq
);
4064 ifnet_deserialize_all(ifp
);
4070 bnx_setpromisc(struct bnx_softc
*sc
)
4072 struct ifnet
*ifp
= &sc
->arpcom
.ac_if
;
4074 if (ifp
->if_flags
& IFF_PROMISC
)
4075 BNX_SETBIT(sc
, BGE_RX_MODE
, BGE_RXMODE_RX_PROMISC
);
4077 BNX_CLRBIT(sc
, BGE_RX_MODE
, BGE_RXMODE_RX_PROMISC
);
4081 bnx_dma_free(struct bnx_softc
*sc
)
4083 struct bnx_rx_std_ring
*std
= &sc
->bnx_rx_std_ring
;
4086 /* Destroy RX return rings */
4087 if (sc
->bnx_rx_ret_ring
!= NULL
) {
4088 for (i
= 0; i
< sc
->bnx_rx_retcnt
; ++i
)
4089 bnx_destroy_rx_ret_ring(&sc
->bnx_rx_ret_ring
[i
]);
4090 kfree(sc
->bnx_rx_ret_ring
, M_DEVBUF
);
4093 /* Destroy RX mbuf DMA stuffs. */
4094 if (std
->bnx_rx_mtag
!= NULL
) {
4095 for (i
= 0; i
< BGE_STD_RX_RING_CNT
; i
++) {
4096 KKASSERT(std
->bnx_rx_std_buf
[i
].bnx_rx_mbuf
== NULL
);
4097 bus_dmamap_destroy(std
->bnx_rx_mtag
,
4098 std
->bnx_rx_std_buf
[i
].bnx_rx_dmamap
);
4100 bus_dma_tag_destroy(std
->bnx_rx_mtag
);
4103 /* Destroy standard RX ring */
4104 bnx_dma_block_free(std
->bnx_rx_std_ring_tag
,
4105 std
->bnx_rx_std_ring_map
, std
->bnx_rx_std_ring
);
4107 /* Destroy TX rings */
4108 if (sc
->bnx_tx_ring
!= NULL
) {
4109 for (i
= 0; i
< sc
->bnx_tx_ringcnt
; ++i
)
4110 bnx_destroy_tx_ring(&sc
->bnx_tx_ring
[i
]);
4111 kfree(sc
->bnx_tx_ring
, M_DEVBUF
);
4114 if (BNX_IS_JUMBO_CAPABLE(sc
))
4115 bnx_free_jumbo_mem(sc
);
4117 /* Destroy status blocks */
4118 for (i
= 0; i
< sc
->bnx_intr_cnt
; ++i
) {
4119 struct bnx_intr_data
*intr
= &sc
->bnx_intr_data
[i
];
4121 bnx_dma_block_free(intr
->bnx_status_tag
,
4122 intr
->bnx_status_map
, intr
->bnx_status_block
);
4125 /* Destroy the parent tag */
4126 if (sc
->bnx_cdata
.bnx_parent_tag
!= NULL
)
4127 bus_dma_tag_destroy(sc
->bnx_cdata
.bnx_parent_tag
);
4131 bnx_dma_alloc(device_t dev
)
4133 struct bnx_softc
*sc
= device_get_softc(dev
);
4134 struct bnx_rx_std_ring
*std
= &sc
->bnx_rx_std_ring
;
4138 * Allocate the parent bus DMA tag appropriate for PCI.
4140 * All of the NetExtreme/NetLink controllers have 4GB boundary
4142 * Whenever an address crosses a multiple of the 4GB boundary
4143 * (including 4GB, 8Gb, 12Gb, etc.) and makes the transition
4144 * from 0xX_FFFF_FFFF to 0x(X+1)_0000_0000 an internal DMA
4145 * state machine will lockup and cause the device to hang.
4147 error
= bus_dma_tag_create(NULL
, 1, BGE_DMA_BOUNDARY_4G
,
4148 BUS_SPACE_MAXADDR
, BUS_SPACE_MAXADDR
, NULL
, NULL
,
4149 BUS_SPACE_MAXSIZE_32BIT
, 0, BUS_SPACE_MAXSIZE_32BIT
,
4150 0, &sc
->bnx_cdata
.bnx_parent_tag
);
4152 device_printf(dev
, "could not create parent DMA tag\n");
4157 * Create DMA stuffs for status blocks.
4159 for (i
= 0; i
< sc
->bnx_intr_cnt
; ++i
) {
4160 struct bnx_intr_data
*intr
= &sc
->bnx_intr_data
[i
];
4162 error
= bnx_dma_block_alloc(sc
,
4163 __VM_CACHELINE_ALIGN(BGE_STATUS_BLK_SZ
),
4164 &intr
->bnx_status_tag
, &intr
->bnx_status_map
,
4165 (void *)&intr
->bnx_status_block
,
4166 &intr
->bnx_status_block_paddr
);
4169 "could not create %dth status block\n", i
);
4173 sc
->bnx_hw_status
= &sc
->bnx_intr_data
[0].bnx_status_block
->bge_status
;
4174 if (sc
->bnx_flags
& BNX_FLAG_STATUS_HASTAG
) {
4175 sc
->bnx_hw_status_tag
=
4176 &sc
->bnx_intr_data
[0].bnx_status_block
->bge_status_tag
;
4180 * Create DMA tag and maps for RX mbufs.
4183 lwkt_serialize_init(&std
->bnx_rx_std_serialize
);
4184 error
= bus_dma_tag_create(sc
->bnx_cdata
.bnx_parent_tag
, 1, 0,
4185 BUS_SPACE_MAXADDR
, BUS_SPACE_MAXADDR
,
4186 NULL
, NULL
, MCLBYTES
, 1, MCLBYTES
,
4187 BUS_DMA_ALLOCNOW
| BUS_DMA_WAITOK
, &std
->bnx_rx_mtag
);
4189 device_printf(dev
, "could not create RX mbuf DMA tag\n");
4193 for (i
= 0; i
< BGE_STD_RX_RING_CNT
; ++i
) {
4194 error
= bus_dmamap_create(std
->bnx_rx_mtag
, BUS_DMA_WAITOK
,
4195 &std
->bnx_rx_std_buf
[i
].bnx_rx_dmamap
);
4199 for (j
= 0; j
< i
; ++j
) {
4200 bus_dmamap_destroy(std
->bnx_rx_mtag
,
4201 std
->bnx_rx_std_buf
[j
].bnx_rx_dmamap
);
4203 bus_dma_tag_destroy(std
->bnx_rx_mtag
);
4204 std
->bnx_rx_mtag
= NULL
;
4207 "could not create %dth RX mbuf DMA map\n", i
);
4213 * Create DMA stuffs for standard RX ring.
4215 error
= bnx_dma_block_alloc(sc
, BGE_STD_RX_RING_SZ
,
4216 &std
->bnx_rx_std_ring_tag
,
4217 &std
->bnx_rx_std_ring_map
,
4218 (void *)&std
->bnx_rx_std_ring
,
4219 &std
->bnx_rx_std_ring_paddr
);
4221 device_printf(dev
, "could not create std RX ring\n");
4226 * Create RX return rings
4228 mbx
= BGE_MBX_RX_CONS0_LO
;
4229 sc
->bnx_rx_ret_ring
= kmalloc_cachealign(
4230 sizeof(struct bnx_rx_ret_ring
) * sc
->bnx_rx_retcnt
, M_DEVBUF
,
4232 for (i
= 0; i
< sc
->bnx_rx_retcnt
; ++i
) {
4233 struct bnx_rx_ret_ring
*ret
= &sc
->bnx_rx_ret_ring
[i
];
4234 struct bnx_intr_data
*intr
;
4238 ret
->bnx_rx_mbx
= mbx
;
4239 ret
->bnx_rx_cntmax
= (BGE_STD_RX_RING_CNT
/ 4) /
4241 ret
->bnx_rx_mask
= 1 << i
;
4243 if (!BNX_RSS_ENABLED(sc
)) {
4244 intr
= &sc
->bnx_intr_data
[0];
4246 KKASSERT(i
+ 1 < sc
->bnx_intr_cnt
);
4247 intr
= &sc
->bnx_intr_data
[i
+ 1];
4251 ret
->bnx_rx_considx
=
4252 &intr
->bnx_status_block
->bge_idx
[0].bge_rx_prod_idx
;
4253 } else if (i
== 1) {
4254 ret
->bnx_rx_considx
=
4255 &intr
->bnx_status_block
->bge_rx_jumbo_cons_idx
;
4256 } else if (i
== 2) {
4257 ret
->bnx_rx_considx
=
4258 &intr
->bnx_status_block
->bge_rsvd1
;
4259 } else if (i
== 3) {
4260 ret
->bnx_rx_considx
=
4261 &intr
->bnx_status_block
->bge_rx_mini_cons_idx
;
4263 panic("unknown RX return ring %d\n", i
);
4265 ret
->bnx_hw_status_tag
=
4266 &intr
->bnx_status_block
->bge_status_tag
;
4268 error
= bnx_create_rx_ret_ring(ret
);
4271 "could not create %dth RX ret ring\n", i
);
4280 sc
->bnx_tx_ring
= kmalloc_cachealign(
4281 sizeof(struct bnx_tx_ring
) * sc
->bnx_tx_ringcnt
, M_DEVBUF
,
4283 for (i
= 0; i
< sc
->bnx_tx_ringcnt
; ++i
) {
4284 struct bnx_tx_ring
*txr
= &sc
->bnx_tx_ring
[i
];
4285 struct bnx_intr_data
*intr
;
4288 txr
->bnx_tx_mbx
= bnx_tx_mailbox
[i
];
4290 if (sc
->bnx_tx_ringcnt
== 1) {
4291 intr
= &sc
->bnx_intr_data
[0];
4293 KKASSERT(i
+ 1 < sc
->bnx_intr_cnt
);
4294 intr
= &sc
->bnx_intr_data
[i
+ 1];
4297 if ((sc
->bnx_flags
& BNX_FLAG_RXTX_BUNDLE
) == 0) {
4298 txr
->bnx_hw_status_tag
=
4299 &intr
->bnx_status_block
->bge_status_tag
;
4301 txr
->bnx_tx_considx
=
4302 &intr
->bnx_status_block
->bge_idx
[0].bge_tx_cons_idx
;
4304 error
= bnx_create_tx_ring(txr
);
4307 "could not create %dth TX ring\n", i
);
4313 * Create jumbo buffer pool.
4315 if (BNX_IS_JUMBO_CAPABLE(sc
)) {
4316 error
= bnx_alloc_jumbo_mem(sc
);
4319 "could not create jumbo buffer pool\n");
4328 bnx_dma_block_alloc(struct bnx_softc
*sc
, bus_size_t size
, bus_dma_tag_t
*tag
,
4329 bus_dmamap_t
*map
, void **addr
, bus_addr_t
*paddr
)
4334 error
= bus_dmamem_coherent(sc
->bnx_cdata
.bnx_parent_tag
, PAGE_SIZE
, 0,
4335 BUS_SPACE_MAXADDR
, BUS_SPACE_MAXADDR
,
4336 size
, BUS_DMA_WAITOK
| BUS_DMA_ZERO
, &dmem
);
4340 *tag
= dmem
.dmem_tag
;
4341 *map
= dmem
.dmem_map
;
4342 *addr
= dmem
.dmem_addr
;
4343 *paddr
= dmem
.dmem_busaddr
;
4349 bnx_dma_block_free(bus_dma_tag_t tag
, bus_dmamap_t map
, void *addr
)
4352 bus_dmamap_unload(tag
, map
);
4353 bus_dmamem_free(tag
, addr
, map
);
4354 bus_dma_tag_destroy(tag
);
4359 bnx_tbi_link_upd(struct bnx_softc
*sc
, uint32_t status
)
4361 struct ifnet
*ifp
= &sc
->arpcom
.ac_if
;
4363 #define PCS_ENCODE_ERR (BGE_MACSTAT_PORT_DECODE_ERROR|BGE_MACSTAT_MI_COMPLETE)
4366 * Sometimes PCS encoding errors are detected in
4367 * TBI mode (on fiber NICs), and for some reason
4368 * the chip will signal them as link changes.
4369 * If we get a link change event, but the 'PCS
4370 * encoding error' bit in the MAC status register
4371 * is set, don't bother doing a link check.
4372 * This avoids spurious "gigabit link up" messages
4373 * that sometimes appear on fiber NICs during
4374 * periods of heavy traffic.
4376 if (status
& BGE_MACSTAT_TBI_PCS_SYNCHED
) {
4377 if (!sc
->bnx_link
) {
4379 if (sc
->bnx_asicrev
== BGE_ASICREV_BCM5704
) {
4380 BNX_CLRBIT(sc
, BGE_MAC_MODE
,
4381 BGE_MACMODE_TBI_SEND_CFGS
);
4384 CSR_WRITE_4(sc
, BGE_MAC_STS
, 0xFFFFFFFF);
4387 if_printf(ifp
, "link UP\n");
4389 ifp
->if_link_state
= LINK_STATE_UP
;
4390 if_link_state_change(ifp
);
4392 } else if ((status
& PCS_ENCODE_ERR
) != PCS_ENCODE_ERR
) {
4397 if_printf(ifp
, "link DOWN\n");
4399 ifp
->if_link_state
= LINK_STATE_DOWN
;
4400 if_link_state_change(ifp
);
4404 #undef PCS_ENCODE_ERR
4406 /* Clear the attention. */
4407 CSR_WRITE_4(sc
, BGE_MAC_STS
, BGE_MACSTAT_SYNC_CHANGED
|
4408 BGE_MACSTAT_CFG_CHANGED
| BGE_MACSTAT_MI_COMPLETE
|
4409 BGE_MACSTAT_LINK_CHANGED
);
4413 bnx_copper_link_upd(struct bnx_softc
*sc
, uint32_t status __unused
)
4415 struct ifnet
*ifp
= &sc
->arpcom
.ac_if
;
4416 struct mii_data
*mii
= device_get_softc(sc
->bnx_miibus
);
4419 bnx_miibus_statchg(sc
->bnx_dev
);
4423 if_printf(ifp
, "link UP\n");
4425 if_printf(ifp
, "link DOWN\n");
4428 /* Clear the attention. */
4429 CSR_WRITE_4(sc
, BGE_MAC_STS
, BGE_MACSTAT_SYNC_CHANGED
|
4430 BGE_MACSTAT_CFG_CHANGED
| BGE_MACSTAT_MI_COMPLETE
|
4431 BGE_MACSTAT_LINK_CHANGED
);
4435 bnx_autopoll_link_upd(struct bnx_softc
*sc
, uint32_t status __unused
)
4437 struct ifnet
*ifp
= &sc
->arpcom
.ac_if
;
4438 struct mii_data
*mii
= device_get_softc(sc
->bnx_miibus
);
4442 if (!sc
->bnx_link
&&
4443 (mii
->mii_media_status
& IFM_ACTIVE
) &&
4444 IFM_SUBTYPE(mii
->mii_media_active
) != IFM_NONE
) {
4447 if_printf(ifp
, "link UP\n");
4448 } else if (sc
->bnx_link
&&
4449 (!(mii
->mii_media_status
& IFM_ACTIVE
) ||
4450 IFM_SUBTYPE(mii
->mii_media_active
) == IFM_NONE
)) {
4453 if_printf(ifp
, "link DOWN\n");
4456 /* Clear the attention. */
4457 CSR_WRITE_4(sc
, BGE_MAC_STS
, BGE_MACSTAT_SYNC_CHANGED
|
4458 BGE_MACSTAT_CFG_CHANGED
| BGE_MACSTAT_MI_COMPLETE
|
4459 BGE_MACSTAT_LINK_CHANGED
);
4463 bnx_sysctl_rx_coal_ticks(SYSCTL_HANDLER_ARGS
)
4465 struct bnx_softc
*sc
= arg1
;
4467 return bnx_sysctl_coal_chg(oidp
, arg1
, arg2
, req
,
4468 &sc
->bnx_rx_coal_ticks
,
4469 BNX_RX_COAL_TICKS_MIN
, BNX_RX_COAL_TICKS_MAX
,
4470 BNX_RX_COAL_TICKS_CHG
);
4474 bnx_sysctl_tx_coal_ticks(SYSCTL_HANDLER_ARGS
)
4476 struct bnx_softc
*sc
= arg1
;
4478 return bnx_sysctl_coal_chg(oidp
, arg1
, arg2
, req
,
4479 &sc
->bnx_tx_coal_ticks
,
4480 BNX_TX_COAL_TICKS_MIN
, BNX_TX_COAL_TICKS_MAX
,
4481 BNX_TX_COAL_TICKS_CHG
);
4485 bnx_sysctl_rx_coal_bds(SYSCTL_HANDLER_ARGS
)
4487 struct bnx_softc
*sc
= arg1
;
4489 return bnx_sysctl_coal_chg(oidp
, arg1
, arg2
, req
,
4490 &sc
->bnx_rx_coal_bds
,
4491 BNX_RX_COAL_BDS_MIN
, BNX_RX_COAL_BDS_MAX
,
4492 BNX_RX_COAL_BDS_CHG
);
4496 bnx_sysctl_rx_coal_bds_poll(SYSCTL_HANDLER_ARGS
)
4498 struct bnx_softc
*sc
= arg1
;
4500 return bnx_sysctl_coal_chg(oidp
, arg1
, arg2
, req
,
4501 &sc
->bnx_rx_coal_bds_poll
,
4502 BNX_RX_COAL_BDS_MIN
, BNX_RX_COAL_BDS_MAX
,
4503 BNX_RX_COAL_BDS_CHG
);
4507 bnx_sysctl_tx_coal_bds(SYSCTL_HANDLER_ARGS
)
4509 struct bnx_softc
*sc
= arg1
;
4511 return bnx_sysctl_coal_chg(oidp
, arg1
, arg2
, req
,
4512 &sc
->bnx_tx_coal_bds
,
4513 BNX_TX_COAL_BDS_MIN
, BNX_TX_COAL_BDS_MAX
,
4514 BNX_TX_COAL_BDS_CHG
);
4518 bnx_sysctl_tx_coal_bds_poll(SYSCTL_HANDLER_ARGS
)
4520 struct bnx_softc
*sc
= arg1
;
4522 return bnx_sysctl_coal_chg(oidp
, arg1
, arg2
, req
,
4523 &sc
->bnx_tx_coal_bds_poll
,
4524 BNX_TX_COAL_BDS_MIN
, BNX_TX_COAL_BDS_MAX
,
4525 BNX_TX_COAL_BDS_CHG
);
4529 bnx_sysctl_rx_coal_bds_int(SYSCTL_HANDLER_ARGS
)
4531 struct bnx_softc
*sc
= arg1
;
4533 return bnx_sysctl_coal_chg(oidp
, arg1
, arg2
, req
,
4534 &sc
->bnx_rx_coal_bds_int
,
4535 BNX_RX_COAL_BDS_MIN
, BNX_RX_COAL_BDS_MAX
,
4536 BNX_RX_COAL_BDS_INT_CHG
);
4540 bnx_sysctl_tx_coal_bds_int(SYSCTL_HANDLER_ARGS
)
4542 struct bnx_softc
*sc
= arg1
;
4544 return bnx_sysctl_coal_chg(oidp
, arg1
, arg2
, req
,
4545 &sc
->bnx_tx_coal_bds_int
,
4546 BNX_TX_COAL_BDS_MIN
, BNX_TX_COAL_BDS_MAX
,
4547 BNX_TX_COAL_BDS_INT_CHG
);
4551 bnx_sysctl_coal_chg(SYSCTL_HANDLER_ARGS
, uint32_t *coal
,
4552 int coal_min
, int coal_max
, uint32_t coal_chg_mask
)
4554 struct bnx_softc
*sc
= arg1
;
4555 struct ifnet
*ifp
= &sc
->arpcom
.ac_if
;
4558 ifnet_serialize_all(ifp
);
4561 error
= sysctl_handle_int(oidp
, &v
, 0, req
);
4562 if (!error
&& req
->newptr
!= NULL
) {
4563 if (v
< coal_min
|| v
> coal_max
) {
4567 sc
->bnx_coal_chg
|= coal_chg_mask
;
4569 /* Commit changes */
4570 bnx_coal_change(sc
);
4574 ifnet_deserialize_all(ifp
);
4579 bnx_coal_change(struct bnx_softc
*sc
)
4581 struct ifnet
*ifp
= &sc
->arpcom
.ac_if
;
4584 ASSERT_IFNET_SERIALIZED_ALL(ifp
);
4586 if (sc
->bnx_coal_chg
& BNX_RX_COAL_TICKS_CHG
) {
4587 if (sc
->bnx_rx_retcnt
== 1) {
4588 CSR_WRITE_4(sc
, BGE_HCC_RX_COAL_TICKS
,
4589 sc
->bnx_rx_coal_ticks
);
4592 CSR_WRITE_4(sc
, BGE_HCC_RX_COAL_TICKS
, 0);
4593 for (i
= 0; i
< sc
->bnx_rx_retcnt
; ++i
) {
4594 CSR_WRITE_4(sc
, BGE_VEC1_RX_COAL_TICKS
+
4595 (i
* BGE_VEC_COALSET_SIZE
),
4596 sc
->bnx_rx_coal_ticks
);
4599 for (; i
< BNX_INTR_MAX
- 1; ++i
) {
4600 CSR_WRITE_4(sc
, BGE_VEC1_RX_COAL_TICKS
+
4601 (i
* BGE_VEC_COALSET_SIZE
), 0);
4604 if_printf(ifp
, "rx_coal_ticks -> %u\n",
4605 sc
->bnx_rx_coal_ticks
);
4609 if (sc
->bnx_coal_chg
& BNX_TX_COAL_TICKS_CHG
) {
4610 if (sc
->bnx_tx_ringcnt
== 1) {
4611 CSR_WRITE_4(sc
, BGE_HCC_TX_COAL_TICKS
,
4612 sc
->bnx_tx_coal_ticks
);
4615 CSR_WRITE_4(sc
, BGE_HCC_TX_COAL_TICKS
, 0);
4616 for (i
= 0; i
< sc
->bnx_tx_ringcnt
; ++i
) {
4617 CSR_WRITE_4(sc
, BGE_VEC1_TX_COAL_TICKS
+
4618 (i
* BGE_VEC_COALSET_SIZE
),
4619 sc
->bnx_tx_coal_ticks
);
4622 for (; i
< BNX_INTR_MAX
- 1; ++i
) {
4623 CSR_WRITE_4(sc
, BGE_VEC1_TX_COAL_TICKS
+
4624 (i
* BGE_VEC_COALSET_SIZE
), 0);
4627 if_printf(ifp
, "tx_coal_ticks -> %u\n",
4628 sc
->bnx_tx_coal_ticks
);
4632 if (sc
->bnx_coal_chg
& BNX_RX_COAL_BDS_CHG
) {
4633 uint32_t rx_coal_bds
;
4635 if (ifp
->if_flags
& IFF_NPOLLING
)
4636 rx_coal_bds
= sc
->bnx_rx_coal_bds_poll
;
4638 rx_coal_bds
= sc
->bnx_rx_coal_bds
;
4640 if (sc
->bnx_rx_retcnt
== 1) {
4641 CSR_WRITE_4(sc
, BGE_HCC_RX_MAX_COAL_BDS
, rx_coal_bds
);
4644 CSR_WRITE_4(sc
, BGE_HCC_RX_MAX_COAL_BDS
, 0);
4645 for (i
= 0; i
< sc
->bnx_rx_retcnt
; ++i
) {
4646 CSR_WRITE_4(sc
, BGE_VEC1_RX_MAX_COAL_BDS
+
4647 (i
* BGE_VEC_COALSET_SIZE
), rx_coal_bds
);
4650 for (; i
< BNX_INTR_MAX
- 1; ++i
) {
4651 CSR_WRITE_4(sc
, BGE_VEC1_RX_MAX_COAL_BDS
+
4652 (i
* BGE_VEC_COALSET_SIZE
), 0);
4655 if_printf(ifp
, "%srx_coal_bds -> %u\n",
4656 (ifp
->if_flags
& IFF_NPOLLING
) ? "polling " : "",
4661 if (sc
->bnx_coal_chg
& BNX_TX_COAL_BDS_CHG
) {
4662 uint32_t tx_coal_bds
;
4664 if (ifp
->if_flags
& IFF_NPOLLING
)
4665 tx_coal_bds
= sc
->bnx_tx_coal_bds_poll
;
4667 tx_coal_bds
= sc
->bnx_tx_coal_bds
;
4669 if (sc
->bnx_tx_ringcnt
== 1) {
4670 CSR_WRITE_4(sc
, BGE_HCC_TX_MAX_COAL_BDS
, tx_coal_bds
);
4673 CSR_WRITE_4(sc
, BGE_HCC_TX_MAX_COAL_BDS
, 0);
4674 for (i
= 0; i
< sc
->bnx_tx_ringcnt
; ++i
) {
4675 CSR_WRITE_4(sc
, BGE_VEC1_TX_MAX_COAL_BDS
+
4676 (i
* BGE_VEC_COALSET_SIZE
), tx_coal_bds
);
4679 for (; i
< BNX_INTR_MAX
- 1; ++i
) {
4680 CSR_WRITE_4(sc
, BGE_VEC1_TX_MAX_COAL_BDS
+
4681 (i
* BGE_VEC_COALSET_SIZE
), 0);
4684 if_printf(ifp
, "%stx_coal_bds -> %u\n",
4685 (ifp
->if_flags
& IFF_NPOLLING
) ? "polling " : "",
4690 if (sc
->bnx_coal_chg
& BNX_RX_COAL_BDS_INT_CHG
) {
4691 if (sc
->bnx_rx_retcnt
== 1) {
4692 CSR_WRITE_4(sc
, BGE_HCC_RX_MAX_COAL_BDS_INT
,
4693 sc
->bnx_rx_coal_bds_int
);
4696 CSR_WRITE_4(sc
, BGE_HCC_RX_MAX_COAL_BDS_INT
, 0);
4697 for (i
= 0; i
< sc
->bnx_rx_retcnt
; ++i
) {
4698 CSR_WRITE_4(sc
, BGE_VEC1_RX_MAX_COAL_BDS_INT
+
4699 (i
* BGE_VEC_COALSET_SIZE
),
4700 sc
->bnx_rx_coal_bds_int
);
4703 for (; i
< BNX_INTR_MAX
- 1; ++i
) {
4704 CSR_WRITE_4(sc
, BGE_VEC1_RX_MAX_COAL_BDS_INT
+
4705 (i
* BGE_VEC_COALSET_SIZE
), 0);
4708 if_printf(ifp
, "rx_coal_bds_int -> %u\n",
4709 sc
->bnx_rx_coal_bds_int
);
4713 if (sc
->bnx_coal_chg
& BNX_TX_COAL_BDS_INT_CHG
) {
4714 if (sc
->bnx_tx_ringcnt
== 1) {
4715 CSR_WRITE_4(sc
, BGE_HCC_TX_MAX_COAL_BDS_INT
,
4716 sc
->bnx_tx_coal_bds_int
);
4719 CSR_WRITE_4(sc
, BGE_HCC_TX_MAX_COAL_BDS_INT
, 0);
4720 for (i
= 0; i
< sc
->bnx_tx_ringcnt
; ++i
) {
4721 CSR_WRITE_4(sc
, BGE_VEC1_TX_MAX_COAL_BDS_INT
+
4722 (i
* BGE_VEC_COALSET_SIZE
),
4723 sc
->bnx_tx_coal_bds_int
);
4726 for (; i
< BNX_INTR_MAX
- 1; ++i
) {
4727 CSR_WRITE_4(sc
, BGE_VEC1_TX_MAX_COAL_BDS_INT
+
4728 (i
* BGE_VEC_COALSET_SIZE
), 0);
4731 if_printf(ifp
, "tx_coal_bds_int -> %u\n",
4732 sc
->bnx_tx_coal_bds_int
);
4736 sc
->bnx_coal_chg
= 0;
4740 bnx_check_intr_rxtx(void *xintr
)
4742 struct bnx_intr_data
*intr
= xintr
;
4743 struct bnx_rx_ret_ring
*ret
;
4744 struct bnx_tx_ring
*txr
;
4747 lwkt_serialize_enter(intr
->bnx_intr_serialize
);
4749 KKASSERT(mycpuid
== intr
->bnx_intr_cpuid
);
4751 ifp
= &intr
->bnx_sc
->arpcom
.ac_if
;
4752 if ((ifp
->if_flags
& (IFF_RUNNING
| IFF_NPOLLING
)) != IFF_RUNNING
) {
4753 lwkt_serialize_exit(intr
->bnx_intr_serialize
);
4757 txr
= intr
->bnx_txr
;
4758 ret
= intr
->bnx_ret
;
4760 if (*ret
->bnx_rx_considx
!= ret
->bnx_rx_saved_considx
||
4761 *txr
->bnx_tx_considx
!= txr
->bnx_tx_saved_considx
) {
4762 if (intr
->bnx_rx_check_considx
== ret
->bnx_rx_saved_considx
&&
4763 intr
->bnx_tx_check_considx
== txr
->bnx_tx_saved_considx
) {
4764 if (!intr
->bnx_intr_maylose
) {
4765 intr
->bnx_intr_maylose
= TRUE
;
4769 if_printf(ifp
, "lost interrupt\n");
4770 intr
->bnx_intr_func(intr
->bnx_intr_arg
);
4773 intr
->bnx_intr_maylose
= FALSE
;
4774 intr
->bnx_rx_check_considx
= ret
->bnx_rx_saved_considx
;
4775 intr
->bnx_tx_check_considx
= txr
->bnx_tx_saved_considx
;
4778 callout_reset(&intr
->bnx_intr_timer
, BNX_INTR_CKINTVL
,
4779 intr
->bnx_intr_check
, intr
);
4780 lwkt_serialize_exit(intr
->bnx_intr_serialize
);
4784 bnx_check_intr_tx(void *xintr
)
4786 struct bnx_intr_data
*intr
= xintr
;
4787 struct bnx_tx_ring
*txr
;
4790 lwkt_serialize_enter(intr
->bnx_intr_serialize
);
4792 KKASSERT(mycpuid
== intr
->bnx_intr_cpuid
);
4794 ifp
= &intr
->bnx_sc
->arpcom
.ac_if
;
4795 if ((ifp
->if_flags
& (IFF_RUNNING
| IFF_NPOLLING
)) != IFF_RUNNING
) {
4796 lwkt_serialize_exit(intr
->bnx_intr_serialize
);
4800 txr
= intr
->bnx_txr
;
4802 if (*txr
->bnx_tx_considx
!= txr
->bnx_tx_saved_considx
) {
4803 if (intr
->bnx_tx_check_considx
== txr
->bnx_tx_saved_considx
) {
4804 if (!intr
->bnx_intr_maylose
) {
4805 intr
->bnx_intr_maylose
= TRUE
;
4809 if_printf(ifp
, "lost interrupt\n");
4810 intr
->bnx_intr_func(intr
->bnx_intr_arg
);
4813 intr
->bnx_intr_maylose
= FALSE
;
4814 intr
->bnx_tx_check_considx
= txr
->bnx_tx_saved_considx
;
4817 callout_reset(&intr
->bnx_intr_timer
, BNX_INTR_CKINTVL
,
4818 intr
->bnx_intr_check
, intr
);
4819 lwkt_serialize_exit(intr
->bnx_intr_serialize
);
4823 bnx_check_intr_rx(void *xintr
)
4825 struct bnx_intr_data
*intr
= xintr
;
4826 struct bnx_rx_ret_ring
*ret
;
4829 lwkt_serialize_enter(intr
->bnx_intr_serialize
);
4831 KKASSERT(mycpuid
== intr
->bnx_intr_cpuid
);
4833 ifp
= &intr
->bnx_sc
->arpcom
.ac_if
;
4834 if ((ifp
->if_flags
& (IFF_RUNNING
| IFF_NPOLLING
)) != IFF_RUNNING
) {
4835 lwkt_serialize_exit(intr
->bnx_intr_serialize
);
4839 ret
= intr
->bnx_ret
;
4841 if (*ret
->bnx_rx_considx
!= ret
->bnx_rx_saved_considx
) {
4842 if (intr
->bnx_rx_check_considx
== ret
->bnx_rx_saved_considx
) {
4843 if (!intr
->bnx_intr_maylose
) {
4844 intr
->bnx_intr_maylose
= TRUE
;
4848 if_printf(ifp
, "lost interrupt\n");
4849 intr
->bnx_intr_func(intr
->bnx_intr_arg
);
4852 intr
->bnx_intr_maylose
= FALSE
;
4853 intr
->bnx_rx_check_considx
= ret
->bnx_rx_saved_considx
;
4856 callout_reset(&intr
->bnx_intr_timer
, BNX_INTR_CKINTVL
,
4857 intr
->bnx_intr_check
, intr
);
4858 lwkt_serialize_exit(intr
->bnx_intr_serialize
);
4862 bnx_enable_intr(struct bnx_softc
*sc
)
4864 struct ifnet
*ifp
= &sc
->arpcom
.ac_if
;
4867 for (i
= 0; i
< sc
->bnx_intr_cnt
; ++i
) {
4868 lwkt_serialize_handler_enable(
4869 sc
->bnx_intr_data
[i
].bnx_intr_serialize
);
4875 for (i
= 0; i
< sc
->bnx_intr_cnt
; ++i
) {
4876 struct bnx_intr_data
*intr
= &sc
->bnx_intr_data
[i
];
4878 bnx_writembx(sc
, intr
->bnx_intr_mbx
,
4879 (*intr
->bnx_saved_status_tag
) << 24);
4880 /* XXX Linux driver */
4881 bnx_writembx(sc
, intr
->bnx_intr_mbx
,
4882 (*intr
->bnx_saved_status_tag
) << 24);
4886 * Unmask the interrupt when we stop polling.
4888 PCI_CLRBIT(sc
->bnx_dev
, BGE_PCI_MISC_CTL
,
4889 BGE_PCIMISCCTL_MASK_PCI_INTR
, 4);
4892 * Trigger another interrupt, since above writing
4893 * to interrupt mailbox0 may acknowledge pending
4896 BNX_SETBIT(sc
, BGE_MISC_LOCAL_CTL
, BGE_MLC_INTR_SET
);
4898 if (sc
->bnx_flags
& BNX_FLAG_STATUSTAG_BUG
) {
4900 if_printf(ifp
, "status tag bug workaround\n");
4902 for (i
= 0; i
< sc
->bnx_intr_cnt
; ++i
) {
4903 struct bnx_intr_data
*intr
= &sc
->bnx_intr_data
[i
];
4905 if (intr
->bnx_intr_check
== NULL
)
4907 intr
->bnx_intr_maylose
= FALSE
;
4908 intr
->bnx_rx_check_considx
= 0;
4909 intr
->bnx_tx_check_considx
= 0;
4910 callout_reset_bycpu(&intr
->bnx_intr_timer
,
4911 BNX_INTR_CKINTVL
, intr
->bnx_intr_check
, intr
,
4912 intr
->bnx_intr_cpuid
);
4918 bnx_disable_intr(struct bnx_softc
*sc
)
4922 for (i
= 0; i
< sc
->bnx_intr_cnt
; ++i
) {
4923 struct bnx_intr_data
*intr
= &sc
->bnx_intr_data
[i
];
4925 callout_stop(&intr
->bnx_intr_timer
);
4926 intr
->bnx_intr_maylose
= FALSE
;
4927 intr
->bnx_rx_check_considx
= 0;
4928 intr
->bnx_tx_check_considx
= 0;
4932 * Mask the interrupt when we start polling.
4934 PCI_SETBIT(sc
->bnx_dev
, BGE_PCI_MISC_CTL
,
4935 BGE_PCIMISCCTL_MASK_PCI_INTR
, 4);
4938 * Acknowledge possible asserted interrupt.
4940 for (i
= 0; i
< BNX_INTR_MAX
; ++i
)
4941 bnx_writembx(sc
, sc
->bnx_intr_data
[i
].bnx_intr_mbx
, 1);
4943 for (i
= 0; i
< sc
->bnx_intr_cnt
; ++i
) {
4944 lwkt_serialize_handler_disable(
4945 sc
->bnx_intr_data
[i
].bnx_intr_serialize
);
4950 bnx_get_eaddr_mem(struct bnx_softc
*sc
, uint8_t ether_addr
[])
4955 mac_addr
= bnx_readmem_ind(sc
, 0x0c14);
4956 if ((mac_addr
>> 16) == 0x484b) {
4957 ether_addr
[0] = (uint8_t)(mac_addr
>> 8);
4958 ether_addr
[1] = (uint8_t)mac_addr
;
4959 mac_addr
= bnx_readmem_ind(sc
, 0x0c18);
4960 ether_addr
[2] = (uint8_t)(mac_addr
>> 24);
4961 ether_addr
[3] = (uint8_t)(mac_addr
>> 16);
4962 ether_addr
[4] = (uint8_t)(mac_addr
>> 8);
4963 ether_addr
[5] = (uint8_t)mac_addr
;
4970 bnx_get_eaddr_nvram(struct bnx_softc
*sc
, uint8_t ether_addr
[])
4972 int mac_offset
= BGE_EE_MAC_OFFSET
;
4974 if (BNX_IS_5717_PLUS(sc
)) {
4977 f
= pci_get_function(sc
->bnx_dev
);
4979 mac_offset
= BGE_EE_MAC_OFFSET_5717
;
4981 mac_offset
+= BGE_EE_MAC_OFFSET_5717_OFF
;
4984 return bnx_read_nvram(sc
, ether_addr
, mac_offset
+ 2, ETHER_ADDR_LEN
);
4988 bnx_get_eaddr_eeprom(struct bnx_softc
*sc
, uint8_t ether_addr
[])
4990 if (sc
->bnx_flags
& BNX_FLAG_NO_EEPROM
)
4993 return bnx_read_eeprom(sc
, ether_addr
, BGE_EE_MAC_OFFSET
+ 2,
4998 bnx_get_eaddr(struct bnx_softc
*sc
, uint8_t eaddr
[])
5000 static const bnx_eaddr_fcn_t bnx_eaddr_funcs
[] = {
5001 /* NOTE: Order is critical */
5003 bnx_get_eaddr_nvram
,
5004 bnx_get_eaddr_eeprom
,
5007 const bnx_eaddr_fcn_t
*func
;
5009 for (func
= bnx_eaddr_funcs
; *func
!= NULL
; ++func
) {
5010 if ((*func
)(sc
, eaddr
) == 0)
5013 return (*func
== NULL
? ENXIO
: 0);
5017 * NOTE: 'm' is not freed upon failure
5020 bnx_defrag_shortdma(struct mbuf
*m
)
5026 * If device receive two back-to-back send BDs with less than
5027 * or equal to 8 total bytes then the device may hang. The two
5028 * back-to-back send BDs must in the same frame for this failure
5029 * to occur. Scan mbuf chains and see whether two back-to-back
5030 * send BDs are there. If this is the case, allocate new mbuf
5031 * and copy the frame to workaround the silicon bug.
5033 for (n
= m
, found
= 0; n
!= NULL
; n
= n
->m_next
) {
5044 n
= m_defrag(m
, M_NOWAIT
);
5051 bnx_stop_block(struct bnx_softc
*sc
, bus_size_t reg
, uint32_t bit
)
5055 BNX_CLRBIT(sc
, reg
, bit
);
5056 for (i
= 0; i
< BNX_TIMEOUT
; i
++) {
5057 if ((CSR_READ_4(sc
, reg
) & bit
) == 0)
5064 bnx_link_poll(struct bnx_softc
*sc
)
5068 status
= CSR_READ_4(sc
, BGE_MAC_STS
);
5069 if ((status
& sc
->bnx_link_chg
) || sc
->bnx_link_evt
) {
5070 sc
->bnx_link_evt
= 0;
5071 sc
->bnx_link_upd(sc
, status
);
5076 bnx_enable_msi(struct bnx_softc
*sc
, boolean_t is_msix
)
5080 msi_mode
= CSR_READ_4(sc
, BGE_MSI_MODE
);
5081 msi_mode
|= BGE_MSIMODE_ENABLE
;
5084 * 5718-PG105-R says that "one shot" mode does not work
5085 * if MSI is used, however, it obviously works.
5087 msi_mode
&= ~BGE_MSIMODE_ONESHOT_DISABLE
;
5089 msi_mode
|= BGE_MSIMODE_MSIX_MULTIMODE
;
5091 msi_mode
&= ~BGE_MSIMODE_MSIX_MULTIMODE
;
5092 CSR_WRITE_4(sc
, BGE_MSI_MODE
, msi_mode
);
5096 bnx_dma_swap_options(struct bnx_softc
*sc
)
5098 uint32_t dma_options
;
5100 dma_options
= BGE_MODECTL_WORDSWAP_NONFRAME
|
5101 BGE_MODECTL_BYTESWAP_DATA
| BGE_MODECTL_WORDSWAP_DATA
;
5102 #if BYTE_ORDER == BIG_ENDIAN
5103 dma_options
|= BGE_MODECTL_BYTESWAP_NONFRAME
;
5109 bnx_setup_tso(struct bnx_tx_ring
*txr
, struct mbuf
**mp
,
5110 uint16_t *mss0
, uint16_t *flags0
)
5115 int thoff
, iphlen
, hoff
, hlen
;
5116 uint16_t flags
, mss
;
5119 KASSERT(M_WRITABLE(m
), ("TSO mbuf not writable"));
5121 hoff
= m
->m_pkthdr
.csum_lhlen
;
5122 iphlen
= m
->m_pkthdr
.csum_iphlen
;
5123 thoff
= m
->m_pkthdr
.csum_thlen
;
5125 KASSERT(hoff
> 0, ("invalid ether header len"));
5126 KASSERT(iphlen
> 0, ("invalid ip header len"));
5127 KASSERT(thoff
> 0, ("invalid tcp header len"));
5129 if (__predict_false(m
->m_len
< hoff
+ iphlen
+ thoff
)) {
5130 m
= m_pullup(m
, hoff
+ iphlen
+ thoff
);
5137 ip
= mtodoff(m
, struct ip
*, hoff
);
5138 th
= mtodoff(m
, struct tcphdr
*, hoff
+ iphlen
);
5140 mss
= m
->m_pkthdr
.tso_segsz
;
5141 flags
= BGE_TXBDFLAG_CPU_PRE_DMA
| BGE_TXBDFLAG_CPU_POST_DMA
;
5143 ip
->ip_len
= htons(mss
+ iphlen
+ thoff
);
5146 hlen
= (iphlen
+ thoff
) >> 2;
5147 mss
|= ((hlen
& 0x3) << 14);
5148 flags
|= ((hlen
& 0xf8) << 7) | ((hlen
& 0x4) << 2);
5157 bnx_create_tx_ring(struct bnx_tx_ring
*txr
)
5159 bus_size_t txmaxsz
, txmaxsegsz
;
5162 lwkt_serialize_init(&txr
->bnx_tx_serialize
);
5165 * Create DMA tag and maps for TX mbufs.
5167 if (txr
->bnx_sc
->bnx_flags
& BNX_FLAG_TSO
)
5168 txmaxsz
= IP_MAXPACKET
+ sizeof(struct ether_vlan_header
);
5170 txmaxsz
= BNX_JUMBO_FRAMELEN
;
5171 if (txr
->bnx_sc
->bnx_asicrev
== BGE_ASICREV_BCM57766
)
5172 txmaxsegsz
= MCLBYTES
;
5174 txmaxsegsz
= PAGE_SIZE
;
5175 error
= bus_dma_tag_create(txr
->bnx_sc
->bnx_cdata
.bnx_parent_tag
,
5176 1, 0, BUS_SPACE_MAXADDR
, BUS_SPACE_MAXADDR
, NULL
, NULL
,
5177 txmaxsz
, BNX_NSEG_NEW
, txmaxsegsz
,
5178 BUS_DMA_ALLOCNOW
| BUS_DMA_WAITOK
| BUS_DMA_ONEBPAGE
,
5181 device_printf(txr
->bnx_sc
->bnx_dev
,
5182 "could not create TX mbuf DMA tag\n");
5186 for (i
= 0; i
< BGE_TX_RING_CNT
; i
++) {
5187 error
= bus_dmamap_create(txr
->bnx_tx_mtag
,
5188 BUS_DMA_WAITOK
| BUS_DMA_ONEBPAGE
,
5189 &txr
->bnx_tx_buf
[i
].bnx_tx_dmamap
);
5193 for (j
= 0; j
< i
; ++j
) {
5194 bus_dmamap_destroy(txr
->bnx_tx_mtag
,
5195 txr
->bnx_tx_buf
[j
].bnx_tx_dmamap
);
5197 bus_dma_tag_destroy(txr
->bnx_tx_mtag
);
5198 txr
->bnx_tx_mtag
= NULL
;
5200 device_printf(txr
->bnx_sc
->bnx_dev
,
5201 "could not create TX mbuf DMA map\n");
5207 * Create DMA stuffs for TX ring.
5209 error
= bnx_dma_block_alloc(txr
->bnx_sc
, BGE_TX_RING_SZ
,
5210 &txr
->bnx_tx_ring_tag
,
5211 &txr
->bnx_tx_ring_map
,
5212 (void *)&txr
->bnx_tx_ring
,
5213 &txr
->bnx_tx_ring_paddr
);
5215 device_printf(txr
->bnx_sc
->bnx_dev
,
5216 "could not create TX ring\n");
5220 txr
->bnx_tx_flags
|= BNX_TX_FLAG_SHORTDMA
;
5221 txr
->bnx_tx_wreg
= BNX_TX_WREG_NSEGS
;
5227 bnx_destroy_tx_ring(struct bnx_tx_ring
*txr
)
5229 /* Destroy TX mbuf DMA stuffs. */
5230 if (txr
->bnx_tx_mtag
!= NULL
) {
5233 for (i
= 0; i
< BGE_TX_RING_CNT
; i
++) {
5234 KKASSERT(txr
->bnx_tx_buf
[i
].bnx_tx_mbuf
== NULL
);
5235 bus_dmamap_destroy(txr
->bnx_tx_mtag
,
5236 txr
->bnx_tx_buf
[i
].bnx_tx_dmamap
);
5238 bus_dma_tag_destroy(txr
->bnx_tx_mtag
);
5241 /* Destroy TX ring */
5242 bnx_dma_block_free(txr
->bnx_tx_ring_tag
,
5243 txr
->bnx_tx_ring_map
, txr
->bnx_tx_ring
);
5247 bnx_sysctl_force_defrag(SYSCTL_HANDLER_ARGS
)
5249 struct bnx_softc
*sc
= (void *)arg1
;
5250 struct ifnet
*ifp
= &sc
->arpcom
.ac_if
;
5251 struct bnx_tx_ring
*txr
= &sc
->bnx_tx_ring
[0];
5252 int error
, defrag
, i
;
5254 if (txr
->bnx_tx_flags
& BNX_TX_FLAG_FORCE_DEFRAG
)
5259 error
= sysctl_handle_int(oidp
, &defrag
, 0, req
);
5260 if (error
|| req
->newptr
== NULL
)
5263 ifnet_serialize_all(ifp
);
5264 for (i
= 0; i
< sc
->bnx_tx_ringcnt
; ++i
) {
5265 txr
= &sc
->bnx_tx_ring
[i
];
5267 txr
->bnx_tx_flags
|= BNX_TX_FLAG_FORCE_DEFRAG
;
5269 txr
->bnx_tx_flags
&= ~BNX_TX_FLAG_FORCE_DEFRAG
;
5271 ifnet_deserialize_all(ifp
);
5277 bnx_sysctl_tx_wreg(SYSCTL_HANDLER_ARGS
)
5279 struct bnx_softc
*sc
= (void *)arg1
;
5280 struct ifnet
*ifp
= &sc
->arpcom
.ac_if
;
5281 struct bnx_tx_ring
*txr
= &sc
->bnx_tx_ring
[0];
5282 int error
, tx_wreg
, i
;
5284 tx_wreg
= txr
->bnx_tx_wreg
;
5285 error
= sysctl_handle_int(oidp
, &tx_wreg
, 0, req
);
5286 if (error
|| req
->newptr
== NULL
)
5289 ifnet_serialize_all(ifp
);
5290 for (i
= 0; i
< sc
->bnx_tx_ringcnt
; ++i
)
5291 sc
->bnx_tx_ring
[i
].bnx_tx_wreg
= tx_wreg
;
5292 ifnet_deserialize_all(ifp
);
5298 bnx_create_rx_ret_ring(struct bnx_rx_ret_ring
*ret
)
5302 lwkt_serialize_init(&ret
->bnx_rx_ret_serialize
);
5305 * Create DMA stuffs for RX return ring.
5307 error
= bnx_dma_block_alloc(ret
->bnx_sc
,
5308 BGE_RX_RTN_RING_SZ(BNX_RETURN_RING_CNT
),
5309 &ret
->bnx_rx_ret_ring_tag
,
5310 &ret
->bnx_rx_ret_ring_map
,
5311 (void *)&ret
->bnx_rx_ret_ring
,
5312 &ret
->bnx_rx_ret_ring_paddr
);
5314 device_printf(ret
->bnx_sc
->bnx_dev
,
5315 "could not create RX ret ring\n");
5319 /* Shadow standard ring's RX mbuf DMA tag */
5320 ret
->bnx_rx_mtag
= ret
->bnx_std
->bnx_rx_mtag
;
5323 * Create tmp DMA map for RX mbufs.
5325 error
= bus_dmamap_create(ret
->bnx_rx_mtag
, BUS_DMA_WAITOK
,
5326 &ret
->bnx_rx_tmpmap
);
5328 device_printf(ret
->bnx_sc
->bnx_dev
,
5329 "could not create tmp RX mbuf DMA map\n");
5330 ret
->bnx_rx_mtag
= NULL
;
5337 bnx_destroy_rx_ret_ring(struct bnx_rx_ret_ring
*ret
)
5339 /* Destroy tmp RX mbuf DMA map */
5340 if (ret
->bnx_rx_mtag
!= NULL
)
5341 bus_dmamap_destroy(ret
->bnx_rx_mtag
, ret
->bnx_rx_tmpmap
);
5343 /* Destroy RX return ring */
5344 bnx_dma_block_free(ret
->bnx_rx_ret_ring_tag
,
5345 ret
->bnx_rx_ret_ring_map
, ret
->bnx_rx_ret_ring
);
5349 bnx_alloc_intr(struct bnx_softc
*sc
)
5351 struct bnx_intr_data
*intr
;
5355 if (sc
->bnx_intr_cnt
> 1) {
5356 error
= bnx_alloc_msix(sc
);
5359 KKASSERT(sc
->bnx_intr_type
== PCI_INTR_TYPE_MSIX
);
5363 KKASSERT(sc
->bnx_intr_cnt
== 1);
5365 intr
= &sc
->bnx_intr_data
[0];
5366 intr
->bnx_ret
= &sc
->bnx_rx_ret_ring
[0];
5367 intr
->bnx_txr
= &sc
->bnx_tx_ring
[0];
5368 intr
->bnx_intr_serialize
= &sc
->bnx_main_serialize
;
5369 intr
->bnx_intr_check
= bnx_check_intr_rxtx
;
5370 intr
->bnx_saved_status_tag
= &intr
->bnx_ret
->bnx_saved_status_tag
;
5372 sc
->bnx_intr_type
= pci_alloc_1intr(sc
->bnx_dev
, bnx_msi_enable
,
5373 &intr
->bnx_intr_rid
, &intr_flags
);
5375 intr
->bnx_intr_res
= bus_alloc_resource_any(sc
->bnx_dev
, SYS_RES_IRQ
,
5376 &intr
->bnx_intr_rid
, intr_flags
);
5377 if (intr
->bnx_intr_res
== NULL
) {
5378 device_printf(sc
->bnx_dev
, "could not alloc interrupt\n");
5382 if (sc
->bnx_intr_type
== PCI_INTR_TYPE_MSI
) {
5383 bnx_enable_msi(sc
, FALSE
);
5384 intr
->bnx_intr_func
= bnx_msi
;
5386 device_printf(sc
->bnx_dev
, "oneshot MSI\n");
5388 intr
->bnx_intr_func
= bnx_intr_legacy
;
5390 intr
->bnx_intr_arg
= sc
;
5391 intr
->bnx_intr_cpuid
= rman_get_cpuid(intr
->bnx_intr_res
);
5393 intr
->bnx_txr
->bnx_tx_cpuid
= intr
->bnx_intr_cpuid
;
5399 bnx_setup_intr(struct bnx_softc
*sc
)
5403 for (i
= 0; i
< sc
->bnx_intr_cnt
; ++i
) {
5404 struct bnx_intr_data
*intr
= &sc
->bnx_intr_data
[i
];
5406 error
= bus_setup_intr_descr(sc
->bnx_dev
, intr
->bnx_intr_res
,
5407 INTR_MPSAFE
, intr
->bnx_intr_func
, intr
->bnx_intr_arg
,
5408 &intr
->bnx_intr_hand
, intr
->bnx_intr_serialize
,
5409 intr
->bnx_intr_desc
);
5411 device_printf(sc
->bnx_dev
,
5412 "could not set up %dth intr\n", i
);
5413 bnx_teardown_intr(sc
, i
);
5421 bnx_teardown_intr(struct bnx_softc
*sc
, int cnt
)
5425 for (i
= 0; i
< cnt
; ++i
) {
5426 struct bnx_intr_data
*intr
= &sc
->bnx_intr_data
[i
];
5428 bus_teardown_intr(sc
->bnx_dev
, intr
->bnx_intr_res
,
5429 intr
->bnx_intr_hand
);
5434 bnx_free_intr(struct bnx_softc
*sc
)
5436 if (sc
->bnx_intr_type
!= PCI_INTR_TYPE_MSIX
) {
5437 struct bnx_intr_data
*intr
;
5439 KKASSERT(sc
->bnx_intr_cnt
<= 1);
5440 intr
= &sc
->bnx_intr_data
[0];
5442 if (intr
->bnx_intr_res
!= NULL
) {
5443 bus_release_resource(sc
->bnx_dev
, SYS_RES_IRQ
,
5444 intr
->bnx_intr_rid
, intr
->bnx_intr_res
);
5446 if (sc
->bnx_intr_type
== PCI_INTR_TYPE_MSI
)
5447 pci_release_msi(sc
->bnx_dev
);
5449 bnx_free_msix(sc
, TRUE
);
5454 bnx_setup_serialize(struct bnx_softc
*sc
)
5459 * Allocate serializer array
5462 /* Main + RX STD + TX + RX RET */
5463 sc
->bnx_serialize_cnt
= 1 + 1 + sc
->bnx_tx_ringcnt
+ sc
->bnx_rx_retcnt
;
5466 kmalloc(sc
->bnx_serialize_cnt
* sizeof(struct lwkt_serialize
*),
5467 M_DEVBUF
, M_WAITOK
| M_ZERO
);
5472 * NOTE: Order is critical
5477 KKASSERT(i
< sc
->bnx_serialize_cnt
);
5478 sc
->bnx_serialize
[i
++] = &sc
->bnx_main_serialize
;
5480 KKASSERT(i
< sc
->bnx_serialize_cnt
);
5481 sc
->bnx_serialize
[i
++] = &sc
->bnx_rx_std_ring
.bnx_rx_std_serialize
;
5483 for (j
= 0; j
< sc
->bnx_rx_retcnt
; ++j
) {
5484 KKASSERT(i
< sc
->bnx_serialize_cnt
);
5485 sc
->bnx_serialize
[i
++] =
5486 &sc
->bnx_rx_ret_ring
[j
].bnx_rx_ret_serialize
;
5489 for (j
= 0; j
< sc
->bnx_tx_ringcnt
; ++j
) {
5490 KKASSERT(i
< sc
->bnx_serialize_cnt
);
5491 sc
->bnx_serialize
[i
++] =
5492 &sc
->bnx_tx_ring
[j
].bnx_tx_serialize
;
5495 KKASSERT(i
== sc
->bnx_serialize_cnt
);
5499 bnx_serialize(struct ifnet
*ifp
, enum ifnet_serialize slz
)
5501 struct bnx_softc
*sc
= ifp
->if_softc
;
5503 ifnet_serialize_array_enter(sc
->bnx_serialize
,
5504 sc
->bnx_serialize_cnt
, slz
);
5508 bnx_deserialize(struct ifnet
*ifp
, enum ifnet_serialize slz
)
5510 struct bnx_softc
*sc
= ifp
->if_softc
;
5512 ifnet_serialize_array_exit(sc
->bnx_serialize
,
5513 sc
->bnx_serialize_cnt
, slz
);
5517 bnx_tryserialize(struct ifnet
*ifp
, enum ifnet_serialize slz
)
5519 struct bnx_softc
*sc
= ifp
->if_softc
;
5521 return ifnet_serialize_array_try(sc
->bnx_serialize
,
5522 sc
->bnx_serialize_cnt
, slz
);
#ifdef INVARIANTS

/*
 * ifnet serialize-assert hook: verify (not) held state of the
 * requested serializers.  Compiled only under INVARIANTS.
 *
 * NOTE(review): the #ifdef INVARIANTS opener was dropped by the
 * source extraction (only the matching #endif survived); it is
 * restored here — confirm against the repository copy.
 */
static void
bnx_serialize_assert(struct ifnet *ifp, enum ifnet_serialize slz,
    boolean_t serialized)
{
	struct bnx_softc *sc = ifp->if_softc;

	ifnet_serialize_array_assert(sc->bnx_serialize, sc->bnx_serialize_cnt,
	    slz, serialized);
}

#endif	/* INVARIANTS */
5539 #ifdef IFPOLL_ENABLE
5542 bnx_sysctl_npoll_offset(SYSCTL_HANDLER_ARGS
)
5544 struct bnx_softc
*sc
= (void *)arg1
;
5545 struct ifnet
*ifp
= &sc
->arpcom
.ac_if
;
5548 off
= sc
->bnx_npoll_rxoff
;
5549 error
= sysctl_handle_int(oidp
, &off
, 0, req
);
5550 if (error
|| req
->newptr
== NULL
)
5555 ifnet_serialize_all(ifp
);
5556 if (off
>= ncpus2
|| off
% sc
->bnx_rx_retcnt
!= 0) {
5560 sc
->bnx_npoll_txoff
= off
;
5561 sc
->bnx_npoll_rxoff
= off
;
5563 ifnet_deserialize_all(ifp
);
5569 bnx_sysctl_npoll_rxoff(SYSCTL_HANDLER_ARGS
)
5571 struct bnx_softc
*sc
= (void *)arg1
;
5572 struct ifnet
*ifp
= &sc
->arpcom
.ac_if
;
5575 off
= sc
->bnx_npoll_rxoff
;
5576 error
= sysctl_handle_int(oidp
, &off
, 0, req
);
5577 if (error
|| req
->newptr
== NULL
)
5582 ifnet_serialize_all(ifp
);
5583 if (off
>= ncpus2
|| off
% sc
->bnx_rx_retcnt
!= 0) {
5587 sc
->bnx_npoll_rxoff
= off
;
5589 ifnet_deserialize_all(ifp
);
5595 bnx_sysctl_npoll_txoff(SYSCTL_HANDLER_ARGS
)
5597 struct bnx_softc
*sc
= (void *)arg1
;
5598 struct ifnet
*ifp
= &sc
->arpcom
.ac_if
;
5601 off
= sc
->bnx_npoll_txoff
;
5602 error
= sysctl_handle_int(oidp
, &off
, 0, req
);
5603 if (error
|| req
->newptr
== NULL
)
5608 ifnet_serialize_all(ifp
);
5609 if (off
>= ncpus2
) {
5613 sc
->bnx_npoll_txoff
= off
;
5615 ifnet_deserialize_all(ifp
);
5620 #endif /* IFPOLL_ENABLE */
5623 bnx_set_tick_cpuid(struct bnx_softc
*sc
, boolean_t polling
)
5626 sc
->bnx_tick_cpuid
= 0; /* XXX */
5628 sc
->bnx_tick_cpuid
= sc
->bnx_intr_data
[0].bnx_intr_cpuid
;
5632 bnx_rx_std_refill_ithread(void *xstd
)
5634 struct bnx_rx_std_ring
*std
= xstd
;
5635 struct globaldata
*gd
= mycpu
;
5639 while (!std
->bnx_rx_std_stop
) {
5640 if (std
->bnx_rx_std_refill
) {
5641 lwkt_serialize_handler_call(
5642 &std
->bnx_rx_std_serialize
,
5643 bnx_rx_std_refill
, std
, NULL
);
5649 atomic_poll_release_int(&std
->bnx_rx_std_running
);
5652 if (!std
->bnx_rx_std_refill
&& !std
->bnx_rx_std_stop
) {
5653 lwkt_deschedule_self(gd
->gd_curthread
);
5666 bnx_rx_std_refill(void *xstd
, void *frame __unused
)
5668 struct bnx_rx_std_ring
*std
= xstd
;
5669 int cnt
, refill_mask
;
5675 refill_mask
= std
->bnx_rx_std_refill
;
5676 atomic_clear_int(&std
->bnx_rx_std_refill
, refill_mask
);
5678 while (refill_mask
) {
5679 uint16_t check_idx
= std
->bnx_rx_std
;
5682 ret_idx
= bsfl(refill_mask
);
5684 struct bnx_rx_buf
*rb
;
5687 BNX_INC(check_idx
, BGE_STD_RX_RING_CNT
);
5688 rb
= &std
->bnx_rx_std_buf
[check_idx
];
5689 refilled
= rb
->bnx_rx_refilled
;
5692 bnx_setup_rxdesc_std(std
, check_idx
);
5693 std
->bnx_rx_std
= check_idx
;
5696 atomic_subtract_int(
5697 &std
->bnx_rx_std_used
, cnt
);
5698 bnx_writembx(std
->bnx_sc
,
5699 BGE_MBX_RX_STD_PROD_LO
,
5707 refill_mask
&= ~(1 << ret_idx
);
5711 atomic_subtract_int(&std
->bnx_rx_std_used
, cnt
);
5712 bnx_writembx(std
->bnx_sc
, BGE_MBX_RX_STD_PROD_LO
,
5716 if (std
->bnx_rx_std_refill
)
5719 atomic_poll_release_int(&std
->bnx_rx_std_running
);
5722 if (std
->bnx_rx_std_refill
)
5727 bnx_sysctl_std_refill(SYSCTL_HANDLER_ARGS
)
5729 struct bnx_softc
*sc
= (void *)arg1
;
5730 struct ifnet
*ifp
= &sc
->arpcom
.ac_if
;
5731 struct bnx_rx_ret_ring
*ret
= &sc
->bnx_rx_ret_ring
[0];
5732 int error
, cntmax
, i
;
5734 cntmax
= ret
->bnx_rx_cntmax
;
5735 error
= sysctl_handle_int(oidp
, &cntmax
, 0, req
);
5736 if (error
|| req
->newptr
== NULL
)
5739 ifnet_serialize_all(ifp
);
5741 if ((cntmax
* sc
->bnx_rx_retcnt
) >= BGE_STD_RX_RING_CNT
/ 2) {
5746 for (i
= 0; i
< sc
->bnx_tx_ringcnt
; ++i
)
5747 sc
->bnx_rx_ret_ring
[i
].bnx_rx_cntmax
= cntmax
;
5751 ifnet_deserialize_all(ifp
);
5757 bnx_init_rss(struct bnx_softc
*sc
)
5759 uint8_t key
[BGE_RSS_KEYREG_CNT
* BGE_RSS_KEYREG_SIZE
];
5762 KKASSERT(BNX_RSS_ENABLED(sc
));
5765 * Configure RSS redirect table in following fashion:
5766 * (hash & ring_cnt_mask) == rdr_table[(hash & rdr_table_mask)]
5769 for (j
= 0; j
< BGE_RSS_INDIR_TBL_CNT
; ++j
) {
5772 for (i
= 0; i
< BGE_RSS_INDIR_TBLENT_CNT
; ++i
) {
5775 q
= r
% sc
->bnx_rx_retcnt
;
5776 tbl
|= q
<< (BGE_RSS_INDIR_TBLENT_SHIFT
*
5777 (BGE_RSS_INDIR_TBLENT_CNT
- i
- 1));
5781 BNX_RSS_DPRINTF(sc
, 1, "tbl%d %08x\n", j
, tbl
);
5782 CSR_WRITE_4(sc
, BGE_RSS_INDIR_TBL(j
), tbl
);
5785 toeplitz_get_key(key
, sizeof(key
));
5786 for (i
= 0; i
< BGE_RSS_KEYREG_CNT
; ++i
) {
5789 keyreg
= BGE_RSS_KEYREG_VAL(key
, i
);
5791 BNX_RSS_DPRINTF(sc
, 1, "key%d %08x\n", i
, keyreg
);
5792 CSR_WRITE_4(sc
, BGE_RSS_KEYREG(i
), keyreg
);
5797 bnx_setup_ring_cnt(struct bnx_softc
*sc
)
5799 int msix_enable
, i
, msix_cnt
, msix_cnt2
, ring_max
;
5801 sc
->bnx_tx_ringcnt
= 1;
5802 sc
->bnx_rx_retcnt
= 1;
5803 sc
->bnx_intr_cnt
= 1;
5805 msix_enable
= device_getenv_int(sc
->bnx_dev
, "msix.enable",
5813 msix_cnt
= pci_msix_count(sc
->bnx_dev
);
5818 while ((1 << (i
+ 1)) <= msix_cnt
)
5823 * One MSI-X vector is dedicated to status or single TX queue,
5824 * so make sure that there are enough MSI-X vectors.
5826 if (msix_cnt
== msix_cnt2
) {
5829 * This probably will not happen; 57785/5718 families
5830 * come with at least 5 MSI-X vectors.
5833 if (msix_cnt2
<= 1) {
5834 device_printf(sc
->bnx_dev
,
5835 "MSI-X count %d could not be used\n", msix_cnt
);
5838 device_printf(sc
->bnx_dev
, "MSI-X count %d is power of 2\n",
5843 * Setup RX ring count
5845 ring_max
= BNX_RX_RING_MAX
;
5846 if (ring_max
> msix_cnt2
)
5847 ring_max
= msix_cnt2
;
5848 sc
->bnx_rx_retcnt
= device_getenv_int(sc
->bnx_dev
, "rx_rings",
5850 sc
->bnx_rx_retcnt
= if_ring_count2(sc
->bnx_rx_retcnt
, ring_max
);
5852 if (sc
->bnx_rx_retcnt
== 1)
5856 * We need one extra MSI-X vector for link status or
5857 * TX ring (if only one TX ring is enabled).
5859 sc
->bnx_intr_cnt
= sc
->bnx_rx_retcnt
+ 1;
5862 * Setup TX ring count
5864 * Currently only BCM5719 and BCM5720 support multiple TX rings
5865 * and the TX ring count must be less than the RX ring count.
5867 if (sc
->bnx_asicrev
== BGE_ASICREV_BCM5719
||
5868 sc
->bnx_asicrev
== BGE_ASICREV_BCM5720
) {
5869 ring_max
= BNX_TX_RING_MAX
;
5870 if (ring_max
> msix_cnt2
)
5871 ring_max
= msix_cnt2
;
5872 if (ring_max
> sc
->bnx_rx_retcnt
)
5873 ring_max
= sc
->bnx_rx_retcnt
;
5874 sc
->bnx_tx_ringcnt
= device_getenv_int(sc
->bnx_dev
, "tx_rings",
5876 sc
->bnx_tx_ringcnt
= if_ring_count2(sc
->bnx_tx_ringcnt
,
5882 bnx_alloc_msix(struct bnx_softc
*sc
)
5884 struct bnx_intr_data
*intr
;
5885 boolean_t setup
= FALSE
;
5886 int error
, i
, offset
, offset_def
;
5888 KKASSERT(sc
->bnx_intr_cnt
> 1);
5889 KKASSERT(sc
->bnx_intr_cnt
== sc
->bnx_rx_retcnt
+ 1);
5891 if (sc
->bnx_flags
& BNX_FLAG_RXTX_BUNDLE
) {
5895 intr
= &sc
->bnx_intr_data
[0];
5897 intr
->bnx_intr_serialize
= &sc
->bnx_main_serialize
;
5898 intr
->bnx_saved_status_tag
= &sc
->bnx_saved_status_tag
;
5900 intr
->bnx_intr_func
= bnx_msix_status
;
5901 intr
->bnx_intr_arg
= sc
;
5902 intr
->bnx_intr_cpuid
= 0; /* XXX */
5904 ksnprintf(intr
->bnx_intr_desc0
, sizeof(intr
->bnx_intr_desc0
),
5905 "%s sts", device_get_nameunit(sc
->bnx_dev
));
5906 intr
->bnx_intr_desc
= intr
->bnx_intr_desc0
;
5911 if (sc
->bnx_rx_retcnt
== ncpus2
) {
5914 offset_def
= (sc
->bnx_rx_retcnt
*
5915 device_get_unit(sc
->bnx_dev
)) % ncpus2
;
5917 offset
= device_getenv_int(sc
->bnx_dev
,
5918 "msix.offset", offset_def
);
5919 if (offset
>= ncpus2
||
5920 offset
% sc
->bnx_rx_retcnt
!= 0) {
5921 device_printf(sc
->bnx_dev
,
5922 "invalid msix.offset %d, use %d\n",
5923 offset
, offset_def
);
5924 offset
= offset_def
;
5928 for (i
= 1; i
< sc
->bnx_intr_cnt
; ++i
) {
5931 intr
= &sc
->bnx_intr_data
[i
];
5933 KKASSERT(idx
< sc
->bnx_rx_retcnt
);
5934 intr
->bnx_ret
= &sc
->bnx_rx_ret_ring
[idx
];
5935 if (idx
< sc
->bnx_tx_ringcnt
) {
5936 intr
->bnx_txr
= &sc
->bnx_tx_ring
[idx
];
5937 intr
->bnx_ret
->bnx_txr
= intr
->bnx_txr
;
5940 intr
->bnx_intr_serialize
=
5941 &intr
->bnx_ret
->bnx_rx_ret_serialize
;
5942 intr
->bnx_saved_status_tag
=
5943 &intr
->bnx_ret
->bnx_saved_status_tag
;
5945 intr
->bnx_intr_arg
= intr
->bnx_ret
;
5946 KKASSERT(idx
+ offset
< ncpus2
);
5947 intr
->bnx_intr_cpuid
= idx
+ offset
;
5949 if (intr
->bnx_txr
== NULL
) {
5950 intr
->bnx_intr_check
= bnx_check_intr_rx
;
5951 intr
->bnx_intr_func
= bnx_msix_rx
;
5952 ksnprintf(intr
->bnx_intr_desc0
,
5953 sizeof(intr
->bnx_intr_desc0
), "%s rx%d",
5954 device_get_nameunit(sc
->bnx_dev
), idx
);
5956 intr
->bnx_intr_check
= bnx_check_intr_rxtx
;
5957 intr
->bnx_intr_func
= bnx_msix_rxtx
;
5958 ksnprintf(intr
->bnx_intr_desc0
,
5959 sizeof(intr
->bnx_intr_desc0
), "%s rxtx%d",
5960 device_get_nameunit(sc
->bnx_dev
), idx
);
5962 intr
->bnx_txr
->bnx_tx_cpuid
=
5963 intr
->bnx_intr_cpuid
;
5965 intr
->bnx_intr_desc
= intr
->bnx_intr_desc0
;
5967 intr
->bnx_ret
->bnx_msix_mbx
= intr
->bnx_intr_mbx
;
5971 * TX ring and link status
5973 offset_def
= device_get_unit(sc
->bnx_dev
) % ncpus2
;
5974 offset
= device_getenv_int(sc
->bnx_dev
, "msix.txoff",
5976 if (offset
>= ncpus2
) {
5977 device_printf(sc
->bnx_dev
,
5978 "invalid msix.txoff %d, use %d\n",
5979 offset
, offset_def
);
5980 offset
= offset_def
;
5983 intr
= &sc
->bnx_intr_data
[0];
5985 intr
->bnx_txr
= &sc
->bnx_tx_ring
[0];
5986 intr
->bnx_intr_serialize
= &sc
->bnx_main_serialize
;
5987 intr
->bnx_intr_check
= bnx_check_intr_tx
;
5988 intr
->bnx_saved_status_tag
=
5989 &intr
->bnx_txr
->bnx_saved_status_tag
;
5991 intr
->bnx_intr_func
= bnx_msix_tx_status
;
5992 intr
->bnx_intr_arg
= intr
->bnx_txr
;
5993 intr
->bnx_intr_cpuid
= offset
;
5995 ksnprintf(intr
->bnx_intr_desc0
, sizeof(intr
->bnx_intr_desc0
),
5996 "%s ststx", device_get_nameunit(sc
->bnx_dev
));
5997 intr
->bnx_intr_desc
= intr
->bnx_intr_desc0
;
5999 intr
->bnx_txr
->bnx_tx_cpuid
= intr
->bnx_intr_cpuid
;
6004 if (sc
->bnx_rx_retcnt
== ncpus2
) {
6007 offset_def
= (sc
->bnx_rx_retcnt
*
6008 device_get_unit(sc
->bnx_dev
)) % ncpus2
;
6010 offset
= device_getenv_int(sc
->bnx_dev
,
6011 "msix.rxoff", offset_def
);
6012 if (offset
>= ncpus2
||
6013 offset
% sc
->bnx_rx_retcnt
!= 0) {
6014 device_printf(sc
->bnx_dev
,
6015 "invalid msix.rxoff %d, use %d\n",
6016 offset
, offset_def
);
6017 offset
= offset_def
;
6021 for (i
= 1; i
< sc
->bnx_intr_cnt
; ++i
) {
6024 intr
= &sc
->bnx_intr_data
[i
];
6026 KKASSERT(idx
< sc
->bnx_rx_retcnt
);
6027 intr
->bnx_ret
= &sc
->bnx_rx_ret_ring
[idx
];
6028 intr
->bnx_intr_serialize
=
6029 &intr
->bnx_ret
->bnx_rx_ret_serialize
;
6030 intr
->bnx_intr_check
= bnx_check_intr_rx
;
6031 intr
->bnx_saved_status_tag
=
6032 &intr
->bnx_ret
->bnx_saved_status_tag
;
6034 intr
->bnx_intr_func
= bnx_msix_rx
;
6035 intr
->bnx_intr_arg
= intr
->bnx_ret
;
6036 KKASSERT(idx
+ offset
< ncpus2
);
6037 intr
->bnx_intr_cpuid
= idx
+ offset
;
6039 ksnprintf(intr
->bnx_intr_desc0
,
6040 sizeof(intr
->bnx_intr_desc0
), "%s rx%d",
6041 device_get_nameunit(sc
->bnx_dev
), idx
);
6042 intr
->bnx_intr_desc
= intr
->bnx_intr_desc0
;
6044 intr
->bnx_ret
->bnx_msix_mbx
= intr
->bnx_intr_mbx
;
6048 if (BNX_IS_5717_PLUS(sc
)) {
6049 sc
->bnx_msix_mem_rid
= PCIR_BAR(4);
6051 if (sc
->bnx_res2
== NULL
)
6052 sc
->bnx_msix_mem_rid
= PCIR_BAR(2);
6054 if (sc
->bnx_msix_mem_rid
!= 0) {
6055 sc
->bnx_msix_mem_res
= bus_alloc_resource_any(sc
->bnx_dev
,
6056 SYS_RES_MEMORY
, &sc
->bnx_msix_mem_rid
, RF_ACTIVE
);
6057 if (sc
->bnx_msix_mem_res
== NULL
) {
6058 device_printf(sc
->bnx_dev
,
6059 "could not alloc MSI-X table\n");
6064 bnx_enable_msi(sc
, TRUE
);
6066 error
= pci_setup_msix(sc
->bnx_dev
);
6068 device_printf(sc
->bnx_dev
, "could not setup MSI-X\n");
6073 for (i
= 0; i
< sc
->bnx_intr_cnt
; ++i
) {
6074 intr
= &sc
->bnx_intr_data
[i
];
6076 error
= pci_alloc_msix_vector(sc
->bnx_dev
, i
,
6077 &intr
->bnx_intr_rid
, intr
->bnx_intr_cpuid
);
6079 device_printf(sc
->bnx_dev
,
6080 "could not alloc MSI-X %d on cpu%d\n",
6081 i
, intr
->bnx_intr_cpuid
);
6085 intr
->bnx_intr_res
= bus_alloc_resource_any(sc
->bnx_dev
,
6086 SYS_RES_IRQ
, &intr
->bnx_intr_rid
, RF_ACTIVE
);
6087 if (intr
->bnx_intr_res
== NULL
) {
6088 device_printf(sc
->bnx_dev
,
6089 "could not alloc MSI-X %d resource\n", i
);
6095 pci_enable_msix(sc
->bnx_dev
);
6096 sc
->bnx_intr_type
= PCI_INTR_TYPE_MSIX
;
6099 bnx_free_msix(sc
, setup
);
/*
 * Release all MSI-X resources: for every interrupt slot, the IRQ
 * resource (if allocated) and the MSI-X vector (if a valid rid was
 * assigned).  When `setup' is true, also tear down the device's
 * MSI-X infrastructure.
 *
 * Must only be called when multiple interrupts were configured
 * (asserted below).
 */
static void
bnx_free_msix(struct bnx_softc *sc, boolean_t setup)
{
	int i;

	KKASSERT(sc->bnx_intr_cnt > 1);

	for (i = 0; i < sc->bnx_intr_cnt; ++i) {
		struct bnx_intr_data *intr = &sc->bnx_intr_data[i];

		/* Release the IRQ resource, if one was allocated. */
		if (intr->bnx_intr_res != NULL) {
			bus_release_resource(sc->bnx_dev, SYS_RES_IRQ,
			    intr->bnx_intr_rid, intr->bnx_intr_res);
		}
		/* A negative rid means no MSI-X vector was allocated. */
		if (intr->bnx_intr_rid >= 0) {
			pci_release_msix_vector(sc->bnx_dev,
			    intr->bnx_intr_rid);
		}
	}
	if (setup)
		pci_teardown_msix(sc->bnx_dev);
}
/*
 * IPI handler: runs on the cpu that owns the standard ring's refill
 * ithread.  Marks the caller's return ring as needing a refill and
 * wakes the refill ithread.
 */
static void
bnx_rx_std_refill_sched_ipi(void *xret)
{
	struct bnx_rx_ret_ring *ret = xret;
	struct bnx_rx_std_ring *std = ret->bnx_std;
	struct globaldata *gd = mycpu;

	crit_enter_gd(gd);

	/* Publish which return ring wants a refill. */
	atomic_set_int(&std->bnx_rx_std_refill, ret->bnx_rx_mask);
	cpu_sfence();

	/* The refill ithread must live on this cpu. */
	KKASSERT(std->bnx_rx_std_ithread.td_gd == gd);
	lwkt_schedule(&std->bnx_rx_std_ithread);

	crit_exit_gd(gd);
}
/*
 * Runs on the cpu that owns the standard ring's refill ithread;
 * sets the stop flag and wakes the ithread so it can observe the
 * flag and terminate.
 */
static void
bnx_rx_std_refill_stop(void *xstd)
{
	struct bnx_rx_std_ring *std = xstd;
	struct globaldata *gd = mycpu;

	crit_enter_gd(gd);

	/* Tell the refill ithread to stop. */
	std->bnx_rx_std_stop = 1;
	cpu_sfence();

	/* The refill ithread must live on this cpu. */
	KKASSERT(std->bnx_rx_std_ithread.td_gd == gd);
	lwkt_schedule(&std->bnx_rx_std_ithread);

	crit_exit_gd(gd);
}
6162 bnx_serialize_skipmain(struct bnx_softc
*sc
)
6164 lwkt_serialize_array_enter(sc
->bnx_serialize
,
6165 sc
->bnx_serialize_cnt
, 1);
6169 bnx_deserialize_skipmain(struct bnx_softc
*sc
)
6171 lwkt_serialize_array_exit(sc
->bnx_serialize
,
6172 sc
->bnx_serialize_cnt
, 1);
/*
 * Schedule a refill of the standard receive ring on behalf of the
 * given return ring.  Only the caller that wins the
 * bnx_rx_std_running poll actually wakes the refill ithread; other
 * callers merely set their mask bit, which the already-running
 * ithread will pick up.
 */
static void
bnx_rx_std_refill_sched(struct bnx_rx_ret_ring *ret,
    struct bnx_rx_std_ring *std)
{
	struct globaldata *gd = mycpu;

	/* Reset this return ring's rx count before publishing. */
	ret->bnx_rx_cnt = 0;
	cpu_sfence();

	crit_enter_gd(gd);

	/* Publish which return ring wants a refill. */
	atomic_set_int(&std->bnx_rx_std_refill, ret->bnx_rx_mask);
	cpu_sfence();

	if (atomic_poll_acquire_int(&std->bnx_rx_std_running)) {
		if (std->bnx_rx_std_ithread.td_gd == gd) {
			/* Ithread lives on this cpu; wake it directly. */
			lwkt_schedule(&std->bnx_rx_std_ithread);
		} else {
			/* Wake it on its owning cpu via an IPI. */
			lwkt_send_ipiq(
			    std->bnx_rx_std_ithread.td_gd,
			    bnx_rx_std_refill_sched_ipi, ret);
		}
	}

	crit_exit_gd(gd);
}
/*
 * Extract packet classification info (for RSS/netisr dispatch) from
 * a received bge RX buffer descriptor.  Returns `pi' filled in on
 * success, or NULL if the frame cannot be classified: IPv6, missing
 * or bad IP checksum, missing TCP/UDP checksum, or a possible IP
 * fragment.
 */
static struct pktinfo *
bnx_rss_info(struct pktinfo *pi, const struct bge_rx_bd *cur_rx)
{
	/* Don't pick up IPv6 packet */
	if (cur_rx->bge_flags & BGE_RXBDFLAG_IPV6)
		return NULL;

	/* Don't pick up IP packet w/o IP checksum */
	if ((cur_rx->bge_flags & BGE_RXBDFLAG_IP_CSUM) == 0 ||
	    (cur_rx->bge_error_flag & BGE_RXERRFLAG_IP_CSUM_NOK))
		return NULL;

	/* Don't pick up IP packet w/o TCP/UDP checksum */
	if ((cur_rx->bge_flags & BGE_RXBDFLAG_TCP_UDP_CSUM) == 0)
		return NULL;

	/* May be IP fragment */
	if (cur_rx->bge_tcp_udp_csum != 0xffff)
		return NULL;

	if (cur_rx->bge_flags & BGE_RXBDFLAG_TCP_UDP_IS_TCP)
		pi->pi_l3proto = IPPROTO_TCP;
	else
		pi->pi_l3proto = IPPROTO_UDP;
	pi->pi_netisr = NETISR_IP;
	pi->pi_flags = 0;

	return pi;
}
6232 bnx_sig_pre_reset(struct bnx_softc
*sc
, int type
)
6234 if (type
== BNX_RESET_START
|| type
== BNX_RESET_SUSPEND
)
6235 bnx_ape_driver_state_change(sc
, type
);
6239 bnx_sig_post_reset(struct bnx_softc
*sc
, int type
)
6241 if (type
== BNX_RESET_SHUTDOWN
)
6242 bnx_ape_driver_state_change(sc
, type
);
/*
 * Clear all stale locks and select the lock for this driver instance.
 *
 * Each APE lock has a per-lock grant register at
 * BGE_APE_PER_LOCK_GRANT + 4*locknum; writing a driver's bit there
 * revokes that driver's claim.  PHY locks always use the DRIVER0
 * bit; other locks use a bit derived from the PCI function number.
 */
static void
bnx_ape_lock_init(struct bnx_softc *sc)
{
	uint32_t bit, regbase;
	int i;

	regbase = BGE_APE_PER_LOCK_GRANT;

	/* Clear any stale locks. */
	for (i = BGE_APE_LOCK_PHY0; i <= BGE_APE_LOCK_GPIO; i++) {
		switch (i) {
		case BGE_APE_LOCK_PHY0:
		case BGE_APE_LOCK_PHY1:
		case BGE_APE_LOCK_PHY2:
		case BGE_APE_LOCK_PHY3:
			/* PHY locks are always granted as DRIVER0. */
			bit = BGE_APE_LOCK_GRANT_DRIVER0;
			break;
		default:
			/* Non-PHY locks: function 0 uses DRIVER0. */
			if (sc->bnx_func_addr == 0)
				bit = BGE_APE_LOCK_GRANT_DRIVER0;
			else
				bit = 1 << sc->bnx_func_addr;
			break;
		}
		APE_WRITE_4(sc, regbase + 4 * i, bit);
	}

	/* Select the PHY lock based on the device's function number. */
	switch (sc->bnx_func_addr) {
	case 0:
		sc->bnx_phy_ape_lock = BGE_APE_LOCK_PHY0;
		break;
	case 1:
		sc->bnx_phy_ape_lock = BGE_APE_LOCK_PHY1;
		break;
	case 2:
		sc->bnx_phy_ape_lock = BGE_APE_LOCK_PHY2;
		break;
	case 3:
		sc->bnx_phy_ape_lock = BGE_APE_LOCK_PHY3;
		break;
	default:
		device_printf(sc->bnx_dev,
		    "PHY lock not supported on this function\n");
		break;
	}
}
/*
 * Check for APE firmware, set flags, and print version info.
 *
 * Probes the APE shared-memory signature and firmware-ready status;
 * on success sets BNX_MFW_ON_APE (plus a firmware-type flag) in
 * sc->bnx_mfw_flags and logs the firmware type and version.
 */
static void
bnx_ape_read_fw_ver(struct bnx_softc *sc)
{
	const char *fwtype;
	uint32_t apedata, features;

	/* Check for a valid APE signature in shared memory. */
	apedata = APE_READ_4(sc, BGE_APE_SEG_SIG);
	if (apedata != BGE_APE_SEG_SIG_MAGIC) {
		device_printf(sc->bnx_dev, "no APE signature\n");
		sc->bnx_mfw_flags &= ~BNX_MFW_ON_APE;
		return;
	}

	/* Check if APE firmware is running. */
	apedata = APE_READ_4(sc, BGE_APE_FW_STATUS);
	if ((apedata & BGE_APE_FW_STATUS_READY) == 0) {
		device_printf(sc->bnx_dev, "APE signature found "
		    "but FW status not ready! 0x%08x\n", apedata);
		return;
	}

	sc->bnx_mfw_flags |= BNX_MFW_ON_APE;

	/* Fetch the APE firmware type and version. */
	apedata = APE_READ_4(sc, BGE_APE_FW_VERSION);
	features = APE_READ_4(sc, BGE_APE_FW_FEATURES);
	if (features & BGE_APE_FW_FEATURE_NCSI) {
		sc->bnx_mfw_flags |= BNX_MFW_TYPE_NCSI;
		fwtype = "NCSI";
	} else if (features & BGE_APE_FW_FEATURE_DASH) {
		sc->bnx_mfw_flags |= BNX_MFW_TYPE_DASH;
		fwtype = "DASH";
	} else {
		fwtype = "UNKN";
	}

	/* Print the APE firmware version. */
	device_printf(sc->bnx_dev, "APE FW version: %s v%d.%d.%d.%d\n",
	    fwtype,
	    (apedata & BGE_APE_FW_VERSION_MAJMSK) >> BGE_APE_FW_VERSION_MAJSFT,
	    (apedata & BGE_APE_FW_VERSION_MINMSK) >> BGE_APE_FW_VERSION_MINSFT,
	    (apedata & BGE_APE_FW_VERSION_REVMSK) >> BGE_APE_FW_VERSION_REVSFT,
	    (apedata & BGE_APE_FW_VERSION_BLDMSK));
}
/*
 * Acquire an APE firmware lock.
 *
 * Writes this driver's request bit to the per-lock request register
 * and polls the grant register for up to 1 second.  Returns 0 on
 * success (or when no APE firmware is present, so locking is a
 * no-op), EINVAL for an unknown lock number, and EBUSY on timeout
 * (after revoking the request).
 */
static int
bnx_ape_lock(struct bnx_softc *sc, int locknum)
{
	uint32_t bit, gnt, req, status;
	int i, off;

	/* No APE firmware: nothing to lock. */
	if ((sc->bnx_mfw_flags & BNX_MFW_ON_APE) == 0)
		return 0;

	/* Lock request/grant registers have different bases. */
	req = BGE_APE_PER_LOCK_REQ;
	gnt = BGE_APE_PER_LOCK_GRANT;

	/* Per-lock register offset. */
	off = 4 * locknum;

	switch (locknum) {
	case BGE_APE_LOCK_GPIO:
		/* Lock required when using GPIO. */
		if (sc->bnx_func_addr == 0)
			bit = BGE_APE_LOCK_REQ_DRIVER0;
		else
			bit = 1 << sc->bnx_func_addr;
		break;
	case BGE_APE_LOCK_GRC:
		/* Lock required to reset the device. */
		if (sc->bnx_func_addr == 0)
			bit = BGE_APE_LOCK_REQ_DRIVER0;
		else
			bit = 1 << sc->bnx_func_addr;
		break;
	case BGE_APE_LOCK_MEM:
		/* Lock required when accessing certain APE memory. */
		if (sc->bnx_func_addr == 0)
			bit = BGE_APE_LOCK_REQ_DRIVER0;
		else
			bit = 1 << sc->bnx_func_addr;
		break;
	case BGE_APE_LOCK_PHY0:
	case BGE_APE_LOCK_PHY1:
	case BGE_APE_LOCK_PHY2:
	case BGE_APE_LOCK_PHY3:
		/* Lock required when accessing PHYs. */
		bit = BGE_APE_LOCK_REQ_DRIVER0;
		break;
	default:
		return EINVAL;
	}

	/* Request a lock. */
	APE_WRITE_4(sc, req + off, bit);

	/* Wait up to 1 second to acquire lock. */
	for (i = 0; i < 20000; i++) {
		status = APE_READ_4(sc, gnt + off);
		if (status == bit)
			break;
		DELAY(50);
	}

	/* Handle any errors. */
	if (status != bit) {
		if_printf(&sc->arpcom.ac_if, "APE lock %d request failed! "
		    "request = 0x%04x[0x%04x], status = 0x%04x[0x%04x]\n",
		    locknum, req + off, bit & 0xFFFF, gnt + off,
		    status & 0xFFFF);
		/* Revoke the lock request. */
		APE_WRITE_4(sc, gnt + off, bit);
		return EBUSY;
	}

	return 0;
}
/*
 * Release an APE firmware lock previously acquired with
 * bnx_ape_lock().  Writing this driver's bit to the per-lock grant
 * register relinquishes the lock.  No-op when no APE firmware is
 * present or for an unknown lock number.
 */
static void
bnx_ape_unlock(struct bnx_softc *sc, int locknum)
{
	uint32_t bit, gnt;
	int off;

	if ((sc->bnx_mfw_flags & BNX_MFW_ON_APE) == 0)
		return;

	gnt = BGE_APE_PER_LOCK_GRANT;

	/* Per-lock register offset. */
	off = 4 * locknum;

	switch (locknum) {
	case BGE_APE_LOCK_GPIO:
		if (sc->bnx_func_addr == 0)
			bit = BGE_APE_LOCK_GRANT_DRIVER0;
		else
			bit = 1 << sc->bnx_func_addr;
		break;
	case BGE_APE_LOCK_GRC:
		if (sc->bnx_func_addr == 0)
			bit = BGE_APE_LOCK_GRANT_DRIVER0;
		else
			bit = 1 << sc->bnx_func_addr;
		break;
	case BGE_APE_LOCK_MEM:
		if (sc->bnx_func_addr == 0)
			bit = BGE_APE_LOCK_GRANT_DRIVER0;
		else
			bit = 1 << sc->bnx_func_addr;
		break;
	case BGE_APE_LOCK_PHY0:
	case BGE_APE_LOCK_PHY1:
	case BGE_APE_LOCK_PHY2:
	case BGE_APE_LOCK_PHY3:
		/* PHY locks are always held as DRIVER0. */
		bit = BGE_APE_LOCK_GRANT_DRIVER0;
		break;
	default:
		return;
	}

	APE_WRITE_4(sc, gnt + off, bit);
}
/*
 * Send an event to the APE firmware.
 *
 * Takes the APE MEM lock, waits (up to ~1ms in 100us steps) for the
 * previous event to be serviced, then posts the new event and rings
 * the APE doorbell.  Logs a timeout if the previous event never
 * clears.
 */
static void
bnx_ape_send_event(struct bnx_softc *sc, uint32_t event)
{
	uint32_t apedata;
	int i;

	/* NCSI does not support APE events. */
	if ((sc->bnx_mfw_flags & BNX_MFW_ON_APE) == 0)
		return;

	/* Wait up to 1ms for APE to service previous event. */
	for (i = 10; i > 0; i--) {
		if (bnx_ape_lock(sc, BGE_APE_LOCK_MEM) != 0)
			break;
		apedata = APE_READ_4(sc, BGE_APE_EVENT_STATUS);
		if ((apedata & BGE_APE_EVENT_STATUS_EVENT_PENDING) == 0) {
			/* Post the event, then ring the doorbell. */
			APE_WRITE_4(sc, BGE_APE_EVENT_STATUS, event |
			    BGE_APE_EVENT_STATUS_EVENT_PENDING);
			bnx_ape_unlock(sc, BGE_APE_LOCK_MEM);
			APE_WRITE_4(sc, BGE_APE_EVENT, BGE_APE_EVENT_1);
			break;
		}
		bnx_ape_unlock(sc, BGE_APE_LOCK_MEM);
		DELAY(100);
	}
	if (i == 0) {
		if_printf(&sc->arpcom.ac_if,
		    "APE event 0x%08x send timed out\n", event);
	}
}
/*
 * Inform the APE firmware of a driver state change (start, unload,
 * or suspend).  For a start, also (re)initialize the host shared
 * memory segment: init counter, signature, driver id, behavior
 * flags, and heartbeat interval.  Finishes by sending the matching
 * state-change event to the APE.
 */
static void
bnx_ape_driver_state_change(struct bnx_softc *sc, int kind)
{
	uint32_t apedata, event;

	if ((sc->bnx_mfw_flags & BNX_MFW_ON_APE) == 0)
		return;

	switch (kind) {
	case BNX_RESET_START:
		/* If this is the first load, clear the load counter. */
		apedata = APE_READ_4(sc, BGE_APE_HOST_SEG_SIG);
		if (apedata != BGE_APE_HOST_SEG_SIG_MAGIC) {
			APE_WRITE_4(sc, BGE_APE_HOST_INIT_COUNT, 0);
		} else {
			apedata = APE_READ_4(sc, BGE_APE_HOST_INIT_COUNT);
			APE_WRITE_4(sc, BGE_APE_HOST_INIT_COUNT, ++apedata);
		}
		APE_WRITE_4(sc, BGE_APE_HOST_SEG_SIG,
		    BGE_APE_HOST_SEG_SIG_MAGIC);
		APE_WRITE_4(sc, BGE_APE_HOST_SEG_LEN,
		    BGE_APE_HOST_SEG_LEN_MAGIC);

		/* Add some version info if bnx(4) supports it. */
		APE_WRITE_4(sc, BGE_APE_HOST_DRIVER_ID,
		    BGE_APE_HOST_DRIVER_ID_MAGIC(1, 0));
		APE_WRITE_4(sc, BGE_APE_HOST_BEHAVIOR,
		    BGE_APE_HOST_BEHAV_NO_PHYLOCK);
		APE_WRITE_4(sc, BGE_APE_HOST_HEARTBEAT_INT_MS,
		    BGE_APE_HOST_HEARTBEAT_INT_DISABLE);
		APE_WRITE_4(sc, BGE_APE_HOST_DRVR_STATE,
		    BGE_APE_HOST_DRVR_STATE_START);
		event = BGE_APE_EVENT_STATUS_STATE_START;
		break;
	case BNX_RESET_SHUTDOWN:
		APE_WRITE_4(sc, BGE_APE_HOST_DRVR_STATE,
		    BGE_APE_HOST_DRVR_STATE_UNLOAD);
		event = BGE_APE_EVENT_STATUS_STATE_UNLOAD;
		break;
	case BNX_RESET_SUSPEND:
		event = BGE_APE_EVENT_STATUS_STATE_SUSPEND;
		break;
	default:
		return;
	}

	bnx_ape_send_event(sc, event | BGE_APE_EVENT_STATUS_DRIVER_EVNT |
	    BGE_APE_EVENT_STATUS_STATE_CHNGE);
}