/* bnx2x_main.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2008 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/device.h>  /* for dev_info() */
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#ifdef NETIF_F_HW_VLAN_TX
#include <linux/if_vlan.h>
#endif
#include <net/checksum.h>
#include <linux/version.h>
#include <net/ip6_checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/crc32c.h>
#include <linux/prefetch.h>
#include <linux/zlib.h>

#include "bnx2x_reg.h"
#include "bnx2x_fw_defs.h"
#include "bnx2x_hsi.h"
#include "bnx2x_link.h"
#include "bnx2x_init.h"
#define DRV_MODULE_VERSION	"1.45.6"
#define DRV_MODULE_RELDATE	"2008/06/23"
#define BNX2X_BC_VER		0x040200

/* Time in jiffies before concluding the transmitter is hung */
#define TX_TIMEOUT		(5*HZ)
static char version[] __devinitdata =
	"Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
	DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Eliezer Tamir");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710 Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
static int disable_tpa;
static int load_count[3]; /* 0-common, 1-port0, 2-port1 */

module_param(use_inta, int, 0);
module_param(poll, int, 0);
module_param(debug, int, 0);
module_param(disable_tpa, int, 0);
module_param(nomcp, int, 0);
MODULE_PARM_DESC(use_inta, "use INT#A instead of MSI-X");
MODULE_PARM_DESC(poll, "use polling (for debug)");
MODULE_PARM_DESC(debug, "default debug msglevel");
MODULE_PARM_DESC(nomcp, "ignore management CPU");

module_param(use_multi, int, 0);
MODULE_PARM_DESC(use_multi, "use per-CPU queues");
enum bnx2x_board_type {
	BCM57710 = 0,
	BCM57711,
	BCM57711E,
};

/* indexed by board_type, above */
static struct {
	char *name;
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM57710 XGb" },
	{ "Broadcom NetXtreme II BCM57711 XGb" },
	{ "Broadcom NetXtreme II BCM57711E XGb" }
};

static const struct pci_device_id bnx2x_pci_tbl[] = {
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57710,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57710 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711E,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711E },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);
/****************************************************************************
* General service functions
****************************************************************************/

/* locking is done by mcp */
static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
{
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
}
static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
{
	u32 val;

	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);

	return val;
}
static const u32 dmae_reg_go_c[] = {
	DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
	DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
	DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
	DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
};
/* copy command into DMAE command memory and set DMAE command go */
static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
			    int idx)
{
	u32 cmd_offset;
	int i;

	cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
	for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
		REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));

		DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
		   idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
	}
	REG_WR(bp, dmae_reg_go_c[idx], 1);
}
void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
		      u32 len32)
{
	struct dmae_command *dmae = &bp->init_dmae;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);

		DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x len32 %d)"
		   " using indirect\n", dst_addr, len32);
		bnx2x_init_ind_wr(bp, dst_addr, data, len32);
		return;
	}

	mutex_lock(&bp->dmae_mutex);

	memset(dmae, 0, sizeof(struct dmae_command));

	dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = U64_LO(dma_addr);
	dmae->src_addr_hi = U64_HI(dma_addr);
	dmae->dst_addr_lo = dst_addr >> 2;
	dmae->dst_addr_hi = 0;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	DP(BNX2X_MSG_OFF, "dmae: opcode 0x%08x\n"
	   DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
		    "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
	   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
	   dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, dst_addr,
	   dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);
	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

	while (*wb_comp != DMAE_COMP_VAL) {
		DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);

		/* adjust delay for emulation/FPGA */
		if (CHIP_REV_IS_SLOW(bp))

		BNX2X_ERR("dmae timeout!\n");
	}

	mutex_unlock(&bp->dmae_mutex);
}
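/* Illustrative example (assumed usage, not quoted from this file): a caller
 * holding a DMA-able buffer mapped at 'mapping' that wants to copy 'len'
 * bytes into GRC address 'dst' would do roughly
 *	bnx2x_write_dmae(bp, mapping, dst, len / 4);
 * the length is counted in 32-bit words (see the "len [%d *4]" debug print),
 * and the call blocks until the DMAE completion value is written back.
 */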
void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
{
	struct dmae_command *dmae = &bp->init_dmae;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int i;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);

		DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x len32 %d)"
		   " using indirect\n", src_addr, len32);
		for (i = 0; i < len32; i++)
			data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
		return;
	}

	mutex_lock(&bp->dmae_mutex);

	memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
	memset(dmae, 0, sizeof(struct dmae_command));

	dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = src_addr >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	DP(BNX2X_MSG_OFF, "dmae: opcode 0x%08x\n"
	   DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
		    "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
	   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
	   dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, src_addr,
	   dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);

	bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

	while (*wb_comp != DMAE_COMP_VAL) {

		/* adjust delay for emulation/FPGA */
		if (CHIP_REV_IS_SLOW(bp))

		BNX2X_ERR("dmae timeout!\n");
	}

	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	mutex_unlock(&bp->dmae_mutex);
}
/* used only for slowpath so not inlined */
static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
{
	u32 wb_write[2];

	wb_write[0] = val_hi;
	wb_write[1] = val_lo;
	REG_WR_DMAE(bp, reg, wb_write, 2);
}

static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
{
	u32 wb_data[2];

	REG_RD_DMAE(bp, reg, wb_data, 2);

	return HILO_U64(wb_data[0], wb_data[1]);
}
static int bnx2x_mc_assert(struct bnx2x *bp)
{
	int i, last_idx;
	u32 row0, row1, row2, row3;

	last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
			   XSTORM_ASSERT_LIST_INDEX_OFFSET);
	BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
		}
	}

	last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
			   TSTORM_ASSERT_LIST_INDEX_OFFSET);
	BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
		}
	}

	last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
			   CSTORM_ASSERT_LIST_INDEX_OFFSET);
	BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
		}
	}

	last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
			   USTORM_ASSERT_LIST_INDEX_OFFSET);
	BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
		}
	}
}
static void bnx2x_fw_dump(struct bnx2x *bp)
{
	u32 mark, offset, data[9];
	int word;

	mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104);
	mark = ((mark + 0x3) & ~0x3);
	printk(KERN_ERR PFX "begin fw dump (mark 0x%x)\n" KERN_ERR, mark);

	for (offset = mark - 0x08000000; offset <= 0xF900; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
						  offset + 4*word));
		printk(KERN_CONT "%s", (char *)data);
	}
	for (offset = 0xF108; offset <= mark - 0x08000000; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
						  offset + 4*word));
		printk(KERN_CONT "%s", (char *)data);
	}
	printk("\n" KERN_ERR PFX "end of fw dump\n");
}
static void bnx2x_panic_dump(struct bnx2x *bp)
{
	int i, j, start, end;

	BNX2X_ERR("begin crash dump -----------------\n");

	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		struct eth_tx_db_data *hw_prods = fp->hw_tx_prods;

		BNX2X_ERR("queue[%d]: tx_pkt_prod(%x) tx_pkt_cons(%x)"
			  " tx_bd_prod(%x) tx_bd_cons(%x) *tx_cons_sb(%x)\n",
			  i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
			  fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
		BNX2X_ERR(" rx_comp_prod(%x) rx_comp_cons(%x)"
			  " *rx_cons_sb(%x) *rx_bd_cons_sb(%x)"
			  " rx_sge_prod(%x) last_max_sge(%x)\n",
			  fp->rx_comp_prod, fp->rx_comp_cons,
			  le16_to_cpu(*fp->rx_cons_sb),
			  le16_to_cpu(*fp->rx_bd_cons_sb),
			  fp->rx_sge_prod, fp->last_max_sge);
		BNX2X_ERR(" fp_c_idx(%x) fp_u_idx(%x)"
			  " bd data(%x,%x) rx_alloc_failed(%lx)\n",
			  fp->fp_c_idx, fp->fp_u_idx, hw_prods->packets_prod,
			  hw_prods->bds_prod, fp->rx_alloc_failed);

		start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
		end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
		for (j = start; j < end; j++) {
			struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];

			BNX2X_ERR("packet[%x]=[%p,%x]\n", j,
				  sw_bd->skb, sw_bd->first_bd);
		}

		start = TX_BD(fp->tx_bd_cons - 10);
		end = TX_BD(fp->tx_bd_cons + 254);
		for (j = start; j < end; j++) {
			u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];

			BNX2X_ERR("tx_bd[%x]=[%x:%x:%x:%x]\n",
				  j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
		}

		start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
		end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
		for (j = start; j < end; j++) {
			u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
			struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];

			BNX2X_ERR("rx_bd[%x]=[%x:%x] sw_bd=[%p]\n",
				  j, rx_bd[1], rx_bd[0], sw_bd->skb);
		}

		end = RX_SGE_CNT*NUM_RX_SGE_PAGES;
		for (j = start; j < end; j++) {
			u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
			struct sw_rx_page *sw_page = &fp->rx_page_ring[j];

			BNX2X_ERR("rx_sge[%x]=[%x:%x] sw_page=[%p]\n",
				  j, rx_sge[1], rx_sge[0], sw_page->page);
		}

		start = RCQ_BD(fp->rx_comp_cons - 10);
		end = RCQ_BD(fp->rx_comp_cons + 503);
		for (j = start; j < end; j++) {
			u32 *cqe = (u32 *)&fp->rx_comp_ring[j];

			BNX2X_ERR("cqe[%x]=[%x:%x:%x:%x]\n",
				  j, cqe[0], cqe[1], cqe[2], cqe[3]);
		}
	}

	BNX2X_ERR("def_c_idx(%u) def_u_idx(%u) def_x_idx(%u)"
		  " def_t_idx(%u) def_att_idx(%u) attn_state(%u)"
		  " spq_prod_idx(%u)\n",
		  bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
		  bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);

	BNX2X_ERR("end crash dump -----------------\n");

	bp->stats_state = STATS_STATE_DISABLED;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
}
static void bnx2x_int_enable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;

	if (msix) {
		val &= ~HC_CONFIG_0_REG_SINGLE_ISR_EN_0;
		val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else {
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_INT_LINE_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);

		DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x) MSI-X %d\n",
		   val, port, addr, msix);

		REG_WR(bp, addr, val);

		val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
	}

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x) MSI-X %d\n",
	   val, port, addr, msix);

	REG_WR(bp, addr, val);

	if (CHIP_IS_E1H(bp)) {
		/* init leading/trailing edge */
		val = (0xfe0f | (1 << (BP_E1HVN(bp) + 4)));

		/* enable nig attention */
		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
	}
}
static void bnx2x_int_disable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);

	val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
		 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
		 HC_CONFIG_0_REG_INT_LINE_EN_0 |
		 HC_CONFIG_0_REG_ATTN_BIT_EN_0);

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
	   val, port, addr);

	REG_WR(bp, addr, val);
	if (REG_RD(bp, addr) != val)
		BNX2X_ERR("BUG! proper val not read from IGU!\n");
}
static void bnx2x_int_disable_sync(struct bnx2x *bp)
{
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int i;

	/* disable interrupt handling */
	atomic_inc(&bp->intr_sem);
	/* prevent the HW from sending interrupts */
	bnx2x_int_disable(bp);

	/* make sure all ISRs are done */
	if (msix) {
		for_each_queue(bp, i)
			synchronize_irq(bp->msix_table[i].vector);

		/* one more for the Slow Path IRQ */
		synchronize_irq(bp->msix_table[i].vector);
	} else
		synchronize_irq(bp->pdev->irq);

	/* make sure sp_task is not running */
	cancel_work_sync(&bp->sp_task);
}
/*
 * General service functions
 */

static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
				u8 storm, u16 index, u8 op, u8 update)
{
	u32 igu_addr = (IGU_ADDR_INT_ACK + IGU_FUNC_BASE * BP_FUNC(bp)) * 8;
	struct igu_ack_register igu_ack;

	igu_ack.status_block_index = index;
	igu_ack.sb_id_and_flags =
			((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
			 (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
			 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
			 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));

	DP(BNX2X_MSG_OFF, "write 0x%08x to IGU addr 0x%x\n",
	   (*(u32 *)&igu_ack), BAR_IGU_INTMEM + igu_addr);
	REG_WR(bp, BAR_IGU_INTMEM + igu_addr, (*(u32 *)&igu_ack));
}
static inline u16 bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
{
	struct host_status_block *fpsb = fp->status_blk;

	barrier(); /* status block is written to by the chip */
	if (fp->fp_c_idx != fpsb->c_status_block.status_block_index) {
		fp->fp_c_idx = fpsb->c_status_block.status_block_index;
	}
	if (fp->fp_u_idx != fpsb->u_status_block.status_block_index) {
		fp->fp_u_idx = fpsb->u_status_block.status_block_index;
	}
}
static inline int bnx2x_has_work(struct bnx2x_fastpath *fp)
{
	u16 rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);

	if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
		rx_cons_sb++;

	if ((fp->rx_comp_cons != rx_cons_sb) ||
	    (fp->tx_pkt_prod != le16_to_cpu(*fp->tx_cons_sb)) ||
	    (fp->tx_pkt_prod != fp->tx_pkt_cons))
		return 1;

	return 0;
}
static u16 bnx2x_ack_int(struct bnx2x *bp)
{
	u32 igu_addr = (IGU_ADDR_SIMD_MASK + IGU_FUNC_BASE * BP_FUNC(bp)) * 8;
	u32 result = REG_RD(bp, BAR_IGU_INTMEM + igu_addr);

	DP(BNX2X_MSG_OFF, "read 0x%08x from IGU addr 0x%x\n",
	   result, BAR_IGU_INTMEM + igu_addr);

#ifdef IGU_DEBUG
#warning IGU_DEBUG active
	BNX2X_ERR("read %x from IGU\n", result);
	REG_WR(bp, TM_REG_TIMER_SOFT_RST, 0);
#endif

	return result;
}
/*
 * fast path service functions
 */

/* free skb in the packet ring at pos idx
 * return idx of last bd freed
 */
static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			     u16 idx)
{
	struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
	struct eth_tx_bd *tx_bd;
	struct sk_buff *skb = tx_buf->skb;
	u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
	int nbd;

	DP(BNX2X_MSG_OFF, "pkt_idx %d buff @(%p)->skb %p\n",
	   idx, tx_buf, skb);

	DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
	tx_bd = &fp->tx_desc_ring[bd_idx];
	pci_unmap_single(bp->pdev, BD_UNMAP_ADDR(tx_bd),
			 BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);

	nbd = le16_to_cpu(tx_bd->nbd) - 1;
	new_cons = nbd + tx_buf->first_bd;
#ifdef BNX2X_STOP_ON_ERROR
	if (nbd > (MAX_SKB_FRAGS + 2)) {
		BNX2X_ERR("BAD nbd!\n");
	}
#endif

	/* Skip a parse bd and the TSO split header bd
	   since they have no mapping */
	bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	if (tx_bd->bd_flags.as_bitfield & (ETH_TX_BD_FLAGS_IP_CSUM |
					   ETH_TX_BD_FLAGS_TCP_CSUM |
					   ETH_TX_BD_FLAGS_SW_LSO)) {
		bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
		tx_bd = &fp->tx_desc_ring[bd_idx];
		/* is this a TSO split header bd? */
		if (tx_bd->bd_flags.as_bitfield & ETH_TX_BD_FLAGS_SW_LSO) {
			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
		}
	}

	DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
	tx_bd = &fp->tx_desc_ring[bd_idx];
	pci_unmap_page(bp->pdev, BD_UNMAP_ADDR(tx_bd),
		       BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);

	bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	tx_buf->first_bd = 0;

	return new_cons;
}
static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
{
	s16 used;
	u16 prod;
	u16 cons;

	barrier(); /* Tell compiler that prod and cons can change */
	prod = fp->tx_bd_prod;
	cons = fp->tx_bd_cons;

	/* NUM_TX_RINGS = number of "next-page" entries
	   It will be used as a threshold */
	used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;

#ifdef BNX2X_STOP_ON_ERROR
	BUG_TRAP(used <= fp->bp->tx_ring_size);
	BUG_TRAP((fp->bp->tx_ring_size - used) <= MAX_TX_AVAIL);
#endif

	return (s16)(fp->bp->tx_ring_size) - used;
}
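/* Worked example (illustrative): with tx_bd_prod == tx_bd_cons the ring is
 * empty, yet 'used' already equals NUM_TX_RINGS because the per-page "next"
 * descriptors are treated as consumed; the value returned is then
 * tx_ring_size - NUM_TX_RINGS, i.e. the number of BDs a caller may actually
 * post before the ring is considered full.
 */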
static void bnx2x_tx_int(struct bnx2x_fastpath *fp, int work)
{
	struct bnx2x *bp = fp->bp;
	u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return;
#endif

	hw_cons = le16_to_cpu(*fp->tx_cons_sb);
	sw_cons = fp->tx_pkt_cons;

	while (sw_cons != hw_cons) {
		u16 pkt_cons;

		pkt_cons = TX_BD(sw_cons);

		/* prefetch(bp->tx_buf_ring[pkt_cons].skb); */

		DP(NETIF_MSG_TX_DONE, "hw_cons %u sw_cons %u pkt_cons %u\n",
		   hw_cons, sw_cons, pkt_cons);

		/* if (NEXT_TX_IDX(sw_cons) != hw_cons) {
			prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
		   } */

		bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
		sw_cons++;
	}

	fp->tx_pkt_cons = sw_cons;
	fp->tx_bd_cons = bd_cons;

	/* Need to make the tx_cons update visible to start_xmit()
	 * before checking for netif_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	/* TBD need a thresh? */
	if (unlikely(netif_queue_stopped(bp->dev))) {

		netif_tx_lock(bp->dev);

		if (netif_queue_stopped(bp->dev) &&
		    (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
			netif_wake_queue(bp->dev);

		netif_tx_unlock(bp->dev);
	}
}
static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
			   union eth_rx_cqe *rr_cqe)
{
	struct bnx2x *bp = fp->bp;
	int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
	int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);

	DP(BNX2X_MSG_SP,
	   "fp %d cid %d got ramrod #%d state is %x type is %d\n",
	   FP_IDX(fp), cid, command, bp->state,
	   rr_cqe->ramrod_cqe.ramrod_type);

	switch (command | fp->state) {
	case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
						BNX2X_FP_STATE_OPENING):
		DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
		   cid);
		fp->state = BNX2X_FP_STATE_OPEN;
		break;

	case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
		DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
		   cid);
		fp->state = BNX2X_FP_STATE_HALTED;
		break;

	default:
		BNX2X_ERR("unexpected MC reply (%d) "
			  "fp->state is %x\n", command, fp->state);
		break;
	}
	mb(); /* force bnx2x_wait_ramrod() to see the change */

	switch (command | bp->state) {
	case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
		DP(NETIF_MSG_IFUP, "got setup ramrod\n");
		bp->state = BNX2X_STATE_OPEN;
		break;

	case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
		bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
		fp->state = BNX2X_FP_STATE_HALTED;
		break;

	case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
		bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
		break;

	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
		DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
		bp->set_mac_pending = 0;
		break;

	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
		break;

	default:
		BNX2X_ERR("unexpected MC reply (%d) bp->state is %x\n",
			  command, bp->state);
		break;
	}
	mb(); /* force bnx2x_wait_ramrod() to see the change */
}
static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct page *page = sw_buf->page;
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];

	/* Skip "next page" elements */
	if (!page)
		return;

	pci_unmap_page(bp->pdev, pci_unmap_addr(sw_buf, mapping),
		       BCM_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
	__free_pages(page, PAGES_PER_SGE_SHIFT);
}
static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
					   struct bnx2x_fastpath *fp, int last)
{
	int i;

	for (i = 0; i < last; i++)
		bnx2x_free_rx_sge(bp, fp, i);
}
static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
	dma_addr_t mapping;

	if (unlikely(page == NULL))
		return -ENOMEM;

	mapping = pci_map_page(bp->pdev, page, 0, BCM_PAGE_SIZE*PAGES_PER_SGE,
			       PCI_DMA_FROMDEVICE);
	if (unlikely(dma_mapping_error(mapping))) {
		__free_pages(page, PAGES_PER_SGE_SHIFT);
		return -ENOMEM;
	}

	sw_buf->page = page;
	pci_unmap_addr_set(sw_buf, mapping, mapping);

	sge->addr_hi = cpu_to_le32(U64_HI(mapping));
	sge->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}
static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct sk_buff *skb;
	struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
	struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
	dma_addr_t mapping;

	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
	if (unlikely(skb == NULL))
		return -ENOMEM;

	mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
				 PCI_DMA_FROMDEVICE);
	if (unlikely(dma_mapping_error(mapping))) {
		dev_kfree_skb(skb);
		return -ENOMEM;
	}

	rx_buf->skb = skb;
	pci_unmap_addr_set(rx_buf, mapping, mapping);

	rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}
/* note that we are not allocating a new skb,
 * we are just moving one from cons to prod
 * we are not creating a new mapping,
 * so there is no need to check for dma_mapping_error().
 */
static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
			       struct sk_buff *skb, u16 cons, u16 prod)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];

	pci_dma_sync_single_for_device(bp->pdev,
				       pci_unmap_addr(cons_rx_buf, mapping),
				       bp->rx_offset + RX_COPY_THRESH,
				       PCI_DMA_FROMDEVICE);

	prod_rx_buf->skb = cons_rx_buf->skb;
	pci_unmap_addr_set(prod_rx_buf, mapping,
			   pci_unmap_addr(cons_rx_buf, mapping));
	*prod_bd = *cons_bd;
}
static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
					     u16 idx)
{
	u16 last_max = fp->last_max_sge;

	if (SUB_S16(idx, last_max) > 0)
		fp->last_max_sge = idx;
}
static void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
{
	int i, j;

	for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
		int idx = RX_SGE_CNT * i - 1;

		for (j = 0; j < 2; j++) {
			SGE_MASK_CLEAR_BIT(fp, idx);
			idx--;
		}
	}
}
static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
				  struct eth_fast_path_rx_cqe *fp_cqe)
{
	struct bnx2x *bp = fp->bp;
	u16 sge_len = BCM_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
				     le16_to_cpu(fp_cqe->len_on_bd)) >>
		      BCM_PAGE_SHIFT;
	u16 last_max, last_elem, first_elem;
	u16 delta = 0;
	u16 i;

	/* First mark all used pages */
	for (i = 0; i < sge_len; i++)
		SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i])));

	DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
	   sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

	/* Here we assume that the last SGE index is the biggest */
	prefetch((void *)(fp->sge_mask));
	bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

	last_max = RX_SGE(fp->last_max_sge);
	last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
	first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;

	/* If ring is not full */
	if (last_elem + 1 != first_elem)
		last_elem++;

	/* Now update the prod */
	for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
		if (likely(fp->sge_mask[i]))
			break;

		fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
		delta += RX_SGE_MASK_ELEM_SZ;
	}

	fp->rx_sge_prod += delta;
	/* clear page-end entries */
	bnx2x_clear_sge_mask_next_elems(fp);

	DP(NETIF_MSG_RX_STATUS,
	   "fp->last_max_sge = %d fp->rx_sge_prod = %d\n",
	   fp->last_max_sge, fp->rx_sge_prod);
}
static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
{
	/* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
	memset(fp->sge_mask, 0xff,
	       (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));

	/* Clear the two last indices in the page to 1:
	   these are the indices that correspond to the "next" element,
	   hence will never be indicated and should be removed from
	   the calculations. */
	bnx2x_clear_sge_mask_next_elems(fp);
}
*fp
, u16 queue
,
1180 struct sk_buff
*skb
, u16 cons
, u16 prod
)
1182 struct bnx2x
*bp
= fp
->bp
;
1183 struct sw_rx_bd
*cons_rx_buf
= &fp
->rx_buf_ring
[cons
];
1184 struct sw_rx_bd
*prod_rx_buf
= &fp
->rx_buf_ring
[prod
];
1185 struct eth_rx_bd
*prod_bd
= &fp
->rx_desc_ring
[prod
];
1188 /* move empty skb from pool to prod and map it */
1189 prod_rx_buf
->skb
= fp
->tpa_pool
[queue
].skb
;
1190 mapping
= pci_map_single(bp
->pdev
, fp
->tpa_pool
[queue
].skb
->data
,
1191 bp
->rx_buf_use_size
, PCI_DMA_FROMDEVICE
);
1192 pci_unmap_addr_set(prod_rx_buf
, mapping
, mapping
);
1194 /* move partial skb from cons to pool (don't unmap yet) */
1195 fp
->tpa_pool
[queue
] = *cons_rx_buf
;
1197 /* mark bin state as start - print error if current state != stop */
1198 if (fp
->tpa_state
[queue
] != BNX2X_TPA_STOP
)
1199 BNX2X_ERR("start of bin not in stop [%d]\n", queue
);
1201 fp
->tpa_state
[queue
] = BNX2X_TPA_START
;
1203 /* point prod_bd to new skb */
1204 prod_bd
->addr_hi
= cpu_to_le32(U64_HI(mapping
));
1205 prod_bd
->addr_lo
= cpu_to_le32(U64_LO(mapping
));
1207 #ifdef BNX2X_STOP_ON_ERROR
1208 fp
->tpa_queue_used
|= (1 << queue
);
1209 #ifdef __powerpc64__
1210 DP(NETIF_MSG_RX_STATUS
, "fp->tpa_queue_used = 0x%lx\n",
1212 DP(NETIF_MSG_RX_STATUS
, "fp->tpa_queue_used = 0x%llx\n",
1214 fp
->tpa_queue_used
);
static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			       struct sk_buff *skb,
			       struct eth_fast_path_rx_cqe *fp_cqe,
			       u16 cqe_idx)
{
	struct sw_rx_page *rx_pg, old_rx_pg;
	u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
	u32 i, frag_len, frag_size, pages;
	int err;
	int j;

	frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
	pages = BCM_PAGE_ALIGN(frag_size) >> BCM_PAGE_SHIFT;

	/* This is needed in order to enable forwarding support */
	skb_shinfo(skb)->gso_size = min((u32)BCM_PAGE_SIZE,
					max(frag_size, (u32)len_on_bd));

#ifdef BNX2X_STOP_ON_ERROR
	if (pages > 8*PAGES_PER_SGE) {
		BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
			  pages, cqe_idx);
		BNX2X_ERR("fp_cqe->pkt_len = %d fp_cqe->len_on_bd = %d\n",
			  fp_cqe->pkt_len, len_on_bd);
	}
#endif

	/* Run through the SGL and compose the fragmented skb */
	for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
		u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j]));

		/* FW gives the indices of the SGE as if the ring is an array
		   (meaning that "next" element will consume 2 indices) */
		frag_len = min(frag_size, (u32)(BCM_PAGE_SIZE*PAGES_PER_SGE));
		rx_pg = &fp->rx_page_ring[sge_idx];
		old_rx_pg = *rx_pg;

		/* If we fail to allocate a substitute page, we simply stop
		   where we are and drop the whole packet */
		err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
		if (unlikely(err)) {
			fp->rx_alloc_failed++;
			return err;
		}

		/* Unmap the page as we are going to pass it to the stack */
		pci_unmap_page(bp->pdev, pci_unmap_addr(&old_rx_pg, mapping),
			       BCM_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);

		/* Add one frag and update the appropriate fields in the skb */
		skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);

		skb->data_len += frag_len;
		skb->truesize += frag_len;
		skb->len += frag_len;

		frag_size -= frag_len;
	}

	return 0;
}
static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			   u16 queue, int pad, int len, union eth_rx_cqe *cqe,
			   u16 cqe_idx)
{
	struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
	struct sk_buff *skb = rx_buf->skb;
	struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);

	/* Unmap skb in the pool anyway, as we are going to change
	   pool entry status to BNX2X_TPA_STOP even if new skb allocation
	   fails. */
	pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
			 bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);

	/* if alloc failed drop the packet and keep the buffer in the bin */
	if (likely(new_skb)) {
		struct iphdr *iph;

		prefetch(((char *)(skb)) + 128);

		/* else fix ip xsum and give it to the stack */
		/* (no need to map the new skb) */
#ifdef BNX2X_STOP_ON_ERROR
		if (pad + len > bp->rx_buf_size) {
			BNX2X_ERR("skb_put is about to fail... "
				  "pad %d len %d rx_buf_size %d\n",
				  pad, len, bp->rx_buf_size);
		}
#endif

		skb_reserve(skb, pad);
		skb_put(skb, len);

		skb->protocol = eth_type_trans(skb, bp->dev);
		skb->ip_summed = CHECKSUM_UNNECESSARY;

		iph = (struct iphdr *)skb->data;
		iph->check = 0;
		iph->check = ip_fast_csum((u8 *)iph, iph->ihl);

		if (!bnx2x_fill_frag_skb(bp, fp, skb,
					 &cqe->fast_path_cqe, cqe_idx)) {
			if ((bp->vlgrp != NULL) &&
			    (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
			     PARSING_FLAGS_VLAN))
				vlan_hwaccel_receive_skb(skb, bp->vlgrp,
						le16_to_cpu(cqe->fast_path_cqe.
							    vlan_tag));
			else
				netif_receive_skb(skb);
		} else {
			DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
			   " - dropping packet!\n");
		}

		bp->dev->last_rx = jiffies;

		/* put new skb in bin */
		fp->tpa_pool[queue].skb = new_skb;

	} else {
		DP(NETIF_MSG_RX_STATUS,
		   "Failed to allocate new skb - dropping packet!\n");
		fp->rx_alloc_failed++;
	}

	fp->tpa_state[queue] = BNX2X_TPA_STOP;
}
static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
					struct bnx2x_fastpath *fp,
					u16 bd_prod, u16 rx_comp_prod,
					u16 rx_sge_prod)
{
	struct tstorm_eth_rx_producers rx_prods = {0};
	int i;

	/* Update producers */
	rx_prods.bd_prod = bd_prod;
	rx_prods.cqe_prod = rx_comp_prod;
	rx_prods.sge_prod = rx_sge_prod;

	for (i = 0; i < sizeof(struct tstorm_eth_rx_producers)/4; i++)
		REG_WR(bp, BAR_TSTRORM_INTMEM +
		       TSTORM_RX_PRODS_OFFSET(BP_PORT(bp), FP_CL_ID(fp)) + i*4,
		       ((u32 *)&rx_prods)[i]);

	DP(NETIF_MSG_RX_STATUS,
	   "Wrote: bd_prod %u cqe_prod %u sge_prod %u\n",
	   bd_prod, rx_comp_prod, rx_sge_prod);
}
static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
{
	struct bnx2x *bp = fp->bp;
	u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
	u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
	int rx_pkt = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return 0;
#endif

	/* CQ "next element" is of the size of the regular element,
	   that's why it's ok here */
	hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
	if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
		hw_comp_cons++;

	bd_cons = fp->rx_bd_cons;
	bd_prod = fp->rx_bd_prod;
	bd_prod_fw = bd_prod;
	sw_comp_cons = fp->rx_comp_cons;
	sw_comp_prod = fp->rx_comp_prod;

	/* Memory barrier necessary as speculative reads of the rx
	 * buffer can be ahead of the index in the status block
	 */
	rmb();

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]: hw_comp_cons %u sw_comp_cons %u\n",
	   FP_IDX(fp), hw_comp_cons, sw_comp_cons);

	while (sw_comp_cons != hw_comp_cons) {
		struct sw_rx_bd *rx_buf = NULL;
		struct sk_buff *skb;
		union eth_rx_cqe *cqe;
		u8 cqe_fp_flags;
		u16 len, pad, queue;

		comp_ring_cons = RCQ_BD(sw_comp_cons);
		bd_prod = RX_BD(bd_prod);
		bd_cons = RX_BD(bd_cons);

		cqe = &fp->rx_comp_ring[comp_ring_cons];
		cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;

		DP(NETIF_MSG_RX_STATUS, "CQE type %x err %x status %x"
		   " queue %x vlan %x len %u\n", CQE_TYPE(cqe_fp_flags),
		   cqe_fp_flags, cqe->fast_path_cqe.status_flags,
		   cqe->fast_path_cqe.rss_hash_result,
		   le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
		   le16_to_cpu(cqe->fast_path_cqe.pkt_len));

		/* is this a slowpath msg? */
		if (unlikely(CQE_TYPE(cqe_fp_flags))) {
			bnx2x_sp_event(fp, cqe);
			goto next_cqe;

		/* this is an rx packet */
		} else {
			rx_buf = &fp->rx_buf_ring[bd_cons];
			skb = rx_buf->skb;
			len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
			pad = cqe->fast_path_cqe.placement_offset;

			/* If CQE is marked both TPA_START and TPA_END
			   it is a non-TPA CQE */
			if ((!fp->disable_tpa) &&
			    (TPA_TYPE(cqe_fp_flags) !=
					(TPA_TYPE_START | TPA_TYPE_END))) {
				queue = cqe->fast_path_cqe.queue_index;

				if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
					DP(NETIF_MSG_RX_STATUS,
					   "calling tpa_start on queue %d\n",
					   queue);

					bnx2x_tpa_start(fp, queue, skb,
							bd_cons, bd_prod);
					goto next_rx;
				}

				if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
					DP(NETIF_MSG_RX_STATUS,
					   "calling tpa_stop on queue %d\n",
					   queue);

					if (!BNX2X_RX_SUM_FIX(cqe))
						BNX2X_ERR("STOP on none TCP "
							  "data\n");

					/* This is a size of the linear data */
					len = le16_to_cpu(cqe->fast_path_cqe.
								len_on_bd);
					bnx2x_tpa_stop(bp, fp, queue, pad,
						       len, cqe, comp_ring_cons);
#ifdef BNX2X_STOP_ON_ERROR
#endif

					bnx2x_update_sge_prod(fp,
							&cqe->fast_path_cqe);
					goto next_cqe;
				}
			}

			pci_dma_sync_single_for_device(bp->pdev,
					pci_unmap_addr(rx_buf, mapping),
						       pad + RX_COPY_THRESH,
						       PCI_DMA_FROMDEVICE);
			prefetch(((char *)(skb)) + 128);

			/* is this an error packet? */
			if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
				/* do we sometimes forward error packets anyway? */
				DP(NETIF_MSG_RX_ERR,
				   "ERROR flags %x rx packet %u\n",
				   cqe_fp_flags, sw_comp_cons);
				/* TBD make sure MC counts this as a drop */
				goto reuse_rx;
			}

			/* Since we don't have a jumbo ring
			 * copy small packets if mtu > 1500
			 */
			if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
			    (len <= RX_COPY_THRESH)) {
				struct sk_buff *new_skb;

				new_skb = netdev_alloc_skb(bp->dev,
							   len + pad);
				if (new_skb == NULL) {
					DP(NETIF_MSG_RX_ERR,
					   "ERROR packet dropped "
					   "because of alloc failure\n");
					fp->rx_alloc_failed++;
					goto reuse_rx;
				}

				skb_copy_from_linear_data_offset(skb, pad,
						    new_skb->data + pad, len);
				skb_reserve(new_skb, pad);
				skb_put(new_skb, len);

				bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);

				skb = new_skb;

			} else if (bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0) {
				pci_unmap_single(bp->pdev,
					pci_unmap_addr(rx_buf, mapping),
						 bp->rx_buf_use_size,
						 PCI_DMA_FROMDEVICE);
				skb_reserve(skb, pad);
				skb_put(skb, len);

			} else {
				DP(NETIF_MSG_RX_ERR,
				   "ERROR packet dropped because "
				   "of alloc failure\n");
				fp->rx_alloc_failed++;
reuse_rx:
				bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
				goto next_rx;
			}

			skb->protocol = eth_type_trans(skb, bp->dev);

			skb->ip_summed = CHECKSUM_NONE;
			if (bp->rx_csum && BNX2X_RX_SUM_OK(cqe))
				skb->ip_summed = CHECKSUM_UNNECESSARY;

			/* TBD do we pass bad csum packets in promisc */
		}

		if ((bp->vlgrp != NULL) &&
		    (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
		     PARSING_FLAGS_VLAN))
			vlan_hwaccel_receive_skb(skb, bp->vlgrp,
				le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
		else
			netif_receive_skb(skb);

		bp->dev->last_rx = jiffies;

next_rx:
		bd_cons = NEXT_RX_IDX(bd_cons);
		bd_prod = NEXT_RX_IDX(bd_prod);
		bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
		rx_pkt++;
next_cqe:
		sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
		sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);

		if (rx_pkt == budget)
			break;
	}

	fp->rx_bd_cons = bd_cons;
	fp->rx_bd_prod = bd_prod_fw;
	fp->rx_comp_cons = sw_comp_cons;
	fp->rx_comp_prod = sw_comp_prod;

	/* Update producers */
	bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
			     fp->rx_sge_prod);
	mmiowb(); /* keep prod updates ordered */

	fp->rx_pkt += rx_pkt;

	return rx_pkt;
}
static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
{
	struct bnx2x_fastpath *fp = fp_cookie;
	struct bnx2x *bp = fp->bp;
	struct net_device *dev = bp->dev;
	int index = FP_IDX(fp);

	DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
	   index, FP_SB_ID(fp));
	bnx2x_ack_sb(bp, FP_SB_ID(fp), USTORM_ID, 0, IGU_INT_DISABLE, 0);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	prefetch(fp->rx_cons_sb);
	prefetch(fp->tx_cons_sb);
	prefetch(&fp->status_blk->c_status_block.status_block_index);
	prefetch(&fp->status_blk->u_status_block.status_block_index);

	netif_rx_schedule(dev, &bnx2x_fp(bp, index, napi));

	return IRQ_HANDLED;
}
static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct bnx2x *bp = netdev_priv(dev);
	u16 status = bnx2x_ack_int(bp);
	u16 mask;

	/* Return here if interrupt is shared and it's not for us */
	if (unlikely(status == 0)) {
		DP(NETIF_MSG_INTR, "not our interrupt!\n");
		return IRQ_NONE;
	}
	DP(NETIF_MSG_INTR, "got an interrupt status %u\n", status);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

	mask = 0x2 << bp->fp[0].sb_id;
	if (status & mask) {
		struct bnx2x_fastpath *fp = &bp->fp[0];

		prefetch(fp->rx_cons_sb);
		prefetch(fp->tx_cons_sb);
		prefetch(&fp->status_blk->c_status_block.status_block_index);
		prefetch(&fp->status_blk->u_status_block.status_block_index);

		netif_rx_schedule(dev, &bnx2x_fp(bp, 0, napi));
	}

	if (unlikely(status & 0x1)) {
		schedule_work(&bp->sp_task);
	}

	DP(NETIF_MSG_INTR, "got an unknown interrupt! (status %u)\n",
	   status);

	return IRQ_HANDLED;
}
/* end of fast path */

static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);

/*
 * General service functions
 */
static int bnx2x_hw_lock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	u8 port = BP_PORT(bp);
	int cnt;

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		DP(NETIF_MSG_HW,
		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return -EINVAL;
	}

	/* Validating that the resource is not already taken */
	lock_status = REG_RD(bp, MISC_REG_DRIVER_CONTROL_1 + port*8);
	if (lock_status & resource_bit) {
		DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
		   lock_status, resource_bit);
		return -EEXIST;
	}

	/* Try for 1 second every 5ms */
	for (cnt = 0; cnt < 200; cnt++) {
		/* Try to acquire the lock */
		REG_WR(bp, MISC_REG_DRIVER_CONTROL_1 + port*8 + 4,
		       resource_bit);
		lock_status = REG_RD(bp, MISC_REG_DRIVER_CONTROL_1 + port*8);
		if (lock_status & resource_bit)
			return 0;

		msleep(5);
	}
	DP(NETIF_MSG_HW, "Timeout\n");
	return -EAGAIN;
}
static int bnx2x_hw_unlock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	u8 port = BP_PORT(bp);

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		DP(NETIF_MSG_HW,
		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return -EINVAL;
	}

	/* Validating that the resource is currently taken */
	lock_status = REG_RD(bp, MISC_REG_DRIVER_CONTROL_1 + port*8);
	if (!(lock_status & resource_bit)) {
		DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
		   lock_status, resource_bit);
		return -EFAULT;
	}

	REG_WR(bp, MISC_REG_DRIVER_CONTROL_1 + port*8, resource_bit);
	return 0;
}
/* HW Lock for shared dual port PHYs */
static void bnx2x_phy_hw_lock(struct bnx2x *bp)
{
	u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);

	mutex_lock(&bp->port.phy_mutex);

	if ((ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072) ||
	    (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073))
		bnx2x_hw_lock(bp, HW_LOCK_RESOURCE_8072_MDIO);
}

static void bnx2x_phy_hw_unlock(struct bnx2x *bp)
{
	u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);

	if ((ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072) ||
	    (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073))
		bnx2x_hw_unlock(bp, HW_LOCK_RESOURCE_8072_MDIO);

	mutex_unlock(&bp->port.phy_mutex);
}
int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ BP_PORT(bp);
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	bnx2x_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
	/* read GPIO and mask except the float bits */
	gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);

	switch (mode) {
	case MISC_REGISTERS_GPIO_OUTPUT_LOW:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
		   gpio_num, gpio_shift);
		/* clear FLOAT and set CLR */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
		break;

	case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
		   gpio_num, gpio_shift);
		/* clear FLOAT and set SET */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
		break;

	case MISC_REGISTERS_GPIO_INPUT_HI_Z:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
		   gpio_num, gpio_shift);
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_GPIO, gpio_reg);
	bnx2x_hw_unlock(bp, HW_LOCK_RESOURCE_GPIO);

	return 0;
}
static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
{
	u32 spio_mask = (1 << spio_num);
	u32 spio_reg;

	if ((spio_num < MISC_REGISTERS_SPIO_4) ||
	    (spio_num > MISC_REGISTERS_SPIO_7)) {
		BNX2X_ERR("Invalid SPIO %d\n", spio_num);
		return -EINVAL;
	}

	bnx2x_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
	/* read SPIO and mask except the float bits */
	spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);

	switch (mode) {
	case MISC_REGISTERS_SPIO_OUTPUT_LOW:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
		/* clear FLOAT and set CLR */
		spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
		break;

	case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
		/* clear FLOAT and set SET */
		spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
		break;

	case MISC_REGISTERS_SPIO_INPUT_HI_Z:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
		spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_SPIO, spio_reg);
	bnx2x_hw_unlock(bp, HW_LOCK_RESOURCE_SPIO);

	return 0;
}
static void bnx2x_calc_fc_adv(struct bnx2x *bp)
{
	switch (bp->link_vars.ieee_fc) {
	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
		bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
					  ADVERTISED_Pause);
		break;
	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
		bp->port.advertising |= (ADVERTISED_Asym_Pause |
					 ADVERTISED_Pause);
		break;
	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
		bp->port.advertising |= ADVERTISED_Asym_Pause;
		break;
	default:
		bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
					  ADVERTISED_Pause);
		break;
	}
}
static void bnx2x_link_report(struct bnx2x *bp)
{
	if (bp->link_vars.link_up) {
		if (bp->state == BNX2X_STATE_OPEN)
			netif_carrier_on(bp->dev);
		printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);

		printk("%d Mbps ", bp->link_vars.line_speed);

		if (bp->link_vars.duplex == DUPLEX_FULL)
			printk("full duplex");
		else
			printk("half duplex");

		if (bp->link_vars.flow_ctrl != FLOW_CTRL_NONE) {
			if (bp->link_vars.flow_ctrl & FLOW_CTRL_RX) {
				printk(", receive ");
				if (bp->link_vars.flow_ctrl & FLOW_CTRL_TX)
					printk("& transmit ");
			} else {
				printk(", transmit ");
			}
			printk("flow control ON");
		}

	} else { /* link_down */
		netif_carrier_off(bp->dev);
		printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
	}
}
static u8 bnx2x_initial_phy_init(struct bnx2x *bp)
{
	u8 rc;

	/* Initialize link parameters structure variables */
	bp->link_params.mtu = bp->dev->mtu;

	bnx2x_phy_hw_lock(bp);
	rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
	bnx2x_phy_hw_unlock(bp);

	if (bp->link_vars.link_up)
		bnx2x_link_report(bp);

	bnx2x_calc_fc_adv(bp);

	return rc;
}
static void bnx2x_link_set(struct bnx2x *bp)
{
	bnx2x_phy_hw_lock(bp);
	bnx2x_phy_init(&bp->link_params, &bp->link_vars);
	bnx2x_phy_hw_unlock(bp);

	bnx2x_calc_fc_adv(bp);
}
static void bnx2x__link_reset(struct bnx2x *bp)
{
	bnx2x_phy_hw_lock(bp);
	bnx2x_link_reset(&bp->link_params, &bp->link_vars);
	bnx2x_phy_hw_unlock(bp);
}
static u8 bnx2x_link_test(struct bnx2x *bp)
{
	u8 rc;

	bnx2x_phy_hw_lock(bp);
	rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
	bnx2x_phy_hw_unlock(bp);

	return rc;
}
/* Calculates the sum of vn_min_rates.
   It's needed for further normalizing of the min_rates.

   0 - if all the min_rates are 0.
   In the latter case fairness algorithm should be deactivated.
   If not all min_rates are zero then those that are zeroes will be set to 1.
 */
static u32 bnx2x_calc_vn_wsum(struct bnx2x *bp)
{
	int i, port = BP_PORT(bp);
	u32 wsum = 0;

	for (i = 0; i < E1HVN_MAX; i++) {
		u32 vn_cfg =
			SHMEM_RD(bp, mf_cfg.func_mf_config[2*i + port].config);
		u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
				   FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
		if (!(vn_cfg & FUNC_MF_CFG_FUNC_HIDE)) {
			/* If min rate is zero - set it to 1 */
			if (!vn_min_rate)
				vn_min_rate = DEF_MIN_RATE;

			wsum += vn_min_rate;
		}
	}

	/* ... only if all min rates are zeros - disable FAIRNESS */
	return wsum;
}
static void bnx2x_init_port_minmax(struct bnx2x *bp,
				   int en_fness,
				   u16 port_rate,
				   struct cmng_struct_per_port *m_cmng_port)
{
	u32 r_param = port_rate / 8;
	int port = BP_PORT(bp);
	int i;

	memset(m_cmng_port, 0, sizeof(struct cmng_struct_per_port));

	/* Enable minmax only if we are in e1hmf mode */
	if (IS_E1HMF(bp)) {
		u32 fair_periodic_timeout_usec;
		u32 t_fair;

		/* Enable rate shaping and fairness */
		m_cmng_port->flags.cmng_vn_enable = 1;
		m_cmng_port->flags.fairness_enable = en_fness ? 1 : 0;
		m_cmng_port->flags.rate_shaping_enable = 1;

		if (!en_fness)
			DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
			   " fairness will be disabled\n");

		/* 100 usec in SDM ticks = 25 since each tick is 4 usec */
		m_cmng_port->rs_vars.rs_periodic_timeout =
						RS_PERIODIC_TIMEOUT_USEC / 4;

		/* this is the threshold below which no timer arming will occur
		   1.25 coefficient is for the threshold to be a little bigger
		   than the real time, to compensate for timer in-accuracy */
		m_cmng_port->rs_vars.rs_threshold =
				(RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;

		/* resolution of fairness timer */
		fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
		/* for 10G it is 1000usec. for 1G it is 10000usec. */
		t_fair = T_FAIR_COEF / port_rate;

		/* this is the threshold below which we won't arm
		   the timer anymore */
		m_cmng_port->fair_vars.fair_threshold = QM_ARB_BYTES;

		/* we multiply by 1e3/8 to get bytes/msec.
		   We don't want the credits to pass a credit
		   of the T_FAIR*FAIR_MEM (algorithm resolution) */
		m_cmng_port->fair_vars.upper_bound =
						r_param * t_fair * FAIR_MEM;
		/* since each tick is 4 usec */
		m_cmng_port->fair_vars.fairness_timeout =
						fair_periodic_timeout_usec / 4;

	} else {
		/* Disable rate shaping and fairness */
		m_cmng_port->flags.cmng_vn_enable = 0;
		m_cmng_port->flags.fairness_enable = 0;
		m_cmng_port->flags.rate_shaping_enable = 0;

		DP(NETIF_MSG_IFUP,
		   "Single function mode minmax will be disabled\n");
	}

	/* Store it to internal memory */
	for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
		REG_WR(bp, BAR_XSTRORM_INTMEM +
		       XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
		       ((u32 *)(m_cmng_port))[i]);
}
static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func,
				 u32 wsum, u16 port_rate,
				 struct cmng_struct_per_port *m_cmng_port)
{
	struct rate_shaping_vars_per_vn m_rs_vn;
	struct fairness_vars_per_vn m_fair_vn;
	u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
	u16 vn_min_rate, vn_max_rate;
	int i;

	/* If function is hidden - set min and max to zeroes */
	if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
		vn_min_rate = 0;
		vn_max_rate = 0;

	} else {
		vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
				FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
		/* If FAIRNESS is enabled (not all min rates are zeroes) and
		   if current min rate is zero - set it to 1.
		   This is a requirement of the algorithm. */
		if ((vn_min_rate == 0) && wsum)
			vn_min_rate = DEF_MIN_RATE;
		vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
				FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
	}

	DP(NETIF_MSG_IFUP, "func %d: vn_min_rate=%d vn_max_rate=%d "
	   "wsum=%d\n", func, vn_min_rate, vn_max_rate, wsum);

	memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
	memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));

	/* global vn counter - maximal Mbps for this vn */
	m_rs_vn.vn_counter.rate = vn_max_rate;

	/* quota - number of bytes transmitted in this period */
	m_rs_vn.vn_counter.quota =
				(vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;

#ifdef BNX2X_PER_PROT_QOS
	/* per protocol counter */
	for (protocol = 0; protocol < NUM_OF_PROTOCOLS; protocol++) {
		/* maximal Mbps for this protocol */
		m_rs_vn.protocol_counters[protocol].rate =
						protocol_max_rate[protocol];
		/* the quota in each timer period -
		   number of bytes transmitted in this period */
		m_rs_vn.protocol_counters[protocol].quota =
			(u32)(rs_periodic_timeout_usec *
			      (m_rs_vn.protocol_counters[protocol].rate / 8));
	}
#endif

	if (wsum) {
		/* credit for each period of the fairness algorithm:
		   number of bytes in T_FAIR (the vn share the port rate).
		   wsum should not be larger than 10000, thus
		   T_FAIR_COEF / (8 * wsum) will always be greater than zero */
		m_fair_vn.vn_credit_delta =
			max((u64)(vn_min_rate * (T_FAIR_COEF / (8 * wsum))),
			    (u64)(m_cmng_port->fair_vars.fair_threshold * 2));
		DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta=%d\n",
		   m_fair_vn.vn_credit_delta);
	}

#ifdef BNX2X_PER_PROT_QOS
	{
		u32 protocolWeightSum = 0;

		for (protocol = 0; protocol < NUM_OF_PROTOCOLS; protocol++)
			protocolWeightSum +=
					drvInit.protocol_min_rate[protocol];
		/* per protocol counter -
		   NOT NEEDED IF NO PER-PROTOCOL CONGESTION MANAGEMENT */
		if (protocolWeightSum > 0) {
			for (protocol = 0;
			     protocol < NUM_OF_PROTOCOLS; protocol++)
				/* credit for each period of the
				   fairness algorithm - number of bytes in
				   T_FAIR (the protocol share the vn rate) */
				m_fair_vn.protocol_credit_delta[protocol] =
					(u32)((vn_min_rate / 8) * t_fair *
					      protocol_min_rate /
					      protocolWeightSum);
		}
	}
#endif

	/* Store it to internal memory */
	for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
		REG_WR(bp, BAR_XSTRORM_INTMEM +
		       XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
		       ((u32 *)(&m_rs_vn))[i]);

	for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
		REG_WR(bp, BAR_XSTRORM_INTMEM +
		       XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
		       ((u32 *)(&m_fair_vn))[i]);
}
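
/* Link handling: on a link change the statistics machine is stopped and
 * restarted around the PHY update, other functions on the same port are
 * notified through a GENERAL_ATTN attention, and on E1H the rate-shaping
 * and fairness contexts are re-initialized for the new line speed.
 */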
/* This function is called upon link interrupt */
static void bnx2x_link_attn(struct bnx2x *bp)
{
	int vn;

	/* Make sure that we are synced with the current statistics */
	bnx2x_stats_handle(bp, STATS_EVENT_STOP);

	bnx2x_phy_hw_lock(bp);
	bnx2x_link_update(&bp->link_params, &bp->link_vars);
	bnx2x_phy_hw_unlock(bp);

	if (bp->link_vars.link_up) {

		if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
			struct host_port_stats *pstats;

			pstats = bnx2x_sp(bp, port_stats);
			/* reset old bmac stats */
			memset(&(pstats->mac_stx[0]), 0,
			       sizeof(struct mac_stx));
		}

		if ((bp->state == BNX2X_STATE_OPEN) ||
		    (bp->state == BNX2X_STATE_DISABLED))
			bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
	}

	/* indicate link status */
	bnx2x_link_report(bp);

	if (IS_E1HMF(bp)) {
		int func;

		for (vn = VN_0; vn < E1HVN_MAX; vn++) {
			if (vn == BP_E1HVN(bp))
				continue;

			func = ((vn << 1) | BP_PORT(bp));

			/* Set the attention towards other drivers
			   on the same port */
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
			       (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
		}
	}

	if (CHIP_IS_E1H(bp) && (bp->link_vars.line_speed > 0)) {
		struct cmng_struct_per_port m_cmng_port;
		u32 wsum;
		int port = BP_PORT(bp);

		/* Init RATE SHAPING and FAIRNESS contexts */
		wsum = bnx2x_calc_vn_wsum(bp);
		bnx2x_init_port_minmax(bp, (int)wsum,
					bp->link_vars.line_speed,
					&m_cmng_port);

		for (vn = VN_0; vn < E1HVN_MAX; vn++)
			bnx2x_init_vn_minmax(bp, 2*vn + port,
					wsum, bp->link_vars.line_speed,
					&m_cmng_port);
	}
}
static void bnx2x__link_status_update(struct bnx2x *bp)
{
	if (bp->state != BNX2X_STATE_OPEN)
		return;

	bnx2x_link_status_update(&bp->link_params, &bp->link_vars);

	if (bp->link_vars.link_up)
		bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
	else
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);

	/* indicate link status */
	bnx2x_link_report(bp);
}
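
/* Runs on the function that takes over as PMF (Port Management Function):
 * re-enables the NIG attention for this function and kicks the statistics
 * state machine with a PMF event.
 */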
static void bnx2x_pmf_update(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 val;

	bp->port.pmf = 1;
	DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);

	/* enable nig attention */
	val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
	REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
	REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);

	bnx2x_stats_handle(bp, STATS_EVENT_PMF);
}
/*
 * General service functions
 */

/* the slow path queue is odd since completions arrive on the fastpath ring */
static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
			 u32 data_hi, u32 data_lo, int common)
{
	int func = BP_FUNC(bp);

	DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
	   "SPQE (%x:%x) command %d hw_cid %x data (%x:%x) left %x\n",
	   (u32)U64_HI(bp->spq_mapping), (u32)(U64_LO(bp->spq_mapping) +
	   (void *)bp->spq_prod_bd - (void *)bp->spq), command,
	   HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return -EIO;
#endif

	spin_lock_bh(&bp->spq_lock);

	if (!bp->spq_left) {
		BNX2X_ERR("BUG! SPQ ring full!\n");
		spin_unlock_bh(&bp->spq_lock);
		return -EBUSY;
	}

	/* CID needs port number to be encoded in it */
	bp->spq_prod_bd->hdr.conn_and_cmd_data =
			cpu_to_le32(((command << SPE_HDR_CMD_ID_SHIFT) |
				     HW_CID(bp, cid)));
	bp->spq_prod_bd->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
	if (common)
		bp->spq_prod_bd->hdr.type |=
			cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));

	bp->spq_prod_bd->data.mac_config_addr.hi = cpu_to_le32(data_hi);
	bp->spq_prod_bd->data.mac_config_addr.lo = cpu_to_le32(data_lo);

	bp->spq_left--;

	if (bp->spq_prod_bd == bp->spq_last_bd) {
		bp->spq_prod_bd = bp->spq;
		bp->spq_prod_idx = 0;
		DP(NETIF_MSG_TIMER, "end of spq\n");

	} else {
		bp->spq_prod_bd++;
		bp->spq_prod_idx++;
	}

	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
	       bp->spq_prod_idx);

	spin_unlock_bh(&bp->spq_lock);
	return 0;
}
/* acquire split MCP access lock register */
static int bnx2x_lock_alr(struct bnx2x *bp)
{
	u32 i, j, val;
	int rc = 0;

	might_sleep();
	i = 100;
	for (j = 0; j < i*10; j++) {
		val = (1UL << 31);
		REG_WR(bp, GRCBASE_MCP + 0x9c, val);
		val = REG_RD(bp, GRCBASE_MCP + 0x9c);
		if (val & (1L << 31))
			break;

		msleep(5);
	}

	if (!(val & (1L << 31))) {
		BNX2X_ERR("Cannot acquire nvram interface\n");
		rc = -EBUSY;
	}

	return rc;
}

/* Release split MCP access lock register */
static void bnx2x_unlock_alr(struct bnx2x *bp)
{
	u32 val = 0;

	REG_WR(bp, GRCBASE_MCP + 0x9c, val);
}
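
/* Samples the default status block written by the chip and returns a bitmask
 * telling which per-storm indices (attention, C/U/X/T storm) have advanced
 * since the last pass.
 */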
static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
{
	struct host_def_status_block *def_sb = bp->def_status_blk;
	u16 rc = 0;

	barrier(); /* status block is written to by the chip */

	if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
		bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
		rc |= 1;
	}
	if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
		bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
		rc |= 2;
	}
	if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
		bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
		rc |= 4;
	}
	if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
		bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
		rc |= 8;
	}
	if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
		bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
		rc |= 16;
	}

	return rc;
}
/*
 * slow path service functions
 */

static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	u32 igu_addr = (IGU_ADDR_ATTN_BITS_SET + IGU_FUNC_BASE * func) * 8;
	u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
			      MISC_REG_AEU_MASK_ATTN_FUNC_0;
	u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
				       NIG_REG_MASK_INTERRUPT_PORT0;

	if (~bp->aeu_mask & (asserted & 0xff))
		BNX2X_ERR("IGU ERROR\n");
	if (bp->attn_state & asserted)
		BNX2X_ERR("IGU ERROR\n");

	DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n",
	   bp->aeu_mask, asserted);
	bp->aeu_mask &= ~(asserted & 0xff);
	DP(NETIF_MSG_HW, "after masking: aeu_mask %x\n", bp->aeu_mask);

	REG_WR(bp, aeu_addr, bp->aeu_mask);

	bp->attn_state |= asserted;

	if (asserted & ATTN_HARD_WIRED_MASK) {
		if (asserted & ATTN_NIG_FOR_FUNC) {

			/* save nig interrupt mask */
			bp->nig_mask = REG_RD(bp, nig_int_mask_addr);
			REG_WR(bp, nig_int_mask_addr, 0);

			bnx2x_link_attn(bp);

			/* handle unicore attn? */
		}
		if (asserted & ATTN_SW_TIMER_4_FUNC)
			DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");

		if (asserted & GPIO_2_FUNC)
			DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");

		if (asserted & GPIO_3_FUNC)
			DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");

		if (asserted & GPIO_4_FUNC)
			DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");

		if (port == 0) {
			if (asserted & ATTN_GENERAL_ATTN_1) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_2) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_3) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
			}
		} else {
			if (asserted & ATTN_GENERAL_ATTN_4) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_5) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_6) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
			}
		}

	} /* if hardwired */

	DP(NETIF_MSG_HW, "about to mask 0x%08x at IGU addr 0x%x\n",
	   asserted, BAR_IGU_INTMEM + igu_addr);
	REG_WR(bp, BAR_IGU_INTMEM + igu_addr, asserted);

	/* now set back the mask */
	if (asserted & ATTN_NIG_FOR_FUNC)
		REG_WR(bp, nig_int_mask_addr, bp->nig_mask);
}
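
/* The bnx2x_attn_int_deasserted[0-3]() helpers below each handle one 32-bit
 * AEU signal group; they are called with the attention bits already masked
 * by the attention group the deasserted bit belongs to.
 */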
static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
{
	int port = BP_PORT(bp);
	int reg_offset;
	u32 val;

	reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
			     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);

	if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {

		val = REG_RD(bp, reg_offset);
		val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("SPIO5 hw attention\n");

		switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
		case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
			/* Fan failure attention */

			/* The PHY reset is controlled by GPIO 1 */
			bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
				       MISC_REGISTERS_GPIO_OUTPUT_LOW);
			/* Low power mode is controlled by GPIO 2 */
			bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
				       MISC_REGISTERS_GPIO_OUTPUT_LOW);
			/* mark the failure */
			bp->link_params.ext_phy_config &=
					~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
			bp->link_params.ext_phy_config |=
					PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
			SHMEM_WR(bp,
				 dev_info.port_hw_config[port].
							external_phy_config,
				 bp->link_params.ext_phy_config);
			/* log the failure */
			printk(KERN_ERR PFX "Fan Failure on Network"
			       " Controller %s has caused the driver to"
			       " shutdown the card to prevent permanent"
			       " damage. Please contact Dell Support for"
			       " assistance\n", bp->dev->name);
			break;

		default:
			break;
		}
	}

	if (attn & HW_INTERRUT_ASSERT_SET_0) {

		val = REG_RD(bp, reg_offset);
		val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
			  (attn & HW_INTERRUT_ASSERT_SET_0));
	}
}
static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
{
	u32 val;

	if (attn & BNX2X_DOORQ_ASSERT) {

		val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
		BNX2X_ERR("DB hw attention 0x%x\n", val);
		/* DORQ discard attention */
		if (val & 0x2)
			BNX2X_ERR("FATAL error from DORQ\n");
	}

	if (attn & HW_INTERRUT_ASSERT_SET_1) {

		int port = BP_PORT(bp);
		int reg_offset;

		reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
				     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);

		val = REG_RD(bp, reg_offset);
		val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
			  (attn & HW_INTERRUT_ASSERT_SET_1));
	}
}
static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
{
	u32 val;

	if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {

		val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
		BNX2X_ERR("CFC hw attention 0x%x\n", val);
		/* CFC error attention */
		if (val & 0x2)
			BNX2X_ERR("FATAL error from CFC\n");
	}

	if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {

		val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
		BNX2X_ERR("PXP hw attention 0x%x\n", val);
		/* RQ_USDMDP_FIFO_OVERFLOW */
		if (val & 0x18000)
			BNX2X_ERR("FATAL error from PXP\n");
	}

	if (attn & HW_INTERRUT_ASSERT_SET_2) {

		int port = BP_PORT(bp);
		int reg_offset;

		reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
				     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);

		val = REG_RD(bp, reg_offset);
		val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
			  (attn & HW_INTERRUT_ASSERT_SET_2));
	}
}
static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
{
	u32 val;

	if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {

		if (attn & BNX2X_PMF_LINK_ASSERT) {
			int func = BP_FUNC(bp);

			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
			bnx2x__link_status_update(bp);
			if (SHMEM_RD(bp, func_mb[func].drv_status) &
							DRV_STATUS_PMF)
				bnx2x_pmf_update(bp);

		} else if (attn & BNX2X_MC_ASSERT_BITS) {

			BNX2X_ERR("MC assert!\n");
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);

		} else if (attn & BNX2X_MCP_ASSERT) {

			BNX2X_ERR("MCP assert!\n");
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);

		} else
			BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
	}

	if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
		BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
		if (attn & BNX2X_GRC_TIMEOUT) {
			val = CHIP_IS_E1H(bp) ?
				REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
			BNX2X_ERR("GRC time-out 0x%08x\n", val);
		}
		if (attn & BNX2X_GRC_RSV) {
			val = CHIP_IS_E1H(bp) ?
				REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
			BNX2X_ERR("GRC reserved 0x%08x\n", val);
		}
		REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
	}
}
static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
{
	struct attn_route attn;
	struct attn_route group_mask;
	int port = BP_PORT(bp);
	int index;
	u32 reg_addr;
	u32 val;

	/* need to take HW lock because MCP or other port might also
	   try to handle this event */
	bnx2x_lock_alr(bp);

	attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
	attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
	attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
	attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
	DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
	   attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);

	for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
		if (deasserted & (1 << index)) {
			group_mask = bp->attn_group[index];

			DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
			   index, group_mask.sig[0], group_mask.sig[1],
			   group_mask.sig[2], group_mask.sig[3]);

			bnx2x_attn_int_deasserted3(bp,
					attn.sig[3] & group_mask.sig[3]);
			bnx2x_attn_int_deasserted1(bp,
					attn.sig[1] & group_mask.sig[1]);
			bnx2x_attn_int_deasserted2(bp,
					attn.sig[2] & group_mask.sig[2]);
			bnx2x_attn_int_deasserted0(bp,
					attn.sig[0] & group_mask.sig[0]);

			if ((attn.sig[0] & group_mask.sig[0] &
						HW_PRTY_ASSERT_SET_0) ||
			    (attn.sig[1] & group_mask.sig[1] &
						HW_PRTY_ASSERT_SET_1) ||
			    (attn.sig[2] & group_mask.sig[2] &
						HW_PRTY_ASSERT_SET_2))
				BNX2X_ERR("FATAL HW block parity attention\n");
		}
	}

	bnx2x_unlock_alr(bp);

	reg_addr = (IGU_ADDR_ATTN_BITS_CLR + IGU_FUNC_BASE * BP_FUNC(bp)) * 8;

	val = ~deasserted;
/*	DP(NETIF_MSG_INTR, "write 0x%08x to IGU addr 0x%x\n",
	   val, BAR_IGU_INTMEM + reg_addr); */
	REG_WR(bp, BAR_IGU_INTMEM + reg_addr, val);

	if (bp->aeu_mask & (deasserted & 0xff))
		BNX2X_ERR("IGU BUG!\n");
	if (~bp->attn_state & deasserted)
		BNX2X_ERR("IGU BUG!\n");

	reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
			  MISC_REG_AEU_MASK_ATTN_FUNC_0;

	DP(NETIF_MSG_HW, "aeu_mask %x\n", bp->aeu_mask);
	bp->aeu_mask |= (deasserted & 0xff);
	DP(NETIF_MSG_HW, "new mask %x\n", bp->aeu_mask);
	REG_WR(bp, reg_addr, bp->aeu_mask);

	DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
	bp->attn_state &= ~deasserted;
	DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
}
static void bnx2x_attn_int(struct bnx2x *bp)
{
	/* read local copy of bits */
	u32 attn_bits = bp->def_status_blk->atten_status_block.attn_bits;
	u32 attn_ack = bp->def_status_blk->atten_status_block.attn_bits_ack;
	u32 attn_state = bp->attn_state;

	/* look for changed bits */
	u32 asserted   =  attn_bits & ~attn_ack & ~attn_state;
	u32 deasserted = ~attn_bits &  attn_ack &  attn_state;

	DP(NETIF_MSG_HW,
	   "attn_bits %x attn_ack %x asserted %x deasserted %x\n",
	   attn_bits, attn_ack, asserted, deasserted);

	if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
		BNX2X_ERR("BAD attention state\n");

	/* handle bits that were raised */
	if (asserted)
		bnx2x_attn_int_asserted(bp, asserted);

	if (deasserted)
		bnx2x_attn_int_deasserted(bp, deasserted);
}
static void bnx2x_sp_task(struct work_struct *work)
{
	struct bnx2x *bp = container_of(work, struct bnx2x, sp_task);
	u16 status;

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(BNX2X_MSG_SP, "called but intr_sem not 0, returning\n");
		return;
	}

	status = bnx2x_update_dsb_idx(bp);
/*	if (status == 0)				     */
/*		BNX2X_ERR("spurious slowpath interrupt!\n"); */

	DP(BNX2X_MSG_SP, "got a slowpath interrupt (updated %x)\n", status);

	/* HW attentions */
	if (status & 0x1)
		bnx2x_attn_int(bp);

	/* CStorm events: query_stats, port delete ramrod */
	if (status & 0x2)
		bp->stats_pending = 0;

	bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, bp->def_att_idx,
		     IGU_INT_NOP, 1);
	bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
		     IGU_INT_NOP, 1);
	bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
		     IGU_INT_NOP, 1);
	bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
		     IGU_INT_NOP, 1);
	bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
		     IGU_INT_ENABLE, 1);
}
static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct bnx2x *bp = netdev_priv(dev);

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(BNX2X_MSG_SP, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

	bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, 0, IGU_INT_DISABLE, 0);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	schedule_work(&bp->sp_task);

	return IRQ_HANDLED;
}

/* end of slow path */
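
/* Statistics: the MACs and storms expose 64-bit counters as {hi, lo} 32-bit
 * pairs; the macros below do the 64-bit add and subtract with explicit carry
 * and borrow handling when folding hardware readings into the driver copies.
 */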
/****************************************************************************
* Macros
****************************************************************************/

/* sum[hi:lo] += add[hi:lo] */
#define ADD_64(s_hi, a_hi, s_lo, a_lo) \
	do { \
		s_lo += a_lo; \
		s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
	} while (0)

/* difference = minuend - subtrahend */
#define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
	do { \
		if (m_lo < s_lo) { \
			/* underflow */ \
			d_hi = m_hi - s_hi; \
			if (d_hi > 0) { \
				/* we can 'loan' 1 */ \
				d_hi--; \
				d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
			} else { \
				/* m_hi <= s_hi */ \
				d_hi = 0; \
				d_lo = 0; \
			} \
		} else { \
			/* m_lo >= s_lo */ \
			if (m_hi < s_hi) { \
				d_hi = 0; \
				d_lo = 0; \
			} else { \
				/* m_hi >= s_hi */ \
				d_hi = m_hi - s_hi; \
				d_lo = m_lo - s_lo; \
			} \
		} \
	} while (0)

#define UPDATE_STAT64(s, t) \
	do { \
		DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
			diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
		pstats->mac_stx[0].t##_hi = new->s##_hi; \
		pstats->mac_stx[0].t##_lo = new->s##_lo; \
		ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
		       pstats->mac_stx[1].t##_lo, diff.lo); \
	} while (0)

#define UPDATE_STAT64_NIG(s, t) \
	do { \
		DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
			diff.lo, new->s##_lo, old->s##_lo); \
		ADD_64(estats->t##_hi, diff.hi, \
		       estats->t##_lo, diff.lo); \
	} while (0)

/* sum[hi:lo] += add */
#define ADD_EXTEND_64(s_hi, s_lo, a) \
	do { \
		s_lo += a; \
		s_hi += (s_lo < a) ? 1 : 0; \
	} while (0)

#define UPDATE_EXTEND_STAT(s) \
	do { \
		ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
			      pstats->mac_stx[1].s##_lo, \
			      new->s); \
	} while (0)

#define UPDATE_EXTEND_TSTAT(s, t) \
	do { \
		diff = le32_to_cpu(tclient->s) - old_tclient->s; \
		old_tclient->s = le32_to_cpu(tclient->s); \
		ADD_EXTEND_64(fstats->t##_hi, fstats->t##_lo, diff); \
	} while (0)

#define UPDATE_EXTEND_XSTAT(s, t) \
	do { \
		diff = le32_to_cpu(xclient->s) - old_xclient->s; \
		old_xclient->s = le32_to_cpu(xclient->s); \
		ADD_EXTEND_64(fstats->t##_hi, fstats->t##_lo, diff); \
	} while (0)
/****************************************************************************
* General service functions
****************************************************************************/

static inline long bnx2x_hilo(u32 *hiref)
{
	u32 lo = *(hiref + 1);
#if (BITS_PER_LONG == 64)
	u32 hi = *hiref;

	return HILO_U64(hi, lo);
#else
	return lo;
#endif
}
/****************************************************************************
* Init service functions
****************************************************************************/

static void bnx2x_storm_stats_init(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);

	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func), 1);
	REG_WR(bp, BAR_XSTRORM_INTMEM +
	       XSTORM_STATS_FLAGS_OFFSET(func) + 4, 0);

	REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func), 1);
	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       TSTORM_STATS_FLAGS_OFFSET(func) + 4, 0);

	REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func), 0);
	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       CSTORM_STATS_FLAGS_OFFSET(func) + 4, 0);

	REG_WR(bp, BAR_XSTRORM_INTMEM +
	       XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
	       U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
	REG_WR(bp, BAR_XSTRORM_INTMEM +
	       XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
	       U64_HI(bnx2x_sp_mapping(bp, fw_stats)));

	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
	       U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
	       U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
}
static void bnx2x_storm_stats_post(struct bnx2x *bp)
{
	if (!bp->stats_pending) {
		struct eth_query_ramrod_data ramrod_data = {0};
		int rc;

		ramrod_data.drv_counter = bp->stats_counter++;
		ramrod_data.collect_port_1b = bp->port.pmf ? 1 : 0;
		ramrod_data.ctr_id_vector = (1 << BP_CL_ID(bp));

		rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
				   ((u32 *)&ramrod_data)[1],
				   ((u32 *)&ramrod_data)[0], 0);
		if (rc == 0) {
			/* stats ramrod has its own slot on the spq */
			bp->spq_left++;
			bp->stats_pending = 1;
		}
	}
}
static void bnx2x_stats_init(struct bnx2x *bp)
{
	int port = BP_PORT(bp);

	bp->executer_idx = 0;
	bp->stats_counter = 0;

	/* port stats */
	if (!BP_NOMCP(bp))
		bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
	else
		bp->port.port_stx = 0;
	DP(BNX2X_MSG_STATS, "port_stx 0x%x\n", bp->port.port_stx);

	memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
	bp->port.old_nig_stats.brb_discard =
			REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
	REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
		    &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
	REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
		    &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);

	/* function stats */
	memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
	memset(&bp->old_tclient, 0, sizeof(struct tstorm_per_client_stats));
	memset(&bp->old_xclient, 0, sizeof(struct xstorm_per_client_stats));
	memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats));

	bp->stats_state = STATS_STATE_DISABLED;
	if (IS_E1HMF(bp) && bp->port.pmf && bp->port.port_stx)
		bnx2x_stats_handle(bp, STATS_EVENT_PMF);
}
static void bnx2x_hw_stats_post(struct bnx2x *bp)
{
	struct dmae_command *dmae = &bp->stats_dmae;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	*stats_comp = DMAE_COMP_VAL;

	/* loader */
	if (bp->executer_idx) {
		int loader_idx = PMF_DMAE_C(bp);

		memset(dmae, 0, sizeof(struct dmae_command));

		dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
				DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
				DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
				DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
				DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
				(BP_PORT(bp) ? DMAE_CMD_PORT_1 :
					       DMAE_CMD_PORT_0) |
				(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
		dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
				     sizeof(struct dmae_command) *
				     (loader_idx + 1)) >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct dmae_command) >> 2;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		*stats_comp = 0;
		bnx2x_post_dmae(bp, dmae, loader_idx);

	} else if (bp->func_stx) {
		*stats_comp = 0;
		bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
	}
}
static int bnx2x_stats_comp(struct bnx2x *bp)
{
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);
	int cnt = 10;

	might_sleep();
	while (*stats_comp != DMAE_COMP_VAL) {
		if (!cnt) {
			BNX2X_ERR("timeout waiting for stats finished\n");
			break;
		}
		cnt--;
		msleep(1);
	}
	return 1;
}

/****************************************************************************
* Statistics service functions
****************************************************************************/
*bp
)
3120 struct dmae_command
*dmae
;
3122 int loader_idx
= PMF_DMAE_C(bp
);
3123 u32
*stats_comp
= bnx2x_sp(bp
, stats_comp
);
3126 if (!IS_E1HMF(bp
) || !bp
->port
.pmf
|| !bp
->port
.port_stx
) {
3127 BNX2X_ERR("BUG!\n");
3131 bp
->executer_idx
= 0;
3133 opcode
= (DMAE_CMD_SRC_GRC
| DMAE_CMD_DST_PCI
|
3135 DMAE_CMD_SRC_RESET
| DMAE_CMD_DST_RESET
|
3137 DMAE_CMD_ENDIANITY_B_DW_SWAP
|
3139 DMAE_CMD_ENDIANITY_DW_SWAP
|
3141 (BP_PORT(bp
) ? DMAE_CMD_PORT_1
: DMAE_CMD_PORT_0
) |
3142 (BP_E1HVN(bp
) << DMAE_CMD_E1HVN_SHIFT
));
3144 dmae
= bnx2x_sp(bp
, dmae
[bp
->executer_idx
++]);
3145 dmae
->opcode
= (opcode
| DMAE_CMD_C_DST_GRC
);
3146 dmae
->src_addr_lo
= bp
->port
.port_stx
>> 2;
3147 dmae
->src_addr_hi
= 0;
3148 dmae
->dst_addr_lo
= U64_LO(bnx2x_sp_mapping(bp
, port_stats
));
3149 dmae
->dst_addr_hi
= U64_HI(bnx2x_sp_mapping(bp
, port_stats
));
3150 dmae
->len
= DMAE_LEN32_RD_MAX
;
3151 dmae
->comp_addr_lo
= dmae_reg_go_c
[loader_idx
] >> 2;
3152 dmae
->comp_addr_hi
= 0;
3155 dmae
= bnx2x_sp(bp
, dmae
[bp
->executer_idx
++]);
3156 dmae
->opcode
= (opcode
| DMAE_CMD_C_DST_PCI
);
3157 dmae
->src_addr_lo
= (bp
->port
.port_stx
>> 2) + DMAE_LEN32_RD_MAX
;
3158 dmae
->src_addr_hi
= 0;
3159 dmae
->dst_addr_lo
= U64_LO(bnx2x_sp_mapping(bp
, port_stats
) +
3160 DMAE_LEN32_RD_MAX
* 4);
3161 dmae
->dst_addr_hi
= U64_HI(bnx2x_sp_mapping(bp
, port_stats
) +
3162 DMAE_LEN32_RD_MAX
* 4);
3163 dmae
->len
= (sizeof(struct host_port_stats
) >> 2) - DMAE_LEN32_RD_MAX
;
3164 dmae
->comp_addr_lo
= U64_LO(bnx2x_sp_mapping(bp
, stats_comp
));
3165 dmae
->comp_addr_hi
= U64_HI(bnx2x_sp_mapping(bp
, stats_comp
));
3166 dmae
->comp_val
= DMAE_COMP_VAL
;
3169 bnx2x_hw_stats_post(bp
);
3170 bnx2x_stats_comp(bp
);
3173 static void bnx2x_port_stats_init(struct bnx2x
*bp
)
3175 struct dmae_command
*dmae
;
3176 int port
= BP_PORT(bp
);
3177 int vn
= BP_E1HVN(bp
);
3179 int loader_idx
= PMF_DMAE_C(bp
);
3181 u32
*stats_comp
= bnx2x_sp(bp
, stats_comp
);
3184 if (!bp
->link_vars
.link_up
|| !bp
->port
.pmf
) {
3185 BNX2X_ERR("BUG!\n");
3189 bp
->executer_idx
= 0;
3192 opcode
= (DMAE_CMD_SRC_PCI
| DMAE_CMD_DST_GRC
|
3193 DMAE_CMD_C_DST_GRC
| DMAE_CMD_C_ENABLE
|
3194 DMAE_CMD_SRC_RESET
| DMAE_CMD_DST_RESET
|
3196 DMAE_CMD_ENDIANITY_B_DW_SWAP
|
3198 DMAE_CMD_ENDIANITY_DW_SWAP
|
3200 (port
? DMAE_CMD_PORT_1
: DMAE_CMD_PORT_0
) |
3201 (vn
<< DMAE_CMD_E1HVN_SHIFT
));
3203 if (bp
->port
.port_stx
) {
3205 dmae
= bnx2x_sp(bp
, dmae
[bp
->executer_idx
++]);
3206 dmae
->opcode
= opcode
;
3207 dmae
->src_addr_lo
= U64_LO(bnx2x_sp_mapping(bp
, port_stats
));
3208 dmae
->src_addr_hi
= U64_HI(bnx2x_sp_mapping(bp
, port_stats
));
3209 dmae
->dst_addr_lo
= bp
->port
.port_stx
>> 2;
3210 dmae
->dst_addr_hi
= 0;
3211 dmae
->len
= sizeof(struct host_port_stats
) >> 2;
3212 dmae
->comp_addr_lo
= dmae_reg_go_c
[loader_idx
] >> 2;
3213 dmae
->comp_addr_hi
= 0;
3219 dmae
= bnx2x_sp(bp
, dmae
[bp
->executer_idx
++]);
3220 dmae
->opcode
= opcode
;
3221 dmae
->src_addr_lo
= U64_LO(bnx2x_sp_mapping(bp
, func_stats
));
3222 dmae
->src_addr_hi
= U64_HI(bnx2x_sp_mapping(bp
, func_stats
));
3223 dmae
->dst_addr_lo
= bp
->func_stx
>> 2;
3224 dmae
->dst_addr_hi
= 0;
3225 dmae
->len
= sizeof(struct host_func_stats
) >> 2;
3226 dmae
->comp_addr_lo
= dmae_reg_go_c
[loader_idx
] >> 2;
3227 dmae
->comp_addr_hi
= 0;
3232 opcode
= (DMAE_CMD_SRC_GRC
| DMAE_CMD_DST_PCI
|
3233 DMAE_CMD_C_DST_GRC
| DMAE_CMD_C_ENABLE
|
3234 DMAE_CMD_SRC_RESET
| DMAE_CMD_DST_RESET
|
3236 DMAE_CMD_ENDIANITY_B_DW_SWAP
|
3238 DMAE_CMD_ENDIANITY_DW_SWAP
|
3240 (port
? DMAE_CMD_PORT_1
: DMAE_CMD_PORT_0
) |
3241 (vn
<< DMAE_CMD_E1HVN_SHIFT
));
3243 if (bp
->link_vars
.mac_type
== MAC_TYPE_BMAC
) {
3245 mac_addr
= (port
? NIG_REG_INGRESS_BMAC1_MEM
:
3246 NIG_REG_INGRESS_BMAC0_MEM
);
3248 /* BIGMAC_REGISTER_TX_STAT_GTPKT ..
3249 BIGMAC_REGISTER_TX_STAT_GTBYT */
3250 dmae
= bnx2x_sp(bp
, dmae
[bp
->executer_idx
++]);
3251 dmae
->opcode
= opcode
;
3252 dmae
->src_addr_lo
= (mac_addr
+
3253 BIGMAC_REGISTER_TX_STAT_GTPKT
) >> 2;
3254 dmae
->src_addr_hi
= 0;
3255 dmae
->dst_addr_lo
= U64_LO(bnx2x_sp_mapping(bp
, mac_stats
));
3256 dmae
->dst_addr_hi
= U64_HI(bnx2x_sp_mapping(bp
, mac_stats
));
3257 dmae
->len
= (8 + BIGMAC_REGISTER_TX_STAT_GTBYT
-
3258 BIGMAC_REGISTER_TX_STAT_GTPKT
) >> 2;
3259 dmae
->comp_addr_lo
= dmae_reg_go_c
[loader_idx
] >> 2;
3260 dmae
->comp_addr_hi
= 0;
3263 /* BIGMAC_REGISTER_RX_STAT_GR64 ..
3264 BIGMAC_REGISTER_RX_STAT_GRIPJ */
3265 dmae
= bnx2x_sp(bp
, dmae
[bp
->executer_idx
++]);
3266 dmae
->opcode
= opcode
;
3267 dmae
->src_addr_lo
= (mac_addr
+
3268 BIGMAC_REGISTER_RX_STAT_GR64
) >> 2;
3269 dmae
->src_addr_hi
= 0;
3270 dmae
->dst_addr_lo
= U64_LO(bnx2x_sp_mapping(bp
, mac_stats
) +
3271 offsetof(struct bmac_stats
, rx_stat_gr64_lo
));
3272 dmae
->dst_addr_hi
= U64_HI(bnx2x_sp_mapping(bp
, mac_stats
) +
3273 offsetof(struct bmac_stats
, rx_stat_gr64_lo
));
3274 dmae
->len
= (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ
-
3275 BIGMAC_REGISTER_RX_STAT_GR64
) >> 2;
3276 dmae
->comp_addr_lo
= dmae_reg_go_c
[loader_idx
] >> 2;
3277 dmae
->comp_addr_hi
= 0;
3280 } else if (bp
->link_vars
.mac_type
== MAC_TYPE_EMAC
) {
3282 mac_addr
= (port
? GRCBASE_EMAC1
: GRCBASE_EMAC0
);
3284 /* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
3285 dmae
= bnx2x_sp(bp
, dmae
[bp
->executer_idx
++]);
3286 dmae
->opcode
= opcode
;
3287 dmae
->src_addr_lo
= (mac_addr
+
3288 EMAC_REG_EMAC_RX_STAT_AC
) >> 2;
3289 dmae
->src_addr_hi
= 0;
3290 dmae
->dst_addr_lo
= U64_LO(bnx2x_sp_mapping(bp
, mac_stats
));
3291 dmae
->dst_addr_hi
= U64_HI(bnx2x_sp_mapping(bp
, mac_stats
));
3292 dmae
->len
= EMAC_REG_EMAC_RX_STAT_AC_COUNT
;
3293 dmae
->comp_addr_lo
= dmae_reg_go_c
[loader_idx
] >> 2;
3294 dmae
->comp_addr_hi
= 0;
3297 /* EMAC_REG_EMAC_RX_STAT_AC_28 */
3298 dmae
= bnx2x_sp(bp
, dmae
[bp
->executer_idx
++]);
3299 dmae
->opcode
= opcode
;
3300 dmae
->src_addr_lo
= (mac_addr
+
3301 EMAC_REG_EMAC_RX_STAT_AC_28
) >> 2;
3302 dmae
->src_addr_hi
= 0;
3303 dmae
->dst_addr_lo
= U64_LO(bnx2x_sp_mapping(bp
, mac_stats
) +
3304 offsetof(struct emac_stats
, rx_stat_falsecarriererrors
));
3305 dmae
->dst_addr_hi
= U64_HI(bnx2x_sp_mapping(bp
, mac_stats
) +
3306 offsetof(struct emac_stats
, rx_stat_falsecarriererrors
));
3308 dmae
->comp_addr_lo
= dmae_reg_go_c
[loader_idx
] >> 2;
3309 dmae
->comp_addr_hi
= 0;
3312 /* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
3313 dmae
= bnx2x_sp(bp
, dmae
[bp
->executer_idx
++]);
3314 dmae
->opcode
= opcode
;
3315 dmae
->src_addr_lo
= (mac_addr
+
3316 EMAC_REG_EMAC_TX_STAT_AC
) >> 2;
3317 dmae
->src_addr_hi
= 0;
3318 dmae
->dst_addr_lo
= U64_LO(bnx2x_sp_mapping(bp
, mac_stats
) +
3319 offsetof(struct emac_stats
, tx_stat_ifhcoutoctets
));
3320 dmae
->dst_addr_hi
= U64_HI(bnx2x_sp_mapping(bp
, mac_stats
) +
3321 offsetof(struct emac_stats
, tx_stat_ifhcoutoctets
));
3322 dmae
->len
= EMAC_REG_EMAC_TX_STAT_AC_COUNT
;
3323 dmae
->comp_addr_lo
= dmae_reg_go_c
[loader_idx
] >> 2;
3324 dmae
->comp_addr_hi
= 0;
3329 dmae
= bnx2x_sp(bp
, dmae
[bp
->executer_idx
++]);
3330 dmae
->opcode
= opcode
;
3331 dmae
->src_addr_lo
= (port
? NIG_REG_STAT1_BRB_DISCARD
:
3332 NIG_REG_STAT0_BRB_DISCARD
) >> 2;
3333 dmae
->src_addr_hi
= 0;
3334 dmae
->dst_addr_lo
= U64_LO(bnx2x_sp_mapping(bp
, nig_stats
));
3335 dmae
->dst_addr_hi
= U64_HI(bnx2x_sp_mapping(bp
, nig_stats
));
3336 dmae
->len
= (sizeof(struct nig_stats
) - 4*sizeof(u32
)) >> 2;
3337 dmae
->comp_addr_lo
= dmae_reg_go_c
[loader_idx
] >> 2;
3338 dmae
->comp_addr_hi
= 0;
3341 dmae
= bnx2x_sp(bp
, dmae
[bp
->executer_idx
++]);
3342 dmae
->opcode
= opcode
;
3343 dmae
->src_addr_lo
= (port
? NIG_REG_STAT1_EGRESS_MAC_PKT0
:
3344 NIG_REG_STAT0_EGRESS_MAC_PKT0
) >> 2;
3345 dmae
->src_addr_hi
= 0;
3346 dmae
->dst_addr_lo
= U64_LO(bnx2x_sp_mapping(bp
, nig_stats
) +
3347 offsetof(struct nig_stats
, egress_mac_pkt0_lo
));
3348 dmae
->dst_addr_hi
= U64_HI(bnx2x_sp_mapping(bp
, nig_stats
) +
3349 offsetof(struct nig_stats
, egress_mac_pkt0_lo
));
3350 dmae
->len
= (2*sizeof(u32
)) >> 2;
3351 dmae
->comp_addr_lo
= dmae_reg_go_c
[loader_idx
] >> 2;
3352 dmae
->comp_addr_hi
= 0;
3355 dmae
= bnx2x_sp(bp
, dmae
[bp
->executer_idx
++]);
3356 dmae
->opcode
= (DMAE_CMD_SRC_GRC
| DMAE_CMD_DST_PCI
|
3357 DMAE_CMD_C_DST_PCI
| DMAE_CMD_C_ENABLE
|
3358 DMAE_CMD_SRC_RESET
| DMAE_CMD_DST_RESET
|
3360 DMAE_CMD_ENDIANITY_B_DW_SWAP
|
3362 DMAE_CMD_ENDIANITY_DW_SWAP
|
3364 (port
? DMAE_CMD_PORT_1
: DMAE_CMD_PORT_0
) |
3365 (vn
<< DMAE_CMD_E1HVN_SHIFT
));
3366 dmae
->src_addr_lo
= (port
? NIG_REG_STAT1_EGRESS_MAC_PKT1
:
3367 NIG_REG_STAT0_EGRESS_MAC_PKT1
) >> 2;
3368 dmae
->src_addr_hi
= 0;
3369 dmae
->dst_addr_lo
= U64_LO(bnx2x_sp_mapping(bp
, nig_stats
) +
3370 offsetof(struct nig_stats
, egress_mac_pkt1_lo
));
3371 dmae
->dst_addr_hi
= U64_HI(bnx2x_sp_mapping(bp
, nig_stats
) +
3372 offsetof(struct nig_stats
, egress_mac_pkt1_lo
));
3373 dmae
->len
= (2*sizeof(u32
)) >> 2;
3374 dmae
->comp_addr_lo
= U64_LO(bnx2x_sp_mapping(bp
, stats_comp
));
3375 dmae
->comp_addr_hi
= U64_HI(bnx2x_sp_mapping(bp
, stats_comp
));
3376 dmae
->comp_val
= DMAE_COMP_VAL
;
3381 static void bnx2x_func_stats_init(struct bnx2x
*bp
)
3383 struct dmae_command
*dmae
= &bp
->stats_dmae
;
3384 u32
*stats_comp
= bnx2x_sp(bp
, stats_comp
);
3387 if (!bp
->func_stx
) {
3388 BNX2X_ERR("BUG!\n");
3392 bp
->executer_idx
= 0;
3393 memset(dmae
, 0, sizeof(struct dmae_command
));
3395 dmae
->opcode
= (DMAE_CMD_SRC_PCI
| DMAE_CMD_DST_GRC
|
3396 DMAE_CMD_C_DST_PCI
| DMAE_CMD_C_ENABLE
|
3397 DMAE_CMD_SRC_RESET
| DMAE_CMD_DST_RESET
|
3399 DMAE_CMD_ENDIANITY_B_DW_SWAP
|
3401 DMAE_CMD_ENDIANITY_DW_SWAP
|
3403 (BP_PORT(bp
) ? DMAE_CMD_PORT_1
: DMAE_CMD_PORT_0
) |
3404 (BP_E1HVN(bp
) << DMAE_CMD_E1HVN_SHIFT
));
3405 dmae
->src_addr_lo
= U64_LO(bnx2x_sp_mapping(bp
, func_stats
));
3406 dmae
->src_addr_hi
= U64_HI(bnx2x_sp_mapping(bp
, func_stats
));
3407 dmae
->dst_addr_lo
= bp
->func_stx
>> 2;
3408 dmae
->dst_addr_hi
= 0;
3409 dmae
->len
= sizeof(struct host_func_stats
) >> 2;
3410 dmae
->comp_addr_lo
= U64_LO(bnx2x_sp_mapping(bp
, stats_comp
));
3411 dmae
->comp_addr_hi
= U64_HI(bnx2x_sp_mapping(bp
, stats_comp
));
3412 dmae
->comp_val
= DMAE_COMP_VAL
;
3417 static void bnx2x_stats_start(struct bnx2x
*bp
)
3420 bnx2x_port_stats_init(bp
);
3422 else if (bp
->func_stx
)
3423 bnx2x_func_stats_init(bp
);
3425 bnx2x_hw_stats_post(bp
);
3426 bnx2x_storm_stats_post(bp
);
3429 static void bnx2x_stats_pmf_start(struct bnx2x
*bp
)
3431 bnx2x_stats_comp(bp
);
3432 bnx2x_stats_pmf_update(bp
);
3433 bnx2x_stats_start(bp
);
3436 static void bnx2x_stats_restart(struct bnx2x
*bp
)
3438 bnx2x_stats_comp(bp
);
3439 bnx2x_stats_start(bp
);
3442 static void bnx2x_bmac_stats_update(struct bnx2x
*bp
)
3444 struct bmac_stats
*new = bnx2x_sp(bp
, mac_stats
.bmac_stats
);
3445 struct host_port_stats
*pstats
= bnx2x_sp(bp
, port_stats
);
3446 struct regpair diff
;
3448 UPDATE_STAT64(rx_stat_grerb
, rx_stat_ifhcinbadoctets
);
3449 UPDATE_STAT64(rx_stat_grfcs
, rx_stat_dot3statsfcserrors
);
3450 UPDATE_STAT64(rx_stat_grund
, rx_stat_etherstatsundersizepkts
);
3451 UPDATE_STAT64(rx_stat_grovr
, rx_stat_dot3statsframestoolong
);
3452 UPDATE_STAT64(rx_stat_grfrg
, rx_stat_etherstatsfragments
);
3453 UPDATE_STAT64(rx_stat_grjbr
, rx_stat_etherstatsjabbers
);
3454 UPDATE_STAT64(rx_stat_grxpf
, rx_stat_bmac_xpf
);
3455 UPDATE_STAT64(rx_stat_grxcf
, rx_stat_bmac_xcf
);
3456 UPDATE_STAT64(rx_stat_grxpf
, rx_stat_xoffstateentered
);
3457 UPDATE_STAT64(rx_stat_grxpf
, rx_stat_xoffpauseframesreceived
);
3458 UPDATE_STAT64(tx_stat_gtxpf
, tx_stat_outxoffsent
);
3459 UPDATE_STAT64(tx_stat_gtxpf
, tx_stat_flowcontroldone
);
3460 UPDATE_STAT64(tx_stat_gt64
, tx_stat_etherstatspkts64octets
);
3461 UPDATE_STAT64(tx_stat_gt127
,
3462 tx_stat_etherstatspkts65octetsto127octets
);
3463 UPDATE_STAT64(tx_stat_gt255
,
3464 tx_stat_etherstatspkts128octetsto255octets
);
3465 UPDATE_STAT64(tx_stat_gt511
,
3466 tx_stat_etherstatspkts256octetsto511octets
);
3467 UPDATE_STAT64(tx_stat_gt1023
,
3468 tx_stat_etherstatspkts512octetsto1023octets
);
3469 UPDATE_STAT64(tx_stat_gt1518
,
3470 tx_stat_etherstatspkts1024octetsto1522octets
);
3471 UPDATE_STAT64(tx_stat_gt2047
, tx_stat_bmac_2047
);
3472 UPDATE_STAT64(tx_stat_gt4095
, tx_stat_bmac_4095
);
3473 UPDATE_STAT64(tx_stat_gt9216
, tx_stat_bmac_9216
);
3474 UPDATE_STAT64(tx_stat_gt16383
, tx_stat_bmac_16383
);
3475 UPDATE_STAT64(tx_stat_gterr
,
3476 tx_stat_dot3statsinternalmactransmiterrors
);
3477 UPDATE_STAT64(tx_stat_gtufl
, tx_stat_bmac_ufl
);
3480 static void bnx2x_emac_stats_update(struct bnx2x
*bp
)
3482 struct emac_stats
*new = bnx2x_sp(bp
, mac_stats
.emac_stats
);
3483 struct host_port_stats
*pstats
= bnx2x_sp(bp
, port_stats
);
3485 UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets
);
3486 UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets
);
3487 UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors
);
3488 UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors
);
3489 UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors
);
3490 UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors
);
3491 UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts
);
3492 UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong
);
3493 UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments
);
3494 UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers
);
3495 UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived
);
3496 UPDATE_EXTEND_STAT(rx_stat_xoffstateentered
);
3497 UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived
);
3498 UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived
);
3499 UPDATE_EXTEND_STAT(tx_stat_outxonsent
);
3500 UPDATE_EXTEND_STAT(tx_stat_outxoffsent
);
3501 UPDATE_EXTEND_STAT(tx_stat_flowcontroldone
);
3502 UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions
);
3503 UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes
);
3504 UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes
);
3505 UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions
);
3506 UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions
);
3507 UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions
);
3508 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets
);
3509 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets
);
3510 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets
);
3511 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets
);
3512 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets
);
3513 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets
);
3514 UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets
);
3515 UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors
);
3518 static int bnx2x_hw_stats_update(struct bnx2x
*bp
)
3520 struct nig_stats
*new = bnx2x_sp(bp
, nig_stats
);
3521 struct nig_stats
*old
= &(bp
->port
.old_nig_stats
);
3522 struct host_port_stats
*pstats
= bnx2x_sp(bp
, port_stats
);
3523 struct bnx2x_eth_stats
*estats
= &bp
->eth_stats
;
3524 struct regpair diff
;
3526 if (bp
->link_vars
.mac_type
== MAC_TYPE_BMAC
)
3527 bnx2x_bmac_stats_update(bp
);
3529 else if (bp
->link_vars
.mac_type
== MAC_TYPE_EMAC
)
3530 bnx2x_emac_stats_update(bp
);
3532 else { /* unreached */
3533 BNX2X_ERR("stats updated by dmae but no MAC active\n");
3537 ADD_EXTEND_64(pstats
->brb_drop_hi
, pstats
->brb_drop_lo
,
3538 new->brb_discard
- old
->brb_discard
);
3540 UPDATE_STAT64_NIG(egress_mac_pkt0
,
3541 etherstatspkts1024octetsto1522octets
);
3542 UPDATE_STAT64_NIG(egress_mac_pkt1
, etherstatspktsover1522octets
);
3544 memcpy(old
, new, sizeof(struct nig_stats
));
3546 memcpy(&(estats
->rx_stat_ifhcinbadoctets_hi
), &(pstats
->mac_stx
[1]),
3547 sizeof(struct mac_stx
));
3548 estats
->brb_drop_hi
= pstats
->brb_drop_hi
;
3549 estats
->brb_drop_lo
= pstats
->brb_drop_lo
;
3551 pstats
->host_port_stats_start
= ++pstats
->host_port_stats_end
;
3556 static int bnx2x_storm_stats_update(struct bnx2x
*bp
)
3558 struct eth_stats_query
*stats
= bnx2x_sp(bp
, fw_stats
);
3559 int cl_id
= BP_CL_ID(bp
);
3560 struct tstorm_per_port_stats
*tport
=
3561 &stats
->tstorm_common
.port_statistics
;
3562 struct tstorm_per_client_stats
*tclient
=
3563 &stats
->tstorm_common
.client_statistics
[cl_id
];
3564 struct tstorm_per_client_stats
*old_tclient
= &bp
->old_tclient
;
3565 struct xstorm_per_client_stats
*xclient
=
3566 &stats
->xstorm_common
.client_statistics
[cl_id
];
3567 struct xstorm_per_client_stats
*old_xclient
= &bp
->old_xclient
;
3568 struct host_func_stats
*fstats
= bnx2x_sp(bp
, func_stats
);
3569 struct bnx2x_eth_stats
*estats
= &bp
->eth_stats
;
3572 /* are storm stats valid? */
3573 if ((u16
)(le16_to_cpu(tclient
->stats_counter
) + 1) !=
3574 bp
->stats_counter
) {
3575 DP(BNX2X_MSG_STATS
, "stats not updated by tstorm"
3576 " tstorm counter (%d) != stats_counter (%d)\n",
3577 tclient
->stats_counter
, bp
->stats_counter
);
3580 if ((u16
)(le16_to_cpu(xclient
->stats_counter
) + 1) !=
3581 bp
->stats_counter
) {
3582 DP(BNX2X_MSG_STATS
, "stats not updated by xstorm"
3583 " xstorm counter (%d) != stats_counter (%d)\n",
3584 xclient
->stats_counter
, bp
->stats_counter
);
3588 fstats
->total_bytes_received_hi
=
3589 fstats
->valid_bytes_received_hi
=
3590 le32_to_cpu(tclient
->total_rcv_bytes
.hi
);
3591 fstats
->total_bytes_received_lo
=
3592 fstats
->valid_bytes_received_lo
=
3593 le32_to_cpu(tclient
->total_rcv_bytes
.lo
);
3595 estats
->error_bytes_received_hi
=
3596 le32_to_cpu(tclient
->rcv_error_bytes
.hi
);
3597 estats
->error_bytes_received_lo
=
3598 le32_to_cpu(tclient
->rcv_error_bytes
.lo
);
3599 ADD_64(estats
->error_bytes_received_hi
,
3600 estats
->rx_stat_ifhcinbadoctets_hi
,
3601 estats
->error_bytes_received_lo
,
3602 estats
->rx_stat_ifhcinbadoctets_lo
);
3604 ADD_64(fstats
->total_bytes_received_hi
,
3605 estats
->error_bytes_received_hi
,
3606 fstats
->total_bytes_received_lo
,
3607 estats
->error_bytes_received_lo
);
3609 UPDATE_EXTEND_TSTAT(rcv_unicast_pkts
, total_unicast_packets_received
);
3610 UPDATE_EXTEND_TSTAT(rcv_multicast_pkts
,
3611 total_multicast_packets_received
);
3612 UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts
,
3613 total_broadcast_packets_received
);
3615 fstats
->total_bytes_transmitted_hi
=
3616 le32_to_cpu(xclient
->total_sent_bytes
.hi
);
3617 fstats
->total_bytes_transmitted_lo
=
3618 le32_to_cpu(xclient
->total_sent_bytes
.lo
);
3620 UPDATE_EXTEND_XSTAT(unicast_pkts_sent
,
3621 total_unicast_packets_transmitted
);
3622 UPDATE_EXTEND_XSTAT(multicast_pkts_sent
,
3623 total_multicast_packets_transmitted
);
3624 UPDATE_EXTEND_XSTAT(broadcast_pkts_sent
,
3625 total_broadcast_packets_transmitted
);
3627 memcpy(estats
, &(fstats
->total_bytes_received_hi
),
3628 sizeof(struct host_func_stats
) - 2*sizeof(u32
));
3630 estats
->mac_filter_discard
= le32_to_cpu(tport
->mac_filter_discard
);
3631 estats
->xxoverflow_discard
= le32_to_cpu(tport
->xxoverflow_discard
);
3632 estats
->brb_truncate_discard
=
3633 le32_to_cpu(tport
->brb_truncate_discard
);
3634 estats
->mac_discard
= le32_to_cpu(tport
->mac_discard
);
3636 old_tclient
->rcv_unicast_bytes
.hi
=
3637 le32_to_cpu(tclient
->rcv_unicast_bytes
.hi
);
3638 old_tclient
->rcv_unicast_bytes
.lo
=
3639 le32_to_cpu(tclient
->rcv_unicast_bytes
.lo
);
3640 old_tclient
->rcv_broadcast_bytes
.hi
=
3641 le32_to_cpu(tclient
->rcv_broadcast_bytes
.hi
);
3642 old_tclient
->rcv_broadcast_bytes
.lo
=
3643 le32_to_cpu(tclient
->rcv_broadcast_bytes
.lo
);
3644 old_tclient
->rcv_multicast_bytes
.hi
=
3645 le32_to_cpu(tclient
->rcv_multicast_bytes
.hi
);
3646 old_tclient
->rcv_multicast_bytes
.lo
=
3647 le32_to_cpu(tclient
->rcv_multicast_bytes
.lo
);
3648 old_tclient
->total_rcv_pkts
= le32_to_cpu(tclient
->total_rcv_pkts
);
3650 old_tclient
->checksum_discard
= le32_to_cpu(tclient
->checksum_discard
);
3651 old_tclient
->packets_too_big_discard
=
3652 le32_to_cpu(tclient
->packets_too_big_discard
);
3653 estats
->no_buff_discard
=
3654 old_tclient
->no_buff_discard
= le32_to_cpu(tclient
->no_buff_discard
);
3655 old_tclient
->ttl0_discard
= le32_to_cpu(tclient
->ttl0_discard
);
3657 old_xclient
->total_sent_pkts
= le32_to_cpu(xclient
->total_sent_pkts
);
3658 old_xclient
->unicast_bytes_sent
.hi
=
3659 le32_to_cpu(xclient
->unicast_bytes_sent
.hi
);
3660 old_xclient
->unicast_bytes_sent
.lo
=
3661 le32_to_cpu(xclient
->unicast_bytes_sent
.lo
);
3662 old_xclient
->multicast_bytes_sent
.hi
=
3663 le32_to_cpu(xclient
->multicast_bytes_sent
.hi
);
3664 old_xclient
->multicast_bytes_sent
.lo
=
3665 le32_to_cpu(xclient
->multicast_bytes_sent
.lo
);
3666 old_xclient
->broadcast_bytes_sent
.hi
=
3667 le32_to_cpu(xclient
->broadcast_bytes_sent
.hi
);
3668 old_xclient
->broadcast_bytes_sent
.lo
=
3669 le32_to_cpu(xclient
->broadcast_bytes_sent
.lo
);
3671 fstats
->host_func_stats_start
= ++fstats
->host_func_stats_end
;
3676 static void bnx2x_net_stats_update(struct bnx2x
*bp
)
3678 struct tstorm_per_client_stats
*old_tclient
= &bp
->old_tclient
;
3679 struct bnx2x_eth_stats
*estats
= &bp
->eth_stats
;
3680 struct net_device_stats
*nstats
= &bp
->dev
->stats
;
3682 nstats
->rx_packets
=
3683 bnx2x_hilo(&estats
->total_unicast_packets_received_hi
) +
3684 bnx2x_hilo(&estats
->total_multicast_packets_received_hi
) +
3685 bnx2x_hilo(&estats
->total_broadcast_packets_received_hi
);
3687 nstats
->tx_packets
=
3688 bnx2x_hilo(&estats
->total_unicast_packets_transmitted_hi
) +
3689 bnx2x_hilo(&estats
->total_multicast_packets_transmitted_hi
) +
3690 bnx2x_hilo(&estats
->total_broadcast_packets_transmitted_hi
);
3692 nstats
->rx_bytes
= bnx2x_hilo(&estats
->valid_bytes_received_hi
);
3694 nstats
->tx_bytes
= bnx2x_hilo(&estats
->total_bytes_transmitted_hi
);
3696 nstats
->rx_dropped
= old_tclient
->checksum_discard
+
3697 estats
->mac_discard
;
3698 nstats
->tx_dropped
= 0;
3701 bnx2x_hilo(&estats
->total_multicast_packets_transmitted_hi
);
3703 nstats
->collisions
=
3704 estats
->tx_stat_dot3statssinglecollisionframes_lo
+
3705 estats
->tx_stat_dot3statsmultiplecollisionframes_lo
+
3706 estats
->tx_stat_dot3statslatecollisions_lo
+
3707 estats
->tx_stat_dot3statsexcessivecollisions_lo
;
3709 estats
->jabber_packets_received
=
3710 old_tclient
->packets_too_big_discard
+
3711 estats
->rx_stat_dot3statsframestoolong_lo
;
3713 nstats
->rx_length_errors
=
3714 estats
->rx_stat_etherstatsundersizepkts_lo
+
3715 estats
->jabber_packets_received
;
3716 nstats
->rx_over_errors
= estats
->brb_drop_lo
+
3717 estats
->brb_truncate_discard
;
3718 nstats
->rx_crc_errors
= estats
->rx_stat_dot3statsfcserrors_lo
;
3719 nstats
->rx_frame_errors
= estats
->rx_stat_dot3statsalignmenterrors_lo
;
3720 nstats
->rx_fifo_errors
= old_tclient
->no_buff_discard
;
3721 nstats
->rx_missed_errors
= estats
->xxoverflow_discard
;
3723 nstats
->rx_errors
= nstats
->rx_length_errors
+
3724 nstats
->rx_over_errors
+
3725 nstats
->rx_crc_errors
+
3726 nstats
->rx_frame_errors
+
3727 nstats
->rx_fifo_errors
+
3728 nstats
->rx_missed_errors
;
3730 nstats
->tx_aborted_errors
=
3731 estats
->tx_stat_dot3statslatecollisions_lo
+
3732 estats
->tx_stat_dot3statsexcessivecollisions_lo
;
3733 nstats
->tx_carrier_errors
= estats
->rx_stat_falsecarriererrors_lo
;
3734 nstats
->tx_fifo_errors
= 0;
3735 nstats
->tx_heartbeat_errors
= 0;
3736 nstats
->tx_window_errors
= 0;
3738 nstats
->tx_errors
= nstats
->tx_aborted_errors
+
3739 nstats
->tx_carrier_errors
;
3742 static void bnx2x_stats_update(struct bnx2x
*bp
)
3744 u32
*stats_comp
= bnx2x_sp(bp
, stats_comp
);
3747 if (*stats_comp
!= DMAE_COMP_VAL
)
3751 update
= (bnx2x_hw_stats_update(bp
) == 0);
3753 update
|= (bnx2x_storm_stats_update(bp
) == 0);
3756 bnx2x_net_stats_update(bp
);
3759 if (bp
->stats_pending
) {
3760 bp
->stats_pending
++;
3761 if (bp
->stats_pending
== 3) {
3762 BNX2X_ERR("stats not updated for 3 times\n");
3769 if (bp
->msglevel
& NETIF_MSG_TIMER
) {
3770 struct tstorm_per_client_stats
*old_tclient
= &bp
->old_tclient
;
3771 struct bnx2x_eth_stats
*estats
= &bp
->eth_stats
;
3772 struct net_device_stats
*nstats
= &bp
->dev
->stats
;
3775 printk(KERN_DEBUG
"%s:\n", bp
->dev
->name
);
3776 printk(KERN_DEBUG
" tx avail (%4x) tx hc idx (%x)"
3778 bnx2x_tx_avail(bp
->fp
),
3779 le16_to_cpu(*bp
->fp
->tx_cons_sb
), nstats
->tx_packets
);
3780 printk(KERN_DEBUG
" rx usage (%4x) rx hc idx (%x)"
3782 (u16
)(le16_to_cpu(*bp
->fp
->rx_cons_sb
) -
3783 bp
->fp
->rx_comp_cons
),
3784 le16_to_cpu(*bp
->fp
->rx_cons_sb
), nstats
->rx_packets
);
3785 printk(KERN_DEBUG
" %s (Xoff events %u) brb drops %u\n",
3786 netif_queue_stopped(bp
->dev
)? "Xoff" : "Xon",
3787 estats
->driver_xoff
, estats
->brb_drop_lo
);
3788 printk(KERN_DEBUG
"tstats: checksum_discard %u "
3789 "packets_too_big_discard %u no_buff_discard %u "
3790 "mac_discard %u mac_filter_discard %u "
3791 "xxovrflow_discard %u brb_truncate_discard %u "
3792 "ttl0_discard %u\n",
3793 old_tclient
->checksum_discard
,
3794 old_tclient
->packets_too_big_discard
,
3795 old_tclient
->no_buff_discard
, estats
->mac_discard
,
3796 estats
->mac_filter_discard
, estats
->xxoverflow_discard
,
3797 estats
->brb_truncate_discard
,
3798 old_tclient
->ttl0_discard
);
3800 for_each_queue(bp
, i
) {
3801 printk(KERN_DEBUG
"[%d]: %lu\t%lu\t%lu\n", i
,
3802 bnx2x_fp(bp
, i
, tx_pkt
),
3803 bnx2x_fp(bp
, i
, rx_pkt
),
3804 bnx2x_fp(bp
, i
, rx_calls
));
3808 bnx2x_hw_stats_post(bp
);
3809 bnx2x_storm_stats_post(bp
);
3812 static void bnx2x_port_stats_stop(struct bnx2x
*bp
)
3814 struct dmae_command
*dmae
;
3816 int loader_idx
= PMF_DMAE_C(bp
);
3817 u32
*stats_comp
= bnx2x_sp(bp
, stats_comp
);
3819 bp
->executer_idx
= 0;
3821 opcode
= (DMAE_CMD_SRC_PCI
| DMAE_CMD_DST_GRC
|
3823 DMAE_CMD_SRC_RESET
| DMAE_CMD_DST_RESET
|
3825 DMAE_CMD_ENDIANITY_B_DW_SWAP
|
3827 DMAE_CMD_ENDIANITY_DW_SWAP
|
3829 (BP_PORT(bp
) ? DMAE_CMD_PORT_1
: DMAE_CMD_PORT_0
) |
3830 (BP_E1HVN(bp
) << DMAE_CMD_E1HVN_SHIFT
));
3832 if (bp
->port
.port_stx
) {
3834 dmae
= bnx2x_sp(bp
, dmae
[bp
->executer_idx
++]);
3836 dmae
->opcode
= (opcode
| DMAE_CMD_C_DST_GRC
);
3838 dmae
->opcode
= (opcode
| DMAE_CMD_C_DST_PCI
);
3839 dmae
->src_addr_lo
= U64_LO(bnx2x_sp_mapping(bp
, port_stats
));
3840 dmae
->src_addr_hi
= U64_HI(bnx2x_sp_mapping(bp
, port_stats
));
3841 dmae
->dst_addr_lo
= bp
->port
.port_stx
>> 2;
3842 dmae
->dst_addr_hi
= 0;
3843 dmae
->len
= sizeof(struct host_port_stats
) >> 2;
3845 dmae
->comp_addr_lo
= dmae_reg_go_c
[loader_idx
] >> 2;
3846 dmae
->comp_addr_hi
= 0;
3849 dmae
->comp_addr_lo
=
3850 U64_LO(bnx2x_sp_mapping(bp
, stats_comp
));
3851 dmae
->comp_addr_hi
=
3852 U64_HI(bnx2x_sp_mapping(bp
, stats_comp
));
3853 dmae
->comp_val
= DMAE_COMP_VAL
;
3861 dmae
= bnx2x_sp(bp
, dmae
[bp
->executer_idx
++]);
3862 dmae
->opcode
= (opcode
| DMAE_CMD_C_DST_PCI
);
3863 dmae
->src_addr_lo
= U64_LO(bnx2x_sp_mapping(bp
, func_stats
));
3864 dmae
->src_addr_hi
= U64_HI(bnx2x_sp_mapping(bp
, func_stats
));
3865 dmae
->dst_addr_lo
= bp
->func_stx
>> 2;
3866 dmae
->dst_addr_hi
= 0;
3867 dmae
->len
= sizeof(struct host_func_stats
) >> 2;
3868 dmae
->comp_addr_lo
= U64_LO(bnx2x_sp_mapping(bp
, stats_comp
));
3869 dmae
->comp_addr_hi
= U64_HI(bnx2x_sp_mapping(bp
, stats_comp
));
3870 dmae
->comp_val
= DMAE_COMP_VAL
;
static void bnx2x_stats_stop(struct bnx2x *bp)
{
	int update = 0;

	bnx2x_stats_comp(bp);

	if (bp->port.pmf)
		update = (bnx2x_hw_stats_update(bp) == 0);

	update |= (bnx2x_storm_stats_update(bp) == 0);

	if (update) {
		bnx2x_net_stats_update(bp);

		if (bp->port.pmf)
			bnx2x_port_stats_stop(bp);

		bnx2x_hw_stats_post(bp);
		bnx2x_stats_comp(bp);
	}
}

static void bnx2x_stats_do_nothing(struct bnx2x *bp)
{
}
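
/* Statistics state machine: indexed by the current state (DISABLED/ENABLED)
 * and the event (PMF, LINK_UP, UPDATE, STOP); each entry names the action to
 * run and the next state to move to.
 */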
static const struct {
	void (*action)(struct bnx2x *bp);
	enum bnx2x_stats_state next_state;
} bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
/* state	event	*/
{
/* DISABLED	PMF	*/ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
/*		LINK_UP	*/ {bnx2x_stats_start,      STATS_STATE_ENABLED},
/*		UPDATE	*/ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
/*		STOP	*/ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
},
{
/* ENABLED	PMF	*/ {bnx2x_stats_pmf_start,  STATS_STATE_ENABLED},
/*		LINK_UP	*/ {bnx2x_stats_restart,    STATS_STATE_ENABLED},
/*		UPDATE	*/ {bnx2x_stats_update,     STATS_STATE_ENABLED},
/*		STOP	*/ {bnx2x_stats_stop,       STATS_STATE_DISABLED}
}
};

static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
{
	enum bnx2x_stats_state state = bp->stats_state;

	bnx2x_stats_stm[state][event].action(bp);
	bp->stats_state = bnx2x_stats_stm[state][event].next_state;

	if ((event != STATS_EVENT_UPDATE) || (bp->msglevel & NETIF_MSG_TIMER))
		DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
		   state, event, bp->stats_state);
}
3933 static void bnx2x_timer(unsigned long data
)
3935 struct bnx2x
*bp
= (struct bnx2x
*) data
;
3937 if (!netif_running(bp
->dev
))
3940 if (atomic_read(&bp
->intr_sem
) != 0)
3944 struct bnx2x_fastpath
*fp
= &bp
->fp
[0];
3947 bnx2x_tx_int(fp
, 1000);
3948 rc
= bnx2x_rx_int(fp
, 1000);
3951 if (!BP_NOMCP(bp
)) {
3952 int func
= BP_FUNC(bp
);
3956 ++bp
->fw_drv_pulse_wr_seq
;
3957 bp
->fw_drv_pulse_wr_seq
&= DRV_PULSE_SEQ_MASK
;
3958 /* TBD - add SYSTEM_TIME */
3959 drv_pulse
= bp
->fw_drv_pulse_wr_seq
;
3960 SHMEM_WR(bp
, func_mb
[func
].drv_pulse_mb
, drv_pulse
);
3962 mcp_pulse
= (SHMEM_RD(bp
, func_mb
[func
].mcp_pulse_mb
) &
3963 MCP_PULSE_SEQ_MASK
);
3964 /* The delta between driver pulse and mcp response
3965 * should be 1 (before mcp response) or 0 (after mcp response)
3967 if ((drv_pulse
!= mcp_pulse
) &&
3968 (drv_pulse
!= ((mcp_pulse
+ 1) & MCP_PULSE_SEQ_MASK
))) {
3969 /* someone lost a heartbeat... */
3970 BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
3971 drv_pulse
, mcp_pulse
);
3975 if ((bp
->state
== BNX2X_STATE_OPEN
) ||
3976 (bp
->state
== BNX2X_STATE_DISABLED
))
3977 bnx2x_stats_handle(bp
, STATS_EVENT_UPDATE
);
3980 mod_timer(&bp
->timer
, jiffies
+ bp
->current_interval
);
3983 /* end of Statistics */
3988 * nic init service functions
3991 static void bnx2x_zero_sb(struct bnx2x
*bp
, int sb_id
)
3993 int port
= BP_PORT(bp
);
3995 bnx2x_init_fill(bp
, BAR_USTRORM_INTMEM
+
3996 USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port
, sb_id
), 0,
3997 sizeof(struct ustorm_def_status_block
)/4);
3998 bnx2x_init_fill(bp
, BAR_CSTRORM_INTMEM
+
3999 CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port
, sb_id
), 0,
4000 sizeof(struct cstorm_def_status_block
)/4);
4003 static void bnx2x_init_sb(struct bnx2x
*bp
, int sb_id
,
4004 struct host_status_block
*sb
, dma_addr_t mapping
)
4006 int port
= BP_PORT(bp
);
4007 int func
= BP_FUNC(bp
);
4012 section
= ((u64
)mapping
) + offsetof(struct host_status_block
,
4014 sb
->u_status_block
.status_block_id
= sb_id
;
4016 REG_WR(bp
, BAR_USTRORM_INTMEM
+
4017 USTORM_SB_HOST_SB_ADDR_OFFSET(port
, sb_id
), U64_LO(section
));
4018 REG_WR(bp
, BAR_USTRORM_INTMEM
+
4019 ((USTORM_SB_HOST_SB_ADDR_OFFSET(port
, sb_id
)) + 4),
4021 REG_WR8(bp
, BAR_USTRORM_INTMEM
+ FP_USB_FUNC_OFF
+
4022 USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port
, sb_id
), func
);
4024 for (index
= 0; index
< HC_USTORM_SB_NUM_INDICES
; index
++)
4025 REG_WR16(bp
, BAR_USTRORM_INTMEM
+
4026 USTORM_SB_HC_DISABLE_OFFSET(port
, sb_id
, index
), 1);
4029 section
= ((u64
)mapping
) + offsetof(struct host_status_block
,
4031 sb
->c_status_block
.status_block_id
= sb_id
;
4033 REG_WR(bp
, BAR_CSTRORM_INTMEM
+
4034 CSTORM_SB_HOST_SB_ADDR_OFFSET(port
, sb_id
), U64_LO(section
));
4035 REG_WR(bp
, BAR_CSTRORM_INTMEM
+
4036 ((CSTORM_SB_HOST_SB_ADDR_OFFSET(port
, sb_id
)) + 4),
4038 REG_WR8(bp
, BAR_CSTRORM_INTMEM
+ FP_CSB_FUNC_OFF
+
4039 CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port
, sb_id
), func
);
4041 for (index
= 0; index
< HC_CSTORM_SB_NUM_INDICES
; index
++)
4042 REG_WR16(bp
, BAR_CSTRORM_INTMEM
+
4043 CSTORM_SB_HC_DISABLE_OFFSET(port
, sb_id
, index
), 1);
4045 bnx2x_ack_sb(bp
, sb_id
, CSTORM_ID
, 0, IGU_INT_ENABLE
, 0);
static void bnx2x_zero_def_sb(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);

	bnx2x_init_fill(bp, BAR_USTRORM_INTMEM +
			USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
			sizeof(struct ustorm_def_status_block)/4);
	bnx2x_init_fill(bp, BAR_CSTRORM_INTMEM +
			CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
			sizeof(struct cstorm_def_status_block)/4);
	bnx2x_init_fill(bp, BAR_XSTRORM_INTMEM +
			XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
			sizeof(struct xstorm_def_status_block)/4);
	bnx2x_init_fill(bp, BAR_TSTRORM_INTMEM +
			TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
			sizeof(struct tstorm_def_status_block)/4);
}
4066 static void bnx2x_init_def_sb(struct bnx2x
*bp
,
4067 struct host_def_status_block
*def_sb
,
4068 dma_addr_t mapping
, int sb_id
)
4070 int port
= BP_PORT(bp
);
4071 int func
= BP_FUNC(bp
);
4072 int index
, val
, reg_offset
;
4076 section
= ((u64
)mapping
) + offsetof(struct host_def_status_block
,
4077 atten_status_block
);
4078 def_sb
->atten_status_block
.status_block_id
= sb_id
;
4080 bp
->def_att_idx
= 0;
4083 reg_offset
= (port
? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0
:
4084 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0
);
4086 for (index
= 0; index
< MAX_DYNAMIC_ATTN_GRPS
; index
++) {
4087 bp
->attn_group
[index
].sig
[0] = REG_RD(bp
,
4088 reg_offset
+ 0x10*index
);
4089 bp
->attn_group
[index
].sig
[1] = REG_RD(bp
,
4090 reg_offset
+ 0x4 + 0x10*index
);
4091 bp
->attn_group
[index
].sig
[2] = REG_RD(bp
,
4092 reg_offset
+ 0x8 + 0x10*index
);
4093 bp
->attn_group
[index
].sig
[3] = REG_RD(bp
,
4094 reg_offset
+ 0xc + 0x10*index
);
4097 bp
->aeu_mask
= REG_RD(bp
, (port
? MISC_REG_AEU_MASK_ATTN_FUNC_1
:
4098 MISC_REG_AEU_MASK_ATTN_FUNC_0
));
4100 reg_offset
= (port
? HC_REG_ATTN_MSG1_ADDR_L
:
4101 HC_REG_ATTN_MSG0_ADDR_L
);
4103 REG_WR(bp
, reg_offset
, U64_LO(section
));
4104 REG_WR(bp
, reg_offset
+ 4, U64_HI(section
));
4106 reg_offset
= (port
? HC_REG_ATTN_NUM_P1
: HC_REG_ATTN_NUM_P0
);
4108 val
= REG_RD(bp
, reg_offset
);
4110 REG_WR(bp
, reg_offset
, val
);
4113 section
= ((u64
)mapping
) + offsetof(struct host_def_status_block
,
4114 u_def_status_block
);
4115 def_sb
->u_def_status_block
.status_block_id
= sb_id
;
4119 REG_WR(bp
, BAR_USTRORM_INTMEM
+
4120 USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func
), U64_LO(section
));
4121 REG_WR(bp
, BAR_USTRORM_INTMEM
+
4122 ((USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func
)) + 4),
4124 REG_WR8(bp
, BAR_USTRORM_INTMEM
+ DEF_USB_FUNC_OFF
+
4125 USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func
), func
);
4126 REG_WR(bp
, BAR_USTRORM_INTMEM
+ USTORM_HC_BTR_OFFSET(func
),
4129 for (index
= 0; index
< HC_USTORM_DEF_SB_NUM_INDICES
; index
++)
4130 REG_WR16(bp
, BAR_USTRORM_INTMEM
+
4131 USTORM_DEF_SB_HC_DISABLE_OFFSET(func
, index
), 1);
4134 section
= ((u64
)mapping
) + offsetof(struct host_def_status_block
,
4135 c_def_status_block
);
4136 def_sb
->c_def_status_block
.status_block_id
= sb_id
;
4140 REG_WR(bp
, BAR_CSTRORM_INTMEM
+
4141 CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func
), U64_LO(section
));
4142 REG_WR(bp
, BAR_CSTRORM_INTMEM
+
4143 ((CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func
)) + 4),
4145 REG_WR8(bp
, BAR_CSTRORM_INTMEM
+ DEF_CSB_FUNC_OFF
+
4146 CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func
), func
);
4147 REG_WR(bp
, BAR_CSTRORM_INTMEM
+ CSTORM_HC_BTR_OFFSET(func
),
4150 for (index
= 0; index
< HC_CSTORM_DEF_SB_NUM_INDICES
; index
++)
4151 REG_WR16(bp
, BAR_CSTRORM_INTMEM
+
4152 CSTORM_DEF_SB_HC_DISABLE_OFFSET(func
, index
), 1);
4155 section
= ((u64
)mapping
) + offsetof(struct host_def_status_block
,
4156 t_def_status_block
);
4157 def_sb
->t_def_status_block
.status_block_id
= sb_id
;
4161 REG_WR(bp
, BAR_TSTRORM_INTMEM
+
4162 TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func
), U64_LO(section
));
4163 REG_WR(bp
, BAR_TSTRORM_INTMEM
+
4164 ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func
)) + 4),
4166 REG_WR8(bp
, BAR_TSTRORM_INTMEM
+ DEF_TSB_FUNC_OFF
+
4167 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func
), func
);
4168 REG_WR(bp
, BAR_TSTRORM_INTMEM
+ TSTORM_HC_BTR_OFFSET(func
),
4171 for (index
= 0; index
< HC_TSTORM_DEF_SB_NUM_INDICES
; index
++)
4172 REG_WR16(bp
, BAR_TSTRORM_INTMEM
+
4173 TSTORM_DEF_SB_HC_DISABLE_OFFSET(func
, index
), 1);
4176 section
= ((u64
)mapping
) + offsetof(struct host_def_status_block
,
4177 x_def_status_block
);
4178 def_sb
->x_def_status_block
.status_block_id
= sb_id
;
4182 REG_WR(bp
, BAR_XSTRORM_INTMEM
+
4183 XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func
), U64_LO(section
));
4184 REG_WR(bp
, BAR_XSTRORM_INTMEM
+
4185 ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func
)) + 4),
4187 REG_WR8(bp
, BAR_XSTRORM_INTMEM
+ DEF_XSB_FUNC_OFF
+
4188 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func
), func
);
4189 REG_WR(bp
, BAR_XSTRORM_INTMEM
+ XSTORM_HC_BTR_OFFSET(func
),
4192 for (index
= 0; index
< HC_XSTORM_DEF_SB_NUM_INDICES
; index
++)
4193 REG_WR16(bp
, BAR_XSTRORM_INTMEM
+
4194 XSTORM_DEF_SB_HC_DISABLE_OFFSET(func
, index
), 1);
4196 bp
->stats_pending
= 0;
4198 bnx2x_ack_sb(bp
, sb_id
, CSTORM_ID
, 0, IGU_INT_ENABLE
, 0);
static void bnx2x_update_coalesce(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int i;

	for_each_queue(bp, i) {
		int sb_id = bp->fp[i].sb_id;

		/* HC_INDEX_U_ETH_RX_CQ_CONS */
		REG_WR8(bp, BAR_USTRORM_INTMEM +
			USTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
						    HC_INDEX_U_ETH_RX_CQ_CONS),
			bp->rx_ticks/12);
		REG_WR16(bp, BAR_USTRORM_INTMEM +
			 USTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
						     HC_INDEX_U_ETH_RX_CQ_CONS),
			 bp->rx_ticks ? 0 : 1);

		/* HC_INDEX_C_ETH_TX_CQ_CONS */
		REG_WR8(bp, BAR_CSTRORM_INTMEM +
			CSTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
						    HC_INDEX_C_ETH_TX_CQ_CONS),
			bp->tx_ticks/12);
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
						     HC_INDEX_C_ETH_TX_CQ_CONS),
			 bp->tx_ticks ? 0 : 1);
	}
}
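/* As seen above, a tick value of 0 leaves the HC_DISABLE bit set (1), i.e.
 * interrupt coalescing is simply switched off for that status-block index;
 * any non-zero rx_ticks/tx_ticks enables it with the programmed timeout.
 */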
static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
				       struct bnx2x_fastpath *fp, int last)
{
	int i;

	for (i = 0; i < last; i++) {
		struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]);
		struct sk_buff *skb = rx_buf->skb;

		if (skb == NULL) {
			DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
			continue;
		}

		if (fp->tpa_state[i] == BNX2X_TPA_START)
			pci_unmap_single(bp->pdev,
					 pci_unmap_addr(rx_buf, mapping),
					 bp->rx_buf_use_size,
					 PCI_DMA_FROMDEVICE);

		dev_kfree_skb(skb);
		rx_buf->skb = NULL;
	}
}
4256 static void bnx2x_init_rx_rings(struct bnx2x
*bp
)
4258 int func
= BP_FUNC(bp
);
4259 u16 ring_prod
, cqe_ring_prod
= 0;
4262 bp
->rx_buf_use_size
= bp
->dev
->mtu
;
4263 bp
->rx_buf_use_size
+= bp
->rx_offset
+ ETH_OVREHEAD
;
4264 bp
->rx_buf_size
= bp
->rx_buf_use_size
+ 64;
4266 if (bp
->flags
& TPA_ENABLE_FLAG
) {
4268 "rx_buf_use_size %d rx_buf_size %d effective_mtu %d\n",
4269 bp
->rx_buf_use_size
, bp
->rx_buf_size
,
4270 bp
->dev
->mtu
+ ETH_OVREHEAD
);
4272 for_each_queue(bp
, j
) {
4273 for (i
= 0; i
< ETH_MAX_AGGREGATION_QUEUES_E1H
; i
++) {
4274 struct bnx2x_fastpath
*fp
= &bp
->fp
[j
];
4276 fp
->tpa_pool
[i
].skb
=
4277 netdev_alloc_skb(bp
->dev
, bp
->rx_buf_size
);
4278 if (!fp
->tpa_pool
[i
].skb
) {
4279 BNX2X_ERR("Failed to allocate TPA "
4280 "skb pool for queue[%d] - "
4281 "disabling TPA on this "
4283 bnx2x_free_tpa_pool(bp
, fp
, i
);
4284 fp
->disable_tpa
= 1;
4287 pci_unmap_addr_set((struct sw_rx_bd
*)
4288 &bp
->fp
->tpa_pool
[i
],
4290 fp
->tpa_state
[i
] = BNX2X_TPA_STOP
;
4295 for_each_queue(bp
, j
) {
4296 struct bnx2x_fastpath
*fp
= &bp
->fp
[j
];
4299 fp
->rx_cons_sb
= BNX2X_RX_SB_INDEX
;
4300 fp
->rx_bd_cons_sb
= BNX2X_RX_SB_BD_INDEX
;
4302 /* "next page" elements initialization */
4304 for (i
= 1; i
<= NUM_RX_SGE_PAGES
; i
++) {
4305 struct eth_rx_sge
*sge
;
4307 sge
= &fp
->rx_sge_ring
[RX_SGE_CNT
* i
- 2];
4309 cpu_to_le32(U64_HI(fp
->rx_sge_mapping
+
4310 BCM_PAGE_SIZE
*(i
% NUM_RX_SGE_PAGES
)));
4312 cpu_to_le32(U64_LO(fp
->rx_sge_mapping
+
4313 BCM_PAGE_SIZE
*(i
% NUM_RX_SGE_PAGES
)));
4316 bnx2x_init_sge_ring_bit_mask(fp
);
4319 for (i
= 1; i
<= NUM_RX_RINGS
; i
++) {
4320 struct eth_rx_bd
*rx_bd
;
4322 rx_bd
= &fp
->rx_desc_ring
[RX_DESC_CNT
* i
- 2];
4324 cpu_to_le32(U64_HI(fp
->rx_desc_mapping
+
4325 BCM_PAGE_SIZE
*(i
% NUM_RX_RINGS
)));
4327 cpu_to_le32(U64_LO(fp
->rx_desc_mapping
+
4328 BCM_PAGE_SIZE
*(i
% NUM_RX_RINGS
)));
4332 for (i
= 1; i
<= NUM_RCQ_RINGS
; i
++) {
4333 struct eth_rx_cqe_next_page
*nextpg
;
4335 nextpg
= (struct eth_rx_cqe_next_page
*)
4336 &fp
->rx_comp_ring
[RCQ_DESC_CNT
* i
- 1];
4338 cpu_to_le32(U64_HI(fp
->rx_comp_mapping
+
4339 BCM_PAGE_SIZE
*(i
% NUM_RCQ_RINGS
)));
4341 cpu_to_le32(U64_LO(fp
->rx_comp_mapping
+
4342 BCM_PAGE_SIZE
*(i
% NUM_RCQ_RINGS
)));
4345 /* Allocate SGEs and initialize the ring elements */
4346 for (i
= 0, ring_prod
= 0;
4347 i
< MAX_RX_SGE_CNT
*NUM_RX_SGE_PAGES
; i
++) {
4349 if (bnx2x_alloc_rx_sge(bp
, fp
, ring_prod
) < 0) {
4350 BNX2X_ERR("was only able to allocate "
4352 BNX2X_ERR("disabling TPA for queue[%d]\n", j
);
4353 /* Cleanup already allocated elements */
4354 bnx2x_free_rx_sge_range(bp
, fp
, ring_prod
);
4355 bnx2x_free_tpa_pool(bp
, fp
,
4356 ETH_MAX_AGGREGATION_QUEUES_E1H
);
4357 fp
->disable_tpa
= 1;
4361 ring_prod
= NEXT_SGE_IDX(ring_prod
);
4363 fp
->rx_sge_prod
= ring_prod
;
4365 /* Allocate BDs and initialize BD ring */
4366 fp
->rx_comp_cons
= fp
->rx_alloc_failed
= 0;
4367 cqe_ring_prod
= ring_prod
= 0;
4368 for (i
= 0; i
< bp
->rx_ring_size
; i
++) {
4369 if (bnx2x_alloc_rx_skb(bp
, fp
, ring_prod
) < 0) {
4370 BNX2X_ERR("was only able to allocate "
4372 fp
->rx_alloc_failed
++;
4375 ring_prod
= NEXT_RX_IDX(ring_prod
);
4376 cqe_ring_prod
= NEXT_RCQ_IDX(cqe_ring_prod
);
4377 BUG_TRAP(ring_prod
> i
);
4380 fp
->rx_bd_prod
= ring_prod
;
4381 /* must not have more available CQEs than BDs */
4382 fp
->rx_comp_prod
= min((u16
)(NUM_RCQ_RINGS
*RCQ_DESC_CNT
),
4384 fp
->rx_pkt
= fp
->rx_calls
= 0;
4387 * this will generate an interrupt (to the TSTORM)
4388 * must only be done after chip is initialized
4390 bnx2x_update_rx_prod(bp
, fp
, ring_prod
, fp
->rx_comp_prod
,
4395 REG_WR(bp
, BAR_USTRORM_INTMEM
+
4396 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func
),
4397 U64_LO(fp
->rx_comp_mapping
));
4398 REG_WR(bp
, BAR_USTRORM_INTMEM
+
4399 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func
) + 4,
4400 U64_HI(fp
->rx_comp_mapping
));
static void bnx2x_init_tx_ring(struct bnx2x *bp)
{
	int i, j;

	for_each_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		for (i = 1; i <= NUM_TX_RINGS; i++) {
			struct eth_tx_bd *tx_bd =
				&fp->tx_desc_ring[TX_DESC_CNT * i - 1];

			tx_bd->addr_hi =
				cpu_to_le32(U64_HI(fp->tx_desc_mapping +
					    BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
			tx_bd->addr_lo =
				cpu_to_le32(U64_LO(fp->tx_desc_mapping +
					    BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
		}

		fp->tx_pkt_prod = 0;
		fp->tx_pkt_cons = 0;
		fp->tx_bd_prod = 0;
		fp->tx_bd_cons = 0;
		fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
	}
}
static void bnx2x_init_sp_ring(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);

	spin_lock_init(&bp->spq_lock);

	bp->spq_left = MAX_SPQ_PENDING;
	bp->spq_prod_idx = 0;
	bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
	bp->spq_prod_bd = bp->spq;
	bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;

	REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
	       U64_LO(bp->spq_mapping));
	REG_WR(bp,
	       XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
	       U64_HI(bp->spq_mapping));

	REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
	       bp->spq_prod_idx);
}
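/* Sketch of the slow-path queue layout implied above: the SPQ is a single
 * BCM_PAGE-sized ring of slow-path BDs; spq_prod_bd walks from bp->spq up to
 * spq_last_bd and wraps, spq_left throttles outstanding ramrods to
 * MAX_SPQ_PENDING, and the producer index is mirrored to the XSTORM so the
 * firmware knows where the host producer stopped.
 */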
4454 static void bnx2x_init_context(struct bnx2x
*bp
)
4458 for_each_queue(bp
, i
) {
4459 struct eth_context
*context
= bnx2x_sp(bp
, context
[i
].eth
);
4460 struct bnx2x_fastpath
*fp
= &bp
->fp
[i
];
4461 u8 sb_id
= FP_SB_ID(fp
);
4463 context
->xstorm_st_context
.tx_bd_page_base_hi
=
4464 U64_HI(fp
->tx_desc_mapping
);
4465 context
->xstorm_st_context
.tx_bd_page_base_lo
=
4466 U64_LO(fp
->tx_desc_mapping
);
4467 context
->xstorm_st_context
.db_data_addr_hi
=
4468 U64_HI(fp
->tx_prods_mapping
);
4469 context
->xstorm_st_context
.db_data_addr_lo
=
4470 U64_LO(fp
->tx_prods_mapping
);
4471 context
->xstorm_st_context
.statistics_data
= (BP_CL_ID(bp
) |
4472 XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE
);
4474 context
->ustorm_st_context
.common
.sb_index_numbers
=
4475 BNX2X_RX_SB_INDEX_NUM
;
4476 context
->ustorm_st_context
.common
.clientId
= FP_CL_ID(fp
);
4477 context
->ustorm_st_context
.common
.status_block_id
= sb_id
;
4478 context
->ustorm_st_context
.common
.flags
=
4479 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT
;
4480 context
->ustorm_st_context
.common
.mc_alignment_size
= 64;
4481 context
->ustorm_st_context
.common
.bd_buff_size
=
4482 bp
->rx_buf_use_size
;
4483 context
->ustorm_st_context
.common
.bd_page_base_hi
=
4484 U64_HI(fp
->rx_desc_mapping
);
4485 context
->ustorm_st_context
.common
.bd_page_base_lo
=
4486 U64_LO(fp
->rx_desc_mapping
);
4487 if (!fp
->disable_tpa
) {
4488 context
->ustorm_st_context
.common
.flags
|=
4489 (USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA
|
4490 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_SGE_RING
);
4491 context
->ustorm_st_context
.common
.sge_buff_size
=
4492 (u16
)(BCM_PAGE_SIZE
*PAGES_PER_SGE
);
4493 context
->ustorm_st_context
.common
.sge_page_base_hi
=
4494 U64_HI(fp
->rx_sge_mapping
);
4495 context
->ustorm_st_context
.common
.sge_page_base_lo
=
4496 U64_LO(fp
->rx_sge_mapping
);
4499 context
->cstorm_st_context
.sb_index_number
=
4500 HC_INDEX_C_ETH_TX_CQ_CONS
;
4501 context
->cstorm_st_context
.status_block_id
= sb_id
;
4503 context
->xstorm_ag_context
.cdu_reserved
=
4504 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp
, i
),
4505 CDU_REGION_NUMBER_XCM_AG
,
4506 ETH_CONNECTION_TYPE
);
4507 context
->ustorm_ag_context
.cdu_usage
=
4508 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp
, i
),
4509 CDU_REGION_NUMBER_UCM_AG
,
4510 ETH_CONNECTION_TYPE
);
static void bnx2x_init_ind_table(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int i;

	if (!is_multi(bp))
		return;

	DP(NETIF_MSG_IFUP, "Initializing indirection table\n");
	for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
		REG_WR8(bp, BAR_TSTRORM_INTMEM +
			TSTORM_INDIRECTION_TABLE_OFFSET(port) + i,
			i % bp->num_queues);

	REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
}
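/* The RSS indirection table simply stripes entries across the active queues:
 * entry i selects queue (i % num_queues).  For example, with 4 queues the
 * table entries repeat 0,1,2,3,0,1,2,3,... so hash buckets are spread evenly.
 */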
4531 static void bnx2x_set_client_config(struct bnx2x
*bp
)
4533 struct tstorm_eth_client_config tstorm_client
= {0};
4534 int port
= BP_PORT(bp
);
4537 tstorm_client
.mtu
= bp
->dev
->mtu
+ ETH_OVREHEAD
;
4538 tstorm_client
.statistics_counter_id
= 0;
4539 tstorm_client
.config_flags
=
4540 TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE
;
4542 if (bp
->rx_mode
&& bp
->vlgrp
) {
4543 tstorm_client
.config_flags
|=
4544 TSTORM_ETH_CLIENT_CONFIG_VLAN_REMOVAL_ENABLE
;
4545 DP(NETIF_MSG_IFUP
, "vlan removal enabled\n");
4549 if (bp
->flags
& TPA_ENABLE_FLAG
) {
4550 tstorm_client
.max_sges_for_packet
=
4551 BCM_PAGE_ALIGN(tstorm_client
.mtu
) >> BCM_PAGE_SHIFT
;
4552 tstorm_client
.max_sges_for_packet
=
4553 ((tstorm_client
.max_sges_for_packet
+
4554 PAGES_PER_SGE
- 1) & (~(PAGES_PER_SGE
- 1))) >>
4555 PAGES_PER_SGE_SHIFT
;
4557 tstorm_client
.config_flags
|=
4558 TSTORM_ETH_CLIENT_CONFIG_ENABLE_SGE_RING
;
4561 for_each_queue(bp
, i
) {
4562 REG_WR(bp
, BAR_TSTRORM_INTMEM
+
4563 TSTORM_CLIENT_CONFIG_OFFSET(port
, bp
->fp
[i
].cl_id
),
4564 ((u32
*)&tstorm_client
)[0]);
4565 REG_WR(bp
, BAR_TSTRORM_INTMEM
+
4566 TSTORM_CLIENT_CONFIG_OFFSET(port
, bp
->fp
[i
].cl_id
) + 4,
4567 ((u32
*)&tstorm_client
)[1]);
4570 DP(BNX2X_MSG_OFF
, "tstorm_client: 0x%08x 0x%08x\n",
4571 ((u32
*)&tstorm_client
)[0], ((u32
*)&tstorm_client
)[1]);
static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
{
	struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
	int mode = bp->rx_mode;
	int mask = (1 << BP_L_ID(bp));
	int func = BP_FUNC(bp);
	int i;

	DP(NETIF_MSG_RX_STATUS, "rx mode is %d\n", mode);

	switch (mode) {
	case BNX2X_RX_MODE_NONE: /* no Rx */
		tstorm_mac_filter.ucast_drop_all = mask;
		tstorm_mac_filter.mcast_drop_all = mask;
		tstorm_mac_filter.bcast_drop_all = mask;
		break;
	case BNX2X_RX_MODE_NORMAL:
		tstorm_mac_filter.bcast_accept_all = mask;
		break;
	case BNX2X_RX_MODE_ALLMULTI:
		tstorm_mac_filter.mcast_accept_all = mask;
		tstorm_mac_filter.bcast_accept_all = mask;
		break;
	case BNX2X_RX_MODE_PROMISC:
		tstorm_mac_filter.ucast_accept_all = mask;
		tstorm_mac_filter.mcast_accept_all = mask;
		tstorm_mac_filter.bcast_accept_all = mask;
		break;
	default:
		BNX2X_ERR("BAD rx mode (%d)\n", mode);
		break;
	}

	for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
		REG_WR(bp, BAR_TSTRORM_INTMEM +
		       TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
		       ((u32 *)&tstorm_mac_filter)[i]);

/*		DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
		   ((u32 *)&tstorm_mac_filter)[i]); */
	}

	if (mode != BNX2X_RX_MODE_NONE)
		bnx2x_set_client_config(bp);
}
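/* 'mask' is the per-function bit (1 << BP_L_ID(bp)), so each function only
 * toggles its own drop/accept bits in the shared TSTORM MAC filter config;
 * the filter settings of other functions on the same port are left untouched.
 */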
4620 static void bnx2x_init_internal(struct bnx2x
*bp
)
4622 struct tstorm_eth_function_common_config tstorm_config
= {0};
4623 struct stats_indication_flags stats_flags
= {0};
4624 int port
= BP_PORT(bp
);
4625 int func
= BP_FUNC(bp
);
4629 tstorm_config
.config_flags
= MULTI_FLAGS
;
4630 tstorm_config
.rss_result_mask
= MULTI_MASK
;
4633 tstorm_config
.leading_client_id
= BP_L_ID(bp
);
4635 REG_WR(bp
, BAR_TSTRORM_INTMEM
+
4636 TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func
),
4637 (*(u32
*)&tstorm_config
));
4639 /* DP(NETIF_MSG_IFUP, "tstorm_config: 0x%08x\n",
4640 (*(u32 *)&tstorm_config)); */
4642 bp
->rx_mode
= BNX2X_RX_MODE_NONE
; /* no rx until link is up */
4643 bnx2x_set_storm_rx_mode(bp
);
4645 stats_flags
.collect_eth
= 1;
4647 REG_WR(bp
, BAR_XSTRORM_INTMEM
+ XSTORM_STATS_FLAGS_OFFSET(port
),
4648 ((u32
*)&stats_flags
)[0]);
4649 REG_WR(bp
, BAR_XSTRORM_INTMEM
+ XSTORM_STATS_FLAGS_OFFSET(port
) + 4,
4650 ((u32
*)&stats_flags
)[1]);
4652 REG_WR(bp
, BAR_TSTRORM_INTMEM
+ TSTORM_STATS_FLAGS_OFFSET(port
),
4653 ((u32
*)&stats_flags
)[0]);
4654 REG_WR(bp
, BAR_TSTRORM_INTMEM
+ TSTORM_STATS_FLAGS_OFFSET(port
) + 4,
4655 ((u32
*)&stats_flags
)[1]);
4657 REG_WR(bp
, BAR_CSTRORM_INTMEM
+ CSTORM_STATS_FLAGS_OFFSET(port
),
4658 ((u32
*)&stats_flags
)[0]);
4659 REG_WR(bp
, BAR_CSTRORM_INTMEM
+ CSTORM_STATS_FLAGS_OFFSET(port
) + 4,
4660 ((u32
*)&stats_flags
)[1]);
4662 /* DP(NETIF_MSG_IFUP, "stats_flags: 0x%08x 0x%08x\n",
4663 ((u32 *)&stats_flags)[0], ((u32 *)&stats_flags)[1]); */
4665 if (CHIP_IS_E1H(bp
)) {
4666 REG_WR8(bp
, BAR_XSTRORM_INTMEM
+ XSTORM_FUNCTION_MODE_OFFSET
,
4668 REG_WR8(bp
, BAR_TSTRORM_INTMEM
+ TSTORM_FUNCTION_MODE_OFFSET
,
4670 REG_WR8(bp
, BAR_CSTRORM_INTMEM
+ CSTORM_FUNCTION_MODE_OFFSET
,
4672 REG_WR8(bp
, BAR_USTRORM_INTMEM
+ USTORM_FUNCTION_MODE_OFFSET
,
4675 REG_WR16(bp
, BAR_XSTRORM_INTMEM
+ XSTORM_E1HOV_OFFSET(func
),
4679 /* Zero this manualy as its initialization is
4680 currently missing in the initTool */
4681 for (i
= 0; i
< USTORM_AGG_DATA_SIZE
>> 2; i
++)
4682 REG_WR(bp
, BAR_USTRORM_INTMEM
+
4683 USTORM_AGG_DATA_OFFSET
+ 4*i
, 0);
4685 for_each_queue(bp
, i
) {
4686 struct bnx2x_fastpath
*fp
= &bp
->fp
[i
];
4689 REG_WR(bp
, BAR_USTRORM_INTMEM
+
4690 USTORM_CQE_PAGE_BASE_OFFSET(port
, FP_CL_ID(fp
)),
4691 U64_LO(fp
->rx_comp_mapping
));
4692 REG_WR(bp
, BAR_USTRORM_INTMEM
+
4693 USTORM_CQE_PAGE_BASE_OFFSET(port
, FP_CL_ID(fp
)) + 4,
4694 U64_HI(fp
->rx_comp_mapping
));
4696 max_agg_size
= min((u32
)(bp
->rx_buf_use_size
+
4697 8*BCM_PAGE_SIZE
*PAGES_PER_SGE
),
4699 REG_WR16(bp
, BAR_USTRORM_INTMEM
+
4700 USTORM_MAX_AGG_SIZE_OFFSET(port
, FP_CL_ID(fp
)),
static void bnx2x_nic_init(struct bnx2x *bp)
{
	int i;

	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		fp->state = BNX2X_FP_STATE_CLOSED;
		fp->cl_id = BP_L_ID(bp) + i;
		fp->sb_id = fp->cl_id;
		DP(NETIF_MSG_IFUP,
		   "bnx2x_init_sb(%p,%p) index %d  cl_id %d  sb %d\n",
		   bp, fp->status_blk, i, FP_CL_ID(fp), FP_SB_ID(fp));
		bnx2x_init_sb(bp, FP_SB_ID(fp), fp->status_blk,
			      fp->status_blk_mapping);
	}

	bnx2x_init_def_sb(bp, bp->def_status_blk,
			  bp->def_status_blk_mapping, DEF_SB_ID);
	bnx2x_update_coalesce(bp);
	bnx2x_init_rx_rings(bp);
	bnx2x_init_tx_ring(bp);
	bnx2x_init_sp_ring(bp);
	bnx2x_init_context(bp);
	bnx2x_init_internal(bp);
	bnx2x_storm_stats_init(bp);
	bnx2x_init_ind_table(bp);
	bnx2x_int_enable(bp);
}

/* end of nic init */
/* gzip service functions */

static int bnx2x_gunzip_init(struct bnx2x *bp)
{
	bp->gunzip_buf = pci_alloc_consistent(bp->pdev, FW_BUF_SIZE,
					      &bp->gunzip_mapping);
	if (bp->gunzip_buf == NULL)
		goto gunzip_nomem1;

	bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
	if (bp->strm == NULL)
		goto gunzip_nomem2;

	bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
				      GFP_KERNEL);
	if (bp->strm->workspace == NULL)
		goto gunzip_nomem3;

	return 0;

gunzip_nomem3:
	kfree(bp->strm);
	bp->strm = NULL;

gunzip_nomem2:
	pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
			    bp->gunzip_mapping);
	bp->gunzip_buf = NULL;

gunzip_nomem1:
	printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for"
	       " un-compression\n", bp->dev->name);
	return -ENOMEM;
}

static void bnx2x_gunzip_end(struct bnx2x *bp)
{
	kfree(bp->strm->workspace);

	kfree(bp->strm);
	bp->strm = NULL;

	if (bp->gunzip_buf) {
		pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
				    bp->gunzip_mapping);
		bp->gunzip_buf = NULL;
	}
}

static int bnx2x_gunzip(struct bnx2x *bp, u8 *zbuf, int len)
{
	int n, rc;

	/* check gzip header */
	if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED))
		return -EINVAL;

	n = 10;

#define FNAME			0x8

	if (zbuf[3] & FNAME)
		while ((zbuf[n++] != 0) && (n < len));

	bp->strm->next_in = zbuf + n;
	bp->strm->avail_in = len - n;
	bp->strm->next_out = bp->gunzip_buf;
	bp->strm->avail_out = FW_BUF_SIZE;

	rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
	if (rc != Z_OK)
		return rc;

	rc = zlib_inflate(bp->strm, Z_FINISH);
	if ((rc != Z_OK) && (rc != Z_STREAM_END))
		printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
		       bp->dev->name, bp->strm->msg);

	bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
	if (bp->gunzip_outlen & 0x3)
		printk(KERN_ERR PFX "%s: Firmware decompression error:"
				    " gunzip_outlen (%d) not aligned\n",
		       bp->dev->name, bp->gunzip_outlen);
	bp->gunzip_outlen >>= 2;

	zlib_inflateEnd(bp->strm);

	if (rc == Z_STREAM_END)
		return 0;

	return rc;
}
/* nic load/unload */

/*
 * General service functions
 */

/* send a NIG loopback debug packet */
static void bnx2x_lb_pckt(struct bnx2x *bp)
{
	u32 wb_write[3];

	/* Ethernet source and destination addresses */
	wb_write[0] = 0x55555555;
	wb_write[1] = 0x55555555;
	wb_write[2] = 0x20;		/* SOP */
	REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);

	/* NON-IP protocol */
	wb_write[0] = 0x09000000;
	wb_write[1] = 0x55555555;
	wb_write[2] = 0x10;		/* EOP, eop_bvalid = 0 */
	REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
}
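/* The two DMAE writes above push one minimal debug frame through the NIG
 * loopback path: the first word group carries dummy MAC addresses with the
 * SOP flag (0x20), the second carries a non-IP protocol word with the EOP
 * flag (0x10).  That is enough for the BRB/PRS packet counters used by the
 * internal memory self test to tick.
 */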
4858 /* some of the internal memories
4859 * are not directly readable from the driver
4860 * to test them we send debug packets
4862 static int bnx2x_int_mem_test(struct bnx2x
*bp
)
4868 if (CHIP_REV_IS_FPGA(bp
))
4870 else if (CHIP_REV_IS_EMUL(bp
))
4875 DP(NETIF_MSG_HW
, "start part1\n");
4877 /* Disable inputs of parser neighbor blocks */
4878 REG_WR(bp
, TSDM_REG_ENABLE_IN1
, 0x0);
4879 REG_WR(bp
, TCM_REG_PRS_IFEN
, 0x0);
4880 REG_WR(bp
, CFC_REG_DEBUG0
, 0x1);
4881 NIG_WR(NIG_REG_PRS_REQ_IN_EN
, 0x0);
4883 /* Write 0 to parser credits for CFC search request */
4884 REG_WR(bp
, PRS_REG_CFC_SEARCH_INITIAL_CREDIT
, 0x0);
4886 /* send Ethernet packet */
4889 /* TODO do i reset NIG statistic? */
4890 /* Wait until NIG register shows 1 packet of size 0x10 */
4891 count
= 1000 * factor
;
4894 bnx2x_read_dmae(bp
, NIG_REG_STAT2_BRB_OCTET
, 2);
4895 val
= *bnx2x_sp(bp
, wb_data
[0]);
4903 BNX2X_ERR("NIG timeout val = 0x%x\n", val
);
4907 /* Wait until PRS register shows 1 packet */
4908 count
= 1000 * factor
;
4910 val
= REG_RD(bp
, PRS_REG_NUM_OF_PACKETS
);
4918 BNX2X_ERR("PRS timeout val = 0x%x\n", val
);
4922 /* Reset and init BRB, PRS */
4923 REG_WR(bp
, GRCBASE_MISC
+ MISC_REGISTERS_RESET_REG_1_CLEAR
, 0x03);
4925 REG_WR(bp
, GRCBASE_MISC
+ MISC_REGISTERS_RESET_REG_1_SET
, 0x03);
4927 bnx2x_init_block(bp
, BRB1_COMMON_START
, BRB1_COMMON_END
);
4928 bnx2x_init_block(bp
, PRS_COMMON_START
, PRS_COMMON_END
);
4930 DP(NETIF_MSG_HW
, "part2\n");
4932 /* Disable inputs of parser neighbor blocks */
4933 REG_WR(bp
, TSDM_REG_ENABLE_IN1
, 0x0);
4934 REG_WR(bp
, TCM_REG_PRS_IFEN
, 0x0);
4935 REG_WR(bp
, CFC_REG_DEBUG0
, 0x1);
4936 NIG_WR(NIG_REG_PRS_REQ_IN_EN
, 0x0);
4938 /* Write 0 to parser credits for CFC search request */
4939 REG_WR(bp
, PRS_REG_CFC_SEARCH_INITIAL_CREDIT
, 0x0);
4941 /* send 10 Ethernet packets */
4942 for (i
= 0; i
< 10; i
++)
4945 /* Wait until NIG register shows 10 + 1
4946 packets of size 11*0x10 = 0xb0 */
4947 count
= 1000 * factor
;
4950 bnx2x_read_dmae(bp
, NIG_REG_STAT2_BRB_OCTET
, 2);
4951 val
= *bnx2x_sp(bp
, wb_data
[0]);
4959 BNX2X_ERR("NIG timeout val = 0x%x\n", val
);
4963 /* Wait until PRS register shows 2 packets */
4964 val
= REG_RD(bp
, PRS_REG_NUM_OF_PACKETS
);
4966 BNX2X_ERR("PRS timeout val = 0x%x\n", val
);
4968 /* Write 1 to parser credits for CFC search request */
4969 REG_WR(bp
, PRS_REG_CFC_SEARCH_INITIAL_CREDIT
, 0x1);
4971 /* Wait until PRS register shows 3 packets */
4972 msleep(10 * factor
);
4973 /* Wait until NIG register shows 1 packet of size 0x10 */
4974 val
= REG_RD(bp
, PRS_REG_NUM_OF_PACKETS
);
4976 BNX2X_ERR("PRS timeout val = 0x%x\n", val
);
4978 /* clear NIG EOP FIFO */
4979 for (i
= 0; i
< 11; i
++)
4980 REG_RD(bp
, NIG_REG_INGRESS_EOP_LB_FIFO
);
4981 val
= REG_RD(bp
, NIG_REG_INGRESS_EOP_LB_EMPTY
);
4983 BNX2X_ERR("clear of NIG failed\n");
4987 /* Reset and init BRB, PRS, NIG */
4988 REG_WR(bp
, GRCBASE_MISC
+ MISC_REGISTERS_RESET_REG_1_CLEAR
, 0x03);
4990 REG_WR(bp
, GRCBASE_MISC
+ MISC_REGISTERS_RESET_REG_1_SET
, 0x03);
4992 bnx2x_init_block(bp
, BRB1_COMMON_START
, BRB1_COMMON_END
);
4993 bnx2x_init_block(bp
, PRS_COMMON_START
, PRS_COMMON_END
);
4996 REG_WR(bp
, PRS_REG_NIC_MODE
, 1);
4999 /* Enable inputs of parser neighbor blocks */
5000 REG_WR(bp
, TSDM_REG_ENABLE_IN1
, 0x7fffffff);
5001 REG_WR(bp
, TCM_REG_PRS_IFEN
, 0x1);
5002 REG_WR(bp
, CFC_REG_DEBUG0
, 0x0);
5003 NIG_WR(NIG_REG_PRS_REQ_IN_EN
, 0x1);
5005 DP(NETIF_MSG_HW
, "done\n");
static void enable_blocks_attention(struct bnx2x *bp)
{
	REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
	REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
	REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
	REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
	REG_WR(bp, QM_REG_QM_INT_MASK, 0);
	REG_WR(bp, TM_REG_TM_INT_MASK, 0);
	REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
	REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
	REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
/*	REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
/*	REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
	REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
	REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
	REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
/*	REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
/*	REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
	REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
	REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
	REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
	REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
/*	REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
/*	REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
	if (CHIP_REV_IS_FPGA(bp))
		REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
	else
		REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
	REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
	REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
	REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
/*	REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
/*	REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
	REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
	REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
/*	REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
	REG_WR(bp, PBF_REG_PBF_INT_MASK, 0X18);		/* bit 3,4 masked */
}
5050 static int bnx2x_init_common(struct bnx2x
*bp
)
5054 DP(BNX2X_MSG_MCP
, "starting common init func %d\n", BP_FUNC(bp
));
5056 REG_WR(bp
, GRCBASE_MISC
+ MISC_REGISTERS_RESET_REG_1_SET
, 0xffffffff);
5057 REG_WR(bp
, GRCBASE_MISC
+ MISC_REGISTERS_RESET_REG_2_SET
, 0xfffc);
5059 bnx2x_init_block(bp
, MISC_COMMON_START
, MISC_COMMON_END
);
5060 if (CHIP_IS_E1H(bp
))
5061 REG_WR(bp
, MISC_REG_E1HMF_MODE
, IS_E1HMF(bp
));
5063 REG_WR(bp
, MISC_REG_LCPLL_CTRL_REG_2
, 0x100);
5065 REG_WR(bp
, MISC_REG_LCPLL_CTRL_REG_2
, 0x0);
5067 bnx2x_init_block(bp
, PXP_COMMON_START
, PXP_COMMON_END
);
5068 if (CHIP_IS_E1(bp
)) {
5069 /* enable HW interrupt from PXP on USDM overflow
5070 bit 16 on INT_MASK_0 */
5071 REG_WR(bp
, PXP_REG_PXP_INT_MASK_0
, 0);
5074 bnx2x_init_block(bp
, PXP2_COMMON_START
, PXP2_COMMON_END
);
5078 REG_WR(bp
, PXP2_REG_RQ_QM_ENDIAN_M
, 1);
5079 REG_WR(bp
, PXP2_REG_RQ_TM_ENDIAN_M
, 1);
5080 REG_WR(bp
, PXP2_REG_RQ_SRC_ENDIAN_M
, 1);
5081 REG_WR(bp
, PXP2_REG_RQ_CDU_ENDIAN_M
, 1);
5082 REG_WR(bp
, PXP2_REG_RQ_DBG_ENDIAN_M
, 1);
5083 REG_WR(bp
, PXP2_REG_RQ_HC_ENDIAN_M
, 1);
5085 /* REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
5086 REG_WR(bp
, PXP2_REG_RD_QM_SWAP_MODE
, 1);
5087 REG_WR(bp
, PXP2_REG_RD_TM_SWAP_MODE
, 1);
5088 REG_WR(bp
, PXP2_REG_RD_SRC_SWAP_MODE
, 1);
5089 REG_WR(bp
, PXP2_REG_RD_CDURD_SWAP_MODE
, 1);
5094 REG_WR(bp
, PRS_REG_NIC_MODE
, 1);
5097 REG_WR(bp
, PXP2_REG_RQ_CDU_P_SIZE
, 2);
5099 REG_WR(bp
, PXP2_REG_RQ_TM_P_SIZE
, 5);
5100 REG_WR(bp
, PXP2_REG_RQ_QM_P_SIZE
, 5);
5101 REG_WR(bp
, PXP2_REG_RQ_SRC_P_SIZE
, 5);
5104 if (CHIP_REV_IS_FPGA(bp
) && CHIP_IS_E1H(bp
))
5105 REG_WR(bp
, PXP2_REG_PGL_TAGS_LIMIT
, 0x1);
5107 /* let the HW do it's magic ... */
5109 /* finish PXP init */
5110 val
= REG_RD(bp
, PXP2_REG_RQ_CFG_DONE
);
5112 BNX2X_ERR("PXP2 CFG failed\n");
5115 val
= REG_RD(bp
, PXP2_REG_RD_INIT_DONE
);
5117 BNX2X_ERR("PXP2 RD_INIT failed\n");
5121 REG_WR(bp
, PXP2_REG_RQ_DISABLE_INPUTS
, 0);
5122 REG_WR(bp
, PXP2_REG_RD_DISABLE_INPUTS
, 0);
5124 bnx2x_init_block(bp
, DMAE_COMMON_START
, DMAE_COMMON_END
);
5126 /* clean the DMAE memory */
5128 bnx2x_init_fill(bp
, TSEM_REG_PRAM
, 0, 8);
5130 bnx2x_init_block(bp
, TCM_COMMON_START
, TCM_COMMON_END
);
5131 bnx2x_init_block(bp
, UCM_COMMON_START
, UCM_COMMON_END
);
5132 bnx2x_init_block(bp
, CCM_COMMON_START
, CCM_COMMON_END
);
5133 bnx2x_init_block(bp
, XCM_COMMON_START
, XCM_COMMON_END
);
5135 bnx2x_read_dmae(bp
, XSEM_REG_PASSIVE_BUFFER
, 3);
5136 bnx2x_read_dmae(bp
, CSEM_REG_PASSIVE_BUFFER
, 3);
5137 bnx2x_read_dmae(bp
, TSEM_REG_PASSIVE_BUFFER
, 3);
5138 bnx2x_read_dmae(bp
, USEM_REG_PASSIVE_BUFFER
, 3);
5140 bnx2x_init_block(bp
, QM_COMMON_START
, QM_COMMON_END
);
5141 /* soft reset pulse */
5142 REG_WR(bp
, QM_REG_SOFT_RESET
, 1);
5143 REG_WR(bp
, QM_REG_SOFT_RESET
, 0);
5146 bnx2x_init_block(bp
, TIMERS_COMMON_START
, TIMERS_COMMON_END
);
5149 bnx2x_init_block(bp
, DQ_COMMON_START
, DQ_COMMON_END
);
5150 REG_WR(bp
, DORQ_REG_DPM_CID_OFST
, BCM_PAGE_SHIFT
);
5151 if (!CHIP_REV_IS_SLOW(bp
)) {
5152 /* enable hw interrupt from doorbell Q */
5153 REG_WR(bp
, DORQ_REG_DORQ_INT_MASK
, 0);
5156 bnx2x_init_block(bp
, BRB1_COMMON_START
, BRB1_COMMON_END
);
5157 if (CHIP_REV_IS_SLOW(bp
)) {
5158 /* fix for emulation and FPGA for no pause */
5159 REG_WR(bp
, BRB1_REG_PAUSE_HIGH_THRESHOLD_0
, 513);
5160 REG_WR(bp
, BRB1_REG_PAUSE_HIGH_THRESHOLD_1
, 513);
5161 REG_WR(bp
, BRB1_REG_PAUSE_LOW_THRESHOLD_0
, 0);
5162 REG_WR(bp
, BRB1_REG_PAUSE_LOW_THRESHOLD_1
, 0);
5165 bnx2x_init_block(bp
, PRS_COMMON_START
, PRS_COMMON_END
);
5166 if (CHIP_IS_E1H(bp
))
5167 REG_WR(bp
, PRS_REG_E1HOV_MODE
, IS_E1HMF(bp
));
5169 bnx2x_init_block(bp
, TSDM_COMMON_START
, TSDM_COMMON_END
);
5170 bnx2x_init_block(bp
, CSDM_COMMON_START
, CSDM_COMMON_END
);
5171 bnx2x_init_block(bp
, USDM_COMMON_START
, USDM_COMMON_END
);
5172 bnx2x_init_block(bp
, XSDM_COMMON_START
, XSDM_COMMON_END
);
5174 if (CHIP_IS_E1H(bp
)) {
5175 bnx2x_init_fill(bp
, TSTORM_INTMEM_ADDR
, 0,
5176 STORM_INTMEM_SIZE_E1H
/2);
5178 TSTORM_INTMEM_ADDR
+ STORM_INTMEM_SIZE_E1H
/2,
5179 0, STORM_INTMEM_SIZE_E1H
/2);
5180 bnx2x_init_fill(bp
, CSTORM_INTMEM_ADDR
, 0,
5181 STORM_INTMEM_SIZE_E1H
/2);
5183 CSTORM_INTMEM_ADDR
+ STORM_INTMEM_SIZE_E1H
/2,
5184 0, STORM_INTMEM_SIZE_E1H
/2);
5185 bnx2x_init_fill(bp
, XSTORM_INTMEM_ADDR
, 0,
5186 STORM_INTMEM_SIZE_E1H
/2);
5188 XSTORM_INTMEM_ADDR
+ STORM_INTMEM_SIZE_E1H
/2,
5189 0, STORM_INTMEM_SIZE_E1H
/2);
5190 bnx2x_init_fill(bp
, USTORM_INTMEM_ADDR
, 0,
5191 STORM_INTMEM_SIZE_E1H
/2);
5193 USTORM_INTMEM_ADDR
+ STORM_INTMEM_SIZE_E1H
/2,
5194 0, STORM_INTMEM_SIZE_E1H
/2);
5196 bnx2x_init_fill(bp
, TSTORM_INTMEM_ADDR
, 0,
5197 STORM_INTMEM_SIZE_E1
);
5198 bnx2x_init_fill(bp
, CSTORM_INTMEM_ADDR
, 0,
5199 STORM_INTMEM_SIZE_E1
);
5200 bnx2x_init_fill(bp
, XSTORM_INTMEM_ADDR
, 0,
5201 STORM_INTMEM_SIZE_E1
);
5202 bnx2x_init_fill(bp
, USTORM_INTMEM_ADDR
, 0,
5203 STORM_INTMEM_SIZE_E1
);
5206 bnx2x_init_block(bp
, TSEM_COMMON_START
, TSEM_COMMON_END
);
5207 bnx2x_init_block(bp
, USEM_COMMON_START
, USEM_COMMON_END
);
5208 bnx2x_init_block(bp
, CSEM_COMMON_START
, CSEM_COMMON_END
);
5209 bnx2x_init_block(bp
, XSEM_COMMON_START
, XSEM_COMMON_END
);
5212 REG_WR(bp
, GRCBASE_MISC
+ MISC_REGISTERS_RESET_REG_1_CLEAR
,
5214 REG_WR(bp
, GRCBASE_MISC
+ MISC_REGISTERS_RESET_REG_1_SET
,
5217 bnx2x_init_block(bp
, UPB_COMMON_START
, UPB_COMMON_END
);
5218 bnx2x_init_block(bp
, XPB_COMMON_START
, XPB_COMMON_END
);
5219 bnx2x_init_block(bp
, PBF_COMMON_START
, PBF_COMMON_END
);
5221 REG_WR(bp
, SRC_REG_SOFT_RST
, 1);
5222 for (i
= SRC_REG_KEYRSS0_0
; i
<= SRC_REG_KEYRSS1_9
; i
+= 4) {
5223 REG_WR(bp
, i
, 0xc0cac01a);
5224 /* TODO: replace with something meaningful */
5226 if (CHIP_IS_E1H(bp
))
5227 bnx2x_init_block(bp
, SRCH_COMMON_START
, SRCH_COMMON_END
);
5228 REG_WR(bp
, SRC_REG_SOFT_RST
, 0);
5230 if (sizeof(union cdu_context
) != 1024)
5231 /* we currently assume that a context is 1024 bytes */
5232 printk(KERN_ALERT PFX
"please adjust the size of"
5233 " cdu_context(%ld)\n", (long)sizeof(union cdu_context
));
5235 bnx2x_init_block(bp
, CDU_COMMON_START
, CDU_COMMON_END
);
5236 val
= (4 << 24) + (0 << 12) + 1024;
5237 REG_WR(bp
, CDU_REG_CDU_GLOBAL_PARAMS
, val
);
5238 if (CHIP_IS_E1(bp
)) {
5239 /* !!! fix pxp client crdit until excel update */
5240 REG_WR(bp
, CDU_REG_CDU_DEBUG
, 0x264);
5241 REG_WR(bp
, CDU_REG_CDU_DEBUG
, 0);
5244 bnx2x_init_block(bp
, CFC_COMMON_START
, CFC_COMMON_END
);
5245 REG_WR(bp
, CFC_REG_INIT_REG
, 0x7FF);
5247 bnx2x_init_block(bp
, HC_COMMON_START
, HC_COMMON_END
);
5248 bnx2x_init_block(bp
, MISC_AEU_COMMON_START
, MISC_AEU_COMMON_END
);
5250 /* PXPCS COMMON comes here */
5251 /* Reset PCIE errors for debug */
5252 REG_WR(bp
, 0x2814, 0xffffffff);
5253 REG_WR(bp
, 0x3820, 0xffffffff);
5255 /* EMAC0 COMMON comes here */
5256 /* EMAC1 COMMON comes here */
5257 /* DBU COMMON comes here */
5258 /* DBG COMMON comes here */
5260 bnx2x_init_block(bp
, NIG_COMMON_START
, NIG_COMMON_END
);
5261 if (CHIP_IS_E1H(bp
)) {
5262 REG_WR(bp
, NIG_REG_LLH_MF_MODE
, IS_E1HMF(bp
));
5263 REG_WR(bp
, NIG_REG_LLH_E1HOV_MODE
, IS_E1HMF(bp
));
5266 if (CHIP_REV_IS_SLOW(bp
))
5269 /* finish CFC init */
5270 val
= reg_poll(bp
, CFC_REG_LL_INIT_DONE
, 1, 100, 10);
5272 BNX2X_ERR("CFC LL_INIT failed\n");
5275 val
= reg_poll(bp
, CFC_REG_AC_INIT_DONE
, 1, 100, 10);
5277 BNX2X_ERR("CFC AC_INIT failed\n");
5280 val
= reg_poll(bp
, CFC_REG_CAM_INIT_DONE
, 1, 100, 10);
5282 BNX2X_ERR("CFC CAM_INIT failed\n");
5285 REG_WR(bp
, CFC_REG_DEBUG0
, 0);
5287 /* read NIG statistic
5288 to see if this is our first up since powerup */
5289 bnx2x_read_dmae(bp
, NIG_REG_STAT2_BRB_OCTET
, 2);
5290 val
= *bnx2x_sp(bp
, wb_data
[0]);
5292 /* do internal memory self test */
5293 if ((CHIP_IS_E1(bp
)) && (val
== 0) && bnx2x_int_mem_test(bp
)) {
5294 BNX2X_ERR("internal mem self test failed\n");
5298 switch (bp
->common
.board
& SHARED_HW_CFG_BOARD_TYPE_MASK
) {
5299 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G
:
5300 /* Fan failure is indicated by SPIO 5 */
5301 bnx2x_set_spio(bp
, MISC_REGISTERS_SPIO_5
,
5302 MISC_REGISTERS_SPIO_INPUT_HI_Z
);
5304 /* set to active low mode */
5305 val
= REG_RD(bp
, MISC_REG_SPIO_INT
);
5306 val
|= ((1 << MISC_REGISTERS_SPIO_5
) <<
5307 MISC_REGISTERS_SPIO_INT_OLD_SET_POS
);
5308 REG_WR(bp
, MISC_REG_SPIO_INT
, val
);
5310 /* enable interrupt to signal the IGU */
5311 val
= REG_RD(bp
, MISC_REG_SPIO_EVENT_EN
);
5312 val
|= (1 << MISC_REGISTERS_SPIO_5
);
5313 REG_WR(bp
, MISC_REG_SPIO_EVENT_EN
, val
);
5320 /* clear PXP2 attentions */
5321 REG_RD(bp
, PXP2_REG_PXP2_INT_STS_CLR_0
);
5323 enable_blocks_attention(bp
);
5325 if (bp
->flags
& TPA_ENABLE_FLAG
) {
5326 struct tstorm_eth_tpa_exist tmp
= {0};
5330 REG_WR(bp
, BAR_TSTRORM_INTMEM
+ TSTORM_TPA_EXIST_OFFSET
,
5332 REG_WR(bp
, BAR_TSTRORM_INTMEM
+ TSTORM_TPA_EXIST_OFFSET
+ 4,
5339 static int bnx2x_init_port(struct bnx2x
*bp
)
5341 int port
= BP_PORT(bp
);
5344 DP(BNX2X_MSG_MCP
, "starting port init port %x\n", port
);
5346 REG_WR(bp
, NIG_REG_MASK_INTERRUPT_PORT0
+ port
*4, 0);
5348 /* Port PXP comes here */
5349 /* Port PXP2 comes here */
5354 wb_write
[0] = ONCHIP_ADDR1(bp
->timers_mapping
);
5355 wb_write
[1] = ONCHIP_ADDR2(bp
->timers_mapping
);
5356 REG_WR_DMAE(bp
, PXP2_REG_RQ_ONCHIP_AT
+ i
*8, wb_write
, 2);
5357 REG_WR(bp
, PXP2_REG_PSWRQ_TM0_L2P
+ func
*4, PXP_ONE_ILT(i
));
5362 wb_write
[0] = ONCHIP_ADDR1(bp
->qm_mapping
);
5363 wb_write
[1] = ONCHIP_ADDR2(bp
->qm_mapping
);
5364 REG_WR_DMAE(bp
, PXP2_REG_RQ_ONCHIP_AT
+ i
*8, wb_write
, 2);
5365 REG_WR(bp
, PXP2_REG_PSWRQ_QM0_L2P
+ func
*4, PXP_ONE_ILT(i
));
5370 wb_write
[0] = ONCHIP_ADDR1(bp
->t1_mapping
);
5371 wb_write
[1] = ONCHIP_ADDR2(bp
->t1_mapping
);
5372 REG_WR_DMAE(bp
, PXP2_REG_RQ_ONCHIP_AT
+ i
*8, wb_write
, 2);
5373 REG_WR(bp
, PXP2_REG_PSWRQ_SRC0_L2P
+ func
*4, PXP_ONE_ILT(i
));
5375 /* Port CMs come here */
5377 /* Port QM comes here */
5379 REG_WR(bp
, TM_REG_LIN0_SCAN_TIME
+ func
*4, 1024/64*20);
5380 REG_WR(bp
, TM_REG_LIN0_MAX_ACTIVE_CID
+ func
*4, 31);
5382 bnx2x_init_block(bp
, func
? TIMERS_PORT1_START
: TIMERS_PORT0_START
,
5383 func
? TIMERS_PORT1_END
: TIMERS_PORT0_END
);
5385 /* Port DQ comes here */
5386 /* Port BRB1 comes here */
5387 /* Port PRS comes here */
5388 /* Port TSDM comes here */
5389 /* Port CSDM comes here */
5390 /* Port USDM comes here */
5391 /* Port XSDM comes here */
5392 bnx2x_init_block(bp
, port
? TSEM_PORT1_START
: TSEM_PORT0_START
,
5393 port
? TSEM_PORT1_END
: TSEM_PORT0_END
);
5394 bnx2x_init_block(bp
, port
? USEM_PORT1_START
: USEM_PORT0_START
,
5395 port
? USEM_PORT1_END
: USEM_PORT0_END
);
5396 bnx2x_init_block(bp
, port
? CSEM_PORT1_START
: CSEM_PORT0_START
,
5397 port
? CSEM_PORT1_END
: CSEM_PORT0_END
);
5398 bnx2x_init_block(bp
, port
? XSEM_PORT1_START
: XSEM_PORT0_START
,
5399 port
? XSEM_PORT1_END
: XSEM_PORT0_END
);
5400 /* Port UPB comes here */
5401 /* Port XPB comes here */
5403 bnx2x_init_block(bp
, port
? PBF_PORT1_START
: PBF_PORT0_START
,
5404 port
? PBF_PORT1_END
: PBF_PORT0_END
);
5406 /* configure PBF to work without PAUSE mtu 9000 */
5407 REG_WR(bp
, PBF_REG_P0_PAUSE_ENABLE
+ port
*4, 0);
5409 /* update threshold */
5410 REG_WR(bp
, PBF_REG_P0_ARB_THRSH
+ port
*4, (9040/16));
5411 /* update init credit */
5412 REG_WR(bp
, PBF_REG_P0_INIT_CRD
+ port
*4, (9040/16) + 553 - 22);
5415 REG_WR(bp
, PBF_REG_INIT_P0
+ port
*4, 1);
5417 REG_WR(bp
, PBF_REG_INIT_P0
+ port
*4, 0);
5420 /* tell the searcher where the T2 table is */
5421 REG_WR(bp
, SRC_REG_COUNTFREE0
+ func
*4, 16*1024/64);
5423 wb_write
[0] = U64_LO(bp
->t2_mapping
);
5424 wb_write
[1] = U64_HI(bp
->t2_mapping
);
5425 REG_WR_DMAE(bp
, SRC_REG_FIRSTFREE0
+ func
*4, wb_write
, 2);
5426 wb_write
[0] = U64_LO((u64
)bp
->t2_mapping
+ 16*1024 - 64);
5427 wb_write
[1] = U64_HI((u64
)bp
->t2_mapping
+ 16*1024 - 64);
5428 REG_WR_DMAE(bp
, SRC_REG_LASTFREE0
+ func
*4, wb_write
, 2);
5430 REG_WR(bp
, SRC_REG_NUMBER_HASH_BITS0
+ func
*4, 10);
5431 /* Port SRCH comes here */
5433 /* Port CDU comes here */
5434 /* Port CFC comes here */
5436 if (CHIP_IS_E1(bp
)) {
5437 REG_WR(bp
, HC_REG_LEADING_EDGE_0
+ port
*8, 0);
5438 REG_WR(bp
, HC_REG_TRAILING_EDGE_0
+ port
*8, 0);
5440 bnx2x_init_block(bp
, port
? HC_PORT1_START
: HC_PORT0_START
,
5441 port
? HC_PORT1_END
: HC_PORT0_END
);
5443 bnx2x_init_block(bp
, port
? MISC_AEU_PORT1_START
:
5444 MISC_AEU_PORT0_START
,
5445 port
? MISC_AEU_PORT1_END
: MISC_AEU_PORT0_END
);
5446 /* init aeu_mask_attn_func_0/1:
5447 * - SF mode: bits 3-7 are masked. only bits 0-2 are in use
5448 * - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
5449 * bits 4-7 are used for "per vn group attention" */
5450 REG_WR(bp
, MISC_REG_AEU_MASK_ATTN_FUNC_0
+ port
*4,
5451 (IS_E1HMF(bp
) ? 0xF7 : 0x7));
5453 /* Port PXPCS comes here */
5454 /* Port EMAC0 comes here */
5455 /* Port EMAC1 comes here */
5456 /* Port DBU comes here */
5457 /* Port DBG comes here */
5458 bnx2x_init_block(bp
, port
? NIG_PORT1_START
: NIG_PORT0_START
,
5459 port
? NIG_PORT1_END
: NIG_PORT0_END
);
5461 REG_WR(bp
, NIG_REG_XGXS_SERDES0_MODE_SEL
+ port
*4, 1);
5463 if (CHIP_IS_E1H(bp
)) {
5465 struct cmng_struct_per_port m_cmng_port
;
5468 /* 0x2 disable e1hov, 0x1 enable */
5469 REG_WR(bp
, NIG_REG_LLH0_BRB1_DRV_MASK_MF
+ port
*4,
5470 (IS_E1HMF(bp
) ? 0x1 : 0x2));
5472 /* Init RATE SHAPING and FAIRNESS contexts.
5473 Initialize as if there is 10G link. */
5474 wsum
= bnx2x_calc_vn_wsum(bp
);
5475 bnx2x_init_port_minmax(bp
, (int)wsum
, 10000, &m_cmng_port
);
5477 for (vn
= VN_0
; vn
< E1HVN_MAX
; vn
++)
5478 bnx2x_init_vn_minmax(bp
, 2*vn
+ port
,
5479 wsum
, 10000, &m_cmng_port
);
5482 /* Port MCP comes here */
5483 /* Port DMAE comes here */
5485 switch (bp
->common
.board
& SHARED_HW_CFG_BOARD_TYPE_MASK
) {
5486 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G
:
5487 /* add SPIO 5 to group 0 */
5488 val
= REG_RD(bp
, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0
);
5489 val
|= AEU_INPUTS_ATTN_BITS_SPIO5
;
5490 REG_WR(bp
, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0
, val
);
5497 bnx2x__link_reset(bp
);
#define ILT_PER_FUNC		(768/2)
#define FUNC_ILT_BASE(func)	(func * ILT_PER_FUNC)
/* the phys address is shifted right 12 bits and has an added
   1=valid bit added to the 53rd bit
   then since this is a wide register(TM)
   we split it into two 32 bit writes
 */
#define ONCHIP_ADDR1(x)		((u32)(((u64)x >> 12) & 0xFFFFFFFF))
#define ONCHIP_ADDR2(x)		((u32)((1 << 20) | ((u64)x >> 44)))
#define PXP_ONE_ILT(x)		(((x) << 10) | x)
#define PXP_ILT_RANGE(f, l)	(((l) << 10) | f)

#define CNIC_ILT_LINES		0

static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
{
	int reg;

	if (CHIP_IS_E1H(bp))
		reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
	else
		reg = PXP2_REG_RQ_ONCHIP_AT + index*8;

	bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
}
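/* Illustration (not from the original source): for a 48-bit DMA address such
 * as 0x123456789000, ONCHIP_ADDR1 yields (addr >> 12) & 0xffffffff =
 * 0x23456789 and ONCHIP_ADDR2 yields (1 << 20) | (addr >> 44) = 0x00100001,
 * i.e. the page-aligned address is split across two 32-bit register writes
 * with the valid bit set in the upper half.
 */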
5528 static int bnx2x_init_func(struct bnx2x
*bp
)
5530 int port
= BP_PORT(bp
);
5531 int func
= BP_FUNC(bp
);
5534 DP(BNX2X_MSG_MCP
, "starting func init func %x\n", func
);
5536 i
= FUNC_ILT_BASE(func
);
5538 bnx2x_ilt_wr(bp
, i
, bnx2x_sp_mapping(bp
, context
));
5539 if (CHIP_IS_E1H(bp
)) {
5540 REG_WR(bp
, PXP2_REG_RQ_CDU_FIRST_ILT
, i
);
5541 REG_WR(bp
, PXP2_REG_RQ_CDU_LAST_ILT
, i
+ CNIC_ILT_LINES
);
5543 REG_WR(bp
, PXP2_REG_PSWRQ_CDU0_L2P
+ func
*4,
5544 PXP_ILT_RANGE(i
, i
+ CNIC_ILT_LINES
));
5547 if (CHIP_IS_E1H(bp
)) {
5548 for (i
= 0; i
< 9; i
++)
5549 bnx2x_init_block(bp
,
5550 cm_start
[func
][i
], cm_end
[func
][i
]);
5552 REG_WR(bp
, NIG_REG_LLH0_FUNC_EN
+ port
*8, 1);
5553 REG_WR(bp
, NIG_REG_LLH0_FUNC_VLAN_ID
+ port
*8, bp
->e1hov
);
5556 /* HC init per function */
5557 if (CHIP_IS_E1H(bp
)) {
5558 REG_WR(bp
, MISC_REG_AEU_GENERAL_ATTN_12
+ func
*4, 0);
5560 REG_WR(bp
, HC_REG_LEADING_EDGE_0
+ port
*8, 0);
5561 REG_WR(bp
, HC_REG_TRAILING_EDGE_0
+ port
*8, 0);
5563 bnx2x_init_block(bp
, hc_limits
[func
][0], hc_limits
[func
][1]);
5565 if (CHIP_IS_E1H(bp
))
5566 REG_WR(bp
, HC_REG_FUNC_NUM_P0
+ port
*4, func
);
5568 /* Reset PCIE errors for debug */
5569 REG_WR(bp
, 0x2114, 0xffffffff);
5570 REG_WR(bp
, 0x2120, 0xffffffff);
static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
{
	int i, rc = 0;

	DP(BNX2X_MSG_MCP, "function %d  load_code %x\n",
	   BP_FUNC(bp), load_code);

	mutex_init(&bp->dmae_mutex);
	bnx2x_gunzip_init(bp);

	switch (load_code) {
	case FW_MSG_CODE_DRV_LOAD_COMMON:
		rc = bnx2x_init_common(bp);
		if (rc)
			goto init_hw_err;
		/* no break */

	case FW_MSG_CODE_DRV_LOAD_PORT:
		rc = bnx2x_init_port(bp);
		if (rc)
			goto init_hw_err;
		/* no break */

	case FW_MSG_CODE_DRV_LOAD_FUNCTION:
		rc = bnx2x_init_func(bp);
		if (rc)
			goto init_hw_err;
		break;

	default:
		BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
		break;
	}

	if (!BP_NOMCP(bp)) {
		int func = BP_FUNC(bp);

		bp->fw_drv_pulse_wr_seq =
				(SHMEM_RD(bp, func_mb[func].drv_pulse_mb) &
				 DRV_PULSE_SEQ_MASK);
		bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
		DP(BNX2X_MSG_MCP, "drv_pulse 0x%x  func_stx 0x%x\n",
		   bp->fw_drv_pulse_wr_seq, bp->func_stx);
	}

	/* this needs to be done before gunzip end */
	bnx2x_zero_def_sb(bp);
	for_each_queue(bp, i)
		bnx2x_zero_sb(bp, BP_L_ID(bp) + i);

init_hw_err:
	bnx2x_gunzip_end(bp);

	return rc;
}
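/* The switch above falls through on purpose (a sketch of the intent, see the
 * "no break" markers): a COMMON load initializes the shared blocks and then
 * also runs the PORT and FUNCTION stages, a PORT load runs PORT + FUNCTION,
 * and a FUNCTION load only initializes its own function.
 */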
/* send the MCP a request, block until there is a reply */
static u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
{
	int func = BP_FUNC(bp);
	u32 seq = ++bp->fw_seq;
	u32 rc = 0;

	SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
	DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));

	/* let the FW do it's magic ... */
	msleep(100); /* TBD */

	if (CHIP_REV_IS_SLOW(bp))
		msleep(900);

	rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);
	DP(BNX2X_MSG_MCP, "read (%x) seq is (%x) from FW MB\n", rc, seq);

	/* is this a reply to our command? */
	if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK)) {
		rc &= FW_MSG_CODE_MASK;

	} else {
		/* FW BUG! */
		BNX2X_ERR("FW failed to respond!\n");
		rc = 0;
	}

	return rc;
}
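/* Mailbox handshake: every request carries an incrementing sequence number in
 * its low bits (command | seq); the reply read back from fw_mb_header is only
 * accepted when its FW_MSG_SEQ_NUMBER_MASK bits echo that sequence, and the
 * remaining FW_MSG_CODE_MASK bits are the actual response code.
 */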
5668 static void bnx2x_free_mem(struct bnx2x
*bp
)
5671 #define BNX2X_PCI_FREE(x, y, size) \
5674 pci_free_consistent(bp->pdev, size, x, y); \
5680 #define BNX2X_FREE(x) \
5691 for_each_queue(bp
, i
) {
5694 BNX2X_PCI_FREE(bnx2x_fp(bp
, i
, status_blk
),
5695 bnx2x_fp(bp
, i
, status_blk_mapping
),
5696 sizeof(struct host_status_block
) +
5697 sizeof(struct eth_tx_db_data
));
5699 /* fast path rings: tx_buf tx_desc rx_buf rx_desc rx_comp */
5700 BNX2X_FREE(bnx2x_fp(bp
, i
, tx_buf_ring
));
5701 BNX2X_PCI_FREE(bnx2x_fp(bp
, i
, tx_desc_ring
),
5702 bnx2x_fp(bp
, i
, tx_desc_mapping
),
5703 sizeof(struct eth_tx_bd
) * NUM_TX_BD
);
5705 BNX2X_FREE(bnx2x_fp(bp
, i
, rx_buf_ring
));
5706 BNX2X_PCI_FREE(bnx2x_fp(bp
, i
, rx_desc_ring
),
5707 bnx2x_fp(bp
, i
, rx_desc_mapping
),
5708 sizeof(struct eth_rx_bd
) * NUM_RX_BD
);
5710 BNX2X_PCI_FREE(bnx2x_fp(bp
, i
, rx_comp_ring
),
5711 bnx2x_fp(bp
, i
, rx_comp_mapping
),
5712 sizeof(struct eth_fast_path_rx_cqe
) *
5716 BNX2X_PCI_FREE(bnx2x_fp(bp
, i
, rx_sge_ring
),
5717 bnx2x_fp(bp
, i
, rx_sge_mapping
),
5718 BCM_PAGE_SIZE
* NUM_RX_SGE_PAGES
);
5720 /* end of fastpath */
5722 BNX2X_PCI_FREE(bp
->def_status_blk
, bp
->def_status_blk_mapping
,
5723 sizeof(struct host_def_status_block
));
5725 BNX2X_PCI_FREE(bp
->slowpath
, bp
->slowpath_mapping
,
5726 sizeof(struct bnx2x_slowpath
));
5729 BNX2X_PCI_FREE(bp
->t1
, bp
->t1_mapping
, 64*1024);
5730 BNX2X_PCI_FREE(bp
->t2
, bp
->t2_mapping
, 16*1024);
5731 BNX2X_PCI_FREE(bp
->timers
, bp
->timers_mapping
, 8*1024);
5732 BNX2X_PCI_FREE(bp
->qm
, bp
->qm_mapping
, 128*1024);
5734 BNX2X_PCI_FREE(bp
->spq
, bp
->spq_mapping
, BCM_PAGE_SIZE
);
5736 #undef BNX2X_PCI_FREE
5740 static int bnx2x_alloc_mem(struct bnx2x
*bp
)
5743 #define BNX2X_PCI_ALLOC(x, y, size) \
5745 x = pci_alloc_consistent(bp->pdev, size, y); \
5747 goto alloc_mem_err; \
5748 memset(x, 0, size); \
5751 #define BNX2X_ALLOC(x, size) \
5753 x = vmalloc(size); \
5755 goto alloc_mem_err; \
5756 memset(x, 0, size); \
5762 for_each_queue(bp
, i
) {
5763 bnx2x_fp(bp
, i
, bp
) = bp
;
5766 BNX2X_PCI_ALLOC(bnx2x_fp(bp
, i
, status_blk
),
5767 &bnx2x_fp(bp
, i
, status_blk_mapping
),
5768 sizeof(struct host_status_block
) +
5769 sizeof(struct eth_tx_db_data
));
5771 bnx2x_fp(bp
, i
, hw_tx_prods
) =
5772 (void *)(bnx2x_fp(bp
, i
, status_blk
) + 1);
5774 bnx2x_fp(bp
, i
, tx_prods_mapping
) =
5775 bnx2x_fp(bp
, i
, status_blk_mapping
) +
5776 sizeof(struct host_status_block
);
5778 /* fast path rings: tx_buf tx_desc rx_buf rx_desc rx_comp */
5779 BNX2X_ALLOC(bnx2x_fp(bp
, i
, tx_buf_ring
),
5780 sizeof(struct sw_tx_bd
) * NUM_TX_BD
);
5781 BNX2X_PCI_ALLOC(bnx2x_fp(bp
, i
, tx_desc_ring
),
5782 &bnx2x_fp(bp
, i
, tx_desc_mapping
),
5783 sizeof(struct eth_tx_bd
) * NUM_TX_BD
);
5785 BNX2X_ALLOC(bnx2x_fp(bp
, i
, rx_buf_ring
),
5786 sizeof(struct sw_rx_bd
) * NUM_RX_BD
);
5787 BNX2X_PCI_ALLOC(bnx2x_fp(bp
, i
, rx_desc_ring
),
5788 &bnx2x_fp(bp
, i
, rx_desc_mapping
),
5789 sizeof(struct eth_rx_bd
) * NUM_RX_BD
);
5791 BNX2X_PCI_ALLOC(bnx2x_fp(bp
, i
, rx_comp_ring
),
5792 &bnx2x_fp(bp
, i
, rx_comp_mapping
),
5793 sizeof(struct eth_fast_path_rx_cqe
) *
5797 BNX2X_ALLOC(bnx2x_fp(bp
, i
, rx_page_ring
),
5798 sizeof(struct sw_rx_page
) * NUM_RX_SGE
);
5799 BNX2X_PCI_ALLOC(bnx2x_fp(bp
, i
, rx_sge_ring
),
5800 &bnx2x_fp(bp
, i
, rx_sge_mapping
),
5801 BCM_PAGE_SIZE
* NUM_RX_SGE_PAGES
);
5803 /* end of fastpath */
5805 BNX2X_PCI_ALLOC(bp
->def_status_blk
, &bp
->def_status_blk_mapping
,
5806 sizeof(struct host_def_status_block
));
5808 BNX2X_PCI_ALLOC(bp
->slowpath
, &bp
->slowpath_mapping
,
5809 sizeof(struct bnx2x_slowpath
));
5812 BNX2X_PCI_ALLOC(bp
->t1
, &bp
->t1_mapping
, 64*1024);
5815 for (i
= 0; i
< 64*1024; i
+= 64) {
5816 *(u64
*)((char *)bp
->t1
+ i
+ 56) = 0x0UL
;
5817 *(u64
*)((char *)bp
->t1
+ i
+ 3) = 0x0UL
;
5820 /* allocate searcher T2 table
5821 we allocate 1/4 of alloc num for T2
5822 (which is not entered into the ILT) */
5823 BNX2X_PCI_ALLOC(bp
->t2
, &bp
->t2_mapping
, 16*1024);
5826 for (i
= 0; i
< 16*1024; i
+= 64)
5827 * (u64
*)((char *)bp
->t2
+ i
+ 56) = bp
->t2_mapping
+ i
+ 64;
5829 /* now fixup the last line in the block to point to the next block */
5830 *(u64
*)((char *)bp
->t2
+ 1024*16-8) = bp
->t2_mapping
;
5832 /* Timer block array (MAX_CONN*8) phys uncached for now 1024 conns */
5833 BNX2X_PCI_ALLOC(bp
->timers
, &bp
->timers_mapping
, 8*1024);
5835 /* QM queues (128*MAX_CONN) */
5836 BNX2X_PCI_ALLOC(bp
->qm
, &bp
->qm_mapping
, 128*1024);
5839 /* Slow path ring */
5840 BNX2X_PCI_ALLOC(bp
->spq
, &bp
->spq_mapping
, BCM_PAGE_SIZE
);
5848 #undef BNX2X_PCI_ALLOC
5852 static void bnx2x_free_tx_skbs(struct bnx2x
*bp
)
5856 for_each_queue(bp
, i
) {
5857 struct bnx2x_fastpath
*fp
= &bp
->fp
[i
];
5859 u16 bd_cons
= fp
->tx_bd_cons
;
5860 u16 sw_prod
= fp
->tx_pkt_prod
;
5861 u16 sw_cons
= fp
->tx_pkt_cons
;
5863 while (sw_cons
!= sw_prod
) {
5864 bd_cons
= bnx2x_free_tx_pkt(bp
, fp
, TX_BD(sw_cons
));
5870 static void bnx2x_free_rx_skbs(struct bnx2x
*bp
)
5874 for_each_queue(bp
, j
) {
5875 struct bnx2x_fastpath
*fp
= &bp
->fp
[j
];
5877 for (i
= 0; i
< NUM_RX_BD
; i
++) {
5878 struct sw_rx_bd
*rx_buf
= &fp
->rx_buf_ring
[i
];
5879 struct sk_buff
*skb
= rx_buf
->skb
;
5884 pci_unmap_single(bp
->pdev
,
5885 pci_unmap_addr(rx_buf
, mapping
),
5886 bp
->rx_buf_use_size
,
5887 PCI_DMA_FROMDEVICE
);
5892 if (!fp
->disable_tpa
)
5893 bnx2x_free_tpa_pool(bp
, fp
,
5894 ETH_MAX_AGGREGATION_QUEUES_E1H
);
5898 static void bnx2x_free_skbs(struct bnx2x
*bp
)
5900 bnx2x_free_tx_skbs(bp
);
5901 bnx2x_free_rx_skbs(bp
);
5904 static void bnx2x_free_msix_irqs(struct bnx2x
*bp
)
5908 free_irq(bp
->msix_table
[0].vector
, bp
->dev
);
5909 DP(NETIF_MSG_IFDOWN
, "released sp irq (%d)\n",
5910 bp
->msix_table
[0].vector
);
5912 for_each_queue(bp
, i
) {
5913 DP(NETIF_MSG_IFDOWN
, "about to release fp #%d->%d irq "
5914 "state %x\n", i
, bp
->msix_table
[i
+ offset
].vector
,
5915 bnx2x_fp(bp
, i
, state
));
5917 if (bnx2x_fp(bp
, i
, state
) != BNX2X_FP_STATE_CLOSED
)
5918 BNX2X_ERR("IRQ of fp #%d being freed while "
5919 "state != closed\n", i
);
5921 free_irq(bp
->msix_table
[i
+ offset
].vector
, &bp
->fp
[i
]);
5925 static void bnx2x_free_irq(struct bnx2x
*bp
)
5927 if (bp
->flags
& USING_MSIX_FLAG
) {
5928 bnx2x_free_msix_irqs(bp
);
5929 pci_disable_msix(bp
->pdev
);
5930 bp
->flags
&= ~USING_MSIX_FLAG
;
5933 free_irq(bp
->pdev
->irq
, bp
->dev
);
5936 static int bnx2x_enable_msix(struct bnx2x
*bp
)
5940 bp
->msix_table
[0].entry
= 0;
5942 DP(NETIF_MSG_IFUP
, "msix_table[0].entry = 0 (slowpath)\n");
5944 for_each_queue(bp
, i
) {
5945 int igu_vec
= offset
+ i
+ BP_L_ID(bp
);
5947 bp
->msix_table
[i
+ offset
].entry
= igu_vec
;
5948 DP(NETIF_MSG_IFUP
, "msix_table[%d].entry = %d "
5949 "(fastpath #%u)\n", i
+ offset
, igu_vec
, i
);
5952 rc
= pci_enable_msix(bp
->pdev
, &bp
->msix_table
[0],
5953 bp
->num_queues
+ offset
);
5955 DP(NETIF_MSG_IFUP
, "MSI-X is not attainable\n");
5958 bp
->flags
|= USING_MSIX_FLAG
;
5963 static int bnx2x_req_msix_irqs(struct bnx2x
*bp
)
5965 int i
, rc
, offset
= 1;
5967 rc
= request_irq(bp
->msix_table
[0].vector
, bnx2x_msix_sp_int
, 0,
5968 bp
->dev
->name
, bp
->dev
);
5970 BNX2X_ERR("request sp irq failed\n");
5974 for_each_queue(bp
, i
) {
5975 rc
= request_irq(bp
->msix_table
[i
+ offset
].vector
,
5976 bnx2x_msix_fp_int
, 0,
5977 bp
->dev
->name
, &bp
->fp
[i
]);
5979 BNX2X_ERR("request fp #%d irq failed rc %d\n",
5981 bnx2x_free_msix_irqs(bp
);
5985 bnx2x_fp(bp
, i
, state
) = BNX2X_FP_STATE_IRQ
;
5991 static int bnx2x_req_irq(struct bnx2x
*bp
)
5995 rc
= request_irq(bp
->pdev
->irq
, bnx2x_interrupt
, IRQF_SHARED
,
5996 bp
->dev
->name
, bp
->dev
);
5998 bnx2x_fp(bp
, 0, state
) = BNX2X_FP_STATE_IRQ
;
6004 * Init service functions
6007 static void bnx2x_set_mac_addr_e1(struct bnx2x
*bp
)
6009 struct mac_configuration_cmd
*config
= bnx2x_sp(bp
, mac_config
);
6010 int port
= BP_PORT(bp
);
6013 * unicasts 0-31:port0 32-63:port1
6014 * multicast 64-127:port0 128-191:port1
6016 config
->hdr
.length_6b
= 2;
6017 config
->hdr
.offset
= port
? 31 : 0;
6018 config
->hdr
.client_id
= BP_CL_ID(bp
);
6019 config
->hdr
.reserved1
= 0;
6022 config
->config_table
[0].cam_entry
.msb_mac_addr
=
6023 swab16(*(u16
*)&bp
->dev
->dev_addr
[0]);
6024 config
->config_table
[0].cam_entry
.middle_mac_addr
=
6025 swab16(*(u16
*)&bp
->dev
->dev_addr
[2]);
6026 config
->config_table
[0].cam_entry
.lsb_mac_addr
=
6027 swab16(*(u16
*)&bp
->dev
->dev_addr
[4]);
6028 config
->config_table
[0].cam_entry
.flags
= cpu_to_le16(port
);
6029 config
->config_table
[0].target_table_entry
.flags
= 0;
6030 config
->config_table
[0].target_table_entry
.client_id
= 0;
6031 config
->config_table
[0].target_table_entry
.vlan_id
= 0;
6033 DP(NETIF_MSG_IFUP
, "setting MAC (%04x:%04x:%04x)\n",
6034 config
->config_table
[0].cam_entry
.msb_mac_addr
,
6035 config
->config_table
[0].cam_entry
.middle_mac_addr
,
6036 config
->config_table
[0].cam_entry
.lsb_mac_addr
);
6039 config
->config_table
[1].cam_entry
.msb_mac_addr
= 0xffff;
6040 config
->config_table
[1].cam_entry
.middle_mac_addr
= 0xffff;
6041 config
->config_table
[1].cam_entry
.lsb_mac_addr
= 0xffff;
6042 config
->config_table
[1].cam_entry
.flags
= cpu_to_le16(port
);
6043 config
->config_table
[1].target_table_entry
.flags
=
6044 TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST
;
6045 config
->config_table
[1].target_table_entry
.client_id
= 0;
6046 config
->config_table
[1].target_table_entry
.vlan_id
= 0;
6048 bnx2x_sp_post(bp
, RAMROD_CMD_ID_ETH_SET_MAC
, 0,
6049 U64_HI(bnx2x_sp_mapping(bp
, mac_config
)),
6050 U64_LO(bnx2x_sp_mapping(bp
, mac_config
)), 0);
6053 static void bnx2x_set_mac_addr_e1h(struct bnx2x
*bp
)
6055 struct mac_configuration_cmd_e1h
*config
=
6056 (struct mac_configuration_cmd_e1h
*)bnx2x_sp(bp
, mac_config
);
6058 if (bp
->state
!= BNX2X_STATE_OPEN
) {
6059 DP(NETIF_MSG_IFUP
, "state is %x, returning\n", bp
->state
);
6063 /* CAM allocation for E1H
6064 * unicasts: by func number
6065 * multicast: 20+FUNC*20, 20 each
6067 config
->hdr
.length_6b
= 1;
6068 config
->hdr
.offset
= BP_FUNC(bp
);
6069 config
->hdr
.client_id
= BP_CL_ID(bp
);
6070 config
->hdr
.reserved1
= 0;
6073 config
->config_table
[0].msb_mac_addr
=
6074 swab16(*(u16
*)&bp
->dev
->dev_addr
[0]);
6075 config
->config_table
[0].middle_mac_addr
=
6076 swab16(*(u16
*)&bp
->dev
->dev_addr
[2]);
6077 config
->config_table
[0].lsb_mac_addr
=
6078 swab16(*(u16
*)&bp
->dev
->dev_addr
[4]);
6079 config
->config_table
[0].client_id
= BP_L_ID(bp
);
6080 config
->config_table
[0].vlan_id
= 0;
6081 config
->config_table
[0].e1hov_id
= cpu_to_le16(bp
->e1hov
);
6082 config
->config_table
[0].flags
= BP_PORT(bp
);
6084 DP(NETIF_MSG_IFUP
, "setting MAC (%04x:%04x:%04x) E1HOV %d CLID %d\n",
6085 config
->config_table
[0].msb_mac_addr
,
6086 config
->config_table
[0].middle_mac_addr
,
6087 config
->config_table
[0].lsb_mac_addr
, bp
->e1hov
, BP_L_ID(bp
));
6089 bnx2x_sp_post(bp
, RAMROD_CMD_ID_ETH_SET_MAC
, 0,
6090 U64_HI(bnx2x_sp_mapping(bp
, mac_config
)),
6091 U64_LO(bnx2x_sp_mapping(bp
, mac_config
)), 0);
static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
                             int *state_p, int poll)
{
        /* can take a while if any port is running */
        int cnt = 500;

        DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
           poll ? "polling" : "waiting", state, idx);

        while (cnt--) {
                if (poll) {
                        bnx2x_rx_int(bp->fp, 10);
                        /* if index is different from 0
                         * the reply for some commands will
                         * be on the none default queue
                         */
                        if (idx)
                                bnx2x_rx_int(&bp->fp[idx], 10);
                }
                mb(); /* state is changed by bnx2x_sp_event() */

                if (*state_p == state)
                        return 0;

                msleep(1);
        }

        /* timeout! */
        BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
                  poll ? "polling" : "waiting", state, idx);
#ifdef BNX2X_STOP_ON_ERROR
        bnx2x_panic();
#endif

        return -EBUSY;
}
static int bnx2x_setup_leading(struct bnx2x *bp)
{
        int rc;

        /* reset IGU state */
        bnx2x_ack_sb(bp, bp->fp[0].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);

        /* SETUP ramrod */
        bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);

        /* Wait for completion */
        rc = bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0);

        return rc;
}
static int bnx2x_setup_multi(struct bnx2x *bp, int index)
{
        /* reset IGU state */
        bnx2x_ack_sb(bp, bp->fp[index].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);

        /* SETUP ramrod */
        bp->fp[index].state = BNX2X_FP_STATE_OPENING;
        bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0, index, 0);

        /* Wait for completion */
        return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
                                 &(bp->fp[index].state), 0);
}
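/* Both setup helpers above follow the same slow-path pattern: post the
 * ramrod with bnx2x_sp_post() and then poll, via bnx2x_wait_ramrod(), the
 * state word that bnx2x_sp_event() updates once the completion arrives.
 */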
static int bnx2x_poll(struct napi_struct *napi, int budget);
static void bnx2x_set_rx_mode(struct net_device *dev);
/* must be called with rtnl_lock */
static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
{
        u32 load_code;
        int i, rc;

#ifdef BNX2X_STOP_ON_ERROR
        if (unlikely(bp->panic))
                return -EPERM;
#endif

        bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;

        /* Send LOAD_REQUEST command to MCP
           Returns the type of LOAD command:
           if it is the first port to be initialized
           common blocks should be initialized, otherwise - not
        */
        if (!BP_NOMCP(bp)) {
                load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
                if (!load_code) {
                        BNX2X_ERR("MCP response failure, unloading\n");
                        return -EBUSY;
                }
                if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED)
                        return -EBUSY; /* other port in diagnostic mode */

        } else {
                DP(NETIF_MSG_IFUP, "NO MCP load counts before us %d, %d, %d\n",
                   load_count[0], load_count[1], load_count[2]);
                load_count[0]++;
                load_count[1 + BP_PORT(bp)]++;
                DP(NETIF_MSG_IFUP, "NO MCP new load counts %d, %d, %d\n",
                   load_count[0], load_count[1], load_count[2]);
                if (load_count[0] == 1)
                        load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
                else if (load_count[1 + BP_PORT(bp)] == 1)
                        load_code = FW_MSG_CODE_DRV_LOAD_PORT;
                else
                        load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
        }

        if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
            (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
                bp->port.pmf = 1;
        else
                bp->port.pmf = 0;
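        /* Without an MCP the driver arbitrates on its own using the local
         * load_count[] counters updated above: the first function to load
         * overall performs COMMON init, the first one on its port performs
         * PORT init, and any later function performs only FUNCTION init.
         */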
        DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);

        /* if we can't use MSI-X we only need one fp,
         * so try to enable MSI-X with the requested number of fp's
         * and fallback to inta with one fp
         */
        if ((use_multi > 1) && (use_multi <= BP_MAX_QUEUES(bp)))
                /* user requested number */
                bp->num_queues = use_multi;
        else
                bp->num_queues = min_t(u32, num_online_cpus(),
                                       BP_MAX_QUEUES(bp));

        if (bnx2x_enable_msix(bp)) {
                /* failed to enable MSI-X */
                bp->num_queues = 1;
                if (use_multi)
                        BNX2X_ERR("Multi requested but failed"
                                  " to enable MSI-X\n");
        }
        DP(NETIF_MSG_IFUP,
           "set number of queues to %d\n", bp->num_queues);

        if (bnx2x_alloc_mem(bp))
                return -ENOMEM;

        for_each_queue(bp, i)
                bnx2x_fp(bp, i, disable_tpa) =
                        ((bp->flags & TPA_ENABLE_FLAG) == 0);

        /* Disable interrupt handling until HW is initialized */
        atomic_set(&bp->intr_sem, 1);

        if (bp->flags & USING_MSIX_FLAG) {
                rc = bnx2x_req_msix_irqs(bp);
                if (rc) {
                        pci_disable_msix(bp->pdev);
                        goto load_error;
                }
        } else {
                rc = bnx2x_req_irq(bp);
                if (rc) {
                        BNX2X_ERR("IRQ request failed, aborting\n");
                        goto load_error;
                }
        }

        for_each_queue(bp, i)
                netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
                               bnx2x_poll, 128);

        /* Initialize HW */
        rc = bnx2x_init_hw(bp, load_code);
        if (rc) {
                BNX2X_ERR("HW init failed, aborting\n");
                goto load_error;
        }

        /* Enable interrupt handling */
        atomic_set(&bp->intr_sem, 0);

        /* Setup NIC internals and enable interrupts */
        bnx2x_nic_init(bp);

        /* Send LOAD_DONE command to MCP */
        if (!BP_NOMCP(bp)) {
                load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
                if (!load_code) {
                        BNX2X_ERR("MCP response failure, unloading\n");
                        rc = -EBUSY;
                        goto load_int_disable;
                }
        }

        bnx2x_stats_init(bp);

        bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;

        /* Enable Rx interrupt handling before sending the ramrod
           as it's completed on Rx FP queue */
        for_each_queue(bp, i)
                napi_enable(&bnx2x_fp(bp, i, napi));

        rc = bnx2x_setup_leading(bp);
        if (rc) {
#ifdef BNX2X_STOP_ON_ERROR
                bp->panic = 1;
#endif
                goto load_stop_netif;
        }

        if (CHIP_IS_E1H(bp))
                if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
                        BNX2X_ERR("!!! mf_cfg function disabled\n");
                        bp->state = BNX2X_STATE_DISABLED;
                }

        if (bp->state == BNX2X_STATE_OPEN)
                for_each_nondefault_queue(bp, i) {
                        rc = bnx2x_setup_multi(bp, i);
                        if (rc)
                                goto load_stop_netif;
                }

        if (CHIP_IS_E1(bp))
                bnx2x_set_mac_addr_e1(bp);
        else
                bnx2x_set_mac_addr_e1h(bp);

        if (bp->port.pmf)
                bnx2x_initial_phy_init(bp);

        /* Start fast path */
        switch (load_mode) {
        case LOAD_NORMAL:
                /* Tx queue should be only reenabled */
                netif_wake_queue(bp->dev);
                bnx2x_set_rx_mode(bp->dev);
                break;

        case LOAD_OPEN:
                /* IRQ is only requested from bnx2x_open */
                netif_start_queue(bp->dev);
                bnx2x_set_rx_mode(bp->dev);
                if (bp->flags & USING_MSIX_FLAG)
                        printk(KERN_INFO PFX "%s: using MSI-X\n",
                               bp->dev->name);
                break;

        case LOAD_DIAG:
                bnx2x_set_rx_mode(bp->dev);
                bp->state = BNX2X_STATE_DIAG;
                break;

        default:
                break;
        }

        if (!bp->port.pmf)
                bnx2x__link_status_update(bp);

        /* start the timer */
        mod_timer(&bp->timer, jiffies + bp->current_interval);

        return 0;

load_stop_netif:
        for_each_queue(bp, i)
                napi_disable(&bnx2x_fp(bp, i, napi));

load_int_disable:
        bnx2x_int_disable_sync(bp);

        bnx2x_free_irq(bp);

        /* Free SKBs, SGEs, TPA pool and driver internals */
        bnx2x_free_skbs(bp);
        for_each_queue(bp, i)
                bnx2x_free_rx_sge_range(bp, bp->fp + i,
                                        RX_SGE_CNT*NUM_RX_SGE_PAGES);

load_error:
        bnx2x_free_mem(bp);

        /* TBD we really need to reset the chip
           if we want to recover from this */
        return rc;
}
static int bnx2x_stop_multi(struct bnx2x *bp, int index)
{
        int rc;

        /* halt the connection */
        bp->fp[index].state = BNX2X_FP_STATE_HALTING;
        bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, 0, 0);

        /* Wait for completion */
        rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
                               &(bp->fp[index].state), 1);
        if (rc) /* timeout */
                return rc;

        /* delete cfc entry */
        bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);

        /* Wait for completion */
        rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index,
                               &(bp->fp[index].state), 1);

        return rc;
}
static void bnx2x_stop_leading(struct bnx2x *bp)
{
        u16 dsb_sp_prod_idx;
        /* if the other port is handling traffic,
           this can take a lot of time */
        int cnt = 500;
        int rc;

        /* Send HALT ramrod */
        bp->fp[0].state = BNX2X_FP_STATE_HALTING;
        bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, BP_CL_ID(bp), 0);

        /* Wait for completion */
        rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
                               &(bp->fp[0].state), 1);
        if (rc) /* timeout */
                return;

        dsb_sp_prod_idx = *bp->dsb_sp_prod;

        /* Send PORT_DELETE ramrod */
        bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);

        /* Wait for completion to arrive on default status block
           we are going to reset the chip anyway
           so there is not much to do if this times out
         */
        while (dsb_sp_prod_idx == *bp->dsb_sp_prod) {
                if (!cnt--) {
                        DP(NETIF_MSG_IFDOWN, "timeout waiting for port del "
                           "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
                           *bp->dsb_sp_prod, dsb_sp_prod_idx);
#ifdef BNX2X_STOP_ON_ERROR
                        bnx2x_panic();
#endif
                        break;
                }
                msleep(1);
        }
        bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
        bp->fp[0].state = BNX2X_FP_STATE_CLOSED;
}
static void bnx2x_reset_func(struct bnx2x *bp)
{
        int port = BP_PORT(bp);
        int func = BP_FUNC(bp);
        int base, i;

        REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
        REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);

        REG_WR(bp, HC_REG_CONFIG_0 + port*4, 0x1000);

        base = FUNC_ILT_BASE(func);
        for (i = base; i < base + ILT_PER_FUNC; i++)
                bnx2x_ilt_wr(bp, i, 0);
}
static void bnx2x_reset_port(struct bnx2x *bp)
{
        int port = BP_PORT(bp);
        u32 val;

        REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);

        /* Do not rcv packets to BRB */
        REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
        /* Do not direct rcv packets that are not for MCP to the BRB */
        REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
                           NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);

        REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);

        /* Check for BRB port occupancy */
        val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
        if (val)
                DP(NETIF_MSG_IFDOWN,
                   "BRB1 is not empty %d blocks are occupied\n", val);

        /* TODO: Close Doorbell port? */
}
static void bnx2x_reset_common(struct bnx2x *bp)
{
        /* reset_common */
        REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
               0xd3ffff7f);
        REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
}
static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
{
        DP(BNX2X_MSG_MCP, "function %d reset_code %x\n",
           BP_FUNC(bp), reset_code);

        switch (reset_code) {
        case FW_MSG_CODE_DRV_UNLOAD_COMMON:
                bnx2x_reset_port(bp);
                bnx2x_reset_func(bp);
                bnx2x_reset_common(bp);
                break;

        case FW_MSG_CODE_DRV_UNLOAD_PORT:
                bnx2x_reset_port(bp);
                bnx2x_reset_func(bp);
                break;

        case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
                bnx2x_reset_func(bp);
                break;

        default:
                BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
                break;
        }
}
/* must be called with rtnl_lock */
static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
{
        u32 reset_code = 0;
        int i, cnt;

        bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;

        bp->rx_mode = BNX2X_RX_MODE_NONE;
        bnx2x_set_storm_rx_mode(bp);

        if (netif_running(bp->dev)) {
                netif_tx_disable(bp->dev);
                bp->dev->trans_start = jiffies; /* prevent tx timeout */
        }

        del_timer_sync(&bp->timer);
        SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
                 (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
        bnx2x_stats_handle(bp, STATS_EVENT_STOP);

        /* Wait until all fast path tasks complete */
        for_each_queue(bp, i) {
                struct bnx2x_fastpath *fp = &bp->fp[i];

#ifdef BNX2X_STOP_ON_ERROR
#ifdef __powerpc64__
                DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
#else
                DP(NETIF_MSG_IFDOWN, "fp->tpa_queue_used = 0x%llx\n",
#endif
                   fp->tpa_queue_used);
#endif
                cnt = 1000;
                while (bnx2x_has_work(fp)) {
                        if (!cnt--) {
                                BNX2X_ERR("timeout waiting for queue[%d]\n",
                                          i);
#ifdef BNX2X_STOP_ON_ERROR
                                bnx2x_panic();
                                return -EBUSY;
#else
                                break;
#endif
                        }
                        msleep(1);
                }
        }

        /* Wait until all slow path tasks complete */
        cnt = 1000;
        while ((bp->spq_left != MAX_SPQ_PENDING) && cnt--)
                msleep(1);

        for_each_queue(bp, i)
                napi_disable(&bnx2x_fp(bp, i, napi));
        /* Disable interrupts after Tx and Rx are disabled on stack level */
        bnx2x_int_disable_sync(bp);

        if (unload_mode == UNLOAD_NORMAL)
                reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;

        else if (bp->flags & NO_WOL_FLAG)
                reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;

        else if (bp->wol) {
                u32 emac_base = BP_PORT(bp) ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
                u8 *mac_addr = bp->dev->dev_addr;
                u32 val;

                /* The mac address is written to entries 1-4 to
                   preserve entry 0 which is used by the PMF */
                val = (mac_addr[0] << 8) | mac_addr[1];
                EMAC_WR(EMAC_REG_EMAC_MAC_MATCH + (BP_E1HVN(bp) + 1)*8, val);

                val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
                      (mac_addr[4] << 8) | mac_addr[5];
                EMAC_WR(EMAC_REG_EMAC_MAC_MATCH + (BP_E1HVN(bp) + 1)*8 + 4,
                        val);

                reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
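                /* For example, with E1HVN 0 the (BP_E1HVN(bp) + 1)*8 offset
                 * above selects match entry 1 (byte offset 8), leaving
                 * entry 0 untouched for the PMF as noted in the comment.
                 */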
        } else
                reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;

        /* Close multi and leading connections
           Completions for ramrods are collected in a synchronous way */
        for_each_nondefault_queue(bp, i)
                if (bnx2x_stop_multi(bp, i))
                        goto unload_error;

        if (CHIP_IS_E1H(bp))
                REG_WR(bp, NIG_REG_LLH0_FUNC_EN + BP_PORT(bp)*8, 0);

        bnx2x_stop_leading(bp);
#ifdef BNX2X_STOP_ON_ERROR
        /* If ramrod completion timed out - break here! */
        if (bp->panic) {
                BNX2X_ERR("Stop leading failed!\n");
                return -EBUSY;
        }
#endif

        if ((bp->state != BNX2X_STATE_CLOSING_WAIT4_UNLOAD) ||
            (bp->fp[0].state != BNX2X_FP_STATE_CLOSED)) {
                DP(NETIF_MSG_IFDOWN, "failed to close leading properly! "
                   "state 0x%x fp[0].state 0x%x\n",
                   bp->state, bp->fp[0].state);
        }

unload_error:
        if (!BP_NOMCP(bp))
                reset_code = bnx2x_fw_command(bp, reset_code);
        else {
                DP(NETIF_MSG_IFDOWN, "NO MCP load counts %d, %d, %d\n",
                   load_count[0], load_count[1], load_count[2]);
                load_count[0]--;
                load_count[1 + BP_PORT(bp)]--;
                DP(NETIF_MSG_IFDOWN, "NO MCP new load counts %d, %d, %d\n",
                   load_count[0], load_count[1], load_count[2]);
                if (load_count[0] == 0)
                        reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
                else if (load_count[1 + BP_PORT(bp)] == 0)
                        reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
                else
                        reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
        }

        if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
            (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
                bnx2x__link_reset(bp);

        /* Reset the chip */
        bnx2x_reset_chip(bp, reset_code);

        /* Report UNLOAD_DONE to MCP */
        if (!BP_NOMCP(bp))
                bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);

        /* Free SKBs, SGEs, TPA pool and driver internals */
        bnx2x_free_skbs(bp);
        for_each_queue(bp, i)
                bnx2x_free_rx_sge_range(bp, bp->fp + i,
                                        RX_SGE_CNT*NUM_RX_SGE_PAGES);
        bnx2x_free_mem(bp);

        bp->state = BNX2X_STATE_CLOSED;

        netif_carrier_off(bp->dev);

        return 0;
}
static void bnx2x_reset_task(struct work_struct *work)
{
        struct bnx2x *bp = container_of(work, struct bnx2x, reset_task);

#ifdef BNX2X_STOP_ON_ERROR
        BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
                  " so reset not done to allow debug dump,\n"
         KERN_ERR " you will need to reboot when done\n");
        return;
#endif

        rtnl_lock();

        if (!netif_running(bp->dev))
                goto reset_task_exit;

        bnx2x_nic_unload(bp, UNLOAD_NORMAL);
        bnx2x_nic_load(bp, LOAD_NORMAL);

reset_task_exit:
        rtnl_unlock();
}
/* end of nic load/unload */

/****************************************************************************
* Init service functions
****************************************************************************/

static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
{
        u32 val;

        /* Check if there is any driver already loaded */
        val = REG_RD(bp, MISC_REG_UNPREPARED);
        if (val == 0x1) {

                /* Check if it is the UNDI driver
                 * UNDI driver initializes CID offset for normal bell to 0x7
                 */
                val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
                if (val == 0x7) {
                        u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
                        /* save our func and fw_seq */
                        int func = BP_FUNC(bp);
                        u16 fw_seq = bp->fw_seq;

                        BNX2X_DEV_INFO("UNDI is active! reset device\n");

                        /* try unload UNDI on port 0 */
                        bp->func = 0;
                        bp->fw_seq = (SHMEM_RD(bp,
                                        func_mb[bp->func].drv_mb_header) &
                                      DRV_MSG_SEQ_NUMBER_MASK);

                        reset_code = bnx2x_fw_command(bp, reset_code);
                        bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);

                        /* if UNDI is loaded on the other port */
                        if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {

                                /* try unload UNDI on port 1 */
                                bp->func = 1;
                                bp->fw_seq = (SHMEM_RD(bp,
                                        func_mb[bp->func].drv_mb_header) &
                                              DRV_MSG_SEQ_NUMBER_MASK);

                                bnx2x_fw_command(bp,
                                        DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS);
                                bnx2x_fw_command(bp,
                                        DRV_MSG_CODE_UNLOAD_DONE);
                        }

                        /* restore our func and fw_seq */
                        bp->func = func;
                        bp->fw_seq = fw_seq;

                        /* reset device */
                        REG_WR(bp,
                               GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
                               0xd3ffff7f);
                        REG_WR(bp,
                               GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
                               0x1403);
                }
        }
}
static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
{
        u32 val, val2, val3, val4, id;

        /* Get the chip revision id and number. */
        /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
        val = REG_RD(bp, MISC_REG_CHIP_NUM);
        id = ((val & 0xffff) << 16);
        val = REG_RD(bp, MISC_REG_CHIP_REV);
        id |= ((val & 0xf) << 12);
        val = REG_RD(bp, MISC_REG_CHIP_METAL);
        id |= ((val & 0xff) << 4);
        val = REG_RD(bp, MISC_REG_BOND_ID);
        id |= (val & 0xf);
        bp->common.chip_id = id;
        bp->link_params.chip_id = bp->common.chip_id;
        BNX2X_DEV_INFO("chip ID is 0x%x\n", id);
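        /* Illustrative example: a hypothetical chip_num of 0x164e with
         * rev 0, metal 0 and bond_id 0 packs into id = 0x164e0000,
         * following the num:16-31, rev:12-15, metal:4-11, bond_id:0-3
         * layout noted above.
         */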
        val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
        bp->common.flash_size = (NVRAM_1MB_SIZE <<
                                 (val & MCPR_NVM_CFG4_FLASH_SIZE));
        BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
                       bp->common.flash_size, bp->common.flash_size);

        bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
        bp->link_params.shmem_base = bp->common.shmem_base;
        BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);

        if (!bp->common.shmem_base ||
            (bp->common.shmem_base < 0xA0000) ||
            (bp->common.shmem_base >= 0xC0000)) {
                BNX2X_DEV_INFO("MCP not active\n");
                bp->flags |= NO_MCP_FLAG;
                return;
        }

        val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
        if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
                != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
                BNX2X_ERR("BAD MCP validity signature\n");

        bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
        bp->common.board = SHMEM_RD(bp, dev_info.shared_hw_config.board);

        BNX2X_DEV_INFO("hw_config 0x%08x board 0x%08x\n",
                       bp->common.hw_config, bp->common.board);

        bp->link_params.hw_led_mode = ((bp->common.hw_config &
                                        SHARED_HW_CFG_LED_MODE_MASK) >>
                                       SHARED_HW_CFG_LED_MODE_SHIFT);

        val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
        bp->common.bc_ver = val;
        BNX2X_DEV_INFO("bc_ver %X\n", val);
        if (val < BNX2X_BC_VER) {
                /* for now only warn
                 * later we might need to enforce this */
                BNX2X_ERR("This driver needs bc_ver %X but found %X,"
                          " please upgrade BC\n", BNX2X_BC_VER, val);
        }
        BNX2X_DEV_INFO("%sWoL Capable\n",
                       (bp->flags & NO_WOL_FLAG) ? "Not " : "");

        val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
        val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
        val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
        val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);

        printk(KERN_INFO PFX "part number %X-%X-%X-%X\n",
               val, val2, val3, val4);
}
6855 static void __devinit
bnx2x_link_settings_supported(struct bnx2x
*bp
,
6858 int port
= BP_PORT(bp
);
6861 switch (switch_cfg
) {
6863 BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg
);
6866 SERDES_EXT_PHY_TYPE(bp
->link_params
.ext_phy_config
);
6867 switch (ext_phy_type
) {
6868 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT
:
6869 BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
6872 bp
->port
.supported
|= (SUPPORTED_10baseT_Half
|
6873 SUPPORTED_10baseT_Full
|
6874 SUPPORTED_100baseT_Half
|
6875 SUPPORTED_100baseT_Full
|
6876 SUPPORTED_1000baseT_Full
|
6877 SUPPORTED_2500baseX_Full
|
6882 SUPPORTED_Asym_Pause
);
6885 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482
:
6886 BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n",
6889 bp
->port
.supported
|= (SUPPORTED_10baseT_Half
|
6890 SUPPORTED_10baseT_Full
|
6891 SUPPORTED_100baseT_Half
|
6892 SUPPORTED_100baseT_Full
|
6893 SUPPORTED_1000baseT_Full
|
6898 SUPPORTED_Asym_Pause
);
6902 BNX2X_ERR("NVRAM config error. "
6903 "BAD SerDes ext_phy_config 0x%x\n",
6904 bp
->link_params
.ext_phy_config
);
6908 bp
->port
.phy_addr
= REG_RD(bp
, NIG_REG_SERDES0_CTRL_PHY_ADDR
+
6910 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp
->port
.phy_addr
);
6913 case SWITCH_CFG_10G
:
6914 BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg
);
6917 XGXS_EXT_PHY_TYPE(bp
->link_params
.ext_phy_config
);
6918 switch (ext_phy_type
) {
6919 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT
:
6920 BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
6923 bp
->port
.supported
|= (SUPPORTED_10baseT_Half
|
6924 SUPPORTED_10baseT_Full
|
6925 SUPPORTED_100baseT_Half
|
6926 SUPPORTED_100baseT_Full
|
6927 SUPPORTED_1000baseT_Full
|
6928 SUPPORTED_2500baseX_Full
|
6929 SUPPORTED_10000baseT_Full
|
6934 SUPPORTED_Asym_Pause
);
6937 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705
:
6938 BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n",
6941 bp
->port
.supported
|= (SUPPORTED_10000baseT_Full
|
6944 SUPPORTED_Asym_Pause
);
6947 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706
:
6948 BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n",
6951 bp
->port
.supported
|= (SUPPORTED_10000baseT_Full
|
6952 SUPPORTED_1000baseT_Full
|
6955 SUPPORTED_Asym_Pause
);
6958 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072
:
6959 BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n",
6962 bp
->port
.supported
|= (SUPPORTED_10000baseT_Full
|
6963 SUPPORTED_1000baseT_Full
|
6967 SUPPORTED_Asym_Pause
);
6970 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073
:
6971 BNX2X_DEV_INFO("ext_phy_type 0x%x (8073)\n",
6974 bp
->port
.supported
|= (SUPPORTED_10000baseT_Full
|
6975 SUPPORTED_2500baseX_Full
|
6976 SUPPORTED_1000baseT_Full
|
6980 SUPPORTED_Asym_Pause
);
6983 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101
:
6984 BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n",
6987 bp
->port
.supported
|= (SUPPORTED_10000baseT_Full
|
6991 SUPPORTED_Asym_Pause
);
6994 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE
:
6995 BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
6996 bp
->link_params
.ext_phy_config
);
7000 BNX2X_ERR("NVRAM config error. "
7001 "BAD XGXS ext_phy_config 0x%x\n",
7002 bp
->link_params
.ext_phy_config
);
7006 bp
->port
.phy_addr
= REG_RD(bp
, NIG_REG_XGXS0_CTRL_PHY_ADDR
+
7008 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp
->port
.phy_addr
);
7013 BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
7014 bp
->port
.link_config
);
7017 bp
->link_params
.phy_addr
= bp
->port
.phy_addr
;
7019 /* mask what we support according to speed_cap_mask */
7020 if (!(bp
->link_params
.speed_cap_mask
&
7021 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF
))
7022 bp
->port
.supported
&= ~SUPPORTED_10baseT_Half
;
7024 if (!(bp
->link_params
.speed_cap_mask
&
7025 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL
))
7026 bp
->port
.supported
&= ~SUPPORTED_10baseT_Full
;
7028 if (!(bp
->link_params
.speed_cap_mask
&
7029 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF
))
7030 bp
->port
.supported
&= ~SUPPORTED_100baseT_Half
;
7032 if (!(bp
->link_params
.speed_cap_mask
&
7033 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL
))
7034 bp
->port
.supported
&= ~SUPPORTED_100baseT_Full
;
7036 if (!(bp
->link_params
.speed_cap_mask
&
7037 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G
))
7038 bp
->port
.supported
&= ~(SUPPORTED_1000baseT_Half
|
7039 SUPPORTED_1000baseT_Full
);
7041 if (!(bp
->link_params
.speed_cap_mask
&
7042 PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G
))
7043 bp
->port
.supported
&= ~SUPPORTED_2500baseX_Full
;
7045 if (!(bp
->link_params
.speed_cap_mask
&
7046 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G
))
7047 bp
->port
.supported
&= ~SUPPORTED_10000baseT_Full
;
7049 BNX2X_DEV_INFO("supported 0x%x\n", bp
->port
.supported
);
7052 static void __devinit
bnx2x_link_settings_requested(struct bnx2x
*bp
)
7054 bp
->link_params
.req_duplex
= DUPLEX_FULL
;
7056 switch (bp
->port
.link_config
& PORT_FEATURE_LINK_SPEED_MASK
) {
7057 case PORT_FEATURE_LINK_SPEED_AUTO
:
7058 if (bp
->port
.supported
& SUPPORTED_Autoneg
) {
7059 bp
->link_params
.req_line_speed
= SPEED_AUTO_NEG
;
7060 bp
->port
.advertising
= bp
->port
.supported
;
7063 XGXS_EXT_PHY_TYPE(bp
->link_params
.ext_phy_config
);
7065 if ((ext_phy_type
==
7066 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705
) ||
7068 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706
)) {
7069 /* force 10G, no AN */
7070 bp
->link_params
.req_line_speed
= SPEED_10000
;
7071 bp
->port
.advertising
=
7072 (ADVERTISED_10000baseT_Full
|
7076 BNX2X_ERR("NVRAM config error. "
7077 "Invalid link_config 0x%x"
7078 " Autoneg not supported\n",
7079 bp
->port
.link_config
);
7084 case PORT_FEATURE_LINK_SPEED_10M_FULL
:
7085 if (bp
->port
.supported
& SUPPORTED_10baseT_Full
) {
7086 bp
->link_params
.req_line_speed
= SPEED_10
;
7087 bp
->port
.advertising
= (ADVERTISED_10baseT_Full
|
7090 BNX2X_ERR("NVRAM config error. "
7091 "Invalid link_config 0x%x"
7092 " speed_cap_mask 0x%x\n",
7093 bp
->port
.link_config
,
7094 bp
->link_params
.speed_cap_mask
);
7099 case PORT_FEATURE_LINK_SPEED_10M_HALF
:
7100 if (bp
->port
.supported
& SUPPORTED_10baseT_Half
) {
7101 bp
->link_params
.req_line_speed
= SPEED_10
;
7102 bp
->link_params
.req_duplex
= DUPLEX_HALF
;
7103 bp
->port
.advertising
= (ADVERTISED_10baseT_Half
|
7106 BNX2X_ERR("NVRAM config error. "
7107 "Invalid link_config 0x%x"
7108 " speed_cap_mask 0x%x\n",
7109 bp
->port
.link_config
,
7110 bp
->link_params
.speed_cap_mask
);
7115 case PORT_FEATURE_LINK_SPEED_100M_FULL
:
7116 if (bp
->port
.supported
& SUPPORTED_100baseT_Full
) {
7117 bp
->link_params
.req_line_speed
= SPEED_100
;
7118 bp
->port
.advertising
= (ADVERTISED_100baseT_Full
|
7121 BNX2X_ERR("NVRAM config error. "
7122 "Invalid link_config 0x%x"
7123 " speed_cap_mask 0x%x\n",
7124 bp
->port
.link_config
,
7125 bp
->link_params
.speed_cap_mask
);
7130 case PORT_FEATURE_LINK_SPEED_100M_HALF
:
7131 if (bp
->port
.supported
& SUPPORTED_100baseT_Half
) {
7132 bp
->link_params
.req_line_speed
= SPEED_100
;
7133 bp
->link_params
.req_duplex
= DUPLEX_HALF
;
7134 bp
->port
.advertising
= (ADVERTISED_100baseT_Half
|
7137 BNX2X_ERR("NVRAM config error. "
7138 "Invalid link_config 0x%x"
7139 " speed_cap_mask 0x%x\n",
7140 bp
->port
.link_config
,
7141 bp
->link_params
.speed_cap_mask
);
7146 case PORT_FEATURE_LINK_SPEED_1G
:
7147 if (bp
->port
.supported
& SUPPORTED_1000baseT_Full
) {
7148 bp
->link_params
.req_line_speed
= SPEED_1000
;
7149 bp
->port
.advertising
= (ADVERTISED_1000baseT_Full
|
7152 BNX2X_ERR("NVRAM config error. "
7153 "Invalid link_config 0x%x"
7154 " speed_cap_mask 0x%x\n",
7155 bp
->port
.link_config
,
7156 bp
->link_params
.speed_cap_mask
);
7161 case PORT_FEATURE_LINK_SPEED_2_5G
:
7162 if (bp
->port
.supported
& SUPPORTED_2500baseX_Full
) {
7163 bp
->link_params
.req_line_speed
= SPEED_2500
;
7164 bp
->port
.advertising
= (ADVERTISED_2500baseX_Full
|
7167 BNX2X_ERR("NVRAM config error. "
7168 "Invalid link_config 0x%x"
7169 " speed_cap_mask 0x%x\n",
7170 bp
->port
.link_config
,
7171 bp
->link_params
.speed_cap_mask
);
7176 case PORT_FEATURE_LINK_SPEED_10G_CX4
:
7177 case PORT_FEATURE_LINK_SPEED_10G_KX4
:
7178 case PORT_FEATURE_LINK_SPEED_10G_KR
:
7179 if (bp
->port
.supported
& SUPPORTED_10000baseT_Full
) {
7180 bp
->link_params
.req_line_speed
= SPEED_10000
;
7181 bp
->port
.advertising
= (ADVERTISED_10000baseT_Full
|
7184 BNX2X_ERR("NVRAM config error. "
7185 "Invalid link_config 0x%x"
7186 " speed_cap_mask 0x%x\n",
7187 bp
->port
.link_config
,
7188 bp
->link_params
.speed_cap_mask
);
7194 BNX2X_ERR("NVRAM config error. "
7195 "BAD link speed link_config 0x%x\n",
7196 bp
->port
.link_config
);
7197 bp
->link_params
.req_line_speed
= SPEED_AUTO_NEG
;
7198 bp
->port
.advertising
= bp
->port
.supported
;
7202 bp
->link_params
.req_flow_ctrl
= (bp
->port
.link_config
&
7203 PORT_FEATURE_FLOW_CONTROL_MASK
);
7204 if ((bp
->link_params
.req_flow_ctrl
== FLOW_CTRL_AUTO
) &&
7205 (!bp
->port
.supported
& SUPPORTED_Autoneg
))
7206 bp
->link_params
.req_flow_ctrl
= FLOW_CTRL_NONE
;
7208 BNX2X_DEV_INFO("req_line_speed %d req_duplex %d req_flow_ctrl 0x%x"
7209 " advertising 0x%x\n",
7210 bp
->link_params
.req_line_speed
,
7211 bp
->link_params
.req_duplex
,
7212 bp
->link_params
.req_flow_ctrl
, bp
->port
.advertising
);
7215 static void __devinit
bnx2x_get_port_hwinfo(struct bnx2x
*bp
)
7217 int port
= BP_PORT(bp
);
7220 bp
->link_params
.bp
= bp
;
7221 bp
->link_params
.port
= port
;
7223 bp
->link_params
.serdes_config
=
7224 SHMEM_RD(bp
, dev_info
.port_hw_config
[port
].serdes_config
);
7225 bp
->link_params
.lane_config
=
7226 SHMEM_RD(bp
, dev_info
.port_hw_config
[port
].lane_config
);
7227 bp
->link_params
.ext_phy_config
=
7229 dev_info
.port_hw_config
[port
].external_phy_config
);
7230 bp
->link_params
.speed_cap_mask
=
7232 dev_info
.port_hw_config
[port
].speed_capability_mask
);
7234 bp
->port
.link_config
=
7235 SHMEM_RD(bp
, dev_info
.port_feature_config
[port
].link_config
);
7237 BNX2X_DEV_INFO("serdes_config 0x%08x lane_config 0x%08x\n"
7238 KERN_INFO
" ext_phy_config 0x%08x speed_cap_mask 0x%08x"
7239 " link_config 0x%08x\n",
7240 bp
->link_params
.serdes_config
,
7241 bp
->link_params
.lane_config
,
7242 bp
->link_params
.ext_phy_config
,
7243 bp
->link_params
.speed_cap_mask
, bp
->port
.link_config
);
7245 bp
->link_params
.switch_cfg
= (bp
->port
.link_config
&
7246 PORT_FEATURE_CONNECTED_SWITCH_MASK
);
7247 bnx2x_link_settings_supported(bp
, bp
->link_params
.switch_cfg
);
7249 bnx2x_link_settings_requested(bp
);
7251 val2
= SHMEM_RD(bp
, dev_info
.port_hw_config
[port
].mac_upper
);
7252 val
= SHMEM_RD(bp
, dev_info
.port_hw_config
[port
].mac_lower
);
7253 bp
->dev
->dev_addr
[0] = (u8
)(val2
>> 8 & 0xff);
7254 bp
->dev
->dev_addr
[1] = (u8
)(val2
& 0xff);
7255 bp
->dev
->dev_addr
[2] = (u8
)(val
>> 24 & 0xff);
7256 bp
->dev
->dev_addr
[3] = (u8
)(val
>> 16 & 0xff);
7257 bp
->dev
->dev_addr
[4] = (u8
)(val
>> 8 & 0xff);
7258 bp
->dev
->dev_addr
[5] = (u8
)(val
& 0xff);
7259 memcpy(bp
->link_params
.mac_addr
, bp
->dev
->dev_addr
, ETH_ALEN
);
7260 memcpy(bp
->dev
->perm_addr
, bp
->dev
->dev_addr
, ETH_ALEN
);
7263 static int __devinit
bnx2x_get_hwinfo(struct bnx2x
*bp
)
7265 int func
= BP_FUNC(bp
);
7269 bnx2x_get_common_hwinfo(bp
);
7273 if (CHIP_IS_E1H(bp
)) {
7275 SHMEM_RD(bp
, mf_cfg
.func_mf_config
[func
].config
);
7278 (SHMEM_RD(bp
, mf_cfg
.func_mf_config
[func
].e1hov_tag
) &
7279 FUNC_MF_CFG_E1HOV_TAG_MASK
);
7280 if (val
!= FUNC_MF_CFG_E1HOV_TAG_DEFAULT
) {
7284 BNX2X_DEV_INFO("MF mode E1HOV for func %d is %d "
7286 func
, bp
->e1hov
, bp
->e1hov
);
7288 BNX2X_DEV_INFO("Single function mode\n");
7290 BNX2X_ERR("!!! No valid E1HOV for func %d,"
7291 " aborting\n", func
);
7297 if (!BP_NOMCP(bp
)) {
7298 bnx2x_get_port_hwinfo(bp
);
7300 bp
->fw_seq
= (SHMEM_RD(bp
, func_mb
[func
].drv_mb_header
) &
7301 DRV_MSG_SEQ_NUMBER_MASK
);
7302 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp
->fw_seq
);
7306 val2
= SHMEM_RD(bp
, mf_cfg
.func_mf_config
[func
].mac_upper
);
7307 val
= SHMEM_RD(bp
, mf_cfg
.func_mf_config
[func
].mac_lower
);
7308 if ((val2
!= FUNC_MF_CFG_UPPERMAC_DEFAULT
) &&
7309 (val
!= FUNC_MF_CFG_LOWERMAC_DEFAULT
)) {
7310 bp
->dev
->dev_addr
[0] = (u8
)(val2
>> 8 & 0xff);
7311 bp
->dev
->dev_addr
[1] = (u8
)(val2
& 0xff);
7312 bp
->dev
->dev_addr
[2] = (u8
)(val
>> 24 & 0xff);
7313 bp
->dev
->dev_addr
[3] = (u8
)(val
>> 16 & 0xff);
7314 bp
->dev
->dev_addr
[4] = (u8
)(val
>> 8 & 0xff);
7315 bp
->dev
->dev_addr
[5] = (u8
)(val
& 0xff);
7316 memcpy(bp
->link_params
.mac_addr
, bp
->dev
->dev_addr
,
7318 memcpy(bp
->dev
->perm_addr
, bp
->dev
->dev_addr
,
                /* only supposed to happen on emulation/FPGA */
                BNX2X_ERR("warning random MAC workaround active\n");
                random_ether_addr(bp->dev->dev_addr);
                memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
7335 static int __devinit
bnx2x_init_bp(struct bnx2x
*bp
)
7337 int func
= BP_FUNC(bp
);
7341 bp
->flags
|= NO_MCP_FLAG
;
7343 mutex_init(&bp
->port
.phy_mutex
);
7345 INIT_WORK(&bp
->sp_task
, bnx2x_sp_task
);
7346 INIT_WORK(&bp
->reset_task
, bnx2x_reset_task
);
7348 rc
= bnx2x_get_hwinfo(bp
);
7350 /* need to reset chip if undi was active */
7352 bnx2x_undi_unload(bp
);
7354 if (CHIP_REV_IS_FPGA(bp
))
7355 printk(KERN_ERR PFX
"FPGA detected\n");
7357 if (BP_NOMCP(bp
) && (func
== 0))
7359 "MCP disabled, must load devices in order!\n");
7363 bp
->flags
&= ~TPA_ENABLE_FLAG
;
7364 bp
->dev
->features
&= ~NETIF_F_LRO
;
7366 bp
->flags
|= TPA_ENABLE_FLAG
;
7367 bp
->dev
->features
|= NETIF_F_LRO
;
7371 bp
->tx_ring_size
= MAX_TX_AVAIL
;
7372 bp
->rx_ring_size
= MAX_RX_AVAIL
;
7380 bp
->stats_ticks
= 1000000 & 0xffff00;
7382 bp
->timer_interval
= (CHIP_REV_IS_SLOW(bp
) ? 5*HZ
: HZ
);
7383 bp
->current_interval
= (poll
? poll
: bp
->timer_interval
);
7385 init_timer(&bp
->timer
);
7386 bp
->timer
.expires
= jiffies
+ bp
->current_interval
;
7387 bp
->timer
.data
= (unsigned long) bp
;
7388 bp
->timer
.function
= bnx2x_timer
;
7394 * ethtool service functions
7397 /* All ethtool functions called with rtnl_lock */
7399 static int bnx2x_get_settings(struct net_device
*dev
, struct ethtool_cmd
*cmd
)
7401 struct bnx2x
*bp
= netdev_priv(dev
);
7403 cmd
->supported
= bp
->port
.supported
;
7404 cmd
->advertising
= bp
->port
.advertising
;
7406 if (netif_carrier_ok(dev
)) {
7407 cmd
->speed
= bp
->link_vars
.line_speed
;
7408 cmd
->duplex
= bp
->link_vars
.duplex
;
7410 cmd
->speed
= bp
->link_params
.req_line_speed
;
7411 cmd
->duplex
= bp
->link_params
.req_duplex
;
7416 vn_max_rate
= ((bp
->mf_config
& FUNC_MF_CFG_MAX_BW_MASK
) >>
7417 FUNC_MF_CFG_MAX_BW_SHIFT
) * 100;
7418 if (vn_max_rate
< cmd
->speed
)
7419 cmd
->speed
= vn_max_rate
;
7422 if (bp
->link_params
.switch_cfg
== SWITCH_CFG_10G
) {
7424 XGXS_EXT_PHY_TYPE(bp
->link_params
.ext_phy_config
);
7426 switch (ext_phy_type
) {
7427 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT
:
7428 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705
:
7429 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706
:
7430 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072
:
7431 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073
:
7432 cmd
->port
= PORT_FIBRE
;
7435 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101
:
7436 cmd
->port
= PORT_TP
;
7439 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE
:
7440 BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
7441 bp
->link_params
.ext_phy_config
);
7445 DP(NETIF_MSG_LINK
, "BAD XGXS ext_phy_config 0x%x\n",
7446 bp
->link_params
.ext_phy_config
);
7450 cmd
->port
= PORT_TP
;
7452 cmd
->phy_address
= bp
->port
.phy_addr
;
7453 cmd
->transceiver
= XCVR_INTERNAL
;
7455 if (bp
->link_params
.req_line_speed
== SPEED_AUTO_NEG
)
7456 cmd
->autoneg
= AUTONEG_ENABLE
;
7458 cmd
->autoneg
= AUTONEG_DISABLE
;
7463 DP(NETIF_MSG_LINK
, "ethtool_cmd: cmd %d\n"
7464 DP_LEVEL
" supported 0x%x advertising 0x%x speed %d\n"
7465 DP_LEVEL
" duplex %d port %d phy_address %d transceiver %d\n"
7466 DP_LEVEL
" autoneg %d maxtxpkt %d maxrxpkt %d\n",
7467 cmd
->cmd
, cmd
->supported
, cmd
->advertising
, cmd
->speed
,
7468 cmd
->duplex
, cmd
->port
, cmd
->phy_address
, cmd
->transceiver
,
7469 cmd
->autoneg
, cmd
->maxtxpkt
, cmd
->maxrxpkt
);
7474 static int bnx2x_set_settings(struct net_device
*dev
, struct ethtool_cmd
*cmd
)
7476 struct bnx2x
*bp
= netdev_priv(dev
);
7482 DP(NETIF_MSG_LINK
, "ethtool_cmd: cmd %d\n"
7483 DP_LEVEL
" supported 0x%x advertising 0x%x speed %d\n"
7484 DP_LEVEL
" duplex %d port %d phy_address %d transceiver %d\n"
7485 DP_LEVEL
" autoneg %d maxtxpkt %d maxrxpkt %d\n",
7486 cmd
->cmd
, cmd
->supported
, cmd
->advertising
, cmd
->speed
,
7487 cmd
->duplex
, cmd
->port
, cmd
->phy_address
, cmd
->transceiver
,
7488 cmd
->autoneg
, cmd
->maxtxpkt
, cmd
->maxrxpkt
);
7490 if (cmd
->autoneg
== AUTONEG_ENABLE
) {
7491 if (!(bp
->port
.supported
& SUPPORTED_Autoneg
)) {
7492 DP(NETIF_MSG_LINK
, "Autoneg not supported\n");
7496 /* advertise the requested speed and duplex if supported */
7497 cmd
->advertising
&= bp
->port
.supported
;
7499 bp
->link_params
.req_line_speed
= SPEED_AUTO_NEG
;
7500 bp
->link_params
.req_duplex
= DUPLEX_FULL
;
7501 bp
->port
.advertising
|= (ADVERTISED_Autoneg
|
7504 } else { /* forced speed */
7505 /* advertise the requested speed and duplex if supported */
7506 switch (cmd
->speed
) {
7508 if (cmd
->duplex
== DUPLEX_FULL
) {
7509 if (!(bp
->port
.supported
&
7510 SUPPORTED_10baseT_Full
)) {
7512 "10M full not supported\n");
7516 advertising
= (ADVERTISED_10baseT_Full
|
7519 if (!(bp
->port
.supported
&
7520 SUPPORTED_10baseT_Half
)) {
7522 "10M half not supported\n");
7526 advertising
= (ADVERTISED_10baseT_Half
|
7532 if (cmd
->duplex
== DUPLEX_FULL
) {
7533 if (!(bp
->port
.supported
&
7534 SUPPORTED_100baseT_Full
)) {
7536 "100M full not supported\n");
7540 advertising
= (ADVERTISED_100baseT_Full
|
7543 if (!(bp
->port
.supported
&
7544 SUPPORTED_100baseT_Half
)) {
7546 "100M half not supported\n");
7550 advertising
= (ADVERTISED_100baseT_Half
|
7556 if (cmd
->duplex
!= DUPLEX_FULL
) {
7557 DP(NETIF_MSG_LINK
, "1G half not supported\n");
7561 if (!(bp
->port
.supported
& SUPPORTED_1000baseT_Full
)) {
7562 DP(NETIF_MSG_LINK
, "1G full not supported\n");
7566 advertising
= (ADVERTISED_1000baseT_Full
|
7571 if (cmd
->duplex
!= DUPLEX_FULL
) {
7573 "2.5G half not supported\n");
7577 if (!(bp
->port
.supported
& SUPPORTED_2500baseX_Full
)) {
7579 "2.5G full not supported\n");
7583 advertising
= (ADVERTISED_2500baseX_Full
|
7588 if (cmd
->duplex
!= DUPLEX_FULL
) {
7589 DP(NETIF_MSG_LINK
, "10G half not supported\n");
7593 if (!(bp
->port
.supported
& SUPPORTED_10000baseT_Full
)) {
7594 DP(NETIF_MSG_LINK
, "10G full not supported\n");
7598 advertising
= (ADVERTISED_10000baseT_Full
|
7603 DP(NETIF_MSG_LINK
, "Unsupported speed\n");
7607 bp
->link_params
.req_line_speed
= cmd
->speed
;
7608 bp
->link_params
.req_duplex
= cmd
->duplex
;
7609 bp
->port
.advertising
= advertising
;
7612 DP(NETIF_MSG_LINK
, "req_line_speed %d\n"
7613 DP_LEVEL
" req_duplex %d advertising 0x%x\n",
7614 bp
->link_params
.req_line_speed
, bp
->link_params
.req_duplex
,
7615 bp
->port
.advertising
);
7617 if (netif_running(dev
)) {
7618 bnx2x_stats_handle(bp
, STATS_EVENT_STOP
);
7625 #define PHY_FW_VER_LEN 10
7627 static void bnx2x_get_drvinfo(struct net_device
*dev
,
7628 struct ethtool_drvinfo
*info
)
7630 struct bnx2x
*bp
= netdev_priv(dev
);
7631 char phy_fw_ver
[PHY_FW_VER_LEN
];
7633 strcpy(info
->driver
, DRV_MODULE_NAME
);
7634 strcpy(info
->version
, DRV_MODULE_VERSION
);
7636 phy_fw_ver
[0] = '\0';
7638 bnx2x_phy_hw_lock(bp
);
7639 bnx2x_get_ext_phy_fw_version(&bp
->link_params
,
7640 (bp
->state
!= BNX2X_STATE_CLOSED
),
7641 phy_fw_ver
, PHY_FW_VER_LEN
);
7642 bnx2x_phy_hw_unlock(bp
);
7645 snprintf(info
->fw_version
, 32, "%d.%d.%d:%d BC:%x%s%s",
7646 BCM_5710_FW_MAJOR_VERSION
, BCM_5710_FW_MINOR_VERSION
,
7647 BCM_5710_FW_REVISION_VERSION
,
7648 BCM_5710_FW_COMPILE_FLAGS
, bp
->common
.bc_ver
,
7649 ((phy_fw_ver
[0] != '\0')? " PHY:":""), phy_fw_ver
);
7650 strcpy(info
->bus_info
, pci_name(bp
->pdev
));
7651 info
->n_stats
= BNX2X_NUM_STATS
;
7652 info
->testinfo_len
= BNX2X_NUM_TESTS
;
7653 info
->eedump_len
= bp
->common
.flash_size
;
7654 info
->regdump_len
= 0;
7657 static void bnx2x_get_wol(struct net_device
*dev
, struct ethtool_wolinfo
*wol
)
7659 struct bnx2x
*bp
= netdev_priv(dev
);
7661 if (bp
->flags
& NO_WOL_FLAG
) {
7665 wol
->supported
= WAKE_MAGIC
;
7667 wol
->wolopts
= WAKE_MAGIC
;
7671 memset(&wol
->sopass
, 0, sizeof(wol
->sopass
));
7674 static int bnx2x_set_wol(struct net_device
*dev
, struct ethtool_wolinfo
*wol
)
7676 struct bnx2x
*bp
= netdev_priv(dev
);
7678 if (wol
->wolopts
& ~WAKE_MAGIC
)
7681 if (wol
->wolopts
& WAKE_MAGIC
) {
7682 if (bp
->flags
& NO_WOL_FLAG
)
7692 static u32
bnx2x_get_msglevel(struct net_device
*dev
)
7694 struct bnx2x
*bp
= netdev_priv(dev
);
7696 return bp
->msglevel
;
7699 static void bnx2x_set_msglevel(struct net_device
*dev
, u32 level
)
7701 struct bnx2x
*bp
= netdev_priv(dev
);
7703 if (capable(CAP_NET_ADMIN
))
7704 bp
->msglevel
= level
;
7707 static int bnx2x_nway_reset(struct net_device
*dev
)
7709 struct bnx2x
*bp
= netdev_priv(dev
);
7714 if (netif_running(dev
)) {
7715 bnx2x_stats_handle(bp
, STATS_EVENT_STOP
);
7722 static int bnx2x_get_eeprom_len(struct net_device
*dev
)
7724 struct bnx2x
*bp
= netdev_priv(dev
);
7726 return bp
->common
.flash_size
;
7729 static int bnx2x_acquire_nvram_lock(struct bnx2x
*bp
)
7731 int port
= BP_PORT(bp
);
7735 /* adjust timeout for emulation/FPGA */
7736 count
= NVRAM_TIMEOUT_COUNT
;
7737 if (CHIP_REV_IS_SLOW(bp
))
7740 /* request access to nvram interface */
7741 REG_WR(bp
, MCP_REG_MCPR_NVM_SW_ARB
,
7742 (MCPR_NVM_SW_ARB_ARB_REQ_SET1
<< port
));
7744 for (i
= 0; i
< count
*10; i
++) {
7745 val
= REG_RD(bp
, MCP_REG_MCPR_NVM_SW_ARB
);
7746 if (val
& (MCPR_NVM_SW_ARB_ARB_ARB1
<< port
))
7752 if (!(val
& (MCPR_NVM_SW_ARB_ARB_ARB1
<< port
))) {
7753 DP(BNX2X_MSG_NVM
, "cannot get access to nvram interface\n");
7760 static int bnx2x_release_nvram_lock(struct bnx2x
*bp
)
7762 int port
= BP_PORT(bp
);
7766 /* adjust timeout for emulation/FPGA */
7767 count
= NVRAM_TIMEOUT_COUNT
;
7768 if (CHIP_REV_IS_SLOW(bp
))
7771 /* relinquish nvram interface */
7772 REG_WR(bp
, MCP_REG_MCPR_NVM_SW_ARB
,
7773 (MCPR_NVM_SW_ARB_ARB_REQ_CLR1
<< port
));
7775 for (i
= 0; i
< count
*10; i
++) {
7776 val
= REG_RD(bp
, MCP_REG_MCPR_NVM_SW_ARB
);
7777 if (!(val
& (MCPR_NVM_SW_ARB_ARB_ARB1
<< port
)))
7783 if (val
& (MCPR_NVM_SW_ARB_ARB_ARB1
<< port
)) {
7784 DP(BNX2X_MSG_NVM
, "cannot free access to nvram interface\n");
7791 static void bnx2x_enable_nvram_access(struct bnx2x
*bp
)
7795 val
= REG_RD(bp
, MCP_REG_MCPR_NVM_ACCESS_ENABLE
);
7797 /* enable both bits, even on read */
7798 REG_WR(bp
, MCP_REG_MCPR_NVM_ACCESS_ENABLE
,
7799 (val
| MCPR_NVM_ACCESS_ENABLE_EN
|
7800 MCPR_NVM_ACCESS_ENABLE_WR_EN
));
7803 static void bnx2x_disable_nvram_access(struct bnx2x
*bp
)
7807 val
= REG_RD(bp
, MCP_REG_MCPR_NVM_ACCESS_ENABLE
);
7809 /* disable both bits, even after read */
7810 REG_WR(bp
, MCP_REG_MCPR_NVM_ACCESS_ENABLE
,
7811 (val
& ~(MCPR_NVM_ACCESS_ENABLE_EN
|
7812 MCPR_NVM_ACCESS_ENABLE_WR_EN
)));
7815 static int bnx2x_nvram_read_dword(struct bnx2x
*bp
, u32 offset
, u32
*ret_val
,
7821 /* build the command word */
7822 cmd_flags
|= MCPR_NVM_COMMAND_DOIT
;
7824 /* need to clear DONE bit separately */
7825 REG_WR(bp
, MCP_REG_MCPR_NVM_COMMAND
, MCPR_NVM_COMMAND_DONE
);
7827 /* address of the NVRAM to read from */
7828 REG_WR(bp
, MCP_REG_MCPR_NVM_ADDR
,
7829 (offset
& MCPR_NVM_ADDR_NVM_ADDR_VALUE
));
7831 /* issue a read command */
7832 REG_WR(bp
, MCP_REG_MCPR_NVM_COMMAND
, cmd_flags
);
7834 /* adjust timeout for emulation/FPGA */
7835 count
= NVRAM_TIMEOUT_COUNT
;
7836 if (CHIP_REV_IS_SLOW(bp
))
7839 /* wait for completion */
7842 for (i
= 0; i
< count
; i
++) {
7844 val
= REG_RD(bp
, MCP_REG_MCPR_NVM_COMMAND
);
7846 if (val
& MCPR_NVM_COMMAND_DONE
) {
7847 val
= REG_RD(bp
, MCP_REG_MCPR_NVM_READ
);
7848 /* we read nvram data in cpu order
7849 * but ethtool sees it as an array of bytes
7850 * converting to big-endian will do the work */
7851 val
= cpu_to_be32(val
);
7861 static int bnx2x_nvram_read(struct bnx2x
*bp
, u32 offset
, u8
*ret_buf
,
7868 if ((offset
& 0x03) || (buf_size
& 0x03) || (buf_size
== 0)) {
7870 "Invalid parameter: offset 0x%x buf_size 0x%x\n",
7875 if (offset
+ buf_size
> bp
->common
.flash_size
) {
7876 DP(BNX2X_MSG_NVM
, "Invalid parameter: offset (0x%x) +"
7877 " buf_size (0x%x) > flash_size (0x%x)\n",
7878 offset
, buf_size
, bp
->common
.flash_size
);
7882 /* request access to nvram interface */
7883 rc
= bnx2x_acquire_nvram_lock(bp
);
7887 /* enable access to nvram interface */
7888 bnx2x_enable_nvram_access(bp
);
7890 /* read the first word(s) */
7891 cmd_flags
= MCPR_NVM_COMMAND_FIRST
;
7892 while ((buf_size
> sizeof(u32
)) && (rc
== 0)) {
7893 rc
= bnx2x_nvram_read_dword(bp
, offset
, &val
, cmd_flags
);
7894 memcpy(ret_buf
, &val
, 4);
7896 /* advance to the next dword */
7897 offset
+= sizeof(u32
);
7898 ret_buf
+= sizeof(u32
);
7899 buf_size
-= sizeof(u32
);
7904 cmd_flags
|= MCPR_NVM_COMMAND_LAST
;
7905 rc
= bnx2x_nvram_read_dword(bp
, offset
, &val
, cmd_flags
);
7906 memcpy(ret_buf
, &val
, 4);
7909 /* disable access to nvram interface */
7910 bnx2x_disable_nvram_access(bp
);
7911 bnx2x_release_nvram_lock(bp
);
7916 static int bnx2x_get_eeprom(struct net_device
*dev
,
7917 struct ethtool_eeprom
*eeprom
, u8
*eebuf
)
7919 struct bnx2x
*bp
= netdev_priv(dev
);
7922 DP(BNX2X_MSG_NVM
, "ethtool_eeprom: cmd %d\n"
7923 DP_LEVEL
" magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
7924 eeprom
->cmd
, eeprom
->magic
, eeprom
->offset
, eeprom
->offset
,
7925 eeprom
->len
, eeprom
->len
);
7927 /* parameters already validated in ethtool_get_eeprom */
7929 rc
= bnx2x_nvram_read(bp
, eeprom
->offset
, eebuf
, eeprom
->len
);
7934 static int bnx2x_nvram_write_dword(struct bnx2x
*bp
, u32 offset
, u32 val
,
7939 /* build the command word */
7940 cmd_flags
|= MCPR_NVM_COMMAND_DOIT
| MCPR_NVM_COMMAND_WR
;
7942 /* need to clear DONE bit separately */
7943 REG_WR(bp
, MCP_REG_MCPR_NVM_COMMAND
, MCPR_NVM_COMMAND_DONE
);
7945 /* write the data */
7946 REG_WR(bp
, MCP_REG_MCPR_NVM_WRITE
, val
);
7948 /* address of the NVRAM to write to */
7949 REG_WR(bp
, MCP_REG_MCPR_NVM_ADDR
,
7950 (offset
& MCPR_NVM_ADDR_NVM_ADDR_VALUE
));
7952 /* issue the write command */
7953 REG_WR(bp
, MCP_REG_MCPR_NVM_COMMAND
, cmd_flags
);
7955 /* adjust timeout for emulation/FPGA */
7956 count
= NVRAM_TIMEOUT_COUNT
;
7957 if (CHIP_REV_IS_SLOW(bp
))
7960 /* wait for completion */
7962 for (i
= 0; i
< count
; i
++) {
7964 val
= REG_RD(bp
, MCP_REG_MCPR_NVM_COMMAND
);
7965 if (val
& MCPR_NVM_COMMAND_DONE
) {
7974 #define BYTE_OFFSET(offset) (8 * (offset & 0x03))
7976 static int bnx2x_nvram_write1(struct bnx2x
*bp
, u32 offset
, u8
*data_buf
,
7984 if (offset
+ buf_size
> bp
->common
.flash_size
) {
7985 DP(BNX2X_MSG_NVM
, "Invalid parameter: offset (0x%x) +"
7986 " buf_size (0x%x) > flash_size (0x%x)\n",
7987 offset
, buf_size
, bp
->common
.flash_size
);
7991 /* request access to nvram interface */
7992 rc
= bnx2x_acquire_nvram_lock(bp
);
7996 /* enable access to nvram interface */
7997 bnx2x_enable_nvram_access(bp
);
7999 cmd_flags
= (MCPR_NVM_COMMAND_FIRST
| MCPR_NVM_COMMAND_LAST
);
8000 align_offset
= (offset
& ~0x03);
8001 rc
= bnx2x_nvram_read_dword(bp
, align_offset
, &val
, cmd_flags
);
8004 val
&= ~(0xff << BYTE_OFFSET(offset
));
8005 val
|= (*data_buf
<< BYTE_OFFSET(offset
));
8007 /* nvram data is returned as an array of bytes
8008 * convert it back to cpu order */
8009 val
= be32_to_cpu(val
);
8011 rc
= bnx2x_nvram_write_dword(bp
, align_offset
, val
,
8015 /* disable access to nvram interface */
8016 bnx2x_disable_nvram_access(bp
);
8017 bnx2x_release_nvram_lock(bp
);
8022 static int bnx2x_nvram_write(struct bnx2x
*bp
, u32 offset
, u8
*data_buf
,
8030 if (buf_size
== 1) /* ethtool */
8031 return bnx2x_nvram_write1(bp
, offset
, data_buf
, buf_size
);
8033 if ((offset
& 0x03) || (buf_size
& 0x03) || (buf_size
== 0)) {
8035 "Invalid parameter: offset 0x%x buf_size 0x%x\n",
8040 if (offset
+ buf_size
> bp
->common
.flash_size
) {
8041 DP(BNX2X_MSG_NVM
, "Invalid parameter: offset (0x%x) +"
8042 " buf_size (0x%x) > flash_size (0x%x)\n",
8043 offset
, buf_size
, bp
->common
.flash_size
);
8047 /* request access to nvram interface */
8048 rc
= bnx2x_acquire_nvram_lock(bp
);
8052 /* enable access to nvram interface */
8053 bnx2x_enable_nvram_access(bp
);
8056 cmd_flags
= MCPR_NVM_COMMAND_FIRST
;
8057 while ((written_so_far
< buf_size
) && (rc
== 0)) {
8058 if (written_so_far
== (buf_size
- sizeof(u32
)))
8059 cmd_flags
|= MCPR_NVM_COMMAND_LAST
;
8060 else if (((offset
+ 4) % NVRAM_PAGE_SIZE
) == 0)
8061 cmd_flags
|= MCPR_NVM_COMMAND_LAST
;
8062 else if ((offset
% NVRAM_PAGE_SIZE
) == 0)
8063 cmd_flags
|= MCPR_NVM_COMMAND_FIRST
;
8065 memcpy(&val
, data_buf
, 4);
8067 rc
= bnx2x_nvram_write_dword(bp
, offset
, val
, cmd_flags
);
8069 /* advance to the next dword */
8070 offset
+= sizeof(u32
);
8071 data_buf
+= sizeof(u32
);
8072 written_so_far
+= sizeof(u32
);
8076 /* disable access to nvram interface */
8077 bnx2x_disable_nvram_access(bp
);
8078 bnx2x_release_nvram_lock(bp
);
8083 static int bnx2x_set_eeprom(struct net_device
*dev
,
8084 struct ethtool_eeprom
*eeprom
, u8
*eebuf
)
8086 struct bnx2x
*bp
= netdev_priv(dev
);
8089 DP(BNX2X_MSG_NVM
, "ethtool_eeprom: cmd %d\n"
8090 DP_LEVEL
" magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
8091 eeprom
->cmd
, eeprom
->magic
, eeprom
->offset
, eeprom
->offset
,
8092 eeprom
->len
, eeprom
->len
);
8094 /* parameters already validated in ethtool_set_eeprom */
8096 /* If the magic number is PHY (0x00504859) upgrade the PHY FW */
8097 if (eeprom
->magic
== 0x00504859)
8100 bnx2x_phy_hw_lock(bp
);
8101 rc
= bnx2x_flash_download(bp
, BP_PORT(bp
),
8102 bp
->link_params
.ext_phy_config
,
8103 (bp
->state
!= BNX2X_STATE_CLOSED
),
8104 eebuf
, eeprom
->len
);
8105 if ((bp
->state
== BNX2X_STATE_OPEN
) ||
8106 (bp
->state
== BNX2X_STATE_DISABLED
)) {
8107 rc
|= bnx2x_link_reset(&bp
->link_params
,
8109 rc
|= bnx2x_phy_init(&bp
->link_params
,
8112 bnx2x_phy_hw_unlock(bp
);
8114 } else /* Only the PMF can access the PHY */
8117 rc
= bnx2x_nvram_write(bp
, eeprom
->offset
, eebuf
, eeprom
->len
);
8122 static int bnx2x_get_coalesce(struct net_device
*dev
,
8123 struct ethtool_coalesce
*coal
)
8125 struct bnx2x
*bp
= netdev_priv(dev
);
8127 memset(coal
, 0, sizeof(struct ethtool_coalesce
));
8129 coal
->rx_coalesce_usecs
= bp
->rx_ticks
;
8130 coal
->tx_coalesce_usecs
= bp
->tx_ticks
;
8131 coal
->stats_block_coalesce_usecs
= bp
->stats_ticks
;
8136 static int bnx2x_set_coalesce(struct net_device
*dev
,
8137 struct ethtool_coalesce
*coal
)
8139 struct bnx2x
*bp
= netdev_priv(dev
);
8141 bp
->rx_ticks
= (u16
) coal
->rx_coalesce_usecs
;
8142 if (bp
->rx_ticks
> 3000)
8143 bp
->rx_ticks
= 3000;
8145 bp
->tx_ticks
= (u16
) coal
->tx_coalesce_usecs
;
8146 if (bp
->tx_ticks
> 0x3000)
8147 bp
->tx_ticks
= 0x3000;
8149 bp
->stats_ticks
= coal
->stats_block_coalesce_usecs
;
8150 if (bp
->stats_ticks
> 0xffff00)
8151 bp
->stats_ticks
= 0xffff00;
8152 bp
->stats_ticks
&= 0xffff00;
8154 if (netif_running(dev
))
8155 bnx2x_update_coalesce(bp
);
8160 static int bnx2x_set_flags(struct net_device
*dev
, u32 data
)
8162 struct bnx2x
*bp
= netdev_priv(dev
);
8166 if (data
& ETH_FLAG_LRO
) {
8167 if (!(dev
->features
& NETIF_F_LRO
)) {
8168 dev
->features
|= NETIF_F_LRO
;
8169 bp
->flags
|= TPA_ENABLE_FLAG
;
8173 } else if (dev
->features
& NETIF_F_LRO
) {
8174 dev
->features
&= ~NETIF_F_LRO
;
8175 bp
->flags
&= ~TPA_ENABLE_FLAG
;
8179 if (changed
&& netif_running(dev
)) {
8180 bnx2x_nic_unload(bp
, UNLOAD_NORMAL
);
8181 rc
= bnx2x_nic_load(bp
, LOAD_NORMAL
);
8187 static void bnx2x_get_ringparam(struct net_device
*dev
,
8188 struct ethtool_ringparam
*ering
)
8190 struct bnx2x
*bp
= netdev_priv(dev
);
8192 ering
->rx_max_pending
= MAX_RX_AVAIL
;
8193 ering
->rx_mini_max_pending
= 0;
8194 ering
->rx_jumbo_max_pending
= 0;
8196 ering
->rx_pending
= bp
->rx_ring_size
;
8197 ering
->rx_mini_pending
= 0;
8198 ering
->rx_jumbo_pending
= 0;
8200 ering
->tx_max_pending
= MAX_TX_AVAIL
;
8201 ering
->tx_pending
= bp
->tx_ring_size
;
8204 static int bnx2x_set_ringparam(struct net_device
*dev
,
8205 struct ethtool_ringparam
*ering
)
8207 struct bnx2x
*bp
= netdev_priv(dev
);
8210 if ((ering
->rx_pending
> MAX_RX_AVAIL
) ||
8211 (ering
->tx_pending
> MAX_TX_AVAIL
) ||
8212 (ering
->tx_pending
<= MAX_SKB_FRAGS
+ 4))
8215 bp
->rx_ring_size
= ering
->rx_pending
;
8216 bp
->tx_ring_size
= ering
->tx_pending
;
8218 if (netif_running(dev
)) {
8219 bnx2x_nic_unload(bp
, UNLOAD_NORMAL
);
8220 rc
= bnx2x_nic_load(bp
, LOAD_NORMAL
);
8226 static void bnx2x_get_pauseparam(struct net_device
*dev
,
8227 struct ethtool_pauseparam
*epause
)
8229 struct bnx2x
*bp
= netdev_priv(dev
);
8231 epause
->autoneg
= (bp
->link_params
.req_flow_ctrl
== FLOW_CTRL_AUTO
) &&
8232 (bp
->link_params
.req_line_speed
== SPEED_AUTO_NEG
);
8234 epause
->rx_pause
= ((bp
->link_vars
.flow_ctrl
& FLOW_CTRL_RX
) ==
8236 epause
->tx_pause
= ((bp
->link_vars
.flow_ctrl
& FLOW_CTRL_TX
) ==
8239 DP(NETIF_MSG_LINK
, "ethtool_pauseparam: cmd %d\n"
8240 DP_LEVEL
" autoneg %d rx_pause %d tx_pause %d\n",
8241 epause
->cmd
, epause
->autoneg
, epause
->rx_pause
, epause
->tx_pause
);
8244 static int bnx2x_set_pauseparam(struct net_device
*dev
,
8245 struct ethtool_pauseparam
*epause
)
8247 struct bnx2x
*bp
= netdev_priv(dev
);
8252 DP(NETIF_MSG_LINK
, "ethtool_pauseparam: cmd %d\n"
8253 DP_LEVEL
" autoneg %d rx_pause %d tx_pause %d\n",
8254 epause
->cmd
, epause
->autoneg
, epause
->rx_pause
, epause
->tx_pause
);
8256 bp
->link_params
.req_flow_ctrl
= FLOW_CTRL_AUTO
;
8258 if (epause
->rx_pause
)
8259 bp
->link_params
.req_flow_ctrl
|= FLOW_CTRL_RX
;
8261 if (epause
->tx_pause
)
8262 bp
->link_params
.req_flow_ctrl
|= FLOW_CTRL_TX
;
8264 if (bp
->link_params
.req_flow_ctrl
== FLOW_CTRL_AUTO
)
8265 bp
->link_params
.req_flow_ctrl
= FLOW_CTRL_NONE
;
8267 if (epause
->autoneg
) {
8268 if (!(bp
->port
.supported
& SUPPORTED_Autoneg
)) {
8269 DP(NETIF_MSG_LINK
, "Autoneg not supported\n");
8273 if (bp
->link_params
.req_line_speed
== SPEED_AUTO_NEG
)
8274 bp
->link_params
.req_flow_ctrl
= FLOW_CTRL_AUTO
;
8278 "req_flow_ctrl 0x%x\n", bp
->link_params
.req_flow_ctrl
);
8280 if (netif_running(dev
)) {
8281 bnx2x_stats_handle(bp
, STATS_EVENT_STOP
);
8288 static u32
bnx2x_get_rx_csum(struct net_device
*dev
)
8290 struct bnx2x
*bp
= netdev_priv(dev
);
8295 static int bnx2x_set_rx_csum(struct net_device
*dev
, u32 data
)
8297 struct bnx2x
*bp
= netdev_priv(dev
);
8303 static int bnx2x_set_tso(struct net_device
*dev
, u32 data
)
8306 dev
->features
|= (NETIF_F_TSO
| NETIF_F_TSO_ECN
);
8307 dev
->features
|= NETIF_F_TSO6
;
8309 dev
->features
&= ~(NETIF_F_TSO
| NETIF_F_TSO_ECN
);
8310 dev
->features
&= ~NETIF_F_TSO6
;
8316 static const struct {
8317 char string
[ETH_GSTRING_LEN
];
8318 } bnx2x_tests_str_arr
[BNX2X_NUM_TESTS
] = {
8319 { "register_test (offline)" },
8320 { "memory_test (offline)" },
8321 { "loopback_test (offline)" },
8322 { "nvram_test (online)" },
8323 { "interrupt_test (online)" },
8324 { "link_test (online)" },
8325 { "idle check (online)" },
8326 { "MC errors (online)" }
8329 static int bnx2x_self_test_count(struct net_device
*dev
)
8331 return BNX2X_NUM_TESTS
;
8334 static int bnx2x_test_registers(struct bnx2x
*bp
)
8336 int idx
, i
, rc
= -ENODEV
;
8338 static const struct {
8343 /* 0 */ { BRB1_REG_PAUSE_LOW_THRESHOLD_0
, 4, 0x000003ff },
8344 { DORQ_REG_DB_ADDR0
, 4, 0xffffffff },
8345 { HC_REG_AGG_INT_0
, 4, 0x000003ff },
8346 { PBF_REG_MAC_IF0_ENABLE
, 4, 0x00000001 },
8347 { PBF_REG_P0_INIT_CRD
, 4, 0x000007ff },
8348 { PRS_REG_CID_PORT_0
, 4, 0x00ffffff },
8349 { PXP2_REG_PSWRQ_CDU0_L2P
, 4, 0x000fffff },
8350 { PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR
, 8, 0x0003ffff },
8351 { PXP2_REG_PSWRQ_TM0_L2P
, 4, 0x000fffff },
8352 { PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR
, 8, 0x0003ffff },
8353 /* 10 */ { PXP2_REG_PSWRQ_TSDM0_L2P
, 4, 0x000fffff },
8354 { QM_REG_CONNNUM_0
, 4, 0x000fffff },
8355 { TM_REG_LIN0_MAX_ACTIVE_CID
, 4, 0x0003ffff },
8356 { SRC_REG_KEYRSS0_0
, 40, 0xffffffff },
8357 { SRC_REG_KEYRSS0_7
, 40, 0xffffffff },
8358 { XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00
, 4, 0x00000001 },
8359 { XCM_REG_WU_DA_CNT_CMD00
, 4, 0x00000003 },
8360 { XCM_REG_GLB_DEL_ACK_MAX_CNT_0
, 4, 0x000000ff },
8361 { NIG_REG_EGRESS_MNG0_FIFO
, 20, 0xffffffff },
8362 { NIG_REG_LLH0_T_BIT
, 4, 0x00000001 },
8363 /* 20 */ { NIG_REG_EMAC0_IN_EN
, 4, 0x00000001 },
8364 { NIG_REG_BMAC0_IN_EN
, 4, 0x00000001 },
8365 { NIG_REG_XCM0_OUT_EN
, 4, 0x00000001 },
8366 { NIG_REG_BRB0_OUT_EN
, 4, 0x00000001 },
8367 { NIG_REG_LLH0_XCM_MASK
, 4, 0x00000007 },
8368 { NIG_REG_LLH0_ACPI_PAT_6_LEN
, 68, 0x000000ff },
8369 { NIG_REG_LLH0_ACPI_PAT_0_CRC
, 68, 0xffffffff },
8370 { NIG_REG_LLH0_DEST_MAC_0_0
, 160, 0xffffffff },
8371 { NIG_REG_LLH0_DEST_IP_0_1
, 160, 0xffffffff },
8372 { NIG_REG_LLH0_IPV4_IPV6_0
, 160, 0x00000001 },
8373 /* 30 */ { NIG_REG_LLH0_DEST_UDP_0
, 160, 0x0000ffff },
8374 { NIG_REG_LLH0_DEST_TCP_0
, 160, 0x0000ffff },
8375 { NIG_REG_LLH0_VLAN_ID_0
, 160, 0x00000fff },
8376 { NIG_REG_XGXS_SERDES0_MODE_SEL
, 4, 0x00000001 },
8377 { NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0
, 4, 0x00000001 },
8378 { NIG_REG_STATUS_INTERRUPT_PORT0
, 4, 0x07ffffff },
8379 { NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST
, 24, 0x00000001 },
8380 { NIG_REG_SERDES0_CTRL_PHY_ADDR
, 16, 0x0000001f },
8382 { 0xffffffff, 0, 0x00000000 }
8385 if (!netif_running(bp
->dev
))
8388 /* Repeat the test twice:
8389 First by writing 0x00000000, second by writing 0xffffffff */
8390 for (idx
= 0; idx
< 2; idx
++) {
8397 wr_val
= 0xffffffff;
8401 for (i
= 0; reg_tbl
[i
].offset0
!= 0xffffffff; i
++) {
8402 u32 offset
, mask
, save_val
, val
;
8403 int port
= BP_PORT(bp
);
8405 offset
= reg_tbl
[i
].offset0
+ port
*reg_tbl
[i
].offset1
;
8406 mask
= reg_tbl
[i
].mask
;
8408 save_val
= REG_RD(bp
, offset
);
8410 REG_WR(bp
, offset
, wr_val
);
8411 val
= REG_RD(bp
, offset
);
8413 /* Restore the original register's value */
8414 REG_WR(bp
, offset
, save_val
);
8416 /* verify that value is as expected value */
8417 if ((val
& mask
) != (wr_val
& mask
))
static int bnx2x_test_memory(struct bnx2x *bp)
{
	int i, j, rc = -ENODEV;
	u32 val;
	static const struct {
		u32 offset;
		int size;
	} mem_tbl[] = {
		{ CCM_REG_XX_DESCR_TABLE,   CCM_REG_XX_DESCR_TABLE_SIZE },
		{ CFC_REG_ACTIVITY_COUNTER, CFC_REG_ACTIVITY_COUNTER_SIZE },
		{ CFC_REG_LINK_LIST,        CFC_REG_LINK_LIST_SIZE },
		{ DMAE_REG_CMD_MEM,         DMAE_REG_CMD_MEM_SIZE },
		{ TCM_REG_XX_DESCR_TABLE,   TCM_REG_XX_DESCR_TABLE_SIZE },
		{ UCM_REG_XX_DESCR_TABLE,   UCM_REG_XX_DESCR_TABLE_SIZE },
		{ XCM_REG_XX_DESCR_TABLE,   XCM_REG_XX_DESCR_TABLE_SIZE },

		{ 0xffffffff, 0 }
	};
	static const struct {
		char *name;
		u32 offset;
		u32 mask;
	} prty_tbl[] = {
		{ "CCM_REG_CCM_PRTY_STS",   CCM_REG_CCM_PRTY_STS,   0 },
		{ "CFC_REG_CFC_PRTY_STS",   CFC_REG_CFC_PRTY_STS,   0 },
		{ "DMAE_REG_DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0 },
		{ "TCM_REG_TCM_PRTY_STS",   TCM_REG_TCM_PRTY_STS,   0 },
		{ "UCM_REG_UCM_PRTY_STS",   UCM_REG_UCM_PRTY_STS,   0 },
		{ "XCM_REG_XCM_PRTY_STS",   XCM_REG_XCM_PRTY_STS,   0x1 },

		{ NULL, 0xffffffff, 0 }
	};

	if (!netif_running(bp->dev))
		return rc;

	/* Go through all the memories */
	for (i = 0; mem_tbl[i].offset != 0xffffffff; i++)
		for (j = 0; j < mem_tbl[i].size; j++)
			REG_RD(bp, mem_tbl[i].offset + j*4);

	/* Check the parity status */
	for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
		val = REG_RD(bp, prty_tbl[i].offset);
		if (val & ~(prty_tbl[i].mask)) {
			DP(NETIF_MSG_HW,
			   "%s is 0x%x\n", prty_tbl[i].name, val);
			goto test_mem_exit;
		}
	}

	rc = 0;

test_mem_exit:
	return rc;
}
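/* The memory test above is deliberately indirect: every word of each block
 * is read back once, and then the blocks' parity status registers are
 * checked - a read that hit a parity error leaves a latched status bit
 * behind, so a clean sweep plus clean (masked) parity status is taken as
 * "memory OK".
 */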
static void bnx2x_netif_start(struct bnx2x *bp)
{
	int i;

	if (atomic_dec_and_test(&bp->intr_sem)) {
		if (netif_running(bp->dev)) {
			bnx2x_int_enable(bp);
			for_each_queue(bp, i)
				napi_enable(&bnx2x_fp(bp, i, napi));
			if (bp->state == BNX2X_STATE_OPEN)
				netif_wake_queue(bp->dev);
		}
	}
}

static void bnx2x_netif_stop(struct bnx2x *bp)
{
	int i;

	if (netif_running(bp->dev)) {
		netif_tx_disable(bp->dev);
		bp->dev->trans_start = jiffies;	/* prevent tx timeout */
		for_each_queue(bp, i)
			napi_disable(&bnx2x_fp(bp, i, napi));
	}
	bnx2x_int_disable_sync(bp);
}

static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up)
{
	int cnt = 1000;	/* count and delay below are a fill - the original
			 * loop bound and sleep interval were lost */

	if (link_up)
		while (bnx2x_link_test(bp) && cnt--)
			msleep(10);
}
8522 static int bnx2x_run_loopback(struct bnx2x
*bp
, int loopback_mode
, u8 link_up
)
8524 unsigned int pkt_size
, num_pkts
, i
;
8525 struct sk_buff
*skb
;
8526 unsigned char *packet
;
8527 struct bnx2x_fastpath
*fp
= &bp
->fp
[0];
8528 u16 tx_start_idx
, tx_idx
;
8529 u16 rx_start_idx
, rx_idx
;
8531 struct sw_tx_bd
*tx_buf
;
8532 struct eth_tx_bd
*tx_bd
;
8534 union eth_rx_cqe
*cqe
;
8536 struct sw_rx_bd
*rx_buf
;
8540 if (loopback_mode
== BNX2X_MAC_LOOPBACK
) {
8541 bp
->link_params
.loopback_mode
= LOOPBACK_BMAC
;
8542 bnx2x_phy_hw_lock(bp
);
8543 bnx2x_phy_init(&bp
->link_params
, &bp
->link_vars
);
8544 bnx2x_phy_hw_unlock(bp
);
8546 } else if (loopback_mode
== BNX2X_PHY_LOOPBACK
) {
8547 bp
->link_params
.loopback_mode
= LOOPBACK_XGXS_10
;
8548 bnx2x_phy_hw_lock(bp
);
8549 bnx2x_phy_init(&bp
->link_params
, &bp
->link_vars
);
8550 bnx2x_phy_hw_unlock(bp
);
8551 /* wait until link state is restored */
8552 bnx2x_wait_for_link(bp
, link_up
);
8558 skb
= netdev_alloc_skb(bp
->dev
, bp
->rx_buf_size
);
8561 goto test_loopback_exit
;
8563 packet
= skb_put(skb
, pkt_size
);
8564 memcpy(packet
, bp
->dev
->dev_addr
, ETH_ALEN
);
8565 memset(packet
+ ETH_ALEN
, 0, (ETH_HLEN
- ETH_ALEN
));
8566 for (i
= ETH_HLEN
; i
< pkt_size
; i
++)
8567 packet
[i
] = (unsigned char) (i
& 0xff);
8570 tx_start_idx
= le16_to_cpu(*fp
->tx_cons_sb
);
8571 rx_start_idx
= le16_to_cpu(*fp
->rx_cons_sb
);
8573 pkt_prod
= fp
->tx_pkt_prod
++;
8574 tx_buf
= &fp
->tx_buf_ring
[TX_BD(pkt_prod
)];
8575 tx_buf
->first_bd
= fp
->tx_bd_prod
;
8578 tx_bd
= &fp
->tx_desc_ring
[TX_BD(fp
->tx_bd_prod
)];
8579 mapping
= pci_map_single(bp
->pdev
, skb
->data
,
8580 skb_headlen(skb
), PCI_DMA_TODEVICE
);
8581 tx_bd
->addr_hi
= cpu_to_le32(U64_HI(mapping
));
8582 tx_bd
->addr_lo
= cpu_to_le32(U64_LO(mapping
));
8583 tx_bd
->nbd
= cpu_to_le16(1);
8584 tx_bd
->nbytes
= cpu_to_le16(skb_headlen(skb
));
8585 tx_bd
->vlan
= cpu_to_le16(pkt_prod
);
8586 tx_bd
->bd_flags
.as_bitfield
= (ETH_TX_BD_FLAGS_START_BD
|
8587 ETH_TX_BD_FLAGS_END_BD
);
8588 tx_bd
->general_data
= ((UNICAST_ADDRESS
<<
8589 ETH_TX_BD_ETH_ADDR_TYPE_SHIFT
) | 1);
8591 fp
->hw_tx_prods
->bds_prod
=
8592 cpu_to_le16(le16_to_cpu(fp
->hw_tx_prods
->bds_prod
) + 1);
8593 mb(); /* FW restriction: must not reorder writing nbd and packets */
8594 fp
->hw_tx_prods
->packets_prod
=
8595 cpu_to_le32(le32_to_cpu(fp
->hw_tx_prods
->packets_prod
) + 1);
8596 DOORBELL(bp
, FP_IDX(fp
), 0);
8602 bp
->dev
->trans_start
= jiffies
;
8606 tx_idx
= le16_to_cpu(*fp
->tx_cons_sb
);
8607 if (tx_idx
!= tx_start_idx
+ num_pkts
)
8608 goto test_loopback_exit
;
8610 rx_idx
= le16_to_cpu(*fp
->rx_cons_sb
);
8611 if (rx_idx
!= rx_start_idx
+ num_pkts
)
8612 goto test_loopback_exit
;
8614 cqe
= &fp
->rx_comp_ring
[RCQ_BD(fp
->rx_comp_cons
)];
8615 cqe_fp_flags
= cqe
->fast_path_cqe
.type_error_flags
;
8616 if (CQE_TYPE(cqe_fp_flags
) || (cqe_fp_flags
& ETH_RX_ERROR_FALGS
))
8617 goto test_loopback_rx_exit
;
8619 len
= le16_to_cpu(cqe
->fast_path_cqe
.pkt_len
);
8620 if (len
!= pkt_size
)
8621 goto test_loopback_rx_exit
;
8623 rx_buf
= &fp
->rx_buf_ring
[RX_BD(fp
->rx_bd_cons
)];
8625 skb_reserve(skb
, cqe
->fast_path_cqe
.placement_offset
);
8626 for (i
= ETH_HLEN
; i
< pkt_size
; i
++)
8627 if (*(skb
->data
+ i
) != (unsigned char) (i
& 0xff))
8628 goto test_loopback_rx_exit
;
8632 test_loopback_rx_exit
:
8633 bp
->dev
->last_rx
= jiffies
;
8635 fp
->rx_bd_cons
= NEXT_RX_IDX(fp
->rx_bd_cons
);
8636 fp
->rx_bd_prod
= NEXT_RX_IDX(fp
->rx_bd_prod
);
8637 fp
->rx_comp_cons
= NEXT_RCQ_IDX(fp
->rx_comp_cons
);
8638 fp
->rx_comp_prod
= NEXT_RCQ_IDX(fp
->rx_comp_prod
);
8640 /* Update producers */
8641 bnx2x_update_rx_prod(bp
, fp
, fp
->rx_bd_prod
, fp
->rx_comp_prod
,
8643 mmiowb(); /* keep prod updates ordered */
8646 bp
->link_params
.loopback_mode
= LOOPBACK_NONE
;
8651 static int bnx2x_test_loopback(struct bnx2x
*bp
, u8 link_up
)
8655 if (!netif_running(bp
->dev
))
8656 return BNX2X_LOOPBACK_FAILED
;
8658 bnx2x_netif_stop(bp
);
8660 if (bnx2x_run_loopback(bp
, BNX2X_MAC_LOOPBACK
, link_up
)) {
8661 DP(NETIF_MSG_PROBE
, "MAC loopback failed\n");
8662 rc
|= BNX2X_MAC_LOOPBACK_FAILED
;
8665 if (bnx2x_run_loopback(bp
, BNX2X_PHY_LOOPBACK
, link_up
)) {
8666 DP(NETIF_MSG_PROBE
, "PHY loopback failed\n");
8667 rc
|= BNX2X_PHY_LOOPBACK_FAILED
;
8670 bnx2x_netif_start(bp
);
#define CRC32_RESIDUAL			0xdebb20e3

static int bnx2x_test_nvram(struct bnx2x *bp)
{
	static const struct {
		int offset;
		int size;
	} nvram_tbl[] = {
		{     0,  0x14 }, /* bootstrap */
		{  0x14,  0xec }, /* dir */
		{ 0x100, 0x350 }, /* manuf_info */
		{ 0x450,  0xf0 }, /* feature_info */
		{ 0x640,  0x64 }, /* upgrade_key_info */
		{ 0x708,  0x70 }, /* manuf_key_info */
		{     0,     0 }
	};
	u32 buf[0x350 / 4];	/* size is a fill: large enough for the
				 * biggest region listed above */
	u8 *data = (u8 *)buf;
	int i, rc;
	u32 magic, csum;

	rc = bnx2x_nvram_read(bp, 0, data, 4);
	if (rc) {
		DP(NETIF_MSG_PROBE, "magic value read (rc -%d)\n", -rc);
		goto test_nvram_exit;
	}

	magic = be32_to_cpu(buf[0]);
	if (magic != 0x669955aa) {
		DP(NETIF_MSG_PROBE, "magic value (0x%08x)\n", magic);
		rc = -ENODEV;
		goto test_nvram_exit;
	}

	for (i = 0; nvram_tbl[i].size; i++) {

		rc = bnx2x_nvram_read(bp, nvram_tbl[i].offset, data,
				      nvram_tbl[i].size);
		if (rc) {
			DP(NETIF_MSG_PROBE,
			   "nvram_tbl[%d] read data (rc -%d)\n", i, -rc);
			goto test_nvram_exit;
		}

		csum = ether_crc_le(nvram_tbl[i].size, data);
		if (csum != CRC32_RESIDUAL) {
			DP(NETIF_MSG_PROBE,
			   "nvram_tbl[%d] csum value (0x%08x)\n", i, csum);
			rc = -ENODEV;
			goto test_nvram_exit;
		}
	}

test_nvram_exit:
	return rc;
}
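/* Note on the check above: each nvram_tbl region is stored with its own
 * little-endian CRC-32 appended, so running ether_crc_le() over the whole
 * region (payload plus stored CRC) yields the fixed residual 0xdebb20e3
 * whenever the region is intact; any corruption changes the result.  This
 * is the standard CRC residue trick - the test never needs to know where
 * inside the region the CRC field actually lives.
 */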
8734 static int bnx2x_test_intr(struct bnx2x
*bp
)
8736 struct mac_configuration_cmd
*config
= bnx2x_sp(bp
, mac_config
);
8739 if (!netif_running(bp
->dev
))
8742 config
->hdr
.length_6b
= 0;
8743 config
->hdr
.offset
= 0;
8744 config
->hdr
.client_id
= BP_CL_ID(bp
);
8745 config
->hdr
.reserved1
= 0;
8747 rc
= bnx2x_sp_post(bp
, RAMROD_CMD_ID_ETH_SET_MAC
, 0,
8748 U64_HI(bnx2x_sp_mapping(bp
, mac_config
)),
8749 U64_LO(bnx2x_sp_mapping(bp
, mac_config
)), 0);
8751 bp
->set_mac_pending
++;
8752 for (i
= 0; i
< 10; i
++) {
8753 if (!bp
->set_mac_pending
)
8755 msleep_interruptible(10);
8764 static void bnx2x_self_test(struct net_device
*dev
,
8765 struct ethtool_test
*etest
, u64
*buf
)
8767 struct bnx2x
*bp
= netdev_priv(dev
);
8769 memset(buf
, 0, sizeof(u64
) * BNX2X_NUM_TESTS
);
8771 if (!netif_running(dev
))
	/* offline tests are not supported in MF mode */
8776 etest
->flags
&= ~ETH_TEST_FL_OFFLINE
;
8778 if (etest
->flags
& ETH_TEST_FL_OFFLINE
) {
8781 link_up
= bp
->link_vars
.link_up
;
8782 bnx2x_nic_unload(bp
, UNLOAD_NORMAL
);
8783 bnx2x_nic_load(bp
, LOAD_DIAG
);
8784 /* wait until link state is restored */
8785 bnx2x_wait_for_link(bp
, link_up
);
8787 if (bnx2x_test_registers(bp
) != 0) {
8789 etest
->flags
|= ETH_TEST_FL_FAILED
;
8791 if (bnx2x_test_memory(bp
) != 0) {
8793 etest
->flags
|= ETH_TEST_FL_FAILED
;
8795 buf
[2] = bnx2x_test_loopback(bp
, link_up
);
8797 etest
->flags
|= ETH_TEST_FL_FAILED
;
8799 bnx2x_nic_unload(bp
, UNLOAD_NORMAL
);
8800 bnx2x_nic_load(bp
, LOAD_NORMAL
);
8801 /* wait until link state is restored */
8802 bnx2x_wait_for_link(bp
, link_up
);
8804 if (bnx2x_test_nvram(bp
) != 0) {
8806 etest
->flags
|= ETH_TEST_FL_FAILED
;
8808 if (bnx2x_test_intr(bp
) != 0) {
8810 etest
->flags
|= ETH_TEST_FL_FAILED
;
8813 if (bnx2x_link_test(bp
) != 0) {
8815 etest
->flags
|= ETH_TEST_FL_FAILED
;
8817 buf
[7] = bnx2x_mc_assert(bp
);
8819 etest
->flags
|= ETH_TEST_FL_FAILED
;
8821 #ifdef BNX2X_EXTRA_DEBUG
8822 bnx2x_panic_dump(bp
);
static const struct {
	long offset;
	int size;
	u32 flags;
	char string[ETH_GSTRING_LEN];
} bnx2x_stats_arr[BNX2X_NUM_STATS] = {
/* 1 */	{ STATS_OFFSET32(valid_bytes_received_hi),    8, 1, "rx_bytes" },
	{ STATS_OFFSET32(error_bytes_received_hi),    8, 1, "rx_error_bytes" },
	{ STATS_OFFSET32(total_bytes_transmitted_hi), 8, 1, "tx_bytes" },
	{ STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi), 8, 0, "tx_error_bytes" },
	{ STATS_OFFSET32(total_unicast_packets_received_hi),
						8, 1, "rx_ucast_packets" },
	{ STATS_OFFSET32(total_multicast_packets_received_hi),
						8, 1, "rx_mcast_packets" },
	{ STATS_OFFSET32(total_broadcast_packets_received_hi),
						8, 1, "rx_bcast_packets" },
	{ STATS_OFFSET32(total_unicast_packets_transmitted_hi),
						8, 1, "tx_packets" },
	{ STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
						8, 0, "tx_mac_errors" },
/* 10 */{ STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
						8, 0, "tx_carrier_errors" },
	{ STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
						8, 0, "rx_crc_errors" },
	{ STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
						8, 0, "rx_align_errors" },
	{ STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
						8, 0, "tx_single_collisions" },
	{ STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
						8, 0, "tx_multi_collisions" },
	{ STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
						8, 0, "tx_deferred" },
	{ STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
						8, 0, "tx_excess_collisions" },
	{ STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
						8, 0, "tx_late_collisions" },
	{ STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
						8, 0, "tx_total_collisions" },
	{ STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
						8, 0, "rx_fragments" },
/* 20 */{ STATS_OFFSET32(rx_stat_etherstatsjabbers_hi), 8, 0, "rx_jabbers" },
	{ STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
						8, 0, "rx_undersize_packets" },
	{ STATS_OFFSET32(jabber_packets_received),
						4, 1, "rx_oversize_packets" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
						8, 0, "tx_64_byte_packets" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
					8, 0, "tx_65_to_127_byte_packets" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
					8, 0, "tx_128_to_255_byte_packets" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
					8, 0, "tx_256_to_511_byte_packets" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
					8, 0, "tx_512_to_1023_byte_packets" },
	{ STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
					8, 0, "tx_1024_to_1522_byte_packets" },
	{ STATS_OFFSET32(etherstatspktsover1522octets_hi),
					8, 0, "tx_1523_to_9022_byte_packets" },
/* 30 */{ STATS_OFFSET32(rx_stat_xonpauseframesreceived_hi),
						8, 0, "rx_xon_frames" },
	{ STATS_OFFSET32(rx_stat_xoffpauseframesreceived_hi),
						8, 0, "rx_xoff_frames" },
	{ STATS_OFFSET32(tx_stat_outxonsent_hi),  8, 0, "tx_xon_frames" },
	{ STATS_OFFSET32(tx_stat_outxoffsent_hi), 8, 0, "tx_xoff_frames" },
	{ STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
						8, 0, "rx_mac_ctrl_frames" },
	{ STATS_OFFSET32(mac_filter_discard), 4, 1, "rx_filtered_packets" },
	{ STATS_OFFSET32(no_buff_discard),    4, 1, "rx_discards" },
	{ STATS_OFFSET32(xxoverflow_discard), 4, 1, "rx_fw_discards" },
	{ STATS_OFFSET32(brb_drop_hi),        8, 1, "brb_discard" },
/* 39 */{ STATS_OFFSET32(brb_truncate_discard), 8, 1, "brb_truncate" }
};
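/* The third column (flags) marks counters that remain meaningful when the
 * device runs in E1H multi-function mode: entries with a zero flag appear to
 * be port-wide MAC statistics and are skipped by the IS_E1HMF() checks in
 * the string/count/stats handlers below.
 */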
static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
{
	struct bnx2x *bp = netdev_priv(dev);
	int i, j;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
			if (IS_E1HMF(bp) && (!bnx2x_stats_arr[i].flags))
				continue;
			strcpy(buf + j*ETH_GSTRING_LEN,
			       bnx2x_stats_arr[i].string);
			j++;
		}
		break;

	case ETH_SS_TEST:
		memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr));
		break;
	}
}

static int bnx2x_get_stats_count(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	int i, num_stats = 0;

	for (i = 0; i < BNX2X_NUM_STATS; i++) {
		if (IS_E1HMF(bp) && (!bnx2x_stats_arr[i].flags))
			continue;
		num_stats++;
	}
	return num_stats;
}

static void bnx2x_get_ethtool_stats(struct net_device *dev,
				    struct ethtool_stats *stats, u64 *buf)
{
	struct bnx2x *bp = netdev_priv(dev);
	u32 *hw_stats = (u32 *)&bp->eth_stats;
	int i, j;

	for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
		if (IS_E1HMF(bp) && (!bnx2x_stats_arr[i].flags))
			continue;

		if (bnx2x_stats_arr[i].size == 0) {
			/* skip this counter */
			buf[j] = 0;
			j++;
			continue;
		}
		if (bnx2x_stats_arr[i].size == 4) {
			/* 4-byte counter */
			buf[j] = (u64) *(hw_stats + bnx2x_stats_arr[i].offset);
			j++;
			continue;
		}
		/* 8-byte counter */
		buf[j] = HILO_U64(*(hw_stats + bnx2x_stats_arr[i].offset),
				  *(hw_stats + bnx2x_stats_arr[i].offset + 1));
		j++;
	}
}
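/* Note: 64-bit firmware/MAC counters are kept as {hi, lo} pairs of u32 in
 * bp->eth_stats, so the 8-byte case above reads two consecutive words;
 * HILO_U64() presumably folds them into one u64 (hi in the upper 32 bits,
 * lo in the lower), while 4-byte counters are simply widened.
 */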
8965 static int bnx2x_phys_id(struct net_device
*dev
, u32 data
)
8967 struct bnx2x
*bp
= netdev_priv(dev
);
8968 int port
= BP_PORT(bp
);
8971 if (!netif_running(dev
))
8980 for (i
= 0; i
< (data
* 2); i
++) {
8982 bnx2x_set_led(bp
, port
, LED_MODE_OPER
, SPEED_1000
,
8983 bp
->link_params
.hw_led_mode
,
8984 bp
->link_params
.chip_id
);
8986 bnx2x_set_led(bp
, port
, LED_MODE_OFF
, 0,
8987 bp
->link_params
.hw_led_mode
,
8988 bp
->link_params
.chip_id
);
8990 msleep_interruptible(500);
8991 if (signal_pending(current
))
8995 if (bp
->link_vars
.link_up
)
8996 bnx2x_set_led(bp
, port
, LED_MODE_OPER
,
8997 bp
->link_vars
.line_speed
,
8998 bp
->link_params
.hw_led_mode
,
8999 bp
->link_params
.chip_id
);
static struct ethtool_ops bnx2x_ethtool_ops = {
	.get_settings		= bnx2x_get_settings,
	.set_settings		= bnx2x_set_settings,
	.get_drvinfo		= bnx2x_get_drvinfo,
	.get_wol		= bnx2x_get_wol,
	.set_wol		= bnx2x_set_wol,
	.get_msglevel		= bnx2x_get_msglevel,
	.set_msglevel		= bnx2x_set_msglevel,
	.nway_reset		= bnx2x_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_eeprom_len		= bnx2x_get_eeprom_len,
	.get_eeprom		= bnx2x_get_eeprom,
	.set_eeprom		= bnx2x_set_eeprom,
	.get_coalesce		= bnx2x_get_coalesce,
	.set_coalesce		= bnx2x_set_coalesce,
	.get_ringparam		= bnx2x_get_ringparam,
	.set_ringparam		= bnx2x_set_ringparam,
	.get_pauseparam		= bnx2x_get_pauseparam,
	.set_pauseparam		= bnx2x_set_pauseparam,
	.get_rx_csum		= bnx2x_get_rx_csum,
	.set_rx_csum		= bnx2x_set_rx_csum,
	.get_tx_csum		= ethtool_op_get_tx_csum,
	.set_tx_csum		= ethtool_op_set_tx_hw_csum,
	.set_flags		= bnx2x_set_flags,
	.get_flags		= ethtool_op_get_flags,
	.get_sg			= ethtool_op_get_sg,
	.set_sg			= ethtool_op_set_sg,
	.get_tso		= ethtool_op_get_tso,
	.set_tso		= bnx2x_set_tso,
	.self_test_count	= bnx2x_self_test_count,
	.self_test		= bnx2x_self_test,
	.get_strings		= bnx2x_get_strings,
	.phys_id		= bnx2x_phys_id,
	.get_stats_count	= bnx2x_get_stats_count,
	.get_ethtool_stats	= bnx2x_get_ethtool_stats,
};

/* end of ethtool_ops */
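/* The ops table above is attached to the net_device in bnx2x_init_dev()
 * ("dev->ethtool_ops = &bnx2x_ethtool_ops;" further down), so for example
 * "ethtool -S ethX" lands in bnx2x_get_ethtool_stats() and "ethtool -t ethX"
 * in bnx2x_self_test().
 */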
/****************************************************************************
* General service functions
****************************************************************************/
9047 static int bnx2x_set_power_state(struct bnx2x
*bp
, pci_power_t state
)
9051 pci_read_config_word(bp
->pdev
, bp
->pm_cap
+ PCI_PM_CTRL
, &pmcsr
);
9055 pci_write_config_word(bp
->pdev
, bp
->pm_cap
+ PCI_PM_CTRL
,
9056 ((pmcsr
& ~PCI_PM_CTRL_STATE_MASK
) |
9057 PCI_PM_CTRL_PME_STATUS
));
9059 if (pmcsr
& PCI_PM_CTRL_STATE_MASK
)
9060 /* delay required during transition out of D3hot */
9065 pmcsr
&= ~PCI_PM_CTRL_STATE_MASK
;
9069 pmcsr
|= PCI_PM_CTRL_PME_ENABLE
;
9071 pci_write_config_word(bp
->pdev
, bp
->pm_cap
+ PCI_PM_CTRL
,
9074 /* No more memory access after this point until
9075 * device is brought back to D0.
9086 * net_device service functions
static int bnx2x_poll(struct napi_struct *napi, int budget)
{
	struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
						 napi);
	struct bnx2x *bp = fp->bp;
	int work_done = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		goto poll_panic;
#endif

	prefetch(fp->tx_buf_ring[TX_BD(fp->tx_pkt_cons)].skb);
	prefetch(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb);
	prefetch((char *)(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb) + 256);

	bnx2x_update_fpsb_idx(fp);

	if ((fp->tx_pkt_prod != le16_to_cpu(*fp->tx_cons_sb)) ||
	    (fp->tx_pkt_prod != fp->tx_pkt_cons))
		bnx2x_tx_int(fp, budget);

	if (le16_to_cpu(*fp->rx_cons_sb) != fp->rx_comp_cons)
		work_done = bnx2x_rx_int(fp, budget);

	rmb(); /* bnx2x_has_work() reads the status block */

	/* must not complete if we consumed full budget */
	if ((work_done < budget) && !bnx2x_has_work(fp)) {

#ifdef BNX2X_STOP_ON_ERROR
poll_panic:
#endif
		netif_rx_complete(bp->dev, napi);

		bnx2x_ack_sb(bp, FP_SB_ID(fp), USTORM_ID,
			     le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1);
		bnx2x_ack_sb(bp, FP_SB_ID(fp), CSTORM_ID,
			     le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1);
	}
	return work_done;
}
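/* NAPI contract as exercised above: the handler may only call
 * netif_rx_complete() and re-arm the status-block interrupt (the
 * IGU_INT_ENABLE ack) when it did NOT consume the whole budget and no
 * further work is pending; otherwise it just returns and the core will
 * poll again.  The rmb() keeps the bnx2x_has_work() re-check from reading
 * the status block before the work above has been accounted.
 */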
/* we split the first BD into headers and data BDs
 * to ease the pain of our fellow microcode engineers
 * we use one mapping for both BDs
 * So far this has only been observed to happen
 * in Other Operating Systems(TM)
 */
9139 static noinline u16
bnx2x_tx_split(struct bnx2x
*bp
,
9140 struct bnx2x_fastpath
*fp
,
9141 struct eth_tx_bd
**tx_bd
, u16 hlen
,
9142 u16 bd_prod
, int nbd
)
9144 struct eth_tx_bd
*h_tx_bd
= *tx_bd
;
9145 struct eth_tx_bd
*d_tx_bd
;
9147 int old_len
= le16_to_cpu(h_tx_bd
->nbytes
);
9149 /* first fix first BD */
9150 h_tx_bd
->nbd
= cpu_to_le16(nbd
);
9151 h_tx_bd
->nbytes
= cpu_to_le16(hlen
);
9153 DP(NETIF_MSG_TX_QUEUED
, "TSO split header size is %d "
9154 "(%x:%x) nbd %d\n", h_tx_bd
->nbytes
, h_tx_bd
->addr_hi
,
9155 h_tx_bd
->addr_lo
, h_tx_bd
->nbd
);
9157 /* now get a new data BD
9158 * (after the pbd) and fill it */
9159 bd_prod
= TX_BD(NEXT_TX_IDX(bd_prod
));
9160 d_tx_bd
= &fp
->tx_desc_ring
[bd_prod
];
9162 mapping
= HILO_U64(le32_to_cpu(h_tx_bd
->addr_hi
),
9163 le32_to_cpu(h_tx_bd
->addr_lo
)) + hlen
;
9165 d_tx_bd
->addr_hi
= cpu_to_le32(U64_HI(mapping
));
9166 d_tx_bd
->addr_lo
= cpu_to_le32(U64_LO(mapping
));
9167 d_tx_bd
->nbytes
= cpu_to_le16(old_len
- hlen
);
9169 /* this marks the BD as one that has no individual mapping
9170 * the FW ignores this flag in a BD not marked start
9172 d_tx_bd
->bd_flags
.as_bitfield
= ETH_TX_BD_FLAGS_SW_LSO
;
9173 DP(NETIF_MSG_TX_QUEUED
,
9174 "TSO split data size is %d (%x:%x)\n",
9175 d_tx_bd
->nbytes
, d_tx_bd
->addr_hi
, d_tx_bd
->addr_lo
);
9177 /* update tx_bd for marking the last BD flag */
static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
{
	if (fix > 0)
		csum = (u16) ~csum_fold(csum_sub(csum,
				csum_partial(t_header - fix, fix, 0)));

	else if (fix < 0)
		csum = (u16) ~csum_fold(csum_add(csum,
				csum_partial(t_header, -fix, 0)));

	return swab16(csum);
}
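/* What bnx2x_csum_fix() compensates for, as far as its caller below shows:
 * the stack's partial checksum was computed starting "fix" bytes away from
 * the transport header (SKB_CS_OFF()), so the helper folds out (fix > 0) or
 * folds in (fix < 0) the checksum of those bytes with csum_partial(), and
 * byte-swaps the result because the parsing BD expects the pseudo checksum
 * in the opposite byte order (see the swab16() uses around tcp_pseudo_csum).
 */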
static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
{
	u32 rc = XMIT_PLAIN;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		rc = XMIT_PLAIN;

	else {
		if (skb->protocol == ntohs(ETH_P_IPV6)) {
			rc = XMIT_CSUM_V6;
			if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
				rc |= XMIT_CSUM_TCP;

		} else {
			rc = XMIT_CSUM_V4;
			if (ip_hdr(skb)->protocol == IPPROTO_TCP)
				rc |= XMIT_CSUM_TCP;
		}
	}

	if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
		rc |= XMIT_GSO_V4;

	else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
		rc |= XMIT_GSO_V6;

	return rc;
}
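/* Example of the bitmask built above (flag names as used by the transmit
 * path below): a TSO IPv4/TCP frame would yield
 * XMIT_CSUM_V4 | XMIT_CSUM_TCP | XMIT_GSO_V4, which makes bnx2x_start_xmit()
 * fill a parsing BD, request IP+TCP checksum offload and program the LSO
 * fields.
 */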
9225 /* check if packet requires linearization (packet is too fragmented) */
9226 static int bnx2x_pkt_req_lin(struct bnx2x
*bp
, struct sk_buff
*skb
,
9231 int first_bd_sz
= 0;
9233 /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
9234 if (skb_shinfo(skb
)->nr_frags
>= (MAX_FETCH_BD
- 3)) {
9236 if (xmit_type
& XMIT_GSO
) {
9237 unsigned short lso_mss
= skb_shinfo(skb
)->gso_size
;
9238 /* Check if LSO packet needs to be copied:
9239 3 = 1 (for headers BD) + 2 (for PBD and last BD) */
9240 int wnd_size
= MAX_FETCH_BD
- 3;
			/* Number of windows to check */
9242 int num_wnds
= skb_shinfo(skb
)->nr_frags
- wnd_size
;
9247 /* Headers length */
9248 hlen
= (int)(skb_transport_header(skb
) - skb
->data
) +
9251 /* Amount of data (w/o headers) on linear part of SKB*/
9252 first_bd_sz
= skb_headlen(skb
) - hlen
;
9254 wnd_sum
= first_bd_sz
;
9256 /* Calculate the first sum - it's special */
9257 for (frag_idx
= 0; frag_idx
< wnd_size
- 1; frag_idx
++)
9259 skb_shinfo(skb
)->frags
[frag_idx
].size
;
9261 /* If there was data on linear skb data - check it */
9262 if (first_bd_sz
> 0) {
9263 if (unlikely(wnd_sum
< lso_mss
)) {
9268 wnd_sum
-= first_bd_sz
;
9271 /* Others are easier: run through the frag list and
9272 check all windows */
9273 for (wnd_idx
= 0; wnd_idx
<= num_wnds
; wnd_idx
++) {
9275 skb_shinfo(skb
)->frags
[wnd_idx
+ wnd_size
- 1].size
;
9277 if (unlikely(wnd_sum
< lso_mss
)) {
9282 skb_shinfo(skb
)->frags
[wnd_idx
].size
;
9286 /* in non-LSO too fragmented packet should always
9293 if (unlikely(to_copy
))
9294 DP(NETIF_MSG_TX_QUEUED
,
9295 "Linearization IS REQUIRED for %s packet. "
9296 "num_frags %d hlen %d first_bd_sz %d\n",
9297 (xmit_type
& XMIT_GSO
) ? "LSO" : "non-LSO",
9298 skb_shinfo(skb
)->nr_frags
, hlen
, first_bd_sz
);
9303 /* called with netif_tx_lock
9304 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
9305 * netif_wake_queue()
9307 static int bnx2x_start_xmit(struct sk_buff
*skb
, struct net_device
*dev
)
9309 struct bnx2x
*bp
= netdev_priv(dev
);
9310 struct bnx2x_fastpath
*fp
;
9311 struct sw_tx_bd
*tx_buf
;
9312 struct eth_tx_bd
*tx_bd
;
9313 struct eth_tx_parse_bd
*pbd
= NULL
;
9314 u16 pkt_prod
, bd_prod
;
9317 u32 xmit_type
= bnx2x_xmit_type(bp
, skb
);
9318 int vlan_off
= (bp
->e1hov
? 4 : 0);
9322 #ifdef BNX2X_STOP_ON_ERROR
9323 if (unlikely(bp
->panic
))
9324 return NETDEV_TX_BUSY
;
9327 fp_index
= (smp_processor_id() % bp
->num_queues
);
9328 fp
= &bp
->fp
[fp_index
];
9330 if (unlikely(bnx2x_tx_avail(bp
->fp
) <
9331 (skb_shinfo(skb
)->nr_frags
+ 3))) {
9332 bp
->eth_stats
.driver_xoff
++,
9333 netif_stop_queue(dev
);
9334 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
9335 return NETDEV_TX_BUSY
;
9338 DP(NETIF_MSG_TX_QUEUED
, "SKB: summed %x protocol %x protocol(%x,%x)"
9339 " gso type %x xmit_type %x\n",
9340 skb
->ip_summed
, skb
->protocol
, ipv6_hdr(skb
)->nexthdr
,
9341 ip_hdr(skb
)->protocol
, skb_shinfo(skb
)->gso_type
, xmit_type
);
	/* First, check if we need to linearize the skb
	   (due to FW restrictions) */
9345 if (bnx2x_pkt_req_lin(bp
, skb
, xmit_type
)) {
9346 /* Statistics of linearization */
9348 if (skb_linearize(skb
) != 0) {
9349 DP(NETIF_MSG_TX_QUEUED
, "SKB linearization failed - "
9350 "silently dropping this SKB\n");
9351 dev_kfree_skb_any(skb
);
	/*
	   Please read carefully. First we use one BD which we mark as start,
	   then for TSO or xsum we have a parsing info BD,
	   and only then we have the rest of the TSO BDs.
	   (don't forget to mark the last one as last,
	   and to unmap only AFTER you write to the BD ...)
	   And above all, all pdb sizes are in words - NOT DWORDS!
	*/
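	/* Concretely (illustrative, restating the note above): a TSO packet
	 * with several page fragments ends up as
	 *
	 *	[start BD: headers] [parsing BD] [data BD(s)] ... [last BD]
	 *
	 * where the start BD may additionally be split by bnx2x_tx_split()
	 * further down when the linear part of the skb is longer than the
	 * headers.
	 */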
9365 pkt_prod
= fp
->tx_pkt_prod
++;
9366 bd_prod
= TX_BD(fp
->tx_bd_prod
);
9368 /* get a tx_buf and first BD */
9369 tx_buf
= &fp
->tx_buf_ring
[TX_BD(pkt_prod
)];
9370 tx_bd
= &fp
->tx_desc_ring
[bd_prod
];
9372 tx_bd
->bd_flags
.as_bitfield
= ETH_TX_BD_FLAGS_START_BD
;
9373 tx_bd
->general_data
= (UNICAST_ADDRESS
<<
9374 ETH_TX_BD_ETH_ADDR_TYPE_SHIFT
);
9375 tx_bd
->general_data
|= 1; /* header nbd */
9377 /* remember the first BD of the packet */
9378 tx_buf
->first_bd
= fp
->tx_bd_prod
;
9381 DP(NETIF_MSG_TX_QUEUED
,
9382 "sending pkt %u @%p next_idx %u bd %u @%p\n",
9383 pkt_prod
, tx_buf
, fp
->tx_pkt_prod
, bd_prod
, tx_bd
);
9385 if ((bp
->vlgrp
!= NULL
) && vlan_tx_tag_present(skb
)) {
9386 tx_bd
->vlan
= cpu_to_le16(vlan_tx_tag_get(skb
));
9387 tx_bd
->bd_flags
.as_bitfield
|= ETH_TX_BD_FLAGS_VLAN_TAG
;
9390 tx_bd
->vlan
= cpu_to_le16(pkt_prod
);
9394 /* turn on parsing and get a BD */
9395 bd_prod
= TX_BD(NEXT_TX_IDX(bd_prod
));
9396 pbd
= (void *)&fp
->tx_desc_ring
[bd_prod
];
9398 memset(pbd
, 0, sizeof(struct eth_tx_parse_bd
));
9401 if (xmit_type
& XMIT_CSUM
) {
9402 hlen
= (skb_network_header(skb
) - skb
->data
+ vlan_off
) / 2;
9404 /* for now NS flag is not used in Linux */
9405 pbd
->global_data
= (hlen
|
9406 ((skb
->protocol
== ntohs(ETH_P_8021Q
)) <<
9407 ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT
));
9409 pbd
->ip_hlen
= (skb_transport_header(skb
) -
9410 skb_network_header(skb
)) / 2;
9412 hlen
+= pbd
->ip_hlen
+ tcp_hdrlen(skb
) / 2;
9414 pbd
->total_hlen
= cpu_to_le16(hlen
);
9415 hlen
= hlen
*2 - vlan_off
;
9417 tx_bd
->bd_flags
.as_bitfield
|= ETH_TX_BD_FLAGS_TCP_CSUM
;
9419 if (xmit_type
& XMIT_CSUM_V4
)
9420 tx_bd
->bd_flags
.as_bitfield
|=
9421 ETH_TX_BD_FLAGS_IP_CSUM
;
9423 tx_bd
->bd_flags
.as_bitfield
|= ETH_TX_BD_FLAGS_IPV6
;
9425 if (xmit_type
& XMIT_CSUM_TCP
) {
9426 pbd
->tcp_pseudo_csum
= swab16(tcp_hdr(skb
)->check
);
9429 s8 fix
= SKB_CS_OFF(skb
); /* signed! */
9431 pbd
->global_data
|= ETH_TX_PARSE_BD_CS_ANY_FLG
;
9432 pbd
->cs_offset
= fix
/ 2;
9434 DP(NETIF_MSG_TX_QUEUED
,
9435 "hlen %d offset %d fix %d csum before fix %x\n",
9436 le16_to_cpu(pbd
->total_hlen
), pbd
->cs_offset
, fix
,
9439 /* HW bug: fixup the CSUM */
9440 pbd
->tcp_pseudo_csum
=
9441 bnx2x_csum_fix(skb_transport_header(skb
),
9444 DP(NETIF_MSG_TX_QUEUED
, "csum after fix %x\n",
9445 pbd
->tcp_pseudo_csum
);
9449 mapping
= pci_map_single(bp
->pdev
, skb
->data
,
9450 skb_headlen(skb
), PCI_DMA_TODEVICE
);
9452 tx_bd
->addr_hi
= cpu_to_le32(U64_HI(mapping
));
9453 tx_bd
->addr_lo
= cpu_to_le32(U64_LO(mapping
));
9454 nbd
= skb_shinfo(skb
)->nr_frags
+ ((pbd
== NULL
)? 1 : 2);
9455 tx_bd
->nbd
= cpu_to_le16(nbd
);
9456 tx_bd
->nbytes
= cpu_to_le16(skb_headlen(skb
));
9458 DP(NETIF_MSG_TX_QUEUED
, "first bd @%p addr (%x:%x) nbd %d"
9459 " nbytes %d flags %x vlan %x\n",
9460 tx_bd
, tx_bd
->addr_hi
, tx_bd
->addr_lo
, le16_to_cpu(tx_bd
->nbd
),
9461 le16_to_cpu(tx_bd
->nbytes
), tx_bd
->bd_flags
.as_bitfield
,
9462 le16_to_cpu(tx_bd
->vlan
));
9464 if (xmit_type
& XMIT_GSO
) {
9466 DP(NETIF_MSG_TX_QUEUED
,
9467 "TSO packet len %d hlen %d total len %d tso size %d\n",
9468 skb
->len
, hlen
, skb_headlen(skb
),
9469 skb_shinfo(skb
)->gso_size
);
9471 tx_bd
->bd_flags
.as_bitfield
|= ETH_TX_BD_FLAGS_SW_LSO
;
9473 if (unlikely(skb_headlen(skb
) > hlen
))
9474 bd_prod
= bnx2x_tx_split(bp
, fp
, &tx_bd
, hlen
,
9477 pbd
->lso_mss
= cpu_to_le16(skb_shinfo(skb
)->gso_size
);
9478 pbd
->tcp_send_seq
= swab32(tcp_hdr(skb
)->seq
);
9479 pbd
->tcp_flags
= pbd_tcp_flags(skb
);
9481 if (xmit_type
& XMIT_GSO_V4
) {
9482 pbd
->ip_id
= swab16(ip_hdr(skb
)->id
);
9483 pbd
->tcp_pseudo_csum
=
9484 swab16(~csum_tcpudp_magic(ip_hdr(skb
)->saddr
,
9486 0, IPPROTO_TCP
, 0));
9489 pbd
->tcp_pseudo_csum
=
9490 swab16(~csum_ipv6_magic(&ipv6_hdr(skb
)->saddr
,
9491 &ipv6_hdr(skb
)->daddr
,
9492 0, IPPROTO_TCP
, 0));
9494 pbd
->global_data
|= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN
;
9497 for (i
= 0; i
< skb_shinfo(skb
)->nr_frags
; i
++) {
9498 skb_frag_t
*frag
= &skb_shinfo(skb
)->frags
[i
];
9500 bd_prod
= TX_BD(NEXT_TX_IDX(bd_prod
));
9501 tx_bd
= &fp
->tx_desc_ring
[bd_prod
];
9503 mapping
= pci_map_page(bp
->pdev
, frag
->page
, frag
->page_offset
,
9504 frag
->size
, PCI_DMA_TODEVICE
);
9506 tx_bd
->addr_hi
= cpu_to_le32(U64_HI(mapping
));
9507 tx_bd
->addr_lo
= cpu_to_le32(U64_LO(mapping
));
9508 tx_bd
->nbytes
= cpu_to_le16(frag
->size
);
9509 tx_bd
->vlan
= cpu_to_le16(pkt_prod
);
9510 tx_bd
->bd_flags
.as_bitfield
= 0;
9512 DP(NETIF_MSG_TX_QUEUED
,
9513 "frag %d bd @%p addr (%x:%x) nbytes %d flags %x\n",
9514 i
, tx_bd
, tx_bd
->addr_hi
, tx_bd
->addr_lo
,
9515 le16_to_cpu(tx_bd
->nbytes
), tx_bd
->bd_flags
.as_bitfield
);
9518 /* now at last mark the BD as the last BD */
9519 tx_bd
->bd_flags
.as_bitfield
|= ETH_TX_BD_FLAGS_END_BD
;
9521 DP(NETIF_MSG_TX_QUEUED
, "last bd @%p flags %x\n",
9522 tx_bd
, tx_bd
->bd_flags
.as_bitfield
);
9524 bd_prod
= TX_BD(NEXT_TX_IDX(bd_prod
));
9526 /* now send a tx doorbell, counting the next BD
9527 * if the packet contains or ends with it
9529 if (TX_BD_POFF(bd_prod
) < nbd
)
9533 DP(NETIF_MSG_TX_QUEUED
,
9534 "PBD @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u"
9535 " tcp_flags %x xsum %x seq %u hlen %u\n",
9536 pbd
, pbd
->global_data
, pbd
->ip_hlen
, pbd
->ip_id
,
9537 pbd
->lso_mss
, pbd
->tcp_flags
, pbd
->tcp_pseudo_csum
,
9538 pbd
->tcp_send_seq
, le16_to_cpu(pbd
->total_hlen
));
9540 DP(NETIF_MSG_TX_QUEUED
, "doorbell: nbd %d bd %u\n", nbd
, bd_prod
);
9542 fp
->hw_tx_prods
->bds_prod
=
9543 cpu_to_le16(le16_to_cpu(fp
->hw_tx_prods
->bds_prod
) + nbd
);
9544 mb(); /* FW restriction: must not reorder writing nbd and packets */
9545 fp
->hw_tx_prods
->packets_prod
=
9546 cpu_to_le32(le32_to_cpu(fp
->hw_tx_prods
->packets_prod
) + 1);
9547 DOORBELL(bp
, FP_IDX(fp
), 0);
9551 fp
->tx_bd_prod
+= nbd
;
9552 dev
->trans_start
= jiffies
;
9554 if (unlikely(bnx2x_tx_avail(fp
) < MAX_SKB_FRAGS
+ 3)) {
9555 netif_stop_queue(dev
);
9556 bp
->eth_stats
.driver_xoff
++;
9557 if (bnx2x_tx_avail(fp
) >= MAX_SKB_FRAGS
+ 3)
9558 netif_wake_queue(dev
);
9562 return NETDEV_TX_OK
;
/* called with rtnl_lock */
static int bnx2x_open(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	bnx2x_set_power_state(bp, PCI_D0);

	return bnx2x_nic_load(bp, LOAD_OPEN);
}

/* called with rtnl_lock */
static int bnx2x_close(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	/* Unload the driver, release IRQs */
	bnx2x_nic_unload(bp, UNLOAD_CLOSE);
	if (atomic_read(&bp->pdev->enable_cnt) == 1)
		if (!CHIP_REV_IS_SLOW(bp))
			bnx2x_set_power_state(bp, PCI_D3hot);

	return 0;
}
9589 /* called with netif_tx_lock from set_multicast */
9590 static void bnx2x_set_rx_mode(struct net_device
*dev
)
9592 struct bnx2x
*bp
= netdev_priv(dev
);
9593 u32 rx_mode
= BNX2X_RX_MODE_NORMAL
;
9594 int port
= BP_PORT(bp
);
9596 if (bp
->state
!= BNX2X_STATE_OPEN
) {
9597 DP(NETIF_MSG_IFUP
, "state is %x, returning\n", bp
->state
);
9601 DP(NETIF_MSG_IFUP
, "dev->flags = %x\n", dev
->flags
);
9603 if (dev
->flags
& IFF_PROMISC
)
9604 rx_mode
= BNX2X_RX_MODE_PROMISC
;
9606 else if ((dev
->flags
& IFF_ALLMULTI
) ||
9607 ((dev
->mc_count
> BNX2X_MAX_MULTICAST
) && CHIP_IS_E1(bp
)))
9608 rx_mode
= BNX2X_RX_MODE_ALLMULTI
;
9610 else { /* some multicasts */
9611 if (CHIP_IS_E1(bp
)) {
9613 struct dev_mc_list
*mclist
;
9614 struct mac_configuration_cmd
*config
=
9615 bnx2x_sp(bp
, mcast_config
);
9617 for (i
= 0, mclist
= dev
->mc_list
;
9618 mclist
&& (i
< dev
->mc_count
);
9619 i
++, mclist
= mclist
->next
) {
9621 config
->config_table
[i
].
9622 cam_entry
.msb_mac_addr
=
9623 swab16(*(u16
*)&mclist
->dmi_addr
[0]);
9624 config
->config_table
[i
].
9625 cam_entry
.middle_mac_addr
=
9626 swab16(*(u16
*)&mclist
->dmi_addr
[2]);
9627 config
->config_table
[i
].
9628 cam_entry
.lsb_mac_addr
=
9629 swab16(*(u16
*)&mclist
->dmi_addr
[4]);
9630 config
->config_table
[i
].cam_entry
.flags
=
9632 config
->config_table
[i
].
9633 target_table_entry
.flags
= 0;
9634 config
->config_table
[i
].
9635 target_table_entry
.client_id
= 0;
9636 config
->config_table
[i
].
9637 target_table_entry
.vlan_id
= 0;
9640 "setting MCAST[%d] (%04x:%04x:%04x)\n", i
,
9641 config
->config_table
[i
].
9642 cam_entry
.msb_mac_addr
,
9643 config
->config_table
[i
].
9644 cam_entry
.middle_mac_addr
,
9645 config
->config_table
[i
].
9646 cam_entry
.lsb_mac_addr
);
9648 old
= config
->hdr
.length_6b
;
9650 for (; i
< old
; i
++) {
9651 if (CAM_IS_INVALID(config
->
9653 i
--; /* already invalidated */
9657 CAM_INVALIDATE(config
->
9662 if (CHIP_REV_IS_SLOW(bp
))
9663 offset
= BNX2X_MAX_EMUL_MULTI
*(1 + port
);
9665 offset
= BNX2X_MAX_MULTICAST
*(1 + port
);
9667 config
->hdr
.length_6b
= i
;
9668 config
->hdr
.offset
= offset
;
9669 config
->hdr
.client_id
= BP_CL_ID(bp
);
9670 config
->hdr
.reserved1
= 0;
9672 bnx2x_sp_post(bp
, RAMROD_CMD_ID_ETH_SET_MAC
, 0,
9673 U64_HI(bnx2x_sp_mapping(bp
, mcast_config
)),
9674 U64_LO(bnx2x_sp_mapping(bp
, mcast_config
)),
9677 /* Accept one or more multicasts */
9678 struct dev_mc_list
*mclist
;
9679 u32 mc_filter
[MC_HASH_SIZE
];
9680 u32 crc
, bit
, regidx
;
9683 memset(mc_filter
, 0, 4 * MC_HASH_SIZE
);
9685 for (i
= 0, mclist
= dev
->mc_list
;
9686 mclist
&& (i
< dev
->mc_count
);
9687 i
++, mclist
= mclist
->next
) {
9689 DP(NETIF_MSG_IFUP
, "Adding mcast MAC: "
9690 "%02x:%02x:%02x:%02x:%02x:%02x\n",
9691 mclist
->dmi_addr
[0], mclist
->dmi_addr
[1],
9692 mclist
->dmi_addr
[2], mclist
->dmi_addr
[3],
9693 mclist
->dmi_addr
[4], mclist
->dmi_addr
[5]);
9695 crc
= crc32c_le(0, mclist
->dmi_addr
, ETH_ALEN
);
9696 bit
= (crc
>> 24) & 0xff;
9699 mc_filter
[regidx
] |= (1 << bit
);
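			/* The hash above takes the top byte of the CRC32c of
			 * the multicast MAC; the elided lines just before it
			 * presumably derive regidx/bit from that byte so that
			 * mc_filter[] behaves as one MC_HASH_SIZE x 32-bit
			 * bitmap, which is then written to the chip via
			 * MC_HASH_OFFSET() below.
			 */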
9702 for (i
= 0; i
< MC_HASH_SIZE
; i
++)
9703 REG_WR(bp
, MC_HASH_OFFSET(bp
, i
),
9708 bp
->rx_mode
= rx_mode
;
9709 bnx2x_set_storm_rx_mode(bp
);
9712 /* called with rtnl_lock */
9713 static int bnx2x_change_mac_addr(struct net_device
*dev
, void *p
)
9715 struct sockaddr
*addr
= p
;
9716 struct bnx2x
*bp
= netdev_priv(dev
);
9718 if (!is_valid_ether_addr((u8
*)(addr
->sa_data
)))
9721 memcpy(dev
->dev_addr
, addr
->sa_data
, dev
->addr_len
);
9722 if (netif_running(dev
)) {
9724 bnx2x_set_mac_addr_e1(bp
);
9726 bnx2x_set_mac_addr_e1h(bp
);
9732 /* called with rtnl_lock */
9733 static int bnx2x_ioctl(struct net_device
*dev
, struct ifreq
*ifr
, int cmd
)
9735 struct mii_ioctl_data
*data
= if_mii(ifr
);
9736 struct bnx2x
*bp
= netdev_priv(dev
);
9741 data
->phy_id
= bp
->port
.phy_addr
;
9748 if (!netif_running(dev
))
9751 mutex_lock(&bp
->port
.phy_mutex
);
9752 err
= bnx2x_cl45_read(bp
, BP_PORT(bp
), 0, bp
->port
.phy_addr
,
9753 DEFAULT_PHY_DEV_ADDR
,
9754 (data
->reg_num
& 0x1f), &mii_regval
);
9755 data
->val_out
= mii_regval
;
9756 mutex_unlock(&bp
->port
.phy_mutex
);
9761 if (!capable(CAP_NET_ADMIN
))
9764 if (!netif_running(dev
))
9767 mutex_lock(&bp
->port
.phy_mutex
);
9768 err
= bnx2x_cl45_write(bp
, BP_PORT(bp
), 0, bp
->port
.phy_addr
,
9769 DEFAULT_PHY_DEV_ADDR
,
9770 (data
->reg_num
& 0x1f), data
->val_in
);
9771 mutex_unlock(&bp
->port
.phy_mutex
);
9782 /* called with rtnl_lock */
9783 static int bnx2x_change_mtu(struct net_device
*dev
, int new_mtu
)
9785 struct bnx2x
*bp
= netdev_priv(dev
);
9788 if ((new_mtu
> ETH_MAX_JUMBO_PACKET_SIZE
) ||
9789 ((new_mtu
+ ETH_HLEN
) < ETH_MIN_PACKET_SIZE
))
9792 /* This does not race with packet allocation
9793 * because the actual alloc size is
9794 * only updated as part of load
9798 if (netif_running(dev
)) {
9799 bnx2x_nic_unload(bp
, UNLOAD_NORMAL
);
9800 rc
= bnx2x_nic_load(bp
, LOAD_NORMAL
);
9806 static void bnx2x_tx_timeout(struct net_device
*dev
)
9808 struct bnx2x
*bp
= netdev_priv(dev
);
9810 #ifdef BNX2X_STOP_ON_ERROR
9814 /* This allows the netif to be shutdown gracefully before resetting */
9815 schedule_work(&bp
->reset_task
);
9819 /* called with rtnl_lock */
9820 static void bnx2x_vlan_rx_register(struct net_device
*dev
,
9821 struct vlan_group
*vlgrp
)
9823 struct bnx2x
*bp
= netdev_priv(dev
);
9826 if (netif_running(dev
))
9827 bnx2x_set_client_config(bp
);
9832 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
9833 static void poll_bnx2x(struct net_device
*dev
)
9835 struct bnx2x
*bp
= netdev_priv(dev
);
9837 disable_irq(bp
->pdev
->irq
);
9838 bnx2x_interrupt(bp
->pdev
->irq
, dev
);
9839 enable_irq(bp
->pdev
->irq
);
9843 static int __devinit
bnx2x_init_dev(struct pci_dev
*pdev
,
9844 struct net_device
*dev
)
9849 SET_NETDEV_DEV(dev
, &pdev
->dev
);
9850 bp
= netdev_priv(dev
);
9855 bp
->func
= PCI_FUNC(pdev
->devfn
);
9857 rc
= pci_enable_device(pdev
);
9859 printk(KERN_ERR PFX
"Cannot enable PCI device, aborting\n");
9863 if (!(pci_resource_flags(pdev
, 0) & IORESOURCE_MEM
)) {
9864 printk(KERN_ERR PFX
"Cannot find PCI device base address,"
9867 goto err_out_disable
;
9870 if (!(pci_resource_flags(pdev
, 2) & IORESOURCE_MEM
)) {
9871 printk(KERN_ERR PFX
"Cannot find second PCI device"
9872 " base address, aborting\n");
9874 goto err_out_disable
;
9877 if (atomic_read(&pdev
->enable_cnt
) == 1) {
9878 rc
= pci_request_regions(pdev
, DRV_MODULE_NAME
);
9880 printk(KERN_ERR PFX
"Cannot obtain PCI resources,"
9882 goto err_out_disable
;
9885 pci_set_master(pdev
);
9886 pci_save_state(pdev
);
9889 bp
->pm_cap
= pci_find_capability(pdev
, PCI_CAP_ID_PM
);
9890 if (bp
->pm_cap
== 0) {
9891 printk(KERN_ERR PFX
"Cannot find power management"
9892 " capability, aborting\n");
9894 goto err_out_release
;
9897 bp
->pcie_cap
= pci_find_capability(pdev
, PCI_CAP_ID_EXP
);
9898 if (bp
->pcie_cap
== 0) {
9899 printk(KERN_ERR PFX
"Cannot find PCI Express capability,"
9902 goto err_out_release
;
9905 if (pci_set_dma_mask(pdev
, DMA_64BIT_MASK
) == 0) {
9906 bp
->flags
|= USING_DAC_FLAG
;
9907 if (pci_set_consistent_dma_mask(pdev
, DMA_64BIT_MASK
) != 0) {
9908 printk(KERN_ERR PFX
"pci_set_consistent_dma_mask"
9909 " failed, aborting\n");
9911 goto err_out_release
;
9914 } else if (pci_set_dma_mask(pdev
, DMA_32BIT_MASK
) != 0) {
9915 printk(KERN_ERR PFX
"System does not support DMA,"
9918 goto err_out_release
;
9921 dev
->mem_start
= pci_resource_start(pdev
, 0);
9922 dev
->base_addr
= dev
->mem_start
;
9923 dev
->mem_end
= pci_resource_end(pdev
, 0);
9925 dev
->irq
= pdev
->irq
;
9927 bp
->regview
= ioremap_nocache(dev
->base_addr
,
9928 pci_resource_len(pdev
, 0));
9930 printk(KERN_ERR PFX
"Cannot map register space, aborting\n");
9932 goto err_out_release
;
9935 bp
->doorbells
= ioremap_nocache(pci_resource_start(pdev
, 2),
9936 min_t(u64
, BNX2X_DB_SIZE
,
9937 pci_resource_len(pdev
, 2)));
9938 if (!bp
->doorbells
) {
9939 printk(KERN_ERR PFX
"Cannot map doorbell space, aborting\n");
9944 bnx2x_set_power_state(bp
, PCI_D0
);
9946 /* clean indirect addresses */
9947 pci_write_config_dword(bp
->pdev
, PCICFG_GRC_ADDRESS
,
9948 PCICFG_VENDOR_ID_OFFSET
);
9949 REG_WR(bp
, PXP2_REG_PGL_ADDR_88_F0
+ BP_PORT(bp
)*16, 0);
9950 REG_WR(bp
, PXP2_REG_PGL_ADDR_8C_F0
+ BP_PORT(bp
)*16, 0);
9951 REG_WR(bp
, PXP2_REG_PGL_ADDR_90_F0
+ BP_PORT(bp
)*16, 0);
9952 REG_WR(bp
, PXP2_REG_PGL_ADDR_94_F0
+ BP_PORT(bp
)*16, 0);
9954 dev
->hard_start_xmit
= bnx2x_start_xmit
;
9955 dev
->watchdog_timeo
= TX_TIMEOUT
;
9957 dev
->ethtool_ops
= &bnx2x_ethtool_ops
;
9958 dev
->open
= bnx2x_open
;
9959 dev
->stop
= bnx2x_close
;
9960 dev
->set_multicast_list
= bnx2x_set_rx_mode
;
9961 dev
->set_mac_address
= bnx2x_change_mac_addr
;
9962 dev
->do_ioctl
= bnx2x_ioctl
;
9963 dev
->change_mtu
= bnx2x_change_mtu
;
9964 dev
->tx_timeout
= bnx2x_tx_timeout
;
9966 dev
->vlan_rx_register
= bnx2x_vlan_rx_register
;
9968 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
9969 dev
->poll_controller
= poll_bnx2x
;
9971 dev
->features
|= NETIF_F_SG
;
9972 dev
->features
|= NETIF_F_HW_CSUM
;
9973 if (bp
->flags
& USING_DAC_FLAG
)
9974 dev
->features
|= NETIF_F_HIGHDMA
;
9976 dev
->features
|= (NETIF_F_HW_VLAN_TX
| NETIF_F_HW_VLAN_RX
);
9978 dev
->features
|= (NETIF_F_TSO
| NETIF_F_TSO_ECN
);
9979 dev
->features
|= NETIF_F_TSO6
;
9985 iounmap(bp
->regview
);
9988 if (bp
->doorbells
) {
9989 iounmap(bp
->doorbells
);
9990 bp
->doorbells
= NULL
;
9994 if (atomic_read(&pdev
->enable_cnt
) == 1)
9995 pci_release_regions(pdev
);
9998 pci_disable_device(pdev
);
9999 pci_set_drvdata(pdev
, NULL
);
static int __devinit bnx2x_get_pcie_width(struct bnx2x *bp)
{
	u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);

	val = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
	return val;
}

/* return value of 1=2.5GHz 2=5GHz */
static int __devinit bnx2x_get_pcie_speed(struct bnx2x *bp)
{
	u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);

	val = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
	return val;
}
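/* The raw width/speed fields read above are only used for the probe banner
 * printed by bnx2x_init_one() below ("PCI-E x%d ... 2.5GHz / 5GHz (Gen2)").
 */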
10022 static int __devinit
bnx2x_init_one(struct pci_dev
*pdev
,
10023 const struct pci_device_id
*ent
)
10025 static int version_printed
;
10026 struct net_device
*dev
= NULL
;
10029 DECLARE_MAC_BUF(mac
);
10031 if (version_printed
++ == 0)
10032 printk(KERN_INFO
"%s", version
);
10034 /* dev zeroed in init_etherdev */
10035 dev
= alloc_etherdev(sizeof(*bp
));
10037 printk(KERN_ERR PFX
"Cannot allocate net device\n");
10041 netif_carrier_off(dev
);
10043 bp
= netdev_priv(dev
);
10044 bp
->msglevel
= debug
;
10046 rc
= bnx2x_init_dev(pdev
, dev
);
10052 rc
= register_netdev(dev
);
10054 dev_err(&pdev
->dev
, "Cannot register net device\n");
10055 goto init_one_exit
;
10058 pci_set_drvdata(pdev
, dev
);
10060 rc
= bnx2x_init_bp(bp
);
10062 unregister_netdev(dev
);
10063 goto init_one_exit
;
10066 bp
->common
.name
= board_info
[ent
->driver_data
].name
;
10067 printk(KERN_INFO
"%s: %s (%c%d) PCI-E x%d %s found at mem %lx,"
10068 " IRQ %d, ", dev
->name
, bp
->common
.name
,
10069 (CHIP_REV(bp
) >> 12) + 'A', (CHIP_METAL(bp
) >> 4),
10070 bnx2x_get_pcie_width(bp
),
10071 (bnx2x_get_pcie_speed(bp
) == 2) ? "5GHz (Gen2)" : "2.5GHz",
10072 dev
->base_addr
, bp
->pdev
->irq
);
10073 printk(KERN_CONT
"node addr %s\n", print_mac(mac
, dev
->dev_addr
));
10078 iounmap(bp
->regview
);
10081 iounmap(bp
->doorbells
);
10085 if (atomic_read(&pdev
->enable_cnt
) == 1)
10086 pci_release_regions(pdev
);
10088 pci_disable_device(pdev
);
10089 pci_set_drvdata(pdev
, NULL
);
10094 static void __devexit
bnx2x_remove_one(struct pci_dev
*pdev
)
10096 struct net_device
*dev
= pci_get_drvdata(pdev
);
10100 printk(KERN_ERR PFX
"BAD net device from bnx2x_init_one\n");
10103 bp
= netdev_priv(dev
);
10105 unregister_netdev(dev
);
10108 iounmap(bp
->regview
);
10111 iounmap(bp
->doorbells
);
10115 if (atomic_read(&pdev
->enable_cnt
) == 1)
10116 pci_release_regions(pdev
);
10118 pci_disable_device(pdev
);
10119 pci_set_drvdata(pdev
, NULL
);
10122 static int bnx2x_suspend(struct pci_dev
*pdev
, pm_message_t state
)
10124 struct net_device
*dev
= pci_get_drvdata(pdev
);
10128 printk(KERN_ERR PFX
"BAD net device from bnx2x_init_one\n");
10131 bp
= netdev_priv(dev
);
10135 pci_save_state(pdev
);
10137 if (!netif_running(dev
)) {
10142 netif_device_detach(dev
);
10144 bnx2x_nic_unload(bp
, UNLOAD_NORMAL
);
10146 bnx2x_set_power_state(bp
, pci_choose_state(pdev
, state
));
10153 static int bnx2x_resume(struct pci_dev
*pdev
)
10155 struct net_device
*dev
= pci_get_drvdata(pdev
);
10160 printk(KERN_ERR PFX
"BAD net device from bnx2x_init_one\n");
10163 bp
= netdev_priv(dev
);
10167 pci_restore_state(pdev
);
10169 if (!netif_running(dev
)) {
10174 bnx2x_set_power_state(bp
, PCI_D0
);
10175 netif_device_attach(dev
);
10177 rc
= bnx2x_nic_load(bp
, LOAD_NORMAL
);
10185 * bnx2x_io_error_detected - called when PCI error is detected
10186 * @pdev: Pointer to PCI device
10187 * @state: The current pci connection state
10189 * This function is called after a PCI bus error affecting
10190 * this device has been detected.
10192 static pci_ers_result_t
bnx2x_io_error_detected(struct pci_dev
*pdev
,
10193 pci_channel_state_t state
)
10195 struct net_device
*dev
= pci_get_drvdata(pdev
);
10196 struct bnx2x
*bp
= netdev_priv(dev
);
10200 netif_device_detach(dev
);
10202 if (netif_running(dev
))
10203 bnx2x_nic_unload(bp
, UNLOAD_CLOSE
);
10205 pci_disable_device(pdev
);
10209 /* Request a slot reset */
10210 return PCI_ERS_RESULT_NEED_RESET
;
10214 * bnx2x_io_slot_reset - called after the PCI bus has been reset
10215 * @pdev: Pointer to PCI device
10217 * Restart the card from scratch, as if from a cold-boot.
10219 static pci_ers_result_t
bnx2x_io_slot_reset(struct pci_dev
*pdev
)
10221 struct net_device
*dev
= pci_get_drvdata(pdev
);
10222 struct bnx2x
*bp
= netdev_priv(dev
);
10226 if (pci_enable_device(pdev
)) {
10227 dev_err(&pdev
->dev
,
10228 "Cannot re-enable PCI device after reset\n");
10230 return PCI_ERS_RESULT_DISCONNECT
;
10233 pci_set_master(pdev
);
10234 pci_restore_state(pdev
);
10236 if (netif_running(dev
))
10237 bnx2x_set_power_state(bp
, PCI_D0
);
10241 return PCI_ERS_RESULT_RECOVERED
;
10245 * bnx2x_io_resume - called when traffic can start flowing again
10246 * @pdev: Pointer to PCI device
10248 * This callback is called when the error recovery driver tells us that
10249 * its OK to resume normal operation.
10251 static void bnx2x_io_resume(struct pci_dev
*pdev
)
10253 struct net_device
*dev
= pci_get_drvdata(pdev
);
10254 struct bnx2x
*bp
= netdev_priv(dev
);
10258 if (netif_running(dev
))
10259 bnx2x_nic_load(bp
, LOAD_OPEN
);
10261 netif_device_attach(dev
);
static struct pci_error_handlers bnx2x_err_handler = {
	.error_detected = bnx2x_io_error_detected,
	.slot_reset     = bnx2x_io_slot_reset,
	.resume         = bnx2x_io_resume,
};

static struct pci_driver bnx2x_pci_driver = {
	.name        = DRV_MODULE_NAME,
	.id_table    = bnx2x_pci_tbl,
	.probe       = bnx2x_init_one,
	.remove      = __devexit_p(bnx2x_remove_one),
	.suspend     = bnx2x_suspend,
	.resume      = bnx2x_resume,
	.err_handler = &bnx2x_err_handler,
};

static int __init bnx2x_init(void)
{
	return pci_register_driver(&bnx2x_pci_driver);
}

static void __exit bnx2x_cleanup(void)
{
	pci_unregister_driver(&bnx2x_pci_driver);
}

module_init(bnx2x_init);
module_exit(bnx2x_cleanup);