/* bnx2x_main.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2008 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/device.h>  /* for dev_info() */
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#ifdef NETIF_F_HW_VLAN_TX
	#include <linux/if_vlan.h>
#endif
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/crc32c.h>
#include <linux/prefetch.h>
#include <linux/zlib.h>

#include "bnx2x_reg.h"
#include "bnx2x_fw_defs.h"
#include "bnx2x_hsi.h"
#include "bnx2x_link.h"
#include "bnx2x_init.h"

#define DRV_MODULE_VERSION	"1.45.23"
#define DRV_MODULE_RELDATE	"2008/11/03"
#define BNX2X_BC_VER		0x040200

/* Time in jiffies before concluding the transmitter is hung */
#define TX_TIMEOUT		(5*HZ)

static char version[] __devinitdata =
	"Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
	DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Eliezer Tamir");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710 Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

static int disable_tpa;
static int use_inta;
static int poll;
static int debug;
static int load_count[3]; /* 0-common, 1-port0, 2-port1 */
static int use_multi;

module_param(disable_tpa, int, 0);
module_param(use_inta, int, 0);
module_param(poll, int, 0);
module_param(debug, int, 0);
MODULE_PARM_DESC(disable_tpa, "disable the TPA (LRO) feature");
MODULE_PARM_DESC(use_inta, "use INT#A instead of MSI-X");
MODULE_PARM_DESC(poll, "use polling (for debug)");
MODULE_PARM_DESC(debug, "default debug msglevel");

module_param(use_multi, int, 0);
MODULE_PARM_DESC(use_multi, "use per-CPU queues");

enum bnx2x_board_type {
	BCM57710 = 0,
	BCM57711,
	BCM57711E,
};

/* indexed by board_type, above */
static struct {
	char *name;
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM57710 XGb" },
	{ "Broadcom NetXtreme II BCM57711 XGb" },
	{ "Broadcom NetXtreme II BCM57711E XGb" }
};

static const struct pci_device_id bnx2x_pci_tbl[] = {
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57710,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57710 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711E,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711E },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);

/****************************************************************************
* General service functions
****************************************************************************/

/* locking is done by mcp */
static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
{
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
}

static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
{
	u32 val;

	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);

	return val;
}

static const u32 dmae_reg_go_c[] = {
	DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
	DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
	DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
	DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
};

/* copy command into DMAE command memory and set DMAE command go */
static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
			    int idx)
{
	u32 cmd_offset;
	int i;

	cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
	for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
		REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));

		DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
		   idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
	}
	REG_WR(bp, dmae_reg_go_c[idx], 1);
}

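/* bnx2x_write_dmae - copy a buffer from host memory into device memory (GRC)
 * using the DMA engine.  The chip writes DMAE_COMP_VAL back to the slowpath
 * wb_comp word when the transfer completes, and that word is polled here
 * under dmae_mutex.  If the DMAE block is not ready yet (early init), the
 * copy falls back to indirect config-space writes instead.
 */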
void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
		      u32 len32)
{
	struct dmae_command *dmae = &bp->init_dmae;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = 200;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);

		DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x  len32 %d)"
		   "  using indirect\n", dst_addr, len32);
		bnx2x_init_ind_wr(bp, dst_addr, data, len32);
		return;
	}

	mutex_lock(&bp->dmae_mutex);

	memset(dmae, 0, sizeof(struct dmae_command));

	dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = U64_LO(dma_addr);
	dmae->src_addr_hi = U64_HI(dma_addr);
	dmae->dst_addr_lo = dst_addr >> 2;
	dmae->dst_addr_hi = 0;
	dmae->len = len32;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	DP(BNX2X_MSG_OFF, "dmae: opcode 0x%08x\n"
	   DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
		    "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
	   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
	   dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, dst_addr,
	   dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);
	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	*wb_comp = 0;

	bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

	udelay(5);

	while (*wb_comp != DMAE_COMP_VAL) {
		DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);

		if (!cnt) {
			BNX2X_ERR("dmae timeout!\n");
			break;
		}
		cnt--;
		/* adjust delay for emulation/FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			msleep(100);
		else
			udelay(5);
	}

	mutex_unlock(&bp->dmae_mutex);
}

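/* bnx2x_read_dmae - inverse of bnx2x_write_dmae: copy len32 dwords from
 * device memory (GRC) into the slowpath wb_data buffer, polling wb_comp for
 * completion and falling back to indirect register reads when the DMAE block
 * is not yet ready.
 */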
void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
{
	struct dmae_command *dmae = &bp->init_dmae;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = 200;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);
		int i;

		DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x  len32 %d)"
		   "  using indirect\n", src_addr, len32);
		for (i = 0; i < len32; i++)
			data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
		return;
	}

	mutex_lock(&bp->dmae_mutex);

	memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
	memset(dmae, 0, sizeof(struct dmae_command));

	dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = src_addr >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
	dmae->len = len32;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	DP(BNX2X_MSG_OFF, "dmae: opcode 0x%08x\n"
	   DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
		    "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
	   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
	   dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, src_addr,
	   dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);

	*wb_comp = 0;

	bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

	udelay(5);

	while (*wb_comp != DMAE_COMP_VAL) {

		if (!cnt) {
			BNX2X_ERR("dmae timeout!\n");
			break;
		}
		cnt--;
		/* adjust delay for emulation/FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			msleep(100);
		else
			udelay(5);
	}
	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	mutex_unlock(&bp->dmae_mutex);
}

/* used only for slowpath so not inlined */
static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
{
	u32 wb_write[2];

	wb_write[0] = val_hi;
	wb_write[1] = val_lo;
	REG_WR_DMAE(bp, reg, wb_write, 2);
}

static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
{
	u32 wb_data[2];

	REG_RD_DMAE(bp, reg, wb_data, 2);

	return HILO_U64(wb_data[0], wb_data[1]);
}

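/* Scan the per-STORM (X/T/C/U) assert lists in the chip's internal memory
 * and print every valid entry to the log; used when the driver suspects a
 * firmware assert.  Returns the number of asserts found.
 */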
static int bnx2x_mc_assert(struct bnx2x *bp)
{
	char last_idx;
	int i, rc = 0;
	u32 row0, row1, row2, row3;

	/* XSTORM */
	last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
			   XSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else
			break;
	}

	/* TSTORM */
	last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
			   TSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else
			break;
	}

	/* CSTORM */
	last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
			   CSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else
			break;
	}

	/* USTORM */
	last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
			   USTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else
			break;
	}

	return rc;
}

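/* Dump the bootcode (MCP) scratchpad trace buffer to the kernel log.  The
 * "mark" word at offset 0xf104 points at the current write position, so the
 * buffer is printed in two chunks to keep the output in order.
 */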
static void bnx2x_fw_dump(struct bnx2x *bp)
{
	u32 mark, offset;
	u32 data[9];
	int word;

	mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104);
	mark = ((mark + 0x3) & ~0x3);
	printk(KERN_ERR PFX "begin fw dump (mark 0x%x)\n" KERN_ERR, mark);

	for (offset = mark - 0x08000000; offset <= 0xF900; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
						  offset + 4*word));
		data[8] = 0x0;
		printk(KERN_CONT "%s", (char *)data);
	}
	for (offset = 0xF108; offset <= mark - 0x08000000; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
						  offset + 4*word));
		data[8] = 0x0;
		printk(KERN_CONT "%s", (char *)data);
	}
	printk("\n" KERN_ERR PFX "end of fw dump\n");
}

static void bnx2x_panic_dump(struct bnx2x *bp)
{
	int i;
	u16 j, start, end;

	bp->stats_state = STATS_STATE_DISABLED;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	BNX2X_ERR("begin crash dump -----------------\n");

	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		struct eth_tx_db_data *hw_prods = fp->hw_tx_prods;

		BNX2X_ERR("queue[%d]: tx_pkt_prod(%x)  tx_pkt_cons(%x)"
			  "  tx_bd_prod(%x)  tx_bd_cons(%x)  *tx_cons_sb(%x)\n",
			  i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
			  fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
		BNX2X_ERR("          rx_bd_prod(%x)  rx_bd_cons(%x)"
			  "  *rx_bd_cons_sb(%x)  rx_comp_prod(%x)"
			  "  rx_comp_cons(%x)  *rx_cons_sb(%x)\n",
			  fp->rx_bd_prod, fp->rx_bd_cons,
			  le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
			  fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
		BNX2X_ERR("          rx_sge_prod(%x)  last_max_sge(%x)"
			  "  fp_c_idx(%x)  *sb_c_idx(%x)  fp_u_idx(%x)"
			  "  *sb_u_idx(%x)  bd data(%x,%x)\n",
			  fp->rx_sge_prod, fp->last_max_sge, fp->fp_c_idx,
			  fp->status_blk->c_status_block.status_block_index,
			  fp->fp_u_idx,
			  fp->status_blk->u_status_block.status_block_index,
			  hw_prods->packets_prod, hw_prods->bds_prod);

		start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
		end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
		for (j = start; j < end; j++) {
			struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];

			BNX2X_ERR("packet[%x]=[%p,%x]\n", j,
				  sw_bd->skb, sw_bd->first_bd);
		}

		start = TX_BD(fp->tx_bd_cons - 10);
		end = TX_BD(fp->tx_bd_cons + 254);
		for (j = start; j < end; j++) {
			u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];

			BNX2X_ERR("tx_bd[%x]=[%x:%x:%x:%x]\n",
				  j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
		}

		start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
		end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
		for (j = start; j < end; j++) {
			u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
			struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];

			BNX2X_ERR("rx_bd[%x]=[%x:%x]  sw_bd=[%p]\n",
				  j, rx_bd[1], rx_bd[0], sw_bd->skb);
		}

		start = RX_SGE(fp->rx_sge_prod);
		end = RX_SGE(fp->last_max_sge);
		for (j = start; j < end; j++) {
			u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
			struct sw_rx_page *sw_page = &fp->rx_page_ring[j];

			BNX2X_ERR("rx_sge[%x]=[%x:%x]  sw_page=[%p]\n",
				  j, rx_sge[1], rx_sge[0], sw_page->page);
		}

		start = RCQ_BD(fp->rx_comp_cons - 10);
		end = RCQ_BD(fp->rx_comp_cons + 503);
		for (j = start; j < end; j++) {
			u32 *cqe = (u32 *)&fp->rx_comp_ring[j];

			BNX2X_ERR("cqe[%x]=[%x:%x:%x:%x]\n",
				  j, cqe[0], cqe[1], cqe[2], cqe[3]);
		}
	}

	BNX2X_ERR("def_c_idx(%u)  def_u_idx(%u)  def_x_idx(%u)"
		  "  def_t_idx(%u)  def_att_idx(%u)  attn_state(%u)"
		  "  spq_prod_idx(%u)\n",
		  bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
		  bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);

	BNX2X_ERR("end crash dump -----------------\n");
}

static void bnx2x_int_enable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;

	if (msix) {
		val &= ~HC_CONFIG_0_REG_SINGLE_ISR_EN_0;
		val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else {
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_INT_LINE_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);

		DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)  MSI-X %d\n",
		   val, port, addr, msix);

		REG_WR(bp, addr, val);

		val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
	}

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)  MSI-X %d\n",
	   val, port, addr, msix);

	REG_WR(bp, addr, val);

	if (CHIP_IS_E1H(bp)) {
		/* init leading/trailing edge */
		val = (0xfe0f | (1 << (BP_E1HVN(bp) + 4)));
		/* enable nig attention */
		val |= 0x0100;

		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
	}
}

static void bnx2x_int_disable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);

	val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
		 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
		 HC_CONFIG_0_REG_INT_LINE_EN_0 |
		 HC_CONFIG_0_REG_ATTN_BIT_EN_0);

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
	   val, port, addr);

	REG_WR(bp, addr, val);
	if (REG_RD(bp, addr) != val)
		BNX2X_ERR("BUG! proper val not read from IGU!\n");
}

static void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
{
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int i;

	/* disable interrupt handling */
	atomic_inc(&bp->intr_sem);
	if (disable_hw)
		/* prevent the HW from sending interrupts */
		bnx2x_int_disable(bp);

	/* make sure all ISRs are done */
	if (msix) {
		for_each_queue(bp, i)
			synchronize_irq(bp->msix_table[i].vector);

		/* one more for the Slow Path IRQ */
		synchronize_irq(bp->msix_table[i].vector);
	} else
		synchronize_irq(bp->pdev->irq);

	/* make sure sp_task is not running */
	cancel_work_sync(&bp->sp_task);
}

/*
 * General service functions
 */

static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
				u8 storm, u16 index, u8 op, u8 update)
{
	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
		       COMMAND_REG_INT_ACK);
	struct igu_ack_register igu_ack;

	igu_ack.status_block_index = index;
	igu_ack.sb_id_and_flags =
			((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
			 (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
			 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
			 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));

	DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n",
	   (*(u32 *)&igu_ack), hc_addr);
	REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));
}

static inline u16 bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
{
	struct host_status_block *fpsb = fp->status_blk;
	u16 rc = 0;

	barrier(); /* status block is written to by the chip */
	if (fp->fp_c_idx != fpsb->c_status_block.status_block_index) {
		fp->fp_c_idx = fpsb->c_status_block.status_block_index;
		rc |= 1;
	}
	if (fp->fp_u_idx != fpsb->u_status_block.status_block_index) {
		fp->fp_u_idx = fpsb->u_status_block.status_block_index;
		rc |= 2;
	}
	return rc;
}

static u16 bnx2x_ack_int(struct bnx2x *bp)
{
	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
		       COMMAND_REG_SIMD_MASK);
	u32 result = REG_RD(bp, hc_addr);

	DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
	   result, hc_addr);

	return result;
}


/*
 * fast path service functions
 */

/* free skb in the packet ring at pos idx
 * return idx of last bd freed
 */
static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			     u16 idx)
{
	struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
	struct eth_tx_bd *tx_bd;
	struct sk_buff *skb = tx_buf->skb;
	u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
	int nbd;

	DP(BNX2X_MSG_OFF, "pkt_idx %d  buff @(%p)->skb %p\n",
	   idx, tx_buf, skb);

	/* unmap first bd */
	DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
	tx_bd = &fp->tx_desc_ring[bd_idx];
	pci_unmap_single(bp->pdev, BD_UNMAP_ADDR(tx_bd),
			 BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);

	nbd = le16_to_cpu(tx_bd->nbd) - 1;
	new_cons = nbd + tx_buf->first_bd;
#ifdef BNX2X_STOP_ON_ERROR
	if (nbd > (MAX_SKB_FRAGS + 2)) {
		BNX2X_ERR("BAD nbd!\n");
		bnx2x_panic();
	}
#endif

	/* Skip a parse bd and the TSO split header bd
	   since they have no mapping */
	if (nbd)
		bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	if (tx_bd->bd_flags.as_bitfield & (ETH_TX_BD_FLAGS_IP_CSUM |
					   ETH_TX_BD_FLAGS_TCP_CSUM |
					   ETH_TX_BD_FLAGS_SW_LSO)) {
		if (--nbd)
			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
		tx_bd = &fp->tx_desc_ring[bd_idx];
		/* is this a TSO split header bd? */
		if (tx_bd->bd_flags.as_bitfield & ETH_TX_BD_FLAGS_SW_LSO) {
			if (--nbd)
				bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
		}
	}

	/* now free frags */
	while (nbd > 0) {

		DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
		tx_bd = &fp->tx_desc_ring[bd_idx];
		pci_unmap_page(bp->pdev, BD_UNMAP_ADDR(tx_bd),
			       BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);
		if (--nbd)
			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* release skb */
	WARN_ON(!skb);
	dev_kfree_skb(skb);
	tx_buf->first_bd = 0;
	tx_buf->skb = NULL;

	return new_cons;
}

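/* Number of TX BDs still available to start_xmit().  The NUM_TX_RINGS
 * "next page" BDs can never carry data, so they are counted as used to keep
 * a safety margin between producer and consumer.
 */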
static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
{
	s16 used;
	u16 prod;
	u16 cons;

	barrier(); /* Tell compiler that prod and cons can change */
	prod = fp->tx_bd_prod;
	cons = fp->tx_bd_cons;

	/* NUM_TX_RINGS = number of "next-page" entries
	   It will be used as a threshold */
	used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;

#ifdef BNX2X_STOP_ON_ERROR
	WARN_ON(used < 0);
	WARN_ON(used > fp->bp->tx_ring_size);
	WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL);
#endif

	return (s16)(fp->bp->tx_ring_size) - used;
}

static void bnx2x_tx_int(struct bnx2x_fastpath *fp, int work)
{
	struct bnx2x *bp = fp->bp;
	u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
	int done = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return;
#endif

	hw_cons = le16_to_cpu(*fp->tx_cons_sb);
	sw_cons = fp->tx_pkt_cons;

	while (sw_cons != hw_cons) {
		u16 pkt_cons;

		pkt_cons = TX_BD(sw_cons);

		/* prefetch(bp->tx_buf_ring[pkt_cons].skb); */

		DP(NETIF_MSG_TX_DONE, "hw_cons %u  sw_cons %u  pkt_cons %u\n",
		   hw_cons, sw_cons, pkt_cons);

/*		if (NEXT_TX_IDX(sw_cons) != hw_cons) {
			rmb();
			prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
		}
*/
		bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
		sw_cons++;
		done++;

		if (done == work)
			break;
	}

	fp->tx_pkt_cons = sw_cons;
	fp->tx_bd_cons = bd_cons;

	/* Need to make the tx_cons update visible to start_xmit()
	 * before checking for netif_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	/* TBD need a thresh? */
	if (unlikely(netif_queue_stopped(bp->dev))) {

		netif_tx_lock(bp->dev);

		if (netif_queue_stopped(bp->dev) &&
		    (bp->state == BNX2X_STATE_OPEN) &&
		    (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
			netif_wake_queue(bp->dev);

		netif_tx_unlock(bp->dev);
	}
}

static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
			   union eth_rx_cqe *rr_cqe)
{
	struct bnx2x *bp = fp->bp;
	int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
	int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);

	DP(BNX2X_MSG_SP,
	   "fp %d  cid %d  got ramrod #%d  state is %x  type is %d\n",
	   FP_IDX(fp), cid, command, bp->state,
	   rr_cqe->ramrod_cqe.ramrod_type);

	if (FP_IDX(fp)) {
		switch (command | fp->state) {
		case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
						BNX2X_FP_STATE_OPENING):
			DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
			   cid);
			fp->state = BNX2X_FP_STATE_OPEN;
			break;

		case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
			DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
			   cid);
			fp->state = BNX2X_FP_STATE_HALTED;
			break;

		default:
			BNX2X_ERR("unexpected MC reply (%d)  "
				  "fp->state is %x\n", command, fp->state);
			break;
		}
		mb(); /* force bnx2x_wait_ramrod() to see the change */
		return;
	}

	switch (command | bp->state) {
	case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
		DP(NETIF_MSG_IFUP, "got setup ramrod\n");
		bp->state = BNX2X_STATE_OPEN;
		break;

	case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
		bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
		fp->state = BNX2X_FP_STATE_HALTED;
		break;

	case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
		bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
		break;

	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
		DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
		bp->set_mac_pending = 0;
		break;

	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
		break;

	default:
		BNX2X_ERR("unexpected MC reply (%d)  bp->state is %x\n",
			  command, bp->state);
		break;
	}
	mb(); /* force bnx2x_wait_ramrod() to see the change */
}

static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct page *page = sw_buf->page;
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];

	/* Skip "next page" elements */
	if (!page)
		return;

	pci_unmap_page(bp->pdev, pci_unmap_addr(sw_buf, mapping),
		       BCM_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
	__free_pages(page, PAGES_PER_SGE_SHIFT);

	sw_buf->page = NULL;
	sge->addr_hi = 0;
	sge->addr_lo = 0;
}

static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
					   struct bnx2x_fastpath *fp, int last)
{
	int i;

	for (i = 0; i < last; i++)
		bnx2x_free_rx_sge(bp, fp, i);
}

static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
	dma_addr_t mapping;

	if (unlikely(page == NULL))
		return -ENOMEM;

	mapping = pci_map_page(bp->pdev, page, 0, BCM_PAGE_SIZE*PAGES_PER_SGE,
			       PCI_DMA_FROMDEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		__free_pages(page, PAGES_PER_SGE_SHIFT);
		return -ENOMEM;
	}

	sw_buf->page = page;
	pci_unmap_addr_set(sw_buf, mapping, mapping);

	sge->addr_hi = cpu_to_le32(U64_HI(mapping));
	sge->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}

static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct sk_buff *skb;
	struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
	struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
	dma_addr_t mapping;

	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
	if (unlikely(skb == NULL))
		return -ENOMEM;

	mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_size,
				 PCI_DMA_FROMDEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		dev_kfree_skb(skb);
		return -ENOMEM;
	}

	rx_buf->skb = skb;
	pci_unmap_addr_set(rx_buf, mapping, mapping);

	rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}

/* note that we are not allocating a new skb,
 * we are just moving one from cons to prod
 * we are not creating a new mapping,
 * so there is no need to check for dma_mapping_error().
 */
static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
			       struct sk_buff *skb, u16 cons, u16 prod)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];

	pci_dma_sync_single_for_device(bp->pdev,
				       pci_unmap_addr(cons_rx_buf, mapping),
				       bp->rx_offset + RX_COPY_THRESH,
				       PCI_DMA_FROMDEVICE);

	prod_rx_buf->skb = cons_rx_buf->skb;
	pci_unmap_addr_set(prod_rx_buf, mapping,
			   pci_unmap_addr(cons_rx_buf, mapping));
	*prod_bd = *cons_bd;
}

static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
					     u16 idx)
{
	u16 last_max = fp->last_max_sge;

	if (SUB_S16(idx, last_max) > 0)
		fp->last_max_sge = idx;
}

static void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
{
	int i, j;

	for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
		int idx = RX_SGE_CNT * i - 1;

		for (j = 0; j < 2; j++) {
			SGE_MASK_CLEAR_BIT(fp, idx);
			idx--;
		}
	}
}

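/* Update rx_sge_prod after a TPA aggregation completes: clear the mask bits
 * of all SGEs referenced by the CQE, then advance the producer over every
 * mask element that became fully consumed.  Assumes the last SGL index in
 * the CQE is the highest one used.
 */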
static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
				  struct eth_fast_path_rx_cqe *fp_cqe)
{
	struct bnx2x *bp = fp->bp;
	u16 sge_len = BCM_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
				     le16_to_cpu(fp_cqe->len_on_bd)) >>
		      BCM_PAGE_SHIFT;
	u16 last_max, last_elem, first_elem;
	u16 delta = 0;
	u16 i;

	if (!sge_len)
		return;

	/* First mark all used pages */
	for (i = 0; i < sge_len; i++)
		SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i])));

	DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
	   sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

	/* Here we assume that the last SGE index is the biggest */
	prefetch((void *)(fp->sge_mask));
	bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

	last_max = RX_SGE(fp->last_max_sge);
	last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
	first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;

	/* If ring is not full */
	if (last_elem + 1 != first_elem)
		last_elem++;

	/* Now update the prod */
	for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
		if (likely(fp->sge_mask[i]))
			break;

		fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
		delta += RX_SGE_MASK_ELEM_SZ;
	}

	if (delta > 0) {
		fp->rx_sge_prod += delta;
		/* clear page-end entries */
		bnx2x_clear_sge_mask_next_elems(fp);
	}

	DP(NETIF_MSG_RX_STATUS,
	   "fp->last_max_sge = %d  fp->rx_sge_prod = %d\n",
	   fp->last_max_sge, fp->rx_sge_prod);
}

static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
{
	/* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
	memset(fp->sge_mask, 0xff,
	       (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));

	/* Clear the two last indices in the page to 1:
	   these are the indices that correspond to the "next" element,
	   hence will never be indicated and should be removed from
	   the calculations. */
	bnx2x_clear_sge_mask_next_elems(fp);
}

static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
			    struct sk_buff *skb, u16 cons, u16 prod)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
	dma_addr_t mapping;

	/* move empty skb from pool to prod and map it */
	prod_rx_buf->skb = fp->tpa_pool[queue].skb;
	mapping = pci_map_single(bp->pdev, fp->tpa_pool[queue].skb->data,
				 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
	pci_unmap_addr_set(prod_rx_buf, mapping, mapping);

	/* move partial skb from cons to pool (don't unmap yet) */
	fp->tpa_pool[queue] = *cons_rx_buf;

	/* mark bin state as start - print error if current state != stop */
	if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
		BNX2X_ERR("start of bin not in stop [%d]\n", queue);

	fp->tpa_state[queue] = BNX2X_TPA_START;

	/* point prod_bd to new skb */
	prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

#ifdef BNX2X_STOP_ON_ERROR
	fp->tpa_queue_used |= (1 << queue);
#ifdef __powerpc64__
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
#else
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
#endif
	   fp->tpa_queue_used);
#endif
}

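/* Attach the SGE pages listed in the fast-path CQE to the aggregated skb as
 * page fragments.  Every page handed to the stack is immediately replaced by
 * a freshly allocated one; if that allocation fails the packet is dropped.
 */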
static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			       struct sk_buff *skb,
			       struct eth_fast_path_rx_cqe *fp_cqe,
			       u16 cqe_idx)
{
	struct sw_rx_page *rx_pg, old_rx_pg;
	u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
	u32 i, frag_len, frag_size, pages;
	int err;
	int j;

	frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
	pages = BCM_PAGE_ALIGN(frag_size) >> BCM_PAGE_SHIFT;

	/* This is needed in order to enable forwarding support */
	if (frag_size)
		skb_shinfo(skb)->gso_size = min((u32)BCM_PAGE_SIZE,
						max(frag_size, (u32)len_on_bd));

#ifdef BNX2X_STOP_ON_ERROR
	if (pages > 8*PAGES_PER_SGE) {
		BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
			  pages, cqe_idx);
		BNX2X_ERR("fp_cqe->pkt_len = %d  fp_cqe->len_on_bd = %d\n",
			  fp_cqe->pkt_len, len_on_bd);
		bnx2x_panic();
		return -EINVAL;
	}
#endif

	/* Run through the SGL and compose the fragmented skb */
	for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
		u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j]));

		/* FW gives the indices of the SGE as if the ring is an array
		   (meaning that "next" element will consume 2 indices) */
		frag_len = min(frag_size, (u32)(BCM_PAGE_SIZE*PAGES_PER_SGE));
		rx_pg = &fp->rx_page_ring[sge_idx];
		old_rx_pg = *rx_pg;

		/* If we fail to allocate a substitute page, we simply stop
		   where we are and drop the whole packet */
		err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
		if (unlikely(err)) {
			bp->eth_stats.rx_skb_alloc_failed++;
			return err;
		}

		/* Unmap the page as we r going to pass it to the stack */
		pci_unmap_page(bp->pdev, pci_unmap_addr(&old_rx_pg, mapping),
			      BCM_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);

		/* Add one frag and update the appropriate fields in the skb */
		skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);

		skb->data_len += frag_len;
		skb->truesize += frag_len;
		skb->len += frag_len;

		frag_size -= frag_len;
	}

	return 0;
}

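/* Close a TPA aggregation: unmap the skb held in the bin, fix up the IP
 * checksum of the aggregated header, attach the SGE fragments and pass the
 * packet to the stack, then park a newly allocated skb in the bin (or drop
 * the packet if that allocation fails).
 */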
static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			   u16 queue, int pad, int len, union eth_rx_cqe *cqe,
			   u16 cqe_idx)
{
	struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
	struct sk_buff *skb = rx_buf->skb;
	/* alloc new skb */
	struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);

	/* Unmap skb in the pool anyway, as we are going to change
	   pool entry status to BNX2X_TPA_STOP even if new skb allocation
	   fails. */
	pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
			 bp->rx_buf_size, PCI_DMA_FROMDEVICE);

	if (likely(new_skb)) {
		/* fix ip xsum and give it to the stack */
		/* (no need to map the new skb) */
		struct iphdr *iph;

		prefetch(skb);
		prefetch(((char *)(skb)) + 128);

#ifdef BNX2X_STOP_ON_ERROR
		if (pad + len > bp->rx_buf_size) {
			BNX2X_ERR("skb_put is about to fail...  "
				  "pad %d  len %d  rx_buf_size %d\n",
				  pad, len, bp->rx_buf_size);
			bnx2x_panic();
			return;
		}
#endif

		skb_reserve(skb, pad);
		skb_put(skb, len);

		skb->protocol = eth_type_trans(skb, bp->dev);
		skb->ip_summed = CHECKSUM_UNNECESSARY;

		iph = (struct iphdr *)skb->data;
		iph->check = 0;
		iph->check = ip_fast_csum((u8 *)iph, iph->ihl);

		if (!bnx2x_fill_frag_skb(bp, fp, skb,
					 &cqe->fast_path_cqe, cqe_idx)) {
#ifdef BCM_VLAN
			if ((bp->vlgrp != NULL) &&
			    (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
			     PARSING_FLAGS_VLAN))
				vlan_hwaccel_receive_skb(skb, bp->vlgrp,
						le16_to_cpu(cqe->fast_path_cqe.
							    vlan_tag));
			else
#endif
				netif_receive_skb(skb);
		} else {
			DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
			   " - dropping packet!\n");
			dev_kfree_skb(skb);
		}

		bp->dev->last_rx = jiffies;

		/* put new skb in bin */
		fp->tpa_pool[queue].skb = new_skb;

	} else {
		/* else drop the packet and keep the buffer in the bin */
		DP(NETIF_MSG_RX_STATUS,
		   "Failed to allocate new skb - dropping packet!\n");
		bp->eth_stats.rx_skb_alloc_failed++;
	}

	fp->tpa_state[queue] = BNX2X_TPA_STOP;
}

static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
					struct bnx2x_fastpath *fp,
					u16 bd_prod, u16 rx_comp_prod,
					u16 rx_sge_prod)
{
	struct tstorm_eth_rx_producers rx_prods = {0};
	int i;

	/* Update producers */
	rx_prods.bd_prod = bd_prod;
	rx_prods.cqe_prod = rx_comp_prod;
	rx_prods.sge_prod = rx_sge_prod;

	for (i = 0; i < sizeof(struct tstorm_eth_rx_producers)/4; i++)
		REG_WR(bp, BAR_TSTRORM_INTMEM +
		       TSTORM_RX_PRODS_OFFSET(BP_PORT(bp), FP_CL_ID(fp)) + i*4,
		       ((u32 *)&rx_prods)[i]);

	DP(NETIF_MSG_RX_STATUS,
	   "Wrote: bd_prod %u  cqe_prod %u  sge_prod %u\n",
	   bd_prod, rx_comp_prod, rx_sge_prod);
}

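/* Main RX completion loop (NAPI poll body): walk the RCQ until the hardware
 * completion index is reached or the budget is exhausted, handing slowpath
 * CQEs to bnx2x_sp_event(), TPA start/stop CQEs to the aggregation code and
 * regular packets to the stack, then publish the new producers to the chip.
 */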
static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
{
	struct bnx2x *bp = fp->bp;
	u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
	u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
	int rx_pkt = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return 0;
#endif

	/* CQ "next element" is of the size of the regular element,
	   that's why it's ok here */
	hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
	if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
		hw_comp_cons++;

	bd_cons = fp->rx_bd_cons;
	bd_prod = fp->rx_bd_prod;
	bd_prod_fw = bd_prod;
	sw_comp_cons = fp->rx_comp_cons;
	sw_comp_prod = fp->rx_comp_prod;

	/* Memory barrier necessary as speculative reads of the rx
	 * buffer can be ahead of the index in the status block
	 */
	rmb();

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]:  hw_comp_cons %u  sw_comp_cons %u\n",
	   FP_IDX(fp), hw_comp_cons, sw_comp_cons);

	while (sw_comp_cons != hw_comp_cons) {
		struct sw_rx_bd *rx_buf = NULL;
		struct sk_buff *skb;
		union eth_rx_cqe *cqe;
		u8 cqe_fp_flags;
		u16 len, pad;

		comp_ring_cons = RCQ_BD(sw_comp_cons);
		bd_prod = RX_BD(bd_prod);
		bd_cons = RX_BD(bd_cons);

		cqe = &fp->rx_comp_ring[comp_ring_cons];
		cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;

		DP(NETIF_MSG_RX_STATUS, "CQE type %x  err %x  status %x"
		   "  queue %x  vlan %x  len %u\n", CQE_TYPE(cqe_fp_flags),
		   cqe_fp_flags, cqe->fast_path_cqe.status_flags,
		   cqe->fast_path_cqe.rss_hash_result,
		   le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
		   le16_to_cpu(cqe->fast_path_cqe.pkt_len));

		/* is this a slowpath msg? */
		if (unlikely(CQE_TYPE(cqe_fp_flags))) {
			bnx2x_sp_event(fp, cqe);
			goto next_cqe;

		/* this is an rx packet */
		} else {
			rx_buf = &fp->rx_buf_ring[bd_cons];
			skb = rx_buf->skb;
			len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
			pad = cqe->fast_path_cqe.placement_offset;

			/* If CQE is marked both TPA_START and TPA_END
			   it is a non-TPA CQE */
			if ((!fp->disable_tpa) &&
			    (TPA_TYPE(cqe_fp_flags) !=
					(TPA_TYPE_START | TPA_TYPE_END))) {
				u16 queue = cqe->fast_path_cqe.queue_index;

				if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
					DP(NETIF_MSG_RX_STATUS,
					   "calling tpa_start on queue %d\n",
					   queue);

					bnx2x_tpa_start(fp, queue, skb,
							bd_cons, bd_prod);
					goto next_rx;
				}

				if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
					DP(NETIF_MSG_RX_STATUS,
					   "calling tpa_stop on queue %d\n",
					   queue);

					if (!BNX2X_RX_SUM_FIX(cqe))
						BNX2X_ERR("STOP on none TCP "
							  "data\n");

					/* This is a size of the linear data
					   on this skb */
					len = le16_to_cpu(cqe->fast_path_cqe.
								len_on_bd);
					bnx2x_tpa_stop(bp, fp, queue, pad,
						    len, cqe, comp_ring_cons);
#ifdef BNX2X_STOP_ON_ERROR
					if (bp->panic)
						return -EINVAL;
#endif

					bnx2x_update_sge_prod(fp,
							&cqe->fast_path_cqe);
					goto next_cqe;
				}
			}

			pci_dma_sync_single_for_device(bp->pdev,
					pci_unmap_addr(rx_buf, mapping),
						       pad + RX_COPY_THRESH,
						       PCI_DMA_FROMDEVICE);
			prefetch(skb);
			prefetch(((char *)(skb)) + 128);

			/* is this an error packet? */
			if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
				DP(NETIF_MSG_RX_ERR,
				   "ERROR  flags %x  rx packet %u\n",
				   cqe_fp_flags, sw_comp_cons);
				bp->eth_stats.rx_err_discard_pkt++;
				goto reuse_rx;
			}

			/* Since we don't have a jumbo ring
			 * copy small packets if mtu > 1500
			 */
			if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
			    (len <= RX_COPY_THRESH)) {
				struct sk_buff *new_skb;

				new_skb = netdev_alloc_skb(bp->dev,
							   len + pad);
				if (new_skb == NULL) {
					DP(NETIF_MSG_RX_ERR,
					   "ERROR  packet dropped "
					   "because of alloc failure\n");
					bp->eth_stats.rx_skb_alloc_failed++;
					goto reuse_rx;
				}

				/* aligned copy */
				skb_copy_from_linear_data_offset(skb, pad,
						    new_skb->data + pad, len);
				skb_reserve(new_skb, pad);
				skb_put(new_skb, len);

				bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);

				skb = new_skb;

			} else if (bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0) {
				pci_unmap_single(bp->pdev,
					pci_unmap_addr(rx_buf, mapping),
						 bp->rx_buf_size,
						 PCI_DMA_FROMDEVICE);
				skb_reserve(skb, pad);
				skb_put(skb, len);

			} else {
				DP(NETIF_MSG_RX_ERR,
				   "ERROR  packet dropped because "
				   "of alloc failure\n");
				bp->eth_stats.rx_skb_alloc_failed++;
reuse_rx:
				bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
				goto next_rx;
			}

			skb->protocol = eth_type_trans(skb, bp->dev);

			skb->ip_summed = CHECKSUM_NONE;
			if (bp->rx_csum) {
				if (likely(BNX2X_RX_CSUM_OK(cqe)))
					skb->ip_summed = CHECKSUM_UNNECESSARY;
				else
					bp->eth_stats.hw_csum_err++;
			}
		}

#ifdef BCM_VLAN
		if ((bp->vlgrp != NULL) &&
		    (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
		     PARSING_FLAGS_VLAN))
			vlan_hwaccel_receive_skb(skb, bp->vlgrp,
				le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
		else
#endif
			netif_receive_skb(skb);

		bp->dev->last_rx = jiffies;

next_rx:
		rx_buf->skb = NULL;

		bd_cons = NEXT_RX_IDX(bd_cons);
		bd_prod = NEXT_RX_IDX(bd_prod);
		bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
		rx_pkt++;
next_cqe:
		sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
		sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);

		if (rx_pkt == budget)
			break;
	} /* while */

	fp->rx_bd_cons = bd_cons;
	fp->rx_bd_prod = bd_prod_fw;
	fp->rx_comp_cons = sw_comp_cons;
	fp->rx_comp_prod = sw_comp_prod;

	/* Update producers */
	bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
			     fp->rx_sge_prod);
	mmiowb(); /* keep prod updates ordered */

	fp->rx_pkt += rx_pkt;

	return rx_pkt;
}

static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
{
	struct bnx2x_fastpath *fp = fp_cookie;
	struct bnx2x *bp = fp->bp;
	struct net_device *dev = bp->dev;
	int index = FP_IDX(fp);

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

	DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
	   index, FP_SB_ID(fp));
	bnx2x_ack_sb(bp, FP_SB_ID(fp), USTORM_ID, 0, IGU_INT_DISABLE, 0);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	prefetch(fp->rx_cons_sb);
	prefetch(fp->tx_cons_sb);
	prefetch(&fp->status_blk->c_status_block.status_block_index);
	prefetch(&fp->status_blk->u_status_block.status_block_index);

	netif_rx_schedule(dev, &bnx2x_fp(bp, index, napi));

	return IRQ_HANDLED;
}

static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct bnx2x *bp = netdev_priv(dev);
	u16 status = bnx2x_ack_int(bp);
	u16 mask;

	/* Return here if interrupt is shared and it's not for us */
	if (unlikely(status == 0)) {
		DP(NETIF_MSG_INTR, "not our interrupt!\n");
		return IRQ_NONE;
	}
	DP(NETIF_MSG_INTR, "got an interrupt  status %u\n", status);

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	mask = 0x2 << bp->fp[0].sb_id;
	if (status & mask) {
		struct bnx2x_fastpath *fp = &bp->fp[0];

		prefetch(fp->rx_cons_sb);
		prefetch(fp->tx_cons_sb);
		prefetch(&fp->status_blk->c_status_block.status_block_index);
		prefetch(&fp->status_blk->u_status_block.status_block_index);

		netif_rx_schedule(dev, &bnx2x_fp(bp, 0, napi));

		status &= ~mask;
	}

	if (unlikely(status & 0x1)) {
		schedule_work(&bp->sp_task);

		status &= ~0x1;
		if (!status)
			return IRQ_HANDLED;
	}

	if (status)
		DP(NETIF_MSG_INTR, "got an unknown interrupt! (status %u)\n",
		   status);

	return IRQ_HANDLED;
}

/* end of fast path */

static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);

/*
 * General service functions
 */

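/* The HW lock registers in the MISC block provide mutual exclusion between
 * the driver instances (PCI functions) sharing the chip: the resource bit is
 * written to the "set" register and read back to see whether it latched, and
 * acquisition is retried every 5ms for up to 5 seconds.
 */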
static int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	int func = BP_FUNC(bp);
	u32 hw_lock_control_reg;
	int cnt;

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		DP(NETIF_MSG_HW,
		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return -EINVAL;
	}

	if (func <= 5)
		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
	else
		hw_lock_control_reg =
				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);

	/* Validating that the resource is not already taken */
	lock_status = REG_RD(bp, hw_lock_control_reg);
	if (lock_status & resource_bit) {
		DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
		   lock_status, resource_bit);
		return -EEXIST;
	}

	/* Try for 5 second every 5ms */
	for (cnt = 0; cnt < 1000; cnt++) {
		/* Try to acquire the lock */
		REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
		lock_status = REG_RD(bp, hw_lock_control_reg);
		if (lock_status & resource_bit)
			return 0;

		msleep(5);
	}
	DP(NETIF_MSG_HW, "Timeout\n");
	return -EAGAIN;
}

static int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	int func = BP_FUNC(bp);
	u32 hw_lock_control_reg;

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		DP(NETIF_MSG_HW,
		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return -EINVAL;
	}

	if (func <= 5)
		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
	else
		hw_lock_control_reg =
				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);

	/* Validating that the resource is currently taken */
	lock_status = REG_RD(bp, hw_lock_control_reg);
	if (!(lock_status & resource_bit)) {
		DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
		   lock_status, resource_bit);
		return -EFAULT;
	}

	REG_WR(bp, hw_lock_control_reg, resource_bit);
	return 0;
}

/* HW Lock for shared dual port PHYs */
static void bnx2x_acquire_phy_lock(struct bnx2x *bp)
{
	u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);

	mutex_lock(&bp->port.phy_mutex);

	if ((ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072) ||
	    (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073))
		bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_8072_MDIO);
}

static void bnx2x_release_phy_lock(struct bnx2x *bp)
{
	u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);

	if ((ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072) ||
	    (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073))
		bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_8072_MDIO);

	mutex_unlock(&bp->port.phy_mutex);
}

int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
	/* read GPIO and mask except the float bits */
	gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);

	switch (mode) {
	case MISC_REGISTERS_GPIO_OUTPUT_LOW:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
		   gpio_num, gpio_shift);
		/* clear FLOAT and set CLR */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
		break;

	case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
		   gpio_num, gpio_shift);
		/* clear FLOAT and set SET */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
		break;

	case MISC_REGISTERS_GPIO_INPUT_HI_Z:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
		   gpio_num, gpio_shift);
		/* set FLOAT */
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_GPIO, gpio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);

	return 0;
}

static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
{
	u32 spio_mask = (1 << spio_num);
	u32 spio_reg;

	if ((spio_num < MISC_REGISTERS_SPIO_4) ||
	    (spio_num > MISC_REGISTERS_SPIO_7)) {
		BNX2X_ERR("Invalid SPIO %d\n", spio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
	/* read SPIO and mask except the float bits */
	spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);

	switch (mode) {
	case MISC_REGISTERS_SPIO_OUTPUT_LOW:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
		/* clear FLOAT and set CLR */
		spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
		break;

	case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
		/* clear FLOAT and set SET */
		spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
		break;

	case MISC_REGISTERS_SPIO_INPUT_HI_Z:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
		/* set FLOAT */
		spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_SPIO, spio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);

	return 0;
}

static void bnx2x_calc_fc_adv(struct bnx2x *bp)
{
	switch (bp->link_vars.ieee_fc) {
	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
		bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
					  ADVERTISED_Pause);
		break;
	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
		bp->port.advertising |= (ADVERTISED_Asym_Pause |
					 ADVERTISED_Pause);
		break;
	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
		bp->port.advertising |= ADVERTISED_Asym_Pause;
		break;
	default:
		bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
					  ADVERTISED_Pause);
		break;
	}
}

static void bnx2x_link_report(struct bnx2x *bp)
{
	if (bp->link_vars.link_up) {
		if (bp->state == BNX2X_STATE_OPEN)
			netif_carrier_on(bp->dev);
		printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);

		printk("%d Mbps ", bp->link_vars.line_speed);

		if (bp->link_vars.duplex == DUPLEX_FULL)
			printk("full duplex");
		else
			printk("half duplex");

		if (bp->link_vars.flow_ctrl != FLOW_CTRL_NONE) {
			if (bp->link_vars.flow_ctrl & FLOW_CTRL_RX) {
				printk(", receive ");
				if (bp->link_vars.flow_ctrl & FLOW_CTRL_TX)
					printk("& transmit ");
			} else {
				printk(", transmit ");
			}
			printk("flow control ON");
		}
		printk("\n");

	} else { /* link_down */
		netif_carrier_off(bp->dev);
		printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
	}
}

static u8 bnx2x_initial_phy_init(struct bnx2x *bp)
{
	u8 rc = 0;

	if (!BP_NOMCP(bp)) {

		/* Initialize link parameters structure variables */
		/* It is recommended to turn off RX FC for jumbo frames
		   for better performance */
		if (IS_E1HMF(bp))
			bp->link_params.req_fc_auto_adv = FLOW_CTRL_BOTH;
		else if (bp->dev->mtu > 5000)
			bp->link_params.req_fc_auto_adv = FLOW_CTRL_TX;
		else
			bp->link_params.req_fc_auto_adv = FLOW_CTRL_BOTH;

		bnx2x_acquire_phy_lock(bp);
		rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
		bnx2x_release_phy_lock(bp);

		if (bp->link_vars.link_up)
			bnx2x_link_report(bp);

		bnx2x_calc_fc_adv(bp);

	} else
		BNX2X_ERR("Bootcode is missing -not initializing link\n");

	return rc;
}

static void bnx2x_link_set(struct bnx2x *bp)
{
	if (!BP_NOMCP(bp)) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_phy_init(&bp->link_params, &bp->link_vars);
		bnx2x_release_phy_lock(bp);

		bnx2x_calc_fc_adv(bp);
	} else
		BNX2X_ERR("Bootcode is missing -not setting link\n");
}

static void bnx2x__link_reset(struct bnx2x *bp)
{
	if (!BP_NOMCP(bp)) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_link_reset(&bp->link_params, &bp->link_vars);
		bnx2x_release_phy_lock(bp);
	} else
		BNX2X_ERR("Bootcode is missing -not resetting link\n");
}

static u8 bnx2x_link_test(struct bnx2x *bp)
{
	u8 rc;

	bnx2x_acquire_phy_lock(bp);
	rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
	bnx2x_release_phy_lock(bp);

	return rc;
}

/* Calculates the sum of vn_min_rates.
   It's needed for further normalizing of the min_rates.

   Returns:
     sum of vn_min_rates
       or
     0 - if all the min_rates are 0.
     In the later case fairness algorithm should be deactivated.
     If not all min_rates are zero then those that are zeroes will
     be set to 1.
 */
static u32 bnx2x_calc_vn_wsum(struct bnx2x *bp)
{
	int all_zero = 1;
	int i, port = BP_PORT(bp);
	u32 wsum = 0;

	for (i = 0; i < E1HVN_MAX; i++) {
		u32 vn_cfg =
			SHMEM_RD(bp, mf_cfg.func_mf_config[2*i + port].config);
		u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
				   FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
		if (!(vn_cfg & FUNC_MF_CFG_FUNC_HIDE)) {
			/* If min rate is zero - set it to 1 */
			if (!vn_min_rate)
				vn_min_rate = DEF_MIN_RATE;
			else
				all_zero = 0;

			wsum += vn_min_rate;
		}
	}

	/* ... only if all min rates are zeros - disable FAIRNESS */
	if (all_zero)
		return 0;

	return wsum;
}

static void bnx2x_init_port_minmax(struct bnx2x *bp,
				   int en_fness,
				   u16 port_rate,
				   struct cmng_struct_per_port *m_cmng_port)
{
	u32 r_param = port_rate / 8;
	int port = BP_PORT(bp);
	int i;

	memset(m_cmng_port, 0, sizeof(struct cmng_struct_per_port));

	/* Enable minmax only if we are in e1hmf mode */
	if (IS_E1HMF(bp)) {
		u32 fair_periodic_timeout_usec;
		u32 t_fair;

		/* Enable rate shaping and fairness */
		m_cmng_port->flags.cmng_vn_enable = 1;
		m_cmng_port->flags.fairness_enable = en_fness ? 1 : 0;
		m_cmng_port->flags.rate_shaping_enable = 1;

		if (!en_fness)
			DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
			   "  fairness will be disabled\n");

		/* 100 usec in SDM ticks = 25 since each tick is 4 usec */
		m_cmng_port->rs_vars.rs_periodic_timeout =
						RS_PERIODIC_TIMEOUT_USEC / 4;

		/* this is the threshold below which no timer arming will occur
		   1.25 coefficient is for the threshold to be a little bigger
		   than the real time, to compensate for timer in-accuracy */
		m_cmng_port->rs_vars.rs_threshold =
				(RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;

		/* resolution of fairness timer */
		fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
		/* for 10G it is 1000usec. for 1G it is 10000usec. */
		t_fair = T_FAIR_COEF / port_rate;

		/* this is the threshold below which we won't arm
		   the timer anymore */
		m_cmng_port->fair_vars.fair_threshold = QM_ARB_BYTES;

		/* we multiply by 1e3/8 to get bytes/msec.
		   We don't want the credits to pass a credit
		   of the T_FAIR*FAIR_MEM (algorithm resolution) */
		m_cmng_port->fair_vars.upper_bound =
						r_param * t_fair * FAIR_MEM;
		/* since each tick is 4 usec */
		m_cmng_port->fair_vars.fairness_timeout =
						fair_periodic_timeout_usec / 4;

	} else {
		/* Disable rate shaping and fairness */
		m_cmng_port->flags.cmng_vn_enable = 0;
		m_cmng_port->flags.fairness_enable = 0;
		m_cmng_port->flags.rate_shaping_enable = 0;

		DP(NETIF_MSG_IFUP,
		   "Single function mode  minmax will be disabled\n");
	}

	/* Store it to internal memory */
	for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
		REG_WR(bp, BAR_XSTRORM_INTMEM +
		       XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
		       ((u32 *)(m_cmng_port))[i]);
}

static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func,
				 u32 wsum, u16 port_rate,
				 struct cmng_struct_per_port *m_cmng_port)
{
	struct rate_shaping_vars_per_vn m_rs_vn;
	struct fairness_vars_per_vn m_fair_vn;
	u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
	u16 vn_min_rate, vn_max_rate;
	int i;

	/* If function is hidden - set min and max to zeroes */
	if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
		vn_min_rate = 0;
		vn_max_rate = 0;

	} else {
		vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
				FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
		/* If FAIRNESS is enabled (not all min rates are zeroes) and
		   if current min rate is zero - set it to 1.
		   This is a requirement of the algorithm. */
		if ((vn_min_rate == 0) && wsum)
			vn_min_rate = DEF_MIN_RATE;
		vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
				FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
	}

	DP(NETIF_MSG_IFUP, "func %d: vn_min_rate=%d vn_max_rate=%d "
	   "wsum=%d\n", func, vn_min_rate, vn_max_rate, wsum);

	memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
	memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));

	/* global vn counter - maximal Mbps for this vn */
	m_rs_vn.vn_counter.rate = vn_max_rate;

	/* quota - number of bytes transmitted in this period */
	m_rs_vn.vn_counter.quota =
				(vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;

#ifdef BNX2X_PER_PROT_QOS
	/* per protocol counter */
	for (protocol = 0; protocol < NUM_OF_PROTOCOLS; protocol++) {
		/* maximal Mbps for this protocol */
		m_rs_vn.protocol_counters[protocol].rate =
						protocol_max_rate[protocol];
		/* the quota in each timer period -
		   number of bytes transmitted in this period */
		m_rs_vn.protocol_counters[protocol].quota =
			(u32)(rs_periodic_timeout_usec *
			      (m_rs_vn.protocol_counters[protocol].rate/8));
	}
#endif

	if (wsum) {
		/* credit for each period of the fairness algorithm:
		   number of bytes in T_FAIR (the vn share the port rate).
		   wsum should not be larger than 10000, thus
		   T_FAIR_COEF / (8 * wsum) will always be greater than zero */
		m_fair_vn.vn_credit_delta =
			max((u64)(vn_min_rate * (T_FAIR_COEF / (8 * wsum))),
			    (u64)(m_cmng_port->fair_vars.fair_threshold * 2));
		DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta=%d\n",
		   m_fair_vn.vn_credit_delta);
	}

#ifdef BNX2X_PER_PROT_QOS
	{
		u32 protocolWeightSum = 0;

		for (protocol = 0; protocol < NUM_OF_PROTOCOLS; protocol++)
			protocolWeightSum +=
					drvInit.protocol_min_rate[protocol];
		/* per protocol counter -
		   NOT NEEDED IF NO PER-PROTOCOL CONGESTION MANAGEMENT */
		if (protocolWeightSum > 0) {
			for (protocol = 0;
			     protocol < NUM_OF_PROTOCOLS; protocol++)
				/* credit for each period of the
				   fairness algorithm - number of bytes in
				   T_FAIR (the protocol share the vn rate) */
				m_fair_vn.protocol_credit_delta[protocol] =
					(u32)((vn_min_rate / 8) * t_fair *
					      protocol_min_rate /
					      protocolWeightSum);
		}
	}
#endif

	/* Store it to internal memory */
	for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
		REG_WR(bp, BAR_XSTRORM_INTMEM +
		       XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
		       ((u32 *)(&m_rs_vn))[i]);

	for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
		REG_WR(bp, BAR_XSTRORM_INTMEM +
		       XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
		       ((u32 *)(&m_fair_vn))[i]);
}
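/*
 * Illustrative numbers (assumed, for clarity only): with wsum = 100 (the sum
 * of all vn min rates) and vn_min_rate = 25, and taking T_FAIR_COEF = 1e7 as
 * implied by the 10G/1G comment in bnx2x_init_port_minmax(), the per-period
 * fairness credit is
 *
 *	vn_credit_delta = max(25 * (10000000 / (8 * 100)),
 *			      fair_threshold * 2)
 *			= max(25 * 12500, fair_threshold * 2)
 *
 * i.e. 312500 bytes per T_FAIR period, unless the port fair_threshold floor
 * is larger. The if (wsum) guard above keeps the division well defined.
 */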
2218 /* This function is called upon link interrupt */
2219 static void bnx2x_link_attn(struct bnx2x
*bp
)
2223 /* Make sure that we are synced with the current statistics */
2224 bnx2x_stats_handle(bp
, STATS_EVENT_STOP
);
2226 bnx2x_acquire_phy_lock(bp
);
2227 bnx2x_link_update(&bp
->link_params
, &bp
->link_vars
);
2228 bnx2x_release_phy_lock(bp
);
2230 if (bp
->link_vars
.link_up
) {
2232 if (bp
->link_vars
.mac_type
== MAC_TYPE_BMAC
) {
2233 struct host_port_stats
*pstats
;
2235 pstats
= bnx2x_sp(bp
, port_stats
);
2236 /* reset old bmac stats */
2237 memset(&(pstats
->mac_stx
[0]), 0,
2238 sizeof(struct mac_stx
));
2240 if ((bp
->state
== BNX2X_STATE_OPEN
) ||
2241 (bp
->state
== BNX2X_STATE_DISABLED
))
2242 bnx2x_stats_handle(bp
, STATS_EVENT_LINK_UP
);
2245 /* indicate link status */
2246 bnx2x_link_report(bp
);
2251 for (vn
= VN_0
; vn
< E1HVN_MAX
; vn
++) {
2252 if (vn
== BP_E1HVN(bp
))
2255 func
= ((vn
<< 1) | BP_PORT(bp
));
2257 /* Set the attention towards other drivers on the same port */
2259 REG_WR(bp
, MISC_REG_AEU_GENERAL_ATTN_0
+
2260 (LINK_SYNC_ATTENTION_BIT_FUNC_0
+ func
)*4, 1);
2264 if (CHIP_IS_E1H(bp
) && (bp
->link_vars
.line_speed
> 0)) {
2265 struct cmng_struct_per_port m_cmng_port
;
2267 int port
= BP_PORT(bp
);
2269 /* Init RATE SHAPING and FAIRNESS contexts */
2270 wsum
= bnx2x_calc_vn_wsum(bp
);
2271 bnx2x_init_port_minmax(bp
, (int)wsum
,
2272 bp
->link_vars
.line_speed
,
2275 for (vn
= VN_0
; vn
< E1HVN_MAX
; vn
++)
2276 bnx2x_init_vn_minmax(bp
, 2*vn
+ port
,
2277 wsum
, bp
->link_vars
.line_speed
,
static void bnx2x__link_status_update(struct bnx2x *bp)
{
	if (bp->state != BNX2X_STATE_OPEN)
		return;

	bnx2x_link_status_update(&bp->link_params, &bp->link_vars);

	if (bp->link_vars.link_up)
		bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
	else
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);

	/* indicate link status */
	bnx2x_link_report(bp);
}

static void bnx2x_pmf_update(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 val;

	bp->port.pmf = 1;
	DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);

	/* enable nig attention */
	val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
	REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
	REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);

	bnx2x_stats_handle(bp, STATS_EVENT_PMF);
}
2319 * General service functions
2322 /* the slow path queue is odd since completions arrive on the fastpath ring */
2323 static int bnx2x_sp_post(struct bnx2x
*bp
, int command
, int cid
,
2324 u32 data_hi
, u32 data_lo
, int common
)
2326 int func
= BP_FUNC(bp
);
2328 DP(BNX2X_MSG_SP
/*NETIF_MSG_TIMER*/,
2329 "SPQE (%x:%x) command %d hw_cid %x data (%x:%x) left %x\n",
2330 (u32
)U64_HI(bp
->spq_mapping
), (u32
)(U64_LO(bp
->spq_mapping
) +
2331 (void *)bp
->spq_prod_bd
- (void *)bp
->spq
), command
,
2332 HW_CID(bp
, cid
), data_hi
, data_lo
, bp
->spq_left
);
2334 #ifdef BNX2X_STOP_ON_ERROR
2335 if (unlikely(bp
->panic
))
2339 spin_lock_bh(&bp
->spq_lock
);
2341 if (!bp
->spq_left
) {
2342 BNX2X_ERR("BUG! SPQ ring full!\n");
2343 spin_unlock_bh(&bp
->spq_lock
);
2348 /* CID needs port number to be encoded in it */
2349 bp
->spq_prod_bd
->hdr
.conn_and_cmd_data
=
2350 cpu_to_le32(((command
<< SPE_HDR_CMD_ID_SHIFT
) |
2352 bp
->spq_prod_bd
->hdr
.type
= cpu_to_le16(ETH_CONNECTION_TYPE
);
2354 bp
->spq_prod_bd
->hdr
.type
|=
2355 cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT
));
2357 bp
->spq_prod_bd
->data
.mac_config_addr
.hi
= cpu_to_le32(data_hi
);
2358 bp
->spq_prod_bd
->data
.mac_config_addr
.lo
= cpu_to_le32(data_lo
);
2362 if (bp
->spq_prod_bd
== bp
->spq_last_bd
) {
2363 bp
->spq_prod_bd
= bp
->spq
;
2364 bp
->spq_prod_idx
= 0;
2365 DP(NETIF_MSG_TIMER
, "end of spq\n");
2372 REG_WR(bp
, BAR_XSTRORM_INTMEM
+ XSTORM_SPQ_PROD_OFFSET(func
),
2375 spin_unlock_bh(&bp
->spq_lock
);
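/*
 * Usage sketch (illustrative): a caller that needs a slow path ramrod fills
 * the 64-bit data and posts it, e.g. the statistics query later in this file
 * does
 *
 *	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
 *		      ((u32 *)&ramrod_data)[1], ((u32 *)&ramrod_data)[0], 0);
 *
 * The completion arrives on the fastpath ring (see the comment above the
 * function), which is why spq_left is the only flow control needed here.
 */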
2379 /* acquire split MCP access lock register */
2380 static int bnx2x_acquire_alr(struct bnx2x
*bp
)
2387 for (j
= 0; j
< i
*10; j
++) {
2389 REG_WR(bp
, GRCBASE_MCP
+ 0x9c, val
);
2390 val
= REG_RD(bp
, GRCBASE_MCP
+ 0x9c);
2391 if (val
& (1L << 31))
2396 if (!(val
& (1L << 31))) {
2397 BNX2X_ERR("Cannot acquire MCP access lock register\n");
2404 /* release split MCP access lock register */
2405 static void bnx2x_release_alr(struct bnx2x
*bp
)
2409 REG_WR(bp
, GRCBASE_MCP
+ 0x9c, val
);
2412 static inline u16
bnx2x_update_dsb_idx(struct bnx2x
*bp
)
2414 struct host_def_status_block
*def_sb
= bp
->def_status_blk
;
2417 barrier(); /* status block is written to by the chip */
2418 if (bp
->def_att_idx
!= def_sb
->atten_status_block
.attn_bits_index
) {
2419 bp
->def_att_idx
= def_sb
->atten_status_block
.attn_bits_index
;
2422 if (bp
->def_c_idx
!= def_sb
->c_def_status_block
.status_block_index
) {
2423 bp
->def_c_idx
= def_sb
->c_def_status_block
.status_block_index
;
2426 if (bp
->def_u_idx
!= def_sb
->u_def_status_block
.status_block_index
) {
2427 bp
->def_u_idx
= def_sb
->u_def_status_block
.status_block_index
;
2430 if (bp
->def_x_idx
!= def_sb
->x_def_status_block
.status_block_index
) {
2431 bp
->def_x_idx
= def_sb
->x_def_status_block
.status_block_index
;
2434 if (bp
->def_t_idx
!= def_sb
->t_def_status_block
.status_block_index
) {
2435 bp
->def_t_idx
= def_sb
->t_def_status_block
.status_block_index
;
2442 * slow path service functions
2445 static void bnx2x_attn_int_asserted(struct bnx2x
*bp
, u32 asserted
)
2447 int port
= BP_PORT(bp
);
2448 u32 hc_addr
= (HC_REG_COMMAND_REG
+ port
*32 +
2449 COMMAND_REG_ATTN_BITS_SET
);
2450 u32 aeu_addr
= port
? MISC_REG_AEU_MASK_ATTN_FUNC_1
:
2451 MISC_REG_AEU_MASK_ATTN_FUNC_0
;
2452 u32 nig_int_mask_addr
= port
? NIG_REG_MASK_INTERRUPT_PORT1
:
2453 NIG_REG_MASK_INTERRUPT_PORT0
;
2456 if (bp
->attn_state
& asserted
)
2457 BNX2X_ERR("IGU ERROR\n");
2459 bnx2x_acquire_hw_lock(bp
, HW_LOCK_RESOURCE_PORT0_ATT_MASK
+ port
);
2460 aeu_mask
= REG_RD(bp
, aeu_addr
);
2462 DP(NETIF_MSG_HW
, "aeu_mask %x newly asserted %x\n",
2463 aeu_mask
, asserted
);
2464 aeu_mask
&= ~(asserted
& 0xff);
2465 DP(NETIF_MSG_HW
, "new mask %x\n", aeu_mask
);
2467 REG_WR(bp
, aeu_addr
, aeu_mask
);
2468 bnx2x_release_hw_lock(bp
, HW_LOCK_RESOURCE_PORT0_ATT_MASK
+ port
);
2470 DP(NETIF_MSG_HW
, "attn_state %x\n", bp
->attn_state
);
2471 bp
->attn_state
|= asserted
;
2472 DP(NETIF_MSG_HW
, "new state %x\n", bp
->attn_state
);
2474 if (asserted
& ATTN_HARD_WIRED_MASK
) {
2475 if (asserted
& ATTN_NIG_FOR_FUNC
) {
2477 /* save nig interrupt mask */
2478 bp
->nig_mask
= REG_RD(bp
, nig_int_mask_addr
);
2479 REG_WR(bp
, nig_int_mask_addr
, 0);
2481 bnx2x_link_attn(bp
);
2483 /* handle unicore attn? */
2485 if (asserted
& ATTN_SW_TIMER_4_FUNC
)
2486 DP(NETIF_MSG_HW
, "ATTN_SW_TIMER_4_FUNC!\n");
2488 if (asserted
& GPIO_2_FUNC
)
2489 DP(NETIF_MSG_HW
, "GPIO_2_FUNC!\n");
2491 if (asserted
& GPIO_3_FUNC
)
2492 DP(NETIF_MSG_HW
, "GPIO_3_FUNC!\n");
2494 if (asserted
& GPIO_4_FUNC
)
2495 DP(NETIF_MSG_HW
, "GPIO_4_FUNC!\n");
2498 if (asserted
& ATTN_GENERAL_ATTN_1
) {
2499 DP(NETIF_MSG_HW
, "ATTN_GENERAL_ATTN_1!\n");
2500 REG_WR(bp
, MISC_REG_AEU_GENERAL_ATTN_1
, 0x0);
2502 if (asserted
& ATTN_GENERAL_ATTN_2
) {
2503 DP(NETIF_MSG_HW
, "ATTN_GENERAL_ATTN_2!\n");
2504 REG_WR(bp
, MISC_REG_AEU_GENERAL_ATTN_2
, 0x0);
2506 if (asserted
& ATTN_GENERAL_ATTN_3
) {
2507 DP(NETIF_MSG_HW
, "ATTN_GENERAL_ATTN_3!\n");
2508 REG_WR(bp
, MISC_REG_AEU_GENERAL_ATTN_3
, 0x0);
2511 if (asserted
& ATTN_GENERAL_ATTN_4
) {
2512 DP(NETIF_MSG_HW
, "ATTN_GENERAL_ATTN_4!\n");
2513 REG_WR(bp
, MISC_REG_AEU_GENERAL_ATTN_4
, 0x0);
2515 if (asserted
& ATTN_GENERAL_ATTN_5
) {
2516 DP(NETIF_MSG_HW
, "ATTN_GENERAL_ATTN_5!\n");
2517 REG_WR(bp
, MISC_REG_AEU_GENERAL_ATTN_5
, 0x0);
2519 if (asserted
& ATTN_GENERAL_ATTN_6
) {
2520 DP(NETIF_MSG_HW
, "ATTN_GENERAL_ATTN_6!\n");
2521 REG_WR(bp
, MISC_REG_AEU_GENERAL_ATTN_6
, 0x0);
2525 } /* if hardwired */
2527 DP(NETIF_MSG_HW
, "about to mask 0x%08x at HC addr 0x%x\n",
2529 REG_WR(bp
, hc_addr
, asserted
);
2531 /* now set back the mask */
2532 if (asserted
& ATTN_NIG_FOR_FUNC
)
2533 REG_WR(bp
, nig_int_mask_addr
, bp
->nig_mask
);
2536 static inline void bnx2x_attn_int_deasserted0(struct bnx2x
*bp
, u32 attn
)
2538 int port
= BP_PORT(bp
);
2542 reg_offset
= (port
? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0
:
2543 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0
);
2545 if (attn
& AEU_INPUTS_ATTN_BITS_SPIO5
) {
2547 val
= REG_RD(bp
, reg_offset
);
2548 val
&= ~AEU_INPUTS_ATTN_BITS_SPIO5
;
2549 REG_WR(bp
, reg_offset
, val
);
2551 BNX2X_ERR("SPIO5 hw attention\n");
2553 switch (bp
->common
.board
& SHARED_HW_CFG_BOARD_TYPE_MASK
) {
2554 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1021G
:
2555 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G
:
2556 /* Fan failure attention */
2558 /* The PHY reset is controlled by GPIO 1 */
2559 bnx2x_set_gpio(bp
, MISC_REGISTERS_GPIO_1
,
2560 MISC_REGISTERS_GPIO_OUTPUT_LOW
, port
);
2561 /* Low power mode is controlled by GPIO 2 */
2562 bnx2x_set_gpio(bp
, MISC_REGISTERS_GPIO_2
,
2563 MISC_REGISTERS_GPIO_OUTPUT_LOW
, port
);
2564 /* mark the failure */
2565 bp
->link_params
.ext_phy_config
&=
2566 ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK
;
2567 bp
->link_params
.ext_phy_config
|=
2568 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE
;
2570 dev_info
.port_hw_config
[port
].
2571 external_phy_config
,
2572 bp
->link_params
.ext_phy_config
);
2573 /* log the failure */
2574 printk(KERN_ERR PFX
"Fan Failure on Network"
2575 " Controller %s has caused the driver to"
2576 " shutdown the card to prevent permanent"
2577 " damage. Please contact Dell Support for"
2578 " assistance\n", bp
->dev
->name
);
2586 if (attn
& HW_INTERRUT_ASSERT_SET_0
) {
2588 val
= REG_RD(bp
, reg_offset
);
2589 val
&= ~(attn
& HW_INTERRUT_ASSERT_SET_0
);
2590 REG_WR(bp
, reg_offset
, val
);
2592 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
2593 (attn
& HW_INTERRUT_ASSERT_SET_0
));
2598 static inline void bnx2x_attn_int_deasserted1(struct bnx2x
*bp
, u32 attn
)
2602 if (attn
& BNX2X_DOORQ_ASSERT
) {
2604 val
= REG_RD(bp
, DORQ_REG_DORQ_INT_STS_CLR
);
2605 BNX2X_ERR("DB hw attention 0x%x\n", val
);
2606 /* DORQ discard attention */
2608 BNX2X_ERR("FATAL error from DORQ\n");
2611 if (attn
& HW_INTERRUT_ASSERT_SET_1
) {
2613 int port
= BP_PORT(bp
);
2616 reg_offset
= (port
? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1
:
2617 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1
);
2619 val
= REG_RD(bp
, reg_offset
);
2620 val
&= ~(attn
& HW_INTERRUT_ASSERT_SET_1
);
2621 REG_WR(bp
, reg_offset
, val
);
2623 BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
2624 (attn
& HW_INTERRUT_ASSERT_SET_1
));
2629 static inline void bnx2x_attn_int_deasserted2(struct bnx2x
*bp
, u32 attn
)
2633 if (attn
& AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT
) {
2635 val
= REG_RD(bp
, CFC_REG_CFC_INT_STS_CLR
);
2636 BNX2X_ERR("CFC hw attention 0x%x\n", val
);
2637 /* CFC error attention */
2639 BNX2X_ERR("FATAL error from CFC\n");
2642 if (attn
& AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT
) {
2644 val
= REG_RD(bp
, PXP_REG_PXP_INT_STS_CLR_0
);
2645 BNX2X_ERR("PXP hw attention 0x%x\n", val
);
2646 /* RQ_USDMDP_FIFO_OVERFLOW */
2648 BNX2X_ERR("FATAL error from PXP\n");
2651 if (attn
& HW_INTERRUT_ASSERT_SET_2
) {
2653 int port
= BP_PORT(bp
);
2656 reg_offset
= (port
? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2
:
2657 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2
);
2659 val
= REG_RD(bp
, reg_offset
);
2660 val
&= ~(attn
& HW_INTERRUT_ASSERT_SET_2
);
2661 REG_WR(bp
, reg_offset
, val
);
2663 BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
2664 (attn
& HW_INTERRUT_ASSERT_SET_2
));
2669 static inline void bnx2x_attn_int_deasserted3(struct bnx2x
*bp
, u32 attn
)
2673 if (attn
& EVEREST_GEN_ATTN_IN_USE_MASK
) {
2675 if (attn
& BNX2X_PMF_LINK_ASSERT
) {
2676 int func
= BP_FUNC(bp
);
2678 REG_WR(bp
, MISC_REG_AEU_GENERAL_ATTN_12
+ func
*4, 0);
2679 bnx2x__link_status_update(bp
);
2680 if (SHMEM_RD(bp
, func_mb
[func
].drv_status
) &
2682 bnx2x_pmf_update(bp
);
2684 } else if (attn
& BNX2X_MC_ASSERT_BITS
) {
2686 BNX2X_ERR("MC assert!\n");
2687 REG_WR(bp
, MISC_REG_AEU_GENERAL_ATTN_10
, 0);
2688 REG_WR(bp
, MISC_REG_AEU_GENERAL_ATTN_9
, 0);
2689 REG_WR(bp
, MISC_REG_AEU_GENERAL_ATTN_8
, 0);
2690 REG_WR(bp
, MISC_REG_AEU_GENERAL_ATTN_7
, 0);
2693 } else if (attn
& BNX2X_MCP_ASSERT
) {
2695 BNX2X_ERR("MCP assert!\n");
2696 REG_WR(bp
, MISC_REG_AEU_GENERAL_ATTN_11
, 0);
2700 BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn
);
2703 if (attn
& EVEREST_LATCHED_ATTN_IN_USE_MASK
) {
2704 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn
);
2705 if (attn
& BNX2X_GRC_TIMEOUT
) {
2706 val
= CHIP_IS_E1H(bp
) ?
2707 REG_RD(bp
, MISC_REG_GRC_TIMEOUT_ATTN
) : 0;
2708 BNX2X_ERR("GRC time-out 0x%08x\n", val
);
2710 if (attn
& BNX2X_GRC_RSV
) {
2711 val
= CHIP_IS_E1H(bp
) ?
2712 REG_RD(bp
, MISC_REG_GRC_RSV_ATTN
) : 0;
2713 BNX2X_ERR("GRC reserved 0x%08x\n", val
);
2715 REG_WR(bp
, MISC_REG_AEU_CLR_LATCH_SIGNAL
, 0x7ff);
2719 static void bnx2x_attn_int_deasserted(struct bnx2x
*bp
, u32 deasserted
)
2721 struct attn_route attn
;
2722 struct attn_route group_mask
;
2723 int port
= BP_PORT(bp
);
2729 /* need to take HW lock because MCP or other port might also
2730 try to handle this event */
2731 bnx2x_acquire_alr(bp
);
2733 attn
.sig
[0] = REG_RD(bp
, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0
+ port
*4);
2734 attn
.sig
[1] = REG_RD(bp
, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0
+ port
*4);
2735 attn
.sig
[2] = REG_RD(bp
, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0
+ port
*4);
2736 attn
.sig
[3] = REG_RD(bp
, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0
+ port
*4);
2737 DP(NETIF_MSG_HW
, "attn: %08x %08x %08x %08x\n",
2738 attn
.sig
[0], attn
.sig
[1], attn
.sig
[2], attn
.sig
[3]);
2740 for (index
= 0; index
< MAX_DYNAMIC_ATTN_GRPS
; index
++) {
2741 if (deasserted
& (1 << index
)) {
2742 group_mask
= bp
->attn_group
[index
];
2744 DP(NETIF_MSG_HW
, "group[%d]: %08x %08x %08x %08x\n",
2745 index
, group_mask
.sig
[0], group_mask
.sig
[1],
2746 group_mask
.sig
[2], group_mask
.sig
[3]);
2748 bnx2x_attn_int_deasserted3(bp
,
2749 attn
.sig
[3] & group_mask
.sig
[3]);
2750 bnx2x_attn_int_deasserted1(bp
,
2751 attn
.sig
[1] & group_mask
.sig
[1]);
2752 bnx2x_attn_int_deasserted2(bp
,
2753 attn
.sig
[2] & group_mask
.sig
[2]);
2754 bnx2x_attn_int_deasserted0(bp
,
2755 attn
.sig
[0] & group_mask
.sig
[0]);
2757 if ((attn
.sig
[0] & group_mask
.sig
[0] &
2758 HW_PRTY_ASSERT_SET_0
) ||
2759 (attn
.sig
[1] & group_mask
.sig
[1] &
2760 HW_PRTY_ASSERT_SET_1
) ||
2761 (attn
.sig
[2] & group_mask
.sig
[2] &
2762 HW_PRTY_ASSERT_SET_2
))
2763 BNX2X_ERR("FATAL HW block parity attention\n");
2767 bnx2x_release_alr(bp
);
2769 reg_addr
= (HC_REG_COMMAND_REG
+ port
*32 + COMMAND_REG_ATTN_BITS_CLR
);
2772 DP(NETIF_MSG_HW
, "about to mask 0x%08x at HC addr 0x%x\n",
2774 REG_WR(bp
, reg_addr
, val
);
2776 if (~bp
->attn_state
& deasserted
)
2777 BNX2X_ERR("IGU ERROR\n");
2779 reg_addr
= port
? MISC_REG_AEU_MASK_ATTN_FUNC_1
:
2780 MISC_REG_AEU_MASK_ATTN_FUNC_0
;
2782 bnx2x_acquire_hw_lock(bp
, HW_LOCK_RESOURCE_PORT0_ATT_MASK
+ port
);
2783 aeu_mask
= REG_RD(bp
, reg_addr
);
2785 DP(NETIF_MSG_HW
, "aeu_mask %x newly deasserted %x\n",
2786 aeu_mask
, deasserted
);
2787 aeu_mask
|= (deasserted
& 0xff);
2788 DP(NETIF_MSG_HW
, "new mask %x\n", aeu_mask
);
2790 REG_WR(bp
, reg_addr
, aeu_mask
);
2791 bnx2x_release_hw_lock(bp
, HW_LOCK_RESOURCE_PORT0_ATT_MASK
+ port
);
2793 DP(NETIF_MSG_HW
, "attn_state %x\n", bp
->attn_state
);
2794 bp
->attn_state
&= ~deasserted
;
2795 DP(NETIF_MSG_HW
, "new state %x\n", bp
->attn_state
);
2798 static void bnx2x_attn_int(struct bnx2x
*bp
)
2800 /* read local copy of bits */
2801 u32 attn_bits
= bp
->def_status_blk
->atten_status_block
.attn_bits
;
2802 u32 attn_ack
= bp
->def_status_blk
->atten_status_block
.attn_bits_ack
;
2803 u32 attn_state
= bp
->attn_state
;
2805 /* look for changed bits */
2806 u32 asserted
= attn_bits
& ~attn_ack
& ~attn_state
;
2807 u32 deasserted
= ~attn_bits
& attn_ack
& attn_state
;
2810 "attn_bits %x attn_ack %x asserted %x deasserted %x\n",
2811 attn_bits
, attn_ack
, asserted
, deasserted
);
2813 if (~(attn_bits
^ attn_ack
) & (attn_bits
^ attn_state
))
2814 BNX2X_ERR("BAD attention state\n");
2816 /* handle bits that were raised */
2818 bnx2x_attn_int_asserted(bp
, asserted
);
2821 bnx2x_attn_int_deasserted(bp
, deasserted
);
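/*
 * Bit arithmetic example (illustrative): with attn_bits = 0x101,
 * attn_ack = 0x001 and attn_state = 0x001,
 *
 *	asserted   = 0x101 & ~0x001 & ~0x001 = 0x100  (newly raised line)
 *	deasserted = ~0x101 & 0x001 &  0x001 = 0x000  (nothing went away)
 *
 * and the sanity check ~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state)
 * evaluates to 0, so no "BAD attention state" is reported.
 */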
2824 static void bnx2x_sp_task(struct work_struct
*work
)
2826 struct bnx2x
*bp
= container_of(work
, struct bnx2x
, sp_task
);
2830 /* Return here if interrupt is disabled */
2831 if (unlikely(atomic_read(&bp
->intr_sem
) != 0)) {
2832 DP(NETIF_MSG_INTR
, "called but intr_sem not 0, returning\n");
2836 status
= bnx2x_update_dsb_idx(bp
);
2837 /* if (status == 0) */
2838 /* BNX2X_ERR("spurious slowpath interrupt!\n"); */
2840 DP(NETIF_MSG_INTR
, "got a slowpath interrupt (updated %x)\n", status
);
2846 /* CStorm events: query_stats, port delete ramrod */
2848 bp
->stats_pending
= 0;
2850 bnx2x_ack_sb(bp
, DEF_SB_ID
, ATTENTION_ID
, bp
->def_att_idx
,
2852 bnx2x_ack_sb(bp
, DEF_SB_ID
, USTORM_ID
, le16_to_cpu(bp
->def_u_idx
),
2854 bnx2x_ack_sb(bp
, DEF_SB_ID
, CSTORM_ID
, le16_to_cpu(bp
->def_c_idx
),
2856 bnx2x_ack_sb(bp
, DEF_SB_ID
, XSTORM_ID
, le16_to_cpu(bp
->def_x_idx
),
2858 bnx2x_ack_sb(bp
, DEF_SB_ID
, TSTORM_ID
, le16_to_cpu(bp
->def_t_idx
),
2863 static irqreturn_t
bnx2x_msix_sp_int(int irq
, void *dev_instance
)
2865 struct net_device
*dev
= dev_instance
;
2866 struct bnx2x
*bp
= netdev_priv(dev
);
2868 /* Return here if interrupt is disabled */
2869 if (unlikely(atomic_read(&bp
->intr_sem
) != 0)) {
2870 DP(NETIF_MSG_INTR
, "called but intr_sem not 0, returning\n");
2874 bnx2x_ack_sb(bp
, DEF_SB_ID
, XSTORM_ID
, 0, IGU_INT_DISABLE
, 0);
2876 #ifdef BNX2X_STOP_ON_ERROR
2877 if (unlikely(bp
->panic
))
2881 schedule_work(&bp
->sp_task
);
2886 /* end of slow path */
2890 /****************************************************************************
2892 ****************************************************************************/
/* sum[hi:lo] += add[hi:lo] */
#define ADD_64(s_hi, a_hi, s_lo, a_lo) \
	do { \
		s_lo += a_lo; \
		s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
	} while (0)

/* difference = minuend - subtrahend */
#define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
	do { \
		if (m_lo < s_lo) { \
			/* underflow */ \
			d_hi = m_hi - s_hi; \
			if (d_hi > 0) { \
				/* we can 'loan' 1 */ \
				d_hi--; \
				d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
			} else { \
				/* m_hi <= s_hi */ \
				d_hi = 0; \
				d_lo = 0; \
			} \
		} else { \
			/* m_lo >= s_lo */ \
			if (m_hi < s_hi) { \
				d_hi = 0; \
				d_lo = 0; \
			} else { \
				/* m_hi >= s_hi */ \
				d_hi = m_hi - s_hi; \
				d_lo = m_lo - s_lo; \
			} \
		} \
	} while (0)

#define UPDATE_STAT64(s, t) \
	do { \
		DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
			diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
		pstats->mac_stx[0].t##_hi = new->s##_hi; \
		pstats->mac_stx[0].t##_lo = new->s##_lo; \
		ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
		       pstats->mac_stx[1].t##_lo, diff.lo); \
	} while (0)

#define UPDATE_STAT64_NIG(s, t) \
	do { \
		DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
			diff.lo, new->s##_lo, old->s##_lo); \
		ADD_64(estats->t##_hi, diff.hi, \
		       estats->t##_lo, diff.lo); \
	} while (0)

/* sum[hi:lo] += add */
#define ADD_EXTEND_64(s_hi, s_lo, a) \
	do { \
		s_lo += a; \
		s_hi += ((s_lo < a) ? 1 : 0); \
	} while (0)

#define UPDATE_EXTEND_STAT(s) \
	do { \
		ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
			      pstats->mac_stx[1].s##_lo, \
			      new->s); \
	} while (0)

#define UPDATE_EXTEND_TSTAT(s, t) \
	do { \
		diff = le32_to_cpu(tclient->s) - old_tclient->s; \
		old_tclient->s = le32_to_cpu(tclient->s); \
		ADD_EXTEND_64(fstats->t##_hi, fstats->t##_lo, diff); \
	} while (0)

#define UPDATE_EXTEND_XSTAT(s, t) \
	do { \
		diff = le32_to_cpu(xclient->s) - old_xclient->s; \
		old_xclient->s = le32_to_cpu(xclient->s); \
		ADD_EXTEND_64(fstats->t##_hi, fstats->t##_lo, diff); \
	} while (0)
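/*
 * The statistics are kept as hi/lo u32 pairs because the hardware counters
 * are 32 bit and wrap. A worked example of the carry handling (illustrative
 * only): starting from hi:lo = 0x00000001:0xFFFFFFFF and adding 2 with
 * ADD_EXTEND_64 gives
 *
 *	s_lo = 0xFFFFFFFF + 2 = 0x00000001	(wraps)
 *	s_hi = 0x00000001 + 1 = 0x00000002	(s_lo < a, so carry)
 *
 * i.e. 0x00000002:00000001, which is 0x1FFFFFFFF + 2 as expected.
 */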
2976 * General service functions
static inline long bnx2x_hilo(u32 *hiref)
{
	u32 lo = *(hiref + 1);
#if (BITS_PER_LONG == 64)
	u32 hi = *hiref;

	return HILO_U64(hi, lo);
#else
	return lo;
#endif
}
2992 * Init service functions
static void bnx2x_storm_stats_post(struct bnx2x *bp)
{
	if (!bp->stats_pending) {
		struct eth_query_ramrod_data ramrod_data = {0};
		int rc;

		ramrod_data.drv_counter = bp->stats_counter++;
		ramrod_data.collect_port_1b = bp->port.pmf ? 1 : 0;
		ramrod_data.ctr_id_vector = (1 << BP_CL_ID(bp));

		rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
				   ((u32 *)&ramrod_data)[1],
				   ((u32 *)&ramrod_data)[0], 0);
		if (rc == 0) {
			/* stats ramrod has its own slot on the spq */
			bp->spq_left++;
			bp->stats_pending = 1;
		}
	}
}
3016 static void bnx2x_stats_init(struct bnx2x
*bp
)
3018 int port
= BP_PORT(bp
);
3020 bp
->executer_idx
= 0;
3021 bp
->stats_counter
= 0;
3025 bp
->port
.port_stx
= SHMEM_RD(bp
, port_mb
[port
].port_stx
);
3027 bp
->port
.port_stx
= 0;
3028 DP(BNX2X_MSG_STATS
, "port_stx 0x%x\n", bp
->port
.port_stx
);
3030 memset(&(bp
->port
.old_nig_stats
), 0, sizeof(struct nig_stats
));
3031 bp
->port
.old_nig_stats
.brb_discard
=
3032 REG_RD(bp
, NIG_REG_STAT0_BRB_DISCARD
+ port
*0x38);
3033 bp
->port
.old_nig_stats
.brb_truncate
=
3034 REG_RD(bp
, NIG_REG_STAT0_BRB_TRUNCATE
+ port
*0x38);
3035 REG_RD_DMAE(bp
, NIG_REG_STAT0_EGRESS_MAC_PKT0
+ port
*0x50,
3036 &(bp
->port
.old_nig_stats
.egress_mac_pkt0_lo
), 2);
3037 REG_RD_DMAE(bp
, NIG_REG_STAT0_EGRESS_MAC_PKT1
+ port
*0x50,
3038 &(bp
->port
.old_nig_stats
.egress_mac_pkt1_lo
), 2);
3040 /* function stats */
3041 memset(&bp
->dev
->stats
, 0, sizeof(struct net_device_stats
));
3042 memset(&bp
->old_tclient
, 0, sizeof(struct tstorm_per_client_stats
));
3043 memset(&bp
->old_xclient
, 0, sizeof(struct xstorm_per_client_stats
));
3044 memset(&bp
->eth_stats
, 0, sizeof(struct bnx2x_eth_stats
));
3046 bp
->stats_state
= STATS_STATE_DISABLED
;
3047 if (IS_E1HMF(bp
) && bp
->port
.pmf
&& bp
->port
.port_stx
)
3048 bnx2x_stats_handle(bp
, STATS_EVENT_PMF
);
3051 static void bnx2x_hw_stats_post(struct bnx2x
*bp
)
3053 struct dmae_command
*dmae
= &bp
->stats_dmae
;
3054 u32
*stats_comp
= bnx2x_sp(bp
, stats_comp
);
3056 *stats_comp
= DMAE_COMP_VAL
;
3059 if (bp
->executer_idx
) {
3060 int loader_idx
= PMF_DMAE_C(bp
);
3062 memset(dmae
, 0, sizeof(struct dmae_command
));
3064 dmae
->opcode
= (DMAE_CMD_SRC_PCI
| DMAE_CMD_DST_GRC
|
3065 DMAE_CMD_C_DST_GRC
| DMAE_CMD_C_ENABLE
|
3066 DMAE_CMD_DST_RESET
|
3068 DMAE_CMD_ENDIANITY_B_DW_SWAP
|
3070 DMAE_CMD_ENDIANITY_DW_SWAP
|
3072 (BP_PORT(bp
) ? DMAE_CMD_PORT_1
:
3074 (BP_E1HVN(bp
) << DMAE_CMD_E1HVN_SHIFT
));
3075 dmae
->src_addr_lo
= U64_LO(bnx2x_sp_mapping(bp
, dmae
[0]));
3076 dmae
->src_addr_hi
= U64_HI(bnx2x_sp_mapping(bp
, dmae
[0]));
3077 dmae
->dst_addr_lo
= (DMAE_REG_CMD_MEM
+
3078 sizeof(struct dmae_command
) *
3079 (loader_idx
+ 1)) >> 2;
3080 dmae
->dst_addr_hi
= 0;
3081 dmae
->len
= sizeof(struct dmae_command
) >> 2;
3084 dmae
->comp_addr_lo
= dmae_reg_go_c
[loader_idx
+ 1] >> 2;
3085 dmae
->comp_addr_hi
= 0;
3089 bnx2x_post_dmae(bp
, dmae
, loader_idx
);
3091 } else if (bp
->func_stx
) {
3093 bnx2x_post_dmae(bp
, dmae
, INIT_DMAE_C(bp
));
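/*
 * Note on the DMAE command chaining above (descriptive reading, not a spec):
 * the commands queued by the *_stats_init routines complete into DMAE GO
 * registers (dmae_reg_go_c[loader_idx]), so finishing one transfer kicks off
 * the next without CPU involvement; only the final command in a chain
 * completes into host memory (the stats_comp word), which bnx2x_stats_comp()
 * then polls for DMAE_COMP_VAL.
 */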
3097 static int bnx2x_stats_comp(struct bnx2x
*bp
)
3099 u32
*stats_comp
= bnx2x_sp(bp
, stats_comp
);
3103 while (*stats_comp
!= DMAE_COMP_VAL
) {
3105 BNX2X_ERR("timeout waiting for stats finished\n");
3115 * Statistics service functions
3118 static void bnx2x_stats_pmf_update(struct bnx2x
*bp
)
3120 struct dmae_command
*dmae
;
3122 int loader_idx
= PMF_DMAE_C(bp
);
3123 u32
*stats_comp
= bnx2x_sp(bp
, stats_comp
);
3126 if (!IS_E1HMF(bp
) || !bp
->port
.pmf
|| !bp
->port
.port_stx
) {
3127 BNX2X_ERR("BUG!\n");
3131 bp
->executer_idx
= 0;
3133 opcode
= (DMAE_CMD_SRC_GRC
| DMAE_CMD_DST_PCI
|
3135 DMAE_CMD_SRC_RESET
| DMAE_CMD_DST_RESET
|
3137 DMAE_CMD_ENDIANITY_B_DW_SWAP
|
3139 DMAE_CMD_ENDIANITY_DW_SWAP
|
3141 (BP_PORT(bp
) ? DMAE_CMD_PORT_1
: DMAE_CMD_PORT_0
) |
3142 (BP_E1HVN(bp
) << DMAE_CMD_E1HVN_SHIFT
));
3144 dmae
= bnx2x_sp(bp
, dmae
[bp
->executer_idx
++]);
3145 dmae
->opcode
= (opcode
| DMAE_CMD_C_DST_GRC
);
3146 dmae
->src_addr_lo
= bp
->port
.port_stx
>> 2;
3147 dmae
->src_addr_hi
= 0;
3148 dmae
->dst_addr_lo
= U64_LO(bnx2x_sp_mapping(bp
, port_stats
));
3149 dmae
->dst_addr_hi
= U64_HI(bnx2x_sp_mapping(bp
, port_stats
));
3150 dmae
->len
= DMAE_LEN32_RD_MAX
;
3151 dmae
->comp_addr_lo
= dmae_reg_go_c
[loader_idx
] >> 2;
3152 dmae
->comp_addr_hi
= 0;
3155 dmae
= bnx2x_sp(bp
, dmae
[bp
->executer_idx
++]);
3156 dmae
->opcode
= (opcode
| DMAE_CMD_C_DST_PCI
);
3157 dmae
->src_addr_lo
= (bp
->port
.port_stx
>> 2) + DMAE_LEN32_RD_MAX
;
3158 dmae
->src_addr_hi
= 0;
3159 dmae
->dst_addr_lo
= U64_LO(bnx2x_sp_mapping(bp
, port_stats
) +
3160 DMAE_LEN32_RD_MAX
* 4);
3161 dmae
->dst_addr_hi
= U64_HI(bnx2x_sp_mapping(bp
, port_stats
) +
3162 DMAE_LEN32_RD_MAX
* 4);
3163 dmae
->len
= (sizeof(struct host_port_stats
) >> 2) - DMAE_LEN32_RD_MAX
;
3164 dmae
->comp_addr_lo
= U64_LO(bnx2x_sp_mapping(bp
, stats_comp
));
3165 dmae
->comp_addr_hi
= U64_HI(bnx2x_sp_mapping(bp
, stats_comp
));
3166 dmae
->comp_val
= DMAE_COMP_VAL
;
3169 bnx2x_hw_stats_post(bp
);
3170 bnx2x_stats_comp(bp
);
3173 static void bnx2x_port_stats_init(struct bnx2x
*bp
)
3175 struct dmae_command
*dmae
;
3176 int port
= BP_PORT(bp
);
3177 int vn
= BP_E1HVN(bp
);
3179 int loader_idx
= PMF_DMAE_C(bp
);
3181 u32
*stats_comp
= bnx2x_sp(bp
, stats_comp
);
3184 if (!bp
->link_vars
.link_up
|| !bp
->port
.pmf
) {
3185 BNX2X_ERR("BUG!\n");
3189 bp
->executer_idx
= 0;
3192 opcode
= (DMAE_CMD_SRC_PCI
| DMAE_CMD_DST_GRC
|
3193 DMAE_CMD_C_DST_GRC
| DMAE_CMD_C_ENABLE
|
3194 DMAE_CMD_SRC_RESET
| DMAE_CMD_DST_RESET
|
3196 DMAE_CMD_ENDIANITY_B_DW_SWAP
|
3198 DMAE_CMD_ENDIANITY_DW_SWAP
|
3200 (port
? DMAE_CMD_PORT_1
: DMAE_CMD_PORT_0
) |
3201 (vn
<< DMAE_CMD_E1HVN_SHIFT
));
3203 if (bp
->port
.port_stx
) {
3205 dmae
= bnx2x_sp(bp
, dmae
[bp
->executer_idx
++]);
3206 dmae
->opcode
= opcode
;
3207 dmae
->src_addr_lo
= U64_LO(bnx2x_sp_mapping(bp
, port_stats
));
3208 dmae
->src_addr_hi
= U64_HI(bnx2x_sp_mapping(bp
, port_stats
));
3209 dmae
->dst_addr_lo
= bp
->port
.port_stx
>> 2;
3210 dmae
->dst_addr_hi
= 0;
3211 dmae
->len
= sizeof(struct host_port_stats
) >> 2;
3212 dmae
->comp_addr_lo
= dmae_reg_go_c
[loader_idx
] >> 2;
3213 dmae
->comp_addr_hi
= 0;
3219 dmae
= bnx2x_sp(bp
, dmae
[bp
->executer_idx
++]);
3220 dmae
->opcode
= opcode
;
3221 dmae
->src_addr_lo
= U64_LO(bnx2x_sp_mapping(bp
, func_stats
));
3222 dmae
->src_addr_hi
= U64_HI(bnx2x_sp_mapping(bp
, func_stats
));
3223 dmae
->dst_addr_lo
= bp
->func_stx
>> 2;
3224 dmae
->dst_addr_hi
= 0;
3225 dmae
->len
= sizeof(struct host_func_stats
) >> 2;
3226 dmae
->comp_addr_lo
= dmae_reg_go_c
[loader_idx
] >> 2;
3227 dmae
->comp_addr_hi
= 0;
3232 opcode
= (DMAE_CMD_SRC_GRC
| DMAE_CMD_DST_PCI
|
3233 DMAE_CMD_C_DST_GRC
| DMAE_CMD_C_ENABLE
|
3234 DMAE_CMD_SRC_RESET
| DMAE_CMD_DST_RESET
|
3236 DMAE_CMD_ENDIANITY_B_DW_SWAP
|
3238 DMAE_CMD_ENDIANITY_DW_SWAP
|
3240 (port
? DMAE_CMD_PORT_1
: DMAE_CMD_PORT_0
) |
3241 (vn
<< DMAE_CMD_E1HVN_SHIFT
));
3243 if (bp
->link_vars
.mac_type
== MAC_TYPE_BMAC
) {
3245 mac_addr
= (port
? NIG_REG_INGRESS_BMAC1_MEM
:
3246 NIG_REG_INGRESS_BMAC0_MEM
);
3248 /* BIGMAC_REGISTER_TX_STAT_GTPKT ..
3249 BIGMAC_REGISTER_TX_STAT_GTBYT */
3250 dmae
= bnx2x_sp(bp
, dmae
[bp
->executer_idx
++]);
3251 dmae
->opcode
= opcode
;
3252 dmae
->src_addr_lo
= (mac_addr
+
3253 BIGMAC_REGISTER_TX_STAT_GTPKT
) >> 2;
3254 dmae
->src_addr_hi
= 0;
3255 dmae
->dst_addr_lo
= U64_LO(bnx2x_sp_mapping(bp
, mac_stats
));
3256 dmae
->dst_addr_hi
= U64_HI(bnx2x_sp_mapping(bp
, mac_stats
));
3257 dmae
->len
= (8 + BIGMAC_REGISTER_TX_STAT_GTBYT
-
3258 BIGMAC_REGISTER_TX_STAT_GTPKT
) >> 2;
3259 dmae
->comp_addr_lo
= dmae_reg_go_c
[loader_idx
] >> 2;
3260 dmae
->comp_addr_hi
= 0;
3263 /* BIGMAC_REGISTER_RX_STAT_GR64 ..
3264 BIGMAC_REGISTER_RX_STAT_GRIPJ */
3265 dmae
= bnx2x_sp(bp
, dmae
[bp
->executer_idx
++]);
3266 dmae
->opcode
= opcode
;
3267 dmae
->src_addr_lo
= (mac_addr
+
3268 BIGMAC_REGISTER_RX_STAT_GR64
) >> 2;
3269 dmae
->src_addr_hi
= 0;
3270 dmae
->dst_addr_lo
= U64_LO(bnx2x_sp_mapping(bp
, mac_stats
) +
3271 offsetof(struct bmac_stats
, rx_stat_gr64_lo
));
3272 dmae
->dst_addr_hi
= U64_HI(bnx2x_sp_mapping(bp
, mac_stats
) +
3273 offsetof(struct bmac_stats
, rx_stat_gr64_lo
));
3274 dmae
->len
= (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ
-
3275 BIGMAC_REGISTER_RX_STAT_GR64
) >> 2;
3276 dmae
->comp_addr_lo
= dmae_reg_go_c
[loader_idx
] >> 2;
3277 dmae
->comp_addr_hi
= 0;
3280 } else if (bp
->link_vars
.mac_type
== MAC_TYPE_EMAC
) {
3282 mac_addr
= (port
? GRCBASE_EMAC1
: GRCBASE_EMAC0
);
3284 /* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
3285 dmae
= bnx2x_sp(bp
, dmae
[bp
->executer_idx
++]);
3286 dmae
->opcode
= opcode
;
3287 dmae
->src_addr_lo
= (mac_addr
+
3288 EMAC_REG_EMAC_RX_STAT_AC
) >> 2;
3289 dmae
->src_addr_hi
= 0;
3290 dmae
->dst_addr_lo
= U64_LO(bnx2x_sp_mapping(bp
, mac_stats
));
3291 dmae
->dst_addr_hi
= U64_HI(bnx2x_sp_mapping(bp
, mac_stats
));
3292 dmae
->len
= EMAC_REG_EMAC_RX_STAT_AC_COUNT
;
3293 dmae
->comp_addr_lo
= dmae_reg_go_c
[loader_idx
] >> 2;
3294 dmae
->comp_addr_hi
= 0;
3297 /* EMAC_REG_EMAC_RX_STAT_AC_28 */
3298 dmae
= bnx2x_sp(bp
, dmae
[bp
->executer_idx
++]);
3299 dmae
->opcode
= opcode
;
3300 dmae
->src_addr_lo
= (mac_addr
+
3301 EMAC_REG_EMAC_RX_STAT_AC_28
) >> 2;
3302 dmae
->src_addr_hi
= 0;
3303 dmae
->dst_addr_lo
= U64_LO(bnx2x_sp_mapping(bp
, mac_stats
) +
3304 offsetof(struct emac_stats
, rx_stat_falsecarriererrors
));
3305 dmae
->dst_addr_hi
= U64_HI(bnx2x_sp_mapping(bp
, mac_stats
) +
3306 offsetof(struct emac_stats
, rx_stat_falsecarriererrors
));
3308 dmae
->comp_addr_lo
= dmae_reg_go_c
[loader_idx
] >> 2;
3309 dmae
->comp_addr_hi
= 0;
3312 /* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
3313 dmae
= bnx2x_sp(bp
, dmae
[bp
->executer_idx
++]);
3314 dmae
->opcode
= opcode
;
3315 dmae
->src_addr_lo
= (mac_addr
+
3316 EMAC_REG_EMAC_TX_STAT_AC
) >> 2;
3317 dmae
->src_addr_hi
= 0;
3318 dmae
->dst_addr_lo
= U64_LO(bnx2x_sp_mapping(bp
, mac_stats
) +
3319 offsetof(struct emac_stats
, tx_stat_ifhcoutoctets
));
3320 dmae
->dst_addr_hi
= U64_HI(bnx2x_sp_mapping(bp
, mac_stats
) +
3321 offsetof(struct emac_stats
, tx_stat_ifhcoutoctets
));
3322 dmae
->len
= EMAC_REG_EMAC_TX_STAT_AC_COUNT
;
3323 dmae
->comp_addr_lo
= dmae_reg_go_c
[loader_idx
] >> 2;
3324 dmae
->comp_addr_hi
= 0;
3329 dmae
= bnx2x_sp(bp
, dmae
[bp
->executer_idx
++]);
3330 dmae
->opcode
= opcode
;
3331 dmae
->src_addr_lo
= (port
? NIG_REG_STAT1_BRB_DISCARD
:
3332 NIG_REG_STAT0_BRB_DISCARD
) >> 2;
3333 dmae
->src_addr_hi
= 0;
3334 dmae
->dst_addr_lo
= U64_LO(bnx2x_sp_mapping(bp
, nig_stats
));
3335 dmae
->dst_addr_hi
= U64_HI(bnx2x_sp_mapping(bp
, nig_stats
));
3336 dmae
->len
= (sizeof(struct nig_stats
) - 4*sizeof(u32
)) >> 2;
3337 dmae
->comp_addr_lo
= dmae_reg_go_c
[loader_idx
] >> 2;
3338 dmae
->comp_addr_hi
= 0;
3341 dmae
= bnx2x_sp(bp
, dmae
[bp
->executer_idx
++]);
3342 dmae
->opcode
= opcode
;
3343 dmae
->src_addr_lo
= (port
? NIG_REG_STAT1_EGRESS_MAC_PKT0
:
3344 NIG_REG_STAT0_EGRESS_MAC_PKT0
) >> 2;
3345 dmae
->src_addr_hi
= 0;
3346 dmae
->dst_addr_lo
= U64_LO(bnx2x_sp_mapping(bp
, nig_stats
) +
3347 offsetof(struct nig_stats
, egress_mac_pkt0_lo
));
3348 dmae
->dst_addr_hi
= U64_HI(bnx2x_sp_mapping(bp
, nig_stats
) +
3349 offsetof(struct nig_stats
, egress_mac_pkt0_lo
));
3350 dmae
->len
= (2*sizeof(u32
)) >> 2;
3351 dmae
->comp_addr_lo
= dmae_reg_go_c
[loader_idx
] >> 2;
3352 dmae
->comp_addr_hi
= 0;
3355 dmae
= bnx2x_sp(bp
, dmae
[bp
->executer_idx
++]);
3356 dmae
->opcode
= (DMAE_CMD_SRC_GRC
| DMAE_CMD_DST_PCI
|
3357 DMAE_CMD_C_DST_PCI
| DMAE_CMD_C_ENABLE
|
3358 DMAE_CMD_SRC_RESET
| DMAE_CMD_DST_RESET
|
3360 DMAE_CMD_ENDIANITY_B_DW_SWAP
|
3362 DMAE_CMD_ENDIANITY_DW_SWAP
|
3364 (port
? DMAE_CMD_PORT_1
: DMAE_CMD_PORT_0
) |
3365 (vn
<< DMAE_CMD_E1HVN_SHIFT
));
3366 dmae
->src_addr_lo
= (port
? NIG_REG_STAT1_EGRESS_MAC_PKT1
:
3367 NIG_REG_STAT0_EGRESS_MAC_PKT1
) >> 2;
3368 dmae
->src_addr_hi
= 0;
3369 dmae
->dst_addr_lo
= U64_LO(bnx2x_sp_mapping(bp
, nig_stats
) +
3370 offsetof(struct nig_stats
, egress_mac_pkt1_lo
));
3371 dmae
->dst_addr_hi
= U64_HI(bnx2x_sp_mapping(bp
, nig_stats
) +
3372 offsetof(struct nig_stats
, egress_mac_pkt1_lo
));
3373 dmae
->len
= (2*sizeof(u32
)) >> 2;
3374 dmae
->comp_addr_lo
= U64_LO(bnx2x_sp_mapping(bp
, stats_comp
));
3375 dmae
->comp_addr_hi
= U64_HI(bnx2x_sp_mapping(bp
, stats_comp
));
3376 dmae
->comp_val
= DMAE_COMP_VAL
;
3381 static void bnx2x_func_stats_init(struct bnx2x
*bp
)
3383 struct dmae_command
*dmae
= &bp
->stats_dmae
;
3384 u32
*stats_comp
= bnx2x_sp(bp
, stats_comp
);
3387 if (!bp
->func_stx
) {
3388 BNX2X_ERR("BUG!\n");
3392 bp
->executer_idx
= 0;
3393 memset(dmae
, 0, sizeof(struct dmae_command
));
3395 dmae
->opcode
= (DMAE_CMD_SRC_PCI
| DMAE_CMD_DST_GRC
|
3396 DMAE_CMD_C_DST_PCI
| DMAE_CMD_C_ENABLE
|
3397 DMAE_CMD_SRC_RESET
| DMAE_CMD_DST_RESET
|
3399 DMAE_CMD_ENDIANITY_B_DW_SWAP
|
3401 DMAE_CMD_ENDIANITY_DW_SWAP
|
3403 (BP_PORT(bp
) ? DMAE_CMD_PORT_1
: DMAE_CMD_PORT_0
) |
3404 (BP_E1HVN(bp
) << DMAE_CMD_E1HVN_SHIFT
));
3405 dmae
->src_addr_lo
= U64_LO(bnx2x_sp_mapping(bp
, func_stats
));
3406 dmae
->src_addr_hi
= U64_HI(bnx2x_sp_mapping(bp
, func_stats
));
3407 dmae
->dst_addr_lo
= bp
->func_stx
>> 2;
3408 dmae
->dst_addr_hi
= 0;
3409 dmae
->len
= sizeof(struct host_func_stats
) >> 2;
3410 dmae
->comp_addr_lo
= U64_LO(bnx2x_sp_mapping(bp
, stats_comp
));
3411 dmae
->comp_addr_hi
= U64_HI(bnx2x_sp_mapping(bp
, stats_comp
));
3412 dmae
->comp_val
= DMAE_COMP_VAL
;
3417 static void bnx2x_stats_start(struct bnx2x
*bp
)
3420 bnx2x_port_stats_init(bp
);
3422 else if (bp
->func_stx
)
3423 bnx2x_func_stats_init(bp
);
3425 bnx2x_hw_stats_post(bp
);
3426 bnx2x_storm_stats_post(bp
);
3429 static void bnx2x_stats_pmf_start(struct bnx2x
*bp
)
3431 bnx2x_stats_comp(bp
);
3432 bnx2x_stats_pmf_update(bp
);
3433 bnx2x_stats_start(bp
);
3436 static void bnx2x_stats_restart(struct bnx2x
*bp
)
3438 bnx2x_stats_comp(bp
);
3439 bnx2x_stats_start(bp
);
3442 static void bnx2x_bmac_stats_update(struct bnx2x
*bp
)
3444 struct bmac_stats
*new = bnx2x_sp(bp
, mac_stats
.bmac_stats
);
3445 struct host_port_stats
*pstats
= bnx2x_sp(bp
, port_stats
);
3446 struct regpair diff
;
3448 UPDATE_STAT64(rx_stat_grerb
, rx_stat_ifhcinbadoctets
);
3449 UPDATE_STAT64(rx_stat_grfcs
, rx_stat_dot3statsfcserrors
);
3450 UPDATE_STAT64(rx_stat_grund
, rx_stat_etherstatsundersizepkts
);
3451 UPDATE_STAT64(rx_stat_grovr
, rx_stat_dot3statsframestoolong
);
3452 UPDATE_STAT64(rx_stat_grfrg
, rx_stat_etherstatsfragments
);
3453 UPDATE_STAT64(rx_stat_grjbr
, rx_stat_etherstatsjabbers
);
3454 UPDATE_STAT64(rx_stat_grxcf
, rx_stat_maccontrolframesreceived
);
3455 UPDATE_STAT64(rx_stat_grxpf
, rx_stat_xoffstateentered
);
3456 UPDATE_STAT64(rx_stat_grxpf
, rx_stat_xoffpauseframesreceived
);
3457 UPDATE_STAT64(tx_stat_gtxpf
, tx_stat_outxoffsent
);
3458 UPDATE_STAT64(tx_stat_gtxpf
, tx_stat_flowcontroldone
);
3459 UPDATE_STAT64(tx_stat_gt64
, tx_stat_etherstatspkts64octets
);
3460 UPDATE_STAT64(tx_stat_gt127
,
3461 tx_stat_etherstatspkts65octetsto127octets
);
3462 UPDATE_STAT64(tx_stat_gt255
,
3463 tx_stat_etherstatspkts128octetsto255octets
);
3464 UPDATE_STAT64(tx_stat_gt511
,
3465 tx_stat_etherstatspkts256octetsto511octets
);
3466 UPDATE_STAT64(tx_stat_gt1023
,
3467 tx_stat_etherstatspkts512octetsto1023octets
);
3468 UPDATE_STAT64(tx_stat_gt1518
,
3469 tx_stat_etherstatspkts1024octetsto1522octets
);
3470 UPDATE_STAT64(tx_stat_gt2047
, tx_stat_bmac_2047
);
3471 UPDATE_STAT64(tx_stat_gt4095
, tx_stat_bmac_4095
);
3472 UPDATE_STAT64(tx_stat_gt9216
, tx_stat_bmac_9216
);
3473 UPDATE_STAT64(tx_stat_gt16383
, tx_stat_bmac_16383
);
3474 UPDATE_STAT64(tx_stat_gterr
,
3475 tx_stat_dot3statsinternalmactransmiterrors
);
3476 UPDATE_STAT64(tx_stat_gtufl
, tx_stat_bmac_ufl
);
3479 static void bnx2x_emac_stats_update(struct bnx2x
*bp
)
3481 struct emac_stats
*new = bnx2x_sp(bp
, mac_stats
.emac_stats
);
3482 struct host_port_stats
*pstats
= bnx2x_sp(bp
, port_stats
);
3484 UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets
);
3485 UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets
);
3486 UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors
);
3487 UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors
);
3488 UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors
);
3489 UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors
);
3490 UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts
);
3491 UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong
);
3492 UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments
);
3493 UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers
);
3494 UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived
);
3495 UPDATE_EXTEND_STAT(rx_stat_xoffstateentered
);
3496 UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived
);
3497 UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived
);
3498 UPDATE_EXTEND_STAT(tx_stat_outxonsent
);
3499 UPDATE_EXTEND_STAT(tx_stat_outxoffsent
);
3500 UPDATE_EXTEND_STAT(tx_stat_flowcontroldone
);
3501 UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions
);
3502 UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes
);
3503 UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes
);
3504 UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions
);
3505 UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions
);
3506 UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions
);
3507 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets
);
3508 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets
);
3509 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets
);
3510 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets
);
3511 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets
);
3512 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets
);
3513 UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets
);
3514 UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors
);
3517 static int bnx2x_hw_stats_update(struct bnx2x
*bp
)
3519 struct nig_stats
*new = bnx2x_sp(bp
, nig_stats
);
3520 struct nig_stats
*old
= &(bp
->port
.old_nig_stats
);
3521 struct host_port_stats
*pstats
= bnx2x_sp(bp
, port_stats
);
3522 struct bnx2x_eth_stats
*estats
= &bp
->eth_stats
;
3523 struct regpair diff
;
3525 if (bp
->link_vars
.mac_type
== MAC_TYPE_BMAC
)
3526 bnx2x_bmac_stats_update(bp
);
3528 else if (bp
->link_vars
.mac_type
== MAC_TYPE_EMAC
)
3529 bnx2x_emac_stats_update(bp
);
3531 else { /* unreached */
3532 BNX2X_ERR("stats updated by dmae but no MAC active\n");
3536 ADD_EXTEND_64(pstats
->brb_drop_hi
, pstats
->brb_drop_lo
,
3537 new->brb_discard
- old
->brb_discard
);
3538 ADD_EXTEND_64(estats
->brb_truncate_hi
, estats
->brb_truncate_lo
,
3539 new->brb_truncate
- old
->brb_truncate
);
3541 UPDATE_STAT64_NIG(egress_mac_pkt0
,
3542 etherstatspkts1024octetsto1522octets
);
3543 UPDATE_STAT64_NIG(egress_mac_pkt1
, etherstatspktsover1522octets
);
3545 memcpy(old
, new, sizeof(struct nig_stats
));
3547 memcpy(&(estats
->rx_stat_ifhcinbadoctets_hi
), &(pstats
->mac_stx
[1]),
3548 sizeof(struct mac_stx
));
3549 estats
->brb_drop_hi
= pstats
->brb_drop_hi
;
3550 estats
->brb_drop_lo
= pstats
->brb_drop_lo
;
3552 pstats
->host_port_stats_start
= ++pstats
->host_port_stats_end
;
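/*
 * host_port_stats_start/host_port_stats_end bracket the snapshot: end is
 * bumped first and start is copied from it, so after a complete update the
 * two are equal. A reader that sees start != end presumably knows it raced
 * with an update in progress and should retry (descriptive note only; the
 * consumer of this convention is outside this driver).
 */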
3557 static int bnx2x_storm_stats_update(struct bnx2x
*bp
)
3559 struct eth_stats_query
*stats
= bnx2x_sp(bp
, fw_stats
);
3560 int cl_id
= BP_CL_ID(bp
);
3561 struct tstorm_per_port_stats
*tport
=
3562 &stats
->tstorm_common
.port_statistics
;
3563 struct tstorm_per_client_stats
*tclient
=
3564 &stats
->tstorm_common
.client_statistics
[cl_id
];
3565 struct tstorm_per_client_stats
*old_tclient
= &bp
->old_tclient
;
3566 struct xstorm_per_client_stats
*xclient
=
3567 &stats
->xstorm_common
.client_statistics
[cl_id
];
3568 struct xstorm_per_client_stats
*old_xclient
= &bp
->old_xclient
;
3569 struct host_func_stats
*fstats
= bnx2x_sp(bp
, func_stats
);
3570 struct bnx2x_eth_stats
*estats
= &bp
->eth_stats
;
3573 /* are storm stats valid? */
3574 if ((u16
)(le16_to_cpu(tclient
->stats_counter
) + 1) !=
3575 bp
->stats_counter
) {
3576 DP(BNX2X_MSG_STATS
, "stats not updated by tstorm"
3577 " tstorm counter (%d) != stats_counter (%d)\n",
3578 tclient
->stats_counter
, bp
->stats_counter
);
3581 if ((u16
)(le16_to_cpu(xclient
->stats_counter
) + 1) !=
3582 bp
->stats_counter
) {
3583 DP(BNX2X_MSG_STATS
, "stats not updated by xstorm"
3584 " xstorm counter (%d) != stats_counter (%d)\n",
3585 xclient
->stats_counter
, bp
->stats_counter
);
3589 fstats
->total_bytes_received_hi
=
3590 fstats
->valid_bytes_received_hi
=
3591 le32_to_cpu(tclient
->total_rcv_bytes
.hi
);
3592 fstats
->total_bytes_received_lo
=
3593 fstats
->valid_bytes_received_lo
=
3594 le32_to_cpu(tclient
->total_rcv_bytes
.lo
);
3596 estats
->error_bytes_received_hi
=
3597 le32_to_cpu(tclient
->rcv_error_bytes
.hi
);
3598 estats
->error_bytes_received_lo
=
3599 le32_to_cpu(tclient
->rcv_error_bytes
.lo
);
3600 ADD_64(estats
->error_bytes_received_hi
,
3601 estats
->rx_stat_ifhcinbadoctets_hi
,
3602 estats
->error_bytes_received_lo
,
3603 estats
->rx_stat_ifhcinbadoctets_lo
);
3605 ADD_64(fstats
->total_bytes_received_hi
,
3606 estats
->error_bytes_received_hi
,
3607 fstats
->total_bytes_received_lo
,
3608 estats
->error_bytes_received_lo
);
3610 UPDATE_EXTEND_TSTAT(rcv_unicast_pkts
, total_unicast_packets_received
);
3611 UPDATE_EXTEND_TSTAT(rcv_multicast_pkts
,
3612 total_multicast_packets_received
);
3613 UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts
,
3614 total_broadcast_packets_received
);
3616 fstats
->total_bytes_transmitted_hi
=
3617 le32_to_cpu(xclient
->total_sent_bytes
.hi
);
3618 fstats
->total_bytes_transmitted_lo
=
3619 le32_to_cpu(xclient
->total_sent_bytes
.lo
);
3621 UPDATE_EXTEND_XSTAT(unicast_pkts_sent
,
3622 total_unicast_packets_transmitted
);
3623 UPDATE_EXTEND_XSTAT(multicast_pkts_sent
,
3624 total_multicast_packets_transmitted
);
3625 UPDATE_EXTEND_XSTAT(broadcast_pkts_sent
,
3626 total_broadcast_packets_transmitted
);
3628 memcpy(estats
, &(fstats
->total_bytes_received_hi
),
3629 sizeof(struct host_func_stats
) - 2*sizeof(u32
));
3631 estats
->mac_filter_discard
= le32_to_cpu(tport
->mac_filter_discard
);
3632 estats
->xxoverflow_discard
= le32_to_cpu(tport
->xxoverflow_discard
);
3633 estats
->brb_truncate_discard
=
3634 le32_to_cpu(tport
->brb_truncate_discard
);
3635 estats
->mac_discard
= le32_to_cpu(tport
->mac_discard
);
3637 old_tclient
->rcv_unicast_bytes
.hi
=
3638 le32_to_cpu(tclient
->rcv_unicast_bytes
.hi
);
3639 old_tclient
->rcv_unicast_bytes
.lo
=
3640 le32_to_cpu(tclient
->rcv_unicast_bytes
.lo
);
3641 old_tclient
->rcv_broadcast_bytes
.hi
=
3642 le32_to_cpu(tclient
->rcv_broadcast_bytes
.hi
);
3643 old_tclient
->rcv_broadcast_bytes
.lo
=
3644 le32_to_cpu(tclient
->rcv_broadcast_bytes
.lo
);
3645 old_tclient
->rcv_multicast_bytes
.hi
=
3646 le32_to_cpu(tclient
->rcv_multicast_bytes
.hi
);
3647 old_tclient
->rcv_multicast_bytes
.lo
=
3648 le32_to_cpu(tclient
->rcv_multicast_bytes
.lo
);
3649 old_tclient
->total_rcv_pkts
= le32_to_cpu(tclient
->total_rcv_pkts
);
3651 old_tclient
->checksum_discard
= le32_to_cpu(tclient
->checksum_discard
);
3652 old_tclient
->packets_too_big_discard
=
3653 le32_to_cpu(tclient
->packets_too_big_discard
);
3654 estats
->no_buff_discard
=
3655 old_tclient
->no_buff_discard
= le32_to_cpu(tclient
->no_buff_discard
);
3656 old_tclient
->ttl0_discard
= le32_to_cpu(tclient
->ttl0_discard
);
3658 old_xclient
->total_sent_pkts
= le32_to_cpu(xclient
->total_sent_pkts
);
3659 old_xclient
->unicast_bytes_sent
.hi
=
3660 le32_to_cpu(xclient
->unicast_bytes_sent
.hi
);
3661 old_xclient
->unicast_bytes_sent
.lo
=
3662 le32_to_cpu(xclient
->unicast_bytes_sent
.lo
);
3663 old_xclient
->multicast_bytes_sent
.hi
=
3664 le32_to_cpu(xclient
->multicast_bytes_sent
.hi
);
3665 old_xclient
->multicast_bytes_sent
.lo
=
3666 le32_to_cpu(xclient
->multicast_bytes_sent
.lo
);
3667 old_xclient
->broadcast_bytes_sent
.hi
=
3668 le32_to_cpu(xclient
->broadcast_bytes_sent
.hi
);
3669 old_xclient
->broadcast_bytes_sent
.lo
=
3670 le32_to_cpu(xclient
->broadcast_bytes_sent
.lo
);
3672 fstats
->host_func_stats_start
= ++fstats
->host_func_stats_end
;
3677 static void bnx2x_net_stats_update(struct bnx2x
*bp
)
3679 struct tstorm_per_client_stats
*old_tclient
= &bp
->old_tclient
;
3680 struct bnx2x_eth_stats
*estats
= &bp
->eth_stats
;
3681 struct net_device_stats
*nstats
= &bp
->dev
->stats
;
3683 nstats
->rx_packets
=
3684 bnx2x_hilo(&estats
->total_unicast_packets_received_hi
) +
3685 bnx2x_hilo(&estats
->total_multicast_packets_received_hi
) +
3686 bnx2x_hilo(&estats
->total_broadcast_packets_received_hi
);
3688 nstats
->tx_packets
=
3689 bnx2x_hilo(&estats
->total_unicast_packets_transmitted_hi
) +
3690 bnx2x_hilo(&estats
->total_multicast_packets_transmitted_hi
) +
3691 bnx2x_hilo(&estats
->total_broadcast_packets_transmitted_hi
);
3693 nstats
->rx_bytes
= bnx2x_hilo(&estats
->valid_bytes_received_hi
);
3695 nstats
->tx_bytes
= bnx2x_hilo(&estats
->total_bytes_transmitted_hi
);
3697 nstats
->rx_dropped
= old_tclient
->checksum_discard
+
3698 estats
->mac_discard
;
3699 nstats
->tx_dropped
= 0;
3702 bnx2x_hilo(&estats
->total_multicast_packets_transmitted_hi
);
3704 nstats
->collisions
=
3705 estats
->tx_stat_dot3statssinglecollisionframes_lo
+
3706 estats
->tx_stat_dot3statsmultiplecollisionframes_lo
+
3707 estats
->tx_stat_dot3statslatecollisions_lo
+
3708 estats
->tx_stat_dot3statsexcessivecollisions_lo
;
3710 estats
->jabber_packets_received
=
3711 old_tclient
->packets_too_big_discard
+
3712 estats
->rx_stat_dot3statsframestoolong_lo
;
3714 nstats
->rx_length_errors
=
3715 estats
->rx_stat_etherstatsundersizepkts_lo
+
3716 estats
->jabber_packets_received
;
3717 nstats
->rx_over_errors
= estats
->brb_drop_lo
+ estats
->brb_truncate_lo
;
3718 nstats
->rx_crc_errors
= estats
->rx_stat_dot3statsfcserrors_lo
;
3719 nstats
->rx_frame_errors
= estats
->rx_stat_dot3statsalignmenterrors_lo
;
3720 nstats
->rx_fifo_errors
= old_tclient
->no_buff_discard
;
3721 nstats
->rx_missed_errors
= estats
->xxoverflow_discard
;
3723 nstats
->rx_errors
= nstats
->rx_length_errors
+
3724 nstats
->rx_over_errors
+
3725 nstats
->rx_crc_errors
+
3726 nstats
->rx_frame_errors
+
3727 nstats
->rx_fifo_errors
+
3728 nstats
->rx_missed_errors
;
3730 nstats
->tx_aborted_errors
=
3731 estats
->tx_stat_dot3statslatecollisions_lo
+
3732 estats
->tx_stat_dot3statsexcessivecollisions_lo
;
3733 nstats
->tx_carrier_errors
= estats
->rx_stat_falsecarriererrors_lo
;
3734 nstats
->tx_fifo_errors
= 0;
3735 nstats
->tx_heartbeat_errors
= 0;
3736 nstats
->tx_window_errors
= 0;
3738 nstats
->tx_errors
= nstats
->tx_aborted_errors
+
3739 nstats
->tx_carrier_errors
;
3742 static void bnx2x_stats_update(struct bnx2x
*bp
)
3744 u32
*stats_comp
= bnx2x_sp(bp
, stats_comp
);
3747 if (*stats_comp
!= DMAE_COMP_VAL
)
3751 update
= (bnx2x_hw_stats_update(bp
) == 0);
3753 update
|= (bnx2x_storm_stats_update(bp
) == 0);
3756 bnx2x_net_stats_update(bp
);
3759 if (bp
->stats_pending
) {
3760 bp
->stats_pending
++;
3761 if (bp
->stats_pending
== 3) {
3762 BNX2X_ERR("stats not updated for 3 times\n");
3769 if (bp
->msglevel
& NETIF_MSG_TIMER
) {
3770 struct tstorm_per_client_stats
*old_tclient
= &bp
->old_tclient
;
3771 struct bnx2x_eth_stats
*estats
= &bp
->eth_stats
;
3772 struct net_device_stats
*nstats
= &bp
->dev
->stats
;
3775 printk(KERN_DEBUG
"%s:\n", bp
->dev
->name
);
3776 printk(KERN_DEBUG
" tx avail (%4x) tx hc idx (%x)"
3778 bnx2x_tx_avail(bp
->fp
),
3779 le16_to_cpu(*bp
->fp
->tx_cons_sb
), nstats
->tx_packets
);
3780 printk(KERN_DEBUG
" rx usage (%4x) rx hc idx (%x)"
3782 (u16
)(le16_to_cpu(*bp
->fp
->rx_cons_sb
) -
3783 bp
->fp
->rx_comp_cons
),
3784 le16_to_cpu(*bp
->fp
->rx_cons_sb
), nstats
->rx_packets
);
3785 printk(KERN_DEBUG
" %s (Xoff events %u) brb drops %u\n",
3786 netif_queue_stopped(bp
->dev
) ? "Xoff" : "Xon",
3787 estats
->driver_xoff
, estats
->brb_drop_lo
);
3788 printk(KERN_DEBUG
"tstats: checksum_discard %u "
3789 "packets_too_big_discard %u no_buff_discard %u "
3790 "mac_discard %u mac_filter_discard %u "
3791 "xxovrflow_discard %u brb_truncate_discard %u "
3792 "ttl0_discard %u\n",
3793 old_tclient
->checksum_discard
,
3794 old_tclient
->packets_too_big_discard
,
3795 old_tclient
->no_buff_discard
, estats
->mac_discard
,
3796 estats
->mac_filter_discard
, estats
->xxoverflow_discard
,
3797 estats
->brb_truncate_discard
,
3798 old_tclient
->ttl0_discard
);
3800 for_each_queue(bp
, i
) {
3801 printk(KERN_DEBUG
"[%d]: %lu\t%lu\t%lu\n", i
,
3802 bnx2x_fp(bp
, i
, tx_pkt
),
3803 bnx2x_fp(bp
, i
, rx_pkt
),
3804 bnx2x_fp(bp
, i
, rx_calls
));
3808 bnx2x_hw_stats_post(bp
);
3809 bnx2x_storm_stats_post(bp
);
3812 static void bnx2x_port_stats_stop(struct bnx2x
*bp
)
3814 struct dmae_command
*dmae
;
3816 int loader_idx
= PMF_DMAE_C(bp
);
3817 u32
*stats_comp
= bnx2x_sp(bp
, stats_comp
);
3819 bp
->executer_idx
= 0;
3821 opcode
= (DMAE_CMD_SRC_PCI
| DMAE_CMD_DST_GRC
|
3823 DMAE_CMD_SRC_RESET
| DMAE_CMD_DST_RESET
|
3825 DMAE_CMD_ENDIANITY_B_DW_SWAP
|
3827 DMAE_CMD_ENDIANITY_DW_SWAP
|
3829 (BP_PORT(bp
) ? DMAE_CMD_PORT_1
: DMAE_CMD_PORT_0
) |
3830 (BP_E1HVN(bp
) << DMAE_CMD_E1HVN_SHIFT
));
3832 if (bp
->port
.port_stx
) {
3834 dmae
= bnx2x_sp(bp
, dmae
[bp
->executer_idx
++]);
3836 dmae
->opcode
= (opcode
| DMAE_CMD_C_DST_GRC
);
3838 dmae
->opcode
= (opcode
| DMAE_CMD_C_DST_PCI
);
3839 dmae
->src_addr_lo
= U64_LO(bnx2x_sp_mapping(bp
, port_stats
));
3840 dmae
->src_addr_hi
= U64_HI(bnx2x_sp_mapping(bp
, port_stats
));
3841 dmae
->dst_addr_lo
= bp
->port
.port_stx
>> 2;
3842 dmae
->dst_addr_hi
= 0;
3843 dmae
->len
= sizeof(struct host_port_stats
) >> 2;
3845 dmae
->comp_addr_lo
= dmae_reg_go_c
[loader_idx
] >> 2;
3846 dmae
->comp_addr_hi
= 0;
3849 dmae
->comp_addr_lo
=
3850 U64_LO(bnx2x_sp_mapping(bp
, stats_comp
));
3851 dmae
->comp_addr_hi
=
3852 U64_HI(bnx2x_sp_mapping(bp
, stats_comp
));
3853 dmae
->comp_val
= DMAE_COMP_VAL
;
3861 dmae
= bnx2x_sp(bp
, dmae
[bp
->executer_idx
++]);
3862 dmae
->opcode
= (opcode
| DMAE_CMD_C_DST_PCI
);
3863 dmae
->src_addr_lo
= U64_LO(bnx2x_sp_mapping(bp
, func_stats
));
3864 dmae
->src_addr_hi
= U64_HI(bnx2x_sp_mapping(bp
, func_stats
));
3865 dmae
->dst_addr_lo
= bp
->func_stx
>> 2;
3866 dmae
->dst_addr_hi
= 0;
3867 dmae
->len
= sizeof(struct host_func_stats
) >> 2;
3868 dmae
->comp_addr_lo
= U64_LO(bnx2x_sp_mapping(bp
, stats_comp
));
3869 dmae
->comp_addr_hi
= U64_HI(bnx2x_sp_mapping(bp
, stats_comp
));
3870 dmae
->comp_val
= DMAE_COMP_VAL
;
3876 static void bnx2x_stats_stop(struct bnx2x
*bp
)
3880 bnx2x_stats_comp(bp
);
3883 update
= (bnx2x_hw_stats_update(bp
) == 0);
3885 update
|= (bnx2x_storm_stats_update(bp
) == 0);
3888 bnx2x_net_stats_update(bp
);
3891 bnx2x_port_stats_stop(bp
);
3893 bnx2x_hw_stats_post(bp
);
3894 bnx2x_stats_comp(bp
);
static void bnx2x_stats_do_nothing(struct bnx2x *bp)
{
}

static const struct {
	void (*action)(struct bnx2x *bp);
	enum bnx2x_stats_state next_state;
} bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
/* state	event	*/
{
/* DISABLED	PMF	*/ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
/*		LINK_UP	*/ {bnx2x_stats_start,      STATS_STATE_ENABLED},
/*		UPDATE	*/ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
/*		STOP	*/ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
},
{
/* ENABLED	PMF	*/ {bnx2x_stats_pmf_start,  STATS_STATE_ENABLED},
/*		LINK_UP	*/ {bnx2x_stats_restart,    STATS_STATE_ENABLED},
/*		UPDATE	*/ {bnx2x_stats_update,     STATS_STATE_ENABLED},
/*		STOP	*/ {bnx2x_stats_stop,       STATS_STATE_DISABLED}
}
};

static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
{
	enum bnx2x_stats_state state = bp->stats_state;

	bnx2x_stats_stm[state][event].action(bp);
	bp->stats_state = bnx2x_stats_stm[state][event].next_state;

	if ((event != STATS_EVENT_UPDATE) || (bp->msglevel & NETIF_MSG_TIMER))
		DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
		   state, event, bp->stats_state);
}
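/*
 * Example walk through the state machine (illustrative): with stats disabled
 * (STATS_STATE_DISABLED), a link-up event
 *
 *	bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
 *
 * runs bnx2x_stats_start() and moves stats_state to STATS_STATE_ENABLED;
 * the same event while already enabled runs bnx2x_stats_restart() instead.
 */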
3933 static void bnx2x_timer(unsigned long data
)
3935 struct bnx2x
*bp
= (struct bnx2x
*) data
;
3937 if (!netif_running(bp
->dev
))
3940 if (atomic_read(&bp
->intr_sem
) != 0)
3944 struct bnx2x_fastpath
*fp
= &bp
->fp
[0];
3947 bnx2x_tx_int(fp
, 1000);
3948 rc
= bnx2x_rx_int(fp
, 1000);
3951 if (!BP_NOMCP(bp
)) {
3952 int func
= BP_FUNC(bp
);
3956 ++bp
->fw_drv_pulse_wr_seq
;
3957 bp
->fw_drv_pulse_wr_seq
&= DRV_PULSE_SEQ_MASK
;
3958 /* TBD - add SYSTEM_TIME */
3959 drv_pulse
= bp
->fw_drv_pulse_wr_seq
;
3960 SHMEM_WR(bp
, func_mb
[func
].drv_pulse_mb
, drv_pulse
);
3962 mcp_pulse
= (SHMEM_RD(bp
, func_mb
[func
].mcp_pulse_mb
) &
3963 MCP_PULSE_SEQ_MASK
);
3964 /* The delta between driver pulse and mcp response
3965 * should be 1 (before mcp response) or 0 (after mcp response)
3967 if ((drv_pulse
!= mcp_pulse
) &&
3968 (drv_pulse
!= ((mcp_pulse
+ 1) & MCP_PULSE_SEQ_MASK
))) {
3969 /* someone lost a heartbeat... */
3970 BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
3971 drv_pulse
, mcp_pulse
);
3975 if ((bp
->state
== BNX2X_STATE_OPEN
) ||
3976 (bp
->state
== BNX2X_STATE_DISABLED
))
3977 bnx2x_stats_handle(bp
, STATS_EVENT_UPDATE
);
3980 mod_timer(&bp
->timer
, jiffies
+ bp
->current_interval
);
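/*
 * Heartbeat example (illustrative): the driver pulse above increments once
 * per timer tick, so right after SHMEM_WR() drv_pulse is, say, 0x0005. The
 * MCP is considered alive if it echoes either 0x0005 (already answered) or
 * 0x0004 (one behind, answer pending); anything else, e.g. 0x0002, triggers
 * the "drv_pulse != mcp_pulse" error above. The comparison is done modulo
 * MCP_PULSE_SEQ_MASK + 1.
 */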
3983 /* end of Statistics */
3988 * nic init service functions
3991 static void bnx2x_zero_sb(struct bnx2x
*bp
, int sb_id
)
3993 int port
= BP_PORT(bp
);
3995 bnx2x_init_fill(bp
, BAR_USTRORM_INTMEM
+
3996 USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port
, sb_id
), 0,
3997 sizeof(struct ustorm_status_block
)/4);
3998 bnx2x_init_fill(bp
, BAR_CSTRORM_INTMEM
+
3999 CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port
, sb_id
), 0,
4000 sizeof(struct cstorm_status_block
)/4);
4003 static void bnx2x_init_sb(struct bnx2x
*bp
, struct host_status_block
*sb
,
4004 dma_addr_t mapping
, int sb_id
)
4006 int port
= BP_PORT(bp
);
4007 int func
= BP_FUNC(bp
);
4012 section
= ((u64
)mapping
) + offsetof(struct host_status_block
,
4014 sb
->u_status_block
.status_block_id
= sb_id
;
4016 REG_WR(bp
, BAR_USTRORM_INTMEM
+
4017 USTORM_SB_HOST_SB_ADDR_OFFSET(port
, sb_id
), U64_LO(section
));
4018 REG_WR(bp
, BAR_USTRORM_INTMEM
+
4019 ((USTORM_SB_HOST_SB_ADDR_OFFSET(port
, sb_id
)) + 4),
4021 REG_WR8(bp
, BAR_USTRORM_INTMEM
+ FP_USB_FUNC_OFF
+
4022 USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port
, sb_id
), func
);
4024 for (index
= 0; index
< HC_USTORM_SB_NUM_INDICES
; index
++)
4025 REG_WR16(bp
, BAR_USTRORM_INTMEM
+
4026 USTORM_SB_HC_DISABLE_OFFSET(port
, sb_id
, index
), 1);
4029 section
= ((u64
)mapping
) + offsetof(struct host_status_block
,
4031 sb
->c_status_block
.status_block_id
= sb_id
;
4033 REG_WR(bp
, BAR_CSTRORM_INTMEM
+
4034 CSTORM_SB_HOST_SB_ADDR_OFFSET(port
, sb_id
), U64_LO(section
));
4035 REG_WR(bp
, BAR_CSTRORM_INTMEM
+
4036 ((CSTORM_SB_HOST_SB_ADDR_OFFSET(port
, sb_id
)) + 4),
4038 REG_WR8(bp
, BAR_CSTRORM_INTMEM
+ FP_CSB_FUNC_OFF
+
4039 CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port
, sb_id
), func
);
4041 for (index
= 0; index
< HC_CSTORM_SB_NUM_INDICES
; index
++)
4042 REG_WR16(bp
, BAR_CSTRORM_INTMEM
+
4043 CSTORM_SB_HC_DISABLE_OFFSET(port
, sb_id
, index
), 1);
4045 bnx2x_ack_sb(bp
, sb_id
, CSTORM_ID
, 0, IGU_INT_ENABLE
, 0);
static void bnx2x_zero_def_sb(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);

	bnx2x_init_fill(bp, BAR_USTRORM_INTMEM +
			USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
			sizeof(struct ustorm_def_status_block)/4);
	bnx2x_init_fill(bp, BAR_CSTRORM_INTMEM +
			CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
			sizeof(struct cstorm_def_status_block)/4);
	bnx2x_init_fill(bp, BAR_XSTRORM_INTMEM +
			XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
			sizeof(struct xstorm_def_status_block)/4);
	bnx2x_init_fill(bp, BAR_TSTRORM_INTMEM +
			TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
			sizeof(struct tstorm_def_status_block)/4);
}
static void bnx2x_init_def_sb(struct bnx2x *bp,
			      struct host_def_status_block *def_sb,
			      dma_addr_t mapping, int sb_id)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int index, val, reg_offset;
	u64 section;

	/* ATTN */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    atten_status_block);
	def_sb->atten_status_block.status_block_id = sb_id;

	bp->attn_state = 0;

	reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
			     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);

	for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
		bp->attn_group[index].sig[0] = REG_RD(bp,
						      reg_offset + 0x10*index);
		bp->attn_group[index].sig[1] = REG_RD(bp,
					       reg_offset + 0x4 + 0x10*index);
		bp->attn_group[index].sig[2] = REG_RD(bp,
					       reg_offset + 0x8 + 0x10*index);
		bp->attn_group[index].sig[3] = REG_RD(bp,
					       reg_offset + 0xc + 0x10*index);
	}

	reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
			     HC_REG_ATTN_MSG0_ADDR_L);

	REG_WR(bp, reg_offset, U64_LO(section));
	REG_WR(bp, reg_offset + 4, U64_HI(section));

	reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);

	val = REG_RD(bp, reg_offset);
	val |= sb_id;
	REG_WR(bp, reg_offset, val);

	/* USTORM */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    u_def_status_block);
	def_sb->u_def_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_USTRORM_INTMEM +
	       USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
	REG_WR(bp, BAR_USTRORM_INTMEM +
	       ((USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_USTRORM_INTMEM + DEF_USB_FUNC_OFF +
		USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);

	for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_USTRORM_INTMEM +
			 USTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);

	/* CSTORM */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    c_def_status_block);
	def_sb->c_def_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       ((CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
		CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);

	for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);

	/* TSTORM */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    t_def_status_block);
	def_sb->t_def_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
		TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);

	for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_TSTRORM_INTMEM +
			 TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);

	/* XSTORM */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    x_def_status_block);
	def_sb->x_def_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_XSTRORM_INTMEM +
	       XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
	REG_WR(bp, BAR_XSTRORM_INTMEM +
	       ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
		XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);

	for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_XSTRORM_INTMEM +
			 XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);

	bp->stats_pending = 0;
	bp->set_mac_pending = 0;

	bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
}
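/* Program the host-coalescing timeout for the Rx and Tx CQ indices of every
 * queue; a zero rx_ticks/tx_ticks value keeps the corresponding index in the
 * "disabled" state so completions are not coalesced.
 */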
static void bnx2x_update_coalesce(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int i;

	for_each_queue(bp, i) {
		int sb_id = bp->fp[i].sb_id;

		/* HC_INDEX_U_ETH_RX_CQ_CONS */
		REG_WR8(bp, BAR_USTRORM_INTMEM +
			USTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
						    U_SB_ETH_RX_CQ_INDEX),
			bp->rx_ticks/12);
		REG_WR16(bp, BAR_USTRORM_INTMEM +
			 USTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
						     U_SB_ETH_RX_CQ_INDEX),
			 bp->rx_ticks ? 0 : 1);
		REG_WR16(bp, BAR_USTRORM_INTMEM +
			 USTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
						     U_SB_ETH_RX_BD_INDEX),
			 bp->rx_ticks ? 0 : 1);

		/* HC_INDEX_C_ETH_TX_CQ_CONS */
		REG_WR8(bp, BAR_CSTRORM_INTMEM +
			CSTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
						    C_SB_ETH_TX_CQ_INDEX),
			bp->tx_ticks/12);
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
						     C_SB_ETH_TX_CQ_INDEX),
			 bp->tx_ticks ? 0 : 1);
	}
}
static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
				       struct bnx2x_fastpath *fp, int last)
{
	int i;

	for (i = 0; i < last; i++) {
		struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]);
		struct sk_buff *skb = rx_buf->skb;

		if (skb == NULL) {
			DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
			continue;
		}

		if (fp->tpa_state[i] == BNX2X_TPA_START)
			pci_unmap_single(bp->pdev,
					 pci_unmap_addr(rx_buf, mapping),
					 bp->rx_buf_size,
					 PCI_DMA_FROMDEVICE);

		dev_kfree_skb(skb);
		rx_buf->skb = NULL;
	}
}
static void bnx2x_init_rx_rings(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
					      ETH_MAX_AGGREGATION_QUEUES_E1H;
	u16 ring_prod, cqe_ring_prod;
	int i, j;

	bp->rx_buf_size = bp->dev->mtu;
	bp->rx_buf_size += bp->rx_offset + ETH_OVREHEAD +
		BCM_RX_ETH_PAYLOAD_ALIGN;

	if (bp->flags & TPA_ENABLE_FLAG) {
		DP(NETIF_MSG_IFUP,
		   "rx_buf_size %d  effective_mtu %d\n",
		   bp->rx_buf_size, bp->dev->mtu + ETH_OVREHEAD);

		for_each_queue(bp, j) {
			struct bnx2x_fastpath *fp = &bp->fp[j];

			for (i = 0; i < max_agg_queues; i++) {
				fp->tpa_pool[i].skb =
				   netdev_alloc_skb(bp->dev, bp->rx_buf_size);
				if (!fp->tpa_pool[i].skb) {
					BNX2X_ERR("Failed to allocate TPA "
						  "skb pool for queue[%d] - "
						  "disabling TPA on this "
						  "queue!\n", j);
					bnx2x_free_tpa_pool(bp, fp, i);
					fp->disable_tpa = 1;
					break;
				}
				pci_unmap_addr_set((struct sw_rx_bd *)
							&bp->fp->tpa_pool[i],
						   mapping, 0);
				fp->tpa_state[i] = BNX2X_TPA_STOP;
			}
		}
	}

	for_each_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
		fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX;

		/* "next page" elements initialization */
		/* SGE ring */
		for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
			struct eth_rx_sge *sge;

			sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
			sge->addr_hi =
				cpu_to_le32(U64_HI(fp->rx_sge_mapping +
					BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
			sge->addr_lo =
				cpu_to_le32(U64_LO(fp->rx_sge_mapping +
					BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
		}

		bnx2x_init_sge_ring_bit_mask(fp);

		/* RX BD ring */
		for (i = 1; i <= NUM_RX_RINGS; i++) {
			struct eth_rx_bd *rx_bd;

			rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
			rx_bd->addr_hi =
				cpu_to_le32(U64_HI(fp->rx_desc_mapping +
					    BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
			rx_bd->addr_lo =
				cpu_to_le32(U64_LO(fp->rx_desc_mapping +
					    BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
		}

		/* CQ ring */
		for (i = 1; i <= NUM_RCQ_RINGS; i++) {
			struct eth_rx_cqe_next_page *nextpg;

			nextpg = (struct eth_rx_cqe_next_page *)
				&fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
			nextpg->addr_hi =
				cpu_to_le32(U64_HI(fp->rx_comp_mapping +
					   BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
			nextpg->addr_lo =
				cpu_to_le32(U64_LO(fp->rx_comp_mapping +
					   BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
		}

		/* Allocate SGEs and initialize the ring elements */
		for (i = 0, ring_prod = 0;
		     i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {

			if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
				BNX2X_ERR("was only able to allocate "
					  "%d rx sges\n", i);
				BNX2X_ERR("disabling TPA for queue[%d]\n", j);
				/* Cleanup already allocated elements */
				bnx2x_free_rx_sge_range(bp, fp, ring_prod);
				bnx2x_free_tpa_pool(bp, fp, max_agg_queues);
				fp->disable_tpa = 1;
				ring_prod = 0;
				break;
			}
			ring_prod = NEXT_SGE_IDX(ring_prod);
		}
		fp->rx_sge_prod = ring_prod;

		/* Allocate BDs and initialize BD ring */
		fp->rx_comp_cons = 0;
		cqe_ring_prod = ring_prod = 0;
		for (i = 0; i < bp->rx_ring_size; i++) {
			if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
				BNX2X_ERR("was only able to allocate "
					  "%d rx skbs\n", i);
				bp->eth_stats.rx_skb_alloc_failed++;
				break;
			}
			ring_prod = NEXT_RX_IDX(ring_prod);
			cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
			WARN_ON(ring_prod <= i);
		}

		fp->rx_bd_prod = ring_prod;
		/* must not have more available CQEs than BDs */
		fp->rx_comp_prod = min((u16)(NUM_RCQ_RINGS*RCQ_DESC_CNT),
				       cqe_ring_prod);
		fp->rx_pkt = fp->rx_calls = 0;

		/* Warning!
		 * this will generate an interrupt (to the TSTORM)
		 * must only be done after chip is initialized
		 */
		bnx2x_update_rx_prod(bp, fp, ring_prod, fp->rx_comp_prod,
				     fp->rx_sge_prod);
		if (j != 0)
			continue;

		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
		       U64_LO(fp->rx_comp_mapping));
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
		       U64_HI(fp->rx_comp_mapping));
	}
}
static void bnx2x_init_tx_ring(struct bnx2x *bp)
{
	int i, j;

	for_each_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		for (i = 1; i <= NUM_TX_RINGS; i++) {
			struct eth_tx_bd *tx_bd =
				&fp->tx_desc_ring[TX_DESC_CNT * i - 1];

			tx_bd->addr_hi =
				cpu_to_le32(U64_HI(fp->tx_desc_mapping +
					    BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
			tx_bd->addr_lo =
				cpu_to_le32(U64_LO(fp->tx_desc_mapping +
					    BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
		}

		fp->tx_pkt_prod = 0;
		fp->tx_pkt_cons = 0;
		fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
	}
}
static void bnx2x_init_sp_ring(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);

	spin_lock_init(&bp->spq_lock);

	bp->spq_left = MAX_SPQ_PENDING;
	bp->spq_prod_idx = 0;
	bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
	bp->spq_prod_bd = bp->spq;
	bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;

	REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
	       U64_LO(bp->spq_mapping));
	REG_WR(bp,
	       XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
	       U64_HI(bp->spq_mapping));

	REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
	       bp->spq_prod_idx);
}
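/* Fill the per-connection Ethernet context: XSTORM gets the Tx BD ring and
 * doorbell data addresses, USTORM the Rx BD/SGE rings and buffer sizes, and
 * the CDU validation words are derived from the HW CID of each queue.
 */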
static void bnx2x_init_context(struct bnx2x *bp)
{
	int i;

	for_each_queue(bp, i) {
		struct eth_context *context = bnx2x_sp(bp, context[i].eth);
		struct bnx2x_fastpath *fp = &bp->fp[i];
		u8 sb_id = FP_SB_ID(fp);

		context->xstorm_st_context.tx_bd_page_base_hi =
						U64_HI(fp->tx_desc_mapping);
		context->xstorm_st_context.tx_bd_page_base_lo =
						U64_LO(fp->tx_desc_mapping);
		context->xstorm_st_context.db_data_addr_hi =
						U64_HI(fp->tx_prods_mapping);
		context->xstorm_st_context.db_data_addr_lo =
						U64_LO(fp->tx_prods_mapping);
		context->xstorm_st_context.statistics_data = (BP_CL_ID(bp) |
				XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);

		context->ustorm_st_context.common.sb_index_numbers =
						BNX2X_RX_SB_INDEX_NUM;
		context->ustorm_st_context.common.clientId = FP_CL_ID(fp);
		context->ustorm_st_context.common.status_block_id = sb_id;
		context->ustorm_st_context.common.flags =
			USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT;
		context->ustorm_st_context.common.mc_alignment_size =
						BCM_RX_ETH_PAYLOAD_ALIGN;
		context->ustorm_st_context.common.bd_buff_size =
						bp->rx_buf_size;
		context->ustorm_st_context.common.bd_page_base_hi =
						U64_HI(fp->rx_desc_mapping);
		context->ustorm_st_context.common.bd_page_base_lo =
						U64_LO(fp->rx_desc_mapping);
		if (!fp->disable_tpa) {
			context->ustorm_st_context.common.flags |=
				(USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA |
				 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_SGE_RING);
			context->ustorm_st_context.common.sge_buff_size =
					(u16)(BCM_PAGE_SIZE*PAGES_PER_SGE);
			context->ustorm_st_context.common.sge_page_base_hi =
						U64_HI(fp->rx_sge_mapping);
			context->ustorm_st_context.common.sge_page_base_lo =
						U64_LO(fp->rx_sge_mapping);
		}

		context->cstorm_st_context.sb_index_number =
						C_SB_ETH_TX_CQ_INDEX;
		context->cstorm_st_context.status_block_id = sb_id;

		context->xstorm_ag_context.cdu_reserved =
			CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
					       CDU_REGION_NUMBER_XCM_AG,
					       ETH_CONNECTION_TYPE);
		context->ustorm_ag_context.cdu_usage =
			CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
					       CDU_REGION_NUMBER_UCM_AG,
					       ETH_CONNECTION_TYPE);
	}
}
static void bnx2x_init_ind_table(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int i;

	if (!is_multi(bp))
		return;

	DP(NETIF_MSG_IFUP, "Initializing indirection table\n");
	for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
		REG_WR8(bp, BAR_TSTRORM_INTMEM +
			TSTORM_INDIRECTION_TABLE_OFFSET(port) + i,
			i % bp->num_queues);

	REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
}
static void bnx2x_set_client_config(struct bnx2x *bp)
{
	struct tstorm_eth_client_config tstorm_client = {0};
	int port = BP_PORT(bp);
	int i;

	tstorm_client.mtu = bp->dev->mtu + ETH_OVREHEAD;
	tstorm_client.statistics_counter_id = BP_CL_ID(bp);
	tstorm_client.config_flags =
				TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE;
#ifdef BCM_VLAN
	if (bp->rx_mode && bp->vlgrp) {
		tstorm_client.config_flags |=
				TSTORM_ETH_CLIENT_CONFIG_VLAN_REMOVAL_ENABLE;
		DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
	}
#endif

	if (bp->flags & TPA_ENABLE_FLAG) {
		tstorm_client.max_sges_for_packet =
			BCM_PAGE_ALIGN(tstorm_client.mtu) >> BCM_PAGE_SHIFT;
		tstorm_client.max_sges_for_packet =
			((tstorm_client.max_sges_for_packet +
			  PAGES_PER_SGE - 1) & (~(PAGES_PER_SGE - 1))) >>
			PAGES_PER_SGE_SHIFT;

		tstorm_client.config_flags |=
				TSTORM_ETH_CLIENT_CONFIG_ENABLE_SGE_RING;
	}

	for_each_queue(bp, i) {
		REG_WR(bp, BAR_TSTRORM_INTMEM +
		       TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
		       ((u32 *)&tstorm_client)[0]);
		REG_WR(bp, BAR_TSTRORM_INTMEM +
		       TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
		       ((u32 *)&tstorm_client)[1]);
	}

	DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
	   ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
}
static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
{
	struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
	int mode = bp->rx_mode;
	int mask = (1 << BP_L_ID(bp));
	int func = BP_FUNC(bp);
	int i;

	DP(NETIF_MSG_IFUP, "rx mode %d  mask 0x%x\n", mode, mask);

	switch (mode) {
	case BNX2X_RX_MODE_NONE: /* no Rx */
		tstorm_mac_filter.ucast_drop_all = mask;
		tstorm_mac_filter.mcast_drop_all = mask;
		tstorm_mac_filter.bcast_drop_all = mask;
		break;
	case BNX2X_RX_MODE_NORMAL:
		tstorm_mac_filter.bcast_accept_all = mask;
		break;
	case BNX2X_RX_MODE_ALLMULTI:
		tstorm_mac_filter.mcast_accept_all = mask;
		tstorm_mac_filter.bcast_accept_all = mask;
		break;
	case BNX2X_RX_MODE_PROMISC:
		tstorm_mac_filter.ucast_accept_all = mask;
		tstorm_mac_filter.mcast_accept_all = mask;
		tstorm_mac_filter.bcast_accept_all = mask;
		break;
	default:
		BNX2X_ERR("BAD rx mode (%d)\n", mode);
		break;
	}

	for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
		REG_WR(bp, BAR_TSTRORM_INTMEM +
		       TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
		       ((u32 *)&tstorm_mac_filter)[i]);

/*		DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
		   ((u32 *)&tstorm_mac_filter)[i]); */
	}

	if (mode != BNX2X_RX_MODE_NONE)
		bnx2x_set_client_config(bp);
}
static void bnx2x_init_internal_common(struct bnx2x *bp)
{
	int i;

	if (bp->flags & TPA_ENABLE_FLAG) {
		struct tstorm_eth_tpa_exist tpa = {0};

		tpa.tpa_exist = 1;

		REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET,
		       ((u32 *)&tpa)[0]);
		REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET + 4,
		       ((u32 *)&tpa)[1]);
	}

	/* Zero this manually as its initialization is
	   currently missing in the initTool */
	for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_AGG_DATA_OFFSET + i * 4, 0);
}
static void bnx2x_init_internal_port(struct bnx2x *bp)
{
	int port = BP_PORT(bp);

	REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
	REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
	REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
}
static void bnx2x_init_internal_func(struct bnx2x *bp)
{
	struct tstorm_eth_function_common_config tstorm_config = {0};
	struct stats_indication_flags stats_flags = {0};
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int i;
	u16 max_agg_size;

	if (is_multi(bp)) {
		tstorm_config.config_flags = MULTI_FLAGS;
		tstorm_config.rss_result_mask = MULTI_MASK;
	}

	tstorm_config.leading_client_id = BP_L_ID(bp);

	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
	       (*(u32 *)&tstorm_config));

	bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
	bnx2x_set_storm_rx_mode(bp);

	/* reset xstorm per client statistics */
	for (i = 0; i < sizeof(struct xstorm_per_client_stats) / 4; i++) {
		REG_WR(bp, BAR_XSTRORM_INTMEM +
		       XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, BP_CL_ID(bp)) +
		       i*4, 0);
	}
	/* reset tstorm per client statistics */
	for (i = 0; i < sizeof(struct tstorm_per_client_stats) / 4; i++) {
		REG_WR(bp, BAR_TSTRORM_INTMEM +
		       TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, BP_CL_ID(bp)) +
		       i*4, 0);
	}

	/* Init statistics related context */
	stats_flags.collect_eth = 1;

	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
	       ((u32 *)&stats_flags)[0]);
	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
	       ((u32 *)&stats_flags)[1]);

	REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
	       ((u32 *)&stats_flags)[0]);
	REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
	       ((u32 *)&stats_flags)[1]);

	REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
	       ((u32 *)&stats_flags)[0]);
	REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
	       ((u32 *)&stats_flags)[1]);

	REG_WR(bp, BAR_XSTRORM_INTMEM +
	       XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
	       U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
	REG_WR(bp, BAR_XSTRORM_INTMEM +
	       XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
	       U64_HI(bnx2x_sp_mapping(bp, fw_stats)));

	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
	       U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
	       U64_HI(bnx2x_sp_mapping(bp, fw_stats)));

	if (CHIP_IS_E1H(bp)) {
		REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
			IS_E1HMF(bp));
		REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
			IS_E1HMF(bp));
		REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
			IS_E1HMF(bp));
		REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
			IS_E1HMF(bp));

		REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
			 bp->e1hov);
	}

	/* Init CQ ring mapping and aggregation size */
	max_agg_size = min((u32)(bp->rx_buf_size +
				 8*BCM_PAGE_SIZE*PAGES_PER_SGE),
			   (u32)0xffff);
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_CQE_PAGE_BASE_OFFSET(port, FP_CL_ID(fp)),
		       U64_LO(fp->rx_comp_mapping));
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_CQE_PAGE_BASE_OFFSET(port, FP_CL_ID(fp)) + 4,
		       U64_HI(fp->rx_comp_mapping));

		REG_WR16(bp, BAR_USTRORM_INTMEM +
			 USTORM_MAX_AGG_SIZE_OFFSET(port, FP_CL_ID(fp)),
			 max_agg_size);
	}
}
static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
{
	switch (load_code) {
	case FW_MSG_CODE_DRV_LOAD_COMMON:
		bnx2x_init_internal_common(bp);
		/* no break */

	case FW_MSG_CODE_DRV_LOAD_PORT:
		bnx2x_init_internal_port(bp);
		/* no break */

	case FW_MSG_CODE_DRV_LOAD_FUNCTION:
		bnx2x_init_internal_func(bp);
		break;

	default:
		BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
		break;
	}
}
static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
{
	int i;

	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		fp->state = BNX2X_FP_STATE_CLOSED;
		fp->cl_id = BP_L_ID(bp) + i;
		fp->sb_id = fp->cl_id;
		DP(NETIF_MSG_IFUP,
		   "bnx2x_init_sb(%p,%p) index %d  cl_id %d  sb %d\n",
		   bp, fp->status_blk, i, FP_CL_ID(fp), FP_SB_ID(fp));
		bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping,
			      FP_SB_ID(fp));
		bnx2x_update_fpsb_idx(fp);
	}

	bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping,
			  DEF_SB_ID);
	bnx2x_update_dsb_idx(bp);
	bnx2x_update_coalesce(bp);
	bnx2x_init_rx_rings(bp);
	bnx2x_init_tx_ring(bp);
	bnx2x_init_sp_ring(bp);
	bnx2x_init_context(bp);
	bnx2x_init_internal(bp, load_code);
	bnx2x_init_ind_table(bp);
	bnx2x_int_enable(bp);
}
/* end of nic init */

/*
 * gzip service functions
 */

static int bnx2x_gunzip_init(struct bnx2x *bp)
{
	bp->gunzip_buf = pci_alloc_consistent(bp->pdev, FW_BUF_SIZE,
					      &bp->gunzip_mapping);
	if (bp->gunzip_buf == NULL)
		goto gunzip_nomem1;

	bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
	if (bp->strm == NULL)
		goto gunzip_nomem2;

	bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
				      GFP_KERNEL);
	if (bp->strm->workspace == NULL)
		goto gunzip_nomem3;

	return 0;

gunzip_nomem3:
	kfree(bp->strm);
	bp->strm = NULL;

gunzip_nomem2:
	pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
			    bp->gunzip_mapping);
	bp->gunzip_buf = NULL;

gunzip_nomem1:
	printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for"
	       " un-compression\n", bp->dev->name);
	return -ENOMEM;
}
static void bnx2x_gunzip_end(struct bnx2x *bp)
{
	kfree(bp->strm->workspace);

	kfree(bp->strm);
	bp->strm = NULL;

	if (bp->gunzip_buf) {
		pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
				    bp->gunzip_mapping);
		bp->gunzip_buf = NULL;
	}
}
static int bnx2x_gunzip(struct bnx2x *bp, u8 *zbuf, int len)
{
	int n, rc;

	/* check gzip header */
	if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED))
		return -EINVAL;

	n = 10;

#define FNAME				0x8

	if (zbuf[3] & FNAME)
		while ((zbuf[n++] != 0) && (n < len));

	bp->strm->next_in = zbuf + n;
	bp->strm->avail_in = len - n;
	bp->strm->next_out = bp->gunzip_buf;
	bp->strm->avail_out = FW_BUF_SIZE;

	rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
	if (rc != Z_OK)
		return rc;

	rc = zlib_inflate(bp->strm, Z_FINISH);
	if ((rc != Z_OK) && (rc != Z_STREAM_END))
		printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
		       bp->dev->name, bp->strm->msg);

	bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
	if (bp->gunzip_outlen & 0x3)
		printk(KERN_ERR PFX "%s: Firmware decompression error:"
				    " gunzip_outlen (%d) not aligned\n",
		       bp->dev->name, bp->gunzip_outlen);
	bp->gunzip_outlen >>= 2;

	zlib_inflateEnd(bp->strm);

	if (rc == Z_STREAM_END)
		return 0;

	return rc;
}
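/* Illustrative call sequence only (it mirrors how the init code in this file
 * uses the helpers; the firmware blob name and length below are hypothetical):
 *
 *	if (bnx2x_gunzip_init(bp))
 *		return -ENOMEM;
 *	rc = bnx2x_gunzip(bp, fw_blob, fw_blob_len);
 *	if (rc == 0)
 *		;	use bp->gunzip_buf, bp->gunzip_outlen (in u32 words)
 *	bnx2x_gunzip_end(bp);
 */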
/* nic load/unload */

/*
 * General service functions
 */

/* send a NIG loopback debug packet */
static void bnx2x_lb_pckt(struct bnx2x *bp)
{
	u32 wb_write[3];

	/* Ethernet source and destination addresses */
	wb_write[0] = 0x55555555;
	wb_write[1] = 0x55555555;
	wb_write[2] = 0x20;		/* SOP */
	REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);

	/* NON-IP protocol */
	wb_write[0] = 0x09000000;
	wb_write[1] = 0x55555555;
	wb_write[2] = 0x10;		/* EOP, eop_bvalid = 0 */
	REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
}
/* some of the internal memories
 * are not directly readable from the driver
 * to test them we send debug packets
 */
static int bnx2x_int_mem_test(struct bnx2x *bp)
{
	int factor;
	int count, i;
	u32 val = 0;

	if (CHIP_REV_IS_FPGA(bp))
		factor = 120;
	else if (CHIP_REV_IS_EMUL(bp))
		factor = 200;
	else
		factor = 1;

	DP(NETIF_MSG_HW, "start part1\n");

	/* Disable inputs of parser neighbor blocks */
	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
	REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
	REG_WR(bp, CFC_REG_DEBUG0, 0x1);
	REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);

	/*  Write 0 to parser credits for CFC search request */
	REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);

	/* send Ethernet packet */
	bnx2x_lb_pckt(bp);

	/* TODO do i reset NIG statistic? */
	/* Wait until NIG register shows 1 packet of size 0x10 */
	count = 1000 * factor;
	while (count) {
		bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
		val = *bnx2x_sp(bp, wb_data[0]);
		if (val == 0x10)
			break;

		msleep(10);
		count--;
	}
	if (val != 0x10) {
		BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
		return -1;
	}

	/* Wait until PRS register shows 1 packet */
	count = 1000 * factor;
	while (count) {
		val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
		if (val == 1)
			break;

		msleep(10);
		count--;
	}
	if (val != 0x1) {
		BNX2X_ERR("PRS timeout  val = 0x%x\n", val);
		return -2;
	}

	/* Reset and init BRB, PRS */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
	msleep(50);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
	msleep(50);
	bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
	bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);

	DP(NETIF_MSG_HW, "part2\n");

	/* Disable inputs of parser neighbor blocks */
	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
	REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
	REG_WR(bp, CFC_REG_DEBUG0, 0x1);
	REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);

	/* Write 0 to parser credits for CFC search request */
	REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);

	/* send 10 Ethernet packets */
	for (i = 0; i < 10; i++)
		bnx2x_lb_pckt(bp);

	/* Wait until NIG register shows 10 + 1
	   packets of size 11*0x10 = 0xb0 */
	count = 1000 * factor;
	while (count) {
		bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
		val = *bnx2x_sp(bp, wb_data[0]);
		if (val == 0xb0)
			break;

		msleep(10);
		count--;
	}
	if (val != 0xb0) {
		BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
		return -3;
	}

	/* Wait until PRS register shows 2 packets */
	val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
	if (val != 2)
		BNX2X_ERR("PRS timeout  val = 0x%x\n", val);

	/* Write 1 to parser credits for CFC search request */
	REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);

	/* Wait until PRS register shows 3 packets */
	msleep(10 * factor);
	/* Wait until NIG register shows 1 packet of size 0x10 */
	val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
	if (val != 3)
		BNX2X_ERR("PRS timeout  val = 0x%x\n", val);

	/* clear NIG EOP FIFO */
	for (i = 0; i < 11; i++)
		REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
	val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
	if (val != 1) {
		BNX2X_ERR("clear of NIG failed\n");
		return -4;
	}

	/* Reset and init BRB, PRS, NIG */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
	msleep(50);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
	msleep(50);
	bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
	bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);

	/* set NIC mode */
	REG_WR(bp, PRS_REG_NIC_MODE, 1);

	/* Enable inputs of parser neighbor blocks */
	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
	REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
	REG_WR(bp, CFC_REG_DEBUG0, 0x0);
	REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);

	DP(NETIF_MSG_HW, "done\n");

	return 0; /* OK */
}
static void enable_blocks_attention(struct bnx2x *bp)
{
	REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
	REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
	REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
	REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
	REG_WR(bp, QM_REG_QM_INT_MASK, 0);
	REG_WR(bp, TM_REG_TM_INT_MASK, 0);
	REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
	REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
	REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
/*	REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
/*	REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
	REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
	REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
	REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
/*	REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
/*	REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
	REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
	REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
	REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
	REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
/*	REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
/*	REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
	if (CHIP_REV_IS_FPGA(bp))
		REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
	else
		REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
	REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
	REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
	REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
/*	REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
/*	REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
	REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
	REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
/*	REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
	REG_WR(bp, PBF_REG_PBF_INT_MASK, 0X18);		/* bit 3,4 masked */
}
static int bnx2x_init_common(struct bnx2x *bp)
{
	u32 val, i;

	DP(BNX2X_MSG_MCP, "starting common init  func %d\n", BP_FUNC(bp));

	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);

	bnx2x_init_block(bp, MISC_COMMON_START, MISC_COMMON_END);
	if (CHIP_IS_E1H(bp))
		REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp));

	REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
	msleep(30);
	REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);

	bnx2x_init_block(bp, PXP_COMMON_START, PXP_COMMON_END);
	if (CHIP_IS_E1(bp)) {
		/* enable HW interrupt from PXP on USDM overflow
		   bit 16 on INT_MASK_0 */
		REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
	}

	bnx2x_init_block(bp, PXP2_COMMON_START, PXP2_COMMON_END);

#ifdef __BIG_ENDIAN
	REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
	REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
	REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
	REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
	REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
	REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 1);

/*	REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
	REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
	REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
	REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
	REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
#endif

	REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
	REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
	REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
	REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);

	if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
		REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);

	/* let the HW do it's magic ... */
	msleep(100);
	/* finish PXP init */
	val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
	if (val != 1) {
		BNX2X_ERR("PXP2 CFG failed\n");
		return -EBUSY;
	}
	val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
	if (val != 1) {
		BNX2X_ERR("PXP2 RD_INIT failed\n");
		return -EBUSY;
	}

	REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
	REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);

	bnx2x_init_block(bp, DMAE_COMMON_START, DMAE_COMMON_END);

	/* clean the DMAE memory */
	bp->dmae_ready = 1;
	bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);

	bnx2x_init_block(bp, TCM_COMMON_START, TCM_COMMON_END);
	bnx2x_init_block(bp, UCM_COMMON_START, UCM_COMMON_END);
	bnx2x_init_block(bp, CCM_COMMON_START, CCM_COMMON_END);
	bnx2x_init_block(bp, XCM_COMMON_START, XCM_COMMON_END);

	bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
	bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
	bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
	bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);

	bnx2x_init_block(bp, QM_COMMON_START, QM_COMMON_END);
	/* soft reset pulse */
	REG_WR(bp, QM_REG_SOFT_RESET, 1);
	REG_WR(bp, QM_REG_SOFT_RESET, 0);

	bnx2x_init_block(bp, TIMERS_COMMON_START, TIMERS_COMMON_END);

	bnx2x_init_block(bp, DQ_COMMON_START, DQ_COMMON_END);
	REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT);
	if (!CHIP_REV_IS_SLOW(bp)) {
		/* enable hw interrupt from doorbell Q */
		REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
	}

	bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
	if (CHIP_REV_IS_SLOW(bp)) {
		/* fix for emulation and FPGA for no pause */
		REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0, 513);
		REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_1, 513);
		REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0, 0);
		REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_1, 0);
	}

	bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
	REG_WR(bp, PRS_REG_NIC_MODE, 1);
	if (CHIP_IS_E1H(bp))
		REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));

	bnx2x_init_block(bp, TSDM_COMMON_START, TSDM_COMMON_END);
	bnx2x_init_block(bp, CSDM_COMMON_START, CSDM_COMMON_END);
	bnx2x_init_block(bp, USDM_COMMON_START, USDM_COMMON_END);
	bnx2x_init_block(bp, XSDM_COMMON_START, XSDM_COMMON_END);

	if (CHIP_IS_E1H(bp)) {
		bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0,
				STORM_INTMEM_SIZE_E1H/2);
		bnx2x_init_fill(bp,
				TSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
				0, STORM_INTMEM_SIZE_E1H/2);
		bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0,
				STORM_INTMEM_SIZE_E1H/2);
		bnx2x_init_fill(bp,
				CSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
				0, STORM_INTMEM_SIZE_E1H/2);
		bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0,
				STORM_INTMEM_SIZE_E1H/2);
		bnx2x_init_fill(bp,
				XSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
				0, STORM_INTMEM_SIZE_E1H/2);
		bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0,
				STORM_INTMEM_SIZE_E1H/2);
		bnx2x_init_fill(bp,
				USTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
				0, STORM_INTMEM_SIZE_E1H/2);
	} else { /* E1 */
		bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0,
				STORM_INTMEM_SIZE_E1);
		bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0,
				STORM_INTMEM_SIZE_E1);
		bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0,
				STORM_INTMEM_SIZE_E1);
		bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0,
				STORM_INTMEM_SIZE_E1);
	}

	bnx2x_init_block(bp, TSEM_COMMON_START, TSEM_COMMON_END);
	bnx2x_init_block(bp, USEM_COMMON_START, USEM_COMMON_END);
	bnx2x_init_block(bp, CSEM_COMMON_START, CSEM_COMMON_END);
	bnx2x_init_block(bp, XSEM_COMMON_START, XSEM_COMMON_END);

	/* sync semi rtc */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
	       0x80000000);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
	       0x80000000);

	bnx2x_init_block(bp, UPB_COMMON_START, UPB_COMMON_END);
	bnx2x_init_block(bp, XPB_COMMON_START, XPB_COMMON_END);
	bnx2x_init_block(bp, PBF_COMMON_START, PBF_COMMON_END);

	REG_WR(bp, SRC_REG_SOFT_RST, 1);
	for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4) {
		REG_WR(bp, i, 0xc0cac01a);
		/* TODO: replace with something meaningful */
	}
	if (CHIP_IS_E1H(bp))
		bnx2x_init_block(bp, SRCH_COMMON_START, SRCH_COMMON_END);
	REG_WR(bp, SRC_REG_SOFT_RST, 0);

	if (sizeof(union cdu_context) != 1024)
		/* we currently assume that a context is 1024 bytes */
		printk(KERN_ALERT PFX "please adjust the size of"
		       " cdu_context(%ld)\n", (long)sizeof(union cdu_context));

	bnx2x_init_block(bp, CDU_COMMON_START, CDU_COMMON_END);
	val = (4 << 24) + (0 << 12) + 1024;
	REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
	if (CHIP_IS_E1(bp)) {
		/* !!! fix pxp client crdit until excel update */
		REG_WR(bp, CDU_REG_CDU_DEBUG, 0x264);
		REG_WR(bp, CDU_REG_CDU_DEBUG, 0);
	}

	bnx2x_init_block(bp, CFC_COMMON_START, CFC_COMMON_END);
	REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);

	bnx2x_init_block(bp, HC_COMMON_START, HC_COMMON_END);
	bnx2x_init_block(bp, MISC_AEU_COMMON_START, MISC_AEU_COMMON_END);

	/* PXPCS COMMON comes here */
	/* Reset PCIE errors for debug */
	REG_WR(bp, 0x2814, 0xffffffff);
	REG_WR(bp, 0x3820, 0xffffffff);

	/* EMAC0 COMMON comes here */
	/* EMAC1 COMMON comes here */
	/* DBU COMMON comes here */
	/* DBG COMMON comes here */

	bnx2x_init_block(bp, NIG_COMMON_START, NIG_COMMON_END);
	if (CHIP_IS_E1H(bp)) {
		REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp));
		REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp));
	}

	if (CHIP_REV_IS_SLOW(bp))
		msleep(200);

	/* finish CFC init */
	val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
	if (val != 1) {
		BNX2X_ERR("CFC LL_INIT failed\n");
		return -EBUSY;
	}
	val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
	if (val != 1) {
		BNX2X_ERR("CFC AC_INIT failed\n");
		return -EBUSY;
	}
	val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
	if (val != 1) {
		BNX2X_ERR("CFC CAM_INIT failed\n");
		return -EBUSY;
	}
	REG_WR(bp, CFC_REG_DEBUG0, 0);

	/* read NIG statistic
	   to see if this is our first up since powerup */
	bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
	val = *bnx2x_sp(bp, wb_data[0]);

	/* do internal memory self test */
	if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) {
		BNX2X_ERR("internal mem self test failed\n");
		return -EBUSY;
	}

	switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
	case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1021G:
	case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
		/* Fan failure is indicated by SPIO 5 */
		bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
			       MISC_REGISTERS_SPIO_INPUT_HI_Z);

		/* set to active low mode */
		val = REG_RD(bp, MISC_REG_SPIO_INT);
		val |= ((1 << MISC_REGISTERS_SPIO_5) <<
			MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
		REG_WR(bp, MISC_REG_SPIO_INT, val);

		/* enable interrupt to signal the IGU */
		val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
		val |= (1 << MISC_REGISTERS_SPIO_5);
		REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
		break;

	default:
		break;
	}

	/* clear PXP2 attentions */
	REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);

	enable_blocks_attention(bp);

	if (!BP_NOMCP(bp)) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_common_init_phy(bp, bp->common.shmem_base);
		bnx2x_release_phy_lock(bp);
	} else
		BNX2X_ERR("Bootcode is missing - can not initialize link\n");

	return 0;
}
static int bnx2x_init_port(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 val;

	DP(BNX2X_MSG_MCP, "starting port init  port %x\n", port);

	REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);

	/* Port PXP comes here */
	/* Port PXP2 comes here */
#ifdef BCM_ISCSI
	wb_write[0] = ONCHIP_ADDR1(bp->timers_mapping);
	wb_write[1] = ONCHIP_ADDR2(bp->timers_mapping);
	REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
	REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));

	wb_write[0] = ONCHIP_ADDR1(bp->qm_mapping);
	wb_write[1] = ONCHIP_ADDR2(bp->qm_mapping);
	REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
	REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));

	wb_write[0] = ONCHIP_ADDR1(bp->t1_mapping);
	wb_write[1] = ONCHIP_ADDR2(bp->t1_mapping);
	REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
	REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
#endif
	/* Port CMs come here */

	/* Port QM comes here */
#ifdef BCM_ISCSI
	REG_WR(bp, TM_REG_LIN0_SCAN_TIME + func*4, 1024/64*20);
	REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + func*4, 31);

	bnx2x_init_block(bp, func ? TIMERS_PORT1_START : TIMERS_PORT0_START,
			     func ? TIMERS_PORT1_END : TIMERS_PORT0_END);
#endif
	/* Port DQ comes here */
	/* Port BRB1 comes here */
	/* Port PRS comes here */
	/* Port TSDM comes here */
	/* Port CSDM comes here */
	/* Port USDM comes here */
	/* Port XSDM comes here */
	bnx2x_init_block(bp, port ? TSEM_PORT1_START : TSEM_PORT0_START,
			     port ? TSEM_PORT1_END : TSEM_PORT0_END);
	bnx2x_init_block(bp, port ? USEM_PORT1_START : USEM_PORT0_START,
			     port ? USEM_PORT1_END : USEM_PORT0_END);
	bnx2x_init_block(bp, port ? CSEM_PORT1_START : CSEM_PORT0_START,
			     port ? CSEM_PORT1_END : CSEM_PORT0_END);
	bnx2x_init_block(bp, port ? XSEM_PORT1_START : XSEM_PORT0_START,
			     port ? XSEM_PORT1_END : XSEM_PORT0_END);
	/* Port UPB comes here */
	/* Port XPB comes here */

	bnx2x_init_block(bp, port ? PBF_PORT1_START : PBF_PORT0_START,
			     port ? PBF_PORT1_END : PBF_PORT0_END);

	/* configure PBF to work without PAUSE mtu 9000 */
	REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);

	/* update threshold */
	REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
	/* update init credit */
	REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);

	/* probe changes */
	REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
	msleep(5);
	REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);

#ifdef BCM_ISCSI
	/* tell the searcher where the T2 table is */
	REG_WR(bp, SRC_REG_COUNTFREE0 + func*4, 16*1024/64);

	wb_write[0] = U64_LO(bp->t2_mapping);
	wb_write[1] = U64_HI(bp->t2_mapping);
	REG_WR_DMAE(bp, SRC_REG_FIRSTFREE0 + func*4, wb_write, 2);
	wb_write[0] = U64_LO((u64)bp->t2_mapping + 16*1024 - 64);
	wb_write[1] = U64_HI((u64)bp->t2_mapping + 16*1024 - 64);
	REG_WR_DMAE(bp, SRC_REG_LASTFREE0 + func*4, wb_write, 2);

	REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + func*4, 10);
	/* Port SRCH comes here */
#endif
	/* Port CDU comes here */
	/* Port CFC comes here */

	if (CHIP_IS_E1(bp)) {
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
	}
	bnx2x_init_block(bp, port ? HC_PORT1_START : HC_PORT0_START,
			     port ? HC_PORT1_END : HC_PORT0_END);

	bnx2x_init_block(bp, port ? MISC_AEU_PORT1_START :
				    MISC_AEU_PORT0_START,
			     port ? MISC_AEU_PORT1_END : MISC_AEU_PORT0_END);
	/* init aeu_mask_attn_func_0/1:
	 *  - SF mode: bits 3-7 are masked. only bits 0-2 are in use
	 *  - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
	 *             bits 4-7 are used for "per vn group attention" */
	REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
	       (IS_E1HMF(bp) ? 0xF7 : 0x7));

	/* Port PXPCS comes here */
	/* Port EMAC0 comes here */
	/* Port EMAC1 comes here */
	/* Port DBU comes here */
	/* Port DBG comes here */
	bnx2x_init_block(bp, port ? NIG_PORT1_START : NIG_PORT0_START,
			     port ? NIG_PORT1_END : NIG_PORT0_END);

	REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);

	if (CHIP_IS_E1H(bp)) {
		int wsum;
		struct cmng_struct_per_port m_cmng_port;
		int vn;

		/* 0x2 disable e1hov, 0x1 enable */
		REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
		       (IS_E1HMF(bp) ? 0x1 : 0x2));

		/* Init RATE SHAPING and FAIRNESS contexts.
		   Initialize as if there is 10G link. */
		wsum = bnx2x_calc_vn_wsum(bp);
		bnx2x_init_port_minmax(bp, (int)wsum, 10000, &m_cmng_port);
		for (vn = VN_0; vn < E1HVN_MAX; vn++)
			bnx2x_init_vn_minmax(bp, 2*vn + port,
					     wsum, 10000, &m_cmng_port);
	}

	/* Port MCP comes here */
	/* Port DMAE comes here */

	switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
	case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1021G:
	case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
		/* add SPIO 5 to group 0 */
		val = REG_RD(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
		val |= AEU_INPUTS_ATTN_BITS_SPIO5;
		REG_WR(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0, val);
		break;

	default:
		break;
	}

	bnx2x__link_reset(bp);

	return 0;
}
#define ILT_PER_FUNC		(768/2)
#define FUNC_ILT_BASE(func)	(func * ILT_PER_FUNC)
/* the phys address is shifted right 12 bits and has an added
   1=valid bit added to the 53rd bit
   then since this is a wide register(TM)
   we split it into two 32 bit writes
 */
#define ONCHIP_ADDR1(x)		((u32)(((u64)x >> 12) & 0xFFFFFFFF))
#define ONCHIP_ADDR2(x)		((u32)((1 << 20) | ((u64)x >> 44)))
#define PXP_ONE_ILT(x)		(((x) << 10) | x)
#define PXP_ILT_RANGE(f, l)	(((l) << 10) | f)

#define CNIC_ILT_LINES		0
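/* Worked example (illustrative address only): for a 4K-aligned buffer at
 * physical address 0x1234567000,
 *	ONCHIP_ADDR1() = (addr >> 12) & 0xFFFFFFFF = 0x01234567
 *	ONCHIP_ADDR2() = (1 << 20) | (addr >> 44)  = 0x00100000
 * i.e. the low write carries address bits 12-43 and the high write carries
 * the remaining bits plus the "valid" bit described in the comment above.
 */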
static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
{
	int reg;

	if (CHIP_IS_E1H(bp))
		reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
	else /* E1 */
		reg = PXP2_REG_RQ_ONCHIP_AT + index*8;

	bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
}
static int bnx2x_init_func(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int i;

	DP(BNX2X_MSG_MCP, "starting func init  func %x\n", func);

	i = FUNC_ILT_BASE(func);

	bnx2x_ilt_wr(bp, i, bnx2x_sp_mapping(bp, context));
	if (CHIP_IS_E1H(bp)) {
		REG_WR(bp, PXP2_REG_RQ_CDU_FIRST_ILT, i);
		REG_WR(bp, PXP2_REG_RQ_CDU_LAST_ILT, i + CNIC_ILT_LINES);
	} else /* E1 */
		REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4,
		       PXP_ILT_RANGE(i, i + CNIC_ILT_LINES));

	if (CHIP_IS_E1H(bp)) {
		for (i = 0; i < 9; i++)
			bnx2x_init_block(bp,
					 cm_start[func][i], cm_end[func][i]);

		REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
		REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov);
	}

	/* HC init per function */
	if (CHIP_IS_E1H(bp)) {
		REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);

		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
	}
	bnx2x_init_block(bp, hc_limits[func][0], hc_limits[func][1]);

	if (CHIP_IS_E1H(bp))
		REG_WR(bp, HC_REG_FUNC_NUM_P0 + port*4, func);

	/* Reset PCIE errors for debug */
	REG_WR(bp, 0x2114, 0xffffffff);
	REG_WR(bp, 0x2120, 0xffffffff);

	return 0;
}
static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
{
	int i, rc = 0;

	DP(BNX2X_MSG_MCP, "function %d  load_code %x\n",
	   BP_FUNC(bp), load_code);

	bp->dmae_ready = 0;
	mutex_init(&bp->dmae_mutex);
	bnx2x_gunzip_init(bp);

	switch (load_code) {
	case FW_MSG_CODE_DRV_LOAD_COMMON:
		rc = bnx2x_init_common(bp);
		if (rc)
			goto init_hw_err;
		/* no break */

	case FW_MSG_CODE_DRV_LOAD_PORT:
		bp->dmae_ready = 1;
		rc = bnx2x_init_port(bp);
		if (rc)
			goto init_hw_err;
		/* no break */

	case FW_MSG_CODE_DRV_LOAD_FUNCTION:
		bp->dmae_ready = 1;
		rc = bnx2x_init_func(bp);
		if (rc)
			goto init_hw_err;
		break;

	default:
		BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
		break;
	}

	if (!BP_NOMCP(bp)) {
		int func = BP_FUNC(bp);

		bp->fw_drv_pulse_wr_seq =
				(SHMEM_RD(bp, func_mb[func].drv_pulse_mb) &
				 DRV_PULSE_SEQ_MASK);
		bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
		DP(BNX2X_MSG_MCP, "drv_pulse 0x%x  func_stx 0x%x\n",
		   bp->fw_drv_pulse_wr_seq, bp->func_stx);
	}

	/* this needs to be done before gunzip end */
	bnx2x_zero_def_sb(bp);
	for_each_queue(bp, i)
		bnx2x_zero_sb(bp, BP_L_ID(bp) + i);

init_hw_err:
	bnx2x_gunzip_end(bp);

	return rc;
}
/* send the MCP a request, block until there is a reply */
static u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
{
	int func = BP_FUNC(bp);
	u32 seq = ++bp->fw_seq;
	u32 rc = 0;
	u32 cnt = 1;
	u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;

	SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
	DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));

	do {
		/* let the FW do it's magic ... */
		msleep(delay);

		rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);

		/* Give the FW up to 2 second (200*10ms) */
	} while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 200));

	DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
	   cnt*delay, rc, seq);

	/* is this a reply to our command? */
	if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK)) {
		rc &= FW_MSG_CODE_MASK;

	} else {
		/* FW BUG! */
		BNX2X_ERR("FW failed to respond!\n");
		rc = 0;
	}

	return rc;
}
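/* A caller typically checks the returned FW_MSG_CODE_* against the request it
 * issued; sketch only (DRV_MSG_CODE_LOAD_REQ is used elsewhere in this driver,
 * the error handling shown here is illustrative):
 *
 *	load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
 *	if (!load_code) {
 *		BNX2X_ERR("MCP response failure, aborting\n");
 *		return -EBUSY;
 *	}
 */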
static void bnx2x_free_mem(struct bnx2x *bp)
{

#define BNX2X_PCI_FREE(x, y, size) \
	do { \
		if (x) { \
			pci_free_consistent(bp->pdev, size, x, y); \
			x = NULL; \
			y = 0; \
		} \
	} while (0)

#define BNX2X_FREE(x) \
	do { \
		if (x) { \
			vfree(x); \
			x = NULL; \
		} \
	} while (0)

	int i;

	/* fastpath */
	for_each_queue(bp, i) {

		/* Status blocks */
		BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
			       bnx2x_fp(bp, i, status_blk_mapping),
			       sizeof(struct host_status_block) +
			       sizeof(struct eth_tx_db_data));

		/* fast path rings: tx_buf tx_desc rx_buf rx_desc rx_comp */
		BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
		BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
			       bnx2x_fp(bp, i, tx_desc_mapping),
			       sizeof(struct eth_tx_bd) * NUM_TX_BD);

		BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
		BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
			       bnx2x_fp(bp, i, rx_desc_mapping),
			       sizeof(struct eth_rx_bd) * NUM_RX_BD);

		BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
			       bnx2x_fp(bp, i, rx_comp_mapping),
			       sizeof(struct eth_fast_path_rx_cqe) *
			       NUM_RCQ_BD);

		/* SGE ring */
		BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
		BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
			       bnx2x_fp(bp, i, rx_sge_mapping),
			       BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
	}
	/* end of fastpath */

	BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
		       sizeof(struct host_def_status_block));

	BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
		       sizeof(struct bnx2x_slowpath));

	BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
	BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
	BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
	BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);

	BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);

#undef BNX2X_PCI_FREE
#undef BNX2X_FREE
}
static int bnx2x_alloc_mem(struct bnx2x *bp)
{

#define BNX2X_PCI_ALLOC(x, y, size) \
	do { \
		x = pci_alloc_consistent(bp->pdev, size, y); \
		if (x == NULL) \
			goto alloc_mem_err; \
		memset(x, 0, size); \
	} while (0)

#define BNX2X_ALLOC(x, size) \
	do { \
		x = vmalloc(size); \
		if (x == NULL) \
			goto alloc_mem_err; \
		memset(x, 0, size); \
	} while (0)

	int i;

	/* fastpath */
	for_each_queue(bp, i) {
		bnx2x_fp(bp, i, bp) = bp;

		/* Status blocks */
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
				&bnx2x_fp(bp, i, status_blk_mapping),
				sizeof(struct host_status_block) +
				sizeof(struct eth_tx_db_data));

		bnx2x_fp(bp, i, hw_tx_prods) =
				(void *)(bnx2x_fp(bp, i, status_blk) + 1);

		bnx2x_fp(bp, i, tx_prods_mapping) =
				bnx2x_fp(bp, i, status_blk_mapping) +
				sizeof(struct host_status_block);

		/* fast path rings: tx_buf tx_desc rx_buf rx_desc rx_comp */
		BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
			    sizeof(struct sw_tx_bd) * NUM_TX_BD);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
				&bnx2x_fp(bp, i, tx_desc_mapping),
				sizeof(struct eth_tx_bd) * NUM_TX_BD);

		BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
			    sizeof(struct sw_rx_bd) * NUM_RX_BD);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
				&bnx2x_fp(bp, i, rx_desc_mapping),
				sizeof(struct eth_rx_bd) * NUM_RX_BD);

		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
				&bnx2x_fp(bp, i, rx_comp_mapping),
				sizeof(struct eth_fast_path_rx_cqe) *
				NUM_RCQ_BD);

		/* SGE ring */
		BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
			    sizeof(struct sw_rx_page) * NUM_RX_SGE);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
				&bnx2x_fp(bp, i, rx_sge_mapping),
				BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
	}
	/* end of fastpath */

	BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
			sizeof(struct host_def_status_block));

	BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
			sizeof(struct bnx2x_slowpath));

	/* allocate searcher T1 table */
	BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);

	for (i = 0; i < 64*1024; i += 64) {
		*(u64 *)((char *)bp->t1 + i + 56) = 0x0UL;
		*(u64 *)((char *)bp->t1 + i + 3) = 0x0UL;
	}

	/* allocate searcher T2 table
	   we allocate 1/4 of alloc num for T2
	  (which is not entered into the ILT) */
	BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);

	/* Initialize T2 */
	for (i = 0; i < 16*1024; i += 64)
		*(u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;

	/* now fixup the last line in the block to point to the next block */
	*(u64 *)((char *)bp->t2 + 1024*16-8) = bp->t2_mapping;

	/* Timer block array (MAX_CONN*8) phys uncached for now 1024 conns */
	BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);

	/* QM queues (128*MAX_CONN) */
	BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);

	/* Slow path ring */
	BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);

	return 0;

alloc_mem_err:
	bnx2x_free_mem(bp);
	return -ENOMEM;

#undef BNX2X_PCI_ALLOC
#undef BNX2X_ALLOC
}
static void bnx2x_free_tx_skbs(struct bnx2x *bp)
{
	int i;

	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		u16 bd_cons = fp->tx_bd_cons;
		u16 sw_prod = fp->tx_pkt_prod;
		u16 sw_cons = fp->tx_pkt_cons;

		while (sw_cons != sw_prod) {
			bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
			sw_cons++;
		}
	}
}
static void bnx2x_free_rx_skbs(struct bnx2x *bp)
{
	int i, j;

	for_each_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		for (i = 0; i < NUM_RX_BD; i++) {
			struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
			struct sk_buff *skb = rx_buf->skb;

			if (skb == NULL)
				continue;

			pci_unmap_single(bp->pdev,
					 pci_unmap_addr(rx_buf, mapping),
					 bp->rx_buf_size,
					 PCI_DMA_FROMDEVICE);

			rx_buf->skb = NULL;
			dev_kfree_skb(skb);
		}
		if (!fp->disable_tpa)
			bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
					    ETH_MAX_AGGREGATION_QUEUES_E1 :
					    ETH_MAX_AGGREGATION_QUEUES_E1H);
	}
}
static void bnx2x_free_skbs(struct bnx2x *bp)
{
	bnx2x_free_tx_skbs(bp);
	bnx2x_free_rx_skbs(bp);
}
static void bnx2x_free_msix_irqs(struct bnx2x *bp)
{
	int i, offset = 1;

	free_irq(bp->msix_table[0].vector, bp->dev);
	DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
	   bp->msix_table[0].vector);

	for_each_queue(bp, i) {
		DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq  "
		   "state %x\n", i, bp->msix_table[i + offset].vector,
		   bnx2x_fp(bp, i, state));

		if (bnx2x_fp(bp, i, state) != BNX2X_FP_STATE_CLOSED)
			BNX2X_ERR("IRQ of fp #%d being freed while "
				  "state != closed\n", i);

		free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
	}
}
static void bnx2x_free_irq(struct bnx2x *bp)
{
	if (bp->flags & USING_MSIX_FLAG) {
		bnx2x_free_msix_irqs(bp);
		pci_disable_msix(bp->pdev);
		bp->flags &= ~USING_MSIX_FLAG;

	} else
		free_irq(bp->pdev->irq, bp->dev);
}
static int bnx2x_enable_msix(struct bnx2x *bp)
{
	int i, rc, offset;

	bp->msix_table[0].entry = 0;
	offset = 1;
	DP(NETIF_MSG_IFUP, "msix_table[0].entry = 0 (slowpath)\n");

	for_each_queue(bp, i) {
		int igu_vec = offset + i + BP_L_ID(bp);

		bp->msix_table[i + offset].entry = igu_vec;
		DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
		   "(fastpath #%u)\n", i + offset, igu_vec, i);
	}

	rc = pci_enable_msix(bp->pdev, &bp->msix_table[0],
			     bp->num_queues + offset);
	if (rc) {
		DP(NETIF_MSG_IFUP, "MSI-X is not attainable\n");
		return -1;
	}
	bp->flags |= USING_MSIX_FLAG;

	return 0;
}
static int bnx2x_req_msix_irqs(struct bnx2x *bp)
{
	int i, rc, offset = 1;

	rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
			 bp->dev->name, bp->dev);
	if (rc) {
		BNX2X_ERR("request sp irq failed\n");
		return -EBUSY;
	}

	for_each_queue(bp, i) {
		rc = request_irq(bp->msix_table[i + offset].vector,
				 bnx2x_msix_fp_int, 0,
				 bp->dev->name, &bp->fp[i]);
		if (rc) {
			BNX2X_ERR("request fp #%d irq failed  rc -%d\n",
				  i + 1, -rc);
			bnx2x_free_msix_irqs(bp);
			return -EBUSY;
		}

		bnx2x_fp(bp, i, state) = BNX2X_FP_STATE_IRQ;
	}

	return 0;
}
static int bnx2x_req_irq(struct bnx2x *bp)
{
	int rc;

	rc = request_irq(bp->pdev->irq, bnx2x_interrupt, IRQF_SHARED,
			 bp->dev->name, bp->dev);
	if (!rc)
		bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;

	return rc;
}
static void bnx2x_napi_enable(struct bnx2x *bp)
{
	int i;

	for_each_queue(bp, i)
		napi_enable(&bnx2x_fp(bp, i, napi));
}

static void bnx2x_napi_disable(struct bnx2x *bp)
{
	int i;

	for_each_queue(bp, i)
		napi_disable(&bnx2x_fp(bp, i, napi));
}
static void bnx2x_netif_start(struct bnx2x *bp)
{
	if (atomic_dec_and_test(&bp->intr_sem)) {
		if (netif_running(bp->dev)) {
			if (bp->state == BNX2X_STATE_OPEN)
				netif_wake_queue(bp->dev);
			bnx2x_napi_enable(bp);
			bnx2x_int_enable(bp);
		}
	}
}

static void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
{
	bnx2x_int_disable_sync(bp, disable_hw);
	if (netif_running(bp->dev)) {
		bnx2x_napi_disable(bp);
		netif_tx_disable(bp->dev);
		bp->dev->trans_start = jiffies;	/* prevent tx timeout */
	}
}
/*
 * Init service functions
 */

static void bnx2x_set_mac_addr_e1(struct bnx2x *bp, int set)
{
	struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
	int port = BP_PORT(bp);

	/* CAM allocation
	 * unicasts 0-31:port0 32-63:port1
	 * multicast 64-127:port0 128-191:port1
	 */
	config->hdr.length_6b = 2;
	config->hdr.offset = port ? 31 : 0;
	config->hdr.client_id = BP_CL_ID(bp);
	config->hdr.reserved1 = 0;

	/* primary MAC */
	config->config_table[0].cam_entry.msb_mac_addr =
					swab16(*(u16 *)&bp->dev->dev_addr[0]);
	config->config_table[0].cam_entry.middle_mac_addr =
					swab16(*(u16 *)&bp->dev->dev_addr[2]);
	config->config_table[0].cam_entry.lsb_mac_addr =
					swab16(*(u16 *)&bp->dev->dev_addr[4]);
	config->config_table[0].cam_entry.flags = cpu_to_le16(port);
	if (set)
		config->config_table[0].target_table_entry.flags = 0;
	else
		CAM_INVALIDATE(config->config_table[0]);
	config->config_table[0].target_table_entry.client_id = 0;
	config->config_table[0].target_table_entry.vlan_id = 0;

	DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)\n",
	   (set ? "setting" : "clearing"),
	   config->config_table[0].cam_entry.msb_mac_addr,
	   config->config_table[0].cam_entry.middle_mac_addr,
	   config->config_table[0].cam_entry.lsb_mac_addr);

	/* broadcast */
	config->config_table[1].cam_entry.msb_mac_addr = 0xffff;
	config->config_table[1].cam_entry.middle_mac_addr = 0xffff;
	config->config_table[1].cam_entry.lsb_mac_addr = 0xffff;
	config->config_table[1].cam_entry.flags = cpu_to_le16(port);
	if (set)
		config->config_table[1].target_table_entry.flags =
				TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
	else
		CAM_INVALIDATE(config->config_table[1]);
	config->config_table[1].target_table_entry.client_id = 0;
	config->config_table[1].target_table_entry.vlan_id = 0;

	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
		      U64_HI(bnx2x_sp_mapping(bp, mac_config)),
		      U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
}
6157 static void bnx2x_set_mac_addr_e1h(struct bnx2x
*bp
, int set
)
6159 struct mac_configuration_cmd_e1h
*config
=
6160 (struct mac_configuration_cmd_e1h
*)bnx2x_sp(bp
, mac_config
);
6162 if (set
&& (bp
->state
!= BNX2X_STATE_OPEN
)) {
6163 DP(NETIF_MSG_IFUP
, "state is %x, returning\n", bp
->state
);
6167 /* CAM allocation for E1H
6168 * unicasts: by func number
6169 * multicast: 20+FUNC*20, 20 each
6171 config
->hdr
.length_6b
= 1;
6172 config
->hdr
.offset
= BP_FUNC(bp
);
6173 config
->hdr
.client_id
= BP_CL_ID(bp
);
6174 config
->hdr
.reserved1
= 0;
6177 config
->config_table
[0].msb_mac_addr
=
6178 swab16(*(u16
*)&bp
->dev
->dev_addr
[0]);
6179 config
->config_table
[0].middle_mac_addr
=
6180 swab16(*(u16
*)&bp
->dev
->dev_addr
[2]);
6181 config
->config_table
[0].lsb_mac_addr
=
6182 swab16(*(u16
*)&bp
->dev
->dev_addr
[4]);
6183 config
->config_table
[0].client_id
= BP_L_ID(bp
);
6184 config
->config_table
[0].vlan_id
= 0;
6185 config
->config_table
[0].e1hov_id
= cpu_to_le16(bp
->e1hov
);
6187 config
->config_table
[0].flags
= BP_PORT(bp
);
6189 config
->config_table
[0].flags
=
6190 MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE
;
6192 DP(NETIF_MSG_IFUP
, "%s MAC (%04x:%04x:%04x) E1HOV %d CLID %d\n",
6193 (set
? "setting" : "clearing"),
6194 config
->config_table
[0].msb_mac_addr
,
6195 config
->config_table
[0].middle_mac_addr
,
6196 config
->config_table
[0].lsb_mac_addr
, bp
->e1hov
, BP_L_ID(bp
));
6198 bnx2x_sp_post(bp
, RAMROD_CMD_ID_ETH_SET_MAC
, 0,
6199 U64_HI(bnx2x_sp_mapping(bp
, mac_config
)),
6200 U64_LO(bnx2x_sp_mapping(bp
, mac_config
)), 0);
6203 static int bnx2x_wait_ramrod(struct bnx2x
*bp
, int state
, int idx
,
6204 int *state_p
, int poll
)
6206 /* can take a while if any port is running */
6209 DP(NETIF_MSG_IFUP
, "%s for state to become %x on IDX [%d]\n",
6210 poll
? "polling" : "waiting", state
, idx
);
6215 bnx2x_rx_int(bp
->fp
, 10);
6216 /* if index is different from 0
6217 * the reply for some commands will
6218 * be on the non default queue
6221 bnx2x_rx_int(&bp
->fp
[idx
], 10);
6224 mb(); /* state is changed by bnx2x_sp_event() */
6225 if (*state_p
== state
)
6232 BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
6233 poll
? "polling" : "waiting", state
, idx
);
6234 #ifdef BNX2X_STOP_ON_ERROR
6241 static int bnx2x_setup_leading(struct bnx2x
*bp
)
6245 /* reset IGU state */
6246 bnx2x_ack_sb(bp
, bp
->fp
[0].sb_id
, CSTORM_ID
, 0, IGU_INT_ENABLE
, 0);
6249 bnx2x_sp_post(bp
, RAMROD_CMD_ID_ETH_PORT_SETUP
, 0, 0, 0, 0);
6251 /* Wait for completion */
6252 rc
= bnx2x_wait_ramrod(bp
, BNX2X_STATE_OPEN
, 0, &(bp
->state
), 0);
6257 static int bnx2x_setup_multi(struct bnx2x
*bp
, int index
)
6259 /* reset IGU state */
6260 bnx2x_ack_sb(bp
, bp
->fp
[index
].sb_id
, CSTORM_ID
, 0, IGU_INT_ENABLE
, 0);
6263 bp
->fp
[index
].state
= BNX2X_FP_STATE_OPENING
;
6264 bnx2x_sp_post(bp
, RAMROD_CMD_ID_ETH_CLIENT_SETUP
, index
, 0, index
, 0);
6266 /* Wait for completion */
6267 return bnx2x_wait_ramrod(bp
, BNX2X_FP_STATE_OPEN
, index
,
6268 &(bp
->fp
[index
].state
), 0);
6271 static int bnx2x_poll(struct napi_struct
*napi
, int budget
);
6272 static void bnx2x_set_rx_mode(struct net_device
*dev
);
6274 /* must be called with rtnl_lock */
6275 static int bnx2x_nic_load(struct bnx2x
*bp
, int load_mode
)
6279 #ifdef BNX2X_STOP_ON_ERROR
6280 if (unlikely(bp
->panic
))
6284 bp
->state
= BNX2X_STATE_OPENING_WAIT4_LOAD
;
6286 /* Send LOAD_REQUEST command to MCP
6287 Returns the type of LOAD command:
6288 if it is the first port to be initialized
6289 common blocks should be initialized, otherwise - not
6291 if (!BP_NOMCP(bp
)) {
6292 load_code
= bnx2x_fw_command(bp
, DRV_MSG_CODE_LOAD_REQ
);
6294 BNX2X_ERR("MCP response failure, aborting\n");
6297 if (load_code
== FW_MSG_CODE_DRV_LOAD_REFUSED
)
6298 return -EBUSY
; /* other port in diagnostic mode */
6301 int port
= BP_PORT(bp
);
6303 DP(NETIF_MSG_IFUP
, "NO MCP load counts before us %d, %d, %d\n",
6304 load_count
[0], load_count
[1], load_count
[2]);
6306 load_count
[1 + port
]++;
6307 DP(NETIF_MSG_IFUP
, "NO MCP new load counts %d, %d, %d\n",
6308 load_count
[0], load_count
[1], load_count
[2]);
6309 if (load_count
[0] == 1)
6310 load_code
= FW_MSG_CODE_DRV_LOAD_COMMON
;
6311 else if (load_count
[1 + port
] == 1)
6312 load_code
= FW_MSG_CODE_DRV_LOAD_PORT
;
6314 load_code
= FW_MSG_CODE_DRV_LOAD_FUNCTION
;
6317 if ((load_code
== FW_MSG_CODE_DRV_LOAD_COMMON
) ||
6318 (load_code
== FW_MSG_CODE_DRV_LOAD_PORT
))
6322 DP(NETIF_MSG_LINK
, "pmf %d\n", bp
->port
.pmf
);
6324 /* if we can't use MSI-X we only need one fp,
6325 * so try to enable MSI-X with the requested number of fp's
6326 * and fallback to inta with one fp
6332 if ((use_multi
> 1) && (use_multi
<= BP_MAX_QUEUES(bp
)))
6333 /* user requested number */
6334 bp
->num_queues
= use_multi
;
6337 bp
->num_queues
= min_t(u32
, num_online_cpus(),
6342 if (bnx2x_enable_msix(bp
)) {
6343 /* failed to enable MSI-X */
6346 BNX2X_ERR("Multi requested but failed"
6347 " to enable MSI-X\n");
6351 "set number of queues to %d\n", bp
->num_queues
);
6353 if (bnx2x_alloc_mem(bp
))
6356 for_each_queue(bp
, i
)
6357 bnx2x_fp(bp
, i
, disable_tpa
) =
6358 ((bp
->flags
& TPA_ENABLE_FLAG
) == 0);
6360 if (bp
->flags
& USING_MSIX_FLAG
) {
6361 rc
= bnx2x_req_msix_irqs(bp
);
6363 pci_disable_msix(bp
->pdev
);
6368 rc
= bnx2x_req_irq(bp
);
6370 BNX2X_ERR("IRQ request failed, aborting\n");
6375 for_each_queue(bp
, i
)
6376 netif_napi_add(bp
->dev
, &bnx2x_fp(bp
, i
, napi
),
6380 rc
= bnx2x_init_hw(bp
, load_code
);
6382 BNX2X_ERR("HW init failed, aborting\n");
6383 goto load_int_disable
;
6386 /* Setup NIC internals and enable interrupts */
6387 bnx2x_nic_init(bp
, load_code
);
6389 /* Send LOAD_DONE command to MCP */
6390 if (!BP_NOMCP(bp
)) {
6391 load_code
= bnx2x_fw_command(bp
, DRV_MSG_CODE_LOAD_DONE
);
6393 BNX2X_ERR("MCP response failure, aborting\n");
6395 goto load_rings_free
;
6399 bnx2x_stats_init(bp
);
6401 bp
->state
= BNX2X_STATE_OPENING_WAIT4_PORT
;
6403 /* Enable Rx interrupt handling before sending the ramrod
6404 as it's completed on Rx FP queue */
6405 bnx2x_napi_enable(bp
);
6407 /* Enable interrupt handling */
6408 atomic_set(&bp
->intr_sem
, 0);
6410 rc
= bnx2x_setup_leading(bp
);
6412 BNX2X_ERR("Setup leading failed!\n");
6413 goto load_netif_stop
;
6416 if (CHIP_IS_E1H(bp
))
6417 if (bp
->mf_config
& FUNC_MF_CFG_FUNC_DISABLED
) {
6418 BNX2X_ERR("!!! mf_cfg function disabled\n");
6419 bp
->state
= BNX2X_STATE_DISABLED
;
6422 if (bp
->state
== BNX2X_STATE_OPEN
)
6423 for_each_nondefault_queue(bp
, i
) {
6424 rc
= bnx2x_setup_multi(bp
, i
);
6426 goto load_netif_stop
;
6430 bnx2x_set_mac_addr_e1(bp
, 1);
6432 bnx2x_set_mac_addr_e1h(bp
, 1);
6435 bnx2x_initial_phy_init(bp
);
6437 /* Start fast path */
6438 switch (load_mode
) {
6440 /* Tx queue should be only reenabled */
6441 netif_wake_queue(bp
->dev
);
6442 bnx2x_set_rx_mode(bp
->dev
);
6446 netif_start_queue(bp
->dev
);
6447 bnx2x_set_rx_mode(bp
->dev
);
6448 if (bp
->flags
& USING_MSIX_FLAG
)
6449 printk(KERN_INFO PFX
"%s: using MSI-X\n",
6454 bnx2x_set_rx_mode(bp
->dev
);
6455 bp
->state
= BNX2X_STATE_DIAG
;
6463 bnx2x__link_status_update(bp
);
6465 /* start the timer */
6466 mod_timer(&bp
->timer
, jiffies
+ bp
->current_interval
);
6472 bnx2x_napi_disable(bp
);
6474 /* Free SKBs, SGEs, TPA pool and driver internals */
6475 bnx2x_free_skbs(bp
);
6476 for_each_queue(bp
, i
)
6477 bnx2x_free_rx_sge_range(bp
, bp
->fp
+ i
, NUM_RX_SGE
);
6479 bnx2x_int_disable_sync(bp
, 1);
6486 /* TBD we really need to reset the chip
6487 if we want to recover from this */
6491 static int bnx2x_stop_multi(struct bnx2x
*bp
, int index
)
6495 /* halt the connection */
6496 bp
->fp
[index
].state
= BNX2X_FP_STATE_HALTING
;
6497 bnx2x_sp_post(bp
, RAMROD_CMD_ID_ETH_HALT
, index
, 0, index
, 0);
6499 /* Wait for completion */
6500 rc
= bnx2x_wait_ramrod(bp
, BNX2X_FP_STATE_HALTED
, index
,
6501 &(bp
->fp
[index
].state
), 1);
6502 if (rc
) /* timeout */
6505 /* delete cfc entry */
6506 bnx2x_sp_post(bp
, RAMROD_CMD_ID_ETH_CFC_DEL
, index
, 0, 0, 1);
6508 /* Wait for completion */
6509 rc
= bnx2x_wait_ramrod(bp
, BNX2X_FP_STATE_CLOSED
, index
,
6510 &(bp
->fp
[index
].state
), 1);
6514 static int bnx2x_stop_leading(struct bnx2x
*bp
)
6516 u16 dsb_sp_prod_idx
;
6517 /* if the other port is handling traffic,
6518 this can take a lot of time */
6524 /* Send HALT ramrod */
6525 bp
->fp
[0].state
= BNX2X_FP_STATE_HALTING
;
6526 bnx2x_sp_post(bp
, RAMROD_CMD_ID_ETH_HALT
, 0, 0, BP_CL_ID(bp
), 0);
6528 /* Wait for completion */
6529 rc
= bnx2x_wait_ramrod(bp
, BNX2X_FP_STATE_HALTED
, 0,
6530 &(bp
->fp
[0].state
), 1);
6531 if (rc
) /* timeout */
6534 dsb_sp_prod_idx
= *bp
->dsb_sp_prod
;
6536 /* Send PORT_DELETE ramrod */
6537 bnx2x_sp_post(bp
, RAMROD_CMD_ID_ETH_PORT_DEL
, 0, 0, 0, 1);
6539 /* Wait for completion to arrive on default status block
6540 we are going to reset the chip anyway
6541 so there is not much to do if this times out
6543 while (dsb_sp_prod_idx
== *bp
->dsb_sp_prod
) {
6545 DP(NETIF_MSG_IFDOWN
, "timeout waiting for port del "
6546 "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
6547 *bp
->dsb_sp_prod
, dsb_sp_prod_idx
);
6548 #ifdef BNX2X_STOP_ON_ERROR
6558 bp
->state
= BNX2X_STATE_CLOSING_WAIT4_UNLOAD
;
6559 bp
->fp
[0].state
= BNX2X_FP_STATE_CLOSED
;
6564 static void bnx2x_reset_func(struct bnx2x
*bp
)
6566 int port
= BP_PORT(bp
);
6567 int func
= BP_FUNC(bp
);
6571 REG_WR(bp
, HC_REG_LEADING_EDGE_0
+ port
*8, 0);
6572 REG_WR(bp
, HC_REG_TRAILING_EDGE_0
+ port
*8, 0);
6574 REG_WR(bp
, HC_REG_CONFIG_0
+ port
*4, 0x1000);
6577 base
= FUNC_ILT_BASE(func
);
6578 for (i
= base
; i
< base
+ ILT_PER_FUNC
; i
++)
6579 bnx2x_ilt_wr(bp
, i
, 0);
6582 static void bnx2x_reset_port(struct bnx2x
*bp
)
6584 int port
= BP_PORT(bp
);
6587 REG_WR(bp
, NIG_REG_MASK_INTERRUPT_PORT0
+ port
*4, 0);
6589 /* Do not rcv packets to BRB */
6590 REG_WR(bp
, NIG_REG_LLH0_BRB1_DRV_MASK
+ port
*4, 0x0);
6591 /* Do not direct rcv packets that are not for MCP to the BRB */
6592 REG_WR(bp
, (port
? NIG_REG_LLH1_BRB1_NOT_MCP
:
6593 NIG_REG_LLH0_BRB1_NOT_MCP
), 0x0);
6596 REG_WR(bp
, MISC_REG_AEU_MASK_ATTN_FUNC_0
+ port
*4, 0);
6599 /* Check for BRB port occupancy */
6600 val
= REG_RD(bp
, BRB1_REG_PORT_NUM_OCC_BLOCKS_0
+ port
*4);
6602 DP(NETIF_MSG_IFDOWN
,
6603 "BRB1 is not empty %d blocks are occupied\n", val
);
6605 /* TODO: Close Doorbell port? */
6608 static void bnx2x_reset_common(struct bnx2x
*bp
)
6611 REG_WR(bp
, GRCBASE_MISC
+ MISC_REGISTERS_RESET_REG_1_CLEAR
,
6613 REG_WR(bp
, GRCBASE_MISC
+ MISC_REGISTERS_RESET_REG_2_CLEAR
, 0x1403);
6616 static void bnx2x_reset_chip(struct bnx2x
*bp
, u32 reset_code
)
6618 DP(BNX2X_MSG_MCP
, "function %d reset_code %x\n",
6619 BP_FUNC(bp
), reset_code
);
6621 switch (reset_code
) {
6622 case FW_MSG_CODE_DRV_UNLOAD_COMMON
:
6623 bnx2x_reset_port(bp
);
6624 bnx2x_reset_func(bp
);
6625 bnx2x_reset_common(bp
);
6628 case FW_MSG_CODE_DRV_UNLOAD_PORT
:
6629 bnx2x_reset_port(bp
);
6630 bnx2x_reset_func(bp
);
6633 case FW_MSG_CODE_DRV_UNLOAD_FUNCTION
:
6634 bnx2x_reset_func(bp
);
6638 BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code
);
6643 /* must be called with rtnl_lock */
6644 static int bnx2x_nic_unload(struct bnx2x
*bp
, int unload_mode
)
6646 int port
= BP_PORT(bp
);
6650 bp
->state
= BNX2X_STATE_CLOSING_WAIT4_HALT
;
6652 bp
->rx_mode
= BNX2X_RX_MODE_NONE
;
6653 bnx2x_set_storm_rx_mode(bp
);
6655 bnx2x_netif_stop(bp
, 1);
6656 if (!netif_running(bp
->dev
))
6657 bnx2x_napi_disable(bp
);
6658 del_timer_sync(&bp
->timer
);
6659 SHMEM_WR(bp
, func_mb
[BP_FUNC(bp
)].drv_pulse_mb
,
6660 (DRV_PULSE_ALWAYS_ALIVE
| bp
->fw_drv_pulse_wr_seq
));
6661 bnx2x_stats_handle(bp
, STATS_EVENT_STOP
);
6663 /* Wait until tx fast path tasks complete */
6664 for_each_queue(bp
, i
) {
6665 struct bnx2x_fastpath
*fp
= &bp
->fp
[i
];
6669 while (BNX2X_HAS_TX_WORK(fp
)) {
6671 bnx2x_tx_int(fp
, 1000);
6673 BNX2X_ERR("timeout waiting for queue[%d]\n",
6675 #ifdef BNX2X_STOP_ON_ERROR
6687 /* Give HW time to discard old tx messages */
6693 if (CHIP_IS_E1(bp
)) {
6694 struct mac_configuration_cmd
*config
=
6695 bnx2x_sp(bp
, mcast_config
);
6697 bnx2x_set_mac_addr_e1(bp
, 0);
6699 for (i
= 0; i
< config
->hdr
.length_6b
; i
++)
6700 CAM_INVALIDATE(config
->config_table
[i
]);
6702 config
->hdr
.length_6b
= i
;
6703 if (CHIP_REV_IS_SLOW(bp
))
6704 config
->hdr
.offset
= BNX2X_MAX_EMUL_MULTI
*(1 + port
);
6706 config
->hdr
.offset
= BNX2X_MAX_MULTICAST
*(1 + port
);
6707 config
->hdr
.client_id
= BP_CL_ID(bp
);
6708 config
->hdr
.reserved1
= 0;
6710 bnx2x_sp_post(bp
, RAMROD_CMD_ID_ETH_SET_MAC
, 0,
6711 U64_HI(bnx2x_sp_mapping(bp
, mcast_config
)),
6712 U64_LO(bnx2x_sp_mapping(bp
, mcast_config
)), 0);
6715 REG_WR(bp
, NIG_REG_LLH0_FUNC_EN
+ port
*8, 0);
6717 bnx2x_set_mac_addr_e1h(bp
, 0);
6719 for (i
= 0; i
< MC_HASH_SIZE
; i
++)
6720 REG_WR(bp
, MC_HASH_OFFSET(bp
, i
), 0);
6723 if (unload_mode
== UNLOAD_NORMAL
)
6724 reset_code
= DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS
;
6726 else if (bp
->flags
& NO_WOL_FLAG
) {
6727 reset_code
= DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP
;
6728 if (CHIP_IS_E1H(bp
))
6729 REG_WR(bp
, MISC_REG_E1HMF_MODE
, 0);
6731 } else if (bp
->wol
) {
6732 u32 emac_base
= port
? GRCBASE_EMAC1
: GRCBASE_EMAC0
;
6733 u8
*mac_addr
= bp
->dev
->dev_addr
;
6735 /* The mac address is written to entries 1-4 to
6736 preserve entry 0 which is used by the PMF */
6737 u8 entry
= (BP_E1HVN(bp
) + 1)*8;
6739 val
= (mac_addr
[0] << 8) | mac_addr
[1];
6740 EMAC_WR(bp
, EMAC_REG_EMAC_MAC_MATCH
+ entry
, val
);
6742 val
= (mac_addr
[2] << 24) | (mac_addr
[3] << 16) |
6743 (mac_addr
[4] << 8) | mac_addr
[5];
6744 EMAC_WR(bp
, EMAC_REG_EMAC_MAC_MATCH
+ entry
+ 4, val
);
6746 reset_code
= DRV_MSG_CODE_UNLOAD_REQ_WOL_EN
;
6749 reset_code
= DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS
;
6751 /* Close multi and leading connections
6752 Completions for ramrods are collected in a synchronous way */
6753 for_each_nondefault_queue(bp
, i
)
6754 if (bnx2x_stop_multi(bp
, i
))
6757 rc
= bnx2x_stop_leading(bp
);
6759 BNX2X_ERR("Stop leading failed!\n");
6760 #ifdef BNX2X_STOP_ON_ERROR
6769 reset_code
= bnx2x_fw_command(bp
, reset_code
);
6771 DP(NETIF_MSG_IFDOWN
, "NO MCP load counts %d, %d, %d\n",
6772 load_count
[0], load_count
[1], load_count
[2]);
6774 load_count
[1 + port
]--;
6775 DP(NETIF_MSG_IFDOWN
, "NO MCP new load counts %d, %d, %d\n",
6776 load_count
[0], load_count
[1], load_count
[2]);
6777 if (load_count
[0] == 0)
6778 reset_code
= FW_MSG_CODE_DRV_UNLOAD_COMMON
;
6779 else if (load_count
[1 + port
] == 0)
6780 reset_code
= FW_MSG_CODE_DRV_UNLOAD_PORT
;
6782 reset_code
= FW_MSG_CODE_DRV_UNLOAD_FUNCTION
;
6785 if ((reset_code
== FW_MSG_CODE_DRV_UNLOAD_COMMON
) ||
6786 (reset_code
== FW_MSG_CODE_DRV_UNLOAD_PORT
))
6787 bnx2x__link_reset(bp
);
6789 /* Reset the chip */
6790 bnx2x_reset_chip(bp
, reset_code
);
6792 /* Report UNLOAD_DONE to MCP */
6794 bnx2x_fw_command(bp
, DRV_MSG_CODE_UNLOAD_DONE
);
6797 /* Free SKBs, SGEs, TPA pool and driver internals */
6798 bnx2x_free_skbs(bp
);
6799 for_each_queue(bp
, i
)
6800 bnx2x_free_rx_sge_range(bp
, bp
->fp
+ i
, NUM_RX_SGE
);
6803 bp
->state
= BNX2X_STATE_CLOSED
;
6805 netif_carrier_off(bp
->dev
);
6810 static void bnx2x_reset_task(struct work_struct
*work
)
6812 struct bnx2x
*bp
= container_of(work
, struct bnx2x
, reset_task
);
6814 #ifdef BNX2X_STOP_ON_ERROR
6815 BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
6816 " so reset not done to allow debug dump,\n"
6817 KERN_ERR
" you will need to reboot when done\n");
6823 if (!netif_running(bp
->dev
))
6824 goto reset_task_exit
;
6826 bnx2x_nic_unload(bp
, UNLOAD_NORMAL
);
6827 bnx2x_nic_load(bp
, LOAD_NORMAL
);
6833 /* end of nic load/unload */
6838 * Init service functions
6841 static void __devinit
bnx2x_undi_unload(struct bnx2x
*bp
)
6845 /* Check if there is any driver already loaded */
6846 val
= REG_RD(bp
, MISC_REG_UNPREPARED
);
6848 /* Check if it is the UNDI driver
6849 * UNDI driver initializes CID offset for normal bell to 0x7
6851 bnx2x_acquire_hw_lock(bp
, HW_LOCK_RESOURCE_UNDI
);
6852 val
= REG_RD(bp
, DORQ_REG_NORM_CID_OFST
);
6854 REG_WR(bp
, DORQ_REG_NORM_CID_OFST
, 0);
6855 bnx2x_release_hw_lock(bp
, HW_LOCK_RESOURCE_UNDI
);
6858 u32 reset_code
= DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS
;
6860 int func
= BP_FUNC(bp
);
6864 BNX2X_DEV_INFO("UNDI is active! reset device\n");
6866 /* try unload UNDI on port 0 */
6869 (SHMEM_RD(bp
, func_mb
[bp
->func
].drv_mb_header
) &
6870 DRV_MSG_SEQ_NUMBER_MASK
);
6871 reset_code
= bnx2x_fw_command(bp
, reset_code
);
6873 /* if UNDI is loaded on the other port */
6874 if (reset_code
!= FW_MSG_CODE_DRV_UNLOAD_COMMON
) {
6876 /* send "DONE" for previous unload */
6877 bnx2x_fw_command(bp
, DRV_MSG_CODE_UNLOAD_DONE
);
6879 /* unload UNDI on port 1 */
6882 (SHMEM_RD(bp
, func_mb
[bp
->func
].drv_mb_header
) &
6883 DRV_MSG_SEQ_NUMBER_MASK
);
6884 reset_code
= DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS
;
6886 bnx2x_fw_command(bp
, reset_code
);
6889 REG_WR(bp
, (BP_PORT(bp
) ? HC_REG_CONFIG_1
:
6890 HC_REG_CONFIG_0
), 0x1000);
6892 /* close input traffic and wait for it */
6893 /* Do not rcv packets to BRB */
6895 (BP_PORT(bp
) ? NIG_REG_LLH1_BRB1_DRV_MASK
:
6896 NIG_REG_LLH0_BRB1_DRV_MASK
), 0x0);
6897 /* Do not direct rcv packets that are not for MCP to
6900 (BP_PORT(bp
) ? NIG_REG_LLH1_BRB1_NOT_MCP
:
6901 NIG_REG_LLH0_BRB1_NOT_MCP
), 0x0);
6904 (BP_PORT(bp
) ? MISC_REG_AEU_MASK_ATTN_FUNC_1
:
6905 MISC_REG_AEU_MASK_ATTN_FUNC_0
), 0);
6908 /* save NIG port swap info */
6909 swap_val
= REG_RD(bp
, NIG_REG_PORT_SWAP
);
6910 swap_en
= REG_RD(bp
, NIG_REG_STRAP_OVERRIDE
);
6913 GRCBASE_MISC
+ MISC_REGISTERS_RESET_REG_1_CLEAR
,
6916 GRCBASE_MISC
+ MISC_REGISTERS_RESET_REG_2_CLEAR
,
6918 /* take the NIG out of reset and restore swap values */
6920 GRCBASE_MISC
+ MISC_REGISTERS_RESET_REG_1_SET
,
6921 MISC_REGISTERS_RESET_REG_1_RST_NIG
);
6922 REG_WR(bp
, NIG_REG_PORT_SWAP
, swap_val
);
6923 REG_WR(bp
, NIG_REG_STRAP_OVERRIDE
, swap_en
);
6925 /* send unload done to the MCP */
6926 bnx2x_fw_command(bp
, DRV_MSG_CODE_UNLOAD_DONE
);
6928 /* restore our func and fw_seq */
6931 (SHMEM_RD(bp
, func_mb
[bp
->func
].drv_mb_header
) &
6932 DRV_MSG_SEQ_NUMBER_MASK
);
6937 static void __devinit
bnx2x_get_common_hwinfo(struct bnx2x
*bp
)
6939 u32 val
, val2
, val3
, val4
, id
;
6942 /* Get the chip revision id and number. */
6943 /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
6944 val
= REG_RD(bp
, MISC_REG_CHIP_NUM
);
6945 id
= ((val
& 0xffff) << 16);
6946 val
= REG_RD(bp
, MISC_REG_CHIP_REV
);
6947 id
|= ((val
& 0xf) << 12);
6948 val
= REG_RD(bp
, MISC_REG_CHIP_METAL
);
6949 id
|= ((val
& 0xff) << 4);
6950 REG_RD(bp
, MISC_REG_BOND_ID
);
6952 bp
->common
.chip_id
= id
;
6953 bp
->link_params
.chip_id
= bp
->common
.chip_id
;
6954 BNX2X_DEV_INFO("chip ID is 0x%x\n", id
);
6956 val
= REG_RD(bp
, MCP_REG_MCPR_NVM_CFG4
);
6957 bp
->common
.flash_size
= (NVRAM_1MB_SIZE
<<
6958 (val
& MCPR_NVM_CFG4_FLASH_SIZE
));
6959 BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
6960 bp
->common
.flash_size
, bp
->common
.flash_size
);
6962 bp
->common
.shmem_base
= REG_RD(bp
, MISC_REG_SHARED_MEM_ADDR
);
6963 bp
->link_params
.shmem_base
= bp
->common
.shmem_base
;
6964 BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp
->common
.shmem_base
);
6966 if (!bp
->common
.shmem_base
||
6967 (bp
->common
.shmem_base
< 0xA0000) ||
6968 (bp
->common
.shmem_base
>= 0xC0000)) {
6969 BNX2X_DEV_INFO("MCP not active\n");
6970 bp
->flags
|= NO_MCP_FLAG
;
6974 val
= SHMEM_RD(bp
, validity_map
[BP_PORT(bp
)]);
6975 if ((val
& (SHR_MEM_VALIDITY_DEV_INFO
| SHR_MEM_VALIDITY_MB
))
6976 != (SHR_MEM_VALIDITY_DEV_INFO
| SHR_MEM_VALIDITY_MB
))
6977 BNX2X_ERR("BAD MCP validity signature\n");
6979 bp
->common
.hw_config
= SHMEM_RD(bp
, dev_info
.shared_hw_config
.config
);
6980 bp
->common
.board
= SHMEM_RD(bp
, dev_info
.shared_hw_config
.board
);
6982 BNX2X_DEV_INFO("hw_config 0x%08x board 0x%08x\n",
6983 bp
->common
.hw_config
, bp
->common
.board
);
6985 bp
->link_params
.hw_led_mode
= ((bp
->common
.hw_config
&
6986 SHARED_HW_CFG_LED_MODE_MASK
) >>
6987 SHARED_HW_CFG_LED_MODE_SHIFT
);
6989 val
= SHMEM_RD(bp
, dev_info
.bc_rev
) >> 8;
6990 bp
->common
.bc_ver
= val
;
6991 BNX2X_DEV_INFO("bc_ver %X\n", val
);
6992 if (val
< BNX2X_BC_VER
) {
6993 /* for now only warn
6994 * later we might need to enforce this */
6995 BNX2X_ERR("This driver needs bc_ver %X but found %X,"
6996 " please upgrade BC\n", BNX2X_BC_VER
, val
);
6999 if (BP_E1HVN(bp
) == 0) {
7000 pci_read_config_word(bp
->pdev
, bp
->pm_cap
+ PCI_PM_PMC
, &pmc
);
7001 bp
->flags
|= (pmc
& PCI_PM_CAP_PME_D3cold
) ? 0 : NO_WOL_FLAG
;
7003 /* no WOL capability for E1HVN != 0 */
7004 bp
->flags
|= NO_WOL_FLAG
;
7006 BNX2X_DEV_INFO("%sWoL capable\n",
7007 (bp
->flags
& NO_WOL_FLAG
) ? "Not " : "");
7009 val
= SHMEM_RD(bp
, dev_info
.shared_hw_config
.part_num
);
7010 val2
= SHMEM_RD(bp
, dev_info
.shared_hw_config
.part_num
[4]);
7011 val3
= SHMEM_RD(bp
, dev_info
.shared_hw_config
.part_num
[8]);
7012 val4
= SHMEM_RD(bp
, dev_info
.shared_hw_config
.part_num
[12]);
7014 printk(KERN_INFO PFX
"part number %X-%X-%X-%X\n",
7015 val
, val2
, val3
, val4
);
7018 static void __devinit
bnx2x_link_settings_supported(struct bnx2x
*bp
,
7021 int port
= BP_PORT(bp
);
7024 switch (switch_cfg
) {
7026 BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg
);
7029 SERDES_EXT_PHY_TYPE(bp
->link_params
.ext_phy_config
);
7030 switch (ext_phy_type
) {
7031 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT
:
7032 BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
7035 bp
->port
.supported
|= (SUPPORTED_10baseT_Half
|
7036 SUPPORTED_10baseT_Full
|
7037 SUPPORTED_100baseT_Half
|
7038 SUPPORTED_100baseT_Full
|
7039 SUPPORTED_1000baseT_Full
|
7040 SUPPORTED_2500baseX_Full
|
7045 SUPPORTED_Asym_Pause
);
7048 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482
:
7049 BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n",
7052 bp
->port
.supported
|= (SUPPORTED_10baseT_Half
|
7053 SUPPORTED_10baseT_Full
|
7054 SUPPORTED_100baseT_Half
|
7055 SUPPORTED_100baseT_Full
|
7056 SUPPORTED_1000baseT_Full
|
7061 SUPPORTED_Asym_Pause
);
7065 BNX2X_ERR("NVRAM config error. "
7066 "BAD SerDes ext_phy_config 0x%x\n",
7067 bp
->link_params
.ext_phy_config
);
7071 bp
->port
.phy_addr
= REG_RD(bp
, NIG_REG_SERDES0_CTRL_PHY_ADDR
+
7073 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp
->port
.phy_addr
);
7076 case SWITCH_CFG_10G
:
7077 BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg
);
7080 XGXS_EXT_PHY_TYPE(bp
->link_params
.ext_phy_config
);
7081 switch (ext_phy_type
) {
7082 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT
:
7083 BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
7086 bp
->port
.supported
|= (SUPPORTED_10baseT_Half
|
7087 SUPPORTED_10baseT_Full
|
7088 SUPPORTED_100baseT_Half
|
7089 SUPPORTED_100baseT_Full
|
7090 SUPPORTED_1000baseT_Full
|
7091 SUPPORTED_2500baseX_Full
|
7092 SUPPORTED_10000baseT_Full
|
7097 SUPPORTED_Asym_Pause
);
7100 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705
:
7101 BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n",
7104 bp
->port
.supported
|= (SUPPORTED_10000baseT_Full
|
7107 SUPPORTED_Asym_Pause
);
7110 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706
:
7111 BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n",
7114 bp
->port
.supported
|= (SUPPORTED_10000baseT_Full
|
7115 SUPPORTED_1000baseT_Full
|
7118 SUPPORTED_Asym_Pause
);
7121 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072
:
7122 BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n",
7125 bp
->port
.supported
|= (SUPPORTED_10000baseT_Full
|
7126 SUPPORTED_1000baseT_Full
|
7130 SUPPORTED_Asym_Pause
);
7133 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073
:
7134 BNX2X_DEV_INFO("ext_phy_type 0x%x (8073)\n",
7137 bp
->port
.supported
|= (SUPPORTED_10000baseT_Full
|
7138 SUPPORTED_2500baseX_Full
|
7139 SUPPORTED_1000baseT_Full
|
7143 SUPPORTED_Asym_Pause
);
7146 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101
:
7147 BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n",
7150 bp
->port
.supported
|= (SUPPORTED_10000baseT_Full
|
7154 SUPPORTED_Asym_Pause
);
7157 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE
:
7158 BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
7159 bp
->link_params
.ext_phy_config
);
7163 BNX2X_ERR("NVRAM config error. "
7164 "BAD XGXS ext_phy_config 0x%x\n",
7165 bp
->link_params
.ext_phy_config
);
7169 bp
->port
.phy_addr
= REG_RD(bp
, NIG_REG_XGXS0_CTRL_PHY_ADDR
+
7171 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp
->port
.phy_addr
);
7176 BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
7177 bp
->port
.link_config
);
7180 bp
->link_params
.phy_addr
= bp
->port
.phy_addr
;
7182 /* mask what we support according to speed_cap_mask */
7183 if (!(bp
->link_params
.speed_cap_mask
&
7184 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF
))
7185 bp
->port
.supported
&= ~SUPPORTED_10baseT_Half
;
7187 if (!(bp
->link_params
.speed_cap_mask
&
7188 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL
))
7189 bp
->port
.supported
&= ~SUPPORTED_10baseT_Full
;
7191 if (!(bp
->link_params
.speed_cap_mask
&
7192 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF
))
7193 bp
->port
.supported
&= ~SUPPORTED_100baseT_Half
;
7195 if (!(bp
->link_params
.speed_cap_mask
&
7196 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL
))
7197 bp
->port
.supported
&= ~SUPPORTED_100baseT_Full
;
7199 if (!(bp
->link_params
.speed_cap_mask
&
7200 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G
))
7201 bp
->port
.supported
&= ~(SUPPORTED_1000baseT_Half
|
7202 SUPPORTED_1000baseT_Full
);
7204 if (!(bp
->link_params
.speed_cap_mask
&
7205 PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G
))
7206 bp
->port
.supported
&= ~SUPPORTED_2500baseX_Full
;
7208 if (!(bp
->link_params
.speed_cap_mask
&
7209 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G
))
7210 bp
->port
.supported
&= ~SUPPORTED_10000baseT_Full
;
7212 BNX2X_DEV_INFO("supported 0x%x\n", bp
->port
.supported
);
7215 static void __devinit
bnx2x_link_settings_requested(struct bnx2x
*bp
)
7217 bp
->link_params
.req_duplex
= DUPLEX_FULL
;
7219 switch (bp
->port
.link_config
& PORT_FEATURE_LINK_SPEED_MASK
) {
7220 case PORT_FEATURE_LINK_SPEED_AUTO
:
7221 if (bp
->port
.supported
& SUPPORTED_Autoneg
) {
7222 bp
->link_params
.req_line_speed
= SPEED_AUTO_NEG
;
7223 bp
->port
.advertising
= bp
->port
.supported
;
7226 XGXS_EXT_PHY_TYPE(bp
->link_params
.ext_phy_config
);
7228 if ((ext_phy_type
==
7229 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705
) ||
7231 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706
)) {
7232 /* force 10G, no AN */
7233 bp
->link_params
.req_line_speed
= SPEED_10000
;
7234 bp
->port
.advertising
=
7235 (ADVERTISED_10000baseT_Full
|
7239 BNX2X_ERR("NVRAM config error. "
7240 "Invalid link_config 0x%x"
7241 " Autoneg not supported\n",
7242 bp
->port
.link_config
);
7247 case PORT_FEATURE_LINK_SPEED_10M_FULL
:
7248 if (bp
->port
.supported
& SUPPORTED_10baseT_Full
) {
7249 bp
->link_params
.req_line_speed
= SPEED_10
;
7250 bp
->port
.advertising
= (ADVERTISED_10baseT_Full
|
7253 BNX2X_ERR("NVRAM config error. "
7254 "Invalid link_config 0x%x"
7255 " speed_cap_mask 0x%x\n",
7256 bp
->port
.link_config
,
7257 bp
->link_params
.speed_cap_mask
);
7262 case PORT_FEATURE_LINK_SPEED_10M_HALF
:
7263 if (bp
->port
.supported
& SUPPORTED_10baseT_Half
) {
7264 bp
->link_params
.req_line_speed
= SPEED_10
;
7265 bp
->link_params
.req_duplex
= DUPLEX_HALF
;
7266 bp
->port
.advertising
= (ADVERTISED_10baseT_Half
|
7269 BNX2X_ERR("NVRAM config error. "
7270 "Invalid link_config 0x%x"
7271 " speed_cap_mask 0x%x\n",
7272 bp
->port
.link_config
,
7273 bp
->link_params
.speed_cap_mask
);
7278 case PORT_FEATURE_LINK_SPEED_100M_FULL
:
7279 if (bp
->port
.supported
& SUPPORTED_100baseT_Full
) {
7280 bp
->link_params
.req_line_speed
= SPEED_100
;
7281 bp
->port
.advertising
= (ADVERTISED_100baseT_Full
|
7284 BNX2X_ERR("NVRAM config error. "
7285 "Invalid link_config 0x%x"
7286 " speed_cap_mask 0x%x\n",
7287 bp
->port
.link_config
,
7288 bp
->link_params
.speed_cap_mask
);
7293 case PORT_FEATURE_LINK_SPEED_100M_HALF
:
7294 if (bp
->port
.supported
& SUPPORTED_100baseT_Half
) {
7295 bp
->link_params
.req_line_speed
= SPEED_100
;
7296 bp
->link_params
.req_duplex
= DUPLEX_HALF
;
7297 bp
->port
.advertising
= (ADVERTISED_100baseT_Half
|
7300 BNX2X_ERR("NVRAM config error. "
7301 "Invalid link_config 0x%x"
7302 " speed_cap_mask 0x%x\n",
7303 bp
->port
.link_config
,
7304 bp
->link_params
.speed_cap_mask
);
7309 case PORT_FEATURE_LINK_SPEED_1G
:
7310 if (bp
->port
.supported
& SUPPORTED_1000baseT_Full
) {
7311 bp
->link_params
.req_line_speed
= SPEED_1000
;
7312 bp
->port
.advertising
= (ADVERTISED_1000baseT_Full
|
7315 BNX2X_ERR("NVRAM config error. "
7316 "Invalid link_config 0x%x"
7317 " speed_cap_mask 0x%x\n",
7318 bp
->port
.link_config
,
7319 bp
->link_params
.speed_cap_mask
);
7324 case PORT_FEATURE_LINK_SPEED_2_5G
:
7325 if (bp
->port
.supported
& SUPPORTED_2500baseX_Full
) {
7326 bp
->link_params
.req_line_speed
= SPEED_2500
;
7327 bp
->port
.advertising
= (ADVERTISED_2500baseX_Full
|
7330 BNX2X_ERR("NVRAM config error. "
7331 "Invalid link_config 0x%x"
7332 " speed_cap_mask 0x%x\n",
7333 bp
->port
.link_config
,
7334 bp
->link_params
.speed_cap_mask
);
7339 case PORT_FEATURE_LINK_SPEED_10G_CX4
:
7340 case PORT_FEATURE_LINK_SPEED_10G_KX4
:
7341 case PORT_FEATURE_LINK_SPEED_10G_KR
:
7342 if (bp
->port
.supported
& SUPPORTED_10000baseT_Full
) {
7343 bp
->link_params
.req_line_speed
= SPEED_10000
;
7344 bp
->port
.advertising
= (ADVERTISED_10000baseT_Full
|
7347 BNX2X_ERR("NVRAM config error. "
7348 "Invalid link_config 0x%x"
7349 " speed_cap_mask 0x%x\n",
7350 bp
->port
.link_config
,
7351 bp
->link_params
.speed_cap_mask
);
7357 BNX2X_ERR("NVRAM config error. "
7358 "BAD link speed link_config 0x%x\n",
7359 bp
->port
.link_config
);
7360 bp
->link_params
.req_line_speed
= SPEED_AUTO_NEG
;
7361 bp
->port
.advertising
= bp
->port
.supported
;
7365 bp
->link_params
.req_flow_ctrl
= (bp
->port
.link_config
&
7366 PORT_FEATURE_FLOW_CONTROL_MASK
);
7367 if ((bp
->link_params
.req_flow_ctrl
== FLOW_CTRL_AUTO
) &&
7368 !(bp
->port
.supported
& SUPPORTED_Autoneg
))
7369 bp
->link_params
.req_flow_ctrl
= FLOW_CTRL_NONE
;
7371 BNX2X_DEV_INFO("req_line_speed %d req_duplex %d req_flow_ctrl 0x%x"
7372 " advertising 0x%x\n",
7373 bp
->link_params
.req_line_speed
,
7374 bp
->link_params
.req_duplex
,
7375 bp
->link_params
.req_flow_ctrl
, bp
->port
.advertising
);
7378 static void __devinit
bnx2x_get_port_hwinfo(struct bnx2x
*bp
)
7380 int port
= BP_PORT(bp
);
7383 bp
->link_params
.bp
= bp
;
7384 bp
->link_params
.port
= port
;
7386 bp
->link_params
.serdes_config
=
7387 SHMEM_RD(bp
, dev_info
.port_hw_config
[port
].serdes_config
);
7388 bp
->link_params
.lane_config
=
7389 SHMEM_RD(bp
, dev_info
.port_hw_config
[port
].lane_config
);
7390 bp
->link_params
.ext_phy_config
=
7392 dev_info
.port_hw_config
[port
].external_phy_config
);
7393 bp
->link_params
.speed_cap_mask
=
7395 dev_info
.port_hw_config
[port
].speed_capability_mask
);
7397 bp
->port
.link_config
=
7398 SHMEM_RD(bp
, dev_info
.port_feature_config
[port
].link_config
);
7400 BNX2X_DEV_INFO("serdes_config 0x%08x lane_config 0x%08x\n"
7401 KERN_INFO
" ext_phy_config 0x%08x speed_cap_mask 0x%08x"
7402 " link_config 0x%08x\n",
7403 bp
->link_params
.serdes_config
,
7404 bp
->link_params
.lane_config
,
7405 bp
->link_params
.ext_phy_config
,
7406 bp
->link_params
.speed_cap_mask
, bp
->port
.link_config
);
7408 bp
->link_params
.switch_cfg
= (bp
->port
.link_config
&
7409 PORT_FEATURE_CONNECTED_SWITCH_MASK
);
7410 bnx2x_link_settings_supported(bp
, bp
->link_params
.switch_cfg
);
7412 bnx2x_link_settings_requested(bp
);
7414 val2
= SHMEM_RD(bp
, dev_info
.port_hw_config
[port
].mac_upper
);
7415 val
= SHMEM_RD(bp
, dev_info
.port_hw_config
[port
].mac_lower
);
7416 bp
->dev
->dev_addr
[0] = (u8
)(val2
>> 8 & 0xff);
7417 bp
->dev
->dev_addr
[1] = (u8
)(val2
& 0xff);
7418 bp
->dev
->dev_addr
[2] = (u8
)(val
>> 24 & 0xff);
7419 bp
->dev
->dev_addr
[3] = (u8
)(val
>> 16 & 0xff);
7420 bp
->dev
->dev_addr
[4] = (u8
)(val
>> 8 & 0xff);
7421 bp
->dev
->dev_addr
[5] = (u8
)(val
& 0xff);
7422 memcpy(bp
->link_params
.mac_addr
, bp
->dev
->dev_addr
, ETH_ALEN
);
7423 memcpy(bp
->dev
->perm_addr
, bp
->dev
->dev_addr
, ETH_ALEN
);
7426 static int __devinit
bnx2x_get_hwinfo(struct bnx2x
*bp
)
7428 int func
= BP_FUNC(bp
);
7432 bnx2x_get_common_hwinfo(bp
);
7436 if (CHIP_IS_E1H(bp
)) {
7438 SHMEM_RD(bp
, mf_cfg
.func_mf_config
[func
].config
);
7440 val
= (SHMEM_RD(bp
, mf_cfg
.func_mf_config
[func
].e1hov_tag
) &
7441 FUNC_MF_CFG_E1HOV_TAG_MASK
);
7442 if (val
!= FUNC_MF_CFG_E1HOV_TAG_DEFAULT
) {
7446 BNX2X_DEV_INFO("MF mode E1HOV for func %d is %d "
7448 func
, bp
->e1hov
, bp
->e1hov
);
7450 BNX2X_DEV_INFO("Single function mode\n");
7452 BNX2X_ERR("!!! No valid E1HOV for func %d,"
7453 " aborting\n", func
);
7459 if (!BP_NOMCP(bp
)) {
7460 bnx2x_get_port_hwinfo(bp
);
7462 bp
->fw_seq
= (SHMEM_RD(bp
, func_mb
[func
].drv_mb_header
) &
7463 DRV_MSG_SEQ_NUMBER_MASK
);
7464 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp
->fw_seq
);
7468 val2
= SHMEM_RD(bp
, mf_cfg
.func_mf_config
[func
].mac_upper
);
7469 val
= SHMEM_RD(bp
, mf_cfg
.func_mf_config
[func
].mac_lower
);
7470 if ((val2
!= FUNC_MF_CFG_UPPERMAC_DEFAULT
) &&
7471 (val
!= FUNC_MF_CFG_LOWERMAC_DEFAULT
)) {
7472 bp
->dev
->dev_addr
[0] = (u8
)(val2
>> 8 & 0xff);
7473 bp
->dev
->dev_addr
[1] = (u8
)(val2
& 0xff);
7474 bp
->dev
->dev_addr
[2] = (u8
)(val
>> 24 & 0xff);
7475 bp
->dev
->dev_addr
[3] = (u8
)(val
>> 16 & 0xff);
7476 bp
->dev
->dev_addr
[4] = (u8
)(val
>> 8 & 0xff);
7477 bp
->dev
->dev_addr
[5] = (u8
)(val
& 0xff);
7478 memcpy(bp
->link_params
.mac_addr
, bp
->dev
->dev_addr
,
7480 memcpy(bp
->dev
->perm_addr
, bp
->dev
->dev_addr
,
7488 /* only supposed to happen on emulation/FPGA */
7489 BNX2X_ERR("warning random MAC workaround active\n");
7490 random_ether_addr(bp
->dev
->dev_addr
);
7491 memcpy(bp
->dev
->perm_addr
, bp
->dev
->dev_addr
, ETH_ALEN
);
7497 static int __devinit
bnx2x_init_bp(struct bnx2x
*bp
)
7499 int func
= BP_FUNC(bp
);
7502 /* Disable interrupt handling until HW is initialized */
7503 atomic_set(&bp
->intr_sem
, 1);
7505 mutex_init(&bp
->port
.phy_mutex
);
7507 INIT_WORK(&bp
->sp_task
, bnx2x_sp_task
);
7508 INIT_WORK(&bp
->reset_task
, bnx2x_reset_task
);
7510 rc
= bnx2x_get_hwinfo(bp
);
7512 /* need to reset chip if undi was active */
7514 bnx2x_undi_unload(bp
);
7516 if (CHIP_REV_IS_FPGA(bp
))
7517 printk(KERN_ERR PFX
"FPGA detected\n");
7519 if (BP_NOMCP(bp
) && (func
== 0))
7521 "MCP disabled, must load devices in order!\n");
7525 bp
->flags
&= ~TPA_ENABLE_FLAG
;
7526 bp
->dev
->features
&= ~NETIF_F_LRO
;
7528 bp
->flags
|= TPA_ENABLE_FLAG
;
7529 bp
->dev
->features
|= NETIF_F_LRO
;
7533 bp
->tx_ring_size
= MAX_TX_AVAIL
;
7534 bp
->rx_ring_size
= MAX_RX_AVAIL
;
7542 bp
->timer_interval
= (CHIP_REV_IS_SLOW(bp
) ? 5*HZ
: HZ
);
7543 bp
->current_interval
= (poll
? poll
: bp
->timer_interval
);
7545 init_timer(&bp
->timer
);
7546 bp
->timer
.expires
= jiffies
+ bp
->current_interval
;
7547 bp
->timer
.data
= (unsigned long) bp
;
7548 bp
->timer
.function
= bnx2x_timer
;
7554 * ethtool service functions
7557 /* All ethtool functions called with rtnl_lock */
7559 static int bnx2x_get_settings(struct net_device
*dev
, struct ethtool_cmd
*cmd
)
7561 struct bnx2x
*bp
= netdev_priv(dev
);
7563 cmd
->supported
= bp
->port
.supported
;
7564 cmd
->advertising
= bp
->port
.advertising
;
7566 if (netif_carrier_ok(dev
)) {
7567 cmd
->speed
= bp
->link_vars
.line_speed
;
7568 cmd
->duplex
= bp
->link_vars
.duplex
;
7570 cmd
->speed
= bp
->link_params
.req_line_speed
;
7571 cmd
->duplex
= bp
->link_params
.req_duplex
;
7576 vn_max_rate
= ((bp
->mf_config
& FUNC_MF_CFG_MAX_BW_MASK
) >>
7577 FUNC_MF_CFG_MAX_BW_SHIFT
) * 100;
7578 if (vn_max_rate
< cmd
->speed
)
7579 cmd
->speed
= vn_max_rate
;
7582 if (bp
->link_params
.switch_cfg
== SWITCH_CFG_10G
) {
7584 XGXS_EXT_PHY_TYPE(bp
->link_params
.ext_phy_config
);
7586 switch (ext_phy_type
) {
7587 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT
:
7588 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705
:
7589 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706
:
7590 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072
:
7591 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073
:
7592 cmd
->port
= PORT_FIBRE
;
7595 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101
:
7596 cmd
->port
= PORT_TP
;
7599 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE
:
7600 BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
7601 bp
->link_params
.ext_phy_config
);
7605 DP(NETIF_MSG_LINK
, "BAD XGXS ext_phy_config 0x%x\n",
7606 bp
->link_params
.ext_phy_config
);
7610 cmd
->port
= PORT_TP
;
7612 cmd
->phy_address
= bp
->port
.phy_addr
;
7613 cmd
->transceiver
= XCVR_INTERNAL
;
7615 if (bp
->link_params
.req_line_speed
== SPEED_AUTO_NEG
)
7616 cmd
->autoneg
= AUTONEG_ENABLE
;
7618 cmd
->autoneg
= AUTONEG_DISABLE
;
7623 DP(NETIF_MSG_LINK
, "ethtool_cmd: cmd %d\n"
7624 DP_LEVEL
" supported 0x%x advertising 0x%x speed %d\n"
7625 DP_LEVEL
" duplex %d port %d phy_address %d transceiver %d\n"
7626 DP_LEVEL
" autoneg %d maxtxpkt %d maxrxpkt %d\n",
7627 cmd
->cmd
, cmd
->supported
, cmd
->advertising
, cmd
->speed
,
7628 cmd
->duplex
, cmd
->port
, cmd
->phy_address
, cmd
->transceiver
,
7629 cmd
->autoneg
, cmd
->maxtxpkt
, cmd
->maxrxpkt
);
7634 static int bnx2x_set_settings(struct net_device
*dev
, struct ethtool_cmd
*cmd
)
7636 struct bnx2x
*bp
= netdev_priv(dev
);
7642 DP(NETIF_MSG_LINK
, "ethtool_cmd: cmd %d\n"
7643 DP_LEVEL
" supported 0x%x advertising 0x%x speed %d\n"
7644 DP_LEVEL
" duplex %d port %d phy_address %d transceiver %d\n"
7645 DP_LEVEL
" autoneg %d maxtxpkt %d maxrxpkt %d\n",
7646 cmd
->cmd
, cmd
->supported
, cmd
->advertising
, cmd
->speed
,
7647 cmd
->duplex
, cmd
->port
, cmd
->phy_address
, cmd
->transceiver
,
7648 cmd
->autoneg
, cmd
->maxtxpkt
, cmd
->maxrxpkt
);
7650 if (cmd
->autoneg
== AUTONEG_ENABLE
) {
7651 if (!(bp
->port
.supported
& SUPPORTED_Autoneg
)) {
7652 DP(NETIF_MSG_LINK
, "Autoneg not supported\n");
7656 /* advertise the requested speed and duplex if supported */
7657 cmd
->advertising
&= bp
->port
.supported
;
7659 bp
->link_params
.req_line_speed
= SPEED_AUTO_NEG
;
7660 bp
->link_params
.req_duplex
= DUPLEX_FULL
;
7661 bp
->port
.advertising
|= (ADVERTISED_Autoneg
|
7664 } else { /* forced speed */
7665 /* advertise the requested speed and duplex if supported */
7666 switch (cmd
->speed
) {
7668 if (cmd
->duplex
== DUPLEX_FULL
) {
7669 if (!(bp
->port
.supported
&
7670 SUPPORTED_10baseT_Full
)) {
7672 "10M full not supported\n");
7676 advertising
= (ADVERTISED_10baseT_Full
|
7679 if (!(bp
->port
.supported
&
7680 SUPPORTED_10baseT_Half
)) {
7682 "10M half not supported\n");
7686 advertising
= (ADVERTISED_10baseT_Half
|
7692 if (cmd
->duplex
== DUPLEX_FULL
) {
7693 if (!(bp
->port
.supported
&
7694 SUPPORTED_100baseT_Full
)) {
7696 "100M full not supported\n");
7700 advertising
= (ADVERTISED_100baseT_Full
|
7703 if (!(bp
->port
.supported
&
7704 SUPPORTED_100baseT_Half
)) {
7706 "100M half not supported\n");
7710 advertising
= (ADVERTISED_100baseT_Half
|
7716 if (cmd
->duplex
!= DUPLEX_FULL
) {
7717 DP(NETIF_MSG_LINK
, "1G half not supported\n");
7721 if (!(bp
->port
.supported
& SUPPORTED_1000baseT_Full
)) {
7722 DP(NETIF_MSG_LINK
, "1G full not supported\n");
7726 advertising
= (ADVERTISED_1000baseT_Full
|
7731 if (cmd
->duplex
!= DUPLEX_FULL
) {
7733 "2.5G half not supported\n");
7737 if (!(bp
->port
.supported
& SUPPORTED_2500baseX_Full
)) {
7739 "2.5G full not supported\n");
7743 advertising
= (ADVERTISED_2500baseX_Full
|
7748 if (cmd
->duplex
!= DUPLEX_FULL
) {
7749 DP(NETIF_MSG_LINK
, "10G half not supported\n");
7753 if (!(bp
->port
.supported
& SUPPORTED_10000baseT_Full
)) {
7754 DP(NETIF_MSG_LINK
, "10G full not supported\n");
7758 advertising
= (ADVERTISED_10000baseT_Full
|
7763 DP(NETIF_MSG_LINK
, "Unsupported speed\n");
7767 bp
->link_params
.req_line_speed
= cmd
->speed
;
7768 bp
->link_params
.req_duplex
= cmd
->duplex
;
7769 bp
->port
.advertising
= advertising
;
7772 DP(NETIF_MSG_LINK
, "req_line_speed %d\n"
7773 DP_LEVEL
" req_duplex %d advertising 0x%x\n",
7774 bp
->link_params
.req_line_speed
, bp
->link_params
.req_duplex
,
7775 bp
->port
.advertising
);
7777 if (netif_running(dev
)) {
7778 bnx2x_stats_handle(bp
, STATS_EVENT_STOP
);
7785 #define PHY_FW_VER_LEN 10
7787 static void bnx2x_get_drvinfo(struct net_device
*dev
,
7788 struct ethtool_drvinfo
*info
)
7790 struct bnx2x
*bp
= netdev_priv(dev
);
7791 u8 phy_fw_ver
[PHY_FW_VER_LEN
];
7793 strcpy(info
->driver
, DRV_MODULE_NAME
);
7794 strcpy(info
->version
, DRV_MODULE_VERSION
);
7796 phy_fw_ver
[0] = '\0';
7798 bnx2x_acquire_phy_lock(bp
);
7799 bnx2x_get_ext_phy_fw_version(&bp
->link_params
,
7800 (bp
->state
!= BNX2X_STATE_CLOSED
),
7801 phy_fw_ver
, PHY_FW_VER_LEN
);
7802 bnx2x_release_phy_lock(bp
);
7805 snprintf(info
->fw_version
, 32, "BC:%d.%d.%d%s%s",
7806 (bp
->common
.bc_ver
& 0xff0000) >> 16,
7807 (bp
->common
.bc_ver
& 0xff00) >> 8,
7808 (bp
->common
.bc_ver
& 0xff),
7809 ((phy_fw_ver
[0] != '\0') ? " PHY:" : ""), phy_fw_ver
);
7810 strcpy(info
->bus_info
, pci_name(bp
->pdev
));
7811 info
->n_stats
= BNX2X_NUM_STATS
;
7812 info
->testinfo_len
= BNX2X_NUM_TESTS
;
7813 info
->eedump_len
= bp
->common
.flash_size
;
7814 info
->regdump_len
= 0;
7817 static void bnx2x_get_wol(struct net_device
*dev
, struct ethtool_wolinfo
*wol
)
7819 struct bnx2x
*bp
= netdev_priv(dev
);
7821 if (bp
->flags
& NO_WOL_FLAG
) {
7825 wol
->supported
= WAKE_MAGIC
;
7827 wol
->wolopts
= WAKE_MAGIC
;
7831 memset(&wol
->sopass
, 0, sizeof(wol
->sopass
));
7834 static int bnx2x_set_wol(struct net_device
*dev
, struct ethtool_wolinfo
*wol
)
7836 struct bnx2x
*bp
= netdev_priv(dev
);
7838 if (wol
->wolopts
& ~WAKE_MAGIC
)
7841 if (wol
->wolopts
& WAKE_MAGIC
) {
7842 if (bp
->flags
& NO_WOL_FLAG
)
7852 static u32
bnx2x_get_msglevel(struct net_device
*dev
)
7854 struct bnx2x
*bp
= netdev_priv(dev
);
7856 return bp
->msglevel
;
7859 static void bnx2x_set_msglevel(struct net_device
*dev
, u32 level
)
7861 struct bnx2x
*bp
= netdev_priv(dev
);
7863 if (capable(CAP_NET_ADMIN
))
7864 bp
->msglevel
= level
;
7867 static int bnx2x_nway_reset(struct net_device
*dev
)
7869 struct bnx2x
*bp
= netdev_priv(dev
);
7874 if (netif_running(dev
)) {
7875 bnx2x_stats_handle(bp
, STATS_EVENT_STOP
);
7882 static int bnx2x_get_eeprom_len(struct net_device
*dev
)
7884 struct bnx2x
*bp
= netdev_priv(dev
);
7886 return bp
->common
.flash_size
;
7889 static int bnx2x_acquire_nvram_lock(struct bnx2x
*bp
)
7891 int port
= BP_PORT(bp
);
7895 /* adjust timeout for emulation/FPGA */
7896 count
= NVRAM_TIMEOUT_COUNT
;
7897 if (CHIP_REV_IS_SLOW(bp
))
7900 /* request access to nvram interface */
7901 REG_WR(bp
, MCP_REG_MCPR_NVM_SW_ARB
,
7902 (MCPR_NVM_SW_ARB_ARB_REQ_SET1
<< port
));
7904 for (i
= 0; i
< count
*10; i
++) {
7905 val
= REG_RD(bp
, MCP_REG_MCPR_NVM_SW_ARB
);
7906 if (val
& (MCPR_NVM_SW_ARB_ARB_ARB1
<< port
))
7912 if (!(val
& (MCPR_NVM_SW_ARB_ARB_ARB1
<< port
))) {
7913 DP(BNX2X_MSG_NVM
, "cannot get access to nvram interface\n");
7920 static int bnx2x_release_nvram_lock(struct bnx2x
*bp
)
7922 int port
= BP_PORT(bp
);
7926 /* adjust timeout for emulation/FPGA */
7927 count
= NVRAM_TIMEOUT_COUNT
;
7928 if (CHIP_REV_IS_SLOW(bp
))
7931 /* relinquish nvram interface */
7932 REG_WR(bp
, MCP_REG_MCPR_NVM_SW_ARB
,
7933 (MCPR_NVM_SW_ARB_ARB_REQ_CLR1
<< port
));
7935 for (i
= 0; i
< count
*10; i
++) {
7936 val
= REG_RD(bp
, MCP_REG_MCPR_NVM_SW_ARB
);
7937 if (!(val
& (MCPR_NVM_SW_ARB_ARB_ARB1
<< port
)))
7943 if (val
& (MCPR_NVM_SW_ARB_ARB_ARB1
<< port
)) {
7944 DP(BNX2X_MSG_NVM
, "cannot free access to nvram interface\n");
7951 static void bnx2x_enable_nvram_access(struct bnx2x
*bp
)
7955 val
= REG_RD(bp
, MCP_REG_MCPR_NVM_ACCESS_ENABLE
);
7957 /* enable both bits, even on read */
7958 REG_WR(bp
, MCP_REG_MCPR_NVM_ACCESS_ENABLE
,
7959 (val
| MCPR_NVM_ACCESS_ENABLE_EN
|
7960 MCPR_NVM_ACCESS_ENABLE_WR_EN
));
7963 static void bnx2x_disable_nvram_access(struct bnx2x
*bp
)
7967 val
= REG_RD(bp
, MCP_REG_MCPR_NVM_ACCESS_ENABLE
);
7969 /* disable both bits, even after read */
7970 REG_WR(bp
, MCP_REG_MCPR_NVM_ACCESS_ENABLE
,
7971 (val
& ~(MCPR_NVM_ACCESS_ENABLE_EN
|
7972 MCPR_NVM_ACCESS_ENABLE_WR_EN
)));
7975 static int bnx2x_nvram_read_dword(struct bnx2x
*bp
, u32 offset
, u32
*ret_val
,
7981 /* build the command word */
7982 cmd_flags
|= MCPR_NVM_COMMAND_DOIT
;
7984 /* need to clear DONE bit separately */
7985 REG_WR(bp
, MCP_REG_MCPR_NVM_COMMAND
, MCPR_NVM_COMMAND_DONE
);
7987 /* address of the NVRAM to read from */
7988 REG_WR(bp
, MCP_REG_MCPR_NVM_ADDR
,
7989 (offset
& MCPR_NVM_ADDR_NVM_ADDR_VALUE
));
7991 /* issue a read command */
7992 REG_WR(bp
, MCP_REG_MCPR_NVM_COMMAND
, cmd_flags
);
7994 /* adjust timeout for emulation/FPGA */
7995 count
= NVRAM_TIMEOUT_COUNT
;
7996 if (CHIP_REV_IS_SLOW(bp
))
7999 /* wait for completion */
8002 for (i
= 0; i
< count
; i
++) {
8004 val
= REG_RD(bp
, MCP_REG_MCPR_NVM_COMMAND
);
8006 if (val
& MCPR_NVM_COMMAND_DONE
) {
8007 val
= REG_RD(bp
, MCP_REG_MCPR_NVM_READ
);
8008 /* we read nvram data in cpu order
8009 * but ethtool sees it as an array of bytes
8010 * converting to big-endian will do the work */
8011 val
= cpu_to_be32(val
);
8021 static int bnx2x_nvram_read(struct bnx2x
*bp
, u32 offset
, u8
*ret_buf
,
8028 if ((offset
& 0x03) || (buf_size
& 0x03) || (buf_size
== 0)) {
8030 "Invalid parameter: offset 0x%x buf_size 0x%x\n",
8035 if (offset
+ buf_size
> bp
->common
.flash_size
) {
8036 DP(BNX2X_MSG_NVM
, "Invalid parameter: offset (0x%x) +"
8037 " buf_size (0x%x) > flash_size (0x%x)\n",
8038 offset
, buf_size
, bp
->common
.flash_size
);
8042 /* request access to nvram interface */
8043 rc
= bnx2x_acquire_nvram_lock(bp
);
8047 /* enable access to nvram interface */
8048 bnx2x_enable_nvram_access(bp
);
8050 /* read the first word(s) */
8051 cmd_flags
= MCPR_NVM_COMMAND_FIRST
;
8052 while ((buf_size
> sizeof(u32
)) && (rc
== 0)) {
8053 rc
= bnx2x_nvram_read_dword(bp
, offset
, &val
, cmd_flags
);
8054 memcpy(ret_buf
, &val
, 4);
8056 /* advance to the next dword */
8057 offset
+= sizeof(u32
);
8058 ret_buf
+= sizeof(u32
);
8059 buf_size
-= sizeof(u32
);
8064 cmd_flags
|= MCPR_NVM_COMMAND_LAST
;
8065 rc
= bnx2x_nvram_read_dword(bp
, offset
, &val
, cmd_flags
);
8066 memcpy(ret_buf
, &val
, 4);
8069 /* disable access to nvram interface */
8070 bnx2x_disable_nvram_access(bp
);
8071 bnx2x_release_nvram_lock(bp
);
8076 static int bnx2x_get_eeprom(struct net_device
*dev
,
8077 struct ethtool_eeprom
*eeprom
, u8
*eebuf
)
8079 struct bnx2x
*bp
= netdev_priv(dev
);
8082 if (!netif_running(dev
))
8085 DP(BNX2X_MSG_NVM
, "ethtool_eeprom: cmd %d\n"
8086 DP_LEVEL
" magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
8087 eeprom
->cmd
, eeprom
->magic
, eeprom
->offset
, eeprom
->offset
,
8088 eeprom
->len
, eeprom
->len
);
8090 /* parameters already validated in ethtool_get_eeprom */
8092 rc
= bnx2x_nvram_read(bp
, eeprom
->offset
, eebuf
, eeprom
->len
);
8097 static int bnx2x_nvram_write_dword(struct bnx2x
*bp
, u32 offset
, u32 val
,
8102 /* build the command word */
8103 cmd_flags
|= MCPR_NVM_COMMAND_DOIT
| MCPR_NVM_COMMAND_WR
;
8105 /* need to clear DONE bit separately */
8106 REG_WR(bp
, MCP_REG_MCPR_NVM_COMMAND
, MCPR_NVM_COMMAND_DONE
);
8108 /* write the data */
8109 REG_WR(bp
, MCP_REG_MCPR_NVM_WRITE
, val
);
8111 /* address of the NVRAM to write to */
8112 REG_WR(bp
, MCP_REG_MCPR_NVM_ADDR
,
8113 (offset
& MCPR_NVM_ADDR_NVM_ADDR_VALUE
));
8115 /* issue the write command */
8116 REG_WR(bp
, MCP_REG_MCPR_NVM_COMMAND
, cmd_flags
);
8118 /* adjust timeout for emulation/FPGA */
8119 count
= NVRAM_TIMEOUT_COUNT
;
8120 if (CHIP_REV_IS_SLOW(bp
))
8123 /* wait for completion */
8125 for (i
= 0; i
< count
; i
++) {
8127 val
= REG_RD(bp
, MCP_REG_MCPR_NVM_COMMAND
);
8128 if (val
& MCPR_NVM_COMMAND_DONE
) {
8137 #define BYTE_OFFSET(offset) (8 * (offset & 0x03))
8139 static int bnx2x_nvram_write1(struct bnx2x
*bp
, u32 offset
, u8
*data_buf
,
8147 if (offset
+ buf_size
> bp
->common
.flash_size
) {
8148 DP(BNX2X_MSG_NVM
, "Invalid parameter: offset (0x%x) +"
8149 " buf_size (0x%x) > flash_size (0x%x)\n",
8150 offset
, buf_size
, bp
->common
.flash_size
);
8154 /* request access to nvram interface */
8155 rc
= bnx2x_acquire_nvram_lock(bp
);
8159 /* enable access to nvram interface */
8160 bnx2x_enable_nvram_access(bp
);
8162 cmd_flags
= (MCPR_NVM_COMMAND_FIRST
| MCPR_NVM_COMMAND_LAST
);
8163 align_offset
= (offset
& ~0x03);
8164 rc
= bnx2x_nvram_read_dword(bp
, align_offset
, &val
, cmd_flags
);
8167 val
&= ~(0xff << BYTE_OFFSET(offset
));
8168 val
|= (*data_buf
<< BYTE_OFFSET(offset
));
8170 /* nvram data is returned as an array of bytes
8171 * convert it back to cpu order */
8172 val
= be32_to_cpu(val
);
8174 rc
= bnx2x_nvram_write_dword(bp
, align_offset
, val
,
8178 /* disable access to nvram interface */
8179 bnx2x_disable_nvram_access(bp
);
8180 bnx2x_release_nvram_lock(bp
);
8185 static int bnx2x_nvram_write(struct bnx2x
*bp
, u32 offset
, u8
*data_buf
,
8193 if (buf_size
== 1) /* ethtool */
8194 return bnx2x_nvram_write1(bp
, offset
, data_buf
, buf_size
);
8196 if ((offset
& 0x03) || (buf_size
& 0x03) || (buf_size
== 0)) {
8198 "Invalid parameter: offset 0x%x buf_size 0x%x\n",
8203 if (offset
+ buf_size
> bp
->common
.flash_size
) {
8204 DP(BNX2X_MSG_NVM
, "Invalid parameter: offset (0x%x) +"
8205 " buf_size (0x%x) > flash_size (0x%x)\n",
8206 offset
, buf_size
, bp
->common
.flash_size
);
8210 /* request access to nvram interface */
8211 rc
= bnx2x_acquire_nvram_lock(bp
);
8215 /* enable access to nvram interface */
8216 bnx2x_enable_nvram_access(bp
);
8219 cmd_flags
= MCPR_NVM_COMMAND_FIRST
;
8220 while ((written_so_far
< buf_size
) && (rc
== 0)) {
8221 if (written_so_far
== (buf_size
- sizeof(u32
)))
8222 cmd_flags
|= MCPR_NVM_COMMAND_LAST
;
8223 else if (((offset
+ 4) % NVRAM_PAGE_SIZE
) == 0)
8224 cmd_flags
|= MCPR_NVM_COMMAND_LAST
;
8225 else if ((offset
% NVRAM_PAGE_SIZE
) == 0)
8226 cmd_flags
|= MCPR_NVM_COMMAND_FIRST
;
8228 memcpy(&val
, data_buf
, 4);
8230 rc
= bnx2x_nvram_write_dword(bp
, offset
, val
, cmd_flags
);
8232 /* advance to the next dword */
8233 offset
+= sizeof(u32
);
8234 data_buf
+= sizeof(u32
);
8235 written_so_far
+= sizeof(u32
);
8239 /* disable access to nvram interface */
8240 bnx2x_disable_nvram_access(bp
);
8241 bnx2x_release_nvram_lock(bp
);
8246 static int bnx2x_set_eeprom(struct net_device
*dev
,
8247 struct ethtool_eeprom
*eeprom
, u8
*eebuf
)
8249 struct bnx2x
*bp
= netdev_priv(dev
);
8252 DP(BNX2X_MSG_NVM
, "ethtool_eeprom: cmd %d\n"
8253 DP_LEVEL
" magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
8254 eeprom
->cmd
, eeprom
->magic
, eeprom
->offset
, eeprom
->offset
,
8255 eeprom
->len
, eeprom
->len
);
8257 /* parameters already validated in ethtool_set_eeprom */
8259 /* If the magic number is PHY (0x00504859) upgrade the PHY FW */
8260 if (eeprom
->magic
== 0x00504859)
8263 bnx2x_acquire_phy_lock(bp
);
8264 rc
= bnx2x_flash_download(bp
, BP_PORT(bp
),
8265 bp
->link_params
.ext_phy_config
,
8266 (bp
->state
!= BNX2X_STATE_CLOSED
),
8267 eebuf
, eeprom
->len
);
8268 if ((bp
->state
== BNX2X_STATE_OPEN
) ||
8269 (bp
->state
== BNX2X_STATE_DISABLED
)) {
8270 rc
|= bnx2x_link_reset(&bp
->link_params
,
8272 rc
|= bnx2x_phy_init(&bp
->link_params
,
8275 bnx2x_release_phy_lock(bp
);
8277 } else /* Only the PMF can access the PHY */
8280 rc
= bnx2x_nvram_write(bp
, eeprom
->offset
, eebuf
, eeprom
->len
);
8285 static int bnx2x_get_coalesce(struct net_device
*dev
,
8286 struct ethtool_coalesce
*coal
)
8288 struct bnx2x
*bp
= netdev_priv(dev
);
8290 memset(coal
, 0, sizeof(struct ethtool_coalesce
));
8292 coal
->rx_coalesce_usecs
= bp
->rx_ticks
;
8293 coal
->tx_coalesce_usecs
= bp
->tx_ticks
;
8298 static int bnx2x_set_coalesce(struct net_device
*dev
,
8299 struct ethtool_coalesce
*coal
)
8301 struct bnx2x
*bp
= netdev_priv(dev
);
8303 bp
->rx_ticks
= (u16
) coal
->rx_coalesce_usecs
;
8304 if (bp
->rx_ticks
> 3000)
8305 bp
->rx_ticks
= 3000;
8307 bp
->tx_ticks
= (u16
) coal
->tx_coalesce_usecs
;
8308 if (bp
->tx_ticks
> 0x3000)
8309 bp
->tx_ticks
= 0x3000;
8311 if (netif_running(dev
))
8312 bnx2x_update_coalesce(bp
);
8317 static void bnx2x_get_ringparam(struct net_device
*dev
,
8318 struct ethtool_ringparam
*ering
)
8320 struct bnx2x
*bp
= netdev_priv(dev
);
8322 ering
->rx_max_pending
= MAX_RX_AVAIL
;
8323 ering
->rx_mini_max_pending
= 0;
8324 ering
->rx_jumbo_max_pending
= 0;
8326 ering
->rx_pending
= bp
->rx_ring_size
;
8327 ering
->rx_mini_pending
= 0;
8328 ering
->rx_jumbo_pending
= 0;
8330 ering
->tx_max_pending
= MAX_TX_AVAIL
;
8331 ering
->tx_pending
= bp
->tx_ring_size
;
8334 static int bnx2x_set_ringparam(struct net_device
*dev
,
8335 struct ethtool_ringparam
*ering
)
8337 struct bnx2x
*bp
= netdev_priv(dev
);
8340 if ((ering
->rx_pending
> MAX_RX_AVAIL
) ||
8341 (ering
->tx_pending
> MAX_TX_AVAIL
) ||
8342 (ering
->tx_pending
<= MAX_SKB_FRAGS
+ 4))
8345 bp
->rx_ring_size
= ering
->rx_pending
;
8346 bp
->tx_ring_size
= ering
->tx_pending
;
8348 if (netif_running(dev
)) {
8349 bnx2x_nic_unload(bp
, UNLOAD_NORMAL
);
8350 rc
= bnx2x_nic_load(bp
, LOAD_NORMAL
);
8356 static void bnx2x_get_pauseparam(struct net_device
*dev
,
8357 struct ethtool_pauseparam
*epause
)
8359 struct bnx2x
*bp
= netdev_priv(dev
);
8361 epause
->autoneg
= (bp
->link_params
.req_flow_ctrl
== FLOW_CTRL_AUTO
) &&
8362 (bp
->link_params
.req_line_speed
== SPEED_AUTO_NEG
);
8364 epause
->rx_pause
= ((bp
->link_vars
.flow_ctrl
& FLOW_CTRL_RX
) ==
8366 epause
->tx_pause
= ((bp
->link_vars
.flow_ctrl
& FLOW_CTRL_TX
) ==
8369 DP(NETIF_MSG_LINK
, "ethtool_pauseparam: cmd %d\n"
8370 DP_LEVEL
" autoneg %d rx_pause %d tx_pause %d\n",
8371 epause
->cmd
, epause
->autoneg
, epause
->rx_pause
, epause
->tx_pause
);
8374 static int bnx2x_set_pauseparam(struct net_device
*dev
,
8375 struct ethtool_pauseparam
*epause
)
8377 struct bnx2x
*bp
= netdev_priv(dev
);
8382 DP(NETIF_MSG_LINK
, "ethtool_pauseparam: cmd %d\n"
8383 DP_LEVEL
" autoneg %d rx_pause %d tx_pause %d\n",
8384 epause
->cmd
, epause
->autoneg
, epause
->rx_pause
, epause
->tx_pause
);
8386 bp
->link_params
.req_flow_ctrl
= FLOW_CTRL_AUTO
;
8388 if (epause
->rx_pause
)
8389 bp
->link_params
.req_flow_ctrl
|= FLOW_CTRL_RX
;
8391 if (epause
->tx_pause
)
8392 bp
->link_params
.req_flow_ctrl
|= FLOW_CTRL_TX
;
8394 if (bp
->link_params
.req_flow_ctrl
== FLOW_CTRL_AUTO
)
8395 bp
->link_params
.req_flow_ctrl
= FLOW_CTRL_NONE
;
8397 if (epause
->autoneg
) {
8398 if (!(bp
->port
.supported
& SUPPORTED_Autoneg
)) {
8399 DP(NETIF_MSG_LINK
, "autoneg not supported\n");
8403 if (bp
->link_params
.req_line_speed
== SPEED_AUTO_NEG
)
8404 bp
->link_params
.req_flow_ctrl
= FLOW_CTRL_AUTO
;
8408 "req_flow_ctrl 0x%x\n", bp
->link_params
.req_flow_ctrl
);
8410 if (netif_running(dev
)) {
8411 bnx2x_stats_handle(bp
, STATS_EVENT_STOP
);
8418 static int bnx2x_set_flags(struct net_device
*dev
, u32 data
)
8420 struct bnx2x
*bp
= netdev_priv(dev
);
8424 /* TPA requires Rx CSUM offloading */
8425 if ((data
& ETH_FLAG_LRO
) && bp
->rx_csum
) {
8426 if (!(dev
->features
& NETIF_F_LRO
)) {
8427 dev
->features
|= NETIF_F_LRO
;
8428 bp
->flags
|= TPA_ENABLE_FLAG
;
8432 } else if (dev
->features
& NETIF_F_LRO
) {
8433 dev
->features
&= ~NETIF_F_LRO
;
8434 bp
->flags
&= ~TPA_ENABLE_FLAG
;
8438 if (changed
&& netif_running(dev
)) {
8439 bnx2x_nic_unload(bp
, UNLOAD_NORMAL
);
8440 rc
= bnx2x_nic_load(bp
, LOAD_NORMAL
);
8446 static u32
bnx2x_get_rx_csum(struct net_device
*dev
)
8448 struct bnx2x
*bp
= netdev_priv(dev
);
8453 static int bnx2x_set_rx_csum(struct net_device
*dev
, u32 data
)
8455 struct bnx2x
*bp
= netdev_priv(dev
);
8460 /* Disable TPA, when Rx CSUM is disabled. Otherwise all
8461 TPA'ed packets will be discarded due to wrong TCP CSUM */
8463 u32 flags
= ethtool_op_get_flags(dev
);
8465 rc
= bnx2x_set_flags(dev
, (flags
& ~ETH_FLAG_LRO
));
static int bnx2x_set_tso(struct net_device *dev, u32 data)
{
	if (data) {
		dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
		dev->features |= NETIF_F_TSO6;
	} else {
		dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
		dev->features &= ~NETIF_F_TSO6;
	}

	return 0;
}
static const struct {
	char string[ETH_GSTRING_LEN];
} bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = {
	{ "register_test (offline)" },
	{ "memory_test (offline)" },
	{ "loopback_test (offline)" },
	{ "nvram_test (online)" },
	{ "interrupt_test (online)" },
	{ "link_test (online)" },
	{ "idle check (online)" },
	{ "MC errors (online)" }
};

static int bnx2x_self_test_count(struct net_device *dev)
{
	return BNX2X_NUM_TESTS;
}
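
/* Register self-test: each reg_tbl entry below appears to pair a register
 * offset with a per-port stride and a read/write mask (the field names
 * offset0/offset1/mask are taken from how the loop further down uses them).
 * The test writes a pattern, reads it back through the mask and then
 * restores the saved value, so it only runs on an offline (quiescent) port.
 */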
static int bnx2x_test_registers(struct bnx2x *bp)
{
	int idx, i, rc = -ENODEV;
	u32 wr_val = 0;
	int port = BP_PORT(bp);
	static const struct {
		u32 offset0;
		u32 offset1;
		u32 mask;
	} reg_tbl[] = {
/* 0 */		{ BRB1_REG_PAUSE_LOW_THRESHOLD_0,      4, 0x000003ff },
		{ DORQ_REG_DB_ADDR0,                   4, 0xffffffff },
		{ HC_REG_AGG_INT_0,                    4, 0x000003ff },
		{ PBF_REG_MAC_IF0_ENABLE,              4, 0x00000001 },
		{ PBF_REG_P0_INIT_CRD,                 4, 0x000007ff },
		{ PRS_REG_CID_PORT_0,                  4, 0x00ffffff },
		{ PXP2_REG_PSWRQ_CDU0_L2P,             4, 0x000fffff },
		{ PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR,    8, 0x0003ffff },
		{ PXP2_REG_PSWRQ_TM0_L2P,              4, 0x000fffff },
		{ PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR,   8, 0x0003ffff },
/* 10 */	{ PXP2_REG_PSWRQ_TSDM0_L2P,            4, 0x000fffff },
		{ QM_REG_CONNNUM_0,                    4, 0x000fffff },
		{ TM_REG_LIN0_MAX_ACTIVE_CID,          4, 0x0003ffff },
		{ SRC_REG_KEYRSS0_0,                  40, 0xffffffff },
		{ SRC_REG_KEYRSS0_7,                  40, 0xffffffff },
		{ XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 4, 0x00000001 },
		{ XCM_REG_WU_DA_CNT_CMD00,             4, 0x00000003 },
		{ XCM_REG_GLB_DEL_ACK_MAX_CNT_0,       4, 0x000000ff },
		{ NIG_REG_EGRESS_MNG0_FIFO,           20, 0xffffffff },
		{ NIG_REG_LLH0_T_BIT,                  4, 0x00000001 },
/* 20 */	{ NIG_REG_EMAC0_IN_EN,                 4, 0x00000001 },
		{ NIG_REG_BMAC0_IN_EN,                 4, 0x00000001 },
		{ NIG_REG_XCM0_OUT_EN,                 4, 0x00000001 },
		{ NIG_REG_BRB0_OUT_EN,                 4, 0x00000001 },
		{ NIG_REG_LLH0_XCM_MASK,               4, 0x00000007 },
		{ NIG_REG_LLH0_ACPI_PAT_6_LEN,        68, 0x000000ff },
		{ NIG_REG_LLH0_ACPI_PAT_0_CRC,        68, 0xffffffff },
		{ NIG_REG_LLH0_DEST_MAC_0_0,         160, 0xffffffff },
		{ NIG_REG_LLH0_DEST_IP_0_1,          160, 0xffffffff },
		{ NIG_REG_LLH0_IPV4_IPV6_0,          160, 0x00000001 },
/* 30 */	{ NIG_REG_LLH0_DEST_UDP_0,           160, 0x0000ffff },
		{ NIG_REG_LLH0_DEST_TCP_0,           160, 0x0000ffff },
		{ NIG_REG_LLH0_VLAN_ID_0,            160, 0x00000fff },
		{ NIG_REG_XGXS_SERDES0_MODE_SEL,       4, 0x00000001 },
		{ NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0, 4, 0x00000001 },
		{ NIG_REG_STATUS_INTERRUPT_PORT0,      4, 0x07ffffff },
		{ NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST, 24, 0x00000001 },
		{ NIG_REG_SERDES0_CTRL_PHY_ADDR,      16, 0x0000001f },

		{ 0xffffffff, 0, 0x00000000 }
	};

	if (!netif_running(bp->dev))
		return rc;

	/* Repeat the test twice:
	   First by writing 0x00000000, second by writing 0xffffffff */
	for (idx = 0; idx < 2; idx++) {

		switch (idx) {
		case 0:
			wr_val = 0x00000000;
			break;
		case 1:
			wr_val = 0xffffffff;
			break;
		}

		for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) {
			u32 offset, mask, save_val, val;

			offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1;
			mask = reg_tbl[i].mask;

			save_val = REG_RD(bp, offset);

			REG_WR(bp, offset, wr_val);
			val = REG_RD(bp, offset);

			/* Restore the original register's value */
			REG_WR(bp, offset, save_val);

			/* verify that value is as expected value */
			if ((val & mask) != (wr_val & mask))
				goto test_reg_exit;
		}
	}

	rc = 0;

test_reg_exit:
	return rc;
}
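
/* Memory self-test: walk every word of each block listed in mem_tbl via
 * REG_RD (a bad access would surface as a parity/bus event), then read the
 * parity status registers in prty_tbl and compare them against the
 * per-chip masks (E1 vs E1H) to decide pass/fail.
 */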
static int bnx2x_test_memory(struct bnx2x *bp)
{
	int i, j, rc = -ENODEV;
	u32 val;
	static const struct {
		u32 offset;
		int size;
	} mem_tbl[] = {
		{ CCM_REG_XX_DESCR_TABLE,   CCM_REG_XX_DESCR_TABLE_SIZE },
		{ CFC_REG_ACTIVITY_COUNTER, CFC_REG_ACTIVITY_COUNTER_SIZE },
		{ CFC_REG_LINK_LIST,        CFC_REG_LINK_LIST_SIZE },
		{ DMAE_REG_CMD_MEM,         DMAE_REG_CMD_MEM_SIZE },
		{ TCM_REG_XX_DESCR_TABLE,   TCM_REG_XX_DESCR_TABLE_SIZE },
		{ UCM_REG_XX_DESCR_TABLE,   UCM_REG_XX_DESCR_TABLE_SIZE },
		{ XCM_REG_XX_DESCR_TABLE,   XCM_REG_XX_DESCR_TABLE_SIZE },

		{ 0xffffffff, 0 }
	};
	static const struct {
		char *name;
		u32 offset;
		u32 e1_mask;
		u32 e1h_mask;
	} prty_tbl[] = {
		{ "CCM_PRTY_STS",  CCM_REG_CCM_PRTY_STS,   0x3ffc0, 0 },
		{ "CFC_PRTY_STS",  CFC_REG_CFC_PRTY_STS,   0x2,     0x2 },
		{ "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0,       0 },
		{ "TCM_PRTY_STS",  TCM_REG_TCM_PRTY_STS,   0x3ffc0, 0 },
		{ "UCM_PRTY_STS",  UCM_REG_UCM_PRTY_STS,   0x3ffc0, 0 },
		{ "XCM_PRTY_STS",  XCM_REG_XCM_PRTY_STS,   0x3ffc1, 0 },

		{ NULL, 0xffffffff, 0, 0 }
	};

	if (!netif_running(bp->dev))
		return rc;

	/* Go through all the memories */
	for (i = 0; mem_tbl[i].offset != 0xffffffff; i++)
		for (j = 0; j < mem_tbl[i].size; j++)
			REG_RD(bp, mem_tbl[i].offset + j*4);

	/* Check the parity status */
	for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
		val = REG_RD(bp, prty_tbl[i].offset);
		if ((CHIP_IS_E1(bp) && (val & ~(prty_tbl[i].e1_mask))) ||
		    (CHIP_IS_E1H(bp) && (val & ~(prty_tbl[i].e1h_mask)))) {
			DP(NETIF_MSG_HW,
			   "%s is 0x%x\n", prty_tbl[i].name, val);
			goto test_mem_exit;
		}
	}

	rc = 0;

test_mem_exit:
	return rc;
}
static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up)
{
	int cnt = 1000;

	if (link_up)
		while (bnx2x_link_test(bp) && cnt--)
			msleep(10);
}
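
/* Loopback self-test: bring the link up in BMAC or XGXS loopback, build a
 * single frame addressed to our own MAC with an incrementing payload, post
 * it on fastpath queue 0 with one BD, then poll the tx/rx consumer indices
 * and verify the completion length and payload byte-for-byte.
 */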
static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
{
	unsigned int pkt_size, num_pkts, i;
	struct sk_buff *skb;
	unsigned char *packet;
	struct bnx2x_fastpath *fp = &bp->fp[0];
	u16 tx_start_idx, tx_idx;
	u16 rx_start_idx, rx_idx;
	u16 pkt_prod;
	struct sw_tx_bd *tx_buf;
	struct eth_tx_bd *tx_bd;
	dma_addr_t mapping;
	union eth_rx_cqe *cqe;
	u8 cqe_fp_flags;
	struct sw_rx_bd *rx_buf;
	u16 len;
	int rc = -ENODEV;

	if (loopback_mode == BNX2X_MAC_LOOPBACK) {
		bp->link_params.loopback_mode = LOOPBACK_BMAC;
		bnx2x_acquire_phy_lock(bp);
		bnx2x_phy_init(&bp->link_params, &bp->link_vars);
		bnx2x_release_phy_lock(bp);

	} else if (loopback_mode == BNX2X_PHY_LOOPBACK) {
		bp->link_params.loopback_mode = LOOPBACK_XGXS_10;
		bnx2x_acquire_phy_lock(bp);
		bnx2x_phy_init(&bp->link_params, &bp->link_vars);
		bnx2x_release_phy_lock(bp);
		/* wait until link state is restored */
		bnx2x_wait_for_link(bp, link_up);

	} else
		return -EINVAL;

	/* prepare the loopback packet */
	pkt_size = 1514;
	num_pkts = 0;
	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
	if (!skb) {
		rc = -ENOMEM;
		goto test_loopback_exit;
	}
	packet = skb_put(skb, pkt_size);
	memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
	memset(packet + ETH_ALEN, 0, (ETH_HLEN - ETH_ALEN));
	for (i = ETH_HLEN; i < pkt_size; i++)
		packet[i] = (unsigned char) (i & 0xff);

	/* send the loopback packet */
	tx_start_idx = le16_to_cpu(*fp->tx_cons_sb);
	rx_start_idx = le16_to_cpu(*fp->rx_cons_sb);

	pkt_prod = fp->tx_pkt_prod++;
	tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
	tx_buf->first_bd = fp->tx_bd_prod;
	tx_buf->skb = skb;

	tx_bd = &fp->tx_desc_ring[TX_BD(fp->tx_bd_prod)];
	mapping = pci_map_single(bp->pdev, skb->data,
				 skb_headlen(skb), PCI_DMA_TODEVICE);
	tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
	tx_bd->nbd = cpu_to_le16(1);
	tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));
	tx_bd->vlan = cpu_to_le16(pkt_prod);
	tx_bd->bd_flags.as_bitfield = (ETH_TX_BD_FLAGS_START_BD |
				       ETH_TX_BD_FLAGS_END_BD);
	tx_bd->general_data = ((UNICAST_ADDRESS <<
				ETH_TX_BD_ETH_ADDR_TYPE_SHIFT) | 1);

	fp->hw_tx_prods->bds_prod =
		cpu_to_le16(le16_to_cpu(fp->hw_tx_prods->bds_prod) + 1);
	mb(); /* FW restriction: must not reorder writing nbd and packets */
	fp->hw_tx_prods->packets_prod =
		cpu_to_le32(le32_to_cpu(fp->hw_tx_prods->packets_prod) + 1);
	DOORBELL(bp, FP_IDX(fp), 0);

	num_pkts++;
	fp->tx_bd_prod++;
	bp->dev->trans_start = jiffies;

	udelay(100);

	tx_idx = le16_to_cpu(*fp->tx_cons_sb);
	if (tx_idx != tx_start_idx + num_pkts)
		goto test_loopback_exit;

	rx_idx = le16_to_cpu(*fp->rx_cons_sb);
	if (rx_idx != rx_start_idx + num_pkts)
		goto test_loopback_exit;

	cqe = &fp->rx_comp_ring[RCQ_BD(fp->rx_comp_cons)];
	cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
	if (CQE_TYPE(cqe_fp_flags) || (cqe_fp_flags & ETH_RX_ERROR_FALGS))
		goto test_loopback_rx_exit;

	len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
	if (len != pkt_size)
		goto test_loopback_rx_exit;

	rx_buf = &fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)];
	skb = rx_buf->skb;
	skb_reserve(skb, cqe->fast_path_cqe.placement_offset);
	for (i = ETH_HLEN; i < pkt_size; i++)
		if (*(skb->data + i) != (unsigned char) (i & 0xff))
			goto test_loopback_rx_exit;

	rc = 0;

test_loopback_rx_exit:
	bp->dev->last_rx = jiffies;

	fp->rx_bd_cons = NEXT_RX_IDX(fp->rx_bd_cons);
	fp->rx_bd_prod = NEXT_RX_IDX(fp->rx_bd_prod);
	fp->rx_comp_cons = NEXT_RCQ_IDX(fp->rx_comp_cons);
	fp->rx_comp_prod = NEXT_RCQ_IDX(fp->rx_comp_prod);

	/* Update producers */
	bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
			     fp->rx_sge_prod);
	mmiowb(); /* keep prod updates ordered */

test_loopback_exit:
	bp->link_params.loopback_mode = LOOPBACK_NONE;

	return rc;
}
static int bnx2x_test_loopback(struct bnx2x *bp, u8 link_up)
{
	int rc = 0;

	if (!netif_running(bp->dev))
		return BNX2X_LOOPBACK_FAILED;

	bnx2x_netif_stop(bp, 1);

	if (bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK, link_up)) {
		DP(NETIF_MSG_PROBE, "MAC loopback failed\n");
		rc |= BNX2X_MAC_LOOPBACK_FAILED;
	}

	if (bnx2x_run_loopback(bp, BNX2X_PHY_LOOPBACK, link_up)) {
		DP(NETIF_MSG_PROBE, "PHY loopback failed\n");
		rc |= BNX2X_PHY_LOOPBACK_FAILED;
	}

	bnx2x_netif_start(bp);

	return rc;
}
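
/* Each NVRAM region listed below carries a trailing CRC-32; running
 * ether_crc_le() over a region that includes its own CRC yields the fixed
 * residual 0xdebb20e3 when the data is intact, which is what the test
 * compares against.
 */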
#define CRC32_RESIDUAL			0xdebb20e3

static int bnx2x_test_nvram(struct bnx2x *bp)
{
	static const struct {
		int offset;
		int size;
	} nvram_tbl[] = {
		{     0,  0x14 }, /* bootstrap */
		{  0x14,  0xec }, /* dir */
		{ 0x100, 0x350 }, /* manuf_info */
		{ 0x450,  0xf0 }, /* feature_info */
		{ 0x640,  0x64 }, /* upgrade_key_info */
		{ 0x708,  0x70 }, /* manuf_key_info */
		{     0,     0 }
	};
	__be32 buf[0x350 / 4];
	u8 *data = (u8 *)buf;
	int i, rc;
	u32 magic, csum;

	rc = bnx2x_nvram_read(bp, 0, data, 4);
	if (rc) {
		DP(NETIF_MSG_PROBE, "magic value read (rc -%d)\n", -rc);
		goto test_nvram_exit;
	}

	magic = be32_to_cpu(buf[0]);
	if (magic != 0x669955aa) {
		DP(NETIF_MSG_PROBE, "magic value (0x%08x)\n", magic);
		rc = -ENODEV;
		goto test_nvram_exit;
	}

	for (i = 0; nvram_tbl[i].size; i++) {

		rc = bnx2x_nvram_read(bp, nvram_tbl[i].offset, data,
				      nvram_tbl[i].size);
		if (rc) {
			DP(NETIF_MSG_PROBE,
			   "nvram_tbl[%d] read data (rc -%d)\n", i, -rc);
			goto test_nvram_exit;
		}

		csum = ether_crc_le(nvram_tbl[i].size, data);
		if (csum != CRC32_RESIDUAL) {
			DP(NETIF_MSG_PROBE,
			   "nvram_tbl[%d] csum value (0x%08x)\n", i, csum);
			rc = -ENODEV;
			goto test_nvram_exit;
		}
	}

test_nvram_exit:
	return rc;
}
static int bnx2x_test_intr(struct bnx2x *bp)
{
	struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
	int i, rc;

	if (!netif_running(bp->dev))
		return -ENODEV;

	config->hdr.length_6b = 0;
	config->hdr.offset = 0;
	config->hdr.client_id = BP_CL_ID(bp);
	config->hdr.reserved1 = 0;

	rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
			   U64_HI(bnx2x_sp_mapping(bp, mac_config)),
			   U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
	if (rc == 0) {
		bp->set_mac_pending++;
		for (i = 0; i < 10; i++) {
			if (!bp->set_mac_pending)
				break;
			msleep_interruptible(10);
		}
		if (i == 10)
			rc = -ENODEV;
	}

	return rc;
}
static void bnx2x_self_test(struct net_device *dev,
			    struct ethtool_test *etest, u64 *buf)
{
	struct bnx2x *bp = netdev_priv(dev);

	memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS);

	if (!netif_running(dev))
		return;

	/* offline tests are not supported in MF mode */
	if (IS_E1HMF(bp))
		etest->flags &= ~ETH_TEST_FL_OFFLINE;

	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		u8 link_up;

		link_up = bp->link_vars.link_up;
		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
		bnx2x_nic_load(bp, LOAD_DIAG);
		/* wait until link state is restored */
		bnx2x_wait_for_link(bp, link_up);

		if (bnx2x_test_registers(bp) != 0) {
			buf[0] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		if (bnx2x_test_memory(bp) != 0) {
			buf[1] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		buf[2] = bnx2x_test_loopback(bp, link_up);
		if (buf[2] != 0)
			etest->flags |= ETH_TEST_FL_FAILED;

		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
		bnx2x_nic_load(bp, LOAD_NORMAL);
		/* wait until link state is restored */
		bnx2x_wait_for_link(bp, link_up);
	}
	if (bnx2x_test_nvram(bp) != 0) {
		buf[3] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	if (bnx2x_test_intr(bp) != 0) {
		buf[4] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	if (bnx2x_link_test(bp) != 0) {
		buf[5] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	buf[7] = bnx2x_mc_assert(bp);
	if (buf[7] != 0)
		etest->flags |= ETH_TEST_FL_FAILED;

#ifdef BNX2X_EXTRA_DEBUG
	bnx2x_panic_dump(bp);
#endif
}
static const struct {
	long offset;
	int size;
	u32 flags;
#define STATS_FLAGS_PORT		1
#define STATS_FLAGS_FUNC		2
	u8 string[ETH_GSTRING_LEN];
} bnx2x_stats_arr[BNX2X_NUM_STATS] = {
/* 1 */	{ STATS_OFFSET32(valid_bytes_received_hi), 8, STATS_FLAGS_FUNC, "rx_bytes" },
	{ STATS_OFFSET32(error_bytes_received_hi), 8, STATS_FLAGS_FUNC, "rx_error_bytes" },
	{ STATS_OFFSET32(total_bytes_transmitted_hi), 8, STATS_FLAGS_FUNC, "tx_bytes" },
	{ STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi), 8, STATS_FLAGS_PORT, "tx_error_bytes" },
	{ STATS_OFFSET32(total_unicast_packets_received_hi), 8, STATS_FLAGS_FUNC, "rx_ucast_packets" },
	{ STATS_OFFSET32(total_multicast_packets_received_hi), 8, STATS_FLAGS_FUNC, "rx_mcast_packets" },
	{ STATS_OFFSET32(total_broadcast_packets_received_hi), 8, STATS_FLAGS_FUNC, "rx_bcast_packets" },
	{ STATS_OFFSET32(total_unicast_packets_transmitted_hi), 8, STATS_FLAGS_FUNC, "tx_packets" },
	{ STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi), 8, STATS_FLAGS_PORT, "tx_mac_errors" },
/* 10 */{ STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi), 8, STATS_FLAGS_PORT, "tx_carrier_errors" },
	{ STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi), 8, STATS_FLAGS_PORT, "rx_crc_errors" },
	{ STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi), 8, STATS_FLAGS_PORT, "rx_align_errors" },
	{ STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi), 8, STATS_FLAGS_PORT, "tx_single_collisions" },
	{ STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi), 8, STATS_FLAGS_PORT, "tx_multi_collisions" },
	{ STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi), 8, STATS_FLAGS_PORT, "tx_deferred" },
	{ STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi), 8, STATS_FLAGS_PORT, "tx_excess_collisions" },
	{ STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi), 8, STATS_FLAGS_PORT, "tx_late_collisions" },
	{ STATS_OFFSET32(tx_stat_etherstatscollisions_hi), 8, STATS_FLAGS_PORT, "tx_total_collisions" },
	{ STATS_OFFSET32(rx_stat_etherstatsfragments_hi), 8, STATS_FLAGS_PORT, "rx_fragments" },
/* 20 */{ STATS_OFFSET32(rx_stat_etherstatsjabbers_hi), 8, STATS_FLAGS_PORT, "rx_jabbers" },
	{ STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi), 8, STATS_FLAGS_PORT, "rx_undersize_packets" },
	{ STATS_OFFSET32(jabber_packets_received), 4, STATS_FLAGS_FUNC, "rx_oversize_packets" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi), 8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi), 8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi), 8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi), 8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi), 8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
	{ STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi), 8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
	{ STATS_OFFSET32(etherstatspktsover1522octets_hi), 8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
/* 30 */{ STATS_OFFSET32(rx_stat_xonpauseframesreceived_hi), 8, STATS_FLAGS_PORT, "rx_xon_frames" },
	{ STATS_OFFSET32(rx_stat_xoffpauseframesreceived_hi), 8, STATS_FLAGS_PORT, "rx_xoff_frames" },
	{ STATS_OFFSET32(tx_stat_outxonsent_hi), 8, STATS_FLAGS_PORT, "tx_xon_frames" },
	{ STATS_OFFSET32(tx_stat_outxoffsent_hi), 8, STATS_FLAGS_PORT, "tx_xoff_frames" },
	{ STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi), 8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
	{ STATS_OFFSET32(mac_filter_discard), 4, STATS_FLAGS_PORT, "rx_filtered_packets" },
	{ STATS_OFFSET32(no_buff_discard), 4, STATS_FLAGS_FUNC, "rx_discards" },
	{ STATS_OFFSET32(xxoverflow_discard), 4, STATS_FLAGS_PORT, "rx_fw_discards" },
	{ STATS_OFFSET32(brb_drop_hi), 8, STATS_FLAGS_PORT, "brb_discard" },
	{ STATS_OFFSET32(brb_truncate_hi), 8, STATS_FLAGS_PORT, "brb_truncate" },
/* 40 */{ STATS_OFFSET32(rx_err_discard_pkt), 4, STATS_FLAGS_FUNC, "rx_phy_ip_err_discards"},
	{ STATS_OFFSET32(rx_skb_alloc_failed), 4, STATS_FLAGS_FUNC, "rx_skb_alloc_discard" },
/* 42 */{ STATS_OFFSET32(hw_csum_err), 4, STATS_FLAGS_FUNC, "rx_csum_offload_errors" }
};

#define IS_NOT_E1HMF_STAT(bp, i) \
		(IS_E1HMF(bp) && (bnx2x_stats_arr[i].flags & STATS_FLAGS_PORT))
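
/* IS_NOT_E1HMF_STAT() filters out the STATS_FLAGS_PORT entries when the
 * device runs in E1H multi-function mode (port-level counters are
 * presumably not meaningful per function there), so get_strings(),
 * get_stats_count() and get_ethtool_stats() below must all skip the same
 * entries to stay consistent.
 */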
static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
{
	struct bnx2x *bp = netdev_priv(dev);
	int i, j;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
			if (IS_NOT_E1HMF_STAT(bp, i))
				continue;
			strcpy(buf + j*ETH_GSTRING_LEN,
			       bnx2x_stats_arr[i].string);
			j++;
		}
		break;

	case ETH_SS_TEST:
		memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr));
		break;
	}
}
static int bnx2x_get_stats_count(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	int i, num_stats = 0;

	for (i = 0; i < BNX2X_NUM_STATS; i++) {
		if (IS_NOT_E1HMF_STAT(bp, i))
			continue;
		num_stats++;
	}

	return num_stats;
}
static void bnx2x_get_ethtool_stats(struct net_device *dev,
				    struct ethtool_stats *stats, u64 *buf)
{
	struct bnx2x *bp = netdev_priv(dev);
	u32 *hw_stats = (u32 *)&bp->eth_stats;
	int i, j;

	for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
		if (IS_NOT_E1HMF_STAT(bp, i))
			continue;

		if (bnx2x_stats_arr[i].size == 0) {
			/* skip this counter */
			buf[j] = 0;
			j++;
			continue;
		}
		if (bnx2x_stats_arr[i].size == 4) {
			/* 4-byte counter */
			buf[j] = (u64) *(hw_stats + bnx2x_stats_arr[i].offset);
			j++;
			continue;
		}
		/* 8-byte counter */
		buf[j] = HILO_U64(*(hw_stats + bnx2x_stats_arr[i].offset),
				  *(hw_stats + bnx2x_stats_arr[i].offset + 1));
		j++;
	}
}
static int bnx2x_phys_id(struct net_device *dev, u32 data)
{
	struct bnx2x *bp = netdev_priv(dev);
	int port = BP_PORT(bp);
	int i;

	if (!netif_running(dev))
		return 0;

	for (i = 0; i < (data * 2); i++) {
		if ((i % 2) == 0)
			bnx2x_set_led(bp, port, LED_MODE_OPER, SPEED_1000,
				      bp->link_params.hw_led_mode,
				      bp->link_params.chip_id);
		else
			bnx2x_set_led(bp, port, LED_MODE_OFF, 0,
				      bp->link_params.hw_led_mode,
				      bp->link_params.chip_id);

		msleep_interruptible(500);
		if (signal_pending(current))
			break;
	}

	if (bp->link_vars.link_up)
		bnx2x_set_led(bp, port, LED_MODE_OPER,
			      bp->link_vars.line_speed,
			      bp->link_params.hw_led_mode,
			      bp->link_params.chip_id);

	return 0;
}
static struct ethtool_ops bnx2x_ethtool_ops = {
	.get_settings		= bnx2x_get_settings,
	.set_settings		= bnx2x_set_settings,
	.get_drvinfo		= bnx2x_get_drvinfo,
	.get_wol		= bnx2x_get_wol,
	.set_wol		= bnx2x_set_wol,
	.get_msglevel		= bnx2x_get_msglevel,
	.set_msglevel		= bnx2x_set_msglevel,
	.nway_reset		= bnx2x_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_eeprom_len		= bnx2x_get_eeprom_len,
	.get_eeprom		= bnx2x_get_eeprom,
	.set_eeprom		= bnx2x_set_eeprom,
	.get_coalesce		= bnx2x_get_coalesce,
	.set_coalesce		= bnx2x_set_coalesce,
	.get_ringparam		= bnx2x_get_ringparam,
	.set_ringparam		= bnx2x_set_ringparam,
	.get_pauseparam		= bnx2x_get_pauseparam,
	.set_pauseparam		= bnx2x_set_pauseparam,
	.get_rx_csum		= bnx2x_get_rx_csum,
	.set_rx_csum		= bnx2x_set_rx_csum,
	.get_tx_csum		= ethtool_op_get_tx_csum,
	.set_tx_csum		= ethtool_op_set_tx_hw_csum,
	.set_flags		= bnx2x_set_flags,
	.get_flags		= ethtool_op_get_flags,
	.get_sg			= ethtool_op_get_sg,
	.set_sg			= ethtool_op_set_sg,
	.get_tso		= ethtool_op_get_tso,
	.set_tso		= bnx2x_set_tso,
	.self_test_count	= bnx2x_self_test_count,
	.self_test		= bnx2x_self_test,
	.get_strings		= bnx2x_get_strings,
	.phys_id		= bnx2x_phys_id,
	.get_stats_count	= bnx2x_get_stats_count,
	.get_ethtool_stats	= bnx2x_get_ethtool_stats,
};

/* end of ethtool_ops */

/****************************************************************************
* General service functions
****************************************************************************/
static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
{
	u16 pmcsr;

	pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);

	switch (state) {
	case PCI_D0:
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
				      ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
				       PCI_PM_CTRL_PME_STATUS));

		if (pmcsr & PCI_PM_CTRL_STATE_MASK)
			/* delay required during transition out of D3hot */
			msleep(20);
		break;

	case PCI_D3hot:
		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
		pmcsr |= 3;

		if (bp->wol)
			pmcsr |= PCI_PM_CTRL_PME_ENABLE;

		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
				      pmcsr);

		/* No more memory access after this point until
		 * device is brought back to D0.
		 */
		break;

	default:
		return -EINVAL;
	}
	return 0;
}
/****************************************************************************
* net_device service functions
****************************************************************************/

static int bnx2x_poll(struct napi_struct *napi, int budget)
{
	struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
						 napi);
	struct bnx2x *bp = fp->bp;
	int work_done = 0;
	u16 rx_cons_sb;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		goto poll_panic;
#endif

	prefetch(fp->tx_buf_ring[TX_BD(fp->tx_pkt_cons)].skb);
	prefetch(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb);
	prefetch((char *)(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb) + 256);

	bnx2x_update_fpsb_idx(fp);

	if (BNX2X_HAS_TX_WORK(fp))
		bnx2x_tx_int(fp, budget);

	rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
	if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
		rx_cons_sb++;
	if (BNX2X_HAS_RX_WORK(fp))
		work_done = bnx2x_rx_int(fp, budget);

	rmb(); /* BNX2X_HAS_WORK() reads the status block */
	rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
	if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
		rx_cons_sb++;

	/* must not complete if we consumed full budget */
	if ((work_done < budget) && !BNX2X_HAS_WORK(fp)) {

#ifdef BNX2X_STOP_ON_ERROR
poll_panic:
#endif
		netif_rx_complete(bp->dev, napi);

		bnx2x_ack_sb(bp, FP_SB_ID(fp), USTORM_ID,
			     le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1);
		bnx2x_ack_sb(bp, FP_SB_ID(fp), CSTORM_ID,
			     le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1);
	}
	return work_done;
}
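
/* The helper below performs the header/data split for TSO frames: the
 * original first BD is truncated to hlen bytes of headers, and a new data
 * BD is chained right after it that points into the same DMA mapping at
 * offset hlen for the remaining (old_len - hlen) bytes, so no additional
 * mapping is required.
 */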
/* we split the first BD into headers and data BDs
 * to ease the pain of our fellow microcode engineers
 * we use one mapping for both BDs
 * So far this has only been observed to happen
 * in Other Operating Systems(TM)
 */
static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
				   struct bnx2x_fastpath *fp,
				   struct eth_tx_bd **tx_bd, u16 hlen,
				   u16 bd_prod, int nbd)
{
	struct eth_tx_bd *h_tx_bd = *tx_bd;
	struct eth_tx_bd *d_tx_bd;
	dma_addr_t mapping;
	int old_len = le16_to_cpu(h_tx_bd->nbytes);

	/* first fix first BD */
	h_tx_bd->nbd = cpu_to_le16(nbd);
	h_tx_bd->nbytes = cpu_to_le16(hlen);

	DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
	   "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
	   h_tx_bd->addr_lo, h_tx_bd->nbd);

	/* now get a new data BD
	 * (after the pbd) and fill it */
	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
	d_tx_bd = &fp->tx_desc_ring[bd_prod];

	mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
			   le32_to_cpu(h_tx_bd->addr_lo)) + hlen;

	d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
	d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);

	/* this marks the BD as one that has no individual mapping
	 * the FW ignores this flag in a BD not marked start
	 */
	d_tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_SW_LSO;
	DP(NETIF_MSG_TX_QUEUED,
	   "TSO split data size is %d (%x:%x)\n",
	   d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);

	/* update tx_bd for marking the last BD flag */
	*tx_bd = d_tx_bd;

	return bd_prod;
}
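
/* bnx2x_csum_fix() adjusts a checksum that was computed starting 'fix'
 * bytes away from the transport header: a positive fix subtracts the extra
 * bytes from the sum, a negative fix adds the missing ones, and the result
 * is byte-swapped for the parsing BD.
 */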
static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
{
	if (fix > 0)
		csum = (u16) ~csum_fold(csum_sub(csum,
				csum_partial(t_header - fix, fix, 0)));

	else if (fix < 0)
		csum = (u16) ~csum_fold(csum_add(csum,
				csum_partial(t_header, -fix, 0)));

	return swab16(csum);
}
static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
{
	u32 rc = XMIT_PLAIN;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		rc = XMIT_PLAIN;

	else {
		if (skb->protocol == ntohs(ETH_P_IPV6)) {
			rc = XMIT_CSUM_V6;
			if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
				rc |= XMIT_CSUM_TCP;

		} else {
			rc = XMIT_CSUM_V4;
			if (ip_hdr(skb)->protocol == IPPROTO_TCP)
				rc |= XMIT_CSUM_TCP;
		}
	}

	if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
		rc |= XMIT_GSO_V4;

	else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
		rc |= XMIT_GSO_V6;

	return rc;
}
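
/* The firmware can fetch at most MAX_FETCH_BD descriptors per packet, so
 * the check below slides a window of (MAX_FETCH_BD - 3) fragments across
 * the skb and flags the packet for linearization whenever the bytes inside
 * one window add up to less than a single LSO MSS (or, for non-LSO frames,
 * whenever there are simply too many fragments).
 */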
/* check if packet requires linearization (packet is too fragmented) */
static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
			     u32 xmit_type)
{
	int to_copy = 0;
	int hlen = 0;
	int first_bd_sz = 0;

	/* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
	if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {

		if (xmit_type & XMIT_GSO) {
			unsigned short lso_mss = skb_shinfo(skb)->gso_size;
			/* Check if LSO packet needs to be copied:
			   3 = 1 (for headers BD) + 2 (for PBD and last BD) */
			int wnd_size = MAX_FETCH_BD - 3;
			/* Number of windows to check */
			int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
			int wnd_idx = 0;
			int frag_idx = 0;
			int wnd_sum = 0;

			/* Headers length */
			hlen = (int)(skb_transport_header(skb) - skb->data) +
				tcp_hdrlen(skb);

			/* Amount of data (w/o headers) on linear part of SKB*/
			first_bd_sz = skb_headlen(skb) - hlen;

			wnd_sum = first_bd_sz;

			/* Calculate the first sum - it's special */
			for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
				wnd_sum +=
					skb_shinfo(skb)->frags[frag_idx].size;

			/* If there was data on linear skb data - check it */
			if (first_bd_sz > 0) {
				if (unlikely(wnd_sum < lso_mss)) {
					to_copy = 1;
					goto exit_lbl;
				}

				wnd_sum -= first_bd_sz;
			}

			/* Others are easier: run through the frag list and
			   check all windows */
			for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
				wnd_sum +=
					skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;

				if (unlikely(wnd_sum < lso_mss)) {
					to_copy = 1;
					break;
				}
				wnd_sum -=
					skb_shinfo(skb)->frags[wnd_idx].size;
			}

		} else {
			/* in non-LSO too fragmented packet should always
			   be linearized */
			to_copy = 1;
		}
	}

exit_lbl:
	if (unlikely(to_copy))
		DP(NETIF_MSG_TX_QUEUED,
		   "Linearization IS REQUIRED for %s packet. "
		   "num_frags %d hlen %d first_bd_sz %d\n",
		   (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
		   skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);

	return to_copy;
}
/* called with netif_tx_lock
 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
 * netif_wake_queue()
 */
static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct bnx2x_fastpath *fp;
	struct sw_tx_bd *tx_buf;
	struct eth_tx_bd *tx_bd;
	struct eth_tx_parse_bd *pbd = NULL;
	u16 pkt_prod, bd_prod;
	int nbd, fp_index;
	dma_addr_t mapping;
	u32 xmit_type = bnx2x_xmit_type(bp, skb);
	int vlan_off = (bp->e1hov ? 4 : 0);
	int i;
	u8 hlen = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return NETDEV_TX_BUSY;
#endif

	fp_index = (smp_processor_id() % bp->num_queues);
	fp = &bp->fp[fp_index];

	if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) {
		bp->eth_stats.driver_xoff++,
		netif_stop_queue(dev);
		BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
		return NETDEV_TX_BUSY;
	}

	DP(NETIF_MSG_TX_QUEUED, "SKB: summed %x protocol %x protocol(%x,%x)"
	   " gso type %x xmit_type %x\n",
	   skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
	   ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);

	/* First, check if we need to linearize the skb
	   (due to FW restrictions) */
	if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
		/* Statistics of linearization */
		if (skb_linearize(skb) != 0) {
			DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
			   "silently dropping this SKB\n");
			dev_kfree_skb_any(skb);
			return NETDEV_TX_OK;
		}
	}

	/*
	Please read carefully. First we use one BD which we mark as start,
	then for TSO or xsum we have a parsing info BD,
	and only then we have the rest of the TSO BDs.
	(don't forget to mark the last one as last,
	and to unmap only AFTER you write to the BD ...)
	And above all, all pdb sizes are in words - NOT DWORDS!
	*/

	pkt_prod = fp->tx_pkt_prod++;
	bd_prod = TX_BD(fp->tx_bd_prod);

	/* get a tx_buf and first BD */
	tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
	tx_bd = &fp->tx_desc_ring[bd_prod];

	tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
	tx_bd->general_data = (UNICAST_ADDRESS <<
			       ETH_TX_BD_ETH_ADDR_TYPE_SHIFT);
	tx_bd->general_data |= (1 << ETH_TX_BD_HDR_NBDS_SHIFT);

	/* remember the first BD of the packet */
	tx_buf->first_bd = fp->tx_bd_prod;

	DP(NETIF_MSG_TX_QUEUED,
	   "sending pkt %u @%p  next_idx %u  bd %u @%p\n",
	   pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_bd);

	if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb)) {
		tx_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
		tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG;
	} else
		tx_bd->vlan = cpu_to_le16(pkt_prod);

	/* turn on parsing and get a BD */
	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
	pbd = (void *)&fp->tx_desc_ring[bd_prod];

	memset(pbd, 0, sizeof(struct eth_tx_parse_bd));

	if (xmit_type & XMIT_CSUM) {
		hlen = (skb_network_header(skb) - skb->data + vlan_off) / 2;

		/* for now NS flag is not used in Linux */
		pbd->global_data = (hlen |
				    ((skb->protocol == ntohs(ETH_P_8021Q)) <<
				     ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT));

		pbd->ip_hlen = (skb_transport_header(skb) -
				skb_network_header(skb)) / 2;

		hlen += pbd->ip_hlen + tcp_hdrlen(skb) / 2;

		pbd->total_hlen = cpu_to_le16(hlen);
		hlen = hlen*2 - vlan_off;

		tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_TCP_CSUM;

		if (xmit_type & XMIT_CSUM_V4)
			tx_bd->bd_flags.as_bitfield |=
						ETH_TX_BD_FLAGS_IP_CSUM;
		else
			tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IPV6;

		if (xmit_type & XMIT_CSUM_TCP) {
			pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);

		} else {
			s8 fix = SKB_CS_OFF(skb); /* signed! */

			pbd->global_data |= ETH_TX_PARSE_BD_CS_ANY_FLG;
			pbd->cs_offset = fix / 2;

			DP(NETIF_MSG_TX_QUEUED,
			   "hlen %d offset %d fix %d csum before fix %x\n",
			   le16_to_cpu(pbd->total_hlen), pbd->cs_offset, fix,
			   SKB_CS(skb));

			/* HW bug: fixup the CSUM */
			pbd->tcp_pseudo_csum =
				bnx2x_csum_fix(skb_transport_header(skb),
					       SKB_CS(skb), fix);

			DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
			   pbd->tcp_pseudo_csum);
		}
	}

	mapping = pci_map_single(bp->pdev, skb->data,
				 skb_headlen(skb), PCI_DMA_TODEVICE);

	tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
	nbd = skb_shinfo(skb)->nr_frags + ((pbd == NULL) ? 1 : 2);
	tx_bd->nbd = cpu_to_le16(nbd);
	tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));

	DP(NETIF_MSG_TX_QUEUED, "first bd @%p addr (%x:%x) nbd %d"
	   " nbytes %d flags %x vlan %x\n",
	   tx_bd, tx_bd->addr_hi, tx_bd->addr_lo, le16_to_cpu(tx_bd->nbd),
	   le16_to_cpu(tx_bd->nbytes), tx_bd->bd_flags.as_bitfield,
	   le16_to_cpu(tx_bd->vlan));

	if (xmit_type & XMIT_GSO) {

		DP(NETIF_MSG_TX_QUEUED,
		   "TSO packet len %d hlen %d total len %d tso size %d\n",
		   skb->len, hlen, skb_headlen(skb),
		   skb_shinfo(skb)->gso_size);

		tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;

		if (unlikely(skb_headlen(skb) > hlen))
			bd_prod = bnx2x_tx_split(bp, fp, &tx_bd, hlen,
						 bd_prod, ++nbd);

		pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
		pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
		pbd->tcp_flags = pbd_tcp_flags(skb);

		if (xmit_type & XMIT_GSO_V4) {
			pbd->ip_id = swab16(ip_hdr(skb)->id);
			pbd->tcp_pseudo_csum =
				swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
							  ip_hdr(skb)->daddr,
							  0, IPPROTO_TCP, 0));

		} else
			pbd->tcp_pseudo_csum =
				swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
							&ipv6_hdr(skb)->daddr,
							0, IPPROTO_TCP, 0));

		pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
		tx_bd = &fp->tx_desc_ring[bd_prod];

		mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
				       frag->size, PCI_DMA_TODEVICE);

		tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
		tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
		tx_bd->nbytes = cpu_to_le16(frag->size);
		tx_bd->vlan = cpu_to_le16(pkt_prod);
		tx_bd->bd_flags.as_bitfield = 0;

		DP(NETIF_MSG_TX_QUEUED,
		   "frag %d bd @%p addr (%x:%x) nbytes %d flags %x\n",
		   i, tx_bd, tx_bd->addr_hi, tx_bd->addr_lo,
		   le16_to_cpu(tx_bd->nbytes), tx_bd->bd_flags.as_bitfield);
	}

	/* now at last mark the BD as the last BD */
	tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_END_BD;

	DP(NETIF_MSG_TX_QUEUED, "last bd @%p flags %x\n",
	   tx_bd, tx_bd->bd_flags.as_bitfield);

	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));

	/* now send a tx doorbell, counting the next BD
	 * if the packet contains or ends with it
	 */
	if (TX_BD_POFF(bd_prod) < nbd)
		nbd++;

	if (pbd)
		DP(NETIF_MSG_TX_QUEUED,
		   "PBD @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u"
		   " tcp_flags %x xsum %x seq %u hlen %u\n",
		   pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id,
		   pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum,
		   pbd->tcp_send_seq, le16_to_cpu(pbd->total_hlen));

	DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);

	fp->hw_tx_prods->bds_prod =
		cpu_to_le16(le16_to_cpu(fp->hw_tx_prods->bds_prod) + nbd);
	mb(); /* FW restriction: must not reorder writing nbd and packets */
	fp->hw_tx_prods->packets_prod =
		cpu_to_le32(le32_to_cpu(fp->hw_tx_prods->packets_prod) + 1);
	DOORBELL(bp, FP_IDX(fp), 0);

	fp->tx_bd_prod += nbd;
	dev->trans_start = jiffies;

	if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
		netif_stop_queue(dev);
		bp->eth_stats.driver_xoff++;
		if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
			netif_wake_queue(dev);
	}

	return NETDEV_TX_OK;
}
/* called with rtnl_lock */
static int bnx2x_open(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	bnx2x_set_power_state(bp, PCI_D0);

	return bnx2x_nic_load(bp, LOAD_OPEN);
}

/* called with rtnl_lock */
static int bnx2x_close(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	/* Unload the driver, release IRQs */
	bnx2x_nic_unload(bp, UNLOAD_CLOSE);
	if (atomic_read(&bp->pdev->enable_cnt) == 1)
		if (!CHIP_REV_IS_SLOW(bp))
			bnx2x_set_power_state(bp, PCI_D3hot);

	return 0;
}
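
/* Receive-mode handling: the function below maps dev->flags to one of the
 * BNX2X_RX_MODE_* values and, for "some multicasts", programs either the
 * MAC CAM via a SET_MAC ramrod (E1) or the multicast hash registers indexed
 * by the top byte of a CRC32c over the address (E1H).
 */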
/* called with netif_tx_lock from set_multicast */
static void bnx2x_set_rx_mode(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	u32 rx_mode = BNX2X_RX_MODE_NORMAL;
	int port = BP_PORT(bp);

	if (bp->state != BNX2X_STATE_OPEN) {
		DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
		return;
	}

	DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);

	if (dev->flags & IFF_PROMISC)
		rx_mode = BNX2X_RX_MODE_PROMISC;

	else if ((dev->flags & IFF_ALLMULTI) ||
		 ((dev->mc_count > BNX2X_MAX_MULTICAST) && CHIP_IS_E1(bp)))
		rx_mode = BNX2X_RX_MODE_ALLMULTI;

	else { /* some multicasts */
		if (CHIP_IS_E1(bp)) {
			int i, old, offset;
			struct dev_mc_list *mclist;
			struct mac_configuration_cmd *config =
						bnx2x_sp(bp, mcast_config);

			for (i = 0, mclist = dev->mc_list;
			     mclist && (i < dev->mc_count);
			     i++, mclist = mclist->next) {

				config->config_table[i].
					cam_entry.msb_mac_addr =
					swab16(*(u16 *)&mclist->dmi_addr[0]);
				config->config_table[i].
					cam_entry.middle_mac_addr =
					swab16(*(u16 *)&mclist->dmi_addr[2]);
				config->config_table[i].
					cam_entry.lsb_mac_addr =
					swab16(*(u16 *)&mclist->dmi_addr[4]);
				config->config_table[i].cam_entry.flags =
							cpu_to_le16(port);
				config->config_table[i].
					target_table_entry.flags = 0;
				config->config_table[i].
					target_table_entry.client_id = 0;
				config->config_table[i].
					target_table_entry.vlan_id = 0;

				DP(NETIF_MSG_IFUP,
				   "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
				   config->config_table[i].
						cam_entry.msb_mac_addr,
				   config->config_table[i].
						cam_entry.middle_mac_addr,
				   config->config_table[i].
						cam_entry.lsb_mac_addr);
			}
			old = config->hdr.length_6b;
			if (old > i) {
				for (; i < old; i++) {
					if (CAM_IS_INVALID(config->
							   config_table[i])) {
						i--; /* already invalidated */
						break;
					}
					/* invalidate */
					CAM_INVALIDATE(config->
						       config_table[i]);
				}
			}

			if (CHIP_REV_IS_SLOW(bp))
				offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
			else
				offset = BNX2X_MAX_MULTICAST*(1 + port);

			config->hdr.length_6b = i;
			config->hdr.offset = offset;
			config->hdr.client_id = BP_CL_ID(bp);
			config->hdr.reserved1 = 0;

			bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
				   U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
				   U64_LO(bnx2x_sp_mapping(bp, mcast_config)),
				      0);
		} else { /* E1H */
			/* Accept one or more multicasts */
			struct dev_mc_list *mclist;
			u32 mc_filter[MC_HASH_SIZE];
			u32 crc, bit, regidx;
			int i;

			memset(mc_filter, 0, 4 * MC_HASH_SIZE);

			for (i = 0, mclist = dev->mc_list;
			     mclist && (i < dev->mc_count);
			     i++, mclist = mclist->next) {

				DP(NETIF_MSG_IFUP, "Adding mcast MAC: "
				   "%02x:%02x:%02x:%02x:%02x:%02x\n",
				   mclist->dmi_addr[0], mclist->dmi_addr[1],
				   mclist->dmi_addr[2], mclist->dmi_addr[3],
				   mclist->dmi_addr[4], mclist->dmi_addr[5]);

				crc = crc32c_le(0, mclist->dmi_addr, ETH_ALEN);
				bit = (crc >> 24) & 0xff;
				regidx = bit >> 5;
				bit &= 0x1f;
				mc_filter[regidx] |= (1 << bit);
			}

			for (i = 0; i < MC_HASH_SIZE; i++)
				REG_WR(bp, MC_HASH_OFFSET(bp, i),
				       mc_filter[i]);
		}
	}

	bp->rx_mode = rx_mode;
	bnx2x_set_storm_rx_mode(bp);
}
/* called with rtnl_lock */
static int bnx2x_change_mac_addr(struct net_device *dev, void *p)
{
	struct sockaddr *addr = p;
	struct bnx2x *bp = netdev_priv(dev);

	if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
		return -EINVAL;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	if (netif_running(dev)) {
		if (CHIP_IS_E1(bp))
			bnx2x_set_mac_addr_e1(bp, 1);
		else
			bnx2x_set_mac_addr_e1h(bp, 1);
	}

	return 0;
}
/* called with rtnl_lock */
static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *data = if_mii(ifr);
	struct bnx2x *bp = netdev_priv(dev);
	int port = BP_PORT(bp);
	int err;

	switch (cmd) {
	case SIOCGMIIPHY:
		data->phy_id = bp->port.phy_addr;

		/* fallthrough */

	case SIOCGMIIREG: {
		u16 mii_regval;

		if (!netif_running(dev))
			return -EAGAIN;

		mutex_lock(&bp->port.phy_mutex);
		err = bnx2x_cl45_read(bp, port, 0, bp->port.phy_addr,
				      DEFAULT_PHY_DEV_ADDR,
				      (data->reg_num & 0x1f), &mii_regval);
		data->val_out = mii_regval;
		mutex_unlock(&bp->port.phy_mutex);
		return err;
	}

	case SIOCSMIIREG:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;

		if (!netif_running(dev))
			return -EAGAIN;

		mutex_lock(&bp->port.phy_mutex);
		err = bnx2x_cl45_write(bp, port, 0, bp->port.phy_addr,
				       DEFAULT_PHY_DEV_ADDR,
				       (data->reg_num & 0x1f), data->val_in);
		mutex_unlock(&bp->port.phy_mutex);
		return err;

	default:
		/* do nothing */
		break;
	}

	return -EOPNOTSUPP;
}
/* called with rtnl_lock */
static int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc = 0;

	if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
	    ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
		return -EINVAL;

	/* This does not race with packet allocation
	 * because the actual alloc size is
	 * only updated as part of load
	 */
	dev->mtu = new_mtu;

	if (netif_running(dev)) {
		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
		rc = bnx2x_nic_load(bp, LOAD_NORMAL);
	}

	return rc;
}

static void bnx2x_tx_timeout(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

#ifdef BNX2X_STOP_ON_ERROR
	if (!bp->panic)
		bnx2x_panic();
#endif
	/* This allows the netif to be shutdown gracefully before resetting */
	schedule_work(&bp->reset_task);
}

/* called with rtnl_lock */
static void bnx2x_vlan_rx_register(struct net_device *dev,
				   struct vlan_group *vlgrp)
{
	struct bnx2x *bp = netdev_priv(dev);

	bp->vlgrp = vlgrp;
	if (netif_running(dev))
		bnx2x_set_client_config(bp);
}
#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
static void poll_bnx2x(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	disable_irq(bp->pdev->irq);
	bnx2x_interrupt(bp->pdev->irq, dev);
	enable_irq(bp->pdev->irq);
}
#endif
static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
				    struct net_device *dev)
{
	struct bnx2x *bp;
	int rc;

	SET_NETDEV_DEV(dev, &pdev->dev);
	bp = netdev_priv(dev);

	bp->dev = dev;
	bp->pdev = pdev;
	bp->flags = 0;
	bp->func = PCI_FUNC(pdev->devfn);

	rc = pci_enable_device(pdev);
	if (rc) {
		printk(KERN_ERR PFX "Cannot enable PCI device, aborting\n");
		goto err_out;
	}

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		printk(KERN_ERR PFX "Cannot find PCI device base address,"
		       " aborting\n");
		rc = -ENODEV;
		goto err_out_disable;
	}

	if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
		printk(KERN_ERR PFX "Cannot find second PCI device"
		       " base address, aborting\n");
		rc = -ENODEV;
		goto err_out_disable;
	}

	if (atomic_read(&pdev->enable_cnt) == 1) {
		rc = pci_request_regions(pdev, DRV_MODULE_NAME);
		if (rc) {
			printk(KERN_ERR PFX "Cannot obtain PCI resources,"
			       " aborting\n");
			goto err_out_disable;
		}

		pci_set_master(pdev);
		pci_save_state(pdev);
	}

	bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (bp->pm_cap == 0) {
		printk(KERN_ERR PFX "Cannot find power management"
		       " capability, aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
	if (bp->pcie_cap == 0) {
		printk(KERN_ERR PFX "Cannot find PCI Express capability,"
		       " aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	if (pci_set_dma_mask(pdev, DMA_64BIT_MASK) == 0) {
		bp->flags |= USING_DAC_FLAG;
		if (pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK) != 0) {
			printk(KERN_ERR PFX "pci_set_consistent_dma_mask"
			       " failed, aborting\n");
			rc = -EIO;
			goto err_out_release;
		}

	} else if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) != 0) {
		printk(KERN_ERR PFX "System does not support DMA,"
		       " aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	dev->mem_start = pci_resource_start(pdev, 0);
	dev->base_addr = dev->mem_start;
	dev->mem_end = pci_resource_end(pdev, 0);

	dev->irq = pdev->irq;

	bp->regview = ioremap_nocache(dev->base_addr,
				      pci_resource_len(pdev, 0));
	if (!bp->regview) {
		printk(KERN_ERR PFX "Cannot map register space, aborting\n");
		rc = -ENOMEM;
		goto err_out_release;
	}

	bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
					min_t(u64, BNX2X_DB_SIZE,
					      pci_resource_len(pdev, 2)));
	if (!bp->doorbells) {
		printk(KERN_ERR PFX "Cannot map doorbell space, aborting\n");
		rc = -ENOMEM;
		goto err_out_unmap;
	}

	bnx2x_set_power_state(bp, PCI_D0);

	/* clean indirect addresses */
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
	REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);

	dev->hard_start_xmit = bnx2x_start_xmit;
	dev->watchdog_timeo = TX_TIMEOUT;

	dev->ethtool_ops = &bnx2x_ethtool_ops;
	dev->open = bnx2x_open;
	dev->stop = bnx2x_close;
	dev->set_multicast_list = bnx2x_set_rx_mode;
	dev->set_mac_address = bnx2x_change_mac_addr;
	dev->do_ioctl = bnx2x_ioctl;
	dev->change_mtu = bnx2x_change_mtu;
	dev->tx_timeout = bnx2x_tx_timeout;
	dev->vlan_rx_register = bnx2x_vlan_rx_register;
#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
	dev->poll_controller = poll_bnx2x;
#endif
	dev->features |= NETIF_F_SG;
	dev->features |= NETIF_F_HW_CSUM;
	if (bp->flags & USING_DAC_FLAG)
		dev->features |= NETIF_F_HIGHDMA;
	dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
	dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
	dev->features |= NETIF_F_TSO6;

	return 0;

err_out_unmap:
	if (bp->regview) {
		iounmap(bp->regview);
		bp->regview = NULL;
	}
	if (bp->doorbells) {
		iounmap(bp->doorbells);
		bp->doorbells = NULL;
	}

err_out_release:
	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

err_out_disable:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

err_out:
	return rc;
}
static int __devinit bnx2x_get_pcie_width(struct bnx2x *bp)
{
	u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);

	val = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
	return val;
}

/* return value of 1=2.5GHz 2=5GHz */
static int __devinit bnx2x_get_pcie_speed(struct bnx2x *bp)
{
	u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);

	val = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
	return val;
}
static int __devinit bnx2x_init_one(struct pci_dev *pdev,
				    const struct pci_device_id *ent)
{
	static int version_printed;
	struct net_device *dev = NULL;
	struct bnx2x *bp;
	int rc;
	DECLARE_MAC_BUF(mac);

	if (version_printed++ == 0)
		printk(KERN_INFO "%s", version);

	/* dev zeroed in init_etherdev */
	dev = alloc_etherdev(sizeof(*bp));
	if (!dev) {
		printk(KERN_ERR PFX "Cannot allocate net device\n");
		return -ENOMEM;
	}

	bp = netdev_priv(dev);
	bp->msglevel = debug;

	rc = bnx2x_init_dev(pdev, dev);
	if (rc < 0) {
		free_netdev(dev);
		return rc;
	}

	rc = register_netdev(dev);
	if (rc) {
		dev_err(&pdev->dev, "Cannot register net device\n");
		goto init_one_exit;
	}

	pci_set_drvdata(pdev, dev);

	rc = bnx2x_init_bp(bp);
	if (rc) {
		unregister_netdev(dev);
		goto init_one_exit;
	}

	netif_carrier_off(dev);

	bp->common.name = board_info[ent->driver_data].name;
	printk(KERN_INFO "%s: %s (%c%d) PCI-E x%d %s found at mem %lx,"
	       " IRQ %d, ", dev->name, bp->common.name,
	       (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
	       bnx2x_get_pcie_width(bp),
	       (bnx2x_get_pcie_speed(bp) == 2) ? "5GHz (Gen2)" : "2.5GHz",
	       dev->base_addr, bp->pdev->irq);
	printk(KERN_CONT "node addr %s\n", print_mac(mac, dev->dev_addr));

	return 0;

init_one_exit:
	if (bp->regview)
		iounmap(bp->regview);

	if (bp->doorbells)
		iounmap(bp->doorbells);

	free_netdev(dev);

	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

	return rc;
}
static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;

	if (!dev) {
		printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
		return;
	}
	bp = netdev_priv(dev);

	unregister_netdev(dev);

	if (bp->regview)
		iounmap(bp->regview);

	if (bp->doorbells)
		iounmap(bp->doorbells);

	free_netdev(dev);

	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}
static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;

	if (!dev) {
		printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
		return -ENODEV;
	}
	bp = netdev_priv(dev);

	rtnl_lock();

	pci_save_state(pdev);

	if (!netif_running(dev)) {
		rtnl_unlock();
		return 0;
	}

	netif_device_detach(dev);

	bnx2x_nic_unload(bp, UNLOAD_CLOSE);

	bnx2x_set_power_state(bp, pci_choose_state(pdev, state));

	rtnl_unlock();

	return 0;
}

static int bnx2x_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;
	int rc;

	if (!dev) {
		printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
		return -ENODEV;
	}
	bp = netdev_priv(dev);

	rtnl_lock();

	pci_restore_state(pdev);

	if (!netif_running(dev)) {
		rtnl_unlock();
		return 0;
	}

	bnx2x_set_power_state(bp, PCI_D0);
	netif_device_attach(dev);

	rc = bnx2x_nic_load(bp, LOAD_OPEN);

	rtnl_unlock();

	return rc;
}
static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
{
	int i;

	bp->state = BNX2X_STATE_ERROR;

	bp->rx_mode = BNX2X_RX_MODE_NONE;

	bnx2x_netif_stop(bp, 0);

	del_timer_sync(&bp->timer);
	bp->stats_state = STATS_STATE_DISABLED;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	/* Release IRQs */
	bnx2x_free_irq(bp);

	if (CHIP_IS_E1(bp)) {
		struct mac_configuration_cmd *config =
						bnx2x_sp(bp, mcast_config);

		for (i = 0; i < config->hdr.length_6b; i++)
			CAM_INVALIDATE(config->config_table[i]);
	}

	/* Free SKBs, SGEs, TPA pool and driver internals */
	bnx2x_free_skbs(bp);
	for_each_queue(bp, i)
		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
	bnx2x_free_mem(bp);

	bp->state = BNX2X_STATE_CLOSED;

	netif_carrier_off(bp->dev);

	return 0;
}
static void bnx2x_eeh_recover(struct bnx2x *bp)
{
	u32 val;

	mutex_init(&bp->port.phy_mutex);

	bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
	bp->link_params.shmem_base = bp->common.shmem_base;
	BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);

	if (!bp->common.shmem_base ||
	    (bp->common.shmem_base < 0xA0000) ||
	    (bp->common.shmem_base >= 0xC0000)) {
		BNX2X_DEV_INFO("MCP not active\n");
		bp->flags |= NO_MCP_FLAG;
		return;
	}

	val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
	if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		!= (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		BNX2X_ERR("BAD MCP validity signature\n");

	if (!BP_NOMCP(bp)) {
		bp->fw_seq = (SHMEM_RD(bp, func_mb[BP_FUNC(bp)].drv_mb_header)
			      & DRV_MSG_SEQ_NUMBER_MASK);
		BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
	}
}
/**
 * bnx2x_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
						pci_channel_state_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	netif_device_detach(dev);

	if (netif_running(dev))
		bnx2x_eeh_nic_unload(bp);

	pci_disable_device(pdev);

	/* Request a slot reset */
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * bnx2x_io_slot_reset - called after the PCI bus has been reset
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 */
static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	if (pci_enable_device(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}

	pci_set_master(pdev);
	pci_restore_state(pdev);

	if (netif_running(dev))
		bnx2x_set_power_state(bp, PCI_D0);

	return PCI_ERS_RESULT_RECOVERED;
}

/**
 * bnx2x_io_resume - called when traffic can start flowing again
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * its OK to resume normal operation.
 */
static void bnx2x_io_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	bnx2x_eeh_recover(bp);

	if (netif_running(dev))
		bnx2x_nic_load(bp, LOAD_NORMAL);

	netif_device_attach(dev);
}
static struct pci_error_handlers bnx2x_err_handler = {
	.error_detected = bnx2x_io_error_detected,
	.slot_reset     = bnx2x_io_slot_reset,
	.resume         = bnx2x_io_resume,
};

static struct pci_driver bnx2x_pci_driver = {
	.name        = DRV_MODULE_NAME,
	.id_table    = bnx2x_pci_tbl,
	.probe       = bnx2x_init_one,
	.remove      = __devexit_p(bnx2x_remove_one),
	.suspend     = bnx2x_suspend,
	.resume      = bnx2x_resume,
	.err_handler = &bnx2x_err_handler,
};

static int __init bnx2x_init(void)
{
	return pci_register_driver(&bnx2x_pci_driver);
}

static void __exit bnx2x_cleanup(void)
{
	pci_unregister_driver(&bnx2x_pci_driver);
}

module_init(bnx2x_init);
module_exit(bnx2x_cleanup);