/* bnx2x_main.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2009 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/device.h>  /* for dev_info() */
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/crc32c.h>
#include <linux/prefetch.h>
#include <linux/zlib.h>

#include "bnx2x.h"		/* driver private declarations */
#include "bnx2x_init.h"
#include "bnx2x_init_ops.h"
#include "bnx2x_dump.h"
#define DRV_MODULE_VERSION	"1.48.105-1"
#define DRV_MODULE_RELDATE	"2009/04/22"
#define BNX2X_BC_VER		0x040200

#include <linux/firmware.h>
#include "bnx2x_fw_file_hdr.h"
#define FW_FILE_PREFIX_E1	"bnx2x-e1-"
#define FW_FILE_PREFIX_E1H	"bnx2x-e1h-"

/* Time in jiffies before concluding the transmitter is hung */
#define TX_TIMEOUT		(5*HZ)
static char version[] __devinitdata =
	"Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
	DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Eliezer Tamir");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710/57711/57711E Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
static int multi_mode = 1;
module_param(multi_mode, int, 0);
MODULE_PARM_DESC(multi_mode, " Use per-CPU queues");

static int disable_tpa;
module_param(disable_tpa, int, 0);
MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");

static int int_mode;
module_param(int_mode, int, 0);
MODULE_PARM_DESC(int_mode, " Force interrupt mode (1 INT#x; 2 MSI)");

static int poll;
module_param(poll, int, 0);
MODULE_PARM_DESC(poll, " Use polling (for debug)");

static int mrrs = -1;
module_param(mrrs, int, 0);
MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");

static int debug;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, " Default debug msglevel");

static int load_count[3]; /* 0-common, 1-port0, 2-port1 */

static struct workqueue_struct *bnx2x_wq;
enum bnx2x_board_type {
	BCM57710 = 0,
	BCM57711,
	BCM57711E,
};

/* indexed by board_type, above */
static struct {
	char *name;
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM57710 XGb" },
	{ "Broadcom NetXtreme II BCM57711 XGb" },
	{ "Broadcom NetXtreme II BCM57711E XGb" }
};


static const struct pci_device_id bnx2x_pci_tbl[] = {
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57710,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57710 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711E,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711E },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);
/****************************************************************************
* General service functions
****************************************************************************/

/*
 * locking is done by mcp
 */
static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
{
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
}

static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
{
	u32 val;

	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);

	return val;
}
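
/*
 * Implementation note: the two helpers above reach chip registers through
 * the PCI configuration-space "GRC window" (PCICFG_GRC_ADDRESS/DATA) rather
 * than the memory-mapped BAR; per the comment above, serialization against
 * other agents is handled by the MCP firmware.  Restoring the window to
 * PCICFG_VENDOR_ID_OFFSET keeps later config-space reads harmless.
 * Illustrative use only (the offset/bit names below are placeholders, not
 * driver symbols):
 *
 *	u32 v = bnx2x_reg_rd_ind(bp, some_grc_offset);
 *	bnx2x_reg_wr_ind(bp, some_grc_offset, v | some_bit);
 */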
static const u32 dmae_reg_go_c[] = {
	DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
	DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
	DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
	DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
};
/* copy command into DMAE command memory and set DMAE command go */
static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
			    int idx)
{
	u32 cmd_offset;
	int i;

	cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
	for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
		REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));

		DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
		   idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
	}
	REG_WR(bp, dmae_reg_go_c[idx], 1);
}
void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
		      u32 len32)
{
	struct dmae_command *dmae = &bp->init_dmae;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = 200;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);

		DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x len32 %d)"
		   " using indirect\n", dst_addr, len32);
		bnx2x_init_ind_wr(bp, dst_addr, data, len32);
		return;
	}

	mutex_lock(&bp->dmae_mutex);

	memset(dmae, 0, sizeof(struct dmae_command));

	dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = U64_LO(dma_addr);
	dmae->src_addr_hi = U64_HI(dma_addr);
	dmae->dst_addr_lo = dst_addr >> 2;
	dmae->dst_addr_hi = 0;
	dmae->len = len32;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
	   DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
		    "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
	   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
	   dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, dst_addr,
	   dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);
	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	*wb_comp = 0;

	bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

	udelay(5);

	while (*wb_comp != DMAE_COMP_VAL) {
		DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);

		if (!cnt) {
			BNX2X_ERR("DMAE timeout!\n");
			break;
		}
		cnt--;
		/* adjust delay for emulation/FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			msleep(100);
		else
			udelay(5);
	}

	mutex_unlock(&bp->dmae_mutex);
}
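
/*
 * Implementation note: bnx2x_write_dmae() programs a DMAE channel to copy
 * 'len32' dwords from host memory (dma_addr) into GRC space (dst_addr) and
 * then polls a completion word (wb_comp) that the chip writes back into the
 * slowpath area when the copy is done.  A rough sketch of a caller,
 * mirroring how REG_WR_DMAE() is used further down (illustrative only;
 * 'vals' and 'reg' are placeholders, not driver symbols):
 *
 *	memcpy(bnx2x_sp(bp, wb_data[0]), vals, 2 * sizeof(u32));
 *	bnx2x_write_dmae(bp, bnx2x_sp_mapping(bp, wb_data), reg, 2);
 */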
void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
{
	struct dmae_command *dmae = &bp->init_dmae;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = 200;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);
		int i;

		DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x len32 %d)"
		   " using indirect\n", src_addr, len32);
		for (i = 0; i < len32; i++)
			data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
		return;
	}

	mutex_lock(&bp->dmae_mutex);

	memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
	memset(dmae, 0, sizeof(struct dmae_command));

	dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = src_addr >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
	dmae->len = len32;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
	   DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
		    "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
	   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
	   dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, src_addr,
	   dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);

	*wb_comp = 0;

	bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

	udelay(5);

	while (*wb_comp != DMAE_COMP_VAL) {

		if (!cnt) {
			BNX2X_ERR("DMAE timeout!\n");
			break;
		}
		cnt--;
		/* adjust delay for emulation/FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			msleep(100);
		else
			udelay(5);
	}
	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	mutex_unlock(&bp->dmae_mutex);
}
/* used only for slowpath so not inlined */
static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
{
	u32 wb_write[2];

	wb_write[0] = val_hi;
	wb_write[1] = val_lo;
	REG_WR_DMAE(bp, reg, wb_write, 2);
}

static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
{
	u32 wb_data[2];

	REG_RD_DMAE(bp, reg, wb_data, 2);

	return HILO_U64(wb_data[0], wb_data[1]);
}
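
/*
 * Implementation note (hedged): "wide-bus" registers are 64 bits wide and
 * are not reliably accessible as two independent 32-bit REG_WR/REG_RD
 * operations, which appears to be why the slow path funnels both halves
 * through the DMAE engine in a single transaction here; bnx2x_wb_rd() is
 * the matching read path returning the combined 64-bit value.
 */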
static int bnx2x_mc_assert(struct bnx2x *bp)
{
	char last_idx;
	int i, rc = 0;
	u32 row0, row1, row2, row3;

	/* XSTORM */
	last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
			   XSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else
			break;
	}

	/* TSTORM */
	last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
			   TSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else
			break;
	}

	/* CSTORM */
	last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
			   CSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else
			break;
	}

	/* USTORM */
	last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
			   USTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else
			break;
	}

	return rc;
}
static void bnx2x_fw_dump(struct bnx2x *bp)
{
	u32 mark, offset;
	__be32 data[9];
	int word;

	mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104);
	mark = ((mark + 0x3) & ~0x3);
	printk(KERN_ERR PFX "begin fw dump (mark 0x%x)\n", mark);

	printk(KERN_ERR PFX);
	for (offset = mark - 0x08000000; offset <= 0xF900; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
						  offset + 4*word));
		data[8] = 0x0;
		printk(KERN_CONT "%s", (char *)data);
	}
	for (offset = 0xF108; offset <= mark - 0x08000000; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
						  offset + 4*word));
		data[8] = 0x0;
		printk(KERN_CONT "%s", (char *)data);
	}
	printk(KERN_ERR PFX "end of fw dump\n");
}
static void bnx2x_panic_dump(struct bnx2x *bp)
{
	int i;
	u16 j, start, end;

	bp->stats_state = STATS_STATE_DISABLED;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	BNX2X_ERR("begin crash dump -----------------\n");

	BNX2X_ERR("def_c_idx(%u) def_u_idx(%u) def_x_idx(%u)"
		  " def_t_idx(%u) def_att_idx(%u) attn_state(%u)"
		  " spq_prod_idx(%u)\n",
		  bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
		  bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);

	for_each_rx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		BNX2X_ERR("fp%d: rx_bd_prod(%x) rx_bd_cons(%x)"
			  " *rx_bd_cons_sb(%x) rx_comp_prod(%x)"
			  " rx_comp_cons(%x) *rx_cons_sb(%x)\n",
			  i, fp->rx_bd_prod, fp->rx_bd_cons,
			  le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
			  fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
		BNX2X_ERR("      rx_sge_prod(%x) last_max_sge(%x)"
			  " fp_u_idx(%x) *sb_u_idx(%x)\n",
			  fp->rx_sge_prod, fp->last_max_sge,
			  le16_to_cpu(fp->fp_u_idx),
			  fp->status_blk->u_status_block.status_block_index);
	}

	for_each_tx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		struct eth_tx_db_data *hw_prods = fp->hw_tx_prods;

		BNX2X_ERR("fp%d: tx_pkt_prod(%x) tx_pkt_cons(%x)"
			  " tx_bd_prod(%x) tx_bd_cons(%x) *tx_cons_sb(%x)\n",
			  i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
			  fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
		BNX2X_ERR("      fp_c_idx(%x) *sb_c_idx(%x)"
			  " bd data(%x,%x)\n", le16_to_cpu(fp->fp_c_idx),
			  fp->status_blk->c_status_block.status_block_index,
			  hw_prods->packets_prod, hw_prods->bds_prod);
	}

	for_each_rx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
		end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
		for (j = start; j != end; j = RX_BD(j + 1)) {
			u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
			struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];

			BNX2X_ERR("fp%d: rx_bd[%x]=[%x:%x] sw_bd=[%p]\n",
				  i, j, rx_bd[1], rx_bd[0], sw_bd->skb);
		}

		start = RX_SGE(fp->rx_sge_prod);
		end = RX_SGE(fp->last_max_sge);
		for (j = start; j != end; j = RX_SGE(j + 1)) {
			u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
			struct sw_rx_page *sw_page = &fp->rx_page_ring[j];

			BNX2X_ERR("fp%d: rx_sge[%x]=[%x:%x] sw_page=[%p]\n",
				  i, j, rx_sge[1], rx_sge[0], sw_page->page);
		}

		start = RCQ_BD(fp->rx_comp_cons - 10);
		end = RCQ_BD(fp->rx_comp_cons + 503);
		for (j = start; j != end; j = RCQ_BD(j + 1)) {
			u32 *cqe = (u32 *)&fp->rx_comp_ring[j];

			BNX2X_ERR("fp%d: cqe[%x]=[%x:%x:%x:%x]\n",
				  i, j, cqe[0], cqe[1], cqe[2], cqe[3]);
		}
	}

	for_each_tx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
		end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
		for (j = start; j != end; j = TX_BD(j + 1)) {
			struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];

			BNX2X_ERR("fp%d: packet[%x]=[%p,%x]\n",
				  i, j, sw_bd->skb, sw_bd->first_bd);
		}

		start = TX_BD(fp->tx_bd_cons - 10);
		end = TX_BD(fp->tx_bd_cons + 254);
		for (j = start; j != end; j = TX_BD(j + 1)) {
			u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];

			BNX2X_ERR("fp%d: tx_bd[%x]=[%x:%x:%x:%x]\n",
				  i, j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
		}
	}

	BNX2X_ERR("end crash dump -----------------\n");
}
static void bnx2x_int_enable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;

	if (msix) {
		val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			 HC_CONFIG_0_REG_INT_LINE_EN_0);
		val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else if (msi) {
		val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else {
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_INT_LINE_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	}

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
	   val, port, addr);

	REG_WR(bp, addr, val);

	val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x) mode %s\n",
	   val, port, addr, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));

	REG_WR(bp, addr, val);

	if (CHIP_IS_E1H(bp)) {
		/* init leading/trailing edge */
		if (IS_E1HMF(bp)) {
			val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
			if (bp->port.pmf)
				/* enable nig and gpio3 attention */
				val |= 0x1100;
		} else
			val = 0xffff;

		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
	}
}
static void bnx2x_int_disable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);

	val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
		 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
		 HC_CONFIG_0_REG_INT_LINE_EN_0 |
		 HC_CONFIG_0_REG_ATTN_BIT_EN_0);

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
	   val, port, addr);

	/* flush all outstanding writes */
	mmiowb();

	REG_WR(bp, addr, val);
	if (REG_RD(bp, addr) != val)
		BNX2X_ERR("BUG! proper val not read from IGU!\n");
}
static void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
{
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int i, offset;

	/* disable interrupt handling */
	atomic_inc(&bp->intr_sem);
	if (disable_hw)
		/* prevent the HW from sending interrupts */
		bnx2x_int_disable(bp);

	/* make sure all ISRs are done */
	if (msix) {
		synchronize_irq(bp->msix_table[0].vector);
		offset = 1;
		for_each_queue(bp, i)
			synchronize_irq(bp->msix_table[i + offset].vector);
	} else
		synchronize_irq(bp->pdev->irq);

	/* make sure sp_task is not running */
	cancel_delayed_work(&bp->sp_task);
	flush_workqueue(bnx2x_wq);
}
/*
 * General service functions
 */

static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
				u8 storm, u16 index, u8 op, u8 update)
{
	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
		       COMMAND_REG_INT_ACK);
	struct igu_ack_register igu_ack;

	igu_ack.status_block_index = index;
	igu_ack.sb_id_and_flags =
			((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
			 (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
			 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
			 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));

	DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n",
	   (*(u32 *)&igu_ack), hc_addr);
	REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));
}
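
/*
 * Implementation note: the IGU ack is a single 32-bit write that packs the
 * status block id, the storm id, the new consumer index and the
 * interrupt-mode op (enable/disable/NOP) into one register.  Re-enabling
 * interrupts for a fastpath status block after servicing its USTORM index
 * could look roughly like this (illustrative only):
 *
 *	bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
 *		     le16_to_cpu(fp->fp_u_idx), IGU_INT_ENABLE, 1);
 *
 * which is the counterpart of the IGU_INT_DISABLE ack issued from the MSI-X
 * fastpath handler further down.
 */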
static inline u16 bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
{
	struct host_status_block *fpsb = fp->status_blk;
	u16 rc = 0;

	barrier(); /* status block is written to by the chip */
	if (fp->fp_c_idx != fpsb->c_status_block.status_block_index) {
		fp->fp_c_idx = fpsb->c_status_block.status_block_index;
		rc |= 1;
	}
	if (fp->fp_u_idx != fpsb->u_status_block.status_block_index) {
		fp->fp_u_idx = fpsb->u_status_block.status_block_index;
		rc |= 2;
	}
	return rc;
}
static u16 bnx2x_ack_int(struct bnx2x *bp)
{
	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
		       COMMAND_REG_SIMD_MASK);
	u32 result = REG_RD(bp, hc_addr);

	DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
	   result, hc_addr);

	return result;
}


/*
 * fast path service functions
 */
static inline int bnx2x_has_tx_work(struct bnx2x_fastpath *fp)
{
	u16 tx_cons_sb;

	/* Tell compiler that status block fields can change */
	barrier();
	tx_cons_sb = le16_to_cpu(*fp->tx_cons_sb);
	return (fp->tx_pkt_cons != tx_cons_sb);
}

static inline int bnx2x_has_tx_work_unload(struct bnx2x_fastpath *fp)
{
	/* Tell compiler that consumer and producer can change */
	barrier();
	return (fp->tx_pkt_prod != fp->tx_pkt_cons);
}
/* free skb in the packet ring at pos idx
 * return idx of last bd freed
 */
static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			     u16 idx)
{
	struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
	struct eth_tx_bd *tx_bd;
	struct sk_buff *skb = tx_buf->skb;
	u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
	int nbd;

	DP(BNX2X_MSG_OFF, "pkt_idx %d buff @(%p)->skb %p\n",
	   idx, tx_buf, skb);

	/* unmap first bd */
	DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
	tx_bd = &fp->tx_desc_ring[bd_idx];
	pci_unmap_single(bp->pdev, BD_UNMAP_ADDR(tx_bd),
			 BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);

	nbd = le16_to_cpu(tx_bd->nbd) - 1;
	new_cons = nbd + tx_buf->first_bd;
#ifdef BNX2X_STOP_ON_ERROR
	if (nbd > (MAX_SKB_FRAGS + 2)) {
		BNX2X_ERR("BAD nbd!\n");
		bnx2x_panic();
	}
#endif

	/* Skip a parse bd and the TSO split header bd
	   since they have no mapping */
	if (nbd)
		bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	if (tx_bd->bd_flags.as_bitfield & (ETH_TX_BD_FLAGS_IP_CSUM |
					   ETH_TX_BD_FLAGS_TCP_CSUM |
					   ETH_TX_BD_FLAGS_SW_LSO)) {
		if (--nbd)
			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
		tx_bd = &fp->tx_desc_ring[bd_idx];
		/* is this a TSO split header bd? */
		if (tx_bd->bd_flags.as_bitfield & ETH_TX_BD_FLAGS_SW_LSO) {
			if (--nbd)
				bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
		}
	}

	/* now free frags */
	while (nbd > 0) {

		DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
		tx_bd = &fp->tx_desc_ring[bd_idx];
		pci_unmap_page(bp->pdev, BD_UNMAP_ADDR(tx_bd),
			       BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);
		if (--nbd)
			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* release skb */
	WARN_ON(!skb);
	dev_kfree_skb(skb);
	tx_buf->first_bd = 0;
	tx_buf->skb = NULL;

	return new_cons;
}
static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
{
	s16 used;
	u16 prod;
	u16 cons;

	barrier(); /* Tell compiler that prod and cons can change */
	prod = fp->tx_bd_prod;
	cons = fp->tx_bd_cons;

	/* NUM_TX_RINGS = number of "next-page" entries
	   It will be used as a threshold */
	used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;

#ifdef BNX2X_STOP_ON_ERROR
	WARN_ON(used > fp->bp->tx_ring_size);
	WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL);
#endif

	return (s16)(fp->bp->tx_ring_size) - used;
}
static void bnx2x_tx_int(struct bnx2x_fastpath *fp)
{
	struct bnx2x *bp = fp->bp;
	struct netdev_queue *txq;
	u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return;
#endif

	txq = netdev_get_tx_queue(bp->dev, fp->index);
	hw_cons = le16_to_cpu(*fp->tx_cons_sb);
	sw_cons = fp->tx_pkt_cons;

	while (sw_cons != hw_cons) {
		u16 pkt_cons;

		pkt_cons = TX_BD(sw_cons);

		/* prefetch(bp->tx_buf_ring[pkt_cons].skb); */

		DP(NETIF_MSG_TX_DONE, "hw_cons %u sw_cons %u pkt_cons %u\n",
		   hw_cons, sw_cons, pkt_cons);

/*		if (NEXT_TX_IDX(sw_cons) != hw_cons) {
			rmb();
			prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
		}
*/
		bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
		sw_cons++;
	}

	fp->tx_pkt_cons = sw_cons;
	fp->tx_bd_cons = bd_cons;

	/* TBD need a thresh? */
	if (unlikely(netif_tx_queue_stopped(txq))) {

		__netif_tx_lock(txq, smp_processor_id());

		/* Need to make the tx_bd_cons update visible to start_xmit()
		 * before checking for netif_tx_queue_stopped().  Without the
		 * memory barrier, there is a small possibility that
		 * start_xmit() will miss it and cause the queue to be stopped
		 * forever.
		 */
		smp_mb();

		if ((netif_tx_queue_stopped(txq)) &&
		    (bp->state == BNX2X_STATE_OPEN) &&
		    (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
			netif_tx_wake_queue(txq);

		__netif_tx_unlock(txq);
	}
}
static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
			   union eth_rx_cqe *rr_cqe)
{
	struct bnx2x *bp = fp->bp;
	int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
	int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);

	DP(BNX2X_MSG_SP,
	   "fp %d cid %d got ramrod #%d state is %x type is %d\n",
	   fp->index, cid, command, bp->state,
	   rr_cqe->ramrod_cqe.ramrod_type);

	if (fp->index) {
		switch (command | fp->state) {
		case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
						BNX2X_FP_STATE_OPENING):
			DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
			   cid);
			fp->state = BNX2X_FP_STATE_OPEN;
			break;

		case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
			DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
			   cid);
			fp->state = BNX2X_FP_STATE_HALTED;
			break;

		default:
			BNX2X_ERR("unexpected MC reply (%d) "
				  "fp->state is %x\n", command, fp->state);
			break;
		}
		mb(); /* force bnx2x_wait_ramrod() to see the change */
		return;
	}

	switch (command | bp->state) {
	case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
		DP(NETIF_MSG_IFUP, "got setup ramrod\n");
		bp->state = BNX2X_STATE_OPEN;
		break;

	case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
		bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
		fp->state = BNX2X_FP_STATE_HALTED;
		break;

	case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
		bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
		break;

	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
		DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
		bp->set_mac_pending = 0;
		break;

	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
		break;

	default:
		BNX2X_ERR("unexpected MC reply (%d) bp->state is %x\n",
			  command, bp->state);
		break;
	}
	mb(); /* force bnx2x_wait_ramrod() to see the change */
}
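
/*
 * Implementation note: "ramrods" are slow-path commands posted on the SPQ;
 * the firmware reports their completion as slow-path CQEs on the RCQ, and
 * bnx2x_sp_event() ORs the completed command with the current state to
 * decide which transition happened (e.g. CLIENT_SETUP while OPENING moves
 * the queue to OPEN).  bnx2x_wait_ramrod() elsewhere in the driver polls
 * these state fields, which is what the mb() above is protecting.
 */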
static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct page *page = sw_buf->page;
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];

	/* Skip "next page" elements */
	if (!page)
		return;

	pci_unmap_page(bp->pdev, pci_unmap_addr(sw_buf, mapping),
		       SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
	__free_pages(page, PAGES_PER_SGE_SHIFT);

	sw_buf->page = NULL;
	sge->addr_hi = 0;
	sge->addr_lo = 0;
}

static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
					   struct bnx2x_fastpath *fp, int last)
{
	int i;

	for (i = 0; i < last; i++)
		bnx2x_free_rx_sge(bp, fp, i);
}
static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
	dma_addr_t mapping;

	if (unlikely(page == NULL))
		return -ENOMEM;

	mapping = pci_map_page(bp->pdev, page, 0, SGE_PAGE_SIZE*PAGES_PER_SGE,
			       PCI_DMA_FROMDEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		__free_pages(page, PAGES_PER_SGE_SHIFT);
		return -ENOMEM;
	}

	sw_buf->page = page;
	pci_unmap_addr_set(sw_buf, mapping, mapping);

	sge->addr_hi = cpu_to_le32(U64_HI(mapping));
	sge->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}
static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct sk_buff *skb;
	struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
	struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
	dma_addr_t mapping;

	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
	if (unlikely(skb == NULL))
		return -ENOMEM;

	mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_size,
				 PCI_DMA_FROMDEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		dev_kfree_skb(skb);
		return -ENOMEM;
	}

	rx_buf->skb = skb;
	pci_unmap_addr_set(rx_buf, mapping, mapping);

	rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}
/* note that we are not allocating a new skb,
 * we are just moving one from cons to prod
 * we are not creating a new mapping,
 * so there is no need to check for dma_mapping_error().
 */
static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
			       struct sk_buff *skb, u16 cons, u16 prod)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];

	pci_dma_sync_single_for_device(bp->pdev,
				       pci_unmap_addr(cons_rx_buf, mapping),
				       RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

	prod_rx_buf->skb = cons_rx_buf->skb;
	pci_unmap_addr_set(prod_rx_buf, mapping,
			   pci_unmap_addr(cons_rx_buf, mapping));
	*prod_bd = *cons_bd;
}
static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
					     u16 idx)
{
	u16 last_max = fp->last_max_sge;

	if (SUB_S16(idx, last_max) > 0)
		fp->last_max_sge = idx;
}

static void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
{
	int i, j;

	for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
		int idx = RX_SGE_CNT * i - 1;

		for (j = 0; j < 2; j++) {
			SGE_MASK_CLEAR_BIT(fp, idx);
			idx--;
		}
	}
}
static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
				  struct eth_fast_path_rx_cqe *fp_cqe)
{
	struct bnx2x *bp = fp->bp;
	u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
				     le16_to_cpu(fp_cqe->len_on_bd)) >>
		      SGE_PAGE_SHIFT;
	u16 last_max, last_elem, first_elem;
	u16 delta = 0;
	u16 i;

	if (!sge_len)
		return;

	/* First mark all used pages */
	for (i = 0; i < sge_len; i++)
		SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i])));

	DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
	   sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

	/* Here we assume that the last SGE index is the biggest */
	prefetch((void *)(fp->sge_mask));
	bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

	last_max = RX_SGE(fp->last_max_sge);
	last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
	first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;

	/* If ring is not full */
	if (last_elem + 1 != first_elem)
		last_elem++;

	/* Now update the prod */
	for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
		if (likely(fp->sge_mask[i]))
			break;

		fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
		delta += RX_SGE_MASK_ELEM_SZ;
	}

	if (delta > 0) {
		fp->rx_sge_prod += delta;
		/* clear page-end entries */
		bnx2x_clear_sge_mask_next_elems(fp);
	}

	DP(NETIF_MSG_RX_STATUS,
	   "fp->last_max_sge = %d fp->rx_sge_prod = %d\n",
	   fp->last_max_sge, fp->rx_sge_prod);
}
static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
{
	/* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
	memset(fp->sge_mask, 0xff,
	       (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));

	/* Clear the two last indices in the page to 1:
	   these are the indices that correspond to the "next" element,
	   hence will never be indicated and should be removed from
	   the calculations. */
	bnx2x_clear_sge_mask_next_elems(fp);
}
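
/*
 * Implementation note: sge_mask is a bitmap with one bit per SGE ring
 * entry; a set bit means the entry is still owned by the driver.  TPA
 * completions clear the bits of the pages consumed by the firmware, and
 * bnx2x_update_sge_prod() only advances rx_sge_prod over fully cleared
 * 64-bit mask words, so the producer never passes a page the firmware may
 * still be using.
 */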
static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
			    struct sk_buff *skb, u16 cons, u16 prod)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
	dma_addr_t mapping;

	/* move empty skb from pool to prod and map it */
	prod_rx_buf->skb = fp->tpa_pool[queue].skb;
	mapping = pci_map_single(bp->pdev, fp->tpa_pool[queue].skb->data,
				 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
	pci_unmap_addr_set(prod_rx_buf, mapping, mapping);

	/* move partial skb from cons to pool (don't unmap yet) */
	fp->tpa_pool[queue] = *cons_rx_buf;

	/* mark bin state as start - print error if current state != stop */
	if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
		BNX2X_ERR("start of bin not in stop [%d]\n", queue);

	fp->tpa_state[queue] = BNX2X_TPA_START;

	/* point prod_bd to new skb */
	prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

#ifdef BNX2X_STOP_ON_ERROR
	fp->tpa_queue_used |= (1 << queue);
#ifdef __powerpc64__
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
#else
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
#endif
	   fp->tpa_queue_used);
#endif
}
static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			       struct sk_buff *skb,
			       struct eth_fast_path_rx_cqe *fp_cqe,
			       u16 cqe_idx)
{
	struct sw_rx_page *rx_pg, old_rx_pg;
	u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
	u32 i, frag_len, frag_size, pages;
	int err;
	int j;

	frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
	pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;

	/* This is needed in order to enable forwarding support */
	skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE,
					max(frag_size, (u32)len_on_bd));

#ifdef BNX2X_STOP_ON_ERROR
	if (pages >
	    min((u32)8, (u32)MAX_SKB_FRAGS) * SGE_PAGE_SIZE * PAGES_PER_SGE) {
		BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
			  pages, cqe_idx);
		BNX2X_ERR("fp_cqe->pkt_len = %d fp_cqe->len_on_bd = %d\n",
			  fp_cqe->pkt_len, len_on_bd);
		bnx2x_panic();
		return -EINVAL;
	}
#endif

	/* Run through the SGL and compose the fragmented skb */
	for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
		u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j]));

		/* FW gives the indices of the SGE as if the ring is an array
		   (meaning that "next" element will consume 2 indices) */
		frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
		rx_pg = &fp->rx_page_ring[sge_idx];
		old_rx_pg = *rx_pg;

		/* If we fail to allocate a substitute page, we simply stop
		   where we are and drop the whole packet */
		err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
		if (unlikely(err)) {
			fp->eth_q_stats.rx_skb_alloc_failed++;
			return err;
		}

		/* Unmap the page as we r going to pass it to the stack */
		pci_unmap_page(bp->pdev, pci_unmap_addr(&old_rx_pg, mapping),
			       SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);

		/* Add one frag and update the appropriate fields in the skb */
		skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);

		skb->data_len += frag_len;
		skb->truesize += frag_len;
		skb->len += frag_len;

		frag_size -= frag_len;
	}

	return 0;
}
static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			   u16 queue, int pad, int len, union eth_rx_cqe *cqe,
			   u16 cqe_idx)
{
	struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
	struct sk_buff *skb = rx_buf->skb;
	/* alloc new skb */
	struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);

	/* Unmap skb in the pool anyway, as we are going to change
	   pool entry status to BNX2X_TPA_STOP even if new skb allocation
	   fails. */
	pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
			 bp->rx_buf_size, PCI_DMA_FROMDEVICE);

	if (likely(new_skb)) {
		/* fix ip xsum and give it to the stack */
		/* (no need to map the new skb) */
		int is_vlan_cqe =
			(le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
			 PARSING_FLAGS_VLAN);
		int is_not_hwaccel_vlan_cqe =
			(is_vlan_cqe && (!(bp->flags & HW_VLAN_RX_FLAG)));

		prefetch(((char *)(skb)) + 128);

#ifdef BNX2X_STOP_ON_ERROR
		if (pad + len > bp->rx_buf_size) {
			BNX2X_ERR("skb_put is about to fail... "
				  "pad %d len %d rx_buf_size %d\n",
				  pad, len, bp->rx_buf_size);
			bnx2x_panic();
			return;
		}
#endif

		skb_reserve(skb, pad);
		skb_put(skb, len);

		skb->protocol = eth_type_trans(skb, bp->dev);
		skb->ip_summed = CHECKSUM_UNNECESSARY;

		{
			struct iphdr *iph;

			iph = (struct iphdr *)skb->data;
			/* If there is no Rx VLAN offloading -
			   take VLAN tag into an account */
			if (unlikely(is_not_hwaccel_vlan_cqe))
				iph = (struct iphdr *)((u8 *)iph + VLAN_HLEN);
			iph->check = 0;
			iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
		}

		if (!bnx2x_fill_frag_skb(bp, fp, skb,
					 &cqe->fast_path_cqe, cqe_idx)) {
#ifdef BCM_VLAN
			if ((bp->vlgrp != NULL) && is_vlan_cqe &&
			    (!is_not_hwaccel_vlan_cqe))
				vlan_hwaccel_receive_skb(skb, bp->vlgrp,
						le16_to_cpu(cqe->fast_path_cqe.
							    vlan_tag));
			else
#endif
				netif_receive_skb(skb);
		} else {
			DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
			   " - dropping packet!\n");
			dev_kfree_skb(skb);
		}

		/* put new skb in bin */
		fp->tpa_pool[queue].skb = new_skb;

	} else {
		/* else drop the packet and keep the buffer in the bin */
		DP(NETIF_MSG_RX_STATUS,
		   "Failed to allocate new skb - dropping packet!\n");
		fp->eth_q_stats.rx_skb_alloc_failed++;
	}

	fp->tpa_state[queue] = BNX2X_TPA_STOP;
}
static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
					struct bnx2x_fastpath *fp,
					u16 bd_prod, u16 rx_comp_prod,
					u16 rx_sge_prod)
{
	struct ustorm_eth_rx_producers rx_prods = {0};
	int i;

	/* Update producers */
	rx_prods.bd_prod = bd_prod;
	rx_prods.cqe_prod = rx_comp_prod;
	rx_prods.sge_prod = rx_sge_prod;

	/*
	 * Make sure that the BD and SGE data is updated before updating the
	 * producers since FW might read the BD/SGE right after the producer
	 * is updated.
	 * This is only applicable for weak-ordered memory model archs such
	 * as IA-64. The following barrier is also mandatory since FW will
	 * assumes BDs must have buffers.
	 */
	wmb();

	for (i = 0; i < sizeof(struct ustorm_eth_rx_producers)/4; i++)
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_RX_PRODS_OFFSET(BP_PORT(bp), fp->cl_id) + i*4,
		       ((u32 *)&rx_prods)[i]);

	mmiowb(); /* keep prod updates ordered */

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]: wrote bd_prod %u cqe_prod %u sge_prod %u\n",
	   fp->index, bd_prod, rx_comp_prod, rx_sge_prod);
}
*fp
, int budget
)
1452 struct bnx2x
*bp
= fp
->bp
;
1453 u16 bd_cons
, bd_prod
, bd_prod_fw
, comp_ring_cons
;
1454 u16 hw_comp_cons
, sw_comp_cons
, sw_comp_prod
;
1457 #ifdef BNX2X_STOP_ON_ERROR
1458 if (unlikely(bp
->panic
))
1462 /* CQ "next element" is of the size of the regular element,
1463 that's why it's ok here */
1464 hw_comp_cons
= le16_to_cpu(*fp
->rx_cons_sb
);
1465 if ((hw_comp_cons
& MAX_RCQ_DESC_CNT
) == MAX_RCQ_DESC_CNT
)
1468 bd_cons
= fp
->rx_bd_cons
;
1469 bd_prod
= fp
->rx_bd_prod
;
1470 bd_prod_fw
= bd_prod
;
1471 sw_comp_cons
= fp
->rx_comp_cons
;
1472 sw_comp_prod
= fp
->rx_comp_prod
;
1474 /* Memory barrier necessary as speculative reads of the rx
1475 * buffer can be ahead of the index in the status block
1479 DP(NETIF_MSG_RX_STATUS
,
1480 "queue[%d]: hw_comp_cons %u sw_comp_cons %u\n",
1481 fp
->index
, hw_comp_cons
, sw_comp_cons
);
1483 while (sw_comp_cons
!= hw_comp_cons
) {
1484 struct sw_rx_bd
*rx_buf
= NULL
;
1485 struct sk_buff
*skb
;
1486 union eth_rx_cqe
*cqe
;
1490 comp_ring_cons
= RCQ_BD(sw_comp_cons
);
1491 bd_prod
= RX_BD(bd_prod
);
1492 bd_cons
= RX_BD(bd_cons
);
1494 cqe
= &fp
->rx_comp_ring
[comp_ring_cons
];
1495 cqe_fp_flags
= cqe
->fast_path_cqe
.type_error_flags
;
1497 DP(NETIF_MSG_RX_STATUS
, "CQE type %x err %x status %x"
1498 " queue %x vlan %x len %u\n", CQE_TYPE(cqe_fp_flags
),
1499 cqe_fp_flags
, cqe
->fast_path_cqe
.status_flags
,
1500 le32_to_cpu(cqe
->fast_path_cqe
.rss_hash_result
),
1501 le16_to_cpu(cqe
->fast_path_cqe
.vlan_tag
),
1502 le16_to_cpu(cqe
->fast_path_cqe
.pkt_len
));
1504 /* is this a slowpath msg? */
1505 if (unlikely(CQE_TYPE(cqe_fp_flags
))) {
1506 bnx2x_sp_event(fp
, cqe
);
1509 /* this is an rx packet */
1511 rx_buf
= &fp
->rx_buf_ring
[bd_cons
];
1513 len
= le16_to_cpu(cqe
->fast_path_cqe
.pkt_len
);
1514 pad
= cqe
->fast_path_cqe
.placement_offset
;
1516 /* If CQE is marked both TPA_START and TPA_END
1517 it is a non-TPA CQE */
1518 if ((!fp
->disable_tpa
) &&
1519 (TPA_TYPE(cqe_fp_flags
) !=
1520 (TPA_TYPE_START
| TPA_TYPE_END
))) {
1521 u16 queue
= cqe
->fast_path_cqe
.queue_index
;
1523 if (TPA_TYPE(cqe_fp_flags
) == TPA_TYPE_START
) {
1524 DP(NETIF_MSG_RX_STATUS
,
1525 "calling tpa_start on queue %d\n",
1528 bnx2x_tpa_start(fp
, queue
, skb
,
1533 if (TPA_TYPE(cqe_fp_flags
) == TPA_TYPE_END
) {
1534 DP(NETIF_MSG_RX_STATUS
,
1535 "calling tpa_stop on queue %d\n",
1538 if (!BNX2X_RX_SUM_FIX(cqe
))
1539 BNX2X_ERR("STOP on none TCP "
1542 /* This is a size of the linear data
1544 len
= le16_to_cpu(cqe
->fast_path_cqe
.
1546 bnx2x_tpa_stop(bp
, fp
, queue
, pad
,
1547 len
, cqe
, comp_ring_cons
);
1548 #ifdef BNX2X_STOP_ON_ERROR
1553 bnx2x_update_sge_prod(fp
,
1554 &cqe
->fast_path_cqe
);
1559 pci_dma_sync_single_for_device(bp
->pdev
,
1560 pci_unmap_addr(rx_buf
, mapping
),
1561 pad
+ RX_COPY_THRESH
,
1562 PCI_DMA_FROMDEVICE
);
1564 prefetch(((char *)(skb
)) + 128);
1566 /* is this an error packet? */
1567 if (unlikely(cqe_fp_flags
& ETH_RX_ERROR_FALGS
)) {
1568 DP(NETIF_MSG_RX_ERR
,
1569 "ERROR flags %x rx packet %u\n",
1570 cqe_fp_flags
, sw_comp_cons
);
1571 fp
->eth_q_stats
.rx_err_discard_pkt
++;
1575 /* Since we don't have a jumbo ring
1576 * copy small packets if mtu > 1500
1578 if ((bp
->dev
->mtu
> ETH_MAX_PACKET_SIZE
) &&
1579 (len
<= RX_COPY_THRESH
)) {
1580 struct sk_buff
*new_skb
;
1582 new_skb
= netdev_alloc_skb(bp
->dev
,
1584 if (new_skb
== NULL
) {
1585 DP(NETIF_MSG_RX_ERR
,
1586 "ERROR packet dropped "
1587 "because of alloc failure\n");
1588 fp
->eth_q_stats
.rx_skb_alloc_failed
++;
1593 skb_copy_from_linear_data_offset(skb
, pad
,
1594 new_skb
->data
+ pad
, len
);
1595 skb_reserve(new_skb
, pad
);
1596 skb_put(new_skb
, len
);
1598 bnx2x_reuse_rx_skb(fp
, skb
, bd_cons
, bd_prod
);
1602 } else if (bnx2x_alloc_rx_skb(bp
, fp
, bd_prod
) == 0) {
1603 pci_unmap_single(bp
->pdev
,
1604 pci_unmap_addr(rx_buf
, mapping
),
1606 PCI_DMA_FROMDEVICE
);
1607 skb_reserve(skb
, pad
);
1611 DP(NETIF_MSG_RX_ERR
,
1612 "ERROR packet dropped because "
1613 "of alloc failure\n");
1614 fp
->eth_q_stats
.rx_skb_alloc_failed
++;
1616 bnx2x_reuse_rx_skb(fp
, skb
, bd_cons
, bd_prod
);
1620 skb
->protocol
= eth_type_trans(skb
, bp
->dev
);
1622 skb
->ip_summed
= CHECKSUM_NONE
;
1624 if (likely(BNX2X_RX_CSUM_OK(cqe
)))
1625 skb
->ip_summed
= CHECKSUM_UNNECESSARY
;
1627 fp
->eth_q_stats
.hw_csum_err
++;
1631 skb_record_rx_queue(skb
, fp
->index
);
1633 if ((bp
->vlgrp
!= NULL
) && (bp
->flags
& HW_VLAN_RX_FLAG
) &&
1634 (le16_to_cpu(cqe
->fast_path_cqe
.pars_flags
.flags
) &
1635 PARSING_FLAGS_VLAN
))
1636 vlan_hwaccel_receive_skb(skb
, bp
->vlgrp
,
1637 le16_to_cpu(cqe
->fast_path_cqe
.vlan_tag
));
1640 netif_receive_skb(skb
);
1646 bd_cons
= NEXT_RX_IDX(bd_cons
);
1647 bd_prod
= NEXT_RX_IDX(bd_prod
);
1648 bd_prod_fw
= NEXT_RX_IDX(bd_prod_fw
);
1651 sw_comp_prod
= NEXT_RCQ_IDX(sw_comp_prod
);
1652 sw_comp_cons
= NEXT_RCQ_IDX(sw_comp_cons
);
1654 if (rx_pkt
== budget
)
1658 fp
->rx_bd_cons
= bd_cons
;
1659 fp
->rx_bd_prod
= bd_prod_fw
;
1660 fp
->rx_comp_cons
= sw_comp_cons
;
1661 fp
->rx_comp_prod
= sw_comp_prod
;
1663 /* Update producers */
1664 bnx2x_update_rx_prod(bp
, fp
, bd_prod_fw
, sw_comp_prod
,
1667 fp
->rx_pkt
+= rx_pkt
;
static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
{
	struct bnx2x_fastpath *fp = fp_cookie;
	struct bnx2x *bp = fp->bp;
	int index = fp->index;

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

	DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
	   index, fp->sb_id);
	bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	prefetch(fp->rx_cons_sb);
	prefetch(fp->tx_cons_sb);
	prefetch(&fp->status_blk->c_status_block.status_block_index);
	prefetch(&fp->status_blk->u_status_block.status_block_index);

	napi_schedule(&bnx2x_fp(bp, index, napi));

	return IRQ_HANDLED;
}
static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
{
	struct bnx2x *bp = netdev_priv(dev_instance);
	u16 status = bnx2x_ack_int(bp);
	u16 mask;

	/* Return here if interrupt is shared and it's not for us */
	if (unlikely(status == 0)) {
		DP(NETIF_MSG_INTR, "not our interrupt!\n");
		return IRQ_NONE;
	}
	DP(NETIF_MSG_INTR, "got an interrupt status 0x%x\n", status);

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	mask = 0x2 << bp->fp[0].sb_id;
	if (status & mask) {
		struct bnx2x_fastpath *fp = &bp->fp[0];

		prefetch(fp->rx_cons_sb);
		prefetch(fp->tx_cons_sb);
		prefetch(&fp->status_blk->c_status_block.status_block_index);
		prefetch(&fp->status_blk->u_status_block.status_block_index);

		napi_schedule(&bnx2x_fp(bp, 0, napi));

		status &= ~mask;
	}

	if (unlikely(status & 0x1)) {
		queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);

		status &= ~0x1;
		if (!status)
			return IRQ_HANDLED;
	}

	if (unlikely(status))
		DP(NETIF_MSG_INTR, "got an unknown interrupt! (status %u)\n",
		   status);

	return IRQ_HANDLED;
}
/* end of fast path */

static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);

/*
 * General service functions
 */
static int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	int func = BP_FUNC(bp);
	u32 hw_lock_control_reg;
	int cnt;

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		DP(NETIF_MSG_HW,
		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return -EINVAL;
	}

	if (func <= 5) {
		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
	} else {
		hw_lock_control_reg =
				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
	}

	/* Validating that the resource is not already taken */
	lock_status = REG_RD(bp, hw_lock_control_reg);
	if (lock_status & resource_bit) {
		DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
		   lock_status, resource_bit);
		return -EEXIST;
	}

	/* Try for 5 second every 5ms */
	for (cnt = 0; cnt < 1000; cnt++) {
		/* Try to acquire the lock */
		REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
		lock_status = REG_RD(bp, hw_lock_control_reg);
		if (lock_status & resource_bit)
			return 0;

		msleep(5);
	}
	DP(NETIF_MSG_HW, "Timeout\n");
	return -EAGAIN;
}
static int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	int func = BP_FUNC(bp);
	u32 hw_lock_control_reg;

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		DP(NETIF_MSG_HW,
		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return -EINVAL;
	}

	if (func <= 5) {
		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
	} else {
		hw_lock_control_reg =
				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
	}

	/* Validating that the resource is currently taken */
	lock_status = REG_RD(bp, hw_lock_control_reg);
	if (!(lock_status & resource_bit)) {
		DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
		   lock_status, resource_bit);
		return -EFAULT;
	}

	REG_WR(bp, hw_lock_control_reg, resource_bit);
	return 0;
}
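
/*
 * Usage note: the "HW lock" is a register-based semaphore shared between
 * PCI functions and the MCP firmware.  Callers bracket the protected
 * hardware access; the GPIO/SPIO helpers below effectively do:
 *
 *	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
 *	... read-modify-write MISC_REG_GPIO ...
 *	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
 */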
/* HW Lock for shared dual port PHYs */
static void bnx2x_acquire_phy_lock(struct bnx2x *bp)
{
	mutex_lock(&bp->port.phy_mutex);

	if (bp->port.need_hw_lock)
		bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
}

static void bnx2x_release_phy_lock(struct bnx2x *bp)
{
	if (bp->port.need_hw_lock)
		bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);

	mutex_unlock(&bp->port.phy_mutex);
}
int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;
	int value;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	/* read GPIO value */
	gpio_reg = REG_RD(bp, MISC_REG_GPIO);

	/* get the requested pin value */
	if ((gpio_reg & gpio_mask) == gpio_mask)
		value = 1;
	else
		value = 0;

	DP(NETIF_MSG_LINK, "pin %d value 0x%x\n", gpio_num, value);

	return value;
}
int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
	/* read GPIO and mask except the float bits */
	gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);

	switch (mode) {
	case MISC_REGISTERS_GPIO_OUTPUT_LOW:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
		   gpio_num, gpio_shift);
		/* clear FLOAT and set CLR */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
		break;

	case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
		   gpio_num, gpio_shift);
		/* clear FLOAT and set SET */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
		break;

	case MISC_REGISTERS_GPIO_INPUT_HI_Z:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
		   gpio_num, gpio_shift);
		/* set FLOAT */
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_GPIO, gpio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);

	return 0;
}
int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
	/* read GPIO int */
	gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);

	switch (mode) {
	case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
		DP(NETIF_MSG_LINK, "Clear GPIO INT %d (shift %d) -> "
				   "output low\n", gpio_num, gpio_shift);
		/* clear SET and set CLR */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
		gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
		break;

	case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
		DP(NETIF_MSG_LINK, "Set GPIO INT %d (shift %d) -> "
				   "output high\n", gpio_num, gpio_shift);
		/* clear CLR and set SET */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
		gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);

	return 0;
}
static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
{
	u32 spio_mask = (1 << spio_num);
	u32 spio_reg;

	if ((spio_num < MISC_REGISTERS_SPIO_4) ||
	    (spio_num > MISC_REGISTERS_SPIO_7)) {
		BNX2X_ERR("Invalid SPIO %d\n", spio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
	/* read SPIO and mask except the float bits */
	spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);

	switch (mode) {
	case MISC_REGISTERS_SPIO_OUTPUT_LOW:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
		/* clear FLOAT and set CLR */
		spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
		break;

	case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
		/* clear FLOAT and set SET */
		spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
		break;

	case MISC_REGISTERS_SPIO_INPUT_HI_Z:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
		/* set FLOAT */
		spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_SPIO, spio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);

	return 0;
}
static void bnx2x_calc_fc_adv(struct bnx2x *bp)
{
	switch (bp->link_vars.ieee_fc &
		MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
		bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
					  ADVERTISED_Pause);
		break;

	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
		bp->port.advertising |= (ADVERTISED_Asym_Pause |
					 ADVERTISED_Pause);
		break;

	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
		bp->port.advertising |= ADVERTISED_Asym_Pause;
		break;

	default:
		bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
					  ADVERTISED_Pause);
		break;
	}
}
static void bnx2x_link_report(struct bnx2x *bp)
{
	if (bp->link_vars.link_up) {
		if (bp->state == BNX2X_STATE_OPEN)
			netif_carrier_on(bp->dev);
		printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);

		printk("%d Mbps ", bp->link_vars.line_speed);

		if (bp->link_vars.duplex == DUPLEX_FULL)
			printk("full duplex");
		else
			printk("half duplex");

		if (bp->link_vars.flow_ctrl != BNX2X_FLOW_CTRL_NONE) {
			if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) {
				printk(", receive ");
				if (bp->link_vars.flow_ctrl &
				    BNX2X_FLOW_CTRL_TX)
					printk("& transmit ");
			} else {
				printk(", transmit ");
			}
			printk("flow control ON");
		}
		printk("\n");

	} else { /* link_down */
		netif_carrier_off(bp->dev);
		printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
	}
}
static u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
{
	if (!BP_NOMCP(bp)) {
		u8 rc;

		/* Initialize link parameters structure variables */
		/* It is recommended to turn off RX FC for jumbo frames
		   for better performance */
		if (IS_E1HMF(bp))
			bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
		else if (bp->dev->mtu > 5000)
			bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
		else
			bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;

		bnx2x_acquire_phy_lock(bp);

		if (load_mode == LOAD_DIAG)
			bp->link_params.loopback_mode = LOOPBACK_XGXS_10;

		rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);

		bnx2x_release_phy_lock(bp);

		bnx2x_calc_fc_adv(bp);

		if (CHIP_REV_IS_SLOW(bp) && bp->link_vars.link_up) {
			bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
			bnx2x_link_report(bp);
		}

		return rc;
	}
	BNX2X_ERR("Bootcode is missing - can not initialize link\n");
	return -EINVAL;
}
static void bnx2x_link_set(struct bnx2x *bp)
{
	if (!BP_NOMCP(bp)) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_phy_init(&bp->link_params, &bp->link_vars);
		bnx2x_release_phy_lock(bp);

		bnx2x_calc_fc_adv(bp);
	} else
		BNX2X_ERR("Bootcode is missing - can not set link\n");
}

static void bnx2x__link_reset(struct bnx2x *bp)
{
	if (!BP_NOMCP(bp)) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
		bnx2x_release_phy_lock(bp);
	} else
		BNX2X_ERR("Bootcode is missing - can not reset link\n");
}
static u8 bnx2x_link_test(struct bnx2x *bp)
{
	u8 rc;

	bnx2x_acquire_phy_lock(bp);
	rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
	bnx2x_release_phy_lock(bp);

	return rc;
}
2167 static void bnx2x_init_port_minmax(struct bnx2x *bp)
2169 	u32 r_param = bp->link_vars.line_speed / 8;
2170 	u32 fair_periodic_timeout_usec;
2173 	memset(&(bp->cmng.rs_vars), 0,
2174 	       sizeof(struct rate_shaping_vars_per_port));
2175 	memset(&(bp->cmng.fair_vars), 0, sizeof(struct fairness_vars_per_port));
2177 	/* 100 usec in SDM ticks = 25 since each tick is 4 usec */
2178 	bp->cmng.rs_vars.rs_periodic_timeout = RS_PERIODIC_TIMEOUT_USEC / 4;
2180 	/* this is the threshold below which no timer arming will occur
2181 	   1.25 coefficient is for the threshold to be a little bigger
2182 	   than the real time, to compensate for timer in-accuracy */
2183 	bp->cmng.rs_vars.rs_threshold =
2184 				(RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;
2186 	/* resolution of fairness timer */
2187 	fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
2188 	/* for 10G it is 1000usec. for 1G it is 10000usec. */
2189 	t_fair = T_FAIR_COEF / bp->link_vars.line_speed;
2191 	/* this is the threshold below which we won't arm the timer anymore */
2192 	bp->cmng.fair_vars.fair_threshold = QM_ARB_BYTES;
2194 	/* we multiply by 1e3/8 to get bytes/msec.
2195 	   We don't want the credits to pass a credit
2196 	   of the t_fair*FAIR_MEM (algorithm resolution) */
2197 	bp->cmng.fair_vars.upper_bound = r_param * t_fair * FAIR_MEM;
2198 	/* since each tick is 4 usec */
2199 	bp->cmng.fair_vars.fairness_timeout = fair_periodic_timeout_usec / 4;
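/* Worked example (for illustration only): on a 10000 Mbps link,
 * r_param = 10000 / 8 = 1250 bytes per usec, so the rate-shaping threshold
 * becomes RS_PERIODIC_TIMEOUT_USEC * 1250 * 5 / 4, i.e. 1.25 times the byte
 * count of one rate-shaping period, and t_fair works out to roughly 1000 usec
 * as noted in the comment above (vs roughly 10000 usec on a 1G link). */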
2202 static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func)
2204 	struct rate_shaping_vars_per_vn m_rs_vn;
2205 	struct fairness_vars_per_vn m_fair_vn;
2206 	u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2207 	u16 vn_min_rate, vn_max_rate;
2210 	/* If function is hidden - set min and max to zeroes */
2211 	if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
2216 		vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2217 				FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2218 		/* If fairness is enabled (not all min rates are zeroes) and
2219 		   if current min rate is zero - set it to 1.
2220 		   This is a requirement of the algorithm. */
2221 		if (bp->vn_weight_sum && (vn_min_rate == 0))
2222 			vn_min_rate = DEF_MIN_RATE;
2223 		vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
2224 				FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
2228 	   "func %d: vn_min_rate=%d  vn_max_rate=%d  vn_weight_sum=%d\n",
2229 	   func, vn_min_rate, vn_max_rate, bp->vn_weight_sum);
2231 	memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
2232 	memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));
2234 	/* global vn counter - maximal Mbps for this vn */
2235 	m_rs_vn.vn_counter.rate = vn_max_rate;
2237 	/* quota - number of bytes transmitted in this period */
2238 	m_rs_vn.vn_counter.quota =
2239 				(vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;
2241 	if (bp->vn_weight_sum) {
2242 		/* credit for each period of the fairness algorithm:
2243 		   number of bytes in T_FAIR (the vn share the port rate).
2244 		   vn_weight_sum should not be larger than 10000, thus
2245 		   T_FAIR_COEF / (8 * vn_weight_sum) will always be greater
2247 		m_fair_vn.vn_credit_delta =
2248 			max((u32)(vn_min_rate * (T_FAIR_COEF /
2249 					(8 * bp->vn_weight_sum))),
2250 			    (u32)(bp->cmng.fair_vars.fair_threshold * 2));
2251 		DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta=%d\n",
2252 		   m_fair_vn.vn_credit_delta);
2255 	/* Store it to internal memory */
2256 	for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
2257 		REG_WR(bp, BAR_XSTRORM_INTMEM +
2258 		       XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
2259 		       ((u32 *)(&m_rs_vn))[i]);
2261 	for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
2262 		REG_WR(bp, BAR_XSTRORM_INTMEM +
2263 		       XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
2264 		       ((u32 *)(&m_fair_vn))[i]);
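/* Worked example (illustrative): the per-function min/max bandwidth fields
 * read from shared memory are scaled by 100, so a MIN_BW field of 25 yields
 * vn_min_rate = 2500 and a MAX_BW field of 100 yields vn_max_rate = 10000
 * (Mbps).  The per-period quota is then vn_max_rate * RS_PERIODIC_TIMEOUT_USEC
 * / 8 bytes, since a rate in Mbps is bits per microsecond. */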
2268 /* This function is called upon link interrupt */
2269 static void bnx2x_link_attn(struct bnx2x *bp)
2271 	/* Make sure that we are synced with the current statistics */
2272 	bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2274 	bnx2x_link_update(&bp->link_params, &bp->link_vars);
2276 	if (bp->link_vars.link_up) {
2278 		/* dropless flow control */
2279 		if (CHIP_IS_E1H(bp)) {
2280 			int port = BP_PORT(bp);
2281 			u32 pause_enabled = 0;
2283 			if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
2286 			REG_WR(bp, BAR_USTRORM_INTMEM +
2287 			       USTORM_PAUSE_ENABLED_OFFSET(port),
2291 		if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
2292 			struct host_port_stats *pstats;
2294 			pstats = bnx2x_sp(bp, port_stats);
2295 			/* reset old bmac stats */
2296 			memset(&(pstats->mac_stx[0]), 0,
2297 			       sizeof(struct mac_stx));
2299 		if ((bp->state == BNX2X_STATE_OPEN) ||
2300 		    (bp->state == BNX2X_STATE_DISABLED))
2301 			bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2304 	/* indicate link status */
2305 	bnx2x_link_report(bp);
2308 		int port = BP_PORT(bp);
2312 		for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2313 			if (vn == BP_E1HVN(bp))
2316 			func = ((vn << 1) | port);
2318 			/* Set the attention towards other drivers
2320 			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2321 			       (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2324 		if (bp->link_vars.link_up) {
2327 			/* Init rate shaping and fairness contexts */
2328 			bnx2x_init_port_minmax(bp);
2330 			for (vn = VN_0; vn < E1HVN_MAX; vn++)
2331 				bnx2x_init_vn_minmax(bp, 2*vn + port);
2333 			/* Store it to internal memory */
2335 			     i < sizeof(struct cmng_struct_per_port) / 4; i++)
2336 				REG_WR(bp, BAR_XSTRORM_INTMEM +
2337 				       XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
2338 				       ((u32 *)(&bp->cmng))[i]);
2343 static void bnx2x__link_status_update(struct bnx2x *bp)
2345 	if (bp->state != BNX2X_STATE_OPEN)
2348 	bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
2350 	if (bp->link_vars.link_up)
2351 		bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2353 		bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2355 	/* indicate link status */
2356 	bnx2x_link_report(bp);
2359 static void bnx2x_pmf_update(struct bnx2x *bp)
2361 	int port = BP_PORT(bp);
2365 	DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2367 	/* enable nig attention */
2368 	val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
2369 	REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
2370 	REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
2372 	bnx2x_stats_handle(bp, STATS_EVENT_PMF);
2380  * General service functions
2383 /* the slow path queue is odd since completions arrive on the fastpath ring */
2384 static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
2385 			 u32 data_hi, u32 data_lo, int common)
2387 	int func = BP_FUNC(bp);
2389 	DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
2390 	   "SPQE (%x:%x) command %d hw_cid %x data (%x:%x) left %x\n",
2391 	   (u32)U64_HI(bp->spq_mapping), (u32)(U64_LO(bp->spq_mapping) +
2392 	   (void *)bp->spq_prod_bd - (void *)bp->spq), command,
2393 	   HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);
2395 #ifdef BNX2X_STOP_ON_ERROR
2396 	if (unlikely(bp->panic))
2400 	spin_lock_bh(&bp->spq_lock);
2402 	if (!bp->spq_left) {
2403 		BNX2X_ERR("BUG! SPQ ring full!\n");
2404 		spin_unlock_bh(&bp->spq_lock);
2409 	/* CID needs port number to be encoded in it */
2410 	bp->spq_prod_bd->hdr.conn_and_cmd_data =
2411 			cpu_to_le32(((command << SPE_HDR_CMD_ID_SHIFT) |
2413 	bp->spq_prod_bd->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
2415 		bp->spq_prod_bd->hdr.type |=
2416 			cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));
2418 	bp->spq_prod_bd->data.mac_config_addr.hi = cpu_to_le32(data_hi);
2419 	bp->spq_prod_bd->data.mac_config_addr.lo = cpu_to_le32(data_lo);
2423 	if (bp->spq_prod_bd == bp->spq_last_bd) {
2424 		bp->spq_prod_bd = bp->spq;
2425 		bp->spq_prod_idx = 0;
2426 		DP(NETIF_MSG_TIMER, "end of spq\n");
2433 	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
2436 	spin_unlock_bh(&bp->spq_lock);
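/* Note (illustration, not driver code): the slow-path queue behaves as a
 * simple producer ring; once spq_prod_bd reaches spq_last_bd the producer
 * wraps back to bp->spq and spq_prod_idx restarts at 0, so a post at the
 * final BD leaves the next post back at BD 0. */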
2440 /* acquire split MCP access lock register */
2441 static int bnx2x_acquire_alr(struct bnx2x *bp)
2448 	for (j = 0; j < i*10; j++) {
2450 		REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2451 		val = REG_RD(bp, GRCBASE_MCP + 0x9c);
2452 		if (val & (1L << 31))
2457 	if (!(val & (1L << 31))) {
2458 		BNX2X_ERR("Cannot acquire MCP access lock register\n");
2465 /* release split MCP access lock register */
2466 static void bnx2x_release_alr(struct bnx2x *bp)
2470 	REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2473 static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
2475 	struct host_def_status_block *def_sb = bp->def_status_blk;
2478 	barrier(); /* status block is written to by the chip */
2479 	if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
2480 		bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
2483 	if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
2484 		bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
2487 	if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
2488 		bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
2491 	if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
2492 		bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
2495 	if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
2496 		bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
2503  * slow path service functions
2506 static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
2508 	int port = BP_PORT(bp);
2509 	u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
2510 		       COMMAND_REG_ATTN_BITS_SET);
2511 	u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2512 			      MISC_REG_AEU_MASK_ATTN_FUNC_0;
2513 	u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
2514 				       NIG_REG_MASK_INTERRUPT_PORT0;
2518 	if (bp->attn_state & asserted)
2519 		BNX2X_ERR("IGU ERROR\n");
2521 	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2522 	aeu_mask = REG_RD(bp, aeu_addr);
2524 	DP(NETIF_MSG_HW, "aeu_mask %x  newly asserted %x\n",
2525 	   aeu_mask, asserted);
2526 	aeu_mask &= ~(asserted & 0xff);
2527 	DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
2529 	REG_WR(bp, aeu_addr, aeu_mask);
2530 	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2532 	DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2533 	bp->attn_state |= asserted;
2534 	DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2536 	if (asserted & ATTN_HARD_WIRED_MASK) {
2537 		if (asserted & ATTN_NIG_FOR_FUNC) {
2539 			bnx2x_acquire_phy_lock(bp);
2541 			/* save nig interrupt mask */
2542 			nig_mask = REG_RD(bp, nig_int_mask_addr);
2543 			REG_WR(bp, nig_int_mask_addr, 0);
2545 			bnx2x_link_attn(bp);
2547 			/* handle unicore attn? */
2549 		if (asserted & ATTN_SW_TIMER_4_FUNC)
2550 			DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
2552 		if (asserted & GPIO_2_FUNC)
2553 			DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
2555 		if (asserted & GPIO_3_FUNC)
2556 			DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
2558 		if (asserted & GPIO_4_FUNC)
2559 			DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
2562 		if (asserted & ATTN_GENERAL_ATTN_1) {
2563 			DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
2564 			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
2566 		if (asserted & ATTN_GENERAL_ATTN_2) {
2567 			DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
2568 			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
2570 		if (asserted & ATTN_GENERAL_ATTN_3) {
2571 			DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
2572 			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
2575 		if (asserted & ATTN_GENERAL_ATTN_4) {
2576 			DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
2577 			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
2579 		if (asserted & ATTN_GENERAL_ATTN_5) {
2580 			DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
2581 			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
2583 		if (asserted & ATTN_GENERAL_ATTN_6) {
2584 			DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
2585 			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
2589 	} /* if hardwired */
2591 	DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2593 	REG_WR(bp, hc_addr, asserted);
2595 	/* now set back the mask */
2596 	if (asserted & ATTN_NIG_FOR_FUNC) {
2597 		REG_WR(bp, nig_int_mask_addr, nig_mask);
2598 		bnx2x_release_phy_lock(bp);
2602 static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
2604 	int port = BP_PORT(bp);
2608 	reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
2609 			     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
2611 	if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
2613 		val = REG_RD(bp, reg_offset);
2614 		val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
2615 		REG_WR(bp, reg_offset, val);
2617 		BNX2X_ERR("SPIO5 hw attention\n");
2619 		switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
2620 		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
2621 			/* Fan failure attention */
2623 			/* The PHY reset is controlled by GPIO 1 */
2624 			bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2625 				       MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2626 			/* Low power mode is controlled by GPIO 2 */
2627 			bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
2628 				       MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2629 			/* mark the failure */
2630 			bp->link_params.ext_phy_config &=
2631 					~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
2632 			bp->link_params.ext_phy_config |=
2633 					PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
2635 				  dev_info.port_hw_config[port].
2636 					  external_phy_config,
2637 				  bp->link_params.ext_phy_config);
2638 			/* log the failure */
2639 			printk(KERN_ERR PFX "Fan Failure on Network"
2640 			       " Controller %s has caused the driver to"
2641 			       " shutdown the card to prevent permanent"
2642 			       " damage.  Please contact Dell Support for"
2643 			       " assistance\n", bp->dev->name);
2651 	if (attn & (AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 |
2652 		    AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1)) {
2653 		bnx2x_acquire_phy_lock(bp);
2654 		bnx2x_handle_module_detect_int(&bp->link_params);
2655 		bnx2x_release_phy_lock(bp);
2658 	if (attn & HW_INTERRUT_ASSERT_SET_0) {
2660 		val = REG_RD(bp, reg_offset);
2661 		val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
2662 		REG_WR(bp, reg_offset, val);
2664 		BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
2665 			  (attn & HW_INTERRUT_ASSERT_SET_0));
2670 static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
2674 	if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {
2676 		val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
2677 		BNX2X_ERR("DB hw attention 0x%x\n", val);
2678 		/* DORQ discard attention */
2680 			BNX2X_ERR("FATAL error from DORQ\n");
2683 	if (attn & HW_INTERRUT_ASSERT_SET_1) {
2685 		int port = BP_PORT(bp);
2688 		reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
2689 				     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
2691 		val = REG_RD(bp, reg_offset);
2692 		val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
2693 		REG_WR(bp, reg_offset, val);
2695 		BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
2696 			  (attn & HW_INTERRUT_ASSERT_SET_1));
2701 static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
2705 	if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
2707 		val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
2708 		BNX2X_ERR("CFC hw attention 0x%x\n", val);
2709 		/* CFC error attention */
2711 			BNX2X_ERR("FATAL error from CFC\n");
2714 	if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
2716 		val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
2717 		BNX2X_ERR("PXP hw attention 0x%x\n", val);
2718 		/* RQ_USDMDP_FIFO_OVERFLOW */
2720 			BNX2X_ERR("FATAL error from PXP\n");
2723 	if (attn & HW_INTERRUT_ASSERT_SET_2) {
2725 		int port = BP_PORT(bp);
2728 		reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
2729 				     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
2731 		val = REG_RD(bp, reg_offset);
2732 		val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
2733 		REG_WR(bp, reg_offset, val);
2735 		BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
2736 			  (attn & HW_INTERRUT_ASSERT_SET_2));
2741 static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
2745 	if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
2747 		if (attn & BNX2X_PMF_LINK_ASSERT) {
2748 			int func = BP_FUNC(bp);
2750 			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
2751 			bnx2x__link_status_update(bp);
2752 			if (SHMEM_RD(bp, func_mb[func].drv_status) &
2754 				bnx2x_pmf_update(bp);
2756 		} else if (attn & BNX2X_MC_ASSERT_BITS) {
2758 			BNX2X_ERR("MC assert!\n");
2759 			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
2760 			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
2761 			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
2762 			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
2765 		} else if (attn & BNX2X_MCP_ASSERT) {
2767 			BNX2X_ERR("MCP assert!\n");
2768 			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
2772 			BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
2775 	if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
2776 		BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
2777 		if (attn & BNX2X_GRC_TIMEOUT) {
2778 			val = CHIP_IS_E1H(bp) ?
2779 				REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
2780 			BNX2X_ERR("GRC time-out 0x%08x\n", val);
2782 		if (attn & BNX2X_GRC_RSV) {
2783 			val = CHIP_IS_E1H(bp) ?
2784 				REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
2785 			BNX2X_ERR("GRC reserved 0x%08x\n", val);
2787 		REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
2791 static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
2793 	struct attn_route attn;
2794 	struct attn_route group_mask;
2795 	int port = BP_PORT(bp);
2801 	/* need to take HW lock because MCP or other port might also
2802 	   try to handle this event */
2803 	bnx2x_acquire_alr(bp);
2805 	attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
2806 	attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
2807 	attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
2808 	attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
2809 	DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
2810 	   attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);
2812 	for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
2813 		if (deasserted & (1 << index)) {
2814 			group_mask = bp->attn_group[index];
2816 			DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
2817 			   index, group_mask.sig[0], group_mask.sig[1],
2818 			   group_mask.sig[2], group_mask.sig[3]);
2820 			bnx2x_attn_int_deasserted3(bp,
2821 					attn.sig[3] & group_mask.sig[3]);
2822 			bnx2x_attn_int_deasserted1(bp,
2823 					attn.sig[1] & group_mask.sig[1]);
2824 			bnx2x_attn_int_deasserted2(bp,
2825 					attn.sig[2] & group_mask.sig[2]);
2826 			bnx2x_attn_int_deasserted0(bp,
2827 					attn.sig[0] & group_mask.sig[0]);
2829 			if ((attn.sig[0] & group_mask.sig[0] &
2830 						HW_PRTY_ASSERT_SET_0) ||
2831 			    (attn.sig[1] & group_mask.sig[1] &
2832 						HW_PRTY_ASSERT_SET_1) ||
2833 			    (attn.sig[2] & group_mask.sig[2] &
2834 						HW_PRTY_ASSERT_SET_2))
2835 				BNX2X_ERR("FATAL HW block parity attention\n");
2839 	bnx2x_release_alr(bp);
2841 	reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);
2844 	DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2846 	REG_WR(bp, reg_addr, val);
2848 	if (~bp->attn_state & deasserted)
2849 		BNX2X_ERR("IGU ERROR\n");
2851 	reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2852 			  MISC_REG_AEU_MASK_ATTN_FUNC_0;
2854 	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2855 	aeu_mask = REG_RD(bp, reg_addr);
2857 	DP(NETIF_MSG_HW, "aeu_mask %x  newly deasserted %x\n",
2858 	   aeu_mask, deasserted);
2859 	aeu_mask |= (deasserted & 0xff);
2860 	DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
2862 	REG_WR(bp, reg_addr, aeu_mask);
2863 	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2865 	DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2866 	bp->attn_state &= ~deasserted;
2867 	DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2870 static void bnx2x_attn_int(struct bnx2x *bp)
2872 	/* read local copy of bits */
2873 	u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
2875 	u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
2877 	u32 attn_state = bp->attn_state;
2879 	/* look for changed bits */
2880 	u32 asserted   =  attn_bits & ~attn_ack & ~attn_state;
2881 	u32 deasserted = ~attn_bits &  attn_ack &  attn_state;
2884 	   "attn_bits %x  attn_ack %x  asserted %x  deasserted %x\n",
2885 	   attn_bits, attn_ack, asserted, deasserted);
2887 	if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
2888 		BNX2X_ERR("BAD attention state\n");
2890 	/* handle bits that were raised */
2892 		bnx2x_attn_int_asserted(bp, asserted);
2895 		bnx2x_attn_int_deasserted(bp, deasserted);
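/* Worked example (illustrative): with attn_bits = 0x5, attn_ack = 0x1 and
 * attn_state = 0x1, asserted = 0x5 & ~0x1 & ~0x1 = 0x4 (bit 2 newly raised)
 * and deasserted = ~0x5 & 0x1 & 0x1 = 0x0; conversely attn_bits = 0x0 with
 * attn_ack = attn_state = 0x1 gives deasserted = 0x1 (bit 0 newly cleared). */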
2898 static void bnx2x_sp_task(struct work_struct *work)
2900 	struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
2904 	/* Return here if interrupt is disabled */
2905 	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
2906 		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
2910 	status = bnx2x_update_dsb_idx(bp);
2911 /*	if (status == 0)				     */
2912 /*		BNX2X_ERR("spurious slowpath interrupt!\n"); */
2914 	DP(NETIF_MSG_INTR, "got a slowpath interrupt (updated %x)\n", status);
2920 	bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, le16_to_cpu(bp->def_att_idx),
2922 	bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
2924 	bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
2926 	bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
2928 	bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
2933 static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
2935 	struct net_device *dev = dev_instance;
2936 	struct bnx2x *bp = netdev_priv(dev);
2938 	/* Return here if interrupt is disabled */
2939 	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
2940 		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
2944 	bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, 0, IGU_INT_DISABLE, 0);
2946 #ifdef BNX2X_STOP_ON_ERROR
2947 	if (unlikely(bp->panic))
2951 	queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
2956 /* end of slow path */
2960 /****************************************************************************
2962 ****************************************************************************/
2964 /* sum[hi:lo] += add[hi:lo] */
2965 #define ADD_64(s_hi, a_hi, s_lo, a_lo) \
2968 		s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
2971 /* difference = minuend - subtrahend */
2972 #define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
2974 		if (m_lo < s_lo) { \
2976 			d_hi = m_hi - s_hi; \
2978 				/* we can 'loan' 1 */ \
2980 				d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
2982 				/* m_hi <= s_hi */ \
2987 			/* m_lo >= s_lo */ \
2988 			if (m_hi < s_hi) { \
2992 				/* m_hi >= s_hi */ \
2993 				d_hi = m_hi - s_hi; \
2994 				d_lo = m_lo - s_lo; \
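/* Worked example (illustrative): subtracting the 64-bit value 0x0:0x1 from
 * 0x1:0x0 (hi:lo pairs) underflows the low word, so one is 'loaned' from the
 * high word: d_lo = 0x0 + (UINT_MAX - 0x1) + 1 = 0xFFFFFFFF while the high
 * word difference drops by one, giving 0x0:0xFFFFFFFF as expected. */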
2999 #define UPDATE_STAT64(s, t) \
3001 		DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
3002 			diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
3003 		pstats->mac_stx[0].t##_hi = new->s##_hi; \
3004 		pstats->mac_stx[0].t##_lo = new->s##_lo; \
3005 		ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
3006 		       pstats->mac_stx[1].t##_lo, diff.lo); \
3009 #define UPDATE_STAT64_NIG(s, t) \
3011 		DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
3012 			diff.lo, new->s##_lo, old->s##_lo); \
3013 		ADD_64(estats->t##_hi, diff.hi, \
3014 		       estats->t##_lo, diff.lo); \
3017 /* sum[hi:lo] += add */
3018 #define ADD_EXTEND_64(s_hi, s_lo, a) \
3021 		s_hi += (s_lo < a) ? 1 : 0; \
3024 #define UPDATE_EXTEND_STAT(s) \
3026 		ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
3027 			      pstats->mac_stx[1].s##_lo, \
3031 #define UPDATE_EXTEND_TSTAT(s, t) \
3033 		diff = le32_to_cpu(tclient->s) - le32_to_cpu(old_tclient->s); \
3034 		old_tclient->s = tclient->s; \
3035 		ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3038 #define UPDATE_EXTEND_USTAT(s, t) \
3040 		diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
3041 		old_uclient->s = uclient->s; \
3042 		ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3045 #define UPDATE_EXTEND_XSTAT(s, t) \
3047 		diff = le32_to_cpu(xclient->s) - le32_to_cpu(old_xclient->s); \
3048 		old_xclient->s = xclient->s; \
3049 		ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3052 /* minuend -= subtrahend */
3053 #define SUB_64(m_hi, s_hi, m_lo, s_lo) \
3055 		DIFF_64(m_hi, m_hi, s_hi, m_lo, m_lo, s_lo); \
3058 /* minuend[hi:lo] -= subtrahend */
3059 #define SUB_EXTEND_64(m_hi, m_lo, s) \
3061 		SUB_64(m_hi, 0, m_lo, s); \
3064 #define SUB_EXTEND_USTAT(s, t) \
3066 		diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
3067 		SUB_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
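/* Expansion sketch (illustrative): UPDATE_EXTEND_TSTAT(rcv_unicast_pkts,
 * total_unicast_packets_received) computes
 *   diff = le32_to_cpu(tclient->rcv_unicast_pkts) -
 *          le32_to_cpu(old_tclient->rcv_unicast_pkts);
 * remembers the new 32-bit hardware counter in old_tclient, and folds diff
 * into the 64-bit qstats->total_unicast_packets_received_hi/_lo pair via
 * ADD_EXTEND_64(). */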
3071  * General service functions
3074 static inline long bnx2x_hilo(u32 *hiref)
3076 	u32 lo = *(hiref + 1);
3077 #if (BITS_PER_LONG == 64)
3080 	return HILO_U64(hi, lo);
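/* Example (illustrative): hiref points at the _hi word of a hi/lo pair laid
 * out back to back, so with *hiref == 0x1 and *(hiref + 1) == 0x2 the value
 * returned is 0x100000002 when BITS_PER_LONG is 64. */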
3087  * Init service functions
3090 static void bnx2x_storm_stats_post(struct bnx2x *bp)
3092 	if (!bp->stats_pending) {
3093 		struct eth_query_ramrod_data ramrod_data = {0};
3096 		ramrod_data.drv_counter = bp->stats_counter++;
3097 		ramrod_data.collect_port = bp->port.pmf ? 1 : 0;
3098 		for_each_queue(bp, i)
3099 			ramrod_data.ctr_id_vector |= (1 << bp->fp[i].cl_id);
3101 		rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
3102 				   ((u32 *)&ramrod_data)[1],
3103 				   ((u32 *)&ramrod_data)[0], 0);
3105 		/* stats ramrod has its own slot on the spq */
3107 		bp->stats_pending = 1;
3112 static void bnx2x_stats_init(struct bnx2x *bp)
3114 	int port = BP_PORT(bp);
3117 	bp->stats_pending = 0;
3118 	bp->executer_idx = 0;
3119 	bp->stats_counter = 0;
3123 		bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
3125 		bp->port.port_stx = 0;
3126 	DP(BNX2X_MSG_STATS, "port_stx 0x%x\n", bp->port.port_stx);
3128 	memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
3129 	bp->port.old_nig_stats.brb_discard =
3130 			REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
3131 	bp->port.old_nig_stats.brb_truncate =
3132 			REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
3133 	REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
3134 		    &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
3135 	REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
3136 		    &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);
3138 	/* function stats */
3139 	for_each_queue(bp, i) {
3140 		struct bnx2x_fastpath *fp = &bp->fp[i];
3142 		memset(&fp->old_tclient, 0,
3143 		       sizeof(struct tstorm_per_client_stats));
3144 		memset(&fp->old_uclient, 0,
3145 		       sizeof(struct ustorm_per_client_stats));
3146 		memset(&fp->old_xclient, 0,
3147 		       sizeof(struct xstorm_per_client_stats));
3148 		memset(&fp->eth_q_stats, 0, sizeof(struct bnx2x_eth_q_stats));
3151 	memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
3152 	memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats));
3154 	bp->stats_state = STATS_STATE_DISABLED;
3155 	if (IS_E1HMF(bp) && bp->port.pmf && bp->port.port_stx)
3156 		bnx2x_stats_handle(bp, STATS_EVENT_PMF);
3159 static void bnx2x_hw_stats_post(struct bnx2x *bp)
3161 	struct dmae_command *dmae = &bp->stats_dmae;
3162 	u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3164 	*stats_comp = DMAE_COMP_VAL;
3165 	if (CHIP_REV_IS_SLOW(bp))
3169 	if (bp->executer_idx) {
3170 		int loader_idx = PMF_DMAE_C(bp);
3172 		memset(dmae, 0, sizeof(struct dmae_command));
3174 		dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3175 				DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3176 				DMAE_CMD_DST_RESET |
3178 				DMAE_CMD_ENDIANITY_B_DW_SWAP |
3180 				DMAE_CMD_ENDIANITY_DW_SWAP |
3182 				(BP_PORT(bp) ? DMAE_CMD_PORT_1 :
3184 				(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3185 		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
3186 		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
3187 		dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
3188 				     sizeof(struct dmae_command) *
3189 				     (loader_idx + 1)) >> 2;
3190 		dmae->dst_addr_hi = 0;
3191 		dmae->len = sizeof(struct dmae_command) >> 2;
3194 		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
3195 		dmae->comp_addr_hi = 0;
3199 		bnx2x_post_dmae(bp, dmae, loader_idx);
3201 	} else if (bp->func_stx) {
3203 		bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
3207 static int bnx2x_stats_comp(struct bnx2x *bp)
3209 	u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3213 	while (*stats_comp != DMAE_COMP_VAL) {
3215 			BNX2X_ERR("timeout waiting for stats finished\n");
3225  * Statistics service functions
3228 static void bnx2x_stats_pmf_update(struct bnx2x
*bp
)
3230 struct dmae_command
*dmae
;
3232 int loader_idx
= PMF_DMAE_C(bp
);
3233 u32
*stats_comp
= bnx2x_sp(bp
, stats_comp
);
3236 if (!IS_E1HMF(bp
) || !bp
->port
.pmf
|| !bp
->port
.port_stx
) {
3237 BNX2X_ERR("BUG!\n");
3241 bp
->executer_idx
= 0;
3243 opcode
= (DMAE_CMD_SRC_GRC
| DMAE_CMD_DST_PCI
|
3245 DMAE_CMD_SRC_RESET
| DMAE_CMD_DST_RESET
|
3247 DMAE_CMD_ENDIANITY_B_DW_SWAP
|
3249 DMAE_CMD_ENDIANITY_DW_SWAP
|
3251 (BP_PORT(bp
) ? DMAE_CMD_PORT_1
: DMAE_CMD_PORT_0
) |
3252 (BP_E1HVN(bp
) << DMAE_CMD_E1HVN_SHIFT
));
3254 dmae
= bnx2x_sp(bp
, dmae
[bp
->executer_idx
++]);
3255 dmae
->opcode
= (opcode
| DMAE_CMD_C_DST_GRC
);
3256 dmae
->src_addr_lo
= bp
->port
.port_stx
>> 2;
3257 dmae
->src_addr_hi
= 0;
3258 dmae
->dst_addr_lo
= U64_LO(bnx2x_sp_mapping(bp
, port_stats
));
3259 dmae
->dst_addr_hi
= U64_HI(bnx2x_sp_mapping(bp
, port_stats
));
3260 dmae
->len
= DMAE_LEN32_RD_MAX
;
3261 dmae
->comp_addr_lo
= dmae_reg_go_c
[loader_idx
] >> 2;
3262 dmae
->comp_addr_hi
= 0;
3265 dmae
= bnx2x_sp(bp
, dmae
[bp
->executer_idx
++]);
3266 dmae
->opcode
= (opcode
| DMAE_CMD_C_DST_PCI
);
3267 dmae
->src_addr_lo
= (bp
->port
.port_stx
>> 2) + DMAE_LEN32_RD_MAX
;
3268 dmae
->src_addr_hi
= 0;
3269 dmae
->dst_addr_lo
= U64_LO(bnx2x_sp_mapping(bp
, port_stats
) +
3270 DMAE_LEN32_RD_MAX
* 4);
3271 dmae
->dst_addr_hi
= U64_HI(bnx2x_sp_mapping(bp
, port_stats
) +
3272 DMAE_LEN32_RD_MAX
* 4);
3273 dmae
->len
= (sizeof(struct host_port_stats
) >> 2) - DMAE_LEN32_RD_MAX
;
3274 dmae
->comp_addr_lo
= U64_LO(bnx2x_sp_mapping(bp
, stats_comp
));
3275 dmae
->comp_addr_hi
= U64_HI(bnx2x_sp_mapping(bp
, stats_comp
));
3276 dmae
->comp_val
= DMAE_COMP_VAL
;
3279 bnx2x_hw_stats_post(bp
);
3280 bnx2x_stats_comp(bp
);
3283 static void bnx2x_port_stats_init(struct bnx2x
*bp
)
3285 struct dmae_command
*dmae
;
3286 int port
= BP_PORT(bp
);
3287 int vn
= BP_E1HVN(bp
);
3289 int loader_idx
= PMF_DMAE_C(bp
);
3291 u32
*stats_comp
= bnx2x_sp(bp
, stats_comp
);
3294 if (!bp
->link_vars
.link_up
|| !bp
->port
.pmf
) {
3295 BNX2X_ERR("BUG!\n");
3299 bp
->executer_idx
= 0;
3302 opcode
= (DMAE_CMD_SRC_PCI
| DMAE_CMD_DST_GRC
|
3303 DMAE_CMD_C_DST_GRC
| DMAE_CMD_C_ENABLE
|
3304 DMAE_CMD_SRC_RESET
| DMAE_CMD_DST_RESET
|
3306 DMAE_CMD_ENDIANITY_B_DW_SWAP
|
3308 DMAE_CMD_ENDIANITY_DW_SWAP
|
3310 (port
? DMAE_CMD_PORT_1
: DMAE_CMD_PORT_0
) |
3311 (vn
<< DMAE_CMD_E1HVN_SHIFT
));
3313 if (bp
->port
.port_stx
) {
3315 dmae
= bnx2x_sp(bp
, dmae
[bp
->executer_idx
++]);
3316 dmae
->opcode
= opcode
;
3317 dmae
->src_addr_lo
= U64_LO(bnx2x_sp_mapping(bp
, port_stats
));
3318 dmae
->src_addr_hi
= U64_HI(bnx2x_sp_mapping(bp
, port_stats
));
3319 dmae
->dst_addr_lo
= bp
->port
.port_stx
>> 2;
3320 dmae
->dst_addr_hi
= 0;
3321 dmae
->len
= sizeof(struct host_port_stats
) >> 2;
3322 dmae
->comp_addr_lo
= dmae_reg_go_c
[loader_idx
] >> 2;
3323 dmae
->comp_addr_hi
= 0;
3329 dmae
= bnx2x_sp(bp
, dmae
[bp
->executer_idx
++]);
3330 dmae
->opcode
= opcode
;
3331 dmae
->src_addr_lo
= U64_LO(bnx2x_sp_mapping(bp
, func_stats
));
3332 dmae
->src_addr_hi
= U64_HI(bnx2x_sp_mapping(bp
, func_stats
));
3333 dmae
->dst_addr_lo
= bp
->func_stx
>> 2;
3334 dmae
->dst_addr_hi
= 0;
3335 dmae
->len
= sizeof(struct host_func_stats
) >> 2;
3336 dmae
->comp_addr_lo
= dmae_reg_go_c
[loader_idx
] >> 2;
3337 dmae
->comp_addr_hi
= 0;
3342 opcode
= (DMAE_CMD_SRC_GRC
| DMAE_CMD_DST_PCI
|
3343 DMAE_CMD_C_DST_GRC
| DMAE_CMD_C_ENABLE
|
3344 DMAE_CMD_SRC_RESET
| DMAE_CMD_DST_RESET
|
3346 DMAE_CMD_ENDIANITY_B_DW_SWAP
|
3348 DMAE_CMD_ENDIANITY_DW_SWAP
|
3350 (port
? DMAE_CMD_PORT_1
: DMAE_CMD_PORT_0
) |
3351 (vn
<< DMAE_CMD_E1HVN_SHIFT
));
3353 if (bp
->link_vars
.mac_type
== MAC_TYPE_BMAC
) {
3355 mac_addr
= (port
? NIG_REG_INGRESS_BMAC1_MEM
:
3356 NIG_REG_INGRESS_BMAC0_MEM
);
3358 /* BIGMAC_REGISTER_TX_STAT_GTPKT ..
3359 BIGMAC_REGISTER_TX_STAT_GTBYT */
3360 dmae
= bnx2x_sp(bp
, dmae
[bp
->executer_idx
++]);
3361 dmae
->opcode
= opcode
;
3362 dmae
->src_addr_lo
= (mac_addr
+
3363 BIGMAC_REGISTER_TX_STAT_GTPKT
) >> 2;
3364 dmae
->src_addr_hi
= 0;
3365 dmae
->dst_addr_lo
= U64_LO(bnx2x_sp_mapping(bp
, mac_stats
));
3366 dmae
->dst_addr_hi
= U64_HI(bnx2x_sp_mapping(bp
, mac_stats
));
3367 dmae
->len
= (8 + BIGMAC_REGISTER_TX_STAT_GTBYT
-
3368 BIGMAC_REGISTER_TX_STAT_GTPKT
) >> 2;
3369 dmae
->comp_addr_lo
= dmae_reg_go_c
[loader_idx
] >> 2;
3370 dmae
->comp_addr_hi
= 0;
3373 /* BIGMAC_REGISTER_RX_STAT_GR64 ..
3374 BIGMAC_REGISTER_RX_STAT_GRIPJ */
3375 dmae
= bnx2x_sp(bp
, dmae
[bp
->executer_idx
++]);
3376 dmae
->opcode
= opcode
;
3377 dmae
->src_addr_lo
= (mac_addr
+
3378 BIGMAC_REGISTER_RX_STAT_GR64
) >> 2;
3379 dmae
->src_addr_hi
= 0;
3380 dmae
->dst_addr_lo
= U64_LO(bnx2x_sp_mapping(bp
, mac_stats
) +
3381 offsetof(struct bmac_stats
, rx_stat_gr64_lo
));
3382 dmae
->dst_addr_hi
= U64_HI(bnx2x_sp_mapping(bp
, mac_stats
) +
3383 offsetof(struct bmac_stats
, rx_stat_gr64_lo
));
3384 dmae
->len
= (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ
-
3385 BIGMAC_REGISTER_RX_STAT_GR64
) >> 2;
3386 dmae
->comp_addr_lo
= dmae_reg_go_c
[loader_idx
] >> 2;
3387 dmae
->comp_addr_hi
= 0;
3390 } else if (bp
->link_vars
.mac_type
== MAC_TYPE_EMAC
) {
3392 mac_addr
= (port
? GRCBASE_EMAC1
: GRCBASE_EMAC0
);
3394 /* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
3395 dmae
= bnx2x_sp(bp
, dmae
[bp
->executer_idx
++]);
3396 dmae
->opcode
= opcode
;
3397 dmae
->src_addr_lo
= (mac_addr
+
3398 EMAC_REG_EMAC_RX_STAT_AC
) >> 2;
3399 dmae
->src_addr_hi
= 0;
3400 dmae
->dst_addr_lo
= U64_LO(bnx2x_sp_mapping(bp
, mac_stats
));
3401 dmae
->dst_addr_hi
= U64_HI(bnx2x_sp_mapping(bp
, mac_stats
));
3402 dmae
->len
= EMAC_REG_EMAC_RX_STAT_AC_COUNT
;
3403 dmae
->comp_addr_lo
= dmae_reg_go_c
[loader_idx
] >> 2;
3404 dmae
->comp_addr_hi
= 0;
3407 /* EMAC_REG_EMAC_RX_STAT_AC_28 */
3408 dmae
= bnx2x_sp(bp
, dmae
[bp
->executer_idx
++]);
3409 dmae
->opcode
= opcode
;
3410 dmae
->src_addr_lo
= (mac_addr
+
3411 EMAC_REG_EMAC_RX_STAT_AC_28
) >> 2;
3412 dmae
->src_addr_hi
= 0;
3413 dmae
->dst_addr_lo
= U64_LO(bnx2x_sp_mapping(bp
, mac_stats
) +
3414 offsetof(struct emac_stats
, rx_stat_falsecarriererrors
));
3415 dmae
->dst_addr_hi
= U64_HI(bnx2x_sp_mapping(bp
, mac_stats
) +
3416 offsetof(struct emac_stats
, rx_stat_falsecarriererrors
));
3418 dmae
->comp_addr_lo
= dmae_reg_go_c
[loader_idx
] >> 2;
3419 dmae
->comp_addr_hi
= 0;
3422 /* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
3423 dmae
= bnx2x_sp(bp
, dmae
[bp
->executer_idx
++]);
3424 dmae
->opcode
= opcode
;
3425 dmae
->src_addr_lo
= (mac_addr
+
3426 EMAC_REG_EMAC_TX_STAT_AC
) >> 2;
3427 dmae
->src_addr_hi
= 0;
3428 dmae
->dst_addr_lo
= U64_LO(bnx2x_sp_mapping(bp
, mac_stats
) +
3429 offsetof(struct emac_stats
, tx_stat_ifhcoutoctets
));
3430 dmae
->dst_addr_hi
= U64_HI(bnx2x_sp_mapping(bp
, mac_stats
) +
3431 offsetof(struct emac_stats
, tx_stat_ifhcoutoctets
));
3432 dmae
->len
= EMAC_REG_EMAC_TX_STAT_AC_COUNT
;
3433 dmae
->comp_addr_lo
= dmae_reg_go_c
[loader_idx
] >> 2;
3434 dmae
->comp_addr_hi
= 0;
3439 dmae
= bnx2x_sp(bp
, dmae
[bp
->executer_idx
++]);
3440 dmae
->opcode
= opcode
;
3441 dmae
->src_addr_lo
= (port
? NIG_REG_STAT1_BRB_DISCARD
:
3442 NIG_REG_STAT0_BRB_DISCARD
) >> 2;
3443 dmae
->src_addr_hi
= 0;
3444 dmae
->dst_addr_lo
= U64_LO(bnx2x_sp_mapping(bp
, nig_stats
));
3445 dmae
->dst_addr_hi
= U64_HI(bnx2x_sp_mapping(bp
, nig_stats
));
3446 dmae
->len
= (sizeof(struct nig_stats
) - 4*sizeof(u32
)) >> 2;
3447 dmae
->comp_addr_lo
= dmae_reg_go_c
[loader_idx
] >> 2;
3448 dmae
->comp_addr_hi
= 0;
3451 dmae
= bnx2x_sp(bp
, dmae
[bp
->executer_idx
++]);
3452 dmae
->opcode
= opcode
;
3453 dmae
->src_addr_lo
= (port
? NIG_REG_STAT1_EGRESS_MAC_PKT0
:
3454 NIG_REG_STAT0_EGRESS_MAC_PKT0
) >> 2;
3455 dmae
->src_addr_hi
= 0;
3456 dmae
->dst_addr_lo
= U64_LO(bnx2x_sp_mapping(bp
, nig_stats
) +
3457 offsetof(struct nig_stats
, egress_mac_pkt0_lo
));
3458 dmae
->dst_addr_hi
= U64_HI(bnx2x_sp_mapping(bp
, nig_stats
) +
3459 offsetof(struct nig_stats
, egress_mac_pkt0_lo
));
3460 dmae
->len
= (2*sizeof(u32
)) >> 2;
3461 dmae
->comp_addr_lo
= dmae_reg_go_c
[loader_idx
] >> 2;
3462 dmae
->comp_addr_hi
= 0;
3465 dmae
= bnx2x_sp(bp
, dmae
[bp
->executer_idx
++]);
3466 dmae
->opcode
= (DMAE_CMD_SRC_GRC
| DMAE_CMD_DST_PCI
|
3467 DMAE_CMD_C_DST_PCI
| DMAE_CMD_C_ENABLE
|
3468 DMAE_CMD_SRC_RESET
| DMAE_CMD_DST_RESET
|
3470 DMAE_CMD_ENDIANITY_B_DW_SWAP
|
3472 DMAE_CMD_ENDIANITY_DW_SWAP
|
3474 (port
? DMAE_CMD_PORT_1
: DMAE_CMD_PORT_0
) |
3475 (vn
<< DMAE_CMD_E1HVN_SHIFT
));
3476 dmae
->src_addr_lo
= (port
? NIG_REG_STAT1_EGRESS_MAC_PKT1
:
3477 NIG_REG_STAT0_EGRESS_MAC_PKT1
) >> 2;
3478 dmae
->src_addr_hi
= 0;
3479 dmae
->dst_addr_lo
= U64_LO(bnx2x_sp_mapping(bp
, nig_stats
) +
3480 offsetof(struct nig_stats
, egress_mac_pkt1_lo
));
3481 dmae
->dst_addr_hi
= U64_HI(bnx2x_sp_mapping(bp
, nig_stats
) +
3482 offsetof(struct nig_stats
, egress_mac_pkt1_lo
));
3483 dmae
->len
= (2*sizeof(u32
)) >> 2;
3484 dmae
->comp_addr_lo
= U64_LO(bnx2x_sp_mapping(bp
, stats_comp
));
3485 dmae
->comp_addr_hi
= U64_HI(bnx2x_sp_mapping(bp
, stats_comp
));
3486 dmae
->comp_val
= DMAE_COMP_VAL
;
3491 static void bnx2x_func_stats_init(struct bnx2x
*bp
)
3493 struct dmae_command
*dmae
= &bp
->stats_dmae
;
3494 u32
*stats_comp
= bnx2x_sp(bp
, stats_comp
);
3497 if (!bp
->func_stx
) {
3498 BNX2X_ERR("BUG!\n");
3502 bp
->executer_idx
= 0;
3503 memset(dmae
, 0, sizeof(struct dmae_command
));
3505 dmae
->opcode
= (DMAE_CMD_SRC_PCI
| DMAE_CMD_DST_GRC
|
3506 DMAE_CMD_C_DST_PCI
| DMAE_CMD_C_ENABLE
|
3507 DMAE_CMD_SRC_RESET
| DMAE_CMD_DST_RESET
|
3509 DMAE_CMD_ENDIANITY_B_DW_SWAP
|
3511 DMAE_CMD_ENDIANITY_DW_SWAP
|
3513 (BP_PORT(bp
) ? DMAE_CMD_PORT_1
: DMAE_CMD_PORT_0
) |
3514 (BP_E1HVN(bp
) << DMAE_CMD_E1HVN_SHIFT
));
3515 dmae
->src_addr_lo
= U64_LO(bnx2x_sp_mapping(bp
, func_stats
));
3516 dmae
->src_addr_hi
= U64_HI(bnx2x_sp_mapping(bp
, func_stats
));
3517 dmae
->dst_addr_lo
= bp
->func_stx
>> 2;
3518 dmae
->dst_addr_hi
= 0;
3519 dmae
->len
= sizeof(struct host_func_stats
) >> 2;
3520 dmae
->comp_addr_lo
= U64_LO(bnx2x_sp_mapping(bp
, stats_comp
));
3521 dmae
->comp_addr_hi
= U64_HI(bnx2x_sp_mapping(bp
, stats_comp
));
3522 dmae
->comp_val
= DMAE_COMP_VAL
;
3527 static void bnx2x_stats_start(struct bnx2x *bp)
3530 		bnx2x_port_stats_init(bp);
3532 	else if (bp->func_stx)
3533 		bnx2x_func_stats_init(bp);
3535 	bnx2x_hw_stats_post(bp);
3536 	bnx2x_storm_stats_post(bp);
3539 static void bnx2x_stats_pmf_start(struct bnx2x *bp)
3541 	bnx2x_stats_comp(bp);
3542 	bnx2x_stats_pmf_update(bp);
3543 	bnx2x_stats_start(bp);
3546 static void bnx2x_stats_restart(struct bnx2x *bp)
3548 	bnx2x_stats_comp(bp);
3549 	bnx2x_stats_start(bp);
3552 static void bnx2x_bmac_stats_update(struct bnx2x
*bp
)
3554 struct bmac_stats
*new = bnx2x_sp(bp
, mac_stats
.bmac_stats
);
3555 struct host_port_stats
*pstats
= bnx2x_sp(bp
, port_stats
);
3556 struct bnx2x_eth_stats
*estats
= &bp
->eth_stats
;
3562 UPDATE_STAT64(rx_stat_grerb
, rx_stat_ifhcinbadoctets
);
3563 UPDATE_STAT64(rx_stat_grfcs
, rx_stat_dot3statsfcserrors
);
3564 UPDATE_STAT64(rx_stat_grund
, rx_stat_etherstatsundersizepkts
);
3565 UPDATE_STAT64(rx_stat_grovr
, rx_stat_dot3statsframestoolong
);
3566 UPDATE_STAT64(rx_stat_grfrg
, rx_stat_etherstatsfragments
);
3567 UPDATE_STAT64(rx_stat_grjbr
, rx_stat_etherstatsjabbers
);
3568 UPDATE_STAT64(rx_stat_grxcf
, rx_stat_maccontrolframesreceived
);
3569 UPDATE_STAT64(rx_stat_grxpf
, rx_stat_xoffstateentered
);
3570 UPDATE_STAT64(rx_stat_grxpf
, rx_stat_bmac_xpf
);
3571 UPDATE_STAT64(tx_stat_gtxpf
, tx_stat_outxoffsent
);
3572 UPDATE_STAT64(tx_stat_gtxpf
, tx_stat_flowcontroldone
);
3573 UPDATE_STAT64(tx_stat_gt64
, tx_stat_etherstatspkts64octets
);
3574 UPDATE_STAT64(tx_stat_gt127
,
3575 tx_stat_etherstatspkts65octetsto127octets
);
3576 UPDATE_STAT64(tx_stat_gt255
,
3577 tx_stat_etherstatspkts128octetsto255octets
);
3578 UPDATE_STAT64(tx_stat_gt511
,
3579 tx_stat_etherstatspkts256octetsto511octets
);
3580 UPDATE_STAT64(tx_stat_gt1023
,
3581 tx_stat_etherstatspkts512octetsto1023octets
);
3582 UPDATE_STAT64(tx_stat_gt1518
,
3583 tx_stat_etherstatspkts1024octetsto1522octets
);
3584 UPDATE_STAT64(tx_stat_gt2047
, tx_stat_bmac_2047
);
3585 UPDATE_STAT64(tx_stat_gt4095
, tx_stat_bmac_4095
);
3586 UPDATE_STAT64(tx_stat_gt9216
, tx_stat_bmac_9216
);
3587 UPDATE_STAT64(tx_stat_gt16383
, tx_stat_bmac_16383
);
3588 UPDATE_STAT64(tx_stat_gterr
,
3589 tx_stat_dot3statsinternalmactransmiterrors
);
3590 UPDATE_STAT64(tx_stat_gtufl
, tx_stat_bmac_ufl
);
3592 estats
->pause_frames_received_hi
=
3593 pstats
->mac_stx
[1].rx_stat_bmac_xpf_hi
;
3594 estats
->pause_frames_received_lo
=
3595 pstats
->mac_stx
[1].rx_stat_bmac_xpf_lo
;
3597 estats
->pause_frames_sent_hi
=
3598 pstats
->mac_stx
[1].tx_stat_outxoffsent_hi
;
3599 estats
->pause_frames_sent_lo
=
3600 pstats
->mac_stx
[1].tx_stat_outxoffsent_lo
;
3603 static void bnx2x_emac_stats_update(struct bnx2x
*bp
)
3605 struct emac_stats
*new = bnx2x_sp(bp
, mac_stats
.emac_stats
);
3606 struct host_port_stats
*pstats
= bnx2x_sp(bp
, port_stats
);
3607 struct bnx2x_eth_stats
*estats
= &bp
->eth_stats
;
3609 UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets
);
3610 UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets
);
3611 UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors
);
3612 UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors
);
3613 UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors
);
3614 UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors
);
3615 UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts
);
3616 UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong
);
3617 UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments
);
3618 UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers
);
3619 UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived
);
3620 UPDATE_EXTEND_STAT(rx_stat_xoffstateentered
);
3621 UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived
);
3622 UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived
);
3623 UPDATE_EXTEND_STAT(tx_stat_outxonsent
);
3624 UPDATE_EXTEND_STAT(tx_stat_outxoffsent
);
3625 UPDATE_EXTEND_STAT(tx_stat_flowcontroldone
);
3626 UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions
);
3627 UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes
);
3628 UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes
);
3629 UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions
);
3630 UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions
);
3631 UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions
);
3632 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets
);
3633 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets
);
3634 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets
);
3635 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets
);
3636 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets
);
3637 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets
);
3638 UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets
);
3639 UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors
);
3641 estats
->pause_frames_received_hi
=
3642 pstats
->mac_stx
[1].rx_stat_xonpauseframesreceived_hi
;
3643 estats
->pause_frames_received_lo
=
3644 pstats
->mac_stx
[1].rx_stat_xonpauseframesreceived_lo
;
3645 ADD_64(estats
->pause_frames_received_hi
,
3646 pstats
->mac_stx
[1].rx_stat_xoffpauseframesreceived_hi
,
3647 estats
->pause_frames_received_lo
,
3648 pstats
->mac_stx
[1].rx_stat_xoffpauseframesreceived_lo
);
3650 estats
->pause_frames_sent_hi
=
3651 pstats
->mac_stx
[1].tx_stat_outxonsent_hi
;
3652 estats
->pause_frames_sent_lo
=
3653 pstats
->mac_stx
[1].tx_stat_outxonsent_lo
;
3654 ADD_64(estats
->pause_frames_sent_hi
,
3655 pstats
->mac_stx
[1].tx_stat_outxoffsent_hi
,
3656 estats
->pause_frames_sent_lo
,
3657 pstats
->mac_stx
[1].tx_stat_outxoffsent_lo
);
3660 static int bnx2x_hw_stats_update(struct bnx2x
*bp
)
3662 struct nig_stats
*new = bnx2x_sp(bp
, nig_stats
);
3663 struct nig_stats
*old
= &(bp
->port
.old_nig_stats
);
3664 struct host_port_stats
*pstats
= bnx2x_sp(bp
, port_stats
);
3665 struct bnx2x_eth_stats
*estats
= &bp
->eth_stats
;
3672 if (bp
->link_vars
.mac_type
== MAC_TYPE_BMAC
)
3673 bnx2x_bmac_stats_update(bp
);
3675 else if (bp
->link_vars
.mac_type
== MAC_TYPE_EMAC
)
3676 bnx2x_emac_stats_update(bp
);
3678 else { /* unreached */
3679 BNX2X_ERR("stats updated by DMAE but no MAC active\n");
3683 ADD_EXTEND_64(pstats
->brb_drop_hi
, pstats
->brb_drop_lo
,
3684 new->brb_discard
- old
->brb_discard
);
3685 ADD_EXTEND_64(estats
->brb_truncate_hi
, estats
->brb_truncate_lo
,
3686 new->brb_truncate
- old
->brb_truncate
);
3688 UPDATE_STAT64_NIG(egress_mac_pkt0
,
3689 etherstatspkts1024octetsto1522octets
);
3690 UPDATE_STAT64_NIG(egress_mac_pkt1
, etherstatspktsover1522octets
);
3692 memcpy(old
, new, sizeof(struct nig_stats
));
3694 memcpy(&(estats
->rx_stat_ifhcinbadoctets_hi
), &(pstats
->mac_stx
[1]),
3695 sizeof(struct mac_stx
));
3696 estats
->brb_drop_hi
= pstats
->brb_drop_hi
;
3697 estats
->brb_drop_lo
= pstats
->brb_drop_lo
;
3699 pstats
->host_port_stats_start
= ++pstats
->host_port_stats_end
;
3701 nig_timer_max
= SHMEM_RD(bp
, port_mb
[BP_PORT(bp
)].stat_nig_timer
);
3702 if (nig_timer_max
!= estats
->nig_timer_max
) {
3703 estats
->nig_timer_max
= nig_timer_max
;
3704 BNX2X_ERR("NIG timer max (%u)\n", estats
->nig_timer_max
);
3710 static int bnx2x_storm_stats_update(struct bnx2x
*bp
)
3712 struct eth_stats_query
*stats
= bnx2x_sp(bp
, fw_stats
);
3713 struct tstorm_per_port_stats
*tport
=
3714 &stats
->tstorm_common
.port_statistics
;
3715 struct host_func_stats
*fstats
= bnx2x_sp(bp
, func_stats
);
3716 struct bnx2x_eth_stats
*estats
= &bp
->eth_stats
;
3719 memset(&(fstats
->total_bytes_received_hi
), 0,
3720 sizeof(struct host_func_stats
) - 2*sizeof(u32
));
3721 estats
->error_bytes_received_hi
= 0;
3722 estats
->error_bytes_received_lo
= 0;
3723 estats
->etherstatsoverrsizepkts_hi
= 0;
3724 estats
->etherstatsoverrsizepkts_lo
= 0;
3725 estats
->no_buff_discard_hi
= 0;
3726 estats
->no_buff_discard_lo
= 0;
3728 for_each_queue(bp
, i
) {
3729 struct bnx2x_fastpath
*fp
= &bp
->fp
[i
];
3730 int cl_id
= fp
->cl_id
;
3731 struct tstorm_per_client_stats
*tclient
=
3732 &stats
->tstorm_common
.client_statistics
[cl_id
];
3733 struct tstorm_per_client_stats
*old_tclient
= &fp
->old_tclient
;
3734 struct ustorm_per_client_stats
*uclient
=
3735 &stats
->ustorm_common
.client_statistics
[cl_id
];
3736 struct ustorm_per_client_stats
*old_uclient
= &fp
->old_uclient
;
3737 struct xstorm_per_client_stats
*xclient
=
3738 &stats
->xstorm_common
.client_statistics
[cl_id
];
3739 struct xstorm_per_client_stats
*old_xclient
= &fp
->old_xclient
;
3740 struct bnx2x_eth_q_stats
*qstats
= &fp
->eth_q_stats
;
3743 /* are storm stats valid? */
3744 if ((u16
)(le16_to_cpu(xclient
->stats_counter
) + 1) !=
3745 bp
->stats_counter
) {
3746 DP(BNX2X_MSG_STATS
, "[%d] stats not updated by xstorm"
3747 " xstorm counter (%d) != stats_counter (%d)\n",
3748 i
, xclient
->stats_counter
, bp
->stats_counter
);
3751 if ((u16
)(le16_to_cpu(tclient
->stats_counter
) + 1) !=
3752 bp
->stats_counter
) {
3753 DP(BNX2X_MSG_STATS
, "[%d] stats not updated by tstorm"
3754 " tstorm counter (%d) != stats_counter (%d)\n",
3755 i
, tclient
->stats_counter
, bp
->stats_counter
);
3758 if ((u16
)(le16_to_cpu(uclient
->stats_counter
) + 1) !=
3759 bp
->stats_counter
) {
3760 DP(BNX2X_MSG_STATS
, "[%d] stats not updated by ustorm"
3761 " ustorm counter (%d) != stats_counter (%d)\n",
3762 i
, uclient
->stats_counter
, bp
->stats_counter
);
3766 qstats
->total_bytes_received_hi
=
3767 qstats
->valid_bytes_received_hi
=
3768 le32_to_cpu(tclient
->total_rcv_bytes
.hi
);
3769 qstats
->total_bytes_received_lo
=
3770 qstats
->valid_bytes_received_lo
=
3771 le32_to_cpu(tclient
->total_rcv_bytes
.lo
);
3773 qstats
->error_bytes_received_hi
=
3774 le32_to_cpu(tclient
->rcv_error_bytes
.hi
);
3775 qstats
->error_bytes_received_lo
=
3776 le32_to_cpu(tclient
->rcv_error_bytes
.lo
);
3778 ADD_64(qstats
->total_bytes_received_hi
,
3779 qstats
->error_bytes_received_hi
,
3780 qstats
->total_bytes_received_lo
,
3781 qstats
->error_bytes_received_lo
);
3783 UPDATE_EXTEND_TSTAT(rcv_unicast_pkts
,
3784 total_unicast_packets_received
);
3785 UPDATE_EXTEND_TSTAT(rcv_multicast_pkts
,
3786 total_multicast_packets_received
);
3787 UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts
,
3788 total_broadcast_packets_received
);
3789 UPDATE_EXTEND_TSTAT(packets_too_big_discard
,
3790 etherstatsoverrsizepkts
);
3791 UPDATE_EXTEND_TSTAT(no_buff_discard
, no_buff_discard
);
3793 SUB_EXTEND_USTAT(ucast_no_buff_pkts
,
3794 total_unicast_packets_received
);
3795 SUB_EXTEND_USTAT(mcast_no_buff_pkts
,
3796 total_multicast_packets_received
);
3797 SUB_EXTEND_USTAT(bcast_no_buff_pkts
,
3798 total_broadcast_packets_received
);
3799 UPDATE_EXTEND_USTAT(ucast_no_buff_pkts
, no_buff_discard
);
3800 UPDATE_EXTEND_USTAT(mcast_no_buff_pkts
, no_buff_discard
);
3801 UPDATE_EXTEND_USTAT(bcast_no_buff_pkts
, no_buff_discard
);
3803 qstats
->total_bytes_transmitted_hi
=
3804 le32_to_cpu(xclient
->total_sent_bytes
.hi
);
3805 qstats
->total_bytes_transmitted_lo
=
3806 le32_to_cpu(xclient
->total_sent_bytes
.lo
);
3808 UPDATE_EXTEND_XSTAT(unicast_pkts_sent
,
3809 total_unicast_packets_transmitted
);
3810 UPDATE_EXTEND_XSTAT(multicast_pkts_sent
,
3811 total_multicast_packets_transmitted
);
3812 UPDATE_EXTEND_XSTAT(broadcast_pkts_sent
,
3813 total_broadcast_packets_transmitted
);
3815 old_tclient
->checksum_discard
= tclient
->checksum_discard
;
3816 old_tclient
->ttl0_discard
= tclient
->ttl0_discard
;
3818 ADD_64(fstats
->total_bytes_received_hi
,
3819 qstats
->total_bytes_received_hi
,
3820 fstats
->total_bytes_received_lo
,
3821 qstats
->total_bytes_received_lo
);
3822 ADD_64(fstats
->total_bytes_transmitted_hi
,
3823 qstats
->total_bytes_transmitted_hi
,
3824 fstats
->total_bytes_transmitted_lo
,
3825 qstats
->total_bytes_transmitted_lo
);
3826 ADD_64(fstats
->total_unicast_packets_received_hi
,
3827 qstats
->total_unicast_packets_received_hi
,
3828 fstats
->total_unicast_packets_received_lo
,
3829 qstats
->total_unicast_packets_received_lo
);
3830 ADD_64(fstats
->total_multicast_packets_received_hi
,
3831 qstats
->total_multicast_packets_received_hi
,
3832 fstats
->total_multicast_packets_received_lo
,
3833 qstats
->total_multicast_packets_received_lo
);
3834 ADD_64(fstats
->total_broadcast_packets_received_hi
,
3835 qstats
->total_broadcast_packets_received_hi
,
3836 fstats
->total_broadcast_packets_received_lo
,
3837 qstats
->total_broadcast_packets_received_lo
);
3838 ADD_64(fstats
->total_unicast_packets_transmitted_hi
,
3839 qstats
->total_unicast_packets_transmitted_hi
,
3840 fstats
->total_unicast_packets_transmitted_lo
,
3841 qstats
->total_unicast_packets_transmitted_lo
);
3842 ADD_64(fstats
->total_multicast_packets_transmitted_hi
,
3843 qstats
->total_multicast_packets_transmitted_hi
,
3844 fstats
->total_multicast_packets_transmitted_lo
,
3845 qstats
->total_multicast_packets_transmitted_lo
);
3846 ADD_64(fstats
->total_broadcast_packets_transmitted_hi
,
3847 qstats
->total_broadcast_packets_transmitted_hi
,
3848 fstats
->total_broadcast_packets_transmitted_lo
,
3849 qstats
->total_broadcast_packets_transmitted_lo
);
3850 ADD_64(fstats
->valid_bytes_received_hi
,
3851 qstats
->valid_bytes_received_hi
,
3852 fstats
->valid_bytes_received_lo
,
3853 qstats
->valid_bytes_received_lo
);
3855 ADD_64(estats
->error_bytes_received_hi
,
3856 qstats
->error_bytes_received_hi
,
3857 estats
->error_bytes_received_lo
,
3858 qstats
->error_bytes_received_lo
);
3859 ADD_64(estats
->etherstatsoverrsizepkts_hi
,
3860 qstats
->etherstatsoverrsizepkts_hi
,
3861 estats
->etherstatsoverrsizepkts_lo
,
3862 qstats
->etherstatsoverrsizepkts_lo
);
3863 ADD_64(estats
->no_buff_discard_hi
, qstats
->no_buff_discard_hi
,
3864 estats
->no_buff_discard_lo
, qstats
->no_buff_discard_lo
);
3867 ADD_64(fstats
->total_bytes_received_hi
,
3868 estats
->rx_stat_ifhcinbadoctets_hi
,
3869 fstats
->total_bytes_received_lo
,
3870 estats
->rx_stat_ifhcinbadoctets_lo
);
3872 memcpy(estats
, &(fstats
->total_bytes_received_hi
),
3873 sizeof(struct host_func_stats
) - 2*sizeof(u32
));
3875 ADD_64(estats
->etherstatsoverrsizepkts_hi
,
3876 estats
->rx_stat_dot3statsframestoolong_hi
,
3877 estats
->etherstatsoverrsizepkts_lo
,
3878 estats
->rx_stat_dot3statsframestoolong_lo
);
3879 ADD_64(estats
->error_bytes_received_hi
,
3880 estats
->rx_stat_ifhcinbadoctets_hi
,
3881 estats
->error_bytes_received_lo
,
3882 estats
->rx_stat_ifhcinbadoctets_lo
);
3885 estats
->mac_filter_discard
=
3886 le32_to_cpu(tport
->mac_filter_discard
);
3887 estats
->xxoverflow_discard
=
3888 le32_to_cpu(tport
->xxoverflow_discard
);
3889 estats
->brb_truncate_discard
=
3890 le32_to_cpu(tport
->brb_truncate_discard
);
3891 estats
->mac_discard
= le32_to_cpu(tport
->mac_discard
);
3894 fstats
->host_func_stats_start
= ++fstats
->host_func_stats_end
;
3896 bp
->stats_pending
= 0;
3901 static void bnx2x_net_stats_update(struct bnx2x
*bp
)
3903 struct bnx2x_eth_stats
*estats
= &bp
->eth_stats
;
3904 struct net_device_stats
*nstats
= &bp
->dev
->stats
;
3907 nstats
->rx_packets
=
3908 bnx2x_hilo(&estats
->total_unicast_packets_received_hi
) +
3909 bnx2x_hilo(&estats
->total_multicast_packets_received_hi
) +
3910 bnx2x_hilo(&estats
->total_broadcast_packets_received_hi
);
3912 nstats
->tx_packets
=
3913 bnx2x_hilo(&estats
->total_unicast_packets_transmitted_hi
) +
3914 bnx2x_hilo(&estats
->total_multicast_packets_transmitted_hi
) +
3915 bnx2x_hilo(&estats
->total_broadcast_packets_transmitted_hi
);
3917 nstats
->rx_bytes
= bnx2x_hilo(&estats
->total_bytes_received_hi
);
3919 nstats
->tx_bytes
= bnx2x_hilo(&estats
->total_bytes_transmitted_hi
);
3921 nstats
->rx_dropped
= estats
->mac_discard
;
3922 for_each_queue(bp
, i
)
3923 nstats
->rx_dropped
+=
3924 le32_to_cpu(bp
->fp
[i
].old_tclient
.checksum_discard
);
3926 nstats
->tx_dropped
= 0;
3929 bnx2x_hilo(&estats
->total_multicast_packets_received_hi
);
3931 nstats
->collisions
=
3932 bnx2x_hilo(&estats
->tx_stat_etherstatscollisions_hi
);
3934 nstats
->rx_length_errors
=
3935 bnx2x_hilo(&estats
->rx_stat_etherstatsundersizepkts_hi
) +
3936 bnx2x_hilo(&estats
->etherstatsoverrsizepkts_hi
);
3937 nstats
->rx_over_errors
= bnx2x_hilo(&estats
->brb_drop_hi
) +
3938 bnx2x_hilo(&estats
->brb_truncate_hi
);
3939 nstats
->rx_crc_errors
=
3940 bnx2x_hilo(&estats
->rx_stat_dot3statsfcserrors_hi
);
3941 nstats
->rx_frame_errors
=
3942 bnx2x_hilo(&estats
->rx_stat_dot3statsalignmenterrors_hi
);
3943 nstats
->rx_fifo_errors
= bnx2x_hilo(&estats
->no_buff_discard_hi
);
3944 nstats
->rx_missed_errors
= estats
->xxoverflow_discard
;
3946 nstats
->rx_errors
= nstats
->rx_length_errors
+
3947 nstats
->rx_over_errors
+
3948 nstats
->rx_crc_errors
+
3949 nstats
->rx_frame_errors
+
3950 nstats
->rx_fifo_errors
+
3951 nstats
->rx_missed_errors
;
3953 nstats
->tx_aborted_errors
=
3954 bnx2x_hilo(&estats
->tx_stat_dot3statslatecollisions_hi
) +
3955 bnx2x_hilo(&estats
->tx_stat_dot3statsexcessivecollisions_hi
);
3956 nstats
->tx_carrier_errors
=
3957 bnx2x_hilo(&estats
->rx_stat_dot3statscarriersenseerrors_hi
);
3958 nstats
->tx_fifo_errors
= 0;
3959 nstats
->tx_heartbeat_errors
= 0;
3960 nstats
->tx_window_errors
= 0;
3962 nstats
->tx_errors
= nstats
->tx_aborted_errors
+
3963 nstats
->tx_carrier_errors
+
3964 bnx2x_hilo(&estats
->tx_stat_dot3statsinternalmactransmiterrors_hi
);
3967 static void bnx2x_drv_stats_update(struct bnx2x *bp)
3969 	struct bnx2x_eth_stats *estats = &bp->eth_stats;
3972 	estats->driver_xoff = 0;
3973 	estats->rx_err_discard_pkt = 0;
3974 	estats->rx_skb_alloc_failed = 0;
3975 	estats->hw_csum_err = 0;
3976 	for_each_queue(bp, i) {
3977 		struct bnx2x_eth_q_stats *qstats = &bp->fp[i].eth_q_stats;
3979 		estats->driver_xoff += qstats->driver_xoff;
3980 		estats->rx_err_discard_pkt += qstats->rx_err_discard_pkt;
3981 		estats->rx_skb_alloc_failed += qstats->rx_skb_alloc_failed;
3982 		estats->hw_csum_err += qstats->hw_csum_err;
static void bnx2x_stats_update(struct bnx2x *bp)
{
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	if (*stats_comp != DMAE_COMP_VAL)
		return;

	if (bp->port.pmf)
		bnx2x_hw_stats_update(bp);

	if (bnx2x_storm_stats_update(bp) && (bp->stats_pending++ == 3)) {
		BNX2X_ERR("storm stats were not updated for 3 times\n");
		bnx2x_panic();
		return;
	}

	bnx2x_net_stats_update(bp);
	bnx2x_drv_stats_update(bp);

	if (bp->msglevel & NETIF_MSG_TIMER) {
		struct tstorm_per_client_stats *old_tclient =
							&bp->fp->old_tclient;
		struct bnx2x_eth_q_stats *qstats = &bp->fp->eth_q_stats;
		struct bnx2x_eth_stats *estats = &bp->eth_stats;
		struct net_device_stats *nstats = &bp->dev->stats;
		int i;

		printk(KERN_DEBUG "%s:\n", bp->dev->name);
		printk(KERN_DEBUG "  tx avail (%4x)  tx hc idx (%x)"
				  "  tx pkt (%lx)\n",
		       bnx2x_tx_avail(bp->fp),
		       le16_to_cpu(*bp->fp->tx_cons_sb), nstats->tx_packets);
		printk(KERN_DEBUG "  rx usage (%4x)  rx hc idx (%x)"
				  "  rx pkt (%lx)\n",
		       (u16)(le16_to_cpu(*bp->fp->rx_cons_sb) -
			     bp->fp->rx_comp_cons),
		       le16_to_cpu(*bp->fp->rx_cons_sb), nstats->rx_packets);
		printk(KERN_DEBUG "  %s (Xoff events %u)  brb drops %u  "
				  "brb truncate %u\n",
		       (netif_queue_stopped(bp->dev) ? "Xoff" : "Xon"),
		       qstats->driver_xoff,
		       estats->brb_drop_lo, estats->brb_truncate_lo);
		printk(KERN_DEBUG "tstats: checksum_discard %u  "
			"packets_too_big_discard %lu  no_buff_discard %lu  "
			"mac_discard %u  mac_filter_discard %u  "
			"xxovrflow_discard %u  brb_truncate_discard %u  "
			"ttl0_discard %u\n",
		       le32_to_cpu(old_tclient->checksum_discard),
		       bnx2x_hilo(&qstats->etherstatsoverrsizepkts_hi),
		       bnx2x_hilo(&qstats->no_buff_discard_hi),
		       estats->mac_discard, estats->mac_filter_discard,
		       estats->xxoverflow_discard, estats->brb_truncate_discard,
		       le32_to_cpu(old_tclient->ttl0_discard));

		for_each_queue(bp, i) {
			printk(KERN_DEBUG "[%d]: %lu\t%lu\t%lu\n", i,
			       bnx2x_fp(bp, i, tx_pkt),
			       bnx2x_fp(bp, i, rx_pkt),
			       bnx2x_fp(bp, i, rx_calls));
		}
	}

	bnx2x_hw_stats_post(bp);
	bnx2x_storm_stats_post(bp);
}
static void bnx2x_port_stats_stop(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	u32 opcode;
	int loader_idx = PMF_DMAE_C(bp);
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	bp->executer_idx = 0;

	/* DMAE command: copy the host statistics from PCI to GRC for this
	 * port/VN */
	opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
		  DMAE_CMD_C_ENABLE |
		  DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		  DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		  DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		  (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		  (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));

	if (bp->port.port_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		if (bp->func_stx)
			dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
		else
			dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
		dmae->dst_addr_lo = bp->port.port_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct host_port_stats) >> 2;
		if (bp->func_stx) {
			dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
			dmae->comp_addr_hi = 0;
			dmae->comp_val = 1;
		} else {
			dmae->comp_addr_lo =
				U64_LO(bnx2x_sp_mapping(bp, stats_comp));
			dmae->comp_addr_hi =
				U64_HI(bnx2x_sp_mapping(bp, stats_comp));
			dmae->comp_val = DMAE_COMP_VAL;

			*stats_comp = 0;
		}
	}

	if (bp->func_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
		dmae->dst_addr_lo = bp->func_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct host_func_stats) >> 2;
		dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
		dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
		dmae->comp_val = DMAE_COMP_VAL;

		*stats_comp = 0;
	}
}
static void bnx2x_stats_stop(struct bnx2x *bp)
{
	int update = 0;

	bnx2x_stats_comp(bp);

	if (bp->port.pmf)
		update = (bnx2x_hw_stats_update(bp) == 0);

	update |= (bnx2x_storm_stats_update(bp) == 0);

	if (update) {
		bnx2x_net_stats_update(bp);

		if (bp->port.pmf)
			bnx2x_port_stats_stop(bp);

		bnx2x_hw_stats_post(bp);
		bnx2x_stats_comp(bp);
	}
}
static void bnx2x_stats_do_nothing(struct bnx2x *bp)
{
}

static const struct {
	void (*action)(struct bnx2x *bp);
	enum bnx2x_stats_state next_state;
} bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
/* state	event	*/
{
/* DISABLED	PMF	*/ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
/*		LINK_UP	*/ {bnx2x_stats_start,      STATS_STATE_ENABLED},
/*		UPDATE	*/ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
/*		STOP	*/ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
},
{
/* ENABLED	PMF	*/ {bnx2x_stats_pmf_start,  STATS_STATE_ENABLED},
/*		LINK_UP	*/ {bnx2x_stats_restart,    STATS_STATE_ENABLED},
/*		UPDATE	*/ {bnx2x_stats_update,     STATS_STATE_ENABLED},
/*		STOP	*/ {bnx2x_stats_stop,       STATS_STATE_DISABLED}
}
};
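/* Reading the table above: the current statistics state selects the row and
 * the incoming event selects the column; each entry supplies the handler to
 * run and the state to move to, so bnx2x_stats_handle() below reduces to a
 * single table lookup followed by an indirect call.
 */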
static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
{
	enum bnx2x_stats_state state = bp->stats_state;

	bnx2x_stats_stm[state][event].action(bp);
	bp->stats_state = bnx2x_stats_stm[state][event].next_state;

	if ((event != STATS_EVENT_UPDATE) || (bp->msglevel & NETIF_MSG_TIMER))
		DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
		   state, event, bp->stats_state);
}
static void bnx2x_timer(unsigned long data)
{
	struct bnx2x *bp = (struct bnx2x *) data;

	if (!netif_running(bp->dev))
		return;

	if (atomic_read(&bp->intr_sem) != 0)
		goto timer_restart;

	if (poll) {
		struct bnx2x_fastpath *fp = &bp->fp[0];
		int rc;

		bnx2x_tx_int(fp);
		rc = bnx2x_rx_int(fp, 1000);
	}

	if (!BP_NOMCP(bp)) {
		int func = BP_FUNC(bp);
		u32 drv_pulse;
		u32 mcp_pulse;

		++bp->fw_drv_pulse_wr_seq;
		bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
		/* TBD - add SYSTEM_TIME */
		drv_pulse = bp->fw_drv_pulse_wr_seq;
		SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);

		mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
			     MCP_PULSE_SEQ_MASK);
		/* The delta between driver pulse and mcp response
		 * should be 1 (before mcp response) or 0 (after mcp response)
		 */
		if ((drv_pulse != mcp_pulse) &&
		    (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
			/* someone lost a heartbeat... */
			BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
				  drv_pulse, mcp_pulse);
		}
	}

	if ((bp->state == BNX2X_STATE_OPEN) ||
	    (bp->state == BNX2X_STATE_DISABLED))
		bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);

timer_restart:
	mod_timer(&bp->timer, jiffies + bp->current_interval);
}
/* end of Statistics */

/*
 * nic init service functions
 */

static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
{
	int port = BP_PORT(bp);

	bnx2x_init_fill(bp, USTORM_INTMEM_ADDR +
			USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
			sizeof(struct ustorm_status_block)/4);
	bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR +
			CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
			sizeof(struct cstorm_status_block)/4);
}
static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
			  dma_addr_t mapping, int sb_id)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int index;
	u64 section;

	/* USTORM */
	section = ((u64)mapping) + offsetof(struct host_status_block,
					    u_status_block);
	sb->u_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_USTRORM_INTMEM +
	       USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
	REG_WR(bp, BAR_USTRORM_INTMEM +
	       ((USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_USTRORM_INTMEM + FP_USB_FUNC_OFF +
		USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);

	for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_USTRORM_INTMEM +
			 USTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);

	/* CSTORM */
	section = ((u64)mapping) + offsetof(struct host_status_block,
					    c_status_block);
	sb->c_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       ((CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
		CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);

	for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);

	bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
}
static void bnx2x_zero_def_sb(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);

	bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR +
			TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
			sizeof(struct tstorm_def_status_block)/4);
	bnx2x_init_fill(bp, USTORM_INTMEM_ADDR +
			USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
			sizeof(struct ustorm_def_status_block)/4);
	bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR +
			CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
			sizeof(struct cstorm_def_status_block)/4);
	bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR +
			XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
			sizeof(struct xstorm_def_status_block)/4);
}
4306 static void bnx2x_init_def_sb(struct bnx2x
*bp
,
4307 struct host_def_status_block
*def_sb
,
4308 dma_addr_t mapping
, int sb_id
)
4310 int port
= BP_PORT(bp
);
4311 int func
= BP_FUNC(bp
);
4312 int index
, val
, reg_offset
;
4316 section
= ((u64
)mapping
) + offsetof(struct host_def_status_block
,
4317 atten_status_block
);
4318 def_sb
->atten_status_block
.status_block_id
= sb_id
;
4322 reg_offset
= (port
? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0
:
4323 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0
);
4325 for (index
= 0; index
< MAX_DYNAMIC_ATTN_GRPS
; index
++) {
4326 bp
->attn_group
[index
].sig
[0] = REG_RD(bp
,
4327 reg_offset
+ 0x10*index
);
4328 bp
->attn_group
[index
].sig
[1] = REG_RD(bp
,
4329 reg_offset
+ 0x4 + 0x10*index
);
4330 bp
->attn_group
[index
].sig
[2] = REG_RD(bp
,
4331 reg_offset
+ 0x8 + 0x10*index
);
4332 bp
->attn_group
[index
].sig
[3] = REG_RD(bp
,
4333 reg_offset
+ 0xc + 0x10*index
);
4336 reg_offset
= (port
? HC_REG_ATTN_MSG1_ADDR_L
:
4337 HC_REG_ATTN_MSG0_ADDR_L
);
4339 REG_WR(bp
, reg_offset
, U64_LO(section
));
4340 REG_WR(bp
, reg_offset
+ 4, U64_HI(section
));
4342 reg_offset
= (port
? HC_REG_ATTN_NUM_P1
: HC_REG_ATTN_NUM_P0
);
4344 val
= REG_RD(bp
, reg_offset
);
4346 REG_WR(bp
, reg_offset
, val
);
4349 section
= ((u64
)mapping
) + offsetof(struct host_def_status_block
,
4350 u_def_status_block
);
4351 def_sb
->u_def_status_block
.status_block_id
= sb_id
;
4353 REG_WR(bp
, BAR_USTRORM_INTMEM
+
4354 USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func
), U64_LO(section
));
4355 REG_WR(bp
, BAR_USTRORM_INTMEM
+
4356 ((USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func
)) + 4),
4358 REG_WR8(bp
, BAR_USTRORM_INTMEM
+ DEF_USB_FUNC_OFF
+
4359 USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func
), func
);
4361 for (index
= 0; index
< HC_USTORM_DEF_SB_NUM_INDICES
; index
++)
4362 REG_WR16(bp
, BAR_USTRORM_INTMEM
+
4363 USTORM_DEF_SB_HC_DISABLE_OFFSET(func
, index
), 1);
4366 section
= ((u64
)mapping
) + offsetof(struct host_def_status_block
,
4367 c_def_status_block
);
4368 def_sb
->c_def_status_block
.status_block_id
= sb_id
;
4370 REG_WR(bp
, BAR_CSTRORM_INTMEM
+
4371 CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func
), U64_LO(section
));
4372 REG_WR(bp
, BAR_CSTRORM_INTMEM
+
4373 ((CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func
)) + 4),
4375 REG_WR8(bp
, BAR_CSTRORM_INTMEM
+ DEF_CSB_FUNC_OFF
+
4376 CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func
), func
);
4378 for (index
= 0; index
< HC_CSTORM_DEF_SB_NUM_INDICES
; index
++)
4379 REG_WR16(bp
, BAR_CSTRORM_INTMEM
+
4380 CSTORM_DEF_SB_HC_DISABLE_OFFSET(func
, index
), 1);
4383 section
= ((u64
)mapping
) + offsetof(struct host_def_status_block
,
4384 t_def_status_block
);
4385 def_sb
->t_def_status_block
.status_block_id
= sb_id
;
4387 REG_WR(bp
, BAR_TSTRORM_INTMEM
+
4388 TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func
), U64_LO(section
));
4389 REG_WR(bp
, BAR_TSTRORM_INTMEM
+
4390 ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func
)) + 4),
4392 REG_WR8(bp
, BAR_TSTRORM_INTMEM
+ DEF_TSB_FUNC_OFF
+
4393 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func
), func
);
4395 for (index
= 0; index
< HC_TSTORM_DEF_SB_NUM_INDICES
; index
++)
4396 REG_WR16(bp
, BAR_TSTRORM_INTMEM
+
4397 TSTORM_DEF_SB_HC_DISABLE_OFFSET(func
, index
), 1);
4400 section
= ((u64
)mapping
) + offsetof(struct host_def_status_block
,
4401 x_def_status_block
);
4402 def_sb
->x_def_status_block
.status_block_id
= sb_id
;
4404 REG_WR(bp
, BAR_XSTRORM_INTMEM
+
4405 XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func
), U64_LO(section
));
4406 REG_WR(bp
, BAR_XSTRORM_INTMEM
+
4407 ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func
)) + 4),
4409 REG_WR8(bp
, BAR_XSTRORM_INTMEM
+ DEF_XSB_FUNC_OFF
+
4410 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func
), func
);
4412 for (index
= 0; index
< HC_XSTORM_DEF_SB_NUM_INDICES
; index
++)
4413 REG_WR16(bp
, BAR_XSTRORM_INTMEM
+
4414 XSTORM_DEF_SB_HC_DISABLE_OFFSET(func
, index
), 1);
4416 bp
->stats_pending
= 0;
4417 bp
->set_mac_pending
= 0;
4419 bnx2x_ack_sb(bp
, sb_id
, CSTORM_ID
, 0, IGU_INT_ENABLE
, 0);
static void bnx2x_update_coalesce(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int i;

	for_each_queue(bp, i) {
		int sb_id = bp->fp[i].sb_id;

		/* HC_INDEX_U_ETH_RX_CQ_CONS */
		REG_WR8(bp, BAR_USTRORM_INTMEM +
			USTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
						    U_SB_ETH_RX_CQ_INDEX),
			bp->rx_ticks/12);
		REG_WR16(bp, BAR_USTRORM_INTMEM +
			 USTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
						     U_SB_ETH_RX_CQ_INDEX),
			 (bp->rx_ticks/12) ? 0 : 1);

		/* HC_INDEX_C_ETH_TX_CQ_CONS */
		REG_WR8(bp, BAR_CSTRORM_INTMEM +
			CSTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
						    C_SB_ETH_TX_CQ_INDEX),
			bp->tx_ticks/12);
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
						     C_SB_ETH_TX_CQ_INDEX),
			 (bp->tx_ticks/12) ? 0 : 1);
	}
}
static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
				       struct bnx2x_fastpath *fp, int last)
{
	int i;

	for (i = 0; i < last; i++) {
		struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]);
		struct sk_buff *skb = rx_buf->skb;

		if (skb == NULL) {
			DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
			continue;
		}

		if (fp->tpa_state[i] == BNX2X_TPA_START)
			pci_unmap_single(bp->pdev,
					 pci_unmap_addr(rx_buf, mapping),
					 bp->rx_buf_size, PCI_DMA_FROMDEVICE);

		dev_kfree_skb(skb);
		rx_buf->skb = NULL;
	}
}
4476 static void bnx2x_init_rx_rings(struct bnx2x
*bp
)
4478 int func
= BP_FUNC(bp
);
4479 int max_agg_queues
= CHIP_IS_E1(bp
) ? ETH_MAX_AGGREGATION_QUEUES_E1
:
4480 ETH_MAX_AGGREGATION_QUEUES_E1H
;
4481 u16 ring_prod
, cqe_ring_prod
;
4484 bp
->rx_buf_size
= bp
->dev
->mtu
+ ETH_OVREHEAD
+ BNX2X_RX_ALIGN
;
4486 "mtu %d rx_buf_size %d\n", bp
->dev
->mtu
, bp
->rx_buf_size
);
4488 if (bp
->flags
& TPA_ENABLE_FLAG
) {
4490 for_each_rx_queue(bp
, j
) {
4491 struct bnx2x_fastpath
*fp
= &bp
->fp
[j
];
4493 for (i
= 0; i
< max_agg_queues
; i
++) {
4494 fp
->tpa_pool
[i
].skb
=
4495 netdev_alloc_skb(bp
->dev
, bp
->rx_buf_size
);
4496 if (!fp
->tpa_pool
[i
].skb
) {
4497 BNX2X_ERR("Failed to allocate TPA "
4498 "skb pool for queue[%d] - "
4499 "disabling TPA on this "
4501 bnx2x_free_tpa_pool(bp
, fp
, i
);
4502 fp
->disable_tpa
= 1;
4505 pci_unmap_addr_set((struct sw_rx_bd
*)
4506 &bp
->fp
->tpa_pool
[i
],
4508 fp
->tpa_state
[i
] = BNX2X_TPA_STOP
;
4513 for_each_rx_queue(bp
, j
) {
4514 struct bnx2x_fastpath
*fp
= &bp
->fp
[j
];
4517 fp
->rx_cons_sb
= BNX2X_RX_SB_INDEX
;
4518 fp
->rx_bd_cons_sb
= BNX2X_RX_SB_BD_INDEX
;
4520 /* "next page" elements initialization */
4522 for (i
= 1; i
<= NUM_RX_SGE_PAGES
; i
++) {
4523 struct eth_rx_sge
*sge
;
4525 sge
= &fp
->rx_sge_ring
[RX_SGE_CNT
* i
- 2];
4527 cpu_to_le32(U64_HI(fp
->rx_sge_mapping
+
4528 BCM_PAGE_SIZE
*(i
% NUM_RX_SGE_PAGES
)));
4530 cpu_to_le32(U64_LO(fp
->rx_sge_mapping
+
4531 BCM_PAGE_SIZE
*(i
% NUM_RX_SGE_PAGES
)));
4534 bnx2x_init_sge_ring_bit_mask(fp
);
4537 for (i
= 1; i
<= NUM_RX_RINGS
; i
++) {
4538 struct eth_rx_bd
*rx_bd
;
4540 rx_bd
= &fp
->rx_desc_ring
[RX_DESC_CNT
* i
- 2];
4542 cpu_to_le32(U64_HI(fp
->rx_desc_mapping
+
4543 BCM_PAGE_SIZE
*(i
% NUM_RX_RINGS
)));
4545 cpu_to_le32(U64_LO(fp
->rx_desc_mapping
+
4546 BCM_PAGE_SIZE
*(i
% NUM_RX_RINGS
)));
4550 for (i
= 1; i
<= NUM_RCQ_RINGS
; i
++) {
4551 struct eth_rx_cqe_next_page
*nextpg
;
4553 nextpg
= (struct eth_rx_cqe_next_page
*)
4554 &fp
->rx_comp_ring
[RCQ_DESC_CNT
* i
- 1];
4556 cpu_to_le32(U64_HI(fp
->rx_comp_mapping
+
4557 BCM_PAGE_SIZE
*(i
% NUM_RCQ_RINGS
)));
4559 cpu_to_le32(U64_LO(fp
->rx_comp_mapping
+
4560 BCM_PAGE_SIZE
*(i
% NUM_RCQ_RINGS
)));
4563 /* Allocate SGEs and initialize the ring elements */
4564 for (i
= 0, ring_prod
= 0;
4565 i
< MAX_RX_SGE_CNT
*NUM_RX_SGE_PAGES
; i
++) {
4567 if (bnx2x_alloc_rx_sge(bp
, fp
, ring_prod
) < 0) {
4568 BNX2X_ERR("was only able to allocate "
4570 BNX2X_ERR("disabling TPA for queue[%d]\n", j
);
4571 /* Cleanup already allocated elements */
4572 bnx2x_free_rx_sge_range(bp
, fp
, ring_prod
);
4573 bnx2x_free_tpa_pool(bp
, fp
, max_agg_queues
);
4574 fp
->disable_tpa
= 1;
4578 ring_prod
= NEXT_SGE_IDX(ring_prod
);
4580 fp
->rx_sge_prod
= ring_prod
;
4582 /* Allocate BDs and initialize BD ring */
4583 fp
->rx_comp_cons
= 0;
4584 cqe_ring_prod
= ring_prod
= 0;
4585 for (i
= 0; i
< bp
->rx_ring_size
; i
++) {
4586 if (bnx2x_alloc_rx_skb(bp
, fp
, ring_prod
) < 0) {
4587 BNX2X_ERR("was only able to allocate "
4588 "%d rx skbs on queue[%d]\n", i
, j
);
4589 fp
->eth_q_stats
.rx_skb_alloc_failed
++;
4592 ring_prod
= NEXT_RX_IDX(ring_prod
);
4593 cqe_ring_prod
= NEXT_RCQ_IDX(cqe_ring_prod
);
4594 WARN_ON(ring_prod
<= i
);
4597 fp
->rx_bd_prod
= ring_prod
;
4598 /* must not have more available CQEs than BDs */
4599 fp
->rx_comp_prod
= min((u16
)(NUM_RCQ_RINGS
*RCQ_DESC_CNT
),
4601 fp
->rx_pkt
= fp
->rx_calls
= 0;
4604 * this will generate an interrupt (to the TSTORM)
4605 * must only be done after chip is initialized
4607 bnx2x_update_rx_prod(bp
, fp
, ring_prod
, fp
->rx_comp_prod
,
4612 REG_WR(bp
, BAR_USTRORM_INTMEM
+
4613 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func
),
4614 U64_LO(fp
->rx_comp_mapping
));
4615 REG_WR(bp
, BAR_USTRORM_INTMEM
+
4616 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func
) + 4,
4617 U64_HI(fp
->rx_comp_mapping
));
static void bnx2x_init_tx_ring(struct bnx2x *bp)
{
	int i, j;

	for_each_tx_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		for (i = 1; i <= NUM_TX_RINGS; i++) {
			struct eth_tx_bd *tx_bd =
				&fp->tx_desc_ring[TX_DESC_CNT * i - 1];

			tx_bd->addr_hi =
				cpu_to_le32(U64_HI(fp->tx_desc_mapping +
					   BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
			tx_bd->addr_lo =
				cpu_to_le32(U64_LO(fp->tx_desc_mapping +
					   BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
		}

		fp->tx_pkt_prod = 0;
		fp->tx_pkt_cons = 0;

		fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
	}
}
static void bnx2x_init_sp_ring(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);

	spin_lock_init(&bp->spq_lock);

	bp->spq_left = MAX_SPQ_PENDING;
	bp->spq_prod_idx = 0;
	bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
	bp->spq_prod_bd = bp->spq;
	bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;

	REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
	       U64_LO(bp->spq_mapping));
	REG_WR(bp,
	       XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
	       U64_HI(bp->spq_mapping));

	REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
	       bp->spq_prod_idx);
}
4671 static void bnx2x_init_context(struct bnx2x
*bp
)
4675 for_each_queue(bp
, i
) {
4676 struct eth_context
*context
= bnx2x_sp(bp
, context
[i
].eth
);
4677 struct bnx2x_fastpath
*fp
= &bp
->fp
[i
];
4678 u8 cl_id
= fp
->cl_id
;
4679 u8 sb_id
= fp
->sb_id
;
4681 context
->ustorm_st_context
.common
.sb_index_numbers
=
4682 BNX2X_RX_SB_INDEX_NUM
;
4683 context
->ustorm_st_context
.common
.clientId
= cl_id
;
4684 context
->ustorm_st_context
.common
.status_block_id
= sb_id
;
4685 context
->ustorm_st_context
.common
.flags
=
4686 (USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT
|
4687 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_STATISTICS
);
4688 context
->ustorm_st_context
.common
.statistics_counter_id
=
4690 context
->ustorm_st_context
.common
.mc_alignment_log_size
=
4691 BNX2X_RX_ALIGN_SHIFT
;
4692 context
->ustorm_st_context
.common
.bd_buff_size
=
4694 context
->ustorm_st_context
.common
.bd_page_base_hi
=
4695 U64_HI(fp
->rx_desc_mapping
);
4696 context
->ustorm_st_context
.common
.bd_page_base_lo
=
4697 U64_LO(fp
->rx_desc_mapping
);
4698 if (!fp
->disable_tpa
) {
4699 context
->ustorm_st_context
.common
.flags
|=
4700 (USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA
|
4701 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_SGE_RING
);
4702 context
->ustorm_st_context
.common
.sge_buff_size
=
4703 (u16
)min((u32
)SGE_PAGE_SIZE
*PAGES_PER_SGE
,
4705 context
->ustorm_st_context
.common
.sge_page_base_hi
=
4706 U64_HI(fp
->rx_sge_mapping
);
4707 context
->ustorm_st_context
.common
.sge_page_base_lo
=
4708 U64_LO(fp
->rx_sge_mapping
);
4711 context
->ustorm_ag_context
.cdu_usage
=
4712 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp
, i
),
4713 CDU_REGION_NUMBER_UCM_AG
,
4714 ETH_CONNECTION_TYPE
);
4716 context
->xstorm_st_context
.tx_bd_page_base_hi
=
4717 U64_HI(fp
->tx_desc_mapping
);
4718 context
->xstorm_st_context
.tx_bd_page_base_lo
=
4719 U64_LO(fp
->tx_desc_mapping
);
4720 context
->xstorm_st_context
.db_data_addr_hi
=
4721 U64_HI(fp
->tx_prods_mapping
);
4722 context
->xstorm_st_context
.db_data_addr_lo
=
4723 U64_LO(fp
->tx_prods_mapping
);
4724 context
->xstorm_st_context
.statistics_data
= (cl_id
|
4725 XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE
);
4726 context
->cstorm_st_context
.sb_index_number
=
4727 C_SB_ETH_TX_CQ_INDEX
;
4728 context
->cstorm_st_context
.status_block_id
= sb_id
;
4730 context
->xstorm_ag_context
.cdu_reserved
=
4731 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp
, i
),
4732 CDU_REGION_NUMBER_XCM_AG
,
4733 ETH_CONNECTION_TYPE
);
static void bnx2x_init_ind_table(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	int i;

	if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
		return;

	DP(NETIF_MSG_IFUP,
	   "Initializing indirection table  multi_mode %d\n", bp->multi_mode);
	for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
		REG_WR8(bp, BAR_TSTRORM_INTMEM +
			TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
			bp->fp->cl_id + (i % bp->num_rx_queues));
}
4753 static void bnx2x_set_client_config(struct bnx2x
*bp
)
4755 struct tstorm_eth_client_config tstorm_client
= {0};
4756 int port
= BP_PORT(bp
);
4759 tstorm_client
.mtu
= bp
->dev
->mtu
;
4760 tstorm_client
.config_flags
=
4761 (TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE
|
4762 TSTORM_ETH_CLIENT_CONFIG_E1HOV_REM_ENABLE
);
4764 if (bp
->rx_mode
&& bp
->vlgrp
&& (bp
->flags
& HW_VLAN_RX_FLAG
)) {
4765 tstorm_client
.config_flags
|=
4766 TSTORM_ETH_CLIENT_CONFIG_VLAN_REM_ENABLE
;
4767 DP(NETIF_MSG_IFUP
, "vlan removal enabled\n");
4771 if (bp
->flags
& TPA_ENABLE_FLAG
) {
4772 tstorm_client
.max_sges_for_packet
=
4773 SGE_PAGE_ALIGN(tstorm_client
.mtu
) >> SGE_PAGE_SHIFT
;
4774 tstorm_client
.max_sges_for_packet
=
4775 ((tstorm_client
.max_sges_for_packet
+
4776 PAGES_PER_SGE
- 1) & (~(PAGES_PER_SGE
- 1))) >>
4777 PAGES_PER_SGE_SHIFT
;
4779 tstorm_client
.config_flags
|=
4780 TSTORM_ETH_CLIENT_CONFIG_ENABLE_SGE_RING
;
4783 for_each_queue(bp
, i
) {
4784 tstorm_client
.statistics_counter_id
= bp
->fp
[i
].cl_id
;
4786 REG_WR(bp
, BAR_TSTRORM_INTMEM
+
4787 TSTORM_CLIENT_CONFIG_OFFSET(port
, bp
->fp
[i
].cl_id
),
4788 ((u32
*)&tstorm_client
)[0]);
4789 REG_WR(bp
, BAR_TSTRORM_INTMEM
+
4790 TSTORM_CLIENT_CONFIG_OFFSET(port
, bp
->fp
[i
].cl_id
) + 4,
4791 ((u32
*)&tstorm_client
)[1]);
4794 DP(BNX2X_MSG_OFF
, "tstorm_client: 0x%08x 0x%08x\n",
4795 ((u32
*)&tstorm_client
)[0], ((u32
*)&tstorm_client
)[1]);
4798 static void bnx2x_set_storm_rx_mode(struct bnx2x
*bp
)
4800 struct tstorm_eth_mac_filter_config tstorm_mac_filter
= {0};
4801 int mode
= bp
->rx_mode
;
4802 int mask
= (1 << BP_L_ID(bp
));
4803 int func
= BP_FUNC(bp
);
4806 DP(NETIF_MSG_IFUP
, "rx mode %d mask 0x%x\n", mode
, mask
);
4809 case BNX2X_RX_MODE_NONE
: /* no Rx */
4810 tstorm_mac_filter
.ucast_drop_all
= mask
;
4811 tstorm_mac_filter
.mcast_drop_all
= mask
;
4812 tstorm_mac_filter
.bcast_drop_all
= mask
;
4815 case BNX2X_RX_MODE_NORMAL
:
4816 tstorm_mac_filter
.bcast_accept_all
= mask
;
4819 case BNX2X_RX_MODE_ALLMULTI
:
4820 tstorm_mac_filter
.mcast_accept_all
= mask
;
4821 tstorm_mac_filter
.bcast_accept_all
= mask
;
4824 case BNX2X_RX_MODE_PROMISC
:
4825 tstorm_mac_filter
.ucast_accept_all
= mask
;
4826 tstorm_mac_filter
.mcast_accept_all
= mask
;
4827 tstorm_mac_filter
.bcast_accept_all
= mask
;
4831 BNX2X_ERR("BAD rx mode (%d)\n", mode
);
4835 for (i
= 0; i
< sizeof(struct tstorm_eth_mac_filter_config
)/4; i
++) {
4836 REG_WR(bp
, BAR_TSTRORM_INTMEM
+
4837 TSTORM_MAC_FILTER_CONFIG_OFFSET(func
) + i
* 4,
4838 ((u32
*)&tstorm_mac_filter
)[i
]);
4840 /* DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
4841 ((u32 *)&tstorm_mac_filter)[i]); */
4844 if (mode
!= BNX2X_RX_MODE_NONE
)
4845 bnx2x_set_client_config(bp
);
4848 static void bnx2x_init_internal_common(struct bnx2x
*bp
)
4852 if (bp
->flags
& TPA_ENABLE_FLAG
) {
4853 struct tstorm_eth_tpa_exist tpa
= {0};
4857 REG_WR(bp
, BAR_TSTRORM_INTMEM
+ TSTORM_TPA_EXIST_OFFSET
,
4859 REG_WR(bp
, BAR_TSTRORM_INTMEM
+ TSTORM_TPA_EXIST_OFFSET
+ 4,
4863 /* Zero this manually as its initialization is
4864 currently missing in the initTool */
4865 for (i
= 0; i
< (USTORM_AGG_DATA_SIZE
>> 2); i
++)
4866 REG_WR(bp
, BAR_USTRORM_INTMEM
+
4867 USTORM_AGG_DATA_OFFSET
+ i
* 4, 0);
static void bnx2x_init_internal_port(struct bnx2x *bp)
{
	int port = BP_PORT(bp);

	REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
	REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
	REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
}
4880 /* Calculates the sum of vn_min_rates.
4881 It's needed for further normalizing of the min_rates.
4883 sum of vn_min_rates.
4885 0 - if all the min_rates are 0.
4886 In the later case fainess algorithm should be deactivated.
4887 If not all min_rates are zero then those that are zeroes will be set to 1.
4889 static void bnx2x_calc_vn_weight_sum(struct bnx2x
*bp
)
4892 int port
= BP_PORT(bp
);
4895 bp
->vn_weight_sum
= 0;
4896 for (vn
= VN_0
; vn
< E1HVN_MAX
; vn
++) {
4897 int func
= 2*vn
+ port
;
4899 SHMEM_RD(bp
, mf_cfg
.func_mf_config
[func
].config
);
4900 u32 vn_min_rate
= ((vn_cfg
& FUNC_MF_CFG_MIN_BW_MASK
) >>
4901 FUNC_MF_CFG_MIN_BW_SHIFT
) * 100;
4903 /* Skip hidden vns */
4904 if (vn_cfg
& FUNC_MF_CFG_FUNC_HIDE
)
4907 /* If min rate is zero - set it to 1 */
4909 vn_min_rate
= DEF_MIN_RATE
;
4913 bp
->vn_weight_sum
+= vn_min_rate
;
4916 /* ... only if all min rates are zeros - disable fairness */
4918 bp
->vn_weight_sum
= 0;
4921 static void bnx2x_init_internal_func(struct bnx2x
*bp
)
4923 struct tstorm_eth_function_common_config tstorm_config
= {0};
4924 struct stats_indication_flags stats_flags
= {0};
4925 int port
= BP_PORT(bp
);
4926 int func
= BP_FUNC(bp
);
4932 tstorm_config
.config_flags
= MULTI_FLAGS(bp
);
4933 tstorm_config
.rss_result_mask
= MULTI_MASK
;
4936 tstorm_config
.config_flags
|=
4937 TSTORM_ETH_FUNCTION_COMMON_CONFIG_E1HOV_IN_CAM
;
4939 tstorm_config
.leading_client_id
= BP_L_ID(bp
);
4941 REG_WR(bp
, BAR_TSTRORM_INTMEM
+
4942 TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func
),
4943 (*(u32
*)&tstorm_config
));
4945 bp
->rx_mode
= BNX2X_RX_MODE_NONE
; /* no rx until link is up */
4946 bnx2x_set_storm_rx_mode(bp
);
4948 for_each_queue(bp
, i
) {
4949 u8 cl_id
= bp
->fp
[i
].cl_id
;
4951 /* reset xstorm per client statistics */
4952 offset
= BAR_XSTRORM_INTMEM
+
4953 XSTORM_PER_COUNTER_ID_STATS_OFFSET(port
, cl_id
);
4955 j
< sizeof(struct xstorm_per_client_stats
) / 4; j
++)
4956 REG_WR(bp
, offset
+ j
*4, 0);
4958 /* reset tstorm per client statistics */
4959 offset
= BAR_TSTRORM_INTMEM
+
4960 TSTORM_PER_COUNTER_ID_STATS_OFFSET(port
, cl_id
);
4962 j
< sizeof(struct tstorm_per_client_stats
) / 4; j
++)
4963 REG_WR(bp
, offset
+ j
*4, 0);
4965 /* reset ustorm per client statistics */
4966 offset
= BAR_USTRORM_INTMEM
+
4967 USTORM_PER_COUNTER_ID_STATS_OFFSET(port
, cl_id
);
4969 j
< sizeof(struct ustorm_per_client_stats
) / 4; j
++)
4970 REG_WR(bp
, offset
+ j
*4, 0);
4973 /* Init statistics related context */
4974 stats_flags
.collect_eth
= 1;
4976 REG_WR(bp
, BAR_XSTRORM_INTMEM
+ XSTORM_STATS_FLAGS_OFFSET(func
),
4977 ((u32
*)&stats_flags
)[0]);
4978 REG_WR(bp
, BAR_XSTRORM_INTMEM
+ XSTORM_STATS_FLAGS_OFFSET(func
) + 4,
4979 ((u32
*)&stats_flags
)[1]);
4981 REG_WR(bp
, BAR_TSTRORM_INTMEM
+ TSTORM_STATS_FLAGS_OFFSET(func
),
4982 ((u32
*)&stats_flags
)[0]);
4983 REG_WR(bp
, BAR_TSTRORM_INTMEM
+ TSTORM_STATS_FLAGS_OFFSET(func
) + 4,
4984 ((u32
*)&stats_flags
)[1]);
4986 REG_WR(bp
, BAR_USTRORM_INTMEM
+ USTORM_STATS_FLAGS_OFFSET(func
),
4987 ((u32
*)&stats_flags
)[0]);
4988 REG_WR(bp
, BAR_USTRORM_INTMEM
+ USTORM_STATS_FLAGS_OFFSET(func
) + 4,
4989 ((u32
*)&stats_flags
)[1]);
4991 REG_WR(bp
, BAR_CSTRORM_INTMEM
+ CSTORM_STATS_FLAGS_OFFSET(func
),
4992 ((u32
*)&stats_flags
)[0]);
4993 REG_WR(bp
, BAR_CSTRORM_INTMEM
+ CSTORM_STATS_FLAGS_OFFSET(func
) + 4,
4994 ((u32
*)&stats_flags
)[1]);
4996 REG_WR(bp
, BAR_XSTRORM_INTMEM
+
4997 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func
),
4998 U64_LO(bnx2x_sp_mapping(bp
, fw_stats
)));
4999 REG_WR(bp
, BAR_XSTRORM_INTMEM
+
5000 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func
) + 4,
5001 U64_HI(bnx2x_sp_mapping(bp
, fw_stats
)));
5003 REG_WR(bp
, BAR_TSTRORM_INTMEM
+
5004 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func
),
5005 U64_LO(bnx2x_sp_mapping(bp
, fw_stats
)));
5006 REG_WR(bp
, BAR_TSTRORM_INTMEM
+
5007 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func
) + 4,
5008 U64_HI(bnx2x_sp_mapping(bp
, fw_stats
)));
5010 REG_WR(bp
, BAR_USTRORM_INTMEM
+
5011 USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func
),
5012 U64_LO(bnx2x_sp_mapping(bp
, fw_stats
)));
5013 REG_WR(bp
, BAR_USTRORM_INTMEM
+
5014 USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func
) + 4,
5015 U64_HI(bnx2x_sp_mapping(bp
, fw_stats
)));
5017 if (CHIP_IS_E1H(bp
)) {
5018 REG_WR8(bp
, BAR_XSTRORM_INTMEM
+ XSTORM_FUNCTION_MODE_OFFSET
,
5020 REG_WR8(bp
, BAR_TSTRORM_INTMEM
+ TSTORM_FUNCTION_MODE_OFFSET
,
5022 REG_WR8(bp
, BAR_CSTRORM_INTMEM
+ CSTORM_FUNCTION_MODE_OFFSET
,
5024 REG_WR8(bp
, BAR_USTRORM_INTMEM
+ USTORM_FUNCTION_MODE_OFFSET
,
5027 REG_WR16(bp
, BAR_XSTRORM_INTMEM
+ XSTORM_E1HOV_OFFSET(func
),
5031 /* Init CQ ring mapping and aggregation size, the FW limit is 8 frags */
5033 min((u32
)(min((u32
)8, (u32
)MAX_SKB_FRAGS
) *
5034 SGE_PAGE_SIZE
* PAGES_PER_SGE
),
5036 for_each_rx_queue(bp
, i
) {
5037 struct bnx2x_fastpath
*fp
= &bp
->fp
[i
];
5039 REG_WR(bp
, BAR_USTRORM_INTMEM
+
5040 USTORM_CQE_PAGE_BASE_OFFSET(port
, fp
->cl_id
),
5041 U64_LO(fp
->rx_comp_mapping
));
5042 REG_WR(bp
, BAR_USTRORM_INTMEM
+
5043 USTORM_CQE_PAGE_BASE_OFFSET(port
, fp
->cl_id
) + 4,
5044 U64_HI(fp
->rx_comp_mapping
));
5046 REG_WR16(bp
, BAR_USTRORM_INTMEM
+
5047 USTORM_MAX_AGG_SIZE_OFFSET(port
, fp
->cl_id
),
5051 /* dropless flow control */
5052 if (CHIP_IS_E1H(bp
)) {
5053 struct ustorm_eth_rx_pause_data_e1h rx_pause
= {0};
5055 rx_pause
.bd_thr_low
= 250;
5056 rx_pause
.cqe_thr_low
= 250;
5058 rx_pause
.sge_thr_low
= 0;
5059 rx_pause
.bd_thr_high
= 350;
5060 rx_pause
.cqe_thr_high
= 350;
5061 rx_pause
.sge_thr_high
= 0;
5063 for_each_rx_queue(bp
, i
) {
5064 struct bnx2x_fastpath
*fp
= &bp
->fp
[i
];
5066 if (!fp
->disable_tpa
) {
5067 rx_pause
.sge_thr_low
= 150;
5068 rx_pause
.sge_thr_high
= 250;
5072 offset
= BAR_USTRORM_INTMEM
+
5073 USTORM_ETH_RING_PAUSE_DATA_OFFSET(port
,
5076 j
< sizeof(struct ustorm_eth_rx_pause_data_e1h
)/4;
5078 REG_WR(bp
, offset
+ j
*4,
5079 ((u32
*)&rx_pause
)[j
]);
5083 memset(&(bp
->cmng
), 0, sizeof(struct cmng_struct_per_port
));
5085 /* Init rate shaping and fairness contexts */
5089 /* During init there is no active link
5090 Until link is up, set link rate to 10Gbps */
5091 bp
->link_vars
.line_speed
= SPEED_10000
;
5092 bnx2x_init_port_minmax(bp
);
5094 bnx2x_calc_vn_weight_sum(bp
);
5096 for (vn
= VN_0
; vn
< E1HVN_MAX
; vn
++)
5097 bnx2x_init_vn_minmax(bp
, 2*vn
+ port
);
5099 /* Enable rate shaping and fairness */
5100 bp
->cmng
.flags
.cmng_enables
=
5101 CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN
;
5102 if (bp
->vn_weight_sum
)
5103 bp
->cmng
.flags
.cmng_enables
|=
5104 CMNG_FLAGS_PER_PORT_FAIRNESS_VN
;
5106 DP(NETIF_MSG_IFUP
, "All MIN values are zeroes"
5107 " fairness will be disabled\n");
5109 /* rate shaping and fairness are disabled */
5111 "single function mode minmax will be disabled\n");
5115 /* Store it to internal memory */
5117 for (i
= 0; i
< sizeof(struct cmng_struct_per_port
) / 4; i
++)
5118 REG_WR(bp
, BAR_XSTRORM_INTMEM
+
5119 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port
) + i
* 4,
5120 ((u32
*)(&bp
->cmng
))[i
]);
static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
{
	switch (load_code) {
	case FW_MSG_CODE_DRV_LOAD_COMMON:
		bnx2x_init_internal_common(bp);
		/* no break */

	case FW_MSG_CODE_DRV_LOAD_PORT:
		bnx2x_init_internal_port(bp);
		/* no break */

	case FW_MSG_CODE_DRV_LOAD_FUNCTION:
		bnx2x_init_internal_func(bp);
		break;

	default:
		BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
		break;
	}
}
5144 static void bnx2x_nic_init(struct bnx2x
*bp
, u32 load_code
)
5148 for_each_queue(bp
, i
) {
5149 struct bnx2x_fastpath
*fp
= &bp
->fp
[i
];
5152 fp
->state
= BNX2X_FP_STATE_CLOSED
;
5154 fp
->cl_id
= BP_L_ID(bp
) + i
;
5155 fp
->sb_id
= fp
->cl_id
;
5157 "queue[%d]: bnx2x_init_sb(%p,%p) cl_id %d sb %d\n",
5158 i
, bp
, fp
->status_blk
, fp
->cl_id
, fp
->sb_id
);
5159 bnx2x_init_sb(bp
, fp
->status_blk
, fp
->status_blk_mapping
,
5161 bnx2x_update_fpsb_idx(fp
);
5164 /* ensure status block indices were read */
5168 bnx2x_init_def_sb(bp
, bp
->def_status_blk
, bp
->def_status_blk_mapping
,
5170 bnx2x_update_dsb_idx(bp
);
5171 bnx2x_update_coalesce(bp
);
5172 bnx2x_init_rx_rings(bp
);
5173 bnx2x_init_tx_ring(bp
);
5174 bnx2x_init_sp_ring(bp
);
5175 bnx2x_init_context(bp
);
5176 bnx2x_init_internal(bp
, load_code
);
5177 bnx2x_init_ind_table(bp
);
5178 bnx2x_stats_init(bp
);
5180 /* At this point, we are ready for interrupts */
5181 atomic_set(&bp
->intr_sem
, 0);
5183 /* flush all before enabling interrupts */
5187 bnx2x_int_enable(bp
);
5190 /* end of nic init */
/*
 * gzip service functions
 */

static int bnx2x_gunzip_init(struct bnx2x *bp)
{
	bp->gunzip_buf = pci_alloc_consistent(bp->pdev, FW_BUF_SIZE,
					      &bp->gunzip_mapping);
	if (bp->gunzip_buf == NULL)
		goto gunzip_nomem1;

	bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
	if (bp->strm == NULL)
		goto gunzip_nomem2;

	bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
				      GFP_KERNEL);
	if (bp->strm->workspace == NULL)
		goto gunzip_nomem3;

	return 0;

gunzip_nomem3:
	kfree(bp->strm);
	bp->strm = NULL;

gunzip_nomem2:
	pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
			    bp->gunzip_mapping);
	bp->gunzip_buf = NULL;

gunzip_nomem1:
	printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for"
			    " un-compression\n", bp->dev->name);
	return -ENOMEM;
}

static void bnx2x_gunzip_end(struct bnx2x *bp)
{
	kfree(bp->strm->workspace);

	kfree(bp->strm);
	bp->strm = NULL;

	if (bp->gunzip_buf) {
		pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
				    bp->gunzip_mapping);
		bp->gunzip_buf = NULL;
	}
}
static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len)
{
	int n, rc;

	/* check gzip header */
	if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED)) {
		BNX2X_ERR("Bad gzip header\n");
		return -EINVAL;
	}

	/* skip the fixed-size gzip header and any embedded original
	 * file name before handing the raw deflate stream to zlib */
	n = 10;

#define FNAME				0x8

	if (zbuf[3] & FNAME)
		while ((zbuf[n++] != 0) && (n < len));

	bp->strm->next_in = (typeof(bp->strm->next_in))zbuf + n;
	bp->strm->avail_in = len - n;
	bp->strm->next_out = bp->gunzip_buf;
	bp->strm->avail_out = FW_BUF_SIZE;

	rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
	if (rc != Z_OK)
		return rc;

	rc = zlib_inflate(bp->strm, Z_FINISH);
	if ((rc != Z_OK) && (rc != Z_STREAM_END))
		printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
		       bp->dev->name, bp->strm->msg);

	bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
	if (bp->gunzip_outlen & 0x3)
		printk(KERN_ERR PFX "%s: Firmware decompression error:"
				    " gunzip_outlen (%d) not aligned\n",
		       bp->dev->name, bp->gunzip_outlen);
	bp->gunzip_outlen >>= 2;

	zlib_inflateEnd(bp->strm);

	if (rc == Z_STREAM_END)
		return 0;
	else
		return rc;
}
/* nic load/unload */

/*
 * General service functions
 */

/* send a NIG loopback debug packet */
static void bnx2x_lb_pckt(struct bnx2x *bp)
{
	u32 wb_write[3];

	/* Ethernet source and destination addresses */
	wb_write[0] = 0x55555555;
	wb_write[1] = 0x55555555;
	wb_write[2] = 0x20;		/* SOP */
	REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);

	/* NON-IP protocol */
	wb_write[0] = 0x09000000;
	wb_write[1] = 0x55555555;
	wb_write[2] = 0x10;		/* EOP, eop_bvalid = 0 */
	REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
}
5313 /* some of the internal memories
5314 * are not directly readable from the driver
5315 * to test them we send debug packets
5317 static int bnx2x_int_mem_test(struct bnx2x
*bp
)
5323 if (CHIP_REV_IS_FPGA(bp
))
5325 else if (CHIP_REV_IS_EMUL(bp
))
5330 DP(NETIF_MSG_HW
, "start part1\n");
5332 /* Disable inputs of parser neighbor blocks */
5333 REG_WR(bp
, TSDM_REG_ENABLE_IN1
, 0x0);
5334 REG_WR(bp
, TCM_REG_PRS_IFEN
, 0x0);
5335 REG_WR(bp
, CFC_REG_DEBUG0
, 0x1);
5336 REG_WR(bp
, NIG_REG_PRS_REQ_IN_EN
, 0x0);
5338 /* Write 0 to parser credits for CFC search request */
5339 REG_WR(bp
, PRS_REG_CFC_SEARCH_INITIAL_CREDIT
, 0x0);
5341 /* send Ethernet packet */
5344 /* TODO do i reset NIG statistic? */
5345 /* Wait until NIG register shows 1 packet of size 0x10 */
5346 count
= 1000 * factor
;
5349 bnx2x_read_dmae(bp
, NIG_REG_STAT2_BRB_OCTET
, 2);
5350 val
= *bnx2x_sp(bp
, wb_data
[0]);
5358 BNX2X_ERR("NIG timeout val = 0x%x\n", val
);
5362 /* Wait until PRS register shows 1 packet */
5363 count
= 1000 * factor
;
5365 val
= REG_RD(bp
, PRS_REG_NUM_OF_PACKETS
);
5373 BNX2X_ERR("PRS timeout val = 0x%x\n", val
);
5377 /* Reset and init BRB, PRS */
5378 REG_WR(bp
, GRCBASE_MISC
+ MISC_REGISTERS_RESET_REG_1_CLEAR
, 0x03);
5380 REG_WR(bp
, GRCBASE_MISC
+ MISC_REGISTERS_RESET_REG_1_SET
, 0x03);
5382 bnx2x_init_block(bp
, BRB1_BLOCK
, COMMON_STAGE
);
5383 bnx2x_init_block(bp
, PRS_BLOCK
, COMMON_STAGE
);
5385 DP(NETIF_MSG_HW
, "part2\n");
5387 /* Disable inputs of parser neighbor blocks */
5388 REG_WR(bp
, TSDM_REG_ENABLE_IN1
, 0x0);
5389 REG_WR(bp
, TCM_REG_PRS_IFEN
, 0x0);
5390 REG_WR(bp
, CFC_REG_DEBUG0
, 0x1);
5391 REG_WR(bp
, NIG_REG_PRS_REQ_IN_EN
, 0x0);
5393 /* Write 0 to parser credits for CFC search request */
5394 REG_WR(bp
, PRS_REG_CFC_SEARCH_INITIAL_CREDIT
, 0x0);
5396 /* send 10 Ethernet packets */
5397 for (i
= 0; i
< 10; i
++)
5400 /* Wait until NIG register shows 10 + 1
5401 packets of size 11*0x10 = 0xb0 */
5402 count
= 1000 * factor
;
5405 bnx2x_read_dmae(bp
, NIG_REG_STAT2_BRB_OCTET
, 2);
5406 val
= *bnx2x_sp(bp
, wb_data
[0]);
5414 BNX2X_ERR("NIG timeout val = 0x%x\n", val
);
5418 /* Wait until PRS register shows 2 packets */
5419 val
= REG_RD(bp
, PRS_REG_NUM_OF_PACKETS
);
5421 BNX2X_ERR("PRS timeout val = 0x%x\n", val
);
5423 /* Write 1 to parser credits for CFC search request */
5424 REG_WR(bp
, PRS_REG_CFC_SEARCH_INITIAL_CREDIT
, 0x1);
5426 /* Wait until PRS register shows 3 packets */
5427 msleep(10 * factor
);
5428 /* Wait until NIG register shows 1 packet of size 0x10 */
5429 val
= REG_RD(bp
, PRS_REG_NUM_OF_PACKETS
);
5431 BNX2X_ERR("PRS timeout val = 0x%x\n", val
);
5433 /* clear NIG EOP FIFO */
5434 for (i
= 0; i
< 11; i
++)
5435 REG_RD(bp
, NIG_REG_INGRESS_EOP_LB_FIFO
);
5436 val
= REG_RD(bp
, NIG_REG_INGRESS_EOP_LB_EMPTY
);
5438 BNX2X_ERR("clear of NIG failed\n");
5442 /* Reset and init BRB, PRS, NIG */
5443 REG_WR(bp
, GRCBASE_MISC
+ MISC_REGISTERS_RESET_REG_1_CLEAR
, 0x03);
5445 REG_WR(bp
, GRCBASE_MISC
+ MISC_REGISTERS_RESET_REG_1_SET
, 0x03);
5447 bnx2x_init_block(bp
, BRB1_BLOCK
, COMMON_STAGE
);
5448 bnx2x_init_block(bp
, PRS_BLOCK
, COMMON_STAGE
);
5451 REG_WR(bp
, PRS_REG_NIC_MODE
, 1);
5454 /* Enable inputs of parser neighbor blocks */
5455 REG_WR(bp
, TSDM_REG_ENABLE_IN1
, 0x7fffffff);
5456 REG_WR(bp
, TCM_REG_PRS_IFEN
, 0x1);
5457 REG_WR(bp
, CFC_REG_DEBUG0
, 0x0);
5458 REG_WR(bp
, NIG_REG_PRS_REQ_IN_EN
, 0x1);
5460 DP(NETIF_MSG_HW
, "done\n");
5465 static void enable_blocks_attention(struct bnx2x
*bp
)
5467 REG_WR(bp
, PXP_REG_PXP_INT_MASK_0
, 0);
5468 REG_WR(bp
, PXP_REG_PXP_INT_MASK_1
, 0);
5469 REG_WR(bp
, DORQ_REG_DORQ_INT_MASK
, 0);
5470 REG_WR(bp
, CFC_REG_CFC_INT_MASK
, 0);
5471 REG_WR(bp
, QM_REG_QM_INT_MASK
, 0);
5472 REG_WR(bp
, TM_REG_TM_INT_MASK
, 0);
5473 REG_WR(bp
, XSDM_REG_XSDM_INT_MASK_0
, 0);
5474 REG_WR(bp
, XSDM_REG_XSDM_INT_MASK_1
, 0);
5475 REG_WR(bp
, XCM_REG_XCM_INT_MASK
, 0);
5476 /* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
5477 /* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
5478 REG_WR(bp
, USDM_REG_USDM_INT_MASK_0
, 0);
5479 REG_WR(bp
, USDM_REG_USDM_INT_MASK_1
, 0);
5480 REG_WR(bp
, UCM_REG_UCM_INT_MASK
, 0);
5481 /* REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
5482 /* REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
5483 REG_WR(bp
, GRCBASE_UPB
+ PB_REG_PB_INT_MASK
, 0);
5484 REG_WR(bp
, CSDM_REG_CSDM_INT_MASK_0
, 0);
5485 REG_WR(bp
, CSDM_REG_CSDM_INT_MASK_1
, 0);
5486 REG_WR(bp
, CCM_REG_CCM_INT_MASK
, 0);
5487 /* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
5488 /* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
5489 if (CHIP_REV_IS_FPGA(bp
))
5490 REG_WR(bp
, PXP2_REG_PXP2_INT_MASK_0
, 0x580000);
5492 REG_WR(bp
, PXP2_REG_PXP2_INT_MASK_0
, 0x480000);
5493 REG_WR(bp
, TSDM_REG_TSDM_INT_MASK_0
, 0);
5494 REG_WR(bp
, TSDM_REG_TSDM_INT_MASK_1
, 0);
5495 REG_WR(bp
, TCM_REG_TCM_INT_MASK
, 0);
5496 /* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
5497 /* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
5498 REG_WR(bp
, CDU_REG_CDU_INT_MASK
, 0);
5499 REG_WR(bp
, DMAE_REG_DMAE_INT_MASK
, 0);
5500 /* REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
5501 REG_WR(bp
, PBF_REG_PBF_INT_MASK
, 0X18); /* bit 3,4 masked */
5505 static void bnx2x_reset_common(struct bnx2x
*bp
)
5508 REG_WR(bp
, GRCBASE_MISC
+ MISC_REGISTERS_RESET_REG_1_CLEAR
,
5510 REG_WR(bp
, GRCBASE_MISC
+ MISC_REGISTERS_RESET_REG_2_CLEAR
, 0x1403);
5513 static int bnx2x_init_common(struct bnx2x
*bp
)
5517 DP(BNX2X_MSG_MCP
, "starting common init func %d\n", BP_FUNC(bp
));
5519 bnx2x_reset_common(bp
);
5520 REG_WR(bp
, GRCBASE_MISC
+ MISC_REGISTERS_RESET_REG_1_SET
, 0xffffffff);
5521 REG_WR(bp
, GRCBASE_MISC
+ MISC_REGISTERS_RESET_REG_2_SET
, 0xfffc);
5523 bnx2x_init_block(bp
, MISC_BLOCK
, COMMON_STAGE
);
5524 if (CHIP_IS_E1H(bp
))
5525 REG_WR(bp
, MISC_REG_E1HMF_MODE
, IS_E1HMF(bp
));
5527 REG_WR(bp
, MISC_REG_LCPLL_CTRL_REG_2
, 0x100);
5529 REG_WR(bp
, MISC_REG_LCPLL_CTRL_REG_2
, 0x0);
5531 bnx2x_init_block(bp
, PXP_BLOCK
, COMMON_STAGE
);
5532 if (CHIP_IS_E1(bp
)) {
5533 /* enable HW interrupt from PXP on USDM overflow
5534 bit 16 on INT_MASK_0 */
5535 REG_WR(bp
, PXP_REG_PXP_INT_MASK_0
, 0);
5538 bnx2x_init_block(bp
, PXP2_BLOCK
, COMMON_STAGE
);
5542 REG_WR(bp
, PXP2_REG_RQ_QM_ENDIAN_M
, 1);
5543 REG_WR(bp
, PXP2_REG_RQ_TM_ENDIAN_M
, 1);
5544 REG_WR(bp
, PXP2_REG_RQ_SRC_ENDIAN_M
, 1);
5545 REG_WR(bp
, PXP2_REG_RQ_CDU_ENDIAN_M
, 1);
5546 REG_WR(bp
, PXP2_REG_RQ_DBG_ENDIAN_M
, 1);
5547 /* make sure this value is 0 */
5548 REG_WR(bp
, PXP2_REG_RQ_HC_ENDIAN_M
, 0);
5550 /* REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
5551 REG_WR(bp
, PXP2_REG_RD_QM_SWAP_MODE
, 1);
5552 REG_WR(bp
, PXP2_REG_RD_TM_SWAP_MODE
, 1);
5553 REG_WR(bp
, PXP2_REG_RD_SRC_SWAP_MODE
, 1);
5554 REG_WR(bp
, PXP2_REG_RD_CDURD_SWAP_MODE
, 1);
5557 REG_WR(bp
, PXP2_REG_RQ_CDU_P_SIZE
, 2);
5559 REG_WR(bp
, PXP2_REG_RQ_TM_P_SIZE
, 5);
5560 REG_WR(bp
, PXP2_REG_RQ_QM_P_SIZE
, 5);
5561 REG_WR(bp
, PXP2_REG_RQ_SRC_P_SIZE
, 5);
5564 if (CHIP_REV_IS_FPGA(bp
) && CHIP_IS_E1H(bp
))
5565 REG_WR(bp
, PXP2_REG_PGL_TAGS_LIMIT
, 0x1);
5567 /* let the HW do it's magic ... */
5569 /* finish PXP init */
5570 val
= REG_RD(bp
, PXP2_REG_RQ_CFG_DONE
);
5572 BNX2X_ERR("PXP2 CFG failed\n");
5575 val
= REG_RD(bp
, PXP2_REG_RD_INIT_DONE
);
5577 BNX2X_ERR("PXP2 RD_INIT failed\n");
5581 REG_WR(bp
, PXP2_REG_RQ_DISABLE_INPUTS
, 0);
5582 REG_WR(bp
, PXP2_REG_RD_DISABLE_INPUTS
, 0);
5584 bnx2x_init_block(bp
, DMAE_BLOCK
, COMMON_STAGE
);
5586 /* clean the DMAE memory */
5588 bnx2x_init_fill(bp
, TSEM_REG_PRAM
, 0, 8);
5590 bnx2x_init_block(bp
, TCM_BLOCK
, COMMON_STAGE
);
5591 bnx2x_init_block(bp
, UCM_BLOCK
, COMMON_STAGE
);
5592 bnx2x_init_block(bp
, CCM_BLOCK
, COMMON_STAGE
);
5593 bnx2x_init_block(bp
, XCM_BLOCK
, COMMON_STAGE
);
5595 bnx2x_read_dmae(bp
, XSEM_REG_PASSIVE_BUFFER
, 3);
5596 bnx2x_read_dmae(bp
, CSEM_REG_PASSIVE_BUFFER
, 3);
5597 bnx2x_read_dmae(bp
, TSEM_REG_PASSIVE_BUFFER
, 3);
5598 bnx2x_read_dmae(bp
, USEM_REG_PASSIVE_BUFFER
, 3);
5600 bnx2x_init_block(bp
, QM_BLOCK
, COMMON_STAGE
);
5601 /* soft reset pulse */
5602 REG_WR(bp
, QM_REG_SOFT_RESET
, 1);
5603 REG_WR(bp
, QM_REG_SOFT_RESET
, 0);
5606 bnx2x_init_block(bp
, TIMERS_BLOCK
, COMMON_STAGE
);
5609 bnx2x_init_block(bp
, DQ_BLOCK
, COMMON_STAGE
);
5610 REG_WR(bp
, DORQ_REG_DPM_CID_OFST
, BCM_PAGE_SHIFT
);
5611 if (!CHIP_REV_IS_SLOW(bp
)) {
5612 /* enable hw interrupt from doorbell Q */
5613 REG_WR(bp
, DORQ_REG_DORQ_INT_MASK
, 0);
5616 bnx2x_init_block(bp
, BRB1_BLOCK
, COMMON_STAGE
);
5617 bnx2x_init_block(bp
, PRS_BLOCK
, COMMON_STAGE
);
5618 REG_WR(bp
, PRS_REG_A_PRSU_20
, 0xf);
5620 REG_WR(bp
, PRS_REG_NIC_MODE
, 1);
5621 if (CHIP_IS_E1H(bp
))
5622 REG_WR(bp
, PRS_REG_E1HOV_MODE
, IS_E1HMF(bp
));
5624 bnx2x_init_block(bp
, TSDM_BLOCK
, COMMON_STAGE
);
5625 bnx2x_init_block(bp
, CSDM_BLOCK
, COMMON_STAGE
);
5626 bnx2x_init_block(bp
, USDM_BLOCK
, COMMON_STAGE
);
5627 bnx2x_init_block(bp
, XSDM_BLOCK
, COMMON_STAGE
);
5629 bnx2x_init_fill(bp
, TSTORM_INTMEM_ADDR
, 0, STORM_INTMEM_SIZE(bp
));
5630 bnx2x_init_fill(bp
, USTORM_INTMEM_ADDR
, 0, STORM_INTMEM_SIZE(bp
));
5631 bnx2x_init_fill(bp
, CSTORM_INTMEM_ADDR
, 0, STORM_INTMEM_SIZE(bp
));
5632 bnx2x_init_fill(bp
, XSTORM_INTMEM_ADDR
, 0, STORM_INTMEM_SIZE(bp
));
5634 bnx2x_init_block(bp
, TSEM_BLOCK
, COMMON_STAGE
);
5635 bnx2x_init_block(bp
, USEM_BLOCK
, COMMON_STAGE
);
5636 bnx2x_init_block(bp
, CSEM_BLOCK
, COMMON_STAGE
);
5637 bnx2x_init_block(bp
, XSEM_BLOCK
, COMMON_STAGE
);
5640 REG_WR(bp
, GRCBASE_MISC
+ MISC_REGISTERS_RESET_REG_1_CLEAR
,
5642 REG_WR(bp
, GRCBASE_MISC
+ MISC_REGISTERS_RESET_REG_1_SET
,
5645 bnx2x_init_block(bp
, UPB_BLOCK
, COMMON_STAGE
);
5646 bnx2x_init_block(bp
, XPB_BLOCK
, COMMON_STAGE
);
5647 bnx2x_init_block(bp
, PBF_BLOCK
, COMMON_STAGE
);
5649 REG_WR(bp
, SRC_REG_SOFT_RST
, 1);
5650 for (i
= SRC_REG_KEYRSS0_0
; i
<= SRC_REG_KEYRSS1_9
; i
+= 4) {
5651 REG_WR(bp
, i
, 0xc0cac01a);
5652 /* TODO: replace with something meaningful */
5654 bnx2x_init_block(bp
, SRCH_BLOCK
, COMMON_STAGE
);
5655 REG_WR(bp
, SRC_REG_SOFT_RST
, 0);
5657 if (sizeof(union cdu_context
) != 1024)
5658 /* we currently assume that a context is 1024 bytes */
5659 printk(KERN_ALERT PFX
"please adjust the size of"
5660 " cdu_context(%ld)\n", (long)sizeof(union cdu_context
));
5662 bnx2x_init_block(bp
, CDU_BLOCK
, COMMON_STAGE
);
5663 val
= (4 << 24) + (0 << 12) + 1024;
5664 REG_WR(bp
, CDU_REG_CDU_GLOBAL_PARAMS
, val
);
5665 if (CHIP_IS_E1(bp
)) {
5666 /* !!! fix pxp client crdit until excel update */
5667 REG_WR(bp
, CDU_REG_CDU_DEBUG
, 0x264);
5668 REG_WR(bp
, CDU_REG_CDU_DEBUG
, 0);
5671 bnx2x_init_block(bp
, CFC_BLOCK
, COMMON_STAGE
);
5672 REG_WR(bp
, CFC_REG_INIT_REG
, 0x7FF);
5673 /* enable context validation interrupt from CFC */
5674 REG_WR(bp
, CFC_REG_CFC_INT_MASK
, 0);
5676 /* set the thresholds to prevent CFC/CDU race */
5677 REG_WR(bp
, CFC_REG_DEBUG0
, 0x20020000);
5679 bnx2x_init_block(bp
, HC_BLOCK
, COMMON_STAGE
);
5680 bnx2x_init_block(bp
, MISC_AEU_BLOCK
, COMMON_STAGE
);
5682 /* PXPCS COMMON comes here */
5683 bnx2x_init_block(bp
, PXPCS_BLOCK
, COMMON_STAGE
);
5684 /* Reset PCIE errors for debug */
5685 REG_WR(bp
, 0x2814, 0xffffffff);
5686 REG_WR(bp
, 0x3820, 0xffffffff);
5688 /* EMAC0 COMMON comes here */
5689 bnx2x_init_block(bp
, EMAC0_BLOCK
, COMMON_STAGE
);
5690 /* EMAC1 COMMON comes here */
5691 bnx2x_init_block(bp
, EMAC1_BLOCK
, COMMON_STAGE
);
5692 /* DBU COMMON comes here */
5693 bnx2x_init_block(bp
, DBU_BLOCK
, COMMON_STAGE
);
5694 /* DBG COMMON comes here */
5695 bnx2x_init_block(bp
, DBG_BLOCK
, COMMON_STAGE
);
5697 bnx2x_init_block(bp
, NIG_BLOCK
, COMMON_STAGE
);
5698 if (CHIP_IS_E1H(bp
)) {
5699 REG_WR(bp
, NIG_REG_LLH_MF_MODE
, IS_E1HMF(bp
));
5700 REG_WR(bp
, NIG_REG_LLH_E1HOV_MODE
, IS_E1HMF(bp
));
5703 if (CHIP_REV_IS_SLOW(bp
))
5706 /* finish CFC init */
5707 val
= reg_poll(bp
, CFC_REG_LL_INIT_DONE
, 1, 100, 10);
5709 BNX2X_ERR("CFC LL_INIT failed\n");
5712 val
= reg_poll(bp
, CFC_REG_AC_INIT_DONE
, 1, 100, 10);
5714 BNX2X_ERR("CFC AC_INIT failed\n");
5717 val
= reg_poll(bp
, CFC_REG_CAM_INIT_DONE
, 1, 100, 10);
5719 BNX2X_ERR("CFC CAM_INIT failed\n");
5722 REG_WR(bp
, CFC_REG_DEBUG0
, 0);
5724 /* read NIG statistic
5725 to see if this is our first up since powerup */
5726 bnx2x_read_dmae(bp
, NIG_REG_STAT2_BRB_OCTET
, 2);
5727 val
= *bnx2x_sp(bp
, wb_data
[0]);
5729 /* do internal memory self test */
5730 if ((CHIP_IS_E1(bp
)) && (val
== 0) && bnx2x_int_mem_test(bp
)) {
5731 BNX2X_ERR("internal mem self test failed\n");
5735 switch (XGXS_EXT_PHY_TYPE(bp
->link_params
.ext_phy_config
)) {
5736 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072
:
5737 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073
:
5738 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726
:
5739 bp
->port
.need_hw_lock
= 1;
5742 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101
:
5743 /* Fan failure is indicated by SPIO 5 */
5744 bnx2x_set_spio(bp
, MISC_REGISTERS_SPIO_5
,
5745 MISC_REGISTERS_SPIO_INPUT_HI_Z
);
5747 /* set to active low mode */
5748 val
= REG_RD(bp
, MISC_REG_SPIO_INT
);
5749 val
|= ((1 << MISC_REGISTERS_SPIO_5
) <<
5750 MISC_REGISTERS_SPIO_INT_OLD_SET_POS
);
5751 REG_WR(bp
, MISC_REG_SPIO_INT
, val
);
5753 /* enable interrupt to signal the IGU */
5754 val
= REG_RD(bp
, MISC_REG_SPIO_EVENT_EN
);
5755 val
|= (1 << MISC_REGISTERS_SPIO_5
);
5756 REG_WR(bp
, MISC_REG_SPIO_EVENT_EN
, val
);
5763 /* clear PXP2 attentions */
5764 REG_RD(bp
, PXP2_REG_PXP2_INT_STS_CLR_0
);
5766 enable_blocks_attention(bp
);
5768 if (!BP_NOMCP(bp
)) {
5769 bnx2x_acquire_phy_lock(bp
);
5770 bnx2x_common_init_phy(bp
, bp
->common
.shmem_base
);
5771 bnx2x_release_phy_lock(bp
);
5773 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
5778 static int bnx2x_init_port(struct bnx2x
*bp
)
5780 int port
= BP_PORT(bp
);
5781 int init_stage
= port
? PORT1_STAGE
: PORT0_STAGE
;
5785 DP(BNX2X_MSG_MCP
, "starting port init port %x\n", port
);
5787 REG_WR(bp
, NIG_REG_MASK_INTERRUPT_PORT0
+ port
*4, 0);
5789 /* Port PXP comes here */
5790 bnx2x_init_block(bp
, PXP_BLOCK
, init_stage
);
5791 /* Port PXP2 comes here */
5792 bnx2x_init_block(bp
, PXP2_BLOCK
, init_stage
);
5797 wb_write
[0] = ONCHIP_ADDR1(bp
->timers_mapping
);
5798 wb_write
[1] = ONCHIP_ADDR2(bp
->timers_mapping
);
5799 REG_WR_DMAE(bp
, PXP2_REG_RQ_ONCHIP_AT
+ i
*8, wb_write
, 2);
5800 REG_WR(bp
, PXP2_REG_PSWRQ_TM0_L2P
+ func
*4, PXP_ONE_ILT(i
));
5805 wb_write
[0] = ONCHIP_ADDR1(bp
->qm_mapping
);
5806 wb_write
[1] = ONCHIP_ADDR2(bp
->qm_mapping
);
5807 REG_WR_DMAE(bp
, PXP2_REG_RQ_ONCHIP_AT
+ i
*8, wb_write
, 2);
5808 REG_WR(bp
, PXP2_REG_PSWRQ_QM0_L2P
+ func
*4, PXP_ONE_ILT(i
));
5813 wb_write
[0] = ONCHIP_ADDR1(bp
->t1_mapping
);
5814 wb_write
[1] = ONCHIP_ADDR2(bp
->t1_mapping
);
5815 REG_WR_DMAE(bp
, PXP2_REG_RQ_ONCHIP_AT
+ i
*8, wb_write
, 2);
5816 REG_WR(bp
, PXP2_REG_PSWRQ_SRC0_L2P
+ func
*4, PXP_ONE_ILT(i
));
5818 /* Port CMs come here */
5819 bnx2x_init_block(bp
, XCM_BLOCK
, init_stage
);
5821 /* Port QM comes here */
5823 REG_WR(bp
, TM_REG_LIN0_SCAN_TIME
+ func
*4, 1024/64*20);
5824 REG_WR(bp
, TM_REG_LIN0_MAX_ACTIVE_CID
+ func
*4, 31);
5826 bnx2x_init_block(bp
, TIMERS_BLOCK
, init_stage
);
5828 /* Port DQ comes here */
5829 bnx2x_init_block(bp
, DQ_BLOCK
, init_stage
);
5831 bnx2x_init_block(bp
, BRB1_BLOCK
, init_stage
);
5832 if (CHIP_REV_IS_SLOW(bp
) && !CHIP_IS_E1H(bp
)) {
5833 /* no pause for emulation and FPGA */
5838 low
= ((bp
->flags
& ONE_PORT_FLAG
) ? 160 : 246);
5839 else if (bp
->dev
->mtu
> 4096) {
5840 if (bp
->flags
& ONE_PORT_FLAG
)
5844 /* (24*1024 + val*4)/256 */
5845 low
= 96 + (val
/64) + ((val
% 64) ? 1 : 0);
5848 low
= ((bp
->flags
& ONE_PORT_FLAG
) ? 80 : 160);
5849 high
= low
+ 56; /* 14*1024/256 */
5851 REG_WR(bp
, BRB1_REG_PAUSE_LOW_THRESHOLD_0
+ port
*4, low
);
5852 REG_WR(bp
, BRB1_REG_PAUSE_HIGH_THRESHOLD_0
+ port
*4, high
);
5855 /* Port PRS comes here */
5856 bnx2x_init_block(bp
, PRS_BLOCK
, init_stage
);
5857 /* Port TSDM comes here */
5858 bnx2x_init_block(bp
, TSDM_BLOCK
, init_stage
);
5859 /* Port CSDM comes here */
5860 bnx2x_init_block(bp
, CSDM_BLOCK
, init_stage
);
5861 /* Port USDM comes here */
5862 bnx2x_init_block(bp
, USDM_BLOCK
, init_stage
);
5863 /* Port XSDM comes here */
5864 bnx2x_init_block(bp
, XSDM_BLOCK
, init_stage
);
5866 bnx2x_init_block(bp
, TSEM_BLOCK
, init_stage
);
5867 bnx2x_init_block(bp
, USEM_BLOCK
, init_stage
);
5868 bnx2x_init_block(bp
, CSEM_BLOCK
, init_stage
);
5869 bnx2x_init_block(bp
, XSEM_BLOCK
, init_stage
);
5871 /* Port UPB comes here */
5872 bnx2x_init_block(bp
, UPB_BLOCK
, init_stage
);
5873 /* Port XPB comes here */
5874 bnx2x_init_block(bp
, XPB_BLOCK
, init_stage
);
5876 bnx2x_init_block(bp
, PBF_BLOCK
, init_stage
);
5878 /* configure PBF to work without PAUSE mtu 9000 */
5879 REG_WR(bp
, PBF_REG_P0_PAUSE_ENABLE
+ port
*4, 0);
5881 /* update threshold */
5882 REG_WR(bp
, PBF_REG_P0_ARB_THRSH
+ port
*4, (9040/16));
5883 /* update init credit */
5884 REG_WR(bp
, PBF_REG_P0_INIT_CRD
+ port
*4, (9040/16) + 553 - 22);
5887 REG_WR(bp
, PBF_REG_INIT_P0
+ port
*4, 1);
5889 REG_WR(bp
, PBF_REG_INIT_P0
+ port
*4, 0);
5892 /* tell the searcher where the T2 table is */
5893 REG_WR(bp
, SRC_REG_COUNTFREE0
+ func
*4, 16*1024/64);
5895 wb_write
[0] = U64_LO(bp
->t2_mapping
);
5896 wb_write
[1] = U64_HI(bp
->t2_mapping
);
5897 REG_WR_DMAE(bp
, SRC_REG_FIRSTFREE0
+ func
*4, wb_write
, 2);
5898 wb_write
[0] = U64_LO((u64
)bp
->t2_mapping
+ 16*1024 - 64);
5899 wb_write
[1] = U64_HI((u64
)bp
->t2_mapping
+ 16*1024 - 64);
5900 REG_WR_DMAE(bp
, SRC_REG_LASTFREE0
+ func
*4, wb_write
, 2);
5902 REG_WR(bp
, SRC_REG_NUMBER_HASH_BITS0
+ func
*4, 10);
5903 /* Port SRCH comes here */
5905 /* Port CDU comes here */
5906 bnx2x_init_block(bp
, CDU_BLOCK
, init_stage
);
5907 /* Port CFC comes here */
5908 bnx2x_init_block(bp
, CFC_BLOCK
, init_stage
);
5910 if (CHIP_IS_E1(bp
)) {
5911 REG_WR(bp
, HC_REG_LEADING_EDGE_0
+ port
*8, 0);
5912 REG_WR(bp
, HC_REG_TRAILING_EDGE_0
+ port
*8, 0);
5914 bnx2x_init_block(bp
, HC_BLOCK
, init_stage
);
5916 bnx2x_init_block(bp
, MISC_AEU_BLOCK
, init_stage
);
5917 /* init aeu_mask_attn_func_0/1:
5918 * - SF mode: bits 3-7 are masked. only bits 0-2 are in use
5919 * - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
5920 * bits 4-7 are used for "per vn group attention" */
5921 REG_WR(bp
, MISC_REG_AEU_MASK_ATTN_FUNC_0
+ port
*4,
5922 (IS_E1HMF(bp
) ? 0xF7 : 0x7));
5924 /* Port PXPCS comes here */
5925 bnx2x_init_block(bp
, PXPCS_BLOCK
, init_stage
);
5926 /* Port EMAC0 comes here */
5927 bnx2x_init_block(bp
, EMAC0_BLOCK
, init_stage
);
5928 /* Port EMAC1 comes here */
5929 bnx2x_init_block(bp
, EMAC1_BLOCK
, init_stage
);
5930 /* Port DBU comes here */
5931 bnx2x_init_block(bp
, DBU_BLOCK
, init_stage
);
5932 /* Port DBG comes here */
5933 bnx2x_init_block(bp
, DBG_BLOCK
, init_stage
);
5935 bnx2x_init_block(bp
, NIG_BLOCK
, init_stage
);
5937 REG_WR(bp
, NIG_REG_XGXS_SERDES0_MODE_SEL
+ port
*4, 1);
5939 if (CHIP_IS_E1H(bp
)) {
5940 /* 0x2 disable e1hov, 0x1 enable */
5941 REG_WR(bp
, NIG_REG_LLH0_BRB1_DRV_MASK_MF
+ port
*4,
5942 (IS_E1HMF(bp
) ? 0x1 : 0x2));
5944 /* support pause requests from USDM, TSDM and BRB */
5945 REG_WR(bp
, NIG_REG_LLFC_EGRESS_SRC_ENABLE_0
+ port
*4, 0x7);
5948 REG_WR(bp
, NIG_REG_LLFC_ENABLE_0
+ port
*4, 0);
5949 REG_WR(bp
, NIG_REG_LLFC_OUT_EN_0
+ port
*4, 0);
5950 REG_WR(bp
, NIG_REG_PAUSE_ENABLE_0
+ port
*4, 1);
5954 /* Port MCP comes here */
5955 bnx2x_init_block(bp
, MCP_BLOCK
, init_stage
);
5956 /* Port DMAE comes here */
5957 bnx2x_init_block(bp
, DMAE_BLOCK
, init_stage
);
5959 switch (XGXS_EXT_PHY_TYPE(bp
->link_params
.ext_phy_config
)) {
5960 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726
:
5962 u32 swap_val
, swap_override
, aeu_gpio_mask
, offset
;
5964 bnx2x_set_gpio(bp
, MISC_REGISTERS_GPIO_3
,
5965 MISC_REGISTERS_GPIO_INPUT_HI_Z
, port
);
5967 /* The GPIO should be swapped if the swap register is
5969 swap_val
= REG_RD(bp
, NIG_REG_PORT_SWAP
);
5970 swap_override
= REG_RD(bp
, NIG_REG_STRAP_OVERRIDE
);
5972 /* Select function upon port-swap configuration */
5974 offset
= MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0
;
5975 aeu_gpio_mask
= (swap_val
&& swap_override
) ?
5976 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1
:
5977 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0
;
5979 offset
= MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0
;
5980 aeu_gpio_mask
= (swap_val
&& swap_override
) ?
5981 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0
:
5982 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1
;
5984 val
= REG_RD(bp
, offset
);
5985 /* add GPIO3 to group */
5986 val
|= aeu_gpio_mask
;
5987 REG_WR(bp
, offset
, val
);
5991 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101
:
5992 /* add SPIO 5 to group 0 */
5993 val
= REG_RD(bp
, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0
);
5994 val
|= AEU_INPUTS_ATTN_BITS_SPIO5
;
5995 REG_WR(bp
, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0
, val
);
6002 bnx2x__link_reset(bp
);
#define ILT_PER_FUNC		(768/2)
#define FUNC_ILT_BASE(func)	(func * ILT_PER_FUNC)
/* the phys address is shifted right 12 bits and a valid bit (1) is
   added at the 53rd bit;
   then since this is a wide register(TM)
   we split it into two 32 bit writes */
#define ONCHIP_ADDR1(x)		((u32)(((u64)x >> 12) & 0xFFFFFFFF))
#define ONCHIP_ADDR2(x)		((u32)((1 << 20) | ((u64)x >> 44)))
#define PXP_ONE_ILT(x)		(((x) << 10) | x)
#define PXP_ILT_RANGE(f, l)	(((l) << 10) | f)

#define CNIC_ILT_LINES		0
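
/* Worked example for the two macros above (the address is chosen purely
 * for illustration): for a DMA address of 0x0000001234567000,
 * ONCHIP_ADDR1() yields 0x01234567 (address bits 12..43) and
 * ONCHIP_ADDR2() yields 0x00100000 (only the valid bit, bit 20, is set
 * since address bits 44..63 are zero).
 */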
static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
{
	int reg;

	if (CHIP_IS_E1H(bp))
		reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
	else
		reg = PXP2_REG_RQ_ONCHIP_AT + index*8;

	bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
}
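
/* Note: each ILT line occupies 8 bytes in the PXP2 on-chip address table
 * (hence the index*8 offset), and the two 32-bit halves produced by
 * ONCHIP_ADDR1()/ONCHIP_ADDR2() are written back-to-back through
 * bnx2x_wb_wr() as a single wide-register update.
 */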
static int bnx2x_init_func(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	u32 addr, val;
	int i;

	DP(BNX2X_MSG_MCP, "starting func init  func %x\n", func);

	/* set MSI reconfigure capability */
	addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
	val = REG_RD(bp, addr);
	val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
	REG_WR(bp, addr, val);

	i = FUNC_ILT_BASE(func);

	bnx2x_ilt_wr(bp, i, bnx2x_sp_mapping(bp, context));
	if (CHIP_IS_E1H(bp)) {
		REG_WR(bp, PXP2_REG_RQ_CDU_FIRST_ILT, i);
		REG_WR(bp, PXP2_REG_RQ_CDU_LAST_ILT, i + CNIC_ILT_LINES);
	} else /* E1 */
		REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4,
		       PXP_ILT_RANGE(i, i + CNIC_ILT_LINES));

	if (CHIP_IS_E1H(bp)) {
		for (i = 0; i < 9; i++)
			bnx2x_init_block(bp,
					 cm_blocks[i], FUNC0_STAGE + func);

		REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
		REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov);
	}

	/* HC init per function */
	if (CHIP_IS_E1H(bp)) {
		REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);

		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
	}
	bnx2x_init_block(bp, HC_BLOCK, FUNC0_STAGE + func);

	/* Reset PCIE errors for debug */
	REG_WR(bp, 0x2114, 0xffffffff);
	REG_WR(bp, 0x2120, 0xffffffff);

	return 0;
}
static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
{
	int i, rc = 0;

	DP(BNX2X_MSG_MCP, "function %d  load_code %x\n",
	   BP_FUNC(bp), load_code);

	mutex_init(&bp->dmae_mutex);
	bnx2x_gunzip_init(bp);

	switch (load_code) {
	case FW_MSG_CODE_DRV_LOAD_COMMON:
		rc = bnx2x_init_common(bp);
		if (rc)
			goto init_hw_err;
		/* no break */

	case FW_MSG_CODE_DRV_LOAD_PORT:
		rc = bnx2x_init_port(bp);
		if (rc)
			goto init_hw_err;
		/* no break */

	case FW_MSG_CODE_DRV_LOAD_FUNCTION:
		rc = bnx2x_init_func(bp);
		if (rc)
			goto init_hw_err;
		break;

	default:
		BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
		break;
	}

	if (!BP_NOMCP(bp)) {
		int func = BP_FUNC(bp);

		bp->fw_drv_pulse_wr_seq =
				(SHMEM_RD(bp, func_mb[func].drv_pulse_mb) &
				 DRV_PULSE_SEQ_MASK);
		bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
		DP(BNX2X_MSG_MCP, "drv_pulse 0x%x  func_stx 0x%x\n",
		   bp->fw_drv_pulse_wr_seq, bp->func_stx);
	}

	/* this needs to be done before gunzip end */
	bnx2x_zero_def_sb(bp);
	for_each_queue(bp, i)
		bnx2x_zero_sb(bp, BP_L_ID(bp) + i);

init_hw_err:
	bnx2x_gunzip_end(bp);

	return rc;
}
/* send the MCP a request, block until there is a reply */
static u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
{
	int func = BP_FUNC(bp);
	u32 seq = ++bp->fw_seq;
	u32 rc = 0;
	u32 cnt = 1;
	u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;

	SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
	DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));

	do {
		/* let the FW do its magic ... */
		msleep(delay);

		rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);

		/* Give the FW up to 2 seconds (200*10ms) */
	} while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 200));

	DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
	   cnt*delay, rc, seq);

	/* is this a reply to our command? */
	if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK)) {
		rc &= FW_MSG_CODE_MASK;

	} else {
		/* FW BUG! */
		BNX2X_ERR("FW failed to respond!\n");
		rc = 0;
	}

	return rc;
}
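
/* Timeout arithmetic for the polling loop above: on real silicon the
 * per-iteration delay is 10ms, so 200 iterations give the firmware up to
 * ~2 seconds to post a reply with a matching sequence number; on slow
 * emulation/FPGA platforms (CHIP_REV_IS_SLOW) the delay grows to 100ms
 * per iteration, i.e. up to ~20 seconds.
 */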
static void bnx2x_free_mem(struct bnx2x *bp)
{

#define BNX2X_PCI_FREE(x, y, size) \
	do { \
		if (x) { \
			pci_free_consistent(bp->pdev, size, x, y); \
			x = NULL; \
		} \
	} while (0)

#define BNX2X_FREE(x) \
	do { \
		if (x) { \
			vfree(x); \
			x = NULL; \
		} \
	} while (0)

	int i;

	for_each_queue(bp, i) {

		BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
			       bnx2x_fp(bp, i, status_blk_mapping),
			       sizeof(struct host_status_block) +
			       sizeof(struct eth_tx_db_data));
	}

	for_each_rx_queue(bp, i) {

		/* fastpath rx rings: rx_buf rx_desc rx_comp */
		BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
		BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
			       bnx2x_fp(bp, i, rx_desc_mapping),
			       sizeof(struct eth_rx_bd) * NUM_RX_BD);

		BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
			       bnx2x_fp(bp, i, rx_comp_mapping),
			       sizeof(struct eth_fast_path_rx_cqe) *
			       NUM_RCQ_BD);

		BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
		BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
			       bnx2x_fp(bp, i, rx_sge_mapping),
			       BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
	}

	for_each_tx_queue(bp, i) {

		/* fastpath tx rings: tx_buf tx_desc */
		BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
		BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
			       bnx2x_fp(bp, i, tx_desc_mapping),
			       sizeof(struct eth_tx_bd) * NUM_TX_BD);
	}
	/* end of fastpath */

	BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
		       sizeof(struct host_def_status_block));

	BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
		       sizeof(struct bnx2x_slowpath));

	BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
	BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
	BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
	BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);

	BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);

#undef BNX2X_PCI_FREE
#undef BNX2X_FREE
}
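
/* Note: bnx2x_alloc_mem() below bails out to its error path on any failed
 * allocation and unwinds through this function, which is why the free
 * macros above tolerate pointers that were never (or not yet) allocated.
 */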
static int bnx2x_alloc_mem(struct bnx2x *bp)
{

#define BNX2X_PCI_ALLOC(x, y, size) \
	do { \
		x = pci_alloc_consistent(bp->pdev, size, y); \
		if (x == NULL) \
			goto alloc_mem_err; \
		memset(x, 0, size); \
	} while (0)

#define BNX2X_ALLOC(x, size) \
	do { \
		x = vmalloc(size); \
		if (x == NULL) \
			goto alloc_mem_err; \
		memset(x, 0, size); \
	} while (0)

	int i;

	for_each_queue(bp, i) {
		bnx2x_fp(bp, i, bp) = bp;

		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
				&bnx2x_fp(bp, i, status_blk_mapping),
				sizeof(struct host_status_block) +
				sizeof(struct eth_tx_db_data));
	}

	for_each_rx_queue(bp, i) {

		/* fastpath rx rings: rx_buf rx_desc rx_comp */
		BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
				sizeof(struct sw_rx_bd) * NUM_RX_BD);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
				&bnx2x_fp(bp, i, rx_desc_mapping),
				sizeof(struct eth_rx_bd) * NUM_RX_BD);

		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
				&bnx2x_fp(bp, i, rx_comp_mapping),
				sizeof(struct eth_fast_path_rx_cqe) *
				NUM_RCQ_BD);

		BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
				sizeof(struct sw_rx_page) * NUM_RX_SGE);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
				&bnx2x_fp(bp, i, rx_sge_mapping),
				BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
	}

	for_each_tx_queue(bp, i) {

		bnx2x_fp(bp, i, hw_tx_prods) =
				(void *)(bnx2x_fp(bp, i, status_blk) + 1);

		bnx2x_fp(bp, i, tx_prods_mapping) =
				bnx2x_fp(bp, i, status_blk_mapping) +
				sizeof(struct host_status_block);

		/* fastpath tx rings: tx_buf tx_desc */
		BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
				sizeof(struct sw_tx_bd) * NUM_TX_BD);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
				&bnx2x_fp(bp, i, tx_desc_mapping),
				sizeof(struct eth_tx_bd) * NUM_TX_BD);
	}
	/* end of fastpath */

	BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
			sizeof(struct host_def_status_block));

	BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
			sizeof(struct bnx2x_slowpath));

	BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);

	for (i = 0; i < 64*1024; i += 64) {
		*(u64 *)((char *)bp->t1 + i + 56) = 0x0UL;
		*(u64 *)((char *)bp->t1 + i + 3) = 0x0UL;
	}

	/* allocate searcher T2 table
	   we allocate 1/4 of alloc num for T2
	   (which is not entered into the ILT) */
	BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);

	for (i = 0; i < 16*1024; i += 64)
		*(u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;

	/* now fixup the last line in the block to point to the next block */
	*(u64 *)((char *)bp->t2 + 1024*16-8) = bp->t2_mapping;

	/* Timer block array (MAX_CONN*8) phys uncached for now 1024 conns */
	BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);

	/* QM queues (128*MAX_CONN) */
	BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);

	/* Slow path ring */
	BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);

	return 0;

alloc_mem_err:
	bnx2x_free_mem(bp);
	return -ENOMEM;

#undef BNX2X_PCI_ALLOC
#undef BNX2X_ALLOC
}
static void bnx2x_free_tx_skbs(struct bnx2x *bp)
{
	int i;

	for_each_tx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		u16 bd_cons = fp->tx_bd_cons;
		u16 sw_prod = fp->tx_pkt_prod;
		u16 sw_cons = fp->tx_pkt_cons;

		while (sw_cons != sw_prod) {
			bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
			sw_cons++;
		}
	}
}
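
/* Note: bnx2x_free_tx_pkt() unmaps and frees one pending packet and
 * returns the updated BD consumer, so the loop above simply walks
 * sw_cons up to sw_prod to drain whatever the hardware never completed
 * before the ring is torn down.
 */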
static void bnx2x_free_rx_skbs(struct bnx2x *bp)
{
	int i, j;

	for_each_rx_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		for (i = 0; i < NUM_RX_BD; i++) {
			struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
			struct sk_buff *skb = rx_buf->skb;

			if (skb == NULL)
				continue;

			pci_unmap_single(bp->pdev,
					 pci_unmap_addr(rx_buf, mapping),
					 bp->rx_buf_size, PCI_DMA_FROMDEVICE);

			rx_buf->skb = NULL;
			dev_kfree_skb(skb);
		}
		if (!fp->disable_tpa)
			bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
					    ETH_MAX_AGGREGATION_QUEUES_E1 :
					    ETH_MAX_AGGREGATION_QUEUES_E1H);
	}
}
static void bnx2x_free_skbs(struct bnx2x *bp)
{
	bnx2x_free_tx_skbs(bp);
	bnx2x_free_rx_skbs(bp);
}
static void bnx2x_free_msix_irqs(struct bnx2x *bp)
{
	int i, offset = 1;

	free_irq(bp->msix_table[0].vector, bp->dev);
	DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
	   bp->msix_table[0].vector);

	for_each_queue(bp, i) {
		DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq  "
		   "state %x\n", i, bp->msix_table[i + offset].vector,
		   bnx2x_fp(bp, i, state));

		free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
	}
}
static void bnx2x_free_irq(struct bnx2x *bp)
{
	if (bp->flags & USING_MSIX_FLAG) {
		bnx2x_free_msix_irqs(bp);
		pci_disable_msix(bp->pdev);
		bp->flags &= ~USING_MSIX_FLAG;

	} else if (bp->flags & USING_MSI_FLAG) {
		free_irq(bp->pdev->irq, bp->dev);
		pci_disable_msi(bp->pdev);
		bp->flags &= ~USING_MSI_FLAG;

	} else
		free_irq(bp->pdev->irq, bp->dev);
}
static int bnx2x_enable_msix(struct bnx2x *bp)
{
	int i, rc, offset = 1;
	int igu_vec = 0;

	bp->msix_table[0].entry = igu_vec;
	DP(NETIF_MSG_IFUP, "msix_table[0].entry = %d (slowpath)\n", igu_vec);

	for_each_queue(bp, i) {
		igu_vec = BP_L_ID(bp) + offset + i;
		bp->msix_table[i + offset].entry = igu_vec;
		DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
		   "(fastpath #%u)\n", i + offset, igu_vec, i);
	}

	rc = pci_enable_msix(bp->pdev, &bp->msix_table[0],
			     BNX2X_NUM_QUEUES(bp) + offset);
	if (rc) {
		DP(NETIF_MSG_IFUP, "MSI-X is not attainable  rc %d\n", rc);
		return rc;
	}

	bp->flags |= USING_MSIX_FLAG;

	return 0;
}
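
/* MSI-X vector layout set up above: entry 0 always carries the slowpath
 * (default status block) interrupt, and entries 1..BNX2X_NUM_QUEUES(bp)
 * carry one fastpath queue each, which is why the table passed to
 * pci_enable_msix() is sized with the extra "offset" slot.
 */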
static int bnx2x_req_msix_irqs(struct bnx2x *bp)
{
	int i, rc, offset = 1;

	rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
			 bp->dev->name, bp->dev);
	if (rc) {
		BNX2X_ERR("request sp irq failed\n");
		return -EBUSY;
	}

	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		sprintf(fp->name, "%s.fp%d", bp->dev->name, i);
		rc = request_irq(bp->msix_table[i + offset].vector,
				 bnx2x_msix_fp_int, 0, fp->name, fp);
		if (rc) {
			BNX2X_ERR("request fp #%d irq failed  rc %d\n", i, rc);
			bnx2x_free_msix_irqs(bp);
			return -EBUSY;
		}

		fp->state = BNX2X_FP_STATE_IRQ;
	}

	i = BNX2X_NUM_QUEUES(bp);
	if (i > 1)
		printk(KERN_INFO PFX
		       "%s: using MSI-X IRQs: sp %d fp %d - %d\n",
		       bp->dev->name, bp->msix_table[0].vector,
		       bp->msix_table[offset].vector,
		       bp->msix_table[offset + i - 1].vector);
	else
		printk(KERN_INFO PFX "%s: using MSI-X IRQs: sp %d fp %d\n",
		       bp->dev->name, bp->msix_table[0].vector,
		       bp->msix_table[offset + i - 1].vector);

	return 0;
}
static int bnx2x_enable_msi(struct bnx2x *bp)
{
	int rc;

	rc = pci_enable_msi(bp->pdev);
	if (rc) {
		DP(NETIF_MSG_IFUP, "MSI is not attainable\n");
		return -1;
	}
	bp->flags |= USING_MSI_FLAG;

	return 0;
}
static int bnx2x_req_irq(struct bnx2x *bp)
{
	unsigned long flags;
	int rc;

	if (bp->flags & USING_MSI_FLAG)
		flags = 0;
	else
		flags = IRQF_SHARED;

	rc = request_irq(bp->pdev->irq, bnx2x_interrupt, flags,
			 bp->dev->name, bp->dev);
	if (!rc)
		bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;

	return rc;
}
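
/* IRQF_SHARED is requested only on the legacy INTx path above: an MSI
 * vector is exclusive to this device, so when USING_MSI_FLAG is set the
 * handler is registered with no sharing flags at all.
 */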
static void bnx2x_napi_enable(struct bnx2x *bp)
{
	int i;

	for_each_rx_queue(bp, i)
		napi_enable(&bnx2x_fp(bp, i, napi));
}

static void bnx2x_napi_disable(struct bnx2x *bp)
{
	int i;

	for_each_rx_queue(bp, i)
		napi_disable(&bnx2x_fp(bp, i, napi));
}
static void bnx2x_netif_start(struct bnx2x *bp)
{
	if (atomic_dec_and_test(&bp->intr_sem)) {
		if (netif_running(bp->dev)) {
			bnx2x_napi_enable(bp);
			bnx2x_int_enable(bp);
			if (bp->state == BNX2X_STATE_OPEN)
				netif_tx_wake_all_queues(bp->dev);
		}
	}
}

static void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
{
	bnx2x_int_disable_sync(bp, disable_hw);
	bnx2x_napi_disable(bp);
	netif_tx_disable(bp->dev);
	bp->dev->trans_start = jiffies;	/* prevent tx timeout */
}
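
/* bnx2x_netif_start() is gated on bp->intr_sem: NAPI and chip interrupts
 * are only re-enabled once atomic_dec_and_test() brings the semaphore back
 * to zero, so stacked disable/enable pairs do not re-arm interrupts
 * prematurely.
 */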
/* Init service functions */
6608 static void bnx2x_set_mac_addr_e1(struct bnx2x
*bp
, int set
)
6610 struct mac_configuration_cmd
*config
= bnx2x_sp(bp
, mac_config
);
6611 int port
= BP_PORT(bp
);
6614 * unicasts 0-31:port0 32-63:port1
6615 * multicast 64-127:port0 128-191:port1
6617 config
->hdr
.length
= 2;
6618 config
->hdr
.offset
= port
? 32 : 0;
6619 config
->hdr
.client_id
= bp
->fp
->cl_id
;
6620 config
->hdr
.reserved1
= 0;
6623 config
->config_table
[0].cam_entry
.msb_mac_addr
=
6624 swab16(*(u16
*)&bp
->dev
->dev_addr
[0]);
6625 config
->config_table
[0].cam_entry
.middle_mac_addr
=
6626 swab16(*(u16
*)&bp
->dev
->dev_addr
[2]);
6627 config
->config_table
[0].cam_entry
.lsb_mac_addr
=
6628 swab16(*(u16
*)&bp
->dev
->dev_addr
[4]);
6629 config
->config_table
[0].cam_entry
.flags
= cpu_to_le16(port
);
6631 config
->config_table
[0].target_table_entry
.flags
= 0;
6633 CAM_INVALIDATE(config
->config_table
[0]);
6634 config
->config_table
[0].target_table_entry
.client_id
= 0;
6635 config
->config_table
[0].target_table_entry
.vlan_id
= 0;
6637 DP(NETIF_MSG_IFUP
, "%s MAC (%04x:%04x:%04x)\n",
6638 (set
? "setting" : "clearing"),
6639 config
->config_table
[0].cam_entry
.msb_mac_addr
,
6640 config
->config_table
[0].cam_entry
.middle_mac_addr
,
6641 config
->config_table
[0].cam_entry
.lsb_mac_addr
);
6644 config
->config_table
[1].cam_entry
.msb_mac_addr
= cpu_to_le16(0xffff);
6645 config
->config_table
[1].cam_entry
.middle_mac_addr
= cpu_to_le16(0xffff);
6646 config
->config_table
[1].cam_entry
.lsb_mac_addr
= cpu_to_le16(0xffff);
6647 config
->config_table
[1].cam_entry
.flags
= cpu_to_le16(port
);
6649 config
->config_table
[1].target_table_entry
.flags
=
6650 TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST
;
6652 CAM_INVALIDATE(config
->config_table
[1]);
6653 config
->config_table
[1].target_table_entry
.client_id
= 0;
6654 config
->config_table
[1].target_table_entry
.vlan_id
= 0;
6656 bnx2x_sp_post(bp
, RAMROD_CMD_ID_ETH_SET_MAC
, 0,
6657 U64_HI(bnx2x_sp_mapping(bp
, mac_config
)),
6658 U64_LO(bnx2x_sp_mapping(bp
, mac_config
)), 0);
6661 static void bnx2x_set_mac_addr_e1h(struct bnx2x
*bp
, int set
)
6663 struct mac_configuration_cmd_e1h
*config
=
6664 (struct mac_configuration_cmd_e1h
*)bnx2x_sp(bp
, mac_config
);
6666 if (set
&& (bp
->state
!= BNX2X_STATE_OPEN
)) {
6667 DP(NETIF_MSG_IFUP
, "state is %x, returning\n", bp
->state
);
6671 /* CAM allocation for E1H
6672 * unicasts: by func number
6673 * multicast: 20+FUNC*20, 20 each
6675 config
->hdr
.length
= 1;
6676 config
->hdr
.offset
= BP_FUNC(bp
);
6677 config
->hdr
.client_id
= bp
->fp
->cl_id
;
6678 config
->hdr
.reserved1
= 0;
6681 config
->config_table
[0].msb_mac_addr
=
6682 swab16(*(u16
*)&bp
->dev
->dev_addr
[0]);
6683 config
->config_table
[0].middle_mac_addr
=
6684 swab16(*(u16
*)&bp
->dev
->dev_addr
[2]);
6685 config
->config_table
[0].lsb_mac_addr
=
6686 swab16(*(u16
*)&bp
->dev
->dev_addr
[4]);
6687 config
->config_table
[0].client_id
= BP_L_ID(bp
);
6688 config
->config_table
[0].vlan_id
= 0;
6689 config
->config_table
[0].e1hov_id
= cpu_to_le16(bp
->e1hov
);
6691 config
->config_table
[0].flags
= BP_PORT(bp
);
6693 config
->config_table
[0].flags
=
6694 MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE
;
6696 DP(NETIF_MSG_IFUP
, "%s MAC (%04x:%04x:%04x) E1HOV %d CLID %d\n",
6697 (set
? "setting" : "clearing"),
6698 config
->config_table
[0].msb_mac_addr
,
6699 config
->config_table
[0].middle_mac_addr
,
6700 config
->config_table
[0].lsb_mac_addr
, bp
->e1hov
, BP_L_ID(bp
));
6702 bnx2x_sp_post(bp
, RAMROD_CMD_ID_ETH_SET_MAC
, 0,
6703 U64_HI(bnx2x_sp_mapping(bp
, mac_config
)),
6704 U64_LO(bnx2x_sp_mapping(bp
, mac_config
)), 0);
6707 static int bnx2x_wait_ramrod(struct bnx2x
*bp
, int state
, int idx
,
6708 int *state_p
, int poll
)
6710 /* can take a while if any port is running */
6713 DP(NETIF_MSG_IFUP
, "%s for state to become %x on IDX [%d]\n",
6714 poll
? "polling" : "waiting", state
, idx
);
6719 bnx2x_rx_int(bp
->fp
, 10);
6720 /* if index is different from 0
6721 * the reply for some commands will
6722 * be on the non default queue
6725 bnx2x_rx_int(&bp
->fp
[idx
], 10);
6728 mb(); /* state is changed by bnx2x_sp_event() */
6729 if (*state_p
== state
) {
6730 #ifdef BNX2X_STOP_ON_ERROR
6731 DP(NETIF_MSG_IFUP
, "exit (cnt %d)\n", 5000 - cnt
);
6740 BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
6741 poll
? "polling" : "waiting", state
, idx
);
6742 #ifdef BNX2X_STOP_ON_ERROR
6749 static int bnx2x_setup_leading(struct bnx2x
*bp
)
6753 /* reset IGU state */
6754 bnx2x_ack_sb(bp
, bp
->fp
[0].sb_id
, CSTORM_ID
, 0, IGU_INT_ENABLE
, 0);
6757 bnx2x_sp_post(bp
, RAMROD_CMD_ID_ETH_PORT_SETUP
, 0, 0, 0, 0);
6759 /* Wait for completion */
6760 rc
= bnx2x_wait_ramrod(bp
, BNX2X_STATE_OPEN
, 0, &(bp
->state
), 0);
6765 static int bnx2x_setup_multi(struct bnx2x
*bp
, int index
)
6767 struct bnx2x_fastpath
*fp
= &bp
->fp
[index
];
6769 /* reset IGU state */
6770 bnx2x_ack_sb(bp
, fp
->sb_id
, CSTORM_ID
, 0, IGU_INT_ENABLE
, 0);
6773 fp
->state
= BNX2X_FP_STATE_OPENING
;
6774 bnx2x_sp_post(bp
, RAMROD_CMD_ID_ETH_CLIENT_SETUP
, index
, 0,
6777 /* Wait for completion */
6778 return bnx2x_wait_ramrod(bp
, BNX2X_FP_STATE_OPEN
, index
,
6782 static int bnx2x_poll(struct napi_struct
*napi
, int budget
);
6784 static void bnx2x_set_int_mode(struct bnx2x
*bp
)
6792 bp
->num_rx_queues
= num_queues
;
6793 bp
->num_tx_queues
= num_queues
;
6795 "set number of queues to %d\n", num_queues
);
6800 if (bp
->multi_mode
== ETH_RSS_MODE_REGULAR
)
6801 num_queues
= min_t(u32
, num_online_cpus(),
6802 BNX2X_MAX_QUEUES(bp
));
6805 bp
->num_rx_queues
= num_queues
;
6806 bp
->num_tx_queues
= num_queues
;
6807 DP(NETIF_MSG_IFUP
, "set number of rx queues to %d"
6808 " number of tx queues to %d\n",
6809 bp
->num_rx_queues
, bp
->num_tx_queues
);
6810 /* if we can't use MSI-X we only need one fp,
6811 * so try to enable MSI-X with the requested number of fp's
6812 * and fallback to MSI or legacy INTx with one fp
6814 if (bnx2x_enable_msix(bp
)) {
6815 /* failed to enable MSI-X */
6817 bp
->num_rx_queues
= num_queues
;
6818 bp
->num_tx_queues
= num_queues
;
6820 BNX2X_ERR("Multi requested but failed to "
6821 "enable MSI-X set number of "
6822 "queues to %d\n", num_queues
);
6826 bp
->dev
->real_num_tx_queues
= bp
->num_tx_queues
;
6829 static void bnx2x_set_rx_mode(struct net_device
*dev
);
6831 /* must be called with rtnl_lock */
6832 static int bnx2x_nic_load(struct bnx2x
*bp
, int load_mode
)
6836 #ifdef BNX2X_STOP_ON_ERROR
6837 DP(NETIF_MSG_IFUP
, "enter load_mode %d\n", load_mode
);
6838 if (unlikely(bp
->panic
))
6842 bp
->state
= BNX2X_STATE_OPENING_WAIT4_LOAD
;
6844 bnx2x_set_int_mode(bp
);
6846 if (bnx2x_alloc_mem(bp
))
6849 for_each_rx_queue(bp
, i
)
6850 bnx2x_fp(bp
, i
, disable_tpa
) =
6851 ((bp
->flags
& TPA_ENABLE_FLAG
) == 0);
6853 for_each_rx_queue(bp
, i
)
6854 netif_napi_add(bp
->dev
, &bnx2x_fp(bp
, i
, napi
),
6857 #ifdef BNX2X_STOP_ON_ERROR
6858 for_each_rx_queue(bp
, i
) {
6859 struct bnx2x_fastpath
*fp
= &bp
->fp
[i
];
6861 fp
->poll_no_work
= 0;
6863 fp
->poll_max_calls
= 0;
6864 fp
->poll_complete
= 0;
6868 bnx2x_napi_enable(bp
);
6870 if (bp
->flags
& USING_MSIX_FLAG
) {
6871 rc
= bnx2x_req_msix_irqs(bp
);
6873 pci_disable_msix(bp
->pdev
);
6877 if ((rc
!= -ENOMEM
) && (int_mode
!= INT_MODE_INTx
))
6878 bnx2x_enable_msi(bp
);
6880 rc
= bnx2x_req_irq(bp
);
6882 BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc
);
6883 if (bp
->flags
& USING_MSI_FLAG
)
6884 pci_disable_msi(bp
->pdev
);
6887 if (bp
->flags
& USING_MSI_FLAG
) {
6888 bp
->dev
->irq
= bp
->pdev
->irq
;
6889 printk(KERN_INFO PFX
"%s: using MSI IRQ %d\n",
6890 bp
->dev
->name
, bp
->pdev
->irq
);
6894 /* Send LOAD_REQUEST command to MCP
6895 Returns the type of LOAD command:
6896 if it is the first port to be initialized
6897 common blocks should be initialized, otherwise - not
6899 if (!BP_NOMCP(bp
)) {
6900 load_code
= bnx2x_fw_command(bp
, DRV_MSG_CODE_LOAD_REQ
);
6902 BNX2X_ERR("MCP response failure, aborting\n");
6906 if (load_code
== FW_MSG_CODE_DRV_LOAD_REFUSED
) {
6907 rc
= -EBUSY
; /* other port in diagnostic mode */
6912 int port
= BP_PORT(bp
);
6914 DP(NETIF_MSG_IFUP
, "NO MCP - load counts %d, %d, %d\n",
6915 load_count
[0], load_count
[1], load_count
[2]);
6917 load_count
[1 + port
]++;
6918 DP(NETIF_MSG_IFUP
, "NO MCP - new load counts %d, %d, %d\n",
6919 load_count
[0], load_count
[1], load_count
[2]);
6920 if (load_count
[0] == 1)
6921 load_code
= FW_MSG_CODE_DRV_LOAD_COMMON
;
6922 else if (load_count
[1 + port
] == 1)
6923 load_code
= FW_MSG_CODE_DRV_LOAD_PORT
;
6925 load_code
= FW_MSG_CODE_DRV_LOAD_FUNCTION
;
6928 if ((load_code
== FW_MSG_CODE_DRV_LOAD_COMMON
) ||
6929 (load_code
== FW_MSG_CODE_DRV_LOAD_PORT
))
6933 DP(NETIF_MSG_LINK
, "pmf %d\n", bp
->port
.pmf
);
6936 rc
= bnx2x_init_hw(bp
, load_code
);
6938 BNX2X_ERR("HW init failed, aborting\n");
6942 /* Setup NIC internals and enable interrupts */
6943 bnx2x_nic_init(bp
, load_code
);
6945 /* Send LOAD_DONE command to MCP */
6946 if (!BP_NOMCP(bp
)) {
6947 load_code
= bnx2x_fw_command(bp
, DRV_MSG_CODE_LOAD_DONE
);
6949 BNX2X_ERR("MCP response failure, aborting\n");
6955 bp
->state
= BNX2X_STATE_OPENING_WAIT4_PORT
;
6957 rc
= bnx2x_setup_leading(bp
);
6959 BNX2X_ERR("Setup leading failed!\n");
6963 if (CHIP_IS_E1H(bp
))
6964 if (bp
->mf_config
& FUNC_MF_CFG_FUNC_DISABLED
) {
6965 DP(NETIF_MSG_IFUP
, "mf_cfg function disabled\n");
6966 bp
->state
= BNX2X_STATE_DISABLED
;
6969 if (bp
->state
== BNX2X_STATE_OPEN
)
6970 for_each_nondefault_queue(bp
, i
) {
6971 rc
= bnx2x_setup_multi(bp
, i
);
6977 bnx2x_set_mac_addr_e1(bp
, 1);
6979 bnx2x_set_mac_addr_e1h(bp
, 1);
6982 bnx2x_initial_phy_init(bp
, load_mode
);
6984 /* Start fast path */
6985 switch (load_mode
) {
6987 /* Tx queue should be only reenabled */
6988 netif_tx_wake_all_queues(bp
->dev
);
6989 /* Initialize the receive filter. */
6990 bnx2x_set_rx_mode(bp
->dev
);
6994 netif_tx_start_all_queues(bp
->dev
);
6995 /* Initialize the receive filter. */
6996 bnx2x_set_rx_mode(bp
->dev
);
7000 /* Initialize the receive filter. */
7001 bnx2x_set_rx_mode(bp
->dev
);
7002 bp
->state
= BNX2X_STATE_DIAG
;
7010 bnx2x__link_status_update(bp
);
7012 /* start the timer */
7013 mod_timer(&bp
->timer
, jiffies
+ bp
->current_interval
);
7019 bnx2x_int_disable_sync(bp
, 1);
7020 if (!BP_NOMCP(bp
)) {
7021 bnx2x_fw_command(bp
, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP
);
7022 bnx2x_fw_command(bp
, DRV_MSG_CODE_UNLOAD_DONE
);
7025 /* Free SKBs, SGEs, TPA pool and driver internals */
7026 bnx2x_free_skbs(bp
);
7027 for_each_rx_queue(bp
, i
)
7028 bnx2x_free_rx_sge_range(bp
, bp
->fp
+ i
, NUM_RX_SGE
);
7033 bnx2x_napi_disable(bp
);
7034 for_each_rx_queue(bp
, i
)
7035 netif_napi_del(&bnx2x_fp(bp
, i
, napi
));
7041 static int bnx2x_stop_multi(struct bnx2x
*bp
, int index
)
7043 struct bnx2x_fastpath
*fp
= &bp
->fp
[index
];
7046 /* halt the connection */
7047 fp
->state
= BNX2X_FP_STATE_HALTING
;
7048 bnx2x_sp_post(bp
, RAMROD_CMD_ID_ETH_HALT
, index
, 0, fp
->cl_id
, 0);
7050 /* Wait for completion */
7051 rc
= bnx2x_wait_ramrod(bp
, BNX2X_FP_STATE_HALTED
, index
,
7053 if (rc
) /* timeout */
7056 /* delete cfc entry */
7057 bnx2x_sp_post(bp
, RAMROD_CMD_ID_ETH_CFC_DEL
, index
, 0, 0, 1);
7059 /* Wait for completion */
7060 rc
= bnx2x_wait_ramrod(bp
, BNX2X_FP_STATE_CLOSED
, index
,
7065 static int bnx2x_stop_leading(struct bnx2x
*bp
)
7067 __le16 dsb_sp_prod_idx
;
7068 /* if the other port is handling traffic,
7069 this can take a lot of time */
7075 /* Send HALT ramrod */
7076 bp
->fp
[0].state
= BNX2X_FP_STATE_HALTING
;
7077 bnx2x_sp_post(bp
, RAMROD_CMD_ID_ETH_HALT
, 0, 0, bp
->fp
->cl_id
, 0);
7079 /* Wait for completion */
7080 rc
= bnx2x_wait_ramrod(bp
, BNX2X_FP_STATE_HALTED
, 0,
7081 &(bp
->fp
[0].state
), 1);
7082 if (rc
) /* timeout */
7085 dsb_sp_prod_idx
= *bp
->dsb_sp_prod
;
7087 /* Send PORT_DELETE ramrod */
7088 bnx2x_sp_post(bp
, RAMROD_CMD_ID_ETH_PORT_DEL
, 0, 0, 0, 1);
7090 /* Wait for completion to arrive on default status block
7091 we are going to reset the chip anyway
7092 so there is not much to do if this times out
7094 while (dsb_sp_prod_idx
== *bp
->dsb_sp_prod
) {
7096 DP(NETIF_MSG_IFDOWN
, "timeout waiting for port del "
7097 "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
7098 *bp
->dsb_sp_prod
, dsb_sp_prod_idx
);
7099 #ifdef BNX2X_STOP_ON_ERROR
7107 rmb(); /* Refresh the dsb_sp_prod */
7109 bp
->state
= BNX2X_STATE_CLOSING_WAIT4_UNLOAD
;
7110 bp
->fp
[0].state
= BNX2X_FP_STATE_CLOSED
;
static void bnx2x_reset_func(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int base, i;

	REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
	REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);

	base = FUNC_ILT_BASE(func);
	for (i = base; i < base + ILT_PER_FUNC; i++)
		bnx2x_ilt_wr(bp, i, 0);
}
static void bnx2x_reset_port(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 val;

	REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);

	/* Do not rcv packets to BRB */
	REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
	/* Do not direct rcv packets that are not for MCP to the BRB */
	REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
			   NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);

	REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);

	/* Check for BRB port occupancy */
	val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
	if (val)
		DP(NETIF_MSG_IFDOWN,
		   "BRB1 is not empty  %d blocks are occupied\n", val);

	/* TODO: Close Doorbell port? */
}
static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
{
	DP(BNX2X_MSG_MCP, "function %d  reset_code %x\n",
	   BP_FUNC(bp), reset_code);

	switch (reset_code) {
	case FW_MSG_CODE_DRV_UNLOAD_COMMON:
		bnx2x_reset_port(bp);
		bnx2x_reset_func(bp);
		bnx2x_reset_common(bp);
		break;

	case FW_MSG_CODE_DRV_UNLOAD_PORT:
		bnx2x_reset_port(bp);
		bnx2x_reset_func(bp);
		break;

	case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
		bnx2x_reset_func(bp);
		break;

	default:
		BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
		break;
	}
}
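
/* The unload scope decided by the MCP widens monotonically: a FUNCTION
 * unload only clears this function's resources, a PORT unload also quiets
 * the NIG/BRB side of the port, and a COMMON unload (last function on the
 * chip) additionally resets the common blocks via bnx2x_reset_common().
 */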
7184 /* must be called with rtnl_lock */
7185 static int bnx2x_nic_unload(struct bnx2x
*bp
, int unload_mode
)
7187 int port
= BP_PORT(bp
);
7191 bp
->state
= BNX2X_STATE_CLOSING_WAIT4_HALT
;
7193 bp
->rx_mode
= BNX2X_RX_MODE_NONE
;
7194 bnx2x_set_storm_rx_mode(bp
);
7196 bnx2x_netif_stop(bp
, 1);
7198 del_timer_sync(&bp
->timer
);
7199 SHMEM_WR(bp
, func_mb
[BP_FUNC(bp
)].drv_pulse_mb
,
7200 (DRV_PULSE_ALWAYS_ALIVE
| bp
->fw_drv_pulse_wr_seq
));
7201 bnx2x_stats_handle(bp
, STATS_EVENT_STOP
);
7206 /* Wait until tx fastpath tasks complete */
7207 for_each_tx_queue(bp
, i
) {
7208 struct bnx2x_fastpath
*fp
= &bp
->fp
[i
];
7211 while (bnx2x_has_tx_work_unload(fp
)) {
7215 BNX2X_ERR("timeout waiting for queue[%d]\n",
7217 #ifdef BNX2X_STOP_ON_ERROR
7228 /* Give HW time to discard old tx messages */
7231 if (CHIP_IS_E1(bp
)) {
7232 struct mac_configuration_cmd
*config
=
7233 bnx2x_sp(bp
, mcast_config
);
7235 bnx2x_set_mac_addr_e1(bp
, 0);
7237 for (i
= 0; i
< config
->hdr
.length
; i
++)
7238 CAM_INVALIDATE(config
->config_table
[i
]);
7240 config
->hdr
.length
= i
;
7241 if (CHIP_REV_IS_SLOW(bp
))
7242 config
->hdr
.offset
= BNX2X_MAX_EMUL_MULTI
*(1 + port
);
7244 config
->hdr
.offset
= BNX2X_MAX_MULTICAST
*(1 + port
);
7245 config
->hdr
.client_id
= bp
->fp
->cl_id
;
7246 config
->hdr
.reserved1
= 0;
7248 bnx2x_sp_post(bp
, RAMROD_CMD_ID_ETH_SET_MAC
, 0,
7249 U64_HI(bnx2x_sp_mapping(bp
, mcast_config
)),
7250 U64_LO(bnx2x_sp_mapping(bp
, mcast_config
)), 0);
7253 REG_WR(bp
, NIG_REG_LLH0_FUNC_EN
+ port
*8, 0);
7255 bnx2x_set_mac_addr_e1h(bp
, 0);
7257 for (i
= 0; i
< MC_HASH_SIZE
; i
++)
7258 REG_WR(bp
, MC_HASH_OFFSET(bp
, i
), 0);
7261 if (unload_mode
== UNLOAD_NORMAL
)
7262 reset_code
= DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS
;
7264 else if (bp
->flags
& NO_WOL_FLAG
) {
7265 reset_code
= DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP
;
7266 if (CHIP_IS_E1H(bp
))
7267 REG_WR(bp
, MISC_REG_E1HMF_MODE
, 0);
7269 } else if (bp
->wol
) {
7270 u32 emac_base
= port
? GRCBASE_EMAC1
: GRCBASE_EMAC0
;
7271 u8
*mac_addr
= bp
->dev
->dev_addr
;
7273 /* The mac address is written to entries 1-4 to
7274 preserve entry 0 which is used by the PMF */
7275 u8 entry
= (BP_E1HVN(bp
) + 1)*8;
7277 val
= (mac_addr
[0] << 8) | mac_addr
[1];
7278 EMAC_WR(bp
, EMAC_REG_EMAC_MAC_MATCH
+ entry
, val
);
7280 val
= (mac_addr
[2] << 24) | (mac_addr
[3] << 16) |
7281 (mac_addr
[4] << 8) | mac_addr
[5];
7282 EMAC_WR(bp
, EMAC_REG_EMAC_MAC_MATCH
+ entry
+ 4, val
);
7284 reset_code
= DRV_MSG_CODE_UNLOAD_REQ_WOL_EN
;
7287 reset_code
= DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS
;
7289 /* Close multi and leading connections
7290 Completions for ramrods are collected in a synchronous way */
7291 for_each_nondefault_queue(bp
, i
)
7292 if (bnx2x_stop_multi(bp
, i
))
7295 rc
= bnx2x_stop_leading(bp
);
7297 BNX2X_ERR("Stop leading failed!\n");
7298 #ifdef BNX2X_STOP_ON_ERROR
7307 reset_code
= bnx2x_fw_command(bp
, reset_code
);
7309 DP(NETIF_MSG_IFDOWN
, "NO MCP - load counts %d, %d, %d\n",
7310 load_count
[0], load_count
[1], load_count
[2]);
7312 load_count
[1 + port
]--;
7313 DP(NETIF_MSG_IFDOWN
, "NO MCP - new load counts %d, %d, %d\n",
7314 load_count
[0], load_count
[1], load_count
[2]);
7315 if (load_count
[0] == 0)
7316 reset_code
= FW_MSG_CODE_DRV_UNLOAD_COMMON
;
7317 else if (load_count
[1 + port
] == 0)
7318 reset_code
= FW_MSG_CODE_DRV_UNLOAD_PORT
;
7320 reset_code
= FW_MSG_CODE_DRV_UNLOAD_FUNCTION
;
7323 if ((reset_code
== FW_MSG_CODE_DRV_UNLOAD_COMMON
) ||
7324 (reset_code
== FW_MSG_CODE_DRV_UNLOAD_PORT
))
7325 bnx2x__link_reset(bp
);
7327 /* Reset the chip */
7328 bnx2x_reset_chip(bp
, reset_code
);
7330 /* Report UNLOAD_DONE to MCP */
7332 bnx2x_fw_command(bp
, DRV_MSG_CODE_UNLOAD_DONE
);
7336 /* Free SKBs, SGEs, TPA pool and driver internals */
7337 bnx2x_free_skbs(bp
);
7338 for_each_rx_queue(bp
, i
)
7339 bnx2x_free_rx_sge_range(bp
, bp
->fp
+ i
, NUM_RX_SGE
);
7340 for_each_rx_queue(bp
, i
)
7341 netif_napi_del(&bnx2x_fp(bp
, i
, napi
));
7344 bp
->state
= BNX2X_STATE_CLOSED
;
7346 netif_carrier_off(bp
->dev
);
7351 static void bnx2x_reset_task(struct work_struct
*work
)
7353 struct bnx2x
*bp
= container_of(work
, struct bnx2x
, reset_task
);
7355 #ifdef BNX2X_STOP_ON_ERROR
7356 BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
7357 " so reset not done to allow debug dump,\n"
7358 " you will need to reboot when done\n");
7364 if (!netif_running(bp
->dev
))
7365 goto reset_task_exit
;
7367 bnx2x_nic_unload(bp
, UNLOAD_NORMAL
);
7368 bnx2x_nic_load(bp
, LOAD_NORMAL
);
/* end of nic load/unload */

/* Init service functions */
7382 static inline u32
bnx2x_get_pretend_reg(struct bnx2x
*bp
, int func
)
7385 case 0: return PXP2_REG_PGL_PRETEND_FUNC_F0
;
7386 case 1: return PXP2_REG_PGL_PRETEND_FUNC_F1
;
7387 case 2: return PXP2_REG_PGL_PRETEND_FUNC_F2
;
7388 case 3: return PXP2_REG_PGL_PRETEND_FUNC_F3
;
7389 case 4: return PXP2_REG_PGL_PRETEND_FUNC_F4
;
7390 case 5: return PXP2_REG_PGL_PRETEND_FUNC_F5
;
7391 case 6: return PXP2_REG_PGL_PRETEND_FUNC_F6
;
7392 case 7: return PXP2_REG_PGL_PRETEND_FUNC_F7
;
7394 BNX2X_ERR("Unsupported function index: %d\n", func
);
7399 static void bnx2x_undi_int_disable_e1h(struct bnx2x
*bp
, int orig_func
)
7401 u32 reg
= bnx2x_get_pretend_reg(bp
, orig_func
), new_val
;
7403 /* Flush all outstanding writes */
7406 /* Pretend to be function 0 */
7408 /* Flush the GRC transaction (in the chip) */
7409 new_val
= REG_RD(bp
, reg
);
7411 BNX2X_ERR("Hmmm... Pretend register wasn't updated: (0,%d)!\n",
7416 /* From now we are in the "like-E1" mode */
7417 bnx2x_int_disable(bp
);
7419 /* Flush all outstanding writes */
7422 /* Restore the original funtion settings */
7423 REG_WR(bp
, reg
, orig_func
);
7424 new_val
= REG_RD(bp
, reg
);
7425 if (new_val
!= orig_func
) {
7426 BNX2X_ERR("Hmmm... Pretend register wasn't updated: (%d,%d)!\n",
7427 orig_func
, new_val
);
7432 static inline void bnx2x_undi_int_disable(struct bnx2x
*bp
, int func
)
7434 if (CHIP_IS_E1H(bp
))
7435 bnx2x_undi_int_disable_e1h(bp
, func
);
7437 bnx2x_int_disable(bp
);
7440 static void __devinit
bnx2x_undi_unload(struct bnx2x
*bp
)
7444 /* Check if there is any driver already loaded */
7445 val
= REG_RD(bp
, MISC_REG_UNPREPARED
);
7447 /* Check if it is the UNDI driver
7448 * UNDI driver initializes CID offset for normal bell to 0x7
7450 bnx2x_acquire_hw_lock(bp
, HW_LOCK_RESOURCE_UNDI
);
7451 val
= REG_RD(bp
, DORQ_REG_NORM_CID_OFST
);
7453 u32 reset_code
= DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS
;
7455 int func
= BP_FUNC(bp
);
7459 /* clear the UNDI indication */
7460 REG_WR(bp
, DORQ_REG_NORM_CID_OFST
, 0);
7462 BNX2X_DEV_INFO("UNDI is active! reset device\n");
7464 /* try unload UNDI on port 0 */
7467 (SHMEM_RD(bp
, func_mb
[bp
->func
].drv_mb_header
) &
7468 DRV_MSG_SEQ_NUMBER_MASK
);
7469 reset_code
= bnx2x_fw_command(bp
, reset_code
);
7471 /* if UNDI is loaded on the other port */
7472 if (reset_code
!= FW_MSG_CODE_DRV_UNLOAD_COMMON
) {
7474 /* send "DONE" for previous unload */
7475 bnx2x_fw_command(bp
, DRV_MSG_CODE_UNLOAD_DONE
);
7477 /* unload UNDI on port 1 */
7480 (SHMEM_RD(bp
, func_mb
[bp
->func
].drv_mb_header
) &
7481 DRV_MSG_SEQ_NUMBER_MASK
);
7482 reset_code
= DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS
;
7484 bnx2x_fw_command(bp
, reset_code
);
7487 /* now it's safe to release the lock */
7488 bnx2x_release_hw_lock(bp
, HW_LOCK_RESOURCE_UNDI
);
7490 bnx2x_undi_int_disable(bp
, func
);
7492 /* close input traffic and wait for it */
7493 /* Do not rcv packets to BRB */
7495 (BP_PORT(bp
) ? NIG_REG_LLH1_BRB1_DRV_MASK
:
7496 NIG_REG_LLH0_BRB1_DRV_MASK
), 0x0);
7497 /* Do not direct rcv packets that are not for MCP to
7500 (BP_PORT(bp
) ? NIG_REG_LLH1_BRB1_NOT_MCP
:
7501 NIG_REG_LLH0_BRB1_NOT_MCP
), 0x0);
7504 (BP_PORT(bp
) ? MISC_REG_AEU_MASK_ATTN_FUNC_1
:
7505 MISC_REG_AEU_MASK_ATTN_FUNC_0
), 0);
7508 /* save NIG port swap info */
7509 swap_val
= REG_RD(bp
, NIG_REG_PORT_SWAP
);
7510 swap_en
= REG_RD(bp
, NIG_REG_STRAP_OVERRIDE
);
7513 GRCBASE_MISC
+ MISC_REGISTERS_RESET_REG_1_CLEAR
,
7516 GRCBASE_MISC
+ MISC_REGISTERS_RESET_REG_2_CLEAR
,
7518 /* take the NIG out of reset and restore swap values */
7520 GRCBASE_MISC
+ MISC_REGISTERS_RESET_REG_1_SET
,
7521 MISC_REGISTERS_RESET_REG_1_RST_NIG
);
7522 REG_WR(bp
, NIG_REG_PORT_SWAP
, swap_val
);
7523 REG_WR(bp
, NIG_REG_STRAP_OVERRIDE
, swap_en
);
7525 /* send unload done to the MCP */
7526 bnx2x_fw_command(bp
, DRV_MSG_CODE_UNLOAD_DONE
);
7528 /* restore our func and fw_seq */
7531 (SHMEM_RD(bp
, func_mb
[bp
->func
].drv_mb_header
) &
7532 DRV_MSG_SEQ_NUMBER_MASK
);
7535 bnx2x_release_hw_lock(bp
, HW_LOCK_RESOURCE_UNDI
);
7539 static void __devinit
bnx2x_get_common_hwinfo(struct bnx2x
*bp
)
7541 u32 val
, val2
, val3
, val4
, id
;
7544 /* Get the chip revision id and number. */
7545 /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
7546 val
= REG_RD(bp
, MISC_REG_CHIP_NUM
);
7547 id
= ((val
& 0xffff) << 16);
7548 val
= REG_RD(bp
, MISC_REG_CHIP_REV
);
7549 id
|= ((val
& 0xf) << 12);
7550 val
= REG_RD(bp
, MISC_REG_CHIP_METAL
);
7551 id
|= ((val
& 0xff) << 4);
7552 val
= REG_RD(bp
, MISC_REG_BOND_ID
);
7554 bp
->common
.chip_id
= id
;
7555 bp
->link_params
.chip_id
= bp
->common
.chip_id
;
7556 BNX2X_DEV_INFO("chip ID is 0x%x\n", id
);
7558 val
= (REG_RD(bp
, 0x2874) & 0x55);
7559 if ((bp
->common
.chip_id
& 0x1) ||
7560 (CHIP_IS_E1(bp
) && val
) || (CHIP_IS_E1H(bp
) && (val
== 0x55))) {
7561 bp
->flags
|= ONE_PORT_FLAG
;
7562 BNX2X_DEV_INFO("single port device\n");
7565 val
= REG_RD(bp
, MCP_REG_MCPR_NVM_CFG4
);
7566 bp
->common
.flash_size
= (NVRAM_1MB_SIZE
<<
7567 (val
& MCPR_NVM_CFG4_FLASH_SIZE
));
7568 BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
7569 bp
->common
.flash_size
, bp
->common
.flash_size
);
7571 bp
->common
.shmem_base
= REG_RD(bp
, MISC_REG_SHARED_MEM_ADDR
);
7572 bp
->link_params
.shmem_base
= bp
->common
.shmem_base
;
7573 BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp
->common
.shmem_base
);
7575 if (!bp
->common
.shmem_base
||
7576 (bp
->common
.shmem_base
< 0xA0000) ||
7577 (bp
->common
.shmem_base
>= 0xC0000)) {
7578 BNX2X_DEV_INFO("MCP not active\n");
7579 bp
->flags
|= NO_MCP_FLAG
;
7583 val
= SHMEM_RD(bp
, validity_map
[BP_PORT(bp
)]);
7584 if ((val
& (SHR_MEM_VALIDITY_DEV_INFO
| SHR_MEM_VALIDITY_MB
))
7585 != (SHR_MEM_VALIDITY_DEV_INFO
| SHR_MEM_VALIDITY_MB
))
7586 BNX2X_ERR("BAD MCP validity signature\n");
7588 bp
->common
.hw_config
= SHMEM_RD(bp
, dev_info
.shared_hw_config
.config
);
7589 BNX2X_DEV_INFO("hw_config 0x%08x\n", bp
->common
.hw_config
);
7591 bp
->link_params
.hw_led_mode
= ((bp
->common
.hw_config
&
7592 SHARED_HW_CFG_LED_MODE_MASK
) >>
7593 SHARED_HW_CFG_LED_MODE_SHIFT
);
7595 bp
->link_params
.feature_config_flags
= 0;
7596 val
= SHMEM_RD(bp
, dev_info
.shared_feature_config
.config
);
7597 if (val
& SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED
)
7598 bp
->link_params
.feature_config_flags
|=
7599 FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED
;
7601 bp
->link_params
.feature_config_flags
&=
7602 ~FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED
;
7604 val
= SHMEM_RD(bp
, dev_info
.bc_rev
) >> 8;
7605 bp
->common
.bc_ver
= val
;
7606 BNX2X_DEV_INFO("bc_ver %X\n", val
);
7607 if (val
< BNX2X_BC_VER
) {
7608 /* for now only warn
7609 * later we might need to enforce this */
7610 BNX2X_ERR("This driver needs bc_ver %X but found %X,"
7611 " please upgrade BC\n", BNX2X_BC_VER
, val
);
7614 if (BP_E1HVN(bp
) == 0) {
7615 pci_read_config_word(bp
->pdev
, bp
->pm_cap
+ PCI_PM_PMC
, &pmc
);
7616 bp
->flags
|= (pmc
& PCI_PM_CAP_PME_D3cold
) ? 0 : NO_WOL_FLAG
;
7618 /* no WOL capability for E1HVN != 0 */
7619 bp
->flags
|= NO_WOL_FLAG
;
7621 BNX2X_DEV_INFO("%sWoL capable\n",
7622 (bp
->flags
& NO_WOL_FLAG
) ? "not " : "");
7624 val
= SHMEM_RD(bp
, dev_info
.shared_hw_config
.part_num
);
7625 val2
= SHMEM_RD(bp
, dev_info
.shared_hw_config
.part_num
[4]);
7626 val3
= SHMEM_RD(bp
, dev_info
.shared_hw_config
.part_num
[8]);
7627 val4
= SHMEM_RD(bp
, dev_info
.shared_hw_config
.part_num
[12]);
7629 printk(KERN_INFO PFX
"part number %X-%X-%X-%X\n",
7630 val
, val2
, val3
, val4
);
7633 static void __devinit
bnx2x_link_settings_supported(struct bnx2x
*bp
,
7636 int port
= BP_PORT(bp
);
7639 switch (switch_cfg
) {
7641 BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg
);
7644 SERDES_EXT_PHY_TYPE(bp
->link_params
.ext_phy_config
);
7645 switch (ext_phy_type
) {
7646 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT
:
7647 BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
7650 bp
->port
.supported
|= (SUPPORTED_10baseT_Half
|
7651 SUPPORTED_10baseT_Full
|
7652 SUPPORTED_100baseT_Half
|
7653 SUPPORTED_100baseT_Full
|
7654 SUPPORTED_1000baseT_Full
|
7655 SUPPORTED_2500baseX_Full
|
7660 SUPPORTED_Asym_Pause
);
7663 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482
:
7664 BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n",
7667 bp
->port
.supported
|= (SUPPORTED_10baseT_Half
|
7668 SUPPORTED_10baseT_Full
|
7669 SUPPORTED_100baseT_Half
|
7670 SUPPORTED_100baseT_Full
|
7671 SUPPORTED_1000baseT_Full
|
7676 SUPPORTED_Asym_Pause
);
7680 BNX2X_ERR("NVRAM config error. "
7681 "BAD SerDes ext_phy_config 0x%x\n",
7682 bp
->link_params
.ext_phy_config
);
7686 bp
->port
.phy_addr
= REG_RD(bp
, NIG_REG_SERDES0_CTRL_PHY_ADDR
+
7688 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp
->port
.phy_addr
);
7691 case SWITCH_CFG_10G
:
7692 BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg
);
7695 XGXS_EXT_PHY_TYPE(bp
->link_params
.ext_phy_config
);
7696 switch (ext_phy_type
) {
7697 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT
:
7698 BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
7701 bp
->port
.supported
|= (SUPPORTED_10baseT_Half
|
7702 SUPPORTED_10baseT_Full
|
7703 SUPPORTED_100baseT_Half
|
7704 SUPPORTED_100baseT_Full
|
7705 SUPPORTED_1000baseT_Full
|
7706 SUPPORTED_2500baseX_Full
|
7707 SUPPORTED_10000baseT_Full
|
7712 SUPPORTED_Asym_Pause
);
7715 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072
:
7716 BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n",
7719 bp
->port
.supported
|= (SUPPORTED_10000baseT_Full
|
7720 SUPPORTED_1000baseT_Full
|
7724 SUPPORTED_Asym_Pause
);
7727 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073
:
7728 BNX2X_DEV_INFO("ext_phy_type 0x%x (8073)\n",
7731 bp
->port
.supported
|= (SUPPORTED_10000baseT_Full
|
7732 SUPPORTED_2500baseX_Full
|
7733 SUPPORTED_1000baseT_Full
|
7737 SUPPORTED_Asym_Pause
);
7740 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705
:
7741 BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n",
7744 bp
->port
.supported
|= (SUPPORTED_10000baseT_Full
|
7747 SUPPORTED_Asym_Pause
);
7750 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706
:
7751 BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n",
7754 bp
->port
.supported
|= (SUPPORTED_10000baseT_Full
|
7755 SUPPORTED_1000baseT_Full
|
7758 SUPPORTED_Asym_Pause
);
7761 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726
:
7762 BNX2X_DEV_INFO("ext_phy_type 0x%x (8726)\n",
7765 bp
->port
.supported
|= (SUPPORTED_10000baseT_Full
|
7766 SUPPORTED_1000baseT_Full
|
7770 SUPPORTED_Asym_Pause
);
7773 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101
:
7774 BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n",
7777 bp
->port
.supported
|= (SUPPORTED_10000baseT_Full
|
7781 SUPPORTED_Asym_Pause
);
7784 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481
:
7785 BNX2X_DEV_INFO("ext_phy_type 0x%x (BCM8481)\n",
7788 bp
->port
.supported
|= (SUPPORTED_10baseT_Half
|
7789 SUPPORTED_10baseT_Full
|
7790 SUPPORTED_100baseT_Half
|
7791 SUPPORTED_100baseT_Full
|
7792 SUPPORTED_1000baseT_Full
|
7793 SUPPORTED_10000baseT_Full
|
7797 SUPPORTED_Asym_Pause
);
7800 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE
:
7801 BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
7802 bp
->link_params
.ext_phy_config
);
7806 BNX2X_ERR("NVRAM config error. "
7807 "BAD XGXS ext_phy_config 0x%x\n",
7808 bp
->link_params
.ext_phy_config
);
7812 bp
->port
.phy_addr
= REG_RD(bp
, NIG_REG_XGXS0_CTRL_PHY_ADDR
+
7814 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp
->port
.phy_addr
);
7819 BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
7820 bp
->port
.link_config
);
7823 bp
->link_params
.phy_addr
= bp
->port
.phy_addr
;
7825 /* mask what we support according to speed_cap_mask */
7826 if (!(bp
->link_params
.speed_cap_mask
&
7827 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF
))
7828 bp
->port
.supported
&= ~SUPPORTED_10baseT_Half
;
7830 if (!(bp
->link_params
.speed_cap_mask
&
7831 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL
))
7832 bp
->port
.supported
&= ~SUPPORTED_10baseT_Full
;
7834 if (!(bp
->link_params
.speed_cap_mask
&
7835 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF
))
7836 bp
->port
.supported
&= ~SUPPORTED_100baseT_Half
;
7838 if (!(bp
->link_params
.speed_cap_mask
&
7839 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL
))
7840 bp
->port
.supported
&= ~SUPPORTED_100baseT_Full
;
7842 if (!(bp
->link_params
.speed_cap_mask
&
7843 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G
))
7844 bp
->port
.supported
&= ~(SUPPORTED_1000baseT_Half
|
7845 SUPPORTED_1000baseT_Full
);
7847 if (!(bp
->link_params
.speed_cap_mask
&
7848 PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G
))
7849 bp
->port
.supported
&= ~SUPPORTED_2500baseX_Full
;
7851 if (!(bp
->link_params
.speed_cap_mask
&
7852 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G
))
7853 bp
->port
.supported
&= ~SUPPORTED_10000baseT_Full
;
7855 BNX2X_DEV_INFO("supported 0x%x\n", bp
->port
.supported
);
7858 static void __devinit
bnx2x_link_settings_requested(struct bnx2x
*bp
)
7860 bp
->link_params
.req_duplex
= DUPLEX_FULL
;
7862 switch (bp
->port
.link_config
& PORT_FEATURE_LINK_SPEED_MASK
) {
7863 case PORT_FEATURE_LINK_SPEED_AUTO
:
7864 if (bp
->port
.supported
& SUPPORTED_Autoneg
) {
7865 bp
->link_params
.req_line_speed
= SPEED_AUTO_NEG
;
7866 bp
->port
.advertising
= bp
->port
.supported
;
7869 XGXS_EXT_PHY_TYPE(bp
->link_params
.ext_phy_config
);
7871 if ((ext_phy_type
==
7872 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705
) ||
7874 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706
)) {
7875 /* force 10G, no AN */
7876 bp
->link_params
.req_line_speed
= SPEED_10000
;
7877 bp
->port
.advertising
=
7878 (ADVERTISED_10000baseT_Full
|
7882 BNX2X_ERR("NVRAM config error. "
7883 "Invalid link_config 0x%x"
7884 " Autoneg not supported\n",
7885 bp
->port
.link_config
);
7890 case PORT_FEATURE_LINK_SPEED_10M_FULL
:
7891 if (bp
->port
.supported
& SUPPORTED_10baseT_Full
) {
7892 bp
->link_params
.req_line_speed
= SPEED_10
;
7893 bp
->port
.advertising
= (ADVERTISED_10baseT_Full
|
7896 BNX2X_ERR("NVRAM config error. "
7897 "Invalid link_config 0x%x"
7898 " speed_cap_mask 0x%x\n",
7899 bp
->port
.link_config
,
7900 bp
->link_params
.speed_cap_mask
);
7905 case PORT_FEATURE_LINK_SPEED_10M_HALF
:
7906 if (bp
->port
.supported
& SUPPORTED_10baseT_Half
) {
7907 bp
->link_params
.req_line_speed
= SPEED_10
;
7908 bp
->link_params
.req_duplex
= DUPLEX_HALF
;
7909 bp
->port
.advertising
= (ADVERTISED_10baseT_Half
|
7912 BNX2X_ERR("NVRAM config error. "
7913 "Invalid link_config 0x%x"
7914 " speed_cap_mask 0x%x\n",
7915 bp
->port
.link_config
,
7916 bp
->link_params
.speed_cap_mask
);
7921 case PORT_FEATURE_LINK_SPEED_100M_FULL
:
7922 if (bp
->port
.supported
& SUPPORTED_100baseT_Full
) {
7923 bp
->link_params
.req_line_speed
= SPEED_100
;
7924 bp
->port
.advertising
= (ADVERTISED_100baseT_Full
|
7927 BNX2X_ERR("NVRAM config error. "
7928 "Invalid link_config 0x%x"
7929 " speed_cap_mask 0x%x\n",
7930 bp
->port
.link_config
,
7931 bp
->link_params
.speed_cap_mask
);
7936 case PORT_FEATURE_LINK_SPEED_100M_HALF
:
7937 if (bp
->port
.supported
& SUPPORTED_100baseT_Half
) {
7938 bp
->link_params
.req_line_speed
= SPEED_100
;
7939 bp
->link_params
.req_duplex
= DUPLEX_HALF
;
7940 bp
->port
.advertising
= (ADVERTISED_100baseT_Half
|
7943 BNX2X_ERR("NVRAM config error. "
7944 "Invalid link_config 0x%x"
7945 " speed_cap_mask 0x%x\n",
7946 bp
->port
.link_config
,
7947 bp
->link_params
.speed_cap_mask
);
7952 case PORT_FEATURE_LINK_SPEED_1G
:
7953 if (bp
->port
.supported
& SUPPORTED_1000baseT_Full
) {
7954 bp
->link_params
.req_line_speed
= SPEED_1000
;
7955 bp
->port
.advertising
= (ADVERTISED_1000baseT_Full
|
7958 BNX2X_ERR("NVRAM config error. "
7959 "Invalid link_config 0x%x"
7960 " speed_cap_mask 0x%x\n",
7961 bp
->port
.link_config
,
7962 bp
->link_params
.speed_cap_mask
);
7967 case PORT_FEATURE_LINK_SPEED_2_5G
:
7968 if (bp
->port
.supported
& SUPPORTED_2500baseX_Full
) {
7969 bp
->link_params
.req_line_speed
= SPEED_2500
;
7970 bp
->port
.advertising
= (ADVERTISED_2500baseX_Full
|
7973 BNX2X_ERR("NVRAM config error. "
7974 "Invalid link_config 0x%x"
7975 " speed_cap_mask 0x%x\n",
7976 bp
->port
.link_config
,
7977 bp
->link_params
.speed_cap_mask
);
7982 case PORT_FEATURE_LINK_SPEED_10G_CX4
:
7983 case PORT_FEATURE_LINK_SPEED_10G_KX4
:
7984 case PORT_FEATURE_LINK_SPEED_10G_KR
:
7985 if (bp
->port
.supported
& SUPPORTED_10000baseT_Full
) {
7986 bp
->link_params
.req_line_speed
= SPEED_10000
;
7987 bp
->port
.advertising
= (ADVERTISED_10000baseT_Full
|
7990 BNX2X_ERR("NVRAM config error. "
7991 "Invalid link_config 0x%x"
7992 " speed_cap_mask 0x%x\n",
7993 bp
->port
.link_config
,
7994 bp
->link_params
.speed_cap_mask
);
8000 BNX2X_ERR("NVRAM config error. "
8001 "BAD link speed link_config 0x%x\n",
8002 bp
->port
.link_config
);
8003 bp
->link_params
.req_line_speed
= SPEED_AUTO_NEG
;
8004 bp
->port
.advertising
= bp
->port
.supported
;
8008 bp
->link_params
.req_flow_ctrl
= (bp
->port
.link_config
&
8009 PORT_FEATURE_FLOW_CONTROL_MASK
);
8010 if ((bp
->link_params
.req_flow_ctrl
== BNX2X_FLOW_CTRL_AUTO
) &&
8011 !(bp
->port
.supported
& SUPPORTED_Autoneg
))
8012 bp
->link_params
.req_flow_ctrl
= BNX2X_FLOW_CTRL_NONE
;
8014 BNX2X_DEV_INFO("req_line_speed %d req_duplex %d req_flow_ctrl 0x%x"
8015 " advertising 0x%x\n",
8016 bp
->link_params
.req_line_speed
,
8017 bp
->link_params
.req_duplex
,
8018 bp
->link_params
.req_flow_ctrl
, bp
->port
.advertising
);
8021 static void __devinit
bnx2x_get_port_hwinfo(struct bnx2x
*bp
)
8023 int port
= BP_PORT(bp
);
8028 bp
->link_params
.bp
= bp
;
8029 bp
->link_params
.port
= port
;
8031 bp
->link_params
.lane_config
=
8032 SHMEM_RD(bp
, dev_info
.port_hw_config
[port
].lane_config
);
8033 bp
->link_params
.ext_phy_config
=
8035 dev_info
.port_hw_config
[port
].external_phy_config
);
8036 bp
->link_params
.speed_cap_mask
=
8038 dev_info
.port_hw_config
[port
].speed_capability_mask
);
8040 bp
->port
.link_config
=
8041 SHMEM_RD(bp
, dev_info
.port_feature_config
[port
].link_config
);
8043 /* Get the 4 lanes xgxs config rx and tx */
8044 for (i
= 0; i
< 2; i
++) {
8046 dev_info
.port_hw_config
[port
].xgxs_config_rx
[i
<<1]);
8047 bp
->link_params
.xgxs_config_rx
[i
<< 1] = ((val
>>16) & 0xffff);
8048 bp
->link_params
.xgxs_config_rx
[(i
<< 1) + 1] = (val
& 0xffff);
8051 dev_info
.port_hw_config
[port
].xgxs_config_tx
[i
<<1]);
8052 bp
->link_params
.xgxs_config_tx
[i
<< 1] = ((val
>>16) & 0xffff);
8053 bp
->link_params
.xgxs_config_tx
[(i
<< 1) + 1] = (val
& 0xffff);
8056 config
= SHMEM_RD(bp
, dev_info
.port_feature_config
[port
].config
);
8057 if (config
& PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_ENABLED
)
8058 bp
->link_params
.feature_config_flags
|=
8059 FEATURE_CONFIG_MODULE_ENFORCMENT_ENABLED
;
8061 bp
->link_params
.feature_config_flags
&=
8062 ~FEATURE_CONFIG_MODULE_ENFORCMENT_ENABLED
;
8064 /* If the device is capable of WoL, set the default state according
8067 bp
->wol
= (!(bp
->flags
& NO_WOL_FLAG
) &&
8068 (config
& PORT_FEATURE_WOL_ENABLED
));
8070 BNX2X_DEV_INFO("lane_config 0x%08x ext_phy_config 0x%08x"
8071 " speed_cap_mask 0x%08x link_config 0x%08x\n",
8072 bp
->link_params
.lane_config
,
8073 bp
->link_params
.ext_phy_config
,
8074 bp
->link_params
.speed_cap_mask
, bp
->port
.link_config
);
8076 bp
->link_params
.switch_cfg
= (bp
->port
.link_config
&
8077 PORT_FEATURE_CONNECTED_SWITCH_MASK
);
8078 bnx2x_link_settings_supported(bp
, bp
->link_params
.switch_cfg
);
8080 bnx2x_link_settings_requested(bp
);
8082 val2
= SHMEM_RD(bp
, dev_info
.port_hw_config
[port
].mac_upper
);
8083 val
= SHMEM_RD(bp
, dev_info
.port_hw_config
[port
].mac_lower
);
8084 bp
->dev
->dev_addr
[0] = (u8
)(val2
>> 8 & 0xff);
8085 bp
->dev
->dev_addr
[1] = (u8
)(val2
& 0xff);
8086 bp
->dev
->dev_addr
[2] = (u8
)(val
>> 24 & 0xff);
8087 bp
->dev
->dev_addr
[3] = (u8
)(val
>> 16 & 0xff);
8088 bp
->dev
->dev_addr
[4] = (u8
)(val
>> 8 & 0xff);
8089 bp
->dev
->dev_addr
[5] = (u8
)(val
& 0xff);
8090 memcpy(bp
->link_params
.mac_addr
, bp
->dev
->dev_addr
, ETH_ALEN
);
8091 memcpy(bp
->dev
->perm_addr
, bp
->dev
->dev_addr
, ETH_ALEN
);
8094 static int __devinit
bnx2x_get_hwinfo(struct bnx2x
*bp
)
8096 int func
= BP_FUNC(bp
);
8100 bnx2x_get_common_hwinfo(bp
);
8104 if (CHIP_IS_E1H(bp
)) {
8106 SHMEM_RD(bp
, mf_cfg
.func_mf_config
[func
].config
);
8108 val
= (SHMEM_RD(bp
, mf_cfg
.func_mf_config
[func
].e1hov_tag
) &
8109 FUNC_MF_CFG_E1HOV_TAG_MASK
);
8110 if (val
!= FUNC_MF_CFG_E1HOV_TAG_DEFAULT
) {
8114 BNX2X_DEV_INFO("MF mode E1HOV for func %d is %d "
8116 func
, bp
->e1hov
, bp
->e1hov
);
8118 BNX2X_DEV_INFO("single function mode\n");
8120 BNX2X_ERR("!!! No valid E1HOV for func %d,"
8121 " aborting\n", func
);
8127 if (!BP_NOMCP(bp
)) {
8128 bnx2x_get_port_hwinfo(bp
);
8130 bp
->fw_seq
= (SHMEM_RD(bp
, func_mb
[func
].drv_mb_header
) &
8131 DRV_MSG_SEQ_NUMBER_MASK
);
8132 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp
->fw_seq
);
8136 val2
= SHMEM_RD(bp
, mf_cfg
.func_mf_config
[func
].mac_upper
);
8137 val
= SHMEM_RD(bp
, mf_cfg
.func_mf_config
[func
].mac_lower
);
8138 if ((val2
!= FUNC_MF_CFG_UPPERMAC_DEFAULT
) &&
8139 (val
!= FUNC_MF_CFG_LOWERMAC_DEFAULT
)) {
8140 bp
->dev
->dev_addr
[0] = (u8
)(val2
>> 8 & 0xff);
8141 bp
->dev
->dev_addr
[1] = (u8
)(val2
& 0xff);
8142 bp
->dev
->dev_addr
[2] = (u8
)(val
>> 24 & 0xff);
8143 bp
->dev
->dev_addr
[3] = (u8
)(val
>> 16 & 0xff);
8144 bp
->dev
->dev_addr
[4] = (u8
)(val
>> 8 & 0xff);
8145 bp
->dev
->dev_addr
[5] = (u8
)(val
& 0xff);
8146 memcpy(bp
->link_params
.mac_addr
, bp
->dev
->dev_addr
,
8148 memcpy(bp
->dev
->perm_addr
, bp
->dev
->dev_addr
,
8156 /* only supposed to happen on emulation/FPGA */
8157 BNX2X_ERR("warning random MAC workaround active\n");
8158 random_ether_addr(bp
->dev
->dev_addr
);
8159 memcpy(bp
->dev
->perm_addr
, bp
->dev
->dev_addr
, ETH_ALEN
);
8165 static int __devinit
bnx2x_init_bp(struct bnx2x
*bp
)
8167 int func
= BP_FUNC(bp
);
8171 /* Disable interrupt handling until HW is initialized */
8172 atomic_set(&bp
->intr_sem
, 1);
8174 mutex_init(&bp
->port
.phy_mutex
);
8176 INIT_DELAYED_WORK(&bp
->sp_task
, bnx2x_sp_task
);
8177 INIT_WORK(&bp
->reset_task
, bnx2x_reset_task
);
8179 rc
= bnx2x_get_hwinfo(bp
);
8181 /* need to reset chip if undi was active */
8183 bnx2x_undi_unload(bp
);
8185 if (CHIP_REV_IS_FPGA(bp
))
8186 printk(KERN_ERR PFX
"FPGA detected\n");
8188 if (BP_NOMCP(bp
) && (func
== 0))
8190 "MCP disabled, must load devices in order!\n");
8192 /* Set multi queue mode */
8193 if ((multi_mode
!= ETH_RSS_MODE_DISABLED
) &&
8194 ((int_mode
== INT_MODE_INTx
) || (int_mode
== INT_MODE_MSI
))) {
8196 "Multi disabled since int_mode requested is not MSI-X\n");
8197 multi_mode
= ETH_RSS_MODE_DISABLED
;
8199 bp
->multi_mode
= multi_mode
;
8204 bp
->flags
&= ~TPA_ENABLE_FLAG
;
8205 bp
->dev
->features
&= ~NETIF_F_LRO
;
8207 bp
->flags
|= TPA_ENABLE_FLAG
;
8208 bp
->dev
->features
|= NETIF_F_LRO
;
8213 bp
->tx_ring_size
= MAX_TX_AVAIL
;
8214 bp
->rx_ring_size
= MAX_RX_AVAIL
;
8221 timer_interval
= (CHIP_REV_IS_SLOW(bp
) ? 5*HZ
: HZ
);
8222 bp
->current_interval
= (poll
? poll
: timer_interval
);
8224 init_timer(&bp
->timer
);
8225 bp
->timer
.expires
= jiffies
+ bp
->current_interval
;
8226 bp
->timer
.data
= (unsigned long) bp
;
8227 bp
->timer
.function
= bnx2x_timer
;
8233  * ethtool service functions
8236 /* All ethtool functions called with rtnl_lock */
8238 static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
8240     struct bnx2x *bp = netdev_priv(dev);
8242     cmd->supported = bp->port.supported;
8243     cmd->advertising = bp->port.advertising;
8245     if (netif_carrier_ok(dev)) {
8246         cmd->speed = bp->link_vars.line_speed;
8247         cmd->duplex = bp->link_vars.duplex;
8249         cmd->speed = bp->link_params.req_line_speed;
8250         cmd->duplex = bp->link_params.req_duplex;
8255         vn_max_rate = ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
8256                        FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
8257         if (vn_max_rate < cmd->speed)
8258             cmd->speed = vn_max_rate;
8261     if (bp->link_params.switch_cfg == SWITCH_CFG_10G) {
8263             XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
8265         switch (ext_phy_type) {
8266         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
8267         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
8268         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
8269         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
8270         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
8271         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
8272             cmd->port = PORT_FIBRE;
8275         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
8276         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
8277             cmd->port = PORT_TP;
8280         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
8281             BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
8282                       bp->link_params.ext_phy_config);
8286             DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
8287                bp->link_params.ext_phy_config);
8291         cmd->port = PORT_TP;
8293     cmd->phy_address = bp->port.phy_addr;
8294     cmd->transceiver = XCVR_INTERNAL;
8296     if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
8297         cmd->autoneg = AUTONEG_ENABLE;
8299         cmd->autoneg = AUTONEG_DISABLE;
8304     DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
8305        DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
8306        DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
8307        DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
8308        cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
8309        cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
8310        cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
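/*
 * Illustrative sketch (editorial, not part of the driver): the
 * multi-function speed clamp applied in bnx2x_get_settings() above.  In
 * E1H multi-function mode the per-function maximum bandwidth is kept in
 * mf_config in units of 100 Mb/s, and the reported speed is capped at
 * that value.  The helper name is hypothetical.
 */
static inline u16 bnx2x_example_vn_max_rate(u32 mf_config)
{
    return ((mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
            FUNC_MF_CFG_MAX_BW_SHIFT) * 100;    /* Mb/s */
}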
8315 static int bnx2x_set_settings(struct net_device
*dev
, struct ethtool_cmd
*cmd
)
8317 struct bnx2x
*bp
= netdev_priv(dev
);
8323 DP(NETIF_MSG_LINK
, "ethtool_cmd: cmd %d\n"
8324 DP_LEVEL
" supported 0x%x advertising 0x%x speed %d\n"
8325 DP_LEVEL
" duplex %d port %d phy_address %d transceiver %d\n"
8326 DP_LEVEL
" autoneg %d maxtxpkt %d maxrxpkt %d\n",
8327 cmd
->cmd
, cmd
->supported
, cmd
->advertising
, cmd
->speed
,
8328 cmd
->duplex
, cmd
->port
, cmd
->phy_address
, cmd
->transceiver
,
8329 cmd
->autoneg
, cmd
->maxtxpkt
, cmd
->maxrxpkt
);
8331 if (cmd
->autoneg
== AUTONEG_ENABLE
) {
8332 if (!(bp
->port
.supported
& SUPPORTED_Autoneg
)) {
8333 DP(NETIF_MSG_LINK
, "Autoneg not supported\n");
8337 /* advertise the requested speed and duplex if supported */
8338 cmd
->advertising
&= bp
->port
.supported
;
8340 bp
->link_params
.req_line_speed
= SPEED_AUTO_NEG
;
8341 bp
->link_params
.req_duplex
= DUPLEX_FULL
;
8342 bp
->port
.advertising
|= (ADVERTISED_Autoneg
|
8345 } else { /* forced speed */
8346 /* advertise the requested speed and duplex if supported */
8347 switch (cmd
->speed
) {
8349 if (cmd
->duplex
== DUPLEX_FULL
) {
8350 if (!(bp
->port
.supported
&
8351 SUPPORTED_10baseT_Full
)) {
8353 "10M full not supported\n");
8357 advertising
= (ADVERTISED_10baseT_Full
|
8360 if (!(bp
->port
.supported
&
8361 SUPPORTED_10baseT_Half
)) {
8363 "10M half not supported\n");
8367 advertising
= (ADVERTISED_10baseT_Half
|
8373 if (cmd
->duplex
== DUPLEX_FULL
) {
8374 if (!(bp
->port
.supported
&
8375 SUPPORTED_100baseT_Full
)) {
8377 "100M full not supported\n");
8381 advertising
= (ADVERTISED_100baseT_Full
|
8384 if (!(bp
->port
.supported
&
8385 SUPPORTED_100baseT_Half
)) {
8387 "100M half not supported\n");
8391 advertising
= (ADVERTISED_100baseT_Half
|
8397 if (cmd
->duplex
!= DUPLEX_FULL
) {
8398 DP(NETIF_MSG_LINK
, "1G half not supported\n");
8402 if (!(bp
->port
.supported
& SUPPORTED_1000baseT_Full
)) {
8403 DP(NETIF_MSG_LINK
, "1G full not supported\n");
8407 advertising
= (ADVERTISED_1000baseT_Full
|
8412 if (cmd
->duplex
!= DUPLEX_FULL
) {
8414 "2.5G half not supported\n");
8418 if (!(bp
->port
.supported
& SUPPORTED_2500baseX_Full
)) {
8420 "2.5G full not supported\n");
8424 advertising
= (ADVERTISED_2500baseX_Full
|
8429 if (cmd
->duplex
!= DUPLEX_FULL
) {
8430 DP(NETIF_MSG_LINK
, "10G half not supported\n");
8434 if (!(bp
->port
.supported
& SUPPORTED_10000baseT_Full
)) {
8435 DP(NETIF_MSG_LINK
, "10G full not supported\n");
8439 advertising
= (ADVERTISED_10000baseT_Full
|
8444 DP(NETIF_MSG_LINK
, "Unsupported speed\n");
8448 bp
->link_params
.req_line_speed
= cmd
->speed
;
8449 bp
->link_params
.req_duplex
= cmd
->duplex
;
8450 bp
->port
.advertising
= advertising
;
8453 DP(NETIF_MSG_LINK
, "req_line_speed %d\n"
8454 DP_LEVEL
" req_duplex %d advertising 0x%x\n",
8455 bp
->link_params
.req_line_speed
, bp
->link_params
.req_duplex
,
8456 bp
->port
.advertising
);
8458 if (netif_running(dev
)) {
8459 bnx2x_stats_handle(bp
, STATS_EVENT_STOP
);
8466 #define PHY_FW_VER_LEN 10
8468 static void bnx2x_get_drvinfo(struct net_device *dev,
8469                               struct ethtool_drvinfo *info)
8471     struct bnx2x *bp = netdev_priv(dev);
8472     u8 phy_fw_ver[PHY_FW_VER_LEN];
8474     strcpy(info->driver, DRV_MODULE_NAME);
8475     strcpy(info->version, DRV_MODULE_VERSION);
8477     phy_fw_ver[0] = '\0';
8479     bnx2x_acquire_phy_lock(bp);
8480     bnx2x_get_ext_phy_fw_version(&bp->link_params,
8481                                  (bp->state != BNX2X_STATE_CLOSED),
8482                                  phy_fw_ver, PHY_FW_VER_LEN);
8483     bnx2x_release_phy_lock(bp);
8486     snprintf(info->fw_version, 32, "BC:%d.%d.%d%s%s",
8487              (bp->common.bc_ver & 0xff0000) >> 16,
8488              (bp->common.bc_ver & 0xff00) >> 8,
8489              (bp->common.bc_ver & 0xff),
8490              ((phy_fw_ver[0] != '\0') ? " PHY:" : ""), phy_fw_ver);
8491     strcpy(info->bus_info, pci_name(bp->pdev));
8492     info->n_stats = BNX2X_NUM_STATS;
8493     info->testinfo_len = BNX2X_NUM_TESTS;
8494     info->eedump_len = bp->common.flash_size;
8495     info->regdump_len = 0;
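/*
 * Minimal sketch (editorial, not driver code): how the packed bootcode
 * version formatted by the snprintf() above decodes.  bc_ver carries one
 * version component per byte, so e.g. 0x040200 reads as "BC:4.2.0"
 * (BNX2X_BC_VER uses the same layout).  The helper name is hypothetical.
 */
static inline void bnx2x_example_decode_bc_ver(u32 bc_ver, u8 *maj, u8 *min,
                                               u8 *rev)
{
    *maj = (bc_ver & 0xff0000) >> 16;
    *min = (bc_ver & 0xff00) >> 8;
    *rev = (bc_ver & 0xff);
}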
8498 #define IS_E1_ONLINE(info)  (((info) & RI_E1_ONLINE) == RI_E1_ONLINE)
8499 #define IS_E1H_ONLINE(info) (((info) & RI_E1H_ONLINE) == RI_E1H_ONLINE)
8501 static int bnx2x_get_regs_len(struct net_device *dev)
8503     static u32 regdump_len;
8504     struct bnx2x *bp = netdev_priv(dev);
8510     if (CHIP_IS_E1(bp)) {
8511         for (i = 0; i < REGS_COUNT; i++)
8512             if (IS_E1_ONLINE(reg_addrs[i].info))
8513                 regdump_len += reg_addrs[i].size;
8515         for (i = 0; i < WREGS_COUNT_E1; i++)
8516             if (IS_E1_ONLINE(wreg_addrs_e1[i].info))
8517                 regdump_len += wreg_addrs_e1[i].size *
8518                                (1 + wreg_addrs_e1[i].read_regs_count);
8521         for (i = 0; i < REGS_COUNT; i++)
8522             if (IS_E1H_ONLINE(reg_addrs[i].info))
8523                 regdump_len += reg_addrs[i].size;
8525         for (i = 0; i < WREGS_COUNT_E1H; i++)
8526             if (IS_E1H_ONLINE(wreg_addrs_e1h[i].info))
8527                 regdump_len += wreg_addrs_e1h[i].size *
8528                                (1 + wreg_addrs_e1h[i].read_regs_count);
8531     regdump_len += sizeof(struct dump_hdr);
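/*
 * Minimal sketch (editorial, not driver code): how one "wide register"
 * entry contributes to the dump length accumulated above.  A plain entry
 * adds its size; a wreg entry is expanded once per paged read, hence
 * size * (1 + read_regs_count).  The struct and helper names below are
 * hypothetical stand-ins for the tables declared in bnx2x_dump.h.
 */
struct bnx2x_example_wreg {
    u32 size;               /* registers described by this entry */
    u32 read_regs_count;    /* additional paged reads per register */
};

static inline u32 bnx2x_example_wreg_words(const struct bnx2x_example_wreg *w)
{
    return w->size * (1 + w->read_regs_count);
}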
8536 static void bnx2x_get_regs(struct net_device
*dev
,
8537 struct ethtool_regs
*regs
, void *_p
)
8540 struct bnx2x
*bp
= netdev_priv(dev
);
8541 struct dump_hdr dump_hdr
= {0};
8544 memset(p
, 0, regs
->len
);
8546 if (!netif_running(bp
->dev
))
8549 dump_hdr
.hdr_size
= (sizeof(struct dump_hdr
) / 4) - 1;
8550 dump_hdr
.dump_sign
= dump_sign_all
;
8551 dump_hdr
.xstorm_waitp
= REG_RD(bp
, XSTORM_WAITP_ADDR
);
8552 dump_hdr
.tstorm_waitp
= REG_RD(bp
, TSTORM_WAITP_ADDR
);
8553 dump_hdr
.ustorm_waitp
= REG_RD(bp
, USTORM_WAITP_ADDR
);
8554 dump_hdr
.cstorm_waitp
= REG_RD(bp
, CSTORM_WAITP_ADDR
);
8555 dump_hdr
.info
= CHIP_IS_E1(bp
) ? RI_E1_ONLINE
: RI_E1H_ONLINE
;
8557 memcpy(p
, &dump_hdr
, sizeof(struct dump_hdr
));
8558 p
+= dump_hdr
.hdr_size
+ 1;
8560 if (CHIP_IS_E1(bp
)) {
8561 for (i
= 0; i
< REGS_COUNT
; i
++)
8562 if (IS_E1_ONLINE(reg_addrs
[i
].info
))
8563 for (j
= 0; j
< reg_addrs
[i
].size
; j
++)
8565 reg_addrs
[i
].addr
+ j
*4);
8568 for (i
= 0; i
< REGS_COUNT
; i
++)
8569 if (IS_E1H_ONLINE(reg_addrs
[i
].info
))
8570 for (j
= 0; j
< reg_addrs
[i
].size
; j
++)
8572 reg_addrs
[i
].addr
+ j
*4);
8576 static void bnx2x_get_wol(struct net_device
*dev
, struct ethtool_wolinfo
*wol
)
8578 struct bnx2x
*bp
= netdev_priv(dev
);
8580 if (bp
->flags
& NO_WOL_FLAG
) {
8584 wol
->supported
= WAKE_MAGIC
;
8586 wol
->wolopts
= WAKE_MAGIC
;
8590 memset(&wol
->sopass
, 0, sizeof(wol
->sopass
));
8593 static int bnx2x_set_wol(struct net_device
*dev
, struct ethtool_wolinfo
*wol
)
8595 struct bnx2x
*bp
= netdev_priv(dev
);
8597 if (wol
->wolopts
& ~WAKE_MAGIC
)
8600 if (wol
->wolopts
& WAKE_MAGIC
) {
8601 if (bp
->flags
& NO_WOL_FLAG
)
8611 static u32 bnx2x_get_msglevel(struct net_device *dev)
8613     struct bnx2x *bp = netdev_priv(dev);
8615     return bp->msglevel;
8618 static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
8620     struct bnx2x *bp = netdev_priv(dev);
8622     if (capable(CAP_NET_ADMIN))
8623         bp->msglevel = level;
8626 static int bnx2x_nway_reset(struct net_device *dev)
8628     struct bnx2x *bp = netdev_priv(dev);
8633     if (netif_running(dev)) {
8634         bnx2x_stats_handle(bp, STATS_EVENT_STOP);
8642 bnx2x_get_link(struct net_device *dev)
8644     struct bnx2x *bp = netdev_priv(dev);
8646     return bp->link_vars.link_up;
8649 static int bnx2x_get_eeprom_len(struct net_device *dev)
8651     struct bnx2x *bp = netdev_priv(dev);
8653     return bp->common.flash_size;
8656 static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
8658     int port = BP_PORT(bp);
8662     /* adjust timeout for emulation/FPGA */
8663     count = NVRAM_TIMEOUT_COUNT;
8664     if (CHIP_REV_IS_SLOW(bp))
8667     /* request access to nvram interface */
8668     REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
8669            (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));
8671     for (i = 0; i < count*10; i++) {
8672         val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
8673         if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))
8679     if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
8680         DP(BNX2X_MSG_NVM, "cannot get access to nvram interface\n");
8687 static int bnx2x_release_nvram_lock(struct bnx2x *bp)
8689     int port = BP_PORT(bp);
8693     /* adjust timeout for emulation/FPGA */
8694     count = NVRAM_TIMEOUT_COUNT;
8695     if (CHIP_REV_IS_SLOW(bp))
8698     /* relinquish nvram interface */
8699     REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
8700            (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));
8702     for (i = 0; i < count*10; i++) {
8703         val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
8704         if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)))
8710     if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
8711         DP(BNX2X_MSG_NVM, "cannot free access to nvram interface\n");
8718 static void bnx2x_enable_nvram_access(struct bnx2x *bp)
8722     val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
8724     /* enable both bits, even on read */
8725     REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
8726            (val | MCPR_NVM_ACCESS_ENABLE_EN |
8727             MCPR_NVM_ACCESS_ENABLE_WR_EN));
8730 static void bnx2x_disable_nvram_access(struct bnx2x *bp)
8734     val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
8736     /* disable both bits, even after read */
8737     REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
8738            (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
8739                     MCPR_NVM_ACCESS_ENABLE_WR_EN)));
8742 static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, __be32 *ret_val,
8748     /* build the command word */
8749     cmd_flags |= MCPR_NVM_COMMAND_DOIT;
8751     /* need to clear DONE bit separately */
8752     REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
8754     /* address of the NVRAM to read from */
8755     REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
8756            (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
8758     /* issue a read command */
8759     REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
8761     /* adjust timeout for emulation/FPGA */
8762     count = NVRAM_TIMEOUT_COUNT;
8763     if (CHIP_REV_IS_SLOW(bp))
8766     /* wait for completion */
8769     for (i = 0; i < count; i++) {
8771         val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
8773         if (val & MCPR_NVM_COMMAND_DONE) {
8774             val = REG_RD(bp, MCP_REG_MCPR_NVM_READ);
8775             /* we read nvram data in cpu order
8776              * but ethtool sees it as an array of bytes
8777              * converting to big-endian will do the work */
8778             *ret_val = cpu_to_be32(val);
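/*
 * Illustrative sketch (editorial assumption, not driver code): why the
 * dword read from MCP_REG_MCPR_NVM_READ is stored with cpu_to_be32()
 * above.  ethtool expects the flash contents as a byte array in flash
 * order; storing the CPU-order value big-endian puts the byte at the
 * lowest flash offset first in memory regardless of host endianness.
 * The helper name is hypothetical.
 */
static inline void bnx2x_example_nvram_dword_to_bytes(u32 raw, u8 *out)
{
    __be32 be = cpu_to_be32(raw);

    memcpy(out, &be, sizeof(be));   /* out[0] == byte at offset + 0 */
}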
8787 static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
8794     if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
8796            "Invalid parameter: offset 0x%x buf_size 0x%x\n",
8801     if (offset + buf_size > bp->common.flash_size) {
8802         DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
8803                           " buf_size (0x%x) > flash_size (0x%x)\n",
8804            offset, buf_size, bp->common.flash_size);
8808     /* request access to nvram interface */
8809     rc = bnx2x_acquire_nvram_lock(bp);
8813     /* enable access to nvram interface */
8814     bnx2x_enable_nvram_access(bp);
8816     /* read the first word(s) */
8817     cmd_flags = MCPR_NVM_COMMAND_FIRST;
8818     while ((buf_size > sizeof(u32)) && (rc == 0)) {
8819         rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
8820         memcpy(ret_buf, &val, 4);
8822         /* advance to the next dword */
8823         offset += sizeof(u32);
8824         ret_buf += sizeof(u32);
8825         buf_size -= sizeof(u32);
8830     cmd_flags |= MCPR_NVM_COMMAND_LAST;
8831     rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
8832     memcpy(ret_buf, &val, 4);
8835     /* disable access to nvram interface */
8836     bnx2x_disable_nvram_access(bp);
8837     bnx2x_release_nvram_lock(bp);
8842 static int bnx2x_get_eeprom(struct net_device
*dev
,
8843 struct ethtool_eeprom
*eeprom
, u8
*eebuf
)
8845 struct bnx2x
*bp
= netdev_priv(dev
);
8848 if (!netif_running(dev
))
8851 DP(BNX2X_MSG_NVM
, "ethtool_eeprom: cmd %d\n"
8852 DP_LEVEL
" magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
8853 eeprom
->cmd
, eeprom
->magic
, eeprom
->offset
, eeprom
->offset
,
8854 eeprom
->len
, eeprom
->len
);
8856 /* parameters already validated in ethtool_get_eeprom */
8858 rc
= bnx2x_nvram_read(bp
, eeprom
->offset
, eebuf
, eeprom
->len
);
8863 static int bnx2x_nvram_write_dword(struct bnx2x
*bp
, u32 offset
, u32 val
,
8868 /* build the command word */
8869 cmd_flags
|= MCPR_NVM_COMMAND_DOIT
| MCPR_NVM_COMMAND_WR
;
8871 /* need to clear DONE bit separately */
8872 REG_WR(bp
, MCP_REG_MCPR_NVM_COMMAND
, MCPR_NVM_COMMAND_DONE
);
8874 /* write the data */
8875 REG_WR(bp
, MCP_REG_MCPR_NVM_WRITE
, val
);
8877 /* address of the NVRAM to write to */
8878 REG_WR(bp
, MCP_REG_MCPR_NVM_ADDR
,
8879 (offset
& MCPR_NVM_ADDR_NVM_ADDR_VALUE
));
8881 /* issue the write command */
8882 REG_WR(bp
, MCP_REG_MCPR_NVM_COMMAND
, cmd_flags
);
8884 /* adjust timeout for emulation/FPGA */
8885 count
= NVRAM_TIMEOUT_COUNT
;
8886 if (CHIP_REV_IS_SLOW(bp
))
8889 /* wait for completion */
8891 for (i
= 0; i
< count
; i
++) {
8893 val
= REG_RD(bp
, MCP_REG_MCPR_NVM_COMMAND
);
8894 if (val
& MCPR_NVM_COMMAND_DONE
) {
8903 #define BYTE_OFFSET(offset)  (8 * (offset & 0x03))
8905 static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
8913     if (offset + buf_size > bp->common.flash_size) {
8914         DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
8915                           " buf_size (0x%x) > flash_size (0x%x)\n",
8916            offset, buf_size, bp->common.flash_size);
8920     /* request access to nvram interface */
8921     rc = bnx2x_acquire_nvram_lock(bp);
8925     /* enable access to nvram interface */
8926     bnx2x_enable_nvram_access(bp);
8928     cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
8929     align_offset = (offset & ~0x03);
8930     rc = bnx2x_nvram_read_dword(bp, align_offset, &val, cmd_flags);
8933         val &= ~(0xff << BYTE_OFFSET(offset));
8934         val |= (*data_buf << BYTE_OFFSET(offset));
8936         /* nvram data is returned as an array of bytes
8937          * convert it back to cpu order */
8938         val = be32_to_cpu(val);
8940         rc = bnx2x_nvram_write_dword(bp, align_offset, val,
8944     /* disable access to nvram interface */
8945     bnx2x_disable_nvram_access(bp);
8946     bnx2x_release_nvram_lock(bp);
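/*
 * Minimal sketch (editorial): the read-modify-write bnx2x_nvram_write1()
 * performs above for a single byte.  BYTE_OFFSET() gives the bit position
 * of the byte inside its dword-aligned flash word, so only that byte of
 * the word read back from flash is replaced before the word is written
 * again.  The helper name is hypothetical.
 */
static inline u32 bnx2x_example_patch_byte(u32 dword, u32 offset, u8 byte)
{
    dword &= ~(0xff << BYTE_OFFSET(offset));        /* clear the old byte */
    dword |= ((u32)byte << BYTE_OFFSET(offset));    /* insert the new one */
    return dword;
}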
8951 static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
8959     if (buf_size == 1)  /* ethtool */
8960         return bnx2x_nvram_write1(bp, offset, data_buf, buf_size);
8962     if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
8964            "Invalid parameter: offset 0x%x buf_size 0x%x\n",
8969     if (offset + buf_size > bp->common.flash_size) {
8970         DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
8971                           " buf_size (0x%x) > flash_size (0x%x)\n",
8972            offset, buf_size, bp->common.flash_size);
8976     /* request access to nvram interface */
8977     rc = bnx2x_acquire_nvram_lock(bp);
8981     /* enable access to nvram interface */
8982     bnx2x_enable_nvram_access(bp);
8985     cmd_flags = MCPR_NVM_COMMAND_FIRST;
8986     while ((written_so_far < buf_size) && (rc == 0)) {
8987         if (written_so_far == (buf_size - sizeof(u32)))
8988             cmd_flags |= MCPR_NVM_COMMAND_LAST;
8989         else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0)
8990             cmd_flags |= MCPR_NVM_COMMAND_LAST;
8991         else if ((offset % NVRAM_PAGE_SIZE) == 0)
8992             cmd_flags |= MCPR_NVM_COMMAND_FIRST;
8994         memcpy(&val, data_buf, 4);
8996         rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);
8998         /* advance to the next dword */
8999         offset += sizeof(u32);
9000         data_buf += sizeof(u32);
9001         written_so_far += sizeof(u32);
9005     /* disable access to nvram interface */
9006     bnx2x_disable_nvram_access(bp);
9007     bnx2x_release_nvram_lock(bp);
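/*
 * Illustrative sketch (editorial, simplified): the per-dword command
 * flags the write loop above computes.  FIRST marks the first dword of
 * the buffer or of a flash page, LAST the final dword of the buffer or
 * of a page; a dword can carry both.  NVRAM_PAGE_SIZE and the command
 * bits are the driver's existing constants; the helper name is
 * hypothetical and the real loop also carries flags across iterations.
 */
static inline u32 bnx2x_example_nvram_cmd_flags(u32 offset, u32 written,
                                                u32 buf_size)
{
    u32 flags = 0;

    if (written == 0 || (offset % NVRAM_PAGE_SIZE) == 0)
        flags |= MCPR_NVM_COMMAND_FIRST;    /* start of buffer or page */
    if (written == (buf_size - sizeof(u32)) ||
        ((offset + 4) % NVRAM_PAGE_SIZE) == 0)
        flags |= MCPR_NVM_COMMAND_LAST;     /* end of buffer or page */

    return flags;
}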
9012 static int bnx2x_set_eeprom(struct net_device
*dev
,
9013 struct ethtool_eeprom
*eeprom
, u8
*eebuf
)
9015 struct bnx2x
*bp
= netdev_priv(dev
);
9018 if (!netif_running(dev
))
9021 DP(BNX2X_MSG_NVM
, "ethtool_eeprom: cmd %d\n"
9022 DP_LEVEL
" magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
9023 eeprom
->cmd
, eeprom
->magic
, eeprom
->offset
, eeprom
->offset
,
9024 eeprom
->len
, eeprom
->len
);
9026 /* parameters already validated in ethtool_set_eeprom */
9028 /* If the magic number is PHY (0x00504859) upgrade the PHY FW */
9029 if (eeprom
->magic
== 0x00504859)
9032 bnx2x_acquire_phy_lock(bp
);
9033 rc
= bnx2x_flash_download(bp
, BP_PORT(bp
),
9034 bp
->link_params
.ext_phy_config
,
9035 (bp
->state
!= BNX2X_STATE_CLOSED
),
9036 eebuf
, eeprom
->len
);
9037 if ((bp
->state
== BNX2X_STATE_OPEN
) ||
9038 (bp
->state
== BNX2X_STATE_DISABLED
)) {
9039 rc
|= bnx2x_link_reset(&bp
->link_params
,
9041 rc
|= bnx2x_phy_init(&bp
->link_params
,
9044 bnx2x_release_phy_lock(bp
);
9046 } else /* Only the PMF can access the PHY */
9049 rc
= bnx2x_nvram_write(bp
, eeprom
->offset
, eebuf
, eeprom
->len
);
9054 static int bnx2x_get_coalesce(struct net_device
*dev
,
9055 struct ethtool_coalesce
*coal
)
9057 struct bnx2x
*bp
= netdev_priv(dev
);
9059 memset(coal
, 0, sizeof(struct ethtool_coalesce
));
9061 coal
->rx_coalesce_usecs
= bp
->rx_ticks
;
9062 coal
->tx_coalesce_usecs
= bp
->tx_ticks
;
9067 static int bnx2x_set_coalesce(struct net_device
*dev
,
9068 struct ethtool_coalesce
*coal
)
9070 struct bnx2x
*bp
= netdev_priv(dev
);
9072 bp
->rx_ticks
= (u16
) coal
->rx_coalesce_usecs
;
9073 if (bp
->rx_ticks
> BNX2X_MAX_COALESCE_TOUT
)
9074 bp
->rx_ticks
= BNX2X_MAX_COALESCE_TOUT
;
9076 bp
->tx_ticks
= (u16
) coal
->tx_coalesce_usecs
;
9077 if (bp
->tx_ticks
> BNX2X_MAX_COALESCE_TOUT
)
9078 bp
->tx_ticks
= BNX2X_MAX_COALESCE_TOUT
;
9080 if (netif_running(dev
))
9081 bnx2x_update_coalesce(bp
);
9086 static void bnx2x_get_ringparam(struct net_device
*dev
,
9087 struct ethtool_ringparam
*ering
)
9089 struct bnx2x
*bp
= netdev_priv(dev
);
9091 ering
->rx_max_pending
= MAX_RX_AVAIL
;
9092 ering
->rx_mini_max_pending
= 0;
9093 ering
->rx_jumbo_max_pending
= 0;
9095 ering
->rx_pending
= bp
->rx_ring_size
;
9096 ering
->rx_mini_pending
= 0;
9097 ering
->rx_jumbo_pending
= 0;
9099 ering
->tx_max_pending
= MAX_TX_AVAIL
;
9100 ering
->tx_pending
= bp
->tx_ring_size
;
9103 static int bnx2x_set_ringparam(struct net_device
*dev
,
9104 struct ethtool_ringparam
*ering
)
9106 struct bnx2x
*bp
= netdev_priv(dev
);
9109 if ((ering
->rx_pending
> MAX_RX_AVAIL
) ||
9110 (ering
->tx_pending
> MAX_TX_AVAIL
) ||
9111 (ering
->tx_pending
<= MAX_SKB_FRAGS
+ 4))
9114 bp
->rx_ring_size
= ering
->rx_pending
;
9115 bp
->tx_ring_size
= ering
->tx_pending
;
9117 if (netif_running(dev
)) {
9118 bnx2x_nic_unload(bp
, UNLOAD_NORMAL
);
9119 rc
= bnx2x_nic_load(bp
, LOAD_NORMAL
);
9125 static void bnx2x_get_pauseparam(struct net_device
*dev
,
9126 struct ethtool_pauseparam
*epause
)
9128 struct bnx2x
*bp
= netdev_priv(dev
);
9130 epause
->autoneg
= (bp
->link_params
.req_flow_ctrl
==
9131 BNX2X_FLOW_CTRL_AUTO
) &&
9132 (bp
->link_params
.req_line_speed
== SPEED_AUTO_NEG
);
9134 epause
->rx_pause
= ((bp
->link_vars
.flow_ctrl
& BNX2X_FLOW_CTRL_RX
) ==
9135 BNX2X_FLOW_CTRL_RX
);
9136 epause
->tx_pause
= ((bp
->link_vars
.flow_ctrl
& BNX2X_FLOW_CTRL_TX
) ==
9137 BNX2X_FLOW_CTRL_TX
);
9139 DP(NETIF_MSG_LINK
, "ethtool_pauseparam: cmd %d\n"
9140 DP_LEVEL
" autoneg %d rx_pause %d tx_pause %d\n",
9141 epause
->cmd
, epause
->autoneg
, epause
->rx_pause
, epause
->tx_pause
);
9144 static int bnx2x_set_pauseparam(struct net_device
*dev
,
9145 struct ethtool_pauseparam
*epause
)
9147 struct bnx2x
*bp
= netdev_priv(dev
);
9152 DP(NETIF_MSG_LINK
, "ethtool_pauseparam: cmd %d\n"
9153 DP_LEVEL
" autoneg %d rx_pause %d tx_pause %d\n",
9154 epause
->cmd
, epause
->autoneg
, epause
->rx_pause
, epause
->tx_pause
);
9156 bp
->link_params
.req_flow_ctrl
= BNX2X_FLOW_CTRL_AUTO
;
9158 if (epause
->rx_pause
)
9159 bp
->link_params
.req_flow_ctrl
|= BNX2X_FLOW_CTRL_RX
;
9161 if (epause
->tx_pause
)
9162 bp
->link_params
.req_flow_ctrl
|= BNX2X_FLOW_CTRL_TX
;
9164 if (bp
->link_params
.req_flow_ctrl
== BNX2X_FLOW_CTRL_AUTO
)
9165 bp
->link_params
.req_flow_ctrl
= BNX2X_FLOW_CTRL_NONE
;
9167 if (epause
->autoneg
) {
9168 if (!(bp
->port
.supported
& SUPPORTED_Autoneg
)) {
9169 DP(NETIF_MSG_LINK
, "autoneg not supported\n");
9173 if (bp
->link_params
.req_line_speed
== SPEED_AUTO_NEG
)
9174 bp
->link_params
.req_flow_ctrl
= BNX2X_FLOW_CTRL_AUTO
;
9178 "req_flow_ctrl 0x%x\n", bp
->link_params
.req_flow_ctrl
);
9180 if (netif_running(dev
)) {
9181 bnx2x_stats_handle(bp
, STATS_EVENT_STOP
);
9188 static int bnx2x_set_flags(struct net_device
*dev
, u32 data
)
9190 struct bnx2x
*bp
= netdev_priv(dev
);
9194 /* TPA requires Rx CSUM offloading */
9195 if ((data
& ETH_FLAG_LRO
) && bp
->rx_csum
) {
9196 if (!(dev
->features
& NETIF_F_LRO
)) {
9197 dev
->features
|= NETIF_F_LRO
;
9198 bp
->flags
|= TPA_ENABLE_FLAG
;
9202 } else if (dev
->features
& NETIF_F_LRO
) {
9203 dev
->features
&= ~NETIF_F_LRO
;
9204 bp
->flags
&= ~TPA_ENABLE_FLAG
;
9208 if (changed
&& netif_running(dev
)) {
9209 bnx2x_nic_unload(bp
, UNLOAD_NORMAL
);
9210 rc
= bnx2x_nic_load(bp
, LOAD_NORMAL
);
9216 static u32
bnx2x_get_rx_csum(struct net_device
*dev
)
9218 struct bnx2x
*bp
= netdev_priv(dev
);
9223 static int bnx2x_set_rx_csum(struct net_device
*dev
, u32 data
)
9225 struct bnx2x
*bp
= netdev_priv(dev
);
9230 /* Disable TPA, when Rx CSUM is disabled. Otherwise all
9231 TPA'ed packets will be discarded due to wrong TCP CSUM */
9233 u32 flags
= ethtool_op_get_flags(dev
);
9235 rc
= bnx2x_set_flags(dev
, (flags
& ~ETH_FLAG_LRO
));
9241 static int bnx2x_set_tso(struct net_device
*dev
, u32 data
)
9244 dev
->features
|= (NETIF_F_TSO
| NETIF_F_TSO_ECN
);
9245 dev
->features
|= NETIF_F_TSO6
;
9247 dev
->features
&= ~(NETIF_F_TSO
| NETIF_F_TSO_ECN
);
9248 dev
->features
&= ~NETIF_F_TSO6
;
9254 static const struct {
9255 char string
[ETH_GSTRING_LEN
];
9256 } bnx2x_tests_str_arr
[BNX2X_NUM_TESTS
] = {
9257 { "register_test (offline)" },
9258 { "memory_test (offline)" },
9259 { "loopback_test (offline)" },
9260 { "nvram_test (online)" },
9261 { "interrupt_test (online)" },
9262 { "link_test (online)" },
9263 { "idle check (online)" }
9266 static int bnx2x_self_test_count(struct net_device
*dev
)
9268 return BNX2X_NUM_TESTS
;
9271 static int bnx2x_test_registers(struct bnx2x
*bp
)
9273 int idx
, i
, rc
= -ENODEV
;
9275 int port
= BP_PORT(bp
);
9276 static const struct {
9281 /* 0 */ { BRB1_REG_PAUSE_LOW_THRESHOLD_0
, 4, 0x000003ff },
9282 { DORQ_REG_DB_ADDR0
, 4, 0xffffffff },
9283 { HC_REG_AGG_INT_0
, 4, 0x000003ff },
9284 { PBF_REG_MAC_IF0_ENABLE
, 4, 0x00000001 },
9285 { PBF_REG_P0_INIT_CRD
, 4, 0x000007ff },
9286 { PRS_REG_CID_PORT_0
, 4, 0x00ffffff },
9287 { PXP2_REG_PSWRQ_CDU0_L2P
, 4, 0x000fffff },
9288 { PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR
, 8, 0x0003ffff },
9289 { PXP2_REG_PSWRQ_TM0_L2P
, 4, 0x000fffff },
9290 { PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR
, 8, 0x0003ffff },
9291 /* 10 */ { PXP2_REG_PSWRQ_TSDM0_L2P
, 4, 0x000fffff },
9292 { QM_REG_CONNNUM_0
, 4, 0x000fffff },
9293 { TM_REG_LIN0_MAX_ACTIVE_CID
, 4, 0x0003ffff },
9294 { SRC_REG_KEYRSS0_0
, 40, 0xffffffff },
9295 { SRC_REG_KEYRSS0_7
, 40, 0xffffffff },
9296 { XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00
, 4, 0x00000001 },
9297 { XCM_REG_WU_DA_CNT_CMD00
, 4, 0x00000003 },
9298 { XCM_REG_GLB_DEL_ACK_MAX_CNT_0
, 4, 0x000000ff },
9299 { NIG_REG_EGRESS_MNG0_FIFO
, 20, 0xffffffff },
9300 { NIG_REG_LLH0_T_BIT
, 4, 0x00000001 },
9301 /* 20 */ { NIG_REG_EMAC0_IN_EN
, 4, 0x00000001 },
9302 { NIG_REG_BMAC0_IN_EN
, 4, 0x00000001 },
9303 { NIG_REG_XCM0_OUT_EN
, 4, 0x00000001 },
9304 { NIG_REG_BRB0_OUT_EN
, 4, 0x00000001 },
9305 { NIG_REG_LLH0_XCM_MASK
, 4, 0x00000007 },
9306 { NIG_REG_LLH0_ACPI_PAT_6_LEN
, 68, 0x000000ff },
9307 { NIG_REG_LLH0_ACPI_PAT_0_CRC
, 68, 0xffffffff },
9308 { NIG_REG_LLH0_DEST_MAC_0_0
, 160, 0xffffffff },
9309 { NIG_REG_LLH0_DEST_IP_0_1
, 160, 0xffffffff },
9310 { NIG_REG_LLH0_IPV4_IPV6_0
, 160, 0x00000001 },
9311 /* 30 */ { NIG_REG_LLH0_DEST_UDP_0
, 160, 0x0000ffff },
9312 { NIG_REG_LLH0_DEST_TCP_0
, 160, 0x0000ffff },
9313 { NIG_REG_LLH0_VLAN_ID_0
, 160, 0x00000fff },
9314 { NIG_REG_XGXS_SERDES0_MODE_SEL
, 4, 0x00000001 },
9315 { NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0
, 4, 0x00000001 },
9316 { NIG_REG_STATUS_INTERRUPT_PORT0
, 4, 0x07ffffff },
9317 { NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST
, 24, 0x00000001 },
9318 { NIG_REG_SERDES0_CTRL_PHY_ADDR
, 16, 0x0000001f },
9320 { 0xffffffff, 0, 0x00000000 }
9323 if (!netif_running(bp
->dev
))
9326 /* Repeat the test twice:
9327 First by writing 0x00000000, second by writing 0xffffffff */
9328 for (idx
= 0; idx
< 2; idx
++) {
9335 wr_val
= 0xffffffff;
9339 for (i
= 0; reg_tbl
[i
].offset0
!= 0xffffffff; i
++) {
9340 u32 offset
, mask
, save_val
, val
;
9342 offset
= reg_tbl
[i
].offset0
+ port
*reg_tbl
[i
].offset1
;
9343 mask
= reg_tbl
[i
].mask
;
9345 save_val
= REG_RD(bp
, offset
);
9347 REG_WR(bp
, offset
, wr_val
);
9348 val
= REG_RD(bp
, offset
);
9350 /* Restore the original register's value */
9351 REG_WR(bp
, offset
, save_val
);
9353 /* verify that value is as expected value */
9354 if ((val
& mask
) != (wr_val
& mask
))
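/*
 * Minimal sketch (editorial): the write/read-back/restore pattern used by
 * bnx2x_test_registers() above for one register.  Only the bits in 'mask'
 * are compared, since reserved bits may read back differently.  Uses
 * REG_RD/REG_WR as elsewhere in this file; the helper name and the
 * -ENODEV convention mirror the surrounding test code.
 */
static inline int bnx2x_example_reg_selftest(struct bnx2x *bp, u32 offset,
                                             u32 mask, u32 wr_val)
{
    u32 save_val = REG_RD(bp, offset);
    u32 val;

    REG_WR(bp, offset, wr_val);
    val = REG_RD(bp, offset);
    REG_WR(bp, offset, save_val);       /* restore the original value */

    return ((val & mask) != (wr_val & mask)) ? -ENODEV : 0;
}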
9365 static int bnx2x_test_memory(struct bnx2x
*bp
)
9367 int i
, j
, rc
= -ENODEV
;
9369 static const struct {
9373 { CCM_REG_XX_DESCR_TABLE
, CCM_REG_XX_DESCR_TABLE_SIZE
},
9374 { CFC_REG_ACTIVITY_COUNTER
, CFC_REG_ACTIVITY_COUNTER_SIZE
},
9375 { CFC_REG_LINK_LIST
, CFC_REG_LINK_LIST_SIZE
},
9376 { DMAE_REG_CMD_MEM
, DMAE_REG_CMD_MEM_SIZE
},
9377 { TCM_REG_XX_DESCR_TABLE
, TCM_REG_XX_DESCR_TABLE_SIZE
},
9378 { UCM_REG_XX_DESCR_TABLE
, UCM_REG_XX_DESCR_TABLE_SIZE
},
9379 { XCM_REG_XX_DESCR_TABLE
, XCM_REG_XX_DESCR_TABLE_SIZE
},
9383 static const struct {
9389 { "CCM_PRTY_STS", CCM_REG_CCM_PRTY_STS
, 0x3ffc0, 0 },
9390 { "CFC_PRTY_STS", CFC_REG_CFC_PRTY_STS
, 0x2, 0x2 },
9391 { "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS
, 0, 0 },
9392 { "TCM_PRTY_STS", TCM_REG_TCM_PRTY_STS
, 0x3ffc0, 0 },
9393 { "UCM_PRTY_STS", UCM_REG_UCM_PRTY_STS
, 0x3ffc0, 0 },
9394 { "XCM_PRTY_STS", XCM_REG_XCM_PRTY_STS
, 0x3ffc1, 0 },
9396 { NULL
, 0xffffffff, 0, 0 }
9399 if (!netif_running(bp
->dev
))
9402 /* Go through all the memories */
9403 for (i
= 0; mem_tbl
[i
].offset
!= 0xffffffff; i
++)
9404 for (j
= 0; j
< mem_tbl
[i
].size
; j
++)
9405 REG_RD(bp
, mem_tbl
[i
].offset
+ j
*4);
9407 /* Check the parity status */
9408 for (i
= 0; prty_tbl
[i
].offset
!= 0xffffffff; i
++) {
9409 val
= REG_RD(bp
, prty_tbl
[i
].offset
);
9410 if ((CHIP_IS_E1(bp
) && (val
& ~(prty_tbl
[i
].e1_mask
))) ||
9411 (CHIP_IS_E1H(bp
) && (val
& ~(prty_tbl
[i
].e1h_mask
)))) {
9413 "%s is 0x%x\n", prty_tbl
[i
].name
, val
);
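/*
 * Illustrative sketch (editorial): the pass condition behind the parity
 * check above.  Bits listed in the per-chip expected mask are ignored;
 * any other bit set in a parity status register makes the memory test
 * fail.  The helper name is hypothetical.
 */
static inline int bnx2x_example_prty_ok(u32 prty_sts, u32 expected_mask)
{
    return (prty_sts & ~expected_mask) == 0;
}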
9424 static void bnx2x_wait_for_link(struct bnx2x
*bp
, u8 link_up
)
9429 while (bnx2x_link_test(bp
) && cnt
--)
9433 static int bnx2x_run_loopback(struct bnx2x
*bp
, int loopback_mode
, u8 link_up
)
9435 unsigned int pkt_size
, num_pkts
, i
;
9436 struct sk_buff
*skb
;
9437 unsigned char *packet
;
9438 struct bnx2x_fastpath
*fp
= &bp
->fp
[0];
9439 u16 tx_start_idx
, tx_idx
;
9440 u16 rx_start_idx
, rx_idx
;
9442 struct sw_tx_bd
*tx_buf
;
9443 struct eth_tx_bd
*tx_bd
;
9445 union eth_rx_cqe
*cqe
;
9447 struct sw_rx_bd
*rx_buf
;
9451 /* check the loopback mode */
9452 switch (loopback_mode
) {
9453 case BNX2X_PHY_LOOPBACK
:
9454 if (bp
->link_params
.loopback_mode
!= LOOPBACK_XGXS_10
)
9457 case BNX2X_MAC_LOOPBACK
:
9458 bp
->link_params
.loopback_mode
= LOOPBACK_BMAC
;
9459 bnx2x_phy_init(&bp
->link_params
, &bp
->link_vars
);
9465 /* prepare the loopback packet */
9466 pkt_size
= (((bp
->dev
->mtu
< ETH_MAX_PACKET_SIZE
) ?
9467 bp
->dev
->mtu
: ETH_MAX_PACKET_SIZE
) + ETH_HLEN
);
9468 skb
= netdev_alloc_skb(bp
->dev
, bp
->rx_buf_size
);
9471 goto test_loopback_exit
;
9473 packet
= skb_put(skb
, pkt_size
);
9474 memcpy(packet
, bp
->dev
->dev_addr
, ETH_ALEN
);
9475 memset(packet
+ ETH_ALEN
, 0, (ETH_HLEN
- ETH_ALEN
));
9476 for (i
= ETH_HLEN
; i
< pkt_size
; i
++)
9477 packet
[i
] = (unsigned char) (i
& 0xff);
9479 /* send the loopback packet */
9481 tx_start_idx
= le16_to_cpu(*fp
->tx_cons_sb
);
9482 rx_start_idx
= le16_to_cpu(*fp
->rx_cons_sb
);
9484 pkt_prod
= fp
->tx_pkt_prod
++;
9485 tx_buf
= &fp
->tx_buf_ring
[TX_BD(pkt_prod
)];
9486 tx_buf
->first_bd
= fp
->tx_bd_prod
;
9489 tx_bd
= &fp
->tx_desc_ring
[TX_BD(fp
->tx_bd_prod
)];
9490 mapping
= pci_map_single(bp
->pdev
, skb
->data
,
9491 skb_headlen(skb
), PCI_DMA_TODEVICE
);
9492 tx_bd
->addr_hi
= cpu_to_le32(U64_HI(mapping
));
9493 tx_bd
->addr_lo
= cpu_to_le32(U64_LO(mapping
));
9494 tx_bd
->nbd
= cpu_to_le16(1);
9495 tx_bd
->nbytes
= cpu_to_le16(skb_headlen(skb
));
9496 tx_bd
->vlan
= cpu_to_le16(pkt_prod
);
9497 tx_bd
->bd_flags
.as_bitfield
= (ETH_TX_BD_FLAGS_START_BD
|
9498 ETH_TX_BD_FLAGS_END_BD
);
9499 tx_bd
->general_data
= ((UNICAST_ADDRESS
<<
9500 ETH_TX_BD_ETH_ADDR_TYPE_SHIFT
) | 1);
9504 le16_add_cpu(&fp
->hw_tx_prods
->bds_prod
, 1);
9505 mb(); /* FW restriction: must not reorder writing nbd and packets */
9506 le32_add_cpu(&fp
->hw_tx_prods
->packets_prod
, 1);
9507 DOORBELL(bp
, fp
->index
, 0);
9513 bp
->dev
->trans_start
= jiffies
;
9517 tx_idx
= le16_to_cpu(*fp
->tx_cons_sb
);
9518 if (tx_idx
!= tx_start_idx
+ num_pkts
)
9519 goto test_loopback_exit
;
9521 rx_idx
= le16_to_cpu(*fp
->rx_cons_sb
);
9522 if (rx_idx
!= rx_start_idx
+ num_pkts
)
9523 goto test_loopback_exit
;
9525 cqe
= &fp
->rx_comp_ring
[RCQ_BD(fp
->rx_comp_cons
)];
9526 cqe_fp_flags
= cqe
->fast_path_cqe
.type_error_flags
;
9527 if (CQE_TYPE(cqe_fp_flags
) || (cqe_fp_flags
& ETH_RX_ERROR_FALGS
))
9528 goto test_loopback_rx_exit
;
9530 len
= le16_to_cpu(cqe
->fast_path_cqe
.pkt_len
);
9531 if (len
!= pkt_size
)
9532 goto test_loopback_rx_exit
;
9534 rx_buf
= &fp
->rx_buf_ring
[RX_BD(fp
->rx_bd_cons
)];
9536 skb_reserve(skb
, cqe
->fast_path_cqe
.placement_offset
);
9537 for (i
= ETH_HLEN
; i
< pkt_size
; i
++)
9538 if (*(skb
->data
+ i
) != (unsigned char) (i
& 0xff))
9539 goto test_loopback_rx_exit
;
9543 test_loopback_rx_exit
:
9545 fp
->rx_bd_cons
= NEXT_RX_IDX(fp
->rx_bd_cons
);
9546 fp
->rx_bd_prod
= NEXT_RX_IDX(fp
->rx_bd_prod
);
9547 fp
->rx_comp_cons
= NEXT_RCQ_IDX(fp
->rx_comp_cons
);
9548 fp
->rx_comp_prod
= NEXT_RCQ_IDX(fp
->rx_comp_prod
);
9550 /* Update producers */
9551 bnx2x_update_rx_prod(bp
, fp
, fp
->rx_bd_prod
, fp
->rx_comp_prod
,
9555 bp
->link_params
.loopback_mode
= LOOPBACK_NONE
;
9560 static int bnx2x_test_loopback(struct bnx2x
*bp
, u8 link_up
)
9564 if (!netif_running(bp
->dev
))
9565 return BNX2X_LOOPBACK_FAILED
;
9567 bnx2x_netif_stop(bp
, 1);
9568 bnx2x_acquire_phy_lock(bp
);
9570 res
= bnx2x_run_loopback(bp
, BNX2X_PHY_LOOPBACK
, link_up
);
9572 DP(NETIF_MSG_PROBE
, " PHY loopback failed (res %d)\n", res
);
9573 rc
|= BNX2X_PHY_LOOPBACK_FAILED
;
9576 res
= bnx2x_run_loopback(bp
, BNX2X_MAC_LOOPBACK
, link_up
);
9578 DP(NETIF_MSG_PROBE
, " MAC loopback failed (res %d)\n", res
);
9579 rc
|= BNX2X_MAC_LOOPBACK_FAILED
;
9582 bnx2x_release_phy_lock(bp
);
9583 bnx2x_netif_start(bp
);
9588 #define CRC32_RESIDUAL 0xdebb20e3
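/*
 * Minimal sketch (editorial): what the residual constant above means as
 * used by bnx2x_test_nvram() below.  Each checked NVRAM region stores a
 * little-endian CRC-32 at its end; running ether_crc_le() over the whole
 * region, stored CRC included, yields 0xdebb20e3 exactly when the data
 * is intact.  The helper name is hypothetical.
 */
static inline int bnx2x_example_region_crc_ok(const u8 *region, u32 len)
{
    /* len includes the 4 trailing CRC bytes of the region */
    return ether_crc_le(len, region) == CRC32_RESIDUAL;
}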
9590 static int bnx2x_test_nvram(struct bnx2x
*bp
)
9592 static const struct {
9596 { 0, 0x14 }, /* bootstrap */
9597 { 0x14, 0xec }, /* dir */
9598 { 0x100, 0x350 }, /* manuf_info */
9599 { 0x450, 0xf0 }, /* feature_info */
9600 { 0x640, 0x64 }, /* upgrade_key_info */
9602 { 0x708, 0x70 }, /* manuf_key_info */
9606 __be32 buf
[0x350 / 4];
9607 u8
*data
= (u8
*)buf
;
9611 rc
= bnx2x_nvram_read(bp
, 0, data
, 4);
9613 DP(NETIF_MSG_PROBE
, "magic value read (rc %d)\n", rc
);
9614 goto test_nvram_exit
;
9617 magic
= be32_to_cpu(buf
[0]);
9618 if (magic
!= 0x669955aa) {
9619 DP(NETIF_MSG_PROBE
, "magic value (0x%08x)\n", magic
);
9621 goto test_nvram_exit
;
9624 for (i
= 0; nvram_tbl
[i
].size
; i
++) {
9626 rc
= bnx2x_nvram_read(bp
, nvram_tbl
[i
].offset
, data
,
9630 "nvram_tbl[%d] read data (rc %d)\n", i
, rc
);
9631 goto test_nvram_exit
;
9634 csum
= ether_crc_le(nvram_tbl
[i
].size
, data
);
9635 if (csum
!= CRC32_RESIDUAL
) {
9637 "nvram_tbl[%d] csum value (0x%08x)\n", i
, csum
);
9639 goto test_nvram_exit
;
9647 static int bnx2x_test_intr(struct bnx2x
*bp
)
9649 struct mac_configuration_cmd
*config
= bnx2x_sp(bp
, mac_config
);
9652 if (!netif_running(bp
->dev
))
9655 config
->hdr
.length
= 0;
9657 config
->hdr
.offset
= (BP_PORT(bp
) ? 32 : 0);
9659 config
->hdr
.offset
= BP_FUNC(bp
);
9660 config
->hdr
.client_id
= bp
->fp
->cl_id
;
9661 config
->hdr
.reserved1
= 0;
9663 rc
= bnx2x_sp_post(bp
, RAMROD_CMD_ID_ETH_SET_MAC
, 0,
9664 U64_HI(bnx2x_sp_mapping(bp
, mac_config
)),
9665 U64_LO(bnx2x_sp_mapping(bp
, mac_config
)), 0);
9667 bp
->set_mac_pending
++;
9668 for (i
= 0; i
< 10; i
++) {
9669 if (!bp
->set_mac_pending
)
9671 msleep_interruptible(10);
9680 static void bnx2x_self_test(struct net_device
*dev
,
9681 struct ethtool_test
*etest
, u64
*buf
)
9683 struct bnx2x
*bp
= netdev_priv(dev
);
9685 memset(buf
, 0, sizeof(u64
) * BNX2X_NUM_TESTS
);
9687 if (!netif_running(dev
))
9690 /* offline tests are not supported in MF mode */
9692 etest
->flags
&= ~ETH_TEST_FL_OFFLINE
;
9694 if (etest
->flags
& ETH_TEST_FL_OFFLINE
) {
9697 link_up
= bp
->link_vars
.link_up
;
9698 bnx2x_nic_unload(bp
, UNLOAD_NORMAL
);
9699 bnx2x_nic_load(bp
, LOAD_DIAG
);
9700 /* wait until link state is restored */
9701 bnx2x_wait_for_link(bp
, link_up
);
9703 if (bnx2x_test_registers(bp
) != 0) {
9705 etest
->flags
|= ETH_TEST_FL_FAILED
;
9707 if (bnx2x_test_memory(bp
) != 0) {
9709 etest
->flags
|= ETH_TEST_FL_FAILED
;
9711 buf
[2] = bnx2x_test_loopback(bp
, link_up
);
9713 etest
->flags
|= ETH_TEST_FL_FAILED
;
9715 bnx2x_nic_unload(bp
, UNLOAD_NORMAL
);
9716 bnx2x_nic_load(bp
, LOAD_NORMAL
);
9717 /* wait until link state is restored */
9718 bnx2x_wait_for_link(bp
, link_up
);
9720 if (bnx2x_test_nvram(bp
) != 0) {
9722 etest
->flags
|= ETH_TEST_FL_FAILED
;
9724 if (bnx2x_test_intr(bp
) != 0) {
9726 etest
->flags
|= ETH_TEST_FL_FAILED
;
9729 if (bnx2x_link_test(bp
) != 0) {
9731 etest
->flags
|= ETH_TEST_FL_FAILED
;
9734 #ifdef BNX2X_EXTRA_DEBUG
9735 bnx2x_panic_dump(bp
);
9739 static const struct {
9742 u8 string
[ETH_GSTRING_LEN
];
9743 } bnx2x_q_stats_arr
[BNX2X_NUM_Q_STATS
] = {
9744 /* 1 */ { Q_STATS_OFFSET32(total_bytes_received_hi
), 8, "[%d]: rx_bytes" },
9745 { Q_STATS_OFFSET32(error_bytes_received_hi
),
9746 8, "[%d]: rx_error_bytes" },
9747 { Q_STATS_OFFSET32(total_unicast_packets_received_hi
),
9748 8, "[%d]: rx_ucast_packets" },
9749 { Q_STATS_OFFSET32(total_multicast_packets_received_hi
),
9750 8, "[%d]: rx_mcast_packets" },
9751 { Q_STATS_OFFSET32(total_broadcast_packets_received_hi
),
9752 8, "[%d]: rx_bcast_packets" },
9753 { Q_STATS_OFFSET32(no_buff_discard_hi
), 8, "[%d]: rx_discards" },
9754 { Q_STATS_OFFSET32(rx_err_discard_pkt
),
9755 4, "[%d]: rx_phy_ip_err_discards"},
9756 { Q_STATS_OFFSET32(rx_skb_alloc_failed
),
9757 4, "[%d]: rx_skb_alloc_discard" },
9758 { Q_STATS_OFFSET32(hw_csum_err
), 4, "[%d]: rx_csum_offload_errors" },
9760 /* 10 */{ Q_STATS_OFFSET32(total_bytes_transmitted_hi
), 8, "[%d]: tx_bytes" },
9761 { Q_STATS_OFFSET32(total_unicast_packets_transmitted_hi
),
9762 8, "[%d]: tx_packets" }
9765 static const struct {
9769 #define STATS_FLAGS_PORT 1
9770 #define STATS_FLAGS_FUNC 2
9771 #define STATS_FLAGS_BOTH (STATS_FLAGS_FUNC | STATS_FLAGS_PORT)
9772 u8 string
[ETH_GSTRING_LEN
];
9773 } bnx2x_stats_arr
[BNX2X_NUM_STATS
] = {
9774 /* 1 */ { STATS_OFFSET32(total_bytes_received_hi
),
9775 8, STATS_FLAGS_BOTH
, "rx_bytes" },
9776 { STATS_OFFSET32(error_bytes_received_hi
),
9777 8, STATS_FLAGS_BOTH
, "rx_error_bytes" },
9778 { STATS_OFFSET32(total_unicast_packets_received_hi
),
9779 8, STATS_FLAGS_BOTH
, "rx_ucast_packets" },
9780 { STATS_OFFSET32(total_multicast_packets_received_hi
),
9781 8, STATS_FLAGS_BOTH
, "rx_mcast_packets" },
9782 { STATS_OFFSET32(total_broadcast_packets_received_hi
),
9783 8, STATS_FLAGS_BOTH
, "rx_bcast_packets" },
9784 { STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi
),
9785 8, STATS_FLAGS_PORT
, "rx_crc_errors" },
9786 { STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi
),
9787 8, STATS_FLAGS_PORT
, "rx_align_errors" },
9788 { STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi
),
9789 8, STATS_FLAGS_PORT
, "rx_undersize_packets" },
9790 { STATS_OFFSET32(etherstatsoverrsizepkts_hi
),
9791 8, STATS_FLAGS_PORT
, "rx_oversize_packets" },
9792 /* 10 */{ STATS_OFFSET32(rx_stat_etherstatsfragments_hi
),
9793 8, STATS_FLAGS_PORT
, "rx_fragments" },
9794 { STATS_OFFSET32(rx_stat_etherstatsjabbers_hi
),
9795 8, STATS_FLAGS_PORT
, "rx_jabbers" },
9796 { STATS_OFFSET32(no_buff_discard_hi
),
9797 8, STATS_FLAGS_BOTH
, "rx_discards" },
9798 { STATS_OFFSET32(mac_filter_discard
),
9799 4, STATS_FLAGS_PORT
, "rx_filtered_packets" },
9800 { STATS_OFFSET32(xxoverflow_discard
),
9801 4, STATS_FLAGS_PORT
, "rx_fw_discards" },
9802 { STATS_OFFSET32(brb_drop_hi
),
9803 8, STATS_FLAGS_PORT
, "rx_brb_discard" },
9804 { STATS_OFFSET32(brb_truncate_hi
),
9805 8, STATS_FLAGS_PORT
, "rx_brb_truncate" },
9806 { STATS_OFFSET32(pause_frames_received_hi
),
9807 8, STATS_FLAGS_PORT
, "rx_pause_frames" },
9808 { STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi
),
9809 8, STATS_FLAGS_PORT
, "rx_mac_ctrl_frames" },
9810 { STATS_OFFSET32(nig_timer_max
),
9811 4, STATS_FLAGS_PORT
, "rx_constant_pause_events" },
9812 /* 20 */{ STATS_OFFSET32(rx_err_discard_pkt
),
9813 4, STATS_FLAGS_BOTH
, "rx_phy_ip_err_discards"},
9814 { STATS_OFFSET32(rx_skb_alloc_failed
),
9815 4, STATS_FLAGS_BOTH
, "rx_skb_alloc_discard" },
9816 { STATS_OFFSET32(hw_csum_err
),
9817 4, STATS_FLAGS_BOTH
, "rx_csum_offload_errors" },
9819 { STATS_OFFSET32(total_bytes_transmitted_hi
),
9820 8, STATS_FLAGS_BOTH
, "tx_bytes" },
9821 { STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi
),
9822 8, STATS_FLAGS_PORT
, "tx_error_bytes" },
9823 { STATS_OFFSET32(total_unicast_packets_transmitted_hi
),
9824 8, STATS_FLAGS_BOTH
, "tx_packets" },
9825 { STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi
),
9826 8, STATS_FLAGS_PORT
, "tx_mac_errors" },
9827 { STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi
),
9828 8, STATS_FLAGS_PORT
, "tx_carrier_errors" },
9829 { STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi
),
9830 8, STATS_FLAGS_PORT
, "tx_single_collisions" },
9831 { STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi
),
9832 8, STATS_FLAGS_PORT
, "tx_multi_collisions" },
9833 /* 30 */{ STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi
),
9834 8, STATS_FLAGS_PORT
, "tx_deferred" },
9835 { STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi
),
9836 8, STATS_FLAGS_PORT
, "tx_excess_collisions" },
9837 { STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi
),
9838 8, STATS_FLAGS_PORT
, "tx_late_collisions" },
9839 { STATS_OFFSET32(tx_stat_etherstatscollisions_hi
),
9840 8, STATS_FLAGS_PORT
, "tx_total_collisions" },
9841 { STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi
),
9842 8, STATS_FLAGS_PORT
, "tx_64_byte_packets" },
9843 { STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi
),
9844 8, STATS_FLAGS_PORT
, "tx_65_to_127_byte_packets" },
9845 { STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi
),
9846 8, STATS_FLAGS_PORT
, "tx_128_to_255_byte_packets" },
9847 { STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi
),
9848 8, STATS_FLAGS_PORT
, "tx_256_to_511_byte_packets" },
9849 { STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi
),
9850 8, STATS_FLAGS_PORT
, "tx_512_to_1023_byte_packets" },
9851 { STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi
),
9852 8, STATS_FLAGS_PORT
, "tx_1024_to_1522_byte_packets" },
9853 /* 40 */{ STATS_OFFSET32(etherstatspktsover1522octets_hi
),
9854 8, STATS_FLAGS_PORT
, "tx_1523_to_9022_byte_packets" },
9855 { STATS_OFFSET32(pause_frames_sent_hi
),
9856 8, STATS_FLAGS_PORT
, "tx_pause_frames" }
9859 #define IS_PORT_STAT(i) \
9860 ((bnx2x_stats_arr[i].flags & STATS_FLAGS_BOTH) == STATS_FLAGS_PORT)
9861 #define IS_FUNC_STAT(i) (bnx2x_stats_arr[i].flags & STATS_FLAGS_FUNC)
9862 #define IS_E1HMF_MODE_STAT(bp) \
9863 (IS_E1HMF(bp) && !(bp->msglevel & BNX2X_MSG_STATS))
9865 static void bnx2x_get_strings(struct net_device
*dev
, u32 stringset
, u8
*buf
)
9867 struct bnx2x
*bp
= netdev_priv(dev
);
9870 switch (stringset
) {
9874 for_each_queue(bp
, i
) {
9875 for (j
= 0; j
< BNX2X_NUM_Q_STATS
; j
++)
9876 sprintf(buf
+ (k
+ j
)*ETH_GSTRING_LEN
,
9877 bnx2x_q_stats_arr
[j
].string
, i
);
9878 k
+= BNX2X_NUM_Q_STATS
;
9880 if (IS_E1HMF_MODE_STAT(bp
))
9882 for (j
= 0; j
< BNX2X_NUM_STATS
; j
++)
9883 strcpy(buf
+ (k
+ j
)*ETH_GSTRING_LEN
,
9884 bnx2x_stats_arr
[j
].string
);
9886 for (i
= 0, j
= 0; i
< BNX2X_NUM_STATS
; i
++) {
9887 if (IS_E1HMF_MODE_STAT(bp
) && IS_PORT_STAT(i
))
9889 strcpy(buf
+ j
*ETH_GSTRING_LEN
,
9890 bnx2x_stats_arr
[i
].string
);
9897 memcpy(buf
, bnx2x_tests_str_arr
, sizeof(bnx2x_tests_str_arr
));
9902 static int bnx2x_get_stats_count(struct net_device
*dev
)
9904 struct bnx2x
*bp
= netdev_priv(dev
);
9908 num_stats
= BNX2X_NUM_Q_STATS
* BNX2X_NUM_QUEUES(bp
);
9909 if (!IS_E1HMF_MODE_STAT(bp
))
9910 num_stats
+= BNX2X_NUM_STATS
;
9912 if (IS_E1HMF_MODE_STAT(bp
)) {
9914 for (i
= 0; i
< BNX2X_NUM_STATS
; i
++)
9915 if (IS_FUNC_STAT(i
))
9918 num_stats
= BNX2X_NUM_STATS
;
9924 static void bnx2x_get_ethtool_stats(struct net_device
*dev
,
9925 struct ethtool_stats
*stats
, u64
*buf
)
9927 struct bnx2x
*bp
= netdev_priv(dev
);
9928 u32
*hw_stats
, *offset
;
9933 for_each_queue(bp
, i
) {
9934 hw_stats
= (u32
*)&bp
->fp
[i
].eth_q_stats
;
9935 for (j
= 0; j
< BNX2X_NUM_Q_STATS
; j
++) {
9936 if (bnx2x_q_stats_arr
[j
].size
== 0) {
9937 /* skip this counter */
9941 offset
= (hw_stats
+
9942 bnx2x_q_stats_arr
[j
].offset
);
9943 if (bnx2x_q_stats_arr
[j
].size
== 4) {
9944 /* 4-byte counter */
9945 buf
[k
+ j
] = (u64
) *offset
;
9948 /* 8-byte counter */
9949 buf
[k
+ j
] = HILO_U64(*offset
, *(offset
+ 1));
9951 k
+= BNX2X_NUM_Q_STATS
;
9953 if (IS_E1HMF_MODE_STAT(bp
))
9955 hw_stats
= (u32
*)&bp
->eth_stats
;
9956 for (j
= 0; j
< BNX2X_NUM_STATS
; j
++) {
9957 if (bnx2x_stats_arr
[j
].size
== 0) {
9958 /* skip this counter */
9962 offset
= (hw_stats
+ bnx2x_stats_arr
[j
].offset
);
9963 if (bnx2x_stats_arr
[j
].size
== 4) {
9964 /* 4-byte counter */
9965 buf
[k
+ j
] = (u64
) *offset
;
9968 /* 8-byte counter */
9969 buf
[k
+ j
] = HILO_U64(*offset
, *(offset
+ 1));
9972 hw_stats
= (u32
*)&bp
->eth_stats
;
9973 for (i
= 0, j
= 0; i
< BNX2X_NUM_STATS
; i
++) {
9974 if (IS_E1HMF_MODE_STAT(bp
) && IS_PORT_STAT(i
))
9976 if (bnx2x_stats_arr
[i
].size
== 0) {
9977 /* skip this counter */
9982 offset
= (hw_stats
+ bnx2x_stats_arr
[i
].offset
);
9983 if (bnx2x_stats_arr
[i
].size
== 4) {
9984 /* 4-byte counter */
9985 buf
[j
] = (u64
) *offset
;
9989 /* 8-byte counter */
9990 buf
[j
] = HILO_U64(*offset
, *(offset
+ 1));
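/*
 * Illustrative sketch (editorial): how an 8-byte statistic is rebuilt in
 * the loops above.  The stats block stores each 64-bit counter as two
 * consecutive 32-bit words, high word first, and HILO_U64() joins them;
 * the open-coded equivalent is shown below with a hypothetical name.
 */
static inline u64 bnx2x_example_join_stat(const u32 *p)
{
    return (((u64)p[0]) << 32) | p[1];  /* p[0] = hi word, p[1] = lo word */
}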
9996 static int bnx2x_phys_id(struct net_device
*dev
, u32 data
)
9998 struct bnx2x
*bp
= netdev_priv(dev
);
9999 int port
= BP_PORT(bp
);
10002 if (!netif_running(dev
))
10011 for (i
= 0; i
< (data
* 2); i
++) {
10013 bnx2x_set_led(bp
, port
, LED_MODE_OPER
, SPEED_1000
,
10014 bp
->link_params
.hw_led_mode
,
10015 bp
->link_params
.chip_id
);
10017 bnx2x_set_led(bp
, port
, LED_MODE_OFF
, 0,
10018 bp
->link_params
.hw_led_mode
,
10019 bp
->link_params
.chip_id
);
10021 msleep_interruptible(500);
10022 if (signal_pending(current
))
10026 if (bp
->link_vars
.link_up
)
10027 bnx2x_set_led(bp
, port
, LED_MODE_OPER
,
10028 bp
->link_vars
.line_speed
,
10029 bp
->link_params
.hw_led_mode
,
10030 bp
->link_params
.chip_id
);
10035 static struct ethtool_ops bnx2x_ethtool_ops = {
10036     .get_settings      = bnx2x_get_settings,
10037     .set_settings      = bnx2x_set_settings,
10038     .get_drvinfo       = bnx2x_get_drvinfo,
10039     .get_regs_len      = bnx2x_get_regs_len,
10040     .get_regs          = bnx2x_get_regs,
10041     .get_wol           = bnx2x_get_wol,
10042     .set_wol           = bnx2x_set_wol,
10043     .get_msglevel      = bnx2x_get_msglevel,
10044     .set_msglevel      = bnx2x_set_msglevel,
10045     .nway_reset        = bnx2x_nway_reset,
10046     .get_link          = bnx2x_get_link,
10047     .get_eeprom_len    = bnx2x_get_eeprom_len,
10048     .get_eeprom        = bnx2x_get_eeprom,
10049     .set_eeprom        = bnx2x_set_eeprom,
10050     .get_coalesce      = bnx2x_get_coalesce,
10051     .set_coalesce      = bnx2x_set_coalesce,
10052     .get_ringparam     = bnx2x_get_ringparam,
10053     .set_ringparam     = bnx2x_set_ringparam,
10054     .get_pauseparam    = bnx2x_get_pauseparam,
10055     .set_pauseparam    = bnx2x_set_pauseparam,
10056     .get_rx_csum       = bnx2x_get_rx_csum,
10057     .set_rx_csum       = bnx2x_set_rx_csum,
10058     .get_tx_csum       = ethtool_op_get_tx_csum,
10059     .set_tx_csum       = ethtool_op_set_tx_hw_csum,
10060     .set_flags         = bnx2x_set_flags,
10061     .get_flags         = ethtool_op_get_flags,
10062     .get_sg            = ethtool_op_get_sg,
10063     .set_sg            = ethtool_op_set_sg,
10064     .get_tso           = ethtool_op_get_tso,
10065     .set_tso           = bnx2x_set_tso,
10066     .self_test_count   = bnx2x_self_test_count,
10067     .self_test         = bnx2x_self_test,
10068     .get_strings       = bnx2x_get_strings,
10069     .phys_id           = bnx2x_phys_id,
10070     .get_stats_count   = bnx2x_get_stats_count,
10071     .get_ethtool_stats = bnx2x_get_ethtool_stats,
10074 /* end of ethtool_ops */
10076 /****************************************************************************
10077 * General service functions
10078 ****************************************************************************/
10080 static int bnx2x_set_power_state(struct bnx2x
*bp
, pci_power_t state
)
10084 pci_read_config_word(bp
->pdev
, bp
->pm_cap
+ PCI_PM_CTRL
, &pmcsr
);
10088 pci_write_config_word(bp
->pdev
, bp
->pm_cap
+ PCI_PM_CTRL
,
10089 ((pmcsr
& ~PCI_PM_CTRL_STATE_MASK
) |
10090 PCI_PM_CTRL_PME_STATUS
));
10092 if (pmcsr
& PCI_PM_CTRL_STATE_MASK
)
10093 /* delay required during transition out of D3hot */
10098 pmcsr
&= ~PCI_PM_CTRL_STATE_MASK
;
10102 pmcsr
|= PCI_PM_CTRL_PME_ENABLE
;
10104 pci_write_config_word(bp
->pdev
, bp
->pm_cap
+ PCI_PM_CTRL
,
10107 /* No more memory access after this point until
10108 * device is brought back to D0.
10118 static inline int bnx2x_has_rx_work(struct bnx2x_fastpath
*fp
)
10122 /* Tell compiler that status block fields can change */
10124 rx_cons_sb
= le16_to_cpu(*fp
->rx_cons_sb
);
10125 if ((rx_cons_sb
& MAX_RCQ_DESC_CNT
) == MAX_RCQ_DESC_CNT
)
10127 return (fp
->rx_comp_cons
!= rx_cons_sb
);
10131 * net_device service functions
10134 static int bnx2x_poll(struct napi_struct
*napi
, int budget
)
10136 struct bnx2x_fastpath
*fp
= container_of(napi
, struct bnx2x_fastpath
,
10138 struct bnx2x
*bp
= fp
->bp
;
10141 #ifdef BNX2X_STOP_ON_ERROR
10142 if (unlikely(bp
->panic
))
10146 prefetch(fp
->tx_buf_ring
[TX_BD(fp
->tx_pkt_cons
)].skb
);
10147 prefetch(fp
->rx_buf_ring
[RX_BD(fp
->rx_bd_cons
)].skb
);
10148 prefetch((char *)(fp
->rx_buf_ring
[RX_BD(fp
->rx_bd_cons
)].skb
) + 256);
10150 bnx2x_update_fpsb_idx(fp
);
10152 if (bnx2x_has_tx_work(fp
))
10155 if (bnx2x_has_rx_work(fp
)) {
10156 work_done
= bnx2x_rx_int(fp
, budget
);
10158 /* must not complete if we consumed full budget */
10159 if (work_done
>= budget
)
10163 /* BNX2X_HAS_WORK() reads the status block, thus we need to
10164 * ensure that status block indices have been actually read
10165 * (bnx2x_update_fpsb_idx) prior to this check (BNX2X_HAS_WORK)
10166 * so that we won't write the "newer" value of the status block to IGU
10167 * (if there was a DMA right after BNX2X_HAS_WORK and
10168 * if there is no rmb, the memory reading (bnx2x_update_fpsb_idx)
10169 * may be postponed to right before bnx2x_ack_sb). In this case
10170 * there will never be another interrupt until there is another update
10171 * of the status block, while there is still unhandled work.
10175 if (!BNX2X_HAS_WORK(fp
)) {
10176 #ifdef BNX2X_STOP_ON_ERROR
10179 napi_complete(napi
);
10181 bnx2x_ack_sb(bp
, fp
->sb_id
, USTORM_ID
,
10182 le16_to_cpu(fp
->fp_u_idx
), IGU_INT_NOP
, 1);
10183 bnx2x_ack_sb(bp
, fp
->sb_id
, CSTORM_ID
,
10184 le16_to_cpu(fp
->fp_c_idx
), IGU_INT_ENABLE
, 1);
10192 /* we split the first BD into headers and data BDs
10193 * to ease the pain of our fellow microcode engineers
10194 * we use one mapping for both BDs
10195 * So far this has only been observed to happen
10196 * in Other Operating Systems(TM)
10198 static noinline u16
bnx2x_tx_split(struct bnx2x
*bp
,
10199 struct bnx2x_fastpath
*fp
,
10200 struct eth_tx_bd
**tx_bd
, u16 hlen
,
10201 u16 bd_prod
, int nbd
)
10203 struct eth_tx_bd
*h_tx_bd
= *tx_bd
;
10204 struct eth_tx_bd
*d_tx_bd
;
10205 dma_addr_t mapping
;
10206 int old_len
= le16_to_cpu(h_tx_bd
->nbytes
);
10208 /* first fix first BD */
10209 h_tx_bd
->nbd
= cpu_to_le16(nbd
);
10210 h_tx_bd
->nbytes
= cpu_to_le16(hlen
);
10212 DP(NETIF_MSG_TX_QUEUED
, "TSO split header size is %d "
10213 "(%x:%x) nbd %d\n", h_tx_bd
->nbytes
, h_tx_bd
->addr_hi
,
10214 h_tx_bd
->addr_lo
, h_tx_bd
->nbd
);
10216 /* now get a new data BD
10217 * (after the pbd) and fill it */
10218 bd_prod
= TX_BD(NEXT_TX_IDX(bd_prod
));
10219 d_tx_bd
= &fp
->tx_desc_ring
[bd_prod
];
10221 mapping
= HILO_U64(le32_to_cpu(h_tx_bd
->addr_hi
),
10222 le32_to_cpu(h_tx_bd
->addr_lo
)) + hlen
;
10224 d_tx_bd
->addr_hi
= cpu_to_le32(U64_HI(mapping
));
10225 d_tx_bd
->addr_lo
= cpu_to_le32(U64_LO(mapping
));
10226 d_tx_bd
->nbytes
= cpu_to_le16(old_len
- hlen
);
10228 /* this marks the BD as one that has no individual mapping
10229 * the FW ignores this flag in a BD not marked start
10231 d_tx_bd
->bd_flags
.as_bitfield
= ETH_TX_BD_FLAGS_SW_LSO
;
10232 DP(NETIF_MSG_TX_QUEUED
,
10233 "TSO split data size is %d (%x:%x)\n",
10234 d_tx_bd
->nbytes
, d_tx_bd
->addr_hi
, d_tx_bd
->addr_lo
);
10236 /* update tx_bd for marking the last BD flag */
10242 static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
10245         csum = (u16) ~csum_fold(csum_sub(csum,
10246                         csum_partial(t_header - fix, fix, 0)));
10249         csum = (u16) ~csum_fold(csum_add(csum,
10250                         csum_partial(t_header, -fix, 0)));
10252     return swab16(csum);
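/*
 * Usage sketch (editorial assumption): how the transmit path is expected
 * to call the helper above.  SKB_CS() and SKB_CS_OFF() are assumed to be
 * the checksum-value and signed-correction-offset macros from bnx2x.h;
 * the corrected checksum is then byte-swapped for the parsing BD.
 */
static inline u16 bnx2x_example_fixup_tx_csum(struct sk_buff *skb)
{
    s8 fix = SKB_CS_OFF(skb);   /* signed correction offset */

    return bnx2x_csum_fix(skb_transport_header(skb), SKB_CS(skb), fix);
}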
10255 static inline u32
bnx2x_xmit_type(struct bnx2x
*bp
, struct sk_buff
*skb
)
10259 if (skb
->ip_summed
!= CHECKSUM_PARTIAL
)
10263 if (skb
->protocol
== htons(ETH_P_IPV6
)) {
10265 if (ipv6_hdr(skb
)->nexthdr
== IPPROTO_TCP
)
10266 rc
|= XMIT_CSUM_TCP
;
10270 if (ip_hdr(skb
)->protocol
== IPPROTO_TCP
)
10271 rc
|= XMIT_CSUM_TCP
;
10275 if (skb_shinfo(skb
)->gso_type
& SKB_GSO_TCPV4
)
10278 else if (skb_shinfo(skb
)->gso_type
& SKB_GSO_TCPV6
)
10284 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
10285 /* check if packet requires linearization (packet is too fragmented)
10286 no need to check fragmentation if page size > 8K (there will be no
10287 violation to FW restrictions) */
10288 static int bnx2x_pkt_req_lin(struct bnx2x
*bp
, struct sk_buff
*skb
,
10293 int first_bd_sz
= 0;
10295 /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
10296 if (skb_shinfo(skb
)->nr_frags
>= (MAX_FETCH_BD
- 3)) {
10298 if (xmit_type
& XMIT_GSO
) {
10299 unsigned short lso_mss
= skb_shinfo(skb
)->gso_size
;
10300 /* Check if LSO packet needs to be copied:
10301 3 = 1 (for headers BD) + 2 (for PBD and last BD) */
10302 int wnd_size
= MAX_FETCH_BD
- 3;
10303 /* Number of windows to check */
10304 int num_wnds
= skb_shinfo(skb
)->nr_frags
- wnd_size
;
10309 /* Headers length */
10310 hlen
= (int)(skb_transport_header(skb
) - skb
->data
) +
10313 /* Amount of data (w/o headers) on linear part of SKB*/
10314 first_bd_sz
= skb_headlen(skb
) - hlen
;
10316 wnd_sum
= first_bd_sz
;
10318 /* Calculate the first sum - it's special */
10319 for (frag_idx
= 0; frag_idx
< wnd_size
- 1; frag_idx
++)
10321 skb_shinfo(skb
)->frags
[frag_idx
].size
;
10323 /* If there was data on linear skb data - check it */
10324 if (first_bd_sz
> 0) {
10325 if (unlikely(wnd_sum
< lso_mss
)) {
10330 wnd_sum
-= first_bd_sz
;
10333 /* Others are easier: run through the frag list and
10334 check all windows */
10335 for (wnd_idx
= 0; wnd_idx
<= num_wnds
; wnd_idx
++) {
10337 skb_shinfo(skb
)->frags
[wnd_idx
+ wnd_size
- 1].size
;
10339 if (unlikely(wnd_sum
< lso_mss
)) {
10344 skb_shinfo(skb
)->frags
[wnd_idx
].size
;
10347 /* in non-LSO too fragmented packet should always
10354 if (unlikely(to_copy
))
10355 DP(NETIF_MSG_TX_QUEUED
,
10356 "Linearization IS REQUIRED for %s packet. "
10357 "num_frags %d hlen %d first_bd_sz %d\n",
10358 (xmit_type
& XMIT_GSO
) ? "LSO" : "non-LSO",
10359 skb_shinfo(skb
)->nr_frags
, hlen
, first_bd_sz
);
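/*
 * Minimal sketch (editorial, simplified): the sliding-window rule that
 * bnx2x_pkt_req_lin() above enforces.  For every run of (MAX_FETCH_BD - 3)
 * consecutive BD-sized pieces of an LSO packet, the bytes covered must
 * add up to at least one MSS; otherwise the firmware could be asked to
 * build a segment from too many BDs and the skb must be linearized
 * first.  Names and the flat piece_len[] array are hypothetical.
 */
static inline int bnx2x_example_lso_window_ok(const unsigned int *piece_len,
                                              int pieces, int wnd_size,
                                              unsigned int lso_mss)
{
    int i, j;

    for (i = 0; i + wnd_size <= pieces; i++) {
        unsigned int sum = 0;

        for (j = 0; j < wnd_size; j++)
            sum += piece_len[i + j];
        if (sum < lso_mss)
            return 0;   /* window too small -> linearize the skb */
    }
    return 1;
}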
10365 /* called with netif_tx_lock
10366 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
10367 * netif_wake_queue()
10369 static int bnx2x_start_xmit(struct sk_buff
*skb
, struct net_device
*dev
)
10371 struct bnx2x
*bp
= netdev_priv(dev
);
10372 struct bnx2x_fastpath
*fp
;
10373 struct netdev_queue
*txq
;
10374 struct sw_tx_bd
*tx_buf
;
10375 struct eth_tx_bd
*tx_bd
;
10376 struct eth_tx_parse_bd
*pbd
= NULL
;
10377 u16 pkt_prod
, bd_prod
;
10379 dma_addr_t mapping
;
10380 u32 xmit_type
= bnx2x_xmit_type(bp
, skb
);
10381 int vlan_off
= (bp
->e1hov
? 4 : 0);
10385 #ifdef BNX2X_STOP_ON_ERROR
10386 if (unlikely(bp
->panic
))
10387 return NETDEV_TX_BUSY
;
10390 fp_index
= skb_get_queue_mapping(skb
);
10391 txq
= netdev_get_tx_queue(dev
, fp_index
);
10393 fp
= &bp
->fp
[fp_index
];
10395 if (unlikely(bnx2x_tx_avail(fp
) < (skb_shinfo(skb
)->nr_frags
+ 3))) {
10396 fp
->eth_q_stats
.driver_xoff
++,
10397 netif_tx_stop_queue(txq
);
10398 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
10399 return NETDEV_TX_BUSY
;
10402 DP(NETIF_MSG_TX_QUEUED
, "SKB: summed %x protocol %x protocol(%x,%x)"
10403 " gso type %x xmit_type %x\n",
10404 skb
->ip_summed
, skb
->protocol
, ipv6_hdr(skb
)->nexthdr
,
10405 ip_hdr(skb
)->protocol
, skb_shinfo(skb
)->gso_type
, xmit_type
);
10407 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
10408 /* First, check if we need to linearize the skb (due to FW
10409 restrictions). No need to check fragmentation if page size > 8K
10410 (there will be no violation to FW restrictions) */
10411 if (bnx2x_pkt_req_lin(bp
, skb
, xmit_type
)) {
10412 /* Statistics of linearization */
10414 if (skb_linearize(skb
) != 0) {
10415 DP(NETIF_MSG_TX_QUEUED
, "SKB linearization failed - "
10416 "silently dropping this SKB\n");
10417 dev_kfree_skb_any(skb
);
10418 return NETDEV_TX_OK
;
10424 Please read carefully. First we use one BD which we mark as start,
10425 then for TSO or xsum we have a parsing info BD,
10426 and only then we have the rest of the TSO BDs.
10427 (don't forget to mark the last one as last,
10428 and to unmap only AFTER you write to the BD ...)
10429 And above all, all pdb sizes are in words - NOT DWORDS!
10432 pkt_prod
= fp
->tx_pkt_prod
++;
10433 bd_prod
= TX_BD(fp
->tx_bd_prod
);
10435 /* get a tx_buf and first BD */
10436 tx_buf
= &fp
->tx_buf_ring
[TX_BD(pkt_prod
)];
10437 tx_bd
= &fp
->tx_desc_ring
[bd_prod
];
10439 tx_bd
->bd_flags
.as_bitfield
= ETH_TX_BD_FLAGS_START_BD
;
10440 tx_bd
->general_data
= (UNICAST_ADDRESS
<<
10441 ETH_TX_BD_ETH_ADDR_TYPE_SHIFT
);
10443 tx_bd
->general_data
|= (1 << ETH_TX_BD_HDR_NBDS_SHIFT
);
10445 /* remember the first BD of the packet */
10446 tx_buf
->first_bd
= fp
->tx_bd_prod
;
10449 DP(NETIF_MSG_TX_QUEUED
,
10450 "sending pkt %u @%p next_idx %u bd %u @%p\n",
10451 pkt_prod
, tx_buf
, fp
->tx_pkt_prod
, bd_prod
, tx_bd
);
10454 if ((bp
->vlgrp
!= NULL
) && vlan_tx_tag_present(skb
) &&
10455 (bp
->flags
& HW_VLAN_TX_FLAG
)) {
10456 tx_bd
->vlan
= cpu_to_le16(vlan_tx_tag_get(skb
));
10457 tx_bd
->bd_flags
.as_bitfield
|= ETH_TX_BD_FLAGS_VLAN_TAG
;
10461 tx_bd
->vlan
= cpu_to_le16(pkt_prod
);
10464 /* turn on parsing and get a BD */
10465 bd_prod
= TX_BD(NEXT_TX_IDX(bd_prod
));
10466 pbd
= (void *)&fp
->tx_desc_ring
[bd_prod
];
10468 memset(pbd
, 0, sizeof(struct eth_tx_parse_bd
));
10471 if (xmit_type
& XMIT_CSUM
) {
10472 hlen
= (skb_network_header(skb
) - skb
->data
+ vlan_off
) / 2;
10474 /* for now NS flag is not used in Linux */
10476 (hlen
| ((skb
->protocol
== cpu_to_be16(ETH_P_8021Q
)) <<
10477 ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT
));
10479 pbd
->ip_hlen
= (skb_transport_header(skb
) -
10480 skb_network_header(skb
)) / 2;
10482 hlen
+= pbd
->ip_hlen
+ tcp_hdrlen(skb
) / 2;
10484 pbd
->total_hlen
= cpu_to_le16(hlen
);
10485 hlen
= hlen
*2 - vlan_off
;
10487 tx_bd
->bd_flags
.as_bitfield
|= ETH_TX_BD_FLAGS_TCP_CSUM
;
10489 if (xmit_type
& XMIT_CSUM_V4
)
10490 tx_bd
->bd_flags
.as_bitfield
|=
10491 ETH_TX_BD_FLAGS_IP_CSUM
;
10493 tx_bd
->bd_flags
.as_bitfield
|= ETH_TX_BD_FLAGS_IPV6
;
10495 if (xmit_type
& XMIT_CSUM_TCP
) {
10496 pbd
->tcp_pseudo_csum
= swab16(tcp_hdr(skb
)->check
);
10499 s8 fix
= SKB_CS_OFF(skb
); /* signed! */
10501 pbd
->global_data
|= ETH_TX_PARSE_BD_CS_ANY_FLG
;
10502 pbd
->cs_offset
= fix
/ 2;
10504 DP(NETIF_MSG_TX_QUEUED
,
10505 "hlen %d offset %d fix %d csum before fix %x\n",
10506 le16_to_cpu(pbd
->total_hlen
), pbd
->cs_offset
, fix
,
10509 /* HW bug: fixup the CSUM */
10510 pbd
->tcp_pseudo_csum
=
10511 bnx2x_csum_fix(skb_transport_header(skb
),
10514 DP(NETIF_MSG_TX_QUEUED
, "csum after fix %x\n",
10515 pbd
->tcp_pseudo_csum
);
	mapping = pci_map_single(bp->pdev, skb->data,
				 skb_headlen(skb), PCI_DMA_TODEVICE);

	tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
	nbd = skb_shinfo(skb)->nr_frags + ((pbd == NULL) ? 1 : 2);
	tx_bd->nbd = cpu_to_le16(nbd);
	tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));

	DP(NETIF_MSG_TX_QUEUED, "first bd @%p  addr (%x:%x)  nbd %d"
	   "  nbytes %d  flags %x  vlan %x\n",
	   tx_bd, tx_bd->addr_hi, tx_bd->addr_lo, le16_to_cpu(tx_bd->nbd),
	   le16_to_cpu(tx_bd->nbytes), tx_bd->bd_flags.as_bitfield,
	   le16_to_cpu(tx_bd->vlan));
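	/*
	 * nbd accounting: one BD per page fragment plus the first (header)
	 * BD, plus one more for the parsing BD when it is used
	 * (pbd != NULL) - hence the "? 1 : 2" above.
	 */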
	if (xmit_type & XMIT_GSO) {

		DP(NETIF_MSG_TX_QUEUED,
		   "TSO packet len %d  hlen %d  total len %d  tso size %d\n",
		   skb->len, hlen, skb_headlen(skb),
		   skb_shinfo(skb)->gso_size);

		tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;

		if (unlikely(skb_headlen(skb) > hlen))
			bd_prod = bnx2x_tx_split(bp, fp, &tx_bd, hlen,
						 bd_prod, ++nbd);

		pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
		pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
		pbd->tcp_flags = pbd_tcp_flags(skb);

		if (xmit_type & XMIT_GSO_V4) {
			pbd->ip_id = swab16(ip_hdr(skb)->id);
			pbd->tcp_pseudo_csum =
				swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
							  ip_hdr(skb)->daddr,
							  0, IPPROTO_TCP, 0));

		} else
			pbd->tcp_pseudo_csum =
				swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
							&ipv6_hdr(skb)->daddr,
							0, IPPROTO_TCP, 0));

		pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN;
	}
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
		tx_bd = &fp->tx_desc_ring[bd_prod];

		mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
				       frag->size, PCI_DMA_TODEVICE);

		tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
		tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
		tx_bd->nbytes = cpu_to_le16(frag->size);
		tx_bd->vlan = cpu_to_le16(pkt_prod);
		tx_bd->bd_flags.as_bitfield = 0;

		DP(NETIF_MSG_TX_QUEUED,
		   "frag %d  bd @%p  addr (%x:%x)  nbytes %d  flags %x\n",
		   i, tx_bd, tx_bd->addr_hi, tx_bd->addr_lo,
		   le16_to_cpu(tx_bd->nbytes), tx_bd->bd_flags.as_bitfield);
	}
	/* now at last mark the BD as the last BD */
	tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_END_BD;

	DP(NETIF_MSG_TX_QUEUED, "last bd @%p  flags %x\n",
	   tx_bd, tx_bd->bd_flags.as_bitfield);

	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));

	/* now send a tx doorbell, counting the next BD
	 * if the packet contains or ends with it
	 */
	if (TX_BD_POFF(bd_prod) < nbd)
		nbd++;

	if (pbd)
		DP(NETIF_MSG_TX_QUEUED,
		   "PBD @%p  ip_data %x  ip_hlen %u  ip_id %u  lso_mss %u"
		   "  tcp_flags %x  xsum %x  seq %u  hlen %u\n",
		   pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id,
		   pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum,
		   pbd->tcp_send_seq, le16_to_cpu(pbd->total_hlen));

	DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d  bd %u\n", nbd, bd_prod);
	/*
	 * Make sure that the BD data is updated before updating the producer
	 * since FW might read the BD right after the producer is updated.
	 * This is only applicable for weak-ordered memory model archs such
	 * as IA-64. The following barrier is also mandatory since FW will
	 * assume packets must have BDs.
	 */
	wmb();

	le16_add_cpu(&fp->hw_tx_prods->bds_prod, nbd);
	mb(); /* FW restriction: must not reorder writing nbd and packets */
	le32_add_cpu(&fp->hw_tx_prods->packets_prod, 1);
	DOORBELL(bp, fp->index, 0);

	fp->tx_bd_prod += nbd;

	if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
		/* We want bnx2x_tx_int to "see" the updated tx_bd_prod
		   if we put Tx into XOFF state. */
		smp_mb();
		netif_tx_stop_queue(txq);
		fp->eth_q_stats.driver_xoff++;
		if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
			netif_tx_wake_queue(txq);
	}

	return NETDEV_TX_OK;
}
/* called with rtnl_lock */
static int bnx2x_open(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	netif_carrier_off(dev);

	bnx2x_set_power_state(bp, PCI_D0);

	return bnx2x_nic_load(bp, LOAD_OPEN);
}
/* called with rtnl_lock */
static int bnx2x_close(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	/* Unload the driver, release IRQs */
	bnx2x_nic_unload(bp, UNLOAD_CLOSE);
	if (atomic_read(&bp->pdev->enable_cnt) == 1)
		if (!CHIP_REV_IS_SLOW(bp))
			bnx2x_set_power_state(bp, PCI_D3hot);

	return 0;
}
/* called with netif_tx_lock from dev_mcast.c */
static void bnx2x_set_rx_mode(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	u32 rx_mode = BNX2X_RX_MODE_NORMAL;
	int port = BP_PORT(bp);

	if (bp->state != BNX2X_STATE_OPEN) {
		DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
		return;
	}

	DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);

	if (dev->flags & IFF_PROMISC)
		rx_mode = BNX2X_RX_MODE_PROMISC;

	else if ((dev->flags & IFF_ALLMULTI) ||
		 ((dev->mc_count > BNX2X_MAX_MULTICAST) && CHIP_IS_E1(bp)))
		rx_mode = BNX2X_RX_MODE_ALLMULTI;

	else { /* some multicasts */
		if (CHIP_IS_E1(bp)) {
			int i, old, offset;
			struct dev_mc_list *mclist;
			struct mac_configuration_cmd *config =
						bnx2x_sp(bp, mcast_config);

			for (i = 0, mclist = dev->mc_list;
			     mclist && (i < dev->mc_count);
			     i++, mclist = mclist->next) {

				config->config_table[i].
					cam_entry.msb_mac_addr =
					swab16(*(u16 *)&mclist->dmi_addr[0]);
				config->config_table[i].
					cam_entry.middle_mac_addr =
					swab16(*(u16 *)&mclist->dmi_addr[2]);
				config->config_table[i].
					cam_entry.lsb_mac_addr =
					swab16(*(u16 *)&mclist->dmi_addr[4]);
				config->config_table[i].cam_entry.flags =
							cpu_to_le16(port);
				config->config_table[i].
					target_table_entry.flags = 0;
				config->config_table[i].
					target_table_entry.client_id = 0;
				config->config_table[i].
					target_table_entry.vlan_id = 0;

				DP(NETIF_MSG_IFUP,
				   "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
				   config->config_table[i].
						cam_entry.msb_mac_addr,
				   config->config_table[i].
						cam_entry.middle_mac_addr,
				   config->config_table[i].
						cam_entry.lsb_mac_addr);
			}
			old = config->hdr.length;
			for (; i < old; i++) {
				if (CAM_IS_INVALID(config->
						   config_table[i])) {
					/* already invalidated */
					break;
				}
				CAM_INVALIDATE(config->
					       config_table[i]);
			}

			if (CHIP_REV_IS_SLOW(bp))
				offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
			else
				offset = BNX2X_MAX_MULTICAST*(1 + port);

			config->hdr.length = i;
			config->hdr.offset = offset;
			config->hdr.client_id = bp->fp->cl_id;
			config->hdr.reserved1 = 0;

			bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
				   U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
				   U64_LO(bnx2x_sp_mapping(bp, mcast_config)),
				      0);
		} else {
			/* Accept one or more multicasts */
			struct dev_mc_list *mclist;
			u32 mc_filter[MC_HASH_SIZE];
			u32 crc, bit, regidx;
			int i;

			memset(mc_filter, 0, 4 * MC_HASH_SIZE);

			for (i = 0, mclist = dev->mc_list;
			     mclist && (i < dev->mc_count);
			     i++, mclist = mclist->next) {

				DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
				   mclist->dmi_addr);

				crc = crc32c_le(0, mclist->dmi_addr, ETH_ALEN);
				bit = (crc >> 24) & 0xff;
				regidx = bit >> 5;
				bit &= 0x1f;
				mc_filter[regidx] |= (1 << bit);
			}

			for (i = 0; i < MC_HASH_SIZE; i++)
				REG_WR(bp, MC_HASH_OFFSET(bp, i),
				       mc_filter[i]);
		}
	}

	bp->rx_mode = rx_mode;
	bnx2x_set_storm_rx_mode(bp);
}
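/*
 * Note on the two multicast paths above (summary of the code, not new
 * behaviour): on E1 each multicast address is written into a CAM entry of
 * the mac_configuration_cmd and posted via a SET_MAC ramrod, while on the
 * other chips the addresses are hashed with crc32c and the top byte of the
 * CRC selects one bit in the MC_HASH filter registers (MC_HASH_SIZE x 32
 * bits).
 */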
/* called with rtnl_lock */
static int bnx2x_change_mac_addr(struct net_device *dev, void *p)
{
	struct sockaddr *addr = p;
	struct bnx2x *bp = netdev_priv(dev);

	if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
		return -EINVAL;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	if (netif_running(dev)) {
		if (CHIP_IS_E1(bp))
			bnx2x_set_mac_addr_e1(bp, 1);
		else
			bnx2x_set_mac_addr_e1h(bp, 1);
	}

	return 0;
}
/* called with rtnl_lock */
static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *data = if_mii(ifr);
	struct bnx2x *bp = netdev_priv(dev);
	int port = BP_PORT(bp);
	int err;

	switch (cmd) {
	case SIOCGMIIPHY:
		data->phy_id = bp->port.phy_addr;

		/* fallthrough */

	case SIOCGMIIREG: {
		u16 mii_regval;

		if (!netif_running(dev))
			return -EAGAIN;

		mutex_lock(&bp->port.phy_mutex);
		err = bnx2x_cl45_read(bp, port, 0, bp->port.phy_addr,
				      DEFAULT_PHY_DEV_ADDR,
				      (data->reg_num & 0x1f), &mii_regval);
		data->val_out = mii_regval;
		mutex_unlock(&bp->port.phy_mutex);
		return err;
	}

	case SIOCSMIIREG:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;

		if (!netif_running(dev))
			return -EAGAIN;

		mutex_lock(&bp->port.phy_mutex);
		err = bnx2x_cl45_write(bp, port, 0, bp->port.phy_addr,
				       DEFAULT_PHY_DEV_ADDR,
				       (data->reg_num & 0x1f), data->val_in);
		mutex_unlock(&bp->port.phy_mutex);
		return err;

	default:
		/* do nothing */
		break;
	}

	return -EOPNOTSUPP;
}
/* called with rtnl_lock */
static int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc = 0;

	if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
	    ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
		return -EINVAL;

	/* This does not race with packet allocation
	 * because the actual alloc size is
	 * only updated as part of load
	 */
	dev->mtu = new_mtu;

	if (netif_running(dev)) {
		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
		rc = bnx2x_nic_load(bp, LOAD_NORMAL);
	}

	return rc;
}
static void bnx2x_tx_timeout(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

#ifdef BNX2X_STOP_ON_ERROR
	if (!bp->panic)
		bnx2x_panic();
#endif

	/* This allows the netif to be shutdown gracefully before resetting */
	schedule_work(&bp->reset_task);
}
/* called with rtnl_lock */
static void bnx2x_vlan_rx_register(struct net_device *dev,
				   struct vlan_group *vlgrp)
{
	struct bnx2x *bp = netdev_priv(dev);

	bp->vlgrp = vlgrp;

	/* Set flags according to the required capabilities */
	bp->flags &= ~(HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);

	if (dev->features & NETIF_F_HW_VLAN_TX)
		bp->flags |= HW_VLAN_TX_FLAG;

	if (dev->features & NETIF_F_HW_VLAN_RX)
		bp->flags |= HW_VLAN_RX_FLAG;

	if (netif_running(dev))
		bnx2x_set_client_config(bp);
}
#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
static void poll_bnx2x(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	disable_irq(bp->pdev->irq);
	bnx2x_interrupt(bp->pdev->irq, dev);
	enable_irq(bp->pdev->irq);
}
#endif
static const struct net_device_ops bnx2x_netdev_ops = {
	.ndo_open		= bnx2x_open,
	.ndo_stop		= bnx2x_close,
	.ndo_start_xmit		= bnx2x_start_xmit,
	.ndo_set_multicast_list	= bnx2x_set_rx_mode,
	.ndo_set_mac_address	= bnx2x_change_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_do_ioctl		= bnx2x_ioctl,
	.ndo_change_mtu		= bnx2x_change_mtu,
	.ndo_tx_timeout		= bnx2x_tx_timeout,
	.ndo_vlan_rx_register	= bnx2x_vlan_rx_register,
#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
	.ndo_poll_controller	= poll_bnx2x,
#endif
};
static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
				    struct net_device *dev)
{
	struct bnx2x *bp;
	int rc;

	SET_NETDEV_DEV(dev, &pdev->dev);
	bp = netdev_priv(dev);

	bp->dev = dev;
	bp->pdev = pdev;
	bp->func = PCI_FUNC(pdev->devfn);

	rc = pci_enable_device(pdev);
	if (rc) {
		printk(KERN_ERR PFX "Cannot enable PCI device, aborting\n");
		goto err_out;
	}

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		printk(KERN_ERR PFX "Cannot find PCI device base address,"
		       " aborting\n");
		rc = -ENODEV;
		goto err_out_disable;
	}

	if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
		printk(KERN_ERR PFX "Cannot find second PCI device"
		       " base address, aborting\n");
		rc = -ENODEV;
		goto err_out_disable;
	}

	if (atomic_read(&pdev->enable_cnt) == 1) {
		rc = pci_request_regions(pdev, DRV_MODULE_NAME);
		if (rc) {
			printk(KERN_ERR PFX "Cannot obtain PCI resources,"
			       " aborting\n");
			goto err_out_disable;
		}

		pci_set_master(pdev);
		pci_save_state(pdev);
	}

	bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (bp->pm_cap == 0) {
		printk(KERN_ERR PFX "Cannot find power management"
		       " capability, aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
	if (bp->pcie_cap == 0) {
		printk(KERN_ERR PFX "Cannot find PCI Express capability,"
		       " aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0) {
		bp->flags |= USING_DAC_FLAG;
		if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) {
			printk(KERN_ERR PFX "pci_set_consistent_dma_mask"
			       " failed, aborting\n");
			rc = -EIO;
			goto err_out_release;
		}

	} else if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
		printk(KERN_ERR PFX "System does not support DMA,"
		       " aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	dev->mem_start = pci_resource_start(pdev, 0);
	dev->base_addr = dev->mem_start;
	dev->mem_end = pci_resource_end(pdev, 0);

	dev->irq = pdev->irq;

	bp->regview = pci_ioremap_bar(pdev, 0);
	if (!bp->regview) {
		printk(KERN_ERR PFX "Cannot map register space, aborting\n");
		rc = -ENOMEM;
		goto err_out_release;
	}

	bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
					min_t(u64, BNX2X_DB_SIZE,
					      pci_resource_len(pdev, 2)));
	if (!bp->doorbells) {
		printk(KERN_ERR PFX "Cannot map doorbell space, aborting\n");
		rc = -ENOMEM;
		goto err_out_unmap;
	}

	bnx2x_set_power_state(bp, PCI_D0);

	/* clean indirect addresses */
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
	REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);

	dev->watchdog_timeo = TX_TIMEOUT;

	dev->netdev_ops = &bnx2x_netdev_ops;
	dev->ethtool_ops = &bnx2x_ethtool_ops;
	dev->features |= NETIF_F_SG;
	dev->features |= NETIF_F_HW_CSUM;
	if (bp->flags & USING_DAC_FLAG)
		dev->features |= NETIF_F_HIGHDMA;
	dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
	bp->flags |= (HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
	dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
	dev->features |= NETIF_F_TSO6;

	return 0;

err_out_unmap:
	if (bp->regview) {
		iounmap(bp->regview);
		bp->regview = NULL;
	}
	if (bp->doorbells) {
		iounmap(bp->doorbells);
		bp->doorbells = NULL;
	}

err_out_release:
	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

err_out_disable:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

err_out:
	return rc;
}
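/*
 * Note: BAR 0 is mapped above as the register window (bp->regview) and
 * BAR 2 as the doorbell window (bp->doorbells, capped at BNX2X_DB_SIZE);
 * both must be present for the device to be usable.
 */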
static int __devinit bnx2x_get_pcie_width(struct bnx2x *bp)
{
	u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);

	val = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
	return val;
}

/* return value of 1=2.5GHz 2=5GHz */
static int __devinit bnx2x_get_pcie_speed(struct bnx2x *bp)
{
	u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);

	val = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
	return val;
}
static int __devinit bnx2x_check_firmware(struct bnx2x *bp)
{
	struct bnx2x_fw_file_hdr *fw_hdr;
	struct bnx2x_fw_file_section *sections;
	u16 *ops_offsets;
	u32 offset, len, num_ops;
	int i;
	const u8 *fw_ver;
	const struct firmware *firmware = bp->firmware;

	if (firmware->size < sizeof(struct bnx2x_fw_file_hdr))
		return -EINVAL;

	fw_hdr = (struct bnx2x_fw_file_hdr *)firmware->data;
	sections = (struct bnx2x_fw_file_section *)fw_hdr;

	/* Make sure none of the offsets and sizes make us read beyond
	 * the end of the firmware data */
	for (i = 0; i < sizeof(*fw_hdr) / sizeof(*sections); i++) {
		offset = be32_to_cpu(sections[i].offset);
		len = be32_to_cpu(sections[i].len);
		if (offset + len > firmware->size) {
			printk(KERN_ERR PFX "Section %d length is out of bounds\n", i);
			return -EINVAL;
		}
	}

	/* Likewise for the init_ops offsets */
	offset = be32_to_cpu(fw_hdr->init_ops_offsets.offset);
	ops_offsets = (u16 *)(firmware->data + offset);
	num_ops = be32_to_cpu(fw_hdr->init_ops.len) / sizeof(struct raw_op);

	for (i = 0; i < be32_to_cpu(fw_hdr->init_ops_offsets.len) / 2; i++) {
		if (be16_to_cpu(ops_offsets[i]) > num_ops) {
			printk(KERN_ERR PFX "Section offset %d is out of bounds\n", i);
			return -EINVAL;
		}
	}

	/* Check FW version */
	offset = be32_to_cpu(fw_hdr->fw_version.offset);
	fw_ver = firmware->data + offset;
	if ((fw_ver[0] != BCM_5710_FW_MAJOR_VERSION) ||
	    (fw_ver[1] != BCM_5710_FW_MINOR_VERSION) ||
	    (fw_ver[2] != BCM_5710_FW_REVISION_VERSION) ||
	    (fw_ver[3] != BCM_5710_FW_ENGINEERING_VERSION)) {
		printk(KERN_ERR PFX "Bad FW version:%d.%d.%d.%d."
		       " Should be %d.%d.%d.%d\n",
		       fw_ver[0], fw_ver[1], fw_ver[2],
		       fw_ver[3], BCM_5710_FW_MAJOR_VERSION,
		       BCM_5710_FW_MINOR_VERSION,
		       BCM_5710_FW_REVISION_VERSION,
		       BCM_5710_FW_ENGINEERING_VERSION);
		return -EINVAL;
	}

	return 0;
}
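/*
 * The checks above guard against a malformed firmware file: every section
 * (offset, len) pair must lie within the blob, every init_ops offset must
 * point inside the init_ops array, and the embedded version must match the
 * BCM_5710_FW_* constants this driver was built against.
 */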
static void inline be32_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
{
	u32 i;
	const __be32 *source = (const __be32 *)_source;
	u32 *target = (u32 *)_target;

	for (i = 0; i < n/4; i++)
		target[i] = be32_to_cpu(source[i]);
}
/*
   Ops array is stored in the following format:
   {op(8bit), offset(24bit, big endian), data(32bit, big endian)}
 */
static void inline bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n)
{
	u32 i, j, tmp;
	const __be32 *source = (const __be32 *)_source;
	struct raw_op *target = (struct raw_op *)_target;

	for (i = 0, j = 0; i < n/8; i++, j += 2) {
		tmp = be32_to_cpu(source[j]);
		target[i].op = (tmp >> 24) & 0xff;
		target[i].offset = tmp & 0xffffff;
		target[i].raw_data = be32_to_cpu(source[j + 1]);
	}
}
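/*
 * Worked example (illustrative values): an 8-byte entry 0x05 0x00 0x12 0x34
 * 0xde 0xad 0xbe 0xef decodes to op = 0x05, offset = 0x001234 and
 * raw_data = 0xdeadbeef; n is the byte length of the source array, so
 * n/8 entries are converted.
 */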
static void inline be16_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
{
	u32 i;
	u16 *target = (u16 *)_target;
	const __be16 *source = (const __be16 *)_source;

	for (i = 0; i < n/2; i++)
		target[i] = be16_to_cpu(source[i]);
}
#define BNX2X_ALLOC_AND_SET(arr, lbl, func) \
	do { \
		u32 len = be32_to_cpu(fw_hdr->arr.len); \
		bp->arr = kmalloc(len, GFP_KERNEL); \
		if (!bp->arr) { \
			printk(KERN_ERR PFX "Failed to allocate %d bytes for "#arr"\n", len); \
			goto lbl; \
		} \
		func(bp->firmware->data + \
		     be32_to_cpu(fw_hdr->arr.offset), \
		     (u8 *)bp->arr, len); \
	} while (0)
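/*
 * Usage sketch: BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit,
 * be32_to_cpu_n) allocates bp->init_data sized after the "init_data"
 * section of the firmware header, converts that section with the given
 * helper, and jumps to the named error label if the allocation fails
 * (fw_hdr and bp must be in scope at the call site).
 */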
static int __devinit bnx2x_init_firmware(struct bnx2x *bp, struct device *dev)
{
	char fw_file_name[40] = {0};
	int rc, offset;
	struct bnx2x_fw_file_hdr *fw_hdr;

	/* Create a FW file name */
	if (CHIP_IS_E1(bp))
		offset = sprintf(fw_file_name, FW_FILE_PREFIX_E1);
	else
		offset = sprintf(fw_file_name, FW_FILE_PREFIX_E1H);

	sprintf(fw_file_name + offset, "%d.%d.%d.%d.fw",
		BCM_5710_FW_MAJOR_VERSION,
		BCM_5710_FW_MINOR_VERSION,
		BCM_5710_FW_REVISION_VERSION,
		BCM_5710_FW_ENGINEERING_VERSION);

	printk(KERN_INFO PFX "Loading %s\n", fw_file_name);

	rc = request_firmware(&bp->firmware, fw_file_name, dev);
	if (rc) {
		printk(KERN_ERR PFX "Can't load firmware file %s\n", fw_file_name);
		goto request_firmware_exit;
	}

	rc = bnx2x_check_firmware(bp);
	if (rc) {
		printk(KERN_ERR PFX "Corrupt firmware file %s\n", fw_file_name);
		goto request_firmware_exit;
	}

	fw_hdr = (struct bnx2x_fw_file_hdr *)bp->firmware->data;

	/* Initialize the pointers to the init arrays */
	BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit, be32_to_cpu_n);

	BNX2X_ALLOC_AND_SET(init_ops, init_ops_alloc_err, bnx2x_prep_ops);

	BNX2X_ALLOC_AND_SET(init_ops_offsets, init_offsets_alloc_err, be16_to_cpu_n);

	/* STORMs firmware */
	bp->tsem_int_table_data = bp->firmware->data +
		be32_to_cpu(fw_hdr->tsem_int_table_data.offset);
	bp->tsem_pram_data = bp->firmware->data +
		be32_to_cpu(fw_hdr->tsem_pram_data.offset);
	bp->usem_int_table_data = bp->firmware->data +
		be32_to_cpu(fw_hdr->usem_int_table_data.offset);
	bp->usem_pram_data = bp->firmware->data +
		be32_to_cpu(fw_hdr->usem_pram_data.offset);
	bp->xsem_int_table_data = bp->firmware->data +
		be32_to_cpu(fw_hdr->xsem_int_table_data.offset);
	bp->xsem_pram_data = bp->firmware->data +
		be32_to_cpu(fw_hdr->xsem_pram_data.offset);
	bp->csem_int_table_data = bp->firmware->data +
		be32_to_cpu(fw_hdr->csem_int_table_data.offset);
	bp->csem_pram_data = bp->firmware->data +
		be32_to_cpu(fw_hdr->csem_pram_data.offset);

	return 0;

init_offsets_alloc_err:
	kfree(bp->init_ops);
init_ops_alloc_err:
	kfree(bp->init_data);
request_firmware_exit:
	release_firmware(bp->firmware);

	return rc;
}
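/*
 * The error labels above unwind in reverse order of allocation: failing to
 * allocate init_ops_offsets frees init_ops, failing init_ops frees
 * init_data, and any failure releases the firmware blob itself.
 */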
static int __devinit bnx2x_init_one(struct pci_dev *pdev,
				    const struct pci_device_id *ent)
{
	static int version_printed;
	struct net_device *dev = NULL;
	struct bnx2x *bp;
	int rc;

	if (version_printed++ == 0)
		printk(KERN_INFO "%s", version);

	/* dev zeroed in init_etherdev */
	dev = alloc_etherdev_mq(sizeof(*bp), MAX_CONTEXT);
	if (!dev) {
		printk(KERN_ERR PFX "Cannot allocate net device\n");
		return -ENOMEM;
	}

	bp = netdev_priv(dev);
	bp->msglevel = debug;

	rc = bnx2x_init_dev(pdev, dev);
	if (rc < 0) {
		free_netdev(dev);
		return rc;
	}

	pci_set_drvdata(pdev, dev);

	rc = bnx2x_init_bp(bp);
	if (rc)
		goto init_one_exit;

	/* Set init arrays */
	rc = bnx2x_init_firmware(bp, &pdev->dev);
	if (rc) {
		printk(KERN_ERR PFX "Error loading firmware\n");
		goto init_one_exit;
	}

	rc = register_netdev(dev);
	if (rc) {
		dev_err(&pdev->dev, "Cannot register net device\n");
		goto init_one_exit;
	}

	printk(KERN_INFO "%s: %s (%c%d) PCI-E x%d %s found at mem %lx,"
	       " IRQ %d, ", dev->name, board_info[ent->driver_data].name,
	       (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
	       bnx2x_get_pcie_width(bp),
	       (bnx2x_get_pcie_speed(bp) == 2) ? "5GHz (Gen2)" : "2.5GHz",
	       dev->base_addr, bp->pdev->irq);
	printk(KERN_CONT "node addr %pM\n", dev->dev_addr);

	return 0;

init_one_exit:
	if (bp->regview)
		iounmap(bp->regview);

	if (bp->doorbells)
		iounmap(bp->doorbells);

	free_netdev(dev);

	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

	return rc;
}
static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;

	if (!dev) {
		printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
		return;
	}
	bp = netdev_priv(dev);

	unregister_netdev(dev);

	kfree(bp->init_ops_offsets);
	kfree(bp->init_ops);
	kfree(bp->init_data);
	release_firmware(bp->firmware);

	if (bp->regview)
		iounmap(bp->regview);

	if (bp->doorbells)
		iounmap(bp->doorbells);

	free_netdev(dev);

	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}
static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;

	if (!dev) {
		printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
		return -ENODEV;
	}
	bp = netdev_priv(dev);

	pci_save_state(pdev);

	if (!netif_running(dev)) {
		return 0;
	}

	netif_device_detach(dev);

	bnx2x_nic_unload(bp, UNLOAD_CLOSE);

	bnx2x_set_power_state(bp, pci_choose_state(pdev, state));

	return 0;
}
static int bnx2x_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;
	int rc;

	if (!dev) {
		printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
		return -ENODEV;
	}
	bp = netdev_priv(dev);

	pci_restore_state(pdev);

	if (!netif_running(dev)) {
		return 0;
	}

	bnx2x_set_power_state(bp, PCI_D0);
	netif_device_attach(dev);

	rc = bnx2x_nic_load(bp, LOAD_OPEN);

	return rc;
}
static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
{
	int i;

	bp->state = BNX2X_STATE_ERROR;

	bp->rx_mode = BNX2X_RX_MODE_NONE;

	bnx2x_netif_stop(bp, 0);

	del_timer_sync(&bp->timer);
	bp->stats_state = STATS_STATE_DISABLED;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	/* Release IRQs */
	bnx2x_free_irq(bp);

	if (CHIP_IS_E1(bp)) {
		struct mac_configuration_cmd *config =
						bnx2x_sp(bp, mcast_config);

		for (i = 0; i < config->hdr.length; i++)
			CAM_INVALIDATE(config->config_table[i]);
	}

	/* Free SKBs, SGEs, TPA pool and driver internals */
	bnx2x_free_skbs(bp);
	for_each_rx_queue(bp, i)
		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
	for_each_rx_queue(bp, i)
		netif_napi_del(&bnx2x_fp(bp, i, napi));
	bnx2x_free_mem(bp);

	bp->state = BNX2X_STATE_CLOSED;

	netif_carrier_off(bp->dev);

	return 0;
}
static void bnx2x_eeh_recover(struct bnx2x *bp)
{
	u32 val;

	mutex_init(&bp->port.phy_mutex);

	bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
	bp->link_params.shmem_base = bp->common.shmem_base;
	BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);

	if (!bp->common.shmem_base ||
	    (bp->common.shmem_base < 0xA0000) ||
	    (bp->common.shmem_base >= 0xC0000)) {
		BNX2X_DEV_INFO("MCP not active\n");
		bp->flags |= NO_MCP_FLAG;
		return;
	}

	val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
	if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		!= (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		BNX2X_ERR("BAD MCP validity signature\n");

	if (!BP_NOMCP(bp)) {
		bp->fw_seq = (SHMEM_RD(bp, func_mb[BP_FUNC(bp)].drv_mb_header)
			      & DRV_MSG_SEQ_NUMBER_MASK);
		BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
	}
}
/**
 * bnx2x_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
						pci_channel_state_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	netif_device_detach(dev);

	if (netif_running(dev))
		bnx2x_eeh_nic_unload(bp);

	pci_disable_device(pdev);

	/* Request a slot reset */
	return PCI_ERS_RESULT_NEED_RESET;
}
/**
 * bnx2x_io_slot_reset - called after the PCI bus has been reset
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 */
static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	if (pci_enable_device(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}

	pci_set_master(pdev);
	pci_restore_state(pdev);

	if (netif_running(dev))
		bnx2x_set_power_state(bp, PCI_D0);

	return PCI_ERS_RESULT_RECOVERED;
}
/**
 * bnx2x_io_resume - called when traffic can start flowing again
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation.
 */
static void bnx2x_io_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	bnx2x_eeh_recover(bp);

	if (netif_running(dev))
		bnx2x_nic_load(bp, LOAD_NORMAL);

	netif_device_attach(dev);
}
static struct pci_error_handlers bnx2x_err_handler = {
	.error_detected = bnx2x_io_error_detected,
	.slot_reset     = bnx2x_io_slot_reset,
	.resume         = bnx2x_io_resume,
};
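/*
 * EEH flow as wired up here: bnx2x_io_error_detected() detaches the netdev
 * and asks for a slot reset (PCI_ERS_RESULT_NEED_RESET),
 * bnx2x_io_slot_reset() re-enables and restores the PCI device, and
 * bnx2x_io_resume() reloads the NIC and re-attaches the interface once
 * recovery completes.
 */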
static struct pci_driver bnx2x_pci_driver = {
	.name        = DRV_MODULE_NAME,
	.id_table    = bnx2x_pci_tbl,
	.probe       = bnx2x_init_one,
	.remove      = __devexit_p(bnx2x_remove_one),
	.suspend     = bnx2x_suspend,
	.resume      = bnx2x_resume,
	.err_handler = &bnx2x_err_handler,
};
static int __init bnx2x_init(void)
{
	int ret;

	bnx2x_wq = create_singlethread_workqueue("bnx2x");
	if (bnx2x_wq == NULL) {
		printk(KERN_ERR PFX "Cannot create workqueue\n");
		return -ENOMEM;
	}

	ret = pci_register_driver(&bnx2x_pci_driver);
	if (ret) {
		printk(KERN_ERR PFX "Cannot register driver\n");
		destroy_workqueue(bnx2x_wq);
	}

	return ret;
}

static void __exit bnx2x_cleanup(void)
{
	pci_unregister_driver(&bnx2x_pci_driver);

	destroy_workqueue(bnx2x_wq);
}

module_init(bnx2x_init);
module_exit(bnx2x_cleanup);