/* bnx2x_main.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2010 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath and fastpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/device.h>  /* for dev_info() */
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/crc32c.h>
#include <linux/prefetch.h>
#include <linux/zlib.h>
#include <linux/stringify.h>

#include "bnx2x_init.h"
#include "bnx2x_init_ops.h"
#include "bnx2x_dump.h"
#define DRV_MODULE_VERSION	"1.52.1-7"
#define DRV_MODULE_RELDATE	"2010/02/28"
#define BNX2X_BC_VER		0x040200
#include <linux/firmware.h>
#include "bnx2x_fw_file_hdr.h"

#define FW_FILE_VERSION					\
	__stringify(BCM_5710_FW_MAJOR_VERSION) "."	\
	__stringify(BCM_5710_FW_MINOR_VERSION) "."	\
	__stringify(BCM_5710_FW_REVISION_VERSION) "."	\
	__stringify(BCM_5710_FW_ENGINEERING_VERSION)
#define FW_FILE_NAME_E1		"bnx2x-e1-" FW_FILE_VERSION ".fw"
#define FW_FILE_NAME_E1H	"bnx2x-e1h-" FW_FILE_VERSION ".fw"
/* Time in jiffies before concluding the transmitter is hung */
#define TX_TIMEOUT		(5*HZ)
static char version[] __devinitdata =
	"Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
	DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
MODULE_AUTHOR("Eliezer Tamir");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710/57711/57711E Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FW_FILE_NAME_E1);
MODULE_FIRMWARE(FW_FILE_NAME_E1H);
static int multi_mode = 1;
module_param(multi_mode, int, 0);
MODULE_PARM_DESC(multi_mode, " Multi queue mode "
			     "(0 Disable; 1 Enable (default))");

static int num_queues;
module_param(num_queues, int, 0);
MODULE_PARM_DESC(num_queues, " Number of queues for multi_mode=1"
			     " (default is the number of CPUs)");

static int disable_tpa;
module_param(disable_tpa, int, 0);
MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");

static int int_mode;
module_param(int_mode, int, 0);
MODULE_PARM_DESC(int_mode, " Force interrupt mode (1 INT#x; 2 MSI)");

static int dropless_fc;
module_param(dropless_fc, int, 0);
MODULE_PARM_DESC(dropless_fc, " Pause on exhausted host ring");

static int poll;
module_param(poll, int, 0);
MODULE_PARM_DESC(poll, " Use polling (for debug)");

static int mrrs = -1;
module_param(mrrs, int, 0);
MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");

static int debug;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, " Default debug msglevel");
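/* All of the knobs above are standard module parameters, so they are set
 * at load time.  As an illustration only (values are examples, not
 * recommendations), loading the driver with
 *	modprobe bnx2x multi_mode=0 disable_tpa=1
 * would turn off multi-queue operation and TPA (LRO) aggregation for
 * every bnx2x device in the system.
 */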
static int load_count[3]; /* 0-common, 1-port0, 2-port1 */

static struct workqueue_struct *bnx2x_wq;
enum bnx2x_board_type {
	BCM57710 = 0,
	BCM57711,
	BCM57711E,
};

/* indexed by board_type, above */
static struct {
	char *name;
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM57710 XGb" },
	{ "Broadcom NetXtreme II BCM57711 XGb" },
	{ "Broadcom NetXtreme II BCM57711E XGb" }
};
static DEFINE_PCI_DEVICE_TABLE(bnx2x_pci_tbl) = {
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57710), BCM57710 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711), BCM57711 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711E), BCM57711E },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);
/****************************************************************************
* General service functions
****************************************************************************/

/* locking is done by mcp */
void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
{
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
}
static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
{
	u32 val;

	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);

	return val;
}
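/* The two helpers above tunnel register accesses through the GRC window in
 * PCI configuration space: the target GRC address is written to
 * PCICFG_GRC_ADDRESS, the data moves through PCICFG_GRC_DATA, and the window
 * is then pointed back at PCICFG_VENDOR_ID_OFFSET, presumably so that a later
 * stray config-space access cannot hit an arbitrary device register.
 */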
static const u32 dmae_reg_go_c[] = {
	DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
	DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
	DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
	DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
};
/* copy command into DMAE command memory and set DMAE command go */
static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
			    int idx)
{
	u32 cmd_offset;
	int i;

	cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
	for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
		REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));

		DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
		   idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
	}
	REG_WR(bp, dmae_reg_go_c[idx], 1);
}
void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
		      u32 len32)
{
	struct dmae_command dmae;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = 200;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);

		DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x len32 %d)"
		   " using indirect\n", dst_addr, len32);
		bnx2x_init_ind_wr(bp, dst_addr, data, len32);
		return;
	}

	memset(&dmae, 0, sizeof(struct dmae_command));

	dmae.opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
		       DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
		       DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		       DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		       DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		       (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		       (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae.src_addr_lo = U64_LO(dma_addr);
	dmae.src_addr_hi = U64_HI(dma_addr);
	dmae.dst_addr_lo = dst_addr >> 2;
	dmae.dst_addr_hi = 0;
	dmae.len = len32;
	dmae.comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae.comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae.comp_val = DMAE_COMP_VAL;

	DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
	   DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
		    "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
	   dmae.opcode, dmae.src_addr_hi, dmae.src_addr_lo,
	   dmae.len, dmae.dst_addr_hi, dmae.dst_addr_lo, dst_addr,
	   dmae.comp_addr_hi, dmae.comp_addr_lo, dmae.comp_val);
	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	mutex_lock(&bp->dmae_mutex);

	*wb_comp = 0;

	bnx2x_post_dmae(bp, &dmae, INIT_DMAE_C(bp));

	udelay(5);

	while (*wb_comp != DMAE_COMP_VAL) {
		DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);

		if (!cnt) {
			BNX2X_ERR("DMAE timeout!\n");
			break;
		}
		cnt--;
		/* adjust delay for emulation/FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			msleep(100);
		else
			udelay(5);
	}

	mutex_unlock(&bp->dmae_mutex);
}
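/* Both DMAE helpers follow the same pattern: build a dmae_command that
 * describes the transfer, post it with bnx2x_post_dmae(), and busy-poll the
 * wb_comp word in the slowpath area until the engine writes DMAE_COMP_VAL
 * there.  bp->dmae_mutex keeps a single command in flight at a time, and the
 * indirect register path is used as a fallback before the DMAE block is
 * ready.
 */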
void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
{
	struct dmae_command dmae;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = 200;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);
		int i;

		DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x len32 %d)"
		   " using indirect\n", src_addr, len32);
		for (i = 0; i < len32; i++)
			data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
		return;
	}

	memset(&dmae, 0, sizeof(struct dmae_command));

	dmae.opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
		       DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
		       DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		       DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		       DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		       (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		       (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae.src_addr_lo = src_addr >> 2;
	dmae.src_addr_hi = 0;
	dmae.dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
	dmae.dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
	dmae.len = len32;
	dmae.comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae.comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae.comp_val = DMAE_COMP_VAL;

	DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
	   DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
		    "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
	   dmae.opcode, dmae.src_addr_hi, dmae.src_addr_lo,
	   dmae.len, dmae.dst_addr_hi, dmae.dst_addr_lo, src_addr,
	   dmae.comp_addr_hi, dmae.comp_addr_lo, dmae.comp_val);

	mutex_lock(&bp->dmae_mutex);

	memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
	*wb_comp = 0;

	bnx2x_post_dmae(bp, &dmae, INIT_DMAE_C(bp));

	udelay(5);

	while (*wb_comp != DMAE_COMP_VAL) {

		if (!cnt) {
			BNX2X_ERR("DMAE timeout!\n");
			break;
		}
		cnt--;
		/* adjust delay for emulation/FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			msleep(100);
		else
			udelay(5);
	}
	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	mutex_unlock(&bp->dmae_mutex);
}
void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
			       u32 addr, u32 len)
{
	int offset = 0;

	while (len > DMAE_LEN32_WR_MAX) {
		bnx2x_write_dmae(bp, phys_addr + offset,
				 addr + offset, DMAE_LEN32_WR_MAX);
		offset += DMAE_LEN32_WR_MAX * 4;
		len -= DMAE_LEN32_WR_MAX;
	}

	bnx2x_write_dmae(bp, phys_addr + offset, addr + offset, len);
}
/* used only for slowpath so not inlined */
static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
{
	u32 wb_write[2];

	wb_write[0] = val_hi;
	wb_write[1] = val_lo;
	REG_WR_DMAE(bp, reg, wb_write, 2);
}
static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
{
	u32 wb_data[2];

	REG_RD_DMAE(bp, reg, wb_data, 2);

	return HILO_U64(wb_data[0], wb_data[1]);
}
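/* REG_WR_DMAE()/REG_RD_DMAE() move two consecutive 32-bit words, so the two
 * helpers above give access to 64-bit ("wide bus") registers; HILO_U64()
 * simply recombines the two halves returned by the read.
 */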
static int bnx2x_mc_assert(struct bnx2x *bp)
{
	char last_idx;
	int i, rc = 0;
	u32 row0, row1, row2, row3;

	/* XSTORM */
	last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
			   XSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else
			break;
	}

	/* TSTORM */
	last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
			   TSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else
			break;
	}

	/* CSTORM */
	last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
			   CSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else
			break;
	}

	/* USTORM */
	last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
			   USTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else
			break;
	}

	return rc;
}
static void bnx2x_fw_dump(struct bnx2x *bp)
{
	u32 mark, offset;
	__be32 data[9];
	int word;

	mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104);
	mark = ((mark + 0x3) & ~0x3);
	pr_err("begin fw dump (mark 0x%x)\n", mark);

	for (offset = mark - 0x08000000; offset <= 0xF900; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
						  offset + 4*word));
		data[8] = 0x0;
		pr_cont("%s", (char *)data);
	}
	for (offset = 0xF108; offset <= mark - 0x08000000; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
						  offset + 4*word));
		data[8] = 0x0;
		pr_cont("%s", (char *)data);
	}
	pr_err("end of fw dump\n");
}
static void bnx2x_panic_dump(struct bnx2x *bp)
{
	int i;
	u16 j, start, end;

	bp->stats_state = STATS_STATE_DISABLED;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	BNX2X_ERR("begin crash dump -----------------\n");

	/* Indices */
	BNX2X_ERR("def_c_idx(%u) def_u_idx(%u) def_x_idx(%u)"
		  " def_t_idx(%u) def_att_idx(%u) attn_state(%u)"
		  " spq_prod_idx(%u)\n",
		  bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
		  bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);

	/* Rx */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		BNX2X_ERR("fp%d: rx_bd_prod(%x) rx_bd_cons(%x)"
			  " *rx_bd_cons_sb(%x) rx_comp_prod(%x)"
			  " rx_comp_cons(%x) *rx_cons_sb(%x)\n",
			  i, fp->rx_bd_prod, fp->rx_bd_cons,
			  le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
			  fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
		BNX2X_ERR("      rx_sge_prod(%x) last_max_sge(%x)"
			  " fp_u_idx(%x) *sb_u_idx(%x)\n",
			  fp->rx_sge_prod, fp->last_max_sge,
			  le16_to_cpu(fp->fp_u_idx),
			  fp->status_blk->u_status_block.status_block_index);
	}

	/* Tx */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		BNX2X_ERR("fp%d: tx_pkt_prod(%x) tx_pkt_cons(%x)"
			  " tx_bd_prod(%x) tx_bd_cons(%x) *tx_cons_sb(%x)\n",
			  i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
			  fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
		BNX2X_ERR("      fp_c_idx(%x) *sb_c_idx(%x)"
			  " tx_db_prod(%x)\n", le16_to_cpu(fp->fp_c_idx),
			  fp->status_blk->c_status_block.status_block_index,
			  fp->tx_db.data.prod);
	}

	/* Rx rings */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
		end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
		for (j = start; j != end; j = RX_BD(j + 1)) {
			u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
			struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];

			BNX2X_ERR("fp%d: rx_bd[%x]=[%x:%x] sw_bd=[%p]\n",
				  i, j, rx_bd[1], rx_bd[0], sw_bd->skb);
		}

		start = RX_SGE(fp->rx_sge_prod);
		end = RX_SGE(fp->last_max_sge);
		for (j = start; j != end; j = RX_SGE(j + 1)) {
			u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
			struct sw_rx_page *sw_page = &fp->rx_page_ring[j];

			BNX2X_ERR("fp%d: rx_sge[%x]=[%x:%x] sw_page=[%p]\n",
				  i, j, rx_sge[1], rx_sge[0], sw_page->page);
		}

		start = RCQ_BD(fp->rx_comp_cons - 10);
		end = RCQ_BD(fp->rx_comp_cons + 503);
		for (j = start; j != end; j = RCQ_BD(j + 1)) {
			u32 *cqe = (u32 *)&fp->rx_comp_ring[j];

			BNX2X_ERR("fp%d: cqe[%x]=[%x:%x:%x:%x]\n",
				  i, j, cqe[0], cqe[1], cqe[2], cqe[3]);
		}
	}

	/* Tx rings */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
		end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
		for (j = start; j != end; j = TX_BD(j + 1)) {
			struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];

			BNX2X_ERR("fp%d: packet[%x]=[%p,%x]\n",
				  i, j, sw_bd->skb, sw_bd->first_bd);
		}

		start = TX_BD(fp->tx_bd_cons - 10);
		end = TX_BD(fp->tx_bd_cons + 254);
		for (j = start; j != end; j = TX_BD(j + 1)) {
			u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];

			BNX2X_ERR("fp%d: tx_bd[%x]=[%x:%x:%x:%x]\n",
				  i, j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
		}
	}

	BNX2X_ERR("end crash dump -----------------\n");
}
static void bnx2x_int_enable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;

	if (msix) {
		val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			 HC_CONFIG_0_REG_INT_LINE_EN_0);
		val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else if (msi) {
		val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else {
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_INT_LINE_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);

		DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
		   val, port, addr);

		REG_WR(bp, addr, val);

		val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
	}

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x) mode %s\n",
	   val, port, addr, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));

	REG_WR(bp, addr, val);
	/*
	 * Ensure that HC_CONFIG is written before leading/trailing edge config
	 */
	mmiowb();
	barrier();

	if (CHIP_IS_E1H(bp)) {
		/* init leading/trailing edge */
		if (IS_E1HMF(bp)) {
			val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
			if (bp->port.pmf)
				/* enable nig and gpio3 attention */
				val |= 0x1100;
		} else
			val = 0xffff;

		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
	}

	/* Make sure that interrupts are indeed enabled from here on */
	mmiowb();
}
static void bnx2x_int_disable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);

	val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
		 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
		 HC_CONFIG_0_REG_INT_LINE_EN_0 |
		 HC_CONFIG_0_REG_ATTN_BIT_EN_0);

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
	   val, port, addr);

	/* flush all outstanding writes */
	mmiowb();

	REG_WR(bp, addr, val);
	if (REG_RD(bp, addr) != val)
		BNX2X_ERR("BUG! proper val not read from IGU!\n");
}
static void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
{
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int i, offset;

	/* disable interrupt handling */
	atomic_inc(&bp->intr_sem);
	smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */

	if (disable_hw)
		/* prevent the HW from sending interrupts */
		bnx2x_int_disable(bp);

	/* make sure all ISRs are done */
	if (msix) {
		synchronize_irq(bp->msix_table[0].vector);
		offset = 1;
		for_each_queue(bp, i)
			synchronize_irq(bp->msix_table[i + offset].vector);
	} else
		synchronize_irq(bp->pdev->irq);

	/* make sure sp_task is not running */
	cancel_delayed_work(&bp->sp_task);
	flush_workqueue(bnx2x_wq);
}
/*
 * General service functions
 */

static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
				u8 storm, u16 index, u8 op, u8 update)
{
	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
		       COMMAND_REG_INT_ACK);
	struct igu_ack_register igu_ack;

	igu_ack.status_block_index = index;
	igu_ack.sb_id_and_flags =
			((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
			 (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
			 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
			 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));

	DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n",
	   (*(u32 *)&igu_ack), hc_addr);
	REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));

	/* Make sure that ACK is written */
	mmiowb();
	barrier();
}
static inline void bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
{
	struct host_status_block *fpsb = fp->status_blk;

	barrier(); /* status block is written to by the chip */
	fp->fp_c_idx = fpsb->c_status_block.status_block_index;
	fp->fp_u_idx = fpsb->u_status_block.status_block_index;
}
static u16 bnx2x_ack_int(struct bnx2x *bp)
{
	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
		       COMMAND_REG_SIMD_MASK);
	u32 result = REG_RD(bp, hc_addr);

	DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
	   result, hc_addr);

	return result;
}
/*
 * fast path service functions
 */

static inline int bnx2x_has_tx_work_unload(struct bnx2x_fastpath *fp)
{
	/* Tell compiler that consumer and producer can change */
	barrier();
	return (fp->tx_pkt_prod != fp->tx_pkt_cons);
}
/* free skb in the packet ring at pos idx
 * return idx of last bd freed
 */
static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			     u16 idx)
{
	struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
	struct eth_tx_start_bd *tx_start_bd;
	struct eth_tx_bd *tx_data_bd;
	struct sk_buff *skb = tx_buf->skb;
	u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
	int nbd;

	/* prefetch skb end pointer to speedup dev_kfree_skb() */
	prefetch(&skb->end);

	DP(BNX2X_MSG_OFF, "pkt_idx %d buff @(%p)->skb %p\n",
	   idx, tx_buf, skb);

	/* unmap first bd */
	DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
	tx_start_bd = &fp->tx_desc_ring[bd_idx].start_bd;
	pci_unmap_single(bp->pdev, BD_UNMAP_ADDR(tx_start_bd),
			 BD_UNMAP_LEN(tx_start_bd), PCI_DMA_TODEVICE);

	nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
#ifdef BNX2X_STOP_ON_ERROR
	if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
		BNX2X_ERR("BAD nbd!\n");
		bnx2x_panic();
	}
#endif
	new_cons = nbd + tx_buf->first_bd;

	/* Get the next bd */
	bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	/* Skip a parse bd... */
	--nbd;
	bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	/* ...and the TSO split header bd since they have no mapping */
	if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
		--nbd;
		bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* now free frags */
	while (nbd > 0) {

		DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
		tx_data_bd = &fp->tx_desc_ring[bd_idx].reg_bd;
		pci_unmap_page(bp->pdev, BD_UNMAP_ADDR(tx_data_bd),
			       BD_UNMAP_LEN(tx_data_bd), PCI_DMA_TODEVICE);
		if (--nbd)
			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* release skb */
	WARN_ON(!skb);
	dev_kfree_skb(skb);
	tx_buf->first_bd = 0;
	tx_buf->skb = NULL;

	return new_cons;
}
bnx2x_tx_avail(struct bnx2x_fastpath
*fp
)
896 barrier(); /* Tell compiler that prod and cons can change */
897 prod
= fp
->tx_bd_prod
;
898 cons
= fp
->tx_bd_cons
;
900 /* NUM_TX_RINGS = number of "next-page" entries
901 It will be used as a threshold */
902 used
= SUB_S16(prod
, cons
) + (s16
)NUM_TX_RINGS
;
904 #ifdef BNX2X_STOP_ON_ERROR
906 WARN_ON(used
> fp
->bp
->tx_ring_size
);
907 WARN_ON((fp
->bp
->tx_ring_size
- used
) > MAX_TX_AVAIL
);
910 return (s16
)(fp
->bp
->tx_ring_size
) - used
;
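/* Worked example for the accounting above (numbers are illustrative only):
 * with tx_bd_prod = 110 and tx_bd_cons = 100, ten descriptors hold packets,
 * the NUM_TX_RINGS "next page" descriptors are treated as permanently used,
 * and the function reports tx_ring_size - (10 + NUM_TX_RINGS) free entries.
 * SUB_S16() keeps the subtraction correct when prod has wrapped around the
 * 16-bit range while cons has not.
 */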
static inline int bnx2x_has_tx_work(struct bnx2x_fastpath *fp)
{
	u16 hw_cons;

	/* Tell compiler that status block fields can change */
	barrier();
	hw_cons = le16_to_cpu(*fp->tx_cons_sb);
	return hw_cons != fp->tx_pkt_cons;
}
static int bnx2x_tx_int(struct bnx2x_fastpath *fp)
{
	struct bnx2x *bp = fp->bp;
	struct netdev_queue *txq;
	u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return -1;
#endif

	txq = netdev_get_tx_queue(bp->dev, fp->index);
	hw_cons = le16_to_cpu(*fp->tx_cons_sb);
	sw_cons = fp->tx_pkt_cons;

	while (sw_cons != hw_cons) {
		u16 pkt_cons;

		pkt_cons = TX_BD(sw_cons);

		/* prefetch(bp->tx_buf_ring[pkt_cons].skb); */

		DP(NETIF_MSG_TX_DONE, "hw_cons %u sw_cons %u pkt_cons %u\n",
		   hw_cons, sw_cons, pkt_cons);

/*		if (NEXT_TX_IDX(sw_cons) != hw_cons) {
			rmb();
			prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
		}
*/
		bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
		sw_cons++;
	}

	fp->tx_pkt_cons = sw_cons;
	fp->tx_bd_cons = bd_cons;

	/* Need to make the tx_bd_cons update visible to start_xmit()
	 * before checking for netif_tx_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that
	 * start_xmit() will miss it and cause the queue to be stopped
	 * forever.
	 */
	smp_mb();

	/* TBD need a thresh? */
	if (unlikely(netif_tx_queue_stopped(txq))) {
		/* Taking tx_lock() is needed to prevent re-enabling the queue
		 * while it's empty. This could have happened if rx_action()
		 * gets suspended in bnx2x_tx_int() after the condition before
		 * netif_tx_wake_queue(), while tx_action (bnx2x_start_xmit()):
		 *
		 * stops the queue->sees fresh tx_bd_cons->releases the queue->
		 * sends some packets consuming the whole queue again->
		 * stops the queue
		 */

		__netif_tx_lock(txq, smp_processor_id());

		if ((netif_tx_queue_stopped(txq)) &&
		    (bp->state == BNX2X_STATE_OPEN) &&
		    (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
			netif_tx_wake_queue(txq);

		__netif_tx_unlock(txq);
	}
	return 0;
}
static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid);
static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
			   union eth_rx_cqe *rr_cqe)
{
	struct bnx2x *bp = fp->bp;
	int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
	int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);

	DP(BNX2X_MSG_SP,
	   "fp %d cid %d got ramrod #%d state is %x type is %d\n",
	   fp->index, cid, command, bp->state,
	   rr_cqe->ramrod_cqe.ramrod_type);

	if (fp->index) {
		switch (command | fp->state) {
		case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
						BNX2X_FP_STATE_OPENING):
			DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
			   cid);
			fp->state = BNX2X_FP_STATE_OPEN;
			break;

		case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
			DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
			   cid);
			fp->state = BNX2X_FP_STATE_HALTED;
			break;

		default:
			BNX2X_ERR("unexpected MC reply (%d) "
				  "fp->state is %x\n", command, fp->state);
			break;
		}
		mb(); /* force bnx2x_wait_ramrod() to see the change */
		return;
	}

	switch (command | bp->state) {
	case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
		DP(NETIF_MSG_IFUP, "got setup ramrod\n");
		bp->state = BNX2X_STATE_OPEN;
		break;

	case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
		bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
		fp->state = BNX2X_FP_STATE_HALTED;
		break;

	case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
		bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
		break;

	case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_OPEN):
		DP(NETIF_MSG_IFDOWN, "got delete ramrod for CID %d\n", cid);
		bnx2x_cnic_cfc_comp(bp, cid);
		break;

	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
		DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
		bp->set_mac_pending--;
		break;

	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
		bp->set_mac_pending--;
		break;

	default:
		BNX2X_ERR("unexpected MC reply (%d) bp->state is %x\n",
			  command, bp->state);
		break;
	}
	mb(); /* force bnx2x_wait_ramrod() to see the change */
}
static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct page *page = sw_buf->page;
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];

	/* Skip "next page" elements */
	if (!page)
		return;

	pci_unmap_page(bp->pdev, pci_unmap_addr(sw_buf, mapping),
		       SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
	__free_pages(page, PAGES_PER_SGE_SHIFT);

	sw_buf->page = NULL;
	sge->addr_hi = 0;
	sge->addr_lo = 0;
}
static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
					   struct bnx2x_fastpath *fp, int last)
{
	int i;

	for (i = 0; i < last; i++)
		bnx2x_free_rx_sge(bp, fp, i);
}
static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
	dma_addr_t mapping;

	if (unlikely(page == NULL))
		return -ENOMEM;

	mapping = pci_map_page(bp->pdev, page, 0, SGE_PAGE_SIZE*PAGES_PER_SGE,
			       PCI_DMA_FROMDEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		__free_pages(page, PAGES_PER_SGE_SHIFT);
		return -ENOMEM;
	}

	sw_buf->page = page;
	pci_unmap_addr_set(sw_buf, mapping, mapping);

	sge->addr_hi = cpu_to_le32(U64_HI(mapping));
	sge->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}
static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct sk_buff *skb;
	struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
	struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
	dma_addr_t mapping;

	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
	if (unlikely(skb == NULL))
		return -ENOMEM;

	mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_size,
				 PCI_DMA_FROMDEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		dev_kfree_skb(skb);
		return -ENOMEM;
	}

	rx_buf->skb = skb;
	pci_unmap_addr_set(rx_buf, mapping, mapping);

	rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}
/* note that we are not allocating a new skb,
 * we are just moving one from cons to prod
 * we are not creating a new mapping,
 * so there is no need to check for dma_mapping_error().
 */
static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
			       struct sk_buff *skb, u16 cons, u16 prod)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];

	pci_dma_sync_single_for_device(bp->pdev,
				       pci_unmap_addr(cons_rx_buf, mapping),
				       RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

	prod_rx_buf->skb = cons_rx_buf->skb;
	pci_unmap_addr_set(prod_rx_buf, mapping,
			   pci_unmap_addr(cons_rx_buf, mapping));
	*prod_bd = *cons_bd;
}
static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
					     u16 idx)
{
	u16 last_max = fp->last_max_sge;

	if (SUB_S16(idx, last_max) > 0)
		fp->last_max_sge = idx;
}
static void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
{
	int i, j;

	for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
		int idx = RX_SGE_CNT * i - 1;

		for (j = 0; j < 2; j++) {
			SGE_MASK_CLEAR_BIT(fp, idx);
			idx--;
		}
	}
}
*fp
,
1211 struct eth_fast_path_rx_cqe
*fp_cqe
)
1213 struct bnx2x
*bp
= fp
->bp
;
1214 u16 sge_len
= SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe
->pkt_len
) -
1215 le16_to_cpu(fp_cqe
->len_on_bd
)) >>
1217 u16 last_max
, last_elem
, first_elem
;
1224 /* First mark all used pages */
1225 for (i
= 0; i
< sge_len
; i
++)
1226 SGE_MASK_CLEAR_BIT(fp
, RX_SGE(le16_to_cpu(fp_cqe
->sgl
[i
])));
1228 DP(NETIF_MSG_RX_STATUS
, "fp_cqe->sgl[%d] = %d\n",
1229 sge_len
- 1, le16_to_cpu(fp_cqe
->sgl
[sge_len
- 1]));
1231 /* Here we assume that the last SGE index is the biggest */
1232 prefetch((void *)(fp
->sge_mask
));
1233 bnx2x_update_last_max_sge(fp
, le16_to_cpu(fp_cqe
->sgl
[sge_len
- 1]));
1235 last_max
= RX_SGE(fp
->last_max_sge
);
1236 last_elem
= last_max
>> RX_SGE_MASK_ELEM_SHIFT
;
1237 first_elem
= RX_SGE(fp
->rx_sge_prod
) >> RX_SGE_MASK_ELEM_SHIFT
;
1239 /* If ring is not full */
1240 if (last_elem
+ 1 != first_elem
)
1243 /* Now update the prod */
1244 for (i
= first_elem
; i
!= last_elem
; i
= NEXT_SGE_MASK_ELEM(i
)) {
1245 if (likely(fp
->sge_mask
[i
]))
1248 fp
->sge_mask
[i
] = RX_SGE_MASK_ELEM_ONE_MASK
;
1249 delta
+= RX_SGE_MASK_ELEM_SZ
;
1253 fp
->rx_sge_prod
+= delta
;
1254 /* clear page-end entries */
1255 bnx2x_clear_sge_mask_next_elems(fp
);
1258 DP(NETIF_MSG_RX_STATUS
,
1259 "fp->last_max_sge = %d fp->rx_sge_prod = %d\n",
1260 fp
->last_max_sge
, fp
->rx_sge_prod
);
static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
{
	/* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
	memset(fp->sge_mask, 0xff,
	       (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));

	/* Clear the two last indices in the page to 1:
	   these are the indices that correspond to the "next" element,
	   hence will never be indicated and should be removed from
	   the calculations. */
	bnx2x_clear_sge_mask_next_elems(fp);
}
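/* The sge_mask is a bitmap with one bit per SGE entry, grouped into u64
 * elements of RX_SGE_MASK_ELEM_SZ bits.  bnx2x_update_sge_prod() clears the
 * bits of pages the FW has consumed, advances the SGE producer only over
 * leading elements that have become all-zero (i.e. fully replenished) and
 * re-arms those elements to all-ones; the "next page" slots are pre-cleared
 * at init time so they never hold the producer back.
 */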
static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
			    struct sk_buff *skb, u16 cons, u16 prod)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
	dma_addr_t mapping;

	/* move empty skb from pool to prod and map it */
	prod_rx_buf->skb = fp->tpa_pool[queue].skb;
	mapping = pci_map_single(bp->pdev, fp->tpa_pool[queue].skb->data,
				 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
	pci_unmap_addr_set(prod_rx_buf, mapping, mapping);

	/* move partial skb from cons to pool (don't unmap yet) */
	fp->tpa_pool[queue] = *cons_rx_buf;

	/* mark bin state as start - print error if current state != stop */
	if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
		BNX2X_ERR("start of bin not in stop [%d]\n", queue);

	fp->tpa_state[queue] = BNX2X_TPA_START;

	/* point prod_bd to new skb */
	prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

#ifdef BNX2X_STOP_ON_ERROR
	fp->tpa_queue_used |= (1 << queue);
#ifdef __powerpc64__
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
#else
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
#endif
	   fp->tpa_queue_used);
#endif
}
static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			       struct sk_buff *skb,
			       struct eth_fast_path_rx_cqe *fp_cqe,
			       u16 cqe_idx)
{
	struct sw_rx_page *rx_pg, old_rx_pg;
	u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
	u32 i, frag_len, frag_size, pages;
	int err;
	int j;

	frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
	pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;

	/* This is needed in order to enable forwarding support */
	skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE,
					max(frag_size, (u32)len_on_bd));

#ifdef BNX2X_STOP_ON_ERROR
	if (pages >
	    min((u32)8, (u32)MAX_SKB_FRAGS) * SGE_PAGE_SIZE * PAGES_PER_SGE) {
		BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
			  pages, cqe_idx);
		BNX2X_ERR("fp_cqe->pkt_len = %d fp_cqe->len_on_bd = %d\n",
			  fp_cqe->pkt_len, len_on_bd);
		bnx2x_panic();
		return -EINVAL;
	}
#endif

	/* Run through the SGL and compose the fragmented skb */
	for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
		u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j]));

		/* FW gives the indices of the SGE as if the ring is an array
		   (meaning that "next" element will consume 2 indices) */
		frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
		rx_pg = &fp->rx_page_ring[sge_idx];
		old_rx_pg = *rx_pg;

		/* If we fail to allocate a substitute page, we simply stop
		   where we are and drop the whole packet */
		err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
		if (unlikely(err)) {
			fp->eth_q_stats.rx_skb_alloc_failed++;
			return err;
		}

		/* Unmap the page as we are going to pass it to the stack */
		pci_unmap_page(bp->pdev, pci_unmap_addr(&old_rx_pg, mapping),
			       SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);

		/* Add one frag and update the appropriate fields in the skb */
		skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);

		skb->data_len += frag_len;
		skb->truesize += frag_len;
		skb->len += frag_len;

		frag_size -= frag_len;
	}

	return 0;
}
*bp
, struct bnx2x_fastpath
*fp
,
1382 u16 queue
, int pad
, int len
, union eth_rx_cqe
*cqe
,
1385 struct sw_rx_bd
*rx_buf
= &fp
->tpa_pool
[queue
];
1386 struct sk_buff
*skb
= rx_buf
->skb
;
1388 struct sk_buff
*new_skb
= netdev_alloc_skb(bp
->dev
, bp
->rx_buf_size
);
1390 /* Unmap skb in the pool anyway, as we are going to change
1391 pool entry status to BNX2X_TPA_STOP even if new skb allocation
1393 pci_unmap_single(bp
->pdev
, pci_unmap_addr(rx_buf
, mapping
),
1394 bp
->rx_buf_size
, PCI_DMA_FROMDEVICE
);
1396 if (likely(new_skb
)) {
1397 /* fix ip xsum and give it to the stack */
1398 /* (no need to map the new skb) */
1401 (le16_to_cpu(cqe
->fast_path_cqe
.pars_flags
.flags
) &
1402 PARSING_FLAGS_VLAN
);
1403 int is_not_hwaccel_vlan_cqe
=
1404 (is_vlan_cqe
&& (!(bp
->flags
& HW_VLAN_RX_FLAG
)));
1408 prefetch(((char *)(skb
)) + 128);
1410 #ifdef BNX2X_STOP_ON_ERROR
1411 if (pad
+ len
> bp
->rx_buf_size
) {
1412 BNX2X_ERR("skb_put is about to fail... "
1413 "pad %d len %d rx_buf_size %d\n",
1414 pad
, len
, bp
->rx_buf_size
);
1420 skb_reserve(skb
, pad
);
1423 skb
->protocol
= eth_type_trans(skb
, bp
->dev
);
1424 skb
->ip_summed
= CHECKSUM_UNNECESSARY
;
1429 iph
= (struct iphdr
*)skb
->data
;
1431 /* If there is no Rx VLAN offloading -
1432 take VLAN tag into an account */
1433 if (unlikely(is_not_hwaccel_vlan_cqe
))
1434 iph
= (struct iphdr
*)((u8
*)iph
+ VLAN_HLEN
);
1437 iph
->check
= ip_fast_csum((u8
*)iph
, iph
->ihl
);
1440 if (!bnx2x_fill_frag_skb(bp
, fp
, skb
,
1441 &cqe
->fast_path_cqe
, cqe_idx
)) {
1443 if ((bp
->vlgrp
!= NULL
) && is_vlan_cqe
&&
1444 (!is_not_hwaccel_vlan_cqe
))
1445 vlan_hwaccel_receive_skb(skb
, bp
->vlgrp
,
1446 le16_to_cpu(cqe
->fast_path_cqe
.
1450 netif_receive_skb(skb
);
1452 DP(NETIF_MSG_RX_STATUS
, "Failed to allocate new pages"
1453 " - dropping packet!\n");
1458 /* put new skb in bin */
1459 fp
->tpa_pool
[queue
].skb
= new_skb
;
1462 /* else drop the packet and keep the buffer in the bin */
1463 DP(NETIF_MSG_RX_STATUS
,
1464 "Failed to allocate new skb - dropping packet!\n");
1465 fp
->eth_q_stats
.rx_skb_alloc_failed
++;
1468 fp
->tpa_state
[queue
] = BNX2X_TPA_STOP
;
static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
					struct bnx2x_fastpath *fp,
					u16 bd_prod, u16 rx_comp_prod,
					u16 rx_sge_prod)
{
	struct ustorm_eth_rx_producers rx_prods = {0};
	int i;

	/* Update producers */
	rx_prods.bd_prod = bd_prod;
	rx_prods.cqe_prod = rx_comp_prod;
	rx_prods.sge_prod = rx_sge_prod;

	/*
	 * Make sure that the BD and SGE data is updated before updating the
	 * producers since FW might read the BD/SGE right after the producer
	 * is updated.
	 * This is only applicable for weak-ordered memory model archs such
	 * as IA-64. The following barrier is also mandatory since the FW
	 * assumes BDs must have buffers.
	 */
	wmb();

	for (i = 0; i < sizeof(struct ustorm_eth_rx_producers)/4; i++)
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_RX_PRODS_OFFSET(BP_PORT(bp), fp->cl_id) + i*4,
		       ((u32 *)&rx_prods)[i]);

	mmiowb(); /* keep prod updates ordered */

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]: wrote bd_prod %u cqe_prod %u sge_prod %u\n",
	   fp->index, bd_prod, rx_comp_prod, rx_sge_prod);
}
static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
{
	struct bnx2x *bp = fp->bp;
	u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
	u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
	int rx_pkt = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return 0;
#endif

	/* CQ "next element" is of the size of the regular element,
	   that's why it's ok here */
	hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
	if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
		hw_comp_cons++;

	bd_cons = fp->rx_bd_cons;
	bd_prod = fp->rx_bd_prod;
	bd_prod_fw = bd_prod;
	sw_comp_cons = fp->rx_comp_cons;
	sw_comp_prod = fp->rx_comp_prod;

	/* Memory barrier necessary as speculative reads of the rx
	 * buffer can be ahead of the index in the status block
	 */
	rmb();

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]: hw_comp_cons %u sw_comp_cons %u\n",
	   fp->index, hw_comp_cons, sw_comp_cons);

	while (sw_comp_cons != hw_comp_cons) {
		struct sw_rx_bd *rx_buf = NULL;
		struct sk_buff *skb;
		union eth_rx_cqe *cqe;
		u8 cqe_fp_flags;
		u16 len, pad;

		comp_ring_cons = RCQ_BD(sw_comp_cons);
		bd_prod = RX_BD(bd_prod);
		bd_cons = RX_BD(bd_cons);

		/* Prefetch the page containing the BD descriptor
		   at producer's index. It will be needed when new skb is
		   allocated */
		prefetch((void *)(PAGE_ALIGN((unsigned long)
					     (&fp->rx_desc_ring[bd_prod])) -
				  PAGE_SIZE + 1));

		cqe = &fp->rx_comp_ring[comp_ring_cons];
		cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;

		DP(NETIF_MSG_RX_STATUS, "CQE type %x err %x status %x"
		   " queue %x vlan %x len %u\n", CQE_TYPE(cqe_fp_flags),
		   cqe_fp_flags, cqe->fast_path_cqe.status_flags,
		   le32_to_cpu(cqe->fast_path_cqe.rss_hash_result),
		   le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
		   le16_to_cpu(cqe->fast_path_cqe.pkt_len));

		/* is this a slowpath msg? */
		if (unlikely(CQE_TYPE(cqe_fp_flags))) {
			bnx2x_sp_event(fp, cqe);
			goto next_cqe;

		/* this is an rx packet */
		} else {
			rx_buf = &fp->rx_buf_ring[bd_cons];
			skb = rx_buf->skb;
			prefetch(skb);
			prefetch((u8 *)skb + 256);
			len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
			pad = cqe->fast_path_cqe.placement_offset;

			/* If CQE is marked both TPA_START and TPA_END
			   it is a non-TPA CQE */
			if ((!fp->disable_tpa) &&
			    (TPA_TYPE(cqe_fp_flags) !=
					(TPA_TYPE_START | TPA_TYPE_END))) {
				u16 queue = cqe->fast_path_cqe.queue_index;

				if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
					DP(NETIF_MSG_RX_STATUS,
					   "calling tpa_start on queue %d\n",
					   queue);

					bnx2x_tpa_start(fp, queue, skb,
							bd_cons, bd_prod);
					goto next_rx;
				}

				if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
					DP(NETIF_MSG_RX_STATUS,
					   "calling tpa_stop on queue %d\n",
					   queue);

					if (!BNX2X_RX_SUM_FIX(cqe))
						BNX2X_ERR("STOP on none TCP "
							  "data\n");

					/* This is a size of the linear data
					   on this skb */
					len = le16_to_cpu(cqe->fast_path_cqe.
								len_on_bd);
					bnx2x_tpa_stop(bp, fp, queue, pad,
						    len, cqe, comp_ring_cons);
#ifdef BNX2X_STOP_ON_ERROR
					if (bp->panic)
						return 0;
#endif

					bnx2x_update_sge_prod(fp,
							&cqe->fast_path_cqe);
					goto next_cqe;
				}
			}

			pci_dma_sync_single_for_device(bp->pdev,
					pci_unmap_addr(rx_buf, mapping),
						       pad + RX_COPY_THRESH,
						       PCI_DMA_FROMDEVICE);
			prefetch(((char *)(skb)) + 128);

			/* is this an error packet? */
			if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
				DP(NETIF_MSG_RX_ERR,
				   "ERROR flags %x rx packet %u\n",
				   cqe_fp_flags, sw_comp_cons);
				fp->eth_q_stats.rx_err_discard_pkt++;
				goto reuse_rx;
			}

			/* Since we don't have a jumbo ring
			 * copy small packets if mtu > 1500
			 */
			if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
			    (len <= RX_COPY_THRESH)) {
				struct sk_buff *new_skb;

				new_skb = netdev_alloc_skb(bp->dev,
							   len + pad);
				if (new_skb == NULL) {
					DP(NETIF_MSG_RX_ERR,
					   "ERROR packet dropped "
					   "because of alloc failure\n");
					fp->eth_q_stats.rx_skb_alloc_failed++;
					goto reuse_rx;
				}

				/* aligned copy */
				skb_copy_from_linear_data_offset(skb, pad,
						    new_skb->data + pad, len);
				skb_reserve(new_skb, pad);
				skb_put(new_skb, len);

				bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);

				skb = new_skb;

			} else
			if (likely(bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0)) {
				pci_unmap_single(bp->pdev,
					pci_unmap_addr(rx_buf, mapping),
						 bp->rx_buf_size,
						 PCI_DMA_FROMDEVICE);
				skb_reserve(skb, pad);
				skb_put(skb, len);

			} else {
				DP(NETIF_MSG_RX_ERR,
				   "ERROR packet dropped because "
				   "of alloc failure\n");
				fp->eth_q_stats.rx_skb_alloc_failed++;
reuse_rx:
				bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
				goto next_rx;
			}

			skb->protocol = eth_type_trans(skb, bp->dev);

			skb->ip_summed = CHECKSUM_NONE;
			if (bp->rx_csum) {
				if (likely(BNX2X_RX_CSUM_OK(cqe)))
					skb->ip_summed = CHECKSUM_UNNECESSARY;
				else
					fp->eth_q_stats.hw_csum_err++;
			}
		}

		skb_record_rx_queue(skb, fp->index);

		if ((bp->vlgrp != NULL) && (bp->flags & HW_VLAN_RX_FLAG) &&
		    (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
		     PARSING_FLAGS_VLAN))
			vlan_hwaccel_receive_skb(skb, bp->vlgrp,
				le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
		else
			netif_receive_skb(skb);

next_rx:
		rx_buf->skb = NULL;

		bd_cons = NEXT_RX_IDX(bd_cons);
		bd_prod = NEXT_RX_IDX(bd_prod);
		bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
		rx_pkt++;
next_cqe:
		sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
		sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);

		if (rx_pkt == budget)
			break;
	} /* while */

	fp->rx_bd_cons = bd_cons;
	fp->rx_bd_prod = bd_prod_fw;
	fp->rx_comp_cons = sw_comp_cons;
	fp->rx_comp_prod = sw_comp_prod;

	/* Update producers */
	bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
			     fp->rx_sge_prod);

	fp->rx_pkt += rx_pkt;

	return rx_pkt;
}
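/* bnx2x_rx_int() is the NAPI receive path: it walks the completion queue
 * until it either catches up with the hardware completion index or consumes
 * the caller's budget, and the packet count it returns is what the poll
 * routine reports back to the NAPI core.
 */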
static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
{
	struct bnx2x_fastpath *fp = fp_cookie;
	struct bnx2x *bp = fp->bp;

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

	DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
	   fp->index, fp->sb_id);
	bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	/* Handle Rx and Tx according to MSI-X vector */
	prefetch(fp->rx_cons_sb);
	prefetch(fp->tx_cons_sb);
	prefetch(&fp->status_blk->u_status_block.status_block_index);
	prefetch(&fp->status_blk->c_status_block.status_block_index);
	napi_schedule(&bnx2x_fp(bp, fp->index, napi));

	return IRQ_HANDLED;
}
static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
{
	struct bnx2x *bp = netdev_priv(dev_instance);
	u16 status = bnx2x_ack_int(bp);
	u16 mask;
	int i;

	/* Return here if interrupt is shared and it's not for us */
	if (unlikely(status == 0)) {
		DP(NETIF_MSG_INTR, "not our interrupt!\n");
		return IRQ_NONE;
	}
	DP(NETIF_MSG_INTR, "got an interrupt status 0x%x\n", status);

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	for (i = 0; i < BNX2X_NUM_QUEUES(bp); i++) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		mask = 0x2 << fp->sb_id;
		if (status & mask) {
			/* Handle Rx and Tx according to SB id */
			prefetch(fp->rx_cons_sb);
			prefetch(&fp->status_blk->u_status_block.
						status_block_index);
			prefetch(fp->tx_cons_sb);
			prefetch(&fp->status_blk->c_status_block.
						status_block_index);
			napi_schedule(&bnx2x_fp(bp, fp->index, napi));
			status &= ~mask;
		}
	}

	mask = 0x2 << CNIC_SB_ID(bp);
	if (status & (mask | 0x1)) {
		struct cnic_ops *c_ops = NULL;

		rcu_read_lock();
		c_ops = rcu_dereference(bp->cnic_ops);
		if (c_ops)
			c_ops->cnic_handler(bp->cnic_data, NULL);
		rcu_read_unlock();

		status &= ~mask;
	}

	if (unlikely(status & 0x1)) {
		queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);

		status &= ~0x1;
		if (!status)
			return IRQ_HANDLED;
	}

	if (status)
		DP(NETIF_MSG_INTR, "got an unknown interrupt! (status %u)\n",
		   status);

	return IRQ_HANDLED;
}
/* end of fast path */

static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);

/*
 * General service functions
 */
static int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	int func = BP_FUNC(bp);
	u32 hw_lock_control_reg;
	int cnt;

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		DP(NETIF_MSG_HW,
		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return -EINVAL;
	}

	if (func <= 5)
		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
	else
		hw_lock_control_reg =
				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);

	/* Validating that the resource is not already taken */
	lock_status = REG_RD(bp, hw_lock_control_reg);
	if (lock_status & resource_bit) {
		DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
		   lock_status, resource_bit);
		return -EEXIST;
	}

	/* Try for 5 second every 5ms */
	for (cnt = 0; cnt < 1000; cnt++) {
		/* Try to acquire the lock */
		REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
		lock_status = REG_RD(bp, hw_lock_control_reg);
		if (lock_status & resource_bit)
			return 0;

		msleep(5);
	}
	DP(NETIF_MSG_HW, "Timeout\n");
	return -EAGAIN;
}
static int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	int func = BP_FUNC(bp);
	u32 hw_lock_control_reg;

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		DP(NETIF_MSG_HW,
		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return -EINVAL;
	}

	if (func <= 5)
		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
	else
		hw_lock_control_reg =
				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);

	/* Validating that the resource is currently taken */
	lock_status = REG_RD(bp, hw_lock_control_reg);
	if (!(lock_status & resource_bit)) {
		DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
		   lock_status, resource_bit);
		return -EFAULT;
	}

	REG_WR(bp, hw_lock_control_reg, resource_bit);
	return 0;
}
/* HW Lock for shared dual port PHYs */
static void bnx2x_acquire_phy_lock(struct bnx2x *bp)
{
	mutex_lock(&bp->port.phy_mutex);

	if (bp->port.need_hw_lock)
		bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
}

static void bnx2x_release_phy_lock(struct bnx2x *bp)
{
	if (bp->port.need_hw_lock)
		bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);

	mutex_unlock(&bp->port.phy_mutex);
}
int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;
	int value;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	/* read GPIO value */
	gpio_reg = REG_RD(bp, MISC_REG_GPIO);

	/* get the requested pin value */
	if ((gpio_reg & gpio_mask) == gpio_mask)
		value = 1;
	else
		value = 0;

	DP(NETIF_MSG_LINK, "pin %d value 0x%x\n", gpio_num, value);

	return value;
}
int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
	/* read GPIO and mask except the float bits */
	gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);

	switch (mode) {
	case MISC_REGISTERS_GPIO_OUTPUT_LOW:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
		   gpio_num, gpio_shift);
		/* clear FLOAT and set CLR */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
		break;

	case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
		   gpio_num, gpio_shift);
		/* clear FLOAT and set SET */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
		break;

	case MISC_REGISTERS_GPIO_INPUT_HI_Z:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
		   gpio_num, gpio_shift);
		/* set FLOAT */
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_GPIO, gpio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);

	return 0;
}
int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
	/* read GPIO int */
	gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);

	switch (mode) {
	case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
		DP(NETIF_MSG_LINK, "Clear GPIO INT %d (shift %d) -> "
				   "output low\n", gpio_num, gpio_shift);
		/* clear SET and set CLR */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
		gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
		break;

	case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
		DP(NETIF_MSG_LINK, "Set GPIO INT %d (shift %d) -> "
				   "output high\n", gpio_num, gpio_shift);
		/* clear CLR and set SET */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
		gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);

	return 0;
}
static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
{
	u32 spio_mask = (1 << spio_num);
	u32 spio_reg;

	if ((spio_num < MISC_REGISTERS_SPIO_4) ||
	    (spio_num > MISC_REGISTERS_SPIO_7)) {
		BNX2X_ERR("Invalid SPIO %d\n", spio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
	/* read SPIO and mask except the float bits */
	spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);

	switch (mode) {
	case MISC_REGISTERS_SPIO_OUTPUT_LOW:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
		/* clear FLOAT and set CLR */
		spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
		break;

	case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
		/* clear FLOAT and set SET */
		spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
		break;

	case MISC_REGISTERS_SPIO_INPUT_HI_Z:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
		/* set FLOAT */
		spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_SPIO, spio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);

	return 0;
}
static void bnx2x_calc_fc_adv(struct bnx2x *bp)
{
	switch (bp->link_vars.ieee_fc &
		MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
		bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
					  ADVERTISED_Pause);
		break;

	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
		bp->port.advertising |= (ADVERTISED_Asym_Pause |
					 ADVERTISED_Pause);
		break;

	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
		bp->port.advertising |= ADVERTISED_Asym_Pause;
		break;

	default:
		bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
					  ADVERTISED_Pause);
		break;
	}
}
static void bnx2x_link_report(struct bnx2x *bp)
{
	if (bp->flags & MF_FUNC_DIS) {
		netif_carrier_off(bp->dev);
		netdev_err(bp->dev, "NIC Link is Down\n");
		return;
	}

	if (bp->link_vars.link_up) {
		u16 line_speed;

		if (bp->state == BNX2X_STATE_OPEN)
			netif_carrier_on(bp->dev);
		netdev_info(bp->dev, "NIC Link is Up, ");

		line_speed = bp->link_vars.line_speed;
		if (IS_E1HMF(bp)) {
			u16 vn_max_rate =
				((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
				 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;

			if (vn_max_rate < line_speed)
				line_speed = vn_max_rate;
		}
		pr_cont("%d Mbps ", line_speed);

		if (bp->link_vars.duplex == DUPLEX_FULL)
			pr_cont("full duplex");
		else
			pr_cont("half duplex");

		if (bp->link_vars.flow_ctrl != BNX2X_FLOW_CTRL_NONE) {
			if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) {
				pr_cont(", receive ");
				if (bp->link_vars.flow_ctrl &
				    BNX2X_FLOW_CTRL_TX)
					pr_cont("& transmit ");
			} else {
				pr_cont(", transmit ");
			}
			pr_cont("flow control ON");
		}

	} else { /* link_down */
		netif_carrier_off(bp->dev);
		netdev_err(bp->dev, "NIC Link is Down\n");
	}
}
static u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
{
	if (!BP_NOMCP(bp)) {
		u8 rc;

		/* Initialize link parameters structure variables */
		/* It is recommended to turn off RX FC for jumbo frames
		   for better performance */
		if (bp->dev->mtu > 5000)
			bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
		else
			bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;

		bnx2x_acquire_phy_lock(bp);

		if (load_mode == LOAD_DIAG)
			bp->link_params.loopback_mode = LOOPBACK_XGXS_10;

		rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);

		bnx2x_release_phy_lock(bp);

		bnx2x_calc_fc_adv(bp);

		if (CHIP_REV_IS_SLOW(bp) && bp->link_vars.link_up) {
			bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
			bnx2x_link_report(bp);
		}

		return rc;
	}
	BNX2X_ERR("Bootcode is missing - can not initialize link\n");
	return -EINVAL;
}
static void bnx2x_link_set(struct bnx2x *bp)
{
	if (!BP_NOMCP(bp)) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_phy_init(&bp->link_params, &bp->link_vars);
		bnx2x_release_phy_lock(bp);

		bnx2x_calc_fc_adv(bp);
	} else
		BNX2X_ERR("Bootcode is missing - can not set link\n");
}
static void bnx2x__link_reset(struct bnx2x *bp)
{
	if (!BP_NOMCP(bp)) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
		bnx2x_release_phy_lock(bp);
	} else
		BNX2X_ERR("Bootcode is missing - can not reset link\n");
}
static u8 bnx2x_link_test(struct bnx2x *bp)
{
	u8 rc;

	bnx2x_acquire_phy_lock(bp);
	rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
	bnx2x_release_phy_lock(bp);

	return rc;
}
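
/* Initialize the per-port rate-shaping and fairness contexts from the current
 * line speed. */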
static void bnx2x_init_port_minmax(struct bnx2x *bp)
{
	u32 r_param = bp->link_vars.line_speed / 8;
	u32 fair_periodic_timeout_usec;
	u32 t_fair;

	memset(&(bp->cmng.rs_vars), 0,
	       sizeof(struct rate_shaping_vars_per_port));
	memset(&(bp->cmng.fair_vars), 0, sizeof(struct fairness_vars_per_port));

	/* 100 usec in SDM ticks = 25 since each tick is 4 usec */
	bp->cmng.rs_vars.rs_periodic_timeout = RS_PERIODIC_TIMEOUT_USEC / 4;

	/* this is the threshold below which no timer arming will occur.
	   The 1.25 coefficient makes the threshold a little bigger than
	   the real time, to compensate for timer inaccuracy */
	bp->cmng.rs_vars.rs_threshold =
				(RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;

	/* resolution of fairness timer */
	fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
	/* for 10G it is 1000usec. for 1G it is 10000usec. */
	t_fair = T_FAIR_COEF / bp->link_vars.line_speed;

	/* this is the threshold below which we won't arm the timer anymore */
	bp->cmng.fair_vars.fair_threshold = QM_ARB_BYTES;

	/* we multiply by 1e3/8 to get bytes/msec.
	   We don't want the credits to pass a credit
	   of the t_fair*FAIR_MEM (algorithm resolution) */
	bp->cmng.fair_vars.upper_bound = r_param * t_fair * FAIR_MEM;
	/* since each tick is 4 usec */
	bp->cmng.fair_vars.fairness_timeout = fair_periodic_timeout_usec / 4;
}
/* Calculates the sum of vn_min_rates.
   It's needed for further normalizing of the min_rates.
   Returns:
     sum of vn_min_rates.
       or
     0 - if all the min_rates are 0.
   In the latter case the fairness algorithm should be deactivated.
   If not all min_rates are zero then those that are zeroes will be set to 1.
 */
static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
{
	int all_zero = 1;
	int port = BP_PORT(bp);
	int vn;

	bp->vn_weight_sum = 0;
	for (vn = VN_0; vn < E1HVN_MAX; vn++) {
		int func = 2*vn + port;
		u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
		u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
				   FUNC_MF_CFG_MIN_BW_SHIFT) * 100;

		/* Skip hidden vns */
		if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
			continue;

		/* If min rate is zero - set it to 1 */
		if (!vn_min_rate)
			vn_min_rate = DEF_MIN_RATE;
		else
			all_zero = 0;

		bp->vn_weight_sum += vn_min_rate;
	}

	/* ... only if all min rates are zeros - disable fairness */
	if (all_zero) {
		bp->cmng.flags.cmng_enables &=
					~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
		DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
		   " fairness will be disabled\n");
	} else
		bp->cmng.flags.cmng_enables |=
					CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
}
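
/* Initialize the per-VN (per-function) rate-shaping and fairness contexts and
 * write them to XSTORM internal memory. */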
static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func)
{
	struct rate_shaping_vars_per_vn m_rs_vn;
	struct fairness_vars_per_vn m_fair_vn;
	u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
	u16 vn_min_rate, vn_max_rate;
	int i;

	/* If function is hidden - set min and max to zeroes */
	if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
		vn_min_rate = 0;
		vn_max_rate = 0;

	} else {
		vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
				FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
		/* If min rate is zero - set it to 1 */
		if (!vn_min_rate)
			vn_min_rate = DEF_MIN_RATE;
		vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
				FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
	}
	DP(NETIF_MSG_IFUP,
	   "func %d: vn_min_rate %d vn_max_rate %d vn_weight_sum %d\n",
	   func, vn_min_rate, vn_max_rate, bp->vn_weight_sum);

	memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
	memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));

	/* global vn counter - maximal Mbps for this vn */
	m_rs_vn.vn_counter.rate = vn_max_rate;

	/* quota - number of bytes transmitted in this period */
	m_rs_vn.vn_counter.quota =
				(vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;

	if (bp->vn_weight_sum) {
		/* credit for each period of the fairness algorithm:
		   number of bytes in T_FAIR (the vn share the port rate).
		   vn_weight_sum should not be larger than 10000, thus
		   T_FAIR_COEF / (8 * vn_weight_sum) will always be greater
		   than zero */
		m_fair_vn.vn_credit_delta =
			max((u32)(vn_min_rate * (T_FAIR_COEF /
						 (8 * bp->vn_weight_sum))),
			    (u32)(bp->cmng.fair_vars.fair_threshold * 2));
		DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta=%d\n",
		   m_fair_vn.vn_credit_delta);
	}

	/* Store it to internal memory */
	for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
		REG_WR(bp, BAR_XSTRORM_INTMEM +
		       XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
		       ((u32 *)(&m_rs_vn))[i]);

	for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
		REG_WR(bp, BAR_XSTRORM_INTMEM +
		       XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
		       ((u32 *)(&m_fair_vn))[i]);
}
/* This function is called upon link interrupt */
static void bnx2x_link_attn(struct bnx2x *bp)
{
	/* Make sure that we are synced with the current statistics */
	bnx2x_stats_handle(bp, STATS_EVENT_STOP);

	bnx2x_link_update(&bp->link_params, &bp->link_vars);

	if (bp->link_vars.link_up) {

		/* dropless flow control */
		if (CHIP_IS_E1H(bp) && bp->dropless_fc) {
			int port = BP_PORT(bp);
			u32 pause_enabled = 0;

			if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
				pause_enabled = 1;

			REG_WR(bp, BAR_USTRORM_INTMEM +
			       USTORM_ETH_PAUSE_ENABLED_OFFSET(port),
			       pause_enabled);
		}

		if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
			struct host_port_stats *pstats;

			pstats = bnx2x_sp(bp, port_stats);
			/* reset old bmac stats */
			memset(&(pstats->mac_stx[0]), 0,
			       sizeof(struct mac_stx));
		}
		if (bp->state == BNX2X_STATE_OPEN)
			bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
	}

	/* indicate link status */
	bnx2x_link_report(bp);

	if (IS_E1HMF(bp)) {
		int port = BP_PORT(bp);
		int func;
		int vn;

		/* Set the attention towards other drivers on the same port */
		for (vn = VN_0; vn < E1HVN_MAX; vn++) {
			if (vn == BP_E1HVN(bp))
				continue;

			func = ((vn << 1) | port);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
			       (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
		}

		if (bp->link_vars.link_up) {
			int i;

			/* Init rate shaping and fairness contexts */
			bnx2x_init_port_minmax(bp);

			for (vn = VN_0; vn < E1HVN_MAX; vn++)
				bnx2x_init_vn_minmax(bp, 2*vn + port);

			/* Store it to internal memory */
			for (i = 0;
			     i < sizeof(struct cmng_struct_per_port) / 4; i++)
				REG_WR(bp, BAR_XSTRORM_INTMEM +
				  XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
				       ((u32 *)(&bp->cmng))[i]);
		}
	}
}
static void bnx2x__link_status_update(struct bnx2x *bp)
{
	if ((bp->state != BNX2X_STATE_OPEN) || (bp->flags & MF_FUNC_DIS))
		return;

	bnx2x_link_status_update(&bp->link_params, &bp->link_vars);

	if (bp->link_vars.link_up)
		bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
	else
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);

	bnx2x_calc_vn_weight_sum(bp);

	/* indicate link status */
	bnx2x_link_report(bp);
}
static void bnx2x_pmf_update(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 val;

	bp->port.pmf = 1;
	DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);

	/* enable nig attention */
	val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
	REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
	REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);

	bnx2x_stats_handle(bp, STATS_EVENT_PMF);
}
/*
 * General service functions
 */

/* send the MCP a request, block until there is a reply */
u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
{
	int func = BP_FUNC(bp);
	u32 seq = ++bp->fw_seq;
	u32 rc = 0;
	u32 cnt = 1;
	u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;

	mutex_lock(&bp->fw_mb_mutex);
	SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
	DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));

	do {
		/* let the FW do its magic ... */
		msleep(delay);

		rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);

		/* Give the FW up to 5 second (500*10ms) */
	} while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 500));

	DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
	   cnt*delay, rc, seq);

	/* is this a reply to our command? */
	if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK))
		rc &= FW_MSG_CODE_MASK;
	else {
		BNX2X_ERR("FW failed to respond!\n");
		rc = 0;
	}
	mutex_unlock(&bp->fw_mb_mutex);

	return rc;
}
static void bnx2x_set_storm_rx_mode(struct bnx2x *bp);
static void bnx2x_set_eth_mac_addr_e1h(struct bnx2x *bp, int set);
static void bnx2x_set_rx_mode(struct net_device *dev);
static void bnx2x_e1h_disable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);

	netif_tx_disable(bp->dev);

	REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);

	netif_carrier_off(bp->dev);
}
static void bnx2x_e1h_enable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);

	REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);

	/* Tx queue should be only reenabled */
	netif_tx_wake_all_queues(bp->dev);

	/*
	 * Should not call netif_carrier_on since it will be called if the link
	 * is up when checking for link state
	 */
}
static void bnx2x_update_min_max(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int vn, i, func;

	/* Init rate shaping and fairness contexts */
	bnx2x_init_port_minmax(bp);

	bnx2x_calc_vn_weight_sum(bp);

	for (vn = VN_0; vn < E1HVN_MAX; vn++)
		bnx2x_init_vn_minmax(bp, 2*vn + port);

	/* Set the attention towards other drivers on the same port */
	for (vn = VN_0; vn < E1HVN_MAX; vn++) {
		if (vn == BP_E1HVN(bp))
			continue;

		func = ((vn << 1) | port);
		REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
		       (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
	}

	/* Store it to internal memory */
	for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
		REG_WR(bp, BAR_XSTRORM_INTMEM +
		       XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
		       ((u32 *)(&bp->cmng))[i]);
}
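
/* Handle a DCC event from the MCP: enable/disable the PF and/or refresh the
 * bandwidth allocation, then report the result back to the MCP. */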
static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event)
{
	DP(BNX2X_MSG_MCP, "dcc_event 0x%x\n", dcc_event);

	if (dcc_event & DRV_STATUS_DCC_DISABLE_ENABLE_PF) {

		/*
		 * This is the only place besides the function initialization
		 * where the bp->flags can change so it is done without any
		 * locks
		 */
		if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
			DP(NETIF_MSG_IFDOWN, "mf_cfg function disabled\n");
			bp->flags |= MF_FUNC_DIS;

			bnx2x_e1h_disable(bp);
		} else {
			DP(NETIF_MSG_IFUP, "mf_cfg function enabled\n");
			bp->flags &= ~MF_FUNC_DIS;

			bnx2x_e1h_enable(bp);
		}
		dcc_event &= ~DRV_STATUS_DCC_DISABLE_ENABLE_PF;
	}
	if (dcc_event & DRV_STATUS_DCC_BANDWIDTH_ALLOCATION) {

		bnx2x_update_min_max(bp);
		dcc_event &= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION;
	}

	/* Report results to MCP */
	if (dcc_event)
		bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_FAILURE);
	else
		bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK);
}
/* must be called under the spq lock */
static inline struct eth_spe *bnx2x_sp_get_next(struct bnx2x *bp)
{
	struct eth_spe *next_spe = bp->spq_prod_bd;

	if (bp->spq_prod_bd == bp->spq_last_bd) {
		bp->spq_prod_bd = bp->spq;
		bp->spq_prod_idx = 0;
		DP(NETIF_MSG_TIMER, "end of spq\n");
	} else {
		bp->spq_prod_bd++;
		bp->spq_prod_idx++;
	}

	return next_spe;
}
/* must be called under the spq lock */
static inline void bnx2x_sp_prod_update(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);

	/* Make sure that BD data is updated before writing the producer */
	wmb();

	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
	       bp->spq_prod_idx);
}
/* the slow path queue is odd since completions arrive on the fastpath ring */
static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
			 u32 data_hi, u32 data_lo, int common)
{
	struct eth_spe *spe;

	DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
	   "SPQE (%x:%x) command %d hw_cid %x data (%x:%x) left %x\n",
	   (u32)U64_HI(bp->spq_mapping), (u32)(U64_LO(bp->spq_mapping) +
	   (void *)bp->spq_prod_bd - (void *)bp->spq), command,
	   HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return -EIO;
#endif

	spin_lock_bh(&bp->spq_lock);

	if (!bp->spq_left) {
		BNX2X_ERR("BUG! SPQ ring full!\n");
		spin_unlock_bh(&bp->spq_lock);
		return -EBUSY;
	}

	spe = bnx2x_sp_get_next(bp);

	/* CID needs port number to be encoded in it */
	spe->hdr.conn_and_cmd_data =
			cpu_to_le32(((command << SPE_HDR_CMD_ID_SHIFT) |
				     HW_CID(bp, cid)));
	spe->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
	if (common)
		spe->hdr.type |=
			cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));

	spe->data.mac_config_addr.hi = cpu_to_le32(data_hi);
	spe->data.mac_config_addr.lo = cpu_to_le32(data_lo);

	bp->spq_left--;

	bnx2x_sp_prod_update(bp);
	spin_unlock_bh(&bp->spq_lock);
	return 0;
}
/* acquire split MCP access lock register */
static int bnx2x_acquire_alr(struct bnx2x *bp)
{
	u32 i, j, val;
	int rc = 0;

	might_sleep();
	i = 100;
	for (j = 0; j < i*10; j++) {
		val = (1UL << 31);
		REG_WR(bp, GRCBASE_MCP + 0x9c, val);
		val = REG_RD(bp, GRCBASE_MCP + 0x9c);
		if (val & (1L << 31))
			break;

		msleep(5);
	}
	if (!(val & (1L << 31))) {
		BNX2X_ERR("Cannot acquire MCP access lock register\n");
		rc = -EBUSY;
	}

	return rc;
}
/* release split MCP access lock register */
static void bnx2x_release_alr(struct bnx2x *bp)
{
	u32 val = 0;

	REG_WR(bp, GRCBASE_MCP + 0x9c, val);
}
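
/* Refresh the cached default status block indices from the chip; the return
 * value tells the caller whether anything changed. */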
static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
{
	struct host_def_status_block *def_sb = bp->def_status_blk;
	u16 rc = 0;

	barrier(); /* status block is written to by the chip */
	if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
		bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
		rc |= 1;
	}
	if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
		bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
		rc |= 2;
	}
	if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
		bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
		rc |= 4;
	}
	if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
		bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
		rc |= 8;
	}
	if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
		bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
		rc |= 16;
	}
	return rc;
}
/*
 * slow path service functions
 */

static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
{
	int port = BP_PORT(bp);
	u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
		       COMMAND_REG_ATTN_BITS_SET);
	u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
			      MISC_REG_AEU_MASK_ATTN_FUNC_0;
	u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
				       NIG_REG_MASK_INTERRUPT_PORT0;
	u32 aeu_mask;
	u32 nig_mask = 0;

	if (bp->attn_state & asserted)
		BNX2X_ERR("IGU ERROR\n");

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
	aeu_mask = REG_RD(bp, aeu_addr);

	DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n",
	   aeu_mask, asserted);
	aeu_mask &= ~(asserted & 0xff);
	DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);

	REG_WR(bp, aeu_addr, aeu_mask);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);

	DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
	bp->attn_state |= asserted;
	DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);

	if (asserted & ATTN_HARD_WIRED_MASK) {
		if (asserted & ATTN_NIG_FOR_FUNC) {

			bnx2x_acquire_phy_lock(bp);

			/* save nig interrupt mask */
			nig_mask = REG_RD(bp, nig_int_mask_addr);
			REG_WR(bp, nig_int_mask_addr, 0);

			bnx2x_link_attn(bp);

			/* handle unicore attn? */
		}
		if (asserted & ATTN_SW_TIMER_4_FUNC)
			DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");

		if (asserted & GPIO_2_FUNC)
			DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");

		if (asserted & GPIO_3_FUNC)
			DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");

		if (asserted & GPIO_4_FUNC)
			DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");

		if (asserted & ATTN_GENERAL_ATTN_1) {
			DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
		}
		if (asserted & ATTN_GENERAL_ATTN_2) {
			DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
		}
		if (asserted & ATTN_GENERAL_ATTN_3) {
			DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
		}
		if (asserted & ATTN_GENERAL_ATTN_4) {
			DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
		}
		if (asserted & ATTN_GENERAL_ATTN_5) {
			DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
		}
		if (asserted & ATTN_GENERAL_ATTN_6) {
			DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
		}

	} /* if hardwired */

	DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
	   asserted, hc_addr);
	REG_WR(bp, hc_addr, asserted);

	/* now set back the mask */
	if (asserted & ATTN_NIG_FOR_FUNC) {
		REG_WR(bp, nig_int_mask_addr, nig_mask);
		bnx2x_release_phy_lock(bp);
	}
}
static inline void bnx2x_fan_failure(struct bnx2x *bp)
{
	int port = BP_PORT(bp);

	/* mark the failure */
	bp->link_params.ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
	bp->link_params.ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
	SHMEM_WR(bp, dev_info.port_hw_config[port].external_phy_config,
		 bp->link_params.ext_phy_config);

	/* log the failure */
	netdev_err(bp->dev, "Fan Failure on Network Controller has caused the"
		   " driver to shutdown the card to prevent permanent damage.\n"
		   "Please contact Dell Support for assistance.\n");
}
static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
{
	int port = BP_PORT(bp);
	int reg_offset;
	u32 val, swap_val, swap_override;

	reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
			     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);

	if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {

		val = REG_RD(bp, reg_offset);
		val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("SPIO5 hw attention\n");

		/* Fan failure attention */
		switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
			/* Low power mode is controlled by GPIO 2 */
			bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
				       MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
			/* The PHY reset is controlled by GPIO 1 */
			bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
				       MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
			/* The PHY reset is controlled by GPIO 1 */
			/* fake the port number to cancel the swap done in
			   set_gpio() */
			swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
			swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
			port = (swap_val && swap_override) ^ 1;
			bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
				       MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
			break;

		default:
			break;
		}
		bnx2x_fan_failure(bp);
	}

	if (attn & (AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 |
		    AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1)) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_handle_module_detect_int(&bp->link_params);
		bnx2x_release_phy_lock(bp);
	}

	if (attn & HW_INTERRUT_ASSERT_SET_0) {

		val = REG_RD(bp, reg_offset);
		val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
			  (u32)(attn & HW_INTERRUT_ASSERT_SET_0));
	}
}
static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
{
	u32 val;

	if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {

		val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
		BNX2X_ERR("DB hw attention 0x%x\n", val);
		/* DORQ discard attention */
		if (val & 0x2)
			BNX2X_ERR("FATAL error from DORQ\n");
	}

	if (attn & HW_INTERRUT_ASSERT_SET_1) {

		int port = BP_PORT(bp);
		int reg_offset;

		reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
				     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);

		val = REG_RD(bp, reg_offset);
		val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
			  (u32)(attn & HW_INTERRUT_ASSERT_SET_1));
	}
}
static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
{
	u32 val;

	if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {

		val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
		BNX2X_ERR("CFC hw attention 0x%x\n", val);
		/* CFC error attention */
		if (val & 0x2)
			BNX2X_ERR("FATAL error from CFC\n");
	}

	if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {

		val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
		BNX2X_ERR("PXP hw attention 0x%x\n", val);
		/* RQ_USDMDP_FIFO_OVERFLOW */
		if (val & 0x18000)
			BNX2X_ERR("FATAL error from PXP\n");
	}

	if (attn & HW_INTERRUT_ASSERT_SET_2) {

		int port = BP_PORT(bp);
		int reg_offset;

		reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
				     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);

		val = REG_RD(bp, reg_offset);
		val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
			  (u32)(attn & HW_INTERRUT_ASSERT_SET_2));
	}
}
static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
{
	u32 val;

	if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {

		if (attn & BNX2X_PMF_LINK_ASSERT) {
			int func = BP_FUNC(bp);

			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
			bp->mf_config = SHMEM_RD(bp,
					   mf_cfg.func_mf_config[func].config);
			val = SHMEM_RD(bp, func_mb[func].drv_status);
			if (val & DRV_STATUS_DCC_EVENT_MASK)
				bnx2x_dcc_event(bp,
					    (val & DRV_STATUS_DCC_EVENT_MASK));
			bnx2x__link_status_update(bp);
			if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF))
				bnx2x_pmf_update(bp);

		} else if (attn & BNX2X_MC_ASSERT_BITS) {

			BNX2X_ERR("MC assert!\n");
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);

		} else if (attn & BNX2X_MCP_ASSERT) {

			BNX2X_ERR("MCP assert!\n");
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);

		} else
			BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
	}

	if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
		BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
		if (attn & BNX2X_GRC_TIMEOUT) {
			val = CHIP_IS_E1H(bp) ?
				REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
			BNX2X_ERR("GRC time-out 0x%08x\n", val);
		}
		if (attn & BNX2X_GRC_RSV) {
			val = CHIP_IS_E1H(bp) ?
				REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
			BNX2X_ERR("GRC reserved 0x%08x\n", val);
		}
		REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
	}
}
static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
{
	struct attn_route attn;
	struct attn_route group_mask;
	int port = BP_PORT(bp);
	int index;
	u32 reg_addr;
	u32 val;
	u32 aeu_mask;

	/* need to take HW lock because MCP or other port might also
	   try to handle this event */
	bnx2x_acquire_alr(bp);

	attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
	attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
	attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
	attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
	DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
	   attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);

	for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
		if (deasserted & (1 << index)) {
			group_mask = bp->attn_group[index];

			DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
			   index, group_mask.sig[0], group_mask.sig[1],
			   group_mask.sig[2], group_mask.sig[3]);

			bnx2x_attn_int_deasserted3(bp,
					attn.sig[3] & group_mask.sig[3]);
			bnx2x_attn_int_deasserted1(bp,
					attn.sig[1] & group_mask.sig[1]);
			bnx2x_attn_int_deasserted2(bp,
					attn.sig[2] & group_mask.sig[2]);
			bnx2x_attn_int_deasserted0(bp,
					attn.sig[0] & group_mask.sig[0]);

			if ((attn.sig[0] & group_mask.sig[0] &
						HW_PRTY_ASSERT_SET_0) ||
			    (attn.sig[1] & group_mask.sig[1] &
						HW_PRTY_ASSERT_SET_1) ||
			    (attn.sig[2] & group_mask.sig[2] &
						HW_PRTY_ASSERT_SET_2))
				BNX2X_ERR("FATAL HW block parity attention\n");
		}
	}

	bnx2x_release_alr(bp);

	reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);

	val = ~deasserted;
	DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
	   val, reg_addr);
	REG_WR(bp, reg_addr, val);

	if (~bp->attn_state & deasserted)
		BNX2X_ERR("IGU ERROR\n");

	reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
			  MISC_REG_AEU_MASK_ATTN_FUNC_0;

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
	aeu_mask = REG_RD(bp, reg_addr);

	DP(NETIF_MSG_HW, "aeu_mask %x newly deasserted %x\n",
	   aeu_mask, deasserted);
	aeu_mask |= (deasserted & 0xff);
	DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);

	REG_WR(bp, reg_addr, aeu_mask);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);

	DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
	bp->attn_state &= ~deasserted;
	DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
}
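
/* Dispatch attention bits read from the default status block to the
 * asserted/deasserted handlers. */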
static void bnx2x_attn_int(struct bnx2x *bp)
{
	/* read local copy of bits */
	u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
								attn_bits);
	u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
								attn_bits_ack);
	u32 attn_state = bp->attn_state;

	/* look for changed bits */
	u32 asserted   =  attn_bits & ~attn_ack & ~attn_state;
	u32 deasserted = ~attn_bits &  attn_ack &  attn_state;

	DP(NETIF_MSG_HW,
	   "attn_bits %x attn_ack %x asserted %x deasserted %x\n",
	   attn_bits, attn_ack, asserted, deasserted);

	if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
		BNX2X_ERR("BAD attention state\n");

	/* handle bits that were raised */
	if (asserted)
		bnx2x_attn_int_asserted(bp, asserted);

	if (deasserted)
		bnx2x_attn_int_deasserted(bp, deasserted);
}
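
/* Slow path work handler: pick up default status block updates, handle HW
 * attentions and acknowledge the status blocks. */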
static void bnx2x_sp_task(struct work_struct *work)
{
	struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
	u16 status;

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return;
	}

	status = bnx2x_update_dsb_idx(bp);
/*	if (status == 0)				     */
/*		BNX2X_ERR("spurious slowpath interrupt!\n"); */

	DP(NETIF_MSG_INTR, "got a slowpath interrupt (updated %x)\n", status);

	/* HW attentions */
	if (status & 0x1)
		bnx2x_attn_int(bp);

	bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, le16_to_cpu(bp->def_att_idx),
		     IGU_INT_NOP, 1);
	bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
		     IGU_INT_NOP, 1);
	bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
		     IGU_INT_NOP, 1);
	bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
		     IGU_INT_NOP, 1);
	bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
		     IGU_INT_ENABLE, 1);
}
static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct bnx2x *bp = netdev_priv(dev);

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

	bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, 0, IGU_INT_DISABLE, 0);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

#ifdef BCM_CNIC
	{
		struct cnic_ops *c_ops;

		rcu_read_lock();
		c_ops = rcu_dereference(bp->cnic_ops);
		if (c_ops)
			c_ops->cnic_handler(bp->cnic_data, NULL);
		rcu_read_unlock();
	}
#endif
	queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);

	return IRQ_HANDLED;
}
/* end of slow path */

/****************************************************************************
* Macros
****************************************************************************/

/* sum[hi:lo] += add[hi:lo] */
#define ADD_64(s_hi, a_hi, s_lo, a_lo) \
	do { \
		s_lo += a_lo; \
		s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
	} while (0)

/* difference = minuend - subtrahend */
#define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
	do { \
		if (m_lo < s_lo) { \
			/* underflow */ \
			d_hi = m_hi - s_hi; \
			if (d_hi > 0) { \
				/* we can 'loan' 1 */ \
				d_hi--; \
				d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
			} else { \
				/* m_hi <= s_hi */ \
				d_hi = 0; \
				d_lo = 0; \
			} \
		} else { \
			/* m_lo >= s_lo */ \
			if (m_hi < s_hi) { \
				d_hi = 0; \
				d_lo = 0; \
			} else { \
				/* m_hi >= s_hi */ \
				d_hi = m_hi - s_hi; \
				d_lo = m_lo - s_lo; \
			} \
		} \
	} while (0)

#define UPDATE_STAT64(s, t) \
	do { \
		DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
			diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
		pstats->mac_stx[0].t##_hi = new->s##_hi; \
		pstats->mac_stx[0].t##_lo = new->s##_lo; \
		ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
		       pstats->mac_stx[1].t##_lo, diff.lo); \
	} while (0)

#define UPDATE_STAT64_NIG(s, t) \
	do { \
		DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
			diff.lo, new->s##_lo, old->s##_lo); \
		ADD_64(estats->t##_hi, diff.hi, \
		       estats->t##_lo, diff.lo); \
	} while (0)

/* sum[hi:lo] += add */
#define ADD_EXTEND_64(s_hi, s_lo, a) \
	do { \
		s_lo += a; \
		s_hi += (s_lo < a) ? 1 : 0; \
	} while (0)

#define UPDATE_EXTEND_STAT(s) \
	do { \
		ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
			      pstats->mac_stx[1].s##_lo, \
			      new->s); \
	} while (0)

#define UPDATE_EXTEND_TSTAT(s, t) \
	do { \
		diff = le32_to_cpu(tclient->s) - le32_to_cpu(old_tclient->s); \
		old_tclient->s = tclient->s; \
		ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
	} while (0)

#define UPDATE_EXTEND_USTAT(s, t) \
	do { \
		diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
		old_uclient->s = uclient->s; \
		ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
	} while (0)

#define UPDATE_EXTEND_XSTAT(s, t) \
	do { \
		diff = le32_to_cpu(xclient->s) - le32_to_cpu(old_xclient->s); \
		old_xclient->s = xclient->s; \
		ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
	} while (0)

/* minuend -= subtrahend */
#define SUB_64(m_hi, s_hi, m_lo, s_lo) \
	do { \
		DIFF_64(m_hi, m_hi, s_hi, m_lo, m_lo, s_lo); \
	} while (0)

/* minuend[hi:lo] -= subtrahend */
#define SUB_EXTEND_64(m_hi, m_lo, s) \
	do { \
		SUB_64(m_hi, 0, m_lo, s); \
	} while (0)

#define SUB_EXTEND_USTAT(s, t) \
	do { \
		diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
		SUB_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
	} while (0)

/*
 * General service functions
 */
static inline long bnx2x_hilo(u32 *hiref)
{
	u32 lo = *(hiref + 1);
#if (BITS_PER_LONG == 64)
	u32 hi = *hiref;

	return HILO_U64(hi, lo);
#else
	return lo;
#endif
}
/*
 * Init service functions
 */

static void bnx2x_storm_stats_post(struct bnx2x *bp)
{
	if (!bp->stats_pending) {
		struct eth_query_ramrod_data ramrod_data = {0};
		int i, rc;

		ramrod_data.drv_counter = bp->stats_counter++;
		ramrod_data.collect_port = bp->port.pmf ? 1 : 0;
		for_each_queue(bp, i)
			ramrod_data.ctr_id_vector |= (1 << bp->fp[i].cl_id);

		rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
				   ((u32 *)&ramrod_data)[1],
				   ((u32 *)&ramrod_data)[0], 0);
		if (rc == 0) {
			/* stats ramrod has its own slot on the spq */
			bp->stats_pending = 1;
		}
	}
}
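
/* Kick the DMAE engine to run the statistics commands queued in the executer
 * list (or the single function-stats command when no list was built). */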
static void bnx2x_hw_stats_post(struct bnx2x *bp)
{
	struct dmae_command *dmae = &bp->stats_dmae;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	*stats_comp = DMAE_COMP_VAL;
	if (CHIP_REV_IS_SLOW(bp))
		return;

	if (bp->executer_idx) {
		int loader_idx = PMF_DMAE_C(bp);

		memset(dmae, 0, sizeof(struct dmae_command));

		dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
				DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
				DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
				DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
				DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
				(BP_PORT(bp) ? DMAE_CMD_PORT_1 :
					       DMAE_CMD_PORT_0) |
				(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
		dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
				     sizeof(struct dmae_command) *
				     (loader_idx + 1)) >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct dmae_command) >> 2;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
		dmae->comp_addr_hi = 0;

		bnx2x_post_dmae(bp, dmae, loader_idx);

	} else if (bp->func_stx) {
		bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
	}
}
static int bnx2x_stats_comp(struct bnx2x *bp)
{
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);
	int cnt = 10;

	might_sleep();
	while (*stats_comp != DMAE_COMP_VAL) {
		if (!cnt) {
			BNX2X_ERR("timeout waiting for stats finished\n");
			break;
		}
		cnt--;
		msleep(1);
	}
	return 1;
}
/*
 * Statistics service functions
 */

static void bnx2x_stats_pmf_update(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	u32 opcode;
	int loader_idx = PMF_DMAE_C(bp);
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	if (!IS_E1HMF(bp) || !bp->port.pmf || !bp->port.port_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;

	opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
		  DMAE_CMD_C_ENABLE |
		  DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		  DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		  DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		  (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		  (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));

	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
	dmae->src_addr_lo = bp->port.port_stx >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
	dmae->len = DMAE_LEN32_RD_MAX;
	dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
	dmae->comp_addr_hi = 0;

	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
	dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
				   DMAE_LEN32_RD_MAX * 4);
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
				   DMAE_LEN32_RD_MAX * 4);
	dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	bnx2x_hw_stats_post(bp);
	bnx2x_stats_comp(bp);
}
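
/* Build the DMAE command list that copies the driver statistics to the MCP
 * and reads the MAC (BMAC/EMAC) and NIG statistics into host memory. */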
static void bnx2x_port_stats_init(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	int port = BP_PORT(bp);
	int vn = BP_E1HVN(bp);
	u32 opcode;
	int loader_idx = PMF_DMAE_C(bp);
	u32 mac_addr;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	if (!bp->link_vars.link_up || !bp->port.pmf) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;

	opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
		  DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
		  DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		  DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		  DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		  (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		  (vn << DMAE_CMD_E1HVN_SHIFT));

	if (bp->port.port_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
		dmae->dst_addr_lo = bp->port.port_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct host_port_stats) >> 2;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
	}

	if (bp->func_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
		dmae->dst_addr_lo = bp->func_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct host_func_stats) >> 2;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
	}

	opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
		  DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
		  DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		  DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		  DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		  (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		  (vn << DMAE_CMD_E1HVN_SHIFT));

	if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {

		mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
				   NIG_REG_INGRESS_BMAC0_MEM);

		/* BIGMAC_REGISTER_TX_STAT_GTPKT ..
		   BIGMAC_REGISTER_TX_STAT_GTBYT */
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
		dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
			     BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;

		/* BIGMAC_REGISTER_RX_STAT_GR64 ..
		   BIGMAC_REGISTER_RX_STAT_GRIPJ */
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
				offsetof(struct bmac_stats, rx_stat_gr64_lo));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
				offsetof(struct bmac_stats, rx_stat_gr64_lo));
		dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
			     BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;

	} else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {

		mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);

		/* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     EMAC_REG_EMAC_RX_STAT_AC) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
		dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;

		/* EMAC_REG_EMAC_RX_STAT_AC_28 */
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
		     offsetof(struct emac_stats, rx_stat_falsecarriererrors));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
		     offsetof(struct emac_stats, rx_stat_falsecarriererrors));
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;

		/* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     EMAC_REG_EMAC_TX_STAT_AC) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
			offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
			offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
		dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
	}

	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = opcode;
	dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
				    NIG_REG_STAT0_BRB_DISCARD) >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
	dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;
	dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
	dmae->comp_addr_hi = 0;

	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = opcode;
	dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
				    NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
			offsetof(struct nig_stats, egress_mac_pkt0_lo));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
			offsetof(struct nig_stats, egress_mac_pkt0_lo));
	dmae->len = (2*sizeof(u32)) >> 2;
	dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
	dmae->comp_addr_hi = 0;

	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(vn << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
				    NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
			offsetof(struct nig_stats, egress_mac_pkt1_lo));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
			offsetof(struct nig_stats, egress_mac_pkt1_lo));
	dmae->len = (2*sizeof(u32)) >> 2;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;
}
static void bnx2x_func_stats_init(struct bnx2x *bp)
{
	struct dmae_command *dmae = &bp->stats_dmae;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	if (!bp->func_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;
	memset(dmae, 0, sizeof(struct dmae_command));

	dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
	dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
	dmae->dst_addr_lo = bp->func_stx >> 2;
	dmae->dst_addr_hi = 0;
	dmae->len = sizeof(struct host_func_stats) >> 2;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;
}
static void bnx2x_stats_start(struct bnx2x *bp)
{
	if (bp->port.pmf)
		bnx2x_port_stats_init(bp);

	else if (bp->func_stx)
		bnx2x_func_stats_init(bp);

	bnx2x_hw_stats_post(bp);
	bnx2x_storm_stats_post(bp);
}

static void bnx2x_stats_pmf_start(struct bnx2x *bp)
{
	bnx2x_stats_comp(bp);
	bnx2x_stats_pmf_update(bp);
	bnx2x_stats_start(bp);
}

static void bnx2x_stats_restart(struct bnx2x *bp)
{
	bnx2x_stats_comp(bp);
	bnx2x_stats_start(bp);
}
static void bnx2x_bmac_stats_update(struct bnx2x *bp)
{
	struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac_stats);
	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	struct {
		u32 lo;
		u32 hi;
	} diff;

	UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
	UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
	UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
	UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
	UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
	UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
	UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
	UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
	UPDATE_STAT64(rx_stat_grxpf, rx_stat_bmac_xpf);
	UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
	UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
	UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
	UPDATE_STAT64(tx_stat_gt127,
				tx_stat_etherstatspkts65octetsto127octets);
	UPDATE_STAT64(tx_stat_gt255,
				tx_stat_etherstatspkts128octetsto255octets);
	UPDATE_STAT64(tx_stat_gt511,
				tx_stat_etherstatspkts256octetsto511octets);
	UPDATE_STAT64(tx_stat_gt1023,
				tx_stat_etherstatspkts512octetsto1023octets);
	UPDATE_STAT64(tx_stat_gt1518,
				tx_stat_etherstatspkts1024octetsto1522octets);
	UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047);
	UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095);
	UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216);
	UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383);
	UPDATE_STAT64(tx_stat_gterr,
				tx_stat_dot3statsinternalmactransmiterrors);
	UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl);

	estats->pause_frames_received_hi =
				pstats->mac_stx[1].rx_stat_bmac_xpf_hi;
	estats->pause_frames_received_lo =
				pstats->mac_stx[1].rx_stat_bmac_xpf_lo;

	estats->pause_frames_sent_hi =
				pstats->mac_stx[1].tx_stat_outxoffsent_hi;
	estats->pause_frames_sent_lo =
				pstats->mac_stx[1].tx_stat_outxoffsent_lo;
}
static void bnx2x_emac_stats_update(struct bnx2x *bp)
{
	struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;

	UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
	UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
	UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
	UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
	UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
	UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
	UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
	UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
	UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
	UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
	UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
	UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
	UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
	UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
	UPDATE_EXTEND_STAT(tx_stat_outxonsent);
	UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
	UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
	UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
	UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
	UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);

	estats->pause_frames_received_hi =
			pstats->mac_stx[1].rx_stat_xonpauseframesreceived_hi;
	estats->pause_frames_received_lo =
			pstats->mac_stx[1].rx_stat_xonpauseframesreceived_lo;
	ADD_64(estats->pause_frames_received_hi,
	       pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_hi,
	       estats->pause_frames_received_lo,
	       pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_lo);

	estats->pause_frames_sent_hi =
			pstats->mac_stx[1].tx_stat_outxonsent_hi;
	estats->pause_frames_sent_lo =
			pstats->mac_stx[1].tx_stat_outxonsent_lo;
	ADD_64(estats->pause_frames_sent_hi,
	       pstats->mac_stx[1].tx_stat_outxoffsent_hi,
	       estats->pause_frames_sent_lo,
	       pstats->mac_stx[1].tx_stat_outxoffsent_lo);
}
static int bnx2x_hw_stats_update(struct bnx2x *bp)
{
	struct nig_stats *new = bnx2x_sp(bp, nig_stats);
	struct nig_stats *old = &(bp->port.old_nig_stats);
	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	struct {
		u32 lo;
		u32 hi;
	} diff;
	u32 nig_timer_max;

	if (bp->link_vars.mac_type == MAC_TYPE_BMAC)
		bnx2x_bmac_stats_update(bp);

	else if (bp->link_vars.mac_type == MAC_TYPE_EMAC)
		bnx2x_emac_stats_update(bp);

	else { /* unreached */
		BNX2X_ERR("stats updated by DMAE but no MAC active\n");
		return -1;
	}

	ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
		      new->brb_discard - old->brb_discard);
	ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
		      new->brb_truncate - old->brb_truncate);

	UPDATE_STAT64_NIG(egress_mac_pkt0,
					etherstatspkts1024octetsto1522octets);
	UPDATE_STAT64_NIG(egress_mac_pkt1, etherstatspktsover1522octets);

	memcpy(old, new, sizeof(struct nig_stats));

	memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
	       sizeof(struct mac_stx));
	estats->brb_drop_hi = pstats->brb_drop_hi;
	estats->brb_drop_lo = pstats->brb_drop_lo;

	pstats->host_port_stats_start = ++pstats->host_port_stats_end;

	nig_timer_max = SHMEM_RD(bp, port_mb[BP_PORT(bp)].stat_nig_timer);
	if (nig_timer_max != estats->nig_timer_max) {
		estats->nig_timer_max = nig_timer_max;
		BNX2X_ERR("NIG timer max (%u)\n", estats->nig_timer_max);
	}

	return 0;
}
3991 static int bnx2x_storm_stats_update(struct bnx2x
*bp
)
3993 struct eth_stats_query
*stats
= bnx2x_sp(bp
, fw_stats
);
3994 struct tstorm_per_port_stats
*tport
=
3995 &stats
->tstorm_common
.port_statistics
;
3996 struct host_func_stats
*fstats
= bnx2x_sp(bp
, func_stats
);
3997 struct bnx2x_eth_stats
*estats
= &bp
->eth_stats
;
4000 memcpy(&(fstats
->total_bytes_received_hi
),
4001 &(bnx2x_sp(bp
, func_stats_base
)->total_bytes_received_hi
),
4002 sizeof(struct host_func_stats
) - 2*sizeof(u32
));
4003 estats
->error_bytes_received_hi
= 0;
4004 estats
->error_bytes_received_lo
= 0;
4005 estats
->etherstatsoverrsizepkts_hi
= 0;
4006 estats
->etherstatsoverrsizepkts_lo
= 0;
4007 estats
->no_buff_discard_hi
= 0;
4008 estats
->no_buff_discard_lo
= 0;
4010 for_each_queue(bp
, i
) {
4011 struct bnx2x_fastpath
*fp
= &bp
->fp
[i
];
4012 int cl_id
= fp
->cl_id
;
4013 struct tstorm_per_client_stats
*tclient
=
4014 &stats
->tstorm_common
.client_statistics
[cl_id
];
4015 struct tstorm_per_client_stats
*old_tclient
= &fp
->old_tclient
;
4016 struct ustorm_per_client_stats
*uclient
=
4017 &stats
->ustorm_common
.client_statistics
[cl_id
];
4018 struct ustorm_per_client_stats
*old_uclient
= &fp
->old_uclient
;
4019 struct xstorm_per_client_stats
*xclient
=
4020 &stats
->xstorm_common
.client_statistics
[cl_id
];
4021 struct xstorm_per_client_stats
*old_xclient
= &fp
->old_xclient
;
4022 struct bnx2x_eth_q_stats
*qstats
= &fp
->eth_q_stats
;
4025 /* are storm stats valid? */
4026 if ((u16
)(le16_to_cpu(xclient
->stats_counter
) + 1) !=
4027 bp
->stats_counter
) {
4028 DP(BNX2X_MSG_STATS
, "[%d] stats not updated by xstorm"
4029 " xstorm counter (%d) != stats_counter (%d)\n",
4030 i
, xclient
->stats_counter
, bp
->stats_counter
);
4033 if ((u16
)(le16_to_cpu(tclient
->stats_counter
) + 1) !=
4034 bp
->stats_counter
) {
4035 DP(BNX2X_MSG_STATS
, "[%d] stats not updated by tstorm"
4036 " tstorm counter (%d) != stats_counter (%d)\n",
4037 i
, tclient
->stats_counter
, bp
->stats_counter
);
4040 if ((u16
)(le16_to_cpu(uclient
->stats_counter
) + 1) !=
4041 bp
->stats_counter
) {
4042 DP(BNX2X_MSG_STATS
, "[%d] stats not updated by ustorm"
4043 " ustorm counter (%d) != stats_counter (%d)\n",
4044 i
, uclient
->stats_counter
, bp
->stats_counter
);
4048 qstats
->total_bytes_received_hi
=
4049 le32_to_cpu(tclient
->rcv_broadcast_bytes
.hi
);
4050 qstats
->total_bytes_received_lo
=
4051 le32_to_cpu(tclient
->rcv_broadcast_bytes
.lo
);
4053 ADD_64(qstats
->total_bytes_received_hi
,
4054 le32_to_cpu(tclient
->rcv_multicast_bytes
.hi
),
4055 qstats
->total_bytes_received_lo
,
4056 le32_to_cpu(tclient
->rcv_multicast_bytes
.lo
));
4058 ADD_64(qstats
->total_bytes_received_hi
,
4059 le32_to_cpu(tclient
->rcv_unicast_bytes
.hi
),
4060 qstats
->total_bytes_received_lo
,
4061 le32_to_cpu(tclient
->rcv_unicast_bytes
.lo
));
4063 qstats
->valid_bytes_received_hi
=
4064 qstats
->total_bytes_received_hi
;
4065 qstats
->valid_bytes_received_lo
=
4066 qstats
->total_bytes_received_lo
;
4068 qstats
->error_bytes_received_hi
=
4069 le32_to_cpu(tclient
->rcv_error_bytes
.hi
);
4070 qstats
->error_bytes_received_lo
=
4071 le32_to_cpu(tclient
->rcv_error_bytes
.lo
);
4073 ADD_64(qstats
->total_bytes_received_hi
,
4074 qstats
->error_bytes_received_hi
,
4075 qstats
->total_bytes_received_lo
,
4076 qstats
->error_bytes_received_lo
);
4078 UPDATE_EXTEND_TSTAT(rcv_unicast_pkts
,
4079 total_unicast_packets_received
);
4080 UPDATE_EXTEND_TSTAT(rcv_multicast_pkts
,
4081 total_multicast_packets_received
);
4082 UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts
,
4083 total_broadcast_packets_received
);
4084 UPDATE_EXTEND_TSTAT(packets_too_big_discard
,
4085 etherstatsoverrsizepkts
);
4086 UPDATE_EXTEND_TSTAT(no_buff_discard
, no_buff_discard
);
4088 SUB_EXTEND_USTAT(ucast_no_buff_pkts
,
4089 total_unicast_packets_received
);
4090 SUB_EXTEND_USTAT(mcast_no_buff_pkts
,
4091 total_multicast_packets_received
);
4092 SUB_EXTEND_USTAT(bcast_no_buff_pkts
,
4093 total_broadcast_packets_received
);
4094 UPDATE_EXTEND_USTAT(ucast_no_buff_pkts
, no_buff_discard
);
4095 UPDATE_EXTEND_USTAT(mcast_no_buff_pkts
, no_buff_discard
);
4096 UPDATE_EXTEND_USTAT(bcast_no_buff_pkts
, no_buff_discard
);
4098 qstats
->total_bytes_transmitted_hi
=
4099 le32_to_cpu(xclient
->unicast_bytes_sent
.hi
);
4100 qstats
->total_bytes_transmitted_lo
=
4101 le32_to_cpu(xclient
->unicast_bytes_sent
.lo
);
4103 ADD_64(qstats
->total_bytes_transmitted_hi
,
4104 le32_to_cpu(xclient
->multicast_bytes_sent
.hi
),
4105 qstats
->total_bytes_transmitted_lo
,
4106 le32_to_cpu(xclient
->multicast_bytes_sent
.lo
));
4108 ADD_64(qstats
->total_bytes_transmitted_hi
,
4109 le32_to_cpu(xclient
->broadcast_bytes_sent
.hi
),
4110 qstats
->total_bytes_transmitted_lo
,
4111 le32_to_cpu(xclient
->broadcast_bytes_sent
.lo
));
4113 UPDATE_EXTEND_XSTAT(unicast_pkts_sent
,
4114 total_unicast_packets_transmitted
);
4115 UPDATE_EXTEND_XSTAT(multicast_pkts_sent
,
4116 total_multicast_packets_transmitted
);
4117 UPDATE_EXTEND_XSTAT(broadcast_pkts_sent
,
4118 total_broadcast_packets_transmitted
);
4120 old_tclient
->checksum_discard
= tclient
->checksum_discard
;
4121 old_tclient
->ttl0_discard
= tclient
->ttl0_discard
;
4123 ADD_64(fstats
->total_bytes_received_hi
,
4124 qstats
->total_bytes_received_hi
,
4125 fstats
->total_bytes_received_lo
,
4126 qstats
->total_bytes_received_lo
);
4127 ADD_64(fstats
->total_bytes_transmitted_hi
,
4128 qstats
->total_bytes_transmitted_hi
,
4129 fstats
->total_bytes_transmitted_lo
,
4130 qstats
->total_bytes_transmitted_lo
);
4131 ADD_64(fstats
->total_unicast_packets_received_hi
,
4132 qstats
->total_unicast_packets_received_hi
,
4133 fstats
->total_unicast_packets_received_lo
,
4134 qstats
->total_unicast_packets_received_lo
);
4135 ADD_64(fstats
->total_multicast_packets_received_hi
,
4136 qstats
->total_multicast_packets_received_hi
,
4137 fstats
->total_multicast_packets_received_lo
,
4138 qstats
->total_multicast_packets_received_lo
);
4139 ADD_64(fstats
->total_broadcast_packets_received_hi
,
4140 qstats
->total_broadcast_packets_received_hi
,
4141 fstats
->total_broadcast_packets_received_lo
,
4142 qstats
->total_broadcast_packets_received_lo
);
4143 ADD_64(fstats
->total_unicast_packets_transmitted_hi
,
4144 qstats
->total_unicast_packets_transmitted_hi
,
4145 fstats
->total_unicast_packets_transmitted_lo
,
4146 qstats
->total_unicast_packets_transmitted_lo
);
4147 ADD_64(fstats
->total_multicast_packets_transmitted_hi
,
4148 qstats
->total_multicast_packets_transmitted_hi
,
4149 fstats
->total_multicast_packets_transmitted_lo
,
4150 qstats
->total_multicast_packets_transmitted_lo
);
4151 ADD_64(fstats
->total_broadcast_packets_transmitted_hi
,
4152 qstats
->total_broadcast_packets_transmitted_hi
,
4153 fstats
->total_broadcast_packets_transmitted_lo
,
4154 qstats
->total_broadcast_packets_transmitted_lo
);
4155 ADD_64(fstats
->valid_bytes_received_hi
,
4156 qstats
->valid_bytes_received_hi
,
4157 fstats
->valid_bytes_received_lo
,
4158 qstats
->valid_bytes_received_lo
);
4160 ADD_64(estats
->error_bytes_received_hi
,
4161 qstats
->error_bytes_received_hi
,
4162 estats
->error_bytes_received_lo
,
4163 qstats
->error_bytes_received_lo
);
4164 ADD_64(estats
->etherstatsoverrsizepkts_hi
,
4165 qstats
->etherstatsoverrsizepkts_hi
,
4166 estats
->etherstatsoverrsizepkts_lo
,
4167 qstats
->etherstatsoverrsizepkts_lo
);
4168 ADD_64(estats
->no_buff_discard_hi
, qstats
->no_buff_discard_hi
,
4169 estats
->no_buff_discard_lo
, qstats
->no_buff_discard_lo
);
4172 ADD_64(fstats
->total_bytes_received_hi
,
4173 estats
->rx_stat_ifhcinbadoctets_hi
,
4174 fstats
->total_bytes_received_lo
,
4175 estats
->rx_stat_ifhcinbadoctets_lo
);
4177 memcpy(estats
, &(fstats
->total_bytes_received_hi
),
4178 sizeof(struct host_func_stats
) - 2*sizeof(u32
));
4180 ADD_64(estats
->etherstatsoverrsizepkts_hi
,
4181 estats
->rx_stat_dot3statsframestoolong_hi
,
4182 estats
->etherstatsoverrsizepkts_lo
,
4183 estats
->rx_stat_dot3statsframestoolong_lo
);
4184 ADD_64(estats
->error_bytes_received_hi
,
4185 estats
->rx_stat_ifhcinbadoctets_hi
,
4186 estats
->error_bytes_received_lo
,
4187 estats
->rx_stat_ifhcinbadoctets_lo
);
4190 estats
->mac_filter_discard
=
4191 le32_to_cpu(tport
->mac_filter_discard
);
4192 estats
->xxoverflow_discard
=
4193 le32_to_cpu(tport
->xxoverflow_discard
);
4194 estats
->brb_truncate_discard
=
4195 le32_to_cpu(tport
->brb_truncate_discard
);
4196 estats
->mac_discard
= le32_to_cpu(tport
->mac_discard
);
4199 fstats
->host_func_stats_start
= ++fstats
->host_func_stats_end
;
4201 bp
->stats_pending
= 0;
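/* Illustrative note (not part of the original file): the firmware exports
 * each counter as a {hi, lo} pair of 32-bit words, and the per-queue values
 * above are folded into the function/port totals with the ADD_64() and
 * bnx2x_hilo() helpers.  Roughly (a sketch of the idea; the real macros live
 * in bnx2x.h):
 *
 *	ADD_64(s_hi, a_hi, s_lo, a_lo):
 *		s_lo += a_lo;
 *		if (s_lo < a_lo)	// low 32-bit word wrapped around
 *			s_hi++;		// propagate the carry
 *		s_hi += a_hi;
 *
 *	bnx2x_hilo(&x_hi) returns ((u64)x_hi << 32) | x_lo on 64-bit hosts
 *	and just x_lo on 32-bit hosts.
 */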
static void bnx2x_net_stats_update(struct bnx2x *bp)
{
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	struct net_device_stats *nstats = &bp->dev->stats;
	int i;

	nstats->rx_packets =
		bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
		bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
		bnx2x_hilo(&estats->total_broadcast_packets_received_hi);

	nstats->tx_packets =
		bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
		bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
		bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);

	nstats->rx_bytes = bnx2x_hilo(&estats->total_bytes_received_hi);

	nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);

	nstats->rx_dropped = estats->mac_discard;
	for_each_queue(bp, i)
		nstats->rx_dropped +=
			le32_to_cpu(bp->fp[i].old_tclient.checksum_discard);

	nstats->tx_dropped = 0;

	nstats->multicast =
		bnx2x_hilo(&estats->total_multicast_packets_received_hi);

	nstats->collisions =
		bnx2x_hilo(&estats->tx_stat_etherstatscollisions_hi);

	nstats->rx_length_errors =
		bnx2x_hilo(&estats->rx_stat_etherstatsundersizepkts_hi) +
		bnx2x_hilo(&estats->etherstatsoverrsizepkts_hi);
	nstats->rx_over_errors = bnx2x_hilo(&estats->brb_drop_hi) +
				 bnx2x_hilo(&estats->brb_truncate_hi);
	nstats->rx_crc_errors =
		bnx2x_hilo(&estats->rx_stat_dot3statsfcserrors_hi);
	nstats->rx_frame_errors =
		bnx2x_hilo(&estats->rx_stat_dot3statsalignmenterrors_hi);
	nstats->rx_fifo_errors = bnx2x_hilo(&estats->no_buff_discard_hi);
	nstats->rx_missed_errors = estats->xxoverflow_discard;

	nstats->rx_errors = nstats->rx_length_errors +
			    nstats->rx_over_errors +
			    nstats->rx_crc_errors +
			    nstats->rx_frame_errors +
			    nstats->rx_fifo_errors +
			    nstats->rx_missed_errors;

	nstats->tx_aborted_errors =
		bnx2x_hilo(&estats->tx_stat_dot3statslatecollisions_hi) +
		bnx2x_hilo(&estats->tx_stat_dot3statsexcessivecollisions_hi);
	nstats->tx_carrier_errors =
		bnx2x_hilo(&estats->rx_stat_dot3statscarriersenseerrors_hi);
	nstats->tx_fifo_errors = 0;
	nstats->tx_heartbeat_errors = 0;
	nstats->tx_window_errors = 0;

	nstats->tx_errors = nstats->tx_aborted_errors +
			    nstats->tx_carrier_errors +
	    bnx2x_hilo(&estats->tx_stat_dot3statsinternalmactransmiterrors_hi);
}
static void bnx2x_drv_stats_update(struct bnx2x *bp)
{
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	int i;

	estats->driver_xoff = 0;
	estats->rx_err_discard_pkt = 0;
	estats->rx_skb_alloc_failed = 0;
	estats->hw_csum_err = 0;
	for_each_queue(bp, i) {
		struct bnx2x_eth_q_stats *qstats = &bp->fp[i].eth_q_stats;

		estats->driver_xoff += qstats->driver_xoff;
		estats->rx_err_discard_pkt += qstats->rx_err_discard_pkt;
		estats->rx_skb_alloc_failed += qstats->rx_skb_alloc_failed;
		estats->hw_csum_err += qstats->hw_csum_err;
	}
}
static void bnx2x_stats_update(struct bnx2x *bp)
{
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	if (*stats_comp != DMAE_COMP_VAL)
		return;

	if (bp->port.pmf)
		bnx2x_hw_stats_update(bp);

	if (bnx2x_storm_stats_update(bp) && (bp->stats_pending++ == 3)) {
		BNX2X_ERR("storm stats were not updated for 3 times\n");
		bnx2x_panic();
		return;
	}

	bnx2x_net_stats_update(bp);
	bnx2x_drv_stats_update(bp);

	if (netif_msg_timer(bp)) {
		struct bnx2x_fastpath *fp0_rx = bp->fp;
		struct bnx2x_fastpath *fp0_tx = bp->fp;
		struct tstorm_per_client_stats *old_tclient =
							&bp->fp->old_tclient;
		struct bnx2x_eth_q_stats *qstats = &bp->fp->eth_q_stats;
		struct bnx2x_eth_stats *estats = &bp->eth_stats;
		struct net_device_stats *nstats = &bp->dev->stats;
		int i;

		netdev_printk(KERN_DEBUG, bp->dev, "\n");
		printk(KERN_DEBUG " tx avail (%4x)  tx hc idx (%x)"
				  "  tx pkt (%lx)\n",
		       bnx2x_tx_avail(fp0_tx),
		       le16_to_cpu(*fp0_tx->tx_cons_sb), nstats->tx_packets);
		printk(KERN_DEBUG " rx usage (%4x)  rx hc idx (%x)"
				  "  rx pkt (%lx)\n",
		       (u16)(le16_to_cpu(*fp0_rx->rx_cons_sb) -
			     fp0_rx->rx_comp_cons),
		       le16_to_cpu(*fp0_rx->rx_cons_sb), nstats->rx_packets);
		printk(KERN_DEBUG " %s (Xoff events %u)  brb drops %u  "
				  "brb truncate %u\n",
		       (netif_queue_stopped(bp->dev) ? "Xoff" : "Xon"),
		       qstats->driver_xoff,
		       estats->brb_drop_lo, estats->brb_truncate_lo);
		printk(KERN_DEBUG "tstats: checksum_discard %u  "
			"packets_too_big_discard %lu  no_buff_discard %lu  "
			"mac_discard %u  mac_filter_discard %u  "
			"xxovrflow_discard %u  brb_truncate_discard %u  "
			"ttl0_discard %u\n",
		       le32_to_cpu(old_tclient->checksum_discard),
		       bnx2x_hilo(&qstats->etherstatsoverrsizepkts_hi),
		       bnx2x_hilo(&qstats->no_buff_discard_hi),
		       estats->mac_discard, estats->mac_filter_discard,
		       estats->xxoverflow_discard, estats->brb_truncate_discard,
		       le32_to_cpu(old_tclient->ttl0_discard));

		for_each_queue(bp, i) {
			printk(KERN_DEBUG "[%d]: %lu\t%lu\t%lu\n", i,
			       bnx2x_fp(bp, i, tx_pkt),
			       bnx2x_fp(bp, i, rx_pkt),
			       bnx2x_fp(bp, i, rx_calls));
		}
	}

	bnx2x_hw_stats_post(bp);
	bnx2x_storm_stats_post(bp);
}
4359 static void bnx2x_port_stats_stop(struct bnx2x
*bp
)
4361 struct dmae_command
*dmae
;
4363 int loader_idx
= PMF_DMAE_C(bp
);
4364 u32
*stats_comp
= bnx2x_sp(bp
, stats_comp
);
4366 bp
->executer_idx
= 0;
4368 opcode
= (DMAE_CMD_SRC_PCI
| DMAE_CMD_DST_GRC
|
4370 DMAE_CMD_SRC_RESET
| DMAE_CMD_DST_RESET
|
4372 DMAE_CMD_ENDIANITY_B_DW_SWAP
|
4374 DMAE_CMD_ENDIANITY_DW_SWAP
|
4376 (BP_PORT(bp
) ? DMAE_CMD_PORT_1
: DMAE_CMD_PORT_0
) |
4377 (BP_E1HVN(bp
) << DMAE_CMD_E1HVN_SHIFT
));
4379 if (bp
->port
.port_stx
) {
4381 dmae
= bnx2x_sp(bp
, dmae
[bp
->executer_idx
++]);
4383 dmae
->opcode
= (opcode
| DMAE_CMD_C_DST_GRC
);
4385 dmae
->opcode
= (opcode
| DMAE_CMD_C_DST_PCI
);
4386 dmae
->src_addr_lo
= U64_LO(bnx2x_sp_mapping(bp
, port_stats
));
4387 dmae
->src_addr_hi
= U64_HI(bnx2x_sp_mapping(bp
, port_stats
));
4388 dmae
->dst_addr_lo
= bp
->port
.port_stx
>> 2;
4389 dmae
->dst_addr_hi
= 0;
4390 dmae
->len
= sizeof(struct host_port_stats
) >> 2;
4392 dmae
->comp_addr_lo
= dmae_reg_go_c
[loader_idx
] >> 2;
4393 dmae
->comp_addr_hi
= 0;
4396 dmae
->comp_addr_lo
=
4397 U64_LO(bnx2x_sp_mapping(bp
, stats_comp
));
4398 dmae
->comp_addr_hi
=
4399 U64_HI(bnx2x_sp_mapping(bp
, stats_comp
));
4400 dmae
->comp_val
= DMAE_COMP_VAL
;
4408 dmae
= bnx2x_sp(bp
, dmae
[bp
->executer_idx
++]);
4409 dmae
->opcode
= (opcode
| DMAE_CMD_C_DST_PCI
);
4410 dmae
->src_addr_lo
= U64_LO(bnx2x_sp_mapping(bp
, func_stats
));
4411 dmae
->src_addr_hi
= U64_HI(bnx2x_sp_mapping(bp
, func_stats
));
4412 dmae
->dst_addr_lo
= bp
->func_stx
>> 2;
4413 dmae
->dst_addr_hi
= 0;
4414 dmae
->len
= sizeof(struct host_func_stats
) >> 2;
4415 dmae
->comp_addr_lo
= U64_LO(bnx2x_sp_mapping(bp
, stats_comp
));
4416 dmae
->comp_addr_hi
= U64_HI(bnx2x_sp_mapping(bp
, stats_comp
));
4417 dmae
->comp_val
= DMAE_COMP_VAL
;
static void bnx2x_stats_stop(struct bnx2x *bp)
{
	int update = 0;

	bnx2x_stats_comp(bp);

	if (bp->port.pmf)
		update = (bnx2x_hw_stats_update(bp) == 0);

	update |= (bnx2x_storm_stats_update(bp) == 0);

	if (update) {
		bnx2x_net_stats_update(bp);

		if (bp->port.pmf)
			bnx2x_port_stats_stop(bp);

		bnx2x_hw_stats_post(bp);
		bnx2x_stats_comp(bp);
	}
}
static void bnx2x_stats_do_nothing(struct bnx2x *bp)
{
}

static const struct {
	void (*action)(struct bnx2x *bp);
	enum bnx2x_stats_state next_state;
} bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
/* state	event	*/
{
/* DISABLED	PMF	*/ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
/*		LINK_UP	*/ {bnx2x_stats_start,      STATS_STATE_ENABLED},
/*		UPDATE	*/ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
/*		STOP	*/ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
},
{
/* ENABLED	PMF	*/ {bnx2x_stats_pmf_start,  STATS_STATE_ENABLED},
/*		LINK_UP	*/ {bnx2x_stats_restart,    STATS_STATE_ENABLED},
/*		UPDATE	*/ {bnx2x_stats_update,     STATS_STATE_ENABLED},
/*		STOP	*/ {bnx2x_stats_stop,       STATS_STATE_DISABLED}
}
};

static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
{
	enum bnx2x_stats_state state = bp->stats_state;

	bnx2x_stats_stm[state][event].action(bp);
	bp->stats_state = bnx2x_stats_stm[state][event].next_state;

	/* Make sure the state has been "changed" */
	smp_wmb();

	if ((event != STATS_EVENT_UPDATE) || netif_msg_timer(bp))
		DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
		   state, event, bp->stats_state);
}
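/* Illustrative note (not part of the original file): the table above is a
 * plain {action, next_state} state machine keyed by the current statistics
 * state and the incoming event.  For example, a link-up event while the state
 * is STATS_STATE_DISABLED runs bnx2x_stats_start() and moves to
 * STATS_STATE_ENABLED, while a stop event in the ENABLED state runs
 * bnx2x_stats_stop() and drops back to DISABLED.  Periodic STATS_EVENT_UPDATE
 * events only do real work while the state is ENABLED.
 */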
4483 static void bnx2x_port_stats_base_init(struct bnx2x
*bp
)
4485 struct dmae_command
*dmae
;
4486 u32
*stats_comp
= bnx2x_sp(bp
, stats_comp
);
4489 if (!bp
->port
.pmf
|| !bp
->port
.port_stx
) {
4490 BNX2X_ERR("BUG!\n");
4494 bp
->executer_idx
= 0;
4496 dmae
= bnx2x_sp(bp
, dmae
[bp
->executer_idx
++]);
4497 dmae
->opcode
= (DMAE_CMD_SRC_PCI
| DMAE_CMD_DST_GRC
|
4498 DMAE_CMD_C_DST_PCI
| DMAE_CMD_C_ENABLE
|
4499 DMAE_CMD_SRC_RESET
| DMAE_CMD_DST_RESET
|
4501 DMAE_CMD_ENDIANITY_B_DW_SWAP
|
4503 DMAE_CMD_ENDIANITY_DW_SWAP
|
4505 (BP_PORT(bp
) ? DMAE_CMD_PORT_1
: DMAE_CMD_PORT_0
) |
4506 (BP_E1HVN(bp
) << DMAE_CMD_E1HVN_SHIFT
));
4507 dmae
->src_addr_lo
= U64_LO(bnx2x_sp_mapping(bp
, port_stats
));
4508 dmae
->src_addr_hi
= U64_HI(bnx2x_sp_mapping(bp
, port_stats
));
4509 dmae
->dst_addr_lo
= bp
->port
.port_stx
>> 2;
4510 dmae
->dst_addr_hi
= 0;
4511 dmae
->len
= sizeof(struct host_port_stats
) >> 2;
4512 dmae
->comp_addr_lo
= U64_LO(bnx2x_sp_mapping(bp
, stats_comp
));
4513 dmae
->comp_addr_hi
= U64_HI(bnx2x_sp_mapping(bp
, stats_comp
));
4514 dmae
->comp_val
= DMAE_COMP_VAL
;
4517 bnx2x_hw_stats_post(bp
);
4518 bnx2x_stats_comp(bp
);
static void bnx2x_func_stats_base_init(struct bnx2x *bp)
{
	int vn, vn_max = IS_E1HMF(bp) ? E1HVN_MAX : E1VN_MAX;
	int port = BP_PORT(bp);
	int func;
	u32 func_stx;

	/* sanity */
	if (!bp->port.pmf || !bp->func_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	/* save our func_stx */
	func_stx = bp->func_stx;

	for (vn = VN_0; vn < vn_max; vn++) {
		func = 2*vn + port;

		bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
		bnx2x_func_stats_init(bp);
		bnx2x_hw_stats_post(bp);
		bnx2x_stats_comp(bp);
	}

	/* restore our func_stx */
	bp->func_stx = func_stx;
}
4550 static void bnx2x_func_stats_base_update(struct bnx2x
*bp
)
4552 struct dmae_command
*dmae
= &bp
->stats_dmae
;
4553 u32
*stats_comp
= bnx2x_sp(bp
, stats_comp
);
4556 if (!bp
->func_stx
) {
4557 BNX2X_ERR("BUG!\n");
4561 bp
->executer_idx
= 0;
4562 memset(dmae
, 0, sizeof(struct dmae_command
));
4564 dmae
->opcode
= (DMAE_CMD_SRC_GRC
| DMAE_CMD_DST_PCI
|
4565 DMAE_CMD_C_DST_PCI
| DMAE_CMD_C_ENABLE
|
4566 DMAE_CMD_SRC_RESET
| DMAE_CMD_DST_RESET
|
4568 DMAE_CMD_ENDIANITY_B_DW_SWAP
|
4570 DMAE_CMD_ENDIANITY_DW_SWAP
|
4572 (BP_PORT(bp
) ? DMAE_CMD_PORT_1
: DMAE_CMD_PORT_0
) |
4573 (BP_E1HVN(bp
) << DMAE_CMD_E1HVN_SHIFT
));
4574 dmae
->src_addr_lo
= bp
->func_stx
>> 2;
4575 dmae
->src_addr_hi
= 0;
4576 dmae
->dst_addr_lo
= U64_LO(bnx2x_sp_mapping(bp
, func_stats_base
));
4577 dmae
->dst_addr_hi
= U64_HI(bnx2x_sp_mapping(bp
, func_stats_base
));
4578 dmae
->len
= sizeof(struct host_func_stats
) >> 2;
4579 dmae
->comp_addr_lo
= U64_LO(bnx2x_sp_mapping(bp
, stats_comp
));
4580 dmae
->comp_addr_hi
= U64_HI(bnx2x_sp_mapping(bp
, stats_comp
));
4581 dmae
->comp_val
= DMAE_COMP_VAL
;
4584 bnx2x_hw_stats_post(bp
);
4585 bnx2x_stats_comp(bp
);
static void bnx2x_stats_init(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int i;

	bp->stats_pending = 0;
	bp->executer_idx = 0;
	bp->stats_counter = 0;

	/* port and func stats for management */
	if (!BP_NOMCP(bp)) {
		bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
		bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);

	} else {
		bp->port.port_stx = 0;
		bp->func_stx = 0;
	}
	DP(BNX2X_MSG_STATS, "port_stx 0x%x  func_stx 0x%x\n",
	   bp->port.port_stx, bp->func_stx);

	/* port stats */
	memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
	bp->port.old_nig_stats.brb_discard =
			REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
	bp->port.old_nig_stats.brb_truncate =
			REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
	REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
		    &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
	REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
		    &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);

	/* function stats */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		memset(&fp->old_tclient, 0,
		       sizeof(struct tstorm_per_client_stats));
		memset(&fp->old_uclient, 0,
		       sizeof(struct ustorm_per_client_stats));
		memset(&fp->old_xclient, 0,
		       sizeof(struct xstorm_per_client_stats));
		memset(&fp->eth_q_stats, 0, sizeof(struct bnx2x_eth_q_stats));
	}

	memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
	memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats));

	bp->stats_state = STATS_STATE_DISABLED;

	if (bp->port.pmf) {
		if (bp->port.port_stx)
			bnx2x_port_stats_base_init(bp);

		if (bp->func_stx)
			bnx2x_func_stats_base_init(bp);

	} else if (bp->func_stx)
		bnx2x_func_stats_base_update(bp);
}
static void bnx2x_timer(unsigned long data)
{
	struct bnx2x *bp = (struct bnx2x *) data;

	if (!netif_running(bp->dev))
		return;

	if (atomic_read(&bp->intr_sem) != 0)
		goto timer_restart;

	if (poll) {
		struct bnx2x_fastpath *fp = &bp->fp[0];
		int rc;

		bnx2x_tx_int(fp);
		rc = bnx2x_rx_int(fp, 1000);
	}

	if (!BP_NOMCP(bp)) {
		int func = BP_FUNC(bp);
		u32 drv_pulse;
		u32 mcp_pulse;

		++bp->fw_drv_pulse_wr_seq;
		bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
		/* TBD - add SYSTEM_TIME */
		drv_pulse = bp->fw_drv_pulse_wr_seq;
		SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);

		mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
			     MCP_PULSE_SEQ_MASK);
		/* The delta between driver pulse and mcp response
		 * should be 1 (before mcp response) or 0 (after mcp response)
		 */
		if ((drv_pulse != mcp_pulse) &&
		    (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
			/* someone lost a heartbeat... */
			BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
				  drv_pulse, mcp_pulse);
		}
	}

	if (bp->state == BNX2X_STATE_OPEN)
		bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);

timer_restart:
	mod_timer(&bp->timer, jiffies + bp->current_interval);
}
/* end of Statistics */

/*
 * nic init service functions
 */
4707 static void bnx2x_zero_sb(struct bnx2x
*bp
, int sb_id
)
4709 int port
= BP_PORT(bp
);
4712 bnx2x_init_fill(bp
, CSEM_REG_FAST_MEMORY
+
4713 CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port
, sb_id
), 0,
4714 CSTORM_SB_STATUS_BLOCK_U_SIZE
/ 4);
4715 bnx2x_init_fill(bp
, CSEM_REG_FAST_MEMORY
+
4716 CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port
, sb_id
), 0,
4717 CSTORM_SB_STATUS_BLOCK_C_SIZE
/ 4);
4720 static void bnx2x_init_sb(struct bnx2x
*bp
, struct host_status_block
*sb
,
4721 dma_addr_t mapping
, int sb_id
)
4723 int port
= BP_PORT(bp
);
4724 int func
= BP_FUNC(bp
);
4729 section
= ((u64
)mapping
) + offsetof(struct host_status_block
,
4731 sb
->u_status_block
.status_block_id
= sb_id
;
4733 REG_WR(bp
, BAR_CSTRORM_INTMEM
+
4734 CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port
, sb_id
), U64_LO(section
));
4735 REG_WR(bp
, BAR_CSTRORM_INTMEM
+
4736 ((CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port
, sb_id
)) + 4),
4738 REG_WR8(bp
, BAR_CSTRORM_INTMEM
+ FP_USB_FUNC_OFF
+
4739 CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port
, sb_id
), func
);
4741 for (index
= 0; index
< HC_USTORM_SB_NUM_INDICES
; index
++)
4742 REG_WR16(bp
, BAR_CSTRORM_INTMEM
+
4743 CSTORM_SB_HC_DISABLE_U_OFFSET(port
, sb_id
, index
), 1);
4746 section
= ((u64
)mapping
) + offsetof(struct host_status_block
,
4748 sb
->c_status_block
.status_block_id
= sb_id
;
4750 REG_WR(bp
, BAR_CSTRORM_INTMEM
+
4751 CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port
, sb_id
), U64_LO(section
));
4752 REG_WR(bp
, BAR_CSTRORM_INTMEM
+
4753 ((CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port
, sb_id
)) + 4),
4755 REG_WR8(bp
, BAR_CSTRORM_INTMEM
+ FP_CSB_FUNC_OFF
+
4756 CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port
, sb_id
), func
);
4758 for (index
= 0; index
< HC_CSTORM_SB_NUM_INDICES
; index
++)
4759 REG_WR16(bp
, BAR_CSTRORM_INTMEM
+
4760 CSTORM_SB_HC_DISABLE_C_OFFSET(port
, sb_id
, index
), 1);
4762 bnx2x_ack_sb(bp
, sb_id
, CSTORM_ID
, 0, IGU_INT_ENABLE
, 0);
4765 static void bnx2x_zero_def_sb(struct bnx2x
*bp
)
4767 int func
= BP_FUNC(bp
);
4769 bnx2x_init_fill(bp
, TSEM_REG_FAST_MEMORY
+
4770 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func
), 0,
4771 sizeof(struct tstorm_def_status_block
)/4);
4772 bnx2x_init_fill(bp
, CSEM_REG_FAST_MEMORY
+
4773 CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func
), 0,
4774 sizeof(struct cstorm_def_status_block_u
)/4);
4775 bnx2x_init_fill(bp
, CSEM_REG_FAST_MEMORY
+
4776 CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func
), 0,
4777 sizeof(struct cstorm_def_status_block_c
)/4);
4778 bnx2x_init_fill(bp
, XSEM_REG_FAST_MEMORY
+
4779 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func
), 0,
4780 sizeof(struct xstorm_def_status_block
)/4);
4783 static void bnx2x_init_def_sb(struct bnx2x
*bp
,
4784 struct host_def_status_block
*def_sb
,
4785 dma_addr_t mapping
, int sb_id
)
4787 int port
= BP_PORT(bp
);
4788 int func
= BP_FUNC(bp
);
4789 int index
, val
, reg_offset
;
4793 section
= ((u64
)mapping
) + offsetof(struct host_def_status_block
,
4794 atten_status_block
);
4795 def_sb
->atten_status_block
.status_block_id
= sb_id
;
4799 reg_offset
= (port
? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0
:
4800 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0
);
4802 for (index
= 0; index
< MAX_DYNAMIC_ATTN_GRPS
; index
++) {
4803 bp
->attn_group
[index
].sig
[0] = REG_RD(bp
,
4804 reg_offset
+ 0x10*index
);
4805 bp
->attn_group
[index
].sig
[1] = REG_RD(bp
,
4806 reg_offset
+ 0x4 + 0x10*index
);
4807 bp
->attn_group
[index
].sig
[2] = REG_RD(bp
,
4808 reg_offset
+ 0x8 + 0x10*index
);
4809 bp
->attn_group
[index
].sig
[3] = REG_RD(bp
,
4810 reg_offset
+ 0xc + 0x10*index
);
4813 reg_offset
= (port
? HC_REG_ATTN_MSG1_ADDR_L
:
4814 HC_REG_ATTN_MSG0_ADDR_L
);
4816 REG_WR(bp
, reg_offset
, U64_LO(section
));
4817 REG_WR(bp
, reg_offset
+ 4, U64_HI(section
));
4819 reg_offset
= (port
? HC_REG_ATTN_NUM_P1
: HC_REG_ATTN_NUM_P0
);
4821 val
= REG_RD(bp
, reg_offset
);
4823 REG_WR(bp
, reg_offset
, val
);
4826 section
= ((u64
)mapping
) + offsetof(struct host_def_status_block
,
4827 u_def_status_block
);
4828 def_sb
->u_def_status_block
.status_block_id
= sb_id
;
4830 REG_WR(bp
, BAR_CSTRORM_INTMEM
+
4831 CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func
), U64_LO(section
));
4832 REG_WR(bp
, BAR_CSTRORM_INTMEM
+
4833 ((CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func
)) + 4),
4835 REG_WR8(bp
, BAR_CSTRORM_INTMEM
+ DEF_USB_FUNC_OFF
+
4836 CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func
), func
);
4838 for (index
= 0; index
< HC_USTORM_DEF_SB_NUM_INDICES
; index
++)
4839 REG_WR16(bp
, BAR_CSTRORM_INTMEM
+
4840 CSTORM_DEF_SB_HC_DISABLE_U_OFFSET(func
, index
), 1);
4843 section
= ((u64
)mapping
) + offsetof(struct host_def_status_block
,
4844 c_def_status_block
);
4845 def_sb
->c_def_status_block
.status_block_id
= sb_id
;
4847 REG_WR(bp
, BAR_CSTRORM_INTMEM
+
4848 CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func
), U64_LO(section
));
4849 REG_WR(bp
, BAR_CSTRORM_INTMEM
+
4850 ((CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func
)) + 4),
4852 REG_WR8(bp
, BAR_CSTRORM_INTMEM
+ DEF_CSB_FUNC_OFF
+
4853 CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func
), func
);
4855 for (index
= 0; index
< HC_CSTORM_DEF_SB_NUM_INDICES
; index
++)
4856 REG_WR16(bp
, BAR_CSTRORM_INTMEM
+
4857 CSTORM_DEF_SB_HC_DISABLE_C_OFFSET(func
, index
), 1);
4860 section
= ((u64
)mapping
) + offsetof(struct host_def_status_block
,
4861 t_def_status_block
);
4862 def_sb
->t_def_status_block
.status_block_id
= sb_id
;
4864 REG_WR(bp
, BAR_TSTRORM_INTMEM
+
4865 TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func
), U64_LO(section
));
4866 REG_WR(bp
, BAR_TSTRORM_INTMEM
+
4867 ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func
)) + 4),
4869 REG_WR8(bp
, BAR_TSTRORM_INTMEM
+ DEF_TSB_FUNC_OFF
+
4870 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func
), func
);
4872 for (index
= 0; index
< HC_TSTORM_DEF_SB_NUM_INDICES
; index
++)
4873 REG_WR16(bp
, BAR_TSTRORM_INTMEM
+
4874 TSTORM_DEF_SB_HC_DISABLE_OFFSET(func
, index
), 1);
4877 section
= ((u64
)mapping
) + offsetof(struct host_def_status_block
,
4878 x_def_status_block
);
4879 def_sb
->x_def_status_block
.status_block_id
= sb_id
;
4881 REG_WR(bp
, BAR_XSTRORM_INTMEM
+
4882 XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func
), U64_LO(section
));
4883 REG_WR(bp
, BAR_XSTRORM_INTMEM
+
4884 ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func
)) + 4),
4886 REG_WR8(bp
, BAR_XSTRORM_INTMEM
+ DEF_XSB_FUNC_OFF
+
4887 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func
), func
);
4889 for (index
= 0; index
< HC_XSTORM_DEF_SB_NUM_INDICES
; index
++)
4890 REG_WR16(bp
, BAR_XSTRORM_INTMEM
+
4891 XSTORM_DEF_SB_HC_DISABLE_OFFSET(func
, index
), 1);
4893 bp
->stats_pending
= 0;
4894 bp
->set_mac_pending
= 0;
4896 bnx2x_ack_sb(bp
, sb_id
, CSTORM_ID
, 0, IGU_INT_ENABLE
, 0);
4899 static void bnx2x_update_coalesce(struct bnx2x
*bp
)
4901 int port
= BP_PORT(bp
);
4904 for_each_queue(bp
, i
) {
4905 int sb_id
= bp
->fp
[i
].sb_id
;
4907 /* HC_INDEX_U_ETH_RX_CQ_CONS */
4908 REG_WR8(bp
, BAR_CSTRORM_INTMEM
+
4909 CSTORM_SB_HC_TIMEOUT_U_OFFSET(port
, sb_id
,
4910 U_SB_ETH_RX_CQ_INDEX
),
4911 bp
->rx_ticks
/(4 * BNX2X_BTR
));
4912 REG_WR16(bp
, BAR_CSTRORM_INTMEM
+
4913 CSTORM_SB_HC_DISABLE_U_OFFSET(port
, sb_id
,
4914 U_SB_ETH_RX_CQ_INDEX
),
4915 (bp
->rx_ticks
/(4 * BNX2X_BTR
)) ? 0 : 1);
4917 /* HC_INDEX_C_ETH_TX_CQ_CONS */
4918 REG_WR8(bp
, BAR_CSTRORM_INTMEM
+
4919 CSTORM_SB_HC_TIMEOUT_C_OFFSET(port
, sb_id
,
4920 C_SB_ETH_TX_CQ_INDEX
),
4921 bp
->tx_ticks
/(4 * BNX2X_BTR
));
4922 REG_WR16(bp
, BAR_CSTRORM_INTMEM
+
4923 CSTORM_SB_HC_DISABLE_C_OFFSET(port
, sb_id
,
4924 C_SB_ETH_TX_CQ_INDEX
),
4925 (bp
->tx_ticks
/(4 * BNX2X_BTR
)) ? 0 : 1);
4929 static inline void bnx2x_free_tpa_pool(struct bnx2x
*bp
,
4930 struct bnx2x_fastpath
*fp
, int last
)
4934 for (i
= 0; i
< last
; i
++) {
4935 struct sw_rx_bd
*rx_buf
= &(fp
->tpa_pool
[i
]);
4936 struct sk_buff
*skb
= rx_buf
->skb
;
4939 DP(NETIF_MSG_IFDOWN
, "tpa bin %d empty on free\n", i
);
4943 if (fp
->tpa_state
[i
] == BNX2X_TPA_START
)
4944 pci_unmap_single(bp
->pdev
,
4945 pci_unmap_addr(rx_buf
, mapping
),
4946 bp
->rx_buf_size
, PCI_DMA_FROMDEVICE
);
4953 static void bnx2x_init_rx_rings(struct bnx2x
*bp
)
4955 int func
= BP_FUNC(bp
);
4956 int max_agg_queues
= CHIP_IS_E1(bp
) ? ETH_MAX_AGGREGATION_QUEUES_E1
:
4957 ETH_MAX_AGGREGATION_QUEUES_E1H
;
4958 u16 ring_prod
, cqe_ring_prod
;
4961 bp
->rx_buf_size
= bp
->dev
->mtu
+ ETH_OVREHEAD
+ BNX2X_RX_ALIGN
;
4963 "mtu %d rx_buf_size %d\n", bp
->dev
->mtu
, bp
->rx_buf_size
);
4965 if (bp
->flags
& TPA_ENABLE_FLAG
) {
4967 for_each_queue(bp
, j
) {
4968 struct bnx2x_fastpath
*fp
= &bp
->fp
[j
];
4970 for (i
= 0; i
< max_agg_queues
; i
++) {
4971 fp
->tpa_pool
[i
].skb
=
4972 netdev_alloc_skb(bp
->dev
, bp
->rx_buf_size
);
4973 if (!fp
->tpa_pool
[i
].skb
) {
4974 BNX2X_ERR("Failed to allocate TPA "
4975 "skb pool for queue[%d] - "
4976 "disabling TPA on this "
4978 bnx2x_free_tpa_pool(bp
, fp
, i
);
4979 fp
->disable_tpa
= 1;
4982 pci_unmap_addr_set((struct sw_rx_bd
*)
4983 &bp
->fp
->tpa_pool
[i
],
4985 fp
->tpa_state
[i
] = BNX2X_TPA_STOP
;
4990 for_each_queue(bp
, j
) {
4991 struct bnx2x_fastpath
*fp
= &bp
->fp
[j
];
4994 fp
->rx_cons_sb
= BNX2X_RX_SB_INDEX
;
4995 fp
->rx_bd_cons_sb
= BNX2X_RX_SB_BD_INDEX
;
4997 /* "next page" elements initialization */
4999 for (i
= 1; i
<= NUM_RX_SGE_PAGES
; i
++) {
5000 struct eth_rx_sge
*sge
;
5002 sge
= &fp
->rx_sge_ring
[RX_SGE_CNT
* i
- 2];
5004 cpu_to_le32(U64_HI(fp
->rx_sge_mapping
+
5005 BCM_PAGE_SIZE
*(i
% NUM_RX_SGE_PAGES
)));
5007 cpu_to_le32(U64_LO(fp
->rx_sge_mapping
+
5008 BCM_PAGE_SIZE
*(i
% NUM_RX_SGE_PAGES
)));
5011 bnx2x_init_sge_ring_bit_mask(fp
);
5014 for (i
= 1; i
<= NUM_RX_RINGS
; i
++) {
5015 struct eth_rx_bd
*rx_bd
;
5017 rx_bd
= &fp
->rx_desc_ring
[RX_DESC_CNT
* i
- 2];
5019 cpu_to_le32(U64_HI(fp
->rx_desc_mapping
+
5020 BCM_PAGE_SIZE
*(i
% NUM_RX_RINGS
)));
5022 cpu_to_le32(U64_LO(fp
->rx_desc_mapping
+
5023 BCM_PAGE_SIZE
*(i
% NUM_RX_RINGS
)));
5027 for (i
= 1; i
<= NUM_RCQ_RINGS
; i
++) {
5028 struct eth_rx_cqe_next_page
*nextpg
;
5030 nextpg
= (struct eth_rx_cqe_next_page
*)
5031 &fp
->rx_comp_ring
[RCQ_DESC_CNT
* i
- 1];
5033 cpu_to_le32(U64_HI(fp
->rx_comp_mapping
+
5034 BCM_PAGE_SIZE
*(i
% NUM_RCQ_RINGS
)));
5036 cpu_to_le32(U64_LO(fp
->rx_comp_mapping
+
5037 BCM_PAGE_SIZE
*(i
% NUM_RCQ_RINGS
)));
5040 /* Allocate SGEs and initialize the ring elements */
5041 for (i
= 0, ring_prod
= 0;
5042 i
< MAX_RX_SGE_CNT
*NUM_RX_SGE_PAGES
; i
++) {
5044 if (bnx2x_alloc_rx_sge(bp
, fp
, ring_prod
) < 0) {
5045 BNX2X_ERR("was only able to allocate "
5047 BNX2X_ERR("disabling TPA for queue[%d]\n", j
);
5048 /* Cleanup already allocated elements */
5049 bnx2x_free_rx_sge_range(bp
, fp
, ring_prod
);
5050 bnx2x_free_tpa_pool(bp
, fp
, max_agg_queues
);
5051 fp
->disable_tpa
= 1;
5055 ring_prod
= NEXT_SGE_IDX(ring_prod
);
5057 fp
->rx_sge_prod
= ring_prod
;
5059 /* Allocate BDs and initialize BD ring */
5060 fp
->rx_comp_cons
= 0;
5061 cqe_ring_prod
= ring_prod
= 0;
5062 for (i
= 0; i
< bp
->rx_ring_size
; i
++) {
5063 if (bnx2x_alloc_rx_skb(bp
, fp
, ring_prod
) < 0) {
5064 BNX2X_ERR("was only able to allocate "
5065 "%d rx skbs on queue[%d]\n", i
, j
);
5066 fp
->eth_q_stats
.rx_skb_alloc_failed
++;
5069 ring_prod
= NEXT_RX_IDX(ring_prod
);
5070 cqe_ring_prod
= NEXT_RCQ_IDX(cqe_ring_prod
);
5071 WARN_ON(ring_prod
<= i
);
5074 fp
->rx_bd_prod
= ring_prod
;
5075 /* must not have more available CQEs than BDs */
5076 fp
->rx_comp_prod
= min((u16
)(NUM_RCQ_RINGS
*RCQ_DESC_CNT
),
5078 fp
->rx_pkt
= fp
->rx_calls
= 0;
5081 * this will generate an interrupt (to the TSTORM)
5082 * must only be done after chip is initialized
5084 bnx2x_update_rx_prod(bp
, fp
, ring_prod
, fp
->rx_comp_prod
,
5089 REG_WR(bp
, BAR_USTRORM_INTMEM
+
5090 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func
),
5091 U64_LO(fp
->rx_comp_mapping
));
5092 REG_WR(bp
, BAR_USTRORM_INTMEM
+
5093 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func
) + 4,
5094 U64_HI(fp
->rx_comp_mapping
));
5098 static void bnx2x_init_tx_ring(struct bnx2x
*bp
)
5102 for_each_queue(bp
, j
) {
5103 struct bnx2x_fastpath
*fp
= &bp
->fp
[j
];
5105 for (i
= 1; i
<= NUM_TX_RINGS
; i
++) {
5106 struct eth_tx_next_bd
*tx_next_bd
=
5107 &fp
->tx_desc_ring
[TX_DESC_CNT
* i
- 1].next_bd
;
5109 tx_next_bd
->addr_hi
=
5110 cpu_to_le32(U64_HI(fp
->tx_desc_mapping
+
5111 BCM_PAGE_SIZE
*(i
% NUM_TX_RINGS
)));
5112 tx_next_bd
->addr_lo
=
5113 cpu_to_le32(U64_LO(fp
->tx_desc_mapping
+
5114 BCM_PAGE_SIZE
*(i
% NUM_TX_RINGS
)));
5117 fp
->tx_db
.data
.header
.header
= DOORBELL_HDR_DB_TYPE
;
5118 fp
->tx_db
.data
.zero_fill1
= 0;
5119 fp
->tx_db
.data
.prod
= 0;
5121 fp
->tx_pkt_prod
= 0;
5122 fp
->tx_pkt_cons
= 0;
5125 fp
->tx_cons_sb
= BNX2X_TX_SB_INDEX
;
5130 static void bnx2x_init_sp_ring(struct bnx2x
*bp
)
5132 int func
= BP_FUNC(bp
);
5134 spin_lock_init(&bp
->spq_lock
);
5136 bp
->spq_left
= MAX_SPQ_PENDING
;
5137 bp
->spq_prod_idx
= 0;
5138 bp
->dsb_sp_prod
= BNX2X_SP_DSB_INDEX
;
5139 bp
->spq_prod_bd
= bp
->spq
;
5140 bp
->spq_last_bd
= bp
->spq_prod_bd
+ MAX_SP_DESC_CNT
;
5142 REG_WR(bp
, XSEM_REG_FAST_MEMORY
+ XSTORM_SPQ_PAGE_BASE_OFFSET(func
),
5143 U64_LO(bp
->spq_mapping
));
5145 XSEM_REG_FAST_MEMORY
+ XSTORM_SPQ_PAGE_BASE_OFFSET(func
) + 4,
5146 U64_HI(bp
->spq_mapping
));
5148 REG_WR(bp
, XSEM_REG_FAST_MEMORY
+ XSTORM_SPQ_PROD_OFFSET(func
),
5152 static void bnx2x_init_context(struct bnx2x
*bp
)
5157 for_each_queue(bp
, i
) {
5158 struct eth_context
*context
= bnx2x_sp(bp
, context
[i
].eth
);
5159 struct bnx2x_fastpath
*fp
= &bp
->fp
[i
];
5160 u8 cl_id
= fp
->cl_id
;
5162 context
->ustorm_st_context
.common
.sb_index_numbers
=
5163 BNX2X_RX_SB_INDEX_NUM
;
5164 context
->ustorm_st_context
.common
.clientId
= cl_id
;
5165 context
->ustorm_st_context
.common
.status_block_id
= fp
->sb_id
;
5166 context
->ustorm_st_context
.common
.flags
=
5167 (USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT
|
5168 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_STATISTICS
);
5169 context
->ustorm_st_context
.common
.statistics_counter_id
=
5171 context
->ustorm_st_context
.common
.mc_alignment_log_size
=
5172 BNX2X_RX_ALIGN_SHIFT
;
5173 context
->ustorm_st_context
.common
.bd_buff_size
=
5175 context
->ustorm_st_context
.common
.bd_page_base_hi
=
5176 U64_HI(fp
->rx_desc_mapping
);
5177 context
->ustorm_st_context
.common
.bd_page_base_lo
=
5178 U64_LO(fp
->rx_desc_mapping
);
5179 if (!fp
->disable_tpa
) {
5180 context
->ustorm_st_context
.common
.flags
|=
5181 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA
;
5182 context
->ustorm_st_context
.common
.sge_buff_size
=
5183 (u16
)min((u32
)SGE_PAGE_SIZE
*PAGES_PER_SGE
,
5185 context
->ustorm_st_context
.common
.sge_page_base_hi
=
5186 U64_HI(fp
->rx_sge_mapping
);
5187 context
->ustorm_st_context
.common
.sge_page_base_lo
=
5188 U64_LO(fp
->rx_sge_mapping
);
5190 context
->ustorm_st_context
.common
.max_sges_for_packet
=
5191 SGE_PAGE_ALIGN(bp
->dev
->mtu
) >> SGE_PAGE_SHIFT
;
5192 context
->ustorm_st_context
.common
.max_sges_for_packet
=
5193 ((context
->ustorm_st_context
.common
.
5194 max_sges_for_packet
+ PAGES_PER_SGE
- 1) &
5195 (~(PAGES_PER_SGE
- 1))) >> PAGES_PER_SGE_SHIFT
;
5198 context
->ustorm_ag_context
.cdu_usage
=
5199 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp
, i
),
5200 CDU_REGION_NUMBER_UCM_AG
,
5201 ETH_CONNECTION_TYPE
);
5203 context
->xstorm_ag_context
.cdu_reserved
=
5204 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp
, i
),
5205 CDU_REGION_NUMBER_XCM_AG
,
5206 ETH_CONNECTION_TYPE
);
5210 for_each_queue(bp
, i
) {
5211 struct bnx2x_fastpath
*fp
= &bp
->fp
[i
];
5212 struct eth_context
*context
=
5213 bnx2x_sp(bp
, context
[i
].eth
);
5215 context
->cstorm_st_context
.sb_index_number
=
5216 C_SB_ETH_TX_CQ_INDEX
;
5217 context
->cstorm_st_context
.status_block_id
= fp
->sb_id
;
5219 context
->xstorm_st_context
.tx_bd_page_base_hi
=
5220 U64_HI(fp
->tx_desc_mapping
);
5221 context
->xstorm_st_context
.tx_bd_page_base_lo
=
5222 U64_LO(fp
->tx_desc_mapping
);
5223 context
->xstorm_st_context
.statistics_data
= (fp
->cl_id
|
5224 XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE
);
static void bnx2x_init_ind_table(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	int i;

	if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
		return;

	DP(NETIF_MSG_IFUP,
	   "Initializing indirection table multi_mode %d\n", bp->multi_mode);
	for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
		REG_WR8(bp, BAR_TSTRORM_INTMEM +
			TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
			bp->fp->cl_id + (i % bp->num_queues));
}
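/* Illustrative note (not part of the original file): the loop above fills the
 * RSS indirection table round-robin over the active queues.  With
 * bp->num_queues == 4 and a leading client id of bp->fp->cl_id, the
 * TSTORM_INDIRECTION_TABLE_SIZE entries come out as cl_id+0, cl_id+1,
 * cl_id+2, cl_id+3, cl_id+0, ... so the RSS hash spreads flows evenly across
 * the fastpath queues.
 */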
5244 static void bnx2x_set_client_config(struct bnx2x
*bp
)
5246 struct tstorm_eth_client_config tstorm_client
= {0};
5247 int port
= BP_PORT(bp
);
5250 tstorm_client
.mtu
= bp
->dev
->mtu
;
5251 tstorm_client
.config_flags
=
5252 (TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE
|
5253 TSTORM_ETH_CLIENT_CONFIG_E1HOV_REM_ENABLE
);
5255 if (bp
->rx_mode
&& bp
->vlgrp
&& (bp
->flags
& HW_VLAN_RX_FLAG
)) {
5256 tstorm_client
.config_flags
|=
5257 TSTORM_ETH_CLIENT_CONFIG_VLAN_REM_ENABLE
;
5258 DP(NETIF_MSG_IFUP
, "vlan removal enabled\n");
5262 for_each_queue(bp
, i
) {
5263 tstorm_client
.statistics_counter_id
= bp
->fp
[i
].cl_id
;
5265 REG_WR(bp
, BAR_TSTRORM_INTMEM
+
5266 TSTORM_CLIENT_CONFIG_OFFSET(port
, bp
->fp
[i
].cl_id
),
5267 ((u32
*)&tstorm_client
)[0]);
5268 REG_WR(bp
, BAR_TSTRORM_INTMEM
+
5269 TSTORM_CLIENT_CONFIG_OFFSET(port
, bp
->fp
[i
].cl_id
) + 4,
5270 ((u32
*)&tstorm_client
)[1]);
5273 DP(BNX2X_MSG_OFF
, "tstorm_client: 0x%08x 0x%08x\n",
5274 ((u32
*)&tstorm_client
)[0], ((u32
*)&tstorm_client
)[1]);
5277 static void bnx2x_set_storm_rx_mode(struct bnx2x
*bp
)
5279 struct tstorm_eth_mac_filter_config tstorm_mac_filter
= {0};
5280 int mode
= bp
->rx_mode
;
5281 int mask
= bp
->rx_mode_cl_mask
;
5282 int func
= BP_FUNC(bp
);
5283 int port
= BP_PORT(bp
);
5285 /* All but management unicast packets should pass to the host as well */
5287 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_BRCST
|
5288 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_MLCST
|
5289 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_VLAN
|
5290 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_NO_VLAN
;
5292 DP(NETIF_MSG_IFUP
, "rx mode %d mask 0x%x\n", mode
, mask
);
5295 case BNX2X_RX_MODE_NONE
: /* no Rx */
5296 tstorm_mac_filter
.ucast_drop_all
= mask
;
5297 tstorm_mac_filter
.mcast_drop_all
= mask
;
5298 tstorm_mac_filter
.bcast_drop_all
= mask
;
5301 case BNX2X_RX_MODE_NORMAL
:
5302 tstorm_mac_filter
.bcast_accept_all
= mask
;
5305 case BNX2X_RX_MODE_ALLMULTI
:
5306 tstorm_mac_filter
.mcast_accept_all
= mask
;
5307 tstorm_mac_filter
.bcast_accept_all
= mask
;
5310 case BNX2X_RX_MODE_PROMISC
:
5311 tstorm_mac_filter
.ucast_accept_all
= mask
;
5312 tstorm_mac_filter
.mcast_accept_all
= mask
;
5313 tstorm_mac_filter
.bcast_accept_all
= mask
;
5314 /* pass management unicast packets as well */
5315 llh_mask
|= NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_UNCST
;
5319 BNX2X_ERR("BAD rx mode (%d)\n", mode
);
5324 (port
? NIG_REG_LLH1_BRB1_DRV_MASK
: NIG_REG_LLH0_BRB1_DRV_MASK
),
5327 for (i
= 0; i
< sizeof(struct tstorm_eth_mac_filter_config
)/4; i
++) {
5328 REG_WR(bp
, BAR_TSTRORM_INTMEM
+
5329 TSTORM_MAC_FILTER_CONFIG_OFFSET(func
) + i
* 4,
5330 ((u32
*)&tstorm_mac_filter
)[i
]);
5332 /* DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
5333 ((u32 *)&tstorm_mac_filter)[i]); */
5336 if (mode
!= BNX2X_RX_MODE_NONE
)
5337 bnx2x_set_client_config(bp
);
5340 static void bnx2x_init_internal_common(struct bnx2x
*bp
)
5344 /* Zero this manually as its initialization is
5345 currently missing in the initTool */
5346 for (i
= 0; i
< (USTORM_AGG_DATA_SIZE
>> 2); i
++)
5347 REG_WR(bp
, BAR_USTRORM_INTMEM
+
5348 USTORM_AGG_DATA_OFFSET
+ i
* 4, 0);
5351 static void bnx2x_init_internal_port(struct bnx2x
*bp
)
5353 int port
= BP_PORT(bp
);
5356 BAR_CSTRORM_INTMEM
+ CSTORM_HC_BTR_U_OFFSET(port
), BNX2X_BTR
);
5358 BAR_CSTRORM_INTMEM
+ CSTORM_HC_BTR_C_OFFSET(port
), BNX2X_BTR
);
5359 REG_WR(bp
, BAR_TSTRORM_INTMEM
+ TSTORM_HC_BTR_OFFSET(port
), BNX2X_BTR
);
5360 REG_WR(bp
, BAR_XSTRORM_INTMEM
+ XSTORM_HC_BTR_OFFSET(port
), BNX2X_BTR
);
5363 static void bnx2x_init_internal_func(struct bnx2x
*bp
)
5365 struct tstorm_eth_function_common_config tstorm_config
= {0};
5366 struct stats_indication_flags stats_flags
= {0};
5367 int port
= BP_PORT(bp
);
5368 int func
= BP_FUNC(bp
);
5374 tstorm_config
.config_flags
= MULTI_FLAGS(bp
);
5375 tstorm_config
.rss_result_mask
= MULTI_MASK
;
5378 /* Enable TPA if needed */
5379 if (bp
->flags
& TPA_ENABLE_FLAG
)
5380 tstorm_config
.config_flags
|=
5381 TSTORM_ETH_FUNCTION_COMMON_CONFIG_ENABLE_TPA
;
5384 tstorm_config
.config_flags
|=
5385 TSTORM_ETH_FUNCTION_COMMON_CONFIG_E1HOV_IN_CAM
;
5387 tstorm_config
.leading_client_id
= BP_L_ID(bp
);
5389 REG_WR(bp
, BAR_TSTRORM_INTMEM
+
5390 TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func
),
5391 (*(u32
*)&tstorm_config
));
5393 bp
->rx_mode
= BNX2X_RX_MODE_NONE
; /* no rx until link is up */
5394 bp
->rx_mode_cl_mask
= (1 << BP_L_ID(bp
));
5395 bnx2x_set_storm_rx_mode(bp
);
5397 for_each_queue(bp
, i
) {
5398 u8 cl_id
= bp
->fp
[i
].cl_id
;
5400 /* reset xstorm per client statistics */
5401 offset
= BAR_XSTRORM_INTMEM
+
5402 XSTORM_PER_COUNTER_ID_STATS_OFFSET(port
, cl_id
);
5404 j
< sizeof(struct xstorm_per_client_stats
) / 4; j
++)
5405 REG_WR(bp
, offset
+ j
*4, 0);
5407 /* reset tstorm per client statistics */
5408 offset
= BAR_TSTRORM_INTMEM
+
5409 TSTORM_PER_COUNTER_ID_STATS_OFFSET(port
, cl_id
);
5411 j
< sizeof(struct tstorm_per_client_stats
) / 4; j
++)
5412 REG_WR(bp
, offset
+ j
*4, 0);
5414 /* reset ustorm per client statistics */
5415 offset
= BAR_USTRORM_INTMEM
+
5416 USTORM_PER_COUNTER_ID_STATS_OFFSET(port
, cl_id
);
5418 j
< sizeof(struct ustorm_per_client_stats
) / 4; j
++)
5419 REG_WR(bp
, offset
+ j
*4, 0);
5422 /* Init statistics related context */
5423 stats_flags
.collect_eth
= 1;
5425 REG_WR(bp
, BAR_XSTRORM_INTMEM
+ XSTORM_STATS_FLAGS_OFFSET(func
),
5426 ((u32
*)&stats_flags
)[0]);
5427 REG_WR(bp
, BAR_XSTRORM_INTMEM
+ XSTORM_STATS_FLAGS_OFFSET(func
) + 4,
5428 ((u32
*)&stats_flags
)[1]);
5430 REG_WR(bp
, BAR_TSTRORM_INTMEM
+ TSTORM_STATS_FLAGS_OFFSET(func
),
5431 ((u32
*)&stats_flags
)[0]);
5432 REG_WR(bp
, BAR_TSTRORM_INTMEM
+ TSTORM_STATS_FLAGS_OFFSET(func
) + 4,
5433 ((u32
*)&stats_flags
)[1]);
5435 REG_WR(bp
, BAR_USTRORM_INTMEM
+ USTORM_STATS_FLAGS_OFFSET(func
),
5436 ((u32
*)&stats_flags
)[0]);
5437 REG_WR(bp
, BAR_USTRORM_INTMEM
+ USTORM_STATS_FLAGS_OFFSET(func
) + 4,
5438 ((u32
*)&stats_flags
)[1]);
5440 REG_WR(bp
, BAR_CSTRORM_INTMEM
+ CSTORM_STATS_FLAGS_OFFSET(func
),
5441 ((u32
*)&stats_flags
)[0]);
5442 REG_WR(bp
, BAR_CSTRORM_INTMEM
+ CSTORM_STATS_FLAGS_OFFSET(func
) + 4,
5443 ((u32
*)&stats_flags
)[1]);
5445 REG_WR(bp
, BAR_XSTRORM_INTMEM
+
5446 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func
),
5447 U64_LO(bnx2x_sp_mapping(bp
, fw_stats
)));
5448 REG_WR(bp
, BAR_XSTRORM_INTMEM
+
5449 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func
) + 4,
5450 U64_HI(bnx2x_sp_mapping(bp
, fw_stats
)));
5452 REG_WR(bp
, BAR_TSTRORM_INTMEM
+
5453 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func
),
5454 U64_LO(bnx2x_sp_mapping(bp
, fw_stats
)));
5455 REG_WR(bp
, BAR_TSTRORM_INTMEM
+
5456 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func
) + 4,
5457 U64_HI(bnx2x_sp_mapping(bp
, fw_stats
)));
5459 REG_WR(bp
, BAR_USTRORM_INTMEM
+
5460 USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func
),
5461 U64_LO(bnx2x_sp_mapping(bp
, fw_stats
)));
5462 REG_WR(bp
, BAR_USTRORM_INTMEM
+
5463 USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func
) + 4,
5464 U64_HI(bnx2x_sp_mapping(bp
, fw_stats
)));
5466 if (CHIP_IS_E1H(bp
)) {
5467 REG_WR8(bp
, BAR_XSTRORM_INTMEM
+ XSTORM_FUNCTION_MODE_OFFSET
,
5469 REG_WR8(bp
, BAR_TSTRORM_INTMEM
+ TSTORM_FUNCTION_MODE_OFFSET
,
5471 REG_WR8(bp
, BAR_CSTRORM_INTMEM
+ CSTORM_FUNCTION_MODE_OFFSET
,
5473 REG_WR8(bp
, BAR_USTRORM_INTMEM
+ USTORM_FUNCTION_MODE_OFFSET
,
5476 REG_WR16(bp
, BAR_XSTRORM_INTMEM
+ XSTORM_E1HOV_OFFSET(func
),
5480 /* Init CQ ring mapping and aggregation size, the FW limit is 8 frags */
5482 min((u32
)(min((u32
)8, (u32
)MAX_SKB_FRAGS
) *
5483 SGE_PAGE_SIZE
* PAGES_PER_SGE
),
5485 for_each_queue(bp
, i
) {
5486 struct bnx2x_fastpath
*fp
= &bp
->fp
[i
];
5488 REG_WR(bp
, BAR_USTRORM_INTMEM
+
5489 USTORM_CQE_PAGE_BASE_OFFSET(port
, fp
->cl_id
),
5490 U64_LO(fp
->rx_comp_mapping
));
5491 REG_WR(bp
, BAR_USTRORM_INTMEM
+
5492 USTORM_CQE_PAGE_BASE_OFFSET(port
, fp
->cl_id
) + 4,
5493 U64_HI(fp
->rx_comp_mapping
));
5496 REG_WR(bp
, BAR_USTRORM_INTMEM
+
5497 USTORM_CQE_PAGE_NEXT_OFFSET(port
, fp
->cl_id
),
5498 U64_LO(fp
->rx_comp_mapping
+ BCM_PAGE_SIZE
));
5499 REG_WR(bp
, BAR_USTRORM_INTMEM
+
5500 USTORM_CQE_PAGE_NEXT_OFFSET(port
, fp
->cl_id
) + 4,
5501 U64_HI(fp
->rx_comp_mapping
+ BCM_PAGE_SIZE
));
5503 REG_WR16(bp
, BAR_USTRORM_INTMEM
+
5504 USTORM_MAX_AGG_SIZE_OFFSET(port
, fp
->cl_id
),
5508 /* dropless flow control */
5509 if (CHIP_IS_E1H(bp
)) {
5510 struct ustorm_eth_rx_pause_data_e1h rx_pause
= {0};
5512 rx_pause
.bd_thr_low
= 250;
5513 rx_pause
.cqe_thr_low
= 250;
5515 rx_pause
.sge_thr_low
= 0;
5516 rx_pause
.bd_thr_high
= 350;
5517 rx_pause
.cqe_thr_high
= 350;
5518 rx_pause
.sge_thr_high
= 0;
5520 for_each_queue(bp
, i
) {
5521 struct bnx2x_fastpath
*fp
= &bp
->fp
[i
];
5523 if (!fp
->disable_tpa
) {
5524 rx_pause
.sge_thr_low
= 150;
5525 rx_pause
.sge_thr_high
= 250;
5529 offset
= BAR_USTRORM_INTMEM
+
5530 USTORM_ETH_RING_PAUSE_DATA_OFFSET(port
,
5533 j
< sizeof(struct ustorm_eth_rx_pause_data_e1h
)/4;
5535 REG_WR(bp
, offset
+ j
*4,
5536 ((u32
*)&rx_pause
)[j
]);
5540 memset(&(bp
->cmng
), 0, sizeof(struct cmng_struct_per_port
));
5542 /* Init rate shaping and fairness contexts */
5546 /* During init there is no active link
5547 Until link is up, set link rate to 10Gbps */
5548 bp
->link_vars
.line_speed
= SPEED_10000
;
5549 bnx2x_init_port_minmax(bp
);
5553 SHMEM_RD(bp
, mf_cfg
.func_mf_config
[func
].config
);
5554 bnx2x_calc_vn_weight_sum(bp
);
5556 for (vn
= VN_0
; vn
< E1HVN_MAX
; vn
++)
5557 bnx2x_init_vn_minmax(bp
, 2*vn
+ port
);
5559 /* Enable rate shaping and fairness */
5560 bp
->cmng
.flags
.cmng_enables
|=
5561 CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN
;
5564 /* rate shaping and fairness are disabled */
5566 "single function mode minmax will be disabled\n");
5570 /* Store it to internal memory */
5572 for (i
= 0; i
< sizeof(struct cmng_struct_per_port
) / 4; i
++)
5573 REG_WR(bp
, BAR_XSTRORM_INTMEM
+
5574 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port
) + i
* 4,
5575 ((u32
*)(&bp
->cmng
))[i
]);
5578 static void bnx2x_init_internal(struct bnx2x
*bp
, u32 load_code
)
5580 switch (load_code
) {
5581 case FW_MSG_CODE_DRV_LOAD_COMMON
:
5582 bnx2x_init_internal_common(bp
);
5585 case FW_MSG_CODE_DRV_LOAD_PORT
:
5586 bnx2x_init_internal_port(bp
);
5589 case FW_MSG_CODE_DRV_LOAD_FUNCTION
:
5590 bnx2x_init_internal_func(bp
);
5594 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code
);
5599 static void bnx2x_nic_init(struct bnx2x
*bp
, u32 load_code
)
5603 for_each_queue(bp
, i
) {
5604 struct bnx2x_fastpath
*fp
= &bp
->fp
[i
];
5607 fp
->state
= BNX2X_FP_STATE_CLOSED
;
5609 fp
->cl_id
= BP_L_ID(bp
) + i
;
5611 fp
->sb_id
= fp
->cl_id
+ 1;
5613 fp
->sb_id
= fp
->cl_id
;
5616 "queue[%d]: bnx2x_init_sb(%p,%p) cl_id %d sb %d\n",
5617 i
, bp
, fp
->status_blk
, fp
->cl_id
, fp
->sb_id
);
5618 bnx2x_init_sb(bp
, fp
->status_blk
, fp
->status_blk_mapping
,
5620 bnx2x_update_fpsb_idx(fp
);
5623 /* ensure status block indices were read */
5627 bnx2x_init_def_sb(bp
, bp
->def_status_blk
, bp
->def_status_blk_mapping
,
5629 bnx2x_update_dsb_idx(bp
);
5630 bnx2x_update_coalesce(bp
);
5631 bnx2x_init_rx_rings(bp
);
5632 bnx2x_init_tx_ring(bp
);
5633 bnx2x_init_sp_ring(bp
);
5634 bnx2x_init_context(bp
);
5635 bnx2x_init_internal(bp
, load_code
);
5636 bnx2x_init_ind_table(bp
);
5637 bnx2x_stats_init(bp
);
5639 /* At this point, we are ready for interrupts */
5640 atomic_set(&bp
->intr_sem
, 0);
5642 /* flush all before enabling interrupts */
5646 bnx2x_int_enable(bp
);
5648 /* Check for SPIO5 */
5649 bnx2x_attn_int_deasserted0(bp
,
5650 REG_RD(bp
, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0
+ BP_PORT(bp
)*4) &
5651 AEU_INPUTS_ATTN_BITS_SPIO5
);
/* end of nic init */

/*
 * gzip service functions
 */

static int bnx2x_gunzip_init(struct bnx2x *bp)
{
	bp->gunzip_buf = pci_alloc_consistent(bp->pdev, FW_BUF_SIZE,
					      &bp->gunzip_mapping);
	if (bp->gunzip_buf == NULL)
		goto gunzip_nomem1;

	bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
	if (bp->strm == NULL)
		goto gunzip_nomem2;

	bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
				      GFP_KERNEL);
	if (bp->strm->workspace == NULL)
		goto gunzip_nomem3;

	return 0;

gunzip_nomem3:
	kfree(bp->strm);
	bp->strm = NULL;

gunzip_nomem2:
	pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
			    bp->gunzip_mapping);
	bp->gunzip_buf = NULL;

gunzip_nomem1:
	netdev_err(bp->dev, "Cannot allocate firmware buffer for un-compression\n");
	return -ENOMEM;
}

static void bnx2x_gunzip_end(struct bnx2x *bp)
{
	kfree(bp->strm->workspace);

	kfree(bp->strm);
	bp->strm = NULL;

	if (bp->gunzip_buf) {
		pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
				    bp->gunzip_mapping);
		bp->gunzip_buf = NULL;
	}
}

static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len)
{
	int n, rc;

	/* check gzip header */
	if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED)) {
		BNX2X_ERR("Bad gzip header\n");
		return -EINVAL;
	}

	n = 10;

#define FNAME				0x8

	if (zbuf[3] & FNAME)
		while ((zbuf[n++] != 0) && (n < len));

	bp->strm->next_in = (typeof(bp->strm->next_in))zbuf + n;
	bp->strm->avail_in = len - n;
	bp->strm->next_out = bp->gunzip_buf;
	bp->strm->avail_out = FW_BUF_SIZE;

	rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
	if (rc != Z_OK)
		return rc;

	rc = zlib_inflate(bp->strm, Z_FINISH);
	if ((rc != Z_OK) && (rc != Z_STREAM_END))
		netdev_err(bp->dev, "Firmware decompression error: %s\n",
			   bp->strm->msg);

	bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
	if (bp->gunzip_outlen & 0x3)
		netdev_err(bp->dev, "Firmware decompression error: gunzip_outlen (%d) not aligned\n",
			   bp->gunzip_outlen);
	bp->gunzip_outlen >>= 2;

	zlib_inflateEnd(bp->strm);

	if (rc == Z_STREAM_END)
		return 0;

	return -EAGAIN;
}
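/* Worked example (illustrative comment, not from the original file): for the
 * firmware blobs this driver loads, the gzip member starts with the bytes
 * 0x1f 0x8b 0x08 (magic + Z_DEFLATED), followed by a flag byte.  The code
 * above skips the fixed 10-byte header, and when bit 3 (FNAME) of the flag
 * byte is set it also skips the NUL-terminated original file name, so that
 * zlib_inflateInit2(..., -MAX_WBITS) can be handed a raw deflate stream with
 * no gzip wrapper around it.
 */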
/* nic load/unload */

/*
 * General service functions
 */
5757 /* send a NIG loopback debug packet */
5758 static void bnx2x_lb_pckt(struct bnx2x
*bp
)
5762 /* Ethernet source and destination addresses */
5763 wb_write
[0] = 0x55555555;
5764 wb_write
[1] = 0x55555555;
5765 wb_write
[2] = 0x20; /* SOP */
5766 REG_WR_DMAE(bp
, NIG_REG_DEBUG_PACKET_LB
, wb_write
, 3);
5768 /* NON-IP protocol */
5769 wb_write
[0] = 0x09000000;
5770 wb_write
[1] = 0x55555555;
5771 wb_write
[2] = 0x10; /* EOP, eop_bvalid = 0 */
5772 REG_WR_DMAE(bp
, NIG_REG_DEBUG_PACKET_LB
, wb_write
, 3);
5775 /* some of the internal memories
5776 * are not directly readable from the driver
5777 * to test them we send debug packets
5779 static int bnx2x_int_mem_test(struct bnx2x
*bp
)
5785 if (CHIP_REV_IS_FPGA(bp
))
5787 else if (CHIP_REV_IS_EMUL(bp
))
5792 DP(NETIF_MSG_HW
, "start part1\n");
5794 /* Disable inputs of parser neighbor blocks */
5795 REG_WR(bp
, TSDM_REG_ENABLE_IN1
, 0x0);
5796 REG_WR(bp
, TCM_REG_PRS_IFEN
, 0x0);
5797 REG_WR(bp
, CFC_REG_DEBUG0
, 0x1);
5798 REG_WR(bp
, NIG_REG_PRS_REQ_IN_EN
, 0x0);
5800 /* Write 0 to parser credits for CFC search request */
5801 REG_WR(bp
, PRS_REG_CFC_SEARCH_INITIAL_CREDIT
, 0x0);
5803 /* send Ethernet packet */
5806 /* TODO do i reset NIG statistic? */
5807 /* Wait until NIG register shows 1 packet of size 0x10 */
5808 count
= 1000 * factor
;
5811 bnx2x_read_dmae(bp
, NIG_REG_STAT2_BRB_OCTET
, 2);
5812 val
= *bnx2x_sp(bp
, wb_data
[0]);
5820 BNX2X_ERR("NIG timeout val = 0x%x\n", val
);
5824 /* Wait until PRS register shows 1 packet */
5825 count
= 1000 * factor
;
5827 val
= REG_RD(bp
, PRS_REG_NUM_OF_PACKETS
);
5835 BNX2X_ERR("PRS timeout val = 0x%x\n", val
);
5839 /* Reset and init BRB, PRS */
5840 REG_WR(bp
, GRCBASE_MISC
+ MISC_REGISTERS_RESET_REG_1_CLEAR
, 0x03);
5842 REG_WR(bp
, GRCBASE_MISC
+ MISC_REGISTERS_RESET_REG_1_SET
, 0x03);
5844 bnx2x_init_block(bp
, BRB1_BLOCK
, COMMON_STAGE
);
5845 bnx2x_init_block(bp
, PRS_BLOCK
, COMMON_STAGE
);
5847 DP(NETIF_MSG_HW
, "part2\n");
5849 /* Disable inputs of parser neighbor blocks */
5850 REG_WR(bp
, TSDM_REG_ENABLE_IN1
, 0x0);
5851 REG_WR(bp
, TCM_REG_PRS_IFEN
, 0x0);
5852 REG_WR(bp
, CFC_REG_DEBUG0
, 0x1);
5853 REG_WR(bp
, NIG_REG_PRS_REQ_IN_EN
, 0x0);
5855 /* Write 0 to parser credits for CFC search request */
5856 REG_WR(bp
, PRS_REG_CFC_SEARCH_INITIAL_CREDIT
, 0x0);
5858 /* send 10 Ethernet packets */
5859 for (i
= 0; i
< 10; i
++)
5862 /* Wait until NIG register shows 10 + 1
5863 packets of size 11*0x10 = 0xb0 */
5864 count
= 1000 * factor
;
5867 bnx2x_read_dmae(bp
, NIG_REG_STAT2_BRB_OCTET
, 2);
5868 val
= *bnx2x_sp(bp
, wb_data
[0]);
5876 BNX2X_ERR("NIG timeout val = 0x%x\n", val
);
5880 /* Wait until PRS register shows 2 packets */
5881 val
= REG_RD(bp
, PRS_REG_NUM_OF_PACKETS
);
5883 BNX2X_ERR("PRS timeout val = 0x%x\n", val
);
5885 /* Write 1 to parser credits for CFC search request */
5886 REG_WR(bp
, PRS_REG_CFC_SEARCH_INITIAL_CREDIT
, 0x1);
5888 /* Wait until PRS register shows 3 packets */
5889 msleep(10 * factor
);
5890 /* Wait until NIG register shows 1 packet of size 0x10 */
5891 val
= REG_RD(bp
, PRS_REG_NUM_OF_PACKETS
);
5893 BNX2X_ERR("PRS timeout val = 0x%x\n", val
);
5895 /* clear NIG EOP FIFO */
5896 for (i
= 0; i
< 11; i
++)
5897 REG_RD(bp
, NIG_REG_INGRESS_EOP_LB_FIFO
);
5898 val
= REG_RD(bp
, NIG_REG_INGRESS_EOP_LB_EMPTY
);
5900 BNX2X_ERR("clear of NIG failed\n");
5904 /* Reset and init BRB, PRS, NIG */
5905 REG_WR(bp
, GRCBASE_MISC
+ MISC_REGISTERS_RESET_REG_1_CLEAR
, 0x03);
5907 REG_WR(bp
, GRCBASE_MISC
+ MISC_REGISTERS_RESET_REG_1_SET
, 0x03);
5909 bnx2x_init_block(bp
, BRB1_BLOCK
, COMMON_STAGE
);
5910 bnx2x_init_block(bp
, PRS_BLOCK
, COMMON_STAGE
);
5913 REG_WR(bp
, PRS_REG_NIC_MODE
, 1);
5916 /* Enable inputs of parser neighbor blocks */
5917 REG_WR(bp
, TSDM_REG_ENABLE_IN1
, 0x7fffffff);
5918 REG_WR(bp
, TCM_REG_PRS_IFEN
, 0x1);
5919 REG_WR(bp
, CFC_REG_DEBUG0
, 0x0);
5920 REG_WR(bp
, NIG_REG_PRS_REQ_IN_EN
, 0x1);
5922 DP(NETIF_MSG_HW
, "done\n");
5927 static void enable_blocks_attention(struct bnx2x
*bp
)
5929 REG_WR(bp
, PXP_REG_PXP_INT_MASK_0
, 0);
5930 REG_WR(bp
, PXP_REG_PXP_INT_MASK_1
, 0);
5931 REG_WR(bp
, DORQ_REG_DORQ_INT_MASK
, 0);
5932 REG_WR(bp
, CFC_REG_CFC_INT_MASK
, 0);
5933 REG_WR(bp
, QM_REG_QM_INT_MASK
, 0);
5934 REG_WR(bp
, TM_REG_TM_INT_MASK
, 0);
5935 REG_WR(bp
, XSDM_REG_XSDM_INT_MASK_0
, 0);
5936 REG_WR(bp
, XSDM_REG_XSDM_INT_MASK_1
, 0);
5937 REG_WR(bp
, XCM_REG_XCM_INT_MASK
, 0);
5938 /* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
5939 /* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
5940 REG_WR(bp
, USDM_REG_USDM_INT_MASK_0
, 0);
5941 REG_WR(bp
, USDM_REG_USDM_INT_MASK_1
, 0);
5942 REG_WR(bp
, UCM_REG_UCM_INT_MASK
, 0);
5943 /* REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
5944 /* REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
5945 REG_WR(bp
, GRCBASE_UPB
+ PB_REG_PB_INT_MASK
, 0);
5946 REG_WR(bp
, CSDM_REG_CSDM_INT_MASK_0
, 0);
5947 REG_WR(bp
, CSDM_REG_CSDM_INT_MASK_1
, 0);
5948 REG_WR(bp
, CCM_REG_CCM_INT_MASK
, 0);
5949 /* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
5950 /* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
5951 if (CHIP_REV_IS_FPGA(bp
))
5952 REG_WR(bp
, PXP2_REG_PXP2_INT_MASK_0
, 0x580000);
5954 REG_WR(bp
, PXP2_REG_PXP2_INT_MASK_0
, 0x480000);
5955 REG_WR(bp
, TSDM_REG_TSDM_INT_MASK_0
, 0);
5956 REG_WR(bp
, TSDM_REG_TSDM_INT_MASK_1
, 0);
5957 REG_WR(bp
, TCM_REG_TCM_INT_MASK
, 0);
5958 /* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
5959 /* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
5960 REG_WR(bp
, CDU_REG_CDU_INT_MASK
, 0);
5961 REG_WR(bp
, DMAE_REG_DMAE_INT_MASK
, 0);
5962 /* REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
5963 REG_WR(bp
, PBF_REG_PBF_INT_MASK
, 0X18); /* bit 3,4 masked */
5967 static void bnx2x_reset_common(struct bnx2x
*bp
)
5970 REG_WR(bp
, GRCBASE_MISC
+ MISC_REGISTERS_RESET_REG_1_CLEAR
,
5972 REG_WR(bp
, GRCBASE_MISC
+ MISC_REGISTERS_RESET_REG_2_CLEAR
, 0x1403);
5975 static void bnx2x_init_pxp(struct bnx2x
*bp
)
5978 int r_order
, w_order
;
5980 pci_read_config_word(bp
->pdev
,
5981 bp
->pcie_cap
+ PCI_EXP_DEVCTL
, &devctl
);
5982 DP(NETIF_MSG_HW
, "read 0x%x from devctl\n", devctl
);
5983 w_order
= ((devctl
& PCI_EXP_DEVCTL_PAYLOAD
) >> 5);
5985 r_order
= ((devctl
& PCI_EXP_DEVCTL_READRQ
) >> 12);
5987 DP(NETIF_MSG_HW
, "force read order to %d\n", bp
->mrrs
);
5991 bnx2x_init_pxp_arb(bp
, r_order
, w_order
);
5994 static void bnx2x_setup_fan_failure_detection(struct bnx2x
*bp
)
6000 val
= SHMEM_RD(bp
, dev_info
.shared_hw_config
.config2
) &
6001 SHARED_HW_CFG_FAN_FAILURE_MASK
;
6003 if (val
== SHARED_HW_CFG_FAN_FAILURE_ENABLED
)
6007 * The fan failure mechanism is usually related to the PHY type since
6008 * the power consumption of the board is affected by the PHY. Currently,
6009 * fan is required for most designs with SFX7101, BCM8727 and BCM8481.
6011 else if (val
== SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE
)
6012 for (port
= PORT_0
; port
< PORT_MAX
; port
++) {
6014 SHMEM_RD(bp
, dev_info
.port_hw_config
[port
].
6015 external_phy_config
) &
6016 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK
;
6019 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101
) ||
6021 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727
) ||
6023 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481
));
6026 DP(NETIF_MSG_HW
, "fan detection setting: %d\n", is_required
);
6028 if (is_required
== 0)
6031 /* Fan failure is indicated by SPIO 5 */
6032 bnx2x_set_spio(bp
, MISC_REGISTERS_SPIO_5
,
6033 MISC_REGISTERS_SPIO_INPUT_HI_Z
);
6035 /* set to active low mode */
6036 val
= REG_RD(bp
, MISC_REG_SPIO_INT
);
6037 val
|= ((1 << MISC_REGISTERS_SPIO_5
) <<
6038 MISC_REGISTERS_SPIO_INT_OLD_SET_POS
);
6039 REG_WR(bp
, MISC_REG_SPIO_INT
, val
);
6041 /* enable interrupt to signal the IGU */
6042 val
= REG_RD(bp
, MISC_REG_SPIO_EVENT_EN
);
6043 val
|= (1 << MISC_REGISTERS_SPIO_5
);
6044 REG_WR(bp
, MISC_REG_SPIO_EVENT_EN
, val
);
6047 static int bnx2x_init_common(struct bnx2x
*bp
)
6054 DP(BNX2X_MSG_MCP
, "starting common init func %d\n", BP_FUNC(bp
));
6056 bnx2x_reset_common(bp
);
6057 REG_WR(bp
, GRCBASE_MISC
+ MISC_REGISTERS_RESET_REG_1_SET
, 0xffffffff);
6058 REG_WR(bp
, GRCBASE_MISC
+ MISC_REGISTERS_RESET_REG_2_SET
, 0xfffc);
6060 bnx2x_init_block(bp
, MISC_BLOCK
, COMMON_STAGE
);
6061 if (CHIP_IS_E1H(bp
))
6062 REG_WR(bp
, MISC_REG_E1HMF_MODE
, IS_E1HMF(bp
));
6064 REG_WR(bp
, MISC_REG_LCPLL_CTRL_REG_2
, 0x100);
6066 REG_WR(bp
, MISC_REG_LCPLL_CTRL_REG_2
, 0x0);
6068 bnx2x_init_block(bp
, PXP_BLOCK
, COMMON_STAGE
);
6069 if (CHIP_IS_E1(bp
)) {
6070 /* enable HW interrupt from PXP on USDM overflow
6071 bit 16 on INT_MASK_0 */
6072 REG_WR(bp
, PXP_REG_PXP_INT_MASK_0
, 0);
6075 bnx2x_init_block(bp
, PXP2_BLOCK
, COMMON_STAGE
);
6079 REG_WR(bp
, PXP2_REG_RQ_QM_ENDIAN_M
, 1);
6080 REG_WR(bp
, PXP2_REG_RQ_TM_ENDIAN_M
, 1);
6081 REG_WR(bp
, PXP2_REG_RQ_SRC_ENDIAN_M
, 1);
6082 REG_WR(bp
, PXP2_REG_RQ_CDU_ENDIAN_M
, 1);
6083 REG_WR(bp
, PXP2_REG_RQ_DBG_ENDIAN_M
, 1);
6084 /* make sure this value is 0 */
6085 REG_WR(bp
, PXP2_REG_RQ_HC_ENDIAN_M
, 0);
6087 /* REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
6088 REG_WR(bp
, PXP2_REG_RD_QM_SWAP_MODE
, 1);
6089 REG_WR(bp
, PXP2_REG_RD_TM_SWAP_MODE
, 1);
6090 REG_WR(bp
, PXP2_REG_RD_SRC_SWAP_MODE
, 1);
6091 REG_WR(bp
, PXP2_REG_RD_CDURD_SWAP_MODE
, 1);
6094 REG_WR(bp
, PXP2_REG_RQ_CDU_P_SIZE
, 2);
6096 REG_WR(bp
, PXP2_REG_RQ_TM_P_SIZE
, 5);
6097 REG_WR(bp
, PXP2_REG_RQ_QM_P_SIZE
, 5);
6098 REG_WR(bp
, PXP2_REG_RQ_SRC_P_SIZE
, 5);
6101 if (CHIP_REV_IS_FPGA(bp
) && CHIP_IS_E1H(bp
))
6102 REG_WR(bp
, PXP2_REG_PGL_TAGS_LIMIT
, 0x1);
6104 /* let the HW do it's magic ... */
6106 /* finish PXP init */
6107 val
= REG_RD(bp
, PXP2_REG_RQ_CFG_DONE
);
6109 BNX2X_ERR("PXP2 CFG failed\n");
6112 val
= REG_RD(bp
, PXP2_REG_RD_INIT_DONE
);
6114 BNX2X_ERR("PXP2 RD_INIT failed\n");
6118 REG_WR(bp
, PXP2_REG_RQ_DISABLE_INPUTS
, 0);
6119 REG_WR(bp
, PXP2_REG_RD_DISABLE_INPUTS
, 0);
6121 bnx2x_init_block(bp
, DMAE_BLOCK
, COMMON_STAGE
);
6123 /* clean the DMAE memory */
6125 bnx2x_init_fill(bp
, TSEM_REG_PRAM
, 0, 8);
6127 bnx2x_init_block(bp
, TCM_BLOCK
, COMMON_STAGE
);
6128 bnx2x_init_block(bp
, UCM_BLOCK
, COMMON_STAGE
);
6129 bnx2x_init_block(bp
, CCM_BLOCK
, COMMON_STAGE
);
6130 bnx2x_init_block(bp
, XCM_BLOCK
, COMMON_STAGE
);
6132 bnx2x_read_dmae(bp
, XSEM_REG_PASSIVE_BUFFER
, 3);
6133 bnx2x_read_dmae(bp
, CSEM_REG_PASSIVE_BUFFER
, 3);
6134 bnx2x_read_dmae(bp
, TSEM_REG_PASSIVE_BUFFER
, 3);
6135 bnx2x_read_dmae(bp
, USEM_REG_PASSIVE_BUFFER
, 3);
6137 bnx2x_init_block(bp
, QM_BLOCK
, COMMON_STAGE
);
6142 for (i
= 0; i
< 64; i
++) {
6143 REG_WR(bp
, QM_REG_BASEADDR
+ i
*4, 1024 * 4 * (i
%16));
6144 bnx2x_init_ind_wr(bp
, QM_REG_PTRTBL
+ i
*8, wb_write
, 2);
6146 if (CHIP_IS_E1H(bp
)) {
6147 REG_WR(bp
, QM_REG_BASEADDR_EXT_A
+ i
*4, 1024*4*(i
%16));
6148 bnx2x_init_ind_wr(bp
, QM_REG_PTRTBL_EXT_A
+ i
*8,
6153 /* soft reset pulse */
6154 REG_WR(bp
, QM_REG_SOFT_RESET
, 1);
6155 REG_WR(bp
, QM_REG_SOFT_RESET
, 0);
6158 bnx2x_init_block(bp
, TIMERS_BLOCK
, COMMON_STAGE
);
6161 bnx2x_init_block(bp
, DQ_BLOCK
, COMMON_STAGE
);
6162 REG_WR(bp
, DORQ_REG_DPM_CID_OFST
, BCM_PAGE_SHIFT
);
6163 if (!CHIP_REV_IS_SLOW(bp
)) {
6164 /* enable hw interrupt from doorbell Q */
6165 REG_WR(bp
, DORQ_REG_DORQ_INT_MASK
, 0);
6168 bnx2x_init_block(bp
, BRB1_BLOCK
, COMMON_STAGE
);
6169 bnx2x_init_block(bp
, PRS_BLOCK
, COMMON_STAGE
);
6170 REG_WR(bp
, PRS_REG_A_PRSU_20
, 0xf);
6173 REG_WR(bp
, PRS_REG_NIC_MODE
, 1);
6175 if (CHIP_IS_E1H(bp
))
6176 REG_WR(bp
, PRS_REG_E1HOV_MODE
, IS_E1HMF(bp
));
6178 bnx2x_init_block(bp
, TSDM_BLOCK
, COMMON_STAGE
);
6179 bnx2x_init_block(bp
, CSDM_BLOCK
, COMMON_STAGE
);
6180 bnx2x_init_block(bp
, USDM_BLOCK
, COMMON_STAGE
);
6181 bnx2x_init_block(bp
, XSDM_BLOCK
, COMMON_STAGE
);
6183 bnx2x_init_fill(bp
, TSEM_REG_FAST_MEMORY
, 0, STORM_INTMEM_SIZE(bp
));
6184 bnx2x_init_fill(bp
, USEM_REG_FAST_MEMORY
, 0, STORM_INTMEM_SIZE(bp
));
6185 bnx2x_init_fill(bp
, CSEM_REG_FAST_MEMORY
, 0, STORM_INTMEM_SIZE(bp
));
6186 bnx2x_init_fill(bp
, XSEM_REG_FAST_MEMORY
, 0, STORM_INTMEM_SIZE(bp
));
6188 bnx2x_init_block(bp
, TSEM_BLOCK
, COMMON_STAGE
);
6189 bnx2x_init_block(bp
, USEM_BLOCK
, COMMON_STAGE
);
6190 bnx2x_init_block(bp
, CSEM_BLOCK
, COMMON_STAGE
);
6191 bnx2x_init_block(bp
, XSEM_BLOCK
, COMMON_STAGE
);
6194 REG_WR(bp
, GRCBASE_MISC
+ MISC_REGISTERS_RESET_REG_1_CLEAR
,
6196 REG_WR(bp
, GRCBASE_MISC
+ MISC_REGISTERS_RESET_REG_1_SET
,
6199 bnx2x_init_block(bp
, UPB_BLOCK
, COMMON_STAGE
);
6200 bnx2x_init_block(bp
, XPB_BLOCK
, COMMON_STAGE
);
6201 bnx2x_init_block(bp
, PBF_BLOCK
, COMMON_STAGE
);
6203 REG_WR(bp
, SRC_REG_SOFT_RST
, 1);
6204 for (i
= SRC_REG_KEYRSS0_0
; i
<= SRC_REG_KEYRSS1_9
; i
+= 4) {
6205 REG_WR(bp
, i
, 0xc0cac01a);
6206 /* TODO: replace with something meaningful */
6208 bnx2x_init_block(bp
, SRCH_BLOCK
, COMMON_STAGE
);
6210 REG_WR(bp
, SRC_REG_KEYSEARCH_0
, 0x63285672);
6211 REG_WR(bp
, SRC_REG_KEYSEARCH_1
, 0x24b8f2cc);
6212 REG_WR(bp
, SRC_REG_KEYSEARCH_2
, 0x223aef9b);
6213 REG_WR(bp
, SRC_REG_KEYSEARCH_3
, 0x26001e3a);
6214 REG_WR(bp
, SRC_REG_KEYSEARCH_4
, 0x7ae91116);
6215 REG_WR(bp
, SRC_REG_KEYSEARCH_5
, 0x5ce5230b);
6216 REG_WR(bp
, SRC_REG_KEYSEARCH_6
, 0x298d8adf);
6217 REG_WR(bp
, SRC_REG_KEYSEARCH_7
, 0x6eb0ff09);
6218 REG_WR(bp
, SRC_REG_KEYSEARCH_8
, 0x1830f82f);
6219 REG_WR(bp
, SRC_REG_KEYSEARCH_9
, 0x01e46be7);
6221 REG_WR(bp
, SRC_REG_SOFT_RST
, 0);
6223 if (sizeof(union cdu_context
) != 1024)
6224 /* we currently assume that a context is 1024 bytes */
6225 pr_alert("please adjust the size of cdu_context(%ld)\n",
6226 (long)sizeof(union cdu_context
));
6228 bnx2x_init_block(bp
, CDU_BLOCK
, COMMON_STAGE
);
6229 val
= (4 << 24) + (0 << 12) + 1024;
6230 REG_WR(bp
, CDU_REG_CDU_GLOBAL_PARAMS
, val
);
6232 bnx2x_init_block(bp
, CFC_BLOCK
, COMMON_STAGE
);
6233 REG_WR(bp
, CFC_REG_INIT_REG
, 0x7FF);
6234 /* enable context validation interrupt from CFC */
6235 REG_WR(bp
, CFC_REG_CFC_INT_MASK
, 0);
6237 /* set the thresholds to prevent CFC/CDU race */
6238 REG_WR(bp
, CFC_REG_DEBUG0
, 0x20020000);
6240 bnx2x_init_block(bp
, HC_BLOCK
, COMMON_STAGE
);
6241 bnx2x_init_block(bp
, MISC_AEU_BLOCK
, COMMON_STAGE
);
6243 bnx2x_init_block(bp
, PXPCS_BLOCK
, COMMON_STAGE
);
6244 /* Reset PCIE errors for debug */
6245 REG_WR(bp
, 0x2814, 0xffffffff);
6246 REG_WR(bp
, 0x3820, 0xffffffff);
6248 bnx2x_init_block(bp
, EMAC0_BLOCK
, COMMON_STAGE
);
6249 bnx2x_init_block(bp
, EMAC1_BLOCK
, COMMON_STAGE
);
6250 bnx2x_init_block(bp
, DBU_BLOCK
, COMMON_STAGE
);
6251 bnx2x_init_block(bp
, DBG_BLOCK
, COMMON_STAGE
);
6253 bnx2x_init_block(bp
, NIG_BLOCK
, COMMON_STAGE
);
6254 if (CHIP_IS_E1H(bp
)) {
6255 REG_WR(bp
, NIG_REG_LLH_MF_MODE
, IS_E1HMF(bp
));
6256 REG_WR(bp
, NIG_REG_LLH_E1HOV_MODE
, IS_E1HMF(bp
));
6259 if (CHIP_REV_IS_SLOW(bp
))
6262 /* finish CFC init */
6263 val
= reg_poll(bp
, CFC_REG_LL_INIT_DONE
, 1, 100, 10);
6265 BNX2X_ERR("CFC LL_INIT failed\n");
6268 val
= reg_poll(bp
, CFC_REG_AC_INIT_DONE
, 1, 100, 10);
6270 BNX2X_ERR("CFC AC_INIT failed\n");
6273 val
= reg_poll(bp
, CFC_REG_CAM_INIT_DONE
, 1, 100, 10);
6275 BNX2X_ERR("CFC CAM_INIT failed\n");
6278 REG_WR(bp
, CFC_REG_DEBUG0
, 0);
6280 /* read NIG statistic
6281 to see if this is our first up since powerup */
6282 bnx2x_read_dmae(bp
, NIG_REG_STAT2_BRB_OCTET
, 2);
6283 val
= *bnx2x_sp(bp
, wb_data
[0]);
6285 /* do internal memory self test */
6286 if ((CHIP_IS_E1(bp
)) && (val
== 0) && bnx2x_int_mem_test(bp
)) {
6287 BNX2X_ERR("internal mem self test failed\n");
6291 switch (XGXS_EXT_PHY_TYPE(bp
->link_params
.ext_phy_config
)) {
6292 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072
:
6293 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073
:
6294 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726
:
6295 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727
:
6296 bp
->port
.need_hw_lock
= 1;
6303 bnx2x_setup_fan_failure_detection(bp
);
6305 /* clear PXP2 attentions */
6306 REG_RD(bp
, PXP2_REG_PXP2_INT_STS_CLR_0
);
6308 enable_blocks_attention(bp
);
6310 if (!BP_NOMCP(bp
)) {
6311 bnx2x_acquire_phy_lock(bp
);
6312 bnx2x_common_init_phy(bp
, bp
->common
.shmem_base
);
6313 bnx2x_release_phy_lock(bp
);
6315 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
6320 static int bnx2x_init_port(struct bnx2x
*bp
)
6322 int port
= BP_PORT(bp
);
6323 int init_stage
= port
? PORT1_STAGE
: PORT0_STAGE
;
6327 DP(BNX2X_MSG_MCP
, "starting port init port %x\n", port
);
6329 REG_WR(bp
, NIG_REG_MASK_INTERRUPT_PORT0
+ port
*4, 0);
6331 bnx2x_init_block(bp
, PXP_BLOCK
, init_stage
);
6332 bnx2x_init_block(bp
, PXP2_BLOCK
, init_stage
);
6334 bnx2x_init_block(bp
, TCM_BLOCK
, init_stage
);
6335 bnx2x_init_block(bp
, UCM_BLOCK
, init_stage
);
6336 bnx2x_init_block(bp
, CCM_BLOCK
, init_stage
);
6337 bnx2x_init_block(bp
, XCM_BLOCK
, init_stage
);
6340 REG_WR(bp
, QM_REG_CONNNUM_0
+ port
*4, 1024/16 - 1);
6342 bnx2x_init_block(bp
, TIMERS_BLOCK
, init_stage
);
6343 REG_WR(bp
, TM_REG_LIN0_SCAN_TIME
+ port
*4, 20);
6344 REG_WR(bp
, TM_REG_LIN0_MAX_ACTIVE_CID
+ port
*4, 31);
6346 bnx2x_init_block(bp
, DQ_BLOCK
, init_stage
);
6348 bnx2x_init_block(bp
, BRB1_BLOCK
, init_stage
);
6349 if (CHIP_REV_IS_SLOW(bp
) && !CHIP_IS_E1H(bp
)) {
6350 /* no pause for emulation and FPGA */
6355 low
= ((bp
->flags
& ONE_PORT_FLAG
) ? 160 : 246);
6356 else if (bp
->dev
->mtu
> 4096) {
6357 if (bp
->flags
& ONE_PORT_FLAG
)
6361 /* (24*1024 + val*4)/256 */
6362 low
= 96 + (val
/64) + ((val
% 64) ? 1 : 0);
6365 low
= ((bp
->flags
& ONE_PORT_FLAG
) ? 80 : 160);
6366 high
= low
+ 56; /* 14*1024/256 */
6368 REG_WR(bp
, BRB1_REG_PAUSE_LOW_THRESHOLD_0
+ port
*4, low
);
6369 REG_WR(bp
, BRB1_REG_PAUSE_HIGH_THRESHOLD_0
+ port
*4, high
);
6372 bnx2x_init_block(bp
, PRS_BLOCK
, init_stage
);
6374 bnx2x_init_block(bp
, TSDM_BLOCK
, init_stage
);
6375 bnx2x_init_block(bp
, CSDM_BLOCK
, init_stage
);
6376 bnx2x_init_block(bp
, USDM_BLOCK
, init_stage
);
6377 bnx2x_init_block(bp
, XSDM_BLOCK
, init_stage
);
6379 bnx2x_init_block(bp
, TSEM_BLOCK
, init_stage
);
6380 bnx2x_init_block(bp
, USEM_BLOCK
, init_stage
);
6381 bnx2x_init_block(bp
, CSEM_BLOCK
, init_stage
);
6382 bnx2x_init_block(bp
, XSEM_BLOCK
, init_stage
);
6384 bnx2x_init_block(bp
, UPB_BLOCK
, init_stage
);
6385 bnx2x_init_block(bp
, XPB_BLOCK
, init_stage
);
6387 bnx2x_init_block(bp
, PBF_BLOCK
, init_stage
);
6389 /* configure PBF to work without PAUSE mtu 9000 */
6390 REG_WR(bp
, PBF_REG_P0_PAUSE_ENABLE
+ port
*4, 0);
6392 /* update threshold */
6393 REG_WR(bp
, PBF_REG_P0_ARB_THRSH
+ port
*4, (9040/16));
6394 /* update init credit */
6395 REG_WR(bp
, PBF_REG_P0_INIT_CRD
+ port
*4, (9040/16) + 553 - 22);
6398 REG_WR(bp
, PBF_REG_INIT_P0
+ port
*4, 1);
6400 REG_WR(bp
, PBF_REG_INIT_P0
+ port
*4, 0);
6403 bnx2x_init_block(bp
, SRCH_BLOCK
, init_stage
);
6405 bnx2x_init_block(bp
, CDU_BLOCK
, init_stage
);
6406 bnx2x_init_block(bp
, CFC_BLOCK
, init_stage
);
6408 if (CHIP_IS_E1(bp
)) {
6409 REG_WR(bp
, HC_REG_LEADING_EDGE_0
+ port
*8, 0);
6410 REG_WR(bp
, HC_REG_TRAILING_EDGE_0
+ port
*8, 0);
6412 bnx2x_init_block(bp
, HC_BLOCK
, init_stage
);
6414 bnx2x_init_block(bp
, MISC_AEU_BLOCK
, init_stage
);
6415 /* init aeu_mask_attn_func_0/1:
6416 * - SF mode: bits 3-7 are masked. only bits 0-2 are in use
6417 * - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
6418 * bits 4-7 are used for "per vn group attention" */
6419 REG_WR(bp
, MISC_REG_AEU_MASK_ATTN_FUNC_0
+ port
*4,
6420 (IS_E1HMF(bp
) ? 0xF7 : 0x7));
6422 bnx2x_init_block(bp
, PXPCS_BLOCK
, init_stage
);
6423 bnx2x_init_block(bp
, EMAC0_BLOCK
, init_stage
);
6424 bnx2x_init_block(bp
, EMAC1_BLOCK
, init_stage
);
6425 bnx2x_init_block(bp
, DBU_BLOCK
, init_stage
);
6426 bnx2x_init_block(bp
, DBG_BLOCK
, init_stage
);
6428 bnx2x_init_block(bp
, NIG_BLOCK
, init_stage
);
6430 REG_WR(bp
, NIG_REG_XGXS_SERDES0_MODE_SEL
+ port
*4, 1);
6432 if (CHIP_IS_E1H(bp
)) {
6433 /* 0x2 disable e1hov, 0x1 enable */
6434 REG_WR(bp
, NIG_REG_LLH0_BRB1_DRV_MASK_MF
+ port
*4,
6435 (IS_E1HMF(bp
) ? 0x1 : 0x2));
6438 REG_WR(bp
, NIG_REG_LLFC_ENABLE_0
+ port
*4, 0);
6439 REG_WR(bp
, NIG_REG_LLFC_OUT_EN_0
+ port
*4, 0);
6440 REG_WR(bp
, NIG_REG_PAUSE_ENABLE_0
+ port
*4, 1);
6444 bnx2x_init_block(bp
, MCP_BLOCK
, init_stage
);
6445 bnx2x_init_block(bp
, DMAE_BLOCK
, init_stage
);
6447 switch (XGXS_EXT_PHY_TYPE(bp
->link_params
.ext_phy_config
)) {
6448 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726
:
6450 u32 swap_val
, swap_override
, aeu_gpio_mask
, offset
;
6452 bnx2x_set_gpio(bp
, MISC_REGISTERS_GPIO_3
,
6453 MISC_REGISTERS_GPIO_INPUT_HI_Z
, port
);
6455 /* The GPIO should be swapped if the swap register is
6457 swap_val
= REG_RD(bp
, NIG_REG_PORT_SWAP
);
6458 swap_override
= REG_RD(bp
, NIG_REG_STRAP_OVERRIDE
);
6460 /* Select function upon port-swap configuration */
6462 offset
= MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0
;
6463 aeu_gpio_mask
= (swap_val
&& swap_override
) ?
6464 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1
:
6465 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0
;
6467 offset
= MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0
;
6468 aeu_gpio_mask
= (swap_val
&& swap_override
) ?
6469 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0
:
6470 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1
;
6472 val
= REG_RD(bp
, offset
);
6473 /* add GPIO3 to group */
6474 val
|= aeu_gpio_mask
;
6475 REG_WR(bp
, offset
, val
);
6479 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101
:
6480 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727
:
6481 /* add SPIO 5 to group 0 */
6483 u32 reg_addr
= (port
? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0
:
6484 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0
);
6485 val
= REG_RD(bp
, reg_addr
);
6486 val
|= AEU_INPUTS_ATTN_BITS_SPIO5
;
6487 REG_WR(bp
, reg_addr
, val
);
6495 bnx2x__link_reset(bp
);
6500 #define ILT_PER_FUNC (768/2)
6501 #define FUNC_ILT_BASE(func) (func * ILT_PER_FUNC)
6502 /* the phys address is shifted right 12 bits and has an added
6503 1=valid bit added to the 53rd bit
6504 then since this is a wide register(TM)
6505 we split it into two 32 bit writes
6507 #define ONCHIP_ADDR1(x) ((u32)(((u64)x >> 12) & 0xFFFFFFFF))
6508 #define ONCHIP_ADDR2(x) ((u32)((1 << 20) | ((u64)x >> 44)))
6509 #define PXP_ONE_ILT(x) (((x) << 10) | x)
6510 #define PXP_ILT_RANGE(f, l) (((l) << 10) | f)
6513 #define CNIC_ILT_LINES 127
6514 #define CNIC_CTX_PER_ILT 16
6516 #define CNIC_ILT_LINES 0
6519 static void bnx2x_ilt_wr(struct bnx2x
*bp
, u32 index
, dma_addr_t addr
)
6523 if (CHIP_IS_E1H(bp
))
6524 reg
= PXP2_REG_RQ_ONCHIP_AT_B0
+ index
*8;
6526 reg
= PXP2_REG_RQ_ONCHIP_AT
+ index
*8;
6528 bnx2x_wb_wr(bp
, reg
, ONCHIP_ADDR1(addr
), ONCHIP_ADDR2(addr
));
6531 static int bnx2x_init_func(struct bnx2x
*bp
)
6533 int port
= BP_PORT(bp
);
6534 int func
= BP_FUNC(bp
);
6538 DP(BNX2X_MSG_MCP
, "starting func init func %x\n", func
);
6540 /* set MSI reconfigure capability */
6541 addr
= (port
? HC_REG_CONFIG_1
: HC_REG_CONFIG_0
);
6542 val
= REG_RD(bp
, addr
);
6543 val
|= HC_CONFIG_0_REG_MSI_ATTN_EN_0
;
6544 REG_WR(bp
, addr
, val
);
6546 i
= FUNC_ILT_BASE(func
);
6548 bnx2x_ilt_wr(bp
, i
, bnx2x_sp_mapping(bp
, context
));
6549 if (CHIP_IS_E1H(bp
)) {
6550 REG_WR(bp
, PXP2_REG_RQ_CDU_FIRST_ILT
, i
);
6551 REG_WR(bp
, PXP2_REG_RQ_CDU_LAST_ILT
, i
+ CNIC_ILT_LINES
);
6553 REG_WR(bp
, PXP2_REG_PSWRQ_CDU0_L2P
+ func
*4,
6554 PXP_ILT_RANGE(i
, i
+ CNIC_ILT_LINES
));
6557 i
+= 1 + CNIC_ILT_LINES
;
6558 bnx2x_ilt_wr(bp
, i
, bp
->timers_mapping
);
6560 REG_WR(bp
, PXP2_REG_PSWRQ_TM0_L2P
+ func
*4, PXP_ONE_ILT(i
));
6562 REG_WR(bp
, PXP2_REG_RQ_TM_FIRST_ILT
, i
);
6563 REG_WR(bp
, PXP2_REG_RQ_TM_LAST_ILT
, i
);
6567 bnx2x_ilt_wr(bp
, i
, bp
->qm_mapping
);
6569 REG_WR(bp
, PXP2_REG_PSWRQ_QM0_L2P
+ func
*4, PXP_ONE_ILT(i
));
6571 REG_WR(bp
, PXP2_REG_RQ_QM_FIRST_ILT
, i
);
6572 REG_WR(bp
, PXP2_REG_RQ_QM_LAST_ILT
, i
);
6576 bnx2x_ilt_wr(bp
, i
, bp
->t1_mapping
);
6578 REG_WR(bp
, PXP2_REG_PSWRQ_SRC0_L2P
+ func
*4, PXP_ONE_ILT(i
));
6580 REG_WR(bp
, PXP2_REG_RQ_SRC_FIRST_ILT
, i
);
6581 REG_WR(bp
, PXP2_REG_RQ_SRC_LAST_ILT
, i
);
6584 /* tell the searcher where the T2 table is */
6585 REG_WR(bp
, SRC_REG_COUNTFREE0
+ port
*4, 16*1024/64);
6587 bnx2x_wb_wr(bp
, SRC_REG_FIRSTFREE0
+ port
*16,
6588 U64_LO(bp
->t2_mapping
), U64_HI(bp
->t2_mapping
));
6590 bnx2x_wb_wr(bp
, SRC_REG_LASTFREE0
+ port
*16,
6591 U64_LO((u64
)bp
->t2_mapping
+ 16*1024 - 64),
6592 U64_HI((u64
)bp
->t2_mapping
+ 16*1024 - 64));
6594 REG_WR(bp
, SRC_REG_NUMBER_HASH_BITS0
+ port
*4, 10);
6597 if (CHIP_IS_E1H(bp
)) {
6598 bnx2x_init_block(bp
, MISC_BLOCK
, FUNC0_STAGE
+ func
);
6599 bnx2x_init_block(bp
, TCM_BLOCK
, FUNC0_STAGE
+ func
);
6600 bnx2x_init_block(bp
, UCM_BLOCK
, FUNC0_STAGE
+ func
);
6601 bnx2x_init_block(bp
, CCM_BLOCK
, FUNC0_STAGE
+ func
);
6602 bnx2x_init_block(bp
, XCM_BLOCK
, FUNC0_STAGE
+ func
);
6603 bnx2x_init_block(bp
, TSEM_BLOCK
, FUNC0_STAGE
+ func
);
6604 bnx2x_init_block(bp
, USEM_BLOCK
, FUNC0_STAGE
+ func
);
6605 bnx2x_init_block(bp
, CSEM_BLOCK
, FUNC0_STAGE
+ func
);
6606 bnx2x_init_block(bp
, XSEM_BLOCK
, FUNC0_STAGE
+ func
);
6608 REG_WR(bp
, NIG_REG_LLH0_FUNC_EN
+ port
*8, 1);
6609 REG_WR(bp
, NIG_REG_LLH0_FUNC_VLAN_ID
+ port
*8, bp
->e1hov
);
6612 /* HC init per function */
6613 if (CHIP_IS_E1H(bp
)) {
6614 REG_WR(bp
, MISC_REG_AEU_GENERAL_ATTN_12
+ func
*4, 0);
6616 REG_WR(bp
, HC_REG_LEADING_EDGE_0
+ port
*8, 0);
6617 REG_WR(bp
, HC_REG_TRAILING_EDGE_0
+ port
*8, 0);
6619 bnx2x_init_block(bp
, HC_BLOCK
, FUNC0_STAGE
+ func
);
6621 /* Reset PCIE errors for debug */
6622 REG_WR(bp
, 0x2114, 0xffffffff);
6623 REG_WR(bp
, 0x2120, 0xffffffff);
6628 static int bnx2x_init_hw(struct bnx2x
*bp
, u32 load_code
)
6632 DP(BNX2X_MSG_MCP
, "function %d load_code %x\n",
6633 BP_FUNC(bp
), load_code
);
6636 mutex_init(&bp
->dmae_mutex
);
6637 rc
= bnx2x_gunzip_init(bp
);
6641 switch (load_code
) {
6642 case FW_MSG_CODE_DRV_LOAD_COMMON
:
6643 rc
= bnx2x_init_common(bp
);
6648 case FW_MSG_CODE_DRV_LOAD_PORT
:
6650 rc
= bnx2x_init_port(bp
);
6655 case FW_MSG_CODE_DRV_LOAD_FUNCTION
:
6657 rc
= bnx2x_init_func(bp
);
6663 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code
);
6667 if (!BP_NOMCP(bp
)) {
6668 int func
= BP_FUNC(bp
);
6670 bp
->fw_drv_pulse_wr_seq
=
6671 (SHMEM_RD(bp
, func_mb
[func
].drv_pulse_mb
) &
6672 DRV_PULSE_SEQ_MASK
);
6673 DP(BNX2X_MSG_MCP
, "drv_pulse 0x%x\n", bp
->fw_drv_pulse_wr_seq
);
6676 /* this needs to be done before gunzip end */
6677 bnx2x_zero_def_sb(bp
);
6678 for_each_queue(bp
, i
)
6679 bnx2x_zero_sb(bp
, BP_L_ID(bp
) + i
);
6681 bnx2x_zero_sb(bp
, BP_L_ID(bp
) + i
);
6685 bnx2x_gunzip_end(bp
);
6690 static void bnx2x_free_mem(struct bnx2x
*bp
)
6693 #define BNX2X_PCI_FREE(x, y, size) \
6696 pci_free_consistent(bp->pdev, size, x, y); \
6702 #define BNX2X_FREE(x) \
6714 for_each_queue(bp
, i
) {
6717 BNX2X_PCI_FREE(bnx2x_fp(bp
, i
, status_blk
),
6718 bnx2x_fp(bp
, i
, status_blk_mapping
),
6719 sizeof(struct host_status_block
));
6722 for_each_queue(bp
, i
) {
6724 /* fastpath rx rings: rx_buf rx_desc rx_comp */
6725 BNX2X_FREE(bnx2x_fp(bp
, i
, rx_buf_ring
));
6726 BNX2X_PCI_FREE(bnx2x_fp(bp
, i
, rx_desc_ring
),
6727 bnx2x_fp(bp
, i
, rx_desc_mapping
),
6728 sizeof(struct eth_rx_bd
) * NUM_RX_BD
);
6730 BNX2X_PCI_FREE(bnx2x_fp(bp
, i
, rx_comp_ring
),
6731 bnx2x_fp(bp
, i
, rx_comp_mapping
),
6732 sizeof(struct eth_fast_path_rx_cqe
) *
6736 BNX2X_FREE(bnx2x_fp(bp
, i
, rx_page_ring
));
6737 BNX2X_PCI_FREE(bnx2x_fp(bp
, i
, rx_sge_ring
),
6738 bnx2x_fp(bp
, i
, rx_sge_mapping
),
6739 BCM_PAGE_SIZE
* NUM_RX_SGE_PAGES
);
6742 for_each_queue(bp
, i
) {
6744 /* fastpath tx rings: tx_buf tx_desc */
6745 BNX2X_FREE(bnx2x_fp(bp
, i
, tx_buf_ring
));
6746 BNX2X_PCI_FREE(bnx2x_fp(bp
, i
, tx_desc_ring
),
6747 bnx2x_fp(bp
, i
, tx_desc_mapping
),
6748 sizeof(union eth_tx_bd_types
) * NUM_TX_BD
);
6750 /* end of fastpath */
6752 BNX2X_PCI_FREE(bp
->def_status_blk
, bp
->def_status_blk_mapping
,
6753 sizeof(struct host_def_status_block
));
6755 BNX2X_PCI_FREE(bp
->slowpath
, bp
->slowpath_mapping
,
6756 sizeof(struct bnx2x_slowpath
));
6759 BNX2X_PCI_FREE(bp
->t1
, bp
->t1_mapping
, 64*1024);
6760 BNX2X_PCI_FREE(bp
->t2
, bp
->t2_mapping
, 16*1024);
6761 BNX2X_PCI_FREE(bp
->timers
, bp
->timers_mapping
, 8*1024);
6762 BNX2X_PCI_FREE(bp
->qm
, bp
->qm_mapping
, 128*1024);
6763 BNX2X_PCI_FREE(bp
->cnic_sb
, bp
->cnic_sb_mapping
,
6764 sizeof(struct host_status_block
));
6766 BNX2X_PCI_FREE(bp
->spq
, bp
->spq_mapping
, BCM_PAGE_SIZE
);
6768 #undef BNX2X_PCI_FREE
6772 static int bnx2x_alloc_mem(struct bnx2x
*bp
)
6775 #define BNX2X_PCI_ALLOC(x, y, size) \
6777 x = pci_alloc_consistent(bp->pdev, size, y); \
6779 goto alloc_mem_err; \
6780 memset(x, 0, size); \
6783 #define BNX2X_ALLOC(x, size) \
6785 x = vmalloc(size); \
6787 goto alloc_mem_err; \
6788 memset(x, 0, size); \
6795 for_each_queue(bp
, i
) {
6796 bnx2x_fp(bp
, i
, bp
) = bp
;
6799 BNX2X_PCI_ALLOC(bnx2x_fp(bp
, i
, status_blk
),
6800 &bnx2x_fp(bp
, i
, status_blk_mapping
),
6801 sizeof(struct host_status_block
));
6804 for_each_queue(bp
, i
) {
6806 /* fastpath rx rings: rx_buf rx_desc rx_comp */
6807 BNX2X_ALLOC(bnx2x_fp(bp
, i
, rx_buf_ring
),
6808 sizeof(struct sw_rx_bd
) * NUM_RX_BD
);
6809 BNX2X_PCI_ALLOC(bnx2x_fp(bp
, i
, rx_desc_ring
),
6810 &bnx2x_fp(bp
, i
, rx_desc_mapping
),
6811 sizeof(struct eth_rx_bd
) * NUM_RX_BD
);
6813 BNX2X_PCI_ALLOC(bnx2x_fp(bp
, i
, rx_comp_ring
),
6814 &bnx2x_fp(bp
, i
, rx_comp_mapping
),
6815 sizeof(struct eth_fast_path_rx_cqe
) *
6819 BNX2X_ALLOC(bnx2x_fp(bp
, i
, rx_page_ring
),
6820 sizeof(struct sw_rx_page
) * NUM_RX_SGE
);
6821 BNX2X_PCI_ALLOC(bnx2x_fp(bp
, i
, rx_sge_ring
),
6822 &bnx2x_fp(bp
, i
, rx_sge_mapping
),
6823 BCM_PAGE_SIZE
* NUM_RX_SGE_PAGES
);
6826 for_each_queue(bp
, i
) {
6828 /* fastpath tx rings: tx_buf tx_desc */
6829 BNX2X_ALLOC(bnx2x_fp(bp
, i
, tx_buf_ring
),
6830 sizeof(struct sw_tx_bd
) * NUM_TX_BD
);
6831 BNX2X_PCI_ALLOC(bnx2x_fp(bp
, i
, tx_desc_ring
),
6832 &bnx2x_fp(bp
, i
, tx_desc_mapping
),
6833 sizeof(union eth_tx_bd_types
) * NUM_TX_BD
);
6835 /* end of fastpath */
6837 BNX2X_PCI_ALLOC(bp
->def_status_blk
, &bp
->def_status_blk_mapping
,
6838 sizeof(struct host_def_status_block
));
6840 BNX2X_PCI_ALLOC(bp
->slowpath
, &bp
->slowpath_mapping
,
6841 sizeof(struct bnx2x_slowpath
));
6844 BNX2X_PCI_ALLOC(bp
->t1
, &bp
->t1_mapping
, 64*1024);
6846 /* allocate searcher T2 table
6847 we allocate 1/4 of alloc num for T2
6848 (which is not entered into the ILT) */
6849 BNX2X_PCI_ALLOC(bp
->t2
, &bp
->t2_mapping
, 16*1024);
6851 /* Initialize T2 (for 1024 connections) */
6852 for (i
= 0; i
< 16*1024; i
+= 64)
6853 *(u64
*)((char *)bp
->t2
+ i
+ 56) = bp
->t2_mapping
+ i
+ 64;
6855 /* Timer block array (8*MAX_CONN) phys uncached for now 1024 conns */
6856 BNX2X_PCI_ALLOC(bp
->timers
, &bp
->timers_mapping
, 8*1024);
6858 /* QM queues (128*MAX_CONN) */
6859 BNX2X_PCI_ALLOC(bp
->qm
, &bp
->qm_mapping
, 128*1024);
6861 BNX2X_PCI_ALLOC(bp
->cnic_sb
, &bp
->cnic_sb_mapping
,
6862 sizeof(struct host_status_block
));
6865 /* Slow path ring */
6866 BNX2X_PCI_ALLOC(bp
->spq
, &bp
->spq_mapping
, BCM_PAGE_SIZE
);
6874 #undef BNX2X_PCI_ALLOC
6878 static void bnx2x_free_tx_skbs(struct bnx2x
*bp
)
6882 for_each_queue(bp
, i
) {
6883 struct bnx2x_fastpath
*fp
= &bp
->fp
[i
];
6885 u16 bd_cons
= fp
->tx_bd_cons
;
6886 u16 sw_prod
= fp
->tx_pkt_prod
;
6887 u16 sw_cons
= fp
->tx_pkt_cons
;
6889 while (sw_cons
!= sw_prod
) {
6890 bd_cons
= bnx2x_free_tx_pkt(bp
, fp
, TX_BD(sw_cons
));
6896 static void bnx2x_free_rx_skbs(struct bnx2x
*bp
)
6900 for_each_queue(bp
, j
) {
6901 struct bnx2x_fastpath
*fp
= &bp
->fp
[j
];
6903 for (i
= 0; i
< NUM_RX_BD
; i
++) {
6904 struct sw_rx_bd
*rx_buf
= &fp
->rx_buf_ring
[i
];
6905 struct sk_buff
*skb
= rx_buf
->skb
;
6910 pci_unmap_single(bp
->pdev
,
6911 pci_unmap_addr(rx_buf
, mapping
),
6912 bp
->rx_buf_size
, PCI_DMA_FROMDEVICE
);
6917 if (!fp
->disable_tpa
)
6918 bnx2x_free_tpa_pool(bp
, fp
, CHIP_IS_E1(bp
) ?
6919 ETH_MAX_AGGREGATION_QUEUES_E1
:
6920 ETH_MAX_AGGREGATION_QUEUES_E1H
);
6924 static void bnx2x_free_skbs(struct bnx2x
*bp
)
6926 bnx2x_free_tx_skbs(bp
);
6927 bnx2x_free_rx_skbs(bp
);
6930 static void bnx2x_free_msix_irqs(struct bnx2x
*bp
)
6934 free_irq(bp
->msix_table
[0].vector
, bp
->dev
);
6935 DP(NETIF_MSG_IFDOWN
, "released sp irq (%d)\n",
6936 bp
->msix_table
[0].vector
);
6941 for_each_queue(bp
, i
) {
6942 DP(NETIF_MSG_IFDOWN
, "about to release fp #%d->%d irq "
6943 "state %x\n", i
, bp
->msix_table
[i
+ offset
].vector
,
6944 bnx2x_fp(bp
, i
, state
));
6946 free_irq(bp
->msix_table
[i
+ offset
].vector
, &bp
->fp
[i
]);
6950 static void bnx2x_free_irq(struct bnx2x
*bp
, bool disable_only
)
6952 if (bp
->flags
& USING_MSIX_FLAG
) {
6954 bnx2x_free_msix_irqs(bp
);
6955 pci_disable_msix(bp
->pdev
);
6956 bp
->flags
&= ~USING_MSIX_FLAG
;
6958 } else if (bp
->flags
& USING_MSI_FLAG
) {
6960 free_irq(bp
->pdev
->irq
, bp
->dev
);
6961 pci_disable_msi(bp
->pdev
);
6962 bp
->flags
&= ~USING_MSI_FLAG
;
6964 } else if (!disable_only
)
6965 free_irq(bp
->pdev
->irq
, bp
->dev
);
6968 static int bnx2x_enable_msix(struct bnx2x
*bp
)
6970 int i
, rc
, offset
= 1;
6973 bp
->msix_table
[0].entry
= igu_vec
;
6974 DP(NETIF_MSG_IFUP
, "msix_table[0].entry = %d (slowpath)\n", igu_vec
);
6977 igu_vec
= BP_L_ID(bp
) + offset
;
6978 bp
->msix_table
[1].entry
= igu_vec
;
6979 DP(NETIF_MSG_IFUP
, "msix_table[1].entry = %d (CNIC)\n", igu_vec
);
6982 for_each_queue(bp
, i
) {
6983 igu_vec
= BP_L_ID(bp
) + offset
+ i
;
6984 bp
->msix_table
[i
+ offset
].entry
= igu_vec
;
6985 DP(NETIF_MSG_IFUP
, "msix_table[%d].entry = %d "
6986 "(fastpath #%u)\n", i
+ offset
, igu_vec
, i
);
6989 rc
= pci_enable_msix(bp
->pdev
, &bp
->msix_table
[0],
6990 BNX2X_NUM_QUEUES(bp
) + offset
);
6992 DP(NETIF_MSG_IFUP
, "MSI-X is not attainable rc %d\n", rc
);
6996 bp
->flags
|= USING_MSIX_FLAG
;
7001 static int bnx2x_req_msix_irqs(struct bnx2x
*bp
)
7003 int i
, rc
, offset
= 1;
7005 rc
= request_irq(bp
->msix_table
[0].vector
, bnx2x_msix_sp_int
, 0,
7006 bp
->dev
->name
, bp
->dev
);
7008 BNX2X_ERR("request sp irq failed\n");
7015 for_each_queue(bp
, i
) {
7016 struct bnx2x_fastpath
*fp
= &bp
->fp
[i
];
7017 snprintf(fp
->name
, sizeof(fp
->name
), "%s-fp-%d",
7020 rc
= request_irq(bp
->msix_table
[i
+ offset
].vector
,
7021 bnx2x_msix_fp_int
, 0, fp
->name
, fp
);
7023 BNX2X_ERR("request fp #%d irq failed rc %d\n", i
, rc
);
7024 bnx2x_free_msix_irqs(bp
);
7028 fp
->state
= BNX2X_FP_STATE_IRQ
;
7031 i
= BNX2X_NUM_QUEUES(bp
);
7032 netdev_info(bp
->dev
, "using MSI-X IRQs: sp %d fp[%d] %d ... fp[%d] %d\n",
7033 bp
->msix_table
[0].vector
,
7034 0, bp
->msix_table
[offset
].vector
,
7035 i
- 1, bp
->msix_table
[offset
+ i
- 1].vector
);
7040 static int bnx2x_enable_msi(struct bnx2x
*bp
)
7044 rc
= pci_enable_msi(bp
->pdev
);
7046 DP(NETIF_MSG_IFUP
, "MSI is not attainable\n");
7049 bp
->flags
|= USING_MSI_FLAG
;
7054 static int bnx2x_req_irq(struct bnx2x
*bp
)
7056 unsigned long flags
;
7059 if (bp
->flags
& USING_MSI_FLAG
)
7062 flags
= IRQF_SHARED
;
7064 rc
= request_irq(bp
->pdev
->irq
, bnx2x_interrupt
, flags
,
7065 bp
->dev
->name
, bp
->dev
);
7067 bnx2x_fp(bp
, 0, state
) = BNX2X_FP_STATE_IRQ
;
7072 static void bnx2x_napi_enable(struct bnx2x
*bp
)
7076 for_each_queue(bp
, i
)
7077 napi_enable(&bnx2x_fp(bp
, i
, napi
));
7080 static void bnx2x_napi_disable(struct bnx2x
*bp
)
7084 for_each_queue(bp
, i
)
7085 napi_disable(&bnx2x_fp(bp
, i
, napi
));
7088 static void bnx2x_netif_start(struct bnx2x
*bp
)
7092 intr_sem
= atomic_dec_and_test(&bp
->intr_sem
);
7093 smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
7096 if (netif_running(bp
->dev
)) {
7097 bnx2x_napi_enable(bp
);
7098 bnx2x_int_enable(bp
);
7099 if (bp
->state
== BNX2X_STATE_OPEN
)
7100 netif_tx_wake_all_queues(bp
->dev
);
7105 static void bnx2x_netif_stop(struct bnx2x
*bp
, int disable_hw
)
7107 bnx2x_int_disable_sync(bp
, disable_hw
);
7108 bnx2x_napi_disable(bp
);
7109 netif_tx_disable(bp
->dev
);
7113 * Init service functions
7117 * Sets a MAC in a CAM for a few L2 Clients for E1 chip
7119 * @param bp driver descriptor
7120 * @param set set or clear an entry (1 or 0)
7121 * @param mac pointer to a buffer containing a MAC
7122 * @param cl_bit_vec bit vector of clients to register a MAC for
7123 * @param cam_offset offset in a CAM to use
7124 * @param with_bcast set broadcast MAC as well
7126 static void bnx2x_set_mac_addr_e1_gen(struct bnx2x
*bp
, int set
, u8
*mac
,
7127 u32 cl_bit_vec
, u8 cam_offset
,
7130 struct mac_configuration_cmd
*config
= bnx2x_sp(bp
, mac_config
);
7131 int port
= BP_PORT(bp
);
7134 * unicasts 0-31:port0 32-63:port1
7135 * multicast 64-127:port0 128-191:port1
7137 config
->hdr
.length
= 1 + (with_bcast
? 1 : 0);
7138 config
->hdr
.offset
= cam_offset
;
7139 config
->hdr
.client_id
= 0xff;
7140 config
->hdr
.reserved1
= 0;
7143 config
->config_table
[0].cam_entry
.msb_mac_addr
=
7144 swab16(*(u16
*)&mac
[0]);
7145 config
->config_table
[0].cam_entry
.middle_mac_addr
=
7146 swab16(*(u16
*)&mac
[2]);
7147 config
->config_table
[0].cam_entry
.lsb_mac_addr
=
7148 swab16(*(u16
*)&mac
[4]);
7149 config
->config_table
[0].cam_entry
.flags
= cpu_to_le16(port
);
7151 config
->config_table
[0].target_table_entry
.flags
= 0;
7153 CAM_INVALIDATE(config
->config_table
[0]);
7154 config
->config_table
[0].target_table_entry
.clients_bit_vector
=
7155 cpu_to_le32(cl_bit_vec
);
7156 config
->config_table
[0].target_table_entry
.vlan_id
= 0;
7158 DP(NETIF_MSG_IFUP
, "%s MAC (%04x:%04x:%04x)\n",
7159 (set
? "setting" : "clearing"),
7160 config
->config_table
[0].cam_entry
.msb_mac_addr
,
7161 config
->config_table
[0].cam_entry
.middle_mac_addr
,
7162 config
->config_table
[0].cam_entry
.lsb_mac_addr
);
7166 config
->config_table
[1].cam_entry
.msb_mac_addr
=
7167 cpu_to_le16(0xffff);
7168 config
->config_table
[1].cam_entry
.middle_mac_addr
=
7169 cpu_to_le16(0xffff);
7170 config
->config_table
[1].cam_entry
.lsb_mac_addr
=
7171 cpu_to_le16(0xffff);
7172 config
->config_table
[1].cam_entry
.flags
= cpu_to_le16(port
);
7174 config
->config_table
[1].target_table_entry
.flags
=
7175 TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST
;
7177 CAM_INVALIDATE(config
->config_table
[1]);
7178 config
->config_table
[1].target_table_entry
.clients_bit_vector
=
7179 cpu_to_le32(cl_bit_vec
);
7180 config
->config_table
[1].target_table_entry
.vlan_id
= 0;
7183 bnx2x_sp_post(bp
, RAMROD_CMD_ID_ETH_SET_MAC
, 0,
7184 U64_HI(bnx2x_sp_mapping(bp
, mac_config
)),
7185 U64_LO(bnx2x_sp_mapping(bp
, mac_config
)), 0);
7189 * Sets a MAC in a CAM for a few L2 Clients for E1H chip
7191 * @param bp driver descriptor
7192 * @param set set or clear an entry (1 or 0)
7193 * @param mac pointer to a buffer containing a MAC
7194 * @param cl_bit_vec bit vector of clients to register a MAC for
7195 * @param cam_offset offset in a CAM to use
7197 static void bnx2x_set_mac_addr_e1h_gen(struct bnx2x
*bp
, int set
, u8
*mac
,
7198 u32 cl_bit_vec
, u8 cam_offset
)
7200 struct mac_configuration_cmd_e1h
*config
=
7201 (struct mac_configuration_cmd_e1h
*)bnx2x_sp(bp
, mac_config
);
7203 config
->hdr
.length
= 1;
7204 config
->hdr
.offset
= cam_offset
;
7205 config
->hdr
.client_id
= 0xff;
7206 config
->hdr
.reserved1
= 0;
7209 config
->config_table
[0].msb_mac_addr
=
7210 swab16(*(u16
*)&mac
[0]);
7211 config
->config_table
[0].middle_mac_addr
=
7212 swab16(*(u16
*)&mac
[2]);
7213 config
->config_table
[0].lsb_mac_addr
=
7214 swab16(*(u16
*)&mac
[4]);
7215 config
->config_table
[0].clients_bit_vector
=
7216 cpu_to_le32(cl_bit_vec
);
7217 config
->config_table
[0].vlan_id
= 0;
7218 config
->config_table
[0].e1hov_id
= cpu_to_le16(bp
->e1hov
);
7220 config
->config_table
[0].flags
= BP_PORT(bp
);
7222 config
->config_table
[0].flags
=
7223 MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE
;
7225 DP(NETIF_MSG_IFUP
, "%s MAC (%04x:%04x:%04x) E1HOV %d CLID mask %d\n",
7226 (set
? "setting" : "clearing"),
7227 config
->config_table
[0].msb_mac_addr
,
7228 config
->config_table
[0].middle_mac_addr
,
7229 config
->config_table
[0].lsb_mac_addr
, bp
->e1hov
, cl_bit_vec
);
7231 bnx2x_sp_post(bp
, RAMROD_CMD_ID_ETH_SET_MAC
, 0,
7232 U64_HI(bnx2x_sp_mapping(bp
, mac_config
)),
7233 U64_LO(bnx2x_sp_mapping(bp
, mac_config
)), 0);
7236 static int bnx2x_wait_ramrod(struct bnx2x
*bp
, int state
, int idx
,
7237 int *state_p
, int poll
)
7239 /* can take a while if any port is running */
7242 DP(NETIF_MSG_IFUP
, "%s for state to become %x on IDX [%d]\n",
7243 poll
? "polling" : "waiting", state
, idx
);
7248 bnx2x_rx_int(bp
->fp
, 10);
7249 /* if index is different from 0
7250 * the reply for some commands will
7251 * be on the non default queue
7254 bnx2x_rx_int(&bp
->fp
[idx
], 10);
7257 mb(); /* state is changed by bnx2x_sp_event() */
7258 if (*state_p
== state
) {
7259 #ifdef BNX2X_STOP_ON_ERROR
7260 DP(NETIF_MSG_IFUP
, "exit (cnt %d)\n", 5000 - cnt
);
7272 BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
7273 poll
? "polling" : "waiting", state
, idx
);
7274 #ifdef BNX2X_STOP_ON_ERROR
7281 static void bnx2x_set_eth_mac_addr_e1h(struct bnx2x
*bp
, int set
)
7283 bp
->set_mac_pending
++;
7286 bnx2x_set_mac_addr_e1h_gen(bp
, set
, bp
->dev
->dev_addr
,
7287 (1 << bp
->fp
->cl_id
), BP_FUNC(bp
));
7289 /* Wait for a completion */
7290 bnx2x_wait_ramrod(bp
, 0, 0, &bp
->set_mac_pending
, set
? 0 : 1);
7293 static void bnx2x_set_eth_mac_addr_e1(struct bnx2x
*bp
, int set
)
7295 bp
->set_mac_pending
++;
7298 bnx2x_set_mac_addr_e1_gen(bp
, set
, bp
->dev
->dev_addr
,
7299 (1 << bp
->fp
->cl_id
), (BP_PORT(bp
) ? 32 : 0),
7302 /* Wait for a completion */
7303 bnx2x_wait_ramrod(bp
, 0, 0, &bp
->set_mac_pending
, set
? 0 : 1);
7308 * Set iSCSI MAC(s) at the next enties in the CAM after the ETH
7309 * MAC(s). This function will wait until the ramdord completion
7312 * @param bp driver handle
7313 * @param set set or clear the CAM entry
7315 * @return 0 if cussess, -ENODEV if ramrod doesn't return.
7317 static int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x
*bp
, int set
)
7319 u32 cl_bit_vec
= (1 << BCM_ISCSI_ETH_CL_ID
);
7321 bp
->set_mac_pending
++;
7324 /* Send a SET_MAC ramrod */
7326 bnx2x_set_mac_addr_e1_gen(bp
, set
, bp
->iscsi_mac
,
7327 cl_bit_vec
, (BP_PORT(bp
) ? 32 : 0) + 2,
7330 /* CAM allocation for E1H
7331 * unicasts: by func number
7332 * multicast: 20+FUNC*20, 20 each
7334 bnx2x_set_mac_addr_e1h_gen(bp
, set
, bp
->iscsi_mac
,
7335 cl_bit_vec
, E1H_FUNC_MAX
+ BP_FUNC(bp
));
7337 /* Wait for a completion when setting */
7338 bnx2x_wait_ramrod(bp
, 0, 0, &bp
->set_mac_pending
, set
? 0 : 1);
7344 static int bnx2x_setup_leading(struct bnx2x
*bp
)
7348 /* reset IGU state */
7349 bnx2x_ack_sb(bp
, bp
->fp
[0].sb_id
, CSTORM_ID
, 0, IGU_INT_ENABLE
, 0);
7352 bnx2x_sp_post(bp
, RAMROD_CMD_ID_ETH_PORT_SETUP
, 0, 0, 0, 0);
7354 /* Wait for completion */
7355 rc
= bnx2x_wait_ramrod(bp
, BNX2X_STATE_OPEN
, 0, &(bp
->state
), 0);
7360 static int bnx2x_setup_multi(struct bnx2x
*bp
, int index
)
7362 struct bnx2x_fastpath
*fp
= &bp
->fp
[index
];
7364 /* reset IGU state */
7365 bnx2x_ack_sb(bp
, fp
->sb_id
, CSTORM_ID
, 0, IGU_INT_ENABLE
, 0);
7368 fp
->state
= BNX2X_FP_STATE_OPENING
;
7369 bnx2x_sp_post(bp
, RAMROD_CMD_ID_ETH_CLIENT_SETUP
, index
, 0,
7372 /* Wait for completion */
7373 return bnx2x_wait_ramrod(bp
, BNX2X_FP_STATE_OPEN
, index
,
7377 static int bnx2x_poll(struct napi_struct
*napi
, int budget
);
7379 static void bnx2x_set_num_queues_msix(struct bnx2x
*bp
)
7382 switch (bp
->multi_mode
) {
7383 case ETH_RSS_MODE_DISABLED
:
7387 case ETH_RSS_MODE_REGULAR
:
7389 bp
->num_queues
= min_t(u32
, num_queues
,
7390 BNX2X_MAX_QUEUES(bp
));
7392 bp
->num_queues
= min_t(u32
, num_online_cpus(),
7393 BNX2X_MAX_QUEUES(bp
));
7403 static int bnx2x_set_num_queues(struct bnx2x
*bp
)
7411 DP(NETIF_MSG_IFUP
, "set number of queues to 1\n");
7416 /* Set number of queues according to bp->multi_mode value */
7417 bnx2x_set_num_queues_msix(bp
);
7419 DP(NETIF_MSG_IFUP
, "set number of queues to %d\n",
7422 /* if we can't use MSI-X we only need one fp,
7423 * so try to enable MSI-X with the requested number of fp's
7424 * and fallback to MSI or legacy INTx with one fp
7426 rc
= bnx2x_enable_msix(bp
);
7428 /* failed to enable MSI-X */
7432 bp
->dev
->real_num_tx_queues
= bp
->num_queues
;
7437 static int bnx2x_cnic_notify(struct bnx2x
*bp
, int cmd
);
7438 static void bnx2x_setup_cnic_irq_info(struct bnx2x
*bp
);
7441 /* must be called with rtnl_lock */
7442 static int bnx2x_nic_load(struct bnx2x
*bp
, int load_mode
)
7447 #ifdef BNX2X_STOP_ON_ERROR
7448 if (unlikely(bp
->panic
))
7452 bp
->state
= BNX2X_STATE_OPENING_WAIT4_LOAD
;
7454 rc
= bnx2x_set_num_queues(bp
);
7456 if (bnx2x_alloc_mem(bp
)) {
7457 bnx2x_free_irq(bp
, true);
7461 for_each_queue(bp
, i
)
7462 bnx2x_fp(bp
, i
, disable_tpa
) =
7463 ((bp
->flags
& TPA_ENABLE_FLAG
) == 0);
7465 for_each_queue(bp
, i
)
7466 netif_napi_add(bp
->dev
, &bnx2x_fp(bp
, i
, napi
),
7469 bnx2x_napi_enable(bp
);
7471 if (bp
->flags
& USING_MSIX_FLAG
) {
7472 rc
= bnx2x_req_msix_irqs(bp
);
7474 bnx2x_free_irq(bp
, true);
7478 /* Fall to INTx if failed to enable MSI-X due to lack of
7479 memory (in bnx2x_set_num_queues()) */
7480 if ((rc
!= -ENOMEM
) && (int_mode
!= INT_MODE_INTx
))
7481 bnx2x_enable_msi(bp
);
7483 rc
= bnx2x_req_irq(bp
);
7485 BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc
);
7486 bnx2x_free_irq(bp
, true);
7489 if (bp
->flags
& USING_MSI_FLAG
) {
7490 bp
->dev
->irq
= bp
->pdev
->irq
;
7491 netdev_info(bp
->dev
, "using MSI IRQ %d\n",
7496 /* Send LOAD_REQUEST command to MCP
7497 Returns the type of LOAD command:
7498 if it is the first port to be initialized
7499 common blocks should be initialized, otherwise - not
7501 if (!BP_NOMCP(bp
)) {
7502 load_code
= bnx2x_fw_command(bp
, DRV_MSG_CODE_LOAD_REQ
);
7504 BNX2X_ERR("MCP response failure, aborting\n");
7508 if (load_code
== FW_MSG_CODE_DRV_LOAD_REFUSED
) {
7509 rc
= -EBUSY
; /* other port in diagnostic mode */
7514 int port
= BP_PORT(bp
);
7516 DP(NETIF_MSG_IFUP
, "NO MCP - load counts %d, %d, %d\n",
7517 load_count
[0], load_count
[1], load_count
[2]);
7519 load_count
[1 + port
]++;
7520 DP(NETIF_MSG_IFUP
, "NO MCP - new load counts %d, %d, %d\n",
7521 load_count
[0], load_count
[1], load_count
[2]);
7522 if (load_count
[0] == 1)
7523 load_code
= FW_MSG_CODE_DRV_LOAD_COMMON
;
7524 else if (load_count
[1 + port
] == 1)
7525 load_code
= FW_MSG_CODE_DRV_LOAD_PORT
;
7527 load_code
= FW_MSG_CODE_DRV_LOAD_FUNCTION
;
7530 if ((load_code
== FW_MSG_CODE_DRV_LOAD_COMMON
) ||
7531 (load_code
== FW_MSG_CODE_DRV_LOAD_PORT
))
7535 DP(NETIF_MSG_LINK
, "pmf %d\n", bp
->port
.pmf
);
7538 rc
= bnx2x_init_hw(bp
, load_code
);
7540 BNX2X_ERR("HW init failed, aborting\n");
7541 bnx2x_fw_command(bp
, DRV_MSG_CODE_LOAD_DONE
);
7542 bnx2x_fw_command(bp
, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP
);
7543 bnx2x_fw_command(bp
, DRV_MSG_CODE_UNLOAD_DONE
);
7547 /* Setup NIC internals and enable interrupts */
7548 bnx2x_nic_init(bp
, load_code
);
7550 if ((load_code
== FW_MSG_CODE_DRV_LOAD_COMMON
) &&
7551 (bp
->common
.shmem2_base
))
7552 SHMEM2_WR(bp
, dcc_support
,
7553 (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV
|
7554 SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV
));
7556 /* Send LOAD_DONE command to MCP */
7557 if (!BP_NOMCP(bp
)) {
7558 load_code
= bnx2x_fw_command(bp
, DRV_MSG_CODE_LOAD_DONE
);
7560 BNX2X_ERR("MCP response failure, aborting\n");
7566 bp
->state
= BNX2X_STATE_OPENING_WAIT4_PORT
;
7568 rc
= bnx2x_setup_leading(bp
);
7570 BNX2X_ERR("Setup leading failed!\n");
7571 #ifndef BNX2X_STOP_ON_ERROR
7579 if (CHIP_IS_E1H(bp
))
7580 if (bp
->mf_config
& FUNC_MF_CFG_FUNC_DISABLED
) {
7581 DP(NETIF_MSG_IFUP
, "mf_cfg function disabled\n");
7582 bp
->flags
|= MF_FUNC_DIS
;
7585 if (bp
->state
== BNX2X_STATE_OPEN
) {
7587 /* Enable Timer scan */
7588 REG_WR(bp
, TM_REG_EN_LINEAR0_TIMER
+ BP_PORT(bp
)*4, 1);
7590 for_each_nondefault_queue(bp
, i
) {
7591 rc
= bnx2x_setup_multi(bp
, i
);
7601 bnx2x_set_eth_mac_addr_e1(bp
, 1);
7603 bnx2x_set_eth_mac_addr_e1h(bp
, 1);
7605 /* Set iSCSI L2 MAC */
7606 mutex_lock(&bp
->cnic_mutex
);
7607 if (bp
->cnic_eth_dev
.drv_state
& CNIC_DRV_STATE_REGD
) {
7608 bnx2x_set_iscsi_eth_mac_addr(bp
, 1);
7609 bp
->cnic_flags
|= BNX2X_CNIC_FLAG_MAC_SET
;
7610 bnx2x_init_sb(bp
, bp
->cnic_sb
, bp
->cnic_sb_mapping
,
7613 mutex_unlock(&bp
->cnic_mutex
);
7618 bnx2x_initial_phy_init(bp
, load_mode
);
7620 /* Start fast path */
7621 switch (load_mode
) {
7623 if (bp
->state
== BNX2X_STATE_OPEN
) {
7624 /* Tx queue should be only reenabled */
7625 netif_tx_wake_all_queues(bp
->dev
);
7627 /* Initialize the receive filter. */
7628 bnx2x_set_rx_mode(bp
->dev
);
7632 netif_tx_start_all_queues(bp
->dev
);
7633 if (bp
->state
!= BNX2X_STATE_OPEN
)
7634 netif_tx_disable(bp
->dev
);
7635 /* Initialize the receive filter. */
7636 bnx2x_set_rx_mode(bp
->dev
);
7640 /* Initialize the receive filter. */
7641 bnx2x_set_rx_mode(bp
->dev
);
7642 bp
->state
= BNX2X_STATE_DIAG
;
7650 bnx2x__link_status_update(bp
);
7652 /* start the timer */
7653 mod_timer(&bp
->timer
, jiffies
+ bp
->current_interval
);
7656 bnx2x_setup_cnic_irq_info(bp
);
7657 if (bp
->state
== BNX2X_STATE_OPEN
)
7658 bnx2x_cnic_notify(bp
, CNIC_CTL_START_CMD
);
7665 /* Disable Timer scan */
7666 REG_WR(bp
, TM_REG_EN_LINEAR0_TIMER
+ BP_PORT(bp
)*4, 0);
7669 bnx2x_int_disable_sync(bp
, 1);
7670 if (!BP_NOMCP(bp
)) {
7671 bnx2x_fw_command(bp
, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP
);
7672 bnx2x_fw_command(bp
, DRV_MSG_CODE_UNLOAD_DONE
);
7675 /* Free SKBs, SGEs, TPA pool and driver internals */
7676 bnx2x_free_skbs(bp
);
7677 for_each_queue(bp
, i
)
7678 bnx2x_free_rx_sge_range(bp
, bp
->fp
+ i
, NUM_RX_SGE
);
7681 bnx2x_free_irq(bp
, false);
7683 bnx2x_napi_disable(bp
);
7684 for_each_queue(bp
, i
)
7685 netif_napi_del(&bnx2x_fp(bp
, i
, napi
));
7691 static int bnx2x_stop_multi(struct bnx2x
*bp
, int index
)
7693 struct bnx2x_fastpath
*fp
= &bp
->fp
[index
];
7696 /* halt the connection */
7697 fp
->state
= BNX2X_FP_STATE_HALTING
;
7698 bnx2x_sp_post(bp
, RAMROD_CMD_ID_ETH_HALT
, index
, 0, fp
->cl_id
, 0);
7700 /* Wait for completion */
7701 rc
= bnx2x_wait_ramrod(bp
, BNX2X_FP_STATE_HALTED
, index
,
7703 if (rc
) /* timeout */
7706 /* delete cfc entry */
7707 bnx2x_sp_post(bp
, RAMROD_CMD_ID_ETH_CFC_DEL
, index
, 0, 0, 1);
7709 /* Wait for completion */
7710 rc
= bnx2x_wait_ramrod(bp
, BNX2X_FP_STATE_CLOSED
, index
,
7715 static int bnx2x_stop_leading(struct bnx2x
*bp
)
7717 __le16 dsb_sp_prod_idx
;
7718 /* if the other port is handling traffic,
7719 this can take a lot of time */
7725 /* Send HALT ramrod */
7726 bp
->fp
[0].state
= BNX2X_FP_STATE_HALTING
;
7727 bnx2x_sp_post(bp
, RAMROD_CMD_ID_ETH_HALT
, 0, 0, bp
->fp
->cl_id
, 0);
7729 /* Wait for completion */
7730 rc
= bnx2x_wait_ramrod(bp
, BNX2X_FP_STATE_HALTED
, 0,
7731 &(bp
->fp
[0].state
), 1);
7732 if (rc
) /* timeout */
7735 dsb_sp_prod_idx
= *bp
->dsb_sp_prod
;
7737 /* Send PORT_DELETE ramrod */
7738 bnx2x_sp_post(bp
, RAMROD_CMD_ID_ETH_PORT_DEL
, 0, 0, 0, 1);
7740 /* Wait for completion to arrive on default status block
7741 we are going to reset the chip anyway
7742 so there is not much to do if this times out
7744 while (dsb_sp_prod_idx
== *bp
->dsb_sp_prod
) {
7746 DP(NETIF_MSG_IFDOWN
, "timeout waiting for port del "
7747 "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
7748 *bp
->dsb_sp_prod
, dsb_sp_prod_idx
);
7749 #ifdef BNX2X_STOP_ON_ERROR
7757 rmb(); /* Refresh the dsb_sp_prod */
7759 bp
->state
= BNX2X_STATE_CLOSING_WAIT4_UNLOAD
;
7760 bp
->fp
[0].state
= BNX2X_FP_STATE_CLOSED
;
7765 static void bnx2x_reset_func(struct bnx2x
*bp
)
7767 int port
= BP_PORT(bp
);
7768 int func
= BP_FUNC(bp
);
7772 REG_WR(bp
, HC_REG_LEADING_EDGE_0
+ port
*8, 0);
7773 REG_WR(bp
, HC_REG_TRAILING_EDGE_0
+ port
*8, 0);
7776 /* Disable Timer scan */
7777 REG_WR(bp
, TM_REG_EN_LINEAR0_TIMER
+ port
*4, 0);
7779 * Wait for at least 10ms and up to 2 second for the timers scan to
7782 for (i
= 0; i
< 200; i
++) {
7784 if (!REG_RD(bp
, TM_REG_LIN0_SCAN_ON
+ port
*4))
7789 base
= FUNC_ILT_BASE(func
);
7790 for (i
= base
; i
< base
+ ILT_PER_FUNC
; i
++)
7791 bnx2x_ilt_wr(bp
, i
, 0);
7794 static void bnx2x_reset_port(struct bnx2x
*bp
)
7796 int port
= BP_PORT(bp
);
7799 REG_WR(bp
, NIG_REG_MASK_INTERRUPT_PORT0
+ port
*4, 0);
7801 /* Do not rcv packets to BRB */
7802 REG_WR(bp
, NIG_REG_LLH0_BRB1_DRV_MASK
+ port
*4, 0x0);
7803 /* Do not direct rcv packets that are not for MCP to the BRB */
7804 REG_WR(bp
, (port
? NIG_REG_LLH1_BRB1_NOT_MCP
:
7805 NIG_REG_LLH0_BRB1_NOT_MCP
), 0x0);
7808 REG_WR(bp
, MISC_REG_AEU_MASK_ATTN_FUNC_0
+ port
*4, 0);
7811 /* Check for BRB port occupancy */
7812 val
= REG_RD(bp
, BRB1_REG_PORT_NUM_OCC_BLOCKS_0
+ port
*4);
7814 DP(NETIF_MSG_IFDOWN
,
7815 "BRB1 is not empty %d blocks are occupied\n", val
);
7817 /* TODO: Close Doorbell port? */
7820 static void bnx2x_reset_chip(struct bnx2x
*bp
, u32 reset_code
)
7822 DP(BNX2X_MSG_MCP
, "function %d reset_code %x\n",
7823 BP_FUNC(bp
), reset_code
);
7825 switch (reset_code
) {
7826 case FW_MSG_CODE_DRV_UNLOAD_COMMON
:
7827 bnx2x_reset_port(bp
);
7828 bnx2x_reset_func(bp
);
7829 bnx2x_reset_common(bp
);
7832 case FW_MSG_CODE_DRV_UNLOAD_PORT
:
7833 bnx2x_reset_port(bp
);
7834 bnx2x_reset_func(bp
);
7837 case FW_MSG_CODE_DRV_UNLOAD_FUNCTION
:
7838 bnx2x_reset_func(bp
);
7842 BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code
);
7847 /* must be called with rtnl_lock */
7848 static int bnx2x_nic_unload(struct bnx2x
*bp
, int unload_mode
)
7850 int port
= BP_PORT(bp
);
7855 bnx2x_cnic_notify(bp
, CNIC_CTL_STOP_CMD
);
7857 bp
->state
= BNX2X_STATE_CLOSING_WAIT4_HALT
;
7859 /* Set "drop all" */
7860 bp
->rx_mode
= BNX2X_RX_MODE_NONE
;
7861 bnx2x_set_storm_rx_mode(bp
);
7863 /* Disable HW interrupts, NAPI and Tx */
7864 bnx2x_netif_stop(bp
, 1);
7866 del_timer_sync(&bp
->timer
);
7867 SHMEM_WR(bp
, func_mb
[BP_FUNC(bp
)].drv_pulse_mb
,
7868 (DRV_PULSE_ALWAYS_ALIVE
| bp
->fw_drv_pulse_wr_seq
));
7869 bnx2x_stats_handle(bp
, STATS_EVENT_STOP
);
7872 bnx2x_free_irq(bp
, false);
7874 /* Wait until tx fastpath tasks complete */
7875 for_each_queue(bp
, i
) {
7876 struct bnx2x_fastpath
*fp
= &bp
->fp
[i
];
7879 while (bnx2x_has_tx_work_unload(fp
)) {
7883 BNX2X_ERR("timeout waiting for queue[%d]\n",
7885 #ifdef BNX2X_STOP_ON_ERROR
7896 /* Give HW time to discard old tx messages */
7899 if (CHIP_IS_E1(bp
)) {
7900 struct mac_configuration_cmd
*config
=
7901 bnx2x_sp(bp
, mcast_config
);
7903 bnx2x_set_eth_mac_addr_e1(bp
, 0);
7905 for (i
= 0; i
< config
->hdr
.length
; i
++)
7906 CAM_INVALIDATE(config
->config_table
[i
]);
7908 config
->hdr
.length
= i
;
7909 if (CHIP_REV_IS_SLOW(bp
))
7910 config
->hdr
.offset
= BNX2X_MAX_EMUL_MULTI
*(1 + port
);
7912 config
->hdr
.offset
= BNX2X_MAX_MULTICAST
*(1 + port
);
7913 config
->hdr
.client_id
= bp
->fp
->cl_id
;
7914 config
->hdr
.reserved1
= 0;
7916 bp
->set_mac_pending
++;
7919 bnx2x_sp_post(bp
, RAMROD_CMD_ID_ETH_SET_MAC
, 0,
7920 U64_HI(bnx2x_sp_mapping(bp
, mcast_config
)),
7921 U64_LO(bnx2x_sp_mapping(bp
, mcast_config
)), 0);
7924 REG_WR(bp
, NIG_REG_LLH0_FUNC_EN
+ port
*8, 0);
7926 bnx2x_set_eth_mac_addr_e1h(bp
, 0);
7928 for (i
= 0; i
< MC_HASH_SIZE
; i
++)
7929 REG_WR(bp
, MC_HASH_OFFSET(bp
, i
), 0);
7931 REG_WR(bp
, MISC_REG_E1HMF_MODE
, 0);
7934 /* Clear iSCSI L2 MAC */
7935 mutex_lock(&bp
->cnic_mutex
);
7936 if (bp
->cnic_flags
& BNX2X_CNIC_FLAG_MAC_SET
) {
7937 bnx2x_set_iscsi_eth_mac_addr(bp
, 0);
7938 bp
->cnic_flags
&= ~BNX2X_CNIC_FLAG_MAC_SET
;
7940 mutex_unlock(&bp
->cnic_mutex
);
7943 if (unload_mode
== UNLOAD_NORMAL
)
7944 reset_code
= DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS
;
7946 else if (bp
->flags
& NO_WOL_FLAG
)
7947 reset_code
= DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP
;
7950 u32 emac_base
= port
? GRCBASE_EMAC1
: GRCBASE_EMAC0
;
7951 u8
*mac_addr
= bp
->dev
->dev_addr
;
7953 /* The mac address is written to entries 1-4 to
7954 preserve entry 0 which is used by the PMF */
7955 u8 entry
= (BP_E1HVN(bp
) + 1)*8;
7957 val
= (mac_addr
[0] << 8) | mac_addr
[1];
7958 EMAC_WR(bp
, EMAC_REG_EMAC_MAC_MATCH
+ entry
, val
);
7960 val
= (mac_addr
[2] << 24) | (mac_addr
[3] << 16) |
7961 (mac_addr
[4] << 8) | mac_addr
[5];
7962 EMAC_WR(bp
, EMAC_REG_EMAC_MAC_MATCH
+ entry
+ 4, val
);
7964 reset_code
= DRV_MSG_CODE_UNLOAD_REQ_WOL_EN
;
7967 reset_code
= DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS
;
7969 /* Close multi and leading connections
7970 Completions for ramrods are collected in a synchronous way */
7971 for_each_nondefault_queue(bp
, i
)
7972 if (bnx2x_stop_multi(bp
, i
))
7975 rc
= bnx2x_stop_leading(bp
);
7977 BNX2X_ERR("Stop leading failed!\n");
7978 #ifdef BNX2X_STOP_ON_ERROR
7987 reset_code
= bnx2x_fw_command(bp
, reset_code
);
7989 DP(NETIF_MSG_IFDOWN
, "NO MCP - load counts %d, %d, %d\n",
7990 load_count
[0], load_count
[1], load_count
[2]);
7992 load_count
[1 + port
]--;
7993 DP(NETIF_MSG_IFDOWN
, "NO MCP - new load counts %d, %d, %d\n",
7994 load_count
[0], load_count
[1], load_count
[2]);
7995 if (load_count
[0] == 0)
7996 reset_code
= FW_MSG_CODE_DRV_UNLOAD_COMMON
;
7997 else if (load_count
[1 + port
] == 0)
7998 reset_code
= FW_MSG_CODE_DRV_UNLOAD_PORT
;
8000 reset_code
= FW_MSG_CODE_DRV_UNLOAD_FUNCTION
;
8003 if ((reset_code
== FW_MSG_CODE_DRV_UNLOAD_COMMON
) ||
8004 (reset_code
== FW_MSG_CODE_DRV_UNLOAD_PORT
))
8005 bnx2x__link_reset(bp
);
8007 /* Reset the chip */
8008 bnx2x_reset_chip(bp
, reset_code
);
8010 /* Report UNLOAD_DONE to MCP */
8012 bnx2x_fw_command(bp
, DRV_MSG_CODE_UNLOAD_DONE
);
8016 /* Free SKBs, SGEs, TPA pool and driver internals */
8017 bnx2x_free_skbs(bp
);
8018 for_each_queue(bp
, i
)
8019 bnx2x_free_rx_sge_range(bp
, bp
->fp
+ i
, NUM_RX_SGE
);
8020 for_each_queue(bp
, i
)
8021 netif_napi_del(&bnx2x_fp(bp
, i
, napi
));
8024 bp
->state
= BNX2X_STATE_CLOSED
;
8026 netif_carrier_off(bp
->dev
);
8031 static void bnx2x_reset_task(struct work_struct
*work
)
8033 struct bnx2x
*bp
= container_of(work
, struct bnx2x
, reset_task
);
8035 #ifdef BNX2X_STOP_ON_ERROR
8036 BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
8037 " so reset not done to allow debug dump,\n"
8038 " you will need to reboot when done\n");
8044 if (!netif_running(bp
->dev
))
8045 goto reset_task_exit
;
8047 bnx2x_nic_unload(bp
, UNLOAD_NORMAL
);
8048 bnx2x_nic_load(bp
, LOAD_NORMAL
);
8054 /* end of nic load/unload */
8059 * Init service functions
8062 static inline u32
bnx2x_get_pretend_reg(struct bnx2x
*bp
, int func
)
8065 case 0: return PXP2_REG_PGL_PRETEND_FUNC_F0
;
8066 case 1: return PXP2_REG_PGL_PRETEND_FUNC_F1
;
8067 case 2: return PXP2_REG_PGL_PRETEND_FUNC_F2
;
8068 case 3: return PXP2_REG_PGL_PRETEND_FUNC_F3
;
8069 case 4: return PXP2_REG_PGL_PRETEND_FUNC_F4
;
8070 case 5: return PXP2_REG_PGL_PRETEND_FUNC_F5
;
8071 case 6: return PXP2_REG_PGL_PRETEND_FUNC_F6
;
8072 case 7: return PXP2_REG_PGL_PRETEND_FUNC_F7
;
8074 BNX2X_ERR("Unsupported function index: %d\n", func
);
8079 static void bnx2x_undi_int_disable_e1h(struct bnx2x
*bp
, int orig_func
)
8081 u32 reg
= bnx2x_get_pretend_reg(bp
, orig_func
), new_val
;
8083 /* Flush all outstanding writes */
8086 /* Pretend to be function 0 */
8088 /* Flush the GRC transaction (in the chip) */
8089 new_val
= REG_RD(bp
, reg
);
8091 BNX2X_ERR("Hmmm... Pretend register wasn't updated: (0,%d)!\n",
8096 /* From now we are in the "like-E1" mode */
8097 bnx2x_int_disable(bp
);
8099 /* Flush all outstanding writes */
8102 /* Restore the original funtion settings */
8103 REG_WR(bp
, reg
, orig_func
);
8104 new_val
= REG_RD(bp
, reg
);
8105 if (new_val
!= orig_func
) {
8106 BNX2X_ERR("Hmmm... Pretend register wasn't updated: (%d,%d)!\n",
8107 orig_func
, new_val
);
8112 static inline void bnx2x_undi_int_disable(struct bnx2x
*bp
, int func
)
8114 if (CHIP_IS_E1H(bp
))
8115 bnx2x_undi_int_disable_e1h(bp
, func
);
8117 bnx2x_int_disable(bp
);
8120 static void __devinit
bnx2x_undi_unload(struct bnx2x
*bp
)
8124 /* Check if there is any driver already loaded */
8125 val
= REG_RD(bp
, MISC_REG_UNPREPARED
);
8127 /* Check if it is the UNDI driver
8128 * UNDI driver initializes CID offset for normal bell to 0x7
8130 bnx2x_acquire_hw_lock(bp
, HW_LOCK_RESOURCE_UNDI
);
8131 val
= REG_RD(bp
, DORQ_REG_NORM_CID_OFST
);
8133 u32 reset_code
= DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS
;
8135 int func
= BP_FUNC(bp
);
8139 /* clear the UNDI indication */
8140 REG_WR(bp
, DORQ_REG_NORM_CID_OFST
, 0);
8142 BNX2X_DEV_INFO("UNDI is active! reset device\n");
8144 /* try unload UNDI on port 0 */
8147 (SHMEM_RD(bp
, func_mb
[bp
->func
].drv_mb_header
) &
8148 DRV_MSG_SEQ_NUMBER_MASK
);
8149 reset_code
= bnx2x_fw_command(bp
, reset_code
);
8151 /* if UNDI is loaded on the other port */
8152 if (reset_code
!= FW_MSG_CODE_DRV_UNLOAD_COMMON
) {
8154 /* send "DONE" for previous unload */
8155 bnx2x_fw_command(bp
, DRV_MSG_CODE_UNLOAD_DONE
);
8157 /* unload UNDI on port 1 */
8160 (SHMEM_RD(bp
, func_mb
[bp
->func
].drv_mb_header
) &
8161 DRV_MSG_SEQ_NUMBER_MASK
);
8162 reset_code
= DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS
;
8164 bnx2x_fw_command(bp
, reset_code
);
8167 /* now it's safe to release the lock */
8168 bnx2x_release_hw_lock(bp
, HW_LOCK_RESOURCE_UNDI
);
8170 bnx2x_undi_int_disable(bp
, func
);
8172 /* close input traffic and wait for it */
8173 /* Do not rcv packets to BRB */
8175 (BP_PORT(bp
) ? NIG_REG_LLH1_BRB1_DRV_MASK
:
8176 NIG_REG_LLH0_BRB1_DRV_MASK
), 0x0);
8177 /* Do not direct rcv packets that are not for MCP to
8180 (BP_PORT(bp
) ? NIG_REG_LLH1_BRB1_NOT_MCP
:
8181 NIG_REG_LLH0_BRB1_NOT_MCP
), 0x0);
8184 (BP_PORT(bp
) ? MISC_REG_AEU_MASK_ATTN_FUNC_1
:
8185 MISC_REG_AEU_MASK_ATTN_FUNC_0
), 0);
8188 /* save NIG port swap info */
8189 swap_val
= REG_RD(bp
, NIG_REG_PORT_SWAP
);
8190 swap_en
= REG_RD(bp
, NIG_REG_STRAP_OVERRIDE
);
8193 GRCBASE_MISC
+ MISC_REGISTERS_RESET_REG_1_CLEAR
,
8196 GRCBASE_MISC
+ MISC_REGISTERS_RESET_REG_2_CLEAR
,
8198 /* take the NIG out of reset and restore swap values */
8200 GRCBASE_MISC
+ MISC_REGISTERS_RESET_REG_1_SET
,
8201 MISC_REGISTERS_RESET_REG_1_RST_NIG
);
8202 REG_WR(bp
, NIG_REG_PORT_SWAP
, swap_val
);
8203 REG_WR(bp
, NIG_REG_STRAP_OVERRIDE
, swap_en
);
8205 /* send unload done to the MCP */
8206 bnx2x_fw_command(bp
, DRV_MSG_CODE_UNLOAD_DONE
);
8208 /* restore our func and fw_seq */
8211 (SHMEM_RD(bp
, func_mb
[bp
->func
].drv_mb_header
) &
8212 DRV_MSG_SEQ_NUMBER_MASK
);
8215 bnx2x_release_hw_lock(bp
, HW_LOCK_RESOURCE_UNDI
);
8219 static void __devinit
bnx2x_get_common_hwinfo(struct bnx2x
*bp
)
8221 u32 val
, val2
, val3
, val4
, id
;
8224 /* Get the chip revision id and number. */
8225 /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
8226 val
= REG_RD(bp
, MISC_REG_CHIP_NUM
);
8227 id
= ((val
& 0xffff) << 16);
8228 val
= REG_RD(bp
, MISC_REG_CHIP_REV
);
8229 id
|= ((val
& 0xf) << 12);
8230 val
= REG_RD(bp
, MISC_REG_CHIP_METAL
);
8231 id
|= ((val
& 0xff) << 4);
8232 val
= REG_RD(bp
, MISC_REG_BOND_ID
);
8234 bp
->common
.chip_id
= id
;
8235 bp
->link_params
.chip_id
= bp
->common
.chip_id
;
8236 BNX2X_DEV_INFO("chip ID is 0x%x\n", id
);
8238 val
= (REG_RD(bp
, 0x2874) & 0x55);
8239 if ((bp
->common
.chip_id
& 0x1) ||
8240 (CHIP_IS_E1(bp
) && val
) || (CHIP_IS_E1H(bp
) && (val
== 0x55))) {
8241 bp
->flags
|= ONE_PORT_FLAG
;
8242 BNX2X_DEV_INFO("single port device\n");
8245 val
= REG_RD(bp
, MCP_REG_MCPR_NVM_CFG4
);
8246 bp
->common
.flash_size
= (NVRAM_1MB_SIZE
<<
8247 (val
& MCPR_NVM_CFG4_FLASH_SIZE
));
8248 BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
8249 bp
->common
.flash_size
, bp
->common
.flash_size
);
8251 bp
->common
.shmem_base
= REG_RD(bp
, MISC_REG_SHARED_MEM_ADDR
);
8252 bp
->common
.shmem2_base
= REG_RD(bp
, MISC_REG_GENERIC_CR_0
);
8253 bp
->link_params
.shmem_base
= bp
->common
.shmem_base
;
8254 BNX2X_DEV_INFO("shmem offset 0x%x shmem2 offset 0x%x\n",
8255 bp
->common
.shmem_base
, bp
->common
.shmem2_base
);
8257 if (!bp
->common
.shmem_base
||
8258 (bp
->common
.shmem_base
< 0xA0000) ||
8259 (bp
->common
.shmem_base
>= 0xC0000)) {
8260 BNX2X_DEV_INFO("MCP not active\n");
8261 bp
->flags
|= NO_MCP_FLAG
;
8265 val
= SHMEM_RD(bp
, validity_map
[BP_PORT(bp
)]);
8266 if ((val
& (SHR_MEM_VALIDITY_DEV_INFO
| SHR_MEM_VALIDITY_MB
))
8267 != (SHR_MEM_VALIDITY_DEV_INFO
| SHR_MEM_VALIDITY_MB
))
8268 BNX2X_ERR("BAD MCP validity signature\n");
8270 bp
->common
.hw_config
= SHMEM_RD(bp
, dev_info
.shared_hw_config
.config
);
8271 BNX2X_DEV_INFO("hw_config 0x%08x\n", bp
->common
.hw_config
);
8273 bp
->link_params
.hw_led_mode
= ((bp
->common
.hw_config
&
8274 SHARED_HW_CFG_LED_MODE_MASK
) >>
8275 SHARED_HW_CFG_LED_MODE_SHIFT
);
8277 bp
->link_params
.feature_config_flags
= 0;
8278 val
= SHMEM_RD(bp
, dev_info
.shared_feature_config
.config
);
8279 if (val
& SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED
)
8280 bp
->link_params
.feature_config_flags
|=
8281 FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED
;
8283 bp
->link_params
.feature_config_flags
&=
8284 ~FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED
;
8286 val
= SHMEM_RD(bp
, dev_info
.bc_rev
) >> 8;
8287 bp
->common
.bc_ver
= val
;
8288 BNX2X_DEV_INFO("bc_ver %X\n", val
);
8289 if (val
< BNX2X_BC_VER
) {
8290 /* for now only warn
8291 * later we might need to enforce this */
8292 BNX2X_ERR("This driver needs bc_ver %X but found %X,"
8293 " please upgrade BC\n", BNX2X_BC_VER
, val
);
8295 bp
->link_params
.feature_config_flags
|=
8296 (val
>= REQ_BC_VER_4_VRFY_OPT_MDL
) ?
8297 FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY
: 0;
8299 if (BP_E1HVN(bp
) == 0) {
8300 pci_read_config_word(bp
->pdev
, bp
->pm_cap
+ PCI_PM_PMC
, &pmc
);
8301 bp
->flags
|= (pmc
& PCI_PM_CAP_PME_D3cold
) ? 0 : NO_WOL_FLAG
;
8303 /* no WOL capability for E1HVN != 0 */
8304 bp
->flags
|= NO_WOL_FLAG
;
8306 BNX2X_DEV_INFO("%sWoL capable\n",
8307 (bp
->flags
& NO_WOL_FLAG
) ? "not " : "");
8309 val
= SHMEM_RD(bp
, dev_info
.shared_hw_config
.part_num
);
8310 val2
= SHMEM_RD(bp
, dev_info
.shared_hw_config
.part_num
[4]);
8311 val3
= SHMEM_RD(bp
, dev_info
.shared_hw_config
.part_num
[8]);
8312 val4
= SHMEM_RD(bp
, dev_info
.shared_hw_config
.part_num
[12]);
8314 pr_info("part number %X-%X-%X-%X\n", val
, val2
, val3
, val4
);
static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
						    u32 switch_cfg)
{
	int port = BP_PORT(bp);
	u32 ext_phy_type;

	switch (switch_cfg) {
	case SWITCH_CFG_1G:
		BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg);

		ext_phy_type =
			SERDES_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
		switch (ext_phy_type) {
		case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10baseT_Half |
					       SUPPORTED_10baseT_Full |
					       SUPPORTED_100baseT_Half |
					       SUPPORTED_100baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_2500baseX_Full |
					       SUPPORTED_TP |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10baseT_Half |
					       SUPPORTED_10baseT_Full |
					       SUPPORTED_100baseT_Half |
					       SUPPORTED_100baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_TP |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		default:
			BNX2X_ERR("NVRAM config error. "
				  "BAD SerDes ext_phy_config 0x%x\n",
				  bp->link_params.ext_phy_config);
			return;
		}

		bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
					   port*0x10);
		BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
		break;

	case SWITCH_CFG_10G:
		BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg);

		ext_phy_type =
			XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
		switch (ext_phy_type) {
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10baseT_Half |
					       SUPPORTED_10baseT_Full |
					       SUPPORTED_100baseT_Half |
					       SUPPORTED_100baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_2500baseX_Full |
					       SUPPORTED_10000baseT_Full |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8073)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_2500baseX_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8726)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_Autoneg |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8727)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_TP |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (BCM8481)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10baseT_Half |
					       SUPPORTED_10baseT_Full |
					       SUPPORTED_100baseT_Half |
					       SUPPORTED_100baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_10000baseT_Full |
					       SUPPORTED_TP |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
			BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
				  bp->link_params.ext_phy_config);
			break;

		default:
			BNX2X_ERR("NVRAM config error. "
				  "BAD XGXS ext_phy_config 0x%x\n",
				  bp->link_params.ext_phy_config);
			return;
		}

		bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
					   port*0x18);
		BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
		break;

	default:
		BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
			  bp->port.link_config);
		return;
	}
	bp->link_params.phy_addr = bp->port.phy_addr;

	/* mask what we support according to speed_cap_mask */
	if (!(bp->link_params.speed_cap_mask &
				PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
		bp->port.supported &= ~SUPPORTED_10baseT_Half;

	if (!(bp->link_params.speed_cap_mask &
				PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
		bp->port.supported &= ~SUPPORTED_10baseT_Full;

	if (!(bp->link_params.speed_cap_mask &
				PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
		bp->port.supported &= ~SUPPORTED_100baseT_Half;

	if (!(bp->link_params.speed_cap_mask &
				PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
		bp->port.supported &= ~SUPPORTED_100baseT_Full;

	if (!(bp->link_params.speed_cap_mask &
				PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
		bp->port.supported &= ~(SUPPORTED_1000baseT_Half |
					SUPPORTED_1000baseT_Full);

	if (!(bp->link_params.speed_cap_mask &
				PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
		bp->port.supported &= ~SUPPORTED_2500baseX_Full;

	if (!(bp->link_params.speed_cap_mask &
				PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
		bp->port.supported &= ~SUPPORTED_10000baseT_Full;

	BNX2X_DEV_INFO("supported 0x%x\n", bp->port.supported);
}
static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
{
	bp->link_params.req_duplex = DUPLEX_FULL;

	switch (bp->port.link_config & PORT_FEATURE_LINK_SPEED_MASK) {
	case PORT_FEATURE_LINK_SPEED_AUTO:
		if (bp->port.supported & SUPPORTED_Autoneg) {
			bp->link_params.req_line_speed = SPEED_AUTO_NEG;
			bp->port.advertising = bp->port.supported;
		} else {
			u32 ext_phy_type =
			    XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);

			if ((ext_phy_type ==
			     PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
			    (ext_phy_type ==
			     PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706)) {
				/* force 10G, no AN */
				bp->link_params.req_line_speed = SPEED_10000;
				bp->port.advertising =
						(ADVERTISED_10000baseT_Full |
						 ADVERTISED_FIBRE);
				break;
			}
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  "  Autoneg not supported\n",
				  bp->port.link_config);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_10M_FULL:
		if (bp->port.supported & SUPPORTED_10baseT_Full) {
			bp->link_params.req_line_speed = SPEED_10;
			bp->port.advertising = (ADVERTISED_10baseT_Full |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  "  speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_10M_HALF:
		if (bp->port.supported & SUPPORTED_10baseT_Half) {
			bp->link_params.req_line_speed = SPEED_10;
			bp->link_params.req_duplex = DUPLEX_HALF;
			bp->port.advertising = (ADVERTISED_10baseT_Half |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  "  speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_100M_FULL:
		if (bp->port.supported & SUPPORTED_100baseT_Full) {
			bp->link_params.req_line_speed = SPEED_100;
			bp->port.advertising = (ADVERTISED_100baseT_Full |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  "  speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_100M_HALF:
		if (bp->port.supported & SUPPORTED_100baseT_Half) {
			bp->link_params.req_line_speed = SPEED_100;
			bp->link_params.req_duplex = DUPLEX_HALF;
			bp->port.advertising = (ADVERTISED_100baseT_Half |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  "  speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_1G:
		if (bp->port.supported & SUPPORTED_1000baseT_Full) {
			bp->link_params.req_line_speed = SPEED_1000;
			bp->port.advertising = (ADVERTISED_1000baseT_Full |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  "  speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_2_5G:
		if (bp->port.supported & SUPPORTED_2500baseX_Full) {
			bp->link_params.req_line_speed = SPEED_2500;
			bp->port.advertising = (ADVERTISED_2500baseX_Full |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  "  speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_10G_CX4:
	case PORT_FEATURE_LINK_SPEED_10G_KX4:
	case PORT_FEATURE_LINK_SPEED_10G_KR:
		if (bp->port.supported & SUPPORTED_10000baseT_Full) {
			bp->link_params.req_line_speed = SPEED_10000;
			bp->port.advertising = (ADVERTISED_10000baseT_Full |
						ADVERTISED_FIBRE);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  "  speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	default:
		BNX2X_ERR("NVRAM config error. "
			  "BAD link speed link_config 0x%x\n",
			  bp->port.link_config);
		bp->link_params.req_line_speed = SPEED_AUTO_NEG;
		bp->port.advertising = bp->port.supported;
		break;
	}

	bp->link_params.req_flow_ctrl = (bp->port.link_config &
					 PORT_FEATURE_FLOW_CONTROL_MASK);
	if ((bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
	    !(bp->port.supported & SUPPORTED_Autoneg))
		bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;

	BNX2X_DEV_INFO("req_line_speed %d  req_duplex %d  req_flow_ctrl 0x%x"
		       "  advertising 0x%x\n",
		       bp->link_params.req_line_speed,
		       bp->link_params.req_duplex,
		       bp->link_params.req_flow_ctrl, bp->port.advertising);
}
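
/* The MAC address sits in shmem as a 16-bit "upper" word and a 32-bit
 * "lower" word; both are stored into mac_buf in big-endian (network)
 * order to form the 6-byte station address.
 */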
static void __devinit bnx2x_set_mac_buf(u8 *mac_buf, u32 mac_lo, u16 mac_hi)
{
	mac_hi = cpu_to_be16(mac_hi);
	mac_lo = cpu_to_be32(mac_lo);
	memcpy(mac_buf, &mac_hi, sizeof(mac_hi));
	memcpy(mac_buf + sizeof(mac_hi), &mac_lo, sizeof(mac_lo));
}
static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 val, val2;
	u32 config;
	u32 ext_phy_type;
	u16 i;

	bp->link_params.bp = bp;
	bp->link_params.port = port;

	bp->link_params.lane_config =
		SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
	bp->link_params.ext_phy_config =
		SHMEM_RD(bp,
			 dev_info.port_hw_config[port].external_phy_config);
	/* BCM8727_NOC => BCM8727 no over current */
	if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
	    PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727_NOC) {
		bp->link_params.ext_phy_config &=
			~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
		bp->link_params.ext_phy_config |=
			PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727;
		bp->link_params.feature_config_flags |=
			FEATURE_CONFIG_BCM8727_NOC;
	}

	bp->link_params.speed_cap_mask =
		SHMEM_RD(bp,
			 dev_info.port_hw_config[port].speed_capability_mask);

	bp->port.link_config =
		SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);

	/* Get the 4 lanes xgxs config rx and tx */
	for (i = 0; i < 2; i++) {
		val = SHMEM_RD(bp,
			  dev_info.port_hw_config[port].xgxs_config_rx[i<<1]);
		bp->link_params.xgxs_config_rx[i << 1] = ((val>>16) & 0xffff);
		bp->link_params.xgxs_config_rx[(i << 1) + 1] = (val & 0xffff);

		val = SHMEM_RD(bp,
			  dev_info.port_hw_config[port].xgxs_config_tx[i<<1]);
		bp->link_params.xgxs_config_tx[i << 1] = ((val>>16) & 0xffff);
		bp->link_params.xgxs_config_tx[(i << 1) + 1] = (val & 0xffff);
	}

	/* If the device is capable of WoL, set the default state according
	 * to the HW configuration */
	config = SHMEM_RD(bp, dev_info.port_feature_config[port].config);
	bp->wol = (!(bp->flags & NO_WOL_FLAG) &&
		   (config & PORT_FEATURE_WOL_ENABLED));

	BNX2X_DEV_INFO("lane_config 0x%08x  ext_phy_config 0x%08x"
		       "  speed_cap_mask 0x%08x  link_config 0x%08x\n",
		       bp->link_params.lane_config,
		       bp->link_params.ext_phy_config,
		       bp->link_params.speed_cap_mask, bp->port.link_config);

	bp->link_params.switch_cfg |= (bp->port.link_config &
				       PORT_FEATURE_CONNECTED_SWITCH_MASK);
	bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);

	bnx2x_link_settings_requested(bp);

	/*
	 * If connected directly, work with the internal PHY, otherwise, work
	 * with the external PHY
	 */
	ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
	if (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)
		bp->mdio.prtad = bp->link_params.phy_addr;

	else if ((ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) &&
		 (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN))
		bp->mdio.prtad =
			XGXS_EXT_PHY_ADDR(bp->link_params.ext_phy_config);

	val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
	val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
	bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
	memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
	memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);

	val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].iscsi_mac_upper);
	val = SHMEM_RD(bp, dev_info.port_hw_config[port].iscsi_mac_lower);
	bnx2x_set_mac_buf(bp->iscsi_mac, val, val2);
}
static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	u32 val, val2;
	int rc = 0;

	bnx2x_get_common_hwinfo(bp);

	bp->e1hov = 0;
	bp->e1hmf = 0;
	if (CHIP_IS_E1H(bp)) {
		bp->mf_config =
			SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);

		val = (SHMEM_RD(bp, mf_cfg.func_mf_config[FUNC_0].e1hov_tag) &
		       FUNC_MF_CFG_E1HOV_TAG_MASK);
		if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT)
			bp->e1hmf = 1;
		BNX2X_DEV_INFO("%s function mode\n",
			       IS_E1HMF(bp) ? "multi" : "single");

		if (IS_E1HMF(bp)) {
			val = (SHMEM_RD(bp, mf_cfg.func_mf_config[func].
								e1hov_tag) &
			       FUNC_MF_CFG_E1HOV_TAG_MASK);
			if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
				bp->e1hov = val;
				BNX2X_DEV_INFO("E1HOV for func %d is %d "
					       "(0x%04x)\n",
					       func, bp->e1hov, bp->e1hov);
			} else {
				BNX2X_ERR("!!!  No valid E1HOV for func %d,"
					  "  aborting\n", func);
				rc = -EPERM;
			}
		} else {
			if (BP_E1HVN(bp)) {
				BNX2X_ERR("!!!  VN %d in single function mode,"
					  "  aborting\n", BP_E1HVN(bp));
				rc = -EPERM;
			}
		}
	}

	if (!BP_NOMCP(bp)) {
		bnx2x_get_port_hwinfo(bp);

		bp->fw_seq = (SHMEM_RD(bp, func_mb[func].drv_mb_header) &
			      DRV_MSG_SEQ_NUMBER_MASK);
		BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
	}

	if (IS_E1HMF(bp)) {
		val2 = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_upper);
		val = SHMEM_RD(bp,  mf_cfg.func_mf_config[func].mac_lower);
		if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
		    (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
			bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
			bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
			bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
			bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
			bp->dev->dev_addr[4] = (u8)(val >> 8  & 0xff);
			bp->dev->dev_addr[5] = (u8)(val & 0xff);
			memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
			       ETH_ALEN);
			memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
			       ETH_ALEN);
		}

		return rc;
	}

	if (BP_NOMCP(bp)) {
		/* only supposed to happen on emulation/FPGA */
		BNX2X_ERR("warning random MAC workaround active\n");
		random_ether_addr(bp->dev->dev_addr);
		memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
	}

	return rc;
}
static int __devinit bnx2x_init_bp(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	int timer_interval;
	int rc;

	/* Disable interrupt handling until HW is initialized */
	atomic_set(&bp->intr_sem, 1);
	smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */

	mutex_init(&bp->port.phy_mutex);
	mutex_init(&bp->fw_mb_mutex);
	mutex_init(&bp->cnic_mutex);

	INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
	INIT_WORK(&bp->reset_task, bnx2x_reset_task);

	rc = bnx2x_get_hwinfo(bp);

	/* need to reset chip if undi was active */
	if (!BP_NOMCP(bp))
		bnx2x_undi_unload(bp);

	if (CHIP_REV_IS_FPGA(bp))
		pr_err("FPGA detected\n");

	if (BP_NOMCP(bp) && (func == 0))
		pr_err("MCP disabled, must load devices in order!\n");

	/* Set multi queue mode */
	if ((multi_mode != ETH_RSS_MODE_DISABLED) &&
	    ((int_mode == INT_MODE_INTx) || (int_mode == INT_MODE_MSI))) {
		pr_err("Multi disabled since int_mode requested is not MSI-X\n");
		multi_mode = ETH_RSS_MODE_DISABLED;
	}
	bp->multi_mode = multi_mode;

	/* Set TPA flags */
	if (disable_tpa) {
		bp->flags &= ~TPA_ENABLE_FLAG;
		bp->dev->features &= ~NETIF_F_LRO;
	} else {
		bp->flags |= TPA_ENABLE_FLAG;
		bp->dev->features |= NETIF_F_LRO;
	}

	if (CHIP_IS_E1(bp))
		bp->dropless_fc = 0;
	else
		bp->dropless_fc = dropless_fc;

	bp->tx_ring_size = MAX_TX_AVAIL;
	bp->rx_ring_size = MAX_RX_AVAIL;

	/* make sure that the numbers are in the right granularity */
	bp->tx_ticks = (50 / (4 * BNX2X_BTR)) * (4 * BNX2X_BTR);
	bp->rx_ticks = (25 / (4 * BNX2X_BTR)) * (4 * BNX2X_BTR);

	timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
	bp->current_interval = (poll ? poll : timer_interval);

	init_timer(&bp->timer);
	bp->timer.expires = jiffies + bp->current_interval;
	bp->timer.data = (unsigned long) bp;
	bp->timer.function = bnx2x_timer;

	return rc;
}
/*
 * ethtool service functions
 */

/* All ethtool functions called with rtnl_lock */

static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct bnx2x *bp = netdev_priv(dev);

	cmd->supported = bp->port.supported;
	cmd->advertising = bp->port.advertising;

	if ((bp->state == BNX2X_STATE_OPEN) &&
	    !(bp->flags & MF_FUNC_DIS) &&
	    (bp->link_vars.link_up)) {
		cmd->speed = bp->link_vars.line_speed;
		cmd->duplex = bp->link_vars.duplex;
	} else {
		cmd->speed = bp->link_params.req_line_speed;
		cmd->duplex = bp->link_params.req_duplex;
	}
	if (IS_E1HMF(bp)) {
		u16 vn_max_rate;

		vn_max_rate = ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
			       FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
		if (vn_max_rate < cmd->speed)
			cmd->speed = vn_max_rate;
	}

	if (bp->link_params.switch_cfg == SWITCH_CFG_10G) {
		u32 ext_phy_type =
			XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);

		switch (ext_phy_type) {
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
			cmd->port = PORT_FIBRE;
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
			cmd->port = PORT_TP;
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
			BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
				  bp->link_params.ext_phy_config);
			break;

		default:
			DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
			   bp->link_params.ext_phy_config);
			break;
		}
	} else
		cmd->port = PORT_TP;

	cmd->phy_address = bp->mdio.prtad;
	cmd->transceiver = XCVR_INTERNAL;

	if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
		cmd->autoneg = AUTONEG_ENABLE;
	else
		cmd->autoneg = AUTONEG_DISABLE;

	cmd->maxtxpkt = 0;
	cmd->maxrxpkt = 0;

	DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
	   DP_LEVEL "  supported 0x%x  advertising 0x%x  speed %d\n"
	   DP_LEVEL "  duplex %d  port %d  phy_address %d  transceiver %d\n"
	   DP_LEVEL "  autoneg %d  maxtxpkt %d  maxrxpkt %d\n",
	   cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
	   cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
	   cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);

	return 0;
}
static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct bnx2x *bp = netdev_priv(dev);
	u32 advertising;

	if (IS_E1HMF(bp))
		return 0;

	DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
	   DP_LEVEL "  supported 0x%x  advertising 0x%x  speed %d\n"
	   DP_LEVEL "  duplex %d  port %d  phy_address %d  transceiver %d\n"
	   DP_LEVEL "  autoneg %d  maxtxpkt %d  maxrxpkt %d\n",
	   cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
	   cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
	   cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);

	if (cmd->autoneg == AUTONEG_ENABLE) {
		if (!(bp->port.supported & SUPPORTED_Autoneg)) {
			DP(NETIF_MSG_LINK, "Autoneg not supported\n");
			return -EINVAL;
		}

		/* advertise the requested speed and duplex if supported */
		cmd->advertising &= bp->port.supported;

		bp->link_params.req_line_speed = SPEED_AUTO_NEG;
		bp->link_params.req_duplex = DUPLEX_FULL;
		bp->port.advertising |= (ADVERTISED_Autoneg |
					 cmd->advertising);

	} else { /* forced speed */
		/* advertise the requested speed and duplex if supported */
		switch (cmd->speed) {
		case SPEED_10:
			if (cmd->duplex == DUPLEX_FULL) {
				if (!(bp->port.supported &
				      SUPPORTED_10baseT_Full)) {
					DP(NETIF_MSG_LINK,
					   "10M full not supported\n");
					return -EINVAL;
				}

				advertising = (ADVERTISED_10baseT_Full |
					       ADVERTISED_TP);
			} else {
				if (!(bp->port.supported &
				      SUPPORTED_10baseT_Half)) {
					DP(NETIF_MSG_LINK,
					   "10M half not supported\n");
					return -EINVAL;
				}

				advertising = (ADVERTISED_10baseT_Half |
					       ADVERTISED_TP);
			}
			break;

		case SPEED_100:
			if (cmd->duplex == DUPLEX_FULL) {
				if (!(bp->port.supported &
				      SUPPORTED_100baseT_Full)) {
					DP(NETIF_MSG_LINK,
					   "100M full not supported\n");
					return -EINVAL;
				}

				advertising = (ADVERTISED_100baseT_Full |
					       ADVERTISED_TP);
			} else {
				if (!(bp->port.supported &
				      SUPPORTED_100baseT_Half)) {
					DP(NETIF_MSG_LINK,
					   "100M half not supported\n");
					return -EINVAL;
				}

				advertising = (ADVERTISED_100baseT_Half |
					       ADVERTISED_TP);
			}
			break;

		case SPEED_1000:
			if (cmd->duplex != DUPLEX_FULL) {
				DP(NETIF_MSG_LINK, "1G half not supported\n");
				return -EINVAL;
			}

			if (!(bp->port.supported & SUPPORTED_1000baseT_Full)) {
				DP(NETIF_MSG_LINK, "1G full not supported\n");
				return -EINVAL;
			}

			advertising = (ADVERTISED_1000baseT_Full |
				       ADVERTISED_TP);
			break;

		case SPEED_2500:
			if (cmd->duplex != DUPLEX_FULL) {
				DP(NETIF_MSG_LINK,
				   "2.5G half not supported\n");
				return -EINVAL;
			}

			if (!(bp->port.supported & SUPPORTED_2500baseX_Full)) {
				DP(NETIF_MSG_LINK,
				   "2.5G full not supported\n");
				return -EINVAL;
			}

			advertising = (ADVERTISED_2500baseX_Full |
				       ADVERTISED_TP);
			break;

		case SPEED_10000:
			if (cmd->duplex != DUPLEX_FULL) {
				DP(NETIF_MSG_LINK, "10G half not supported\n");
				return -EINVAL;
			}

			if (!(bp->port.supported & SUPPORTED_10000baseT_Full)) {
				DP(NETIF_MSG_LINK, "10G full not supported\n");
				return -EINVAL;
			}

			advertising = (ADVERTISED_10000baseT_Full |
				       ADVERTISED_FIBRE);
			break;

		default:
			DP(NETIF_MSG_LINK, "Unsupported speed\n");
			return -EINVAL;
		}

		bp->link_params.req_line_speed = cmd->speed;
		bp->link_params.req_duplex = cmd->duplex;
		bp->port.advertising = advertising;
	}

	DP(NETIF_MSG_LINK, "req_line_speed %d\n"
	   DP_LEVEL "  req_duplex %d  advertising 0x%x\n",
	   bp->link_params.req_line_speed, bp->link_params.req_duplex,
	   bp->port.advertising);

	if (netif_running(dev)) {
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);
		bnx2x_link_set(bp);
	}

	return 0;
}
#define IS_E1_ONLINE(info)	(((info) & RI_E1_ONLINE) == RI_E1_ONLINE)
#define IS_E1H_ONLINE(info)	(((info) & RI_E1H_ONLINE) == RI_E1H_ONLINE)

static int bnx2x_get_regs_len(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	int regdump_len = 0;
	int i;

	if (CHIP_IS_E1(bp)) {
		for (i = 0; i < REGS_COUNT; i++)
			if (IS_E1_ONLINE(reg_addrs[i].info))
				regdump_len += reg_addrs[i].size;

		for (i = 0; i < WREGS_COUNT_E1; i++)
			if (IS_E1_ONLINE(wreg_addrs_e1[i].info))
				regdump_len += wreg_addrs_e1[i].size *
					(1 + wreg_addrs_e1[i].read_regs_count);

	} else { /* E1H */
		for (i = 0; i < REGS_COUNT; i++)
			if (IS_E1H_ONLINE(reg_addrs[i].info))
				regdump_len += reg_addrs[i].size;

		for (i = 0; i < WREGS_COUNT_E1H; i++)
			if (IS_E1H_ONLINE(wreg_addrs_e1h[i].info))
				regdump_len += wreg_addrs_e1h[i].size *
					(1 + wreg_addrs_e1h[i].read_regs_count);
	}
	regdump_len *= 4;
	regdump_len += sizeof(struct dump_hdr);

	return regdump_len;
}
static void bnx2x_get_regs(struct net_device *dev,
			   struct ethtool_regs *regs, void *_p)
{
	u32 *p = _p, i, j;
	struct bnx2x *bp = netdev_priv(dev);
	struct dump_hdr dump_hdr = {0};

	regs->version = 0;
	memset(p, 0, regs->len);

	if (!netif_running(bp->dev))
		return;

	dump_hdr.hdr_size = (sizeof(struct dump_hdr) / 4) - 1;
	dump_hdr.dump_sign = dump_sign_all;
	dump_hdr.xstorm_waitp = REG_RD(bp, XSTORM_WAITP_ADDR);
	dump_hdr.tstorm_waitp = REG_RD(bp, TSTORM_WAITP_ADDR);
	dump_hdr.ustorm_waitp = REG_RD(bp, USTORM_WAITP_ADDR);
	dump_hdr.cstorm_waitp = REG_RD(bp, CSTORM_WAITP_ADDR);
	dump_hdr.info = CHIP_IS_E1(bp) ? RI_E1_ONLINE : RI_E1H_ONLINE;

	memcpy(p, &dump_hdr, sizeof(struct dump_hdr));
	p += dump_hdr.hdr_size + 1;

	if (CHIP_IS_E1(bp)) {
		for (i = 0; i < REGS_COUNT; i++)
			if (IS_E1_ONLINE(reg_addrs[i].info))
				for (j = 0; j < reg_addrs[i].size; j++)
					*p++ = REG_RD(bp,
						      reg_addrs[i].addr + j*4);

	} else { /* E1H */
		for (i = 0; i < REGS_COUNT; i++)
			if (IS_E1H_ONLINE(reg_addrs[i].info))
				for (j = 0; j < reg_addrs[i].size; j++)
					*p++ = REG_RD(bp,
						      reg_addrs[i].addr + j*4);
	}
}
#define PHY_FW_VER_LEN			10

static void bnx2x_get_drvinfo(struct net_device *dev,
			      struct ethtool_drvinfo *info)
{
	struct bnx2x *bp = netdev_priv(dev);
	u8 phy_fw_ver[PHY_FW_VER_LEN];

	strcpy(info->driver, DRV_MODULE_NAME);
	strcpy(info->version, DRV_MODULE_VERSION);

	phy_fw_ver[0] = '\0';
	if (bp->port.pmf) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_get_ext_phy_fw_version(&bp->link_params,
					     (bp->state != BNX2X_STATE_CLOSED),
					     phy_fw_ver, PHY_FW_VER_LEN);
		bnx2x_release_phy_lock(bp);
	}

	snprintf(info->fw_version, 32, "BC:%d.%d.%d%s%s",
		 (bp->common.bc_ver & 0xff0000) >> 16,
		 (bp->common.bc_ver & 0xff00) >> 8,
		 (bp->common.bc_ver & 0xff),
		 ((phy_fw_ver[0] != '\0') ? " PHY:" : ""), phy_fw_ver);
	strcpy(info->bus_info, pci_name(bp->pdev));
	info->n_stats = BNX2X_NUM_STATS;
	info->testinfo_len = BNX2X_NUM_TESTS;
	info->eedump_len = bp->common.flash_size;
	info->regdump_len = bnx2x_get_regs_len(dev);
}
static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (bp->flags & NO_WOL_FLAG) {
		wol->supported = 0;
		wol->wolopts = 0;
	} else {
		wol->supported = WAKE_MAGIC;
		if (bp->wol)
			wol->wolopts = WAKE_MAGIC;
		else
			wol->wolopts = 0;
	}
	memset(&wol->sopass, 0, sizeof(wol->sopass));
}
static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (wol->wolopts & ~WAKE_MAGIC)
		return -EINVAL;

	if (wol->wolopts & WAKE_MAGIC) {
		if (bp->flags & NO_WOL_FLAG)
			return -EINVAL;

		bp->wol = 1;
	} else
		bp->wol = 0;

	return 0;
}
static u32 bnx2x_get_msglevel(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	return bp->msg_enable;
}

static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (capable(CAP_NET_ADMIN))
		bp->msg_enable = level;
}
static int bnx2x_nway_reset(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (!bp->port.pmf)
		return 0;

	if (netif_running(dev)) {
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);
		bnx2x_link_set(bp);
	}

	return 0;
}
static u32 bnx2x_get_link(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (bp->flags & MF_FUNC_DIS)
		return 0;

	return bp->link_vars.link_up;
}
static int bnx2x_get_eeprom_len(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	return bp->common.flash_size;
}
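
/* NVRAM access is arbitrated between the two ports through the MCP
 * software-arbitration register: set the per-port request bit, then
 * poll until the matching grant bit appears, giving up after a timeout
 * that is stretched on emulation/FPGA.
 */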
static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int count, i;
	u32 val = 0;

	/* adjust timeout for emulation/FPGA */
	count = NVRAM_TIMEOUT_COUNT;
	if (CHIP_REV_IS_SLOW(bp))
		count *= 100;

	/* request access to nvram interface */
	REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
	       (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));

	for (i = 0; i < count*10; i++) {
		val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
		if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))
			break;

		udelay(5);
	}

	if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
		DP(BNX2X_MSG_NVM, "cannot get access to nvram interface\n");
		return -EBUSY;
	}

	return 0;
}
static int bnx2x_release_nvram_lock(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int count, i;
	u32 val = 0;

	/* adjust timeout for emulation/FPGA */
	count = NVRAM_TIMEOUT_COUNT;
	if (CHIP_REV_IS_SLOW(bp))
		count *= 100;

	/* relinquish nvram interface */
	REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
	       (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));

	for (i = 0; i < count*10; i++) {
		val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
		if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)))
			break;

		udelay(5);
	}

	if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
		DP(BNX2X_MSG_NVM, "cannot free access to nvram interface\n");
		return -EBUSY;
	}

	return 0;
}
static void bnx2x_enable_nvram_access(struct bnx2x *bp)
{
	u32 val;

	val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);

	/* enable both bits, even on read */
	REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
	       (val | MCPR_NVM_ACCESS_ENABLE_EN |
		      MCPR_NVM_ACCESS_ENABLE_WR_EN));
}

static void bnx2x_disable_nvram_access(struct bnx2x *bp)
{
	u32 val;

	val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);

	/* disable both bits, even after read */
	REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
	       (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
			MCPR_NVM_ACCESS_ENABLE_WR_EN)));
}
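
/* A single NVRAM dword access: clear the DONE bit, program the byte
 * address, issue the command with DOIT set, then poll the command
 * register until DONE is asserted and the read data can be fetched.
 */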
static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, __be32 *ret_val,
				  u32 cmd_flags)
{
	int count, i, rc;
	u32 val;

	/* build the command word */
	cmd_flags |= MCPR_NVM_COMMAND_DOIT;

	/* need to clear DONE bit separately */
	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);

	/* address of the NVRAM to read from */
	REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
	       (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));

	/* issue a read command */
	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);

	/* adjust timeout for emulation/FPGA */
	count = NVRAM_TIMEOUT_COUNT;
	if (CHIP_REV_IS_SLOW(bp))
		count *= 100;

	/* wait for completion */
	*ret_val = 0;
	rc = -EBUSY;
	for (i = 0; i < count; i++) {
		udelay(5);
		val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);

		if (val & MCPR_NVM_COMMAND_DONE) {
			val = REG_RD(bp, MCP_REG_MCPR_NVM_READ);
			/* we read nvram data in cpu order
			 * but ethtool sees it as an array of bytes
			 * converting to big-endian will do the work */
			*ret_val = cpu_to_be32(val);
			rc = 0;
			break;
		}
	}

	return rc;
}
static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
			    int buf_size)
{
	int rc;
	u32 cmd_flags;
	__be32 val;

	if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
		DP(BNX2X_MSG_NVM,
		   "Invalid parameter: offset 0x%x  buf_size 0x%x\n",
		   offset, buf_size);
		return -EINVAL;
	}

	if (offset + buf_size > bp->common.flash_size) {
		DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
				  " buf_size (0x%x) > flash_size (0x%x)\n",
		   offset, buf_size, bp->common.flash_size);
		return -EINVAL;
	}

	/* request access to nvram interface */
	rc = bnx2x_acquire_nvram_lock(bp);
	if (rc)
		return rc;

	/* enable access to nvram interface */
	bnx2x_enable_nvram_access(bp);

	/* read the first word(s) */
	cmd_flags = MCPR_NVM_COMMAND_FIRST;
	while ((buf_size > sizeof(u32)) && (rc == 0)) {
		rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
		memcpy(ret_buf, &val, 4);

		/* advance to the next dword */
		offset += sizeof(u32);
		ret_buf += sizeof(u32);
		buf_size -= sizeof(u32);
		cmd_flags = 0;
	}

	if (rc == 0) {
		cmd_flags |= MCPR_NVM_COMMAND_LAST;
		rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
		memcpy(ret_buf, &val, 4);
	}

	/* disable access to nvram interface */
	bnx2x_disable_nvram_access(bp);
	bnx2x_release_nvram_lock(bp);

	return rc;
}
static int bnx2x_get_eeprom(struct net_device *dev,
			    struct ethtool_eeprom *eeprom, u8 *eebuf)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc;

	if (!netif_running(dev))
		return -EAGAIN;

	DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
	   DP_LEVEL "  magic 0x%x  offset 0x%x (%d)  len 0x%x (%d)\n",
	   eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
	   eeprom->len, eeprom->len);

	/* parameters already validated in ethtool_get_eeprom */

	rc = bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);

	return rc;
}
static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
				   u32 cmd_flags)
{
	int count, i, rc;

	/* build the command word */
	cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR;

	/* need to clear DONE bit separately */
	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);

	/* write the data */
	REG_WR(bp, MCP_REG_MCPR_NVM_WRITE, val);

	/* address of the NVRAM to write to */
	REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
	       (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));

	/* issue the write command */
	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);

	/* adjust timeout for emulation/FPGA */
	count = NVRAM_TIMEOUT_COUNT;
	if (CHIP_REV_IS_SLOW(bp))
		count *= 100;

	/* wait for completion */
	rc = -EBUSY;
	for (i = 0; i < count; i++) {
		udelay(5);
		val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
		if (val & MCPR_NVM_COMMAND_DONE) {
			rc = 0;
			break;
		}
	}

	return rc;
}
#define BYTE_OFFSET(offset)		(8 * (offset & 0x03))

static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
			      int buf_size)
{
	int rc;
	u32 cmd_flags;
	u32 align_offset;
	__be32 val;

	if (offset + buf_size > bp->common.flash_size) {
		DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
				  " buf_size (0x%x) > flash_size (0x%x)\n",
		   offset, buf_size, bp->common.flash_size);
		return -EINVAL;
	}

	/* request access to nvram interface */
	rc = bnx2x_acquire_nvram_lock(bp);
	if (rc)
		return rc;

	/* enable access to nvram interface */
	bnx2x_enable_nvram_access(bp);

	cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
	align_offset = (offset & ~0x03);
	rc = bnx2x_nvram_read_dword(bp, align_offset, &val, cmd_flags);

	if (rc == 0) {
		val &= ~(0xff << BYTE_OFFSET(offset));
		val |= (*data_buf << BYTE_OFFSET(offset));

		/* nvram data is returned as an array of bytes
		 * convert it back to cpu order */
		val = be32_to_cpu(val);

		rc = bnx2x_nvram_write_dword(bp, align_offset, val,
					     cmd_flags);
	}

	/* disable access to nvram interface */
	bnx2x_disable_nvram_access(bp);
	bnx2x_release_nvram_lock(bp);

	return rc;
}
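
/* Buffered NVRAM write: data is streamed one dword at a time, with
 * MCPR_NVM_COMMAND_FIRST/LAST flags raised around flash page boundaries
 * so the MCP commits each page separately.
 */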
static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
			     int buf_size)
{
	int rc;
	u32 cmd_flags;
	u32 val;
	u32 written_so_far;

	if (buf_size == 1)	/* ethtool */
		return bnx2x_nvram_write1(bp, offset, data_buf, buf_size);

	if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
		DP(BNX2X_MSG_NVM,
		   "Invalid parameter: offset 0x%x  buf_size 0x%x\n",
		   offset, buf_size);
		return -EINVAL;
	}

	if (offset + buf_size > bp->common.flash_size) {
		DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
				  " buf_size (0x%x) > flash_size (0x%x)\n",
		   offset, buf_size, bp->common.flash_size);
		return -EINVAL;
	}

	/* request access to nvram interface */
	rc = bnx2x_acquire_nvram_lock(bp);
	if (rc)
		return rc;

	/* enable access to nvram interface */
	bnx2x_enable_nvram_access(bp);

	written_so_far = 0;
	cmd_flags = MCPR_NVM_COMMAND_FIRST;
	while ((written_so_far < buf_size) && (rc == 0)) {
		if (written_so_far == (buf_size - sizeof(u32)))
			cmd_flags |= MCPR_NVM_COMMAND_LAST;
		else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0)
			cmd_flags |= MCPR_NVM_COMMAND_LAST;
		else if ((offset % NVRAM_PAGE_SIZE) == 0)
			cmd_flags |= MCPR_NVM_COMMAND_FIRST;

		memcpy(&val, data_buf, 4);

		rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);

		/* advance to the next dword */
		offset += sizeof(u32);
		data_buf += sizeof(u32);
		written_so_far += sizeof(u32);
		cmd_flags = 0;
	}

	/* disable access to nvram interface */
	bnx2x_disable_nvram_access(bp);
	bnx2x_release_nvram_lock(bp);

	return rc;
}
static int bnx2x_set_eeprom(struct net_device *dev,
			    struct ethtool_eeprom *eeprom, u8 *eebuf)
{
	struct bnx2x *bp = netdev_priv(dev);
	int port = BP_PORT(bp);
	int rc = 0;

	if (!netif_running(dev))
		return -EAGAIN;

	DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
	   DP_LEVEL "  magic 0x%x  offset 0x%x (%d)  len 0x%x (%d)\n",
	   eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
	   eeprom->len, eeprom->len);

	/* parameters already validated in ethtool_set_eeprom */

	/* PHY eeprom can be accessed only by the PMF */
	if ((eeprom->magic >= 0x50485900) && (eeprom->magic <= 0x504859FF) &&
	    !bp->port.pmf)
		return -EINVAL;

	if (eeprom->magic == 0x50485950) {
		/* 'PHYP' (0x50485950): prepare phy for FW upgrade */
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);

		bnx2x_acquire_phy_lock(bp);
		rc |= bnx2x_link_reset(&bp->link_params,
				       &bp->link_vars, 0);
		if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
					PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101)
			bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
				       MISC_REGISTERS_GPIO_HIGH, port);
		bnx2x_release_phy_lock(bp);
		bnx2x_link_report(bp);

	} else if (eeprom->magic == 0x50485952) {
		/* 'PHYR' (0x50485952): re-init link after FW upgrade */
		if (bp->state == BNX2X_STATE_OPEN) {
			bnx2x_acquire_phy_lock(bp);
			rc |= bnx2x_link_reset(&bp->link_params,
					       &bp->link_vars, 1);

			rc |= bnx2x_phy_init(&bp->link_params,
					     &bp->link_vars);
			bnx2x_release_phy_lock(bp);
			bnx2x_calc_fc_adv(bp);
		}
	} else if (eeprom->magic == 0x53985943) {
		/* 'PHYC' (0x53985943): PHY FW upgrade completed */
		if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
				       PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) {
			u8 ext_phy_addr =
			     XGXS_EXT_PHY_ADDR(bp->link_params.ext_phy_config);

			/* DSP Remove Download Mode */
			bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
				       MISC_REGISTERS_GPIO_LOW, port);

			bnx2x_acquire_phy_lock(bp);

			bnx2x_sfx7101_sp_sw_reset(bp, port, ext_phy_addr);

			/* wait 0.5 sec to allow it to run */
			msleep(500);
			bnx2x_ext_phy_hw_reset(bp, port);
			msleep(500);
			bnx2x_release_phy_lock(bp);
		}
	} else
		rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);

	return rc;
}
static int bnx2x_get_coalesce(struct net_device *dev,
			      struct ethtool_coalesce *coal)
{
	struct bnx2x *bp = netdev_priv(dev);

	memset(coal, 0, sizeof(struct ethtool_coalesce));

	coal->rx_coalesce_usecs = bp->rx_ticks;
	coal->tx_coalesce_usecs = bp->tx_ticks;

	return 0;
}
#define BNX2X_MAX_COALES_TOUT  (0xf0*12) /* Maximal coalescing timeout in us */
static int bnx2x_set_coalesce(struct net_device *dev,
			      struct ethtool_coalesce *coal)
{
	struct bnx2x *bp = netdev_priv(dev);

	bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
	if (bp->rx_ticks > BNX2X_MAX_COALES_TOUT)
		bp->rx_ticks = BNX2X_MAX_COALES_TOUT;

	bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
	if (bp->tx_ticks > BNX2X_MAX_COALES_TOUT)
		bp->tx_ticks = BNX2X_MAX_COALES_TOUT;

	if (netif_running(dev))
		bnx2x_update_coalesce(bp);

	return 0;
}
static void bnx2x_get_ringparam(struct net_device *dev,
				struct ethtool_ringparam *ering)
{
	struct bnx2x *bp = netdev_priv(dev);

	ering->rx_max_pending = MAX_RX_AVAIL;
	ering->rx_mini_max_pending = 0;
	ering->rx_jumbo_max_pending = 0;

	ering->rx_pending = bp->rx_ring_size;
	ering->rx_mini_pending = 0;
	ering->rx_jumbo_pending = 0;

	ering->tx_max_pending = MAX_TX_AVAIL;
	ering->tx_pending = bp->tx_ring_size;
}
static int bnx2x_set_ringparam(struct net_device *dev,
			       struct ethtool_ringparam *ering)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc = 0;

	if ((ering->rx_pending > MAX_RX_AVAIL) ||
	    (ering->tx_pending > MAX_TX_AVAIL) ||
	    (ering->tx_pending <= MAX_SKB_FRAGS + 4))
		return -EINVAL;

	bp->rx_ring_size = ering->rx_pending;
	bp->tx_ring_size = ering->tx_pending;

	if (netif_running(dev)) {
		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
		rc = bnx2x_nic_load(bp, LOAD_NORMAL);
	}

	return rc;
}
static void bnx2x_get_pauseparam(struct net_device *dev,
				 struct ethtool_pauseparam *epause)
{
	struct bnx2x *bp = netdev_priv(dev);

	epause->autoneg = (bp->link_params.req_flow_ctrl ==
			   BNX2X_FLOW_CTRL_AUTO) &&
			  (bp->link_params.req_line_speed == SPEED_AUTO_NEG);

	epause->rx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) ==
			    BNX2X_FLOW_CTRL_RX);
	epause->tx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX) ==
			    BNX2X_FLOW_CTRL_TX);

	DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
	   DP_LEVEL "  autoneg %d  rx_pause %d  tx_pause %d\n",
	   epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
}
static int bnx2x_set_pauseparam(struct net_device *dev,
				struct ethtool_pauseparam *epause)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (IS_E1HMF(bp))
		return 0;

	DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
	   DP_LEVEL "  autoneg %d  rx_pause %d  tx_pause %d\n",
	   epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);

	bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;

	if (epause->rx_pause)
		bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_RX;

	if (epause->tx_pause)
		bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_TX;

	if (bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO)
		bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;

	if (epause->autoneg) {
		if (!(bp->port.supported & SUPPORTED_Autoneg)) {
			DP(NETIF_MSG_LINK, "autoneg not supported\n");
			return -EINVAL;
		}

		if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
			bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
	}

	DP(NETIF_MSG_LINK,
	   "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl);

	if (netif_running(dev)) {
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);
		bnx2x_link_set(bp);
	}

	return 0;
}
static int bnx2x_set_flags(struct net_device *dev, u32 data)
{
	struct bnx2x *bp = netdev_priv(dev);
	int changed = 0;
	int rc = 0;

	/* TPA requires Rx CSUM offloading */
	if ((data & ETH_FLAG_LRO) && bp->rx_csum) {
		if (!disable_tpa) {
			if (!(dev->features & NETIF_F_LRO)) {
				dev->features |= NETIF_F_LRO;
				bp->flags |= TPA_ENABLE_FLAG;
				changed = 1;
			}
		} else
			rc = -EINVAL;
	} else if (dev->features & NETIF_F_LRO) {
		dev->features &= ~NETIF_F_LRO;
		bp->flags &= ~TPA_ENABLE_FLAG;
		changed = 1;
	}

	if (changed && netif_running(dev)) {
		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
		rc = bnx2x_nic_load(bp, LOAD_NORMAL);
	}

	return rc;
}
static u32 bnx2x_get_rx_csum(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	return bp->rx_csum;
}
static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc = 0;

	bp->rx_csum = data;

	/* Disable TPA, when Rx CSUM is disabled. Otherwise all
	   TPA'ed packets will be discarded due to wrong TCP CSUM */
	if (!data) {
		u32 flags = ethtool_op_get_flags(dev);

		rc = bnx2x_set_flags(dev, (flags & ~ETH_FLAG_LRO));
	}

	return rc;
}
static int bnx2x_set_tso(struct net_device *dev, u32 data)
{
	if (data) {
		dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
		dev->features |= NETIF_F_TSO6;
	} else {
		dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
		dev->features &= ~NETIF_F_TSO6;
	}

	return 0;
}
static const struct {
	char string[ETH_GSTRING_LEN];
} bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = {
	{ "register_test (offline)" },
	{ "memory_test (offline)" },
	{ "loopback_test (offline)" },
	{ "nvram_test (online)" },
	{ "interrupt_test (online)" },
	{ "link_test (online)" },
	{ "idle check (online)" }
};
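
/* Register self-test: each table entry below is written with 0 and then
 * with 0xffffffff, read back through the per-port offset, and compared
 * under the entry's mask; the original value is restored after every
 * access.
 */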
static int bnx2x_test_registers(struct bnx2x *bp)
{
	int idx, i, rc = -ENODEV;
	u32 wr_val = 0;
	int port = BP_PORT(bp);
	static const struct {
		u32 offset0;
		u32 offset1;
		u32 mask;
	} reg_tbl[] = {
/* 0 */		{ BRB1_REG_PAUSE_LOW_THRESHOLD_0,      4, 0x000003ff },
		{ DORQ_REG_DB_ADDR0,                   4, 0xffffffff },
		{ HC_REG_AGG_INT_0,                    4, 0x000003ff },
		{ PBF_REG_MAC_IF0_ENABLE,              4, 0x00000001 },
		{ PBF_REG_P0_INIT_CRD,                 4, 0x000007ff },
		{ PRS_REG_CID_PORT_0,                  4, 0x00ffffff },
		{ PXP2_REG_PSWRQ_CDU0_L2P,             4, 0x000fffff },
		{ PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR,    8, 0x0003ffff },
		{ PXP2_REG_PSWRQ_TM0_L2P,              4, 0x000fffff },
		{ PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR,   8, 0x0003ffff },
/* 10 */	{ PXP2_REG_PSWRQ_TSDM0_L2P,            4, 0x000fffff },
		{ QM_REG_CONNNUM_0,                    4, 0x000fffff },
		{ TM_REG_LIN0_MAX_ACTIVE_CID,          4, 0x0003ffff },
		{ SRC_REG_KEYRSS0_0,                  40, 0xffffffff },
		{ SRC_REG_KEYRSS0_7,                  40, 0xffffffff },
		{ XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 4, 0x00000001 },
		{ XCM_REG_WU_DA_CNT_CMD00,             4, 0x00000003 },
		{ XCM_REG_GLB_DEL_ACK_MAX_CNT_0,       4, 0x000000ff },
		{ NIG_REG_LLH0_T_BIT,                  4, 0x00000001 },
		{ NIG_REG_EMAC0_IN_EN,                 4, 0x00000001 },
/* 20 */	{ NIG_REG_BMAC0_IN_EN,                 4, 0x00000001 },
		{ NIG_REG_XCM0_OUT_EN,                 4, 0x00000001 },
		{ NIG_REG_BRB0_OUT_EN,                 4, 0x00000001 },
		{ NIG_REG_LLH0_XCM_MASK,               4, 0x00000007 },
		{ NIG_REG_LLH0_ACPI_PAT_6_LEN,        68, 0x000000ff },
		{ NIG_REG_LLH0_ACPI_PAT_0_CRC,        68, 0xffffffff },
		{ NIG_REG_LLH0_DEST_MAC_0_0,         160, 0xffffffff },
		{ NIG_REG_LLH0_DEST_IP_0_1,          160, 0xffffffff },
		{ NIG_REG_LLH0_IPV4_IPV6_0,          160, 0x00000001 },
		{ NIG_REG_LLH0_DEST_UDP_0,           160, 0x0000ffff },
/* 30 */	{ NIG_REG_LLH0_DEST_TCP_0,           160, 0x0000ffff },
		{ NIG_REG_LLH0_VLAN_ID_0,            160, 0x00000fff },
		{ NIG_REG_XGXS_SERDES0_MODE_SEL,       4, 0x00000001 },
		{ NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0, 4, 0x00000001 },
		{ NIG_REG_STATUS_INTERRUPT_PORT0,      4, 0x07ffffff },
		{ NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST, 24, 0x00000001 },
		{ NIG_REG_SERDES0_CTRL_PHY_ADDR,      16, 0x0000001f },

		{ 0xffffffff, 0, 0x00000000 }
	};

	if (!netif_running(bp->dev))
		return rc;

	/* Repeat the test twice:
	   First by writing 0x00000000, second by writing 0xffffffff */
	for (idx = 0; idx < 2; idx++) {

		switch (idx) {
		case 0:
			wr_val = 0;
			break;
		case 1:
			wr_val = 0xffffffff;
			break;
		}

		for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) {
			u32 offset, mask, save_val, val;

			offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1;
			mask = reg_tbl[i].mask;

			save_val = REG_RD(bp, offset);

			REG_WR(bp, offset, wr_val);
			val = REG_RD(bp, offset);

			/* Restore the original register's value */
			REG_WR(bp, offset, save_val);

			/* verify that value is as expected value */
			if ((val & mask) != (wr_val & mask))
				goto test_reg_exit;
		}
	}

	rc = 0;

test_reg_exit:
	return rc;
}
static int bnx2x_test_memory(struct bnx2x *bp)
{
	int i, j, rc = -ENODEV;
	u32 val;
	static const struct {
		u32 offset;
		int size;
	} mem_tbl[] = {
		{ CCM_REG_XX_DESCR_TABLE,   CCM_REG_XX_DESCR_TABLE_SIZE },
		{ CFC_REG_ACTIVITY_COUNTER, CFC_REG_ACTIVITY_COUNTER_SIZE },
		{ CFC_REG_LINK_LIST,        CFC_REG_LINK_LIST_SIZE },
		{ DMAE_REG_CMD_MEM,         DMAE_REG_CMD_MEM_SIZE },
		{ TCM_REG_XX_DESCR_TABLE,   TCM_REG_XX_DESCR_TABLE_SIZE },
		{ UCM_REG_XX_DESCR_TABLE,   UCM_REG_XX_DESCR_TABLE_SIZE },
		{ XCM_REG_XX_DESCR_TABLE,   XCM_REG_XX_DESCR_TABLE_SIZE },

		{ 0xffffffff, 0 }
	};
	static const struct {
		char *name;
		u32 offset;
		u32 e1_mask;
		u32 e1h_mask;
	} prty_tbl[] = {
		{ "CCM_PRTY_STS",  CCM_REG_CCM_PRTY_STS,   0x3ffc0, 0 },
		{ "CFC_PRTY_STS",  CFC_REG_CFC_PRTY_STS,   0x2,     0x2 },
		{ "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0,       0 },
		{ "TCM_PRTY_STS",  TCM_REG_TCM_PRTY_STS,   0x3ffc0, 0 },
		{ "UCM_PRTY_STS",  UCM_REG_UCM_PRTY_STS,   0x3ffc0, 0 },
		{ "XCM_PRTY_STS",  XCM_REG_XCM_PRTY_STS,   0x3ffc1, 0 },

		{ NULL, 0xffffffff, 0, 0 }
	};

	if (!netif_running(bp->dev))
		return rc;

	/* Go through all the memories */
	for (i = 0; mem_tbl[i].offset != 0xffffffff; i++)
		for (j = 0; j < mem_tbl[i].size; j++)
			REG_RD(bp, mem_tbl[i].offset + j*4);

	/* Check the parity status */
	for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
		val = REG_RD(bp, prty_tbl[i].offset);
		if ((CHIP_IS_E1(bp) && (val & ~(prty_tbl[i].e1_mask))) ||
		    (CHIP_IS_E1H(bp) && (val & ~(prty_tbl[i].e1h_mask)))) {
			DP(NETIF_MSG_HW,
			   "%s is 0x%x\n", prty_tbl[i].name, val);
			goto test_mem_exit;
		}
	}

	rc = 0;

test_mem_exit:
	return rc;
}
static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up)
{
	int cnt = 1000;

	if (link_up)
		while (bnx2x_link_test(bp) && cnt--)
			msleep(10);
}
static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
{
	unsigned int pkt_size, num_pkts, i;
	struct sk_buff *skb;
	unsigned char *packet;
	struct bnx2x_fastpath *fp_rx = &bp->fp[0];
	struct bnx2x_fastpath *fp_tx = &bp->fp[0];
	u16 tx_start_idx, tx_idx;
	u16 rx_start_idx, rx_idx;
	u16 pkt_prod, bd_prod;
	struct sw_tx_bd *tx_buf;
	struct eth_tx_start_bd *tx_start_bd;
	struct eth_tx_parse_bd *pbd = NULL;
	dma_addr_t mapping;
	union eth_rx_cqe *cqe;
	u8 cqe_fp_flags;
	struct sw_rx_bd *rx_buf;
	u16 len;
	int rc = -ENODEV;

	/* check the loopback mode */
	switch (loopback_mode) {
	case BNX2X_PHY_LOOPBACK:
		if (bp->link_params.loopback_mode != LOOPBACK_XGXS_10)
			return -EINVAL;
		break;
	case BNX2X_MAC_LOOPBACK:
		bp->link_params.loopback_mode = LOOPBACK_BMAC;
		bnx2x_phy_init(&bp->link_params, &bp->link_vars);
		break;
	default:
		return -EINVAL;
	}

	/* prepare the loopback packet */
	pkt_size = (((bp->dev->mtu < ETH_MAX_PACKET_SIZE) ?
		     bp->dev->mtu : ETH_MAX_PACKET_SIZE) + ETH_HLEN);
	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
	if (!skb) {
		rc = -ENOMEM;
		goto test_loopback_exit;
	}
	packet = skb_put(skb, pkt_size);
	memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
	memset(packet + ETH_ALEN, 0, ETH_ALEN);
	memset(packet + 2*ETH_ALEN, 0x77, (ETH_HLEN - 2*ETH_ALEN));
	for (i = ETH_HLEN; i < pkt_size; i++)
		packet[i] = (unsigned char) (i & 0xff);

	/* send the loopback packet */
	num_pkts = 0;
	tx_start_idx = le16_to_cpu(*fp_tx->tx_cons_sb);
	rx_start_idx = le16_to_cpu(*fp_rx->rx_cons_sb);

	pkt_prod = fp_tx->tx_pkt_prod++;
	tx_buf = &fp_tx->tx_buf_ring[TX_BD(pkt_prod)];
	tx_buf->first_bd = fp_tx->tx_bd_prod;
	tx_buf->skb = skb;
	tx_buf->flags = 0;

	bd_prod = TX_BD(fp_tx->tx_bd_prod);
	tx_start_bd = &fp_tx->tx_desc_ring[bd_prod].start_bd;
	mapping = pci_map_single(bp->pdev, skb->data,
				 skb_headlen(skb), PCI_DMA_TODEVICE);
	tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
	tx_start_bd->nbd = cpu_to_le16(2); /* start + pbd */
	tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
	tx_start_bd->vlan = cpu_to_le16(pkt_prod);
	tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
	tx_start_bd->general_data = ((UNICAST_ADDRESS <<
				ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT) | 1);

	/* turn on parsing and get a BD */
	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
	pbd = &fp_tx->tx_desc_ring[bd_prod].parse_bd;

	memset(pbd, 0, sizeof(struct eth_tx_parse_bd));

	wmb();

	fp_tx->tx_db.data.prod += 2;
	barrier();
	DOORBELL(bp, fp_tx->index, fp_tx->tx_db.raw);

	mmiowb();

	num_pkts++;
	fp_tx->tx_bd_prod += 2; /* start + pbd */

	udelay(100);

	tx_idx = le16_to_cpu(*fp_tx->tx_cons_sb);
	if (tx_idx != tx_start_idx + num_pkts)
		goto test_loopback_exit;

	rx_idx = le16_to_cpu(*fp_rx->rx_cons_sb);
	if (rx_idx != rx_start_idx + num_pkts)
		goto test_loopback_exit;

	cqe = &fp_rx->rx_comp_ring[RCQ_BD(fp_rx->rx_comp_cons)];
	cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
	if (CQE_TYPE(cqe_fp_flags) || (cqe_fp_flags & ETH_RX_ERROR_FALGS))
		goto test_loopback_rx_exit;

	len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
	if (len != pkt_size)
		goto test_loopback_rx_exit;

	rx_buf = &fp_rx->rx_buf_ring[RX_BD(fp_rx->rx_bd_cons)];
	skb = rx_buf->skb;
	skb_reserve(skb, cqe->fast_path_cqe.placement_offset);
	for (i = ETH_HLEN; i < pkt_size; i++)
		if (*(skb->data + i) != (unsigned char) (i & 0xff))
			goto test_loopback_rx_exit;

	rc = 0;

test_loopback_rx_exit:

	fp_rx->rx_bd_cons = NEXT_RX_IDX(fp_rx->rx_bd_cons);
	fp_rx->rx_bd_prod = NEXT_RX_IDX(fp_rx->rx_bd_prod);
	fp_rx->rx_comp_cons = NEXT_RCQ_IDX(fp_rx->rx_comp_cons);
	fp_rx->rx_comp_prod = NEXT_RCQ_IDX(fp_rx->rx_comp_prod);

	/* Update producers */
	bnx2x_update_rx_prod(bp, fp_rx, fp_rx->rx_bd_prod, fp_rx->rx_comp_prod,
			     fp_rx->rx_sge_prod);

test_loopback_exit:
	bp->link_params.loopback_mode = LOOPBACK_NONE;

	return rc;
}
static int bnx2x_test_loopback(struct bnx2x *bp, u8 link_up)
{
	int rc = 0, res;

	if (!netif_running(bp->dev))
		return BNX2X_LOOPBACK_FAILED;

	bnx2x_netif_stop(bp, 1);
	bnx2x_acquire_phy_lock(bp);

	res = bnx2x_run_loopback(bp, BNX2X_PHY_LOOPBACK, link_up);
	if (res) {
		DP(NETIF_MSG_PROBE, "  PHY loopback failed  (res %d)\n", res);
		rc |= BNX2X_PHY_LOOPBACK_FAILED;
	}

	res = bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK, link_up);
	if (res) {
		DP(NETIF_MSG_PROBE, "  MAC loopback failed  (res %d)\n", res);
		rc |= BNX2X_MAC_LOOPBACK_FAILED;
	}

	bnx2x_release_phy_lock(bp);
	bnx2x_netif_start(bp);

	return rc;
}
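
/* NVRAM self-test: check the 0x669955aa signature in the first dword,
 * then CRC each directory region listed in nvram_tbl; a region passes
 * when its little-endian CRC32 equals the standard residual value.
 */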
#define CRC32_RESIDUAL			0xdebb20e3

static int bnx2x_test_nvram(struct bnx2x *bp)
{
	static const struct {
		int offset;
		int size;
	} nvram_tbl[] = {
		{     0,  0x14 }, /* bootstrap */
		{  0x14,  0xec }, /* dir */
		{ 0x100, 0x350 }, /* manuf_info */
		{ 0x450,  0xf0 }, /* feature_info */
		{ 0x640,  0x64 }, /* upgrade_key_info */
		{ 0x6a4,  0x64 },
		{ 0x708,  0x70 }, /* manuf_key_info */
		{ 0x778,  0x70 },
		{     0,     0 }
	};
	__be32 buf[0x350 / 4];
	u8 *data = (u8 *)buf;
	int i, rc;
	u32 magic, crc;

	rc = bnx2x_nvram_read(bp, 0, data, 4);
	if (rc) {
		DP(NETIF_MSG_PROBE, "magic value read (rc %d)\n", rc);
		goto test_nvram_exit;
	}

	magic = be32_to_cpu(buf[0]);
	if (magic != 0x669955aa) {
		DP(NETIF_MSG_PROBE, "magic value (0x%08x)\n", magic);
		rc = -ENODEV;
		goto test_nvram_exit;
	}

	for (i = 0; nvram_tbl[i].size; i++) {

		rc = bnx2x_nvram_read(bp, nvram_tbl[i].offset, data,
				      nvram_tbl[i].size);
		if (rc) {
			DP(NETIF_MSG_PROBE,
			   "nvram_tbl[%d] read data (rc %d)\n", i, rc);
			goto test_nvram_exit;
		}

		crc = ether_crc_le(nvram_tbl[i].size, data);
		if (crc != CRC32_RESIDUAL) {
			DP(NETIF_MSG_PROBE,
			   "nvram_tbl[%d] crc value (0x%08x)\n", i, crc);
			rc = -ENODEV;
			goto test_nvram_exit;
		}
	}

test_nvram_exit:
	return rc;
}
static int bnx2x_test_intr(struct bnx2x *bp)
{
	struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
	int i, rc;

	if (!netif_running(bp->dev))
		return -ENODEV;

	config->hdr.length = 0;
	if (CHIP_IS_E1(bp))
		/* use last unicast entries */
		config->hdr.offset = (BP_PORT(bp) ? 63 : 31);
	else
		config->hdr.offset = BP_FUNC(bp);
	config->hdr.client_id = bp->fp->cl_id;
	config->hdr.reserved1 = 0;

	bp->set_mac_pending++;
	smp_wmb();
	rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
			   U64_HI(bnx2x_sp_mapping(bp, mac_config)),
			   U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
	if (rc == 0) {
		for (i = 0; i < 10; i++) {
			if (!bp->set_mac_pending)
				break;
			smp_rmb();
			msleep_interruptible(10);
		}
		if (i == 10)
			rc = -ENODEV;
	}

	return rc;
}
static void bnx2x_self_test(struct net_device *dev,
			    struct ethtool_test *etest, u64 *buf)
{
	struct bnx2x *bp = netdev_priv(dev);

	memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS);

	if (!netif_running(dev))
		return;

	/* offline tests are not supported in MF mode */
	if (IS_E1HMF(bp))
		etest->flags &= ~ETH_TEST_FL_OFFLINE;

	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		int port = BP_PORT(bp);
		u32 val;
		u8 link_up;

		/* save current value of input enable for TX port IF */
		val = REG_RD(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4);
		/* disable input for TX port IF */
		REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, 0);

		link_up = (bnx2x_link_test(bp) == 0);
		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
		bnx2x_nic_load(bp, LOAD_DIAG);
		/* wait until link state is restored */
		bnx2x_wait_for_link(bp, link_up);

		if (bnx2x_test_registers(bp) != 0) {
			buf[0] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		if (bnx2x_test_memory(bp) != 0) {
			buf[1] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		buf[2] = bnx2x_test_loopback(bp, link_up);
		if (buf[2] != 0)
			etest->flags |= ETH_TEST_FL_FAILED;

		bnx2x_nic_unload(bp, UNLOAD_NORMAL);

		/* restore input for TX port IF */
		REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, val);

		bnx2x_nic_load(bp, LOAD_NORMAL);
		/* wait until link state is restored */
		bnx2x_wait_for_link(bp, link_up);
	}
	if (bnx2x_test_nvram(bp) != 0) {
		buf[3] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	if (bnx2x_test_intr(bp) != 0) {
		buf[4] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	if (bp->port.pmf)
		if (bnx2x_link_test(bp) != 0) {
			buf[5] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}

#ifdef BNX2X_EXTRA_DEBUG
	bnx2x_panic_dump(bp);
#endif
}
static const struct {
	long offset;
	int size;
	u8 string[ETH_GSTRING_LEN];
} bnx2x_q_stats_arr[BNX2X_NUM_Q_STATS] = {
/* 1 */	{ Q_STATS_OFFSET32(total_bytes_received_hi), 8, "[%d]: rx_bytes" },
	{ Q_STATS_OFFSET32(error_bytes_received_hi),
						8, "[%d]: rx_error_bytes" },
	{ Q_STATS_OFFSET32(total_unicast_packets_received_hi),
						8, "[%d]: rx_ucast_packets" },
	{ Q_STATS_OFFSET32(total_multicast_packets_received_hi),
						8, "[%d]: rx_mcast_packets" },
	{ Q_STATS_OFFSET32(total_broadcast_packets_received_hi),
						8, "[%d]: rx_bcast_packets" },
	{ Q_STATS_OFFSET32(no_buff_discard_hi),	8, "[%d]: rx_discards" },
	{ Q_STATS_OFFSET32(rx_err_discard_pkt),
					4, "[%d]: rx_phy_ip_err_discards"},
	{ Q_STATS_OFFSET32(rx_skb_alloc_failed),
					4, "[%d]: rx_skb_alloc_discard" },
	{ Q_STATS_OFFSET32(hw_csum_err), 4, "[%d]: rx_csum_offload_errors" },

/* 10 */{ Q_STATS_OFFSET32(total_bytes_transmitted_hi),	8, "[%d]: tx_bytes" },
	{ Q_STATS_OFFSET32(total_unicast_packets_transmitted_hi),
						8, "[%d]: tx_packets" }
};
static const struct {
	long offset;
	int size;
	u32 flags;
#define STATS_FLAGS_PORT		1
#define STATS_FLAGS_FUNC		2
#define STATS_FLAGS_BOTH		(STATS_FLAGS_FUNC | STATS_FLAGS_PORT)
	u8 string[ETH_GSTRING_LEN];
} bnx2x_stats_arr[BNX2X_NUM_STATS] = {
/* 1 */	{ STATS_OFFSET32(total_bytes_received_hi), 8, STATS_FLAGS_BOTH, "rx_bytes" },
	{ STATS_OFFSET32(error_bytes_received_hi), 8, STATS_FLAGS_BOTH, "rx_error_bytes" },
	{ STATS_OFFSET32(total_unicast_packets_received_hi), 8, STATS_FLAGS_BOTH, "rx_ucast_packets" },
	{ STATS_OFFSET32(total_multicast_packets_received_hi), 8, STATS_FLAGS_BOTH, "rx_mcast_packets" },
	{ STATS_OFFSET32(total_broadcast_packets_received_hi), 8, STATS_FLAGS_BOTH, "rx_bcast_packets" },
	{ STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi), 8, STATS_FLAGS_PORT, "rx_crc_errors" },
	{ STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi), 8, STATS_FLAGS_PORT, "rx_align_errors" },
	{ STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi), 8, STATS_FLAGS_PORT, "rx_undersize_packets" },
	{ STATS_OFFSET32(etherstatsoverrsizepkts_hi), 8, STATS_FLAGS_PORT, "rx_oversize_packets" },
/* 10 */{ STATS_OFFSET32(rx_stat_etherstatsfragments_hi), 8, STATS_FLAGS_PORT, "rx_fragments" },
	{ STATS_OFFSET32(rx_stat_etherstatsjabbers_hi), 8, STATS_FLAGS_PORT, "rx_jabbers" },
	{ STATS_OFFSET32(no_buff_discard_hi), 8, STATS_FLAGS_BOTH, "rx_discards" },
	{ STATS_OFFSET32(mac_filter_discard), 4, STATS_FLAGS_PORT, "rx_filtered_packets" },
	{ STATS_OFFSET32(xxoverflow_discard), 4, STATS_FLAGS_PORT, "rx_fw_discards" },
	{ STATS_OFFSET32(brb_drop_hi), 8, STATS_FLAGS_PORT, "rx_brb_discard" },
	{ STATS_OFFSET32(brb_truncate_hi), 8, STATS_FLAGS_PORT, "rx_brb_truncate" },
	{ STATS_OFFSET32(pause_frames_received_hi), 8, STATS_FLAGS_PORT, "rx_pause_frames" },
	{ STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi), 8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
	{ STATS_OFFSET32(nig_timer_max), 4, STATS_FLAGS_PORT, "rx_constant_pause_events" },
/* 20 */{ STATS_OFFSET32(rx_err_discard_pkt), 4, STATS_FLAGS_BOTH, "rx_phy_ip_err_discards"},
	{ STATS_OFFSET32(rx_skb_alloc_failed), 4, STATS_FLAGS_BOTH, "rx_skb_alloc_discard" },
	{ STATS_OFFSET32(hw_csum_err), 4, STATS_FLAGS_BOTH, "rx_csum_offload_errors" },

	{ STATS_OFFSET32(total_bytes_transmitted_hi), 8, STATS_FLAGS_BOTH, "tx_bytes" },
	{ STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi), 8, STATS_FLAGS_PORT, "tx_error_bytes" },
	{ STATS_OFFSET32(total_unicast_packets_transmitted_hi), 8, STATS_FLAGS_BOTH, "tx_packets" },
	{ STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi), 8, STATS_FLAGS_PORT, "tx_mac_errors" },
	{ STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi), 8, STATS_FLAGS_PORT, "tx_carrier_errors" },
	{ STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi), 8, STATS_FLAGS_PORT, "tx_single_collisions" },
	{ STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi), 8, STATS_FLAGS_PORT, "tx_multi_collisions" },
/* 30 */{ STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi), 8, STATS_FLAGS_PORT, "tx_deferred" },
	{ STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi), 8, STATS_FLAGS_PORT, "tx_excess_collisions" },
	{ STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi), 8, STATS_FLAGS_PORT, "tx_late_collisions" },
	{ STATS_OFFSET32(tx_stat_etherstatscollisions_hi), 8, STATS_FLAGS_PORT, "tx_total_collisions" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi), 8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi), 8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi), 8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi), 8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi), 8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
	{ STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi), 8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
/* 40 */{ STATS_OFFSET32(etherstatspktsover1522octets_hi), 8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
	{ STATS_OFFSET32(pause_frames_sent_hi), 8, STATS_FLAGS_PORT, "tx_pause_frames" }
};
#define IS_PORT_STAT(i) \
	((bnx2x_stats_arr[i].flags & STATS_FLAGS_BOTH) == STATS_FLAGS_PORT)
#define IS_FUNC_STAT(i)		(bnx2x_stats_arr[i].flags & STATS_FLAGS_FUNC)
#define IS_E1HMF_MODE_STAT(bp) \
			(IS_E1HMF(bp) && !(bp->msg_enable & BNX2X_MSG_STATS))

static int bnx2x_get_sset_count(struct net_device *dev, int stringset)
{
	struct bnx2x *bp = netdev_priv(dev);
	int i, num_stats;

	switch (stringset) {
	case ETH_SS_STATS:
		if (is_multi(bp)) {
			num_stats = BNX2X_NUM_Q_STATS * bp->num_queues;
			if (!IS_E1HMF_MODE_STAT(bp))
				num_stats += BNX2X_NUM_STATS;
		} else {
			if (IS_E1HMF_MODE_STAT(bp)) {
				num_stats = 0;
				for (i = 0; i < BNX2X_NUM_STATS; i++)
					if (IS_FUNC_STAT(i))
						num_stats++;
			} else
				num_stats = BNX2X_NUM_STATS;
		}
		return num_stats;

	case ETH_SS_TEST:
		return BNX2X_NUM_TESTS;

	default:
		return -EINVAL;
	}
}
static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
{
	struct bnx2x *bp = netdev_priv(dev);
	int i, j, k;

	switch (stringset) {
	case ETH_SS_STATS:
		if (is_multi(bp)) {
			k = 0;
			for_each_queue(bp, i) {
				for (j = 0; j < BNX2X_NUM_Q_STATS; j++)
					sprintf(buf + (k + j)*ETH_GSTRING_LEN,
						bnx2x_q_stats_arr[j].string, i);
				k += BNX2X_NUM_Q_STATS;
			}
			if (IS_E1HMF_MODE_STAT(bp))
				break;
			for (j = 0; j < BNX2X_NUM_STATS; j++)
				strcpy(buf + (k + j)*ETH_GSTRING_LEN,
				       bnx2x_stats_arr[j].string);
		} else {
			for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
				if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
					continue;
				strcpy(buf + j*ETH_GSTRING_LEN,
				       bnx2x_stats_arr[i].string);
				j++;
			}
		}
		break;

	case ETH_SS_TEST:
		memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr));
		break;
	}
}
static void bnx2x_get_ethtool_stats(struct net_device *dev,
				    struct ethtool_stats *stats, u64 *buf)
{
	struct bnx2x *bp = netdev_priv(dev);
	u32 *hw_stats, *offset;
	int i, j, k;

	if (is_multi(bp)) {
		k = 0;
		for_each_queue(bp, i) {
			hw_stats = (u32 *)&bp->fp[i].eth_q_stats;
			for (j = 0; j < BNX2X_NUM_Q_STATS; j++) {
				if (bnx2x_q_stats_arr[j].size == 0) {
					/* skip this counter */
					buf[k + j] = 0;
					continue;
				}
				offset = (hw_stats +
					  bnx2x_q_stats_arr[j].offset);
				if (bnx2x_q_stats_arr[j].size == 4) {
					/* 4-byte counter */
					buf[k + j] = (u64) *offset;
					continue;
				}
				/* 8-byte counter */
				buf[k + j] = HILO_U64(*offset, *(offset + 1));
			}
			k += BNX2X_NUM_Q_STATS;
		}
		if (IS_E1HMF_MODE_STAT(bp))
			return;
		hw_stats = (u32 *)&bp->eth_stats;
		for (j = 0; j < BNX2X_NUM_STATS; j++) {
			if (bnx2x_stats_arr[j].size == 0) {
				/* skip this counter */
				buf[k + j] = 0;
				continue;
			}
			offset = (hw_stats + bnx2x_stats_arr[j].offset);
			if (bnx2x_stats_arr[j].size == 4) {
				/* 4-byte counter */
				buf[k + j] = (u64) *offset;
				continue;
			}
			/* 8-byte counter */
			buf[k + j] = HILO_U64(*offset, *(offset + 1));
		}
	} else {
		hw_stats = (u32 *)&bp->eth_stats;
		for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
			if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
				continue;
			if (bnx2x_stats_arr[i].size == 0) {
				/* skip this counter */
				buf[j] = 0;
				j++;
				continue;
			}
			offset = (hw_stats + bnx2x_stats_arr[i].offset);
			if (bnx2x_stats_arr[i].size == 4) {
				/* 4-byte counter */
				buf[j] = (u64) *offset;
				j++;
				continue;
			}
			/* 8-byte counter */
			buf[j] = HILO_U64(*offset, *(offset + 1));
			j++;
		}
	}
}
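
/* Illustrative sketch (not part of the driver): the wide counters above are
 * kept as two adjacent 32-bit words, high word first, and HILO_U64() merges
 * them into one u64 for ethtool.  Assuming the usual definition
 * HILO_U64(hi, lo) == (((u64)(hi) << 32) + (lo)), reading a descriptor whose
 * size is 8 reduces to the hypothetical helper below.
 */
#if 0
static u64 example_read_wide_counter(const u32 *stats_base, long offset32)
{
	const u32 *p = stats_base + offset32;	/* points at the _hi word */

	return ((u64)p[0] << 32) + p[1];	/* high word, then low word */
}
#endif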
static int bnx2x_phys_id(struct net_device *dev, u32 data)
{
	struct bnx2x *bp = netdev_priv(dev);
	int i;

	if (!netif_running(dev))
		return 0;

	if (!bp->port.pmf)
		return 0;

	if (data == 0)
		data = 2;

	for (i = 0; i < (data * 2); i++) {
		if ((i % 2) == 0)
			bnx2x_set_led(&bp->link_params, LED_MODE_OPER,
				      SPEED_1000);
		else
			bnx2x_set_led(&bp->link_params, LED_MODE_OFF, 0);

		msleep_interruptible(500);
		if (signal_pending(current))
			break;
	}

	if (bp->link_vars.link_up)
		bnx2x_set_led(&bp->link_params, LED_MODE_OPER,
			      bp->link_vars.line_speed);

	return 0;
}
static const struct ethtool_ops bnx2x_ethtool_ops = {
	.get_settings		= bnx2x_get_settings,
	.set_settings		= bnx2x_set_settings,
	.get_drvinfo		= bnx2x_get_drvinfo,
	.get_regs_len		= bnx2x_get_regs_len,
	.get_regs		= bnx2x_get_regs,
	.get_wol		= bnx2x_get_wol,
	.set_wol		= bnx2x_set_wol,
	.get_msglevel		= bnx2x_get_msglevel,
	.set_msglevel		= bnx2x_set_msglevel,
	.nway_reset		= bnx2x_nway_reset,
	.get_link		= bnx2x_get_link,
	.get_eeprom_len		= bnx2x_get_eeprom_len,
	.get_eeprom		= bnx2x_get_eeprom,
	.set_eeprom		= bnx2x_set_eeprom,
	.get_coalesce		= bnx2x_get_coalesce,
	.set_coalesce		= bnx2x_set_coalesce,
	.get_ringparam		= bnx2x_get_ringparam,
	.set_ringparam		= bnx2x_set_ringparam,
	.get_pauseparam		= bnx2x_get_pauseparam,
	.set_pauseparam		= bnx2x_set_pauseparam,
	.get_rx_csum		= bnx2x_get_rx_csum,
	.set_rx_csum		= bnx2x_set_rx_csum,
	.get_tx_csum		= ethtool_op_get_tx_csum,
	.set_tx_csum		= ethtool_op_set_tx_hw_csum,
	.set_flags		= bnx2x_set_flags,
	.get_flags		= ethtool_op_get_flags,
	.get_sg			= ethtool_op_get_sg,
	.set_sg			= ethtool_op_set_sg,
	.get_tso		= ethtool_op_get_tso,
	.set_tso		= bnx2x_set_tso,
	.self_test		= bnx2x_self_test,
	.get_sset_count		= bnx2x_get_sset_count,
	.get_strings		= bnx2x_get_strings,
	.phys_id		= bnx2x_phys_id,
	.get_ethtool_stats	= bnx2x_get_ethtool_stats,
};

/* end of ethtool_ops */

/****************************************************************************
* General service functions
****************************************************************************/
static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
{
	u16 pmcsr;

	pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);

	switch (state) {
	case PCI_D0:
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
				      ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
				       PCI_PM_CTRL_PME_STATUS));

		if (pmcsr & PCI_PM_CTRL_STATE_MASK)
			/* delay required during transition out of D3hot */
			msleep(20);
		break;

	case PCI_D3hot:
		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
		pmcsr |= 3;

		if (bp->wol)
			pmcsr |= PCI_PM_CTRL_PME_ENABLE;

		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
				      pmcsr);

		/* No more memory access after this point until
		 * device is brought back to D0.
		 */
		break;

	default:
		return -EINVAL;
	}
	return 0;
}

static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp)
{
	u16 rx_cons_sb;

	/* Tell compiler that status block fields can change */
	barrier();
	rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
	if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
		rx_cons_sb++;
	return (fp->rx_comp_cons != rx_cons_sb);
}
/*
 * net_device service functions
 */

static int bnx2x_poll(struct napi_struct *napi, int budget)
{
	int work_done = 0;
	struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
						 napi);
	struct bnx2x *bp = fp->bp;

	while (1) {
#ifdef BNX2X_STOP_ON_ERROR
		if (unlikely(bp->panic)) {
			napi_complete(napi);
			return 0;
		}
#endif

		if (bnx2x_has_tx_work(fp))
			bnx2x_tx_int(fp);

		if (bnx2x_has_rx_work(fp)) {
			work_done += bnx2x_rx_int(fp, budget - work_done);

			/* must not complete if we consumed full budget */
			if (work_done >= budget)
				break;
		}

		/* Fall out from the NAPI loop if needed */
		if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
			bnx2x_update_fpsb_idx(fp);
		/* bnx2x_has_rx_work() reads the status block, thus we need
		 * to ensure that status block indices have been actually read
		 * (bnx2x_update_fpsb_idx) prior to this check
		 * (bnx2x_has_rx_work) so that we won't write the "newer"
		 * value of the status block to IGU (if there was a DMA right
		 * after bnx2x_has_rx_work and if there is no rmb, the memory
		 * reading (bnx2x_update_fpsb_idx) may be postponed to right
		 * before bnx2x_ack_sb). In this case there will never be
		 * another interrupt until there is another update of the
		 * status block, while there is still unhandled work.
		 */
			rmb();

			if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
				napi_complete(napi);
				/* Re-enable interrupts */
				bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
					     le16_to_cpu(fp->fp_c_idx),
					     IGU_INT_NOP, 1);
				bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
					     le16_to_cpu(fp->fp_u_idx),
					     IGU_INT_ENABLE, 1);
				break;
			}
		}
	}

	return work_done;
}
/* we split the first BD into headers and data BDs
 * to ease the pain of our fellow microcode engineers
 * we use one mapping for both BDs
 * So far this has only been observed to happen
 * in Other Operating Systems(TM)
 */
static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
				   struct bnx2x_fastpath *fp,
				   struct sw_tx_bd *tx_buf,
				   struct eth_tx_start_bd **tx_bd, u16 hlen,
				   u16 bd_prod, int nbd)
{
	struct eth_tx_start_bd *h_tx_bd = *tx_bd;
	struct eth_tx_bd *d_tx_bd;
	dma_addr_t mapping;
	int old_len = le16_to_cpu(h_tx_bd->nbytes);

	/* first fix first BD */
	h_tx_bd->nbd = cpu_to_le16(nbd);
	h_tx_bd->nbytes = cpu_to_le16(hlen);

	DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
	   "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
	   h_tx_bd->addr_lo, h_tx_bd->nbd);

	/* now get a new data BD
	 * (after the pbd) and fill it */
	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
	d_tx_bd = &fp->tx_desc_ring[bd_prod].reg_bd;

	mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
			   le32_to_cpu(h_tx_bd->addr_lo)) + hlen;

	d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
	d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);

	/* this marks the BD as one that has no individual mapping */
	tx_buf->flags |= BNX2X_TSO_SPLIT_BD;

	DP(NETIF_MSG_TX_QUEUED,
	   "TSO split data size is %d (%x:%x)\n",
	   d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);

	/* update tx_bd */
	*tx_bd = (struct eth_tx_start_bd *)d_tx_bd;

	return bd_prod;
}
static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
{
	if (fix > 0)
		csum = (u16) ~csum_fold(csum_sub(csum,
				csum_partial(t_header - fix, fix, 0)));

	else if (fix < 0)
		csum = (u16) ~csum_fold(csum_add(csum,
				csum_partial(t_header, -fix, 0)));

	return swab16(csum);
}

static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
{
	u32 rc;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		rc = XMIT_PLAIN;

	else {
		if (skb->protocol == htons(ETH_P_IPV6)) {
			rc = XMIT_CSUM_V6;
			if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
				rc |= XMIT_CSUM_TCP;

		} else {
			rc = XMIT_CSUM_V4;
			if (ip_hdr(skb)->protocol == IPPROTO_TCP)
				rc |= XMIT_CSUM_TCP;
		}
	}

	if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
		rc |= (XMIT_GSO_V4 | XMIT_CSUM_V4 | XMIT_CSUM_TCP);

	else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
		rc |= (XMIT_GSO_V6 | XMIT_CSUM_TCP | XMIT_CSUM_V6);

	return rc;
}
#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
/* check if packet requires linearization (packet is too fragmented)
   no need to check fragmentation if page size > 8K (there will be no
   violation to FW restrictions) */
static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
			     u32 xmit_type)
{
	int to_copy = 0;
	int hlen = 0;
	int first_bd_sz = 0;

	/* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
	if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {

		if (xmit_type & XMIT_GSO) {
			unsigned short lso_mss = skb_shinfo(skb)->gso_size;
			/* Check if LSO packet needs to be copied:
			   3 = 1 (for headers BD) + 2 (for PBD and last BD) */
			int wnd_size = MAX_FETCH_BD - 3;
			/* Number of windows to check */
			int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
			int wnd_idx = 0;
			int frag_idx = 0;
			u32 wnd_sum = 0;

			/* Headers length */
			hlen = (int)(skb_transport_header(skb) - skb->data) +
				tcp_hdrlen(skb);

			/* Amount of data (w/o headers) on linear part of SKB*/
			first_bd_sz = skb_headlen(skb) - hlen;

			wnd_sum = first_bd_sz;

			/* Calculate the first sum - it's special */
			for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
				wnd_sum +=
					skb_shinfo(skb)->frags[frag_idx].size;

			/* If there was data on linear skb data - check it */
			if (first_bd_sz > 0) {
				if (unlikely(wnd_sum < lso_mss)) {
					to_copy = 1;
					goto exit_lbl;
				}

				wnd_sum -= first_bd_sz;
			}

			/* Others are easier: run through the frag list and
			   check all windows */
			for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
				wnd_sum +=
				  skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;

				if (unlikely(wnd_sum < lso_mss)) {
					to_copy = 1;
					break;
				}
				wnd_sum -=
					skb_shinfo(skb)->frags[wnd_idx].size;
			}
		} else {
			/* in non-LSO too fragmented packet should always
			   be linearized */
			to_copy = 1;
		}
	}

exit_lbl:
	if (unlikely(to_copy))
		DP(NETIF_MSG_TX_QUEUED,
		   "Linearization IS REQUIRED for %s packet. "
		   "num_frags %d  hlen %d  first_bd_sz %d\n",
		   (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
		   skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);

	return to_copy;
}
#endif
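
/* Illustrative sketch (not part of the driver): the check above slides a
 * window of (MAX_FETCH_BD - 3) buffer descriptors over the fragment list and
 * requires every window to carry at least one MSS worth of payload, otherwise
 * the packet is linearized.  A hypothetical standalone version over a plain
 * array of fragment sizes (ignoring the linear-part special case) could look
 * like this:
 */
#if 0
static int example_needs_linearization(const unsigned int *frag_sz, int nfrags,
				       unsigned int mss, int wnd_size)
{
	unsigned int wnd_sum = 0;
	int i;

	if (nfrags < wnd_size)
		return 0;	/* few enough BDs - FW can always fetch them */

	for (i = 0; i < wnd_size; i++)
		wnd_sum += frag_sz[i];

	for (i = 0; ; i++) {
		if (wnd_sum < mss)
			return 1;	/* some window holds less than one MSS */
		if (i + wnd_size >= nfrags)
			return 0;
		/* slide the window by one fragment */
		wnd_sum += frag_sz[i + wnd_size] - frag_sz[i];
	}
}
#endif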
/* called with netif_tx_lock
 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
 * netif_wake_queue()
 */
static netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct bnx2x_fastpath *fp;
	struct netdev_queue *txq;
	struct sw_tx_bd *tx_buf;
	struct eth_tx_start_bd *tx_start_bd;
	struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
	struct eth_tx_parse_bd *pbd = NULL;
	u16 pkt_prod, bd_prod;
	int nbd, fp_index;
	dma_addr_t mapping;
	u32 xmit_type = bnx2x_xmit_type(bp, skb);
	int i;
	u8 hlen = 0;
	__le16 pkt_size = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return NETDEV_TX_BUSY;
#endif

	fp_index = skb_get_queue_mapping(skb);
	txq = netdev_get_tx_queue(dev, fp_index);

	fp = &bp->fp[fp_index];

	if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) {
		fp->eth_q_stats.driver_xoff++;
		netif_tx_stop_queue(txq);
		BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
		return NETDEV_TX_BUSY;
	}

	DP(NETIF_MSG_TX_QUEUED, "SKB: summed %x  protocol %x  protocol(%x,%x)"
	   "  gso type %x  xmit_type %x\n",
	   skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
	   ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);

#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
	/* First, check if we need to linearize the skb (due to FW
	   restrictions). No need to check fragmentation if page size > 8K
	   (there will be no violation to FW restrictions) */
	if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
		/* Statistics of linearization */
		bp->lin_cnt++;
		if (skb_linearize(skb) != 0) {
			DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
			   "silently dropping this SKB\n");
			dev_kfree_skb_any(skb);
			return NETDEV_TX_OK;
		}
	}
#endif

	/*
	Please read carefully. First we use one BD which we mark as start,
	then we have a parsing info BD (used for TSO or xsum),
	and only then we have the rest of the TSO BDs.
	(don't forget to mark the last one as last,
	and to unmap only AFTER you write to the BD ...)
	And above all, all pdb sizes are in words - NOT DWORDS!
	*/

	pkt_prod = fp->tx_pkt_prod++;
	bd_prod = TX_BD(fp->tx_bd_prod);

	/* get a tx_buf and first BD */
	tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
	tx_start_bd = &fp->tx_desc_ring[bd_prod].start_bd;

	tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
	tx_start_bd->general_data = (UNICAST_ADDRESS <<
				     ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT);
	/* header nbd */
	tx_start_bd->general_data |= (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT);

	/* remember the first BD of the packet */
	tx_buf->first_bd = fp->tx_bd_prod;
	tx_buf->skb = skb;
	tx_buf->flags = 0;

	DP(NETIF_MSG_TX_QUEUED,
	   "sending pkt %u @%p  next_idx %u  bd %u @%p\n",
	   pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_start_bd);

	if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb) &&
	    (bp->flags & HW_VLAN_TX_FLAG)) {
		tx_start_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
		tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG;
	} else
		tx_start_bd->vlan = cpu_to_le16(pkt_prod);

	/* turn on parsing and get a BD */
	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
	pbd = &fp->tx_desc_ring[bd_prod].parse_bd;

	memset(pbd, 0, sizeof(struct eth_tx_parse_bd));

	if (xmit_type & XMIT_CSUM) {
		hlen = (skb_network_header(skb) - skb->data) / 2;

		/* for now NS flag is not used in Linux */
		pbd->global_data =
			(hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
				 ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT));

		pbd->ip_hlen = (skb_transport_header(skb) -
				skb_network_header(skb)) / 2;

		hlen += pbd->ip_hlen + tcp_hdrlen(skb) / 2;

		pbd->total_hlen = cpu_to_le16(hlen);
		hlen = hlen*2;

		tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;

		if (xmit_type & XMIT_CSUM_V4)
			tx_start_bd->bd_flags.as_bitfield |=
						ETH_TX_BD_FLAGS_IP_CSUM;
		else
			tx_start_bd->bd_flags.as_bitfield |=
						ETH_TX_BD_FLAGS_IPV6;

		if (xmit_type & XMIT_CSUM_TCP) {
			pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);

		} else {
			s8 fix = SKB_CS_OFF(skb); /* signed! */

			pbd->global_data |= ETH_TX_PARSE_BD_UDP_CS_FLG;

			DP(NETIF_MSG_TX_QUEUED,
			   "hlen %d  fix %d  csum before fix %x\n",
			   le16_to_cpu(pbd->total_hlen), fix, SKB_CS(skb));

			/* HW bug: fixup the CSUM */
			pbd->tcp_pseudo_csum =
				bnx2x_csum_fix(skb_transport_header(skb),
					       SKB_CS(skb), fix);

			DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
			   pbd->tcp_pseudo_csum);
		}
	}

	mapping = pci_map_single(bp->pdev, skb->data,
				 skb_headlen(skb), PCI_DMA_TODEVICE);

	tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
	nbd = skb_shinfo(skb)->nr_frags + 2; /* start_bd + pbd + frags */
	tx_start_bd->nbd = cpu_to_le16(nbd);
	tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
	pkt_size = tx_start_bd->nbytes;

	DP(NETIF_MSG_TX_QUEUED, "first bd @%p  addr (%x:%x)  nbd %d"
	   "  nbytes %d  flags %x  vlan %x\n",
	   tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
	   le16_to_cpu(tx_start_bd->nbd), le16_to_cpu(tx_start_bd->nbytes),
	   tx_start_bd->bd_flags.as_bitfield, le16_to_cpu(tx_start_bd->vlan));

	if (xmit_type & XMIT_GSO) {

		DP(NETIF_MSG_TX_QUEUED,
		   "TSO packet len %d  hlen %d  total len %d  tso size %d\n",
		   skb->len, hlen, skb_headlen(skb),
		   skb_shinfo(skb)->gso_size);

		tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;

		if (unlikely(skb_headlen(skb) > hlen))
			bd_prod = bnx2x_tx_split(bp, fp, tx_buf, &tx_start_bd,
						 hlen, bd_prod, ++nbd);

		pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
		pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
		pbd->tcp_flags = pbd_tcp_flags(skb);

		if (xmit_type & XMIT_GSO_V4) {
			pbd->ip_id = swab16(ip_hdr(skb)->id);
			pbd->tcp_pseudo_csum =
				swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
							  ip_hdr(skb)->daddr,
							  0, IPPROTO_TCP, 0));

		} else
			pbd->tcp_pseudo_csum =
				swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
							&ipv6_hdr(skb)->daddr,
							0, IPPROTO_TCP, 0));

		pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN;
	}
	tx_data_bd = (struct eth_tx_bd *)tx_start_bd;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
		tx_data_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
		if (total_pkt_bd == NULL)
			total_pkt_bd = &fp->tx_desc_ring[bd_prod].reg_bd;

		mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
				       frag->size, PCI_DMA_TODEVICE);

		tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
		tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
		tx_data_bd->nbytes = cpu_to_le16(frag->size);
		le16_add_cpu(&pkt_size, frag->size);

		DP(NETIF_MSG_TX_QUEUED,
		   "frag %d  bd @%p  addr (%x:%x)  nbytes %d\n",
		   i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
		   le16_to_cpu(tx_data_bd->nbytes));
	}

	DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);

	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));

	/* now send a tx doorbell, counting the next BD
	 * if the packet contains or ends with it
	 */
	if (TX_BD_POFF(bd_prod) < nbd)
		nbd++;

	if (total_pkt_bd != NULL)
		total_pkt_bd->total_pkt_bytes = pkt_size;

	if (pbd)
		DP(NETIF_MSG_TX_QUEUED,
		   "PBD @%p  ip_data %x  ip_hlen %u  ip_id %u  lso_mss %u"
		   "  tcp_flags %x  xsum %x  seq %u  hlen %u\n",
		   pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id,
		   pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum,
		   pbd->tcp_send_seq, le16_to_cpu(pbd->total_hlen));

	DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d  bd %u\n", nbd, bd_prod);

	/*
	 * Make sure that the BD data is updated before updating the producer
	 * since FW might read the BD right after the producer is updated.
	 * This is only applicable for weak-ordered memory model archs such
	 * as IA-64. The following barrier is also mandatory since FW will
	 * assumes packets must have BDs.
	 */
	wmb();

	fp->tx_db.data.prod += nbd;
	barrier();
	DOORBELL(bp, fp->index, fp->tx_db.raw);

	mmiowb();

	fp->tx_bd_prod += nbd;

	if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
		netif_tx_stop_queue(txq);
		/* We want bnx2x_tx_int to "see" the updated tx_bd_prod
		   if we put Tx into XOFF state. */
		smp_mb();
		fp->eth_q_stats.driver_xoff++;
		if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
			netif_tx_wake_queue(txq);
	}
	fp->tx_pkt++;

	return NETDEV_TX_OK;
}
/* called with rtnl_lock */
static int bnx2x_open(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	netif_carrier_off(dev);

	bnx2x_set_power_state(bp, PCI_D0);

	return bnx2x_nic_load(bp, LOAD_OPEN);
}

/* called with rtnl_lock */
static int bnx2x_close(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	/* Unload the driver, release IRQs */
	bnx2x_nic_unload(bp, UNLOAD_CLOSE);
	if (atomic_read(&bp->pdev->enable_cnt) == 1)
		if (!CHIP_REV_IS_SLOW(bp))
			bnx2x_set_power_state(bp, PCI_D3hot);

	return 0;
}
/* called with netif_tx_lock from dev_mcast.c */
static void bnx2x_set_rx_mode(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	u32 rx_mode = BNX2X_RX_MODE_NORMAL;
	int port = BP_PORT(bp);

	if (bp->state != BNX2X_STATE_OPEN) {
		DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
		return;
	}

	DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);

	if (dev->flags & IFF_PROMISC)
		rx_mode = BNX2X_RX_MODE_PROMISC;

	else if ((dev->flags & IFF_ALLMULTI) ||
		 ((netdev_mc_count(dev) > BNX2X_MAX_MULTICAST) &&
		  CHIP_IS_E1(bp)))
		rx_mode = BNX2X_RX_MODE_ALLMULTI;

	else { /* some multicasts */
		if (CHIP_IS_E1(bp)) {
			int i, old, offset;
			struct dev_mc_list *mclist;
			struct mac_configuration_cmd *config =
						bnx2x_sp(bp, mcast_config);

			i = 0;
			netdev_for_each_mc_addr(mclist, dev) {
				config->config_table[i].cam_entry.msb_mac_addr =
					swab16(*(u16 *)&mclist->dmi_addr[0]);
				config->config_table[i].cam_entry.middle_mac_addr =
					swab16(*(u16 *)&mclist->dmi_addr[2]);
				config->config_table[i].cam_entry.lsb_mac_addr =
					swab16(*(u16 *)&mclist->dmi_addr[4]);
				config->config_table[i].cam_entry.flags =
					cpu_to_le16(port);
				config->config_table[i].target_table_entry.flags = 0;
				config->config_table[i].target_table_entry.
					clients_bit_vector =
						cpu_to_le32(1 << BP_L_ID(bp));
				config->config_table[i].target_table_entry.vlan_id = 0;

				DP(NETIF_MSG_IFUP,
				   "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
				   config->config_table[i].cam_entry.msb_mac_addr,
				   config->config_table[i].cam_entry.middle_mac_addr,
				   config->config_table[i].cam_entry.lsb_mac_addr);
				i++;
			}
			old = config->hdr.length;
			if (old > i) {
				for (; i < old; i++) {
					if (CAM_IS_INVALID(config->config_table[i])) {
						/* already invalidated */
						break;
					}
					/* invalidate */
					CAM_INVALIDATE(config->config_table[i]);
				}
			}

			if (CHIP_REV_IS_SLOW(bp))
				offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
			else
				offset = BNX2X_MAX_MULTICAST*(1 + port);

			config->hdr.length = i;
			config->hdr.offset = offset;
			config->hdr.client_id = bp->fp->cl_id;
			config->hdr.reserved1 = 0;

			bp->set_mac_pending++;
			smp_wmb();

			bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
				   U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
				   U64_LO(bnx2x_sp_mapping(bp, mcast_config)),
				      0);
		} else { /* E1H */
			/* Accept one or more multicasts */
			struct dev_mc_list *mclist;
			u32 mc_filter[MC_HASH_SIZE];
			u32 crc, bit, regidx;
			int i;

			memset(mc_filter, 0, 4 * MC_HASH_SIZE);

			netdev_for_each_mc_addr(mclist, dev) {
				DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
				   mclist->dmi_addr);

				crc = crc32c_le(0, mclist->dmi_addr, ETH_ALEN);
				bit = (crc >> 24) & 0xff;
				regidx = bit >> 5;
				bit &= 0x1f;
				mc_filter[regidx] |= (1 << bit);
			}

			for (i = 0; i < MC_HASH_SIZE; i++)
				REG_WR(bp, MC_HASH_OFFSET(bp, i), mc_filter[i]);
		}
	}

	bp->rx_mode = rx_mode;
	bnx2x_set_storm_rx_mode(bp);
}
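
/* Illustrative sketch (not part of the driver): on E1H the multicast filter
 * built above is a 256-bit hash.  The top byte of the little-endian CRC32C of
 * the MAC address selects one of 256 bits, spread over MC_HASH_SIZE 32-bit
 * registers.  A hypothetical standalone helper that computes (register index,
 * bit position) for one address would look like this:
 */
#if 0
static void example_mc_hash(const u8 *mac, u32 *regidx, u32 *bitpos)
{
	u32 crc = crc32c_le(0, mac, ETH_ALEN);
	u32 bit = (crc >> 24) & 0xff;	/* 0..255 */

	*regidx = bit >> 5;		/* which 32-bit filter register */
	*bitpos = bit & 0x1f;		/* which bit inside that register */
}
#endif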
/* called with rtnl_lock */
static int bnx2x_change_mac_addr(struct net_device *dev, void *p)
{
	struct sockaddr *addr = p;
	struct bnx2x *bp = netdev_priv(dev);

	if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
		return -EINVAL;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	if (netif_running(dev)) {
		if (CHIP_IS_E1(bp))
			bnx2x_set_eth_mac_addr_e1(bp, 1);
		else
			bnx2x_set_eth_mac_addr_e1h(bp, 1);
	}

	return 0;
}

/* called with rtnl_lock */
static int bnx2x_mdio_read(struct net_device *netdev, int prtad,
			   int devad, u16 addr)
{
	struct bnx2x *bp = netdev_priv(netdev);
	u16 value;
	int rc;
	u32 phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);

	DP(NETIF_MSG_LINK, "mdio_read: prtad 0x%x, devad 0x%x, addr 0x%x\n",
	   prtad, devad, addr);

	if (prtad != bp->mdio.prtad) {
		DP(NETIF_MSG_LINK, "prtad mismatch (cmd:0x%x != bp:0x%x)\n",
		   prtad, bp->mdio.prtad);
		return -EINVAL;
	}

	/* The HW expects different devad if CL22 is used */
	devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;

	bnx2x_acquire_phy_lock(bp);
	rc = bnx2x_cl45_read(bp, BP_PORT(bp), phy_type, prtad,
			     devad, addr, &value);
	bnx2x_release_phy_lock(bp);
	DP(NETIF_MSG_LINK, "mdio_read_val 0x%x rc = 0x%x\n", value, rc);

	if (!rc)
		rc = value;
	return rc;
}

/* called with rtnl_lock */
static int bnx2x_mdio_write(struct net_device *netdev, int prtad, int devad,
			    u16 addr, u16 value)
{
	struct bnx2x *bp = netdev_priv(netdev);
	u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
	int rc;

	DP(NETIF_MSG_LINK, "mdio_write: prtad 0x%x, devad 0x%x, addr 0x%x,"
			   " value 0x%x\n", prtad, devad, addr, value);

	if (prtad != bp->mdio.prtad) {
		DP(NETIF_MSG_LINK, "prtad mismatch (cmd:0x%x != bp:0x%x)\n",
		   prtad, bp->mdio.prtad);
		return -EINVAL;
	}

	/* The HW expects different devad if CL22 is used */
	devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;

	bnx2x_acquire_phy_lock(bp);
	rc = bnx2x_cl45_write(bp, BP_PORT(bp), ext_phy_type, prtad,
			      devad, addr, value);
	bnx2x_release_phy_lock(bp);

	return rc;
}
/* called with rtnl_lock */
static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct mii_ioctl_data *mdio = if_mii(ifr);

	DP(NETIF_MSG_LINK, "ioctl: phy id 0x%x, reg 0x%x, val_in 0x%x\n",
	   mdio->phy_id, mdio->reg_num, mdio->val_in);

	if (!netif_running(dev))
		return -EAGAIN;

	return mdio_mii_ioctl(&bp->mdio, mdio, cmd);
}

/* called with rtnl_lock */
static int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc = 0;

	if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
	    ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
		return -EINVAL;

	/* This does not race with packet allocation
	 * because the actual alloc size is
	 * only updated as part of load
	 */
	dev->mtu = new_mtu;

	if (netif_running(dev)) {
		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
		rc = bnx2x_nic_load(bp, LOAD_NORMAL);
	}

	return rc;
}

static void bnx2x_tx_timeout(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

#ifdef BNX2X_STOP_ON_ERROR
	if (!bp->panic)
		bnx2x_panic();
#endif
	/* This allows the netif to be shutdown gracefully before resetting */
	schedule_work(&bp->reset_task);
}

/* called with rtnl_lock */
static void bnx2x_vlan_rx_register(struct net_device *dev,
				   struct vlan_group *vlgrp)
{
	struct bnx2x *bp = netdev_priv(dev);

	bp->vlgrp = vlgrp;

	/* Set flags according to the required capabilities */
	bp->flags &= ~(HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);

	if (dev->features & NETIF_F_HW_VLAN_TX)
		bp->flags |= HW_VLAN_TX_FLAG;

	if (dev->features & NETIF_F_HW_VLAN_RX)
		bp->flags |= HW_VLAN_RX_FLAG;

	if (netif_running(dev))
		bnx2x_set_client_config(bp);
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void poll_bnx2x(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	disable_irq(bp->pdev->irq);
	bnx2x_interrupt(bp->pdev->irq, dev);
	enable_irq(bp->pdev->irq);
}
#endif

static const struct net_device_ops bnx2x_netdev_ops = {
	.ndo_open		= bnx2x_open,
	.ndo_stop		= bnx2x_close,
	.ndo_start_xmit		= bnx2x_start_xmit,
	.ndo_set_multicast_list	= bnx2x_set_rx_mode,
	.ndo_set_mac_address	= bnx2x_change_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_do_ioctl		= bnx2x_ioctl,
	.ndo_change_mtu		= bnx2x_change_mtu,
	.ndo_tx_timeout		= bnx2x_tx_timeout,
	.ndo_vlan_rx_register	= bnx2x_vlan_rx_register,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= poll_bnx2x,
#endif
};
static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
				    struct net_device *dev)
{
	struct bnx2x *bp;
	int rc;

	SET_NETDEV_DEV(dev, &pdev->dev);
	bp = netdev_priv(dev);

	bp->dev = dev;
	bp->pdev = pdev;
	bp->flags = 0;
	bp->func = PCI_FUNC(pdev->devfn);

	rc = pci_enable_device(pdev);
	if (rc) {
		pr_err("Cannot enable PCI device, aborting\n");
		goto err_out;
	}

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		pr_err("Cannot find PCI device base address, aborting\n");
		rc = -ENODEV;
		goto err_out_disable;
	}

	if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
		pr_err("Cannot find second PCI device base address, aborting\n");
		rc = -ENODEV;
		goto err_out_disable;
	}

	if (atomic_read(&pdev->enable_cnt) == 1) {
		rc = pci_request_regions(pdev, DRV_MODULE_NAME);
		if (rc) {
			pr_err("Cannot obtain PCI resources, aborting\n");
			goto err_out_disable;
		}

		pci_set_master(pdev);
		pci_save_state(pdev);
	}

	bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (bp->pm_cap == 0) {
		pr_err("Cannot find power management capability, aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
	if (bp->pcie_cap == 0) {
		pr_err("Cannot find PCI Express capability, aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0) {
		bp->flags |= USING_DAC_FLAG;
		if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) {
			pr_err("pci_set_consistent_dma_mask failed, aborting\n");
			rc = -EIO;
			goto err_out_release;
		}

	} else if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
		pr_err("System does not support DMA, aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	dev->mem_start = pci_resource_start(pdev, 0);
	dev->base_addr = dev->mem_start;
	dev->mem_end = pci_resource_end(pdev, 0);

	dev->irq = pdev->irq;

	bp->regview = pci_ioremap_bar(pdev, 0);
	if (!bp->regview) {
		pr_err("Cannot map register space, aborting\n");
		rc = -ENOMEM;
		goto err_out_release;
	}

	bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
					min_t(u64, BNX2X_DB_SIZE,
					      pci_resource_len(pdev, 2)));
	if (!bp->doorbells) {
		pr_err("Cannot map doorbell space, aborting\n");
		rc = -ENOMEM;
		goto err_out_unmap;
	}

	bnx2x_set_power_state(bp, PCI_D0);

	/* clean indirect addresses */
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
	REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);

	dev->watchdog_timeo = TX_TIMEOUT;

	dev->netdev_ops = &bnx2x_netdev_ops;
	dev->ethtool_ops = &bnx2x_ethtool_ops;
	dev->features |= NETIF_F_SG;
	dev->features |= NETIF_F_HW_CSUM;
	if (bp->flags & USING_DAC_FLAG)
		dev->features |= NETIF_F_HIGHDMA;
	dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
	dev->features |= NETIF_F_TSO6;
	dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
	bp->flags |= (HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);

	dev->vlan_features |= NETIF_F_SG;
	dev->vlan_features |= NETIF_F_HW_CSUM;
	if (bp->flags & USING_DAC_FLAG)
		dev->vlan_features |= NETIF_F_HIGHDMA;
	dev->vlan_features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
	dev->vlan_features |= NETIF_F_TSO6;

	/* get_port_hwinfo() will set prtad and mmds properly */
	bp->mdio.prtad = MDIO_PRTAD_NONE;
	bp->mdio.mmds = 0;
	bp->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
	bp->mdio.dev = dev;
	bp->mdio.mdio_read = bnx2x_mdio_read;
	bp->mdio.mdio_write = bnx2x_mdio_write;

	return 0;

err_out_unmap:
	if (bp->regview) {
		iounmap(bp->regview);
		bp->regview = NULL;
	}
	if (bp->doorbells) {
		iounmap(bp->doorbells);
		bp->doorbells = NULL;
	}

err_out_release:
	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

err_out_disable:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

err_out:
	return rc;
}
static void __devinit bnx2x_get_pcie_width_speed(struct bnx2x *bp,
						 int *width, int *speed)
{
	u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);

	*width = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;

	/* return value of 1=2.5GHz 2=5GHz */
	*speed = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
}
bnx2x_check_firmware(struct bnx2x
*bp
)
11944 const struct firmware
*firmware
= bp
->firmware
;
11945 struct bnx2x_fw_file_hdr
*fw_hdr
;
11946 struct bnx2x_fw_file_section
*sections
;
11947 u32 offset
, len
, num_ops
;
11952 if (firmware
->size
< sizeof(struct bnx2x_fw_file_hdr
))
11955 fw_hdr
= (struct bnx2x_fw_file_hdr
*)firmware
->data
;
11956 sections
= (struct bnx2x_fw_file_section
*)fw_hdr
;
11958 /* Make sure none of the offsets and sizes make us read beyond
11959 * the end of the firmware data */
11960 for (i
= 0; i
< sizeof(*fw_hdr
) / sizeof(*sections
); i
++) {
11961 offset
= be32_to_cpu(sections
[i
].offset
);
11962 len
= be32_to_cpu(sections
[i
].len
);
11963 if (offset
+ len
> firmware
->size
) {
11964 pr_err("Section %d length is out of bounds\n", i
);
11969 /* Likewise for the init_ops offsets */
11970 offset
= be32_to_cpu(fw_hdr
->init_ops_offsets
.offset
);
11971 ops_offsets
= (u16
*)(firmware
->data
+ offset
);
11972 num_ops
= be32_to_cpu(fw_hdr
->init_ops
.len
) / sizeof(struct raw_op
);
11974 for (i
= 0; i
< be32_to_cpu(fw_hdr
->init_ops_offsets
.len
) / 2; i
++) {
11975 if (be16_to_cpu(ops_offsets
[i
]) > num_ops
) {
11976 pr_err("Section offset %d is out of bounds\n", i
);
11981 /* Check FW version */
11982 offset
= be32_to_cpu(fw_hdr
->fw_version
.offset
);
11983 fw_ver
= firmware
->data
+ offset
;
11984 if ((fw_ver
[0] != BCM_5710_FW_MAJOR_VERSION
) ||
11985 (fw_ver
[1] != BCM_5710_FW_MINOR_VERSION
) ||
11986 (fw_ver
[2] != BCM_5710_FW_REVISION_VERSION
) ||
11987 (fw_ver
[3] != BCM_5710_FW_ENGINEERING_VERSION
)) {
11988 pr_err("Bad FW version:%d.%d.%d.%d. Should be %d.%d.%d.%d\n",
11989 fw_ver
[0], fw_ver
[1], fw_ver
[2],
11990 fw_ver
[3], BCM_5710_FW_MAJOR_VERSION
,
11991 BCM_5710_FW_MINOR_VERSION
,
11992 BCM_5710_FW_REVISION_VERSION
,
11993 BCM_5710_FW_ENGINEERING_VERSION
);
static inline void be32_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
{
	const __be32 *source = (const __be32 *)_source;
	u32 *target = (u32 *)_target;
	u32 i;

	for (i = 0; i < n/4; i++)
		target[i] = be32_to_cpu(source[i]);
}

/*
   Ops array is stored in the following format:
   {op(8bit), offset(24bit, big endian), data(32bit, big endian)}
 */
static inline void bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n)
{
	const __be32 *source = (const __be32 *)_source;
	struct raw_op *target = (struct raw_op *)_target;
	u32 i, j, tmp;

	for (i = 0, j = 0; i < n/8; i++, j += 2) {
		tmp = be32_to_cpu(source[j]);
		target[i].op = (tmp >> 24) & 0xff;
		target[i].offset = tmp & 0xffffff;
		target[i].raw_data = be32_to_cpu(source[j+1]);
	}
}
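
/* Illustrative sketch (not part of the driver): each 8-byte record in the
 * firmware ops array is {op(8bit), offset(24bit, big endian), data(32bit,
 * big endian)}.  Decoding a single record from its two big-endian words,
 * mirroring what bnx2x_prep_ops() does inside its loop, looks like this
 * (example_decode_raw_op is a hypothetical helper name):
 */
#if 0
static void example_decode_raw_op(const __be32 rec[2], struct raw_op *op)
{
	u32 tmp = be32_to_cpu(rec[0]);

	op->op = (tmp >> 24) & 0xff;	/* top byte: opcode */
	op->offset = tmp & 0xffffff;	/* low 24 bits: target offset */
	op->raw_data = be32_to_cpu(rec[1]);
}
#endif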
static inline void be16_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
{
	const __be16 *source = (const __be16 *)_source;
	u16 *target = (u16 *)_target;
	u32 i;

	for (i = 0; i < n/2; i++)
		target[i] = be16_to_cpu(source[i]);
}

#define BNX2X_ALLOC_AND_SET(arr, lbl, func)				\
do {									\
	u32 len = be32_to_cpu(fw_hdr->arr.len);				\
	bp->arr = kmalloc(len, GFP_KERNEL);				\
	if (!bp->arr) {							\
		pr_err("Failed to allocate %d bytes for "#arr"\n", len); \
		goto lbl;						\
	}								\
	func(bp->firmware->data + be32_to_cpu(fw_hdr->arr.offset),	\
	     (u8 *)bp->arr, len);					\
} while (0)
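
/* Illustrative sketch (not part of the driver): for example,
 * BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit, be32_to_cpu_n)
 * expands (roughly, assuming the do/while wrapper above) to the block below:
 * allocate the array, bail out to the given label on failure, then convert
 * the firmware image into it.
 */
#if 0
	{
		u32 len = be32_to_cpu(fw_hdr->init_data.len);

		bp->init_data = kmalloc(len, GFP_KERNEL);
		if (!bp->init_data) {
			pr_err("Failed to allocate %d bytes for init_data\n",
			       len);
			goto request_firmware_exit;
		}
		be32_to_cpu_n(bp->firmware->data +
			      be32_to_cpu(fw_hdr->init_data.offset),
			      (u8 *)bp->init_data, len);
	}
#endif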
static int __devinit bnx2x_init_firmware(struct bnx2x *bp, struct device *dev)
{
	const char *fw_file_name;
	struct bnx2x_fw_file_hdr *fw_hdr;
	int rc;

	if (CHIP_IS_E1(bp))
		fw_file_name = FW_FILE_NAME_E1;
	else
		fw_file_name = FW_FILE_NAME_E1H;

	pr_info("Loading %s\n", fw_file_name);

	rc = request_firmware(&bp->firmware, fw_file_name, dev);
	if (rc) {
		pr_err("Can't load firmware file %s\n", fw_file_name);
		goto request_firmware_exit;
	}

	rc = bnx2x_check_firmware(bp);
	if (rc) {
		pr_err("Corrupt firmware file %s\n", fw_file_name);
		goto request_firmware_exit;
	}

	fw_hdr = (struct bnx2x_fw_file_hdr *)bp->firmware->data;

	/* Initialize the pointers to the init arrays */
	/* Blob */
	BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit, be32_to_cpu_n);

	/* Opcodes */
	BNX2X_ALLOC_AND_SET(init_ops, init_ops_alloc_err, bnx2x_prep_ops);

	/* Offsets */
	BNX2X_ALLOC_AND_SET(init_ops_offsets, init_offsets_alloc_err,
			    be16_to_cpu_n);

	/* STORMs firmware */
	INIT_TSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->tsem_int_table_data.offset);
	INIT_TSEM_PRAM_DATA(bp)      = bp->firmware->data +
			be32_to_cpu(fw_hdr->tsem_pram_data.offset);
	INIT_USEM_INT_TABLE_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->usem_int_table_data.offset);
	INIT_USEM_PRAM_DATA(bp)      = bp->firmware->data +
			be32_to_cpu(fw_hdr->usem_pram_data.offset);
	INIT_XSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->xsem_int_table_data.offset);
	INIT_XSEM_PRAM_DATA(bp)      = bp->firmware->data +
			be32_to_cpu(fw_hdr->xsem_pram_data.offset);
	INIT_CSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->csem_int_table_data.offset);
	INIT_CSEM_PRAM_DATA(bp)      = bp->firmware->data +
			be32_to_cpu(fw_hdr->csem_pram_data.offset);

	return 0;

init_offsets_alloc_err:
	kfree(bp->init_ops);
init_ops_alloc_err:
	kfree(bp->init_data);
request_firmware_exit:
	release_firmware(bp->firmware);

	return rc;
}
static int __devinit bnx2x_init_one(struct pci_dev *pdev,
				    const struct pci_device_id *ent)
{
	struct net_device *dev = NULL;
	struct bnx2x *bp;
	int pcie_width, pcie_speed;
	int rc;

	/* dev zeroed in init_etherdev */
	dev = alloc_etherdev_mq(sizeof(*bp), MAX_CONTEXT);
	if (!dev) {
		pr_err("Cannot allocate net device\n");
		return -ENOMEM;
	}

	bp = netdev_priv(dev);
	bp->msg_enable = debug;

	pci_set_drvdata(pdev, dev);

	rc = bnx2x_init_dev(pdev, dev);
	if (rc < 0) {
		free_netdev(dev);
		return rc;
	}

	rc = bnx2x_init_bp(bp);
	if (rc)
		goto init_one_exit;

	/* Set init arrays */
	rc = bnx2x_init_firmware(bp, &pdev->dev);
	if (rc) {
		pr_err("Error loading firmware\n");
		goto init_one_exit;
	}

	rc = register_netdev(dev);
	if (rc) {
		dev_err(&pdev->dev, "Cannot register net device\n");
		goto init_one_exit;
	}

	bnx2x_get_pcie_width_speed(bp, &pcie_width, &pcie_speed);
	netdev_info(dev, "%s (%c%d) PCI-E x%d %s found at mem %lx, IRQ %d, node addr %pM\n",
		    board_info[ent->driver_data].name,
		    (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
		    pcie_width, (pcie_speed == 2) ? "5GHz (Gen2)" : "2.5GHz",
		    dev->base_addr, bp->pdev->irq, dev->dev_addr);

	return 0;

init_one_exit:
	if (bp->regview)
		iounmap(bp->regview);

	if (bp->doorbells)
		iounmap(bp->doorbells);

	free_netdev(dev);

	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

	return rc;
}
static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;

	if (!dev) {
		pr_err("BAD net device from bnx2x_init_one\n");
		return;
	}
	bp = netdev_priv(dev);

	unregister_netdev(dev);

	kfree(bp->init_ops_offsets);
	kfree(bp->init_ops);
	kfree(bp->init_data);
	release_firmware(bp->firmware);

	if (bp->regview)
		iounmap(bp->regview);

	if (bp->doorbells)
		iounmap(bp->doorbells);

	free_netdev(dev);

	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}
static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;

	if (!dev) {
		pr_err("BAD net device from bnx2x_init_one\n");
		return -ENODEV;
	}
	bp = netdev_priv(dev);

	rtnl_lock();

	pci_save_state(pdev);

	if (!netif_running(dev)) {
		rtnl_unlock();
		return 0;
	}

	netif_device_detach(dev);

	bnx2x_nic_unload(bp, UNLOAD_CLOSE);

	bnx2x_set_power_state(bp, pci_choose_state(pdev, state));

	rtnl_unlock();

	return 0;
}

static int bnx2x_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;
	int rc;

	if (!dev) {
		pr_err("BAD net device from bnx2x_init_one\n");
		return -ENODEV;
	}
	bp = netdev_priv(dev);

	rtnl_lock();

	pci_restore_state(pdev);

	if (!netif_running(dev)) {
		rtnl_unlock();
		return 0;
	}

	bnx2x_set_power_state(bp, PCI_D0);
	netif_device_attach(dev);

	rc = bnx2x_nic_load(bp, LOAD_OPEN);

	rtnl_unlock();

	return rc;
}
static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
{
	int i;

	bp->state = BNX2X_STATE_ERROR;

	bp->rx_mode = BNX2X_RX_MODE_NONE;

	bnx2x_netif_stop(bp, 0);

	del_timer_sync(&bp->timer);
	bp->stats_state = STATS_STATE_DISABLED;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	/* Release IRQs */
	bnx2x_free_irq(bp, false);

	if (CHIP_IS_E1(bp)) {
		struct mac_configuration_cmd *config =
						bnx2x_sp(bp, mcast_config);

		for (i = 0; i < config->hdr.length; i++)
			CAM_INVALIDATE(config->config_table[i]);
	}

	/* Free SKBs, SGEs, TPA pool and driver internals */
	bnx2x_free_skbs(bp);
	for_each_queue(bp, i)
		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
	for_each_queue(bp, i)
		netif_napi_del(&bnx2x_fp(bp, i, napi));
	bnx2x_free_mem(bp);

	bp->state = BNX2X_STATE_CLOSED;

	netif_carrier_off(bp->dev);

	return 0;
}

static void bnx2x_eeh_recover(struct bnx2x *bp)
{
	u32 val;

	mutex_init(&bp->port.phy_mutex);

	bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
	bp->link_params.shmem_base = bp->common.shmem_base;
	BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);

	if (!bp->common.shmem_base ||
	    (bp->common.shmem_base < 0xA0000) ||
	    (bp->common.shmem_base >= 0xC0000)) {
		BNX2X_DEV_INFO("MCP not active\n");
		bp->flags |= NO_MCP_FLAG;
		return;
	}

	val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
	if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		!= (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		BNX2X_ERR("BAD MCP validity signature\n");

	if (!BP_NOMCP(bp)) {
		bp->fw_seq = (SHMEM_RD(bp, func_mb[BP_FUNC(bp)].drv_mb_header)
			      & DRV_MSG_SEQ_NUMBER_MASK);
		BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
	}
}
/**
 * bnx2x_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
						pci_channel_state_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	netif_device_detach(dev);

	if (state == pci_channel_io_perm_failure) {
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}

	if (netif_running(dev))
		bnx2x_eeh_nic_unload(bp);

	pci_disable_device(pdev);

	rtnl_unlock();

	/* Request a slot reset */
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * bnx2x_io_slot_reset - called after the PCI bus has been reset
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 */
static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	if (pci_enable_device(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset\n");
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}

	pci_set_master(pdev);
	pci_restore_state(pdev);

	if (netif_running(dev))
		bnx2x_set_power_state(bp, PCI_D0);

	rtnl_unlock();

	return PCI_ERS_RESULT_RECOVERED;
}

/**
 * bnx2x_io_resume - called when traffic can start flowing again
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation.
 */
static void bnx2x_io_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	bnx2x_eeh_recover(bp);

	if (netif_running(dev))
		bnx2x_nic_load(bp, LOAD_NORMAL);

	netif_device_attach(dev);

	rtnl_unlock();
}
static struct pci_error_handlers bnx2x_err_handler = {
	.error_detected = bnx2x_io_error_detected,
	.slot_reset     = bnx2x_io_slot_reset,
	.resume         = bnx2x_io_resume,
};

static struct pci_driver bnx2x_pci_driver = {
	.name        = DRV_MODULE_NAME,
	.id_table    = bnx2x_pci_tbl,
	.probe       = bnx2x_init_one,
	.remove      = __devexit_p(bnx2x_remove_one),
	.suspend     = bnx2x_suspend,
	.resume      = bnx2x_resume,
	.err_handler = &bnx2x_err_handler,
};

static int __init bnx2x_init(void)
{
	int ret;

	pr_info("%s", version);

	bnx2x_wq = create_singlethread_workqueue("bnx2x");
	if (bnx2x_wq == NULL) {
		pr_err("Cannot create workqueue\n");
		return -ENOMEM;
	}

	ret = pci_register_driver(&bnx2x_pci_driver);
	if (ret) {
		pr_err("Cannot register driver\n");
		destroy_workqueue(bnx2x_wq);
	}
	return ret;
}

static void __exit bnx2x_cleanup(void)
{
	pci_unregister_driver(&bnx2x_pci_driver);

	destroy_workqueue(bnx2x_wq);
}

module_init(bnx2x_init);
module_exit(bnx2x_cleanup);

#ifdef BCM_CNIC
/* count denotes the number of new completions we have seen */
static void bnx2x_cnic_sp_post(struct bnx2x *bp, int count)
{
	struct eth_spe *spe;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return;
#endif

	spin_lock_bh(&bp->spq_lock);
	bp->cnic_spq_pending -= count;

	for (; bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending;
	     bp->cnic_spq_pending++) {

		if (!bp->cnic_kwq_pending)
			break;

		spe = bnx2x_sp_get_next(bp);
		*spe = *bp->cnic_kwq_cons;

		bp->cnic_kwq_pending--;

		DP(NETIF_MSG_TIMER, "pending on SPQ %d, on KWQ %d count %d\n",
		   bp->cnic_spq_pending, bp->cnic_kwq_pending, count);

		if (bp->cnic_kwq_cons == bp->cnic_kwq_last)
			bp->cnic_kwq_cons = bp->cnic_kwq;
		else
			bp->cnic_kwq_cons++;
	}
	bnx2x_sp_prod_update(bp);
	spin_unlock_bh(&bp->spq_lock);
}

static int bnx2x_cnic_sp_queue(struct net_device *dev,
			       struct kwqe_16 *kwqes[], u32 count)
{
	struct bnx2x *bp = netdev_priv(dev);
	int i;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return -EIO;
#endif

	spin_lock_bh(&bp->spq_lock);

	for (i = 0; i < count; i++) {
		struct eth_spe *spe = (struct eth_spe *)kwqes[i];

		if (bp->cnic_kwq_pending == MAX_SP_DESC_CNT)
			break;

		*bp->cnic_kwq_prod = *spe;

		bp->cnic_kwq_pending++;

		DP(NETIF_MSG_TIMER, "L5 SPQE %x %x %x:%x pos %d\n",
		   spe->hdr.conn_and_cmd_data, spe->hdr.type,
		   spe->data.mac_config_addr.hi,
		   spe->data.mac_config_addr.lo,
		   bp->cnic_kwq_pending);

		if (bp->cnic_kwq_prod == bp->cnic_kwq_last)
			bp->cnic_kwq_prod = bp->cnic_kwq;
		else
			bp->cnic_kwq_prod++;
	}

	spin_unlock_bh(&bp->spq_lock);

	if (bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending)
		bnx2x_cnic_sp_post(bp, 0);

	return i;
}
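
/* Illustrative sketch (not part of the driver): bp->cnic_kwq is used above as
 * a fixed circular buffer of MAX_SP_DESC_CNT slow-path entries shared between
 * the CNIC producer (bnx2x_cnic_sp_queue) and the consumer
 * (bnx2x_cnic_sp_post).  Both sides advance their pointer with the same
 * wrap-around pattern, shown here as a hypothetical helper:
 */
#if 0
static struct eth_spe *example_kwq_ring_next(struct bnx2x *bp,
					     struct eth_spe *pos)
{
	/* cnic_kwq_last marks the slot just past the end of the ring */
	if (pos == bp->cnic_kwq_last)
		return bp->cnic_kwq;	/* wrap to the beginning */
	return pos + 1;			/* otherwise advance by one entry */
}
#endif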
static int bnx2x_cnic_ctl_send(struct bnx2x *bp, struct cnic_ctl_info *ctl)
{
	struct cnic_ops *c_ops;
	int rc = 0;

	mutex_lock(&bp->cnic_mutex);
	c_ops = bp->cnic_ops;
	if (c_ops)
		rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
	mutex_unlock(&bp->cnic_mutex);

	return rc;
}

static int bnx2x_cnic_ctl_send_bh(struct bnx2x *bp, struct cnic_ctl_info *ctl)
{
	struct cnic_ops *c_ops;
	int rc = 0;

	rcu_read_lock();
	c_ops = rcu_dereference(bp->cnic_ops);
	if (c_ops)
		rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
	rcu_read_unlock();

	return rc;
}

/*
 * for commands that have no data
 */
static int bnx2x_cnic_notify(struct bnx2x *bp, int cmd)
{
	struct cnic_ctl_info ctl = {0};

	ctl.cmd = cmd;

	return bnx2x_cnic_ctl_send(bp, &ctl);
}

static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid)
{
	struct cnic_ctl_info ctl;

	/* first we tell CNIC and only then we count this as a completion */
	ctl.cmd = CNIC_CTL_COMPLETION_CMD;
	ctl.data.comp.cid = cid;

	bnx2x_cnic_ctl_send_bh(bp, &ctl);
	bnx2x_cnic_sp_post(bp, 1);
}

static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc = 0;

	switch (ctl->cmd) {
	case DRV_CTL_CTXTBL_WR_CMD: {
		u32 index = ctl->data.io.offset;
		dma_addr_t addr = ctl->data.io.dma_addr;

		bnx2x_ilt_wr(bp, index, addr);
		break;
	}

	case DRV_CTL_COMPLETION_CMD: {
		int count = ctl->data.comp.comp_count;

		bnx2x_cnic_sp_post(bp, count);
		break;
	}

	/* rtnl_lock is held.  */
	case DRV_CTL_START_L2_CMD: {
		u32 cli = ctl->data.ring.client_id;

		bp->rx_mode_cl_mask |= (1 << cli);
		bnx2x_set_storm_rx_mode(bp);
		break;
	}

	/* rtnl_lock is held.  */
	case DRV_CTL_STOP_L2_CMD: {
		u32 cli = ctl->data.ring.client_id;

		bp->rx_mode_cl_mask &= ~(1 << cli);
		bnx2x_set_storm_rx_mode(bp);
		break;
	}

	default:
		BNX2X_ERR("unknown command %x\n", ctl->cmd);
		rc = -EINVAL;
	}

	return rc;
}
static void bnx2x_setup_cnic_irq_info(struct bnx2x *bp)
{
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	if (bp->flags & USING_MSIX_FLAG) {
		cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
		cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
		cp->irq_arr[0].vector = bp->msix_table[1].vector;
	} else {
		cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
		cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
	}
	cp->irq_arr[0].status_blk = bp->cnic_sb;
	cp->irq_arr[0].status_blk_num = CNIC_SB_ID(bp);
	cp->irq_arr[1].status_blk = bp->def_status_blk;
	cp->irq_arr[1].status_blk_num = DEF_SB_ID;

	cp->num_irq = 2;
}

static int bnx2x_register_cnic(struct net_device *dev, struct cnic_ops *ops,
			       void *data)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	if (ops == NULL)
		return -EINVAL;

	if (atomic_read(&bp->intr_sem) != 0)
		return -EBUSY;

	bp->cnic_kwq = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!bp->cnic_kwq)
		return -ENOMEM;

	bp->cnic_kwq_cons = bp->cnic_kwq;
	bp->cnic_kwq_prod = bp->cnic_kwq;
	bp->cnic_kwq_last = bp->cnic_kwq + MAX_SP_DESC_CNT;

	bp->cnic_spq_pending = 0;
	bp->cnic_kwq_pending = 0;

	bp->cnic_data = data;

	cp->num_irq = 0;
	cp->drv_state = CNIC_DRV_STATE_REGD;

	bnx2x_init_sb(bp, bp->cnic_sb, bp->cnic_sb_mapping, CNIC_SB_ID(bp));

	bnx2x_setup_cnic_irq_info(bp);
	bnx2x_set_iscsi_eth_mac_addr(bp, 1);
	bp->cnic_flags |= BNX2X_CNIC_FLAG_MAC_SET;
	rcu_assign_pointer(bp->cnic_ops, ops);

	return 0;
}

static int bnx2x_unregister_cnic(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	mutex_lock(&bp->cnic_mutex);
	if (bp->cnic_flags & BNX2X_CNIC_FLAG_MAC_SET) {
		bp->cnic_flags &= ~BNX2X_CNIC_FLAG_MAC_SET;
		bnx2x_set_iscsi_eth_mac_addr(bp, 0);
	}
	cp->drv_state = 0;
	rcu_assign_pointer(bp->cnic_ops, NULL);
	mutex_unlock(&bp->cnic_mutex);
	synchronize_rcu();
	kfree(bp->cnic_kwq);
	bp->cnic_kwq = NULL;

	return 0;
}

struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	cp->drv_owner = THIS_MODULE;
	cp->chip_id = CHIP_ID(bp);
	cp->pdev = bp->pdev;
	cp->io_base = bp->regview;
	cp->io_base2 = bp->doorbells;
	cp->max_kwqe_pending = 8;
	cp->ctx_blk_size = CNIC_CTX_PER_ILT * sizeof(union cdu_context);
	cp->ctx_tbl_offset = FUNC_ILT_BASE(BP_FUNC(bp)) + 1;
	cp->ctx_tbl_len = CNIC_ILT_LINES;
	cp->starting_cid = BCM_CNIC_CID_START;
	cp->drv_submit_kwqes_16 = bnx2x_cnic_sp_queue;
	cp->drv_ctl = bnx2x_drv_ctl;
	cp->drv_register_cnic = bnx2x_register_cnic;
	cp->drv_unregister_cnic = bnx2x_unregister_cnic;

	return cp;
}
EXPORT_SYMBOL(bnx2x_cnic_probe);

#endif /* BCM_CNIC */