/* bnx2x_main.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2010 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath and fastpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/device.h>  /* for dev_info() */
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/crc32c.h>
#include <linux/prefetch.h>
#include <linux/zlib.h>
#include <linux/stringify.h>

#include "bnx2x_init.h"
#include "bnx2x_init_ops.h"
#include "bnx2x_cmn.h"

#include <linux/firmware.h>
#include "bnx2x_fw_file_hdr.h"
/* FW files */
#define FW_FILE_VERSION					\
	__stringify(BCM_5710_FW_MAJOR_VERSION) "."	\
	__stringify(BCM_5710_FW_MINOR_VERSION) "."	\
	__stringify(BCM_5710_FW_REVISION_VERSION) "."	\
	__stringify(BCM_5710_FW_ENGINEERING_VERSION)
#define FW_FILE_NAME_E1		"bnx2x-e1-" FW_FILE_VERSION ".fw"
#define FW_FILE_NAME_E1H	"bnx2x-e1h-" FW_FILE_VERSION ".fw"

/* Time in jiffies before concluding the transmitter is hung */
#define TX_TIMEOUT		(5*HZ)
static char version[] __devinitdata =
	"Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
	DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Eliezer Tamir");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710/57711/57711E Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FW_FILE_NAME_E1);
MODULE_FIRMWARE(FW_FILE_NAME_E1H);
static int multi_mode = 1;
module_param(multi_mode, int, 0);
MODULE_PARM_DESC(multi_mode, " Multi queue mode "
			     "(0 Disable; 1 Enable (default))");

static int num_queues;
module_param(num_queues, int, 0);
MODULE_PARM_DESC(num_queues, " Number of queues for multi_mode=1"
				" (default is the number of CPUs)");

static int disable_tpa;
module_param(disable_tpa, int, 0);
MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");

static int int_mode;
module_param(int_mode, int, 0);
MODULE_PARM_DESC(int_mode, " Force interrupt mode other than MSI-X "
				"(1 INT#x; 2 MSI)");

static int dropless_fc;
module_param(dropless_fc, int, 0);
MODULE_PARM_DESC(dropless_fc, " Pause on exhausted host ring");

static int poll;
module_param(poll, int, 0);
MODULE_PARM_DESC(poll, " Use polling (for debug)");

static int mrrs = -1;
module_param(mrrs, int, 0);
MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");

static int debug;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, " Default debug msglevel");
static struct workqueue_struct *bnx2x_wq;
enum bnx2x_board_type {
	BCM57710 = 0,
	BCM57711 = 1,
	BCM57711E = 2,
};

/* indexed by board_type, above */
static struct {
	char *name;
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM57710 XGb" },
	{ "Broadcom NetXtreme II BCM57711 XGb" },
	{ "Broadcom NetXtreme II BCM57711E XGb" }
};
static DEFINE_PCI_DEVICE_TABLE(bnx2x_pci_tbl) = {
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57710), BCM57710 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711), BCM57711 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711E), BCM57711E },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);
/****************************************************************************
* General service functions
****************************************************************************/
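/*
 * Indirect register access: the GRC space is reached through PCI config
 * cycles (PCICFG_GRC_ADDRESS/PCICFG_GRC_DATA), for use before the BAR
 * mapping and the DMAE engine are ready.
 */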
/*
 * locking is done by mcp
 */
void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
{
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
}
static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
{
	u32 val;

	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);

	return val;
}
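/*
 * DMAE helpers.  The chip exposes one GO register per DMAE command slot
 * (dmae_reg_go_c[] below); a transfer is started by copying a
 * struct dmae_command into command memory and writing 1 to the slot's
 * GO register.
 */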
const u32 dmae_reg_go_c[] = {
	DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
	DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
	DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
	DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
};
/* copy command into DMAE command memory and set DMAE command go */
void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae, int idx)
{
	u32 cmd_offset;
	int i;

	cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
	for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
		REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));

		DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
		   idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
	}
	REG_WR(bp, dmae_reg_go_c[idx], 1);
}
void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
		      u32 len32)
{
	struct dmae_command dmae;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = 200;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);

		DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x len32 %d)"
		   " using indirect\n", dst_addr, len32);
		bnx2x_init_ind_wr(bp, dst_addr, data, len32);
		return;
	}

	memset(&dmae, 0, sizeof(struct dmae_command));

	dmae.opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
		       DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
		       DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		       DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		       DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		       (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		       (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae.src_addr_lo = U64_LO(dma_addr);
	dmae.src_addr_hi = U64_HI(dma_addr);
	dmae.dst_addr_lo = dst_addr >> 2;
	dmae.dst_addr_hi = 0;
	dmae.len = len32;
	dmae.comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae.comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae.comp_val = DMAE_COMP_VAL;

	DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
	   DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
		    "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
	   dmae.opcode, dmae.src_addr_hi, dmae.src_addr_lo,
	   dmae.len, dmae.dst_addr_hi, dmae.dst_addr_lo, dst_addr,
	   dmae.comp_addr_hi, dmae.comp_addr_lo, dmae.comp_val);
	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	mutex_lock(&bp->dmae_mutex);

	*wb_comp = 0;

	bnx2x_post_dmae(bp, &dmae, INIT_DMAE_C(bp));

	udelay(5);

	while (*wb_comp != DMAE_COMP_VAL) {
		DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);

		if (!cnt) {
			BNX2X_ERR("DMAE timeout!\n");
			break;
		}
		cnt--;
		/* adjust delay for emulation/FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			msleep(100);
		else
			udelay(5);
	}

	mutex_unlock(&bp->dmae_mutex);
}
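/*
 * Same flow as bnx2x_write_dmae() but in the GRC->PCI direction: the
 * result lands in the slowpath wb_data[] scratch area and completion is
 * again detected by polling wb_comp for DMAE_COMP_VAL.
 */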
void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
{
	struct dmae_command dmae;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = 200;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);
		int i;

		DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x len32 %d)"
		   " using indirect\n", src_addr, len32);
		for (i = 0; i < len32; i++)
			data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
		return;
	}

	memset(&dmae, 0, sizeof(struct dmae_command));

	dmae.opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
		       DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
		       DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		       DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		       DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		       (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		       (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae.src_addr_lo = src_addr >> 2;
	dmae.src_addr_hi = 0;
	dmae.dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
	dmae.dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
	dmae.len = len32;
	dmae.comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae.comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae.comp_val = DMAE_COMP_VAL;

	DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
	   DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
		    "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
	   dmae.opcode, dmae.src_addr_hi, dmae.src_addr_lo,
	   dmae.len, dmae.dst_addr_hi, dmae.dst_addr_lo, src_addr,
	   dmae.comp_addr_hi, dmae.comp_addr_lo, dmae.comp_val);

	mutex_lock(&bp->dmae_mutex);

	memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
	*wb_comp = 0;

	bnx2x_post_dmae(bp, &dmae, INIT_DMAE_C(bp));

	udelay(5);

	while (*wb_comp != DMAE_COMP_VAL) {

		if (!cnt) {
			BNX2X_ERR("DMAE timeout!\n");
			break;
		}
		cnt--;
		/* adjust delay for emulation/FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			msleep(100);
		else
			udelay(5);
	}
	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	mutex_unlock(&bp->dmae_mutex);
}
void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
			       u32 addr, u32 len)
{
	int dmae_wr_max = DMAE_LEN32_WR_MAX(bp);
	int offset = 0;

	while (len > dmae_wr_max) {
		bnx2x_write_dmae(bp, phys_addr + offset,
				 addr + offset, dmae_wr_max);
		offset += dmae_wr_max * 4;
		len -= dmae_wr_max;
	}

	bnx2x_write_dmae(bp, phys_addr + offset, addr + offset, len);
}
/* used only for slowpath so not inlined */
static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
{
	u32 wb_write[2];

	wb_write[0] = val_hi;
	wb_write[1] = val_lo;
	REG_WR_DMAE(bp, reg, wb_write, 2);
}
static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
{
	u32 wb_data[2];

	REG_RD_DMAE(bp, reg, wb_data, 2);

	return HILO_U64(wb_data[0], wb_data[1]);
}
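/*
 * Scan the assert lists that the X/T/C/U STORM processors keep in
 * internal memory and print every entry whose opcode is valid; returns
 * the number of asserts found.
 */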
static int bnx2x_mc_assert(struct bnx2x *bp)
{
	char last_idx;
	int i, rc = 0;
	u32 row0, row1, row2, row3;

	/* XSTORM */
	last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
			   XSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* TSTORM */
	last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
			   TSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* CSTORM */
	last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
			   CSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* USTORM */
	last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
			   USTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	return rc;
}
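/*
 * Dump the MCP firmware trace buffer.  The mark read from shmem splits
 * the scratchpad into two regions which the loops below print in order
 * (the split is presumably where the cyclic trace last wrapped).
 */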
static void bnx2x_fw_dump(struct bnx2x *bp)
{
	u32 addr, mark, offset;
	__be32 data[9];
	int word;

	if (BP_NOMCP(bp)) {
		BNX2X_ERR("NO MCP - can not dump\n");
		return;
	}

	addr = bp->common.shmem_base - 0x0800 + 4;
	mark = REG_RD(bp, addr);
	mark = MCP_REG_MCPR_SCRATCH + ((mark + 0x3) & ~0x3) - 0x08000000;
	pr_err("begin fw dump (mark 0x%x)\n", mark);

	pr_err("");
	for (offset = mark; offset <= bp->common.shmem_base; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, offset + 4*word));
		data[8] = 0x0;
		pr_cont("%s", (char *)data);
	}
	for (offset = addr + 4; offset <= mark; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, offset + 4*word));
		data[8] = 0x0;
		pr_cont("%s", (char *)data);
	}
	pr_err("end of fw dump\n");
}
void bnx2x_panic_dump(struct bnx2x *bp)
{
	int i;
	u16 j, start, end;

	bp->stats_state = STATS_STATE_DISABLED;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	BNX2X_ERR("begin crash dump -----------------\n");

	/* Indices */
	/* Common */
	BNX2X_ERR("def_c_idx(0x%x)  def_u_idx(0x%x)  def_x_idx(0x%x)"
		  "  def_t_idx(0x%x)  def_att_idx(0x%x)  attn_state(0x%x)"
		  "  spq_prod_idx(0x%x)\n",
		  bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
		  bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);

	/* Rx */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		BNX2X_ERR("fp%d: rx_bd_prod(0x%x)  rx_bd_cons(0x%x)"
			  "  *rx_bd_cons_sb(0x%x)  rx_comp_prod(0x%x)"
			  "  rx_comp_cons(0x%x)  *rx_cons_sb(0x%x)\n",
			  i, fp->rx_bd_prod, fp->rx_bd_cons,
			  le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
			  fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
		BNX2X_ERR("     rx_sge_prod(0x%x)  last_max_sge(0x%x)"
			  "  fp_u_idx(0x%x) *sb_u_idx(0x%x)\n",
			  fp->rx_sge_prod, fp->last_max_sge,
			  le16_to_cpu(fp->fp_u_idx),
			  fp->status_blk->u_status_block.status_block_index);
	}

	/* Tx */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		BNX2X_ERR("fp%d: tx_pkt_prod(0x%x)  tx_pkt_cons(0x%x)"
			  "  tx_bd_prod(0x%x)  tx_bd_cons(0x%x)"
			  "  *tx_cons_sb(0x%x)\n",
			  i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
			  fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
		BNX2X_ERR("     fp_c_idx(0x%x)  *sb_c_idx(0x%x)"
			  "  tx_db_prod(0x%x)\n", le16_to_cpu(fp->fp_c_idx),
			  fp->status_blk->c_status_block.status_block_index,
			  fp->tx_db.data.prod);
	}

	/* Rings */
	/* Rx */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
		end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
		for (j = start; j != end; j = RX_BD(j + 1)) {
			u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
			struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];

			BNX2X_ERR("fp%d: rx_bd[%x]=[%x:%x]  sw_bd=[%p]\n",
				  i, j, rx_bd[1], rx_bd[0], sw_bd->skb);
		}

		start = RX_SGE(fp->rx_sge_prod);
		end = RX_SGE(fp->last_max_sge);
		for (j = start; j != end; j = RX_SGE(j + 1)) {
			u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
			struct sw_rx_page *sw_page = &fp->rx_page_ring[j];

			BNX2X_ERR("fp%d: rx_sge[%x]=[%x:%x]  sw_page=[%p]\n",
				  i, j, rx_sge[1], rx_sge[0], sw_page->page);
		}

		start = RCQ_BD(fp->rx_comp_cons - 10);
		end = RCQ_BD(fp->rx_comp_cons + 503);
		for (j = start; j != end; j = RCQ_BD(j + 1)) {
			u32 *cqe = (u32 *)&fp->rx_comp_ring[j];

			BNX2X_ERR("fp%d: cqe[%x]=[%x:%x:%x:%x]\n",
				  i, j, cqe[0], cqe[1], cqe[2], cqe[3]);
		}
	}

	/* Tx */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
		end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
		for (j = start; j != end; j = TX_BD(j + 1)) {
			struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];

			BNX2X_ERR("fp%d: packet[%x]=[%p,%x]\n",
				  i, j, sw_bd->skb, sw_bd->first_bd);
		}

		start = TX_BD(fp->tx_bd_cons - 10);
		end = TX_BD(fp->tx_bd_cons + 254);
		for (j = start; j != end; j = TX_BD(j + 1)) {
			u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];

			BNX2X_ERR("fp%d: tx_bd[%x]=[%x:%x:%x:%x]\n",
				  i, j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
		}
	}

	bnx2x_fw_dump(bp);
	bnx2x_mc_assert(bp);
	BNX2X_ERR("end crash dump -----------------\n");
}
void bnx2x_int_enable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;

	if (msix) {
		val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			 HC_CONFIG_0_REG_INT_LINE_EN_0);
		val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else if (msi) {
		val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else {
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_INT_LINE_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);

		DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
		   val, port, addr);

		REG_WR(bp, addr, val);

		val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
	}

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)  mode %s\n",
	   val, port, addr, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));

	REG_WR(bp, addr, val);
	/*
	 * Ensure that HC_CONFIG is written before leading/trailing edge config
	 */
	mmiowb();
	barrier();

	if (CHIP_IS_E1H(bp)) {
		/* init leading/trailing edge */
		if (IS_E1HMF(bp)) {
			val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
			if (bp->port.pmf)
				/* enable nig and gpio3 attention */
				val |= 0x1100;
		} else
			val = 0xffff;

		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
	}

	/* Make sure that interrupts are indeed enabled from here on */
	mmiowb();
}
static void bnx2x_int_disable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);

	val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
		 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
		 HC_CONFIG_0_REG_INT_LINE_EN_0 |
		 HC_CONFIG_0_REG_ATTN_BIT_EN_0);

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
	   val, port, addr);

	/* flush all outstanding writes */
	mmiowb();

	REG_WR(bp, addr, val);
	if (REG_RD(bp, addr) != val)
		BNX2X_ERR("BUG! proper val not read from IGU!\n");
}
void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
{
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int i, offset;

	/* disable interrupt handling */
	atomic_inc(&bp->intr_sem);
	smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */

	if (disable_hw)
		/* prevent the HW from sending interrupts */
		bnx2x_int_disable(bp);

	/* make sure all ISRs are done */
	if (msix) {
		synchronize_irq(bp->msix_table[0].vector);
		offset = 1;
#ifdef BCM_CNIC
		offset++;
#endif
		for_each_queue(bp, i)
			synchronize_irq(bp->msix_table[i + offset].vector);
	} else
		synchronize_irq(bp->pdev->irq);

	/* make sure sp_task is not running */
	cancel_delayed_work(&bp->sp_task);
	flush_workqueue(bnx2x_wq);
}
/*
 * General service functions
 */

/* Return true if succeeded to acquire the lock */
static bool bnx2x_trylock_hw_lock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	int func = BP_FUNC(bp);
	u32 hw_lock_control_reg;

	DP(NETIF_MSG_HW, "Trying to take a lock on resource %d\n", resource);

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		DP(NETIF_MSG_HW,
		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return false;
	}

	if (func <= 5)
		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
	else
		hw_lock_control_reg =
				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);

	/* Try to acquire the lock */
	REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
	lock_status = REG_RD(bp, hw_lock_control_reg);
	if (lock_status & resource_bit)
		return true;

	DP(NETIF_MSG_HW, "Failed to get a lock on resource %d\n", resource);
	return false;
}
#ifdef BCM_CNIC
static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid);
#endif
void bnx2x_sp_event(struct bnx2x_fastpath *fp,
		    union eth_rx_cqe *rr_cqe)
{
	struct bnx2x *bp = fp->bp;
	int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
	int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);

	DP(BNX2X_MSG_SP,
	   "fp %d  cid %d  got ramrod #%d  state is %x  type is %d\n",
	   fp->index, cid, command, bp->state,
	   rr_cqe->ramrod_cqe.ramrod_type);

	bp->spq_left++;

	if (fp->index) {
		switch (command | fp->state) {
		case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
						BNX2X_FP_STATE_OPENING):
			DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
			   cid);
			fp->state = BNX2X_FP_STATE_OPEN;
			break;

		case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
			DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
			   cid);
			fp->state = BNX2X_FP_STATE_HALTED;
			break;

		default:
			BNX2X_ERR("unexpected MC reply (%d)  "
				  "fp[%d] state is %x\n",
				  command, fp->index, fp->state);
			break;
		}
		mb(); /* force bnx2x_wait_ramrod() to see the change */
		return;
	}

	switch (command | bp->state) {
	case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
		DP(NETIF_MSG_IFUP, "got setup ramrod\n");
		bp->state = BNX2X_STATE_OPEN;
		break;

	case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
		bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
		fp->state = BNX2X_FP_STATE_HALTED;
		break;

	case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
		bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
		break;

#ifdef BCM_CNIC
	case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_OPEN):
		DP(NETIF_MSG_IFDOWN, "got delete ramrod for CID %d\n", cid);
		bnx2x_cnic_cfc_comp(bp, cid);
		return;
#endif

	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
		DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
		bp->set_mac_pending--;
		smp_wmb();
		break;

	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
		bp->set_mac_pending--;
		smp_wmb();
		break;

	default:
		BNX2X_ERR("unexpected MC reply (%d)  bp->state is %x\n",
			  command, bp->state);
		break;
	}
	mb(); /* force bnx2x_wait_ramrod() to see the change */
}
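/*
 * Main interrupt handler for the INTx/MSI case: ack the IGU, schedule
 * NAPI for every fastpath whose status-block bit is set in 'status',
 * and kick sp_task for the default status block (bit 0).
 */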
irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
{
	struct bnx2x *bp = netdev_priv(dev_instance);
	u16 status = bnx2x_ack_int(bp);
	u16 mask;
	int i;

	/* Return here if interrupt is shared and it's not for us */
	if (unlikely(status == 0)) {
		DP(NETIF_MSG_INTR, "not our interrupt!\n");
		return IRQ_NONE;
	}
	DP(NETIF_MSG_INTR, "got an interrupt  status 0x%x\n", status);

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	for (i = 0; i < BNX2X_NUM_QUEUES(bp); i++) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		mask = 0x2 << fp->sb_id;
		if (status & mask) {
			/* Handle Rx and Tx according to SB id */
			prefetch(fp->rx_cons_sb);
			prefetch(&fp->status_blk->u_status_block.
						status_block_index);
			prefetch(fp->tx_cons_sb);
			prefetch(&fp->status_blk->c_status_block.
						status_block_index);
			napi_schedule(&bnx2x_fp(bp, fp->index, napi));
			status &= ~mask;
		}
	}

#ifdef BCM_CNIC
	mask = 0x2 << CNIC_SB_ID(bp);
	if (status & (mask | 0x1)) {
		struct cnic_ops *c_ops = NULL;

		rcu_read_lock();
		c_ops = rcu_dereference(bp->cnic_ops);
		if (c_ops)
			c_ops->cnic_handler(bp->cnic_data, NULL);
		rcu_read_unlock();

		status &= ~mask;
	}
#endif

	if (unlikely(status & 0x1)) {
		queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);

		status &= ~0x1;
		if (!status)
			return IRQ_HANDLED;
	}

	if (unlikely(status))
		DP(NETIF_MSG_INTR, "got an unknown interrupt! (status 0x%x)\n",
		   status);

	return IRQ_HANDLED;
}
/* end of fast path */


/*
 * General service functions
 */

int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	int func = BP_FUNC(bp);
	u32 hw_lock_control_reg;
	int cnt;

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		DP(NETIF_MSG_HW,
		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return -EINVAL;
	}

	if (func <= 5)
		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
	else
		hw_lock_control_reg =
				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);

	/* Validating that the resource is not already taken */
	lock_status = REG_RD(bp, hw_lock_control_reg);
	if (lock_status & resource_bit) {
		DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
		   lock_status, resource_bit);
		return -EEXIST;
	}

	/* Try for 5 second every 5ms */
	for (cnt = 0; cnt < 1000; cnt++) {
		/* Try to acquire the lock */
		REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
		lock_status = REG_RD(bp, hw_lock_control_reg);
		if (lock_status & resource_bit)
			return 0;

		msleep(5);
	}
	DP(NETIF_MSG_HW, "Timeout\n");
	return -EAGAIN;
}
int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	int func = BP_FUNC(bp);
	u32 hw_lock_control_reg;

	DP(NETIF_MSG_HW, "Releasing a lock on resource %d\n", resource);

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		DP(NETIF_MSG_HW,
		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return -EINVAL;
	}

	if (func <= 5)
		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
	else
		hw_lock_control_reg =
				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);

	/* Validating that the resource is currently taken */
	lock_status = REG_RD(bp, hw_lock_control_reg);
	if (!(lock_status & resource_bit)) {
		DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
		   lock_status, resource_bit);
		return -EFAULT;
	}

	REG_WR(bp, hw_lock_control_reg, resource_bit);
	return 0;
}
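/*
 * GPIO accessors.  The pin number is shifted by
 * MISC_REGISTERS_GPIO_PORT_SHIFT when the two ports are swapped
 * (NIG_REG_PORT_SWAP set together with NIG_REG_STRAP_OVERRIDE).
 */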
int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;
	int value;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	/* read GPIO value */
	gpio_reg = REG_RD(bp, MISC_REG_GPIO);

	/* get the requested pin value */
	if ((gpio_reg & gpio_mask) == gpio_mask)
		value = 1;
	else
		value = 0;

	DP(NETIF_MSG_LINK, "pin %d  value 0x%x\n", gpio_num, value);

	return value;
}
int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
	/* read GPIO and mask except the float bits */
	gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);

	switch (mode) {
	case MISC_REGISTERS_GPIO_OUTPUT_LOW:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
		   gpio_num, gpio_shift);
		/* clear FLOAT and set CLR */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
		break;

	case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
		   gpio_num, gpio_shift);
		/* clear FLOAT and set SET */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
		break;

	case MISC_REGISTERS_GPIO_INPUT_HI_Z:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
		   gpio_num, gpio_shift);
		/* set FLOAT */
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_GPIO, gpio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);

	return 0;
}
int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
	/* read GPIO int */
	gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);

	switch (mode) {
	case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
		DP(NETIF_MSG_LINK, "Clear GPIO INT %d (shift %d) -> "
				   "output low\n", gpio_num, gpio_shift);
		/* clear SET and set CLR */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
		gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
		break;

	case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
		DP(NETIF_MSG_LINK, "Set GPIO INT %d (shift %d) -> "
				   "output high\n", gpio_num, gpio_shift);
		/* clear CLR and set SET */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
		gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);

	return 0;
}
static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
{
	u32 spio_mask = (1 << spio_num);
	u32 spio_reg;

	if ((spio_num < MISC_REGISTERS_SPIO_4) ||
	    (spio_num > MISC_REGISTERS_SPIO_7)) {
		BNX2X_ERR("Invalid SPIO %d\n", spio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
	/* read SPIO and mask except the float bits */
	spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);

	switch (mode) {
	case MISC_REGISTERS_SPIO_OUTPUT_LOW:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
		/* clear FLOAT and set CLR */
		spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
		break;

	case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
		/* clear FLOAT and set SET */
		spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
		break;

	case MISC_REGISTERS_SPIO_INPUT_HI_Z:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
		/* set FLOAT */
		spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_SPIO, spio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);

	return 0;
}
void bnx2x_calc_fc_adv(struct bnx2x *bp)
{
	switch (bp->link_vars.ieee_fc &
		MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
		bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
					  ADVERTISED_Pause);
		break;

	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
		bp->port.advertising |= (ADVERTISED_Asym_Pause |
					 ADVERTISED_Pause);
		break;

	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
		bp->port.advertising |= ADVERTISED_Asym_Pause;
		break;

	default:
		bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
					  ADVERTISED_Pause);
		break;
	}
}
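/*
 * Bring the link up through the common link code; the flow-control
 * advertisement is recomputed afterwards from the negotiated ieee_fc.
 */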
u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
{
	if (!BP_NOMCP(bp)) {
		u8 rc;

		/* Initialize link parameters structure variables */
		/* It is recommended to turn off RX FC for jumbo frames
		   for better performance */
		if (bp->dev->mtu > 5000)
			bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
		else
			bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;

		bnx2x_acquire_phy_lock(bp);

		if (load_mode == LOAD_DIAG)
			bp->link_params.loopback_mode = LOOPBACK_XGXS_10;

		rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);

		bnx2x_release_phy_lock(bp);

		bnx2x_calc_fc_adv(bp);

		if (CHIP_REV_IS_SLOW(bp) && bp->link_vars.link_up) {
			bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
			bnx2x_link_report(bp);
		}

		return rc;
	}
	BNX2X_ERR("Bootcode is missing - can not initialize link\n");
	return -EINVAL;
}
void bnx2x_link_set(struct bnx2x *bp)
{
	if (!BP_NOMCP(bp)) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_phy_init(&bp->link_params, &bp->link_vars);
		bnx2x_release_phy_lock(bp);

		bnx2x_calc_fc_adv(bp);
	} else
		BNX2X_ERR("Bootcode is missing - can not set link\n");
}
static void bnx2x__link_reset(struct bnx2x *bp)
{
	if (!BP_NOMCP(bp)) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
		bnx2x_release_phy_lock(bp);
	} else
		BNX2X_ERR("Bootcode is missing - can not reset link\n");
}
u8 bnx2x_link_test(struct bnx2x *bp)
{
	u8 rc = 0;

	if (!BP_NOMCP(bp)) {
		bnx2x_acquire_phy_lock(bp);
		rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
		bnx2x_release_phy_lock(bp);
	} else
		BNX2X_ERR("Bootcode is missing - can not test link\n");

	return rc;
}
static void bnx2x_init_port_minmax(struct bnx2x *bp)
{
	u32 r_param = bp->link_vars.line_speed / 8;
	u32 fair_periodic_timeout_usec;
	u32 t_fair;

	memset(&(bp->cmng.rs_vars), 0,
	       sizeof(struct rate_shaping_vars_per_port));
	memset(&(bp->cmng.fair_vars), 0, sizeof(struct fairness_vars_per_port));

	/* 100 usec in SDM ticks = 25 since each tick is 4 usec */
	bp->cmng.rs_vars.rs_periodic_timeout = RS_PERIODIC_TIMEOUT_USEC / 4;

	/* this is the threshold below which no timer arming will occur
	   1.25 coefficient is for the threshold to be a little bigger
	   than the real time, to compensate for timer in-accuracy */
	bp->cmng.rs_vars.rs_threshold =
				(RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;

	/* resolution of fairness timer */
	fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
	/* for 10G it is 1000usec. for 1G it is 10000usec. */
	t_fair = T_FAIR_COEF / bp->link_vars.line_speed;

	/* this is the threshold below which we won't arm the timer anymore */
	bp->cmng.fair_vars.fair_threshold = QM_ARB_BYTES;

	/* we multiply by 1e3/8 to get bytes/msec.
	   We don't want the credits to pass a credit
	   of the t_fair*FAIR_MEM (algorithm resolution) */
	bp->cmng.fair_vars.upper_bound = r_param * t_fair * FAIR_MEM;
	/* since each tick is 4 usec */
	bp->cmng.fair_vars.fairness_timeout = fair_periodic_timeout_usec / 4;
}
/* Calculates the sum of vn_min_rates.
   It's needed for further normalizing of the min_rates.
   Returns:
     sum of vn_min_rates.
       or
     0 - if all the min_rates are 0.
     In the latter case the fairness algorithm should be deactivated.
     If not all min_rates are zero then those that are zeroes will be set to 1.
 */
static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
{
	int all_zero = 1;
	int port = BP_PORT(bp);
	int vn;

	bp->vn_weight_sum = 0;
	for (vn = VN_0; vn < E1HVN_MAX; vn++) {
		int func = 2*vn + port;
		u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
		u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
				   FUNC_MF_CFG_MIN_BW_SHIFT) * 100;

		/* Skip hidden vns */
		if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
			continue;

		/* If min rate is zero - set it to 1 */
		if (!vn_min_rate)
			vn_min_rate = DEF_MIN_RATE;
		else
			all_zero = 0;

		bp->vn_weight_sum += vn_min_rate;
	}

	/* ... only if all min rates are zeros - disable fairness */
	if (all_zero) {
		bp->cmng.flags.cmng_enables &=
					~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
		DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
		   "  fairness will be disabled\n");
	} else
		bp->cmng.flags.cmng_enables |=
					CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
}
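/*
 * Program per-VN rate shaping and fairness parameters into XSTORM
 * internal memory.  Min/max bandwidth comes from the mf_cfg shmem
 * fields, in units of 100 Mbps.
 */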
static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func)
{
	struct rate_shaping_vars_per_vn m_rs_vn;
	struct fairness_vars_per_vn m_fair_vn;
	u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
	u16 vn_min_rate, vn_max_rate;
	int i;

	/* If function is hidden - set min and max to zeroes */
	if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
		vn_min_rate = 0;
		vn_max_rate = 0;

	} else {
		vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
				FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
		/* If min rate is zero - set it to 1 */
		if (!vn_min_rate)
			vn_min_rate = DEF_MIN_RATE;
		vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
				FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
	}
	DP(NETIF_MSG_IFUP,
	   "func %d: vn_min_rate %d  vn_max_rate %d  vn_weight_sum %d\n",
	   func, vn_min_rate, vn_max_rate, bp->vn_weight_sum);

	memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
	memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));

	/* global vn counter - maximal Mbps for this vn */
	m_rs_vn.vn_counter.rate = vn_max_rate;

	/* quota - number of bytes transmitted in this period */
	m_rs_vn.vn_counter.quota =
				(vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;

	if (bp->vn_weight_sum) {
		/* credit for each period of the fairness algorithm:
		   number of bytes in T_FAIR (the vn share the port rate).
		   vn_weight_sum should not be larger than 10000, thus
		   T_FAIR_COEF / (8 * vn_weight_sum) will always be greater
		   than zero */
		m_fair_vn.vn_credit_delta =
			max_t(u32, (vn_min_rate * (T_FAIR_COEF /
						   (8 * bp->vn_weight_sum))),
			      (bp->cmng.fair_vars.fair_threshold * 2));
		DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta %d\n",
		   m_fair_vn.vn_credit_delta);
	}

	/* Store it to internal memory */
	for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
		REG_WR(bp, BAR_XSTRORM_INTMEM +
		       XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
		       ((u32 *)(&m_rs_vn))[i]);

	for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
		REG_WR(bp, BAR_XSTRORM_INTMEM +
		       XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
		       ((u32 *)(&m_fair_vn))[i]);
}
/* This function is called upon link interrupt */
static void bnx2x_link_attn(struct bnx2x *bp)
{
	u32 prev_link_status = bp->link_vars.link_status;
	/* Make sure that we are synced with the current statistics */
	bnx2x_stats_handle(bp, STATS_EVENT_STOP);

	bnx2x_link_update(&bp->link_params, &bp->link_vars);

	if (bp->link_vars.link_up) {

		/* dropless flow control */
		if (CHIP_IS_E1H(bp) && bp->dropless_fc) {
			int port = BP_PORT(bp);
			u32 pause_enabled = 0;

			if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
				pause_enabled = 1;

			REG_WR(bp, BAR_USTRORM_INTMEM +
			       USTORM_ETH_PAUSE_ENABLED_OFFSET(port),
			       pause_enabled);
		}

		if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
			struct host_port_stats *pstats;

			pstats = bnx2x_sp(bp, port_stats);
			/* reset old bmac stats */
			memset(&(pstats->mac_stx[0]), 0,
			       sizeof(struct mac_stx));
		}
		if (bp->state == BNX2X_STATE_OPEN)
			bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
	}

	/* indicate link status only if link status actually changed */
	if (prev_link_status != bp->link_vars.link_status)
		bnx2x_link_report(bp);

	if (IS_E1HMF(bp)) {
		int port = BP_PORT(bp);
		int func;
		int vn;

		/* Set the attention towards other drivers on the same port */
		for (vn = VN_0; vn < E1HVN_MAX; vn++) {
			if (vn == BP_E1HVN(bp))
				continue;

			func = ((vn << 1) | port);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
			       (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
		}

		if (bp->link_vars.link_up) {
			int i;

			/* Init rate shaping and fairness contexts */
			bnx2x_init_port_minmax(bp);

			for (vn = VN_0; vn < E1HVN_MAX; vn++)
				bnx2x_init_vn_minmax(bp, 2*vn + port);

			/* Store it to internal memory */
			for (i = 0;
			     i < sizeof(struct cmng_struct_per_port) / 4; i++)
				REG_WR(bp, BAR_XSTRORM_INTMEM +
				  XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
				       ((u32 *)(&bp->cmng))[i]);
		}
	}
}
void bnx2x__link_status_update(struct bnx2x *bp)
{
	if ((bp->state != BNX2X_STATE_OPEN) || (bp->flags & MF_FUNC_DIS))
		return;

	bnx2x_link_status_update(&bp->link_params, &bp->link_vars);

	if (bp->link_vars.link_up)
		bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
	else
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);

	bnx2x_calc_vn_weight_sum(bp);

	/* indicate link status */
	bnx2x_link_report(bp);
}
static void bnx2x_pmf_update(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 val;

	bp->port.pmf = 1;
	DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);

	/* enable nig attention */
	val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
	REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
	REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);

	bnx2x_stats_handle(bp, STATS_EVENT_PMF);
}
/*
 * General service functions
 */

/* send the MCP a request, block until there is a reply */
u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
{
	int func = BP_FUNC(bp);
	u32 seq = ++bp->fw_seq;
	u32 rc = 0;
	u32 cnt = 1;
	u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;

	mutex_lock(&bp->fw_mb_mutex);
	SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
	DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));

	do {
		/* let the FW do its magic ... */
		msleep(delay);

		rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);

		/* Give the FW up to 5 seconds (500*10ms) */
	} while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 500));

	DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
	   cnt*delay, rc, seq);

	/* is this a reply to our command? */
	if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK))
		rc &= FW_MSG_CODE_MASK;
	else {
		/* FW BUG! */
		BNX2X_ERR("FW failed to respond!\n");
		bnx2x_fw_dump(bp);
		rc = 0;
	}
	mutex_unlock(&bp->fw_mb_mutex);

	return rc;
}
static void bnx2x_e1h_disable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);

	netif_tx_disable(bp->dev);

	REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);

	netif_carrier_off(bp->dev);
}
static void bnx2x_e1h_enable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);

	REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);

	/* Tx queue should be only reenabled */
	netif_tx_wake_all_queues(bp->dev);

	/*
	 * Should not call netif_carrier_on since it will be called if the link
	 * is up when checking for link state
	 */
}
static void bnx2x_update_min_max(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int vn, i;

	/* Init rate shaping and fairness contexts */
	bnx2x_init_port_minmax(bp);

	bnx2x_calc_vn_weight_sum(bp);

	for (vn = VN_0; vn < E1HVN_MAX; vn++)
		bnx2x_init_vn_minmax(bp, 2*vn + port);

	if (bp->port.pmf) {
		int func;

		/* Set the attention towards other drivers on the same port */
		for (vn = VN_0; vn < E1HVN_MAX; vn++) {
			if (vn == BP_E1HVN(bp))
				continue;

			func = ((vn << 1) | port);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
			       (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
		}

		/* Store it to internal memory */
		for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
			REG_WR(bp, BAR_XSTRORM_INTMEM +
			       XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
			       ((u32 *)(&bp->cmng))[i]);
	}
}
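/*
 * Handle a DCC event signalled by the MCP through drv_status:
 * enable/disable this PF and/or refresh the bandwidth configuration,
 * then report success or failure back to the MCP.
 */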
static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event)
{
	DP(BNX2X_MSG_MCP, "dcc_event 0x%x\n", dcc_event);

	if (dcc_event & DRV_STATUS_DCC_DISABLE_ENABLE_PF) {

		/*
		 * This is the only place besides the function initialization
		 * where the bp->flags can change so it is done without any
		 * locks
		 */
		if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
			DP(NETIF_MSG_IFDOWN, "mf_cfg function disabled\n");
			bp->flags |= MF_FUNC_DIS;

			bnx2x_e1h_disable(bp);
		} else {
			DP(NETIF_MSG_IFUP, "mf_cfg function enabled\n");
			bp->flags &= ~MF_FUNC_DIS;

			bnx2x_e1h_enable(bp);
		}
		dcc_event &= ~DRV_STATUS_DCC_DISABLE_ENABLE_PF;
	}
	if (dcc_event & DRV_STATUS_DCC_BANDWIDTH_ALLOCATION) {

		bnx2x_update_min_max(bp);
		dcc_event &= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION;
	}

	/* Report results to MCP */
	if (dcc_event)
		bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_FAILURE);
	else
		bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK);
}
/* must be called under the spq lock */
static inline struct eth_spe *bnx2x_sp_get_next(struct bnx2x *bp)
{
	struct eth_spe *next_spe = bp->spq_prod_bd;

	if (bp->spq_prod_bd == bp->spq_last_bd) {
		bp->spq_prod_bd = bp->spq;
		bp->spq_prod_idx = 0;
		DP(NETIF_MSG_TIMER, "end of spq\n");
	} else {
		bp->spq_prod_bd++;
		bp->spq_prod_idx++;
	}
	return next_spe;
}
/* must be called under the spq lock */
static inline void bnx2x_sp_prod_update(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);

	/* Make sure that BD data is updated before writing the producer */
	wmb();

	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
	       bp->spq_prod_idx);
	mmiowb();
}
/* the slow path queue is odd since completions arrive on the fastpath ring */
int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
		  u32 data_hi, u32 data_lo, int common)
{
	struct eth_spe *spe;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return -EIO;
#endif

	spin_lock_bh(&bp->spq_lock);

	if (!bp->spq_left) {
		BNX2X_ERR("BUG! SPQ ring full!\n");
		spin_unlock_bh(&bp->spq_lock);
		bnx2x_panic();
		return -EBUSY;
	}

	spe = bnx2x_sp_get_next(bp);

	/* CID needs port number to be encoded in it */
	spe->hdr.conn_and_cmd_data =
			cpu_to_le32((command << SPE_HDR_CMD_ID_SHIFT) |
				    HW_CID(bp, cid));
	spe->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
	if (common)
		spe->hdr.type |=
			cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));

	spe->data.mac_config_addr.hi = cpu_to_le32(data_hi);
	spe->data.mac_config_addr.lo = cpu_to_le32(data_lo);

	bp->spq_left--;

	DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
	   "SPQE[%x] (%x:%x)  command %d  hw_cid %x  data (%x:%x)  left %x\n",
	   bp->spq_prod_idx, (u32)U64_HI(bp->spq_mapping),
	   (u32)(U64_LO(bp->spq_mapping) +
	   (void *)bp->spq_prod_bd - (void *)bp->spq), command,
	   HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);

	bnx2x_sp_prod_update(bp);
	spin_unlock_bh(&bp->spq_lock);
	return 0;
}
/* acquire split MCP access lock register */
static int bnx2x_acquire_alr(struct bnx2x *bp)
{
	u32 j, val;
	int rc = 0;

	might_sleep();
	for (j = 0; j < 1000; j++) {
		val = (1UL << 31);
		REG_WR(bp, GRCBASE_MCP + 0x9c, val);
		val = REG_RD(bp, GRCBASE_MCP + 0x9c);
		if (val & (1L << 31))
			break;

		msleep(5);
	}
	if (!(val & (1L << 31))) {
		BNX2X_ERR("Cannot acquire MCP access lock register\n");
		rc = -EBUSY;
	}

	return rc;
}
/* release split MCP access lock register */
static void bnx2x_release_alr(struct bnx2x *bp)
{
	REG_WR(bp, GRCBASE_MCP + 0x9c, 0);
}
static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
{
	struct host_def_status_block *def_sb = bp->def_status_blk;
	u16 rc = 0;

	barrier(); /* status block is written to by the chip */
	if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
		bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
		rc |= 1;
	}
	if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
		bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
		rc |= 2;
	}
	if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
		bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
		rc |= 4;
	}
	if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
		bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
		rc |= 8;
	}
	if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
		bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
		rc |= 16;
	}
	return rc;
}
/*
 * slow path service functions
 */

static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
{
	int port = BP_PORT(bp);
	u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
		       COMMAND_REG_ATTN_BITS_SET);
	u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
			      MISC_REG_AEU_MASK_ATTN_FUNC_0;
	u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
				       NIG_REG_MASK_INTERRUPT_PORT0;
	u32 aeu_mask;
	u32 nig_mask = 0;

	if (bp->attn_state & asserted)
		BNX2X_ERR("IGU ERROR\n");

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
	aeu_mask = REG_RD(bp, aeu_addr);

	DP(NETIF_MSG_HW, "aeu_mask %x  newly asserted %x\n",
	   aeu_mask, asserted);
	aeu_mask &= ~(asserted & 0x3ff);
	DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);

	REG_WR(bp, aeu_addr, aeu_mask);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);

	DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
	bp->attn_state |= asserted;
	DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);

	if (asserted & ATTN_HARD_WIRED_MASK) {
		if (asserted & ATTN_NIG_FOR_FUNC) {

			bnx2x_acquire_phy_lock(bp);

			/* save nig interrupt mask */
			nig_mask = REG_RD(bp, nig_int_mask_addr);
			REG_WR(bp, nig_int_mask_addr, 0);

			bnx2x_link_attn(bp);

			/* handle unicore attn? */
		}
		if (asserted & ATTN_SW_TIMER_4_FUNC)
			DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");

		if (asserted & GPIO_2_FUNC)
			DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");

		if (asserted & GPIO_3_FUNC)
			DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");

		if (asserted & GPIO_4_FUNC)
			DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");

		if (port == 0) {
			if (asserted & ATTN_GENERAL_ATTN_1) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_2) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_3) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
			}
		} else {
			if (asserted & ATTN_GENERAL_ATTN_4) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_5) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_6) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
			}
		}

	} /* if hardwired */

	DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
	   asserted, hc_addr);
	REG_WR(bp, hc_addr, asserted);

	/* now set back the mask */
	if (asserted & ATTN_NIG_FOR_FUNC) {
		REG_WR(bp, nig_int_mask_addr, nig_mask);
		bnx2x_release_phy_lock(bp);
	}
}
static inline void bnx2x_fan_failure(struct bnx2x *bp)
{
	int port = BP_PORT(bp);

	/* mark the failure */
	bp->link_params.ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
	bp->link_params.ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
	SHMEM_WR(bp, dev_info.port_hw_config[port].external_phy_config,
		 bp->link_params.ext_phy_config);

	/* log the failure */
	netdev_err(bp->dev, "Fan Failure on Network Controller has caused"
		   " the driver to shutdown the card to prevent permanent"
		   " damage.  Please contact OEM Support for assistance\n");
}
static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
{
	int port = BP_PORT(bp);
	int reg_offset;
	u32 val, swap_val, swap_override;

	reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
			     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);

	if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {

		val = REG_RD(bp, reg_offset);
		val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("SPIO5 hw attention\n");

		/* Fan failure attention */
		switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
			/* Low power mode is controlled by GPIO 2 */
			bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
				       MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
			/* The PHY reset is controlled by GPIO 1 */
			bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
				       MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
			/* The PHY reset is controlled by GPIO 1 */
			/* fake the port number to cancel the swap done in
			   set_gpio() */
			swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
			swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
			port = (swap_val && swap_override) ^ 1;
			bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
				       MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
			break;

		default:
			break;
		}
		bnx2x_fan_failure(bp);
	}

	if (attn & (AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 |
		    AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1)) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_handle_module_detect_int(&bp->link_params);
		bnx2x_release_phy_lock(bp);
	}

	if (attn & HW_INTERRUT_ASSERT_SET_0) {

		val = REG_RD(bp, reg_offset);
		val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
			  (u32)(attn & HW_INTERRUT_ASSERT_SET_0));
		bnx2x_panic();
	}
}
static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
{
	u32 val;

	if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {

		val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
		BNX2X_ERR("DB hw attention 0x%x\n", val);
		/* DORQ discard attention */
		if (val & 0x2)
			BNX2X_ERR("FATAL error from DORQ\n");
	}

	if (attn & HW_INTERRUT_ASSERT_SET_1) {

		int port = BP_PORT(bp);
		int reg_offset;

		reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
				     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);

		val = REG_RD(bp, reg_offset);
		val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
			  (u32)(attn & HW_INTERRUT_ASSERT_SET_1));
		bnx2x_panic();
	}
}
static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
{
	u32 val;

	if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {

		val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
		BNX2X_ERR("CFC hw attention 0x%x\n", val);
		/* CFC error attention */
		if (val & 0x2)
			BNX2X_ERR("FATAL error from CFC\n");
	}

	if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {

		val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
		BNX2X_ERR("PXP hw attention 0x%x\n", val);
		/* RQ_USDMDP_FIFO_OVERFLOW */
		if (val & 0x18000)
			BNX2X_ERR("FATAL error from PXP\n");
	}

	if (attn & HW_INTERRUT_ASSERT_SET_2) {

		int port = BP_PORT(bp);
		int reg_offset;

		reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
				     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);

		val = REG_RD(bp, reg_offset);
		val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
			  (u32)(attn & HW_INTERRUT_ASSERT_SET_2));
		bnx2x_panic();
	}
}
static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
{
	u32 val;

	if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {

		if (attn & BNX2X_PMF_LINK_ASSERT) {
			int func = BP_FUNC(bp);

			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
			bp->mf_config = SHMEM_RD(bp,
					   mf_cfg.func_mf_config[func].config);
			val = SHMEM_RD(bp, func_mb[func].drv_status);
			if (val & DRV_STATUS_DCC_EVENT_MASK)
				bnx2x_dcc_event(bp,
					    (val & DRV_STATUS_DCC_EVENT_MASK));
			bnx2x__link_status_update(bp);
			if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF))
				bnx2x_pmf_update(bp);

		} else if (attn & BNX2X_MC_ASSERT_BITS) {

			BNX2X_ERR("MC assert!\n");
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
			bnx2x_panic();

		} else if (attn & BNX2X_MCP_ASSERT) {

			BNX2X_ERR("MCP assert!\n");
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
			bnx2x_fw_dump(bp);

		} else
			BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
	}

	if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
		BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
		if (attn & BNX2X_GRC_TIMEOUT) {
			val = CHIP_IS_E1H(bp) ?
				REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
			BNX2X_ERR("GRC time-out 0x%08x\n", val);
		}
		if (attn & BNX2X_GRC_RSV) {
			val = CHIP_IS_E1H(bp) ?
				REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
			BNX2X_ERR("GRC reserved 0x%08x\n", val);
		}
		REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
	}
}
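/*
 * A 16-bit load counter and a "reset in progress" flag are packed into
 * the MISC_REG_GENERIC_POR_1 scratch register so that all driver
 * instances on the chip can coordinate a common HW reset.
 */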
#define BNX2X_MISC_GEN_REG	MISC_REG_GENERIC_POR_1
#define LOAD_COUNTER_BITS	16 /* Number of bits for load counter */
#define LOAD_COUNTER_MASK	(((u32)0x1 << LOAD_COUNTER_BITS) - 1)
#define RESET_DONE_FLAG_MASK	(~LOAD_COUNTER_MASK)
#define RESET_DONE_FLAG_SHIFT	LOAD_COUNTER_BITS
#define CHIP_PARITY_SUPPORTED(bp) (CHIP_IS_E1(bp) || CHIP_IS_E1H(bp))

/*
 * should be run under rtnl lock
 */
static inline void bnx2x_set_reset_done(struct bnx2x *bp)
{
	u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
	val &= ~(1 << RESET_DONE_FLAG_SHIFT);
	REG_WR(bp, BNX2X_MISC_GEN_REG, val);
	barrier();
	mmiowb();
}

/*
 * should be run under rtnl lock
 */
static inline void bnx2x_set_reset_in_progress(struct bnx2x *bp)
{
	u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
	val |= (1 << RESET_DONE_FLAG_SHIFT);
	REG_WR(bp, BNX2X_MISC_GEN_REG, val);
	barrier();
	mmiowb();
}

/*
 * should be run under rtnl lock
 */
bool bnx2x_reset_is_done(struct bnx2x *bp)
{
	u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
	DP(NETIF_MSG_HW, "GEN_REG_VAL=0x%08x\n", val);
	return (val & RESET_DONE_FLAG_MASK) ? false : true;
}

/*
 * should be run under rtnl lock
 */
inline void bnx2x_inc_load_cnt(struct bnx2x *bp)
{
	u32 val1, val = REG_RD(bp, BNX2X_MISC_GEN_REG);

	DP(NETIF_MSG_HW, "Old GEN_REG_VAL=0x%08x\n", val);

	val1 = ((val & LOAD_COUNTER_MASK) + 1) & LOAD_COUNTER_MASK;
	REG_WR(bp, BNX2X_MISC_GEN_REG, (val & RESET_DONE_FLAG_MASK) | val1);
	barrier();
	mmiowb();
}

/*
 * should be run under rtnl lock
 */
u32 bnx2x_dec_load_cnt(struct bnx2x *bp)
{
	u32 val1, val = REG_RD(bp, BNX2X_MISC_GEN_REG);

	DP(NETIF_MSG_HW, "Old GEN_REG_VAL=0x%08x\n", val);

	val1 = ((val & LOAD_COUNTER_MASK) - 1) & LOAD_COUNTER_MASK;
	REG_WR(bp, BNX2X_MISC_GEN_REG, (val & RESET_DONE_FLAG_MASK) | val1);
	barrier();
	mmiowb();

	return val1;
}

/*
 * should be run under rtnl lock
 */
static inline u32 bnx2x_get_load_cnt(struct bnx2x *bp)
{
	return REG_RD(bp, BNX2X_MISC_GEN_REG) & LOAD_COUNTER_MASK;
}

static inline void bnx2x_clear_load_cnt(struct bnx2x *bp)
{
	u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
	REG_WR(bp, BNX2X_MISC_GEN_REG, val & (~LOAD_COUNTER_MASK));
}
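/*
 * The bnx2x_print_blocks_with_parityN() helpers walk the asserted bits of
 * one AEU "after invert" signature word, print the name of each HW block
 * with a parity error and clear the bit, returning the running count so
 * the output of all four walkers forms a single comma-separated list.
 */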
static inline void _print_next_block(int idx, const char *blk)
{
	if (idx)
		pr_cont(", ");
	pr_cont("%s", blk);
}

static inline int bnx2x_print_blocks_with_parity0(u32 sig, int par_num)
{
	int i = 0;
	u32 cur_bit = 0;
	for (i = 0; sig; i++) {
		cur_bit = ((u32)0x1 << i);
		if (sig & cur_bit) {
			switch (cur_bit) {
			case AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR:
				_print_next_block(par_num++, "BRB");
				break;
			case AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR:
				_print_next_block(par_num++, "PARSER");
				break;
			case AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR:
				_print_next_block(par_num++, "TSDM");
				break;
			case AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR:
				_print_next_block(par_num++, "SEARCHER");
				break;
			case AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR:
				_print_next_block(par_num++, "TSEMI");
				break;
			}

			/* Clear the bit */
			sig &= ~cur_bit;
		}
	}

	return par_num;
}

static inline int bnx2x_print_blocks_with_parity1(u32 sig, int par_num)
{
	int i = 0;
	u32 cur_bit = 0;
	for (i = 0; sig; i++) {
		cur_bit = ((u32)0x1 << i);
		if (sig & cur_bit) {
			switch (cur_bit) {
			case AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR:
				_print_next_block(par_num++, "PBCLIENT");
				break;
			case AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR:
				_print_next_block(par_num++, "QM");
				break;
			case AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR:
				_print_next_block(par_num++, "XSDM");
				break;
			case AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR:
				_print_next_block(par_num++, "XSEMI");
				break;
			case AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR:
				_print_next_block(par_num++, "DOORBELLQ");
				break;
			case AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR:
				_print_next_block(par_num++, "VAUX PCI CORE");
				break;
			case AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR:
				_print_next_block(par_num++, "DEBUG");
				break;
			case AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR:
				_print_next_block(par_num++, "USDM");
				break;
			case AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR:
				_print_next_block(par_num++, "USEMI");
				break;
			case AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR:
				_print_next_block(par_num++, "UPB");
				break;
			case AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR:
				_print_next_block(par_num++, "CSDM");
				break;
			}

			/* Clear the bit */
			sig &= ~cur_bit;
		}
	}

	return par_num;
}

static inline int bnx2x_print_blocks_with_parity2(u32 sig, int par_num)
{
	int i = 0;
	u32 cur_bit = 0;
	for (i = 0; sig; i++) {
		cur_bit = ((u32)0x1 << i);
		if (sig & cur_bit) {
			switch (cur_bit) {
			case AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR:
				_print_next_block(par_num++, "CSEMI");
				break;
			case AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR:
				_print_next_block(par_num++, "PXP");
				break;
			case AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR:
				_print_next_block(par_num++,
					"PXPPCICLOCKCLIENT");
				break;
			case AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR:
				_print_next_block(par_num++, "CFC");
				break;
			case AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR:
				_print_next_block(par_num++, "CDU");
				break;
			case AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR:
				_print_next_block(par_num++, "IGU");
				break;
			case AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR:
				_print_next_block(par_num++, "MISC");
				break;
			}

			/* Clear the bit */
			sig &= ~cur_bit;
		}
	}

	return par_num;
}

static inline int bnx2x_print_blocks_with_parity3(u32 sig, int par_num)
{
	int i = 0;
	u32 cur_bit = 0;
	for (i = 0; sig; i++) {
		cur_bit = ((u32)0x1 << i);
		if (sig & cur_bit) {
			switch (cur_bit) {
			case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY:
				_print_next_block(par_num++, "MCP ROM");
				break;
			case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY:
				_print_next_block(par_num++, "MCP UMP RX");
				break;
			case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY:
				_print_next_block(par_num++, "MCP UMP TX");
				break;
			case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY:
				_print_next_block(par_num++, "MCP SCPAD");
				break;
			}

			/* Clear the bit */
			sig &= ~cur_bit;
		}
	}

	return par_num;
}

static inline bool bnx2x_parity_attn(struct bnx2x *bp, u32 sig0, u32 sig1,
				     u32 sig2, u32 sig3)
{
	if ((sig0 & HW_PRTY_ASSERT_SET_0) || (sig1 & HW_PRTY_ASSERT_SET_1) ||
	    (sig2 & HW_PRTY_ASSERT_SET_2) || (sig3 & HW_PRTY_ASSERT_SET_3)) {
		int par_num = 0;
		DP(NETIF_MSG_HW, "Was parity error: HW block parity attention: "
			"[0]:0x%08x [1]:0x%08x "
			"[2]:0x%08x [3]:0x%08x\n",
			  sig0 & HW_PRTY_ASSERT_SET_0,
			  sig1 & HW_PRTY_ASSERT_SET_1,
			  sig2 & HW_PRTY_ASSERT_SET_2,
			  sig3 & HW_PRTY_ASSERT_SET_3);
		printk(KERN_ERR"%s: Parity errors detected in blocks: ",
		       bp->dev->name);
		par_num = bnx2x_print_blocks_with_parity0(
			sig0 & HW_PRTY_ASSERT_SET_0, par_num);
		par_num = bnx2x_print_blocks_with_parity1(
			sig1 & HW_PRTY_ASSERT_SET_1, par_num);
		par_num = bnx2x_print_blocks_with_parity2(
			sig2 & HW_PRTY_ASSERT_SET_2, par_num);
		par_num = bnx2x_print_blocks_with_parity3(
			sig3 & HW_PRTY_ASSERT_SET_3, par_num);
		printk("\n");
		return true;
	} else
		return false;
}

bool bnx2x_chk_parity_attn(struct bnx2x *bp)
{
	struct attn_route attn;
	int port = BP_PORT(bp);

	attn.sig[0] = REG_RD(bp,
		MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 +
			     port*4);
	attn.sig[1] = REG_RD(bp,
		MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 +
			     port*4);
	attn.sig[2] = REG_RD(bp,
		MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 +
			     port*4);
	attn.sig[3] = REG_RD(bp,
		MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 +
			     port*4);

	return bnx2x_parity_attn(bp, attn.sig[0], attn.sig[1], attn.sig[2],
				 attn.sig[3]);
}
static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
{
	struct attn_route attn, *group_mask;
	int port = BP_PORT(bp);
	int index;
	u32 reg_addr;
	u32 val;
	u32 aeu_mask;

	/* need to take HW lock because MCP or other port might also
	   try to handle this event */
	bnx2x_acquire_alr(bp);

	if (bnx2x_chk_parity_attn(bp)) {
		bp->recovery_state = BNX2X_RECOVERY_INIT;
		bnx2x_set_reset_in_progress(bp);
		schedule_delayed_work(&bp->reset_task, 0);
		/* Disable HW interrupts */
		bnx2x_int_disable(bp);
		bnx2x_release_alr(bp);
		/* In case of parity errors don't handle attentions so that
		 * other function would "see" parity errors.
		 */
		return;
	}

	attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
	attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
	attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
	attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
	DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
	   attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);

	for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
		if (deasserted & (1 << index)) {
			group_mask = &bp->attn_group[index];

			DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
			   index, group_mask->sig[0], group_mask->sig[1],
			   group_mask->sig[2], group_mask->sig[3]);

			bnx2x_attn_int_deasserted3(bp,
					attn.sig[3] & group_mask->sig[3]);
			bnx2x_attn_int_deasserted1(bp,
					attn.sig[1] & group_mask->sig[1]);
			bnx2x_attn_int_deasserted2(bp,
					attn.sig[2] & group_mask->sig[2]);
			bnx2x_attn_int_deasserted0(bp,
					attn.sig[0] & group_mask->sig[0]);
		}
	}

	bnx2x_release_alr(bp);

	reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);

	val = ~deasserted;
	DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
	   val, reg_addr);
	REG_WR(bp, reg_addr, val);

	if (~bp->attn_state & deasserted)
		BNX2X_ERR("IGU ERROR\n");

	reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
			  MISC_REG_AEU_MASK_ATTN_FUNC_0;

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
	aeu_mask = REG_RD(bp, reg_addr);

	DP(NETIF_MSG_HW, "aeu_mask %x  newly deasserted %x\n",
	   aeu_mask, deasserted);
	aeu_mask |= (deasserted & 0x3ff);
	DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);

	REG_WR(bp, reg_addr, aeu_mask);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);

	DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
	bp->attn_state &= ~deasserted;
	DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
}
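/*
 * Attention state tracking: a bit is newly asserted when it is set in
 * attn_bits but acknowledged in neither attn_ack nor the driver's cached
 * attn_state; it is deasserted when the opposite holds for all three.
 */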
static void bnx2x_attn_int(struct bnx2x *bp)
{
	/* read local copy of bits */
	u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
								attn_bits);
	u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
								attn_bits_ack);
	u32 attn_state = bp->attn_state;

	/* look for changed bits */
	u32 asserted   =  attn_bits & ~attn_ack & ~attn_state;
	u32 deasserted = ~attn_bits &  attn_ack &  attn_state;

	DP(NETIF_MSG_HW,
	   "attn_bits %x  attn_ack %x  asserted %x  deasserted %x\n",
	   attn_bits, attn_ack, asserted, deasserted);

	if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
		BNX2X_ERR("BAD attention state\n");

	/* handle bits that were raised */
	if (asserted)
		bnx2x_attn_int_asserted(bp, asserted);

	if (deasserted)
		bnx2x_attn_int_deasserted(bp, deasserted);
}
static void bnx2x_sp_task(struct work_struct *work)
{
	struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
	u16 status;

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return;
	}

	status = bnx2x_update_dsb_idx(bp);
/*	if (status == 0)				     */
/*		BNX2X_ERR("spurious slowpath interrupt!\n"); */

	DP(NETIF_MSG_INTR, "got a slowpath interrupt (status 0x%x)\n", status);

	/* HW attentions */
	if (status & 0x1) {
		bnx2x_attn_int(bp);
		status &= ~0x1;
	}

	/* CStorm events: STAT_QUERY */
	if (status & 0x2) {
		DP(BNX2X_MSG_SP, "CStorm events: STAT_QUERY\n");
		status &= ~0x2;
	}

	if (unlikely(status))
		DP(NETIF_MSG_INTR, "got an unknown interrupt! (status 0x%x)\n",
		   status);

	bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, le16_to_cpu(bp->def_att_idx),
		     IGU_INT_NOP, 1);
	bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
		     IGU_INT_NOP, 1);
	bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
		     IGU_INT_NOP, 1);
	bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
		     IGU_INT_NOP, 1);
	bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
		     IGU_INT_ENABLE, 1);
}

irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct bnx2x *bp = netdev_priv(dev);

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

	bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, 0, IGU_INT_DISABLE, 0);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

#ifdef BCM_CNIC
	{
		struct cnic_ops *c_ops;

		rcu_read_lock();
		c_ops = rcu_dereference(bp->cnic_ops);
		if (c_ops)
			c_ops->cnic_handler(bp->cnic_data, NULL);
		rcu_read_unlock();
	}
#endif
	queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);

	return IRQ_HANDLED;
}
/* end of slow path */
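/*
 * The periodic timer below also drives the driver/MCP heartbeat: the
 * driver advances drv_pulse in shared memory and expects the MCP's
 * mcp_pulse to trail it by at most one sequence number.
 */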
static void bnx2x_timer(unsigned long data)
{
	struct bnx2x *bp = (struct bnx2x *) data;

	if (!netif_running(bp->dev))
		return;

	if (atomic_read(&bp->intr_sem) != 0)
		goto timer_restart;

	if (poll) {
		struct bnx2x_fastpath *fp = &bp->fp[0];
		int rc;

		bnx2x_tx_int(fp);
		rc = bnx2x_rx_int(fp, 1000);
	}

	if (!BP_NOMCP(bp)) {
		int func = BP_FUNC(bp);
		u32 drv_pulse;
		u32 mcp_pulse;

		++bp->fw_drv_pulse_wr_seq;
		bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
		/* TBD - add SYSTEM_TIME */
		drv_pulse = bp->fw_drv_pulse_wr_seq;
		SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);

		mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
			     MCP_PULSE_SEQ_MASK);
		/* The delta between driver pulse and mcp response
		 * should be 1 (before mcp response) or 0 (after mcp response)
		 */
		if ((drv_pulse != mcp_pulse) &&
		    (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
			/* someone lost a heartbeat... */
			BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
				  drv_pulse, mcp_pulse);
		}
	}

	if (bp->state == BNX2X_STATE_OPEN)
		bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);

timer_restart:
	mod_timer(&bp->timer, jiffies + bp->current_interval);
}
/* end of Statistics */

/* nic init */

/*
 * nic init service functions
 */

static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
{
	int port = BP_PORT(bp);

	/* "CSTORM" */
	bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
			CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), 0,
			CSTORM_SB_STATUS_BLOCK_U_SIZE / 4);
	bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
			CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), 0,
			CSTORM_SB_STATUS_BLOCK_C_SIZE / 4);
}
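/*
 * A fastpath status block carries a USTORM and a CSTORM section; for each
 * one the host-side DMA address is programmed into internal memory and all
 * host-coalescing indices start out disabled (set to 1).
 */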
void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
		   dma_addr_t mapping, int sb_id)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int index;
	u64 section;

	/* USTORM */
	section = ((u64)mapping) + offsetof(struct host_status_block,
					    u_status_block);
	sb->u_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id), U64_LO(section));
	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       ((CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_USB_FUNC_OFF +
		CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), func);

	for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id, index), 1);

	/* CSTORM */
	section = ((u64)mapping) + offsetof(struct host_status_block,
					    c_status_block);
	sb->c_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id), U64_LO(section));
	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       ((CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
		CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), func);

	for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id, index), 1);

	bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
}
static void bnx2x_zero_def_sb(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);

	bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY +
			TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
			sizeof(struct tstorm_def_status_block)/4);
	bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
			CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), 0,
			sizeof(struct cstorm_def_status_block_u)/4);
	bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
			CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), 0,
			sizeof(struct cstorm_def_status_block_c)/4);
	bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY +
			XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
			sizeof(struct xstorm_def_status_block)/4);
}
static void bnx2x_init_def_sb(struct bnx2x *bp,
			      struct host_def_status_block *def_sb,
			      dma_addr_t mapping, int sb_id)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int index, val, reg_offset;
	u64 section;

	/* ATTN */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    atten_status_block);
	def_sb->atten_status_block.status_block_id = sb_id;

	bp->attn_state = 0;

	reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
			     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);

	for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
		bp->attn_group[index].sig[0] = REG_RD(bp,
						      reg_offset + 0x10*index);
		bp->attn_group[index].sig[1] = REG_RD(bp,
						reg_offset + 0x4 + 0x10*index);
		bp->attn_group[index].sig[2] = REG_RD(bp,
						reg_offset + 0x8 + 0x10*index);
		bp->attn_group[index].sig[3] = REG_RD(bp,
						reg_offset + 0xc + 0x10*index);
	}

	reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
			     HC_REG_ATTN_MSG0_ADDR_L);

	REG_WR(bp, reg_offset, U64_LO(section));
	REG_WR(bp, reg_offset + 4, U64_HI(section));

	reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);

	val = REG_RD(bp, reg_offset);
	val |= sb_id;
	REG_WR(bp, reg_offset, val);

	/* USTORM */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    u_def_status_block);
	def_sb->u_def_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func), U64_LO(section));
	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       ((CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_USB_FUNC_OFF +
		CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), func);

	for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_DEF_SB_HC_DISABLE_U_OFFSET(func, index), 1);

	/* CSTORM */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    c_def_status_block);
	def_sb->c_def_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func), U64_LO(section));
	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       ((CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
		CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), func);

	for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_DEF_SB_HC_DISABLE_C_OFFSET(func, index), 1);

	/* TSTORM */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    t_def_status_block);
	def_sb->t_def_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
		TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);

	for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_TSTRORM_INTMEM +
			 TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);

	/* XSTORM */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    x_def_status_block);
	def_sb->x_def_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_XSTRORM_INTMEM +
	       XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
	REG_WR(bp, BAR_XSTRORM_INTMEM +
	       ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
		XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);

	for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_XSTRORM_INTMEM +
			 XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);

	bp->stats_pending = 0;
	bp->set_mac_pending = 0;

	bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
}
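/*
 * Coalescing note: the rx_ticks/tx_ticks values are scaled down by
 * 4*BNX2X_BTR before being written as HC timeouts; a resulting value of 0
 * disables host coalescing for that index, and the matching DISABLE flag
 * is written accordingly.
 */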
void bnx2x_update_coalesce(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int i;

	for_each_queue(bp, i) {
		int sb_id = bp->fp[i].sb_id;

		/* HC_INDEX_U_ETH_RX_CQ_CONS */
		REG_WR8(bp, BAR_CSTRORM_INTMEM +
			CSTORM_SB_HC_TIMEOUT_U_OFFSET(port, sb_id,
						      U_SB_ETH_RX_CQ_INDEX),
			bp->rx_ticks/(4 * BNX2X_BTR));
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id,
						       U_SB_ETH_RX_CQ_INDEX),
			 (bp->rx_ticks/(4 * BNX2X_BTR)) ? 0 : 1);

		/* HC_INDEX_C_ETH_TX_CQ_CONS */
		REG_WR8(bp, BAR_CSTRORM_INTMEM +
			CSTORM_SB_HC_TIMEOUT_C_OFFSET(port, sb_id,
						      C_SB_ETH_TX_CQ_INDEX),
			bp->tx_ticks/(4 * BNX2X_BTR));
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id,
						       C_SB_ETH_TX_CQ_INDEX),
			 (bp->tx_ticks/(4 * BNX2X_BTR)) ? 0 : 1);
	}
}
static void bnx2x_init_sp_ring(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);

	spin_lock_init(&bp->spq_lock);

	bp->spq_left = MAX_SPQ_PENDING;
	bp->spq_prod_idx = 0;
	bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
	bp->spq_prod_bd = bp->spq;
	bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;

	REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
	       U64_LO(bp->spq_mapping));
	REG_WR(bp,
	       XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
	       U64_HI(bp->spq_mapping));

	REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
	       bp->spq_prod_idx);
}
static void bnx2x_init_context(struct bnx2x *bp)
{
	int i;

	/* Rx */
	for_each_queue(bp, i) {
		struct eth_context *context = bnx2x_sp(bp, context[i].eth);
		struct bnx2x_fastpath *fp = &bp->fp[i];
		u8 cl_id = fp->cl_id;

		context->ustorm_st_context.common.sb_index_numbers =
						BNX2X_RX_SB_INDEX_NUM;
		context->ustorm_st_context.common.clientId = cl_id;
		context->ustorm_st_context.common.status_block_id = fp->sb_id;
		context->ustorm_st_context.common.flags =
			(USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT |
			 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_STATISTICS);
		context->ustorm_st_context.common.statistics_counter_id =
						cl_id;
		context->ustorm_st_context.common.mc_alignment_log_size =
						BNX2X_RX_ALIGN_SHIFT;
		context->ustorm_st_context.common.bd_buff_size =
						bp->rx_buf_size;
		context->ustorm_st_context.common.bd_page_base_hi =
						U64_HI(fp->rx_desc_mapping);
		context->ustorm_st_context.common.bd_page_base_lo =
						U64_LO(fp->rx_desc_mapping);
		if (!fp->disable_tpa) {
			context->ustorm_st_context.common.flags |=
				USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA;
			context->ustorm_st_context.common.sge_buff_size =
				(u16)min_t(u32, SGE_PAGE_SIZE*PAGES_PER_SGE,
					   0xffff);
			context->ustorm_st_context.common.sge_page_base_hi =
						U64_HI(fp->rx_sge_mapping);
			context->ustorm_st_context.common.sge_page_base_lo =
						U64_LO(fp->rx_sge_mapping);

			context->ustorm_st_context.common.max_sges_for_packet =
				SGE_PAGE_ALIGN(bp->dev->mtu) >> SGE_PAGE_SHIFT;
			context->ustorm_st_context.common.max_sges_for_packet =
				((context->ustorm_st_context.common.
				  max_sges_for_packet + PAGES_PER_SGE - 1) &
				 (~(PAGES_PER_SGE - 1))) >> PAGES_PER_SGE_SHIFT;
		}

		context->ustorm_ag_context.cdu_usage =
			CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
					       CDU_REGION_NUMBER_UCM_AG,
					       ETH_CONNECTION_TYPE);

		context->xstorm_ag_context.cdu_reserved =
			CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
					       CDU_REGION_NUMBER_XCM_AG,
					       ETH_CONNECTION_TYPE);
	}

	/* Tx */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		struct eth_context *context =
			bnx2x_sp(bp, context[i].eth);

		context->cstorm_st_context.sb_index_number =
						C_SB_ETH_TX_CQ_INDEX;
		context->cstorm_st_context.status_block_id = fp->sb_id;

		context->xstorm_st_context.tx_bd_page_base_hi =
						U64_HI(fp->tx_desc_mapping);
		context->xstorm_st_context.tx_bd_page_base_lo =
						U64_LO(fp->tx_desc_mapping);
		context->xstorm_st_context.statistics_data = (fp->cl_id |
				XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);
	}
}
static void bnx2x_init_ind_table(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	int i;

	if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
		return;

	DP(NETIF_MSG_IFUP,
	   "Initializing indirection table  multi_mode %d\n", bp->multi_mode);
	for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
		REG_WR8(bp, BAR_TSTRORM_INTMEM +
			TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
			bp->fp->cl_id + (i % bp->num_queues));
}
void bnx2x_set_client_config(struct bnx2x *bp)
{
	struct tstorm_eth_client_config tstorm_client = {0};
	int port = BP_PORT(bp);
	int i;

	tstorm_client.mtu = bp->dev->mtu;
	tstorm_client.config_flags =
				(TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE |
				 TSTORM_ETH_CLIENT_CONFIG_E1HOV_REM_ENABLE);
#ifdef BCM_VLAN
	if (bp->rx_mode && bp->vlgrp && (bp->flags & HW_VLAN_RX_FLAG)) {
		tstorm_client.config_flags |=
				TSTORM_ETH_CLIENT_CONFIG_VLAN_REM_ENABLE;
		DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
	}
#endif

	for_each_queue(bp, i) {
		tstorm_client.statistics_counter_id = bp->fp[i].cl_id;

		REG_WR(bp, BAR_TSTRORM_INTMEM +
		       TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
		       ((u32 *)&tstorm_client)[0]);
		REG_WR(bp, BAR_TSTRORM_INTMEM +
		       TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
		       ((u32 *)&tstorm_client)[1]);
	}

	DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
	   ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
}
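/*
 * In the rx-mode setup below, 'mask' selects which clients of this
 * function the drop/accept-all filters apply to, while llh_mask
 * independently controls which packet classes the NIG passes from the
 * management path up to the host.
 */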
void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
{
	struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
	int mode = bp->rx_mode;
	int mask = bp->rx_mode_cl_mask;
	int func = BP_FUNC(bp);
	int port = BP_PORT(bp);
	int i;
	/* All but management unicast packets should pass to the host as well */
	u32 llh_mask =
		NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_BRCST |
		NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_MLCST |
		NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_VLAN |
		NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_NO_VLAN;

	DP(NETIF_MSG_IFUP, "rx mode %d  mask 0x%x\n", mode, mask);

	switch (mode) {
	case BNX2X_RX_MODE_NONE: /* no Rx */
		tstorm_mac_filter.ucast_drop_all = mask;
		tstorm_mac_filter.mcast_drop_all = mask;
		tstorm_mac_filter.bcast_drop_all = mask;
		break;

	case BNX2X_RX_MODE_NORMAL:
		tstorm_mac_filter.bcast_accept_all = mask;
		break;

	case BNX2X_RX_MODE_ALLMULTI:
		tstorm_mac_filter.mcast_accept_all = mask;
		tstorm_mac_filter.bcast_accept_all = mask;
		break;

	case BNX2X_RX_MODE_PROMISC:
		tstorm_mac_filter.ucast_accept_all = mask;
		tstorm_mac_filter.mcast_accept_all = mask;
		tstorm_mac_filter.bcast_accept_all = mask;
		/* pass management unicast packets as well */
		llh_mask |= NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_UNCST;
		break;

	default:
		BNX2X_ERR("BAD rx mode (%d)\n", mode);
		break;
	}

	REG_WR(bp,
	       (port ? NIG_REG_LLH1_BRB1_DRV_MASK : NIG_REG_LLH0_BRB1_DRV_MASK),
	       llh_mask);

	for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
		REG_WR(bp, BAR_TSTRORM_INTMEM +
		       TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
		       ((u32 *)&tstorm_mac_filter)[i]);

/*		DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
		   ((u32 *)&tstorm_mac_filter)[i]); */
	}

	if (mode != BNX2X_RX_MODE_NONE)
		bnx2x_set_client_config(bp);
}
static void bnx2x_init_internal_common(struct bnx2x *bp)
{
	int i;

	/* Zero this manually as its initialization is
	   currently missing in the initTool */
	for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_AGG_DATA_OFFSET + i * 4, 0);
}

static void bnx2x_init_internal_port(struct bnx2x *bp)
{
	int port = BP_PORT(bp);

	REG_WR(bp,
	       BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_U_OFFSET(port), BNX2X_BTR);
	REG_WR(bp,
	       BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_C_OFFSET(port), BNX2X_BTR);
	REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
}
static void bnx2x_init_internal_func(struct bnx2x *bp)
{
	struct tstorm_eth_function_common_config tstorm_config = {0};
	struct stats_indication_flags stats_flags = {0};
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int i, j;
	u32 offset;
	u16 max_agg_size;

	tstorm_config.config_flags = RSS_FLAGS(bp);

	if (is_multi(bp))
		tstorm_config.rss_result_mask = MULTI_MASK;

	/* Enable TPA if needed */
	if (bp->flags & TPA_ENABLE_FLAG)
		tstorm_config.config_flags |=
			TSTORM_ETH_FUNCTION_COMMON_CONFIG_ENABLE_TPA;

	if (IS_E1HMF(bp))
		tstorm_config.config_flags |=
				TSTORM_ETH_FUNCTION_COMMON_CONFIG_E1HOV_IN_CAM;

	tstorm_config.leading_client_id = BP_L_ID(bp);

	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
	       (*(u32 *)&tstorm_config));

	bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
	bp->rx_mode_cl_mask = (1 << BP_L_ID(bp));
	bnx2x_set_storm_rx_mode(bp);

	for_each_queue(bp, i) {
		u8 cl_id = bp->fp[i].cl_id;

		/* reset xstorm per client statistics */
		offset = BAR_XSTRORM_INTMEM +
			 XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
		for (j = 0;
		     j < sizeof(struct xstorm_per_client_stats) / 4; j++)
			REG_WR(bp, offset + j*4, 0);

		/* reset tstorm per client statistics */
		offset = BAR_TSTRORM_INTMEM +
			 TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
		for (j = 0;
		     j < sizeof(struct tstorm_per_client_stats) / 4; j++)
			REG_WR(bp, offset + j*4, 0);

		/* reset ustorm per client statistics */
		offset = BAR_USTRORM_INTMEM +
			 USTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
		for (j = 0;
		     j < sizeof(struct ustorm_per_client_stats) / 4; j++)
			REG_WR(bp, offset + j*4, 0);
	}

	/* Init statistics related context */
	stats_flags.collect_eth = 1;

	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
	       ((u32 *)&stats_flags)[0]);
	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
	       ((u32 *)&stats_flags)[1]);

	REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
	       ((u32 *)&stats_flags)[0]);
	REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
	       ((u32 *)&stats_flags)[1]);

	REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func),
	       ((u32 *)&stats_flags)[0]);
	REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func) + 4,
	       ((u32 *)&stats_flags)[1]);

	REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
	       ((u32 *)&stats_flags)[0]);
	REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
	       ((u32 *)&stats_flags)[1]);

	REG_WR(bp, BAR_XSTRORM_INTMEM +
	       XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
	       U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
	REG_WR(bp, BAR_XSTRORM_INTMEM +
	       XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
	       U64_HI(bnx2x_sp_mapping(bp, fw_stats)));

	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
	       U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
	       U64_HI(bnx2x_sp_mapping(bp, fw_stats)));

	REG_WR(bp, BAR_USTRORM_INTMEM +
	       USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
	       U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
	REG_WR(bp, BAR_USTRORM_INTMEM +
	       USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
	       U64_HI(bnx2x_sp_mapping(bp, fw_stats)));

	if (CHIP_IS_E1H(bp)) {
		REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
			IS_E1HMF(bp));
		REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
			IS_E1HMF(bp));
		REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
			IS_E1HMF(bp));
		REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
			IS_E1HMF(bp));

		REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
			 bp->e1hov);
	}

	/* Init CQ ring mapping and aggregation size, the FW limit is 8 frags */
	max_agg_size = min_t(u32, (min_t(u32, 8, MAX_SKB_FRAGS) *
				   SGE_PAGE_SIZE * PAGES_PER_SGE), 0xffff);
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id),
		       U64_LO(fp->rx_comp_mapping));
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id) + 4,
		       U64_HI(fp->rx_comp_mapping));

		/* Next page */
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id),
		       U64_LO(fp->rx_comp_mapping + BCM_PAGE_SIZE));
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id) + 4,
		       U64_HI(fp->rx_comp_mapping + BCM_PAGE_SIZE));

		REG_WR16(bp, BAR_USTRORM_INTMEM +
			 USTORM_MAX_AGG_SIZE_OFFSET(port, fp->cl_id),
			 max_agg_size);
	}

	/* dropless flow control */
	if (CHIP_IS_E1H(bp)) {
		struct ustorm_eth_rx_pause_data_e1h rx_pause = {0};

		rx_pause.bd_thr_low = 250;
		rx_pause.cqe_thr_low = 250;
		rx_pause.sge_thr_low = 0;
		rx_pause.bd_thr_high = 350;
		rx_pause.cqe_thr_high = 350;
		rx_pause.sge_thr_high = 0;

		for_each_queue(bp, i) {
			struct bnx2x_fastpath *fp = &bp->fp[i];

			if (!fp->disable_tpa) {
				rx_pause.sge_thr_low = 150;
				rx_pause.sge_thr_high = 250;
			}

			offset = BAR_USTRORM_INTMEM +
				 USTORM_ETH_RING_PAUSE_DATA_OFFSET(port,
								   fp->cl_id);
			for (j = 0;
			     j < sizeof(struct ustorm_eth_rx_pause_data_e1h)/4;
			     j++)
				REG_WR(bp, offset + j*4,
				       ((u32 *)&rx_pause)[j]);
		}
	}

	memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));

	/* Init rate shaping and fairness contexts */
	if (IS_E1HMF(bp)) {
		int vn;

		/* During init there is no active link
		   Until link is up, set link rate to 10Gbps */
		bp->link_vars.line_speed = SPEED_10000;
		bnx2x_init_port_minmax(bp);

		if (!BP_NOMCP(bp))
			bp->mf_config =
			      SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
		bnx2x_calc_vn_weight_sum(bp);

		for (vn = VN_0; vn < E1HVN_MAX; vn++)
			bnx2x_init_vn_minmax(bp, 2*vn + port);

		/* Enable rate shaping and fairness */
		bp->cmng.flags.cmng_enables |=
					CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;

	} else
		/* rate shaping and fairness are disabled */
		DP(NETIF_MSG_IFUP,
		   "single function mode  minmax will be disabled\n");

	/* Store cmng structures to internal memory */
	if (bp->port.pmf)
		for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
			REG_WR(bp, BAR_XSTRORM_INTMEM +
			       XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
			       ((u32 *)(&bp->cmng))[i]);
}
static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
{
	switch (load_code) {
	case FW_MSG_CODE_DRV_LOAD_COMMON:
		bnx2x_init_internal_common(bp);
		/* no break */

	case FW_MSG_CODE_DRV_LOAD_PORT:
		bnx2x_init_internal_port(bp);
		/* no break */

	case FW_MSG_CODE_DRV_LOAD_FUNCTION:
		bnx2x_init_internal_func(bp);
		break;

	default:
		BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
		break;
	}
}
void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
{
	int i;

	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		fp->bp = bp;
		fp->state = BNX2X_FP_STATE_CLOSED;
		fp->index = i;
		fp->cl_id = BP_L_ID(bp) + i;
#ifdef BCM_CNIC
		fp->sb_id = fp->cl_id + 1;
#else
		fp->sb_id = fp->cl_id;
#endif
		DP(NETIF_MSG_IFUP,
		   "queue[%d]:  bnx2x_init_sb(%p,%p)  cl_id %d  sb %d\n",
		   i, bp, fp->status_blk, fp->cl_id, fp->sb_id);
		bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping,
			      fp->sb_id);
		bnx2x_update_fpsb_idx(fp);
	}

	/* ensure status block indices were read */
	rmb();

	bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping,
			  DEF_SB_ID);
	bnx2x_update_dsb_idx(bp);
	bnx2x_update_coalesce(bp);
	bnx2x_init_rx_rings(bp);
	bnx2x_init_tx_ring(bp);
	bnx2x_init_sp_ring(bp);
	bnx2x_init_context(bp);
	bnx2x_init_internal(bp, load_code);
	bnx2x_init_ind_table(bp);
	bnx2x_stats_init(bp);

	/* At this point, we are ready for interrupts */
	atomic_set(&bp->intr_sem, 0);

	/* flush all before enabling interrupts */
	mb();
	mmiowb();

	bnx2x_int_enable(bp);

	/* Check for SPIO5 */
	bnx2x_attn_int_deasserted0(bp,
		REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + BP_PORT(bp)*4) &
				   AEU_INPUTS_ATTN_BITS_SPIO5);
}
/* end of nic init */

/*
 * gzip service functions
 */
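/*
 * The firmware image is shipped gzip-compressed; the helpers below keep a
 * single DMA-coherent FW_BUF_SIZE buffer and a zlib inflate stream on the
 * bp so they can be reused for each section that is decompressed.
 */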
static int bnx2x_gunzip_init(struct bnx2x *bp)
{
	bp->gunzip_buf = dma_alloc_coherent(&bp->pdev->dev, FW_BUF_SIZE,
					    &bp->gunzip_mapping, GFP_KERNEL);
	if (bp->gunzip_buf == NULL)
		goto gunzip_nomem1;

	bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
	if (bp->strm == NULL)
		goto gunzip_nomem2;

	bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
				      GFP_KERNEL);
	if (bp->strm->workspace == NULL)
		goto gunzip_nomem3;

	return 0;

gunzip_nomem3:
	kfree(bp->strm);
	bp->strm = NULL;

gunzip_nomem2:
	dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
			  bp->gunzip_mapping);
	bp->gunzip_buf = NULL;

gunzip_nomem1:
	netdev_err(bp->dev, "Cannot allocate firmware buffer for"
	       " un-compression\n");
	return -ENOMEM;
}

static void bnx2x_gunzip_end(struct bnx2x *bp)
{
	kfree(bp->strm->workspace);

	kfree(bp->strm);
	bp->strm = NULL;

	if (bp->gunzip_buf) {
		dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
				  bp->gunzip_mapping);
		bp->gunzip_buf = NULL;
	}
}

static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len)
{
	int n, rc;

	/* check gzip header */
	if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED)) {
		BNX2X_ERR("Bad gzip header\n");
		return -EINVAL;
	}

	n = 10;

#define FNAME				0x8

	if (zbuf[3] & FNAME)
		while ((zbuf[n++] != 0) && (n < len));

	bp->strm->next_in = (typeof(bp->strm->next_in))zbuf + n;
	bp->strm->avail_in = len - n;
	bp->strm->next_out = bp->gunzip_buf;
	bp->strm->avail_out = FW_BUF_SIZE;

	rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
	if (rc != Z_OK)
		return rc;

	rc = zlib_inflate(bp->strm, Z_FINISH);
	if ((rc != Z_OK) && (rc != Z_STREAM_END))
		netdev_err(bp->dev, "Firmware decompression error: %s\n",
			   bp->strm->msg);

	bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
	if (bp->gunzip_outlen & 0x3)
		netdev_err(bp->dev, "Firmware decompression error:"
				    " gunzip_outlen (%d) not aligned\n",
			   bp->gunzip_outlen);
	bp->gunzip_outlen >>= 2;

	zlib_inflateEnd(bp->strm);

	if (rc == Z_STREAM_END)
		return 0;

	return rc;
}
/* nic load/unload */

/*
 * General service functions
 */

/* send a NIG loopback debug packet */
static void bnx2x_lb_pckt(struct bnx2x *bp)
{
	u32 wb_write[3];

	/* Ethernet source and destination addresses */
	wb_write[0] = 0x55555555;
	wb_write[1] = 0x55555555;
	wb_write[2] = 0x20;		/* SOP */
	REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);

	/* NON-IP protocol */
	wb_write[0] = 0x09000000;
	wb_write[1] = 0x55555555;
	wb_write[2] = 0x10;		/* EOP, eop_bvalid = 0 */
	REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
}
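/*
 * The self test below feeds NIG loopback debug packets through BRB and the
 * parser with the parser's neighbour blocks disabled, then checks the NIG
 * octet and PRS packet counters to verify that the internal memories
 * forwarded them.
 */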
/* some of the internal memories
 * are not directly readable from the driver
 * to test them we send debug packets
 */
static int bnx2x_int_mem_test(struct bnx2x *bp)
{
	int factor;
	int count, i;
	u32 val = 0;

	if (CHIP_REV_IS_FPGA(bp))
		factor = 120;
	else if (CHIP_REV_IS_EMUL(bp))
		factor = 200;
	else
		factor = 1;

	DP(NETIF_MSG_HW, "start part1\n");

	/* Disable inputs of parser neighbor blocks */
	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
	REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
	REG_WR(bp, CFC_REG_DEBUG0, 0x1);
	REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);

	/*  Write 0 to parser credits for CFC search request */
	REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);

	/* send Ethernet packet */
	bnx2x_lb_pckt(bp);

	/* TODO do i reset NIG statistic? */
	/* Wait until NIG register shows 1 packet of size 0x10 */
	count = 1000 * factor;
	while (count) {

		bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
		val = *bnx2x_sp(bp, wb_data[0]);
		if (val == 0x10)
			break;

		msleep(10);
		count--;
	}
	if (val != 0x10) {
		BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
		return -1;
	}

	/* Wait until PRS register shows 1 packet */
	count = 1000 * factor;
	while (count) {
		val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
		if (val == 1)
			break;

		msleep(10);
		count--;
	}
	if (val != 0x1) {
		BNX2X_ERR("PRS timeout val = 0x%x\n", val);
		return -2;
	}

	/* Reset and init BRB, PRS */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
	msleep(50);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
	msleep(50);
	bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);

	DP(NETIF_MSG_HW, "part2\n");

	/* Disable inputs of parser neighbor blocks */
	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
	REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
	REG_WR(bp, CFC_REG_DEBUG0, 0x1);
	REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);

	/* Write 0 to parser credits for CFC search request */
	REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);

	/* send 10 Ethernet packets */
	for (i = 0; i < 10; i++)
		bnx2x_lb_pckt(bp);

	/* Wait until NIG register shows 10 + 1
	   packets of size 11*0x10 = 0xb0 */
	count = 1000 * factor;
	while (count) {

		bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
		val = *bnx2x_sp(bp, wb_data[0]);
		if (val == 0xb0)
			break;

		msleep(10);
		count--;
	}
	if (val != 0xb0) {
		BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
		return -3;
	}

	/* Wait until PRS register shows 2 packets */
	val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
	if (val != 2)
		BNX2X_ERR("PRS timeout  val = 0x%x\n", val);

	/* Write 1 to parser credits for CFC search request */
	REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);

	/* Wait until PRS register shows 3 packets */
	msleep(10 * factor);
	/* Wait until NIG register shows 1 packet of size 0x10 */
	val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
	if (val != 3)
		BNX2X_ERR("PRS timeout  val = 0x%x\n", val);

	/* clear NIG EOP FIFO */
	for (i = 0; i < 11; i++)
		REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
	val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
	if (val != 1) {
		BNX2X_ERR("clear of NIG failed\n");
		return -4;
	}

	/* Reset and init BRB, PRS, NIG */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
	msleep(50);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
	msleep(50);
	bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
#ifndef BCM_CNIC
	/* set NIC mode */
	REG_WR(bp, PRS_REG_NIC_MODE, 1);
#endif

	/* Enable inputs of parser neighbor blocks */
	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
	REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
	REG_WR(bp, CFC_REG_DEBUG0, 0x0);
	REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);

	DP(NETIF_MSG_HW, "done\n");

	return 0; /* OK */
}
static void enable_blocks_attention(struct bnx2x *bp)
{
	REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
	REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
	REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
	REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
	REG_WR(bp, QM_REG_QM_INT_MASK, 0);
	REG_WR(bp, TM_REG_TM_INT_MASK, 0);
	REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
	REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
	REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
/*	REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
/*	REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
	REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
	REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
	REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
/*	REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
/*	REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
	REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
	REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
	REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
	REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
/*	REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
/*	REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
	if (CHIP_REV_IS_FPGA(bp))
		REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
	else
		REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
	REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
	REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
	REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
/*	REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
/*	REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
	REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
	REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
/*	REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
	REG_WR(bp, PBF_REG_PBF_INT_MASK, 0X18);		/* bit 3,4 masked */
}
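/*
 * Parity mask table: a 0 bit unmasks (enables) the parity attention for
 * that line of the block, so a 0x0 entry enables everything for the block
 * while 0xffffffff leaves it fully masked; the in-between values appear to
 * keep specific known lines masked (see the per-entry bit comments).
 */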
static const struct {
	u32 addr;
	u32 mask;
} bnx2x_parity_mask[] = {
	{PXP_REG_PXP_PRTY_MASK, 0xffffffff},
	{PXP2_REG_PXP2_PRTY_MASK_0, 0xffffffff},
	{PXP2_REG_PXP2_PRTY_MASK_1, 0xffffffff},
	{HC_REG_HC_PRTY_MASK, 0xffffffff},
	{MISC_REG_MISC_PRTY_MASK, 0xffffffff},
	{QM_REG_QM_PRTY_MASK, 0x0},
	{DORQ_REG_DORQ_PRTY_MASK, 0x0},
	{GRCBASE_UPB + PB_REG_PB_PRTY_MASK, 0x0},
	{GRCBASE_XPB + PB_REG_PB_PRTY_MASK, 0x0},
	{SRC_REG_SRC_PRTY_MASK, 0x4},		/* bit 2 */
	{CDU_REG_CDU_PRTY_MASK, 0x0},
	{CFC_REG_CFC_PRTY_MASK, 0x0},
	{DBG_REG_DBG_PRTY_MASK, 0x0},
	{DMAE_REG_DMAE_PRTY_MASK, 0x0},
	{BRB1_REG_BRB1_PRTY_MASK, 0x0},
	{PRS_REG_PRS_PRTY_MASK, (1<<6)},	/* bit 6 */
	{TSDM_REG_TSDM_PRTY_MASK, 0x18},	/* bit 3,4 */
	{CSDM_REG_CSDM_PRTY_MASK, 0x8},		/* bit 3 */
	{USDM_REG_USDM_PRTY_MASK, 0x38},	/* bit 3,4,5 */
	{XSDM_REG_XSDM_PRTY_MASK, 0x8},		/* bit 3 */
	{TSEM_REG_TSEM_PRTY_MASK_0, 0x0},
	{TSEM_REG_TSEM_PRTY_MASK_1, 0x0},
	{USEM_REG_USEM_PRTY_MASK_0, 0x0},
	{USEM_REG_USEM_PRTY_MASK_1, 0x0},
	{CSEM_REG_CSEM_PRTY_MASK_0, 0x0},
	{CSEM_REG_CSEM_PRTY_MASK_1, 0x0},
	{XSEM_REG_XSEM_PRTY_MASK_0, 0x0},
	{XSEM_REG_XSEM_PRTY_MASK_1, 0x0}
};

static void enable_blocks_parity(struct bnx2x *bp)
{
	int i, mask_arr_len =
		sizeof(bnx2x_parity_mask)/(sizeof(bnx2x_parity_mask[0]));

	for (i = 0; i < mask_arr_len; i++)
		REG_WR(bp, bnx2x_parity_mask[i].addr,
			bnx2x_parity_mask[i].mask);
}
static void bnx2x_reset_common(struct bnx2x *bp)
{
	/* reset_common */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
	       0xd3ffff7f);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
}
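/*
 * PXP arbiter setup derives the write order from the PCIe max payload size
 * and the read order from the max read request size in the DEVCTL register,
 * unless bp->mrrs (the 'mrrs' module parameter) forces a read order.
 */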
static void bnx2x_init_pxp(struct bnx2x *bp)
{
	u16 devctl;
	int r_order, w_order;

	pci_read_config_word(bp->pdev,
			     bp->pcie_cap + PCI_EXP_DEVCTL, &devctl);
	DP(NETIF_MSG_HW, "read 0x%x from devctl\n", devctl);
	w_order = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
	if (bp->mrrs == -1)
		r_order = ((devctl & PCI_EXP_DEVCTL_READRQ) >> 12);
	else {
		DP(NETIF_MSG_HW, "force read order to %d\n", bp->mrrs);
		r_order = bp->mrrs;
	}

	bnx2x_init_pxp_arb(bp, r_order, w_order);
}
static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp)
{
	int is_required;
	u32 val;
	int port;

	is_required = 0;
	val = SHMEM_RD(bp, dev_info.shared_hw_config.config2) &
	      SHARED_HW_CFG_FAN_FAILURE_MASK;

	if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED)
		is_required = 1;

	/*
	 * The fan failure mechanism is usually related to the PHY type since
	 * the power consumption of the board is affected by the PHY. Currently,
	 * fan is required for most designs with SFX7101, BCM8727 and BCM8481.
	 */
	else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE)
		for (port = PORT_0; port < PORT_MAX; port++) {
			u32 phy_type =
				SHMEM_RD(bp, dev_info.port_hw_config[port].
					 external_phy_config) &
				PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
			is_required |=
				((phy_type ==
				  PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) ||
				 (phy_type ==
				  PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727) ||
				 (phy_type ==
				  PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481));
		}

	DP(NETIF_MSG_HW, "fan detection setting: %d\n", is_required);

	if (is_required == 0)
		return;

	/* Fan failure is indicated by SPIO 5 */
	bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
		       MISC_REGISTERS_SPIO_INPUT_HI_Z);

	/* set to active low mode */
	val = REG_RD(bp, MISC_REG_SPIO_INT);
	val |= ((1 << MISC_REGISTERS_SPIO_5) <<
					MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
	REG_WR(bp, MISC_REG_SPIO_INT, val);

	/* enable interrupt to signal the IGU */
	val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
	val |= (1 << MISC_REGISTERS_SPIO_5);
	REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
}
static int bnx2x_init_common(struct bnx2x *bp)
{
	u32 val, i;
#ifdef BCM_CNIC
	u32 wb_write[2];
#endif

	DP(BNX2X_MSG_MCP, "starting common init  func %d\n", BP_FUNC(bp));

	bnx2x_reset_common(bp);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);

	bnx2x_init_block(bp, MISC_BLOCK, COMMON_STAGE);
	if (CHIP_IS_E1H(bp))
		REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp));

	REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
	msleep(30);
	REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);

	bnx2x_init_block(bp, PXP_BLOCK, COMMON_STAGE);
	if (CHIP_IS_E1(bp)) {
		/* enable HW interrupt from PXP on USDM overflow
		   bit 16 on INT_MASK_0 */
		REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
	}

	bnx2x_init_block(bp, PXP2_BLOCK, COMMON_STAGE);
	bnx2x_init_pxp(bp);

#ifdef __BIG_ENDIAN
	REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
	REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
	REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
	REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
	REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
	/* make sure this value is 0 */
	REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);

/*	REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
	REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
	REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
	REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
	REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
#endif

	REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
#ifdef BCM_CNIC
	REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
	REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
	REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
#endif

	if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
		REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);

	/* let the HW do it's magic ... */
	msleep(100);
	/* finish PXP init */
	val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
	if (val != 1) {
		BNX2X_ERR("PXP2 CFG failed\n");
		return -EBUSY;
	}
	val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
	if (val != 1) {
		BNX2X_ERR("PXP2 RD_INIT failed\n");
		return -EBUSY;
	}

	REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
	REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);

	bnx2x_init_block(bp, DMAE_BLOCK, COMMON_STAGE);

	/* clean the DMAE memory */
	bp->dmae_ready = 1;
	bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);

	bnx2x_init_block(bp, TCM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, UCM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, CCM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, XCM_BLOCK, COMMON_STAGE);

	bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
	bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
	bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
	bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);

	bnx2x_init_block(bp, QM_BLOCK, COMMON_STAGE);

#ifdef BCM_CNIC
	wb_write[0] = 0;
	wb_write[1] = 0;
	for (i = 0; i < 64; i++) {
		REG_WR(bp, QM_REG_BASEADDR + i*4, 1024 * 4 * (i%16));
		bnx2x_init_ind_wr(bp, QM_REG_PTRTBL + i*8, wb_write, 2);

		if (CHIP_IS_E1H(bp)) {
			REG_WR(bp, QM_REG_BASEADDR_EXT_A + i*4, 1024*4*(i%16));
			bnx2x_init_ind_wr(bp, QM_REG_PTRTBL_EXT_A + i*8,
					  wb_write, 2);
		}
	}
#endif
	/* soft reset pulse */
	REG_WR(bp, QM_REG_SOFT_RESET, 1);
	REG_WR(bp, QM_REG_SOFT_RESET, 0);

#ifdef BCM_CNIC
	bnx2x_init_block(bp, TIMERS_BLOCK, COMMON_STAGE);
#endif

	bnx2x_init_block(bp, DQ_BLOCK, COMMON_STAGE);
	REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT);
	if (!CHIP_REV_IS_SLOW(bp)) {
		/* enable hw interrupt from doorbell Q */
		REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
	}

	bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
	REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
#ifndef BCM_CNIC
	/* set NIC mode */
	REG_WR(bp, PRS_REG_NIC_MODE, 1);
#endif
	if (CHIP_IS_E1H(bp))
		REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));

	bnx2x_init_block(bp, TSDM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, CSDM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, USDM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, XSDM_BLOCK, COMMON_STAGE);

	bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
	bnx2x_init_fill(bp, USEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
	bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
	bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));

	bnx2x_init_block(bp, TSEM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, USEM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, CSEM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, XSEM_BLOCK, COMMON_STAGE);

	/* sync semi rtc */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
	       0x80000000);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
	       0x80000000);

	bnx2x_init_block(bp, UPB_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, XPB_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, PBF_BLOCK, COMMON_STAGE);

	REG_WR(bp, SRC_REG_SOFT_RST, 1);
	for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4)
		REG_WR(bp, i, random32());
	bnx2x_init_block(bp, SRCH_BLOCK, COMMON_STAGE);
#ifdef BCM_CNIC
	REG_WR(bp, SRC_REG_KEYSEARCH_0, 0x63285672);
	REG_WR(bp, SRC_REG_KEYSEARCH_1, 0x24b8f2cc);
	REG_WR(bp, SRC_REG_KEYSEARCH_2, 0x223aef9b);
	REG_WR(bp, SRC_REG_KEYSEARCH_3, 0x26001e3a);
	REG_WR(bp, SRC_REG_KEYSEARCH_4, 0x7ae91116);
	REG_WR(bp, SRC_REG_KEYSEARCH_5, 0x5ce5230b);
	REG_WR(bp, SRC_REG_KEYSEARCH_6, 0x298d8adf);
	REG_WR(bp, SRC_REG_KEYSEARCH_7, 0x6eb0ff09);
	REG_WR(bp, SRC_REG_KEYSEARCH_8, 0x1830f82f);
	REG_WR(bp, SRC_REG_KEYSEARCH_9, 0x01e46be7);
#endif
	REG_WR(bp, SRC_REG_SOFT_RST, 0);

	if (sizeof(union cdu_context) != 1024)
		/* we currently assume that a context is 1024 bytes */
		dev_alert(&bp->pdev->dev, "please adjust the size "
					  "of cdu_context(%ld)\n",
			  (long)sizeof(union cdu_context));

	bnx2x_init_block(bp, CDU_BLOCK, COMMON_STAGE);
	val = (4 << 24) + (0 << 12) + 1024;
	REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);

	bnx2x_init_block(bp, CFC_BLOCK, COMMON_STAGE);
	REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
	/* enable context validation interrupt from CFC */
	REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);

	/* set the thresholds to prevent CFC/CDU race */
	REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);

	bnx2x_init_block(bp, HC_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, MISC_AEU_BLOCK, COMMON_STAGE);

	bnx2x_init_block(bp, PXPCS_BLOCK, COMMON_STAGE);
	/* Reset PCIE errors for debug */
	REG_WR(bp, 0x2814, 0xffffffff);
	REG_WR(bp, 0x3820, 0xffffffff);

	bnx2x_init_block(bp, EMAC0_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, EMAC1_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, DBU_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, DBG_BLOCK, COMMON_STAGE);

	bnx2x_init_block(bp, NIG_BLOCK, COMMON_STAGE);
	if (CHIP_IS_E1H(bp)) {
		REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp));
		REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp));
	}

	if (CHIP_REV_IS_SLOW(bp))
		msleep(200);

	/* finish CFC init */
	val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
	if (val != 1) {
		BNX2X_ERR("CFC LL_INIT failed\n");
		return -EBUSY;
	}
	val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
	if (val != 1) {
		BNX2X_ERR("CFC AC_INIT failed\n");
		return -EBUSY;
	}
	val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
	if (val != 1) {
		BNX2X_ERR("CFC CAM_INIT failed\n");
		return -EBUSY;
	}
	REG_WR(bp, CFC_REG_DEBUG0, 0);

	/* read NIG statistic
	   to see if this is our first up since powerup */
	bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
	val = *bnx2x_sp(bp, wb_data[0]);

	/* do internal memory self test */
	if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) {
		BNX2X_ERR("internal mem self test failed\n");
		return -EBUSY;
	}

	switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
		bp->port.need_hw_lock = 1;
		break;

	default:
		break;
	}

	bnx2x_setup_fan_failure_detection(bp);

	/* clear PXP2 attentions */
	REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);

	enable_blocks_attention(bp);
	if (CHIP_PARITY_SUPPORTED(bp))
		enable_blocks_parity(bp);

	if (!BP_NOMCP(bp)) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_common_init_phy(bp, bp->common.shmem_base);
		bnx2x_release_phy_lock(bp);
	} else
		BNX2X_ERR("Bootcode is missing - can not initialize link\n");

	return 0;
}
static int bnx2x_init_port(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int init_stage = port ? PORT1_STAGE : PORT0_STAGE;
	u32 low, high;
	u32 val;

	DP(BNX2X_MSG_MCP, "starting port init  port %d\n", port);

	REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);

	bnx2x_init_block(bp, PXP_BLOCK, init_stage);
	bnx2x_init_block(bp, PXP2_BLOCK, init_stage);

	bnx2x_init_block(bp, TCM_BLOCK, init_stage);
	bnx2x_init_block(bp, UCM_BLOCK, init_stage);
	bnx2x_init_block(bp, CCM_BLOCK, init_stage);
	bnx2x_init_block(bp, XCM_BLOCK, init_stage);

#ifdef BCM_CNIC
	REG_WR(bp, QM_REG_CONNNUM_0 + port*4, 1024/16 - 1);

	bnx2x_init_block(bp, TIMERS_BLOCK, init_stage);
	REG_WR(bp, TM_REG_LIN0_SCAN_TIME + port*4, 20);
	REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31);
#endif

	bnx2x_init_block(bp, DQ_BLOCK, init_stage);

	bnx2x_init_block(bp, BRB1_BLOCK, init_stage);
	if (CHIP_REV_IS_SLOW(bp) && !CHIP_IS_E1H(bp)) {
		/* no pause for emulation and FPGA */
		low = 0;
		high = 513;
	} else {
		if (IS_E1HMF(bp))
			low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
		else if (bp->dev->mtu > 4096) {
			if (bp->flags & ONE_PORT_FLAG)
				low = 160;
			else {
				val = bp->dev->mtu;
				/* (24*1024 + val*4)/256 */
				low = 96 + (val/64) + ((val % 64) ? 1 : 0);
			}
		} else
			low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160);
		high = low + 56;	/* 14*1024/256 */
	}
	REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
	REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);

	bnx2x_init_block(bp, PRS_BLOCK, init_stage);

	bnx2x_init_block(bp, TSDM_BLOCK, init_stage);
	bnx2x_init_block(bp, CSDM_BLOCK, init_stage);
	bnx2x_init_block(bp, USDM_BLOCK, init_stage);
	bnx2x_init_block(bp, XSDM_BLOCK, init_stage);

	bnx2x_init_block(bp, TSEM_BLOCK, init_stage);
	bnx2x_init_block(bp, USEM_BLOCK, init_stage);
	bnx2x_init_block(bp, CSEM_BLOCK, init_stage);
	bnx2x_init_block(bp, XSEM_BLOCK, init_stage);

	bnx2x_init_block(bp, UPB_BLOCK, init_stage);
	bnx2x_init_block(bp, XPB_BLOCK, init_stage);

	bnx2x_init_block(bp, PBF_BLOCK, init_stage);

	/* configure PBF to work without PAUSE mtu 9000 */
	REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);

	/* update threshold */
	REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
	/* update init credit */
	REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);

	/* probe changes */
	REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
	msleep(5);
	REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);

#ifdef BCM_CNIC
	bnx2x_init_block(bp, SRCH_BLOCK, init_stage);
#endif
	bnx2x_init_block(bp, CDU_BLOCK, init_stage);
	bnx2x_init_block(bp, CFC_BLOCK, init_stage);

	if (CHIP_IS_E1(bp)) {
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
	}
	bnx2x_init_block(bp, HC_BLOCK, init_stage);

	bnx2x_init_block(bp, MISC_AEU_BLOCK, init_stage);
	/* init aeu_mask_attn_func_0/1:
	 *  - SF mode: bits 3-7 are masked. only bits 0-2 are in use
	 *  - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
	 *             bits 4-7 are used for "per vn group attention" */
	REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
	       (IS_E1HMF(bp) ? 0xF7 : 0x7));

	bnx2x_init_block(bp, PXPCS_BLOCK, init_stage);
	bnx2x_init_block(bp, EMAC0_BLOCK, init_stage);
	bnx2x_init_block(bp, EMAC1_BLOCK, init_stage);
	bnx2x_init_block(bp, DBU_BLOCK, init_stage);
	bnx2x_init_block(bp, DBG_BLOCK, init_stage);

	bnx2x_init_block(bp, NIG_BLOCK, init_stage);

	REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);

	if (CHIP_IS_E1H(bp)) {
		/* 0x2 disable e1hov, 0x1 enable */
		REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
		       (IS_E1HMF(bp) ? 0x1 : 0x2));

		REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
		REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
		REG_WR(bp, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
	}

	bnx2x_init_block(bp, MCP_BLOCK, init_stage);
	bnx2x_init_block(bp, DMAE_BLOCK, init_stage);

	switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
		{
		u32 swap_val, swap_override, aeu_gpio_mask, offset;

		bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
			       MISC_REGISTERS_GPIO_INPUT_HI_Z, port);

		/* The GPIO should be swapped if the swap register is
		   set and active */
		swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
		swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);

		/* Select function upon port-swap configuration */
		if (port == 0) {
			offset = MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0;
			aeu_gpio_mask = (swap_val && swap_override) ?
				AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1 :
				AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0;
		} else {
			offset = MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0;
			aeu_gpio_mask = (swap_val && swap_override) ?
				AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 :
				AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1;
		}
		val = REG_RD(bp, offset);
		/* add GPIO3 to group */
		val |= aeu_gpio_mask;
		REG_WR(bp, offset, val);

		bp->port.need_hw_lock = 1;
		}
		break;

	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
		bp->port.need_hw_lock = 1;
		/* fall through */
	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
		/* add SPIO 5 to group 0 */
		{
		u32 reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
				       MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
		val = REG_RD(bp, reg_addr);
		val |= AEU_INPUTS_ATTN_BITS_SPIO5;
		REG_WR(bp, reg_addr, val);
		}
		break;

	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
		bp->port.need_hw_lock = 1;
		break;

	default:
		break;
	}

	bnx2x__link_reset(bp);

	return 0;
}
#define ILT_PER_FUNC		(768/2)
#define FUNC_ILT_BASE(func)	(func * ILT_PER_FUNC)
/* the phys address is shifted right 12 bits and has an added
   1=valid bit added to the 53rd bit
   then since this is a wide register(TM)
   we split it into two 32 bit writes */
#define ONCHIP_ADDR1(x)		((u32)(((u64)x >> 12) & 0xFFFFFFFF))
#define ONCHIP_ADDR2(x)		((u32)((1 << 20) | ((u64)x >> 44)))
#define PXP_ONE_ILT(x)		(((x) << 10) | x)
#define PXP_ILT_RANGE(f, l)	(((l) << 10) | f)

#ifdef BCM_CNIC
#define CNIC_ILT_LINES		127
#define CNIC_CTX_PER_ILT	16
#else
#define CNIC_ILT_LINES		0
#endif
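
/* Worked example of the ONCHIP_ADDR split (illustration only, with a
 * made-up DMA address): for addr = 0x123456789000,
 *   addr >> 12         = 0x123456789  (page-shifted address)
 *   ONCHIP_ADDR1(addr) = 0x23456789   (low 32 bits of the shifted address)
 *   ONCHIP_ADDR2(addr) = 0x00100001   (remaining high bits, OR-ed with the
 *                                      valid bit at bit 20 of the high dword,
 *                                      i.e. the 53rd bit of the full value)
 */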
static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
{
	int reg;

	if (CHIP_IS_E1H(bp))
		reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
	else /* E1 */
		reg = PXP2_REG_RQ_ONCHIP_AT + index*8;

	bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
}
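
/* For example (illustrative index), bnx2x_ilt_wr(bp, 5, addr) on an E1H
 * chip performs a wide-bus write of the two ONCHIP_ADDR dwords to
 * PXP2_REG_RQ_ONCHIP_AT_B0 + 40, since each ILT entry occupies 8 bytes.
 */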
static int bnx2x_init_func(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	u32 addr, val;
	int i;

	DP(BNX2X_MSG_MCP, "starting func init  func %d\n", func);

	/* set MSI reconfigure capability */
	addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
	val = REG_RD(bp, addr);
	val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
	REG_WR(bp, addr, val);

	i = FUNC_ILT_BASE(func);

	bnx2x_ilt_wr(bp, i, bnx2x_sp_mapping(bp, context));
	if (CHIP_IS_E1H(bp)) {
		REG_WR(bp, PXP2_REG_RQ_CDU_FIRST_ILT, i);
		REG_WR(bp, PXP2_REG_RQ_CDU_LAST_ILT, i + CNIC_ILT_LINES);
	} else /* E1 */
		REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4,
		       PXP_ILT_RANGE(i, i + CNIC_ILT_LINES));

#ifdef BCM_CNIC
	i += 1 + CNIC_ILT_LINES;
	bnx2x_ilt_wr(bp, i, bp->timers_mapping);
	if (CHIP_IS_E1(bp))
		REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));
	else {
		REG_WR(bp, PXP2_REG_RQ_TM_FIRST_ILT, i);
		REG_WR(bp, PXP2_REG_RQ_TM_LAST_ILT, i);
	}

	i++;
	bnx2x_ilt_wr(bp, i, bp->qm_mapping);
	if (CHIP_IS_E1(bp))
		REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));
	else {
		REG_WR(bp, PXP2_REG_RQ_QM_FIRST_ILT, i);
		REG_WR(bp, PXP2_REG_RQ_QM_LAST_ILT, i);
	}

	i++;
	bnx2x_ilt_wr(bp, i, bp->t1_mapping);
	if (CHIP_IS_E1(bp))
		REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
	else {
		REG_WR(bp, PXP2_REG_RQ_SRC_FIRST_ILT, i);
		REG_WR(bp, PXP2_REG_RQ_SRC_LAST_ILT, i);
	}

	/* tell the searcher where the T2 table is */
	REG_WR(bp, SRC_REG_COUNTFREE0 + port*4, 16*1024/64);

	bnx2x_wb_wr(bp, SRC_REG_FIRSTFREE0 + port*16,
		    U64_LO(bp->t2_mapping), U64_HI(bp->t2_mapping));

	bnx2x_wb_wr(bp, SRC_REG_LASTFREE0 + port*16,
		    U64_LO((u64)bp->t2_mapping + 16*1024 - 64),
		    U64_HI((u64)bp->t2_mapping + 16*1024 - 64));

	REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + port*4, 10);
#endif

	if (CHIP_IS_E1H(bp)) {
		bnx2x_init_block(bp, MISC_BLOCK, FUNC0_STAGE + func);
		bnx2x_init_block(bp, TCM_BLOCK, FUNC0_STAGE + func);
		bnx2x_init_block(bp, UCM_BLOCK, FUNC0_STAGE + func);
		bnx2x_init_block(bp, CCM_BLOCK, FUNC0_STAGE + func);
		bnx2x_init_block(bp, XCM_BLOCK, FUNC0_STAGE + func);
		bnx2x_init_block(bp, TSEM_BLOCK, FUNC0_STAGE + func);
		bnx2x_init_block(bp, USEM_BLOCK, FUNC0_STAGE + func);
		bnx2x_init_block(bp, CSEM_BLOCK, FUNC0_STAGE + func);
		bnx2x_init_block(bp, XSEM_BLOCK, FUNC0_STAGE + func);

		REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
		REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov);
	}

	/* HC init per function */
	if (CHIP_IS_E1H(bp)) {
		REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);

		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
	}
	bnx2x_init_block(bp, HC_BLOCK, FUNC0_STAGE + func);

	/* Reset PCIE errors for debug */
	REG_WR(bp, 0x2114, 0xffffffff);
	REG_WR(bp, 0x2120, 0xffffffff);

	return 0;
}
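
/* Note on the ILT arithmetic above: ILT_PER_FUNC is 768/2 = 384 lines, so
 * FUNC_ILT_BASE(0) = 0 and FUNC_ILT_BASE(1) = 384; each function programs
 * its context line (and, with BCM_CNIC, its timers/QM/searcher lines)
 * inside its own 384-line window.
 */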
int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
{
	int rc = 0;
	int i;

	DP(BNX2X_MSG_MCP, "function %d  load_code %x\n",
	   BP_FUNC(bp), load_code);

	bp->dmae_ready = 0;
	mutex_init(&bp->dmae_mutex);
	rc = bnx2x_gunzip_init(bp);
	if (rc)
		return rc;

	switch (load_code) {
	case FW_MSG_CODE_DRV_LOAD_COMMON:
		rc = bnx2x_init_common(bp);
		if (rc)
			goto init_hw_err;
		/* no break */

	case FW_MSG_CODE_DRV_LOAD_PORT:
		bp->dmae_ready = 1;
		rc = bnx2x_init_port(bp);
		if (rc)
			goto init_hw_err;
		/* no break */

	case FW_MSG_CODE_DRV_LOAD_FUNCTION:
		bp->dmae_ready = 1;
		rc = bnx2x_init_func(bp);
		if (rc)
			goto init_hw_err;
		break;

	default:
		BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
		break;
	}

	if (!BP_NOMCP(bp)) {
		int func = BP_FUNC(bp);

		bp->fw_drv_pulse_wr_seq =
				(SHMEM_RD(bp, func_mb[func].drv_pulse_mb) &
				 DRV_PULSE_SEQ_MASK);
		DP(BNX2X_MSG_MCP, "drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
	}

	/* this needs to be done before gunzip end */
	bnx2x_zero_def_sb(bp);
	for_each_queue(bp, i)
		bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
#ifdef BCM_CNIC
	bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
#endif

init_hw_err:
	bnx2x_gunzip_end(bp);

	return rc;
}
void bnx2x_free_mem(struct bnx2x *bp)
{

#define BNX2X_PCI_FREE(x, y, size) \
	do { \
		if (x) { \
			dma_free_coherent(&bp->pdev->dev, size, x, y); \
			x = NULL; \
			y = 0; \
		} \
	} while (0)

#define BNX2X_FREE(x) \
	do { \
		if (x) { \
			vfree(x); \
			x = NULL; \
		} \
	} while (0)

	int i;

	/* fastpath */
	/* Common */
	for_each_queue(bp, i) {

		/* status blocks */
		BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
			       bnx2x_fp(bp, i, status_blk_mapping),
			       sizeof(struct host_status_block));
	}
	/* Rx */
	for_each_queue(bp, i) {

		/* fastpath rx rings: rx_buf rx_desc rx_comp */
		BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
		BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
			       bnx2x_fp(bp, i, rx_desc_mapping),
			       sizeof(struct eth_rx_bd) * NUM_RX_BD);

		BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
			       bnx2x_fp(bp, i, rx_comp_mapping),
			       sizeof(struct eth_fast_path_rx_cqe) *
			       NUM_RCQ_BD);

		/* SGE ring */
		BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
		BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
			       bnx2x_fp(bp, i, rx_sge_mapping),
			       BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
	}
	/* Tx */
	for_each_queue(bp, i) {

		/* fastpath tx rings: tx_buf tx_desc */
		BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
		BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
			       bnx2x_fp(bp, i, tx_desc_mapping),
			       sizeof(union eth_tx_bd_types) * NUM_TX_BD);
	}
	/* end of fastpath */

	BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
		       sizeof(struct host_def_status_block));

	BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
		       sizeof(struct bnx2x_slowpath));

#ifdef BCM_CNIC
	BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
	BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
	BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
	BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
	BNX2X_PCI_FREE(bp->cnic_sb, bp->cnic_sb_mapping,
		       sizeof(struct host_status_block));
#endif
	BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);

#undef BNX2X_PCI_FREE
#undef BNX2X_FREE
}
int bnx2x_alloc_mem(struct bnx2x *bp)
{

#define BNX2X_PCI_ALLOC(x, y, size) \
	do { \
		x = dma_alloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \
		if (x == NULL) \
			goto alloc_mem_err; \
		memset(x, 0, size); \
	} while (0)

#define BNX2X_ALLOC(x, size) \
	do { \
		x = vmalloc(size); \
		if (x == NULL) \
			goto alloc_mem_err; \
		memset(x, 0, size); \
	} while (0)

	int i;

	/* fastpath */
	/* Common */
	for_each_queue(bp, i) {
		bnx2x_fp(bp, i, bp) = bp;

		/* status blocks */
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
				&bnx2x_fp(bp, i, status_blk_mapping),
				sizeof(struct host_status_block));
	}
	/* Rx */
	for_each_queue(bp, i) {

		/* fastpath rx rings: rx_buf rx_desc rx_comp */
		BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
			    sizeof(struct sw_rx_bd) * NUM_RX_BD);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
				&bnx2x_fp(bp, i, rx_desc_mapping),
				sizeof(struct eth_rx_bd) * NUM_RX_BD);

		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
				&bnx2x_fp(bp, i, rx_comp_mapping),
				sizeof(struct eth_fast_path_rx_cqe) *
				NUM_RCQ_BD);

		/* SGE ring */
		BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
			    sizeof(struct sw_rx_page) * NUM_RX_SGE);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
				&bnx2x_fp(bp, i, rx_sge_mapping),
				BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
	}
	/* Tx */
	for_each_queue(bp, i) {

		/* fastpath tx rings: tx_buf tx_desc */
		BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
			    sizeof(struct sw_tx_bd) * NUM_TX_BD);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
				&bnx2x_fp(bp, i, tx_desc_mapping),
				sizeof(union eth_tx_bd_types) * NUM_TX_BD);
	}
	/* end of fastpath */

	BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
			sizeof(struct host_def_status_block));

	BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
			sizeof(struct bnx2x_slowpath));

#ifdef BCM_CNIC
	BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);

	/* allocate searcher T2 table
	   we allocate 1/4 of alloc num for T2
	  (which is not entered into the ILT) */
	BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);

	/* Initialize T2 (for 1024 connections) */
	for (i = 0; i < 16*1024; i += 64)
		*(u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;

	/* Timer block array (8*MAX_CONN) phys uncached for now 1024 conns */
	BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);

	/* QM queues (128*MAX_CONN) */
	BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);

	BNX2X_PCI_ALLOC(bp->cnic_sb, &bp->cnic_sb_mapping,
			sizeof(struct host_status_block));
#endif

	/* Slow path ring */
	BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);

	return 0;

alloc_mem_err:
	bnx2x_free_mem(bp);
	return -ENOMEM;

#undef BNX2X_PCI_ALLOC
#undef BNX2X_ALLOC
}
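
/* The allocation helpers above mirror the release helpers in
 * bnx2x_free_mem(): BNX2X_PCI_ALLOC pairs with BNX2X_PCI_FREE
 * (dma_alloc_coherent/dma_free_coherent) and BNX2X_ALLOC pairs with
 * BNX2X_FREE (vmalloc/vfree).  On any failure the alloc_mem_err path
 * simply calls bnx2x_free_mem(), which tolerates pointers that were
 * never allocated.
 */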
/*
 * Init service functions
 */

/**
 * Sets a MAC in a CAM for a few L2 Clients for E1 chip
 *
 * @param bp		driver descriptor
 * @param set		set or clear an entry (1 or 0)
 * @param mac		pointer to a buffer containing a MAC
 * @param cl_bit_vec	bit vector of clients to register a MAC for
 * @param cam_offset	offset in a CAM to use
 * @param with_bcast	set broadcast MAC as well
 */
static void bnx2x_set_mac_addr_e1_gen(struct bnx2x *bp, int set, u8 *mac,
				      u32 cl_bit_vec, u8 cam_offset,
				      u8 with_bcast)
{
	struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
	int port = BP_PORT(bp);

	/* CAM allocation
	 * unicasts 0-31:port0 32-63:port1
	 * multicast 64-127:port0 128-191:port1
	 */
	config->hdr.length = 1 + (with_bcast ? 1 : 0);
	config->hdr.offset = cam_offset;
	config->hdr.client_id = 0xff;
	config->hdr.reserved1 = 0;

	/* primary MAC */
	config->config_table[0].cam_entry.msb_mac_addr =
					swab16(*(u16 *)&mac[0]);
	config->config_table[0].cam_entry.middle_mac_addr =
					swab16(*(u16 *)&mac[2]);
	config->config_table[0].cam_entry.lsb_mac_addr =
					swab16(*(u16 *)&mac[4]);
	config->config_table[0].cam_entry.flags = cpu_to_le16(port);
	if (set)
		config->config_table[0].target_table_entry.flags = 0;
	else
		CAM_INVALIDATE(config->config_table[0]);
	config->config_table[0].target_table_entry.clients_bit_vector =
						cpu_to_le32(cl_bit_vec);
	config->config_table[0].target_table_entry.vlan_id = 0;

	DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)\n",
	   (set ? "setting" : "clearing"),
	   config->config_table[0].cam_entry.msb_mac_addr,
	   config->config_table[0].cam_entry.middle_mac_addr,
	   config->config_table[0].cam_entry.lsb_mac_addr);

	/* broadcast */
	if (with_bcast) {
		config->config_table[1].cam_entry.msb_mac_addr =
			cpu_to_le16(0xffff);
		config->config_table[1].cam_entry.middle_mac_addr =
			cpu_to_le16(0xffff);
		config->config_table[1].cam_entry.lsb_mac_addr =
			cpu_to_le16(0xffff);
		config->config_table[1].cam_entry.flags = cpu_to_le16(port);
		if (set)
			config->config_table[1].target_table_entry.flags =
					TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
		else
			CAM_INVALIDATE(config->config_table[1]);
		config->config_table[1].target_table_entry.clients_bit_vector =
							cpu_to_le32(cl_bit_vec);
		config->config_table[1].target_table_entry.vlan_id = 0;
	}

	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
		      U64_HI(bnx2x_sp_mapping(bp, mac_config)),
		      U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
}
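
/* Typical usage (see bnx2x_set_eth_mac_addr_e1() below): the caller bumps
 * bp->set_mac_pending before posting the SET_MAC ramrod and then waits for
 * the completion via bnx2x_wait_ramrod().
 */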
/**
 * Sets a MAC in a CAM for a few L2 Clients for E1H chip
 *
 * @param bp		driver descriptor
 * @param set		set or clear an entry (1 or 0)
 * @param mac		pointer to a buffer containing a MAC
 * @param cl_bit_vec	bit vector of clients to register a MAC for
 * @param cam_offset	offset in a CAM to use
 */
static void bnx2x_set_mac_addr_e1h_gen(struct bnx2x *bp, int set, u8 *mac,
				       u32 cl_bit_vec, u8 cam_offset)
{
	struct mac_configuration_cmd_e1h *config =
		(struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config);

	config->hdr.length = 1;
	config->hdr.offset = cam_offset;
	config->hdr.client_id = 0xff;
	config->hdr.reserved1 = 0;

	/* primary MAC */
	config->config_table[0].msb_mac_addr =
					swab16(*(u16 *)&mac[0]);
	config->config_table[0].middle_mac_addr =
					swab16(*(u16 *)&mac[2]);
	config->config_table[0].lsb_mac_addr =
					swab16(*(u16 *)&mac[4]);
	config->config_table[0].clients_bit_vector =
					cpu_to_le32(cl_bit_vec);
	config->config_table[0].vlan_id = 0;
	config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov);
	if (set)
		config->config_table[0].flags = BP_PORT(bp);
	else
		config->config_table[0].flags =
				MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE;

	DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)  E1HOV %d  CLID mask %d\n",
	   (set ? "setting" : "clearing"),
	   config->config_table[0].msb_mac_addr,
	   config->config_table[0].middle_mac_addr,
	   config->config_table[0].lsb_mac_addr, bp->e1hov, cl_bit_vec);

	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
		      U64_HI(bnx2x_sp_mapping(bp, mac_config)),
		      U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
}
static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
			     int *state_p, int poll)
{
	/* can take a while if any port is running */
	int cnt = 5000;

	DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
	   poll ? "polling" : "waiting", state, idx);

	might_sleep();
	while (cnt--) {
		if (poll) {
			bnx2x_rx_int(bp->fp, 10);
			/* if index is different from 0
			 * the reply for some commands will
			 * be on the non default queue
			 */
			if (idx)
				bnx2x_rx_int(&bp->fp[idx], 10);
		}

		mb(); /* state is changed by bnx2x_sp_event() */
		if (*state_p == state) {
#ifdef BNX2X_STOP_ON_ERROR
			DP(NETIF_MSG_IFUP, "exit  (cnt %d)\n", 5000 - cnt);
#endif
			return 0;
		}

		msleep(1);

		if (bp->panic)
			return -EIO;
	}

	/* timeout! */
	BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
		  poll ? "polling" : "waiting", state, idx);
#ifdef BNX2X_STOP_ON_ERROR
	bnx2x_panic();
#endif

	return -EBUSY;
}
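
/* With cnt starting at 5000 and a 1 ms sleep per iteration, the wait above
 * is bounded at roughly 5 seconds before the timeout path is taken.
 */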
void bnx2x_set_eth_mac_addr_e1h(struct bnx2x *bp, int set)
{
	bp->set_mac_pending++;
	smp_wmb();

	bnx2x_set_mac_addr_e1h_gen(bp, set, bp->dev->dev_addr,
				   (1 << bp->fp->cl_id), BP_FUNC(bp));

	/* Wait for a completion */
	bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
}
void bnx2x_set_eth_mac_addr_e1(struct bnx2x *bp, int set)
{
	bp->set_mac_pending++;
	smp_wmb();

	bnx2x_set_mac_addr_e1_gen(bp, set, bp->dev->dev_addr,
				  (1 << bp->fp->cl_id), (BP_PORT(bp) ? 32 : 0),
				  1);

	/* Wait for a completion */
	bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
}
#ifdef BCM_CNIC
/**
 * Set iSCSI MAC(s) at the next entries in the CAM after the ETH
 * MAC(s). This function will wait until the ramrod completion
 * arrives.
 *
 * @param bp	driver handle
 * @param set	set or clear the CAM entry
 *
 * @return 0 if success, -ENODEV if ramrod doesn't return.
 */
int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp, int set)
{
	u32 cl_bit_vec = (1 << BCM_ISCSI_ETH_CL_ID);

	bp->set_mac_pending++;
	smp_wmb();

	/* Send a SET_MAC ramrod */
	if (CHIP_IS_E1(bp))
		bnx2x_set_mac_addr_e1_gen(bp, set, bp->iscsi_mac,
					  cl_bit_vec, (BP_PORT(bp) ? 32 : 0) + 2,
					  1);
	else
		/* CAM allocation for E1H
		 * unicasts: by func number
		 * multicast: 20+FUNC*20, 20 each
		 */
		bnx2x_set_mac_addr_e1h_gen(bp, set, bp->iscsi_mac,
					   cl_bit_vec, E1H_FUNC_MAX + BP_FUNC(bp));

	/* Wait for a completion when setting */
	bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);

	return 0;
}
#endif
int bnx2x_setup_leading(struct bnx2x *bp)
{
	int rc;

	/* reset IGU state */
	bnx2x_ack_sb(bp, bp->fp[0].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);

	/* SETUP ramrod */
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0);

	return rc;
}
int bnx2x_setup_multi(struct bnx2x *bp, int index)
{
	struct bnx2x_fastpath *fp = &bp->fp[index];

	/* reset IGU state */
	bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);

	/* SETUP ramrod */
	fp->state = BNX2X_FP_STATE_OPENING;
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0,
		      fp->cl_id, 0);

	/* Wait for completion */
	return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
				 &(fp->state), 0);
}
void bnx2x_set_num_queues_msix(struct bnx2x *bp)
{
	switch (bp->multi_mode) {
	case ETH_RSS_MODE_DISABLED:
		bp->num_queues = 1;
		break;

	case ETH_RSS_MODE_REGULAR:
		if (num_queues)
			bp->num_queues = min_t(u32, num_queues,
					       BNX2X_MAX_QUEUES(bp));
		else
			bp->num_queues = min_t(u32, num_online_cpus(),
					       BNX2X_MAX_QUEUES(bp));
		break;

	default:
		bp->num_queues = 1;
		break;
	}
}
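
/* In ETH_RSS_MODE_REGULAR the "num_queues" module parameter acts as an
 * explicit request; when it is left at 0 the driver falls back to one queue
 * per online CPU, in both cases capped by BNX2X_MAX_QUEUES(bp).
 */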
static int bnx2x_stop_multi(struct bnx2x *bp, int index)
{
	struct bnx2x_fastpath *fp = &bp->fp[index];
	int rc;

	/* halt the connection */
	fp->state = BNX2X_FP_STATE_HALTING;
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, fp->cl_id, 0);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
			       &(fp->state), 1);
	if (rc) /* timeout */
		return rc;

	/* delete cfc entry */
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index,
			       &(fp->state), 1);
	return rc;
}
static int bnx2x_stop_leading(struct bnx2x *bp)
{
	__le16 dsb_sp_prod_idx;
	/* if the other port is handling traffic,
	   this can take a lot of time */
	int cnt = 500;
	int rc;

	might_sleep();

	/* Send HALT ramrod */
	bp->fp[0].state = BNX2X_FP_STATE_HALTING;
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, bp->fp->cl_id, 0);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
			       &(bp->fp[0].state), 1);
	if (rc) /* timeout */
		return rc;

	dsb_sp_prod_idx = *bp->dsb_sp_prod;

	/* Send PORT_DELETE ramrod */
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);

	/* Wait for completion to arrive on default status block
	   we are going to reset the chip anyway
	   so there is not much to do if this times out
	 */
	while (dsb_sp_prod_idx == *bp->dsb_sp_prod) {
		if (!cnt) {
			DP(NETIF_MSG_IFDOWN, "timeout waiting for port del "
			   "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
			   *bp->dsb_sp_prod, dsb_sp_prod_idx);
#ifdef BNX2X_STOP_ON_ERROR
			bnx2x_panic();
#endif
			rc = -EBUSY;
			break;
		}
		cnt--;
		msleep(1);
		rmb(); /* Refresh the dsb_sp_prod */
	}
	bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
	bp->fp[0].state = BNX2X_FP_STATE_CLOSED;

	return rc;
}
static void bnx2x_reset_func(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int base, i;

	/* Configure IGU */
	REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
	REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);

#ifdef BCM_CNIC
	/* Disable Timer scan */
	REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
	/*
	 * Wait for at least 10ms and up to 2 second for the timers scan to
	 * complete
	 */
	for (i = 0; i < 200; i++) {
		msleep(10);
		if (!REG_RD(bp, TM_REG_LIN0_SCAN_ON + port*4))
			break;
	}
#endif
	/* Clear ILT */
	base = FUNC_ILT_BASE(func);
	for (i = base; i < base + ILT_PER_FUNC; i++)
		bnx2x_ilt_wr(bp, i, 0);
}
static void bnx2x_reset_port(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 val;

	REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);

	/* Do not rcv packets to BRB */
	REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
	/* Do not direct rcv packets that are not for MCP to the BRB */
	REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
			   NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);

	/* Configure AEU */
	REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);

	msleep(100);
	/* Check for BRB port occupancy */
	val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
	if (val)
		DP(NETIF_MSG_IFDOWN,
		   "BRB1 is not empty  %d blocks are occupied\n", val);

	/* TODO: Close Doorbell port? */
}
static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
{
	DP(BNX2X_MSG_MCP, "function %d  reset_code %x\n",
	   BP_FUNC(bp), reset_code);

	switch (reset_code) {
	case FW_MSG_CODE_DRV_UNLOAD_COMMON:
		bnx2x_reset_port(bp);
		bnx2x_reset_func(bp);
		bnx2x_reset_common(bp);
		break;

	case FW_MSG_CODE_DRV_UNLOAD_PORT:
		bnx2x_reset_port(bp);
		bnx2x_reset_func(bp);
		break;

	case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
		bnx2x_reset_func(bp);
		break;

	default:
		BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
		break;
	}
}
void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode)
{
	int port = BP_PORT(bp);
	u32 reset_code = 0;
	int i, cnt, rc;

	/* Wait until tx fastpath tasks complete */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		cnt = 1000;
		while (bnx2x_has_tx_work_unload(fp)) {
			if (!cnt) {
				BNX2X_ERR("timeout waiting for queue[%d]\n",
					  i);
#ifdef BNX2X_STOP_ON_ERROR
				bnx2x_panic();
				return;
#else
				break;
#endif
			}
			cnt--;
			msleep(1);
		}
	}
	/* Give HW time to discard old tx messages */
	msleep(1);

	if (CHIP_IS_E1(bp)) {
		struct mac_configuration_cmd *config =
						bnx2x_sp(bp, mcast_config);

		bnx2x_set_eth_mac_addr_e1(bp, 0);

		for (i = 0; i < config->hdr.length; i++)
			CAM_INVALIDATE(config->config_table[i]);

		config->hdr.length = i;
		if (CHIP_REV_IS_SLOW(bp))
			config->hdr.offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
		else
			config->hdr.offset = BNX2X_MAX_MULTICAST*(1 + port);
		config->hdr.client_id = bp->fp->cl_id;
		config->hdr.reserved1 = 0;

		bp->set_mac_pending++;
		smp_wmb();

		bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
			      U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
			      U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);
	} else { /* E1H */
		REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);

		bnx2x_set_eth_mac_addr_e1h(bp, 0);

		for (i = 0; i < MC_HASH_SIZE; i++)
			REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);

		REG_WR(bp, MISC_REG_E1HMF_MODE, 0);
	}
#ifdef BCM_CNIC
	/* Clear iSCSI L2 MAC */
	mutex_lock(&bp->cnic_mutex);
	if (bp->cnic_flags & BNX2X_CNIC_FLAG_MAC_SET) {
		bnx2x_set_iscsi_eth_mac_addr(bp, 0);
		bp->cnic_flags &= ~BNX2X_CNIC_FLAG_MAC_SET;
	}
	mutex_unlock(&bp->cnic_mutex);
#endif

	if (unload_mode == UNLOAD_NORMAL)
		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;

	else if (bp->flags & NO_WOL_FLAG)
		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;

	else if (bp->wol) {
		u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
		u8 *mac_addr = bp->dev->dev_addr;
		u32 val;
		/* The mac address is written to entries 1-4 to
		   preserve entry 0 which is used by the PMF */
		u8 entry = (BP_E1HVN(bp) + 1)*8;

		val = (mac_addr[0] << 8) | mac_addr[1];
		EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);

		val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
		      (mac_addr[4] << 8) | mac_addr[5];
		EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);

		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;

	} else
		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;

	/* Close multi and leading connections
	   Completions for ramrods are collected in a synchronous way */
	for_each_nondefault_queue(bp, i)
		if (bnx2x_stop_multi(bp, i))
			goto unload_error;

	rc = bnx2x_stop_leading(bp);
	if (rc) {
		BNX2X_ERR("Stop leading failed!\n");
#ifdef BNX2X_STOP_ON_ERROR
		return;
#else
		goto unload_error;
#endif
	}

unload_error:
	if (!BP_NOMCP(bp))
		reset_code = bnx2x_fw_command(bp, reset_code);
	else {
		DP(NETIF_MSG_IFDOWN, "NO MCP - load counts      %d, %d, %d\n",
		   load_count[0], load_count[1], load_count[2]);
		load_count[0]--;
		load_count[1 + port]--;
		DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts  %d, %d, %d\n",
		   load_count[0], load_count[1], load_count[2]);
		if (load_count[0] == 0)
			reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
		else if (load_count[1 + port] == 0)
			reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
		else
			reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
	}

	if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
	    (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
		bnx2x__link_reset(bp);

	/* Reset the chip */
	bnx2x_reset_chip(bp, reset_code);

	/* Report UNLOAD_DONE to MCP */
	if (!BP_NOMCP(bp))
		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
}
void bnx2x_disable_close_the_gate(struct bnx2x *bp)
{
	u32 val;

	DP(NETIF_MSG_HW, "Disabling \"close the gates\"\n");

	if (CHIP_IS_E1(bp)) {
		int port = BP_PORT(bp);
		u32 addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
			MISC_REG_AEU_MASK_ATTN_FUNC_0;

		val = REG_RD(bp, addr);
		val &= ~(0x300);
		REG_WR(bp, addr, val);
	} else if (CHIP_IS_E1H(bp)) {
		val = REG_RD(bp, MISC_REG_AEU_GENERAL_MASK);
		val &= ~(MISC_AEU_GENERAL_MASK_REG_AEU_PXP_CLOSE_MASK |
			 MISC_AEU_GENERAL_MASK_REG_AEU_NIG_CLOSE_MASK);
		REG_WR(bp, MISC_REG_AEU_GENERAL_MASK, val);
	}
}
/* Close gates #2, #3 and #4: */
static void bnx2x_set_234_gates(struct bnx2x *bp, bool close)
{
	u32 val, addr;

	/* Gates #2 and #4a are closed/opened for "not E1" only */
	if (!CHIP_IS_E1(bp)) {
		/* #4 */
		val = REG_RD(bp, PXP_REG_HST_DISCARD_DOORBELLS);
		REG_WR(bp, PXP_REG_HST_DISCARD_DOORBELLS,
		       close ? (val | 0x1) : (val & (~(u32)1)));
		/* #2 */
		val = REG_RD(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES);
		REG_WR(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES,
		       close ? (val | 0x1) : (val & (~(u32)1)));
	}

	/* #3 */
	addr = BP_PORT(bp) ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	val = REG_RD(bp, addr);
	REG_WR(bp, addr, (!close) ? (val | 0x1) : (val & (~(u32)1)));

	DP(NETIF_MSG_HW, "%s gates #2, #3 and #4\n",
	   close ? "closing" : "opening");
	mmiowb();
}
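
/* Note the inverted polarity of gate #3: gates #2 and #4 are "discard"
 * controls (bit set == gate closed), while #3 is an enable bit in
 * HC_REG_CONFIG_x, so closing it means clearing bit 0 rather than
 * setting it.
 */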
#define SHARED_MF_CLP_MAGIC  0x80000000 /* `magic' bit */

static void bnx2x_clp_reset_prep(struct bnx2x *bp, u32 *magic_val)
{
	/* Do some magic... */
	u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
	*magic_val = val & SHARED_MF_CLP_MAGIC;
	MF_CFG_WR(bp, shared_mf_config.clp_mb, val | SHARED_MF_CLP_MAGIC);
}

/* Restore the value of the `magic' bit.
 *
 * @param pdev Device handle.
 * @param magic_val Old value of the `magic' bit.
 */
static void bnx2x_clp_reset_done(struct bnx2x *bp, u32 magic_val)
{
	/* Restore the `magic' bit value... */
	/* u32 val = SHMEM_RD(bp, mf_cfg.shared_mf_config.clp_mb);
	   SHMEM_WR(bp, mf_cfg.shared_mf_config.clp_mb,
		    (val & (~SHARED_MF_CLP_MAGIC)) | magic_val); */
	u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
	MF_CFG_WR(bp, shared_mf_config.clp_mb,
		  (val & (~SHARED_MF_CLP_MAGIC)) | magic_val);
}
/* Prepares for MCP reset: takes care of CLP configurations.
 *
 * @param bp
 * @param magic_val Old value of 'magic' bit.
 */
static void bnx2x_reset_mcp_prep(struct bnx2x *bp, u32 *magic_val)
{
	u32 shmem;
	u32 validity_offset;

	DP(NETIF_MSG_HW, "Starting\n");

	/* Set `magic' bit in order to save MF config */
	if (!CHIP_IS_E1(bp))
		bnx2x_clp_reset_prep(bp, magic_val);

	/* Get shmem offset */
	shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
	validity_offset = offsetof(struct shmem_region, validity_map[0]);

	/* Clear validity map flags */
	if (shmem > 0)
		REG_WR(bp, shmem + validity_offset, 0);
}
#define MCP_TIMEOUT      5000   /* 5 seconds (in ms) */
#define MCP_ONE_TIMEOUT  100    /* 100 ms */

/* Waits for MCP_ONE_TIMEOUT or MCP_ONE_TIMEOUT*10,
 * depending on the HW type.
 *
 * @param bp
 */
static inline void bnx2x_mcp_wait_one(struct bnx2x *bp)
{
	/* special handling for emulation and FPGA,
	   wait 10 times longer */
	if (CHIP_REV_IS_SLOW(bp))
		msleep(MCP_ONE_TIMEOUT*10);
	else
		msleep(MCP_ONE_TIMEOUT);
}
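
/* The polling loop in bnx2x_reset_mcp_comp() below runs for at most
 * MCP_TIMEOUT/MCP_ONE_TIMEOUT = 50 iterations; on emulation/FPGA each
 * iteration sleeps 1 s, so the effective timeout there grows to ~50 s.
 */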
static int bnx2x_reset_mcp_comp(struct bnx2x *bp, u32 magic_val)
{
	u32 shmem, cnt, validity_offset, val;
	int rc = 0;

	/* Get shmem offset */
	shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
	if (shmem == 0) {
		BNX2X_ERR("Shmem 0 return failure\n");
		rc = -ENOTTY;
		goto exit_lbl;
	}

	validity_offset = offsetof(struct shmem_region, validity_map[0]);

	/* Wait for MCP to come up */
	for (cnt = 0; cnt < (MCP_TIMEOUT / MCP_ONE_TIMEOUT); cnt++) {
		/* TBD: its best to check validity map of last port.
		 * currently checks on port 0.
		 */
		val = REG_RD(bp, shmem + validity_offset);
		DP(NETIF_MSG_HW, "shmem 0x%x validity map(0x%x)=0x%x\n", shmem,
		   shmem + validity_offset, val);

		/* check that shared memory is valid. */
		if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		    == (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
			break;

		bnx2x_mcp_wait_one(bp);
	}

	DP(NETIF_MSG_HW, "Cnt=%d Shmem validity map 0x%x\n", cnt, val);

	/* Check that shared memory is valid. This indicates that MCP is up. */
	if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) !=
	    (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) {
		BNX2X_ERR("Shmem signature not present. MCP is not up !!\n");
		rc = -ENOTTY;
		goto exit_lbl;
	}

exit_lbl:
	/* Restore the `magic' bit value */
	if (!CHIP_IS_E1(bp))
		bnx2x_clp_reset_done(bp, magic_val);

	return rc;
}
static void bnx2x_pxp_prep(struct bnx2x *bp)
{
	if (!CHIP_IS_E1(bp)) {
		REG_WR(bp, PXP2_REG_RD_START_INIT, 0);
		REG_WR(bp, PXP2_REG_RQ_RBC_DONE, 0);
		REG_WR(bp, PXP2_REG_RQ_CFG_DONE, 0);
		mmiowb();
	}
}
/*
 * Reset the whole chip except for:
 *      - PCIE core
 *      - PCI Glue, PSWHST, PXP/PXP2 RF (all controlled by
 *              one reset bit)
 *      - IGU
 *      - MISC (including AEU)
 *      - GRC
 *      - RBCN, RBCP
 */
static void bnx2x_process_kill_chip_reset(struct bnx2x *bp)
{
	u32 not_reset_mask1, reset_mask1, not_reset_mask2, reset_mask2;

	not_reset_mask1 =
		MISC_REGISTERS_RESET_REG_1_RST_HC |
		MISC_REGISTERS_RESET_REG_1_RST_PXPV |
		MISC_REGISTERS_RESET_REG_1_RST_PXP;

	not_reset_mask2 =
		MISC_REGISTERS_RESET_REG_2_RST_MDIO |
		MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE |
		MISC_REGISTERS_RESET_REG_2_RST_EMAC1_HARD_CORE |
		MISC_REGISTERS_RESET_REG_2_RST_MISC_CORE |
		MISC_REGISTERS_RESET_REG_2_RST_RBCN |
		MISC_REGISTERS_RESET_REG_2_RST_GRC  |
		MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_REG_HARD_CORE |
		MISC_REGISTERS_RESET_REG_2_RST_MCP_N_HARD_CORE_RST_B;

	reset_mask1 = 0xffffffff;

	if (CHIP_IS_E1(bp))
		reset_mask2 = 0xffff;
	else
		reset_mask2 = 0x1ffff;

	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
	       reset_mask1 & (~not_reset_mask1));
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
	       reset_mask2 & (~not_reset_mask2));

	barrier();
	mmiowb();

	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, reset_mask1);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, reset_mask2);
	mmiowb();
}
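
/* As with the NIG handling in bnx2x_undi_unload(), writing a mask to
 * MISC_REGISTERS_RESET_REG_x_CLEAR drives the selected blocks into reset,
 * and writing it to MISC_REGISTERS_RESET_REG_x_SET releases them again.
 */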
static int bnx2x_process_kill(struct bnx2x *bp)
{
	int cnt = 1000;
	u32 val = 0;
	u32 sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1, pgl_exp_rom2;

	/* Empty the Tetris buffer, wait for 1s */
	do {
		sr_cnt = REG_RD(bp, PXP2_REG_RD_SR_CNT);
		blk_cnt = REG_RD(bp, PXP2_REG_RD_BLK_CNT);
		port_is_idle_0 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_0);
		port_is_idle_1 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_1);
		pgl_exp_rom2 = REG_RD(bp, PXP2_REG_PGL_EXP_ROM2);
		if ((sr_cnt == 0x7e) && (blk_cnt == 0xa0) &&
		    ((port_is_idle_0 & 0x1) == 0x1) &&
		    ((port_is_idle_1 & 0x1) == 0x1) &&
		    (pgl_exp_rom2 == 0xffffffff))
			break;
		msleep(1);
	} while (cnt-- > 0);

	if (cnt <= 0) {
		DP(NETIF_MSG_HW, "Tetris buffer didn't get empty or there"
		   " are still outstanding read requests after 1s!\n");
		DP(NETIF_MSG_HW, "sr_cnt=0x%08x, blk_cnt=0x%08x,"
		   " port_is_idle_0=0x%08x,"
		   " port_is_idle_1=0x%08x, pgl_exp_rom2=0x%08x\n",
		   sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1,
		   pgl_exp_rom2);
		return -EAGAIN;
	}

	barrier();

	/* Close gates #2, #3 and #4 */
	bnx2x_set_234_gates(bp, true);

	/* TBD: Indicate that "process kill" is in progress to MCP */

	/* Clear "unprepared" bit */
	REG_WR(bp, MISC_REG_UNPREPARED, 0);
	barrier();

	/* Make sure all is written to the chip before the reset */
	mmiowb();

	/* Wait for 1ms to empty GLUE and PCI-E core queues,
	 * PSWHST, GRC and PSWRD Tetris buffer.
	 */
	msleep(1);

	/* Prepare to chip reset: */
	/* MCP */
	bnx2x_reset_mcp_prep(bp, &val);

	/* PXP */
	bnx2x_pxp_prep(bp);
	barrier();

	/* reset the chip */
	bnx2x_process_kill_chip_reset(bp);
	barrier();

	/* Recover after reset: */
	/* MCP */
	if (bnx2x_reset_mcp_comp(bp, val))
		return -EAGAIN;

	/* PXP */
	bnx2x_pxp_prep(bp);

	/* Open the gates #2, #3 and #4 */
	bnx2x_set_234_gates(bp, false);

	/* TBD: IGU/AEU preparation bring back the AEU/IGU to a
	 * reset state, re-enable attentions. */

	return 0;
}
static int bnx2x_leader_reset(struct bnx2x *bp)
{
	int rc = 0;

	/* Try to recover after the failure */
	if (bnx2x_process_kill(bp)) {
		printk(KERN_ERR "%s: Something bad happened! Aii!\n",
		       bp->dev->name);
		rc = -EAGAIN;
		goto exit_leader_reset;
	}

	/* Clear "reset is in progress" bit and update the driver state */
	bnx2x_set_reset_done(bp);
	bp->recovery_state = BNX2X_RECOVERY_DONE;

exit_leader_reset:
	bp->is_leader = 0;
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESERVED_08);
	smp_wmb();
	return rc;
}
/* Assumption: runs under rtnl lock. This together with the fact
 * that it's called only from bnx2x_reset_task() ensure that it
 * will never be called when netif_running(bp->dev) is false.
 */
static void bnx2x_parity_recover(struct bnx2x *bp)
{
	DP(NETIF_MSG_HW, "Handling parity\n");
	while (1) {
		switch (bp->recovery_state) {
		case BNX2X_RECOVERY_INIT:
			DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_INIT\n");
			/* Try to get a LEADER_LOCK HW lock */
			if (bnx2x_trylock_hw_lock(bp,
				HW_LOCK_RESOURCE_RESERVED_08))
				bp->is_leader = 1;

			/* Stop the driver */
			/* If interface has been removed - break */
			if (bnx2x_nic_unload(bp, UNLOAD_RECOVERY))
				return;

			bp->recovery_state = BNX2X_RECOVERY_WAIT;
			/* Ensure "is_leader" and "recovery_state"
			 * update values are seen on other CPUs
			 */
			smp_wmb();
			break;

		case BNX2X_RECOVERY_WAIT:
			DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_WAIT\n");
			if (bp->is_leader) {
				u32 load_counter = bnx2x_get_load_cnt(bp);
				if (load_counter) {
					/* Wait until all other functions get
					 * down.
					 */
					schedule_delayed_work(&bp->reset_task,
								HZ/10);
					return;
				} else {
					/* If all other functions got down -
					 * try to bring the chip back to
					 * normal. In any case it's an exit
					 * point for a leader.
					 */
					if (bnx2x_leader_reset(bp) ||
					    bnx2x_nic_load(bp, LOAD_NORMAL)) {
						printk(KERN_ERR "%s: Recovery "
						"has failed. Power cycle is "
						"needed.\n", bp->dev->name);
						/* Disconnect this device */
						netif_device_detach(bp->dev);
						/* Block ifup for all function
						 * of this ASIC until
						 * "process kill" or power
						 * cycle.
						 */
						bnx2x_set_reset_in_progress(bp);
						/* Shut down the power */
						bnx2x_set_power_state(bp,
								PCI_D3hot);
						return;
					}

					return;
				}
			} else { /* non-leader */
				if (!bnx2x_reset_is_done(bp)) {
					/* Try to get a LEADER_LOCK HW lock as
					 * long as a former leader may have
					 * been unloaded by the user or
					 * released a leadership by another
					 * reason.
					 */
					if (bnx2x_trylock_hw_lock(bp,
					    HW_LOCK_RESOURCE_RESERVED_08)) {
						/* I'm a leader now! Restart a
						 * switch case.
						 */
						bp->is_leader = 1;
						break;
					}

					schedule_delayed_work(&bp->reset_task,
								HZ/10);
					return;

				} else { /* A leader has completed
					  * the "process kill". It's an exit
					  * point for a non-leader.
					  */
					bnx2x_nic_load(bp, LOAD_NORMAL);
					bp->recovery_state =
						BNX2X_RECOVERY_DONE;
					smp_wmb();
					return;
				}
			}
		default:
			return;
		}
	}
}
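
/* Recovery state machine summary: BNX2X_RECOVERY_INIT unloads the driver
 * and elects a leader via the LEADER_LOCK HW lock; BNX2X_RECOVERY_WAIT then
 * either lets the leader run bnx2x_leader_reset() once all functions are
 * down, or lets a non-leader reload once the leader has finished the
 * "process kill"; both paths end in BNX2X_RECOVERY_DONE.
 */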
/* bnx2x_nic_unload() flushes the bnx2x_wq, thus reset task is
 * scheduled on a general queue in order to prevent a dead lock.
 */
static void bnx2x_reset_task(struct work_struct *work)
{
	struct bnx2x *bp = container_of(work, struct bnx2x, reset_task.work);

#ifdef BNX2X_STOP_ON_ERROR
	BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
		  " so reset not done to allow debug dump,\n"
	 KERN_ERR " you will need to reboot when done\n");
	return;
#endif

	rtnl_lock();

	if (!netif_running(bp->dev))
		goto reset_task_exit;

	if (unlikely(bp->recovery_state != BNX2X_RECOVERY_DONE))
		bnx2x_parity_recover(bp);
	else {
		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
		bnx2x_nic_load(bp, LOAD_NORMAL);
	}

reset_task_exit:
	rtnl_unlock();
}

/* end of nic load/unload */
5805 * Init service functions
5808 static inline u32
bnx2x_get_pretend_reg(struct bnx2x
*bp
, int func
)
5811 case 0: return PXP2_REG_PGL_PRETEND_FUNC_F0
;
5812 case 1: return PXP2_REG_PGL_PRETEND_FUNC_F1
;
5813 case 2: return PXP2_REG_PGL_PRETEND_FUNC_F2
;
5814 case 3: return PXP2_REG_PGL_PRETEND_FUNC_F3
;
5815 case 4: return PXP2_REG_PGL_PRETEND_FUNC_F4
;
5816 case 5: return PXP2_REG_PGL_PRETEND_FUNC_F5
;
5817 case 6: return PXP2_REG_PGL_PRETEND_FUNC_F6
;
5818 case 7: return PXP2_REG_PGL_PRETEND_FUNC_F7
;
5820 BNX2X_ERR("Unsupported function index: %d\n", func
);
static void bnx2x_undi_int_disable_e1h(struct bnx2x *bp, int orig_func)
{
	u32 reg = bnx2x_get_pretend_reg(bp, orig_func), new_val;

	/* Flush all outstanding writes */
	mmiowb();

	/* Pretend to be function 0 */
	REG_WR(bp, reg, 0);
	/* Flush the GRC transaction (in the chip) */
	new_val = REG_RD(bp, reg);
	if (new_val != 0) {
		BNX2X_ERR("Hmmm... Pretend register wasn't updated: (0,%d)!\n",
			  new_val);
		BUG();
	}

	/* From now we are in the "like-E1" mode */
	bnx2x_int_disable(bp);

	/* Flush all outstanding writes */
	mmiowb();

	/* Restore the original function settings */
	REG_WR(bp, reg, orig_func);
	new_val = REG_RD(bp, reg);
	if (new_val != orig_func) {
		BNX2X_ERR("Hmmm... Pretend register wasn't updated: (%d,%d)!\n",
			  orig_func, new_val);
		BUG();
	}
}

static inline void bnx2x_undi_int_disable(struct bnx2x *bp, int func)
{
	if (CHIP_IS_E1H(bp))
		bnx2x_undi_int_disable_e1h(bp, func);
	else
		bnx2x_int_disable(bp);
}
static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
{
	u32 val;

	/* Check if there is any driver already loaded */
	val = REG_RD(bp, MISC_REG_UNPREPARED);
	if (val == 0x1) {
		/* Check if it is the UNDI driver
		 * UNDI driver initializes CID offset for normal bell to 0x7
		 */
		bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
		val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
		if (val == 0x7) {
			u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
			/* save our func */
			int func = BP_FUNC(bp);
			u32 swap_en;
			u32 swap_val;

			/* clear the UNDI indication */
			REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);

			BNX2X_DEV_INFO("UNDI is active! reset device\n");

			/* try unload UNDI on port 0 */
			bp->func = 0;
			bp->fw_seq =
			       (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
				DRV_MSG_SEQ_NUMBER_MASK);
			reset_code = bnx2x_fw_command(bp, reset_code);

			/* if UNDI is loaded on the other port */
			if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {

				/* send "DONE" for previous unload */
				bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);

				/* unload UNDI on port 1 */
				bp->func = 1;
				bp->fw_seq =
			       (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
					DRV_MSG_SEQ_NUMBER_MASK);
				reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;

				bnx2x_fw_command(bp, reset_code);
			}

			/* now it's safe to release the lock */
			bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);

			bnx2x_undi_int_disable(bp, func);

			/* close input traffic and wait for it */
			/* Do not rcv packets to BRB */
			REG_WR(bp,
			      (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
					     NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
			/* Do not direct rcv packets that are not for MCP to
			 * the BRB */
			REG_WR(bp,
			      (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
					     NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
			/* clear AEU */
			REG_WR(bp,
			      (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
					     MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
			msleep(10);

			/* save NIG port swap info */
			swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
			swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
			/* reset device */
			REG_WR(bp,
			       GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
			       0xd3ffffff);
			REG_WR(bp,
			       GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
			       0x1403);
			/* take the NIG out of reset and restore swap values */
			REG_WR(bp,
			       GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
			       MISC_REGISTERS_RESET_REG_1_RST_NIG);
			REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
			REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);

			/* send unload done to the MCP */
			bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);

			/* restore our func and fw_seq */
			bp->func = func;
			bp->fw_seq =
			       (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
				DRV_MSG_SEQ_NUMBER_MASK);

		} else
			bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
	}
}
static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
{
	u32 val, val2, val3, val4, id;
	u16 pmc;

	/* Get the chip revision id and number. */
	/* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
	val = REG_RD(bp, MISC_REG_CHIP_NUM);
	id = ((val & 0xffff) << 16);
	val = REG_RD(bp, MISC_REG_CHIP_REV);
	id |= ((val & 0xf) << 12);
	val = REG_RD(bp, MISC_REG_CHIP_METAL);
	id |= ((val & 0xff) << 4);
	val = REG_RD(bp, MISC_REG_BOND_ID);
	id |= (val & 0xf);
	bp->common.chip_id = id;
	bp->link_params.chip_id = bp->common.chip_id;
	BNX2X_DEV_INFO("chip ID is 0x%x\n", id);

	val = (REG_RD(bp, 0x2874) & 0x55);
	if ((bp->common.chip_id & 0x1) ||
	    (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) {
		bp->flags |= ONE_PORT_FLAG;
		BNX2X_DEV_INFO("single port device\n");
	}

	val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
	bp->common.flash_size = (NVRAM_1MB_SIZE <<
				 (val & MCPR_NVM_CFG4_FLASH_SIZE));
	BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
		       bp->common.flash_size, bp->common.flash_size);

	bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
	bp->common.shmem2_base = REG_RD(bp, MISC_REG_GENERIC_CR_0);
	bp->link_params.shmem_base = bp->common.shmem_base;
	BNX2X_DEV_INFO("shmem offset 0x%x  shmem2 offset 0x%x\n",
		       bp->common.shmem_base, bp->common.shmem2_base);

	if (!bp->common.shmem_base ||
	    (bp->common.shmem_base < 0xA0000) ||
	    (bp->common.shmem_base >= 0xC0000)) {
		BNX2X_DEV_INFO("MCP not active\n");
		bp->flags |= NO_MCP_FLAG;
		return;
	}

	val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
	if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		!= (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		BNX2X_ERROR("BAD MCP validity signature\n");

	bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
	BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config);

	bp->link_params.hw_led_mode = ((bp->common.hw_config &
					SHARED_HW_CFG_LED_MODE_MASK) >>
				       SHARED_HW_CFG_LED_MODE_SHIFT);

	bp->link_params.feature_config_flags = 0;
	val = SHMEM_RD(bp, dev_info.shared_feature_config.config);
	if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED)
		bp->link_params.feature_config_flags |=
				FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
	else
		bp->link_params.feature_config_flags &=
				~FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;

	val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
	bp->common.bc_ver = val;
	BNX2X_DEV_INFO("bc_ver %X\n", val);
	if (val < BNX2X_BC_VER) {
		/* for now only warn
		 * later we might need to enforce this */
		BNX2X_ERROR("This driver needs bc_ver %X but found %X, "
			    "please upgrade BC\n", BNX2X_BC_VER, val);
	}
	bp->link_params.feature_config_flags |=
		(val >= REQ_BC_VER_4_VRFY_OPT_MDL) ?
		FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY : 0;

	if (BP_E1HVN(bp) == 0) {
		pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
		bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
	} else
		/* no WOL capability for E1HVN != 0 */
		bp->flags |= NO_WOL_FLAG;

	BNX2X_DEV_INFO("%sWoL capable\n",
		       (bp->flags & NO_WOL_FLAG) ? "not " : "");

	val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
	val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
	val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
	val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);

	dev_info(&bp->pdev->dev, "part number %X-%X-%X-%X\n",
		 val, val2, val3, val4);
}
static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
						    u32 switch_cfg)
{
	int port = BP_PORT(bp);
	u32 ext_phy_type;

	switch (switch_cfg) {
	case SWITCH_CFG_1G:
		BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg);

		ext_phy_type =
			SERDES_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
		switch (ext_phy_type) {
		case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10baseT_Half |
					       SUPPORTED_10baseT_Full |
					       SUPPORTED_100baseT_Half |
					       SUPPORTED_100baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_2500baseX_Full |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10baseT_Half |
					       SUPPORTED_10baseT_Full |
					       SUPPORTED_100baseT_Half |
					       SUPPORTED_100baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_Asym_Pause);
			break;

		default:
			BNX2X_ERR("NVRAM config error. "
				  "BAD SerDes ext_phy_config 0x%x\n",
				  bp->link_params.ext_phy_config);
			return;
		}

		bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
					   port*0x10);
		BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
		break;

	case SWITCH_CFG_10G:
		BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg);

		ext_phy_type =
			XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
		switch (ext_phy_type) {
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10baseT_Half |
					       SUPPORTED_10baseT_Full |
					       SUPPORTED_100baseT_Half |
					       SUPPORTED_100baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_2500baseX_Full |
					       SUPPORTED_10000baseT_Full |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8073)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_2500baseX_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8726)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8727)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (BCM8481)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10baseT_Half |
					       SUPPORTED_10baseT_Full |
					       SUPPORTED_100baseT_Half |
					       SUPPORTED_100baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_10000baseT_Full |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
			BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
				  bp->link_params.ext_phy_config);
			break;

		default:
			BNX2X_ERR("NVRAM config error. "
				  "BAD XGXS ext_phy_config 0x%x\n",
				  bp->link_params.ext_phy_config);
			return;
		}

		bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
					   port*0x18);
		BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
		break;

	default:
		BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
			  bp->port.link_config);
		return;
	}
	bp->link_params.phy_addr = bp->port.phy_addr;

	/* mask what we support according to speed_cap_mask */
	if (!(bp->link_params.speed_cap_mask &
	      PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
		bp->port.supported &= ~SUPPORTED_10baseT_Half;

	if (!(bp->link_params.speed_cap_mask &
	      PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
		bp->port.supported &= ~SUPPORTED_10baseT_Full;

	if (!(bp->link_params.speed_cap_mask &
	      PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
		bp->port.supported &= ~SUPPORTED_100baseT_Half;

	if (!(bp->link_params.speed_cap_mask &
	      PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
		bp->port.supported &= ~SUPPORTED_100baseT_Full;

	if (!(bp->link_params.speed_cap_mask &
	      PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
		bp->port.supported &= ~(SUPPORTED_1000baseT_Half |
					SUPPORTED_1000baseT_Full);

	if (!(bp->link_params.speed_cap_mask &
	      PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
		bp->port.supported &= ~SUPPORTED_2500baseX_Full;

	if (!(bp->link_params.speed_cap_mask &
	      PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
		bp->port.supported &= ~SUPPORTED_10000baseT_Full;

	BNX2X_DEV_INFO("supported 0x%x\n", bp->port.supported);
}
6301 static void __devinit
bnx2x_link_settings_requested(struct bnx2x
*bp
)
6303 bp
->link_params
.req_duplex
= DUPLEX_FULL
;
6305 switch (bp
->port
.link_config
& PORT_FEATURE_LINK_SPEED_MASK
) {
6306 case PORT_FEATURE_LINK_SPEED_AUTO
:
6307 if (bp
->port
.supported
& SUPPORTED_Autoneg
) {
6308 bp
->link_params
.req_line_speed
= SPEED_AUTO_NEG
;
6309 bp
->port
.advertising
= bp
->port
.supported
;
6312 XGXS_EXT_PHY_TYPE(bp
->link_params
.ext_phy_config
);
6314 if ((ext_phy_type
==
6315 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705
) ||
		     PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706)) {
				/* force 10G, no AN */
				bp->link_params.req_line_speed = SPEED_10000;
				bp->port.advertising =
						(ADVERTISED_10000baseT_Full |
						 ADVERTISED_FIBRE);
				break;
			}
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  "  Autoneg not supported\n",
				  bp->port.link_config);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_10M_FULL:
		if (bp->port.supported & SUPPORTED_10baseT_Full) {
			bp->link_params.req_line_speed = SPEED_10;
			bp->port.advertising = (ADVERTISED_10baseT_Full |
						ADVERTISED_TP);
		} else {
			BNX2X_ERROR("NVRAM config error. "
				    "Invalid link_config 0x%x"
				    "  speed_cap_mask 0x%x\n",
				    bp->port.link_config,
				    bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_10M_HALF:
		if (bp->port.supported & SUPPORTED_10baseT_Half) {
			bp->link_params.req_line_speed = SPEED_10;
			bp->link_params.req_duplex = DUPLEX_HALF;
			bp->port.advertising = (ADVERTISED_10baseT_Half |
						ADVERTISED_TP);
		} else {
			BNX2X_ERROR("NVRAM config error. "
				    "Invalid link_config 0x%x"
				    "  speed_cap_mask 0x%x\n",
				    bp->port.link_config,
				    bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_100M_FULL:
		if (bp->port.supported & SUPPORTED_100baseT_Full) {
			bp->link_params.req_line_speed = SPEED_100;
			bp->port.advertising = (ADVERTISED_100baseT_Full |
						ADVERTISED_TP);
		} else {
			BNX2X_ERROR("NVRAM config error. "
				    "Invalid link_config 0x%x"
				    "  speed_cap_mask 0x%x\n",
				    bp->port.link_config,
				    bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_100M_HALF:
		if (bp->port.supported & SUPPORTED_100baseT_Half) {
			bp->link_params.req_line_speed = SPEED_100;
			bp->link_params.req_duplex = DUPLEX_HALF;
			bp->port.advertising = (ADVERTISED_100baseT_Half |
						ADVERTISED_TP);
		} else {
			BNX2X_ERROR("NVRAM config error. "
				    "Invalid link_config 0x%x"
				    "  speed_cap_mask 0x%x\n",
				    bp->port.link_config,
				    bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_1G:
		if (bp->port.supported & SUPPORTED_1000baseT_Full) {
			bp->link_params.req_line_speed = SPEED_1000;
			bp->port.advertising = (ADVERTISED_1000baseT_Full |
						ADVERTISED_TP);
		} else {
			BNX2X_ERROR("NVRAM config error. "
				    "Invalid link_config 0x%x"
				    "  speed_cap_mask 0x%x\n",
				    bp->port.link_config,
				    bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_2_5G:
		if (bp->port.supported & SUPPORTED_2500baseX_Full) {
			bp->link_params.req_line_speed = SPEED_2500;
			bp->port.advertising = (ADVERTISED_2500baseX_Full |
						ADVERTISED_TP);
		} else {
			BNX2X_ERROR("NVRAM config error. "
				    "Invalid link_config 0x%x"
				    "  speed_cap_mask 0x%x\n",
				    bp->port.link_config,
				    bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_10G_CX4:
	case PORT_FEATURE_LINK_SPEED_10G_KX4:
	case PORT_FEATURE_LINK_SPEED_10G_KR:
		if (bp->port.supported & SUPPORTED_10000baseT_Full) {
			bp->link_params.req_line_speed = SPEED_10000;
			bp->port.advertising = (ADVERTISED_10000baseT_Full |
						ADVERTISED_FIBRE);
		} else {
			BNX2X_ERROR("NVRAM config error. "
				    "Invalid link_config 0x%x"
				    "  speed_cap_mask 0x%x\n",
				    bp->port.link_config,
				    bp->link_params.speed_cap_mask);
			return;
		}
		break;

	default:
		BNX2X_ERROR("NVRAM config error. "
			    "BAD link speed link_config 0x%x\n",
			    bp->port.link_config);
		bp->link_params.req_line_speed = SPEED_AUTO_NEG;
		bp->port.advertising = bp->port.supported;
		break;
	}

	bp->link_params.req_flow_ctrl = (bp->port.link_config &
					 PORT_FEATURE_FLOW_CONTROL_MASK);
	if ((bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
	    !(bp->port.supported & SUPPORTED_Autoneg))
		bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;

	BNX2X_DEV_INFO("req_line_speed %d  req_duplex %d  req_flow_ctrl 0x%x"
		       "  advertising 0x%x\n",
		       bp->link_params.req_line_speed,
		       bp->link_params.req_duplex,
		       bp->link_params.req_flow_ctrl, bp->port.advertising);
}
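/* Shared memory stores a MAC address split into a 16-bit "upper" half and a
 * 32-bit "lower" half.  Converting each half to big endian and laying them
 * out back to back yields the address in network byte order.  As an
 * illustrative (made-up) example: mac_hi 0x0010 and mac_lo 0x18365d2f
 * produce the station address 00:10:18:36:5d:2f.
 */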
static void __devinit bnx2x_set_mac_buf(u8 *mac_buf, u32 mac_lo, u16 mac_hi)
{
	mac_hi = cpu_to_be16(mac_hi);
	mac_lo = cpu_to_be32(mac_lo);
	memcpy(mac_buf, &mac_hi, sizeof(mac_hi));
	memcpy(mac_buf + sizeof(mac_hi), &mac_lo, sizeof(mac_lo));
}
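/* Read the per-port hardware configuration from shared memory: lane and
 * external PHY configuration, speed capability mask and link configuration,
 * then derive the requested link settings and the port MAC address from it.
 */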
static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 val, val2;
	u32 config;
	u16 i;
	u32 ext_phy_type;

	bp->link_params.bp = bp;
	bp->link_params.port = port;

	bp->link_params.lane_config =
		SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
	bp->link_params.ext_phy_config =
		SHMEM_RD(bp,
			 dev_info.port_hw_config[port].external_phy_config);
	/* BCM8727_NOC => BCM8727 no over current */
	if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
	    PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727_NOC) {
		bp->link_params.ext_phy_config &=
			~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
		bp->link_params.ext_phy_config |=
			PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727;
		bp->link_params.feature_config_flags |=
			FEATURE_CONFIG_BCM8727_NOC;
	}

	bp->link_params.speed_cap_mask =
		SHMEM_RD(bp,
			 dev_info.port_hw_config[port].speed_capability_mask);

	bp->port.link_config =
		SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);

	/* Get the 4 lanes xgxs config rx and tx */
	for (i = 0; i < 2; i++) {
		val = SHMEM_RD(bp,
			  dev_info.port_hw_config[port].xgxs_config_rx[i<<1]);
		bp->link_params.xgxs_config_rx[i << 1] = ((val>>16) & 0xffff);
		bp->link_params.xgxs_config_rx[(i << 1) + 1] = (val & 0xffff);

		val = SHMEM_RD(bp,
			  dev_info.port_hw_config[port].xgxs_config_tx[i<<1]);
		bp->link_params.xgxs_config_tx[i << 1] = ((val>>16) & 0xffff);
		bp->link_params.xgxs_config_tx[(i << 1) + 1] = (val & 0xffff);
	}

	/* If the device is capable of WoL, set the default state according
	 * to the HW
	 */
	config = SHMEM_RD(bp, dev_info.port_feature_config[port].config);
	bp->wol = (!(bp->flags & NO_WOL_FLAG) &&
		   (config & PORT_FEATURE_WOL_ENABLED));

	BNX2X_DEV_INFO("lane_config 0x%08x  ext_phy_config 0x%08x"
		       "  speed_cap_mask 0x%08x  link_config 0x%08x\n",
		       bp->link_params.lane_config,
		       bp->link_params.ext_phy_config,
		       bp->link_params.speed_cap_mask, bp->port.link_config);

	bp->link_params.switch_cfg |= (bp->port.link_config &
				       PORT_FEATURE_CONNECTED_SWITCH_MASK);
	bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);

	bnx2x_link_settings_requested(bp);

	/*
	 * If connected directly, work with the internal PHY, otherwise, work
	 * with the external PHY
	 */
	ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
	if (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)
		bp->mdio.prtad = bp->link_params.phy_addr;

	else if ((ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) &&
		 (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN))
		bp->mdio.prtad =
			XGXS_EXT_PHY_ADDR(bp->link_params.ext_phy_config);

	val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
	val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
	bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
	memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
	memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);

#ifdef BCM_CNIC
	val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].iscsi_mac_upper);
	val = SHMEM_RD(bp, dev_info.port_hw_config[port].iscsi_mac_lower);
	bnx2x_set_mac_buf(bp->iscsi_mac, val, val2);
#endif
}
static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	u32 val, val2;
	int rc = 0;

	bnx2x_get_common_hwinfo(bp);

	bp->e1hov = 0;
	bp->e1hmf = 0;
	if (CHIP_IS_E1H(bp) && !BP_NOMCP(bp)) {
		bp->mf_config =
			SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);

		val = (SHMEM_RD(bp, mf_cfg.func_mf_config[FUNC_0].e1hov_tag) &
		       FUNC_MF_CFG_E1HOV_TAG_MASK);
		if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT)
			bp->e1hmf = 1;
		BNX2X_DEV_INFO("%s function mode\n",
			       IS_E1HMF(bp) ? "multi" : "single");

		if (IS_E1HMF(bp)) {
			val = (SHMEM_RD(bp, mf_cfg.func_mf_config[func].
								e1hov_tag) &
			       FUNC_MF_CFG_E1HOV_TAG_MASK);
			if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
				bp->e1hov = val;
				BNX2X_DEV_INFO("E1HOV for func %d is %d "
					       "(0x%04x)\n",
					       func, bp->e1hov, bp->e1hov);
			} else {
				BNX2X_ERROR("No valid E1HOV for func %d,"
					    " aborting\n", func);
				rc = -EPERM;
			}
		} else {
			if (BP_E1HVN(bp)) {
				BNX2X_ERROR("VN %d in single function mode,"
					    " aborting\n", BP_E1HVN(bp));
				rc = -EPERM;
			}
		}
	}

	if (!BP_NOMCP(bp)) {
		bnx2x_get_port_hwinfo(bp);

		bp->fw_seq = (SHMEM_RD(bp, func_mb[func].drv_mb_header) &
			      DRV_MSG_SEQ_NUMBER_MASK);
		BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
	}

	if (IS_E1HMF(bp)) {
		val2 = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_upper);
		val = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_lower);
		if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
		    (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
			bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
			bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
			bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
			bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
			bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
			bp->dev->dev_addr[5] = (u8)(val & 0xff);
			memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
			       ETH_ALEN);
			memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
			       ETH_ALEN);
		}

		return rc;
	}

	if (BP_NOMCP(bp)) {
		/* only supposed to happen on emulation/FPGA */
		BNX2X_ERROR("warning: random MAC workaround active\n");
		random_ether_addr(bp->dev->dev_addr);
		memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
	}

	return rc;
}
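/* Extract an OEM version string from the PCI Vital Product Data.  The VPD is
 * scanned for the read-only data tag, then for the manufacturer-ID keyword;
 * if the board identifies itself with the Dell vendor ID, the first
 * vendor-specific keyword is copied into bp->fw_ver.
 */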
static void __devinit bnx2x_read_fwinfo(struct bnx2x *bp)
{
	int cnt, i, block_end, rodi;
	char vpd_data[BNX2X_VPD_LEN+1];
	char str_id_reg[VENDOR_ID_LEN+1];
	char str_id_cap[VENDOR_ID_LEN+1];
	u8 len;

	cnt = pci_read_vpd(bp->pdev, 0, BNX2X_VPD_LEN, vpd_data);
	memset(bp->fw_ver, 0, sizeof(bp->fw_ver));

	if (cnt < BNX2X_VPD_LEN)
		goto out_not_found;

	i = pci_vpd_find_tag(vpd_data, 0, BNX2X_VPD_LEN,
			     PCI_VPD_LRDT_RO_DATA);
	if (i < 0)
		goto out_not_found;

	block_end = i + PCI_VPD_LRDT_TAG_SIZE +
		    pci_vpd_lrdt_size(&vpd_data[i]);

	i += PCI_VPD_LRDT_TAG_SIZE;

	if (block_end > BNX2X_VPD_LEN)
		goto out_not_found;

	rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
					 PCI_VPD_RO_KEYWORD_MFR_ID);
	if (rodi < 0)
		goto out_not_found;

	len = pci_vpd_info_field_size(&vpd_data[rodi]);

	if (len != VENDOR_ID_LEN)
		goto out_not_found;

	rodi += PCI_VPD_INFO_FLD_HDR_SIZE;

	/* vendor specific info */
	snprintf(str_id_reg, VENDOR_ID_LEN + 1, "%04x", PCI_VENDOR_ID_DELL);
	snprintf(str_id_cap, VENDOR_ID_LEN + 1, "%04X", PCI_VENDOR_ID_DELL);
	if (!strncmp(str_id_reg, &vpd_data[rodi], VENDOR_ID_LEN) ||
	    !strncmp(str_id_cap, &vpd_data[rodi], VENDOR_ID_LEN)) {

		rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
						 PCI_VPD_RO_KEYWORD_VENDOR0);
		if (rodi >= 0) {
			len = pci_vpd_info_field_size(&vpd_data[rodi]);

			rodi += PCI_VPD_INFO_FLD_HDR_SIZE;

			if (len < 32 && (len + rodi) <= BNX2X_VPD_LEN) {
				memcpy(bp->fw_ver, &vpd_data[rodi], len);
				bp->fw_ver[len] = ' ';
			}
		}
		return;
	}
out_not_found:
	return;
}
static int __devinit bnx2x_init_bp(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	int timer_interval;
	int rc;

	/* Disable interrupt handling until HW is initialized */
	atomic_set(&bp->intr_sem, 1);
	smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */

	mutex_init(&bp->port.phy_mutex);
	mutex_init(&bp->fw_mb_mutex);
	spin_lock_init(&bp->stats_lock);
#ifdef BCM_CNIC
	mutex_init(&bp->cnic_mutex);
#endif

	INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
	INIT_DELAYED_WORK(&bp->reset_task, bnx2x_reset_task);

	rc = bnx2x_get_hwinfo(bp);

	bnx2x_read_fwinfo(bp);
	/* need to reset chip if undi was active */
	if (!BP_NOMCP(bp))
		bnx2x_undi_unload(bp);

	if (CHIP_REV_IS_FPGA(bp))
		dev_err(&bp->pdev->dev, "FPGA detected\n");

	if (BP_NOMCP(bp) && (func == 0))
		dev_err(&bp->pdev->dev, "MCP disabled, "
					"must load devices in order!\n");

	/* Set multi queue mode */
	if ((multi_mode != ETH_RSS_MODE_DISABLED) &&
	    ((int_mode == INT_MODE_INTx) || (int_mode == INT_MODE_MSI))) {
		dev_err(&bp->pdev->dev, "Multi disabled since int_mode "
					"requested is not MSI-X\n");
		multi_mode = ETH_RSS_MODE_DISABLED;
	}
	bp->multi_mode = multi_mode;
	bp->int_mode = int_mode;

	bp->dev->features |= NETIF_F_GRO;

	/* Set TPA flags */
	if (disable_tpa) {
		bp->flags &= ~TPA_ENABLE_FLAG;
		bp->dev->features &= ~NETIF_F_LRO;
	} else {
		bp->flags |= TPA_ENABLE_FLAG;
		bp->dev->features |= NETIF_F_LRO;
	}
	bp->disable_tpa = disable_tpa;

	if (CHIP_IS_E1(bp))
		bp->dropless_fc = 0;
	else
		bp->dropless_fc = dropless_fc;

	bp->tx_ring_size = MAX_TX_AVAIL;
	bp->rx_ring_size = MAX_RX_AVAIL;
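	/* The coalescing timeouts below are rounded down to a multiple of
	 * 4 * BNX2X_BTR.  Assuming BNX2X_BTR is 4 (its value in bnx2x.h at
	 * the time of writing), 50 us becomes 48 us for TX and 25 us becomes
	 * 16 us for RX.
	 */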
	/* make sure that the numbers are in the right granularity */
	bp->tx_ticks = (50 / (4 * BNX2X_BTR)) * (4 * BNX2X_BTR);
	bp->rx_ticks = (25 / (4 * BNX2X_BTR)) * (4 * BNX2X_BTR);

	timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
	bp->current_interval = (poll ? poll : timer_interval);

	init_timer(&bp->timer);
	bp->timer.expires = jiffies + bp->current_interval;
	bp->timer.data = (unsigned long) bp;
	bp->timer.function = bnx2x_timer;

	return rc;
}
/****************************************************************************
* General service functions
****************************************************************************/
/* called with rtnl_lock */
static int bnx2x_open(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	netif_carrier_off(dev);

	bnx2x_set_power_state(bp, PCI_D0);

	if (!bnx2x_reset_is_done(bp)) {
		do {
			/* Reset MCP mail box sequence if there is on going
			 * recovery
			 */
			bp->fw_seq = 0;

			/* If it's the first function to load and reset done
			 * is still not cleared, an earlier recovery may not
			 * have completed. We don't check the attention state
			 * here because it may have already been cleared by a
			 * "common" reset but we shall proceed with "process
			 * kill" anyway.
			 */
			if ((bnx2x_get_load_cnt(bp) == 0) &&
			    bnx2x_trylock_hw_lock(bp,
			    HW_LOCK_RESOURCE_RESERVED_08) &&
			    (!bnx2x_leader_reset(bp))) {
				DP(NETIF_MSG_HW, "Recovered in open\n");
				break;
			}

			bnx2x_set_power_state(bp, PCI_D3hot);

			printk(KERN_ERR "%s: Recovery flow hasn't been"
			       " properly completed yet. Try again later."
			       " If you still see this message after a few"
			       " retries then power cycle is required.\n",
			       bp->dev->name);

			return -EAGAIN;
		} while (0);
	}

	bp->recovery_state = BNX2X_RECOVERY_DONE;

	return bnx2x_nic_load(bp, LOAD_OPEN);
}
/* called with rtnl_lock */
static int bnx2x_close(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	/* Unload the driver, release IRQs */
	bnx2x_nic_unload(bp, UNLOAD_CLOSE);
	bnx2x_set_power_state(bp, PCI_D3hot);

	return 0;
}
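/* RX filtering policy, in decreasing order of precedence: promiscuous mode,
 * accept-all-multicast, then an exact-match CAM (E1) or a 256-bit hash
 * filter (E1H) for the multicast list.
 */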
/* called with netif_tx_lock from dev_mcast.c */
void bnx2x_set_rx_mode(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	u32 rx_mode = BNX2X_RX_MODE_NORMAL;
	int port = BP_PORT(bp);

	if (bp->state != BNX2X_STATE_OPEN) {
		DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
		return;
	}

	DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);

	if (dev->flags & IFF_PROMISC)
		rx_mode = BNX2X_RX_MODE_PROMISC;

	else if ((dev->flags & IFF_ALLMULTI) ||
		 ((netdev_mc_count(dev) > BNX2X_MAX_MULTICAST) &&
		  CHIP_IS_E1(bp)))
		rx_mode = BNX2X_RX_MODE_ALLMULTI;

	else { /* some multicasts */
		if (CHIP_IS_E1(bp)) {
			int i, old, offset;
			struct netdev_hw_addr *ha;
			struct mac_configuration_cmd *config =
						bnx2x_sp(bp, mcast_config);

			i = 0;
			netdev_for_each_mc_addr(ha, dev) {
				config->config_table[i].
					cam_entry.msb_mac_addr =
					swab16(*(u16 *)&ha->addr[0]);
				config->config_table[i].
					cam_entry.middle_mac_addr =
					swab16(*(u16 *)&ha->addr[2]);
				config->config_table[i].
					cam_entry.lsb_mac_addr =
					swab16(*(u16 *)&ha->addr[4]);
				config->config_table[i].cam_entry.flags =
							cpu_to_le16(port);
				config->config_table[i].
					target_table_entry.flags = 0;
				config->config_table[i].target_table_entry.
					clients_bit_vector =
						cpu_to_le32(1 << BP_L_ID(bp));
				config->config_table[i].
					target_table_entry.vlan_id = 0;

				DP(NETIF_MSG_IFUP,
				   "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
				   config->config_table[i].
						cam_entry.msb_mac_addr,
				   config->config_table[i].
						cam_entry.middle_mac_addr,
				   config->config_table[i].
						cam_entry.lsb_mac_addr);
				i++;
			}
			old = config->hdr.length;
			if (old > i) {
				for (; i < old; i++) {
					if (CAM_IS_INVALID(config->
							   config_table[i])) {
						/* already invalidated */
						break;
					}
					/* invalidate */
					CAM_INVALIDATE(config->
						       config_table[i]);
				}
			}

			if (CHIP_REV_IS_SLOW(bp))
				offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
			else
				offset = BNX2X_MAX_MULTICAST*(1 + port);

			config->hdr.length = i;
			config->hdr.offset = offset;
			config->hdr.client_id = bp->fp->cl_id;
			config->hdr.reserved1 = 0;

			bp->set_mac_pending++;
			smp_wmb();

			bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
				   U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
				   U64_LO(bnx2x_sp_mapping(bp, mcast_config)),
				      0);
		} else { /* E1H */
			/* Accept one or more multicasts */
			struct netdev_hw_addr *ha;
			u32 mc_filter[MC_HASH_SIZE];
			u32 crc, bit, regidx;
			int i;

			memset(mc_filter, 0, 4 * MC_HASH_SIZE);

			netdev_for_each_mc_addr(ha, dev) {
				DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
				   ha->addr);

				crc = crc32c_le(0, ha->addr, ETH_ALEN);
				bit = (crc >> 24) & 0xff;
				regidx = bit >> 5;
				bit &= 0x1f;
				mc_filter[regidx] |= (1 << bit);
			}

			for (i = 0; i < MC_HASH_SIZE; i++)
				REG_WR(bp, MC_HASH_OFFSET(bp, i),
				       mc_filter[i]);
		}
	}

	bp->rx_mode = rx_mode;
	bnx2x_set_storm_rx_mode(bp);
}
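/* A note on the E1H multicast hash used above: each MAC address is hashed
 * with crc32c, the top eight bits of the CRC select one of 256 filter bits,
 * and that bit is set in one of eight 32-bit MC_HASH registers (regidx =
 * bit >> 5, bit index = bit & 0x1f).  For example, a CRC of 0xa7xxxxxx
 * (0xa7 = 167) would set bit 7 of register 5.
 */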
/* called with rtnl_lock */
static int bnx2x_mdio_read(struct net_device *netdev, int prtad,
			   int devad, u16 addr)
{
	struct bnx2x *bp = netdev_priv(netdev);
	u16 value;
	int rc;
	u32 phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);

	DP(NETIF_MSG_LINK, "mdio_read: prtad 0x%x, devad 0x%x, addr 0x%x\n",
	   prtad, devad, addr);

	if (prtad != bp->mdio.prtad) {
		DP(NETIF_MSG_LINK, "prtad mismatch (cmd:0x%x != bp:0x%x)\n",
		   prtad, bp->mdio.prtad);
		return -EINVAL;
	}

	/* The HW expects different devad if CL22 is used */
	devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;

	bnx2x_acquire_phy_lock(bp);
	rc = bnx2x_cl45_read(bp, BP_PORT(bp), phy_type, prtad,
			     devad, addr, &value);
	bnx2x_release_phy_lock(bp);
	DP(NETIF_MSG_LINK, "mdio_read_val 0x%x rc = 0x%x\n", value, rc);

	if (!rc)
		rc = value;
	return rc;
}
/* called with rtnl_lock */
static int bnx2x_mdio_write(struct net_device *netdev, int prtad, int devad,
			    u16 addr, u16 value)
{
	struct bnx2x *bp = netdev_priv(netdev);
	u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
	int rc;

	DP(NETIF_MSG_LINK, "mdio_write: prtad 0x%x, devad 0x%x, addr 0x%x,"
	   " value 0x%x\n", prtad, devad, addr, value);

	if (prtad != bp->mdio.prtad) {
		DP(NETIF_MSG_LINK, "prtad mismatch (cmd:0x%x != bp:0x%x)\n",
		   prtad, bp->mdio.prtad);
		return -EINVAL;
	}

	/* The HW expects different devad if CL22 is used */
	devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;

	bnx2x_acquire_phy_lock(bp);
	rc = bnx2x_cl45_write(bp, BP_PORT(bp), ext_phy_type, prtad,
			      devad, addr, value);
	bnx2x_release_phy_lock(bp);

	return rc;
}
/* called with rtnl_lock */
static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct mii_ioctl_data *mdio = if_mii(ifr);

	DP(NETIF_MSG_LINK, "ioctl: phy id 0x%x, reg 0x%x, val_in 0x%x\n",
	   mdio->phy_id, mdio->reg_num, mdio->val_in);

	if (!netif_running(dev))
		return -EAGAIN;

	return mdio_mii_ioctl(&bp->mdio, mdio, cmd);
}
#ifdef CONFIG_NET_POLL_CONTROLLER
static void poll_bnx2x(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	disable_irq(bp->pdev->irq);
	bnx2x_interrupt(bp->pdev->irq, dev);
	enable_irq(bp->pdev->irq);
}
#endif
static const struct net_device_ops bnx2x_netdev_ops = {
	.ndo_open		= bnx2x_open,
	.ndo_stop		= bnx2x_close,
	.ndo_start_xmit		= bnx2x_start_xmit,
	.ndo_set_multicast_list	= bnx2x_set_rx_mode,
	.ndo_set_mac_address	= bnx2x_change_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_do_ioctl		= bnx2x_ioctl,
	.ndo_change_mtu		= bnx2x_change_mtu,
	.ndo_tx_timeout		= bnx2x_tx_timeout,
#ifdef BCM_VLAN
	.ndo_vlan_rx_register	= bnx2x_vlan_rx_register,
#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= poll_bnx2x,
#endif
};
static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
				    struct net_device *dev)
{
	struct bnx2x *bp;
	int rc;

	SET_NETDEV_DEV(dev, &pdev->dev);
	bp = netdev_priv(dev);

	bp->dev = dev;
	bp->pdev = pdev;
	bp->flags = 0;
	bp->func = PCI_FUNC(pdev->devfn);

	rc = pci_enable_device(pdev);
	if (rc) {
		dev_err(&bp->pdev->dev,
			"Cannot enable PCI device, aborting\n");
		goto err_out;
	}

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		dev_err(&bp->pdev->dev,
			"Cannot find PCI device base address, aborting\n");
		rc = -ENODEV;
		goto err_out_disable;
	}

	if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
		dev_err(&bp->pdev->dev, "Cannot find second PCI device"
			" base address, aborting\n");
		rc = -ENODEV;
		goto err_out_disable;
	}

	if (atomic_read(&pdev->enable_cnt) == 1) {
		rc = pci_request_regions(pdev, DRV_MODULE_NAME);
		if (rc) {
			dev_err(&bp->pdev->dev,
				"Cannot obtain PCI resources, aborting\n");
			goto err_out_disable;
		}

		pci_set_master(pdev);
		pci_save_state(pdev);
	}

	bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (bp->pm_cap == 0) {
		dev_err(&bp->pdev->dev,
			"Cannot find power management capability, aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
	if (bp->pcie_cap == 0) {
		dev_err(&bp->pdev->dev,
			"Cannot find PCI Express capability, aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) == 0) {
		bp->flags |= USING_DAC_FLAG;
		if (dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64)) != 0) {
			dev_err(&bp->pdev->dev, "dma_set_coherent_mask"
				" failed, aborting\n");
			rc = -EIO;
			goto err_out_release;
		}

	} else if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)) != 0) {
		dev_err(&bp->pdev->dev,
			"System does not support DMA, aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	dev->mem_start = pci_resource_start(pdev, 0);
	dev->base_addr = dev->mem_start;
	dev->mem_end = pci_resource_end(pdev, 0);

	dev->irq = pdev->irq;

	bp->regview = pci_ioremap_bar(pdev, 0);
	if (!bp->regview) {
		dev_err(&bp->pdev->dev,
			"Cannot map register space, aborting\n");
		rc = -ENOMEM;
		goto err_out_release;
	}

	bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
					min_t(u64, BNX2X_DB_SIZE,
					      pci_resource_len(pdev, 2)));
	if (!bp->doorbells) {
		dev_err(&bp->pdev->dev,
			"Cannot map doorbell space, aborting\n");
		rc = -ENOMEM;
		goto err_out_unmap;
	}

	bnx2x_set_power_state(bp, PCI_D0);

	/* clean indirect addresses */
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
	REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);

	/* Reset the load counter */
	bnx2x_clear_load_cnt(bp);

	dev->watchdog_timeo = TX_TIMEOUT;

	dev->netdev_ops = &bnx2x_netdev_ops;
	bnx2x_set_ethtool_ops(dev);
	dev->features |= NETIF_F_SG;
	dev->features |= NETIF_F_HW_CSUM;
	if (bp->flags & USING_DAC_FLAG)
		dev->features |= NETIF_F_HIGHDMA;
	dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
	dev->features |= NETIF_F_TSO6;
#ifdef BCM_VLAN
	dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
	bp->flags |= (HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);

	dev->vlan_features |= NETIF_F_SG;
	dev->vlan_features |= NETIF_F_HW_CSUM;
	if (bp->flags & USING_DAC_FLAG)
		dev->vlan_features |= NETIF_F_HIGHDMA;
	dev->vlan_features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
	dev->vlan_features |= NETIF_F_TSO6;
#endif

	/* get_port_hwinfo() will set prtad and mmds properly */
	bp->mdio.prtad = MDIO_PRTAD_NONE;
	bp->mdio.mmds = 0;
	bp->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
	bp->mdio.dev = dev;
	bp->mdio.mdio_read = bnx2x_mdio_read;
	bp->mdio.mdio_write = bnx2x_mdio_write;

	return 0;

err_out_unmap:
	if (bp->regview) {
		iounmap(bp->regview);
		bp->regview = NULL;
	}
	if (bp->doorbells) {
		iounmap(bp->doorbells);
		bp->doorbells = NULL;
	}

err_out_release:
	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

err_out_disable:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

err_out:
	return rc;
}
static void __devinit bnx2x_get_pcie_width_speed(struct bnx2x *bp,
						 int *width, int *speed)
{
	u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);

	*width = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;

	/* return value of 1=2.5GHz 2=5GHz */
	*speed = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
}
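/* Validate a firmware image before using it.  The file starts with a header
 * of {offset, length} section descriptors (all fields big endian); every
 * section must lie inside the file, every init_ops offset must point into
 * the init_ops array, and the embedded version must match the version this
 * driver was built against.
 */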
static int __devinit bnx2x_check_firmware(struct bnx2x *bp)
{
	const struct firmware *firmware = bp->firmware;
	struct bnx2x_fw_file_hdr *fw_hdr;
	struct bnx2x_fw_file_section *sections;
	u32 offset, len, num_ops;
	u16 *ops_offsets;
	int i;
	const u8 *fw_ver;

	if (firmware->size < sizeof(struct bnx2x_fw_file_hdr))
		return -EINVAL;

	fw_hdr = (struct bnx2x_fw_file_hdr *)firmware->data;
	sections = (struct bnx2x_fw_file_section *)fw_hdr;

	/* Make sure none of the offsets and sizes make us read beyond
	 * the end of the firmware data */
	for (i = 0; i < sizeof(*fw_hdr) / sizeof(*sections); i++) {
		offset = be32_to_cpu(sections[i].offset);
		len = be32_to_cpu(sections[i].len);
		if (offset + len > firmware->size) {
			dev_err(&bp->pdev->dev,
				"Section %d length is out of bounds\n", i);
			return -EINVAL;
		}
	}

	/* Likewise for the init_ops offsets */
	offset = be32_to_cpu(fw_hdr->init_ops_offsets.offset);
	ops_offsets = (u16 *)(firmware->data + offset);
	num_ops = be32_to_cpu(fw_hdr->init_ops.len) / sizeof(struct raw_op);

	for (i = 0; i < be32_to_cpu(fw_hdr->init_ops_offsets.len) / 2; i++) {
		if (be16_to_cpu(ops_offsets[i]) > num_ops) {
			dev_err(&bp->pdev->dev,
				"Section offset %d is out of bounds\n", i);
			return -EINVAL;
		}
	}

	/* Check FW version */
	offset = be32_to_cpu(fw_hdr->fw_version.offset);
	fw_ver = firmware->data + offset;
	if ((fw_ver[0] != BCM_5710_FW_MAJOR_VERSION) ||
	    (fw_ver[1] != BCM_5710_FW_MINOR_VERSION) ||
	    (fw_ver[2] != BCM_5710_FW_REVISION_VERSION) ||
	    (fw_ver[3] != BCM_5710_FW_ENGINEERING_VERSION)) {
		dev_err(&bp->pdev->dev,
			"Bad FW version:%d.%d.%d.%d. Should be %d.%d.%d.%d\n",
			fw_ver[0], fw_ver[1], fw_ver[2],
			fw_ver[3], BCM_5710_FW_MAJOR_VERSION,
			BCM_5710_FW_MINOR_VERSION,
			BCM_5710_FW_REVISION_VERSION,
			BCM_5710_FW_ENGINEERING_VERSION);
		return -EINVAL;
	}

	return 0;
}
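/* The firmware file is stored entirely in big-endian format.  The helpers
 * below convert the raw arrays to host byte order once at load time so the
 * init code can use them directly.
 */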
static inline void be32_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
{
	const __be32 *source = (const __be32 *)_source;
	u32 *target = (u32 *)_target;
	u32 i;

	for (i = 0; i < n/4; i++)
		target[i] = be32_to_cpu(source[i]);
}
/*
   Ops array is stored in the following format:
   {op(8bit), offset(24bit, big endian), data(32bit, big endian)}
 */
static inline void bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n)
{
	const __be32 *source = (const __be32 *)_source;
	struct raw_op *target = (struct raw_op *)_target;
	u32 i, j, tmp;

	for (i = 0, j = 0; i < n/8; i++, j += 2) {
		tmp = be32_to_cpu(source[j]);
		target[i].op = (tmp >> 24) & 0xff;
		target[i].offset = tmp & 0xffffff;
		target[i].raw_data = be32_to_cpu(source[j + 1]);
	}
}
static inline void be16_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
{
	const __be16 *source = (const __be16 *)_source;
	u16 *target = (u16 *)_target;
	u32 i;

	for (i = 0; i < n/2; i++)
		target[i] = be16_to_cpu(source[i]);
}
#define BNX2X_ALLOC_AND_SET(arr, lbl, func)				\
do {									\
	u32 len = be32_to_cpu(fw_hdr->arr.len);				\
	bp->arr = kmalloc(len, GFP_KERNEL);				\
	if (!bp->arr) {							\
		pr_err("Failed to allocate %d bytes for "#arr"\n", len); \
		goto lbl;						\
	}								\
	func(bp->firmware->data + be32_to_cpu(fw_hdr->arr.offset),	\
	     (u8 *)bp->arr, len);					\
} while (0)
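/* Typical use, as in bnx2x_init_firmware() below:
 *	BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit, be32_to_cpu_n);
 * which allocates bp->init_data, converts it from the firmware blob and
 * jumps to the given error label if the allocation fails.
 */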
static int __devinit bnx2x_init_firmware(struct bnx2x *bp, struct device *dev)
{
	const char *fw_file_name;
	struct bnx2x_fw_file_hdr *fw_hdr;
	int rc;

	if (CHIP_IS_E1(bp))
		fw_file_name = FW_FILE_NAME_E1;
	else if (CHIP_IS_E1H(bp))
		fw_file_name = FW_FILE_NAME_E1H;
	else {
		dev_err(dev, "Unsupported chip revision\n");
		return -EINVAL;
	}

	dev_info(dev, "Loading %s\n", fw_file_name);

	rc = request_firmware(&bp->firmware, fw_file_name, dev);
	if (rc) {
		dev_err(dev, "Can't load firmware file %s\n", fw_file_name);
		goto request_firmware_exit;
	}

	rc = bnx2x_check_firmware(bp);
	if (rc) {
		dev_err(dev, "Corrupt firmware file %s\n", fw_file_name);
		goto request_firmware_exit;
	}

	fw_hdr = (struct bnx2x_fw_file_hdr *)bp->firmware->data;

	/* Initialize the pointers to the init arrays */
	/* Blob */
	BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit, be32_to_cpu_n);

	/* Opcodes */
	BNX2X_ALLOC_AND_SET(init_ops, init_ops_alloc_err, bnx2x_prep_ops);

	/* Offsets */
	BNX2X_ALLOC_AND_SET(init_ops_offsets, init_offsets_alloc_err,
			    be16_to_cpu_n);

	/* STORMs firmware */
	INIT_TSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->tsem_int_table_data.offset);
	INIT_TSEM_PRAM_DATA(bp)      = bp->firmware->data +
			be32_to_cpu(fw_hdr->tsem_pram_data.offset);
	INIT_USEM_INT_TABLE_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->usem_int_table_data.offset);
	INIT_USEM_PRAM_DATA(bp)      = bp->firmware->data +
			be32_to_cpu(fw_hdr->usem_pram_data.offset);
	INIT_XSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->xsem_int_table_data.offset);
	INIT_XSEM_PRAM_DATA(bp)      = bp->firmware->data +
			be32_to_cpu(fw_hdr->xsem_pram_data.offset);
	INIT_CSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->csem_int_table_data.offset);
	INIT_CSEM_PRAM_DATA(bp)      = bp->firmware->data +
			be32_to_cpu(fw_hdr->csem_pram_data.offset);

	return 0;

init_offsets_alloc_err:
	kfree(bp->init_ops);
init_ops_alloc_err:
	kfree(bp->init_data);
request_firmware_exit:
	release_firmware(bp->firmware);

	return rc;
}
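/* Probe flow: allocate the netdev, map the BARs and set up PCI
 * (bnx2x_init_dev), read chip/port configuration (bnx2x_init_bp), load and
 * verify the firmware, then register the netdev.
 */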
static int __devinit bnx2x_init_one(struct pci_dev *pdev,
				    const struct pci_device_id *ent)
{
	struct net_device *dev = NULL;
	struct bnx2x *bp;
	int pcie_width, pcie_speed;
	int rc;

	/* dev zeroed in init_etherdev */
	dev = alloc_etherdev_mq(sizeof(*bp), MAX_CONTEXT);
	if (!dev) {
		dev_err(&pdev->dev, "Cannot allocate net device\n");
		return -ENOMEM;
	}

	bp = netdev_priv(dev);
	bp->msg_enable = debug;

	pci_set_drvdata(pdev, dev);

	rc = bnx2x_init_dev(pdev, dev);
	if (rc < 0) {
		free_netdev(dev);
		return rc;
	}

	rc = bnx2x_init_bp(bp);
	if (rc)
		goto init_one_exit;

	/* Set init arrays */
	rc = bnx2x_init_firmware(bp, &pdev->dev);
	if (rc) {
		dev_err(&pdev->dev, "Error loading firmware\n");
		goto init_one_exit;
	}

	rc = register_netdev(dev);
	if (rc) {
		dev_err(&pdev->dev, "Cannot register net device\n");
		goto init_one_exit;
	}

	bnx2x_get_pcie_width_speed(bp, &pcie_width, &pcie_speed);
	netdev_info(dev, "%s (%c%d) PCI-E x%d %s found at mem %lx,"
	       " IRQ %d, ", board_info[ent->driver_data].name,
	       (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
	       pcie_width, (pcie_speed == 2) ? "5GHz (Gen2)" : "2.5GHz",
	       dev->base_addr, bp->pdev->irq);
	pr_cont("node addr %pM\n", dev->dev_addr);

	return 0;

init_one_exit:
	if (bp->regview)
		iounmap(bp->regview);

	if (bp->doorbells)
		iounmap(bp->doorbells);

	free_netdev(dev);

	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

	return rc;
}
static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;

	if (!dev) {
		dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
		return;
	}
	bp = netdev_priv(dev);

	unregister_netdev(dev);

	/* Make sure RESET task is not scheduled before continuing */
	cancel_delayed_work_sync(&bp->reset_task);

	kfree(bp->init_ops_offsets);
	kfree(bp->init_ops);
	kfree(bp->init_data);
	release_firmware(bp->firmware);

	if (bp->regview)
		iounmap(bp->regview);

	if (bp->doorbells)
		iounmap(bp->doorbells);

	free_netdev(dev);

	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}
static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
{
	int i;

	bp->state = BNX2X_STATE_ERROR;

	bp->rx_mode = BNX2X_RX_MODE_NONE;

	bnx2x_netif_stop(bp, 0);
	netif_carrier_off(bp->dev);

	del_timer_sync(&bp->timer);
	bp->stats_state = STATS_STATE_DISABLED;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	/* Release IRQs */
	bnx2x_free_irq(bp, false);

	if (CHIP_IS_E1(bp)) {
		struct mac_configuration_cmd *config =
						bnx2x_sp(bp, mcast_config);

		for (i = 0; i < config->hdr.length; i++)
			CAM_INVALIDATE(config->config_table[i]);
	}

	/* Free SKBs, SGEs, TPA pool and driver internals */
	bnx2x_free_skbs(bp);
	for_each_queue(bp, i)
		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
	for_each_queue(bp, i)
		netif_napi_del(&bnx2x_fp(bp, i, napi));
	bnx2x_free_mem(bp);

	bp->state = BNX2X_STATE_CLOSED;

	return 0;
}
static void bnx2x_eeh_recover(struct bnx2x *bp)
{
	u32 val;

	mutex_init(&bp->port.phy_mutex);

	bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
	bp->link_params.shmem_base = bp->common.shmem_base;
	BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);

	if (!bp->common.shmem_base ||
	    (bp->common.shmem_base < 0xA0000) ||
	    (bp->common.shmem_base >= 0xC0000)) {
		BNX2X_DEV_INFO("MCP not active\n");
		bp->flags |= NO_MCP_FLAG;
		return;
	}

	val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
	if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		!= (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		BNX2X_ERR("BAD MCP validity signature\n");

	if (!BP_NOMCP(bp)) {
		bp->fw_seq = (SHMEM_RD(bp, func_mb[BP_FUNC(bp)].drv_mb_header)
			      & DRV_MSG_SEQ_NUMBER_MASK);
		BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
	}
}
/**
 * bnx2x_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
						pci_channel_state_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	netif_device_detach(dev);

	if (state == pci_channel_io_perm_failure) {
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}

	if (netif_running(dev))
		bnx2x_eeh_nic_unload(bp);

	pci_disable_device(pdev);

	rtnl_unlock();

	/* Request a slot reset */
	return PCI_ERS_RESULT_NEED_RESET;
}
/**
 * bnx2x_io_slot_reset - called after the PCI bus has been reset
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 */
static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	if (pci_enable_device(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset\n");
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}

	pci_set_master(pdev);
	pci_restore_state(pdev);

	if (netif_running(dev))
		bnx2x_set_power_state(bp, PCI_D0);

	rtnl_unlock();

	return PCI_ERS_RESULT_RECOVERED;
}
/**
 * bnx2x_io_resume - called when traffic can start flowing again
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation.
 */
static void bnx2x_io_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
		printk(KERN_ERR "Handling parity error recovery. Try again later\n");
		return;
	}

	rtnl_lock();

	bnx2x_eeh_recover(bp);

	if (netif_running(dev))
		bnx2x_nic_load(bp, LOAD_NORMAL);

	netif_device_attach(dev);

	rtnl_unlock();
}
static struct pci_error_handlers bnx2x_err_handler = {
	.error_detected = bnx2x_io_error_detected,
	.slot_reset     = bnx2x_io_slot_reset,
	.resume         = bnx2x_io_resume,
};
static struct pci_driver bnx2x_pci_driver = {
	.name        = DRV_MODULE_NAME,
	.id_table    = bnx2x_pci_tbl,
	.probe       = bnx2x_init_one,
	.remove      = __devexit_p(bnx2x_remove_one),
	.suspend     = bnx2x_suspend,
	.resume      = bnx2x_resume,
	.err_handler = &bnx2x_err_handler,
};
static int __init bnx2x_init(void)
{
	int ret;

	pr_info("%s", version);

	bnx2x_wq = create_singlethread_workqueue("bnx2x");
	if (bnx2x_wq == NULL) {
		pr_err("Cannot create workqueue\n");
		return -ENOMEM;
	}

	ret = pci_register_driver(&bnx2x_pci_driver);
	if (ret) {
		pr_err("Cannot register driver\n");
		destroy_workqueue(bnx2x_wq);
	}
	return ret;
}
static void __exit bnx2x_cleanup(void)
{
	pci_unregister_driver(&bnx2x_pci_driver);

	destroy_workqueue(bnx2x_wq);
}
module_init(bnx2x_init);
module_exit(bnx2x_cleanup);
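/* CNIC glue: everything below bridges the bnx2x L2 driver and the cnic
 * offload driver and is only built when BCM_CNIC is set.
 */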
#ifdef BCM_CNIC

/* count denotes the number of new completions we have seen */
static void bnx2x_cnic_sp_post(struct bnx2x *bp, int count)
{
	struct eth_spe *spe;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return;
#endif

	spin_lock_bh(&bp->spq_lock);
	bp->cnic_spq_pending -= count;

	for (; bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending;
	     bp->cnic_spq_pending++) {

		if (!bp->cnic_kwq_pending)
			break;

		spe = bnx2x_sp_get_next(bp);
		*spe = *bp->cnic_kwq_cons;

		bp->cnic_kwq_pending--;

		DP(NETIF_MSG_TIMER, "pending on SPQ %d, on KWQ %d count %d\n",
		   bp->cnic_spq_pending, bp->cnic_kwq_pending, count);

		if (bp->cnic_kwq_cons == bp->cnic_kwq_last)
			bp->cnic_kwq_cons = bp->cnic_kwq;
		else
			bp->cnic_kwq_cons++;
	}
	bnx2x_sp_prod_update(bp);
	spin_unlock_bh(&bp->spq_lock);
}
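/* The cnic work-queue entries (kwqes) are staged in a page-sized ring
 * (bp->cnic_kwq, bounded by bp->cnic_kwq_last) and drained into the
 * slow-path queue by bnx2x_cnic_sp_post() above as completions free up
 * room, so cnic can post more entries than max_kwqe_pending at once.
 */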
static int bnx2x_cnic_sp_queue(struct net_device *dev,
			       struct kwqe_16 *kwqes[], u32 count)
{
	struct bnx2x *bp = netdev_priv(dev);
	int i;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return -EIO;
#endif

	spin_lock_bh(&bp->spq_lock);

	for (i = 0; i < count; i++) {
		struct eth_spe *spe = (struct eth_spe *)kwqes[i];

		if (bp->cnic_kwq_pending == MAX_SP_DESC_CNT)
			break;

		*bp->cnic_kwq_prod = *spe;

		bp->cnic_kwq_pending++;

		DP(NETIF_MSG_TIMER, "L5 SPQE %x %x %x:%x pos %d\n",
		   spe->hdr.conn_and_cmd_data, spe->hdr.type,
		   spe->data.mac_config_addr.hi,
		   spe->data.mac_config_addr.lo,
		   bp->cnic_kwq_pending);

		if (bp->cnic_kwq_prod == bp->cnic_kwq_last)
			bp->cnic_kwq_prod = bp->cnic_kwq;
		else
			bp->cnic_kwq_prod++;
	}

	spin_unlock_bh(&bp->spq_lock);

	if (bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending)
		bnx2x_cnic_sp_post(bp, 0);

	return i;
}
static int bnx2x_cnic_ctl_send(struct bnx2x *bp, struct cnic_ctl_info *ctl)
{
	struct cnic_ops *c_ops;
	int rc = 0;

	mutex_lock(&bp->cnic_mutex);
	c_ops = bp->cnic_ops;
	if (c_ops)
		rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
	mutex_unlock(&bp->cnic_mutex);

	return rc;
}
static int bnx2x_cnic_ctl_send_bh(struct bnx2x *bp, struct cnic_ctl_info *ctl)
{
	struct cnic_ops *c_ops;
	int rc = 0;

	rcu_read_lock();
	c_ops = rcu_dereference(bp->cnic_ops);
	if (c_ops)
		rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
	rcu_read_unlock();

	return rc;
}
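/* Two flavours of sending a control event to cnic: bnx2x_cnic_ctl_send()
 * takes cnic_mutex and may sleep, while the _bh variant relies on RCU so it
 * can be called from softirq context.
 */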
/*
 * for commands that have no data
 */
int bnx2x_cnic_notify(struct bnx2x *bp, int cmd)
{
	struct cnic_ctl_info ctl = {0};

	ctl.cmd = cmd;

	return bnx2x_cnic_ctl_send(bp, &ctl);
}
static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid)
{
	struct cnic_ctl_info ctl;

	/* first we tell CNIC and only then we count this as a completion */
	ctl.cmd = CNIC_CTL_COMPLETION_CMD;
	ctl.data.comp.cid = cid;

	bnx2x_cnic_ctl_send_bh(bp, &ctl);
	bnx2x_cnic_sp_post(bp, 1);
}
static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc = 0;

	switch (ctl->cmd) {
	case DRV_CTL_CTXTBL_WR_CMD: {
		u32 index = ctl->data.io.offset;
		dma_addr_t addr = ctl->data.io.dma_addr;

		bnx2x_ilt_wr(bp, index, addr);
		break;
	}

	case DRV_CTL_COMPLETION_CMD: {
		int count = ctl->data.comp.comp_count;

		bnx2x_cnic_sp_post(bp, count);
		break;
	}

	/* rtnl_lock is held.  */
	case DRV_CTL_START_L2_CMD: {
		u32 cli = ctl->data.ring.client_id;

		bp->rx_mode_cl_mask |= (1 << cli);
		bnx2x_set_storm_rx_mode(bp);
		break;
	}

	/* rtnl_lock is held.  */
	case DRV_CTL_STOP_L2_CMD: {
		u32 cli = ctl->data.ring.client_id;

		bp->rx_mode_cl_mask &= ~(1 << cli);
		bnx2x_set_storm_rx_mode(bp);
		break;
	}

	default:
		BNX2X_ERR("unknown command %x\n", ctl->cmd);
		rc = -EINVAL;
	}

	return rc;
}
void bnx2x_setup_cnic_irq_info(struct bnx2x *bp)
{
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	if (bp->flags & USING_MSIX_FLAG) {
		cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
		cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
		cp->irq_arr[0].vector = bp->msix_table[1].vector;
	} else {
		cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
		cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
	}
	cp->irq_arr[0].status_blk = bp->cnic_sb;
	cp->irq_arr[0].status_blk_num = CNIC_SB_ID(bp);
	cp->irq_arr[1].status_blk = bp->def_status_blk;
	cp->irq_arr[1].status_blk_num = DEF_SB_ID;

	cp->num_irq = 2;
}
static int bnx2x_register_cnic(struct net_device *dev, struct cnic_ops *ops,
			       void *data)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	if (ops == NULL)
		return -EINVAL;

	if (atomic_read(&bp->intr_sem) != 0)
		return -EBUSY;

	bp->cnic_kwq = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!bp->cnic_kwq)
		return -ENOMEM;

	bp->cnic_kwq_cons = bp->cnic_kwq;
	bp->cnic_kwq_prod = bp->cnic_kwq;
	bp->cnic_kwq_last = bp->cnic_kwq + MAX_SP_DESC_CNT;

	bp->cnic_spq_pending = 0;
	bp->cnic_kwq_pending = 0;

	bp->cnic_data = data;

	cp->num_irq = 0;
	cp->drv_state = CNIC_DRV_STATE_REGD;

	bnx2x_init_sb(bp, bp->cnic_sb, bp->cnic_sb_mapping, CNIC_SB_ID(bp));

	bnx2x_setup_cnic_irq_info(bp);
	bnx2x_set_iscsi_eth_mac_addr(bp, 1);
	bp->cnic_flags |= BNX2X_CNIC_FLAG_MAC_SET;
	rcu_assign_pointer(bp->cnic_ops, ops);

	return 0;
}
static int bnx2x_unregister_cnic(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	mutex_lock(&bp->cnic_mutex);
	if (bp->cnic_flags & BNX2X_CNIC_FLAG_MAC_SET) {
		bp->cnic_flags &= ~BNX2X_CNIC_FLAG_MAC_SET;
		bnx2x_set_iscsi_eth_mac_addr(bp, 0);
	}
	cp->drv_state = 0;
	rcu_assign_pointer(bp->cnic_ops, NULL);
	mutex_unlock(&bp->cnic_mutex);
	synchronize_rcu();
	kfree(bp->cnic_kwq);
	bp->cnic_kwq = NULL;

	return 0;
}
struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	cp->drv_owner = THIS_MODULE;
	cp->chip_id = CHIP_ID(bp);
	cp->pdev = bp->pdev;
	cp->io_base = bp->regview;
	cp->io_base2 = bp->doorbells;
	cp->max_kwqe_pending = 8;
	cp->ctx_blk_size = CNIC_CTX_PER_ILT * sizeof(union cdu_context);
	cp->ctx_tbl_offset = FUNC_ILT_BASE(BP_FUNC(bp)) + 1;
	cp->ctx_tbl_len = CNIC_ILT_LINES;
	cp->starting_cid = BCM_CNIC_CID_START;
	cp->drv_submit_kwqes_16 = bnx2x_cnic_sp_queue;
	cp->drv_ctl = bnx2x_drv_ctl;
	cp->drv_register_cnic = bnx2x_register_cnic;
	cp->drv_unregister_cnic = bnx2x_unregister_cnic;

	return cp;
}
EXPORT_SYMBOL(bnx2x_cnic_probe);
#endif /* BCM_CNIC */