/* bnx2x_main.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2010 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath and fastpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/device.h>  /* for dev_info() */
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/crc32c.h>
#include <linux/prefetch.h>
#include <linux/zlib.h>
#include <linux/stringify.h>

#include "bnx2x_init.h"
#include "bnx2x_init_ops.h"
#include "bnx2x_cmn.h"

#include <linux/firmware.h>
#include "bnx2x_fw_file_hdr.h"
#define FW_FILE_VERSION					\
	__stringify(BCM_5710_FW_MAJOR_VERSION) "."	\
	__stringify(BCM_5710_FW_MINOR_VERSION) "."	\
	__stringify(BCM_5710_FW_REVISION_VERSION) "."	\
	__stringify(BCM_5710_FW_ENGINEERING_VERSION)
#define FW_FILE_NAME_E1		"bnx2x/bnx2x-e1-" FW_FILE_VERSION ".fw"
#define FW_FILE_NAME_E1H	"bnx2x/bnx2x-e1h-" FW_FILE_VERSION ".fw"

/* Time in jiffies before concluding the transmitter is hung */
#define TX_TIMEOUT		(5*HZ)
static char version[] __devinitdata =
	"Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
	DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Eliezer Tamir");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710/57711/57711E Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FW_FILE_NAME_E1);
MODULE_FIRMWARE(FW_FILE_NAME_E1H);
static int multi_mode = 1;
module_param(multi_mode, int, 0);
MODULE_PARM_DESC(multi_mode, " Multi queue mode "
			     "(0 Disable; 1 Enable (default))");

static int num_queues;
module_param(num_queues, int, 0);
MODULE_PARM_DESC(num_queues, " Number of queues for multi_mode=1"
				" (default is as a number of CPUs)");

static int disable_tpa;
module_param(disable_tpa, int, 0);
MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");

static int int_mode;
module_param(int_mode, int, 0);
MODULE_PARM_DESC(int_mode, " Force interrupt mode other than MSI-X "
				"(1 INT#x; 2 MSI)");

static int dropless_fc;
module_param(dropless_fc, int, 0);
MODULE_PARM_DESC(dropless_fc, " Pause on exhausted host ring");

static int poll;
module_param(poll, int, 0);
MODULE_PARM_DESC(poll, " Use polling (for debug)");

static int mrrs = -1;
module_param(mrrs, int, 0);
MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");

static int debug;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, " Default debug msglevel");
static struct workqueue_struct *bnx2x_wq;

enum bnx2x_board_type {
	BCM57710 = 0,
	BCM57711,
	BCM57711E,
};

/* indexed by board_type, above */
static struct {
	char *name;
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM57710 XGb" },
	{ "Broadcom NetXtreme II BCM57711 XGb" },
	{ "Broadcom NetXtreme II BCM57711E XGb" }
};
static DEFINE_PCI_DEVICE_TABLE(bnx2x_pci_tbl) = {
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57710), BCM57710 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711), BCM57711 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711E), BCM57711E },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);
/****************************************************************************
* General service functions
****************************************************************************/

/* used only at init
 * locking is done by mcp
 */
void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
{
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
}
static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
{
	u32 val;

	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);

	return val;
}
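/*
 * Usage sketch (illustrative, not part of the original driver): the
 * indirect accessors above tunnel a GRC access through PCI config space
 * in three steps - program the window, move the data, restore the window:
 *
 *	u32 val = bnx2x_reg_rd_ind(bp, MISC_REG_GPIO);
 *
 * MISC_REG_GPIO is only an example target; any GRC address works. This
 * path is the fallback used before the DMAE engine is ready.
 */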
const u32 dmae_reg_go_c[] = {
	DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
	DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
	DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
	DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
};
/* copy command into DMAE command memory and set DMAE command go */
void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae, int idx)
{
	u32 cmd_offset;
	int i;

	cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
	for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
		REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));

		DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
		   idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
	}
	REG_WR(bp, dmae_reg_go_c[idx], 1);
}
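/*
 * Worked example (illustrative, with a hypothetical command size): each
 * DMAE channel idx owns one struct dmae_command slot in command memory.
 * If sizeof(struct dmae_command) were 56 bytes, channel 2 would start at
 * DMAE_REG_CMD_MEM + 112 and the loop above would copy 14 dwords before
 * kicking dmae_reg_go_c[2].
 */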
void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
		      u32 len32)
{
	struct dmae_command dmae;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = 200;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);

		DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x len32 %d)"
		   "  using indirect\n", dst_addr, len32);
		bnx2x_init_ind_wr(bp, dst_addr, data, len32);
		return;
	}

	memset(&dmae, 0, sizeof(struct dmae_command));

	dmae.opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
		       DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
		       DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		       DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		       DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		       (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		       (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae.src_addr_lo = U64_LO(dma_addr);
	dmae.src_addr_hi = U64_HI(dma_addr);
	dmae.dst_addr_lo = dst_addr >> 2;
	dmae.dst_addr_hi = 0;
	dmae.len = len32;
	dmae.comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae.comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae.comp_val = DMAE_COMP_VAL;

	DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
	   DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
		    "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
	   dmae.opcode, dmae.src_addr_hi, dmae.src_addr_lo,
	   dmae.len, dmae.dst_addr_hi, dmae.dst_addr_lo, dst_addr,
	   dmae.comp_addr_hi, dmae.comp_addr_lo, dmae.comp_val);
	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	mutex_lock(&bp->dmae_mutex);

	*wb_comp = 0;

	bnx2x_post_dmae(bp, &dmae, INIT_DMAE_C(bp));

	udelay(5);

	while (*wb_comp != DMAE_COMP_VAL) {
		DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);

		if (!cnt) {
			BNX2X_ERR("DMAE timeout!\n");
			break;
		}
		cnt--;
		/* adjust delay for emulation/FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			msleep(100);
		else
			udelay(5);
	}

	mutex_unlock(&bp->dmae_mutex);
}
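/*
 * Usage sketch (illustrative, not from the original driver): copying a
 * four-dword buffer from host memory into GRC space, where mapping and
 * dst_grc_addr are hypothetical names standing in for a real DMA-coherent
 * buffer and a GRC byte address:
 *
 *	bnx2x_write_dmae(bp, mapping, dst_grc_addr, 4);
 *
 * The function converts the byte address to a dword address
 * (dst_addr >> 2) before posting the command.
 */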
void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
{
	struct dmae_command dmae;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = 200;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);
		int i;

		DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x len32 %d)"
		   "  using indirect\n", src_addr, len32);
		for (i = 0; i < len32; i++)
			data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
		return;
	}

	memset(&dmae, 0, sizeof(struct dmae_command));

	dmae.opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
		       DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
		       DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		       DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		       DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		       (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		       (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae.src_addr_lo = src_addr >> 2;
	dmae.src_addr_hi = 0;
	dmae.dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
	dmae.dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
	dmae.len = len32;
	dmae.comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae.comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae.comp_val = DMAE_COMP_VAL;

	DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
	   DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
		    "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
	   dmae.opcode, dmae.src_addr_hi, dmae.src_addr_lo,
	   dmae.len, dmae.dst_addr_hi, dmae.dst_addr_lo, src_addr,
	   dmae.comp_addr_hi, dmae.comp_addr_lo, dmae.comp_val);

	mutex_lock(&bp->dmae_mutex);

	memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
	*wb_comp = 0;

	bnx2x_post_dmae(bp, &dmae, INIT_DMAE_C(bp));

	udelay(5);

	while (*wb_comp != DMAE_COMP_VAL) {

		if (!cnt) {
			BNX2X_ERR("DMAE timeout!\n");
			break;
		}
		cnt--;
		/* adjust delay for emulation/FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			msleep(100);
		else
			udelay(5);
	}
	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	mutex_unlock(&bp->dmae_mutex);
}
void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
			       u32 addr, u32 len)
{
	int dmae_wr_max = DMAE_LEN32_WR_MAX(bp);
	int offset = 0;

	while (len > dmae_wr_max) {
		bnx2x_write_dmae(bp, phys_addr + offset,
				 addr + offset, dmae_wr_max);
		offset += dmae_wr_max * 4;
		len -= dmae_wr_max;
	}

	bnx2x_write_dmae(bp, phys_addr + offset, addr + offset, len);
}
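/*
 * Worked example (illustrative, with a hypothetical chunk size): with
 * DMAE_LEN32_WR_MAX(bp) == 256 and len == 600 dwords, the loop above
 * issues two 256-dword writes, advancing offset by 256*4 bytes each
 * iteration (len counts dwords, offset counts bytes), and the tail call
 * moves the remaining 88 dwords.
 */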
/* used only for slowpath so not inlined */
static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
{
	u32 wb_write[2];

	wb_write[0] = val_hi;
	wb_write[1] = val_lo;
	REG_WR_DMAE(bp, reg, wb_write, 2);
}
static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
{
	u32 wb_data[2];

	REG_RD_DMAE(bp, reg, wb_data, 2);

	return HILO_U64(wb_data[0], wb_data[1]);
}
static int bnx2x_mc_assert(struct bnx2x *bp)
{
	char last_idx;
	int i, rc = 0;
	u32 row0, row1, row2, row3;

	/* XSTORM */
	last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
			   XSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* TSTORM */
	last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
			   TSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* CSTORM */
	last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
			   CSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* USTORM */
	last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
			   USTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	return rc;
}
static void bnx2x_fw_dump(struct bnx2x *bp)
{
	u32 addr;
	u32 mark, offset;
	__be32 data[9];
	int word;

	if (BP_NOMCP(bp)) {
		BNX2X_ERR("NO MCP - can not dump\n");
		return;
	}

	addr = bp->common.shmem_base - 0x0800 + 4;
	mark = REG_RD(bp, addr);
	mark = MCP_REG_MCPR_SCRATCH + ((mark + 0x3) & ~0x3) - 0x08000000;
	pr_err("begin fw dump (mark 0x%x)\n", mark);

	for (offset = mark; offset <= bp->common.shmem_base; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, offset + 4*word));
		data[8] = 0x0;
		pr_cont("%s", (char *)data);
	}
	for (offset = addr + 4; offset <= mark; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, offset + 4*word));
		data[8] = 0x0;
		pr_cont("%s", (char *)data);
	}
	pr_err("end of fw dump\n");
}
void bnx2x_panic_dump(struct bnx2x *bp)
{
	int i;
	u16 j, start, end;

	bp->stats_state = STATS_STATE_DISABLED;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	BNX2X_ERR("begin crash dump -----------------\n");

	/* Indices */
	/* Common */
	BNX2X_ERR("def_c_idx(0x%x)  def_u_idx(0x%x)  def_x_idx(0x%x)"
		  "  def_t_idx(0x%x)  def_att_idx(0x%x)  attn_state(0x%x)"
		  "  spq_prod_idx(0x%x)\n",
		  bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
		  bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);

	/* Rx */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		BNX2X_ERR("fp%d: rx_bd_prod(0x%x)  rx_bd_cons(0x%x)"
			  "  *rx_bd_cons_sb(0x%x)  rx_comp_prod(0x%x)"
			  "  rx_comp_cons(0x%x)  *rx_cons_sb(0x%x)\n",
			  i, fp->rx_bd_prod, fp->rx_bd_cons,
			  le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
			  fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
		BNX2X_ERR("     rx_sge_prod(0x%x)  last_max_sge(0x%x)"
			  "  fp_u_idx(0x%x) *sb_u_idx(0x%x)\n",
			  fp->rx_sge_prod, fp->last_max_sge,
			  le16_to_cpu(fp->fp_u_idx),
			  fp->status_blk->u_status_block.status_block_index);
	}

	/* Tx */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		BNX2X_ERR("fp%d: tx_pkt_prod(0x%x)  tx_pkt_cons(0x%x)"
			  "  tx_bd_prod(0x%x)  tx_bd_cons(0x%x)"
			  "  *tx_cons_sb(0x%x)\n",
			  i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
			  fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
		BNX2X_ERR("     fp_c_idx(0x%x)  *sb_c_idx(0x%x)"
			  "  tx_db_prod(0x%x)\n", le16_to_cpu(fp->fp_c_idx),
			  fp->status_blk->c_status_block.status_block_index,
			  fp->tx_db.data.prod);
	}

	/* Rings */
	/* Rx */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
		end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
		for (j = start; j != end; j = RX_BD(j + 1)) {
			u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
			struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];

			BNX2X_ERR("fp%d: rx_bd[%x]=[%x:%x]  sw_bd=[%p]\n",
				  i, j, rx_bd[1], rx_bd[0], sw_bd->skb);
		}

		start = RX_SGE(fp->rx_sge_prod);
		end = RX_SGE(fp->last_max_sge);
		for (j = start; j != end; j = RX_SGE(j + 1)) {
			u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
			struct sw_rx_page *sw_page = &fp->rx_page_ring[j];

			BNX2X_ERR("fp%d: rx_sge[%x]=[%x:%x]  sw_page=[%p]\n",
				  i, j, rx_sge[1], rx_sge[0], sw_page->page);
		}

		start = RCQ_BD(fp->rx_comp_cons - 10);
		end = RCQ_BD(fp->rx_comp_cons + 503);
		for (j = start; j != end; j = RCQ_BD(j + 1)) {
			u32 *cqe = (u32 *)&fp->rx_comp_ring[j];

			BNX2X_ERR("fp%d: cqe[%x]=[%x:%x:%x:%x]\n",
				  i, j, cqe[0], cqe[1], cqe[2], cqe[3]);
		}
	}

	/* Tx */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
		end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
		for (j = start; j != end; j = TX_BD(j + 1)) {
			struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];

			BNX2X_ERR("fp%d: packet[%x]=[%p,%x]\n",
				  i, j, sw_bd->skb, sw_bd->first_bd);
		}

		start = TX_BD(fp->tx_bd_cons - 10);
		end = TX_BD(fp->tx_bd_cons + 254);
		for (j = start; j != end; j = TX_BD(j + 1)) {
			u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];

			BNX2X_ERR("fp%d: tx_bd[%x]=[%x:%x:%x:%x]\n",
				  i, j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
		}
	}

	bnx2x_fw_dump(bp);
	bnx2x_mc_assert(bp);
	BNX2X_ERR("end crash dump -----------------\n");
}
void bnx2x_int_enable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;

	if (msix) {
		val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			 HC_CONFIG_0_REG_INT_LINE_EN_0);
		val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else if (msi) {
		val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else {
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_INT_LINE_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);

		DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
		   val, port, addr);

		REG_WR(bp, addr, val);

		val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
	}

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)  mode %s\n",
	   val, port, addr, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));

	REG_WR(bp, addr, val);
	/*
	 * Ensure that HC_CONFIG is written before leading/trailing edge config
	 */
	mmiowb();
	barrier();

	if (CHIP_IS_E1H(bp)) {
		/* init leading/trailing edge */
		if (IS_E1HMF(bp)) {
			val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
			if (bp->port.pmf)
				/* enable nig and gpio3 attention */
				val |= 0x1100;
		} else
			val = 0xffff;

		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
	}

	/* Make sure that interrupts are indeed enabled from here on */
	mmiowb();
}
static void bnx2x_int_disable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);

	val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
		 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
		 HC_CONFIG_0_REG_INT_LINE_EN_0 |
		 HC_CONFIG_0_REG_ATTN_BIT_EN_0);

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
	   val, port, addr);

	/* flush all outstanding writes */
	mmiowb();

	REG_WR(bp, addr, val);
	if (REG_RD(bp, addr) != val)
		BNX2X_ERR("BUG! proper val not read from IGU!\n");
}
void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
{
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int i, offset;

	/* disable interrupt handling */
	atomic_inc(&bp->intr_sem);
	smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */

	if (disable_hw)
		/* prevent the HW from sending interrupts */
		bnx2x_int_disable(bp);

	/* make sure all ISRs are done */
	if (msix) {
		synchronize_irq(bp->msix_table[0].vector);
		offset = 1;
#ifdef BCM_CNIC
		offset++;
#endif
		for_each_queue(bp, i)
			synchronize_irq(bp->msix_table[i + offset].vector);
	} else
		synchronize_irq(bp->pdev->irq);

	/* make sure sp_task is not running */
	cancel_delayed_work(&bp->sp_task);
	flush_workqueue(bnx2x_wq);
}

/* fast path */

/*
 * General service functions
 */
/* Return true if succeeded to acquire the lock */
static bool bnx2x_trylock_hw_lock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	int func = BP_FUNC(bp);
	u32 hw_lock_control_reg;

	DP(NETIF_MSG_HW, "Trying to take a lock on resource %d\n", resource);

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		DP(NETIF_MSG_HW,
		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return false;
	}

	if (func <= 5)
		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
	else
		hw_lock_control_reg =
				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);

	/* Try to acquire the lock */
	REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
	lock_status = REG_RD(bp, hw_lock_control_reg);
	if (lock_status & resource_bit)
		return true;

	DP(NETIF_MSG_HW, "Failed to get a lock on resource %d\n", resource);
	return false;
}
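/*
 * Usage sketch (illustrative, not from the original driver): the trylock
 * variant suits contexts that cannot sleep in the polling loop of
 * bnx2x_acquire_hw_lock() below:
 *
 *	if (!bnx2x_trylock_hw_lock(bp, HW_LOCK_RESOURCE_SPIO))
 *		return;		// somebody else owns the resource
 *	// ...touch the shared resource...
 *	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
 */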
#ifdef BCM_CNIC
static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid);
#endif
void bnx2x_sp_event(struct bnx2x_fastpath *fp,
		    union eth_rx_cqe *rr_cqe)
{
	struct bnx2x *bp = fp->bp;
	int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
	int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);

	DP(BNX2X_MSG_SP,
	   "fp %d  cid %d  got ramrod #%d  state is %x  type is %d\n",
	   fp->index, cid, command, bp->state,
	   rr_cqe->ramrod_cqe.ramrod_type);

	bp->spq_left++;

	if (fp->index) {
		switch (command | fp->state) {
		case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
						BNX2X_FP_STATE_OPENING):
			DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
			   cid);
			fp->state = BNX2X_FP_STATE_OPEN;
			break;

		case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
			DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
			   cid);
			fp->state = BNX2X_FP_STATE_HALTED;
			break;

		default:
			BNX2X_ERR("unexpected MC reply (%d)  "
				  "fp[%d] state is %x\n",
				  command, fp->index, fp->state);
			break;
		}
		mb(); /* force bnx2x_wait_ramrod() to see the change */
		return;
	}

	switch (command | bp->state) {
	case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
		DP(NETIF_MSG_IFUP, "got setup ramrod\n");
		bp->state = BNX2X_STATE_OPEN;
		break;

	case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
		bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
		fp->state = BNX2X_FP_STATE_HALTED;
		break;

	case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
		bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
		break;

#ifdef BCM_CNIC
	case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_OPEN):
		DP(NETIF_MSG_IFDOWN, "got delete ramrod for CID %d\n", cid);
		bnx2x_cnic_cfc_comp(bp, cid);
		break;
#endif

	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
		DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
		bp->set_mac_pending--;
		smp_wmb();
		break;

	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
		bp->set_mac_pending--;
		smp_wmb();
		break;

	default:
		BNX2X_ERR("unexpected MC reply (%d)  bp->state is %x\n",
			  command, bp->state);
		break;
	}
	mb(); /* force bnx2x_wait_ramrod() to see the change */
}
irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
{
	struct bnx2x *bp = netdev_priv(dev_instance);
	u16 status = bnx2x_ack_int(bp);
	u16 mask;
	int i;

	/* Return here if interrupt is shared and it's not for us */
	if (unlikely(status == 0)) {
		DP(NETIF_MSG_INTR, "not our interrupt!\n");
		return IRQ_NONE;
	}
	DP(NETIF_MSG_INTR, "got an interrupt  status 0x%x\n", status);

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	for (i = 0; i < BNX2X_NUM_QUEUES(bp); i++) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		mask = 0x2 << fp->sb_id;
		if (status & mask) {
			/* Handle Rx and Tx according to SB id */
			prefetch(fp->rx_cons_sb);
			prefetch(&fp->status_blk->u_status_block.
						status_block_index);
			prefetch(fp->tx_cons_sb);
			prefetch(&fp->status_blk->c_status_block.
						status_block_index);
			napi_schedule(&bnx2x_fp(bp, fp->index, napi));
			status &= ~mask;
		}
	}

#ifdef BCM_CNIC
	mask = 0x2 << CNIC_SB_ID(bp);
	if (status & (mask | 0x1)) {
		struct cnic_ops *c_ops = NULL;

		rcu_read_lock();
		c_ops = rcu_dereference(bp->cnic_ops);
		if (c_ops)
			c_ops->cnic_handler(bp->cnic_data, NULL);
		rcu_read_unlock();

		status &= ~mask;
	}
#endif

	if (unlikely(status & 0x1)) {
		queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);

		status &= ~0x1;
		if (!status)
			return IRQ_HANDLED;
	}

	if (unlikely(status))
		DP(NETIF_MSG_INTR, "got an unknown interrupt! (status 0x%x)\n",
		   status);

	return IRQ_HANDLED;
}

/* end of fast path */

/*
 * General service functions
 */
int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	int func = BP_FUNC(bp);
	u32 hw_lock_control_reg;
	int cnt;

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		DP(NETIF_MSG_HW,
		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return -EINVAL;
	}

	if (func <= 5)
		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
	else
		hw_lock_control_reg =
				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);

	/* Validating that the resource is not already taken */
	lock_status = REG_RD(bp, hw_lock_control_reg);
	if (lock_status & resource_bit) {
		DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
		   lock_status, resource_bit);
		return -EEXIST;
	}

	/* Try for 5 second every 5ms */
	for (cnt = 0; cnt < 1000; cnt++) {
		/* Try to acquire the lock */
		REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
		lock_status = REG_RD(bp, hw_lock_control_reg);
		if (lock_status & resource_bit)
			return 0;

		msleep(5);
	}
	DP(NETIF_MSG_HW, "Timeout\n");
	return -EAGAIN;
}
int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	int func = BP_FUNC(bp);
	u32 hw_lock_control_reg;

	DP(NETIF_MSG_HW, "Releasing a lock on resource %d\n", resource);

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		DP(NETIF_MSG_HW,
		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return -EINVAL;
	}

	if (func <= 5)
		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
	else
		hw_lock_control_reg =
				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);

	/* Validating that the resource is currently taken */
	lock_status = REG_RD(bp, hw_lock_control_reg);
	if (!(lock_status & resource_bit)) {
		DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
		   lock_status, resource_bit);
		return -EFAULT;
	}

	REG_WR(bp, hw_lock_control_reg, resource_bit);
	return 0;
}
int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;
	int value;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	/* read GPIO value */
	gpio_reg = REG_RD(bp, MISC_REG_GPIO);

	/* get the requested pin value */
	if ((gpio_reg & gpio_mask) == gpio_mask)
		value = 1;
	else
		value = 0;

	DP(NETIF_MSG_LINK, "pin %d  value 0x%x\n", gpio_num, value);

	return value;
}
int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
	/* read GPIO and mask except the float bits */
	gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);

	switch (mode) {
	case MISC_REGISTERS_GPIO_OUTPUT_LOW:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
		   gpio_num, gpio_shift);
		/* clear FLOAT and set CLR */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
		break;

	case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
		   gpio_num, gpio_shift);
		/* clear FLOAT and set SET */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
		break;

	case MISC_REGISTERS_GPIO_INPUT_HI_Z:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
		   gpio_num, gpio_shift);
		/* set FLOAT */
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_GPIO, gpio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);

	return 0;
}
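/*
 * Worked example (illustrative): with port swap inactive, gpio_num 3 on
 * port 1 gives gpio_port = 1, so gpio_shift = 3 +
 * MISC_REGISTERS_GPIO_PORT_SHIFT and gpio_mask selects the port-1 copy of
 * pin 3 in MISC_REG_GPIO. The FLOAT/SET/CLR groups are then addressed by
 * shifting that same mask by the *_POS offsets used above.
 */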
int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
	/* read GPIO int */
	gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);

	switch (mode) {
	case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
		DP(NETIF_MSG_LINK, "Clear GPIO INT %d (shift %d) -> "
				   "output low\n", gpio_num, gpio_shift);
		/* clear SET and set CLR */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
		break;

	case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
		DP(NETIF_MSG_LINK, "Set GPIO INT %d (shift %d) -> "
				   "output high\n", gpio_num, gpio_shift);
		/* clear CLR and set SET */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);

	return 0;
}
static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
{
	u32 spio_mask = (1 << spio_num);
	u32 spio_reg;

	if ((spio_num < MISC_REGISTERS_SPIO_4) ||
	    (spio_num > MISC_REGISTERS_SPIO_7)) {
		BNX2X_ERR("Invalid SPIO %d\n", spio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
	/* read SPIO and mask except the float bits */
	spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);

	switch (mode) {
	case MISC_REGISTERS_SPIO_OUTPUT_LOW:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
		/* clear FLOAT and set CLR */
		spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
		break;

	case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
		/* clear FLOAT and set SET */
		spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
		break;

	case MISC_REGISTERS_SPIO_INPUT_HI_Z:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
		/* set FLOAT */
		spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_SPIO, spio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);

	return 0;
}
int bnx2x_get_link_cfg_idx(struct bnx2x *bp)
{
	u32 sel_phy_idx = 0;
	if (bp->link_vars.link_up) {
		sel_phy_idx = EXT_PHY1;
		/* In case link is SERDES, check if the EXT_PHY2 is the one */
		if ((bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) &&
		    (bp->link_params.phy[EXT_PHY2].supported & SUPPORTED_FIBRE))
			sel_phy_idx = EXT_PHY2;
	} else {

		switch (bnx2x_phy_selection(&bp->link_params)) {
		case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
		case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY:
		case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
			sel_phy_idx = EXT_PHY1;
			break;
		case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY:
		case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
			sel_phy_idx = EXT_PHY2;
			break;
		}
	}
	/*
	 * The selected active PHY is always after swapping (in case PHY
	 * swapping is enabled). So when swapping is enabled, we need to
	 * reverse the configuration
	 */

	if (bp->link_params.multi_phy_config &
	    PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
		if (sel_phy_idx == EXT_PHY1)
			sel_phy_idx = EXT_PHY2;
		else if (sel_phy_idx == EXT_PHY2)
			sel_phy_idx = EXT_PHY1;
	}
	return LINK_CONFIG_IDX(sel_phy_idx);
}
void bnx2x_calc_fc_adv(struct bnx2x *bp)
{
	u8 cfg_idx = bnx2x_get_link_cfg_idx(bp);
	switch (bp->link_vars.ieee_fc &
		MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
		bp->port.advertising[cfg_idx] &= ~(ADVERTISED_Asym_Pause |
						   ADVERTISED_Pause);
		break;

	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
		bp->port.advertising[cfg_idx] |= (ADVERTISED_Asym_Pause |
						  ADVERTISED_Pause);
		break;

	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
		bp->port.advertising[cfg_idx] |= ADVERTISED_Asym_Pause;
		break;

	default:
		bp->port.advertising[cfg_idx] &= ~(ADVERTISED_Asym_Pause |
						   ADVERTISED_Pause);
		break;
	}
}
u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
{
	if (!BP_NOMCP(bp)) {
		u8 rc;
		int cfx_idx = bnx2x_get_link_cfg_idx(bp);
		u16 req_line_speed = bp->link_params.req_line_speed[cfx_idx];
		/* Initialize link parameters structure variables */
		/* It is recommended to turn off RX FC for jumbo frames
		   for better performance */
		if (bp->dev->mtu > 5000)
			bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
		else
			bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;

		bnx2x_acquire_phy_lock(bp);

		if (load_mode == LOAD_DIAG) {
			bp->link_params.loopback_mode = LOOPBACK_XGXS;
			bp->link_params.req_line_speed[cfx_idx] = SPEED_10000;
		}

		rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);

		bnx2x_release_phy_lock(bp);

		bnx2x_calc_fc_adv(bp);

		if (CHIP_REV_IS_SLOW(bp) && bp->link_vars.link_up) {
			bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
			bnx2x_link_report(bp);
		}
		bp->link_params.req_line_speed[cfx_idx] = req_line_speed;
		return rc;
	}
	BNX2X_ERR("Bootcode is missing - can not initialize link\n");
	return -EINVAL;
}
void bnx2x_link_set(struct bnx2x *bp)
{
	if (!BP_NOMCP(bp)) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
		bnx2x_phy_init(&bp->link_params, &bp->link_vars);
		bnx2x_release_phy_lock(bp);

		bnx2x_calc_fc_adv(bp);
	} else
		BNX2X_ERR("Bootcode is missing - can not set link\n");
}
static void bnx2x__link_reset(struct bnx2x *bp)
{
	if (!BP_NOMCP(bp)) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
		bnx2x_release_phy_lock(bp);
	} else
		BNX2X_ERR("Bootcode is missing - can not reset link\n");
}
u8 bnx2x_link_test(struct bnx2x *bp, u8 is_serdes)
{
	u8 rc = 0;

	if (!BP_NOMCP(bp)) {
		bnx2x_acquire_phy_lock(bp);
		rc = bnx2x_test_link(&bp->link_params, &bp->link_vars,
				     is_serdes);
		bnx2x_release_phy_lock(bp);
	} else
		BNX2X_ERR("Bootcode is missing - can not test link\n");

	return rc;
}
static void bnx2x_init_port_minmax(struct bnx2x *bp)
{
	u32 r_param = bp->link_vars.line_speed / 8;
	u32 fair_periodic_timeout_usec;
	u32 t_fair;

	memset(&(bp->cmng.rs_vars), 0,
	       sizeof(struct rate_shaping_vars_per_port));
	memset(&(bp->cmng.fair_vars), 0, sizeof(struct fairness_vars_per_port));

	/* 100 usec in SDM ticks = 25 since each tick is 4 usec */
	bp->cmng.rs_vars.rs_periodic_timeout = RS_PERIODIC_TIMEOUT_USEC / 4;

	/* this is the threshold below which no timer arming will occur.
	   The 1.25 coefficient makes the threshold a little bigger than
	   the real time, to compensate for timer inaccuracy */
	bp->cmng.rs_vars.rs_threshold =
				(RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;

	/* resolution of fairness timer */
	fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
	/* for 10G it is 1000usec. for 1G it is 10000usec. */
	t_fair = T_FAIR_COEF / bp->link_vars.line_speed;

	/* this is the threshold below which we won't arm the timer anymore */
	bp->cmng.fair_vars.fair_threshold = QM_ARB_BYTES;

	/* we multiply by 1e3/8 to get bytes/msec.
	   We don't want the credits to pass a credit
	   of the t_fair*FAIR_MEM (algorithm resolution) */
	bp->cmng.fair_vars.upper_bound = r_param * t_fair * FAIR_MEM;
	/* since each tick is 4 usec */
	bp->cmng.fair_vars.fairness_timeout = fair_periodic_timeout_usec / 4;
}
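/*
 * Worked example (illustrative): at line_speed = 10000 Mbps, r_param =
 * 10000/8 = 1250 bytes per usec, so rs_threshold covers 1.25x the bytes
 * that can arrive within RS_PERIODIC_TIMEOUT_USEC, and t_fair =
 * T_FAIR_COEF / 10000 matches the "for 10G it is 1000usec" comment above.
 */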
/* Calculates the sum of vn_min_rates.
   It's needed for further normalizing of the min_rates.
   Returns:
     sum of vn_min_rates.
       or
     0 - if all the min_rates are 0.
     In the latter case the fairness algorithm should be deactivated.
     If not all min_rates are zero then those that are zeroes will be set to 1.
 */
static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
{
	int all_zero = 1;
	int port = BP_PORT(bp);
	int vn;

	bp->vn_weight_sum = 0;
	for (vn = VN_0; vn < E1HVN_MAX; vn++) {
		int func = 2*vn + port;
		u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
		u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
				   FUNC_MF_CFG_MIN_BW_SHIFT) * 100;

		/* Skip hidden vns */
		if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
			continue;

		/* If min rate is zero - set it to 1 */
		if (!vn_min_rate)
			vn_min_rate = DEF_MIN_RATE;
		else
			all_zero = 0;

		bp->vn_weight_sum += vn_min_rate;
	}

	/* ... only if all min rates are zeros - disable fairness */
	if (all_zero) {
		bp->cmng.flags.cmng_enables &=
					~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
		DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
		   "  fairness will be disabled\n");
	} else
		bp->cmng.flags.cmng_enables |=
					CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
}
static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func)
{
	struct rate_shaping_vars_per_vn m_rs_vn;
	struct fairness_vars_per_vn m_fair_vn;
	u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
	u16 vn_min_rate, vn_max_rate;
	int i;

	/* If function is hidden - set min and max to zeroes */
	if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
		vn_min_rate = 0;
		vn_max_rate = 0;

	} else {
		vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
				FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
		/* If min rate is zero - set it to 1 */
		if (!vn_min_rate)
			vn_min_rate = DEF_MIN_RATE;
		vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
				FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
	}
	DP(NETIF_MSG_IFUP,
	   "func %d: vn_min_rate %d  vn_max_rate %d  vn_weight_sum %d\n",
	   func, vn_min_rate, vn_max_rate, bp->vn_weight_sum);

	memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
	memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));

	/* global vn counter - maximal Mbps for this vn */
	m_rs_vn.vn_counter.rate = vn_max_rate;

	/* quota - number of bytes transmitted in this period */
	m_rs_vn.vn_counter.quota =
				(vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;

	if (bp->vn_weight_sum) {
		/* credit for each period of the fairness algorithm:
		   number of bytes in T_FAIR (the vn share the port rate).
		   vn_weight_sum should not be larger than 10000, thus
		   T_FAIR_COEF / (8 * vn_weight_sum) will always be greater
		   than zero */
		m_fair_vn.vn_credit_delta =
			max_t(u32, (vn_min_rate * (T_FAIR_COEF /
						   (8 * bp->vn_weight_sum))),
			      (bp->cmng.fair_vars.fair_threshold * 2));
		DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta %d\n",
		   m_fair_vn.vn_credit_delta);
	}

	/* Store it to internal memory */
	for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
		REG_WR(bp, BAR_XSTRORM_INTMEM +
		       XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
		       ((u32 *)(&m_rs_vn))[i]);

	for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
		REG_WR(bp, BAR_XSTRORM_INTMEM +
		       XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
		       ((u32 *)(&m_fair_vn))[i]);
}
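/*
 * Worked example (illustrative): a MIN_BW configuration field value of
 * 25 becomes vn_min_rate = 25 * 100 = 2500 after the scaling above, and
 * with vn_max_rate = 10000 the per-period quota is
 * 10000 * RS_PERIODIC_TIMEOUT_USEC / 8 bytes.
 */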
/* This function is called upon link interrupt */
static void bnx2x_link_attn(struct bnx2x *bp)
{
	u32 prev_link_status = bp->link_vars.link_status;
	/* Make sure that we are synced with the current statistics */
	bnx2x_stats_handle(bp, STATS_EVENT_STOP);

	bnx2x_link_update(&bp->link_params, &bp->link_vars);

	if (bp->link_vars.link_up) {

		/* dropless flow control */
		if (CHIP_IS_E1H(bp) && bp->dropless_fc) {
			int port = BP_PORT(bp);
			u32 pause_enabled = 0;

			if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
				pause_enabled = 1;

			REG_WR(bp, BAR_USTRORM_INTMEM +
			       USTORM_ETH_PAUSE_ENABLED_OFFSET(port),
			       pause_enabled);
		}

		if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
			struct host_port_stats *pstats;

			pstats = bnx2x_sp(bp, port_stats);
			/* reset old bmac stats */
			memset(&(pstats->mac_stx[0]), 0,
			       sizeof(struct mac_stx));
		}
		if (bp->state == BNX2X_STATE_OPEN)
			bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
	}

	/* indicate link status only if link status actually changed */
	if (prev_link_status != bp->link_vars.link_status)
		bnx2x_link_report(bp);

	if (IS_E1HMF(bp)) {
		int port = BP_PORT(bp);
		int func;
		int vn;

		/* Set the attention towards other drivers on the same port */
		for (vn = VN_0; vn < E1HVN_MAX; vn++) {
			if (vn == BP_E1HVN(bp))
				continue;

			func = ((vn << 1) | port);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
			       (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
		}

		if (bp->link_vars.link_up) {
			int i;

			/* Init rate shaping and fairness contexts */
			bnx2x_init_port_minmax(bp);

			for (vn = VN_0; vn < E1HVN_MAX; vn++)
				bnx2x_init_vn_minmax(bp, 2*vn + port);

			/* Store it to internal memory */
			for (i = 0;
			     i < sizeof(struct cmng_struct_per_port) / 4; i++)
				REG_WR(bp, BAR_XSTRORM_INTMEM +
				  XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
				       ((u32 *)(&bp->cmng))[i]);
		}
	}
}
void bnx2x__link_status_update(struct bnx2x *bp)
{
	if ((bp->state != BNX2X_STATE_OPEN) || (bp->flags & MF_FUNC_DIS))
		return;

	bnx2x_link_status_update(&bp->link_params, &bp->link_vars);

	if (bp->link_vars.link_up)
		bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
	else
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);

	bnx2x_calc_vn_weight_sum(bp);

	/* indicate link status */
	bnx2x_link_report(bp);
}
static void bnx2x_pmf_update(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 val;

	bp->port.pmf = 1;
	DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);

	/* enable nig attention */
	val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
	REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
	REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);

	bnx2x_stats_handle(bp, STATS_EVENT_PMF);
}

/*
 * General service functions
 */
/* send the MCP a request, block until there is a reply */
u32 bnx2x_fw_command(struct bnx2x *bp, u32 command, u32 param)
{
	int func = BP_FUNC(bp);
	u32 seq = ++bp->fw_seq;
	u32 rc = 0;
	u32 cnt = 1;
	u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;

	mutex_lock(&bp->fw_mb_mutex);
	SHMEM_WR(bp, func_mb[func].drv_mb_param, param);
	SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
	DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));

	do {
		/* let the FW do its magic ... */
		msleep(delay);

		rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);

		/* Give the FW up to 5 second (500*10ms) */
	} while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 500));

	DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
	   cnt*delay, rc, seq);

	/* is this a reply to our command? */
	if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK))
		rc &= FW_MSG_CODE_MASK;
	else {
		/* FW BUG! */
		BNX2X_ERR("FW failed to respond!\n");
		bnx2x_fw_dump(bp);
		rc = 0;
	}
	mutex_unlock(&bp->fw_mb_mutex);

	return rc;
}
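/*
 * Usage sketch (illustrative, not from the original driver): callers pass
 * a DRV_MSG_CODE_* opcode and test the masked reply, e.g.
 *
 *	if (bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK, 0) == 0)
 *		BNX2X_ERR("FW did not ack DCC\n");
 *
 * A zero return means the MCP never echoed our sequence number back.
 */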
static void bnx2x_e1h_disable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);

	netif_tx_disable(bp->dev);

	REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);

	netif_carrier_off(bp->dev);
}
static void bnx2x_e1h_enable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);

	REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);

	/* Tx queue should be only reenabled */
	netif_tx_wake_all_queues(bp->dev);

	/*
	 * Should not call netif_carrier_on since it will be called if the link
	 * is up when checking for link state
	 */
}
static void bnx2x_update_min_max(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int vn, i;

	/* Init rate shaping and fairness contexts */
	bnx2x_init_port_minmax(bp);

	bnx2x_calc_vn_weight_sum(bp);

	for (vn = VN_0; vn < E1HVN_MAX; vn++)
		bnx2x_init_vn_minmax(bp, 2*vn + port);

	if (bp->port.pmf) {
		int func;

		/* Set the attention towards other drivers on the same port */
		for (vn = VN_0; vn < E1HVN_MAX; vn++) {
			if (vn == BP_E1HVN(bp))
				continue;

			func = ((vn << 1) | port);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
			       (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
		}

		/* Store it to internal memory */
		for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
			REG_WR(bp, BAR_XSTRORM_INTMEM +
			       XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
			       ((u32 *)(&bp->cmng))[i]);
	}
}
static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event)
{
	DP(BNX2X_MSG_MCP, "dcc_event 0x%x\n", dcc_event);

	if (dcc_event & DRV_STATUS_DCC_DISABLE_ENABLE_PF) {

		/*
		 * This is the only place besides the function initialization
		 * where the bp->flags can change so it is done without any
		 * locks
		 */
		if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
			DP(NETIF_MSG_IFDOWN, "mf_cfg function disabled\n");
			bp->flags |= MF_FUNC_DIS;

			bnx2x_e1h_disable(bp);
		} else {
			DP(NETIF_MSG_IFUP, "mf_cfg function enabled\n");
			bp->flags &= ~MF_FUNC_DIS;

			bnx2x_e1h_enable(bp);
		}
		dcc_event &= ~DRV_STATUS_DCC_DISABLE_ENABLE_PF;
	}
	if (dcc_event & DRV_STATUS_DCC_BANDWIDTH_ALLOCATION) {

		bnx2x_update_min_max(bp);
		dcc_event &= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION;
	}

	/* Report results to MCP */
	if (dcc_event)
		bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_FAILURE, 0);
	else
		bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK, 0);
}
/* must be called under the spq lock */
static inline struct eth_spe *bnx2x_sp_get_next(struct bnx2x *bp)
{
	struct eth_spe *next_spe = bp->spq_prod_bd;

	if (bp->spq_prod_bd == bp->spq_last_bd) {
		bp->spq_prod_bd = bp->spq;
		bp->spq_prod_idx = 0;
		DP(NETIF_MSG_TIMER, "end of spq\n");
	} else {
		bp->spq_prod_bd++;
		bp->spq_prod_idx++;
	}
	return next_spe;
}
/* must be called under the spq lock */
static inline void bnx2x_sp_prod_update(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);

	/* Make sure that BD data is updated before writing the producer */
	wmb();

	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
	       bp->spq_prod_idx);
	mmiowb();
}
1799 int bnx2x_sp_post(struct bnx2x
*bp
, int command
, int cid
,
1800 u32 data_hi
, u32 data_lo
, int common
)
1802 struct eth_spe
*spe
;
1804 #ifdef BNX2X_STOP_ON_ERROR
1805 if (unlikely(bp
->panic
))
1809 spin_lock_bh(&bp
->spq_lock
);
1811 if (!bp
->spq_left
) {
1812 BNX2X_ERR("BUG! SPQ ring full!\n");
1813 spin_unlock_bh(&bp
->spq_lock
);
1818 spe
= bnx2x_sp_get_next(bp
);
1820 /* CID needs port number to be encoded int it */
1821 spe
->hdr
.conn_and_cmd_data
=
1822 cpu_to_le32((command
<< SPE_HDR_CMD_ID_SHIFT
) |
1824 spe
->hdr
.type
= cpu_to_le16(ETH_CONNECTION_TYPE
);
1827 cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT
));
1829 spe
->data
.mac_config_addr
.hi
= cpu_to_le32(data_hi
);
1830 spe
->data
.mac_config_addr
.lo
= cpu_to_le32(data_lo
);
1834 DP(BNX2X_MSG_SP
/*NETIF_MSG_TIMER*/,
1835 "SPQE[%x] (%x:%x) command %d hw_cid %x data (%x:%x) left %x\n",
1836 bp
->spq_prod_idx
, (u32
)U64_HI(bp
->spq_mapping
),
1837 (u32
)(U64_LO(bp
->spq_mapping
) +
1838 (void *)bp
->spq_prod_bd
- (void *)bp
->spq
), command
,
1839 HW_CID(bp
, cid
), data_hi
, data_lo
, bp
->spq_left
);
1841 bnx2x_sp_prod_update(bp
);
1842 spin_unlock_bh(&bp
->spq_lock
);
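/*
 * Usage sketch (illustrative, not from the original driver): posting a
 * halt ramrod for a connection (cid is a hypothetical connection id) and
 * letting the completion arrive on the fastpath ring, where it is routed
 * to bnx2x_sp_event() above:
 *
 *	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, cid, 0, 0, 0);
 */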
/* acquire split MCP access lock register */
static int bnx2x_acquire_alr(struct bnx2x *bp)
{
	u32 val;
	int j, rc = 0;

	might_sleep();
	for (j = 0; j < 1000; j++) {
		val = (1UL << 31);
		REG_WR(bp, GRCBASE_MCP + 0x9c, val);
		val = REG_RD(bp, GRCBASE_MCP + 0x9c);
		if (val & (1L << 31))
			break;

		msleep(5);
	}
	if (!(val & (1L << 31))) {
		BNX2X_ERR("Cannot acquire MCP access lock register\n");
		rc = -EBUSY;
	}

	return rc;
}
/* release split MCP access lock register */
static void bnx2x_release_alr(struct bnx2x *bp)
{
	REG_WR(bp, GRCBASE_MCP + 0x9c, 0);
}
static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
{
	struct host_def_status_block *def_sb = bp->def_status_blk;
	u16 rc = 0;

	barrier(); /* status block is written to by the chip */
	if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
		bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
		rc |= 1;
	}
	if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
		bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
		rc |= 2;
	}
	if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
		bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
		rc |= 4;
	}
	if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
		bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
		rc |= 8;
	}
	if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
		bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
		rc |= 16;
	}

	return rc;
}

/*
 * slow path service functions
 */
static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
{
	int port = BP_PORT(bp);
	u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
		       COMMAND_REG_ATTN_BITS_SET);
	u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
			      MISC_REG_AEU_MASK_ATTN_FUNC_0;
	u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
				       NIG_REG_MASK_INTERRUPT_PORT0;
	u32 aeu_mask;
	u32 nig_mask = 0;

	if (bp->attn_state & asserted)
		BNX2X_ERR("IGU ERROR\n");

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
	aeu_mask = REG_RD(bp, aeu_addr);

	DP(NETIF_MSG_HW, "aeu_mask %x  newly asserted %x\n",
	   aeu_mask, asserted);
	aeu_mask &= ~(asserted & 0x3ff);
	DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);

	REG_WR(bp, aeu_addr, aeu_mask);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);

	DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
	bp->attn_state |= asserted;
	DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);

	if (asserted & ATTN_HARD_WIRED_MASK) {
		if (asserted & ATTN_NIG_FOR_FUNC) {

			bnx2x_acquire_phy_lock(bp);

			/* save nig interrupt mask */
			nig_mask = REG_RD(bp, nig_int_mask_addr);
			REG_WR(bp, nig_int_mask_addr, 0);

			bnx2x_link_attn(bp);

			/* handle unicore attn? */
		}
		if (asserted & ATTN_SW_TIMER_4_FUNC)
			DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");

		if (asserted & GPIO_2_FUNC)
			DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");

		if (asserted & GPIO_3_FUNC)
			DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");

		if (asserted & GPIO_4_FUNC)
			DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");

		if (port == 0) {
			if (asserted & ATTN_GENERAL_ATTN_1) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_2) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_3) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
			}
		} else {
			if (asserted & ATTN_GENERAL_ATTN_4) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_5) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_6) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
			}
		}

	} /* if hardwired */

	DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
	   asserted, hc_addr);
	REG_WR(bp, hc_addr, asserted);

	/* now set back the mask */
	if (asserted & ATTN_NIG_FOR_FUNC) {
		REG_WR(bp, nig_int_mask_addr, nig_mask);
		bnx2x_release_phy_lock(bp);
	}
}
static inline void bnx2x_fan_failure(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 ext_phy_config;
	/* mark the failure */
	ext_phy_config =
		SHMEM_RD(bp,
			 dev_info.port_hw_config[port].external_phy_config);

	ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
	ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
	SHMEM_WR(bp, dev_info.port_hw_config[port].external_phy_config,
		 ext_phy_config);

	/* log the failure */
	netdev_err(bp->dev, "Fan Failure on Network Controller has caused"
	       " the driver to shutdown the card to prevent permanent"
	       " damage.  Please contact OEM Support for assistance\n");
}
static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
{
	int port = BP_PORT(bp);
	int reg_offset;
	u32 val;

	reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
			     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);

	if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {

		val = REG_RD(bp, reg_offset);
		val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("SPIO5 hw attention\n");

		/* Fan failure attention */
		bnx2x_hw_reset_phy(&bp->link_params);
		bnx2x_fan_failure(bp);
	}

	if (attn & (AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 |
		    AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1)) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_handle_module_detect_int(&bp->link_params);
		bnx2x_release_phy_lock(bp);
	}

	if (attn & HW_INTERRUT_ASSERT_SET_0) {

		val = REG_RD(bp, reg_offset);
		val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
			  (u32)(attn & HW_INTERRUT_ASSERT_SET_0));
		bnx2x_panic();
	}
}
static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
{
	u32 val;

	if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {

		val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
		BNX2X_ERR("DB hw attention 0x%x\n", val);
		/* DORQ discard attention */
		if (val & 0x2)
			BNX2X_ERR("FATAL error from DORQ\n");
	}

	if (attn & HW_INTERRUT_ASSERT_SET_1) {

		int port = BP_PORT(bp);
		int reg_offset;

		reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
				     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);

		val = REG_RD(bp, reg_offset);
		val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
			  (u32)(attn & HW_INTERRUT_ASSERT_SET_1));
		bnx2x_panic();
	}
}
static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
{
	u32 val;

	if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {

		val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
		BNX2X_ERR("CFC hw attention 0x%x\n", val);
		/* CFC error attention */
		if (val & 0x2)
			BNX2X_ERR("FATAL error from CFC\n");
	}

	if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {

		val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
		BNX2X_ERR("PXP hw attention 0x%x\n", val);
		/* RQ_USDMDP_FIFO_OVERFLOW */
		if (val & 0x18000)
			BNX2X_ERR("FATAL error from PXP\n");
	}

	if (attn & HW_INTERRUT_ASSERT_SET_2) {

		int port = BP_PORT(bp);
		int reg_offset;

		reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
				     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);

		val = REG_RD(bp, reg_offset);
		val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
			  (u32)(attn & HW_INTERRUT_ASSERT_SET_2));
		bnx2x_panic();
	}
}
static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
{
	u32 val;

	if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {

		if (attn & BNX2X_PMF_LINK_ASSERT) {
			int func = BP_FUNC(bp);

			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
			bp->mf_config = SHMEM_RD(bp,
					   mf_cfg.func_mf_config[func].config);
			val = SHMEM_RD(bp, func_mb[func].drv_status);
			if (val & DRV_STATUS_DCC_EVENT_MASK)
				bnx2x_dcc_event(bp,
					    (val & DRV_STATUS_DCC_EVENT_MASK));
			bnx2x__link_status_update(bp);
			if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF))
				bnx2x_pmf_update(bp);

		} else if (attn & BNX2X_MC_ASSERT_BITS) {

			BNX2X_ERR("MC assert!\n");
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
			bnx2x_panic();

		} else if (attn & BNX2X_MCP_ASSERT) {

			BNX2X_ERR("MCP assert!\n");
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
			bnx2x_fw_dump(bp);

		} else
			BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
	}

	if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
		BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
		if (attn & BNX2X_GRC_TIMEOUT) {
			val = CHIP_IS_E1H(bp) ?
				REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
			BNX2X_ERR("GRC time-out 0x%08x\n", val);
		}
		if (attn & BNX2X_GRC_RSV) {
			val = CHIP_IS_E1H(bp) ?
				REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
			BNX2X_ERR("GRC reserved 0x%08x\n", val);
		}
		REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
	}
}
#define BNX2X_MISC_GEN_REG	MISC_REG_GENERIC_POR_1
#define LOAD_COUNTER_BITS	16 /* Number of bits for load counter */
#define LOAD_COUNTER_MASK	(((u32)0x1 << LOAD_COUNTER_BITS) - 1)
#define RESET_DONE_FLAG_MASK	(~LOAD_COUNTER_MASK)
#define RESET_DONE_FLAG_SHIFT	LOAD_COUNTER_BITS
#define CHIP_PARITY_SUPPORTED(bp) (CHIP_IS_E1(bp) || CHIP_IS_E1H(bp))
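
/*
 * Illustration only (not part of the driver): the generic MISC register is
 * split by the macros above into a 16-bit load counter in bits 0-15 and a
 * "reset in progress" flag at bit 16. A minimal user-space sketch of the
 * same mask arithmetic, using DEMO_* stand-ins for the macros:
 */
#if 0
#include <stdint.h>
#include <stdio.h>

#define DEMO_LOAD_COUNTER_BITS	16
#define DEMO_LOAD_COUNTER_MASK	(((uint32_t)0x1 << DEMO_LOAD_COUNTER_BITS) - 1)
#define DEMO_RESET_DONE_MASK	(~DEMO_LOAD_COUNTER_MASK)

int main(void)
{
	uint32_t reg = 0;

	reg |= (1 << DEMO_LOAD_COUNTER_BITS);	/* mark reset in progress */
	/* increment the counter without disturbing the flag bits */
	reg = (reg & DEMO_RESET_DONE_MASK) |
	      (((reg & DEMO_LOAD_COUNTER_MASK) + 1) & DEMO_LOAD_COUNTER_MASK);

	printf("counter=%u reset_in_progress=%u\n",
	       reg & DEMO_LOAD_COUNTER_MASK,
	       !!(reg & DEMO_RESET_DONE_MASK)); /* counter=1 reset_in_progress=1 */
	return 0;
}
#endif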
/*
 * should be run under rtnl lock
 */
static inline void bnx2x_set_reset_done(struct bnx2x *bp)
{
	u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
	val &= ~(1 << RESET_DONE_FLAG_SHIFT);
	REG_WR(bp, BNX2X_MISC_GEN_REG, val);
}
/*
 * should be run under rtnl lock
 */
static inline void bnx2x_set_reset_in_progress(struct bnx2x *bp)
{
	u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
	val |= (1 << RESET_DONE_FLAG_SHIFT);
	REG_WR(bp, BNX2X_MISC_GEN_REG, val);
}
/*
 * should be run under rtnl lock
 */
bool bnx2x_reset_is_done(struct bnx2x *bp)
{
	u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
	DP(NETIF_MSG_HW, "GEN_REG_VAL=0x%08x\n", val);
	return (val & RESET_DONE_FLAG_MASK) ? false : true;
}
/*
 * should be run under rtnl lock
 */
inline void bnx2x_inc_load_cnt(struct bnx2x *bp)
{
	u32 val1, val = REG_RD(bp, BNX2X_MISC_GEN_REG);

	DP(NETIF_MSG_HW, "Old GEN_REG_VAL=0x%08x\n", val);

	val1 = ((val & LOAD_COUNTER_MASK) + 1) & LOAD_COUNTER_MASK;
	REG_WR(bp, BNX2X_MISC_GEN_REG, (val & RESET_DONE_FLAG_MASK) | val1);
}
/*
 * should be run under rtnl lock
 */
u32 bnx2x_dec_load_cnt(struct bnx2x *bp)
{
	u32 val1, val = REG_RD(bp, BNX2X_MISC_GEN_REG);

	DP(NETIF_MSG_HW, "Old GEN_REG_VAL=0x%08x\n", val);

	val1 = ((val & LOAD_COUNTER_MASK) - 1) & LOAD_COUNTER_MASK;
	REG_WR(bp, BNX2X_MISC_GEN_REG, (val & RESET_DONE_FLAG_MASK) | val1);

	return val1;
}
/*
 * should be run under rtnl lock
 */
static inline u32 bnx2x_get_load_cnt(struct bnx2x *bp)
{
	return REG_RD(bp, BNX2X_MISC_GEN_REG) & LOAD_COUNTER_MASK;
}

static inline void bnx2x_clear_load_cnt(struct bnx2x *bp)
{
	u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
	REG_WR(bp, BNX2X_MISC_GEN_REG, val & (~LOAD_COUNTER_MASK));
}
static inline void _print_next_block(int idx, const char *blk)
{
	if (idx)
		pr_cont(", ");
	pr_cont("%s", blk);
}
static inline int bnx2x_print_blocks_with_parity0(u32 sig, int par_num)
{
	int i = 0;
	u32 cur_bit = 0;
	for (i = 0; sig; i++) {
		cur_bit = ((u32)0x1 << i);
		if (sig & cur_bit) {
			switch (cur_bit) {
			case AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR:
				_print_next_block(par_num++, "BRB");
				break;
			case AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR:
				_print_next_block(par_num++, "PARSER");
				break;
			case AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR:
				_print_next_block(par_num++, "TSDM");
				break;
			case AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR:
				_print_next_block(par_num++, "SEARCHER");
				break;
			case AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR:
				_print_next_block(par_num++, "TSEMI");
				break;
			}

			/* Clear the bit */
			sig &= ~cur_bit;
		}
	}

	return par_num;
}
static inline int bnx2x_print_blocks_with_parity1(u32 sig, int par_num)
{
	int i = 0;
	u32 cur_bit = 0;
	for (i = 0; sig; i++) {
		cur_bit = ((u32)0x1 << i);
		if (sig & cur_bit) {
			switch (cur_bit) {
			case AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR:
				_print_next_block(par_num++, "PBCLIENT");
				break;
			case AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR:
				_print_next_block(par_num++, "QM");
				break;
			case AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR:
				_print_next_block(par_num++, "XSDM");
				break;
			case AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR:
				_print_next_block(par_num++, "XSEMI");
				break;
			case AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR:
				_print_next_block(par_num++, "DOORBELLQ");
				break;
			case AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR:
				_print_next_block(par_num++, "VAUX PCI CORE");
				break;
			case AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR:
				_print_next_block(par_num++, "DEBUG");
				break;
			case AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR:
				_print_next_block(par_num++, "USDM");
				break;
			case AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR:
				_print_next_block(par_num++, "USEMI");
				break;
			case AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR:
				_print_next_block(par_num++, "UPB");
				break;
			case AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR:
				_print_next_block(par_num++, "CSDM");
				break;
			}

			/* Clear the bit */
			sig &= ~cur_bit;
		}
	}

	return par_num;
}
static inline int bnx2x_print_blocks_with_parity2(u32 sig, int par_num)
{
	int i = 0;
	u32 cur_bit = 0;
	for (i = 0; sig; i++) {
		cur_bit = ((u32)0x1 << i);
		if (sig & cur_bit) {
			switch (cur_bit) {
			case AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR:
				_print_next_block(par_num++, "CSEMI");
				break;
			case AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR:
				_print_next_block(par_num++, "PXP");
				break;
			case AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR:
				_print_next_block(par_num++,
					"PXPPCICLOCKCLIENT");
				break;
			case AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR:
				_print_next_block(par_num++, "CFC");
				break;
			case AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR:
				_print_next_block(par_num++, "CDU");
				break;
			case AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR:
				_print_next_block(par_num++, "IGU");
				break;
			case AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR:
				_print_next_block(par_num++, "MISC");
				break;
			}

			/* Clear the bit */
			sig &= ~cur_bit;
		}
	}

	return par_num;
}
static inline int bnx2x_print_blocks_with_parity3(u32 sig, int par_num)
{
	int i = 0;
	u32 cur_bit = 0;
	for (i = 0; sig; i++) {
		cur_bit = ((u32)0x1 << i);
		if (sig & cur_bit) {
			switch (cur_bit) {
			case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY:
				_print_next_block(par_num++, "MCP ROM");
				break;
			case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY:
				_print_next_block(par_num++, "MCP UMP RX");
				break;
			case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY:
				_print_next_block(par_num++, "MCP UMP TX");
				break;
			case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY:
				_print_next_block(par_num++, "MCP SCPAD");
				break;
			}

			/* Clear the bit */
			sig &= ~cur_bit;
		}
	}

	return par_num;
}
static inline bool bnx2x_parity_attn(struct bnx2x *bp, u32 sig0, u32 sig1,
				     u32 sig2, u32 sig3)
{
	if ((sig0 & HW_PRTY_ASSERT_SET_0) || (sig1 & HW_PRTY_ASSERT_SET_1) ||
	    (sig2 & HW_PRTY_ASSERT_SET_2) || (sig3 & HW_PRTY_ASSERT_SET_3)) {
		int par_num = 0;
		DP(NETIF_MSG_HW, "Was parity error: HW block parity attention: "
			"[0]:0x%08x [1]:0x%08x "
			"[2]:0x%08x [3]:0x%08x\n",
			  sig0 & HW_PRTY_ASSERT_SET_0,
			  sig1 & HW_PRTY_ASSERT_SET_1,
			  sig2 & HW_PRTY_ASSERT_SET_2,
			  sig3 & HW_PRTY_ASSERT_SET_3);
		printk(KERN_ERR"%s: Parity errors detected in blocks: ",
		       bp->dev->name);
		par_num = bnx2x_print_blocks_with_parity0(
			sig0 & HW_PRTY_ASSERT_SET_0, par_num);
		par_num = bnx2x_print_blocks_with_parity1(
			sig1 & HW_PRTY_ASSERT_SET_1, par_num);
		par_num = bnx2x_print_blocks_with_parity2(
			sig2 & HW_PRTY_ASSERT_SET_2, par_num);
		par_num = bnx2x_print_blocks_with_parity3(
			sig3 & HW_PRTY_ASSERT_SET_3, par_num);
		printk("\n");
		return true;
	} else
		return false;
}
bool bnx2x_chk_parity_attn(struct bnx2x *bp)
{
	struct attn_route attn;
	int port = BP_PORT(bp);

	attn.sig[0] = REG_RD(bp,
		MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 +
			     port*4);
	attn.sig[1] = REG_RD(bp,
		MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 +
			     port*4);
	attn.sig[2] = REG_RD(bp,
		MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 +
			     port*4);
	attn.sig[3] = REG_RD(bp,
		MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 +
			     port*4);

	return bnx2x_parity_attn(bp, attn.sig[0], attn.sig[1], attn.sig[2],
				 attn.sig[3]);
}
static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
{
	struct attn_route attn, *group_mask;
	int port = BP_PORT(bp);
	int index;
	u32 reg_addr;
	u32 val;
	u32 aeu_mask;

	/* need to take HW lock because MCP or other port might also
	   try to handle this event */
	bnx2x_acquire_alr(bp);

	if (bnx2x_chk_parity_attn(bp)) {
		bp->recovery_state = BNX2X_RECOVERY_INIT;
		bnx2x_set_reset_in_progress(bp);
		schedule_delayed_work(&bp->reset_task, 0);
		/* Disable HW interrupts */
		bnx2x_int_disable(bp);
		bnx2x_release_alr(bp);
		/* In case of parity errors don't handle attentions so that
		 * other function would "see" parity errors.
		 */
		return;
	}

	attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
	attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
	attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
	attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
	DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
	   attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);

	for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
		if (deasserted & (1 << index)) {
			group_mask = &bp->attn_group[index];

			DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
			   index, group_mask->sig[0], group_mask->sig[1],
			   group_mask->sig[2], group_mask->sig[3]);

			bnx2x_attn_int_deasserted3(bp,
					attn.sig[3] & group_mask->sig[3]);
			bnx2x_attn_int_deasserted1(bp,
					attn.sig[1] & group_mask->sig[1]);
			bnx2x_attn_int_deasserted2(bp,
					attn.sig[2] & group_mask->sig[2]);
			bnx2x_attn_int_deasserted0(bp,
					attn.sig[0] & group_mask->sig[0]);
		}
	}

	bnx2x_release_alr(bp);

	reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);

	val = ~deasserted;
	DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
	   val, reg_addr);
	REG_WR(bp, reg_addr, val);

	if (~bp->attn_state & deasserted)
		BNX2X_ERR("IGU ERROR\n");

	reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
			  MISC_REG_AEU_MASK_ATTN_FUNC_0;

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
	aeu_mask = REG_RD(bp, reg_addr);

	DP(NETIF_MSG_HW, "aeu_mask %x  newly deasserted %x\n",
	   aeu_mask, deasserted);
	aeu_mask |= (deasserted & 0x3ff);
	DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);

	REG_WR(bp, reg_addr, aeu_mask);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);

	DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
	bp->attn_state &= ~deasserted;
	DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
}
static void bnx2x_attn_int(struct bnx2x *bp)
{
	/* read local copy of bits */
	u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
								attn_bits);
	u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
								attn_bits_ack);
	u32 attn_state = bp->attn_state;

	/* look for changed bits */
	u32 asserted   =  attn_bits & ~attn_ack & ~attn_state;
	u32 deasserted = ~attn_bits &  attn_ack &  attn_state;

	DP(NETIF_MSG_HW,
	   "attn_bits %x  attn_ack %x  asserted %x  deasserted %x\n",
	   attn_bits, attn_ack, asserted, deasserted);

	if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
		BNX2X_ERR("BAD attention state\n");

	/* handle bits that were raised */
	if (asserted)
		bnx2x_attn_int_asserted(bp, asserted);

	if (deasserted)
		bnx2x_attn_int_deasserted(bp, deasserted);
}
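
/*
 * Illustration only (not part of the driver): how bnx2x_attn_int() derives
 * the asserted/deasserted sets. A bit is newly asserted when the status
 * block shows it raised while it is neither acked nor tracked in attn_state;
 * it is newly deasserted when the status block shows it cleared while it is
 * still acked and tracked. Stand-alone sketch with made-up register values:
 */
#if 0
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t attn_bits = 0x5;	/* bits 0 and 2 currently raised */
	uint32_t attn_ack = 0x6;	/* bits 1 and 2 acked */
	uint32_t attn_state = 0x6;	/* bits 1 and 2 tracked as up */

	uint32_t asserted   =  attn_bits & ~attn_ack & ~attn_state; /* 0x1 */
	uint32_t deasserted = ~attn_bits &  attn_ack &  attn_state; /* 0x2 */

	/* same consistency check as the driver: where status and ack
	 * disagree, status must also differ from the tracked state */
	if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
		printf("BAD attention state\n");

	printf("asserted=0x%x deasserted=0x%x\n", asserted, deasserted);
	return 0;
}
#endif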
static void bnx2x_sp_task(struct work_struct *work)
{
	struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
	u16 status;

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return;
	}

	status = bnx2x_update_dsb_idx(bp);
/*	if (status == 0)				     */
/*		BNX2X_ERR("spurious slowpath interrupt!\n"); */

	DP(NETIF_MSG_INTR, "got a slowpath interrupt (status 0x%x)\n", status);

	/* HW attentions */
	if (status & 0x1) {
		bnx2x_attn_int(bp);
		status &= ~0x1;
	}

	/* CStorm events: STAT_QUERY */
	if (status & 0x2) {
		DP(BNX2X_MSG_SP, "CStorm events: STAT_QUERY\n");
		status &= ~0x2;
	}

	if (unlikely(status))
		DP(NETIF_MSG_INTR, "got an unknown interrupt! (status 0x%x)\n",
		   status);

	bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, le16_to_cpu(bp->def_att_idx),
		     IGU_INT_NOP, 1);
	bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
		     IGU_INT_NOP, 1);
	bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
		     IGU_INT_NOP, 1);
	bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
		     IGU_INT_NOP, 1);
	bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
		     IGU_INT_ENABLE, 1);
}
irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct bnx2x *bp = netdev_priv(dev);

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

	bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, 0, IGU_INT_DISABLE, 0);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

#ifdef BCM_CNIC
	{
		struct cnic_ops *c_ops;

		rcu_read_lock();
		c_ops = rcu_dereference(bp->cnic_ops);
		if (c_ops)
			c_ops->cnic_handler(bp->cnic_data, NULL);
		rcu_read_unlock();
	}
#endif
	queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);

	return IRQ_HANDLED;
}

/* end of slow path */
static void bnx2x_timer(unsigned long data)
{
	struct bnx2x *bp = (struct bnx2x *) data;

	if (!netif_running(bp->dev))
		return;

	if (atomic_read(&bp->intr_sem) != 0)
		goto timer_restart;

	if (poll) {
		struct bnx2x_fastpath *fp = &bp->fp[0];
		int rc;

		bnx2x_tx_int(fp);
		rc = bnx2x_rx_int(fp, 1000);
	}

	if (!BP_NOMCP(bp)) {
		int func = BP_FUNC(bp);
		u32 drv_pulse;
		u32 mcp_pulse;

		++bp->fw_drv_pulse_wr_seq;
		bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
		/* TBD - add SYSTEM_TIME */
		drv_pulse = bp->fw_drv_pulse_wr_seq;
		SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);

		mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
			     MCP_PULSE_SEQ_MASK);
		/* The delta between driver pulse and mcp response
		 * should be 1 (before mcp response) or 0 (after mcp response)
		 */
		if ((drv_pulse != mcp_pulse) &&
		    (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
			/* someone lost a heartbeat... */
			BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
				  drv_pulse, mcp_pulse);
		}
	}

	if (bp->state == BNX2X_STATE_OPEN)
		bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);

timer_restart:
	mod_timer(&bp->timer, jiffies + bp->current_interval);
}

/* end of Statistics */
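
/*
 * Illustration only (not part of the driver): the heartbeat test in
 * bnx2x_timer() accepts a delta of 1 (driver wrote, MCP has not answered
 * yet) or 0 (MCP answered), modulo the pulse sequence mask. Stand-alone
 * sketch; DEMO_PULSE_SEQ_MASK is a stand-in for MCP_PULSE_SEQ_MASK:
 */
#if 0
#include <stdio.h>

#define DEMO_PULSE_SEQ_MASK 0x7fff	/* stand-in mask for the demo */

static int demo_heartbeat_ok(unsigned drv_pulse, unsigned mcp_pulse)
{
	return (drv_pulse == mcp_pulse) ||
	       (drv_pulse == ((mcp_pulse + 1) & DEMO_PULSE_SEQ_MASK));
}

int main(void)
{
	printf("%d\n", demo_heartbeat_ok(5, 5));	/* 1: MCP answered */
	printf("%d\n", demo_heartbeat_ok(6, 5));	/* 1: answer pending */
	printf("%d\n", demo_heartbeat_ok(0, DEMO_PULSE_SEQ_MASK)); /* 1: wrap */
	printf("%d\n", demo_heartbeat_ok(7, 5));	/* 0: lost heartbeat */
	return 0;
}
#endif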
/*
 * nic init service functions
 */

static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
{
	int port = BP_PORT(bp);

	bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
			CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), 0,
			CSTORM_SB_STATUS_BLOCK_U_SIZE / 4);
	bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
			CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), 0,
			CSTORM_SB_STATUS_BLOCK_C_SIZE / 4);
}
void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
		   dma_addr_t mapping, int sb_id)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int index;
	u64 section;

	/* USTORM */
	section = ((u64)mapping) + offsetof(struct host_status_block,
					    u_status_block);
	sb->u_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id), U64_LO(section));
	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       ((CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_USB_FUNC_OFF +
		CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), func);

	for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id, index), 1);

	/* CSTORM */
	section = ((u64)mapping) + offsetof(struct host_status_block,
					    c_status_block);
	sb->c_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id), U64_LO(section));
	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       ((CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
		CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), func);

	for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id, index), 1);

	bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
}
static void bnx2x_zero_def_sb(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);

	bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY +
			TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
			sizeof(struct tstorm_def_status_block)/4);
	bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
			CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), 0,
			sizeof(struct cstorm_def_status_block_u)/4);
	bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
			CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), 0,
			sizeof(struct cstorm_def_status_block_c)/4);
	bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY +
			XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
			sizeof(struct xstorm_def_status_block)/4);
}
static void bnx2x_init_def_sb(struct bnx2x *bp,
			      struct host_def_status_block *def_sb,
			      dma_addr_t mapping, int sb_id)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int index, val, reg_offset;
	u64 section;

	/* ATTN */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    atten_status_block);
	def_sb->atten_status_block.status_block_id = sb_id;

	bp->attn_state = 0;

	reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
			     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
	for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
		bp->attn_group[index].sig[0] = REG_RD(bp,
						      reg_offset + 0x10*index);
		bp->attn_group[index].sig[1] = REG_RD(bp,
					       reg_offset + 0x4 + 0x10*index);
		bp->attn_group[index].sig[2] = REG_RD(bp,
					       reg_offset + 0x8 + 0x10*index);
		bp->attn_group[index].sig[3] = REG_RD(bp,
					       reg_offset + 0xc + 0x10*index);
	}

	reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
			     HC_REG_ATTN_MSG0_ADDR_L);

	REG_WR(bp, reg_offset, U64_LO(section));
	REG_WR(bp, reg_offset + 4, U64_HI(section));

	reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);

	val = REG_RD(bp, reg_offset);
	val |= sb_id;
	REG_WR(bp, reg_offset, val);

	/* USTORM */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    u_def_status_block);
	def_sb->u_def_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func), U64_LO(section));
	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       ((CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_USB_FUNC_OFF +
		CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), func);

	for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_DEF_SB_HC_DISABLE_U_OFFSET(func, index), 1);

	/* CSTORM */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    c_def_status_block);
	def_sb->c_def_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func), U64_LO(section));
	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       ((CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
		CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), func);

	for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_DEF_SB_HC_DISABLE_C_OFFSET(func, index), 1);

	/* TSTORM */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    t_def_status_block);
	def_sb->t_def_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
		TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);

	for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_TSTRORM_INTMEM +
			 TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);

	/* XSTORM */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    x_def_status_block);
	def_sb->x_def_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_XSTRORM_INTMEM +
	       XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
	REG_WR(bp, BAR_XSTRORM_INTMEM +
	       ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
		XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);

	for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_XSTRORM_INTMEM +
			 XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);

	bp->stats_pending = 0;
	bp->set_mac_pending = 0;

	bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
}
void bnx2x_update_coalesce(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int i;

	for_each_queue(bp, i) {
		int sb_id = bp->fp[i].sb_id;

		/* HC_INDEX_U_ETH_RX_CQ_CONS */
		REG_WR8(bp, BAR_CSTRORM_INTMEM +
			CSTORM_SB_HC_TIMEOUT_U_OFFSET(port, sb_id,
						      U_SB_ETH_RX_CQ_INDEX),
			bp->rx_ticks/(4 * BNX2X_BTR));
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id,
						       U_SB_ETH_RX_CQ_INDEX),
			 (bp->rx_ticks/(4 * BNX2X_BTR)) ? 0 : 1);

		/* HC_INDEX_C_ETH_TX_CQ_CONS */
		REG_WR8(bp, BAR_CSTRORM_INTMEM +
			CSTORM_SB_HC_TIMEOUT_C_OFFSET(port, sb_id,
						      C_SB_ETH_TX_CQ_INDEX),
			bp->tx_ticks/(4 * BNX2X_BTR));
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id,
						       C_SB_ETH_TX_CQ_INDEX),
			 (bp->tx_ticks/(4 * BNX2X_BTR)) ? 0 : 1);
	}
}
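
/*
 * Illustration only (not part of the driver): the coalescing timeout written
 * above is the tick value scaled down by 4 * BNX2X_BTR, and a result of zero
 * disables host coalescing for that status-block index (the HC_DISABLE write
 * becomes 1). Stand-alone sketch; DEMO_BTR is a stand-in, not the driver's
 * BNX2X_BTR value:
 */
#if 0
#include <stdio.h>

#define DEMO_BTR 4	/* stand-in scaling constant for the demo */

int main(void)
{
	int ticks;

	for (ticks = 0; ticks <= 48; ticks += 16) {
		int timeout = ticks / (4 * DEMO_BTR);
		int hc_disable = timeout ? 0 : 1;

		printf("ticks=%2d -> timeout=%d hc_disable=%d\n",
		       ticks, timeout, hc_disable);
	}
	return 0;
}
#endif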
static void bnx2x_init_sp_ring(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);

	spin_lock_init(&bp->spq_lock);

	bp->spq_left = MAX_SPQ_PENDING;
	bp->spq_prod_idx = 0;
	bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
	bp->spq_prod_bd = bp->spq;
	bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;

	REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
	       U64_LO(bp->spq_mapping));
	REG_WR(bp,
	       XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
	       U64_HI(bp->spq_mapping));

	REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
	       bp->spq_prod_idx);
}
static void bnx2x_init_context(struct bnx2x *bp)
{
	int i;

	/* Rx */
	for_each_queue(bp, i) {
		struct eth_context *context = bnx2x_sp(bp, context[i].eth);
		struct bnx2x_fastpath *fp = &bp->fp[i];
		u8 cl_id = fp->cl_id;

		context->ustorm_st_context.common.sb_index_numbers =
						BNX2X_RX_SB_INDEX_NUM;
		context->ustorm_st_context.common.clientId = cl_id;
		context->ustorm_st_context.common.status_block_id = fp->sb_id;
		context->ustorm_st_context.common.flags =
			(USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT |
			 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_STATISTICS);
		context->ustorm_st_context.common.statistics_counter_id =
						cl_id;
		context->ustorm_st_context.common.mc_alignment_log_size =
						BNX2X_RX_ALIGN_SHIFT;
		context->ustorm_st_context.common.bd_buff_size =
						bp->rx_buf_size;
		context->ustorm_st_context.common.bd_page_base_hi =
						U64_HI(fp->rx_desc_mapping);
		context->ustorm_st_context.common.bd_page_base_lo =
						U64_LO(fp->rx_desc_mapping);
		if (!fp->disable_tpa) {
			context->ustorm_st_context.common.flags |=
				USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA;
			context->ustorm_st_context.common.sge_buff_size =
				(u16)min_t(u32, SGE_PAGE_SIZE*PAGES_PER_SGE,
					   0xffff);
			context->ustorm_st_context.common.sge_page_base_hi =
						U64_HI(fp->rx_sge_mapping);
			context->ustorm_st_context.common.sge_page_base_lo =
						U64_LO(fp->rx_sge_mapping);

			context->ustorm_st_context.common.max_sges_for_packet =
				SGE_PAGE_ALIGN(bp->dev->mtu) >> SGE_PAGE_SHIFT;
			context->ustorm_st_context.common.max_sges_for_packet =
				((context->ustorm_st_context.common.
				  max_sges_for_packet + PAGES_PER_SGE - 1) &
				 (~(PAGES_PER_SGE - 1))) >> PAGES_PER_SGE_SHIFT;
		}

		context->ustorm_ag_context.cdu_usage =
			CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
					       CDU_REGION_NUMBER_UCM_AG,
					       ETH_CONNECTION_TYPE);

		context->xstorm_ag_context.cdu_reserved =
			CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
					       CDU_REGION_NUMBER_XCM_AG,
					       ETH_CONNECTION_TYPE);
	}

	/* Tx */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		struct eth_context *context =
			bnx2x_sp(bp, context[i].eth);

		context->cstorm_st_context.sb_index_number =
						C_SB_ETH_TX_CQ_INDEX;
		context->cstorm_st_context.status_block_id = fp->sb_id;

		context->xstorm_st_context.tx_bd_page_base_hi =
						U64_HI(fp->tx_desc_mapping);
		context->xstorm_st_context.tx_bd_page_base_lo =
						U64_LO(fp->tx_desc_mapping);
		context->xstorm_st_context.statistics_data = (fp->cl_id |
				XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);
	}
}
static void bnx2x_init_ind_table(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	int i;

	if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
		return;

	DP(NETIF_MSG_IFUP,
	   "Initializing indirection table  multi_mode %d\n", bp->multi_mode);
	for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
		REG_WR8(bp, BAR_TSTRORM_INTMEM +
			TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
			bp->fp->cl_id + (i % bp->num_queues));
}
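
/*
 * Illustration only (not part of the driver): the loop above spreads the RSS
 * hash buckets round-robin across the active queues by writing
 * cl_id + (i % num_queues) into each indirection-table slot. Stand-alone
 * sketch with made-up sizes (the real table size and client id come from
 * TSTORM_INDIRECTION_TABLE_SIZE and bp->fp->cl_id):
 */
#if 0
#include <stdio.h>

int main(void)
{
	int table_size = 128;	/* stand-in for TSTORM_INDIRECTION_TABLE_SIZE */
	int num_queues = 4;	/* example queue count */
	int base_cl_id = 16;	/* stand-in for bp->fp->cl_id */
	int i;

	for (i = 0; i < table_size; i++) {
		int entry = base_cl_id + (i % num_queues);

		if (i < 8)	/* print only the first few entries */
			printf("bucket %d -> client %d\n", i, entry);
	}
	return 0;
}
#endif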
void bnx2x_set_client_config(struct bnx2x *bp)
{
	struct tstorm_eth_client_config tstorm_client = {0};
	int port = BP_PORT(bp);
	int i;

	tstorm_client.mtu = bp->dev->mtu;
	tstorm_client.config_flags =
				(TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE |
				 TSTORM_ETH_CLIENT_CONFIG_E1HOV_REM_ENABLE);
	if (bp->rx_mode && bp->vlgrp && (bp->flags & HW_VLAN_RX_FLAG)) {
		tstorm_client.config_flags |=
				TSTORM_ETH_CLIENT_CONFIG_VLAN_REM_ENABLE;
		DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
	}

	for_each_queue(bp, i) {
		tstorm_client.statistics_counter_id = bp->fp[i].cl_id;

		REG_WR(bp, BAR_TSTRORM_INTMEM +
		       TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
		       ((u32 *)&tstorm_client)[0]);
		REG_WR(bp, BAR_TSTRORM_INTMEM +
		       TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
		       ((u32 *)&tstorm_client)[1]);
	}

	DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
	   ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
}
void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
{
	struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
	int mode = bp->rx_mode;
	int mask = bp->rx_mode_cl_mask;
	int func = BP_FUNC(bp);
	int port = BP_PORT(bp);
	int i;
	/* All but management unicast packets should pass to the host as well */
	u32 llh_mask =
		NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_BRCST |
		NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_MLCST |
		NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_VLAN |
		NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_NO_VLAN;

	DP(NETIF_MSG_IFUP, "rx mode %d  mask 0x%x\n", mode, mask);

	switch (mode) {
	case BNX2X_RX_MODE_NONE: /* no Rx */
		tstorm_mac_filter.ucast_drop_all = mask;
		tstorm_mac_filter.mcast_drop_all = mask;
		tstorm_mac_filter.bcast_drop_all = mask;
		break;

	case BNX2X_RX_MODE_NORMAL:
		tstorm_mac_filter.bcast_accept_all = mask;
		break;

	case BNX2X_RX_MODE_ALLMULTI:
		tstorm_mac_filter.mcast_accept_all = mask;
		tstorm_mac_filter.bcast_accept_all = mask;
		break;

	case BNX2X_RX_MODE_PROMISC:
		tstorm_mac_filter.ucast_accept_all = mask;
		tstorm_mac_filter.mcast_accept_all = mask;
		tstorm_mac_filter.bcast_accept_all = mask;
		/* pass management unicast packets as well */
		llh_mask |= NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_UNCST;
		break;

	default:
		BNX2X_ERR("BAD rx mode (%d)\n", mode);
		break;
	}

	REG_WR(bp,
	       (port ? NIG_REG_LLH1_BRB1_DRV_MASK : NIG_REG_LLH0_BRB1_DRV_MASK),
	       llh_mask);

	for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
		REG_WR(bp, BAR_TSTRORM_INTMEM +
		       TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
		       ((u32 *)&tstorm_mac_filter)[i]);

/*		DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
		   ((u32 *)&tstorm_mac_filter)[i]); */
	}

	if (mode != BNX2X_RX_MODE_NONE)
		bnx2x_set_client_config(bp);
}
static void bnx2x_init_internal_common(struct bnx2x *bp)
{
	int i;

	/* Zero this manually as its initialization is
	   currently missing in the initTool */
	for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_AGG_DATA_OFFSET + i * 4, 0);
}
static void bnx2x_init_internal_port(struct bnx2x *bp)
{
	int port = BP_PORT(bp);

	REG_WR(bp,
	       BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_U_OFFSET(port), BNX2X_BTR);
	REG_WR(bp,
	       BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_C_OFFSET(port), BNX2X_BTR);
	REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
}
static void bnx2x_init_internal_func(struct bnx2x *bp)
{
	struct tstorm_eth_function_common_config tstorm_config = {0};
	struct stats_indication_flags stats_flags = {0};
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int i, j;
	u32 offset;
	u16 max_agg_size;

	tstorm_config.config_flags = RSS_FLAGS(bp);

	if (is_multi(bp))
		tstorm_config.rss_result_mask = MULTI_MASK;

	/* Enable TPA if needed */
	if (bp->flags & TPA_ENABLE_FLAG)
		tstorm_config.config_flags |=
			TSTORM_ETH_FUNCTION_COMMON_CONFIG_ENABLE_TPA;

	if (IS_E1HMF(bp))
		tstorm_config.config_flags |=
				TSTORM_ETH_FUNCTION_COMMON_CONFIG_E1HOV_IN_CAM;

	tstorm_config.leading_client_id = BP_L_ID(bp);

	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
	       (*(u32 *)&tstorm_config));

	bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
	bp->rx_mode_cl_mask = (1 << BP_L_ID(bp));
	bnx2x_set_storm_rx_mode(bp);

	for_each_queue(bp, i) {
		u8 cl_id = bp->fp[i].cl_id;

		/* reset xstorm per client statistics */
		offset = BAR_XSTRORM_INTMEM +
			 XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
		for (j = 0;
		     j < sizeof(struct xstorm_per_client_stats) / 4; j++)
			REG_WR(bp, offset + j*4, 0);

		/* reset tstorm per client statistics */
		offset = BAR_TSTRORM_INTMEM +
			 TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
		for (j = 0;
		     j < sizeof(struct tstorm_per_client_stats) / 4; j++)
			REG_WR(bp, offset + j*4, 0);

		/* reset ustorm per client statistics */
		offset = BAR_USTRORM_INTMEM +
			 USTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
		for (j = 0;
		     j < sizeof(struct ustorm_per_client_stats) / 4; j++)
			REG_WR(bp, offset + j*4, 0);
	}

	/* Init statistics related context */
	stats_flags.collect_eth = 1;

	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
	       ((u32 *)&stats_flags)[0]);
	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
	       ((u32 *)&stats_flags)[1]);

	REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
	       ((u32 *)&stats_flags)[0]);
	REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
	       ((u32 *)&stats_flags)[1]);

	REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func),
	       ((u32 *)&stats_flags)[0]);
	REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func) + 4,
	       ((u32 *)&stats_flags)[1]);

	REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
	       ((u32 *)&stats_flags)[0]);
	REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
	       ((u32 *)&stats_flags)[1]);

	REG_WR(bp, BAR_XSTRORM_INTMEM +
	       XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
	       U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
	REG_WR(bp, BAR_XSTRORM_INTMEM +
	       XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
	       U64_HI(bnx2x_sp_mapping(bp, fw_stats)));

	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
	       U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
	       U64_HI(bnx2x_sp_mapping(bp, fw_stats)));

	REG_WR(bp, BAR_USTRORM_INTMEM +
	       USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
	       U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
	REG_WR(bp, BAR_USTRORM_INTMEM +
	       USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
	       U64_HI(bnx2x_sp_mapping(bp, fw_stats)));

	if (CHIP_IS_E1H(bp)) {
		REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
			IS_E1HMF(bp));
		REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
			IS_E1HMF(bp));
		REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
			IS_E1HMF(bp));
		REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
			IS_E1HMF(bp));

		REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
			 bp->e1hov);
	}

	/* Init CQ ring mapping and aggregation size, the FW limit is 8 frags */
	max_agg_size = min_t(u32, (min_t(u32, 8, MAX_SKB_FRAGS) *
				   SGE_PAGE_SIZE * PAGES_PER_SGE), 0xffff);
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id),
		       U64_LO(fp->rx_comp_mapping));
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id) + 4,
		       U64_HI(fp->rx_comp_mapping));

		/* Next page */
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id),
		       U64_LO(fp->rx_comp_mapping + BCM_PAGE_SIZE));
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id) + 4,
		       U64_HI(fp->rx_comp_mapping + BCM_PAGE_SIZE));

		REG_WR16(bp, BAR_USTRORM_INTMEM +
			 USTORM_MAX_AGG_SIZE_OFFSET(port, fp->cl_id),
			 max_agg_size);
	}

	/* dropless flow control */
	if (CHIP_IS_E1H(bp)) {
		struct ustorm_eth_rx_pause_data_e1h rx_pause = {0};

		rx_pause.bd_thr_low = 250;
		rx_pause.cqe_thr_low = 250;
		rx_pause.sge_thr_low = 0;
		rx_pause.bd_thr_high = 350;
		rx_pause.cqe_thr_high = 350;
		rx_pause.sge_thr_high = 0;

		for_each_queue(bp, i) {
			struct bnx2x_fastpath *fp = &bp->fp[i];

			if (!fp->disable_tpa) {
				rx_pause.sge_thr_low = 150;
				rx_pause.sge_thr_high = 250;
			}

			offset = BAR_USTRORM_INTMEM +
				 USTORM_ETH_RING_PAUSE_DATA_OFFSET(port,
								   fp->cl_id);
			for (j = 0;
			     j < sizeof(struct ustorm_eth_rx_pause_data_e1h)/4;
			     j++)
				REG_WR(bp, offset + j*4,
				       ((u32 *)&rx_pause)[j]);
		}
	}

	memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));

	/* Init rate shaping and fairness contexts */
	if (IS_E1HMF(bp)) {
		int vn;

		/* During init there is no active link
		   Until link is up, set link rate to 10Gbps */
		bp->link_vars.line_speed = SPEED_10000;
		bnx2x_init_port_minmax(bp);

		bp->mf_config =
		      SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
		bnx2x_calc_vn_weight_sum(bp);

		for (vn = VN_0; vn < E1HVN_MAX; vn++)
			bnx2x_init_vn_minmax(bp, 2*vn + port);

		/* Enable rate shaping and fairness */
		bp->cmng.flags.cmng_enables |=
					CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;

	} else {
		/* rate shaping and fairness are disabled */
		DP(NETIF_MSG_IFUP,
		   "single function mode  minmax will be disabled\n");
	}

	/* Store cmng structures to internal memory */
	if (bp->port.pmf)
		for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
			REG_WR(bp, BAR_XSTRORM_INTMEM +
			       XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
			       ((u32 *)(&bp->cmng))[i]);
}
static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
{
	switch (load_code) {
	case FW_MSG_CODE_DRV_LOAD_COMMON:
		bnx2x_init_internal_common(bp);
		/* no break */

	case FW_MSG_CODE_DRV_LOAD_PORT:
		bnx2x_init_internal_port(bp);
		/* no break */

	case FW_MSG_CODE_DRV_LOAD_FUNCTION:
		bnx2x_init_internal_func(bp);
		break;

	default:
		BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
		break;
	}
}
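
/*
 * Illustration only (not part of the driver): the switch above relies on
 * deliberate fall-through, so a COMMON load code runs common, port and
 * function init; a PORT load code runs port and function init; a FUNCTION
 * load code runs function init only. Stand-alone sketch of the same
 * dispatch pattern with made-up names:
 */
#if 0
#include <stdio.h>

enum demo_load_code { DEMO_LOAD_COMMON, DEMO_LOAD_PORT, DEMO_LOAD_FUNCTION };

static void demo_init(enum demo_load_code code)
{
	switch (code) {
	case DEMO_LOAD_COMMON:
		printf("common ");
		/* no break - fall through */
	case DEMO_LOAD_PORT:
		printf("port ");
		/* no break - fall through */
	case DEMO_LOAD_FUNCTION:
		printf("function\n");
		break;
	}
}

int main(void)
{
	demo_init(DEMO_LOAD_COMMON);	/* prints: common port function */
	demo_init(DEMO_LOAD_PORT);	/* prints: port function */
	demo_init(DEMO_LOAD_FUNCTION);	/* prints: function */
	return 0;
}
#endif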
void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
{
	int i;

	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		fp->bp = bp;
		fp->state = BNX2X_FP_STATE_CLOSED;
		fp->index = i;
		fp->cl_id = BP_L_ID(bp) + i;
#ifdef BCM_CNIC
		fp->sb_id = fp->cl_id + 1;
#else
		fp->sb_id = fp->cl_id;
#endif
		DP(NETIF_MSG_IFUP,
		   "queue[%d]:  bnx2x_init_sb(%p,%p)  cl_id %d  sb %d\n",
		   i, bp, fp->status_blk, fp->cl_id, fp->sb_id);
		bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping,
			      fp->sb_id);
		bnx2x_update_fpsb_idx(fp);
	}

	/* ensure status block indices were read */
	rmb();

	bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping,
			  DEF_SB_ID);
	bnx2x_update_dsb_idx(bp);
	bnx2x_update_coalesce(bp);
	bnx2x_init_rx_rings(bp);
	bnx2x_init_tx_ring(bp);
	bnx2x_init_sp_ring(bp);
	bnx2x_init_context(bp);
	bnx2x_init_internal(bp, load_code);
	bnx2x_init_ind_table(bp);
	bnx2x_stats_init(bp);

	/* At this point, we are ready for interrupts */
	atomic_set(&bp->intr_sem, 0);

	/* flush all before enabling interrupts */
	mb();
	mmiowb();

	bnx2x_int_enable(bp);

	/* Check for SPIO5 */
	bnx2x_attn_int_deasserted0(bp,
		REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + BP_PORT(bp)*4) &
				   AEU_INPUTS_ATTN_BITS_SPIO5);
}

/* end of nic init */
/*
 * gzip service functions
 */

static int bnx2x_gunzip_init(struct bnx2x *bp)
{
	bp->gunzip_buf = dma_alloc_coherent(&bp->pdev->dev, FW_BUF_SIZE,
					    &bp->gunzip_mapping, GFP_KERNEL);
	if (bp->gunzip_buf == NULL)
		goto gunzip_nomem1;

	bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
	if (bp->strm == NULL)
		goto gunzip_nomem2;

	bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
				      GFP_KERNEL);
	if (bp->strm->workspace == NULL)
		goto gunzip_nomem3;

	return 0;

gunzip_nomem3:
	kfree(bp->strm);
	bp->strm = NULL;

gunzip_nomem2:
	dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
			  bp->gunzip_mapping);
	bp->gunzip_buf = NULL;

gunzip_nomem1:
	netdev_err(bp->dev, "Cannot allocate firmware buffer for"
		   " un-compression\n");
	return -ENOMEM;
}
static void bnx2x_gunzip_end(struct bnx2x *bp)
{
	kfree(bp->strm->workspace);

	kfree(bp->strm);
	bp->strm = NULL;

	if (bp->gunzip_buf) {
		dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
				  bp->gunzip_mapping);
		bp->gunzip_buf = NULL;
	}
}
static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len)
{
	int n, rc;

	/* check gzip header */
	if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED)) {
		BNX2X_ERR("Bad gzip header\n");
		return -EINVAL;
	}

	n = 10;

#define FNAME				0x8

	if (zbuf[3] & FNAME)
		while ((zbuf[n++] != 0) && (n < len));

	bp->strm->next_in = (typeof(bp->strm->next_in))zbuf + n;
	bp->strm->avail_in = len - n;
	bp->strm->next_out = bp->gunzip_buf;
	bp->strm->avail_out = FW_BUF_SIZE;

	rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
	if (rc != Z_OK)
		return rc;

	rc = zlib_inflate(bp->strm, Z_FINISH);
	if ((rc != Z_OK) && (rc != Z_STREAM_END))
		netdev_err(bp->dev, "Firmware decompression error: %s\n",
			   bp->strm->msg);

	bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
	if (bp->gunzip_outlen & 0x3)
		netdev_err(bp->dev, "Firmware decompression error:"
				    " gunzip_outlen (%d) not aligned\n",
			   bp->gunzip_outlen);
	bp->gunzip_outlen >>= 2;

	zlib_inflateEnd(bp->strm);

	if (rc == Z_STREAM_END)
		return 0;

	return rc;
}
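
/*
 * Illustration only (not part of the driver): the header check above follows
 * RFC 1952 - a gzip stream starts with the magic bytes 0x1f 0x8b, byte 2 is
 * the compression method (8 = deflate) and bit 3 of the flags byte (FNAME)
 * marks an embedded NUL-terminated file name that must be skipped before the
 * raw deflate data. Stand-alone sketch of the same parsing, with DEMO_*
 * names as local stand-ins:
 */
#if 0
#include <stdio.h>

#define DEMO_FNAME 0x08	/* same flag bit the driver tests */

static int demo_deflate_offset(const unsigned char *zbuf, int len)
{
	int n = 10;	/* fixed-size gzip header */

	if (len < 10 || zbuf[0] != 0x1f || zbuf[1] != 0x8b || zbuf[2] != 8)
		return -1;	/* bad gzip header */

	if (zbuf[3] & DEMO_FNAME)
		while ((n < len) && (zbuf[n++] != 0))
			;	/* skip the embedded file name */
	return n;	/* deflate payload starts here */
}

int main(void)
{
	const unsigned char hdr[] = { 0x1f, 0x8b, 8, DEMO_FNAME,
				      0, 0, 0, 0, 0, 3, 'f', 'w', 0 };

	printf("payload offset: %d\n",
	       demo_deflate_offset(hdr, sizeof(hdr)));	/* prints 13 */
	return 0;
}
#endif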
/* nic load/unload */

/*
 * General service functions
 */

/* send a NIG loopback debug packet */
static void bnx2x_lb_pckt(struct bnx2x *bp)
{
	u32 wb_write[3];

	/* Ethernet source and destination addresses */
	wb_write[0] = 0x55555555;
	wb_write[1] = 0x55555555;
	wb_write[2] = 0x20;		/* SOP */
	REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);

	/* NON-IP protocol */
	wb_write[0] = 0x09000000;
	wb_write[1] = 0x55555555;
	wb_write[2] = 0x10;		/* EOP, eop_bvalid = 0 */
	REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
}
/* some of the internal memories
 * are not directly readable from the driver
 * to test them we send debug packets
 */
static int bnx2x_int_mem_test(struct bnx2x *bp)
{
	int factor;
	int count, i;
	u32 val = 0;

	if (CHIP_REV_IS_FPGA(bp))
		factor = 120;
	else if (CHIP_REV_IS_EMUL(bp))
		factor = 200;
	else
		factor = 1;

	DP(NETIF_MSG_HW, "start part1\n");

	/* Disable inputs of parser neighbor blocks */
	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
	REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
	REG_WR(bp, CFC_REG_DEBUG0, 0x1);
	REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);

	/*  Write 0 to parser credits for CFC search request */
	REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);

	/* send Ethernet packet */
	bnx2x_lb_pckt(bp);

	/* TODO do i reset NIG statistic? */
	/* Wait until NIG register shows 1 packet of size 0x10 */
	count = 1000 * factor;
	while (count) {

		bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
		val = *bnx2x_sp(bp, wb_data[0]);
		if (val == 0x10)
			break;

		msleep(10);
		count--;
	}
	if (val != 0x10) {
		BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
		return -1;
	}

	/* Wait until PRS register shows 1 packet */
	count = 1000 * factor;
	while (count) {
		val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
		if (val == 1)
			break;

		msleep(10);
		count--;
	}
	if (val != 0x1) {
		BNX2X_ERR("PRS timeout  val = 0x%x\n", val);
		return -2;
	}

	/* Reset and init BRB, PRS */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
	msleep(50);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
	msleep(50);
	bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);

	DP(NETIF_MSG_HW, "part2\n");

	/* Disable inputs of parser neighbor blocks */
	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
	REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
	REG_WR(bp, CFC_REG_DEBUG0, 0x1);
	REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);

	/* Write 0 to parser credits for CFC search request */
	REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);

	/* send 10 Ethernet packets */
	for (i = 0; i < 10; i++)
		bnx2x_lb_pckt(bp);

	/* Wait until NIG register shows 10 + 1
	   packets of size 11*0x10 = 0xb0 */
	count = 1000 * factor;
	while (count) {

		bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
		val = *bnx2x_sp(bp, wb_data[0]);
		if (val == 0xb0)
			break;

		msleep(10);
		count--;
	}
	if (val != 0xb0) {
		BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
		return -3;
	}

	/* Wait until PRS register shows 2 packets */
	val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
	if (val != 2)
		BNX2X_ERR("PRS timeout  val = 0x%x\n", val);

	/* Write 1 to parser credits for CFC search request */
	REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);

	/* Wait until PRS register shows 3 packets */
	msleep(10 * factor);
	/* Wait until NIG register shows 1 packet of size 0x10 */
	val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
	if (val != 3)
		BNX2X_ERR("PRS timeout  val = 0x%x\n", val);

	/* clear NIG EOP FIFO */
	for (i = 0; i < 11; i++)
		REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
	val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
	if (val != 1) {
		BNX2X_ERR("clear of NIG failed\n");
		return -4;
	}

	/* Reset and init BRB, PRS, NIG */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
	msleep(50);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
	msleep(50);
	bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
#ifndef BCM_CNIC
	/* set NIC mode */
	REG_WR(bp, PRS_REG_NIC_MODE, 1);
#endif

	/* Enable inputs of parser neighbor blocks */
	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
	REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
	REG_WR(bp, CFC_REG_DEBUG0, 0x0);
	REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);

	DP(NETIF_MSG_HW, "done\n");

	return 0; /* OK */
}
static void enable_blocks_attention(struct bnx2x *bp)
{
	REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
	REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
	REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
	REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
	REG_WR(bp, QM_REG_QM_INT_MASK, 0);
	REG_WR(bp, TM_REG_TM_INT_MASK, 0);
	REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
	REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
	REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
/*	REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
/*	REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
	REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
	REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
	REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
/*	REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
/*	REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
	REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
	REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
	REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
	REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
/*	REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
/*	REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
	if (CHIP_REV_IS_FPGA(bp))
		REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
	else
		REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
	REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
	REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
	REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
/*	REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
/*	REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
	REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
	REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
/*	REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
	REG_WR(bp, PBF_REG_PBF_INT_MASK, 0X18);		/* bit 3,4 masked */
}
static const struct {
	u32 addr;
	u32 mask;
} bnx2x_parity_mask[] = {
	{PXP_REG_PXP_PRTY_MASK, 0xffffffff},
	{PXP2_REG_PXP2_PRTY_MASK_0, 0xffffffff},
	{PXP2_REG_PXP2_PRTY_MASK_1, 0xffffffff},
	{HC_REG_HC_PRTY_MASK, 0xffffffff},
	{MISC_REG_MISC_PRTY_MASK, 0xffffffff},
	{QM_REG_QM_PRTY_MASK, 0x0},
	{DORQ_REG_DORQ_PRTY_MASK, 0x0},
	{GRCBASE_UPB + PB_REG_PB_PRTY_MASK, 0x0},
	{GRCBASE_XPB + PB_REG_PB_PRTY_MASK, 0x0},
	{SRC_REG_SRC_PRTY_MASK, 0x4},		/* bit 2 */
	{CDU_REG_CDU_PRTY_MASK, 0x0},
	{CFC_REG_CFC_PRTY_MASK, 0x0},
	{DBG_REG_DBG_PRTY_MASK, 0x0},
	{DMAE_REG_DMAE_PRTY_MASK, 0x0},
	{BRB1_REG_BRB1_PRTY_MASK, 0x0},
	{PRS_REG_PRS_PRTY_MASK, (1<<6)},	/* bit 6 */
	{TSDM_REG_TSDM_PRTY_MASK, 0x18},	/* bit 3,4 */
	{CSDM_REG_CSDM_PRTY_MASK, 0x8},		/* bit 3 */
	{USDM_REG_USDM_PRTY_MASK, 0x38},	/* bit 3,4,5 */
	{XSDM_REG_XSDM_PRTY_MASK, 0x8},		/* bit 3 */
	{TSEM_REG_TSEM_PRTY_MASK_0, 0x0},
	{TSEM_REG_TSEM_PRTY_MASK_1, 0x0},
	{USEM_REG_USEM_PRTY_MASK_0, 0x0},
	{USEM_REG_USEM_PRTY_MASK_1, 0x0},
	{CSEM_REG_CSEM_PRTY_MASK_0, 0x0},
	{CSEM_REG_CSEM_PRTY_MASK_1, 0x0},
	{XSEM_REG_XSEM_PRTY_MASK_0, 0x0},
	{XSEM_REG_XSEM_PRTY_MASK_1, 0x0}
};
static void enable_blocks_parity(struct bnx2x *bp)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(bnx2x_parity_mask); i++)
		REG_WR(bp, bnx2x_parity_mask[i].addr,
		       bnx2x_parity_mask[i].mask);
}
static void bnx2x_reset_common(struct bnx2x *bp)
{
	/* reset_common */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
	       0xd3ffff7f);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
}
static void bnx2x_init_pxp(struct bnx2x *bp)
{
	u16 devctl;
	int r_order, w_order;

	pci_read_config_word(bp->pdev,
			     bp->pcie_cap + PCI_EXP_DEVCTL, &devctl);
	DP(NETIF_MSG_HW, "read 0x%x from devctl\n", devctl);
	w_order = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
	if (bp->mrrs == -1)
		r_order = ((devctl & PCI_EXP_DEVCTL_READRQ) >> 12);
	else {
		DP(NETIF_MSG_HW, "force read order to %d\n", bp->mrrs);
		r_order = bp->mrrs;
	}

	bnx2x_init_pxp_arb(bp, r_order, w_order);
}
static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp)
{
	int is_required;
	u32 val;
	int port;

	is_required = 0;
	val = SHMEM_RD(bp, dev_info.shared_hw_config.config2) &
	      SHARED_HW_CFG_FAN_FAILURE_MASK;

	if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED)
		is_required = 1;

	/*
	 * The fan failure mechanism is usually related to the PHY type since
	 * the power consumption of the board is affected by the PHY. Currently,
	 * fan is required for most designs with SFX7101, BCM8727 and BCM8481.
	 */
	else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE)
		for (port = PORT_0; port < PORT_MAX; port++) {
			is_required |=
				bnx2x_fan_failure_det_req(
					bp,
					bp->common.shmem_base,
					bp->common.shmem2_base,
					port);
		}

	DP(NETIF_MSG_HW, "fan detection setting: %d\n", is_required);

	if (is_required == 0)
		return;

	/* Fan failure is indicated by SPIO 5 */
	bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
		       MISC_REGISTERS_SPIO_INPUT_HI_Z);

	/* set to active low mode */
	val = REG_RD(bp, MISC_REG_SPIO_INT);
	val |= ((1 << MISC_REGISTERS_SPIO_5) <<
					MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
	REG_WR(bp, MISC_REG_SPIO_INT, val);

	/* enable interrupt to signal the IGU */
	val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
	val |= (1 << MISC_REGISTERS_SPIO_5);
	REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
}
static int bnx2x_init_common(struct bnx2x *bp)
{
	u32 val, i;
#ifdef BCM_CNIC
	u32 wb_write[2];
#endif

	DP(BNX2X_MSG_MCP, "starting common init  func %d\n", BP_FUNC(bp));

	bnx2x_reset_common(bp);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);

	bnx2x_init_block(bp, MISC_BLOCK, COMMON_STAGE);
	if (CHIP_IS_E1H(bp))
		REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp));

	REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
	msleep(30);
	REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);

	bnx2x_init_block(bp, PXP_BLOCK, COMMON_STAGE);
	if (CHIP_IS_E1(bp)) {
		/* enable HW interrupt from PXP on USDM overflow
		   bit 16 on INT_MASK_0 */
		REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
	}

	bnx2x_init_block(bp, PXP2_BLOCK, COMMON_STAGE);
	bnx2x_init_pxp(bp);

#ifdef __BIG_ENDIAN
	REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
	REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
	REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
	REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
	REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
	/* make sure this value is 0 */
	REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);

/*	REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
	REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
	REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
	REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
	REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
#endif

	REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
#ifdef BCM_CNIC
	REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
	REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
	REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
#endif

	if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
		REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);

	/* let the HW do it's magic ... */
	msleep(100);
	/* finish PXP init */
	val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
	if (val != 1) {
		BNX2X_ERR("PXP2 CFG failed\n");
		return -EBUSY;
	}
	val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
	if (val != 1) {
		BNX2X_ERR("PXP2 RD_INIT failed\n");
		return -EBUSY;
	}

	REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
	REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);

	bnx2x_init_block(bp, DMAE_BLOCK, COMMON_STAGE);

	/* clean the DMAE memory */
	bp->dmae_ready = 1;
	bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);

	bnx2x_init_block(bp, TCM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, UCM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, CCM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, XCM_BLOCK, COMMON_STAGE);

	bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
	bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
	bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
	bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);

	bnx2x_init_block(bp, QM_BLOCK, COMMON_STAGE);

#ifdef BCM_CNIC
	wb_write[0] = 0;
	wb_write[1] = 0;
	for (i = 0; i < 64; i++) {
		REG_WR(bp, QM_REG_BASEADDR + i*4, 1024 * 4 * (i%16));
		bnx2x_init_ind_wr(bp, QM_REG_PTRTBL + i*8, wb_write, 2);

		if (CHIP_IS_E1H(bp)) {
			REG_WR(bp, QM_REG_BASEADDR_EXT_A + i*4, 1024*4*(i%16));
			bnx2x_init_ind_wr(bp, QM_REG_PTRTBL_EXT_A + i*8,
					  wb_write, 2);
		}
	}
#endif
	/* soft reset pulse */
	REG_WR(bp, QM_REG_SOFT_RESET, 1);
	REG_WR(bp, QM_REG_SOFT_RESET, 0);

#ifdef BCM_CNIC
	bnx2x_init_block(bp, TIMERS_BLOCK, COMMON_STAGE);
#endif

	bnx2x_init_block(bp, DQ_BLOCK, COMMON_STAGE);
	REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT);
	if (!CHIP_REV_IS_SLOW(bp)) {
		/* enable hw interrupt from doorbell Q */
		REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
	}

	bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
	REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
#ifndef BCM_CNIC
	/* set NIC mode */
	REG_WR(bp, PRS_REG_NIC_MODE, 1);
#endif
	if (CHIP_IS_E1H(bp))
		REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));

	bnx2x_init_block(bp, TSDM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, CSDM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, USDM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, XSDM_BLOCK, COMMON_STAGE);

	bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
	bnx2x_init_fill(bp, USEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
	bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
	bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));

	bnx2x_init_block(bp, TSEM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, USEM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, CSEM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, XSEM_BLOCK, COMMON_STAGE);

	/* sync semi rtc */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
	       0x80000000);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
	       0x80000000);

	bnx2x_init_block(bp, UPB_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, XPB_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, PBF_BLOCK, COMMON_STAGE);

	REG_WR(bp, SRC_REG_SOFT_RST, 1);
	for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4)
		REG_WR(bp, i, random32());
	bnx2x_init_block(bp, SRCH_BLOCK, COMMON_STAGE);
#ifdef BCM_CNIC
	REG_WR(bp, SRC_REG_KEYSEARCH_0, 0x63285672);
	REG_WR(bp, SRC_REG_KEYSEARCH_1, 0x24b8f2cc);
	REG_WR(bp, SRC_REG_KEYSEARCH_2, 0x223aef9b);
	REG_WR(bp, SRC_REG_KEYSEARCH_3, 0x26001e3a);
	REG_WR(bp, SRC_REG_KEYSEARCH_4, 0x7ae91116);
	REG_WR(bp, SRC_REG_KEYSEARCH_5, 0x5ce5230b);
	REG_WR(bp, SRC_REG_KEYSEARCH_6, 0x298d8adf);
	REG_WR(bp, SRC_REG_KEYSEARCH_7, 0x6eb0ff09);
	REG_WR(bp, SRC_REG_KEYSEARCH_8, 0x1830f82f);
	REG_WR(bp, SRC_REG_KEYSEARCH_9, 0x01e46be7);
#endif
	REG_WR(bp, SRC_REG_SOFT_RST, 0);

	if (sizeof(union cdu_context) != 1024)
		/* we currently assume that a context is 1024 bytes */
		dev_alert(&bp->pdev->dev, "please adjust the size "
					  "of cdu_context(%ld)\n",
			  (long)sizeof(union cdu_context));

	bnx2x_init_block(bp, CDU_BLOCK, COMMON_STAGE);
	val = (4 << 24) + (0 << 12) + 1024;
	REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);

	bnx2x_init_block(bp, CFC_BLOCK, COMMON_STAGE);
	REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
	/* enable context validation interrupt from CFC */
	REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);

	/* set the thresholds to prevent CFC/CDU race */
	REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);

	bnx2x_init_block(bp, HC_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, MISC_AEU_BLOCK, COMMON_STAGE);

	bnx2x_init_block(bp, PXPCS_BLOCK, COMMON_STAGE);
	/* Reset PCIE errors for debug */
	REG_WR(bp, 0x2814, 0xffffffff);
	REG_WR(bp, 0x3820, 0xffffffff);

	bnx2x_init_block(bp, EMAC0_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, EMAC1_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, DBU_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, DBG_BLOCK, COMMON_STAGE);

	bnx2x_init_block(bp, NIG_BLOCK, COMMON_STAGE);
	if (CHIP_IS_E1H(bp)) {
		REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp));
		REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp));
	}

	if (CHIP_REV_IS_SLOW(bp))
		msleep(200);

	/* finish CFC init */
	val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
	if (val != 1) {
		BNX2X_ERR("CFC LL_INIT failed\n");
		return -EBUSY;
	}
	val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
	if (val != 1) {
		BNX2X_ERR("CFC AC_INIT failed\n");
		return -EBUSY;
	}
	val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
	if (val != 1) {
		BNX2X_ERR("CFC CAM_INIT failed\n");
		return -EBUSY;
	}
	REG_WR(bp, CFC_REG_DEBUG0, 0);

	/* read NIG statistic
	   to see if this is our first up since powerup */
	bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
	val = *bnx2x_sp(bp, wb_data[0]);

	/* do internal memory self test */
	if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) {
		BNX2X_ERR("internal mem self test failed\n");
		return -EBUSY;
	}

	bp->port.need_hw_lock = bnx2x_hw_lock_required(bp,
						       bp->common.shmem_base,
						       bp->common.shmem2_base);

	bnx2x_setup_fan_failure_detection(bp);

	/* clear PXP2 attentions */
	REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);

	enable_blocks_attention(bp);
	if (CHIP_PARITY_SUPPORTED(bp))
		enable_blocks_parity(bp);

	if (!BP_NOMCP(bp)) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_common_init_phy(bp, bp->common.shmem_base,
				      bp->common.shmem2_base);
		bnx2x_release_phy_lock(bp);
	} else
		BNX2X_ERR("Bootcode is missing - can not initialize link\n");

	return 0;
}
static int bnx2x_init_port(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int init_stage = port ? PORT1_STAGE : PORT0_STAGE;
	u32 low, high;
	u32 val;

	DP(BNX2X_MSG_MCP, "starting port init  port %d\n", port);

	REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);

	bnx2x_init_block(bp, PXP_BLOCK, init_stage);
	bnx2x_init_block(bp, PXP2_BLOCK, init_stage);

	bnx2x_init_block(bp, TCM_BLOCK, init_stage);
	bnx2x_init_block(bp, UCM_BLOCK, init_stage);
	bnx2x_init_block(bp, CCM_BLOCK, init_stage);
	bnx2x_init_block(bp, XCM_BLOCK, init_stage);

#ifdef BCM_CNIC
	REG_WR(bp, QM_REG_CONNNUM_0 + port*4, 1024/16 - 1);

	bnx2x_init_block(bp, TIMERS_BLOCK, init_stage);
	REG_WR(bp, TM_REG_LIN0_SCAN_TIME + port*4, 20);
	REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31);
#endif

	bnx2x_init_block(bp, DQ_BLOCK, init_stage);

	bnx2x_init_block(bp, BRB1_BLOCK, init_stage);
	if (CHIP_REV_IS_SLOW(bp) && !CHIP_IS_E1H(bp)) {
		/* no pause for emulation and FPGA */
		low = 0;
		high = 513;
	} else {
		if (IS_E1HMF(bp))
			low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
		else if (bp->dev->mtu > 4096) {
			if (bp->flags & ONE_PORT_FLAG)
				low = 160;
			else {
				val = bp->dev->mtu;
				/* (24*1024 + val*4)/256 */
				low = 96 + (val/64) + ((val % 64) ? 1 : 0);
			}
		} else
			low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160);
		high = low + 56;	/* 14*1024/256 */
	}
	REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
	REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);

	bnx2x_init_block(bp, PRS_BLOCK, init_stage);

	bnx2x_init_block(bp, TSDM_BLOCK, init_stage);
	bnx2x_init_block(bp, CSDM_BLOCK, init_stage);
	bnx2x_init_block(bp, USDM_BLOCK, init_stage);
	bnx2x_init_block(bp, XSDM_BLOCK, init_stage);

	bnx2x_init_block(bp, TSEM_BLOCK, init_stage);
	bnx2x_init_block(bp, USEM_BLOCK, init_stage);
	bnx2x_init_block(bp, CSEM_BLOCK, init_stage);
	bnx2x_init_block(bp, XSEM_BLOCK, init_stage);

	bnx2x_init_block(bp, UPB_BLOCK, init_stage);
	bnx2x_init_block(bp, XPB_BLOCK, init_stage);

	bnx2x_init_block(bp, PBF_BLOCK, init_stage);

	/* configure PBF to work without PAUSE mtu 9000 */
	REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);

	/* update threshold */
	REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
	/* update init credit */
	REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);

	/* probe changes */
	REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
	msleep(5);
	REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);

#ifdef BCM_CNIC
	bnx2x_init_block(bp, SRCH_BLOCK, init_stage);
#endif
	bnx2x_init_block(bp, CDU_BLOCK, init_stage);
	bnx2x_init_block(bp, CFC_BLOCK, init_stage);

	if (CHIP_IS_E1(bp)) {
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
	}
	bnx2x_init_block(bp, HC_BLOCK, init_stage);

	bnx2x_init_block(bp, MISC_AEU_BLOCK, init_stage);
	/* init aeu_mask_attn_func_0/1:
	 *  - SF mode: bits 3-7 are masked. only bits 0-2 are in use
	 *  - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
	 *             bits 4-7 are used for "per vn group attention" */
	REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
	       (IS_E1HMF(bp) ? 0xF7 : 0x7));

	bnx2x_init_block(bp, PXPCS_BLOCK, init_stage);
	bnx2x_init_block(bp, EMAC0_BLOCK, init_stage);
	bnx2x_init_block(bp, EMAC1_BLOCK, init_stage);
	bnx2x_init_block(bp, DBU_BLOCK, init_stage);
	bnx2x_init_block(bp, DBG_BLOCK, init_stage);

	bnx2x_init_block(bp, NIG_BLOCK, init_stage);

	REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);

	if (CHIP_IS_E1H(bp)) {
		/* 0x2 disable e1hov, 0x1 enable */
		REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
		       (IS_E1HMF(bp) ? 0x1 : 0x2));

		REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
		REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
		REG_WR(bp, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
	}

	bnx2x_init_block(bp, MCP_BLOCK, init_stage);
	bnx2x_init_block(bp, DMAE_BLOCK, init_stage);

	bp->port.need_hw_lock = bnx2x_hw_lock_required(bp,
						       bp->common.shmem_base,
						       bp->common.shmem2_base);
	if (bnx2x_fan_failure_det_req(bp, bp->common.shmem_base,
				      bp->common.shmem2_base, port)) {
		u32 reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
				       MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
		val = REG_RD(bp, reg_addr);
		val |= AEU_INPUTS_ATTN_BITS_SPIO5;
		REG_WR(bp, reg_addr, val);
	}
	bnx2x__link_reset(bp);

	return 0;
}
#define ILT_PER_FUNC		(768/2)
#define FUNC_ILT_BASE(func)	(func * ILT_PER_FUNC)
/* the phys address is shifted right 12 bits and has an added
   1=valid bit added to the 53rd bit
   then since this is a wide register(TM)
   we split it into two 32 bit writes */
#define ONCHIP_ADDR1(x)		((u32)(((u64)x >> 12) & 0xFFFFFFFF))
#define ONCHIP_ADDR2(x)		((u32)((1 << 20) | ((u64)x >> 44)))
#define PXP_ONE_ILT(x)		(((x) << 10) | x)
#define PXP_ILT_RANGE(f, l)	(((l) << 10) | f)

#ifdef BCM_CNIC
#define CNIC_ILT_LINES		127
#define CNIC_CTX_PER_ILT	16
#else
#define CNIC_ILT_LINES		0
#endif
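/*
 * Illustrative sketch (editor's, hypothetical value): how ONCHIP_ADDR1/2
 * split a 64-bit DMA address for the two 32-bit halves of the wide ILT
 * register. For addr = 0x123456789000ULL:
 *
 *	ONCHIP_ADDR1(addr) == 0x23456789   (address bits 43..12)
 *	ONCHIP_ADDR2(addr) == 0x00100001   (valid bit 20 | address bits 63..44)
 *
 * bnx2x_ilt_wr() below then writes the pair with bnx2x_wb_wr().
 */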
static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
{
	int reg;

	if (CHIP_IS_E1H(bp))
		reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
	else /* E1 */
		reg = PXP2_REG_RQ_ONCHIP_AT + index*8;

	bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
}
static int bnx2x_init_func(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	u32 addr, val;
	int i;

	DP(BNX2X_MSG_MCP, "starting func init  func %d\n", func);

	/* set MSI reconfigure capability */
	addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
	val = REG_RD(bp, addr);
	val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
	REG_WR(bp, addr, val);

	i = FUNC_ILT_BASE(func);

	/* CDU context */
	bnx2x_ilt_wr(bp, i, bnx2x_sp_mapping(bp, context));
	if (CHIP_IS_E1H(bp)) {
		REG_WR(bp, PXP2_REG_RQ_CDU_FIRST_ILT, i);
		REG_WR(bp, PXP2_REG_RQ_CDU_LAST_ILT, i + CNIC_ILT_LINES);
	} else /* E1 */
		REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4,
		       PXP_ILT_RANGE(i, i + CNIC_ILT_LINES));

#ifdef BCM_CNIC
	i += 1 + CNIC_ILT_LINES;
	bnx2x_ilt_wr(bp, i, bp->timers_mapping);
	if (CHIP_IS_E1(bp))
		REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));
	else {
		REG_WR(bp, PXP2_REG_RQ_TM_FIRST_ILT, i);
		REG_WR(bp, PXP2_REG_RQ_TM_LAST_ILT, i);
	}

	i++;
	bnx2x_ilt_wr(bp, i, bp->qm_mapping);
	if (CHIP_IS_E1(bp))
		REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));
	else {
		REG_WR(bp, PXP2_REG_RQ_QM_FIRST_ILT, i);
		REG_WR(bp, PXP2_REG_RQ_QM_LAST_ILT, i);
	}

	i++;
	bnx2x_ilt_wr(bp, i, bp->t1_mapping);
	if (CHIP_IS_E1(bp))
		REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
	else {
		REG_WR(bp, PXP2_REG_RQ_SRC_FIRST_ILT, i);
		REG_WR(bp, PXP2_REG_RQ_SRC_LAST_ILT, i);
	}

	/* tell the searcher where the T2 table is */
	REG_WR(bp, SRC_REG_COUNTFREE0 + port*4, 16*1024/64);

	bnx2x_wb_wr(bp, SRC_REG_FIRSTFREE0 + port*16,
		    U64_LO(bp->t2_mapping), U64_HI(bp->t2_mapping));

	bnx2x_wb_wr(bp, SRC_REG_LASTFREE0 + port*16,
		    U64_LO((u64)bp->t2_mapping + 16*1024 - 64),
		    U64_HI((u64)bp->t2_mapping + 16*1024 - 64));

	REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + port*4, 10);
#endif

	if (CHIP_IS_E1H(bp)) {
		bnx2x_init_block(bp, MISC_BLOCK, FUNC0_STAGE + func);
		bnx2x_init_block(bp, TCM_BLOCK, FUNC0_STAGE + func);
		bnx2x_init_block(bp, UCM_BLOCK, FUNC0_STAGE + func);
		bnx2x_init_block(bp, CCM_BLOCK, FUNC0_STAGE + func);
		bnx2x_init_block(bp, XCM_BLOCK, FUNC0_STAGE + func);
		bnx2x_init_block(bp, TSEM_BLOCK, FUNC0_STAGE + func);
		bnx2x_init_block(bp, USEM_BLOCK, FUNC0_STAGE + func);
		bnx2x_init_block(bp, CSEM_BLOCK, FUNC0_STAGE + func);
		bnx2x_init_block(bp, XSEM_BLOCK, FUNC0_STAGE + func);

		REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
		REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov);
	}

	/* HC init per function */
	if (CHIP_IS_E1H(bp)) {
		REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);

		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
	}
	bnx2x_init_block(bp, HC_BLOCK, FUNC0_STAGE + func);

	/* Reset PCIE errors for debug */
	REG_WR(bp, 0x2114, 0xffffffff);
	REG_WR(bp, 0x2120, 0xffffffff);

	bnx2x_phy_probe(&bp->link_params);

	return 0;
}
int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
{
	int rc = 0;
	int i;

	DP(BNX2X_MSG_MCP, "function %d  load_code %x\n",
	   BP_FUNC(bp), load_code);

	bp->dmae_ready = 0;
	mutex_init(&bp->dmae_mutex);
	rc = bnx2x_gunzip_init(bp);
	if (rc)
		return rc;

	switch (load_code) {
	case FW_MSG_CODE_DRV_LOAD_COMMON:
		rc = bnx2x_init_common(bp);
		if (rc)
			goto init_hw_err;
		/* no break */

	case FW_MSG_CODE_DRV_LOAD_PORT:
		bp->dmae_ready = 1;
		rc = bnx2x_init_port(bp);
		if (rc)
			goto init_hw_err;
		/* no break */

	case FW_MSG_CODE_DRV_LOAD_FUNCTION:
		bp->dmae_ready = 1;
		rc = bnx2x_init_func(bp);
		if (rc)
			goto init_hw_err;
		break;

	default:
		BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
		break;
	}

	if (!BP_NOMCP(bp)) {
		int func = BP_FUNC(bp);

		bp->fw_drv_pulse_wr_seq =
				(SHMEM_RD(bp, func_mb[func].drv_pulse_mb) &
				 DRV_PULSE_SEQ_MASK);
		DP(BNX2X_MSG_MCP, "drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
	}

	/* this needs to be done before gunzip end */
	bnx2x_zero_def_sb(bp);
	for_each_queue(bp, i)
		bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
#ifdef BCM_CNIC
	bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
#endif

init_hw_err:
	bnx2x_gunzip_end(bp);

	return rc;
}
void bnx2x_free_mem(struct bnx2x *bp)
{
	int i;

#define BNX2X_PCI_FREE(x, y, size) \
	do { \
		if (x) { \
			dma_free_coherent(&bp->pdev->dev, size, x, y); \
			x = NULL; \
			y = 0; \
		} \
	} while (0)

#define BNX2X_FREE(x) \
	do { \
		if (x) { \
			vfree(x); \
			x = NULL; \
		} \
	} while (0)

	/* fastpath */
	/* Common */
	for_each_queue(bp, i) {

		/* status blocks */
		BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
			       bnx2x_fp(bp, i, status_blk_mapping),
			       sizeof(struct host_status_block));
	}
	/* Rx */
	for_each_queue(bp, i) {

		/* fastpath rx rings: rx_buf rx_desc rx_comp */
		BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
		BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
			       bnx2x_fp(bp, i, rx_desc_mapping),
			       sizeof(struct eth_rx_bd) * NUM_RX_BD);

		BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
			       bnx2x_fp(bp, i, rx_comp_mapping),
			       sizeof(struct eth_fast_path_rx_cqe) *
			       NUM_RCQ_BD);

		/* SGE ring */
		BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
		BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
			       bnx2x_fp(bp, i, rx_sge_mapping),
			       BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
	}
	/* Tx */
	for_each_queue(bp, i) {

		/* fastpath tx rings: tx_buf tx_desc */
		BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
		BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
			       bnx2x_fp(bp, i, tx_desc_mapping),
			       sizeof(union eth_tx_bd_types) * NUM_TX_BD);
	}
	/* end of fastpath */

	BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
		       sizeof(struct host_def_status_block));

	BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
		       sizeof(struct bnx2x_slowpath));

#ifdef BCM_CNIC
	BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
	BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
	BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
	BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
	BNX2X_PCI_FREE(bp->cnic_sb, bp->cnic_sb_mapping,
		       sizeof(struct host_status_block));
#endif
	BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);

#undef BNX2X_PCI_FREE
#undef BNX2X_FREE
}
int bnx2x_alloc_mem(struct bnx2x *bp)
{
	int i;

#define BNX2X_PCI_ALLOC(x, y, size) \
	do { \
		x = dma_alloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \
		if (x == NULL) \
			goto alloc_mem_err; \
		memset(x, 0, size); \
	} while (0)

#define BNX2X_ALLOC(x, size) \
	do { \
		x = vmalloc(size); \
		if (x == NULL) \
			goto alloc_mem_err; \
		memset(x, 0, size); \
	} while (0)

	/* fastpath */
	/* Common */
	for_each_queue(bp, i) {
		bnx2x_fp(bp, i, bp) = bp;

		/* status blocks */
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
				&bnx2x_fp(bp, i, status_blk_mapping),
				sizeof(struct host_status_block));
	}
	/* Rx */
	for_each_queue(bp, i) {

		/* fastpath rx rings: rx_buf rx_desc rx_comp */
		BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
			    sizeof(struct sw_rx_bd) * NUM_RX_BD);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
				&bnx2x_fp(bp, i, rx_desc_mapping),
				sizeof(struct eth_rx_bd) * NUM_RX_BD);

		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
				&bnx2x_fp(bp, i, rx_comp_mapping),
				sizeof(struct eth_fast_path_rx_cqe) *
				NUM_RCQ_BD);

		/* SGE ring */
		BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
			    sizeof(struct sw_rx_page) * NUM_RX_SGE);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
				&bnx2x_fp(bp, i, rx_sge_mapping),
				BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
	}
	/* Tx */
	for_each_queue(bp, i) {

		/* fastpath tx rings: tx_buf tx_desc */
		BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
			    sizeof(struct sw_tx_bd) * NUM_TX_BD);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
				&bnx2x_fp(bp, i, tx_desc_mapping),
				sizeof(union eth_tx_bd_types) * NUM_TX_BD);
	}
	/* end of fastpath */

	BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
			sizeof(struct host_def_status_block));

	BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
			sizeof(struct bnx2x_slowpath));

#ifdef BCM_CNIC
	BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);

	/* allocate searcher T2 table
	   we allocate 1/4 of alloc num for T2
	   (which is not entered into the ILT) */
	BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);

	/* Initialize T2 (for 1024 connections) */
	for (i = 0; i < 16*1024; i += 64)
		*(u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;

	/* Timer block array (8*MAX_CONN) phys uncached for now 1024 conns */
	BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);

	/* QM queues (128*MAX_CONN) */
	BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);

	BNX2X_PCI_ALLOC(bp->cnic_sb, &bp->cnic_sb_mapping,
			sizeof(struct host_status_block));
#endif

	/* Slow path ring */
	BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);

	return 0;

alloc_mem_err:
	bnx2x_free_mem(bp);
	return -ENOMEM;

#undef BNX2X_PCI_ALLOC
#undef BNX2X_ALLOC
}
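/*
 * Editor's note on the T2 init loop above (illustrative): T2 is 16 KiB of
 * 64-byte elements, and the last 8 bytes of each element (byte offset 56)
 * hold the DMA address of the next element, forming a singly linked free
 * list for the searcher. So the element at offset 0 stores
 * t2_mapping + 64, the element at offset 64 stores t2_mapping + 128, and
 * the final element points one past the end of the table.
 */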
/*
 * Init service functions
 */

/**
 * Sets a MAC in a CAM for a few L2 Clients for E1 chip
 *
 * @param bp driver descriptor
 * @param set set or clear an entry (1 or 0)
 * @param mac pointer to a buffer containing a MAC
 * @param cl_bit_vec bit vector of clients to register a MAC for
 * @param cam_offset offset in a CAM to use
 * @param with_bcast set broadcast MAC as well
 */
static void bnx2x_set_mac_addr_e1_gen(struct bnx2x *bp, int set, u8 *mac,
				      u32 cl_bit_vec, u8 cam_offset,
				      u8 with_bcast)
{
	struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
	int port = BP_PORT(bp);

	/* CAM allocation
	 * unicasts 0-31:port0 32-63:port1
	 * multicast 64-127:port0 128-191:port1
	 */
	config->hdr.length = 1 + (with_bcast ? 1 : 0);
	config->hdr.offset = cam_offset;
	config->hdr.client_id = 0xff;
	config->hdr.reserved1 = 0;

	/* primary MAC */
	config->config_table[0].cam_entry.msb_mac_addr =
					swab16(*(u16 *)&mac[0]);
	config->config_table[0].cam_entry.middle_mac_addr =
					swab16(*(u16 *)&mac[2]);
	config->config_table[0].cam_entry.lsb_mac_addr =
					swab16(*(u16 *)&mac[4]);
	config->config_table[0].cam_entry.flags = cpu_to_le16(port);
	if (set)
		config->config_table[0].target_table_entry.flags = 0;
	else
		CAM_INVALIDATE(config->config_table[0]);
	config->config_table[0].target_table_entry.clients_bit_vector =
						cpu_to_le32(cl_bit_vec);
	config->config_table[0].target_table_entry.vlan_id = 0;

	DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)\n",
	   (set ? "setting" : "clearing"),
	   config->config_table[0].cam_entry.msb_mac_addr,
	   config->config_table[0].cam_entry.middle_mac_addr,
	   config->config_table[0].cam_entry.lsb_mac_addr);

	/* broadcast */
	if (with_bcast) {
		config->config_table[1].cam_entry.msb_mac_addr =
			cpu_to_le16(0xffff);
		config->config_table[1].cam_entry.middle_mac_addr =
			cpu_to_le16(0xffff);
		config->config_table[1].cam_entry.lsb_mac_addr =
			cpu_to_le16(0xffff);
		config->config_table[1].cam_entry.flags = cpu_to_le16(port);
		if (set)
			config->config_table[1].target_table_entry.flags =
					TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
		else
			CAM_INVALIDATE(config->config_table[1]);
		config->config_table[1].target_table_entry.clients_bit_vector =
						cpu_to_le32(cl_bit_vec);
		config->config_table[1].target_table_entry.vlan_id = 0;
	}

	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
		      U64_HI(bnx2x_sp_mapping(bp, mac_config)),
		      U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
}
/**
 * Sets a MAC in a CAM for a few L2 Clients for E1H chip
 *
 * @param bp driver descriptor
 * @param set set or clear an entry (1 or 0)
 * @param mac pointer to a buffer containing a MAC
 * @param cl_bit_vec bit vector of clients to register a MAC for
 * @param cam_offset offset in a CAM to use
 */
static void bnx2x_set_mac_addr_e1h_gen(struct bnx2x *bp, int set, u8 *mac,
				       u32 cl_bit_vec, u8 cam_offset)
{
	struct mac_configuration_cmd_e1h *config =
		(struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config);

	config->hdr.length = 1;
	config->hdr.offset = cam_offset;
	config->hdr.client_id = 0xff;
	config->hdr.reserved1 = 0;

	/* primary MAC */
	config->config_table[0].msb_mac_addr =
					swab16(*(u16 *)&mac[0]);
	config->config_table[0].middle_mac_addr =
					swab16(*(u16 *)&mac[2]);
	config->config_table[0].lsb_mac_addr =
					swab16(*(u16 *)&mac[4]);
	config->config_table[0].clients_bit_vector =
					cpu_to_le32(cl_bit_vec);
	config->config_table[0].vlan_id = 0;
	config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov);
	if (set)
		config->config_table[0].flags = BP_PORT(bp);
	else
		config->config_table[0].flags =
				MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE;

	DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)  E1HOV %d  CLID mask %d\n",
	   (set ? "setting" : "clearing"),
	   config->config_table[0].msb_mac_addr,
	   config->config_table[0].middle_mac_addr,
	   config->config_table[0].lsb_mac_addr, bp->e1hov, cl_bit_vec);

	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
		      U64_HI(bnx2x_sp_mapping(bp, mac_config)),
		      U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
}
static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
			     int *state_p, int poll)
{
	/* can take a while if any port is running */
	int cnt = 5000;

	DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
	   poll ? "polling" : "waiting", state, idx);

	might_sleep();
	while (cnt--) {
		if (poll) {
			bnx2x_rx_int(bp->fp, 10);
			/* if index is different from 0
			 * the reply for some commands will
			 * be on the non default queue
			 */
			if (idx)
				bnx2x_rx_int(&bp->fp[idx], 10);
		}

		mb(); /* state is changed by bnx2x_sp_event() */
		if (*state_p == state) {
#ifdef BNX2X_STOP_ON_ERROR
			DP(NETIF_MSG_IFUP, "exit  (cnt %d)\n", 5000 - cnt);
#endif
			return 0;
		}

		msleep(1);

		if (bp->panic)
			return -EIO;
	}

	/* timeout! */
	BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
		  poll ? "polling" : "waiting", state, idx);
#ifdef BNX2X_STOP_ON_ERROR
	bnx2x_panic();
#endif

	return -EBUSY;
}
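/*
 * Usage sketch (mirrors bnx2x_setup_multi() below): callers arm a state
 * variable, post a ramrod and then wait for bnx2x_sp_event() to flip the
 * state from the completion path:
 *
 *	fp->state = BNX2X_FP_STATE_OPENING;
 *	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0,
 *		      fp->cl_id, 0);
 *	rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
 *			       &(fp->state), 0);
 */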
void bnx2x_set_eth_mac_addr_e1h(struct bnx2x *bp, int set)
{
	bp->set_mac_pending++;
	smp_wmb();

	bnx2x_set_mac_addr_e1h_gen(bp, set, bp->dev->dev_addr,
				   (1 << bp->fp->cl_id), BP_FUNC(bp));

	/* Wait for a completion */
	bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
}

void bnx2x_set_eth_mac_addr_e1(struct bnx2x *bp, int set)
{
	bp->set_mac_pending++;
	smp_wmb();

	bnx2x_set_mac_addr_e1_gen(bp, set, bp->dev->dev_addr,
				  (1 << bp->fp->cl_id), (BP_PORT(bp) ? 32 : 0),
				  1);

	/* Wait for a completion */
	bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
}
#ifdef BCM_CNIC
/**
 * Set iSCSI MAC(s) at the next entries in the CAM after the ETH
 * MAC(s). This function will wait until the ramrod completion
 * returns.
 *
 * @param bp driver handle
 * @param set set or clear the CAM entry
 *
 * @return 0 if success, -ENODEV if ramrod doesn't return.
 */
int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp, int set)
{
	u32 cl_bit_vec = (1 << BCM_ISCSI_ETH_CL_ID);

	bp->set_mac_pending++;
	smp_wmb();

	/* Send a SET_MAC ramrod */
	if (CHIP_IS_E1(bp))
		bnx2x_set_mac_addr_e1_gen(bp, set, bp->iscsi_mac,
					  cl_bit_vec,
					  (BP_PORT(bp) ? 32 : 0) + 2, 1);
	else
		/* CAM allocation for E1H
		 * unicasts: by func number
		 * multicast: 20+FUNC*20, 20 each
		 */
		bnx2x_set_mac_addr_e1h_gen(bp, set, bp->iscsi_mac,
					   cl_bit_vec,
					   E1H_FUNC_MAX + BP_FUNC(bp));

	/* Wait for a completion when setting */
	bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);

	return 0;
}
#endif
int bnx2x_setup_leading(struct bnx2x *bp)
{
	int rc;

	/* reset IGU state */
	bnx2x_ack_sb(bp, bp->fp[0].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);

	/* SETUP ramrod */
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0);

	return rc;
}
int bnx2x_setup_multi(struct bnx2x *bp, int index)
{
	struct bnx2x_fastpath *fp = &bp->fp[index];

	/* reset IGU state */
	bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);

	/* SETUP ramrod */
	fp->state = BNX2X_FP_STATE_OPENING;
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0,
		      fp->cl_id, 0);

	/* Wait for completion */
	return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
				 &(fp->state), 0);
}
void bnx2x_set_num_queues_msix(struct bnx2x *bp)
{
	switch (bp->multi_mode) {
	case ETH_RSS_MODE_DISABLED:
		bp->num_queues = 1;
		break;

	case ETH_RSS_MODE_REGULAR:
		if (num_queues)
			bp->num_queues = min_t(u32, num_queues,
					       BNX2X_MAX_QUEUES(bp));
		else
			bp->num_queues = min_t(u32, num_online_cpus(),
					       BNX2X_MAX_QUEUES(bp));
		break;

	default:
		bp->num_queues = 1;
		break;
	}
}
static int bnx2x_stop_multi(struct bnx2x *bp, int index)
{
	struct bnx2x_fastpath *fp = &bp->fp[index];
	int rc;

	/* halt the connection */
	fp->state = BNX2X_FP_STATE_HALTING;
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, fp->cl_id, 0);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
			       &(fp->state), 1);
	if (rc) /* timeout */
		return rc;

	/* delete cfc entry */
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index,
			       &(fp->state), 1);
	return rc;
}
static int bnx2x_stop_leading(struct bnx2x *bp)
{
	__le16 dsb_sp_prod_idx;
	/* if the other port is handling traffic,
	   this can take a lot of time */
	int cnt = 500;
	int rc;

	might_sleep();

	/* Send HALT ramrod */
	bp->fp[0].state = BNX2X_FP_STATE_HALTING;
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, bp->fp->cl_id, 0);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
			       &(bp->fp[0].state), 1);
	if (rc) /* timeout */
		return rc;

	dsb_sp_prod_idx = *bp->dsb_sp_prod;

	/* Send PORT_DELETE ramrod */
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);

	/* Wait for completion to arrive on default status block
	   we are going to reset the chip anyway
	   so there is not much to do if this times out
	 */
	while (dsb_sp_prod_idx == *bp->dsb_sp_prod) {
		if (!cnt) {
			DP(NETIF_MSG_IFDOWN, "timeout waiting for port del "
			   "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
			   *bp->dsb_sp_prod, dsb_sp_prod_idx);
#ifdef BNX2X_STOP_ON_ERROR
			bnx2x_panic();
#endif
			rc = -EBUSY;
			break;
		}
		cnt--;
		msleep(1);
		rmb(); /* Refresh the dsb_sp_prod */
	}
	bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
	bp->fp[0].state = BNX2X_FP_STATE_CLOSED;

	return rc;
}
static void bnx2x_reset_func(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int base, i;

	/* Configure IGU */
	REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
	REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);

#ifdef BCM_CNIC
	/* Disable Timer scan */
	REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
	/*
	 * Wait for at least 10ms and up to 2 second for the timers scan to
	 * complete
	 */
	for (i = 0; i < 200; i++) {
		msleep(10);
		if (!REG_RD(bp, TM_REG_LIN0_SCAN_ON + port*4))
			break;
	}
#endif
	/* Clear ILT */
	base = FUNC_ILT_BASE(func);
	for (i = base; i < base + ILT_PER_FUNC; i++)
		bnx2x_ilt_wr(bp, i, 0);
}
static void bnx2x_reset_port(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 val;

	REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);

	/* Do not rcv packets to BRB */
	REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
	/* Do not direct rcv packets that are not for MCP to the BRB */
	REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
			   NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);

	/* Configure AEU */
	REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);

	msleep(100);
	/* Check for BRB port occupancy */
	val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
	if (val)
		DP(NETIF_MSG_IFDOWN,
		   "BRB1 is not empty  %d blocks are occupied\n", val);

	/* TODO: Close Doorbell port? */
}
static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
{
	DP(BNX2X_MSG_MCP, "function %d  reset_code %x\n",
	   BP_FUNC(bp), reset_code);

	switch (reset_code) {
	case FW_MSG_CODE_DRV_UNLOAD_COMMON:
		bnx2x_reset_port(bp);
		bnx2x_reset_func(bp);
		bnx2x_reset_common(bp);
		break;

	case FW_MSG_CODE_DRV_UNLOAD_PORT:
		bnx2x_reset_port(bp);
		bnx2x_reset_func(bp);
		break;

	case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
		bnx2x_reset_func(bp);
		break;

	default:
		BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
		break;
	}
}
void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode)
{
	int port = BP_PORT(bp);
	u32 reset_code = 0;
	int i, cnt, rc;

	/* Wait until tx fastpath tasks complete */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		cnt = 1000;
		while (bnx2x_has_tx_work_unload(fp)) {

			if (!cnt) {
				BNX2X_ERR("timeout waiting for queue[%d]\n",
					  i);
#ifdef BNX2X_STOP_ON_ERROR
				bnx2x_panic();
				return;
#else
				break;
#endif
			}
			cnt--;
			msleep(1);
		}
	}
	/* Give HW time to discard old tx messages */
	msleep(1);

	if (CHIP_IS_E1(bp)) {
		struct mac_configuration_cmd *config =
						bnx2x_sp(bp, mcast_config);

		bnx2x_set_eth_mac_addr_e1(bp, 0);

		for (i = 0; i < config->hdr.length; i++)
			CAM_INVALIDATE(config->config_table[i]);

		config->hdr.length = i;
		if (CHIP_REV_IS_SLOW(bp))
			config->hdr.offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
		else
			config->hdr.offset = BNX2X_MAX_MULTICAST*(1 + port);
		config->hdr.client_id = bp->fp->cl_id;
		config->hdr.reserved1 = 0;

		bp->set_mac_pending++;
		smp_wmb();

		bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
			      U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
			      U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);
	} else { /* E1H */
		REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);

		bnx2x_set_eth_mac_addr_e1h(bp, 0);

		for (i = 0; i < MC_HASH_SIZE; i++)
			REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);

		REG_WR(bp, MISC_REG_E1HMF_MODE, 0);
	}
#ifdef BCM_CNIC
	/* Clear iSCSI L2 MAC */
	mutex_lock(&bp->cnic_mutex);
	if (bp->cnic_flags & BNX2X_CNIC_FLAG_MAC_SET) {
		bnx2x_set_iscsi_eth_mac_addr(bp, 0);
		bp->cnic_flags &= ~BNX2X_CNIC_FLAG_MAC_SET;
	}
	mutex_unlock(&bp->cnic_mutex);
#endif

	if (unload_mode == UNLOAD_NORMAL)
		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;

	else if (bp->flags & NO_WOL_FLAG)
		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;

	else if (bp->wol) {
		u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
		u8 *mac_addr = bp->dev->dev_addr;
		u32 val;
		/* The mac address is written to entries 1-4 to
		   preserve entry 0 which is used by the PMF */
		u8 entry = (BP_E1HVN(bp) + 1)*8;

		val = (mac_addr[0] << 8) | mac_addr[1];
		EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);

		val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
		      (mac_addr[4] << 8) | mac_addr[5];
		EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);

		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;

	} else
		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;

	/* Close multi and leading connections
	   Completions for ramrods are collected in a synchronous way */
	for_each_nondefault_queue(bp, i)
		if (bnx2x_stop_multi(bp, i))
			goto unload_error;

	rc = bnx2x_stop_leading(bp);
	if (rc) {
		BNX2X_ERR("Stop leading failed!\n");
#ifdef BNX2X_STOP_ON_ERROR
		return;
#else
		goto unload_error;
#endif
	}

unload_error:
	if (!BP_NOMCP(bp))
		reset_code = bnx2x_fw_command(bp, reset_code, 0);
	else {
		DP(NETIF_MSG_IFDOWN, "NO MCP - load counts      %d, %d, %d\n",
		   load_count[0], load_count[1], load_count[2]);
		load_count[0]--;
		load_count[1 + port]--;
		DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts  %d, %d, %d\n",
		   load_count[0], load_count[1], load_count[2]);
		if (load_count[0] == 0)
			reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
		else if (load_count[1 + port] == 0)
			reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
		else
			reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
	}

	if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
	    (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
		bnx2x__link_reset(bp);

	/* Reset the chip */
	bnx2x_reset_chip(bp, reset_code);

	/* Report UNLOAD_DONE to MCP */
	if (!BP_NOMCP(bp))
		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
}
void bnx2x_disable_close_the_gate(struct bnx2x *bp)
{
	u32 val;

	DP(NETIF_MSG_HW, "Disabling \"close the gates\"\n");

	if (CHIP_IS_E1(bp)) {
		int port = BP_PORT(bp);
		u32 addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
			MISC_REG_AEU_MASK_ATTN_FUNC_0;

		val = REG_RD(bp, addr);
		val &= ~(0x300);
		REG_WR(bp, addr, val);
	} else if (CHIP_IS_E1H(bp)) {
		val = REG_RD(bp, MISC_REG_AEU_GENERAL_MASK);
		val &= ~(MISC_AEU_GENERAL_MASK_REG_AEU_PXP_CLOSE_MASK |
			 MISC_AEU_GENERAL_MASK_REG_AEU_NIG_CLOSE_MASK);
		REG_WR(bp, MISC_REG_AEU_GENERAL_MASK, val);
	}
}
/* Close gates #2, #3 and #4: */
static void bnx2x_set_234_gates(struct bnx2x *bp, bool close)
{
	u32 val, addr;

	/* Gates #2 and #4a are closed/opened for "not E1" only */
	if (!CHIP_IS_E1(bp)) {
		/* #4 */
		val = REG_RD(bp, PXP_REG_HST_DISCARD_DOORBELLS);
		REG_WR(bp, PXP_REG_HST_DISCARD_DOORBELLS,
		       close ? (val | 0x1) : (val & (~(u32)1)));
		/* #2 */
		val = REG_RD(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES);
		REG_WR(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES,
		       close ? (val | 0x1) : (val & (~(u32)1)));
	}

	/* #3 */
	addr = BP_PORT(bp) ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	val = REG_RD(bp, addr);
	REG_WR(bp, addr, (!close) ? (val | 0x1) : (val & (~(u32)1)));

	DP(NETIF_MSG_HW, "%s gates #2, #3 and #4\n",
	   close ? "closing" : "opening");
	mmiowb();
}
#define SHARED_MF_CLP_MAGIC	0x80000000 /* `magic' bit */

static void bnx2x_clp_reset_prep(struct bnx2x *bp, u32 *magic_val)
{
	/* Do some magic... */
	u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
	*magic_val = val & SHARED_MF_CLP_MAGIC;
	MF_CFG_WR(bp, shared_mf_config.clp_mb, val | SHARED_MF_CLP_MAGIC);
}

/* Restore the value of the `magic' bit.
 *
 * @param bp driver handle
 * @param magic_val Old value of the `magic' bit.
 */
static void bnx2x_clp_reset_done(struct bnx2x *bp, u32 magic_val)
{
	/* Restore the `magic' bit value... */
	u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
	MF_CFG_WR(bp, shared_mf_config.clp_mb,
		  (val & (~SHARED_MF_CLP_MAGIC)) | magic_val);
}
/* Prepares for MCP reset: takes care of CLP configurations.
 *
 * @param bp driver handle
 * @param magic_val Old value of 'magic' bit.
 */
static void bnx2x_reset_mcp_prep(struct bnx2x *bp, u32 *magic_val)
{
	u32 shmem;
	u32 validity_offset;

	DP(NETIF_MSG_HW, "Starting\n");

	/* Set `magic' bit in order to save MF config */
	if (!CHIP_IS_E1(bp))
		bnx2x_clp_reset_prep(bp, magic_val);

	/* Get shmem offset */
	shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
	validity_offset = offsetof(struct shmem_region, validity_map[0]);

	/* Clear validity map flags */
	if (shmem > 0)
		REG_WR(bp, shmem + validity_offset, 0);
}
#define MCP_TIMEOUT	5000	/* 5 seconds (in ms) */
#define MCP_ONE_TIMEOUT	100	/* 100 ms */

/* Waits for MCP_ONE_TIMEOUT or MCP_ONE_TIMEOUT*10,
 * depending on the HW type.
 *
 * @param bp driver handle
 */
static inline void bnx2x_mcp_wait_one(struct bnx2x *bp)
{
	/* special handling for emulation and FPGA,
	   wait 10 times longer */
	if (CHIP_REV_IS_SLOW(bp))
		msleep(MCP_ONE_TIMEOUT*10);
	else
		msleep(MCP_ONE_TIMEOUT);
}
static int bnx2x_reset_mcp_comp(struct bnx2x *bp, u32 magic_val)
{
	u32 shmem, cnt, validity_offset, val;
	int rc = 0;

	msleep(100);

	/* Get shmem offset */
	shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
	if (shmem == 0) {
		BNX2X_ERR("Shmem 0 return failure\n");
		rc = -ENOTTY;
		goto exit_lbl;
	}

	validity_offset = offsetof(struct shmem_region, validity_map[0]);

	/* Wait for MCP to come up */
	for (cnt = 0; cnt < (MCP_TIMEOUT / MCP_ONE_TIMEOUT); cnt++) {
		/* TBD: it's best to check validity map of last port.
		 * currently checks on port 0.
		 */
		val = REG_RD(bp, shmem + validity_offset);
		DP(NETIF_MSG_HW, "shmem 0x%x validity map(0x%x)=0x%x\n", shmem,
		   shmem + validity_offset, val);

		/* check that shared memory is valid. */
		if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		    == (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
			break;

		bnx2x_mcp_wait_one(bp);
	}

	DP(NETIF_MSG_HW, "Cnt=%d Shmem validity map 0x%x\n", cnt, val);

	/* Check that shared memory is valid. This indicates that MCP is up. */
	if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) !=
	    (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) {
		BNX2X_ERR("Shmem signature not present. MCP is not up !!\n");
		rc = -ENOTTY;
	}

exit_lbl:
	/* Restore the `magic' bit value */
	if (!CHIP_IS_E1(bp))
		bnx2x_clp_reset_done(bp, magic_val);

	return rc;
}
static void bnx2x_pxp_prep(struct bnx2x *bp)
{
	if (!CHIP_IS_E1(bp)) {
		REG_WR(bp, PXP2_REG_RD_START_INIT, 0);
		REG_WR(bp, PXP2_REG_RQ_RBC_DONE, 0);
		REG_WR(bp, PXP2_REG_RQ_CFG_DONE, 0);
		mmiowb();
	}
}
/*
 * Reset the whole chip except for:
 *      - PCIE core
 *      - PCI Glue, PSWHST, PXP/PXP2 RF (all controlled by
 *              one reset bit)
 *      - IGU
 *      - MISC (including AEU)
 *      - GRC
 *      - RBCN, RBCP
 */
static void bnx2x_process_kill_chip_reset(struct bnx2x *bp)
{
	u32 not_reset_mask1, reset_mask1, not_reset_mask2, reset_mask2;

	not_reset_mask1 =
		MISC_REGISTERS_RESET_REG_1_RST_HC |
		MISC_REGISTERS_RESET_REG_1_RST_PXPV |
		MISC_REGISTERS_RESET_REG_1_RST_PXP;

	not_reset_mask2 =
		MISC_REGISTERS_RESET_REG_2_RST_MDIO |
		MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE |
		MISC_REGISTERS_RESET_REG_2_RST_EMAC1_HARD_CORE |
		MISC_REGISTERS_RESET_REG_2_RST_MISC_CORE |
		MISC_REGISTERS_RESET_REG_2_RST_RBCN |
		MISC_REGISTERS_RESET_REG_2_RST_GRC |
		MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_REG_HARD_CORE |
		MISC_REGISTERS_RESET_REG_2_RST_MCP_N_HARD_CORE_RST_B;

	reset_mask1 = 0xffffffff;

	if (CHIP_IS_E1(bp))
		reset_mask2 = 0xffff;
	else
		reset_mask2 = 0x1ffff;

	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
	       reset_mask1 & (~not_reset_mask1));
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
	       reset_mask2 & (~not_reset_mask2));

	barrier();
	mmiowb();

	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, reset_mask1);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, reset_mask2);
	mmiowb();
}
static int bnx2x_process_kill(struct bnx2x *bp)
{
	int cnt = 1000;
	u32 val = 0;
	u32 sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1, pgl_exp_rom2;


	/* Empty the Tetris buffer, wait for 1s */
	do {
		sr_cnt = REG_RD(bp, PXP2_REG_RD_SR_CNT);
		blk_cnt = REG_RD(bp, PXP2_REG_RD_BLK_CNT);
		port_is_idle_0 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_0);
		port_is_idle_1 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_1);
		pgl_exp_rom2 = REG_RD(bp, PXP2_REG_PGL_EXP_ROM2);
		if ((sr_cnt == 0x7e) && (blk_cnt == 0xa0) &&
		    ((port_is_idle_0 & 0x1) == 0x1) &&
		    ((port_is_idle_1 & 0x1) == 0x1) &&
		    (pgl_exp_rom2 == 0xffffffff))
			break;
		msleep(1);
	} while (cnt-- > 0);

	if (cnt <= 0) {
		DP(NETIF_MSG_HW, "Tetris buffer didn't get empty or there"
		   " are still outstanding read requests after 1s!\n");
		DP(NETIF_MSG_HW, "sr_cnt=0x%08x, blk_cnt=0x%08x,"
		   " port_is_idle_0=0x%08x,"
		   " port_is_idle_1=0x%08x, pgl_exp_rom2=0x%08x\n",
		   sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1,
		   pgl_exp_rom2);
		return -EAGAIN;
	}

	barrier();

	/* Close gates #2, #3 and #4 */
	bnx2x_set_234_gates(bp, true);

	/* TBD: Indicate that "process kill" is in progress to MCP */

	/* Clear "unprepared" bit */
	REG_WR(bp, MISC_REG_UNPREPARED, 0);
	barrier();

	/* Make sure all is written to the chip before the reset */
	mmiowb();

	/* Wait for 1ms to empty GLUE and PCI-E core queues,
	 * PSWHST, GRC and PSWRD Tetris buffer.
	 */
	msleep(1);

	/* Prepare to chip reset: */
	/* MCP */
	bnx2x_reset_mcp_prep(bp, &val);

	/* PXP */
	bnx2x_pxp_prep(bp);
	barrier();

	/* reset the chip */
	bnx2x_process_kill_chip_reset(bp);
	barrier();

	/* Recover after reset: */
	/* MCP */
	if (bnx2x_reset_mcp_comp(bp, val))
		return -EAGAIN;

	/* PXP */
	bnx2x_pxp_prep(bp);

	/* Open the gates #2, #3 and #4 */
	bnx2x_set_234_gates(bp, false);

	/* TBD: IGU/AEU preparation bring back the AEU/IGU to a
	 * reset state, re-enable attentions. */

	return 0;
}
static int bnx2x_leader_reset(struct bnx2x *bp)
{
	int rc = 0;

	/* Try to recover after the failure */
	if (bnx2x_process_kill(bp)) {
		printk(KERN_ERR "%s: Something bad happened! Aii!\n",
		       bp->dev->name);
		rc = -EAGAIN;
		goto exit_leader_reset;
	}

	/* Clear "reset is in progress" bit and update the driver state */
	bnx2x_set_reset_done(bp);
	bp->recovery_state = BNX2X_RECOVERY_DONE;

exit_leader_reset:
	bp->is_leader = 0;
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESERVED_08);
	smp_wmb();
	return rc;
}
/* Assumption: runs under rtnl lock. This together with the fact
 * that it's called only from bnx2x_reset_task() ensure that it
 * will never be called when netif_running(bp->dev) is false.
 */
static void bnx2x_parity_recover(struct bnx2x *bp)
{
	DP(NETIF_MSG_HW, "Handling parity\n");
	while (1) {
		switch (bp->recovery_state) {
		case BNX2X_RECOVERY_INIT:
			DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_INIT\n");
			/* Try to get a LEADER_LOCK HW lock */
			if (bnx2x_trylock_hw_lock(bp,
				HW_LOCK_RESOURCE_RESERVED_08))
				bp->is_leader = 1;

			/* Stop the driver */
			/* If interface has been removed - break */
			if (bnx2x_nic_unload(bp, UNLOAD_RECOVERY))
				return;

			bp->recovery_state = BNX2X_RECOVERY_WAIT;
			/* Ensure "is_leader" and "recovery_state"
			 *  update values are seen on other CPUs
			 */
			smp_wmb();
			break;

		case BNX2X_RECOVERY_WAIT:
			DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_WAIT\n");
			if (bp->is_leader) {
				u32 load_counter = bnx2x_get_load_cnt(bp);
				if (load_counter) {
					/* Wait until all other functions get
					 * down.
					 */
					schedule_delayed_work(&bp->reset_task,
							      HZ/10);
					return;
				} else {
					/* If all other functions got down -
					 * try to bring the chip back to
					 * normal. In any case it's an exit
					 * point for a leader.
					 */
					if (bnx2x_leader_reset(bp) ||
					    bnx2x_nic_load(bp, LOAD_NORMAL)) {
						printk(KERN_ERR "%s: Recovery "
						"has failed. Power cycle is "
						"needed.\n", bp->dev->name);
						/* Disconnect this device */
						netif_device_detach(bp->dev);
						/* Block ifup for all function
						 * of this ASIC until
						 * "process kill" or power
						 * cycle.
						 */
						bnx2x_set_reset_in_progress(bp);
						/* Shut down the power */
						bnx2x_set_power_state(bp,
								PCI_D3hot);
						return;
					}

					return;
				}
			} else { /* non-leader */
				if (!bnx2x_reset_is_done(bp)) {
					/* Try to get a LEADER_LOCK HW lock as
					 * long as a former leader may have
					 * been unloaded by the user or
					 * released a leadership by another
					 * reason.
					 */
					if (bnx2x_trylock_hw_lock(bp,
					    HW_LOCK_RESOURCE_RESERVED_08)) {
						/* I'm a leader now! Restart a
						 * switch case.
						 */
						bp->is_leader = 1;
						break;
					}

					schedule_delayed_work(&bp->reset_task,
							      HZ/10);
					return;

				} else { /* A leader has completed
					  * the "process kill". It's an exit
					  * point for a non-leader.
					  */
					bnx2x_nic_load(bp, LOAD_NORMAL);
					bp->recovery_state =
						BNX2X_RECOVERY_DONE;
					smp_wmb();
					return;
				}
			}
		default:
			return;
		}
	}
}
/* bnx2x_nic_unload() flushes the bnx2x_wq, thus reset task is
 * scheduled on a general queue in order to prevent a dead lock.
 */
static void bnx2x_reset_task(struct work_struct *work)
{
	struct bnx2x *bp = container_of(work, struct bnx2x, reset_task.work);

#ifdef BNX2X_STOP_ON_ERROR
	BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
		  " so reset not done to allow debug dump,\n"
	 KERN_ERR " you will need to reboot when done\n");
	return;
#endif

	rtnl_lock();

	if (!netif_running(bp->dev))
		goto reset_task_exit;

	if (unlikely(bp->recovery_state != BNX2X_RECOVERY_DONE))
		bnx2x_parity_recover(bp);
	else {
		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
		bnx2x_nic_load(bp, LOAD_NORMAL);
	}

reset_task_exit:
	rtnl_unlock();
}
/* end of nic load/unload */

/*
 * Init service functions
 */

static inline u32 bnx2x_get_pretend_reg(struct bnx2x *bp, int func)
{
	switch (func) {
	case 0: return PXP2_REG_PGL_PRETEND_FUNC_F0;
	case 1:	return PXP2_REG_PGL_PRETEND_FUNC_F1;
	case 2:	return PXP2_REG_PGL_PRETEND_FUNC_F2;
	case 3:	return PXP2_REG_PGL_PRETEND_FUNC_F3;
	case 4:	return PXP2_REG_PGL_PRETEND_FUNC_F4;
	case 5:	return PXP2_REG_PGL_PRETEND_FUNC_F5;
	case 6:	return PXP2_REG_PGL_PRETEND_FUNC_F6;
	case 7:	return PXP2_REG_PGL_PRETEND_FUNC_F7;
	default:
		BNX2X_ERR("Unsupported function index: %d\n", func);
		return (u32)(-1);
	}
}
static void bnx2x_undi_int_disable_e1h(struct bnx2x *bp, int orig_func)
{
	u32 reg = bnx2x_get_pretend_reg(bp, orig_func), new_val;

	/* Flush all outstanding writes */
	mmiowb();

	/* Pretend to be function 0 */
	REG_WR(bp, reg, 0);
	/* Flush the GRC transaction (in the chip) */
	new_val = REG_RD(bp, reg);
	if (new_val != 0) {
		BNX2X_ERR("Hmmm... Pretend register wasn't updated: (0,%d)!\n",
			  new_val);
		BUG();
	}

	/* From now we are in the "like-E1" mode */
	bnx2x_int_disable(bp);

	/* Flush all outstanding writes */
	mmiowb();

	/* Restore the original function settings */
	REG_WR(bp, reg, orig_func);
	new_val = REG_RD(bp, reg);
	if (new_val != orig_func) {
		BNX2X_ERR("Hmmm... Pretend register wasn't updated: (%d,%d)!\n",
			  orig_func, new_val);
		BUG();
	}
}

static inline void bnx2x_undi_int_disable(struct bnx2x *bp, int func)
{
	if (CHIP_IS_E1H(bp))
		bnx2x_undi_int_disable_e1h(bp, func);
	else
		bnx2x_int_disable(bp);
}
static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
{
	u32 val;

	/* Check if there is any driver already loaded */
	val = REG_RD(bp, MISC_REG_UNPREPARED);
	if (val == 0x1) {
		/* Check if it is the UNDI driver
		 * UNDI driver initializes CID offset for normal bell to 0x7
		 */
		bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
		val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
		if (val == 0x7) {
			u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
			/* save our func */
			int func = BP_FUNC(bp);
			u32 swap_en;
			u32 swap_val;

			/* clear the UNDI indication */
			REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);

			BNX2X_DEV_INFO("UNDI is active! reset device\n");

			/* try unload UNDI on port 0 */
			bp->func = 0;
			bp->fw_seq =
			       (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
				DRV_MSG_SEQ_NUMBER_MASK);
			reset_code = bnx2x_fw_command(bp, reset_code, 0);

			/* if UNDI is loaded on the other port */
			if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {

				/* send "DONE" for previous unload */
				bnx2x_fw_command(bp,
						 DRV_MSG_CODE_UNLOAD_DONE, 0);

				/* unload UNDI on port 1 */
				bp->func = 1;
				bp->fw_seq =
			       (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
					DRV_MSG_SEQ_NUMBER_MASK);
				reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;

				bnx2x_fw_command(bp, reset_code, 0);
			}

			/* now it's safe to release the lock */
			bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);

			bnx2x_undi_int_disable(bp, func);

			/* close input traffic and wait for it */
			/* Do not rcv packets to BRB */
			REG_WR(bp,
			      (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
					     NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
			/* Do not direct rcv packets that are not for MCP to
			 * the BRB */
			REG_WR(bp,
			      (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
					     NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
			/* clear AEU */
			REG_WR(bp,
			      (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
					     MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
			msleep(10);

			/* save NIG port swap info */
			swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
			swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
			/* reset device */
			REG_WR(bp,
			       GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
			       0xd3ffffff);
			REG_WR(bp,
			       GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
			       0x1403);
			/* take the NIG out of reset and restore swap values */
			REG_WR(bp,
			       GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
			       MISC_REGISTERS_RESET_REG_1_RST_NIG);
			REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
			REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);

			/* send unload done to the MCP */
			bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);

			/* restore our func and fw_seq */
			bp->func = func;
			bp->fw_seq =
			       (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
				DRV_MSG_SEQ_NUMBER_MASK);

		} else
			bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
	}
}
static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
{
	u32 val, val2, val3, val4, id;
	u16 pmc;

	/* Get the chip revision id and number. */
	/* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
	val = REG_RD(bp, MISC_REG_CHIP_NUM);
	id = ((val & 0xffff) << 16);
	val = REG_RD(bp, MISC_REG_CHIP_REV);
	id |= ((val & 0xf) << 12);
	val = REG_RD(bp, MISC_REG_CHIP_METAL);
	id |= ((val & 0xff) << 4);
	val = REG_RD(bp, MISC_REG_BOND_ID);
	id |= (val & 0xf);
	bp->common.chip_id = id;
	bp->link_params.chip_id = bp->common.chip_id;
	BNX2X_DEV_INFO("chip ID is 0x%x\n", id);

	val = (REG_RD(bp, 0x2874) & 0x55);
	if ((bp->common.chip_id & 0x1) ||
	    (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) {
		bp->flags |= ONE_PORT_FLAG;
		BNX2X_DEV_INFO("single port device\n");
	}

	val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
	bp->common.flash_size = (NVRAM_1MB_SIZE <<
				 (val & MCPR_NVM_CFG4_FLASH_SIZE));
	BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
		       bp->common.flash_size, bp->common.flash_size);

	bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
	bp->common.shmem2_base = REG_RD(bp, MISC_REG_GENERIC_CR_0);
	bp->link_params.shmem_base = bp->common.shmem_base;
	bp->link_params.shmem2_base = bp->common.shmem2_base;
	BNX2X_DEV_INFO("shmem offset 0x%x  shmem2 offset 0x%x\n",
		       bp->common.shmem_base, bp->common.shmem2_base);

	if (!bp->common.shmem_base ||
	    (bp->common.shmem_base < 0xA0000) ||
	    (bp->common.shmem_base >= 0xC0000)) {
		BNX2X_DEV_INFO("MCP not active\n");
		bp->flags |= NO_MCP_FLAG;
		return;
	}

	val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
	if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		!= (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		BNX2X_ERROR("BAD MCP validity signature\n");

	bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
	BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config);

	bp->link_params.hw_led_mode = ((bp->common.hw_config &
					SHARED_HW_CFG_LED_MODE_MASK) >>
				       SHARED_HW_CFG_LED_MODE_SHIFT);

	bp->link_params.feature_config_flags = 0;
	val = SHMEM_RD(bp, dev_info.shared_feature_config.config);
	if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED)
		bp->link_params.feature_config_flags |=
				FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
	else
		bp->link_params.feature_config_flags &=
				~FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;

	val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
	bp->common.bc_ver = val;
	BNX2X_DEV_INFO("bc_ver %X\n", val);
	if (val < BNX2X_BC_VER) {
		/* for now only warn
		 * later we might need to enforce this */
		BNX2X_ERROR("This driver needs bc_ver %X but found %X, "
			    "please upgrade BC\n", BNX2X_BC_VER, val);
	}
	bp->link_params.feature_config_flags |=
		(val >= REQ_BC_VER_4_VRFY_FIRST_PHY_OPT_MDL) ?
		FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY : 0;
	bp->link_params.feature_config_flags |=
		(val >= REQ_BC_VER_4_VRFY_SPECIFIC_PHY_OPT_MDL) ?
		FEATURE_CONFIG_BC_SUPPORTS_DUAL_PHY_OPT_MDL_VRFY : 0;

	if (BP_E1HVN(bp) == 0) {
		pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
		bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
	} else {
		/* no WOL capability for E1HVN != 0 */
		bp->flags |= NO_WOL_FLAG;
	}
	BNX2X_DEV_INFO("%sWoL capable\n",
		       (bp->flags & NO_WOL_FLAG) ? "not " : "");

	val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
	val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
	val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
	val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);

	dev_info(&bp->pdev->dev, "part number %X-%X-%X-%X\n",
		 val, val2, val3, val4);
}
static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
						    u32 switch_cfg)
{
	int cfg_size = 0, idx, port = BP_PORT(bp);

	/* Aggregation of supported attributes of all external phys */
	bp->port.supported[0] = 0;
	bp->port.supported[1] = 0;
	switch (bp->link_params.num_phys) {
	case 1:
		bp->port.supported[0] = bp->link_params.phy[INT_PHY].supported;
		cfg_size = 1;
		break;
	case 2:
		bp->port.supported[0] = bp->link_params.phy[EXT_PHY1].supported;
		cfg_size = 1;
		break;
	case 3:
		cfg_size = 2;
		if (bp->link_params.multi_phy_config &
		    PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
			bp->port.supported[1] =
				bp->link_params.phy[EXT_PHY1].supported;
			bp->port.supported[0] =
				bp->link_params.phy[EXT_PHY2].supported;
		} else {
			bp->port.supported[0] =
				bp->link_params.phy[EXT_PHY1].supported;
			bp->port.supported[1] =
				bp->link_params.phy[EXT_PHY2].supported;
		}
		break;
	}

	if (!(bp->port.supported[0] || bp->port.supported[1])) {
		BNX2X_ERR("NVRAM config error. BAD phy config. "
			  "PHY1 config 0x%x, PHY2 config 0x%x\n",
			  SHMEM_RD(bp,
			  dev_info.port_hw_config[port].external_phy_config),
			  SHMEM_RD(bp,
			  dev_info.port_hw_config[port].external_phy_config2));
		return;
	}

	switch (switch_cfg) {
	case SWITCH_CFG_1G:
		bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
					   port*0x10);
		BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
		break;

	case SWITCH_CFG_10G:
		bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
					   port*0x18);
		BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
		break;

	default:
		BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
			  bp->port.link_config[0]);
		return;
	}
	/* mask what we support according to speed_cap_mask per configuration */
	for (idx = 0; idx < cfg_size; idx++) {
		if (!(bp->link_params.speed_cap_mask[idx] &
				PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
			bp->port.supported[idx] &= ~SUPPORTED_10baseT_Half;

		if (!(bp->link_params.speed_cap_mask[idx] &
				PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
			bp->port.supported[idx] &= ~SUPPORTED_10baseT_Full;

		if (!(bp->link_params.speed_cap_mask[idx] &
				PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
			bp->port.supported[idx] &= ~SUPPORTED_100baseT_Half;

		if (!(bp->link_params.speed_cap_mask[idx] &
				PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
			bp->port.supported[idx] &= ~SUPPORTED_100baseT_Full;

		if (!(bp->link_params.speed_cap_mask[idx] &
					PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
			bp->port.supported[idx] &= ~(SUPPORTED_1000baseT_Half |
						     SUPPORTED_1000baseT_Full);

		if (!(bp->link_params.speed_cap_mask[idx] &
					PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
			bp->port.supported[idx] &= ~SUPPORTED_2500baseX_Full;

		if (!(bp->link_params.speed_cap_mask[idx] &
					PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
			bp->port.supported[idx] &= ~SUPPORTED_10000baseT_Full;

	}

	BNX2X_DEV_INFO("supported 0x%x 0x%x\n", bp->port.supported[0],
		       bp->port.supported[1]);
}
static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
{
	u32 link_config, idx, cfg_size = 0;

	bp->port.advertising[0] = 0;
	bp->port.advertising[1] = 0;
	switch (bp->link_params.num_phys) {
	case 1:
	case 2:
		cfg_size = 1;
		break;
	case 3:
		cfg_size = 2;
		break;
	}
	for (idx = 0; idx < cfg_size; idx++) {
		bp->link_params.req_duplex[idx] = DUPLEX_FULL;
		link_config = bp->port.link_config[idx];
		switch (link_config & PORT_FEATURE_LINK_SPEED_MASK) {
		case PORT_FEATURE_LINK_SPEED_AUTO:
			if (bp->port.supported[idx] & SUPPORTED_Autoneg) {
				bp->link_params.req_line_speed[idx] =
					SPEED_AUTO_NEG;
				bp->port.advertising[idx] |=
					bp->port.supported[idx];
			} else {
				/* force 10G, no AN */
				bp->link_params.req_line_speed[idx] =
					SPEED_10000;
				bp->port.advertising[idx] |=
					(ADVERTISED_10000baseT_Full |
					 ADVERTISED_FIBRE);
			}
			break;

		case PORT_FEATURE_LINK_SPEED_10M_FULL:
			if (bp->port.supported[idx] & SUPPORTED_10baseT_Full) {
				bp->link_params.req_line_speed[idx] =
					SPEED_10;
				bp->port.advertising[idx] |=
					(ADVERTISED_10baseT_Full |
					 ADVERTISED_TP);
			} else {
				BNX2X_ERROR("NVRAM config error. "
					    "Invalid link_config 0x%x"
					    "  speed_cap_mask 0x%x\n",
					    link_config,
					    bp->link_params.speed_cap_mask[idx]);
				return;
			}
			break;

		case PORT_FEATURE_LINK_SPEED_10M_HALF:
			if (bp->port.supported[idx] & SUPPORTED_10baseT_Half) {
				bp->link_params.req_line_speed[idx] =
					SPEED_10;
				bp->link_params.req_duplex[idx] =
					DUPLEX_HALF;
				bp->port.advertising[idx] |=
					(ADVERTISED_10baseT_Half |
					 ADVERTISED_TP);
			} else {
				BNX2X_ERROR("NVRAM config error. "
					    "Invalid link_config 0x%x"
					    "  speed_cap_mask 0x%x\n",
					    link_config,
					    bp->link_params.speed_cap_mask[idx]);
				return;
			}
			break;

		case PORT_FEATURE_LINK_SPEED_100M_FULL:
			if (bp->port.supported[idx] & SUPPORTED_100baseT_Full) {
				bp->link_params.req_line_speed[idx] =
					SPEED_100;
				bp->port.advertising[idx] |=
					(ADVERTISED_100baseT_Full |
					 ADVERTISED_TP);
			} else {
				BNX2X_ERROR("NVRAM config error. "
					    "Invalid link_config 0x%x"
					    "  speed_cap_mask 0x%x\n",
					    link_config,
					    bp->link_params.speed_cap_mask[idx]);
				return;
			}
			break;

		case PORT_FEATURE_LINK_SPEED_100M_HALF:
			if (bp->port.supported[idx] & SUPPORTED_100baseT_Half) {
				bp->link_params.req_line_speed[idx] = SPEED_100;
				bp->link_params.req_duplex[idx] = DUPLEX_HALF;
				bp->port.advertising[idx] |=
					(ADVERTISED_100baseT_Half |
					 ADVERTISED_TP);
			} else {
				BNX2X_ERROR("NVRAM config error. "
					    "Invalid link_config 0x%x"
					    "  speed_cap_mask 0x%x\n",
					    link_config,
					    bp->link_params.speed_cap_mask[idx]);
				return;
			}
			break;

		case PORT_FEATURE_LINK_SPEED_1G:
			if (bp->port.supported[idx] &
			    SUPPORTED_1000baseT_Full) {
				bp->link_params.req_line_speed[idx] =
					SPEED_1000;
				bp->port.advertising[idx] |=
					(ADVERTISED_1000baseT_Full |
					 ADVERTISED_TP);
			} else {
				BNX2X_ERROR("NVRAM config error. "
					    "Invalid link_config 0x%x"
					    "  speed_cap_mask 0x%x\n",
					    link_config,
					    bp->link_params.speed_cap_mask[idx]);
				return;
			}
			break;

		case PORT_FEATURE_LINK_SPEED_2_5G:
			if (bp->port.supported[idx] &
			    SUPPORTED_2500baseX_Full) {
				bp->link_params.req_line_speed[idx] =
					SPEED_2500;
				bp->port.advertising[idx] |=
					(ADVERTISED_2500baseX_Full |
					 ADVERTISED_TP);
			} else {
				BNX2X_ERROR("NVRAM config error. "
					    "Invalid link_config 0x%x"
					    "  speed_cap_mask 0x%x\n",
					    link_config,
					    bp->link_params.speed_cap_mask[idx]);
				return;
			}
			break;

		case PORT_FEATURE_LINK_SPEED_10G_CX4:
		case PORT_FEATURE_LINK_SPEED_10G_KX4:
		case PORT_FEATURE_LINK_SPEED_10G_KR:
			if (bp->port.supported[idx] &
			    SUPPORTED_10000baseT_Full) {
				bp->link_params.req_line_speed[idx] =
					SPEED_10000;
				bp->port.advertising[idx] |=
					(ADVERTISED_10000baseT_Full |
					 ADVERTISED_FIBRE);
			} else {
				BNX2X_ERROR("NVRAM config error. "
					    "Invalid link_config 0x%x"
					    "  speed_cap_mask 0x%x\n",
					    link_config,
					    bp->link_params.speed_cap_mask[idx]);
				return;
			}
			break;

		default:
			BNX2X_ERROR("NVRAM config error. "
				    "BAD link speed link_config 0x%x\n",
				    link_config);
			bp->link_params.req_line_speed[idx] = SPEED_AUTO_NEG;
			bp->port.advertising[idx] = bp->port.supported[idx];
			break;
		}

		bp->link_params.req_flow_ctrl[idx] = (link_config &
					 PORT_FEATURE_FLOW_CONTROL_MASK);
		if ((bp->link_params.req_flow_ctrl[idx] ==
		     BNX2X_FLOW_CTRL_AUTO) &&
		    !(bp->port.supported[idx] & SUPPORTED_Autoneg)) {
			bp->link_params.req_flow_ctrl[idx] =
				BNX2X_FLOW_CTRL_NONE;
		}

		BNX2X_DEV_INFO("req_line_speed %d  req_duplex %d  req_flow_ctrl"
			       " 0x%x  advertising 0x%x\n",
			       bp->link_params.req_line_speed[idx],
			       bp->link_params.req_duplex[idx],
			       bp->link_params.req_flow_ctrl[idx],
			       bp->port.advertising[idx]);
	}
}
bnx2x_set_mac_buf(u8
*mac_buf
, u32 mac_lo
, u16 mac_hi
)
6330 mac_hi
= cpu_to_be16(mac_hi
);
6331 mac_lo
= cpu_to_be32(mac_lo
);
6332 memcpy(mac_buf
, &mac_hi
, sizeof(mac_hi
));
6333 memcpy(mac_buf
+ sizeof(mac_hi
), &mac_lo
, sizeof(mac_lo
));
static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 val, val2;
	u32 config;
	u32 ext_phy_type, ext_phy_config;

	bp->link_params.bp = bp;
	bp->link_params.port = port;

	bp->link_params.lane_config =
		SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);

	bp->link_params.speed_cap_mask[0] =
		SHMEM_RD(bp,
			 dev_info.port_hw_config[port].speed_capability_mask);
	bp->link_params.speed_cap_mask[1] =
		SHMEM_RD(bp,
			 dev_info.port_hw_config[port].speed_capability_mask2);
	bp->port.link_config[0] =
		SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);

	bp->port.link_config[1] =
		SHMEM_RD(bp, dev_info.port_feature_config[port].link_config2);

	bp->link_params.multi_phy_config =
		SHMEM_RD(bp, dev_info.port_hw_config[port].multi_phy_config);

	/* If the device is capable of WoL, set the default state according
	 * to the HW
	 */
	config = SHMEM_RD(bp, dev_info.port_feature_config[port].config);
	bp->wol = (!(bp->flags & NO_WOL_FLAG) &&
		   (config & PORT_FEATURE_WOL_ENABLED));

	BNX2X_DEV_INFO("lane_config 0x%08x "
		       "speed_cap_mask0 0x%08x link_config0 0x%08x\n",
		       bp->link_params.lane_config,
		       bp->link_params.speed_cap_mask[0],
		       bp->port.link_config[0]);

	bp->link_params.switch_cfg = (bp->port.link_config[0] &
				      PORT_FEATURE_CONNECTED_SWITCH_MASK);
	bnx2x_phy_probe(&bp->link_params);
	bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);

	bnx2x_link_settings_requested(bp);

	/*
	 * If connected directly, work with the internal PHY, otherwise, work
	 * with the external PHY
	 */
	ext_phy_config =
		SHMEM_RD(bp,
			 dev_info.port_hw_config[port].external_phy_config);
	ext_phy_type = XGXS_EXT_PHY_TYPE(ext_phy_config);
	if (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)
		bp->mdio.prtad = bp->port.phy_addr;

	else if ((ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) &&
		 (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN))
		bp->mdio.prtad =
			XGXS_EXT_PHY_ADDR(ext_phy_config);

	val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
	val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
	bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
	memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
	memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);

#ifdef BCM_CNIC
	val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].iscsi_mac_upper);
	val = SHMEM_RD(bp, dev_info.port_hw_config[port].iscsi_mac_lower);
	bnx2x_set_mac_buf(bp->iscsi_mac, val, val2);
#endif
}
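
/*
 * Gather the function-level HW configuration: common info, the E1H
 * multi-function (E1HOV) settings and, in MF mode, a per-function MAC
 * address that overrides the port one.
 */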
static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	u32 val, val2;
	int rc = 0;

	bnx2x_get_common_hwinfo(bp);

	bp->e1hov = 0;
	bp->e1hmf = 0;
	if (CHIP_IS_E1H(bp) && !BP_NOMCP(bp)) {
		bp->mf_config =
			SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);

		val = (SHMEM_RD(bp, mf_cfg.func_mf_config[FUNC_0].e1hov_tag) &
		       FUNC_MF_CFG_E1HOV_TAG_MASK);
		if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT)
			bp->e1hmf = 1;
		BNX2X_DEV_INFO("%s function mode\n",
			       IS_E1HMF(bp) ? "multi" : "single");

		if (IS_E1HMF(bp)) {
			val = (SHMEM_RD(bp, mf_cfg.func_mf_config[func].
								e1hov_tag) &
			       FUNC_MF_CFG_E1HOV_TAG_MASK);
			if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
				bp->e1hov = val;
				BNX2X_DEV_INFO("E1HOV for func %d is %d "
					       "(0x%04x)\n",
					       func, bp->e1hov, bp->e1hov);
			} else {
				BNX2X_ERROR("No valid E1HOV for func %d,"
					    " aborting\n", func);
				rc = -EPERM;
			}
		} else {
			if (BP_E1HVN(bp)) {
				BNX2X_ERROR("VN %d in single function mode,"
					    " aborting\n", BP_E1HVN(bp));
				rc = -EPERM;
			}
		}
	}

	if (!BP_NOMCP(bp)) {
		bnx2x_get_port_hwinfo(bp);

		bp->fw_seq = (SHMEM_RD(bp, func_mb[func].drv_mb_header) &
			      DRV_MSG_SEQ_NUMBER_MASK);
		BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
	}

	if (IS_E1HMF(bp)) {
		val2 = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_upper);
		val = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_lower);
		if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
		    (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
			bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
			bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
			bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
			bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
			bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
			bp->dev->dev_addr[5] = (u8)(val & 0xff);
			memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
			       ETH_ALEN);
			memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
			       ETH_ALEN);
		}

		return rc;
	}

	if (BP_NOMCP(bp)) {
		/* only supposed to happen on emulation/FPGA */
		BNX2X_ERROR("warning: random MAC workaround active\n");
		random_ether_addr(bp->dev->dev_addr);
		memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
	}

	return rc;
}
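
/*
 * Scan the PCI VPD for the read-only data section and, on boards whose
 * manufacturer ID matches Dell, copy the vendor-specific field into
 * bp->fw_ver.
 */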
static void __devinit bnx2x_read_fwinfo(struct bnx2x *bp)
{
	int cnt, i, block_end, rodi;
	char vpd_data[BNX2X_VPD_LEN+1];
	char str_id_reg[VENDOR_ID_LEN+1];
	char str_id_cap[VENDOR_ID_LEN+1];
	u8 len;

	cnt = pci_read_vpd(bp->pdev, 0, BNX2X_VPD_LEN, vpd_data);
	memset(bp->fw_ver, 0, sizeof(bp->fw_ver));

	if (cnt < BNX2X_VPD_LEN)
		goto out_not_found;

	i = pci_vpd_find_tag(vpd_data, 0, BNX2X_VPD_LEN,
			     PCI_VPD_LRDT_RO_DATA);
	if (i < 0)
		goto out_not_found;

	block_end = i + PCI_VPD_LRDT_TAG_SIZE +
		    pci_vpd_lrdt_size(&vpd_data[i]);

	i += PCI_VPD_LRDT_TAG_SIZE;

	if (block_end > BNX2X_VPD_LEN)
		goto out_not_found;

	rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
					 PCI_VPD_RO_KEYWORD_MFR_ID);
	if (rodi < 0)
		goto out_not_found;

	len = pci_vpd_info_field_size(&vpd_data[rodi]);

	if (len != VENDOR_ID_LEN)
		goto out_not_found;

	rodi += PCI_VPD_INFO_FLD_HDR_SIZE;

	/* vendor specific info */
	snprintf(str_id_reg, VENDOR_ID_LEN + 1, "%04x", PCI_VENDOR_ID_DELL);
	snprintf(str_id_cap, VENDOR_ID_LEN + 1, "%04X", PCI_VENDOR_ID_DELL);
	if (!strncmp(str_id_reg, &vpd_data[rodi], VENDOR_ID_LEN) ||
	    !strncmp(str_id_cap, &vpd_data[rodi], VENDOR_ID_LEN)) {

		rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
						 PCI_VPD_RO_KEYWORD_VENDOR0);
		if (rodi >= 0) {
			len = pci_vpd_info_field_size(&vpd_data[rodi]);

			rodi += PCI_VPD_INFO_FLD_HDR_SIZE;

			if (len < 32 && (len + rodi) <= BNX2X_VPD_LEN) {
				memcpy(bp->fw_ver, &vpd_data[rodi], len);
				bp->fw_ver[len] = ' ';
			}
		}
		return;
	}
out_not_found:
	return;
}
static int __devinit bnx2x_init_bp(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	int timer_interval;
	int rc;

	/* Disable interrupt handling until HW is initialized */
	atomic_set(&bp->intr_sem, 1);
	smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */

	mutex_init(&bp->port.phy_mutex);
	mutex_init(&bp->fw_mb_mutex);
	spin_lock_init(&bp->stats_lock);
#ifdef BCM_CNIC
	mutex_init(&bp->cnic_mutex);
#endif

	INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
	INIT_DELAYED_WORK(&bp->reset_task, bnx2x_reset_task);

	rc = bnx2x_get_hwinfo(bp);

	bnx2x_read_fwinfo(bp);
	/* need to reset chip if undi was active */
	if (!BP_NOMCP(bp))
		bnx2x_undi_unload(bp);

	if (CHIP_REV_IS_FPGA(bp))
		dev_err(&bp->pdev->dev, "FPGA detected\n");

	if (BP_NOMCP(bp) && (func == 0))
		dev_err(&bp->pdev->dev, "MCP disabled, "
					"must load devices in order!\n");

	/* Set multi queue mode */
	if ((multi_mode != ETH_RSS_MODE_DISABLED) &&
	    ((int_mode == INT_MODE_INTx) || (int_mode == INT_MODE_MSI))) {
		dev_err(&bp->pdev->dev, "Multi disabled since int_mode "
					"requested is not MSI-X\n");
		multi_mode = ETH_RSS_MODE_DISABLED;
	}
	bp->multi_mode = multi_mode;
	bp->int_mode = int_mode;

	bp->dev->features |= NETIF_F_GRO;

	/* Set TPA flags */
	if (disable_tpa) {
		bp->flags &= ~TPA_ENABLE_FLAG;
		bp->dev->features &= ~NETIF_F_LRO;
	} else {
		bp->flags |= TPA_ENABLE_FLAG;
		bp->dev->features |= NETIF_F_LRO;
	}
	bp->disable_tpa = disable_tpa;

	if (CHIP_IS_E1(bp))
		bp->dropless_fc = 0;
	else
		bp->dropless_fc = dropless_fc;

	bp->tx_ring_size = MAX_TX_AVAIL;

	/* make sure that the numbers are in the right granularity */
	bp->tx_ticks = (50 / (4 * BNX2X_BTR)) * (4 * BNX2X_BTR);
	bp->rx_ticks = (25 / (4 * BNX2X_BTR)) * (4 * BNX2X_BTR);

	timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
	bp->current_interval = (poll ? poll : timer_interval);

	init_timer(&bp->timer);
	bp->timer.expires = jiffies + bp->current_interval;
	bp->timer.data = (unsigned long) bp;
	bp->timer.function = bnx2x_timer;

	return rc;
}
/****************************************************************************
* General service functions
****************************************************************************/

/* called with rtnl_lock */
static int bnx2x_open(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	netif_carrier_off(dev);

	bnx2x_set_power_state(bp, PCI_D0);

	if (!bnx2x_reset_is_done(bp)) {
		do {
			/* Reset the MCP mailbox sequence if there is an
			 * ongoing recovery
			 */
			bp->fw_seq = 0;

			/* If it's the first function to load and reset done
			 * is still not cleared, we don't check the attention
			 * state here because it may have already been
			 * cleared by a "common" reset, but we shall proceed
			 * with "process kill" anyway.
			 */
			if ((bnx2x_get_load_cnt(bp) == 0) &&
				bnx2x_trylock_hw_lock(bp,
				HW_LOCK_RESOURCE_RESERVED_08) &&
				(!bnx2x_leader_reset(bp))) {
				DP(NETIF_MSG_HW, "Recovered in open\n");
				break;
			}

			bnx2x_set_power_state(bp, PCI_D3hot);

			printk(KERN_ERR"%s: Recovery flow hasn't been properly"
			" completed yet. Try again later. If you still see this"
			" message after a few retries then power cycle is"
			" required.\n", bp->dev->name);

			return -EAGAIN;
		} while (0);
	}

	bp->recovery_state = BNX2X_RECOVERY_DONE;

	return bnx2x_nic_load(bp, LOAD_OPEN);
}
/* called with rtnl_lock */
static int bnx2x_close(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	/* Unload the driver, release IRQs */
	bnx2x_nic_unload(bp, UNLOAD_CLOSE);
	bnx2x_set_power_state(bp, PCI_D3hot);

	return 0;
}
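
/*
 * On E1 the multicast list is programmed into CAM entries via a SET_MAC
 * ramrod (stale entries are invalidated); on E1H the addresses are folded
 * into a 256-bit hash filter (crc32c of the MAC selects the bit) written
 * to the MC_HASH registers.
 */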
/* called with netif_tx_lock from dev_mcast.c */
void bnx2x_set_rx_mode(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	u32 rx_mode = BNX2X_RX_MODE_NORMAL;
	int port = BP_PORT(bp);

	if (bp->state != BNX2X_STATE_OPEN) {
		DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
		return;
	}

	DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);

	if (dev->flags & IFF_PROMISC)
		rx_mode = BNX2X_RX_MODE_PROMISC;

	else if ((dev->flags & IFF_ALLMULTI) ||
		 ((netdev_mc_count(dev) > BNX2X_MAX_MULTICAST) &&
		  CHIP_IS_E1(bp)))
		rx_mode = BNX2X_RX_MODE_ALLMULTI;

	else { /* some multicasts */
		if (CHIP_IS_E1(bp)) {
			struct netdev_hw_addr *ha;
			struct mac_configuration_cmd *config =
						bnx2x_sp(bp, mcast_config);
			int i = 0;
			int old;
			u32 offset;

			netdev_for_each_mc_addr(ha, dev) {
				config->config_table[i].
					cam_entry.msb_mac_addr =
					swab16(*(u16 *)&ha->addr[0]);
				config->config_table[i].
					cam_entry.middle_mac_addr =
					swab16(*(u16 *)&ha->addr[2]);
				config->config_table[i].
					cam_entry.lsb_mac_addr =
					swab16(*(u16 *)&ha->addr[4]);
				config->config_table[i].cam_entry.flags =
					cpu_to_le16(port);
				config->config_table[i].
					target_table_entry.flags = 0;
				config->config_table[i].target_table_entry.
					clients_bit_vector =
					cpu_to_le32(1 << BP_L_ID(bp));
				config->config_table[i].
					target_table_entry.vlan_id = 0;

				DP(NETIF_MSG_IFUP,
				   "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
				   config->config_table[i].
						cam_entry.msb_mac_addr,
				   config->config_table[i].
						cam_entry.middle_mac_addr,
				   config->config_table[i].
						cam_entry.lsb_mac_addr);
				i++;
			}
			old = config->hdr.length;
			if (old > i) {
				for (; i < old; i++) {
					if (CAM_IS_INVALID(config->
							   config_table[i])) {
						/* already invalidated */
						break;
					}
					/* invalidate */
					CAM_INVALIDATE(config->
						       config_table[i]);
				}
			}

			if (CHIP_REV_IS_SLOW(bp))
				offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
			else
				offset = BNX2X_MAX_MULTICAST*(1 + port);

			config->hdr.length = i;
			config->hdr.offset = offset;
			config->hdr.client_id = bp->fp->cl_id;
			config->hdr.reserved1 = 0;

			bp->set_mac_pending++;
			smp_wmb();

			bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
				   U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
				   U64_LO(bnx2x_sp_mapping(bp, mcast_config)),
				      0);
		} else { /* E1H */
			/* Accept one or more multicasts */
			struct netdev_hw_addr *ha;
			u32 mc_filter[MC_HASH_SIZE];
			u32 crc, bit, regidx;
			int i;

			memset(mc_filter, 0, 4 * MC_HASH_SIZE);

			netdev_for_each_mc_addr(ha, dev) {
				DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
				   ha->addr);

				crc = crc32c_le(0, ha->addr, ETH_ALEN);
				bit = (crc >> 24) & 0xff;
				regidx = bit >> 5;
				bit &= 0x1f;
				mc_filter[regidx] |= (1 << bit);
			}

			for (i = 0; i < MC_HASH_SIZE; i++)
				REG_WR(bp, MC_HASH_OFFSET(bp, i),
				       mc_filter[i]);
		}
	}

	bp->rx_mode = rx_mode;
	bnx2x_set_storm_rx_mode(bp);
}
/* called with rtnl_lock */
static int bnx2x_mdio_read(struct net_device *netdev, int prtad,
			   int devad, u16 addr)
{
	struct bnx2x *bp = netdev_priv(netdev);
	u16 value;
	int rc;

	DP(NETIF_MSG_LINK, "mdio_read: prtad 0x%x, devad 0x%x, addr 0x%x\n",
	   prtad, devad, addr);

	/* The HW expects different devad if CL22 is used */
	devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;

	bnx2x_acquire_phy_lock(bp);
	rc = bnx2x_phy_read(&bp->link_params, prtad, devad, addr, &value);
	bnx2x_release_phy_lock(bp);
	DP(NETIF_MSG_LINK, "mdio_read_val 0x%x rc = 0x%x\n", value, rc);

	if (!rc)
		rc = value;
	return rc;
}
/* called with rtnl_lock */
static int bnx2x_mdio_write(struct net_device *netdev, int prtad, int devad,
			    u16 addr, u16 value)
{
	struct bnx2x *bp = netdev_priv(netdev);
	int rc;

	DP(NETIF_MSG_LINK, "mdio_write: prtad 0x%x, devad 0x%x, addr 0x%x,"
	   " value 0x%x\n", prtad, devad, addr, value);

	/* The HW expects different devad if CL22 is used */
	devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;

	bnx2x_acquire_phy_lock(bp);
	rc = bnx2x_phy_write(&bp->link_params, prtad, devad, addr, value);
	bnx2x_release_phy_lock(bp);
	return rc;
}
/* called with rtnl_lock */
static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct mii_ioctl_data *mdio = if_mii(ifr);

	DP(NETIF_MSG_LINK, "ioctl: phy id 0x%x, reg 0x%x, val_in 0x%x\n",
	   mdio->phy_id, mdio->reg_num, mdio->val_in);

	if (!netif_running(dev))
		return -EAGAIN;

	return mdio_mii_ioctl(&bp->mdio, mdio, cmd);
}
#ifdef CONFIG_NET_POLL_CONTROLLER
static void poll_bnx2x(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	disable_irq(bp->pdev->irq);
	bnx2x_interrupt(bp->pdev->irq, dev);
	enable_irq(bp->pdev->irq);
}
#endif
static const struct net_device_ops bnx2x_netdev_ops = {
	.ndo_open		= bnx2x_open,
	.ndo_stop		= bnx2x_close,
	.ndo_start_xmit		= bnx2x_start_xmit,
	.ndo_set_multicast_list	= bnx2x_set_rx_mode,
	.ndo_set_mac_address	= bnx2x_change_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_do_ioctl		= bnx2x_ioctl,
	.ndo_change_mtu		= bnx2x_change_mtu,
	.ndo_tx_timeout		= bnx2x_tx_timeout,
#ifdef BCM_VLAN
	.ndo_vlan_rx_register	= bnx2x_vlan_rx_register,
#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= poll_bnx2x,
#endif
};
static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
				    struct net_device *dev)
{
	struct bnx2x *bp;
	int rc;

	SET_NETDEV_DEV(dev, &pdev->dev);
	bp = netdev_priv(dev);

	bp->dev = dev;
	bp->pdev = pdev;
	bp->flags = 0;
	bp->func = PCI_FUNC(pdev->devfn);

	rc = pci_enable_device(pdev);
	if (rc) {
		dev_err(&bp->pdev->dev,
			"Cannot enable PCI device, aborting\n");
		goto err_out;
	}

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		dev_err(&bp->pdev->dev,
			"Cannot find PCI device base address, aborting\n");
		rc = -ENODEV;
		goto err_out_disable;
	}

	if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
		dev_err(&bp->pdev->dev, "Cannot find second PCI device"
			" base address, aborting\n");
		rc = -ENODEV;
		goto err_out_disable;
	}

	if (atomic_read(&pdev->enable_cnt) == 1) {
		rc = pci_request_regions(pdev, DRV_MODULE_NAME);
		if (rc) {
			dev_err(&bp->pdev->dev,
				"Cannot obtain PCI resources, aborting\n");
			goto err_out_disable;
		}

		pci_set_master(pdev);
		pci_save_state(pdev);
	}

	bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (bp->pm_cap == 0) {
		dev_err(&bp->pdev->dev,
			"Cannot find power management capability, aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
	if (bp->pcie_cap == 0) {
		dev_err(&bp->pdev->dev,
			"Cannot find PCI Express capability, aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) == 0) {
		bp->flags |= USING_DAC_FLAG;
		if (dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64)) != 0) {
			dev_err(&bp->pdev->dev, "dma_set_coherent_mask"
				" failed, aborting\n");
			rc = -EIO;
			goto err_out_release;
		}

	} else if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)) != 0) {
		dev_err(&bp->pdev->dev,
			"System does not support DMA, aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	dev->mem_start = pci_resource_start(pdev, 0);
	dev->base_addr = dev->mem_start;
	dev->mem_end = pci_resource_end(pdev, 0);

	dev->irq = pdev->irq;

	bp->regview = pci_ioremap_bar(pdev, 0);
	if (!bp->regview) {
		dev_err(&bp->pdev->dev,
			"Cannot map register space, aborting\n");
		rc = -ENOMEM;
		goto err_out_release;
	}

	bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
					min_t(u64, BNX2X_DB_SIZE,
					      pci_resource_len(pdev, 2)));
	if (!bp->doorbells) {
		dev_err(&bp->pdev->dev,
			"Cannot map doorbell space, aborting\n");
		rc = -ENOMEM;
		goto err_out_unmap;
	}

	bnx2x_set_power_state(bp, PCI_D0);

	/* clean indirect addresses */
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
	REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);

	/* Reset the load counter */
	bnx2x_clear_load_cnt(bp);

	dev->watchdog_timeo = TX_TIMEOUT;

	dev->netdev_ops = &bnx2x_netdev_ops;
	bnx2x_set_ethtool_ops(dev);
	dev->features |= NETIF_F_SG;
	dev->features |= NETIF_F_HW_CSUM;
	if (bp->flags & USING_DAC_FLAG)
		dev->features |= NETIF_F_HIGHDMA;
	dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
	dev->features |= NETIF_F_TSO6;
#ifdef BCM_VLAN
	dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
	bp->flags |= (HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);

	dev->vlan_features |= NETIF_F_SG;
	dev->vlan_features |= NETIF_F_HW_CSUM;
	if (bp->flags & USING_DAC_FLAG)
		dev->vlan_features |= NETIF_F_HIGHDMA;
	dev->vlan_features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
	dev->vlan_features |= NETIF_F_TSO6;
#endif

	/* get_port_hwinfo() will set prtad and mmds properly */
	bp->mdio.prtad = MDIO_PRTAD_NONE;
	bp->mdio.mmds = 0;
	bp->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
	bp->mdio.dev = dev;
	bp->mdio.mdio_read = bnx2x_mdio_read;
	bp->mdio.mdio_write = bnx2x_mdio_write;

	return 0;

err_out_unmap:
	if (bp->regview) {
		iounmap(bp->regview);
		bp->regview = NULL;
	}
	if (bp->doorbells) {
		iounmap(bp->doorbells);
		bp->doorbells = NULL;
	}

err_out_release:
	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

err_out_disable:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

err_out:
	return rc;
}
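
/*
 * Decode the negotiated PCIe link width and speed from the link control
 * register in configuration space.
 */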
static void __devinit bnx2x_get_pcie_width_speed(struct bnx2x *bp,
						 int *width, int *speed)
{
	u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);

	*width = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;

	/* return value of 1=2.5GHz 2=5GHz */
	*speed = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
}
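
/*
 * Sanity-check a firmware image before use: every section must lie inside
 * the file, every init_ops offset must be in range and the version bytes
 * must match the constants this driver was built against.
 */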
static int bnx2x_check_firmware(struct bnx2x *bp)
{
	const struct firmware *firmware = bp->firmware;
	struct bnx2x_fw_file_hdr *fw_hdr;
	struct bnx2x_fw_file_section *sections;
	u32 offset, len, num_ops;
	u16 *ops_offsets;
	int i;
	const u8 *fw_ver;

	if (firmware->size < sizeof(struct bnx2x_fw_file_hdr))
		return -EINVAL;

	fw_hdr = (struct bnx2x_fw_file_hdr *)firmware->data;
	sections = (struct bnx2x_fw_file_section *)fw_hdr;

	/* Make sure none of the offsets and sizes make us read beyond
	 * the end of the firmware data */
	for (i = 0; i < sizeof(*fw_hdr) / sizeof(*sections); i++) {
		offset = be32_to_cpu(sections[i].offset);
		len = be32_to_cpu(sections[i].len);
		if (offset + len > firmware->size) {
			dev_err(&bp->pdev->dev,
				"Section %d length is out of bounds\n", i);
			return -EINVAL;
		}
	}

	/* Likewise for the init_ops offsets */
	offset = be32_to_cpu(fw_hdr->init_ops_offsets.offset);
	ops_offsets = (u16 *)(firmware->data + offset);
	num_ops = be32_to_cpu(fw_hdr->init_ops.len) / sizeof(struct raw_op);

	for (i = 0; i < be32_to_cpu(fw_hdr->init_ops_offsets.len) / 2; i++) {
		if (be16_to_cpu(ops_offsets[i]) > num_ops) {
			dev_err(&bp->pdev->dev,
				"Section offset %d is out of bounds\n", i);
			return -EINVAL;
		}
	}

	/* Check FW version */
	offset = be32_to_cpu(fw_hdr->fw_version.offset);
	fw_ver = firmware->data + offset;
	if ((fw_ver[0] != BCM_5710_FW_MAJOR_VERSION) ||
	    (fw_ver[1] != BCM_5710_FW_MINOR_VERSION) ||
	    (fw_ver[2] != BCM_5710_FW_REVISION_VERSION) ||
	    (fw_ver[3] != BCM_5710_FW_ENGINEERING_VERSION)) {
		dev_err(&bp->pdev->dev,
			"Bad FW version:%d.%d.%d.%d. Should be %d.%d.%d.%d\n",
			fw_ver[0], fw_ver[1], fw_ver[2],
			fw_ver[3], BCM_5710_FW_MAJOR_VERSION,
			BCM_5710_FW_MINOR_VERSION,
			BCM_5710_FW_REVISION_VERSION,
			BCM_5710_FW_ENGINEERING_VERSION);
		return -EINVAL;
	}

	return 0;
}
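
/* Convert n bytes of big-endian 32-bit words into CPU order */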
static inline void be32_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
{
	const __be32 *source = (const __be32 *)_source;
	u32 *target = (u32 *)_target;
	u32 i;

	for (i = 0; i < n/4; i++)
		target[i] = be32_to_cpu(source[i]);
}
/*
   Ops array is stored in the following format:
   {op(8bit), offset(24bit, big endian), data(32bit, big endian)}
 */
static inline void bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n)
{
	const __be32 *source = (const __be32 *)_source;
	struct raw_op *target = (struct raw_op *)_target;
	u32 i, j, tmp;

	for (i = 0, j = 0; i < n/8; i++, j += 2) {
		tmp = be32_to_cpu(source[j]);
		target[i].op = (tmp >> 24) & 0xff;
		target[i].offset = tmp & 0xffffff;
		target[i].raw_data = be32_to_cpu(source[j + 1]);
	}
}
static inline void be16_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
{
	const __be16 *source = (const __be16 *)_source;
	u16 *target = (u16 *)_target;
	u32 i;

	for (i = 0; i < n/2; i++)
		target[i] = be16_to_cpu(source[i]);
}
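
/*
 * Allocate bp->arr and convert the matching firmware file section into it
 * with 'func'; jumps to 'lbl' if the allocation fails.
 */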
#define BNX2X_ALLOC_AND_SET(arr, lbl, func) \
do { \
	u32 len = be32_to_cpu(fw_hdr->arr.len); \
	bp->arr = kmalloc(len, GFP_KERNEL); \
	if (!bp->arr) { \
		pr_err("Failed to allocate %d bytes for "#arr"\n", len); \
		goto lbl; \
	} \
	func(bp->firmware->data + be32_to_cpu(fw_hdr->arr.offset), \
	     (u8 *)bp->arr, len); \
} while (0)
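
/*
 * Request the chip-specific firmware file, validate it and set up the
 * init-data, init-ops and STORM pointers into the loaded image.
 */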
int bnx2x_init_firmware(struct bnx2x *bp)
{
	const char *fw_file_name;
	struct bnx2x_fw_file_hdr *fw_hdr;
	int rc;

	if (CHIP_IS_E1(bp))
		fw_file_name = FW_FILE_NAME_E1;
	else if (CHIP_IS_E1H(bp))
		fw_file_name = FW_FILE_NAME_E1H;
	else {
		BNX2X_ERR("Unsupported chip revision\n");
		return -EINVAL;
	}

	BNX2X_DEV_INFO("Loading %s\n", fw_file_name);

	rc = request_firmware(&bp->firmware, fw_file_name, &bp->pdev->dev);
	if (rc) {
		BNX2X_ERR("Can't load firmware file %s\n", fw_file_name);
		goto request_firmware_exit;
	}

	rc = bnx2x_check_firmware(bp);
	if (rc) {
		BNX2X_ERR("Corrupt firmware file %s\n", fw_file_name);
		goto request_firmware_exit;
	}

	fw_hdr = (struct bnx2x_fw_file_hdr *)bp->firmware->data;

	/* Initialize the pointers to the init arrays */
	/* Blob */
	BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit, be32_to_cpu_n);

	/* Opcodes */
	BNX2X_ALLOC_AND_SET(init_ops, init_ops_alloc_err, bnx2x_prep_ops);

	/* Offsets */
	BNX2X_ALLOC_AND_SET(init_ops_offsets, init_offsets_alloc_err,
			    be16_to_cpu_n);

	/* STORMs firmware */
	INIT_TSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->tsem_int_table_data.offset);
	INIT_TSEM_PRAM_DATA(bp)      = bp->firmware->data +
			be32_to_cpu(fw_hdr->tsem_pram_data.offset);
	INIT_USEM_INT_TABLE_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->usem_int_table_data.offset);
	INIT_USEM_PRAM_DATA(bp)      = bp->firmware->data +
			be32_to_cpu(fw_hdr->usem_pram_data.offset);
	INIT_XSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->xsem_int_table_data.offset);
	INIT_XSEM_PRAM_DATA(bp)      = bp->firmware->data +
			be32_to_cpu(fw_hdr->xsem_pram_data.offset);
	INIT_CSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->csem_int_table_data.offset);
	INIT_CSEM_PRAM_DATA(bp)      = bp->firmware->data +
			be32_to_cpu(fw_hdr->csem_pram_data.offset);

	return 0;

init_offsets_alloc_err:
	kfree(bp->init_ops);
init_ops_alloc_err:
	kfree(bp->init_data);
request_firmware_exit:
	release_firmware(bp->firmware);

	return rc;
}
static int __devinit bnx2x_init_one(struct pci_dev *pdev,
				    const struct pci_device_id *ent)
{
	struct net_device *dev = NULL;
	struct bnx2x *bp;
	int pcie_width, pcie_speed;
	int rc;

	/* dev zeroed in init_etherdev */
	dev = alloc_etherdev_mq(sizeof(*bp), MAX_CONTEXT);
	if (!dev) {
		dev_err(&pdev->dev, "Cannot allocate net device\n");
		return -ENOMEM;
	}

	bp = netdev_priv(dev);
	bp->msg_enable = debug;

	pci_set_drvdata(pdev, dev);

	rc = bnx2x_init_dev(pdev, dev);
	if (rc < 0) {
		free_netdev(dev);
		return rc;
	}

	rc = bnx2x_init_bp(bp);
	if (rc)
		goto init_one_exit;

	rc = register_netdev(dev);
	if (rc) {
		dev_err(&pdev->dev, "Cannot register net device\n");
		goto init_one_exit;
	}

	bnx2x_get_pcie_width_speed(bp, &pcie_width, &pcie_speed);
	netdev_info(dev, "%s (%c%d) PCI-E x%d %s found at mem %lx,"
	       " IRQ %d, ", board_info[ent->driver_data].name,
	       (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
	       pcie_width, (pcie_speed == 2) ? "5GHz (Gen2)" : "2.5GHz",
	       dev->base_addr, bp->pdev->irq);
	pr_cont("node addr %pM\n", dev->dev_addr);

	return 0;

init_one_exit:
	if (bp->regview)
		iounmap(bp->regview);

	if (bp->doorbells)
		iounmap(bp->doorbells);

	free_netdev(dev);

	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

	return rc;
}
static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;

	if (!dev) {
		dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
		return;
	}
	bp = netdev_priv(dev);

	unregister_netdev(dev);

	/* Make sure RESET task is not scheduled before continuing */
	cancel_delayed_work_sync(&bp->reset_task);

	if (bp->regview)
		iounmap(bp->regview);

	if (bp->doorbells)
		iounmap(bp->doorbells);

	free_netdev(dev);

	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}
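
/*
 * Light-weight unload used by the PCI error handlers: stop the datapath,
 * invalidate the E1 CAM entries and free driver resources.
 */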
static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
{
	int i;

	bp->state = BNX2X_STATE_ERROR;

	bp->rx_mode = BNX2X_RX_MODE_NONE;

	bnx2x_netif_stop(bp, 0);
	netif_carrier_off(bp->dev);

	del_timer_sync(&bp->timer);
	bp->stats_state = STATS_STATE_DISABLED;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	/* Release IRQs */
	bnx2x_free_irq(bp, false);

	if (CHIP_IS_E1(bp)) {
		struct mac_configuration_cmd *config =
						bnx2x_sp(bp, mcast_config);

		for (i = 0; i < config->hdr.length; i++)
			CAM_INVALIDATE(config->config_table[i]);
	}

	/* Free SKBs, SGEs, TPA pool and driver internals */
	bnx2x_free_skbs(bp);
	for_each_queue(bp, i)
		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
	for_each_queue(bp, i)
		netif_napi_del(&bnx2x_fp(bp, i, napi));
	bnx2x_free_mem(bp);

	bp->state = BNX2X_STATE_CLOSED;

	return 0;
}
static void bnx2x_eeh_recover(struct bnx2x *bp)
{
	u32 val;

	mutex_init(&bp->port.phy_mutex);

	bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
	bp->link_params.shmem_base = bp->common.shmem_base;
	BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);

	if (!bp->common.shmem_base ||
	    (bp->common.shmem_base < 0xA0000) ||
	    (bp->common.shmem_base >= 0xC0000)) {
		BNX2X_DEV_INFO("MCP not active\n");
		bp->flags |= NO_MCP_FLAG;
		return;
	}

	val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
	if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		!= (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		BNX2X_ERR("BAD MCP validity signature\n");

	if (!BP_NOMCP(bp)) {
		bp->fw_seq = (SHMEM_RD(bp, func_mb[BP_FUNC(bp)].drv_mb_header)
			      & DRV_MSG_SEQ_NUMBER_MASK);
		BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
	}
}
/**
 * bnx2x_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
						pci_channel_state_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	netif_device_detach(dev);

	if (state == pci_channel_io_perm_failure) {
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}

	if (netif_running(dev))
		bnx2x_eeh_nic_unload(bp);

	pci_disable_device(pdev);

	rtnl_unlock();

	/* Request a slot reset */
	return PCI_ERS_RESULT_NEED_RESET;
}
/**
 * bnx2x_io_slot_reset - called after the PCI bus has been reset
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 */
static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	if (pci_enable_device(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset\n");
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}

	pci_set_master(pdev);
	pci_restore_state(pdev);

	if (netif_running(dev))
		bnx2x_set_power_state(bp, PCI_D0);

	rtnl_unlock();

	return PCI_ERS_RESULT_RECOVERED;
}
/**
 * bnx2x_io_resume - called when traffic can start flowing again
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation.
 */
static void bnx2x_io_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
		printk(KERN_ERR "Handling parity error recovery. "
				"Try again later\n");
		return;
	}

	rtnl_lock();

	bnx2x_eeh_recover(bp);

	if (netif_running(dev))
		bnx2x_nic_load(bp, LOAD_NORMAL);

	netif_device_attach(dev);

	rtnl_unlock();
}
static struct pci_error_handlers bnx2x_err_handler = {
	.error_detected = bnx2x_io_error_detected,
	.slot_reset     = bnx2x_io_slot_reset,
	.resume         = bnx2x_io_resume,
};
static struct pci_driver bnx2x_pci_driver = {
	.name        = DRV_MODULE_NAME,
	.id_table    = bnx2x_pci_tbl,
	.probe       = bnx2x_init_one,
	.remove      = __devexit_p(bnx2x_remove_one),
	.suspend     = bnx2x_suspend,
	.resume      = bnx2x_resume,
	.err_handler = &bnx2x_err_handler,
};
static int __init bnx2x_init(void)
{
	int ret;

	pr_info("%s", version);

	bnx2x_wq = create_singlethread_workqueue("bnx2x");
	if (bnx2x_wq == NULL) {
		pr_err("Cannot create workqueue\n");
		return -ENOMEM;
	}

	ret = pci_register_driver(&bnx2x_pci_driver);
	if (ret) {
		pr_err("Cannot register driver\n");
		destroy_workqueue(bnx2x_wq);
	}
	return ret;
}
static void __exit bnx2x_cleanup(void)
{
	pci_unregister_driver(&bnx2x_pci_driver);

	destroy_workqueue(bnx2x_wq);
}

module_init(bnx2x_init);
module_exit(bnx2x_cleanup);
#ifdef BCM_CNIC

/* count denotes the number of new completions we have seen */
static void bnx2x_cnic_sp_post(struct bnx2x *bp, int count)
{
	struct eth_spe *spe;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return;
#endif

	spin_lock_bh(&bp->spq_lock);
	bp->cnic_spq_pending -= count;

	for (; bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending;
	     bp->cnic_spq_pending++) {

		if (!bp->cnic_kwq_pending)
			break;

		spe = bnx2x_sp_get_next(bp);
		*spe = *bp->cnic_kwq_cons;

		bp->cnic_kwq_pending--;

		DP(NETIF_MSG_TIMER, "pending on SPQ %d, on KWQ %d count %d\n",
		   bp->cnic_spq_pending, bp->cnic_kwq_pending, count);

		if (bp->cnic_kwq_cons == bp->cnic_kwq_last)
			bp->cnic_kwq_cons = bp->cnic_kwq;
		else
			bp->cnic_kwq_cons++;
	}
	bnx2x_sp_prod_update(bp);
	spin_unlock_bh(&bp->spq_lock);
}
static int bnx2x_cnic_sp_queue(struct net_device *dev,
			       struct kwqe_16 *kwqes[], u32 count)
{
	struct bnx2x *bp = netdev_priv(dev);
	int i;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return -EIO;
#endif

	spin_lock_bh(&bp->spq_lock);

	for (i = 0; i < count; i++) {
		struct eth_spe *spe = (struct eth_spe *)kwqes[i];

		if (bp->cnic_kwq_pending == MAX_SP_DESC_CNT)
			break;

		*bp->cnic_kwq_prod = *spe;

		bp->cnic_kwq_pending++;

		DP(NETIF_MSG_TIMER, "L5 SPQE %x %x %x:%x pos %d\n",
		   spe->hdr.conn_and_cmd_data, spe->hdr.type,
		   spe->data.mac_config_addr.hi,
		   spe->data.mac_config_addr.lo,
		   bp->cnic_kwq_pending);

		if (bp->cnic_kwq_prod == bp->cnic_kwq_last)
			bp->cnic_kwq_prod = bp->cnic_kwq;
		else
			bp->cnic_kwq_prod++;
	}

	spin_unlock_bh(&bp->spq_lock);

	if (bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending)
		bnx2x_cnic_sp_post(bp, 0);

	return i;
}
static int bnx2x_cnic_ctl_send(struct bnx2x *bp, struct cnic_ctl_info *ctl)
{
	struct cnic_ops *c_ops;
	int rc = 0;

	mutex_lock(&bp->cnic_mutex);
	c_ops = bp->cnic_ops;
	if (c_ops)
		rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
	mutex_unlock(&bp->cnic_mutex);

	return rc;
}
static int bnx2x_cnic_ctl_send_bh(struct bnx2x *bp, struct cnic_ctl_info *ctl)
{
	struct cnic_ops *c_ops;
	int rc = 0;

	rcu_read_lock();
	c_ops = rcu_dereference(bp->cnic_ops);
	if (c_ops)
		rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
	rcu_read_unlock();

	return rc;
}
/*
 * for commands that have no data
 */
int bnx2x_cnic_notify(struct bnx2x *bp, int cmd)
{
	struct cnic_ctl_info ctl = {0};

	ctl.cmd = cmd;

	return bnx2x_cnic_ctl_send(bp, &ctl);
}
static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid)
{
	struct cnic_ctl_info ctl;

	/* first we tell CNIC and only then we count this as a completion */
	ctl.cmd = CNIC_CTL_COMPLETION_CMD;
	ctl.data.comp.cid = cid;

	bnx2x_cnic_ctl_send_bh(bp, &ctl);
	bnx2x_cnic_sp_post(bp, 1);
}
static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc = 0;

	switch (ctl->cmd) {
	case DRV_CTL_CTXTBL_WR_CMD: {
		u32 index = ctl->data.io.offset;
		dma_addr_t addr = ctl->data.io.dma_addr;

		bnx2x_ilt_wr(bp, index, addr);
		break;
	}

	case DRV_CTL_COMPLETION_CMD: {
		int count = ctl->data.comp.comp_count;

		bnx2x_cnic_sp_post(bp, count);
		break;
	}

	/* rtnl_lock is held. */
	case DRV_CTL_START_L2_CMD: {
		u32 cli = ctl->data.ring.client_id;

		bp->rx_mode_cl_mask |= (1 << cli);
		bnx2x_set_storm_rx_mode(bp);
		break;
	}

	/* rtnl_lock is held. */
	case DRV_CTL_STOP_L2_CMD: {
		u32 cli = ctl->data.ring.client_id;

		bp->rx_mode_cl_mask &= ~(1 << cli);
		bnx2x_set_storm_rx_mode(bp);
		break;
	}

	default:
		BNX2X_ERR("unknown command %x\n", ctl->cmd);
		rc = -EINVAL;
	}

	return rc;
}
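
/*
 * Tell CNIC which interrupt resources to use: the second MSI-X vector
 * when MSI-X is active, plus the CNIC and default status blocks.
 */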
void bnx2x_setup_cnic_irq_info(struct bnx2x *bp)
{
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	if (bp->flags & USING_MSIX_FLAG) {
		cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
		cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
		cp->irq_arr[0].vector = bp->msix_table[1].vector;
	} else {
		cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
		cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
	}
	cp->irq_arr[0].status_blk = bp->cnic_sb;
	cp->irq_arr[0].status_blk_num = CNIC_SB_ID(bp);
	cp->irq_arr[1].status_blk = bp->def_status_blk;
	cp->irq_arr[1].status_blk_num = DEF_SB_ID;

	cp->num_irq = 2;
}
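
/*
 * Called by CNIC to attach to this device: set up the kwqe ring and the
 * CNIC status block and program the iSCSI MAC before publishing the ops
 * pointer.
 */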
static int bnx2x_register_cnic(struct net_device *dev, struct cnic_ops *ops,
			       void *data)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	if (ops == NULL)
		return -EINVAL;

	if (atomic_read(&bp->intr_sem) != 0)
		return -EBUSY;

	bp->cnic_kwq = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!bp->cnic_kwq)
		return -ENOMEM;

	bp->cnic_kwq_cons = bp->cnic_kwq;
	bp->cnic_kwq_prod = bp->cnic_kwq;
	bp->cnic_kwq_last = bp->cnic_kwq + MAX_SP_DESC_CNT;

	bp->cnic_spq_pending = 0;
	bp->cnic_kwq_pending = 0;

	bp->cnic_data = data;

	cp->num_irq = 0;
	cp->drv_state = CNIC_DRV_STATE_REGD;

	bnx2x_init_sb(bp, bp->cnic_sb, bp->cnic_sb_mapping, CNIC_SB_ID(bp));

	bnx2x_setup_cnic_irq_info(bp);
	bnx2x_set_iscsi_eth_mac_addr(bp, 1);
	bp->cnic_flags |= BNX2X_CNIC_FLAG_MAC_SET;
	rcu_assign_pointer(bp->cnic_ops, ops);

	return 0;
}
static int bnx2x_unregister_cnic(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	mutex_lock(&bp->cnic_mutex);
	if (bp->cnic_flags & BNX2X_CNIC_FLAG_MAC_SET) {
		bp->cnic_flags &= ~BNX2X_CNIC_FLAG_MAC_SET;
		bnx2x_set_iscsi_eth_mac_addr(bp, 0);
	}
	cp->drv_state = 0;
	rcu_assign_pointer(bp->cnic_ops, NULL);
	mutex_unlock(&bp->cnic_mutex);
	synchronize_rcu();
	kfree(bp->cnic_kwq);
	bp->cnic_kwq = NULL;

	return 0;
}
7839 struct cnic_eth_dev
*bnx2x_cnic_probe(struct net_device
*dev
)
7841 struct bnx2x
*bp
= netdev_priv(dev
);
7842 struct cnic_eth_dev
*cp
= &bp
->cnic_eth_dev
;
7844 cp
->drv_owner
= THIS_MODULE
;
7845 cp
->chip_id
= CHIP_ID(bp
);
7846 cp
->pdev
= bp
->pdev
;
7847 cp
->io_base
= bp
->regview
;
7848 cp
->io_base2
= bp
->doorbells
;
7849 cp
->max_kwqe_pending
= 8;
7850 cp
->ctx_blk_size
= CNIC_CTX_PER_ILT
* sizeof(union cdu_context
);
7851 cp
->ctx_tbl_offset
= FUNC_ILT_BASE(BP_FUNC(bp
)) + 1;
7852 cp
->ctx_tbl_len
= CNIC_ILT_LINES
;
7853 cp
->starting_cid
= BCM_CNIC_CID_START
;
7854 cp
->drv_submit_kwqes_16
= bnx2x_cnic_sp_queue
;
7855 cp
->drv_ctl
= bnx2x_drv_ctl
;
7856 cp
->drv_register_cnic
= bnx2x_register_cnic
;
7857 cp
->drv_unregister_cnic
= bnx2x_unregister_cnic
;
7861 EXPORT_SYMBOL(bnx2x_cnic_probe
);
7863 #endif /* BCM_CNIC */