1 /* bnx2x_main.c: Broadcom Everest network driver.
3 * Copyright (c) 2007-2010 Broadcom Corporation
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
9 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
10 * Written by: Eliezer Tamir
11 * Based on code from Michael Chan's bnx2 driver
12 * UDP CSUM errata workaround by Arik Gendelman
13 * Slowpath and fastpath rework by Vladislav Zolotarov
14 * Statistics and Link management by Yitchak Gertner
18 #include <linux/module.h>
19 #include <linux/moduleparam.h>
20 #include <linux/kernel.h>
21 #include <linux/device.h> /* for dev_info() */
22 #include <linux/timer.h>
23 #include <linux/errno.h>
24 #include <linux/ioport.h>
25 #include <linux/slab.h>
26 #include <linux/interrupt.h>
27 #include <linux/pci.h>
28 #include <linux/init.h>
29 #include <linux/netdevice.h>
30 #include <linux/etherdevice.h>
31 #include <linux/skbuff.h>
32 #include <linux/dma-mapping.h>
33 #include <linux/bitops.h>
34 #include <linux/irq.h>
35 #include <linux/delay.h>
36 #include <asm/byteorder.h>
37 #include <linux/time.h>
38 #include <linux/ethtool.h>
39 #include <linux/mii.h>
40 #include <linux/if_vlan.h>
43 #include <net/checksum.h>
44 #include <net/ip6_checksum.h>
45 #include <linux/workqueue.h>
46 #include <linux/crc32.h>
47 #include <linux/crc32c.h>
48 #include <linux/prefetch.h>
49 #include <linux/zlib.h>
51 #include <linux/stringify.h>
55 #include "bnx2x_init.h"
56 #include "bnx2x_init_ops.h"
57 #include "bnx2x_cmn.h"
59 #include <linux/firmware.h>
60 #include "bnx2x_fw_file_hdr.h"
/* Firmware version string and per-chip firmware file names requested
 * via request_firmware().
 */
#define FW_FILE_VERSION					\
	__stringify(BCM_5710_FW_MAJOR_VERSION) "."	\
	__stringify(BCM_5710_FW_MINOR_VERSION) "."	\
	__stringify(BCM_5710_FW_REVISION_VERSION) "."	\
	__stringify(BCM_5710_FW_ENGINEERING_VERSION)
#define FW_FILE_NAME_E1		"bnx2x/bnx2x-e1-" FW_FILE_VERSION ".fw"
#define FW_FILE_NAME_E1H	"bnx2x/bnx2x-e1h-" FW_FILE_VERSION ".fw"
#define FW_FILE_NAME_E2		"bnx2x/bnx2x-e2-" FW_FILE_VERSION ".fw"

/* Time in jiffies before concluding the transmitter is hung */
#define TX_TIMEOUT		(5*HZ)
74 static char version
[] __devinitdata
=
75 "Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
76 DRV_MODULE_NAME
" " DRV_MODULE_VERSION
" (" DRV_MODULE_RELDATE
")\n";
78 MODULE_AUTHOR("Eliezer Tamir");
79 MODULE_DESCRIPTION("Broadcom NetXtreme II "
80 "BCM57710/57711/57711E/57712/57712E Driver");
81 MODULE_LICENSE("GPL");
82 MODULE_VERSION(DRV_MODULE_VERSION
);
83 MODULE_FIRMWARE(FW_FILE_NAME_E1
);
84 MODULE_FIRMWARE(FW_FILE_NAME_E1H
);
85 MODULE_FIRMWARE(FW_FILE_NAME_E2
);
87 static int multi_mode
= 1;
88 module_param(multi_mode
, int, 0);
89 MODULE_PARM_DESC(multi_mode
, " Multi queue mode "
90 "(0 Disable; 1 Enable (default))");
93 module_param(num_queues
, int, 0);
94 MODULE_PARM_DESC(num_queues
, " Number of queues for multi_mode=1"
95 " (default is as a number of CPUs)");
97 static int disable_tpa
;
98 module_param(disable_tpa
, int, 0);
99 MODULE_PARM_DESC(disable_tpa
, " Disable the TPA (LRO) feature");
102 module_param(int_mode
, int, 0);
103 MODULE_PARM_DESC(int_mode
, " Force interrupt mode other then MSI-X "
106 static int dropless_fc
;
107 module_param(dropless_fc
, int, 0);
108 MODULE_PARM_DESC(dropless_fc
, " Pause on exhausted host ring");
111 module_param(poll
, int, 0);
112 MODULE_PARM_DESC(poll
, " Use polling (for debug)");
114 static int mrrs
= -1;
115 module_param(mrrs
, int, 0);
116 MODULE_PARM_DESC(mrrs
, " Force Max Read Req Size (0..3) (for debug)");
119 module_param(debug
, int, 0);
120 MODULE_PARM_DESC(debug
, " Default debug msglevel");
122 static struct workqueue_struct
*bnx2x_wq
;
/* Board type — used as the index into board_info[] and as driver_data
 * in the PCI device table below. The enumerator list had been dropped
 * by the extraction; restored in the order matching board_info[].
 */
enum bnx2x_board_type {
	BCM57710 = 0,
	BCM57711,
	BCM57711E,
	BCM57712,
	BCM57712E,
};

/* indexed by board_type, above */
static struct {
	char *name;
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM57710 XGb" },
	{ "Broadcom NetXtreme II BCM57711 XGb" },
	{ "Broadcom NetXtreme II BCM57711E XGb" },
	{ "Broadcom NetXtreme II BCM57712 XGb" },
	{ "Broadcom NetXtreme II BCM57712E XGb" }
};
/* Fallback PCI device IDs for 57712/57712E when the pci_ids header does
 * not yet define them; the closing #endif lines were lost in extraction.
 */
#ifndef PCI_DEVICE_ID_NX2_57712
#define PCI_DEVICE_ID_NX2_57712		0x1662
#endif
#ifndef PCI_DEVICE_ID_NX2_57712E
#define PCI_DEVICE_ID_NX2_57712E	0x1663
#endif
150 static DEFINE_PCI_DEVICE_TABLE(bnx2x_pci_tbl
) = {
151 { PCI_VDEVICE(BROADCOM
, PCI_DEVICE_ID_NX2_57710
), BCM57710
},
152 { PCI_VDEVICE(BROADCOM
, PCI_DEVICE_ID_NX2_57711
), BCM57711
},
153 { PCI_VDEVICE(BROADCOM
, PCI_DEVICE_ID_NX2_57711E
), BCM57711E
},
154 { PCI_VDEVICE(BROADCOM
, PCI_DEVICE_ID_NX2_57712
), BCM57712
},
155 { PCI_VDEVICE(BROADCOM
, PCI_DEVICE_ID_NX2_57712E
), BCM57712E
},
159 MODULE_DEVICE_TABLE(pci
, bnx2x_pci_tbl
);
161 /****************************************************************************
162 * General service functions
163 ****************************************************************************/
165 static inline void __storm_memset_dma_mapping(struct bnx2x
*bp
,
166 u32 addr
, dma_addr_t mapping
)
168 REG_WR(bp
, addr
, U64_LO(mapping
));
169 REG_WR(bp
, addr
+ 4, U64_HI(mapping
));
172 static inline void __storm_memset_fill(struct bnx2x
*bp
,
173 u32 addr
, size_t size
, u32 val
)
176 for (i
= 0; i
< size
/4; i
++)
177 REG_WR(bp
, addr
+ (i
* 4), val
);
180 static inline void storm_memset_ustats_zero(struct bnx2x
*bp
,
181 u8 port
, u16 stat_id
)
183 size_t size
= sizeof(struct ustorm_per_client_stats
);
185 u32 addr
= BAR_USTRORM_INTMEM
+
186 USTORM_PER_COUNTER_ID_STATS_OFFSET(port
, stat_id
);
188 __storm_memset_fill(bp
, addr
, size
, 0);
191 static inline void storm_memset_tstats_zero(struct bnx2x
*bp
,
192 u8 port
, u16 stat_id
)
194 size_t size
= sizeof(struct tstorm_per_client_stats
);
196 u32 addr
= BAR_TSTRORM_INTMEM
+
197 TSTORM_PER_COUNTER_ID_STATS_OFFSET(port
, stat_id
);
199 __storm_memset_fill(bp
, addr
, size
, 0);
202 static inline void storm_memset_xstats_zero(struct bnx2x
*bp
,
203 u8 port
, u16 stat_id
)
205 size_t size
= sizeof(struct xstorm_per_client_stats
);
207 u32 addr
= BAR_XSTRORM_INTMEM
+
208 XSTORM_PER_COUNTER_ID_STATS_OFFSET(port
, stat_id
);
210 __storm_memset_fill(bp
, addr
, size
, 0);
214 static inline void storm_memset_spq_addr(struct bnx2x
*bp
,
215 dma_addr_t mapping
, u16 abs_fid
)
217 u32 addr
= XSEM_REG_FAST_MEMORY
+
218 XSTORM_SPQ_PAGE_BASE_OFFSET(abs_fid
);
220 __storm_memset_dma_mapping(bp
, addr
, mapping
);
223 static inline void storm_memset_ov(struct bnx2x
*bp
, u16 ov
, u16 abs_fid
)
225 REG_WR16(bp
, BAR_XSTRORM_INTMEM
+ XSTORM_E1HOV_OFFSET(abs_fid
), ov
);
228 static inline void storm_memset_func_cfg(struct bnx2x
*bp
,
229 struct tstorm_eth_function_common_config
*tcfg
,
232 size_t size
= sizeof(struct tstorm_eth_function_common_config
);
234 u32 addr
= BAR_TSTRORM_INTMEM
+
235 TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(abs_fid
);
237 __storm_memset_struct(bp
, addr
, size
, (u32
*)tcfg
);
240 static inline void storm_memset_xstats_flags(struct bnx2x
*bp
,
241 struct stats_indication_flags
*flags
,
244 size_t size
= sizeof(struct stats_indication_flags
);
246 u32 addr
= BAR_XSTRORM_INTMEM
+ XSTORM_STATS_FLAGS_OFFSET(abs_fid
);
248 __storm_memset_struct(bp
, addr
, size
, (u32
*)flags
);
251 static inline void storm_memset_tstats_flags(struct bnx2x
*bp
,
252 struct stats_indication_flags
*flags
,
255 size_t size
= sizeof(struct stats_indication_flags
);
257 u32 addr
= BAR_TSTRORM_INTMEM
+ TSTORM_STATS_FLAGS_OFFSET(abs_fid
);
259 __storm_memset_struct(bp
, addr
, size
, (u32
*)flags
);
262 static inline void storm_memset_ustats_flags(struct bnx2x
*bp
,
263 struct stats_indication_flags
*flags
,
266 size_t size
= sizeof(struct stats_indication_flags
);
268 u32 addr
= BAR_USTRORM_INTMEM
+ USTORM_STATS_FLAGS_OFFSET(abs_fid
);
270 __storm_memset_struct(bp
, addr
, size
, (u32
*)flags
);
273 static inline void storm_memset_cstats_flags(struct bnx2x
*bp
,
274 struct stats_indication_flags
*flags
,
277 size_t size
= sizeof(struct stats_indication_flags
);
279 u32 addr
= BAR_CSTRORM_INTMEM
+ CSTORM_STATS_FLAGS_OFFSET(abs_fid
);
281 __storm_memset_struct(bp
, addr
, size
, (u32
*)flags
);
284 static inline void storm_memset_xstats_addr(struct bnx2x
*bp
,
285 dma_addr_t mapping
, u16 abs_fid
)
287 u32 addr
= BAR_XSTRORM_INTMEM
+
288 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(abs_fid
);
290 __storm_memset_dma_mapping(bp
, addr
, mapping
);
293 static inline void storm_memset_tstats_addr(struct bnx2x
*bp
,
294 dma_addr_t mapping
, u16 abs_fid
)
296 u32 addr
= BAR_TSTRORM_INTMEM
+
297 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(abs_fid
);
299 __storm_memset_dma_mapping(bp
, addr
, mapping
);
302 static inline void storm_memset_ustats_addr(struct bnx2x
*bp
,
303 dma_addr_t mapping
, u16 abs_fid
)
305 u32 addr
= BAR_USTRORM_INTMEM
+
306 USTORM_ETH_STATS_QUERY_ADDR_OFFSET(abs_fid
);
308 __storm_memset_dma_mapping(bp
, addr
, mapping
);
311 static inline void storm_memset_cstats_addr(struct bnx2x
*bp
,
312 dma_addr_t mapping
, u16 abs_fid
)
314 u32 addr
= BAR_CSTRORM_INTMEM
+
315 CSTORM_ETH_STATS_QUERY_ADDR_OFFSET(abs_fid
);
317 __storm_memset_dma_mapping(bp
, addr
, mapping
);
320 static inline void storm_memset_vf_to_pf(struct bnx2x
*bp
, u16 abs_fid
,
323 REG_WR8(bp
, BAR_XSTRORM_INTMEM
+ XSTORM_VF_TO_PF_OFFSET(abs_fid
),
325 REG_WR8(bp
, BAR_CSTRORM_INTMEM
+ CSTORM_VF_TO_PF_OFFSET(abs_fid
),
327 REG_WR8(bp
, BAR_TSTRORM_INTMEM
+ TSTORM_VF_TO_PF_OFFSET(abs_fid
),
329 REG_WR8(bp
, BAR_USTRORM_INTMEM
+ USTORM_VF_TO_PF_OFFSET(abs_fid
),
333 static inline void storm_memset_func_en(struct bnx2x
*bp
, u16 abs_fid
,
336 REG_WR8(bp
, BAR_XSTRORM_INTMEM
+ XSTORM_FUNC_EN_OFFSET(abs_fid
),
338 REG_WR8(bp
, BAR_CSTRORM_INTMEM
+ CSTORM_FUNC_EN_OFFSET(abs_fid
),
340 REG_WR8(bp
, BAR_TSTRORM_INTMEM
+ TSTORM_FUNC_EN_OFFSET(abs_fid
),
342 REG_WR8(bp
, BAR_USTRORM_INTMEM
+ USTORM_FUNC_EN_OFFSET(abs_fid
),
346 static inline void storm_memset_eq_data(struct bnx2x
*bp
,
347 struct event_ring_data
*eq_data
,
350 size_t size
= sizeof(struct event_ring_data
);
352 u32 addr
= BAR_CSTRORM_INTMEM
+ CSTORM_EVENT_RING_DATA_OFFSET(pfid
);
354 __storm_memset_struct(bp
, addr
, size
, (u32
*)eq_data
);
357 static inline void storm_memset_eq_prod(struct bnx2x
*bp
, u16 eq_prod
,
360 u32 addr
= BAR_CSTRORM_INTMEM
+ CSTORM_EVENT_RING_PROD_OFFSET(pfid
);
361 REG_WR16(bp
, addr
, eq_prod
);
364 static inline void storm_memset_hc_timeout(struct bnx2x
*bp
, u8 port
,
365 u16 fw_sb_id
, u8 sb_index
,
369 int index_offset
= CHIP_IS_E2(bp
) ?
370 offsetof(struct hc_status_block_data_e2
, index_data
) :
371 offsetof(struct hc_status_block_data_e1x
, index_data
);
372 u32 addr
= BAR_CSTRORM_INTMEM
+
373 CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id
) +
375 sizeof(struct hc_index_data
)*sb_index
+
376 offsetof(struct hc_index_data
, timeout
);
377 REG_WR8(bp
, addr
, ticks
);
378 DP(NETIF_MSG_HW
, "port %x fw_sb_id %d sb_index %d ticks %d\n",
379 port
, fw_sb_id
, sb_index
, ticks
);
381 static inline void storm_memset_hc_disable(struct bnx2x
*bp
, u8 port
,
382 u16 fw_sb_id
, u8 sb_index
,
385 u32 enable_flag
= disable
? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT
);
386 int index_offset
= CHIP_IS_E2(bp
) ?
387 offsetof(struct hc_status_block_data_e2
, index_data
) :
388 offsetof(struct hc_status_block_data_e1x
, index_data
);
389 u32 addr
= BAR_CSTRORM_INTMEM
+
390 CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id
) +
392 sizeof(struct hc_index_data
)*sb_index
+
393 offsetof(struct hc_index_data
, flags
);
394 u16 flags
= REG_RD16(bp
, addr
);
396 flags
&= ~HC_INDEX_DATA_HC_ENABLED
;
397 flags
|= enable_flag
;
398 REG_WR16(bp
, addr
, flags
);
399 DP(NETIF_MSG_HW
, "port %x fw_sb_id %d sb_index %d disable %d\n",
400 port
, fw_sb_id
, sb_index
, disable
);
404 * locking is done by mcp
406 static void bnx2x_reg_wr_ind(struct bnx2x
*bp
, u32 addr
, u32 val
)
408 pci_write_config_dword(bp
->pdev
, PCICFG_GRC_ADDRESS
, addr
);
409 pci_write_config_dword(bp
->pdev
, PCICFG_GRC_DATA
, val
);
410 pci_write_config_dword(bp
->pdev
, PCICFG_GRC_ADDRESS
,
411 PCICFG_VENDOR_ID_OFFSET
);
414 static u32
bnx2x_reg_rd_ind(struct bnx2x
*bp
, u32 addr
)
418 pci_write_config_dword(bp
->pdev
, PCICFG_GRC_ADDRESS
, addr
);
419 pci_read_config_dword(bp
->pdev
, PCICFG_GRC_DATA
, &val
);
420 pci_write_config_dword(bp
->pdev
, PCICFG_GRC_ADDRESS
,
421 PCICFG_VENDOR_ID_OFFSET
);
/* Format fragments used by bnx2x_dp_dmae() when printing a DMAE
 * command, chosen by its source/destination type.
 */
#define DMAE_DP_SRC_GRC		"grc src_addr [%08x]"
#define DMAE_DP_SRC_PCI		"pci src_addr [%x:%08x]"
#define DMAE_DP_DST_GRC		"grc dst_addr [%08x]"
#define DMAE_DP_DST_PCI		"pci dst_addr [%x:%08x]"
#define DMAE_DP_DST_NONE	"dst_addr [none]"
432 static void bnx2x_dp_dmae(struct bnx2x
*bp
, struct dmae_command
*dmae
,
435 u32 src_type
= dmae
->opcode
& DMAE_COMMAND_SRC
;
437 switch (dmae
->opcode
& DMAE_COMMAND_DST
) {
438 case DMAE_CMD_DST_PCI
:
439 if (src_type
== DMAE_CMD_SRC_PCI
)
440 DP(msglvl
, "DMAE: opcode 0x%08x\n"
441 "src [%x:%08x], len [%d*4], dst [%x:%08x]\n"
442 "comp_addr [%x:%08x], comp_val 0x%08x\n",
443 dmae
->opcode
, dmae
->src_addr_hi
, dmae
->src_addr_lo
,
444 dmae
->len
, dmae
->dst_addr_hi
, dmae
->dst_addr_lo
,
445 dmae
->comp_addr_hi
, dmae
->comp_addr_lo
,
448 DP(msglvl
, "DMAE: opcode 0x%08x\n"
449 "src [%08x], len [%d*4], dst [%x:%08x]\n"
450 "comp_addr [%x:%08x], comp_val 0x%08x\n",
451 dmae
->opcode
, dmae
->src_addr_lo
>> 2,
452 dmae
->len
, dmae
->dst_addr_hi
, dmae
->dst_addr_lo
,
453 dmae
->comp_addr_hi
, dmae
->comp_addr_lo
,
456 case DMAE_CMD_DST_GRC
:
457 if (src_type
== DMAE_CMD_SRC_PCI
)
458 DP(msglvl
, "DMAE: opcode 0x%08x\n"
459 "src [%x:%08x], len [%d*4], dst_addr [%08x]\n"
460 "comp_addr [%x:%08x], comp_val 0x%08x\n",
461 dmae
->opcode
, dmae
->src_addr_hi
, dmae
->src_addr_lo
,
462 dmae
->len
, dmae
->dst_addr_lo
>> 2,
463 dmae
->comp_addr_hi
, dmae
->comp_addr_lo
,
466 DP(msglvl
, "DMAE: opcode 0x%08x\n"
467 "src [%08x], len [%d*4], dst [%08x]\n"
468 "comp_addr [%x:%08x], comp_val 0x%08x\n",
469 dmae
->opcode
, dmae
->src_addr_lo
>> 2,
470 dmae
->len
, dmae
->dst_addr_lo
>> 2,
471 dmae
->comp_addr_hi
, dmae
->comp_addr_lo
,
475 if (src_type
== DMAE_CMD_SRC_PCI
)
476 DP(msglvl
, "DMAE: opcode 0x%08x\n"
477 DP_LEVEL
"src_addr [%x:%08x] len [%d * 4] "
479 DP_LEVEL
"comp_addr [%x:%08x] comp_val 0x%08x\n",
480 dmae
->opcode
, dmae
->src_addr_hi
, dmae
->src_addr_lo
,
481 dmae
->len
, dmae
->comp_addr_hi
, dmae
->comp_addr_lo
,
484 DP(msglvl
, "DMAE: opcode 0x%08x\n"
485 DP_LEVEL
"src_addr [%08x] len [%d * 4] "
487 DP_LEVEL
"comp_addr [%x:%08x] comp_val 0x%08x\n",
488 dmae
->opcode
, dmae
->src_addr_lo
>> 2,
489 dmae
->len
, dmae
->comp_addr_hi
, dmae
->comp_addr_lo
,
496 const u32 dmae_reg_go_c
[] = {
497 DMAE_REG_GO_C0
, DMAE_REG_GO_C1
, DMAE_REG_GO_C2
, DMAE_REG_GO_C3
,
498 DMAE_REG_GO_C4
, DMAE_REG_GO_C5
, DMAE_REG_GO_C6
, DMAE_REG_GO_C7
,
499 DMAE_REG_GO_C8
, DMAE_REG_GO_C9
, DMAE_REG_GO_C10
, DMAE_REG_GO_C11
,
500 DMAE_REG_GO_C12
, DMAE_REG_GO_C13
, DMAE_REG_GO_C14
, DMAE_REG_GO_C15
503 /* copy command into DMAE command memory and set DMAE command go */
504 void bnx2x_post_dmae(struct bnx2x
*bp
, struct dmae_command
*dmae
, int idx
)
509 cmd_offset
= (DMAE_REG_CMD_MEM
+ sizeof(struct dmae_command
) * idx
);
510 for (i
= 0; i
< (sizeof(struct dmae_command
)/4); i
++) {
511 REG_WR(bp
, cmd_offset
+ i
*4, *(((u32
*)dmae
) + i
));
513 DP(BNX2X_MSG_OFF
, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
514 idx
, i
, cmd_offset
+ i
*4, *(((u32
*)dmae
) + i
));
516 REG_WR(bp
, dmae_reg_go_c
[idx
], 1);
519 u32
bnx2x_dmae_opcode_add_comp(u32 opcode
, u8 comp_type
)
521 return opcode
| ((comp_type
<< DMAE_COMMAND_C_DST_SHIFT
) |
525 u32
bnx2x_dmae_opcode_clr_src_reset(u32 opcode
)
527 return opcode
& ~DMAE_CMD_SRC_RESET
;
530 u32
bnx2x_dmae_opcode(struct bnx2x
*bp
, u8 src_type
, u8 dst_type
,
531 bool with_comp
, u8 comp_type
)
535 opcode
|= ((src_type
<< DMAE_COMMAND_SRC_SHIFT
) |
536 (dst_type
<< DMAE_COMMAND_DST_SHIFT
));
538 opcode
|= (DMAE_CMD_SRC_RESET
| DMAE_CMD_DST_RESET
);
540 opcode
|= (BP_PORT(bp
) ? DMAE_CMD_PORT_1
: DMAE_CMD_PORT_0
);
541 opcode
|= ((BP_E1HVN(bp
) << DMAE_CMD_E1HVN_SHIFT
) |
542 (BP_E1HVN(bp
) << DMAE_COMMAND_DST_VN_SHIFT
));
543 opcode
|= (DMAE_COM_SET_ERR
<< DMAE_COMMAND_ERR_POLICY_SHIFT
);
546 opcode
|= DMAE_CMD_ENDIANITY_B_DW_SWAP
;
548 opcode
|= DMAE_CMD_ENDIANITY_DW_SWAP
;
551 opcode
= bnx2x_dmae_opcode_add_comp(opcode
, comp_type
);
555 static void bnx2x_prep_dmae_with_comp(struct bnx2x
*bp
,
556 struct dmae_command
*dmae
,
557 u8 src_type
, u8 dst_type
)
559 memset(dmae
, 0, sizeof(struct dmae_command
));
562 dmae
->opcode
= bnx2x_dmae_opcode(bp
, src_type
, dst_type
,
563 true, DMAE_COMP_PCI
);
565 /* fill in the completion parameters */
566 dmae
->comp_addr_lo
= U64_LO(bnx2x_sp_mapping(bp
, wb_comp
));
567 dmae
->comp_addr_hi
= U64_HI(bnx2x_sp_mapping(bp
, wb_comp
));
568 dmae
->comp_val
= DMAE_COMP_VAL
;
571 /* issue a dmae command over the init-channel and wailt for completion */
572 static int bnx2x_issue_dmae_with_comp(struct bnx2x
*bp
,
573 struct dmae_command
*dmae
)
575 u32
*wb_comp
= bnx2x_sp(bp
, wb_comp
);
576 int cnt
= CHIP_REV_IS_SLOW(bp
) ? (400000) : 40;
579 DP(BNX2X_MSG_OFF
, "data before [0x%08x 0x%08x 0x%08x 0x%08x]\n",
580 bp
->slowpath
->wb_data
[0], bp
->slowpath
->wb_data
[1],
581 bp
->slowpath
->wb_data
[2], bp
->slowpath
->wb_data
[3]);
583 /* lock the dmae channel */
584 mutex_lock(&bp
->dmae_mutex
);
586 /* reset completion */
589 /* post the command on the channel used for initializations */
590 bnx2x_post_dmae(bp
, dmae
, INIT_DMAE_C(bp
));
592 /* wait for completion */
594 while ((*wb_comp
& ~DMAE_PCI_ERR_FLAG
) != DMAE_COMP_VAL
) {
595 DP(BNX2X_MSG_OFF
, "wb_comp 0x%08x\n", *wb_comp
);
598 BNX2X_ERR("DMAE timeout!\n");
605 if (*wb_comp
& DMAE_PCI_ERR_FLAG
) {
606 BNX2X_ERR("DMAE PCI error!\n");
610 DP(BNX2X_MSG_OFF
, "data after [0x%08x 0x%08x 0x%08x 0x%08x]\n",
611 bp
->slowpath
->wb_data
[0], bp
->slowpath
->wb_data
[1],
612 bp
->slowpath
->wb_data
[2], bp
->slowpath
->wb_data
[3]);
615 mutex_unlock(&bp
->dmae_mutex
);
619 void bnx2x_write_dmae(struct bnx2x
*bp
, dma_addr_t dma_addr
, u32 dst_addr
,
622 struct dmae_command dmae
;
624 if (!bp
->dmae_ready
) {
625 u32
*data
= bnx2x_sp(bp
, wb_data
[0]);
627 DP(BNX2X_MSG_OFF
, "DMAE is not ready (dst_addr %08x len32 %d)"
628 " using indirect\n", dst_addr
, len32
);
629 bnx2x_init_ind_wr(bp
, dst_addr
, data
, len32
);
633 /* set opcode and fixed command fields */
634 bnx2x_prep_dmae_with_comp(bp
, &dmae
, DMAE_SRC_PCI
, DMAE_DST_GRC
);
636 /* fill in addresses and len */
637 dmae
.src_addr_lo
= U64_LO(dma_addr
);
638 dmae
.src_addr_hi
= U64_HI(dma_addr
);
639 dmae
.dst_addr_lo
= dst_addr
>> 2;
640 dmae
.dst_addr_hi
= 0;
643 bnx2x_dp_dmae(bp
, &dmae
, BNX2X_MSG_OFF
);
645 /* issue the command and wait for completion */
646 bnx2x_issue_dmae_with_comp(bp
, &dmae
);
649 void bnx2x_read_dmae(struct bnx2x
*bp
, u32 src_addr
, u32 len32
)
651 struct dmae_command dmae
;
653 if (!bp
->dmae_ready
) {
654 u32
*data
= bnx2x_sp(bp
, wb_data
[0]);
657 DP(BNX2X_MSG_OFF
, "DMAE is not ready (src_addr %08x len32 %d)"
658 " using indirect\n", src_addr
, len32
);
659 for (i
= 0; i
< len32
; i
++)
660 data
[i
] = bnx2x_reg_rd_ind(bp
, src_addr
+ i
*4);
664 /* set opcode and fixed command fields */
665 bnx2x_prep_dmae_with_comp(bp
, &dmae
, DMAE_SRC_GRC
, DMAE_DST_PCI
);
667 /* fill in addresses and len */
668 dmae
.src_addr_lo
= src_addr
>> 2;
669 dmae
.src_addr_hi
= 0;
670 dmae
.dst_addr_lo
= U64_LO(bnx2x_sp_mapping(bp
, wb_data
));
671 dmae
.dst_addr_hi
= U64_HI(bnx2x_sp_mapping(bp
, wb_data
));
674 bnx2x_dp_dmae(bp
, &dmae
, BNX2X_MSG_OFF
);
676 /* issue the command and wait for completion */
677 bnx2x_issue_dmae_with_comp(bp
, &dmae
);
680 static void bnx2x_write_dmae_phys_len(struct bnx2x
*bp
, dma_addr_t phys_addr
,
683 int dmae_wr_max
= DMAE_LEN32_WR_MAX(bp
);
686 while (len
> dmae_wr_max
) {
687 bnx2x_write_dmae(bp
, phys_addr
+ offset
,
688 addr
+ offset
, dmae_wr_max
);
689 offset
+= dmae_wr_max
* 4;
693 bnx2x_write_dmae(bp
, phys_addr
+ offset
, addr
+ offset
, len
);
696 /* used only for slowpath so not inlined */
697 static void bnx2x_wb_wr(struct bnx2x
*bp
, int reg
, u32 val_hi
, u32 val_lo
)
701 wb_write
[0] = val_hi
;
702 wb_write
[1] = val_lo
;
703 REG_WR_DMAE(bp
, reg
, wb_write
, 2);
707 static u64
bnx2x_wb_rd(struct bnx2x
*bp
, int reg
)
711 REG_RD_DMAE(bp
, reg
, wb_data
, 2);
713 return HILO_U64(wb_data
[0], wb_data
[1]);
717 static int bnx2x_mc_assert(struct bnx2x
*bp
)
721 u32 row0
, row1
, row2
, row3
;
724 last_idx
= REG_RD8(bp
, BAR_XSTRORM_INTMEM
+
725 XSTORM_ASSERT_LIST_INDEX_OFFSET
);
727 BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx
);
729 /* print the asserts */
730 for (i
= 0; i
< STROM_ASSERT_ARRAY_SIZE
; i
++) {
732 row0
= REG_RD(bp
, BAR_XSTRORM_INTMEM
+
733 XSTORM_ASSERT_LIST_OFFSET(i
));
734 row1
= REG_RD(bp
, BAR_XSTRORM_INTMEM
+
735 XSTORM_ASSERT_LIST_OFFSET(i
) + 4);
736 row2
= REG_RD(bp
, BAR_XSTRORM_INTMEM
+
737 XSTORM_ASSERT_LIST_OFFSET(i
) + 8);
738 row3
= REG_RD(bp
, BAR_XSTRORM_INTMEM
+
739 XSTORM_ASSERT_LIST_OFFSET(i
) + 12);
741 if (row0
!= COMMON_ASM_INVALID_ASSERT_OPCODE
) {
742 BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
743 " 0x%08x 0x%08x 0x%08x\n",
744 i
, row3
, row2
, row1
, row0
);
752 last_idx
= REG_RD8(bp
, BAR_TSTRORM_INTMEM
+
753 TSTORM_ASSERT_LIST_INDEX_OFFSET
);
755 BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx
);
757 /* print the asserts */
758 for (i
= 0; i
< STROM_ASSERT_ARRAY_SIZE
; i
++) {
760 row0
= REG_RD(bp
, BAR_TSTRORM_INTMEM
+
761 TSTORM_ASSERT_LIST_OFFSET(i
));
762 row1
= REG_RD(bp
, BAR_TSTRORM_INTMEM
+
763 TSTORM_ASSERT_LIST_OFFSET(i
) + 4);
764 row2
= REG_RD(bp
, BAR_TSTRORM_INTMEM
+
765 TSTORM_ASSERT_LIST_OFFSET(i
) + 8);
766 row3
= REG_RD(bp
, BAR_TSTRORM_INTMEM
+
767 TSTORM_ASSERT_LIST_OFFSET(i
) + 12);
769 if (row0
!= COMMON_ASM_INVALID_ASSERT_OPCODE
) {
770 BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
771 " 0x%08x 0x%08x 0x%08x\n",
772 i
, row3
, row2
, row1
, row0
);
780 last_idx
= REG_RD8(bp
, BAR_CSTRORM_INTMEM
+
781 CSTORM_ASSERT_LIST_INDEX_OFFSET
);
783 BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx
);
785 /* print the asserts */
786 for (i
= 0; i
< STROM_ASSERT_ARRAY_SIZE
; i
++) {
788 row0
= REG_RD(bp
, BAR_CSTRORM_INTMEM
+
789 CSTORM_ASSERT_LIST_OFFSET(i
));
790 row1
= REG_RD(bp
, BAR_CSTRORM_INTMEM
+
791 CSTORM_ASSERT_LIST_OFFSET(i
) + 4);
792 row2
= REG_RD(bp
, BAR_CSTRORM_INTMEM
+
793 CSTORM_ASSERT_LIST_OFFSET(i
) + 8);
794 row3
= REG_RD(bp
, BAR_CSTRORM_INTMEM
+
795 CSTORM_ASSERT_LIST_OFFSET(i
) + 12);
797 if (row0
!= COMMON_ASM_INVALID_ASSERT_OPCODE
) {
798 BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
799 " 0x%08x 0x%08x 0x%08x\n",
800 i
, row3
, row2
, row1
, row0
);
808 last_idx
= REG_RD8(bp
, BAR_USTRORM_INTMEM
+
809 USTORM_ASSERT_LIST_INDEX_OFFSET
);
811 BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx
);
813 /* print the asserts */
814 for (i
= 0; i
< STROM_ASSERT_ARRAY_SIZE
; i
++) {
816 row0
= REG_RD(bp
, BAR_USTRORM_INTMEM
+
817 USTORM_ASSERT_LIST_OFFSET(i
));
818 row1
= REG_RD(bp
, BAR_USTRORM_INTMEM
+
819 USTORM_ASSERT_LIST_OFFSET(i
) + 4);
820 row2
= REG_RD(bp
, BAR_USTRORM_INTMEM
+
821 USTORM_ASSERT_LIST_OFFSET(i
) + 8);
822 row3
= REG_RD(bp
, BAR_USTRORM_INTMEM
+
823 USTORM_ASSERT_LIST_OFFSET(i
) + 12);
825 if (row0
!= COMMON_ASM_INVALID_ASSERT_OPCODE
) {
826 BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
827 " 0x%08x 0x%08x 0x%08x\n",
828 i
, row3
, row2
, row1
, row0
);
838 static void bnx2x_fw_dump(struct bnx2x
*bp
)
844 u32 trace_shmem_base
;
846 BNX2X_ERR("NO MCP - can not dump\n");
850 if (BP_PATH(bp
) == 0)
851 trace_shmem_base
= bp
->common
.shmem_base
;
853 trace_shmem_base
= SHMEM2_RD(bp
, other_shmem_base_addr
);
854 addr
= trace_shmem_base
- 0x0800 + 4;
855 mark
= REG_RD(bp
, addr
);
856 mark
= (CHIP_IS_E1x(bp
) ? MCP_REG_MCPR_SCRATCH
: MCP_A_REG_MCPR_SCRATCH
)
857 + ((mark
+ 0x3) & ~0x3) - 0x08000000;
858 pr_err("begin fw dump (mark 0x%x)\n", mark
);
861 for (offset
= mark
; offset
<= trace_shmem_base
; offset
+= 0x8*4) {
862 for (word
= 0; word
< 8; word
++)
863 data
[word
] = htonl(REG_RD(bp
, offset
+ 4*word
));
865 pr_cont("%s", (char *)data
);
867 for (offset
= addr
+ 4; offset
<= mark
; offset
+= 0x8*4) {
868 for (word
= 0; word
< 8; word
++)
869 data
[word
] = htonl(REG_RD(bp
, offset
+ 4*word
));
871 pr_cont("%s", (char *)data
);
873 pr_err("end of fw dump\n");
876 void bnx2x_panic_dump(struct bnx2x
*bp
)
880 struct hc_sp_status_block_data sp_sb_data
;
881 int func
= BP_FUNC(bp
);
882 #ifdef BNX2X_STOP_ON_ERROR
883 u16 start
= 0, end
= 0;
886 bp
->stats_state
= STATS_STATE_DISABLED
;
887 DP(BNX2X_MSG_STATS
, "stats_state - DISABLED\n");
889 BNX2X_ERR("begin crash dump -----------------\n");
893 BNX2X_ERR("def_idx(0x%x) def_att_idx(0x%x) attn_state(0x%x)"
894 " spq_prod_idx(0x%x)\n",
895 bp
->def_idx
, bp
->def_att_idx
,
896 bp
->attn_state
, bp
->spq_prod_idx
);
897 BNX2X_ERR("DSB: attn bits(0x%x) ack(0x%x) id(0x%x) idx(0x%x)\n",
898 bp
->def_status_blk
->atten_status_block
.attn_bits
,
899 bp
->def_status_blk
->atten_status_block
.attn_bits_ack
,
900 bp
->def_status_blk
->atten_status_block
.status_block_id
,
901 bp
->def_status_blk
->atten_status_block
.attn_bits_index
);
903 for (i
= 0; i
< HC_SP_SB_MAX_INDICES
; i
++)
905 bp
->def_status_blk
->sp_sb
.index_values
[i
],
906 (i
== HC_SP_SB_MAX_INDICES
- 1) ? ") " : " ");
908 for (i
= 0; i
< sizeof(struct hc_sp_status_block_data
)/sizeof(u32
); i
++)
909 *((u32
*)&sp_sb_data
+ i
) = REG_RD(bp
, BAR_CSTRORM_INTMEM
+
910 CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func
) +
913 pr_cont("igu_sb_id(0x%x) igu_seg_id (0x%x) "
914 "pf_id(0x%x) vnic_id(0x%x) "
915 "vf_id(0x%x) vf_valid (0x%x)\n",
916 sp_sb_data
.igu_sb_id
,
917 sp_sb_data
.igu_seg_id
,
918 sp_sb_data
.p_func
.pf_id
,
919 sp_sb_data
.p_func
.vnic_id
,
920 sp_sb_data
.p_func
.vf_id
,
921 sp_sb_data
.p_func
.vf_valid
);
924 for_each_queue(bp
, i
) {
925 struct bnx2x_fastpath
*fp
= &bp
->fp
[i
];
927 struct hc_status_block_data_e2 sb_data_e2
;
928 struct hc_status_block_data_e1x sb_data_e1x
;
929 struct hc_status_block_sm
*hc_sm_p
=
931 sb_data_e2
.common
.state_machine
:
932 sb_data_e1x
.common
.state_machine
;
933 struct hc_index_data
*hc_index_p
=
935 sb_data_e2
.index_data
:
936 sb_data_e1x
.index_data
;
941 BNX2X_ERR("fp%d: rx_bd_prod(0x%x) rx_bd_cons(0x%x)"
942 " rx_comp_prod(0x%x)"
943 " rx_comp_cons(0x%x) *rx_cons_sb(0x%x)\n",
944 i
, fp
->rx_bd_prod
, fp
->rx_bd_cons
,
946 fp
->rx_comp_cons
, le16_to_cpu(*fp
->rx_cons_sb
));
947 BNX2X_ERR(" rx_sge_prod(0x%x) last_max_sge(0x%x)"
948 " fp_hc_idx(0x%x)\n",
949 fp
->rx_sge_prod
, fp
->last_max_sge
,
950 le16_to_cpu(fp
->fp_hc_idx
));
953 BNX2X_ERR("fp%d: tx_pkt_prod(0x%x) tx_pkt_cons(0x%x)"
954 " tx_bd_prod(0x%x) tx_bd_cons(0x%x)"
955 " *tx_cons_sb(0x%x)\n",
956 i
, fp
->tx_pkt_prod
, fp
->tx_pkt_cons
, fp
->tx_bd_prod
,
957 fp
->tx_bd_cons
, le16_to_cpu(*fp
->tx_cons_sb
));
959 loop
= CHIP_IS_E2(bp
) ?
960 HC_SB_MAX_INDICES_E2
: HC_SB_MAX_INDICES_E1X
;
964 BNX2X_ERR(" run indexes (");
965 for (j
= 0; j
< HC_SB_MAX_SM
; j
++)
967 fp
->sb_running_index
[j
],
968 (j
== HC_SB_MAX_SM
- 1) ? ")" : " ");
970 BNX2X_ERR(" indexes (");
971 for (j
= 0; j
< loop
; j
++)
973 fp
->sb_index_values
[j
],
974 (j
== loop
- 1) ? ")" : " ");
976 data_size
= CHIP_IS_E2(bp
) ?
977 sizeof(struct hc_status_block_data_e2
) :
978 sizeof(struct hc_status_block_data_e1x
);
979 data_size
/= sizeof(u32
);
980 sb_data_p
= CHIP_IS_E2(bp
) ?
983 /* copy sb data in here */
984 for (j
= 0; j
< data_size
; j
++)
985 *(sb_data_p
+ j
) = REG_RD(bp
, BAR_CSTRORM_INTMEM
+
986 CSTORM_STATUS_BLOCK_DATA_OFFSET(fp
->fw_sb_id
) +
989 if (CHIP_IS_E2(bp
)) {
990 pr_cont("pf_id(0x%x) vf_id (0x%x) vf_valid(0x%x) "
991 "vnic_id(0x%x) same_igu_sb_1b(0x%x)\n",
992 sb_data_e2
.common
.p_func
.pf_id
,
993 sb_data_e2
.common
.p_func
.vf_id
,
994 sb_data_e2
.common
.p_func
.vf_valid
,
995 sb_data_e2
.common
.p_func
.vnic_id
,
996 sb_data_e2
.common
.same_igu_sb_1b
);
998 pr_cont("pf_id(0x%x) vf_id (0x%x) vf_valid(0x%x) "
999 "vnic_id(0x%x) same_igu_sb_1b(0x%x)\n",
1000 sb_data_e1x
.common
.p_func
.pf_id
,
1001 sb_data_e1x
.common
.p_func
.vf_id
,
1002 sb_data_e1x
.common
.p_func
.vf_valid
,
1003 sb_data_e1x
.common
.p_func
.vnic_id
,
1004 sb_data_e1x
.common
.same_igu_sb_1b
);
1008 for (j
= 0; j
< HC_SB_MAX_SM
; j
++) {
1009 pr_cont("SM[%d] __flags (0x%x) "
1010 "igu_sb_id (0x%x) igu_seg_id(0x%x) "
1011 "time_to_expire (0x%x) "
1012 "timer_value(0x%x)\n", j
,
1014 hc_sm_p
[j
].igu_sb_id
,
1015 hc_sm_p
[j
].igu_seg_id
,
1016 hc_sm_p
[j
].time_to_expire
,
1017 hc_sm_p
[j
].timer_value
);
1021 for (j
= 0; j
< loop
; j
++) {
1022 pr_cont("INDEX[%d] flags (0x%x) "
1023 "timeout (0x%x)\n", j
,
1024 hc_index_p
[j
].flags
,
1025 hc_index_p
[j
].timeout
);
1029 #ifdef BNX2X_STOP_ON_ERROR
1032 for_each_queue(bp
, i
) {
1033 struct bnx2x_fastpath
*fp
= &bp
->fp
[i
];
1035 start
= RX_BD(le16_to_cpu(*fp
->rx_cons_sb
) - 10);
1036 end
= RX_BD(le16_to_cpu(*fp
->rx_cons_sb
) + 503);
1037 for (j
= start
; j
!= end
; j
= RX_BD(j
+ 1)) {
1038 u32
*rx_bd
= (u32
*)&fp
->rx_desc_ring
[j
];
1039 struct sw_rx_bd
*sw_bd
= &fp
->rx_buf_ring
[j
];
1041 BNX2X_ERR("fp%d: rx_bd[%x]=[%x:%x] sw_bd=[%p]\n",
1042 i
, j
, rx_bd
[1], rx_bd
[0], sw_bd
->skb
);
1045 start
= RX_SGE(fp
->rx_sge_prod
);
1046 end
= RX_SGE(fp
->last_max_sge
);
1047 for (j
= start
; j
!= end
; j
= RX_SGE(j
+ 1)) {
1048 u32
*rx_sge
= (u32
*)&fp
->rx_sge_ring
[j
];
1049 struct sw_rx_page
*sw_page
= &fp
->rx_page_ring
[j
];
1051 BNX2X_ERR("fp%d: rx_sge[%x]=[%x:%x] sw_page=[%p]\n",
1052 i
, j
, rx_sge
[1], rx_sge
[0], sw_page
->page
);
1055 start
= RCQ_BD(fp
->rx_comp_cons
- 10);
1056 end
= RCQ_BD(fp
->rx_comp_cons
+ 503);
1057 for (j
= start
; j
!= end
; j
= RCQ_BD(j
+ 1)) {
1058 u32
*cqe
= (u32
*)&fp
->rx_comp_ring
[j
];
1060 BNX2X_ERR("fp%d: cqe[%x]=[%x:%x:%x:%x]\n",
1061 i
, j
, cqe
[0], cqe
[1], cqe
[2], cqe
[3]);
1066 for_each_queue(bp
, i
) {
1067 struct bnx2x_fastpath
*fp
= &bp
->fp
[i
];
1069 start
= TX_BD(le16_to_cpu(*fp
->tx_cons_sb
) - 10);
1070 end
= TX_BD(le16_to_cpu(*fp
->tx_cons_sb
) + 245);
1071 for (j
= start
; j
!= end
; j
= TX_BD(j
+ 1)) {
1072 struct sw_tx_bd
*sw_bd
= &fp
->tx_buf_ring
[j
];
1074 BNX2X_ERR("fp%d: packet[%x]=[%p,%x]\n",
1075 i
, j
, sw_bd
->skb
, sw_bd
->first_bd
);
1078 start
= TX_BD(fp
->tx_bd_cons
- 10);
1079 end
= TX_BD(fp
->tx_bd_cons
+ 254);
1080 for (j
= start
; j
!= end
; j
= TX_BD(j
+ 1)) {
1081 u32
*tx_bd
= (u32
*)&fp
->tx_desc_ring
[j
];
1083 BNX2X_ERR("fp%d: tx_bd[%x]=[%x:%x:%x:%x]\n",
1084 i
, j
, tx_bd
[0], tx_bd
[1], tx_bd
[2], tx_bd
[3]);
1089 bnx2x_mc_assert(bp
);
1090 BNX2X_ERR("end crash dump -----------------\n");
1093 static void bnx2x_hc_int_enable(struct bnx2x
*bp
)
1095 int port
= BP_PORT(bp
);
1096 u32 addr
= port
? HC_REG_CONFIG_1
: HC_REG_CONFIG_0
;
1097 u32 val
= REG_RD(bp
, addr
);
1098 int msix
= (bp
->flags
& USING_MSIX_FLAG
) ? 1 : 0;
1099 int msi
= (bp
->flags
& USING_MSI_FLAG
) ? 1 : 0;
1102 val
&= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0
|
1103 HC_CONFIG_0_REG_INT_LINE_EN_0
);
1104 val
|= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0
|
1105 HC_CONFIG_0_REG_ATTN_BIT_EN_0
);
1107 val
&= ~HC_CONFIG_0_REG_INT_LINE_EN_0
;
1108 val
|= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0
|
1109 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0
|
1110 HC_CONFIG_0_REG_ATTN_BIT_EN_0
);
1112 val
|= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0
|
1113 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0
|
1114 HC_CONFIG_0_REG_INT_LINE_EN_0
|
1115 HC_CONFIG_0_REG_ATTN_BIT_EN_0
);
1117 if (!CHIP_IS_E1(bp
)) {
1118 DP(NETIF_MSG_INTR
, "write %x to HC %d (addr 0x%x)\n",
1121 REG_WR(bp
, addr
, val
);
1123 val
&= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0
;
1128 REG_WR(bp
, HC_REG_INT_MASK
+ port
*4, 0x1FFFF);
1130 DP(NETIF_MSG_INTR
, "write %x to HC %d (addr 0x%x) mode %s\n",
1131 val
, port
, addr
, (msix
? "MSI-X" : (msi
? "MSI" : "INTx")));
1133 REG_WR(bp
, addr
, val
);
1135 * Ensure that HC_CONFIG is written before leading/trailing edge config
1140 if (!CHIP_IS_E1(bp
)) {
1141 /* init leading/trailing edge */
1143 val
= (0xee0f | (1 << (BP_E1HVN(bp
) + 4)));
1145 /* enable nig and gpio3 attention */
1150 REG_WR(bp
, HC_REG_TRAILING_EDGE_0
+ port
*8, val
);
1151 REG_WR(bp
, HC_REG_LEADING_EDGE_0
+ port
*8, val
);
1154 /* Make sure that interrupts are indeed enabled from here on */
1158 static void bnx2x_igu_int_enable(struct bnx2x
*bp
)
1161 int msix
= (bp
->flags
& USING_MSIX_FLAG
) ? 1 : 0;
1162 int msi
= (bp
->flags
& USING_MSI_FLAG
) ? 1 : 0;
1164 val
= REG_RD(bp
, IGU_REG_PF_CONFIGURATION
);
1167 val
&= ~(IGU_PF_CONF_INT_LINE_EN
|
1168 IGU_PF_CONF_SINGLE_ISR_EN
);
1169 val
|= (IGU_PF_CONF_FUNC_EN
|
1170 IGU_PF_CONF_MSI_MSIX_EN
|
1171 IGU_PF_CONF_ATTN_BIT_EN
);
1173 val
&= ~IGU_PF_CONF_INT_LINE_EN
;
1174 val
|= (IGU_PF_CONF_FUNC_EN
|
1175 IGU_PF_CONF_MSI_MSIX_EN
|
1176 IGU_PF_CONF_ATTN_BIT_EN
|
1177 IGU_PF_CONF_SINGLE_ISR_EN
);
1179 val
&= ~IGU_PF_CONF_MSI_MSIX_EN
;
1180 val
|= (IGU_PF_CONF_FUNC_EN
|
1181 IGU_PF_CONF_INT_LINE_EN
|
1182 IGU_PF_CONF_ATTN_BIT_EN
|
1183 IGU_PF_CONF_SINGLE_ISR_EN
);
1186 DP(NETIF_MSG_INTR
, "write 0x%x to IGU mode %s\n",
1187 val
, (msix
? "MSI-X" : (msi
? "MSI" : "INTx")));
1189 REG_WR(bp
, IGU_REG_PF_CONFIGURATION
, val
);
1193 /* init leading/trailing edge */
1195 val
= (0xee0f | (1 << (BP_E1HVN(bp
) + 4)));
1197 /* enable nig and gpio3 attention */
1202 REG_WR(bp
, IGU_REG_TRAILING_EDGE_LATCH
, val
);
1203 REG_WR(bp
, IGU_REG_LEADING_EDGE_LATCH
, val
);
1205 /* Make sure that interrupts are indeed enabled from here on */
1209 void bnx2x_int_enable(struct bnx2x
*bp
)
1211 if (bp
->common
.int_block
== INT_BLOCK_HC
)
1212 bnx2x_hc_int_enable(bp
);
1214 bnx2x_igu_int_enable(bp
);
1217 static void bnx2x_hc_int_disable(struct bnx2x
*bp
)
1219 int port
= BP_PORT(bp
);
1220 u32 addr
= port
? HC_REG_CONFIG_1
: HC_REG_CONFIG_0
;
1221 u32 val
= REG_RD(bp
, addr
);
1224 * in E1 we must use only PCI configuration space to disable
1225 * MSI/MSIX capablility
1226 * It's forbitten to disable IGU_PF_CONF_MSI_MSIX_EN in HC block
1228 if (CHIP_IS_E1(bp
)) {
1229 /* Since IGU_PF_CONF_MSI_MSIX_EN still always on
1230 * Use mask register to prevent from HC sending interrupts
1231 * after we exit the function
1233 REG_WR(bp
, HC_REG_INT_MASK
+ port
*4, 0);
1235 val
&= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0
|
1236 HC_CONFIG_0_REG_INT_LINE_EN_0
|
1237 HC_CONFIG_0_REG_ATTN_BIT_EN_0
);
1239 val
&= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0
|
1240 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0
|
1241 HC_CONFIG_0_REG_INT_LINE_EN_0
|
1242 HC_CONFIG_0_REG_ATTN_BIT_EN_0
);
1244 DP(NETIF_MSG_INTR
, "write %x to HC %d (addr 0x%x)\n",
1247 /* flush all outstanding writes */
1250 REG_WR(bp
, addr
, val
);
1251 if (REG_RD(bp
, addr
) != val
)
1252 BNX2X_ERR("BUG! proper val not read from IGU!\n");
1255 static void bnx2x_igu_int_disable(struct bnx2x
*bp
)
1257 u32 val
= REG_RD(bp
, IGU_REG_PF_CONFIGURATION
);
1259 val
&= ~(IGU_PF_CONF_MSI_MSIX_EN
|
1260 IGU_PF_CONF_INT_LINE_EN
|
1261 IGU_PF_CONF_ATTN_BIT_EN
);
1263 DP(NETIF_MSG_INTR
, "write %x to IGU\n", val
);
1265 /* flush all outstanding writes */
1268 REG_WR(bp
, IGU_REG_PF_CONFIGURATION
, val
);
1269 if (REG_RD(bp
, IGU_REG_PF_CONFIGURATION
) != val
)
1270 BNX2X_ERR("BUG! proper val not read from IGU!\n");
1273 static void bnx2x_int_disable(struct bnx2x
*bp
)
1275 if (bp
->common
.int_block
== INT_BLOCK_HC
)
1276 bnx2x_hc_int_disable(bp
);
1278 bnx2x_igu_int_disable(bp
);
1281 void bnx2x_int_disable_sync(struct bnx2x
*bp
, int disable_hw
)
1283 int msix
= (bp
->flags
& USING_MSIX_FLAG
) ? 1 : 0;
1286 /* disable interrupt handling */
1287 atomic_inc(&bp
->intr_sem
);
1288 smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
1291 /* prevent the HW from sending interrupts */
1292 bnx2x_int_disable(bp
);
1294 /* make sure all ISRs are done */
1296 synchronize_irq(bp
->msix_table
[0].vector
);
1301 for_each_queue(bp
, i
)
1302 synchronize_irq(bp
->msix_table
[i
+ offset
].vector
);
1304 synchronize_irq(bp
->pdev
->irq
);
1306 /* make sure sp_task is not running */
1307 cancel_delayed_work(&bp
->sp_task
);
1308 flush_workqueue(bnx2x_wq
);
1314 * General service functions
1317 /* Return true if succeeded to acquire the lock */
1318 static bool bnx2x_trylock_hw_lock(struct bnx2x
*bp
, u32 resource
)
1321 u32 resource_bit
= (1 << resource
);
1322 int func
= BP_FUNC(bp
);
1323 u32 hw_lock_control_reg
;
1325 DP(NETIF_MSG_HW
, "Trying to take a lock on resource %d\n", resource
);
1327 /* Validating that the resource is within range */
1328 if (resource
> HW_LOCK_MAX_RESOURCE_VALUE
) {
1330 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1331 resource
, HW_LOCK_MAX_RESOURCE_VALUE
);
1336 hw_lock_control_reg
= (MISC_REG_DRIVER_CONTROL_1
+ func
*8);
1338 hw_lock_control_reg
=
1339 (MISC_REG_DRIVER_CONTROL_7
+ (func
- 6)*8);
1341 /* Try to acquire the lock */
1342 REG_WR(bp
, hw_lock_control_reg
+ 4, resource_bit
);
1343 lock_status
= REG_RD(bp
, hw_lock_control_reg
);
1344 if (lock_status
& resource_bit
)
1347 DP(NETIF_MSG_HW
, "Failed to get a lock on resource %d\n", resource
);
1352 static void bnx2x_cnic_cfc_comp(struct bnx2x
*bp
, int cid
);
1355 void bnx2x_sp_event(struct bnx2x_fastpath
*fp
,
1356 union eth_rx_cqe
*rr_cqe
)
1358 struct bnx2x
*bp
= fp
->bp
;
1359 int cid
= SW_CID(rr_cqe
->ramrod_cqe
.conn_and_cmd_data
);
1360 int command
= CQE_CMD(rr_cqe
->ramrod_cqe
.conn_and_cmd_data
);
1363 "fp %d cid %d got ramrod #%d state is %x type is %d\n",
1364 fp
->index
, cid
, command
, bp
->state
,
1365 rr_cqe
->ramrod_cqe
.ramrod_type
);
1367 switch (command
| fp
->state
) {
1368 case (RAMROD_CMD_ID_ETH_CLIENT_SETUP
| BNX2X_FP_STATE_OPENING
):
1369 DP(NETIF_MSG_IFUP
, "got MULTI[%d] setup ramrod\n", cid
);
1370 fp
->state
= BNX2X_FP_STATE_OPEN
;
1373 case (RAMROD_CMD_ID_ETH_HALT
| BNX2X_FP_STATE_HALTING
):
1374 DP(NETIF_MSG_IFDOWN
, "got MULTI[%d] halt ramrod\n", cid
);
1375 fp
->state
= BNX2X_FP_STATE_HALTED
;
1378 case (RAMROD_CMD_ID_ETH_TERMINATE
| BNX2X_FP_STATE_TERMINATING
):
1379 DP(NETIF_MSG_IFDOWN
, "got MULTI[%d] teminate ramrod\n", cid
);
1380 fp
->state
= BNX2X_FP_STATE_TERMINATED
;
1384 BNX2X_ERR("unexpected MC reply (%d) "
1385 "fp[%d] state is %x\n",
1386 command
, fp
->index
, fp
->state
);
1390 smp_mb__before_atomic_inc();
1391 atomic_inc(&bp
->spq_left
);
1392 /* push the change in fp->state and towards the memory */
1398 irqreturn_t
bnx2x_interrupt(int irq
, void *dev_instance
)
1400 struct bnx2x
*bp
= netdev_priv(dev_instance
);
1401 u16 status
= bnx2x_ack_int(bp
);
1405 /* Return here if interrupt is shared and it's not for us */
1406 if (unlikely(status
== 0)) {
1407 DP(NETIF_MSG_INTR
, "not our interrupt!\n");
1410 DP(NETIF_MSG_INTR
, "got an interrupt status 0x%x\n", status
);
1412 /* Return here if interrupt is disabled */
1413 if (unlikely(atomic_read(&bp
->intr_sem
) != 0)) {
1414 DP(NETIF_MSG_INTR
, "called but intr_sem not 0, returning\n");
1418 #ifdef BNX2X_STOP_ON_ERROR
1419 if (unlikely(bp
->panic
))
1423 for_each_queue(bp
, i
) {
1424 struct bnx2x_fastpath
*fp
= &bp
->fp
[i
];
1426 mask
= 0x2 << (fp
->index
+ CNIC_CONTEXT_USE
);
1427 if (status
& mask
) {
1428 /* Handle Rx and Tx according to SB id */
1429 prefetch(fp
->rx_cons_sb
);
1430 prefetch(fp
->tx_cons_sb
);
1431 prefetch(&fp
->sb_running_index
[SM_RX_ID
]);
1432 napi_schedule(&bnx2x_fp(bp
, fp
->index
, napi
));
1439 if (status
& (mask
| 0x1)) {
1440 struct cnic_ops
*c_ops
= NULL
;
1443 c_ops
= rcu_dereference(bp
->cnic_ops
);
1445 c_ops
->cnic_handler(bp
->cnic_data
, NULL
);
1452 if (unlikely(status
& 0x1)) {
1453 queue_delayed_work(bnx2x_wq
, &bp
->sp_task
, 0);
1460 if (unlikely(status
))
1461 DP(NETIF_MSG_INTR
, "got an unknown interrupt! (status 0x%x)\n",
1467 /* end of fast path */
1473 * General service functions
1476 int bnx2x_acquire_hw_lock(struct bnx2x
*bp
, u32 resource
)
1479 u32 resource_bit
= (1 << resource
);
1480 int func
= BP_FUNC(bp
);
1481 u32 hw_lock_control_reg
;
1484 /* Validating that the resource is within range */
1485 if (resource
> HW_LOCK_MAX_RESOURCE_VALUE
) {
1487 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1488 resource
, HW_LOCK_MAX_RESOURCE_VALUE
);
1493 hw_lock_control_reg
= (MISC_REG_DRIVER_CONTROL_1
+ func
*8);
1495 hw_lock_control_reg
=
1496 (MISC_REG_DRIVER_CONTROL_7
+ (func
- 6)*8);
1499 /* Validating that the resource is not already taken */
1500 lock_status
= REG_RD(bp
, hw_lock_control_reg
);
1501 if (lock_status
& resource_bit
) {
1502 DP(NETIF_MSG_HW
, "lock_status 0x%x resource_bit 0x%x\n",
1503 lock_status
, resource_bit
);
1507 /* Try for 5 second every 5ms */
1508 for (cnt
= 0; cnt
< 1000; cnt
++) {
1509 /* Try to acquire the lock */
1510 REG_WR(bp
, hw_lock_control_reg
+ 4, resource_bit
);
1511 lock_status
= REG_RD(bp
, hw_lock_control_reg
);
1512 if (lock_status
& resource_bit
)
1517 DP(NETIF_MSG_HW
, "Timeout\n");
1521 int bnx2x_release_hw_lock(struct bnx2x
*bp
, u32 resource
)
1524 u32 resource_bit
= (1 << resource
);
1525 int func
= BP_FUNC(bp
);
1526 u32 hw_lock_control_reg
;
1528 DP(NETIF_MSG_HW
, "Releasing a lock on resource %d\n", resource
);
1530 /* Validating that the resource is within range */
1531 if (resource
> HW_LOCK_MAX_RESOURCE_VALUE
) {
1533 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1534 resource
, HW_LOCK_MAX_RESOURCE_VALUE
);
1539 hw_lock_control_reg
= (MISC_REG_DRIVER_CONTROL_1
+ func
*8);
1541 hw_lock_control_reg
=
1542 (MISC_REG_DRIVER_CONTROL_7
+ (func
- 6)*8);
1545 /* Validating that the resource is currently taken */
1546 lock_status
= REG_RD(bp
, hw_lock_control_reg
);
1547 if (!(lock_status
& resource_bit
)) {
1548 DP(NETIF_MSG_HW
, "lock_status 0x%x resource_bit 0x%x\n",
1549 lock_status
, resource_bit
);
1553 REG_WR(bp
, hw_lock_control_reg
, resource_bit
);
1558 int bnx2x_get_gpio(struct bnx2x
*bp
, int gpio_num
, u8 port
)
1560 /* The GPIO should be swapped if swap register is set and active */
1561 int gpio_port
= (REG_RD(bp
, NIG_REG_PORT_SWAP
) &&
1562 REG_RD(bp
, NIG_REG_STRAP_OVERRIDE
)) ^ port
;
1563 int gpio_shift
= gpio_num
+
1564 (gpio_port
? MISC_REGISTERS_GPIO_PORT_SHIFT
: 0);
1565 u32 gpio_mask
= (1 << gpio_shift
);
1569 if (gpio_num
> MISC_REGISTERS_GPIO_3
) {
1570 BNX2X_ERR("Invalid GPIO %d\n", gpio_num
);
1574 /* read GPIO value */
1575 gpio_reg
= REG_RD(bp
, MISC_REG_GPIO
);
1577 /* get the requested pin value */
1578 if ((gpio_reg
& gpio_mask
) == gpio_mask
)
1583 DP(NETIF_MSG_LINK
, "pin %d value 0x%x\n", gpio_num
, value
);
1588 int bnx2x_set_gpio(struct bnx2x
*bp
, int gpio_num
, u32 mode
, u8 port
)
1590 /* The GPIO should be swapped if swap register is set and active */
1591 int gpio_port
= (REG_RD(bp
, NIG_REG_PORT_SWAP
) &&
1592 REG_RD(bp
, NIG_REG_STRAP_OVERRIDE
)) ^ port
;
1593 int gpio_shift
= gpio_num
+
1594 (gpio_port
? MISC_REGISTERS_GPIO_PORT_SHIFT
: 0);
1595 u32 gpio_mask
= (1 << gpio_shift
);
1598 if (gpio_num
> MISC_REGISTERS_GPIO_3
) {
1599 BNX2X_ERR("Invalid GPIO %d\n", gpio_num
);
1603 bnx2x_acquire_hw_lock(bp
, HW_LOCK_RESOURCE_GPIO
);
1604 /* read GPIO and mask except the float bits */
1605 gpio_reg
= (REG_RD(bp
, MISC_REG_GPIO
) & MISC_REGISTERS_GPIO_FLOAT
);
1608 case MISC_REGISTERS_GPIO_OUTPUT_LOW
:
1609 DP(NETIF_MSG_LINK
, "Set GPIO %d (shift %d) -> output low\n",
1610 gpio_num
, gpio_shift
);
1611 /* clear FLOAT and set CLR */
1612 gpio_reg
&= ~(gpio_mask
<< MISC_REGISTERS_GPIO_FLOAT_POS
);
1613 gpio_reg
|= (gpio_mask
<< MISC_REGISTERS_GPIO_CLR_POS
);
1616 case MISC_REGISTERS_GPIO_OUTPUT_HIGH
:
1617 DP(NETIF_MSG_LINK
, "Set GPIO %d (shift %d) -> output high\n",
1618 gpio_num
, gpio_shift
);
1619 /* clear FLOAT and set SET */
1620 gpio_reg
&= ~(gpio_mask
<< MISC_REGISTERS_GPIO_FLOAT_POS
);
1621 gpio_reg
|= (gpio_mask
<< MISC_REGISTERS_GPIO_SET_POS
);
1624 case MISC_REGISTERS_GPIO_INPUT_HI_Z
:
1625 DP(NETIF_MSG_LINK
, "Set GPIO %d (shift %d) -> input\n",
1626 gpio_num
, gpio_shift
);
1628 gpio_reg
|= (gpio_mask
<< MISC_REGISTERS_GPIO_FLOAT_POS
);
1635 REG_WR(bp
, MISC_REG_GPIO
, gpio_reg
);
1636 bnx2x_release_hw_lock(bp
, HW_LOCK_RESOURCE_GPIO
);
1641 int bnx2x_set_gpio_int(struct bnx2x
*bp
, int gpio_num
, u32 mode
, u8 port
)
1643 /* The GPIO should be swapped if swap register is set and active */
1644 int gpio_port
= (REG_RD(bp
, NIG_REG_PORT_SWAP
) &&
1645 REG_RD(bp
, NIG_REG_STRAP_OVERRIDE
)) ^ port
;
1646 int gpio_shift
= gpio_num
+
1647 (gpio_port
? MISC_REGISTERS_GPIO_PORT_SHIFT
: 0);
1648 u32 gpio_mask
= (1 << gpio_shift
);
1651 if (gpio_num
> MISC_REGISTERS_GPIO_3
) {
1652 BNX2X_ERR("Invalid GPIO %d\n", gpio_num
);
1656 bnx2x_acquire_hw_lock(bp
, HW_LOCK_RESOURCE_GPIO
);
1658 gpio_reg
= REG_RD(bp
, MISC_REG_GPIO_INT
);
1661 case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR
:
1662 DP(NETIF_MSG_LINK
, "Clear GPIO INT %d (shift %d) -> "
1663 "output low\n", gpio_num
, gpio_shift
);
1664 /* clear SET and set CLR */
1665 gpio_reg
&= ~(gpio_mask
<< MISC_REGISTERS_GPIO_INT_SET_POS
);
1666 gpio_reg
|= (gpio_mask
<< MISC_REGISTERS_GPIO_INT_CLR_POS
);
1669 case MISC_REGISTERS_GPIO_INT_OUTPUT_SET
:
1670 DP(NETIF_MSG_LINK
, "Set GPIO INT %d (shift %d) -> "
1671 "output high\n", gpio_num
, gpio_shift
);
1672 /* clear CLR and set SET */
1673 gpio_reg
&= ~(gpio_mask
<< MISC_REGISTERS_GPIO_INT_CLR_POS
);
1674 gpio_reg
|= (gpio_mask
<< MISC_REGISTERS_GPIO_INT_SET_POS
);
1681 REG_WR(bp
, MISC_REG_GPIO_INT
, gpio_reg
);
1682 bnx2x_release_hw_lock(bp
, HW_LOCK_RESOURCE_GPIO
);
1687 static int bnx2x_set_spio(struct bnx2x
*bp
, int spio_num
, u32 mode
)
1689 u32 spio_mask
= (1 << spio_num
);
1692 if ((spio_num
< MISC_REGISTERS_SPIO_4
) ||
1693 (spio_num
> MISC_REGISTERS_SPIO_7
)) {
1694 BNX2X_ERR("Invalid SPIO %d\n", spio_num
);
1698 bnx2x_acquire_hw_lock(bp
, HW_LOCK_RESOURCE_SPIO
);
1699 /* read SPIO and mask except the float bits */
1700 spio_reg
= (REG_RD(bp
, MISC_REG_SPIO
) & MISC_REGISTERS_SPIO_FLOAT
);
1703 case MISC_REGISTERS_SPIO_OUTPUT_LOW
:
1704 DP(NETIF_MSG_LINK
, "Set SPIO %d -> output low\n", spio_num
);
1705 /* clear FLOAT and set CLR */
1706 spio_reg
&= ~(spio_mask
<< MISC_REGISTERS_SPIO_FLOAT_POS
);
1707 spio_reg
|= (spio_mask
<< MISC_REGISTERS_SPIO_CLR_POS
);
1710 case MISC_REGISTERS_SPIO_OUTPUT_HIGH
:
1711 DP(NETIF_MSG_LINK
, "Set SPIO %d -> output high\n", spio_num
);
1712 /* clear FLOAT and set SET */
1713 spio_reg
&= ~(spio_mask
<< MISC_REGISTERS_SPIO_FLOAT_POS
);
1714 spio_reg
|= (spio_mask
<< MISC_REGISTERS_SPIO_SET_POS
);
1717 case MISC_REGISTERS_SPIO_INPUT_HI_Z
:
1718 DP(NETIF_MSG_LINK
, "Set SPIO %d -> input\n", spio_num
);
1720 spio_reg
|= (spio_mask
<< MISC_REGISTERS_SPIO_FLOAT_POS
);
1727 REG_WR(bp
, MISC_REG_SPIO
, spio_reg
);
1728 bnx2x_release_hw_lock(bp
, HW_LOCK_RESOURCE_SPIO
);
1733 int bnx2x_get_link_cfg_idx(struct bnx2x
*bp
)
1735 u32 sel_phy_idx
= 0;
1736 if (bp
->link_vars
.link_up
) {
1737 sel_phy_idx
= EXT_PHY1
;
1738 /* In case link is SERDES, check if the EXT_PHY2 is the one */
1739 if ((bp
->link_vars
.link_status
& LINK_STATUS_SERDES_LINK
) &&
1740 (bp
->link_params
.phy
[EXT_PHY2
].supported
& SUPPORTED_FIBRE
))
1741 sel_phy_idx
= EXT_PHY2
;
1744 switch (bnx2x_phy_selection(&bp
->link_params
)) {
1745 case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT
:
1746 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY
:
1747 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY
:
1748 sel_phy_idx
= EXT_PHY1
;
1750 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY
:
1751 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY
:
1752 sel_phy_idx
= EXT_PHY2
;
1757 * The selected actived PHY is always after swapping (in case PHY
1758 * swapping is enabled). So when swapping is enabled, we need to reverse
1762 if (bp
->link_params
.multi_phy_config
&
1763 PORT_HW_CFG_PHY_SWAPPED_ENABLED
) {
1764 if (sel_phy_idx
== EXT_PHY1
)
1765 sel_phy_idx
= EXT_PHY2
;
1766 else if (sel_phy_idx
== EXT_PHY2
)
1767 sel_phy_idx
= EXT_PHY1
;
1769 return LINK_CONFIG_IDX(sel_phy_idx
);
1772 void bnx2x_calc_fc_adv(struct bnx2x
*bp
)
1774 u8 cfg_idx
= bnx2x_get_link_cfg_idx(bp
);
1775 switch (bp
->link_vars
.ieee_fc
&
1776 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK
) {
1777 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE
:
1778 bp
->port
.advertising
[cfg_idx
] &= ~(ADVERTISED_Asym_Pause
|
1782 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH
:
1783 bp
->port
.advertising
[cfg_idx
] |= (ADVERTISED_Asym_Pause
|
1787 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC
:
1788 bp
->port
.advertising
[cfg_idx
] |= ADVERTISED_Asym_Pause
;
1792 bp
->port
.advertising
[cfg_idx
] &= ~(ADVERTISED_Asym_Pause
|
1798 u8
bnx2x_initial_phy_init(struct bnx2x
*bp
, int load_mode
)
1800 if (!BP_NOMCP(bp
)) {
1802 int cfx_idx
= bnx2x_get_link_cfg_idx(bp
);
1803 u16 req_line_speed
= bp
->link_params
.req_line_speed
[cfx_idx
];
1804 /* Initialize link parameters structure variables */
1805 /* It is recommended to turn off RX FC for jumbo frames
1806 for better performance */
1807 if ((CHIP_IS_E1x(bp
)) && (bp
->dev
->mtu
> 5000))
1808 bp
->link_params
.req_fc_auto_adv
= BNX2X_FLOW_CTRL_TX
;
1810 bp
->link_params
.req_fc_auto_adv
= BNX2X_FLOW_CTRL_BOTH
;
1812 bnx2x_acquire_phy_lock(bp
);
1814 if (load_mode
== LOAD_DIAG
) {
1815 bp
->link_params
.loopback_mode
= LOOPBACK_XGXS
;
1816 bp
->link_params
.req_line_speed
[cfx_idx
] = SPEED_10000
;
1819 rc
= bnx2x_phy_init(&bp
->link_params
, &bp
->link_vars
);
1821 bnx2x_release_phy_lock(bp
);
1823 bnx2x_calc_fc_adv(bp
);
1825 if (CHIP_REV_IS_SLOW(bp
) && bp
->link_vars
.link_up
) {
1826 bnx2x_stats_handle(bp
, STATS_EVENT_LINK_UP
);
1827 bnx2x_link_report(bp
);
1829 bp
->link_params
.req_line_speed
[cfx_idx
] = req_line_speed
;
1832 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
1836 void bnx2x_link_set(struct bnx2x
*bp
)
1838 if (!BP_NOMCP(bp
)) {
1839 bnx2x_acquire_phy_lock(bp
);
1840 bnx2x_link_reset(&bp
->link_params
, &bp
->link_vars
, 1);
1841 bnx2x_phy_init(&bp
->link_params
, &bp
->link_vars
);
1842 bnx2x_release_phy_lock(bp
);
1844 bnx2x_calc_fc_adv(bp
);
1846 BNX2X_ERR("Bootcode is missing - can not set link\n");
1849 static void bnx2x__link_reset(struct bnx2x
*bp
)
1851 if (!BP_NOMCP(bp
)) {
1852 bnx2x_acquire_phy_lock(bp
);
1853 bnx2x_link_reset(&bp
->link_params
, &bp
->link_vars
, 1);
1854 bnx2x_release_phy_lock(bp
);
1856 BNX2X_ERR("Bootcode is missing - can not reset link\n");
1859 u8
bnx2x_link_test(struct bnx2x
*bp
, u8 is_serdes
)
1863 if (!BP_NOMCP(bp
)) {
1864 bnx2x_acquire_phy_lock(bp
);
1865 rc
= bnx2x_test_link(&bp
->link_params
, &bp
->link_vars
,
1867 bnx2x_release_phy_lock(bp
);
1869 BNX2X_ERR("Bootcode is missing - can not test link\n");
1874 static void bnx2x_init_port_minmax(struct bnx2x
*bp
)
1876 u32 r_param
= bp
->link_vars
.line_speed
/ 8;
1877 u32 fair_periodic_timeout_usec
;
1880 memset(&(bp
->cmng
.rs_vars
), 0,
1881 sizeof(struct rate_shaping_vars_per_port
));
1882 memset(&(bp
->cmng
.fair_vars
), 0, sizeof(struct fairness_vars_per_port
));
1884 /* 100 usec in SDM ticks = 25 since each tick is 4 usec */
1885 bp
->cmng
.rs_vars
.rs_periodic_timeout
= RS_PERIODIC_TIMEOUT_USEC
/ 4;
1887 /* this is the threshold below which no timer arming will occur
1888 1.25 coefficient is for the threshold to be a little bigger
1889 than the real time, to compensate for timer in-accuracy */
1890 bp
->cmng
.rs_vars
.rs_threshold
=
1891 (RS_PERIODIC_TIMEOUT_USEC
* r_param
* 5) / 4;
1893 /* resolution of fairness timer */
1894 fair_periodic_timeout_usec
= QM_ARB_BYTES
/ r_param
;
1895 /* for 10G it is 1000usec. for 1G it is 10000usec. */
1896 t_fair
= T_FAIR_COEF
/ bp
->link_vars
.line_speed
;
1898 /* this is the threshold below which we won't arm the timer anymore */
1899 bp
->cmng
.fair_vars
.fair_threshold
= QM_ARB_BYTES
;
1901 /* we multiply by 1e3/8 to get bytes/msec.
1902 We don't want the credits to pass a credit
1903 of the t_fair*FAIR_MEM (algorithm resolution) */
1904 bp
->cmng
.fair_vars
.upper_bound
= r_param
* t_fair
* FAIR_MEM
;
1905 /* since each tick is 4 usec */
1906 bp
->cmng
.fair_vars
.fairness_timeout
= fair_periodic_timeout_usec
/ 4;
1909 /* Calculates the sum of vn_min_rates.
1910 It's needed for further normalizing of the min_rates.
1912 sum of vn_min_rates.
1914 0 - if all the min_rates are 0.
1915 In the later case fainess algorithm should be deactivated.
1916 If not all min_rates are zero then those that are zeroes will be set to 1.
1918 static void bnx2x_calc_vn_weight_sum(struct bnx2x
*bp
)
1923 bp
->vn_weight_sum
= 0;
1924 for (vn
= VN_0
; vn
< E1HVN_MAX
; vn
++) {
1925 u32 vn_cfg
= bp
->mf_config
[vn
];
1926 u32 vn_min_rate
= ((vn_cfg
& FUNC_MF_CFG_MIN_BW_MASK
) >>
1927 FUNC_MF_CFG_MIN_BW_SHIFT
) * 100;
1929 /* Skip hidden vns */
1930 if (vn_cfg
& FUNC_MF_CFG_FUNC_HIDE
)
1933 /* If min rate is zero - set it to 1 */
1935 vn_min_rate
= DEF_MIN_RATE
;
1939 bp
->vn_weight_sum
+= vn_min_rate
;
1942 /* ... only if all min rates are zeros - disable fairness */
1944 bp
->cmng
.flags
.cmng_enables
&=
1945 ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN
;
1946 DP(NETIF_MSG_IFUP
, "All MIN values are zeroes"
1947 " fairness will be disabled\n");
1949 bp
->cmng
.flags
.cmng_enables
|=
1950 CMNG_FLAGS_PER_PORT_FAIRNESS_VN
;
1953 static void bnx2x_init_vn_minmax(struct bnx2x
*bp
, int vn
)
1955 struct rate_shaping_vars_per_vn m_rs_vn
;
1956 struct fairness_vars_per_vn m_fair_vn
;
1957 u32 vn_cfg
= bp
->mf_config
[vn
];
1958 int func
= 2*vn
+ BP_PORT(bp
);
1959 u16 vn_min_rate
, vn_max_rate
;
1962 /* If function is hidden - set min and max to zeroes */
1963 if (vn_cfg
& FUNC_MF_CFG_FUNC_HIDE
) {
1968 vn_min_rate
= ((vn_cfg
& FUNC_MF_CFG_MIN_BW_MASK
) >>
1969 FUNC_MF_CFG_MIN_BW_SHIFT
) * 100;
1970 /* If min rate is zero - set it to 1 */
1971 if (bp
->vn_weight_sum
&& (vn_min_rate
== 0))
1972 vn_min_rate
= DEF_MIN_RATE
;
1973 vn_max_rate
= ((vn_cfg
& FUNC_MF_CFG_MAX_BW_MASK
) >>
1974 FUNC_MF_CFG_MAX_BW_SHIFT
) * 100;
1978 "func %d: vn_min_rate %d vn_max_rate %d vn_weight_sum %d\n",
1979 func
, vn_min_rate
, vn_max_rate
, bp
->vn_weight_sum
);
1981 memset(&m_rs_vn
, 0, sizeof(struct rate_shaping_vars_per_vn
));
1982 memset(&m_fair_vn
, 0, sizeof(struct fairness_vars_per_vn
));
1984 /* global vn counter - maximal Mbps for this vn */
1985 m_rs_vn
.vn_counter
.rate
= vn_max_rate
;
1987 /* quota - number of bytes transmitted in this period */
1988 m_rs_vn
.vn_counter
.quota
=
1989 (vn_max_rate
* RS_PERIODIC_TIMEOUT_USEC
) / 8;
1991 if (bp
->vn_weight_sum
) {
1992 /* credit for each period of the fairness algorithm:
1993 number of bytes in T_FAIR (the vn share the port rate).
1994 vn_weight_sum should not be larger than 10000, thus
1995 T_FAIR_COEF / (8 * vn_weight_sum) will always be greater
1997 m_fair_vn
.vn_credit_delta
=
1998 max_t(u32
, (vn_min_rate
* (T_FAIR_COEF
/
1999 (8 * bp
->vn_weight_sum
))),
2000 (bp
->cmng
.fair_vars
.fair_threshold
* 2));
2001 DP(NETIF_MSG_IFUP
, "m_fair_vn.vn_credit_delta %d\n",
2002 m_fair_vn
.vn_credit_delta
);
2005 /* Store it to internal memory */
2006 for (i
= 0; i
< sizeof(struct rate_shaping_vars_per_vn
)/4; i
++)
2007 REG_WR(bp
, BAR_XSTRORM_INTMEM
+
2008 XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func
) + i
* 4,
2009 ((u32
*)(&m_rs_vn
))[i
]);
2011 for (i
= 0; i
< sizeof(struct fairness_vars_per_vn
)/4; i
++)
2012 REG_WR(bp
, BAR_XSTRORM_INTMEM
+
2013 XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func
) + i
* 4,
2014 ((u32
*)(&m_fair_vn
))[i
]);
2017 static int bnx2x_get_cmng_fns_mode(struct bnx2x
*bp
)
2019 if (CHIP_REV_IS_SLOW(bp
))
2020 return CMNG_FNS_NONE
;
2022 return CMNG_FNS_MINMAX
;
2024 return CMNG_FNS_NONE
;
2027 static void bnx2x_read_mf_cfg(struct bnx2x
*bp
)
2029 int vn
, n
= (CHIP_MODE_IS_4_PORT(bp
) ? 2 : 1);
2032 return; /* what should be the default bvalue in this case */
2034 /* For 2 port configuration the absolute function number formula
2036 * abs_func = 2 * vn + BP_PORT + BP_PATH
2038 * and there are 4 functions per port
2040 * For 4 port configuration it is
2041 * abs_func = 4 * vn + 2 * BP_PORT + BP_PATH
2043 * and there are 2 functions per port
2045 for (vn
= VN_0
; vn
< E1HVN_MAX
; vn
++) {
2046 int /*abs*/func
= n
* (2 * vn
+ BP_PORT(bp
)) + BP_PATH(bp
);
2048 if (func
>= E1H_FUNC_MAX
)
2052 MF_CFG_RD(bp
, func_mf_config
[func
].config
);
2056 static void bnx2x_cmng_fns_init(struct bnx2x
*bp
, u8 read_cfg
, u8 cmng_type
)
2059 if (cmng_type
== CMNG_FNS_MINMAX
) {
2062 /* clear cmng_enables */
2063 bp
->cmng
.flags
.cmng_enables
= 0;
2065 /* read mf conf from shmem */
2067 bnx2x_read_mf_cfg(bp
);
2069 /* Init rate shaping and fairness contexts */
2070 bnx2x_init_port_minmax(bp
);
2072 /* vn_weight_sum and enable fairness if not 0 */
2073 bnx2x_calc_vn_weight_sum(bp
);
2075 /* calculate and set min-max rate for each vn */
2076 for (vn
= VN_0
; vn
< E1HVN_MAX
; vn
++)
2077 bnx2x_init_vn_minmax(bp
, vn
);
2079 /* always enable rate shaping and fairness */
2080 bp
->cmng
.flags
.cmng_enables
|=
2081 CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN
;
2082 if (!bp
->vn_weight_sum
)
2083 DP(NETIF_MSG_IFUP
, "All MIN values are zeroes"
2084 " fairness will be disabled\n");
2088 /* rate shaping and fairness are disabled */
2090 "rate shaping and fairness are disabled\n");
2093 static inline void bnx2x_link_sync_notify(struct bnx2x
*bp
)
2095 int port
= BP_PORT(bp
);
2099 /* Set the attention towards other drivers on the same port */
2100 for (vn
= VN_0
; vn
< E1HVN_MAX
; vn
++) {
2101 if (vn
== BP_E1HVN(bp
))
2104 func
= ((vn
<< 1) | port
);
2105 REG_WR(bp
, MISC_REG_AEU_GENERAL_ATTN_0
+
2106 (LINK_SYNC_ATTENTION_BIT_FUNC_0
+ func
)*4, 1);
2110 /* This function is called upon link interrupt */
2111 static void bnx2x_link_attn(struct bnx2x
*bp
)
2113 u32 prev_link_status
= bp
->link_vars
.link_status
;
2114 /* Make sure that we are synced with the current statistics */
2115 bnx2x_stats_handle(bp
, STATS_EVENT_STOP
);
2117 bnx2x_link_update(&bp
->link_params
, &bp
->link_vars
);
2119 if (bp
->link_vars
.link_up
) {
2121 /* dropless flow control */
2122 if (!CHIP_IS_E1(bp
) && bp
->dropless_fc
) {
2123 int port
= BP_PORT(bp
);
2124 u32 pause_enabled
= 0;
2126 if (bp
->link_vars
.flow_ctrl
& BNX2X_FLOW_CTRL_TX
)
2129 REG_WR(bp
, BAR_USTRORM_INTMEM
+
2130 USTORM_ETH_PAUSE_ENABLED_OFFSET(port
),
2134 if (bp
->link_vars
.mac_type
== MAC_TYPE_BMAC
) {
2135 struct host_port_stats
*pstats
;
2137 pstats
= bnx2x_sp(bp
, port_stats
);
2138 /* reset old bmac stats */
2139 memset(&(pstats
->mac_stx
[0]), 0,
2140 sizeof(struct mac_stx
));
2142 if (bp
->state
== BNX2X_STATE_OPEN
)
2143 bnx2x_stats_handle(bp
, STATS_EVENT_LINK_UP
);
2146 /* indicate link status only if link status actually changed */
2147 if (prev_link_status
!= bp
->link_vars
.link_status
)
2148 bnx2x_link_report(bp
);
2151 bnx2x_link_sync_notify(bp
);
2153 if (bp
->link_vars
.link_up
&& bp
->link_vars
.line_speed
) {
2154 int cmng_fns
= bnx2x_get_cmng_fns_mode(bp
);
2156 if (cmng_fns
!= CMNG_FNS_NONE
) {
2157 bnx2x_cmng_fns_init(bp
, false, cmng_fns
);
2158 storm_memset_cmng(bp
, &bp
->cmng
, BP_PORT(bp
));
2160 /* rate shaping and fairness are disabled */
2162 "single function mode without fairness\n");
2166 void bnx2x__link_status_update(struct bnx2x
*bp
)
2168 if ((bp
->state
!= BNX2X_STATE_OPEN
) || (bp
->flags
& MF_FUNC_DIS
))
2171 bnx2x_link_status_update(&bp
->link_params
, &bp
->link_vars
);
2173 if (bp
->link_vars
.link_up
)
2174 bnx2x_stats_handle(bp
, STATS_EVENT_LINK_UP
);
2176 bnx2x_stats_handle(bp
, STATS_EVENT_STOP
);
2178 /* the link status update could be the result of a DCC event
2179 hence re-read the shmem mf configuration */
2180 bnx2x_read_mf_cfg(bp
);
2182 /* indicate link status */
2183 bnx2x_link_report(bp
);
2186 static void bnx2x_pmf_update(struct bnx2x
*bp
)
2188 int port
= BP_PORT(bp
);
2192 DP(NETIF_MSG_LINK
, "pmf %d\n", bp
->port
.pmf
);
2194 /* enable nig attention */
2195 val
= (0xff0f | (1 << (BP_E1HVN(bp
) + 4)));
2196 if (bp
->common
.int_block
== INT_BLOCK_HC
) {
2197 REG_WR(bp
, HC_REG_TRAILING_EDGE_0
+ port
*8, val
);
2198 REG_WR(bp
, HC_REG_LEADING_EDGE_0
+ port
*8, val
);
2199 } else if (CHIP_IS_E2(bp
)) {
2200 REG_WR(bp
, IGU_REG_TRAILING_EDGE_LATCH
, val
);
2201 REG_WR(bp
, IGU_REG_LEADING_EDGE_LATCH
, val
);
2204 bnx2x_stats_handle(bp
, STATS_EVENT_PMF
);
2212 * General service functions
2215 /* send the MCP a request, block until there is a reply */
2216 u32
bnx2x_fw_command(struct bnx2x
*bp
, u32 command
, u32 param
)
2218 int mb_idx
= BP_FW_MB_IDX(bp
);
2219 u32 seq
= ++bp
->fw_seq
;
2222 u8 delay
= CHIP_REV_IS_SLOW(bp
) ? 100 : 10;
2224 mutex_lock(&bp
->fw_mb_mutex
);
2225 SHMEM_WR(bp
, func_mb
[mb_idx
].drv_mb_param
, param
);
2226 SHMEM_WR(bp
, func_mb
[mb_idx
].drv_mb_header
, (command
| seq
));
2228 DP(BNX2X_MSG_MCP
, "wrote command (%x) to FW MB\n", (command
| seq
));
2231 /* let the FW do it's magic ... */
2234 rc
= SHMEM_RD(bp
, func_mb
[mb_idx
].fw_mb_header
);
2236 /* Give the FW up to 5 second (500*10ms) */
2237 } while ((seq
!= (rc
& FW_MSG_SEQ_NUMBER_MASK
)) && (cnt
++ < 500));
2239 DP(BNX2X_MSG_MCP
, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
2240 cnt
*delay
, rc
, seq
);
2242 /* is this a reply to our command? */
2243 if (seq
== (rc
& FW_MSG_SEQ_NUMBER_MASK
))
2244 rc
&= FW_MSG_CODE_MASK
;
2247 BNX2X_ERR("FW failed to respond!\n");
2251 mutex_unlock(&bp
->fw_mb_mutex
);
2256 /* must be called under rtnl_lock */
2257 static void bnx2x_rxq_set_mac_filters(struct bnx2x
*bp
, u16 cl_id
, u32 filters
)
2259 u32 mask
= (1 << cl_id
);
2261 /* initial seeting is BNX2X_ACCEPT_NONE */
2262 u8 drop_all_ucast
= 1, drop_all_bcast
= 1, drop_all_mcast
= 1;
2263 u8 accp_all_ucast
= 0, accp_all_bcast
= 0, accp_all_mcast
= 0;
2264 u8 unmatched_unicast
= 0;
2266 if (filters
& BNX2X_ACCEPT_UNMATCHED_UCAST
)
2267 unmatched_unicast
= 1;
2269 if (filters
& BNX2X_PROMISCUOUS_MODE
) {
2270 /* promiscious - accept all, drop none */
2271 drop_all_ucast
= drop_all_bcast
= drop_all_mcast
= 0;
2272 accp_all_ucast
= accp_all_bcast
= accp_all_mcast
= 1;
2275 * SI mode defines to accept in promiscuos mode
2276 * only unmatched packets
2278 unmatched_unicast
= 1;
2282 if (filters
& BNX2X_ACCEPT_UNICAST
) {
2283 /* accept matched ucast */
2286 if (filters
& BNX2X_ACCEPT_MULTICAST
) {
2287 /* accept matched mcast */
2290 /* since mcast addresses won't arrive with ovlan,
2291 * fw needs to accept all of them in
2292 * switch-independent mode */
2295 if (filters
& BNX2X_ACCEPT_ALL_UNICAST
) {
2296 /* accept all mcast */
2300 if (filters
& BNX2X_ACCEPT_ALL_MULTICAST
) {
2301 /* accept all mcast */
2305 if (filters
& BNX2X_ACCEPT_BROADCAST
) {
2306 /* accept (all) bcast */
2311 bp
->mac_filters
.ucast_drop_all
= drop_all_ucast
?
2312 bp
->mac_filters
.ucast_drop_all
| mask
:
2313 bp
->mac_filters
.ucast_drop_all
& ~mask
;
2315 bp
->mac_filters
.mcast_drop_all
= drop_all_mcast
?
2316 bp
->mac_filters
.mcast_drop_all
| mask
:
2317 bp
->mac_filters
.mcast_drop_all
& ~mask
;
2319 bp
->mac_filters
.bcast_drop_all
= drop_all_bcast
?
2320 bp
->mac_filters
.bcast_drop_all
| mask
:
2321 bp
->mac_filters
.bcast_drop_all
& ~mask
;
2323 bp
->mac_filters
.ucast_accept_all
= accp_all_ucast
?
2324 bp
->mac_filters
.ucast_accept_all
| mask
:
2325 bp
->mac_filters
.ucast_accept_all
& ~mask
;
2327 bp
->mac_filters
.mcast_accept_all
= accp_all_mcast
?
2328 bp
->mac_filters
.mcast_accept_all
| mask
:
2329 bp
->mac_filters
.mcast_accept_all
& ~mask
;
2331 bp
->mac_filters
.bcast_accept_all
= accp_all_bcast
?
2332 bp
->mac_filters
.bcast_accept_all
| mask
:
2333 bp
->mac_filters
.bcast_accept_all
& ~mask
;
2335 bp
->mac_filters
.unmatched_unicast
= unmatched_unicast
?
2336 bp
->mac_filters
.unmatched_unicast
| mask
:
2337 bp
->mac_filters
.unmatched_unicast
& ~mask
;
2340 static void bnx2x_func_init(struct bnx2x
*bp
, struct bnx2x_func_init_params
*p
)
2342 struct tstorm_eth_function_common_config tcfg
= {0};
2346 if (p
->func_flgs
& FUNC_FLG_TPA
)
2347 tcfg
.config_flags
|=
2348 TSTORM_ETH_FUNCTION_COMMON_CONFIG_ENABLE_TPA
;
2351 rss_flgs
= (p
->rss
->mode
<<
2352 TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_MODE_SHIFT
);
2354 if (p
->rss
->cap
& RSS_IPV4_CAP
)
2355 rss_flgs
|= RSS_IPV4_CAP_MASK
;
2356 if (p
->rss
->cap
& RSS_IPV4_TCP_CAP
)
2357 rss_flgs
|= RSS_IPV4_TCP_CAP_MASK
;
2358 if (p
->rss
->cap
& RSS_IPV6_CAP
)
2359 rss_flgs
|= RSS_IPV6_CAP_MASK
;
2360 if (p
->rss
->cap
& RSS_IPV6_TCP_CAP
)
2361 rss_flgs
|= RSS_IPV6_TCP_CAP_MASK
;
2363 tcfg
.config_flags
|= rss_flgs
;
2364 tcfg
.rss_result_mask
= p
->rss
->result_mask
;
2366 storm_memset_func_cfg(bp
, &tcfg
, p
->func_id
);
2368 /* Enable the function in the FW */
2369 storm_memset_vf_to_pf(bp
, p
->func_id
, p
->pf_id
);
2370 storm_memset_func_en(bp
, p
->func_id
, 1);
2373 if (p
->func_flgs
& FUNC_FLG_STATS
) {
2374 struct stats_indication_flags stats_flags
= {0};
2375 stats_flags
.collect_eth
= 1;
2377 storm_memset_xstats_flags(bp
, &stats_flags
, p
->func_id
);
2378 storm_memset_xstats_addr(bp
, p
->fw_stat_map
, p
->func_id
);
2380 storm_memset_tstats_flags(bp
, &stats_flags
, p
->func_id
);
2381 storm_memset_tstats_addr(bp
, p
->fw_stat_map
, p
->func_id
);
2383 storm_memset_ustats_flags(bp
, &stats_flags
, p
->func_id
);
2384 storm_memset_ustats_addr(bp
, p
->fw_stat_map
, p
->func_id
);
2386 storm_memset_cstats_flags(bp
, &stats_flags
, p
->func_id
);
2387 storm_memset_cstats_addr(bp
, p
->fw_stat_map
, p
->func_id
);
2391 if (p
->func_flgs
& FUNC_FLG_SPQ
) {
2392 storm_memset_spq_addr(bp
, p
->spq_map
, p
->func_id
);
2393 REG_WR(bp
, XSEM_REG_FAST_MEMORY
+
2394 XSTORM_SPQ_PROD_OFFSET(p
->func_id
), p
->spq_prod
);
2398 static inline u16
bnx2x_get_cl_flags(struct bnx2x
*bp
,
2399 struct bnx2x_fastpath
*fp
)
2403 /* calculate queue flags */
2404 flags
|= QUEUE_FLG_CACHE_ALIGN
;
2405 flags
|= QUEUE_FLG_HC
;
2406 flags
|= IS_MF_SD(bp
) ? QUEUE_FLG_OV
: 0;
2408 flags
|= QUEUE_FLG_VLAN
;
2409 DP(NETIF_MSG_IFUP
, "vlan removal enabled\n");
2411 if (!fp
->disable_tpa
)
2412 flags
|= QUEUE_FLG_TPA
;
2414 flags
|= QUEUE_FLG_STATS
;
2419 static void bnx2x_pf_rx_cl_prep(struct bnx2x
*bp
,
2420 struct bnx2x_fastpath
*fp
, struct rxq_pause_params
*pause
,
2421 struct bnx2x_rxq_init_params
*rxq_init
)
2425 u16 tpa_agg_size
= 0;
2427 /* calculate queue flags */
2428 u16 flags
= bnx2x_get_cl_flags(bp
, fp
);
2430 if (!fp
->disable_tpa
) {
2431 pause
->sge_th_hi
= 250;
2432 pause
->sge_th_lo
= 150;
2433 tpa_agg_size
= min_t(u32
,
2434 (min_t(u32
, 8, MAX_SKB_FRAGS
) *
2435 SGE_PAGE_SIZE
* PAGES_PER_SGE
), 0xffff);
2436 max_sge
= SGE_PAGE_ALIGN(bp
->dev
->mtu
) >>
2438 max_sge
= ((max_sge
+ PAGES_PER_SGE
- 1) &
2439 (~(PAGES_PER_SGE
-1))) >> PAGES_PER_SGE_SHIFT
;
2440 sge_sz
= (u16
)min_t(u32
, SGE_PAGE_SIZE
* PAGES_PER_SGE
,
2444 /* pause - not for e1 */
2445 if (!CHIP_IS_E1(bp
)) {
2446 pause
->bd_th_hi
= 350;
2447 pause
->bd_th_lo
= 250;
2448 pause
->rcq_th_hi
= 350;
2449 pause
->rcq_th_lo
= 250;
2450 pause
->sge_th_hi
= 0;
2451 pause
->sge_th_lo
= 0;
2456 rxq_init
->flags
= flags
;
2457 rxq_init
->cxt
= &bp
->context
.vcxt
[fp
->cid
].eth
;
2458 rxq_init
->dscr_map
= fp
->rx_desc_mapping
;
2459 rxq_init
->sge_map
= fp
->rx_sge_mapping
;
2460 rxq_init
->rcq_map
= fp
->rx_comp_mapping
;
2461 rxq_init
->rcq_np_map
= fp
->rx_comp_mapping
+ BCM_PAGE_SIZE
;
2462 rxq_init
->mtu
= bp
->dev
->mtu
;
2463 rxq_init
->buf_sz
= bp
->rx_buf_size
;
2464 rxq_init
->cl_qzone_id
= fp
->cl_qzone_id
;
2465 rxq_init
->cl_id
= fp
->cl_id
;
2466 rxq_init
->spcl_id
= fp
->cl_id
;
2467 rxq_init
->stat_id
= fp
->cl_id
;
2468 rxq_init
->tpa_agg_sz
= tpa_agg_size
;
2469 rxq_init
->sge_buf_sz
= sge_sz
;
2470 rxq_init
->max_sges_pkt
= max_sge
;
2471 rxq_init
->cache_line_log
= BNX2X_RX_ALIGN_SHIFT
;
2472 rxq_init
->fw_sb_id
= fp
->fw_sb_id
;
2474 rxq_init
->sb_cq_index
= U_SB_ETH_RX_CQ_INDEX
;
2476 rxq_init
->cid
= HW_CID(bp
, fp
->cid
);
2478 rxq_init
->hc_rate
= bp
->rx_ticks
? (1000000 / bp
->rx_ticks
) : 0;
2481 static void bnx2x_pf_tx_cl_prep(struct bnx2x
*bp
,
2482 struct bnx2x_fastpath
*fp
, struct bnx2x_txq_init_params
*txq_init
)
2484 u16 flags
= bnx2x_get_cl_flags(bp
, fp
);
2486 txq_init
->flags
= flags
;
2487 txq_init
->cxt
= &bp
->context
.vcxt
[fp
->cid
].eth
;
2488 txq_init
->dscr_map
= fp
->tx_desc_mapping
;
2489 txq_init
->stat_id
= fp
->cl_id
;
2490 txq_init
->cid
= HW_CID(bp
, fp
->cid
);
2491 txq_init
->sb_cq_index
= C_SB_ETH_TX_CQ_INDEX
;
2492 txq_init
->traffic_type
= LLFC_TRAFFIC_TYPE_NW
;
2493 txq_init
->fw_sb_id
= fp
->fw_sb_id
;
2494 txq_init
->hc_rate
= bp
->tx_ticks
? (1000000 / bp
->tx_ticks
) : 0;
2497 static void bnx2x_pf_init(struct bnx2x
*bp
)
2499 struct bnx2x_func_init_params func_init
= {0};
2500 struct bnx2x_rss_params rss
= {0};
2501 struct event_ring_data eq_data
= { {0} };
2504 /* pf specific setups */
2505 if (!CHIP_IS_E1(bp
))
2506 storm_memset_ov(bp
, bp
->mf_ov
, BP_FUNC(bp
));
2508 if (CHIP_IS_E2(bp
)) {
2509 /* reset IGU PF statistics: MSIX + ATTN */
2511 REG_WR(bp
, IGU_REG_STATISTIC_NUM_MESSAGE_SENT
+
2512 BNX2X_IGU_STAS_MSG_VF_CNT
*4 +
2513 (CHIP_MODE_IS_4_PORT(bp
) ?
2514 BP_FUNC(bp
) : BP_VN(bp
))*4, 0);
2516 REG_WR(bp
, IGU_REG_STATISTIC_NUM_MESSAGE_SENT
+
2517 BNX2X_IGU_STAS_MSG_VF_CNT
*4 +
2518 BNX2X_IGU_STAS_MSG_PF_CNT
*4 +
2519 (CHIP_MODE_IS_4_PORT(bp
) ?
2520 BP_FUNC(bp
) : BP_VN(bp
))*4, 0);
2523 /* function setup flags */
2524 flags
= (FUNC_FLG_STATS
| FUNC_FLG_LEADING
| FUNC_FLG_SPQ
);
2526 if (CHIP_IS_E1x(bp
))
2527 flags
|= (bp
->flags
& TPA_ENABLE_FLAG
) ? FUNC_FLG_TPA
: 0;
2529 flags
|= FUNC_FLG_TPA
;
2531 /* function setup */
2534 * Although RSS is meaningless when there is a single HW queue we
2535 * still need it enabled in order to have HW Rx hash generated.
2537 rss
.cap
= (RSS_IPV4_CAP
| RSS_IPV4_TCP_CAP
|
2538 RSS_IPV6_CAP
| RSS_IPV6_TCP_CAP
);
2539 rss
.mode
= bp
->multi_mode
;
2540 rss
.result_mask
= MULTI_MASK
;
2541 func_init
.rss
= &rss
;
2543 func_init
.func_flgs
= flags
;
2544 func_init
.pf_id
= BP_FUNC(bp
);
2545 func_init
.func_id
= BP_FUNC(bp
);
2546 func_init
.fw_stat_map
= bnx2x_sp_mapping(bp
, fw_stats
);
2547 func_init
.spq_map
= bp
->spq_mapping
;
2548 func_init
.spq_prod
= bp
->spq_prod_idx
;
2550 bnx2x_func_init(bp
, &func_init
);
2552 memset(&(bp
->cmng
), 0, sizeof(struct cmng_struct_per_port
));
2555 Congestion management values depend on the link rate
2556 There is no active link so initial link rate is set to 10 Gbps.
2557 When the link comes up The congestion management values are
2558 re-calculated according to the actual link rate.
2560 bp
->link_vars
.line_speed
= SPEED_10000
;
2561 bnx2x_cmng_fns_init(bp
, true, bnx2x_get_cmng_fns_mode(bp
));
2563 /* Only the PMF sets the HW */
2565 storm_memset_cmng(bp
, &bp
->cmng
, BP_PORT(bp
));
2567 /* no rx until link is up */
2568 bp
->rx_mode
= BNX2X_RX_MODE_NONE
;
2569 bnx2x_set_storm_rx_mode(bp
);
2571 /* init Event Queue */
2572 eq_data
.base_addr
.hi
= U64_HI(bp
->eq_mapping
);
2573 eq_data
.base_addr
.lo
= U64_LO(bp
->eq_mapping
);
2574 eq_data
.producer
= bp
->eq_prod
;
2575 eq_data
.index_id
= HC_SP_INDEX_EQ_CONS
;
2576 eq_data
.sb_id
= DEF_SB_ID
;
2577 storm_memset_eq_data(bp
, &eq_data
, BP_FUNC(bp
));
2581 static void bnx2x_e1h_disable(struct bnx2x
*bp
)
2583 int port
= BP_PORT(bp
);
2585 netif_tx_disable(bp
->dev
);
2587 REG_WR(bp
, NIG_REG_LLH0_FUNC_EN
+ port
*8, 0);
2589 netif_carrier_off(bp
->dev
);
2592 static void bnx2x_e1h_enable(struct bnx2x
*bp
)
2594 int port
= BP_PORT(bp
);
2596 REG_WR(bp
, NIG_REG_LLH0_FUNC_EN
+ port
*8, 1);
2598 /* Tx queue should be only reenabled */
2599 netif_tx_wake_all_queues(bp
->dev
);
2602 * Should not call netif_carrier_on since it will be called if the link
2603 * is up when checking for link state
2607 /* called due to MCP event (on pmf):
2608 * reread new bandwidth configuration
2610 * notify others function about the change
2612 static inline void bnx2x_config_mf_bw(struct bnx2x
*bp
)
2614 if (bp
->link_vars
.link_up
) {
2615 bnx2x_cmng_fns_init(bp
, true, CMNG_FNS_MINMAX
);
2616 bnx2x_link_sync_notify(bp
);
2618 storm_memset_cmng(bp
, &bp
->cmng
, BP_PORT(bp
));
2621 static inline void bnx2x_set_mf_bw(struct bnx2x
*bp
)
2623 bnx2x_config_mf_bw(bp
);
2624 bnx2x_fw_command(bp
, DRV_MSG_CODE_SET_MF_BW_ACK
, 0);
2627 static void bnx2x_dcc_event(struct bnx2x
*bp
, u32 dcc_event
)
2629 DP(BNX2X_MSG_MCP
, "dcc_event 0x%x\n", dcc_event
);
2631 if (dcc_event
& DRV_STATUS_DCC_DISABLE_ENABLE_PF
) {
2634 * This is the only place besides the function initialization
2635 * where the bp->flags can change so it is done without any
2638 if (bp
->mf_config
[BP_VN(bp
)] & FUNC_MF_CFG_FUNC_DISABLED
) {
2639 DP(NETIF_MSG_IFDOWN
, "mf_cfg function disabled\n");
2640 bp
->flags
|= MF_FUNC_DIS
;
2642 bnx2x_e1h_disable(bp
);
2644 DP(NETIF_MSG_IFUP
, "mf_cfg function enabled\n");
2645 bp
->flags
&= ~MF_FUNC_DIS
;
2647 bnx2x_e1h_enable(bp
);
2649 dcc_event
&= ~DRV_STATUS_DCC_DISABLE_ENABLE_PF
;
2651 if (dcc_event
& DRV_STATUS_DCC_BANDWIDTH_ALLOCATION
) {
2652 bnx2x_config_mf_bw(bp
);
2653 dcc_event
&= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION
;
2656 /* Report results to MCP */
2658 bnx2x_fw_command(bp
, DRV_MSG_CODE_DCC_FAILURE
, 0);
2660 bnx2x_fw_command(bp
, DRV_MSG_CODE_DCC_OK
, 0);
2663 /* must be called under the spq lock */
2664 static inline struct eth_spe
*bnx2x_sp_get_next(struct bnx2x
*bp
)
2666 struct eth_spe
*next_spe
= bp
->spq_prod_bd
;
2668 if (bp
->spq_prod_bd
== bp
->spq_last_bd
) {
2669 bp
->spq_prod_bd
= bp
->spq
;
2670 bp
->spq_prod_idx
= 0;
2671 DP(NETIF_MSG_TIMER
, "end of spq\n");
2679 /* must be called under the spq lock */
2680 static inline void bnx2x_sp_prod_update(struct bnx2x
*bp
)
2682 int func
= BP_FUNC(bp
);
2684 /* Make sure that BD data is updated before writing the producer */
2687 REG_WR16(bp
, BAR_XSTRORM_INTMEM
+ XSTORM_SPQ_PROD_OFFSET(func
),
2692 /* the slow path queue is odd since completions arrive on the fastpath ring */
2693 int bnx2x_sp_post(struct bnx2x
*bp
, int command
, int cid
,
2694 u32 data_hi
, u32 data_lo
, int common
)
2696 struct eth_spe
*spe
;
2699 #ifdef BNX2X_STOP_ON_ERROR
2700 if (unlikely(bp
->panic
))
2704 spin_lock_bh(&bp
->spq_lock
);
2706 if (!atomic_read(&bp
->spq_left
)) {
2707 BNX2X_ERR("BUG! SPQ ring full!\n");
2708 spin_unlock_bh(&bp
->spq_lock
);
2713 spe
= bnx2x_sp_get_next(bp
);
2715 /* CID needs port number to be encoded int it */
2716 spe
->hdr
.conn_and_cmd_data
=
2717 cpu_to_le32((command
<< SPE_HDR_CMD_ID_SHIFT
) |
2722 * FUNC_START, FUNC_STOP, CFC_DEL, STATS, SET_MAC
2723 * TRAFFIC_STOP, TRAFFIC_START
2725 type
= (NONE_CONNECTION_TYPE
<< SPE_HDR_CONN_TYPE_SHIFT
)
2726 & SPE_HDR_CONN_TYPE
;
2728 /* ETH ramrods: SETUP, HALT */
2729 type
= (ETH_CONNECTION_TYPE
<< SPE_HDR_CONN_TYPE_SHIFT
)
2730 & SPE_HDR_CONN_TYPE
;
2732 type
|= ((BP_FUNC(bp
) << SPE_HDR_FUNCTION_ID_SHIFT
) &
2733 SPE_HDR_FUNCTION_ID
);
2735 spe
->hdr
.type
= cpu_to_le16(type
);
2737 spe
->data
.update_data_addr
.hi
= cpu_to_le32(data_hi
);
2738 spe
->data
.update_data_addr
.lo
= cpu_to_le32(data_lo
);
2740 /* stats ramrod has it's own slot on the spq */
2741 if (command
!= RAMROD_CMD_ID_COMMON_STAT_QUERY
)
2742 /* It's ok if the actual decrement is issued towards the memory
2743 * somewhere between the spin_lock and spin_unlock. Thus no
2744 * more explict memory barrier is needed.
2746 atomic_dec(&bp
->spq_left
);
2748 DP(BNX2X_MSG_SP
/*NETIF_MSG_TIMER*/,
2749 "SPQE[%x] (%x:%x) command %d hw_cid %x data (%x:%x) "
2750 "type(0x%x) left %x\n",
2751 bp
->spq_prod_idx
, (u32
)U64_HI(bp
->spq_mapping
),
2752 (u32
)(U64_LO(bp
->spq_mapping
) +
2753 (void *)bp
->spq_prod_bd
- (void *)bp
->spq
), command
,
2754 HW_CID(bp
, cid
), data_hi
, data_lo
, type
, atomic_read(&bp
->spq_left
));
2756 bnx2x_sp_prod_update(bp
);
2757 spin_unlock_bh(&bp
->spq_lock
);
2761 /* acquire split MCP access lock register */
2762 static int bnx2x_acquire_alr(struct bnx2x
*bp
)
2768 for (j
= 0; j
< 1000; j
++) {
2770 REG_WR(bp
, GRCBASE_MCP
+ 0x9c, val
);
2771 val
= REG_RD(bp
, GRCBASE_MCP
+ 0x9c);
2772 if (val
& (1L << 31))
2777 if (!(val
& (1L << 31))) {
2778 BNX2X_ERR("Cannot acquire MCP access lock register\n");
2785 /* release split MCP access lock register */
2786 static void bnx2x_release_alr(struct bnx2x
*bp
)
2788 REG_WR(bp
, GRCBASE_MCP
+ 0x9c, 0);
2791 #define BNX2X_DEF_SB_ATT_IDX 0x0001
2792 #define BNX2X_DEF_SB_IDX 0x0002
2794 static inline u16
bnx2x_update_dsb_idx(struct bnx2x
*bp
)
2796 struct host_sp_status_block
*def_sb
= bp
->def_status_blk
;
2799 barrier(); /* status block is written to by the chip */
2800 if (bp
->def_att_idx
!= def_sb
->atten_status_block
.attn_bits_index
) {
2801 bp
->def_att_idx
= def_sb
->atten_status_block
.attn_bits_index
;
2802 rc
|= BNX2X_DEF_SB_ATT_IDX
;
2805 if (bp
->def_idx
!= def_sb
->sp_sb
.running_index
) {
2806 bp
->def_idx
= def_sb
->sp_sb
.running_index
;
2807 rc
|= BNX2X_DEF_SB_IDX
;
2810 /* Do not reorder: indecies reading should complete before handling */
2816 * slow path service functions
2819 static void bnx2x_attn_int_asserted(struct bnx2x
*bp
, u32 asserted
)
2821 int port
= BP_PORT(bp
);
2822 u32 aeu_addr
= port
? MISC_REG_AEU_MASK_ATTN_FUNC_1
:
2823 MISC_REG_AEU_MASK_ATTN_FUNC_0
;
2824 u32 nig_int_mask_addr
= port
? NIG_REG_MASK_INTERRUPT_PORT1
:
2825 NIG_REG_MASK_INTERRUPT_PORT0
;
2830 if (bp
->attn_state
& asserted
)
2831 BNX2X_ERR("IGU ERROR\n");
2833 bnx2x_acquire_hw_lock(bp
, HW_LOCK_RESOURCE_PORT0_ATT_MASK
+ port
);
2834 aeu_mask
= REG_RD(bp
, aeu_addr
);
2836 DP(NETIF_MSG_HW
, "aeu_mask %x newly asserted %x\n",
2837 aeu_mask
, asserted
);
2838 aeu_mask
&= ~(asserted
& 0x3ff);
2839 DP(NETIF_MSG_HW
, "new mask %x\n", aeu_mask
);
2841 REG_WR(bp
, aeu_addr
, aeu_mask
);
2842 bnx2x_release_hw_lock(bp
, HW_LOCK_RESOURCE_PORT0_ATT_MASK
+ port
);
2844 DP(NETIF_MSG_HW
, "attn_state %x\n", bp
->attn_state
);
2845 bp
->attn_state
|= asserted
;
2846 DP(NETIF_MSG_HW
, "new state %x\n", bp
->attn_state
);
2848 if (asserted
& ATTN_HARD_WIRED_MASK
) {
2849 if (asserted
& ATTN_NIG_FOR_FUNC
) {
2851 bnx2x_acquire_phy_lock(bp
);
2853 /* save nig interrupt mask */
2854 nig_mask
= REG_RD(bp
, nig_int_mask_addr
);
2855 REG_WR(bp
, nig_int_mask_addr
, 0);
2857 bnx2x_link_attn(bp
);
2859 /* handle unicore attn? */
2861 if (asserted
& ATTN_SW_TIMER_4_FUNC
)
2862 DP(NETIF_MSG_HW
, "ATTN_SW_TIMER_4_FUNC!\n");
2864 if (asserted
& GPIO_2_FUNC
)
2865 DP(NETIF_MSG_HW
, "GPIO_2_FUNC!\n");
2867 if (asserted
& GPIO_3_FUNC
)
2868 DP(NETIF_MSG_HW
, "GPIO_3_FUNC!\n");
2870 if (asserted
& GPIO_4_FUNC
)
2871 DP(NETIF_MSG_HW
, "GPIO_4_FUNC!\n");
2874 if (asserted
& ATTN_GENERAL_ATTN_1
) {
2875 DP(NETIF_MSG_HW
, "ATTN_GENERAL_ATTN_1!\n");
2876 REG_WR(bp
, MISC_REG_AEU_GENERAL_ATTN_1
, 0x0);
2878 if (asserted
& ATTN_GENERAL_ATTN_2
) {
2879 DP(NETIF_MSG_HW
, "ATTN_GENERAL_ATTN_2!\n");
2880 REG_WR(bp
, MISC_REG_AEU_GENERAL_ATTN_2
, 0x0);
2882 if (asserted
& ATTN_GENERAL_ATTN_3
) {
2883 DP(NETIF_MSG_HW
, "ATTN_GENERAL_ATTN_3!\n");
2884 REG_WR(bp
, MISC_REG_AEU_GENERAL_ATTN_3
, 0x0);
2887 if (asserted
& ATTN_GENERAL_ATTN_4
) {
2888 DP(NETIF_MSG_HW
, "ATTN_GENERAL_ATTN_4!\n");
2889 REG_WR(bp
, MISC_REG_AEU_GENERAL_ATTN_4
, 0x0);
2891 if (asserted
& ATTN_GENERAL_ATTN_5
) {
2892 DP(NETIF_MSG_HW
, "ATTN_GENERAL_ATTN_5!\n");
2893 REG_WR(bp
, MISC_REG_AEU_GENERAL_ATTN_5
, 0x0);
2895 if (asserted
& ATTN_GENERAL_ATTN_6
) {
2896 DP(NETIF_MSG_HW
, "ATTN_GENERAL_ATTN_6!\n");
2897 REG_WR(bp
, MISC_REG_AEU_GENERAL_ATTN_6
, 0x0);
2901 } /* if hardwired */
2903 if (bp
->common
.int_block
== INT_BLOCK_HC
)
2904 reg_addr
= (HC_REG_COMMAND_REG
+ port
*32 +
2905 COMMAND_REG_ATTN_BITS_SET
);
2907 reg_addr
= (BAR_IGU_INTMEM
+ IGU_CMD_ATTN_BIT_SET_UPPER
*8);
2909 DP(NETIF_MSG_HW
, "about to mask 0x%08x at %s addr 0x%x\n", asserted
,
2910 (bp
->common
.int_block
== INT_BLOCK_HC
) ? "HC" : "IGU", reg_addr
);
2911 REG_WR(bp
, reg_addr
, asserted
);
2913 /* now set back the mask */
2914 if (asserted
& ATTN_NIG_FOR_FUNC
) {
2915 REG_WR(bp
, nig_int_mask_addr
, nig_mask
);
2916 bnx2x_release_phy_lock(bp
);
2920 static inline void bnx2x_fan_failure(struct bnx2x
*bp
)
2922 int port
= BP_PORT(bp
);
2924 /* mark the failure */
2927 dev_info
.port_hw_config
[port
].external_phy_config
);
2929 ext_phy_config
&= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK
;
2930 ext_phy_config
|= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE
;
2931 SHMEM_WR(bp
, dev_info
.port_hw_config
[port
].external_phy_config
,
2934 /* log the failure */
2935 netdev_err(bp
->dev
, "Fan Failure on Network Controller has caused"
2936 " the driver to shutdown the card to prevent permanent"
2937 " damage. Please contact OEM Support for assistance\n");
2940 static inline void bnx2x_attn_int_deasserted0(struct bnx2x
*bp
, u32 attn
)
2942 int port
= BP_PORT(bp
);
2946 reg_offset
= (port
? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0
:
2947 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0
);
2949 if (attn
& AEU_INPUTS_ATTN_BITS_SPIO5
) {
2951 val
= REG_RD(bp
, reg_offset
);
2952 val
&= ~AEU_INPUTS_ATTN_BITS_SPIO5
;
2953 REG_WR(bp
, reg_offset
, val
);
2955 BNX2X_ERR("SPIO5 hw attention\n");
2957 /* Fan failure attention */
2958 bnx2x_hw_reset_phy(&bp
->link_params
);
2959 bnx2x_fan_failure(bp
);
2962 if (attn
& (AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0
|
2963 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1
)) {
2964 bnx2x_acquire_phy_lock(bp
);
2965 bnx2x_handle_module_detect_int(&bp
->link_params
);
2966 bnx2x_release_phy_lock(bp
);
2969 if (attn
& HW_INTERRUT_ASSERT_SET_0
) {
2971 val
= REG_RD(bp
, reg_offset
);
2972 val
&= ~(attn
& HW_INTERRUT_ASSERT_SET_0
);
2973 REG_WR(bp
, reg_offset
, val
);
2975 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
2976 (u32
)(attn
& HW_INTERRUT_ASSERT_SET_0
));
2981 static inline void bnx2x_attn_int_deasserted1(struct bnx2x
*bp
, u32 attn
)
2985 if (attn
& AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT
) {
2987 val
= REG_RD(bp
, DORQ_REG_DORQ_INT_STS_CLR
);
2988 BNX2X_ERR("DB hw attention 0x%x\n", val
);
2989 /* DORQ discard attention */
2991 BNX2X_ERR("FATAL error from DORQ\n");
2994 if (attn
& HW_INTERRUT_ASSERT_SET_1
) {
2996 int port
= BP_PORT(bp
);
2999 reg_offset
= (port
? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1
:
3000 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1
);
3002 val
= REG_RD(bp
, reg_offset
);
3003 val
&= ~(attn
& HW_INTERRUT_ASSERT_SET_1
);
3004 REG_WR(bp
, reg_offset
, val
);
3006 BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
3007 (u32
)(attn
& HW_INTERRUT_ASSERT_SET_1
));
3012 static inline void bnx2x_attn_int_deasserted2(struct bnx2x
*bp
, u32 attn
)
3016 if (attn
& AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT
) {
3018 val
= REG_RD(bp
, CFC_REG_CFC_INT_STS_CLR
);
3019 BNX2X_ERR("CFC hw attention 0x%x\n", val
);
3020 /* CFC error attention */
3022 BNX2X_ERR("FATAL error from CFC\n");
3025 if (attn
& AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT
) {
3027 val
= REG_RD(bp
, PXP_REG_PXP_INT_STS_CLR_0
);
3028 BNX2X_ERR("PXP hw attention 0x%x\n", val
);
3029 /* RQ_USDMDP_FIFO_OVERFLOW */
3031 BNX2X_ERR("FATAL error from PXP\n");
3032 if (CHIP_IS_E2(bp
)) {
3033 val
= REG_RD(bp
, PXP_REG_PXP_INT_STS_CLR_1
);
3034 BNX2X_ERR("PXP hw attention-1 0x%x\n", val
);
3038 if (attn
& HW_INTERRUT_ASSERT_SET_2
) {
3040 int port
= BP_PORT(bp
);
3043 reg_offset
= (port
? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2
:
3044 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2
);
3046 val
= REG_RD(bp
, reg_offset
);
3047 val
&= ~(attn
& HW_INTERRUT_ASSERT_SET_2
);
3048 REG_WR(bp
, reg_offset
, val
);
3050 BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
3051 (u32
)(attn
& HW_INTERRUT_ASSERT_SET_2
));
3056 static inline void bnx2x_attn_int_deasserted3(struct bnx2x
*bp
, u32 attn
)
3060 if (attn
& EVEREST_GEN_ATTN_IN_USE_MASK
) {
3062 if (attn
& BNX2X_PMF_LINK_ASSERT
) {
3063 int func
= BP_FUNC(bp
);
3065 REG_WR(bp
, MISC_REG_AEU_GENERAL_ATTN_12
+ func
*4, 0);
3066 bp
->mf_config
[BP_VN(bp
)] = MF_CFG_RD(bp
,
3067 func_mf_config
[BP_ABS_FUNC(bp
)].config
);
3069 func_mb
[BP_FW_MB_IDX(bp
)].drv_status
);
3070 if (val
& DRV_STATUS_DCC_EVENT_MASK
)
3072 (val
& DRV_STATUS_DCC_EVENT_MASK
));
3074 if (val
& DRV_STATUS_SET_MF_BW
)
3075 bnx2x_set_mf_bw(bp
);
3077 bnx2x__link_status_update(bp
);
3078 if ((bp
->port
.pmf
== 0) && (val
& DRV_STATUS_PMF
))
3079 bnx2x_pmf_update(bp
);
3081 } else if (attn
& BNX2X_MC_ASSERT_BITS
) {
3083 BNX2X_ERR("MC assert!\n");
3084 REG_WR(bp
, MISC_REG_AEU_GENERAL_ATTN_10
, 0);
3085 REG_WR(bp
, MISC_REG_AEU_GENERAL_ATTN_9
, 0);
3086 REG_WR(bp
, MISC_REG_AEU_GENERAL_ATTN_8
, 0);
3087 REG_WR(bp
, MISC_REG_AEU_GENERAL_ATTN_7
, 0);
3090 } else if (attn
& BNX2X_MCP_ASSERT
) {
3092 BNX2X_ERR("MCP assert!\n");
3093 REG_WR(bp
, MISC_REG_AEU_GENERAL_ATTN_11
, 0);
3097 BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn
);
3100 if (attn
& EVEREST_LATCHED_ATTN_IN_USE_MASK
) {
3101 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn
);
3102 if (attn
& BNX2X_GRC_TIMEOUT
) {
3103 val
= CHIP_IS_E1(bp
) ? 0 :
3104 REG_RD(bp
, MISC_REG_GRC_TIMEOUT_ATTN
);
3105 BNX2X_ERR("GRC time-out 0x%08x\n", val
);
3107 if (attn
& BNX2X_GRC_RSV
) {
3108 val
= CHIP_IS_E1(bp
) ? 0 :
3109 REG_RD(bp
, MISC_REG_GRC_RSV_ATTN
);
3110 BNX2X_ERR("GRC reserved 0x%08x\n", val
);
3112 REG_WR(bp
, MISC_REG_AEU_CLR_LATCH_SIGNAL
, 0x7ff);
3116 #define BNX2X_MISC_GEN_REG MISC_REG_GENERIC_POR_1
3117 #define LOAD_COUNTER_BITS 16 /* Number of bits for load counter */
3118 #define LOAD_COUNTER_MASK (((u32)0x1 << LOAD_COUNTER_BITS) - 1)
3119 #define RESET_DONE_FLAG_MASK (~LOAD_COUNTER_MASK)
3120 #define RESET_DONE_FLAG_SHIFT LOAD_COUNTER_BITS
3121 #define CHIP_PARITY_SUPPORTED(bp) (CHIP_IS_E1(bp) || CHIP_IS_E1H(bp))
3124 * should be run under rtnl lock
3126 static inline void bnx2x_set_reset_done(struct bnx2x
*bp
)
3128 u32 val
= REG_RD(bp
, BNX2X_MISC_GEN_REG
);
3129 val
&= ~(1 << RESET_DONE_FLAG_SHIFT
);
3130 REG_WR(bp
, BNX2X_MISC_GEN_REG
, val
);
3136 * should be run under rtnl lock
3138 static inline void bnx2x_set_reset_in_progress(struct bnx2x
*bp
)
3140 u32 val
= REG_RD(bp
, BNX2X_MISC_GEN_REG
);
3142 REG_WR(bp
, BNX2X_MISC_GEN_REG
, val
);
3148 * should be run under rtnl lock
3150 bool bnx2x_reset_is_done(struct bnx2x
*bp
)
3152 u32 val
= REG_RD(bp
, BNX2X_MISC_GEN_REG
);
3153 DP(NETIF_MSG_HW
, "GEN_REG_VAL=0x%08x\n", val
);
3154 return (val
& RESET_DONE_FLAG_MASK
) ? false : true;
3158 * should be run under rtnl lock
3160 inline void bnx2x_inc_load_cnt(struct bnx2x
*bp
)
3162 u32 val1
, val
= REG_RD(bp
, BNX2X_MISC_GEN_REG
);
3164 DP(NETIF_MSG_HW
, "Old GEN_REG_VAL=0x%08x\n", val
);
3166 val1
= ((val
& LOAD_COUNTER_MASK
) + 1) & LOAD_COUNTER_MASK
;
3167 REG_WR(bp
, BNX2X_MISC_GEN_REG
, (val
& RESET_DONE_FLAG_MASK
) | val1
);
3173 * should be run under rtnl lock
3175 u32
bnx2x_dec_load_cnt(struct bnx2x
*bp
)
3177 u32 val1
, val
= REG_RD(bp
, BNX2X_MISC_GEN_REG
);
3179 DP(NETIF_MSG_HW
, "Old GEN_REG_VAL=0x%08x\n", val
);
3181 val1
= ((val
& LOAD_COUNTER_MASK
) - 1) & LOAD_COUNTER_MASK
;
3182 REG_WR(bp
, BNX2X_MISC_GEN_REG
, (val
& RESET_DONE_FLAG_MASK
) | val1
);
3190 * should be run under rtnl lock
3192 static inline u32
bnx2x_get_load_cnt(struct bnx2x
*bp
)
3194 return REG_RD(bp
, BNX2X_MISC_GEN_REG
) & LOAD_COUNTER_MASK
;
3197 static inline void bnx2x_clear_load_cnt(struct bnx2x
*bp
)
3199 u32 val
= REG_RD(bp
, BNX2X_MISC_GEN_REG
);
3200 REG_WR(bp
, BNX2X_MISC_GEN_REG
, val
& (~LOAD_COUNTER_MASK
));
/* Continuation-print one parity-block name, comma-separated after the
 * first one (idx > 0).
 */
static inline void _print_next_block(int idx, const char *blk)
{
	if (idx)
		pr_cont(", ");
	pr_cont("%s", blk);
}
3210 static inline int bnx2x_print_blocks_with_parity0(u32 sig
, int par_num
)
3214 for (i
= 0; sig
; i
++) {
3215 cur_bit
= ((u32
)0x1 << i
);
3216 if (sig
& cur_bit
) {
3218 case AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR
:
3219 _print_next_block(par_num
++, "BRB");
3221 case AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR
:
3222 _print_next_block(par_num
++, "PARSER");
3224 case AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR
:
3225 _print_next_block(par_num
++, "TSDM");
3227 case AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR
:
3228 _print_next_block(par_num
++, "SEARCHER");
3230 case AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR
:
3231 _print_next_block(par_num
++, "TSEMI");
3243 static inline int bnx2x_print_blocks_with_parity1(u32 sig
, int par_num
)
3247 for (i
= 0; sig
; i
++) {
3248 cur_bit
= ((u32
)0x1 << i
);
3249 if (sig
& cur_bit
) {
3251 case AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR
:
3252 _print_next_block(par_num
++, "PBCLIENT");
3254 case AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR
:
3255 _print_next_block(par_num
++, "QM");
3257 case AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR
:
3258 _print_next_block(par_num
++, "XSDM");
3260 case AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR
:
3261 _print_next_block(par_num
++, "XSEMI");
3263 case AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR
:
3264 _print_next_block(par_num
++, "DOORBELLQ");
3266 case AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR
:
3267 _print_next_block(par_num
++, "VAUX PCI CORE");
3269 case AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR
:
3270 _print_next_block(par_num
++, "DEBUG");
3272 case AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR
:
3273 _print_next_block(par_num
++, "USDM");
3275 case AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR
:
3276 _print_next_block(par_num
++, "USEMI");
3278 case AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR
:
3279 _print_next_block(par_num
++, "UPB");
3281 case AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR
:
3282 _print_next_block(par_num
++, "CSDM");
3294 static inline int bnx2x_print_blocks_with_parity2(u32 sig
, int par_num
)
3298 for (i
= 0; sig
; i
++) {
3299 cur_bit
= ((u32
)0x1 << i
);
3300 if (sig
& cur_bit
) {
3302 case AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR
:
3303 _print_next_block(par_num
++, "CSEMI");
3305 case AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR
:
3306 _print_next_block(par_num
++, "PXP");
3308 case AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR
:
3309 _print_next_block(par_num
++,
3310 "PXPPCICLOCKCLIENT");
3312 case AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR
:
3313 _print_next_block(par_num
++, "CFC");
3315 case AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR
:
3316 _print_next_block(par_num
++, "CDU");
3318 case AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR
:
3319 _print_next_block(par_num
++, "IGU");
3321 case AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR
:
3322 _print_next_block(par_num
++, "MISC");
3334 static inline int bnx2x_print_blocks_with_parity3(u32 sig
, int par_num
)
3338 for (i
= 0; sig
; i
++) {
3339 cur_bit
= ((u32
)0x1 << i
);
3340 if (sig
& cur_bit
) {
3342 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY
:
3343 _print_next_block(par_num
++, "MCP ROM");
3345 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY
:
3346 _print_next_block(par_num
++, "MCP UMP RX");
3348 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY
:
3349 _print_next_block(par_num
++, "MCP UMP TX");
3351 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY
:
3352 _print_next_block(par_num
++, "MCP SCPAD");
3364 static inline bool bnx2x_parity_attn(struct bnx2x
*bp
, u32 sig0
, u32 sig1
,
3367 if ((sig0
& HW_PRTY_ASSERT_SET_0
) || (sig1
& HW_PRTY_ASSERT_SET_1
) ||
3368 (sig2
& HW_PRTY_ASSERT_SET_2
) || (sig3
& HW_PRTY_ASSERT_SET_3
)) {
3370 DP(NETIF_MSG_HW
, "Was parity error: HW block parity attention: "
3371 "[0]:0x%08x [1]:0x%08x "
3372 "[2]:0x%08x [3]:0x%08x\n",
3373 sig0
& HW_PRTY_ASSERT_SET_0
,
3374 sig1
& HW_PRTY_ASSERT_SET_1
,
3375 sig2
& HW_PRTY_ASSERT_SET_2
,
3376 sig3
& HW_PRTY_ASSERT_SET_3
);
3377 printk(KERN_ERR
"%s: Parity errors detected in blocks: ",
3379 par_num
= bnx2x_print_blocks_with_parity0(
3380 sig0
& HW_PRTY_ASSERT_SET_0
, par_num
);
3381 par_num
= bnx2x_print_blocks_with_parity1(
3382 sig1
& HW_PRTY_ASSERT_SET_1
, par_num
);
3383 par_num
= bnx2x_print_blocks_with_parity2(
3384 sig2
& HW_PRTY_ASSERT_SET_2
, par_num
);
3385 par_num
= bnx2x_print_blocks_with_parity3(
3386 sig3
& HW_PRTY_ASSERT_SET_3
, par_num
);
3393 bool bnx2x_chk_parity_attn(struct bnx2x
*bp
)
3395 struct attn_route attn
;
3396 int port
= BP_PORT(bp
);
3398 attn
.sig
[0] = REG_RD(bp
,
3399 MISC_REG_AEU_AFTER_INVERT_1_FUNC_0
+
3401 attn
.sig
[1] = REG_RD(bp
,
3402 MISC_REG_AEU_AFTER_INVERT_2_FUNC_0
+
3404 attn
.sig
[2] = REG_RD(bp
,
3405 MISC_REG_AEU_AFTER_INVERT_3_FUNC_0
+
3407 attn
.sig
[3] = REG_RD(bp
,
3408 MISC_REG_AEU_AFTER_INVERT_4_FUNC_0
+
3411 return bnx2x_parity_attn(bp
, attn
.sig
[0], attn
.sig
[1], attn
.sig
[2],
3416 static inline void bnx2x_attn_int_deasserted4(struct bnx2x
*bp
, u32 attn
)
3419 if (attn
& AEU_INPUTS_ATTN_BITS_PGLUE_HW_INTERRUPT
) {
3421 val
= REG_RD(bp
, PGLUE_B_REG_PGLUE_B_INT_STS_CLR
);
3422 BNX2X_ERR("PGLUE hw attention 0x%x\n", val
);
3423 if (val
& PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR
)
3424 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3426 if (val
& PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR
)
3427 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3428 "INCORRECT_RCV_BEHAVIOR\n");
3429 if (val
& PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN
)
3430 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3431 "WAS_ERROR_ATTN\n");
3432 if (val
& PGLUE_B_PGLUE_B_INT_STS_REG_VF_LENGTH_VIOLATION_ATTN
)
3433 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3434 "VF_LENGTH_VIOLATION_ATTN\n");
3436 PGLUE_B_PGLUE_B_INT_STS_REG_VF_GRC_SPACE_VIOLATION_ATTN
)
3437 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3438 "VF_GRC_SPACE_VIOLATION_ATTN\n");
3440 PGLUE_B_PGLUE_B_INT_STS_REG_VF_MSIX_BAR_VIOLATION_ATTN
)
3441 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3442 "VF_MSIX_BAR_VIOLATION_ATTN\n");
3443 if (val
& PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_ERROR_ATTN
)
3444 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3445 "TCPL_ERROR_ATTN\n");
3446 if (val
& PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_IN_TWO_RCBS_ATTN
)
3447 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3448 "TCPL_IN_TWO_RCBS_ATTN\n");
3449 if (val
& PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW
)
3450 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3451 "CSSNOOP_FIFO_OVERFLOW\n");
3453 if (attn
& AEU_INPUTS_ATTN_BITS_ATC_HW_INTERRUPT
) {
3454 val
= REG_RD(bp
, ATC_REG_ATC_INT_STS_CLR
);
3455 BNX2X_ERR("ATC hw attention 0x%x\n", val
);
3456 if (val
& ATC_ATC_INT_STS_REG_ADDRESS_ERROR
)
3457 BNX2X_ERR("ATC_ATC_INT_STS_REG_ADDRESS_ERROR\n");
3458 if (val
& ATC_ATC_INT_STS_REG_ATC_TCPL_TO_NOT_PEND
)
3459 BNX2X_ERR("ATC_ATC_INT_STS_REG"
3460 "_ATC_TCPL_TO_NOT_PEND\n");
3461 if (val
& ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS
)
3462 BNX2X_ERR("ATC_ATC_INT_STS_REG_"
3463 "ATC_GPA_MULTIPLE_HITS\n");
3464 if (val
& ATC_ATC_INT_STS_REG_ATC_RCPL_TO_EMPTY_CNT
)
3465 BNX2X_ERR("ATC_ATC_INT_STS_REG_"
3466 "ATC_RCPL_TO_EMPTY_CNT\n");
3467 if (val
& ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR
)
3468 BNX2X_ERR("ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR\n");
3469 if (val
& ATC_ATC_INT_STS_REG_ATC_IREQ_LESS_THAN_STU
)
3470 BNX2X_ERR("ATC_ATC_INT_STS_REG_"
3471 "ATC_IREQ_LESS_THAN_STU\n");
3474 if (attn
& (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR
|
3475 AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR
)) {
3476 BNX2X_ERR("FATAL parity attention set4 0x%x\n",
3477 (u32
)(attn
& (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR
|
3478 AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR
)));
3483 static void bnx2x_attn_int_deasserted(struct bnx2x
*bp
, u32 deasserted
)
3485 struct attn_route attn
, *group_mask
;
3486 int port
= BP_PORT(bp
);
3492 /* need to take HW lock because MCP or other port might also
3493 try to handle this event */
3494 bnx2x_acquire_alr(bp
);
3496 if (bnx2x_chk_parity_attn(bp
)) {
3497 bp
->recovery_state
= BNX2X_RECOVERY_INIT
;
3498 bnx2x_set_reset_in_progress(bp
);
3499 schedule_delayed_work(&bp
->reset_task
, 0);
3500 /* Disable HW interrupts */
3501 bnx2x_int_disable(bp
);
3502 bnx2x_release_alr(bp
);
3503 /* In case of parity errors don't handle attentions so that
3504 * other function would "see" parity errors.
3509 attn
.sig
[0] = REG_RD(bp
, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0
+ port
*4);
3510 attn
.sig
[1] = REG_RD(bp
, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0
+ port
*4);
3511 attn
.sig
[2] = REG_RD(bp
, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0
+ port
*4);
3512 attn
.sig
[3] = REG_RD(bp
, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0
+ port
*4);
3515 REG_RD(bp
, MISC_REG_AEU_AFTER_INVERT_5_FUNC_0
+ port
*4);
3519 DP(NETIF_MSG_HW
, "attn: %08x %08x %08x %08x %08x\n",
3520 attn
.sig
[0], attn
.sig
[1], attn
.sig
[2], attn
.sig
[3], attn
.sig
[4]);
3522 for (index
= 0; index
< MAX_DYNAMIC_ATTN_GRPS
; index
++) {
3523 if (deasserted
& (1 << index
)) {
3524 group_mask
= &bp
->attn_group
[index
];
3526 DP(NETIF_MSG_HW
, "group[%d]: %08x %08x "
3529 group_mask
->sig
[0], group_mask
->sig
[1],
3530 group_mask
->sig
[2], group_mask
->sig
[3],
3531 group_mask
->sig
[4]);
3533 bnx2x_attn_int_deasserted4(bp
,
3534 attn
.sig
[4] & group_mask
->sig
[4]);
3535 bnx2x_attn_int_deasserted3(bp
,
3536 attn
.sig
[3] & group_mask
->sig
[3]);
3537 bnx2x_attn_int_deasserted1(bp
,
3538 attn
.sig
[1] & group_mask
->sig
[1]);
3539 bnx2x_attn_int_deasserted2(bp
,
3540 attn
.sig
[2] & group_mask
->sig
[2]);
3541 bnx2x_attn_int_deasserted0(bp
,
3542 attn
.sig
[0] & group_mask
->sig
[0]);
3546 bnx2x_release_alr(bp
);
3548 if (bp
->common
.int_block
== INT_BLOCK_HC
)
3549 reg_addr
= (HC_REG_COMMAND_REG
+ port
*32 +
3550 COMMAND_REG_ATTN_BITS_CLR
);
3552 reg_addr
= (BAR_IGU_INTMEM
+ IGU_CMD_ATTN_BIT_CLR_UPPER
*8);
3555 DP(NETIF_MSG_HW
, "about to mask 0x%08x at %s addr 0x%x\n", val
,
3556 (bp
->common
.int_block
== INT_BLOCK_HC
) ? "HC" : "IGU", reg_addr
);
3557 REG_WR(bp
, reg_addr
, val
);
3559 if (~bp
->attn_state
& deasserted
)
3560 BNX2X_ERR("IGU ERROR\n");
3562 reg_addr
= port
? MISC_REG_AEU_MASK_ATTN_FUNC_1
:
3563 MISC_REG_AEU_MASK_ATTN_FUNC_0
;
3565 bnx2x_acquire_hw_lock(bp
, HW_LOCK_RESOURCE_PORT0_ATT_MASK
+ port
);
3566 aeu_mask
= REG_RD(bp
, reg_addr
);
3568 DP(NETIF_MSG_HW
, "aeu_mask %x newly deasserted %x\n",
3569 aeu_mask
, deasserted
);
3570 aeu_mask
|= (deasserted
& 0x3ff);
3571 DP(NETIF_MSG_HW
, "new mask %x\n", aeu_mask
);
3573 REG_WR(bp
, reg_addr
, aeu_mask
);
3574 bnx2x_release_hw_lock(bp
, HW_LOCK_RESOURCE_PORT0_ATT_MASK
+ port
);
3576 DP(NETIF_MSG_HW
, "attn_state %x\n", bp
->attn_state
);
3577 bp
->attn_state
&= ~deasserted
;
3578 DP(NETIF_MSG_HW
, "new state %x\n", bp
->attn_state
);
3581 static void bnx2x_attn_int(struct bnx2x
*bp
)
3583 /* read local copy of bits */
3584 u32 attn_bits
= le32_to_cpu(bp
->def_status_blk
->atten_status_block
.
3586 u32 attn_ack
= le32_to_cpu(bp
->def_status_blk
->atten_status_block
.
3588 u32 attn_state
= bp
->attn_state
;
3590 /* look for changed bits */
3591 u32 asserted
= attn_bits
& ~attn_ack
& ~attn_state
;
3592 u32 deasserted
= ~attn_bits
& attn_ack
& attn_state
;
3595 "attn_bits %x attn_ack %x asserted %x deasserted %x\n",
3596 attn_bits
, attn_ack
, asserted
, deasserted
);
3598 if (~(attn_bits
^ attn_ack
) & (attn_bits
^ attn_state
))
3599 BNX2X_ERR("BAD attention state\n");
3601 /* handle bits that were raised */
3603 bnx2x_attn_int_asserted(bp
, asserted
);
3606 bnx2x_attn_int_deasserted(bp
, deasserted
);
3609 static inline void bnx2x_update_eq_prod(struct bnx2x
*bp
, u16 prod
)
3611 /* No memory barriers */
3612 storm_memset_eq_prod(bp
, prod
, BP_FUNC(bp
));
3613 mmiowb(); /* keep prod updates ordered */
3617 static int bnx2x_cnic_handle_cfc_del(struct bnx2x
*bp
, u32 cid
,
3618 union event_ring_elem
*elem
)
3620 if (!bp
->cnic_eth_dev
.starting_cid
||
3621 cid
< bp
->cnic_eth_dev
.starting_cid
)
3624 DP(BNX2X_MSG_SP
, "got delete ramrod for CNIC CID %d\n", cid
);
3626 if (unlikely(elem
->message
.data
.cfc_del_event
.error
)) {
3627 BNX2X_ERR("got delete ramrod for CNIC CID %d with error!\n",
3629 bnx2x_panic_dump(bp
);
3631 bnx2x_cnic_cfc_comp(bp
, cid
);
3636 static void bnx2x_eq_int(struct bnx2x
*bp
)
3638 u16 hw_cons
, sw_cons
, sw_prod
;
3639 union event_ring_elem
*elem
;
3644 hw_cons
= le16_to_cpu(*bp
->eq_cons_sb
);
3646 /* The hw_cos range is 1-255, 257 - the sw_cons range is 0-254, 256.
3647 * when we get the the next-page we nned to adjust so the loop
3648 * condition below will be met. The next element is the size of a
3649 * regular element and hence incrementing by 1
3651 if ((hw_cons
& EQ_DESC_MAX_PAGE
) == EQ_DESC_MAX_PAGE
)
3654 /* This function may never run in parralel with itself for a
3655 * specific bp, thus there is no need in "paired" read memory
3658 sw_cons
= bp
->eq_cons
;
3659 sw_prod
= bp
->eq_prod
;
3661 DP(BNX2X_MSG_SP
, "EQ: hw_cons %u sw_cons %u bp->spq_left %u\n",
3662 hw_cons
, sw_cons
, atomic_read(&bp
->spq_left
));
3664 for (; sw_cons
!= hw_cons
;
3665 sw_prod
= NEXT_EQ_IDX(sw_prod
), sw_cons
= NEXT_EQ_IDX(sw_cons
)) {
3668 elem
= &bp
->eq_ring
[EQ_DESC(sw_cons
)];
3670 cid
= SW_CID(elem
->message
.data
.cfc_del_event
.cid
);
3671 opcode
= elem
->message
.opcode
;
3674 /* handle eq element */
3676 case EVENT_RING_OPCODE_STAT_QUERY
:
3677 DP(NETIF_MSG_TIMER
, "got statistics comp event\n");
3678 /* nothing to do with stats comp */
3681 case EVENT_RING_OPCODE_CFC_DEL
:
3682 /* handle according to cid range */
3684 * we may want to verify here that the bp state is
3687 DP(NETIF_MSG_IFDOWN
,
3688 "got delete ramrod for MULTI[%d]\n", cid
);
3690 if (!bnx2x_cnic_handle_cfc_del(bp
, cid
, elem
))
3693 bnx2x_fp(bp
, cid
, state
) =
3694 BNX2X_FP_STATE_CLOSED
;
3699 switch (opcode
| bp
->state
) {
3700 case (EVENT_RING_OPCODE_FUNCTION_START
|
3701 BNX2X_STATE_OPENING_WAIT4_PORT
):
3702 DP(NETIF_MSG_IFUP
, "got setup ramrod\n");
3703 bp
->state
= BNX2X_STATE_FUNC_STARTED
;
3706 case (EVENT_RING_OPCODE_FUNCTION_STOP
|
3707 BNX2X_STATE_CLOSING_WAIT4_HALT
):
3708 DP(NETIF_MSG_IFDOWN
, "got halt ramrod\n");
3709 bp
->state
= BNX2X_STATE_CLOSING_WAIT4_UNLOAD
;
3712 case (EVENT_RING_OPCODE_SET_MAC
| BNX2X_STATE_OPEN
):
3713 case (EVENT_RING_OPCODE_SET_MAC
| BNX2X_STATE_DIAG
):
3714 DP(NETIF_MSG_IFUP
, "got set mac ramrod\n");
3715 bp
->set_mac_pending
= 0;
3718 case (EVENT_RING_OPCODE_SET_MAC
|
3719 BNX2X_STATE_CLOSING_WAIT4_HALT
):
3720 DP(NETIF_MSG_IFDOWN
, "got (un)set mac ramrod\n");
3721 bp
->set_mac_pending
= 0;
3724 /* unknown event log error and continue */
3725 BNX2X_ERR("Unknown EQ event %d\n",
3726 elem
->message
.opcode
);
3732 smp_mb__before_atomic_inc();
3733 atomic_add(spqe_cnt
, &bp
->spq_left
);
3735 bp
->eq_cons
= sw_cons
;
3736 bp
->eq_prod
= sw_prod
;
3737 /* Make sure that above mem writes were issued towards the memory */
3740 /* update producer */
3741 bnx2x_update_eq_prod(bp
, bp
->eq_prod
);
3744 static void bnx2x_sp_task(struct work_struct
*work
)
3746 struct bnx2x
*bp
= container_of(work
, struct bnx2x
, sp_task
.work
);
3749 /* Return here if interrupt is disabled */
3750 if (unlikely(atomic_read(&bp
->intr_sem
) != 0)) {
3751 DP(NETIF_MSG_INTR
, "called but intr_sem not 0, returning\n");
3755 status
= bnx2x_update_dsb_idx(bp
);
3756 /* if (status == 0) */
3757 /* BNX2X_ERR("spurious slowpath interrupt!\n"); */
3759 DP(NETIF_MSG_INTR
, "got a slowpath interrupt (status 0x%x)\n", status
);
3762 if (status
& BNX2X_DEF_SB_ATT_IDX
) {
3764 status
&= ~BNX2X_DEF_SB_ATT_IDX
;
3767 /* SP events: STAT_QUERY and others */
3768 if (status
& BNX2X_DEF_SB_IDX
) {
3770 /* Handle EQ completions */
3773 bnx2x_ack_sb(bp
, bp
->igu_dsb_id
, USTORM_ID
,
3774 le16_to_cpu(bp
->def_idx
), IGU_INT_NOP
, 1);
3776 status
&= ~BNX2X_DEF_SB_IDX
;
3779 if (unlikely(status
))
3780 DP(NETIF_MSG_INTR
, "got an unknown interrupt! (status 0x%x)\n",
3783 bnx2x_ack_sb(bp
, bp
->igu_dsb_id
, ATTENTION_ID
,
3784 le16_to_cpu(bp
->def_att_idx
), IGU_INT_ENABLE
, 1);
3787 irqreturn_t
bnx2x_msix_sp_int(int irq
, void *dev_instance
)
3789 struct net_device
*dev
= dev_instance
;
3790 struct bnx2x
*bp
= netdev_priv(dev
);
3792 /* Return here if interrupt is disabled */
3793 if (unlikely(atomic_read(&bp
->intr_sem
) != 0)) {
3794 DP(NETIF_MSG_INTR
, "called but intr_sem not 0, returning\n");
3798 bnx2x_ack_sb(bp
, bp
->igu_dsb_id
, USTORM_ID
, 0,
3799 IGU_INT_DISABLE
, 0);
3801 #ifdef BNX2X_STOP_ON_ERROR
3802 if (unlikely(bp
->panic
))
3808 struct cnic_ops
*c_ops
;
3811 c_ops
= rcu_dereference(bp
->cnic_ops
);
3813 c_ops
->cnic_handler(bp
->cnic_data
, NULL
);
3817 queue_delayed_work(bnx2x_wq
, &bp
->sp_task
, 0);
3822 /* end of slow path */
3824 static void bnx2x_timer(unsigned long data
)
3826 struct bnx2x
*bp
= (struct bnx2x
*) data
;
3828 if (!netif_running(bp
->dev
))
3831 if (atomic_read(&bp
->intr_sem
) != 0)
3835 struct bnx2x_fastpath
*fp
= &bp
->fp
[0];
3839 rc
= bnx2x_rx_int(fp
, 1000);
3842 if (!BP_NOMCP(bp
)) {
3843 int mb_idx
= BP_FW_MB_IDX(bp
);
3847 ++bp
->fw_drv_pulse_wr_seq
;
3848 bp
->fw_drv_pulse_wr_seq
&= DRV_PULSE_SEQ_MASK
;
3849 /* TBD - add SYSTEM_TIME */
3850 drv_pulse
= bp
->fw_drv_pulse_wr_seq
;
3851 SHMEM_WR(bp
, func_mb
[mb_idx
].drv_pulse_mb
, drv_pulse
);
3853 mcp_pulse
= (SHMEM_RD(bp
, func_mb
[mb_idx
].mcp_pulse_mb
) &
3854 MCP_PULSE_SEQ_MASK
);
3855 /* The delta between driver pulse and mcp response
3856 * should be 1 (before mcp response) or 0 (after mcp response)
3858 if ((drv_pulse
!= mcp_pulse
) &&
3859 (drv_pulse
!= ((mcp_pulse
+ 1) & MCP_PULSE_SEQ_MASK
))) {
3860 /* someone lost a heartbeat... */
3861 BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
3862 drv_pulse
, mcp_pulse
);
3866 if (bp
->state
== BNX2X_STATE_OPEN
)
3867 bnx2x_stats_handle(bp
, STATS_EVENT_UPDATE
);
3870 mod_timer(&bp
->timer
, jiffies
+ bp
->current_interval
);
3873 /* end of Statistics */
3878 * nic init service functions
3881 static inline void bnx2x_fill(struct bnx2x
*bp
, u32 addr
, int fill
, u32 len
)
3884 if (!(len
%4) && !(addr
%4))
3885 for (i
= 0; i
< len
; i
+= 4)
3886 REG_WR(bp
, addr
+ i
, fill
);
3888 for (i
= 0; i
< len
; i
++)
3889 REG_WR8(bp
, addr
+ i
, fill
);
3893 /* helper: writes FP SP data to FW - data_size in dwords */
3894 static inline void bnx2x_wr_fp_sb_data(struct bnx2x
*bp
,
3900 for (index
= 0; index
< data_size
; index
++)
3901 REG_WR(bp
, BAR_CSTRORM_INTMEM
+
3902 CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id
) +
3904 *(sb_data_p
+ index
));
3907 static inline void bnx2x_zero_fp_sb(struct bnx2x
*bp
, int fw_sb_id
)
3911 struct hc_status_block_data_e2 sb_data_e2
;
3912 struct hc_status_block_data_e1x sb_data_e1x
;
3914 /* disable the function first */
3915 if (CHIP_IS_E2(bp
)) {
3916 memset(&sb_data_e2
, 0, sizeof(struct hc_status_block_data_e2
));
3917 sb_data_e2
.common
.p_func
.pf_id
= HC_FUNCTION_DISABLED
;
3918 sb_data_e2
.common
.p_func
.vf_id
= HC_FUNCTION_DISABLED
;
3919 sb_data_e2
.common
.p_func
.vf_valid
= false;
3920 sb_data_p
= (u32
*)&sb_data_e2
;
3921 data_size
= sizeof(struct hc_status_block_data_e2
)/sizeof(u32
);
3923 memset(&sb_data_e1x
, 0,
3924 sizeof(struct hc_status_block_data_e1x
));
3925 sb_data_e1x
.common
.p_func
.pf_id
= HC_FUNCTION_DISABLED
;
3926 sb_data_e1x
.common
.p_func
.vf_id
= HC_FUNCTION_DISABLED
;
3927 sb_data_e1x
.common
.p_func
.vf_valid
= false;
3928 sb_data_p
= (u32
*)&sb_data_e1x
;
3929 data_size
= sizeof(struct hc_status_block_data_e1x
)/sizeof(u32
);
3931 bnx2x_wr_fp_sb_data(bp
, fw_sb_id
, sb_data_p
, data_size
);
3933 bnx2x_fill(bp
, BAR_CSTRORM_INTMEM
+
3934 CSTORM_STATUS_BLOCK_OFFSET(fw_sb_id
), 0,
3935 CSTORM_STATUS_BLOCK_SIZE
);
3936 bnx2x_fill(bp
, BAR_CSTRORM_INTMEM
+
3937 CSTORM_SYNC_BLOCK_OFFSET(fw_sb_id
), 0,
3938 CSTORM_SYNC_BLOCK_SIZE
);
3941 /* helper: writes SP SB data to FW */
3942 static inline void bnx2x_wr_sp_sb_data(struct bnx2x
*bp
,
3943 struct hc_sp_status_block_data
*sp_sb_data
)
3945 int func
= BP_FUNC(bp
);
3947 for (i
= 0; i
< sizeof(struct hc_sp_status_block_data
)/sizeof(u32
); i
++)
3948 REG_WR(bp
, BAR_CSTRORM_INTMEM
+
3949 CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func
) +
3951 *((u32
*)sp_sb_data
+ i
));
3954 static inline void bnx2x_zero_sp_sb(struct bnx2x
*bp
)
3956 int func
= BP_FUNC(bp
);
3957 struct hc_sp_status_block_data sp_sb_data
;
3958 memset(&sp_sb_data
, 0, sizeof(struct hc_sp_status_block_data
));
3960 sp_sb_data
.p_func
.pf_id
= HC_FUNCTION_DISABLED
;
3961 sp_sb_data
.p_func
.vf_id
= HC_FUNCTION_DISABLED
;
3962 sp_sb_data
.p_func
.vf_valid
= false;
3964 bnx2x_wr_sp_sb_data(bp
, &sp_sb_data
);
3966 bnx2x_fill(bp
, BAR_CSTRORM_INTMEM
+
3967 CSTORM_SP_STATUS_BLOCK_OFFSET(func
), 0,
3968 CSTORM_SP_STATUS_BLOCK_SIZE
);
3969 bnx2x_fill(bp
, BAR_CSTRORM_INTMEM
+
3970 CSTORM_SP_SYNC_BLOCK_OFFSET(func
), 0,
3971 CSTORM_SP_SYNC_BLOCK_SIZE
);
3977 void bnx2x_setup_ndsb_state_machine(struct hc_status_block_sm
*hc_sm
,
3978 int igu_sb_id
, int igu_seg_id
)
3980 hc_sm
->igu_sb_id
= igu_sb_id
;
3981 hc_sm
->igu_seg_id
= igu_seg_id
;
3982 hc_sm
->timer_value
= 0xFF;
3983 hc_sm
->time_to_expire
= 0xFFFFFFFF;
3986 static void bnx2x_init_sb(struct bnx2x
*bp
, dma_addr_t mapping
, int vfid
,
3987 u8 vf_valid
, int fw_sb_id
, int igu_sb_id
)
3991 struct hc_status_block_data_e2 sb_data_e2
;
3992 struct hc_status_block_data_e1x sb_data_e1x
;
3993 struct hc_status_block_sm
*hc_sm_p
;
3994 struct hc_index_data
*hc_index_p
;
3998 if (CHIP_INT_MODE_IS_BC(bp
))
3999 igu_seg_id
= HC_SEG_ACCESS_NORM
;
4001 igu_seg_id
= IGU_SEG_ACCESS_NORM
;
4003 bnx2x_zero_fp_sb(bp
, fw_sb_id
);
4005 if (CHIP_IS_E2(bp
)) {
4006 memset(&sb_data_e2
, 0, sizeof(struct hc_status_block_data_e2
));
4007 sb_data_e2
.common
.p_func
.pf_id
= BP_FUNC(bp
);
4008 sb_data_e2
.common
.p_func
.vf_id
= vfid
;
4009 sb_data_e2
.common
.p_func
.vf_valid
= vf_valid
;
4010 sb_data_e2
.common
.p_func
.vnic_id
= BP_VN(bp
);
4011 sb_data_e2
.common
.same_igu_sb_1b
= true;
4012 sb_data_e2
.common
.host_sb_addr
.hi
= U64_HI(mapping
);
4013 sb_data_e2
.common
.host_sb_addr
.lo
= U64_LO(mapping
);
4014 hc_sm_p
= sb_data_e2
.common
.state_machine
;
4015 hc_index_p
= sb_data_e2
.index_data
;
4016 sb_data_p
= (u32
*)&sb_data_e2
;
4017 data_size
= sizeof(struct hc_status_block_data_e2
)/sizeof(u32
);
4019 memset(&sb_data_e1x
, 0,
4020 sizeof(struct hc_status_block_data_e1x
));
4021 sb_data_e1x
.common
.p_func
.pf_id
= BP_FUNC(bp
);
4022 sb_data_e1x
.common
.p_func
.vf_id
= 0xff;
4023 sb_data_e1x
.common
.p_func
.vf_valid
= false;
4024 sb_data_e1x
.common
.p_func
.vnic_id
= BP_VN(bp
);
4025 sb_data_e1x
.common
.same_igu_sb_1b
= true;
4026 sb_data_e1x
.common
.host_sb_addr
.hi
= U64_HI(mapping
);
4027 sb_data_e1x
.common
.host_sb_addr
.lo
= U64_LO(mapping
);
4028 hc_sm_p
= sb_data_e1x
.common
.state_machine
;
4029 hc_index_p
= sb_data_e1x
.index_data
;
4030 sb_data_p
= (u32
*)&sb_data_e1x
;
4031 data_size
= sizeof(struct hc_status_block_data_e1x
)/sizeof(u32
);
4034 bnx2x_setup_ndsb_state_machine(&hc_sm_p
[SM_RX_ID
],
4035 igu_sb_id
, igu_seg_id
);
4036 bnx2x_setup_ndsb_state_machine(&hc_sm_p
[SM_TX_ID
],
4037 igu_sb_id
, igu_seg_id
);
4039 DP(NETIF_MSG_HW
, "Init FW SB %d\n", fw_sb_id
);
4041 /* write indecies to HW */
4042 bnx2x_wr_fp_sb_data(bp
, fw_sb_id
, sb_data_p
, data_size
);
4045 static void bnx2x_update_coalesce_sb_index(struct bnx2x
*bp
, u16 fw_sb_id
,
4046 u8 sb_index
, u8 disable
, u16 usec
)
4048 int port
= BP_PORT(bp
);
4049 u8 ticks
= usec
/ BNX2X_BTR
;
4051 storm_memset_hc_timeout(bp
, port
, fw_sb_id
, sb_index
, ticks
);
4053 disable
= disable
? 1 : (usec
? 0 : 1);
4054 storm_memset_hc_disable(bp
, port
, fw_sb_id
, sb_index
, disable
);
4057 static void bnx2x_update_coalesce_sb(struct bnx2x
*bp
, u16 fw_sb_id
,
4058 u16 tx_usec
, u16 rx_usec
)
4060 bnx2x_update_coalesce_sb_index(bp
, fw_sb_id
, U_SB_ETH_RX_CQ_INDEX
,
4062 bnx2x_update_coalesce_sb_index(bp
, fw_sb_id
, C_SB_ETH_TX_CQ_INDEX
,
4066 static void bnx2x_init_def_sb(struct bnx2x
*bp
)
4068 struct host_sp_status_block
*def_sb
= bp
->def_status_blk
;
4069 dma_addr_t mapping
= bp
->def_status_blk_mapping
;
4070 int igu_sp_sb_index
;
4072 int port
= BP_PORT(bp
);
4073 int func
= BP_FUNC(bp
);
4077 struct hc_sp_status_block_data sp_sb_data
;
4078 memset(&sp_sb_data
, 0, sizeof(struct hc_sp_status_block_data
));
4080 if (CHIP_INT_MODE_IS_BC(bp
)) {
4081 igu_sp_sb_index
= DEF_SB_IGU_ID
;
4082 igu_seg_id
= HC_SEG_ACCESS_DEF
;
4084 igu_sp_sb_index
= bp
->igu_dsb_id
;
4085 igu_seg_id
= IGU_SEG_ACCESS_DEF
;
4089 section
= ((u64
)mapping
) + offsetof(struct host_sp_status_block
,
4090 atten_status_block
);
4091 def_sb
->atten_status_block
.status_block_id
= igu_sp_sb_index
;
4095 reg_offset
= (port
? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0
:
4096 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0
);
4097 for (index
= 0; index
< MAX_DYNAMIC_ATTN_GRPS
; index
++) {
4099 /* take care of sig[0]..sig[4] */
4100 for (sindex
= 0; sindex
< 4; sindex
++)
4101 bp
->attn_group
[index
].sig
[sindex
] =
4102 REG_RD(bp
, reg_offset
+ sindex
*0x4 + 0x10*index
);
4106 * enable5 is separate from the rest of the registers,
4107 * and therefore the address skip is 4
4108 * and not 16 between the different groups
4110 bp
->attn_group
[index
].sig
[4] = REG_RD(bp
,
4111 reg_offset
+ 0x10 + 0x4*index
);
4113 bp
->attn_group
[index
].sig
[4] = 0;
4116 if (bp
->common
.int_block
== INT_BLOCK_HC
) {
4117 reg_offset
= (port
? HC_REG_ATTN_MSG1_ADDR_L
:
4118 HC_REG_ATTN_MSG0_ADDR_L
);
4120 REG_WR(bp
, reg_offset
, U64_LO(section
));
4121 REG_WR(bp
, reg_offset
+ 4, U64_HI(section
));
4122 } else if (CHIP_IS_E2(bp
)) {
4123 REG_WR(bp
, IGU_REG_ATTN_MSG_ADDR_L
, U64_LO(section
));
4124 REG_WR(bp
, IGU_REG_ATTN_MSG_ADDR_H
, U64_HI(section
));
4127 section
= ((u64
)mapping
) + offsetof(struct host_sp_status_block
,
4130 bnx2x_zero_sp_sb(bp
);
4132 sp_sb_data
.host_sb_addr
.lo
= U64_LO(section
);
4133 sp_sb_data
.host_sb_addr
.hi
= U64_HI(section
);
4134 sp_sb_data
.igu_sb_id
= igu_sp_sb_index
;
4135 sp_sb_data
.igu_seg_id
= igu_seg_id
;
4136 sp_sb_data
.p_func
.pf_id
= func
;
4137 sp_sb_data
.p_func
.vnic_id
= BP_VN(bp
);
4138 sp_sb_data
.p_func
.vf_id
= 0xff;
4140 bnx2x_wr_sp_sb_data(bp
, &sp_sb_data
);
4142 bp
->stats_pending
= 0;
4143 bp
->set_mac_pending
= 0;
4145 bnx2x_ack_sb(bp
, bp
->igu_dsb_id
, USTORM_ID
, 0, IGU_INT_ENABLE
, 0);
4148 void bnx2x_update_coalesce(struct bnx2x
*bp
)
4152 for_each_queue(bp
, i
)
4153 bnx2x_update_coalesce_sb(bp
, bp
->fp
[i
].fw_sb_id
,
4154 bp
->rx_ticks
, bp
->tx_ticks
);
4157 static void bnx2x_init_sp_ring(struct bnx2x
*bp
)
4159 spin_lock_init(&bp
->spq_lock
);
4160 atomic_set(&bp
->spq_left
, MAX_SPQ_PENDING
);
4162 bp
->spq_prod_idx
= 0;
4163 bp
->dsb_sp_prod
= BNX2X_SP_DSB_INDEX
;
4164 bp
->spq_prod_bd
= bp
->spq
;
4165 bp
->spq_last_bd
= bp
->spq_prod_bd
+ MAX_SP_DESC_CNT
;
4168 static void bnx2x_init_eq_ring(struct bnx2x
*bp
)
4171 for (i
= 1; i
<= NUM_EQ_PAGES
; i
++) {
4172 union event_ring_elem
*elem
=
4173 &bp
->eq_ring
[EQ_DESC_CNT_PAGE
* i
- 1];
4175 elem
->next_page
.addr
.hi
=
4176 cpu_to_le32(U64_HI(bp
->eq_mapping
+
4177 BCM_PAGE_SIZE
* (i
% NUM_EQ_PAGES
)));
4178 elem
->next_page
.addr
.lo
=
4179 cpu_to_le32(U64_LO(bp
->eq_mapping
+
4180 BCM_PAGE_SIZE
*(i
% NUM_EQ_PAGES
)));
4183 bp
->eq_prod
= NUM_EQ_DESC
;
4184 bp
->eq_cons_sb
= BNX2X_EQ_INDEX
;
4187 static void bnx2x_init_ind_table(struct bnx2x
*bp
)
4189 int func
= BP_FUNC(bp
);
4192 if (bp
->multi_mode
== ETH_RSS_MODE_DISABLED
)
4196 "Initializing indirection table multi_mode %d\n", bp
->multi_mode
);
4197 for (i
= 0; i
< TSTORM_INDIRECTION_TABLE_SIZE
; i
++)
4198 REG_WR8(bp
, BAR_TSTRORM_INTMEM
+
4199 TSTORM_INDIRECTION_TABLE_OFFSET(func
) + i
,
4200 bp
->fp
->cl_id
+ (i
% bp
->num_queues
));
4203 void bnx2x_set_storm_rx_mode(struct bnx2x
*bp
)
4205 int mode
= bp
->rx_mode
;
4208 /* All but management unicast packets should pass to the host as well */
4210 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_BRCST
|
4211 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_MLCST
|
4212 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_VLAN
|
4213 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_NO_VLAN
;
4216 case BNX2X_RX_MODE_NONE
: /* no Rx */
4217 cl_id
= BP_L_ID(bp
);
4218 bnx2x_rxq_set_mac_filters(bp
, cl_id
, BNX2X_ACCEPT_NONE
);
4221 case BNX2X_RX_MODE_NORMAL
:
4222 cl_id
= BP_L_ID(bp
);
4223 bnx2x_rxq_set_mac_filters(bp
, cl_id
,
4224 BNX2X_ACCEPT_UNICAST
|
4225 BNX2X_ACCEPT_BROADCAST
|
4226 BNX2X_ACCEPT_MULTICAST
);
4229 case BNX2X_RX_MODE_ALLMULTI
:
4230 cl_id
= BP_L_ID(bp
);
4231 bnx2x_rxq_set_mac_filters(bp
, cl_id
,
4232 BNX2X_ACCEPT_UNICAST
|
4233 BNX2X_ACCEPT_BROADCAST
|
4234 BNX2X_ACCEPT_ALL_MULTICAST
);
4237 case BNX2X_RX_MODE_PROMISC
:
4238 cl_id
= BP_L_ID(bp
);
4239 bnx2x_rxq_set_mac_filters(bp
, cl_id
, BNX2X_PROMISCUOUS_MODE
);
4241 /* pass management unicast packets as well */
4242 llh_mask
|= NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_UNCST
;
4246 BNX2X_ERR("BAD rx mode (%d)\n", mode
);
4251 BP_PORT(bp
) ? NIG_REG_LLH1_BRB1_DRV_MASK
:
4252 NIG_REG_LLH0_BRB1_DRV_MASK
,
4255 DP(NETIF_MSG_IFUP
, "rx mode %d\n"
4256 "drop_ucast 0x%x\ndrop_mcast 0x%x\ndrop_bcast 0x%x\n"
4257 "accp_ucast 0x%x\naccp_mcast 0x%x\naccp_bcast 0x%x\n", mode
,
4258 bp
->mac_filters
.ucast_drop_all
,
4259 bp
->mac_filters
.mcast_drop_all
,
4260 bp
->mac_filters
.bcast_drop_all
,
4261 bp
->mac_filters
.ucast_accept_all
,
4262 bp
->mac_filters
.mcast_accept_all
,
4263 bp
->mac_filters
.bcast_accept_all
4266 storm_memset_mac_filters(bp
, &bp
->mac_filters
, BP_FUNC(bp
));
4269 static void bnx2x_init_internal_common(struct bnx2x
*bp
)
4273 if (!CHIP_IS_E1(bp
)) {
4275 /* xstorm needs to know whether to add ovlan to packets or not,
4276 * in switch-independent we'll write 0 to here... */
4277 REG_WR8(bp
, BAR_XSTRORM_INTMEM
+ XSTORM_FUNCTION_MODE_OFFSET
,
4279 REG_WR8(bp
, BAR_TSTRORM_INTMEM
+ TSTORM_FUNCTION_MODE_OFFSET
,
4281 REG_WR8(bp
, BAR_CSTRORM_INTMEM
+ CSTORM_FUNCTION_MODE_OFFSET
,
4283 REG_WR8(bp
, BAR_USTRORM_INTMEM
+ USTORM_FUNCTION_MODE_OFFSET
,
4289 * In switch independent mode, the TSTORM needs to accept
4290 * packets that failed classification, since approximate match
4291 * mac addresses aren't written to NIG LLH
4293 REG_WR8(bp
, BAR_TSTRORM_INTMEM
+
4294 TSTORM_ACCEPT_CLASSIFY_FAILED_OFFSET
, 2);
4296 /* Zero this manually as its initialization is
4297 currently missing in the initTool */
4298 for (i
= 0; i
< (USTORM_AGG_DATA_SIZE
>> 2); i
++)
4299 REG_WR(bp
, BAR_USTRORM_INTMEM
+
4300 USTORM_AGG_DATA_OFFSET
+ i
* 4, 0);
4301 if (CHIP_IS_E2(bp
)) {
4302 REG_WR8(bp
, BAR_CSTRORM_INTMEM
+ CSTORM_IGU_MODE_OFFSET
,
4303 CHIP_INT_MODE_IS_BC(bp
) ?
4304 HC_IGU_BC_MODE
: HC_IGU_NBC_MODE
);
4308 static void bnx2x_init_internal_port(struct bnx2x
*bp
)
4313 static void bnx2x_init_internal(struct bnx2x
*bp
, u32 load_code
)
/* Dispatch internal-memory init matching the MCP load response code:
 * COMMON(_CHIP) runs chip-common init, PORT runs per-port init, and
 * per-function memory is handled later in bnx2x_pf_init (per the comment
 * below).  NOTE(review): the break/fall-through statements between the
 * cases were lost in this extraction - in-tree bnx2x relies on deliberate
 * fall-through (COMMON also performs PORT init); confirm the exact
 * structure against the complete file before modifying this switch. */
4315 switch (load_code
) {
4316 case FW_MSG_CODE_DRV_LOAD_COMMON
:
4317 case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP
:
4318 bnx2x_init_internal_common(bp
);
4321 case FW_MSG_CODE_DRV_LOAD_PORT
:
4322 bnx2x_init_internal_port(bp
);
4325 case FW_MSG_CODE_DRV_LOAD_FUNCTION
:
4326 /* internal memory per function is
4327 initialized inside bnx2x_pf_init */
/* default: unknown load_code - log and continue */
4331 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code
);
4336 static void bnx2x_init_fp_sb(struct bnx2x
*bp
, int fp_idx
)
4338 struct bnx2x_fastpath
*fp
= &bp
->fp
[fp_idx
];
4340 fp
->state
= BNX2X_FP_STATE_CLOSED
;
4342 fp
->index
= fp
->cid
= fp_idx
;
4343 fp
->cl_id
= BP_L_ID(bp
) + fp_idx
;
4344 fp
->fw_sb_id
= bp
->base_fw_ndsb
+ fp
->cl_id
+ CNIC_CONTEXT_USE
;
4345 fp
->igu_sb_id
= bp
->igu_base_sb
+ fp_idx
+ CNIC_CONTEXT_USE
;
4346 /* qZone id equals to FW (per path) client id */
4347 fp
->cl_qzone_id
= fp
->cl_id
+
4348 BP_PORT(bp
)*(CHIP_IS_E2(bp
) ? ETH_MAX_RX_CLIENTS_E2
:
4349 ETH_MAX_RX_CLIENTS_E1H
);
4351 fp
->ustorm_rx_prods_offset
= CHIP_IS_E2(bp
) ?
4352 USTORM_RX_PRODS_E2_OFFSET(fp
->cl_qzone_id
) :
4353 USTORM_RX_PRODS_E1X_OFFSET(BP_PORT(bp
), fp
->cl_id
);
4354 /* Setup SB indicies */
4355 fp
->rx_cons_sb
= BNX2X_RX_SB_INDEX
;
4356 fp
->tx_cons_sb
= BNX2X_TX_SB_INDEX
;
4358 DP(NETIF_MSG_IFUP
, "queue[%d]: bnx2x_init_sb(%p,%p) "
4359 "cl_id %d fw_sb %d igu_sb %d\n",
4360 fp_idx
, bp
, fp
->status_blk
.e1x_sb
, fp
->cl_id
, fp
->fw_sb_id
,
4362 bnx2x_init_sb(bp
, fp
->status_blk_mapping
, BNX2X_VF_ID_INVALID
, false,
4363 fp
->fw_sb_id
, fp
->igu_sb_id
);
4365 bnx2x_update_fpsb_idx(fp
);
4368 void bnx2x_nic_init(struct bnx2x
*bp
, u32 load_code
)
4372 for_each_queue(bp
, i
)
4373 bnx2x_init_fp_sb(bp
, i
);
4376 bnx2x_init_sb(bp
, bp
->cnic_sb_mapping
,
4377 BNX2X_VF_ID_INVALID
, false,
4378 CNIC_SB_ID(bp
), CNIC_IGU_SB_ID(bp
));
4382 /* ensure status block indices were read */
4385 bnx2x_init_def_sb(bp
);
4386 bnx2x_update_dsb_idx(bp
);
4387 bnx2x_init_rx_rings(bp
);
4388 bnx2x_init_tx_rings(bp
);
4389 bnx2x_init_sp_ring(bp
);
4390 bnx2x_init_eq_ring(bp
);
4391 bnx2x_init_internal(bp
, load_code
);
4393 bnx2x_init_ind_table(bp
);
4394 bnx2x_stats_init(bp
);
4396 /* At this point, we are ready for interrupts */
4397 atomic_set(&bp
->intr_sem
, 0);
4399 /* flush all before enabling interrupts */
4403 bnx2x_int_enable(bp
);
4405 /* Check for SPIO5 */
4406 bnx2x_attn_int_deasserted0(bp
,
4407 REG_RD(bp
, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0
+ BP_PORT(bp
)*4) &
4408 AEU_INPUTS_ATTN_BITS_SPIO5
);
4411 /* end of nic init */
4414 * gzip service functions
4417 static int bnx2x_gunzip_init(struct bnx2x
*bp
)
4419 bp
->gunzip_buf
= dma_alloc_coherent(&bp
->pdev
->dev
, FW_BUF_SIZE
,
4420 &bp
->gunzip_mapping
, GFP_KERNEL
);
4421 if (bp
->gunzip_buf
== NULL
)
4424 bp
->strm
= kmalloc(sizeof(*bp
->strm
), GFP_KERNEL
);
4425 if (bp
->strm
== NULL
)
4428 bp
->strm
->workspace
= kmalloc(zlib_inflate_workspacesize(),
4430 if (bp
->strm
->workspace
== NULL
)
4440 dma_free_coherent(&bp
->pdev
->dev
, FW_BUF_SIZE
, bp
->gunzip_buf
,
4441 bp
->gunzip_mapping
);
4442 bp
->gunzip_buf
= NULL
;
4445 netdev_err(bp
->dev
, "Cannot allocate firmware buffer for"
4446 " un-compression\n");
4450 static void bnx2x_gunzip_end(struct bnx2x
*bp
)
4452 kfree(bp
->strm
->workspace
);
4456 if (bp
->gunzip_buf
) {
4457 dma_free_coherent(&bp
->pdev
->dev
, FW_BUF_SIZE
, bp
->gunzip_buf
,
4458 bp
->gunzip_mapping
);
4459 bp
->gunzip_buf
= NULL
;
4463 static int bnx2x_gunzip(struct bnx2x
*bp
, const u8
*zbuf
, int len
)
4467 /* check gzip header */
4468 if ((zbuf
[0] != 0x1f) || (zbuf
[1] != 0x8b) || (zbuf
[2] != Z_DEFLATED
)) {
4469 BNX2X_ERR("Bad gzip header\n");
4477 if (zbuf
[3] & FNAME
)
4478 while ((zbuf
[n
++] != 0) && (n
< len
));
4480 bp
->strm
->next_in
= (typeof(bp
->strm
->next_in
))zbuf
+ n
;
4481 bp
->strm
->avail_in
= len
- n
;
4482 bp
->strm
->next_out
= bp
->gunzip_buf
;
4483 bp
->strm
->avail_out
= FW_BUF_SIZE
;
4485 rc
= zlib_inflateInit2(bp
->strm
, -MAX_WBITS
);
4489 rc
= zlib_inflate(bp
->strm
, Z_FINISH
);
4490 if ((rc
!= Z_OK
) && (rc
!= Z_STREAM_END
))
4491 netdev_err(bp
->dev
, "Firmware decompression error: %s\n",
4494 bp
->gunzip_outlen
= (FW_BUF_SIZE
- bp
->strm
->avail_out
);
4495 if (bp
->gunzip_outlen
& 0x3)
4496 netdev_err(bp
->dev
, "Firmware decompression error:"
4497 " gunzip_outlen (%d) not aligned\n",
4499 bp
->gunzip_outlen
>>= 2;
4501 zlib_inflateEnd(bp
->strm
);
4503 if (rc
== Z_STREAM_END
)
4509 /* nic load/unload */
4512 * General service functions
4515 /* send a NIG loopback debug packet */
4516 static void bnx2x_lb_pckt(struct bnx2x
*bp
)
4520 /* Ethernet source and destination addresses */
4521 wb_write
[0] = 0x55555555;
4522 wb_write
[1] = 0x55555555;
4523 wb_write
[2] = 0x20; /* SOP */
4524 REG_WR_DMAE(bp
, NIG_REG_DEBUG_PACKET_LB
, wb_write
, 3);
4526 /* NON-IP protocol */
4527 wb_write
[0] = 0x09000000;
4528 wb_write
[1] = 0x55555555;
4529 wb_write
[2] = 0x10; /* EOP, eop_bvalid = 0 */
4530 REG_WR_DMAE(bp
, NIG_REG_DEBUG_PACKET_LB
, wb_write
, 3);
4533 /* some of the internal memories
4534 * are not directly readable from the driver
4535 * to test them we send debug packets
4537 static int bnx2x_int_mem_test(struct bnx2x
*bp
)
4543 if (CHIP_REV_IS_FPGA(bp
))
4545 else if (CHIP_REV_IS_EMUL(bp
))
4550 /* Disable inputs of parser neighbor blocks */
4551 REG_WR(bp
, TSDM_REG_ENABLE_IN1
, 0x0);
4552 REG_WR(bp
, TCM_REG_PRS_IFEN
, 0x0);
4553 REG_WR(bp
, CFC_REG_DEBUG0
, 0x1);
4554 REG_WR(bp
, NIG_REG_PRS_REQ_IN_EN
, 0x0);
4556 /* Write 0 to parser credits for CFC search request */
4557 REG_WR(bp
, PRS_REG_CFC_SEARCH_INITIAL_CREDIT
, 0x0);
4559 /* send Ethernet packet */
4562 /* TODO do i reset NIG statistic? */
4563 /* Wait until NIG register shows 1 packet of size 0x10 */
4564 count
= 1000 * factor
;
4567 bnx2x_read_dmae(bp
, NIG_REG_STAT2_BRB_OCTET
, 2);
4568 val
= *bnx2x_sp(bp
, wb_data
[0]);
4576 BNX2X_ERR("NIG timeout val = 0x%x\n", val
);
4580 /* Wait until PRS register shows 1 packet */
4581 count
= 1000 * factor
;
4583 val
= REG_RD(bp
, PRS_REG_NUM_OF_PACKETS
);
4591 BNX2X_ERR("PRS timeout val = 0x%x\n", val
);
4595 /* Reset and init BRB, PRS */
4596 REG_WR(bp
, GRCBASE_MISC
+ MISC_REGISTERS_RESET_REG_1_CLEAR
, 0x03);
4598 REG_WR(bp
, GRCBASE_MISC
+ MISC_REGISTERS_RESET_REG_1_SET
, 0x03);
4600 bnx2x_init_block(bp
, BRB1_BLOCK
, COMMON_STAGE
);
4601 bnx2x_init_block(bp
, PRS_BLOCK
, COMMON_STAGE
);
4603 DP(NETIF_MSG_HW
, "part2\n");
4605 /* Disable inputs of parser neighbor blocks */
4606 REG_WR(bp
, TSDM_REG_ENABLE_IN1
, 0x0);
4607 REG_WR(bp
, TCM_REG_PRS_IFEN
, 0x0);
4608 REG_WR(bp
, CFC_REG_DEBUG0
, 0x1);
4609 REG_WR(bp
, NIG_REG_PRS_REQ_IN_EN
, 0x0);
4611 /* Write 0 to parser credits for CFC search request */
4612 REG_WR(bp
, PRS_REG_CFC_SEARCH_INITIAL_CREDIT
, 0x0);
4614 /* send 10 Ethernet packets */
4615 for (i
= 0; i
< 10; i
++)
4618 /* Wait until NIG register shows 10 + 1
4619 packets of size 11*0x10 = 0xb0 */
4620 count
= 1000 * factor
;
4623 bnx2x_read_dmae(bp
, NIG_REG_STAT2_BRB_OCTET
, 2);
4624 val
= *bnx2x_sp(bp
, wb_data
[0]);
4632 BNX2X_ERR("NIG timeout val = 0x%x\n", val
);
4636 /* Wait until PRS register shows 2 packets */
4637 val
= REG_RD(bp
, PRS_REG_NUM_OF_PACKETS
);
4639 BNX2X_ERR("PRS timeout val = 0x%x\n", val
);
4641 /* Write 1 to parser credits for CFC search request */
4642 REG_WR(bp
, PRS_REG_CFC_SEARCH_INITIAL_CREDIT
, 0x1);
4644 /* Wait until PRS register shows 3 packets */
4645 msleep(10 * factor
);
4646 /* Wait until NIG register shows 1 packet of size 0x10 */
4647 val
= REG_RD(bp
, PRS_REG_NUM_OF_PACKETS
);
4649 BNX2X_ERR("PRS timeout val = 0x%x\n", val
);
4651 /* clear NIG EOP FIFO */
4652 for (i
= 0; i
< 11; i
++)
4653 REG_RD(bp
, NIG_REG_INGRESS_EOP_LB_FIFO
);
4654 val
= REG_RD(bp
, NIG_REG_INGRESS_EOP_LB_EMPTY
);
4656 BNX2X_ERR("clear of NIG failed\n");
4660 /* Reset and init BRB, PRS, NIG */
4661 REG_WR(bp
, GRCBASE_MISC
+ MISC_REGISTERS_RESET_REG_1_CLEAR
, 0x03);
4663 REG_WR(bp
, GRCBASE_MISC
+ MISC_REGISTERS_RESET_REG_1_SET
, 0x03);
4665 bnx2x_init_block(bp
, BRB1_BLOCK
, COMMON_STAGE
);
4666 bnx2x_init_block(bp
, PRS_BLOCK
, COMMON_STAGE
);
4669 REG_WR(bp
, PRS_REG_NIC_MODE
, 1);
4672 /* Enable inputs of parser neighbor blocks */
4673 REG_WR(bp
, TSDM_REG_ENABLE_IN1
, 0x7fffffff);
4674 REG_WR(bp
, TCM_REG_PRS_IFEN
, 0x1);
4675 REG_WR(bp
, CFC_REG_DEBUG0
, 0x0);
4676 REG_WR(bp
, NIG_REG_PRS_REQ_IN_EN
, 0x1);
4678 DP(NETIF_MSG_HW
, "done\n");
4683 static void enable_blocks_attention(struct bnx2x
*bp
)
4685 REG_WR(bp
, PXP_REG_PXP_INT_MASK_0
, 0);
4687 REG_WR(bp
, PXP_REG_PXP_INT_MASK_1
, 0x40);
4689 REG_WR(bp
, PXP_REG_PXP_INT_MASK_1
, 0);
4690 REG_WR(bp
, DORQ_REG_DORQ_INT_MASK
, 0);
4691 REG_WR(bp
, CFC_REG_CFC_INT_MASK
, 0);
4693 * mask read length error interrupts in brb for parser
4694 * (parsing unit and 'checksum and crc' unit)
4695 * these errors are legal (PU reads fixed length and CAC can cause
4696 * read length error on truncated packets)
4698 REG_WR(bp
, BRB1_REG_BRB1_INT_MASK
, 0xFC00);
4699 REG_WR(bp
, QM_REG_QM_INT_MASK
, 0);
4700 REG_WR(bp
, TM_REG_TM_INT_MASK
, 0);
4701 REG_WR(bp
, XSDM_REG_XSDM_INT_MASK_0
, 0);
4702 REG_WR(bp
, XSDM_REG_XSDM_INT_MASK_1
, 0);
4703 REG_WR(bp
, XCM_REG_XCM_INT_MASK
, 0);
4704 /* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
4705 /* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
4706 REG_WR(bp
, USDM_REG_USDM_INT_MASK_0
, 0);
4707 REG_WR(bp
, USDM_REG_USDM_INT_MASK_1
, 0);
4708 REG_WR(bp
, UCM_REG_UCM_INT_MASK
, 0);
4709 /* REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
4710 /* REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
4711 REG_WR(bp
, GRCBASE_UPB
+ PB_REG_PB_INT_MASK
, 0);
4712 REG_WR(bp
, CSDM_REG_CSDM_INT_MASK_0
, 0);
4713 REG_WR(bp
, CSDM_REG_CSDM_INT_MASK_1
, 0);
4714 REG_WR(bp
, CCM_REG_CCM_INT_MASK
, 0);
4715 /* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
4716 /* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
4718 if (CHIP_REV_IS_FPGA(bp
))
4719 REG_WR(bp
, PXP2_REG_PXP2_INT_MASK_0
, 0x580000);
4720 else if (CHIP_IS_E2(bp
))
4721 REG_WR(bp
, PXP2_REG_PXP2_INT_MASK_0
,
4722 (PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_OF
4723 | PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_AFT
4724 | PXP2_PXP2_INT_MASK_0_REG_PGL_PCIE_ATTN
4725 | PXP2_PXP2_INT_MASK_0_REG_PGL_READ_BLOCKED
4726 | PXP2_PXP2_INT_MASK_0_REG_PGL_WRITE_BLOCKED
));
4728 REG_WR(bp
, PXP2_REG_PXP2_INT_MASK_0
, 0x480000);
4729 REG_WR(bp
, TSDM_REG_TSDM_INT_MASK_0
, 0);
4730 REG_WR(bp
, TSDM_REG_TSDM_INT_MASK_1
, 0);
4731 REG_WR(bp
, TCM_REG_TCM_INT_MASK
, 0);
4732 /* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
4733 /* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
4734 REG_WR(bp
, CDU_REG_CDU_INT_MASK
, 0);
4735 REG_WR(bp
, DMAE_REG_DMAE_INT_MASK
, 0);
4736 /* REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
4737 REG_WR(bp
, PBF_REG_PBF_INT_MASK
, 0X18); /* bit 3,4 masked */
4740 static const struct {
4743 } bnx2x_parity_mask
[] = {
4744 {PXP_REG_PXP_PRTY_MASK
, 0x3ffffff},
4745 {PXP2_REG_PXP2_PRTY_MASK_0
, 0xffffffff},
4746 {PXP2_REG_PXP2_PRTY_MASK_1
, 0x7f},
4747 {HC_REG_HC_PRTY_MASK
, 0x7},
4748 {MISC_REG_MISC_PRTY_MASK
, 0x1},
4749 {QM_REG_QM_PRTY_MASK
, 0x0},
4750 {DORQ_REG_DORQ_PRTY_MASK
, 0x0},
4751 {GRCBASE_UPB
+ PB_REG_PB_PRTY_MASK
, 0x0},
4752 {GRCBASE_XPB
+ PB_REG_PB_PRTY_MASK
, 0x0},
4753 {SRC_REG_SRC_PRTY_MASK
, 0x4}, /* bit 2 */
4754 {CDU_REG_CDU_PRTY_MASK
, 0x0},
4755 {CFC_REG_CFC_PRTY_MASK
, 0x0},
4756 {DBG_REG_DBG_PRTY_MASK
, 0x0},
4757 {DMAE_REG_DMAE_PRTY_MASK
, 0x0},
4758 {BRB1_REG_BRB1_PRTY_MASK
, 0x0},
4759 {PRS_REG_PRS_PRTY_MASK
, (1<<6)},/* bit 6 */
4760 {TSDM_REG_TSDM_PRTY_MASK
, 0x18}, /* bit 3,4 */
4761 {CSDM_REG_CSDM_PRTY_MASK
, 0x8}, /* bit 3 */
4762 {USDM_REG_USDM_PRTY_MASK
, 0x38}, /* bit 3,4,5 */
4763 {XSDM_REG_XSDM_PRTY_MASK
, 0x8}, /* bit 3 */
4764 {TSEM_REG_TSEM_PRTY_MASK_0
, 0x0},
4765 {TSEM_REG_TSEM_PRTY_MASK_1
, 0x0},
4766 {USEM_REG_USEM_PRTY_MASK_0
, 0x0},
4767 {USEM_REG_USEM_PRTY_MASK_1
, 0x0},
4768 {CSEM_REG_CSEM_PRTY_MASK_0
, 0x0},
4769 {CSEM_REG_CSEM_PRTY_MASK_1
, 0x0},
4770 {XSEM_REG_XSEM_PRTY_MASK_0
, 0x0},
4771 {XSEM_REG_XSEM_PRTY_MASK_1
, 0x0}
4774 static void enable_blocks_parity(struct bnx2x
*bp
)
4778 for (i
= 0; i
< ARRAY_SIZE(bnx2x_parity_mask
); i
++)
4779 REG_WR(bp
, bnx2x_parity_mask
[i
].addr
,
4780 bnx2x_parity_mask
[i
].mask
);
/*
 * bnx2x_reset_common - put the common (chip-wide) HW blocks into reset by
 * writing the MISC RESET_REG_1/2 "clear" registers.
 * NOTE(review): the value written to RESET_REG_1_CLEAR (original line
 * 4788) was lost in extraction -- confirm against the full source tree.
 */
4784 static void bnx2x_reset_common(struct bnx2x
*bp
)
4787 REG_WR(bp
, GRCBASE_MISC
+ MISC_REGISTERS_RESET_REG_1_CLEAR
,
/* 0x1403 selects the RESET_REG_2 block subset to put into reset */
4789 REG_WR(bp
, GRCBASE_MISC
+ MISC_REGISTERS_RESET_REG_2_CLEAR
, 0x1403);
/*
 * bnx2x_init_pxp - derive the PCIe write/read ordering parameters from
 * the device's PCIe Device Control register and program the PXP
 * arbiter accordingly.
 * NOTE(review): the declaration of 'devctl' and the lines overriding
 * r_order with bp->mrrs (original 4795-4796, 4801, 4803-4807) were lost
 * in extraction.
 */
4792 static void bnx2x_init_pxp(struct bnx2x
*bp
)
4795 int r_order
, w_order
;
/* read PCI_EXP_DEVCTL from PCIe capability space */
4797 pci_read_config_word(bp
->pdev
,
4798 bp
->pcie_cap
+ PCI_EXP_DEVCTL
, &devctl
);
4799 DP(NETIF_MSG_HW
, "read 0x%x from devctl\n", devctl
);
/* w_order <- Max Payload Size field (bits 7:5) */
4800 w_order
= ((devctl
& PCI_EXP_DEVCTL_PAYLOAD
) >> 5);
/* r_order <- Max Read Request Size field (bits 14:12) */
4802 r_order
= ((devctl
& PCI_EXP_DEVCTL_READRQ
) >> 12);
4804 DP(NETIF_MSG_HW
, "force read order to %d\n", bp
->mrrs
);
4808 bnx2x_init_pxp_arb(bp
, r_order
, w_order
);
/*
 * bnx2x_setup_fan_failure_detection - decide from shmem config2 whether
 * fan-failure detection is required (explicitly enabled, or implied by
 * the PHY type of either port) and, if so, arm SPIO 5 as an active-low
 * input whose events are forwarded to the IGU as an attention.
 * NOTE(review): the declarations of val/port/is_required and the
 * accumulation of bnx2x_fan_failure_det_req()'s result into is_required
 * (original 4813-4819, 4834-4839) were lost in extraction.
 */
4811 static void bnx2x_setup_fan_failure_detection(struct bnx2x
*bp
)
/* fan-failure policy comes from shared HW config word 2 */
4821 val
= SHMEM_RD(bp
, dev_info
.shared_hw_config
.config2
) &
4822 SHARED_HW_CFG_FAN_FAILURE_MASK
;
4824 if (val
== SHARED_HW_CFG_FAN_FAILURE_ENABLED
)
4828 * The fan failure mechanism is usually related to the PHY type since
4829 * the power consumption of the board is affected by the PHY. Currently,
4830 * fan is required for most designs with SFX7101, BCM8727 and BCM8481.
4832 else if (val
== SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE
)
/* ask the link code, per port, whether its PHY needs fan detection */
4833 for (port
= PORT_0
; port
< PORT_MAX
; port
++) {
4835 bnx2x_fan_failure_det_req(
4837 bp
->common
.shmem_base
,
4838 bp
->common
.shmem2_base
,
4842 DP(NETIF_MSG_HW
, "fan detection setting: %d\n", is_required
);
4844 if (is_required
== 0)
4847 /* Fan failure is indicated by SPIO 5 */
4848 bnx2x_set_spio(bp
, MISC_REGISTERS_SPIO_5
,
4849 MISC_REGISTERS_SPIO_INPUT_HI_Z
);
4851 /* set to active low mode */
4852 val
= REG_RD(bp
, MISC_REG_SPIO_INT
);
4853 val
|= ((1 << MISC_REGISTERS_SPIO_5
) <<
4854 MISC_REGISTERS_SPIO_INT_OLD_SET_POS
);
4855 REG_WR(bp
, MISC_REG_SPIO_INT
, val
);
4857 /* enable interrupt to signal the IGU */
4858 val
= REG_RD(bp
, MISC_REG_SPIO_EVENT_EN
);
4859 val
|= (1 << MISC_REGISTERS_SPIO_5
);
4860 REG_WR(bp
, MISC_REG_SPIO_EVENT_EN
, val
);
/*
 * bnx2x_pretend_func - make subsequent GRC accesses by this function be
 * treated by the PGL as if they came from pretend_func_num, by writing
 * that function number into our own PGL_PRETEND_FUNC_Fx register.
 * Pass back BP_ABS_FUNC(bp) to stop pretending.
 * NOTE(review): the 'u32 offset' declaration, the switch case labels
 * (case 0..7), and the early return / default branch (original
 * 4865-4871, 4873-4899) were lost in extraction.
 */
4863 static void bnx2x_pretend_func(struct bnx2x
*bp
, u8 pretend_func_num
)
/* E1H has fewer functions; reject out-of-range pretend targets */
4869 if (CHIP_IS_E1H(bp
) && (pretend_func_num
>= E1H_FUNC_MAX
))
/* pick the pretend register belonging to *our* absolute function */
4872 switch (BP_ABS_FUNC(bp
)) {
4874 offset
= PXP2_REG_PGL_PRETEND_FUNC_F0
;
4877 offset
= PXP2_REG_PGL_PRETEND_FUNC_F1
;
4880 offset
= PXP2_REG_PGL_PRETEND_FUNC_F2
;
4883 offset
= PXP2_REG_PGL_PRETEND_FUNC_F3
;
4886 offset
= PXP2_REG_PGL_PRETEND_FUNC_F4
;
4889 offset
= PXP2_REG_PGL_PRETEND_FUNC_F5
;
4892 offset
= PXP2_REG_PGL_PRETEND_FUNC_F6
;
4895 offset
= PXP2_REG_PGL_PRETEND_FUNC_F7
;
4901 REG_WR(bp
, offset
, pretend_func_num
);
4903 DP(NETIF_MSG_HW
, "Pretending to func %d\n", pretend_func_num
);
4906 static void bnx2x_pf_disable(struct bnx2x
*bp
)
4908 u32 val
= REG_RD(bp
, IGU_REG_PF_CONFIGURATION
);
4909 val
&= ~IGU_PF_CONF_FUNC_EN
;
4911 REG_WR(bp
, IGU_REG_PF_CONFIGURATION
, val
);
4912 REG_WR(bp
, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER
, 0);
4913 REG_WR(bp
, CFC_REG_WEAK_ENABLE_PF
, 0);
/*
 * bnx2x_init_hw_common - one-time, per-path HW init performed by the
 * first function to load: global block resets, PXP/ILT and endian
 * setup, storm memory zeroing, COMMON_STAGE init of every HW block,
 * parity/attention unmasking, fan-failure setup and common PHY init.
 * NOTE(review): many original lines (variable declarations, msleep()
 * delays, "if (val != 1)" poll guards, error returns, closing braces)
 * were lost in extraction; the surviving code is kept verbatim.
 */
4916 static int bnx2x_init_hw_common(struct bnx2x
*bp
, u32 load_code
)
4920 DP(BNX2X_MSG_MCP
, "starting common init func %d\n", BP_ABS_FUNC(bp
));
/* assert reset on common blocks, then release them all */
4922 bnx2x_reset_common(bp
);
4923 REG_WR(bp
, GRCBASE_MISC
+ MISC_REGISTERS_RESET_REG_1_SET
, 0xffffffff);
4924 REG_WR(bp
, GRCBASE_MISC
+ MISC_REGISTERS_RESET_REG_2_SET
, 0xfffc);
4926 bnx2x_init_block(bp
, MISC_BLOCK
, COMMON_STAGE
);
4927 if (!CHIP_IS_E1(bp
))
4928 REG_WR(bp
, MISC_REG_E1HMF_MODE
, IS_MF(bp
));
4930 if (CHIP_IS_E2(bp
)) {
4934 * 4-port mode or 2-port mode we need to turn of master-enable
4935 * for everyone, after that, turn it back on for self.
4936 * so, we disregard multi-function or not, and always disable
4937 * for all functions on the given path, this means 0,2,4,6 for
4938 * path 0 and 1,3,5,7 for path 1
4940 for (fid
= BP_PATH(bp
); fid
< E2_FUNC_MAX
*2; fid
+= 2) {
4941 if (fid
== BP_ABS_FUNC(bp
)) {
4943 PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER
,
/* pretend to be function 'fid' so the disable hits its registers */
4948 bnx2x_pretend_func(bp
, fid
);
4949 /* clear pf enable */
4950 bnx2x_pf_disable(bp
);
4951 bnx2x_pretend_func(bp
, BP_ABS_FUNC(bp
));
4955 bnx2x_init_block(bp
, PXP_BLOCK
, COMMON_STAGE
);
4956 if (CHIP_IS_E1(bp
)) {
4957 /* enable HW interrupt from PXP on USDM overflow
4958 bit 16 on INT_MASK_0 */
4959 REG_WR(bp
, PXP_REG_PXP_INT_MASK_0
, 0);
4962 bnx2x_init_block(bp
, PXP2_BLOCK
, COMMON_STAGE
);
/* PXP2 request-queue per-client endian-mode configuration */
4966 REG_WR(bp
, PXP2_REG_RQ_QM_ENDIAN_M
, 1);
4967 REG_WR(bp
, PXP2_REG_RQ_TM_ENDIAN_M
, 1);
4968 REG_WR(bp
, PXP2_REG_RQ_SRC_ENDIAN_M
, 1);
4969 REG_WR(bp
, PXP2_REG_RQ_CDU_ENDIAN_M
, 1);
4970 REG_WR(bp
, PXP2_REG_RQ_DBG_ENDIAN_M
, 1);
4971 /* make sure this value is 0 */
4972 REG_WR(bp
, PXP2_REG_RQ_HC_ENDIAN_M
, 0);
4974 /* REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
4975 REG_WR(bp
, PXP2_REG_RD_QM_SWAP_MODE
, 1);
4976 REG_WR(bp
, PXP2_REG_RD_TM_SWAP_MODE
, 1);
4977 REG_WR(bp
, PXP2_REG_RD_SRC_SWAP_MODE
, 1);
4978 REG_WR(bp
, PXP2_REG_RD_CDURD_SWAP_MODE
, 1);
4981 bnx2x_ilt_init_page_size(bp
, INITOP_SET
);
4983 if (CHIP_REV_IS_FPGA(bp
) && CHIP_IS_E1H(bp
))
4984 REG_WR(bp
, PXP2_REG_PGL_TAGS_LIMIT
, 0x1);
4986 /* let the HW do it's magic ... */
4988 /* finish PXP init */
/* NOTE(review): the "!= 1 -> return error" checks after these two
 * reads (original 4990-4993, 4995-4998) were lost in extraction */
4989 val
= REG_RD(bp
, PXP2_REG_RQ_CFG_DONE
);
4991 BNX2X_ERR("PXP2 CFG failed\n");
4994 val
= REG_RD(bp
, PXP2_REG_RD_INIT_DONE
);
4996 BNX2X_ERR("PXP2 RD_INIT failed\n");
5000 /* Timers bug workaround E2 only. We need to set the entire ILT to
5001 * have entries with value "0" and valid bit on.
5002 * This needs to be done by the first PF that is loaded in a path
5003 * (i.e. common phase)
5005 if (CHIP_IS_E2(bp
)) {
5006 struct ilt_client_info ilt_cli
;
5007 struct bnx2x_ilt ilt
;
5008 memset(&ilt_cli
, 0, sizeof(struct ilt_client_info
));
5009 memset(&ilt
, 0, sizeof(struct bnx2x_ilt
));
5011 /* initalize dummy TM client */
5013 ilt_cli
.end
= ILT_NUM_PAGE_ENTRIES
- 1;
5014 ilt_cli
.client_num
= ILT_CLIENT_TM
;
5016 /* Step 1: set zeroes to all ilt page entries with valid bit on
5017 * Step 2: set the timers first/last ilt entry to point
5018 * to the entire range to prevent ILT range error for 3rd/4th
5019 * vnic (this code assumes existance of the vnic)
5021 * both steps performed by call to bnx2x_ilt_client_init_op()
5022 * with dummy TM client
5024 * we must use pretend since PXP2_REG_RQ_##blk##_FIRST_ILT
5025 * and his brother are split registers
5027 bnx2x_pretend_func(bp
, (BP_PATH(bp
) + 6));
5028 bnx2x_ilt_client_init_op_ilt(bp
, &ilt
, &ilt_cli
, INITOP_CLEAR
);
5029 bnx2x_pretend_func(bp
, BP_ABS_FUNC(bp
));
5031 REG_WR(bp
, PXP2_REG_RQ_DRAM_ALIGN
, BNX2X_PXP_DRAM_ALIGN
);
5032 REG_WR(bp
, PXP2_REG_RQ_DRAM_ALIGN_RD
, BNX2X_PXP_DRAM_ALIGN
);
5033 REG_WR(bp
, PXP2_REG_RQ_DRAM_ALIGN_SEL
, 1);
5037 REG_WR(bp
, PXP2_REG_RQ_DISABLE_INPUTS
, 0);
5038 REG_WR(bp
, PXP2_REG_RD_DISABLE_INPUTS
, 0);
5040 if (CHIP_IS_E2(bp
)) {
/* poll budget: longer on emulation/FPGA where the HW is slower */
5041 int factor
= CHIP_REV_IS_EMUL(bp
) ? 1000 :
5042 (CHIP_REV_IS_FPGA(bp
) ? 400 : 0);
5043 bnx2x_init_block(bp
, PGLUE_B_BLOCK
, COMMON_STAGE
);
5045 bnx2x_init_block(bp
, ATC_BLOCK
, COMMON_STAGE
);
5047 /* let the HW do it's magic ... */
5050 val
= REG_RD(bp
, ATC_REG_ATC_INIT_DONE
);
5051 } while (factor
-- && (val
!= 1));
5054 BNX2X_ERR("ATC_INIT failed\n");
5059 bnx2x_init_block(bp
, DMAE_BLOCK
, COMMON_STAGE
);
5061 /* clean the DMAE memory */
5063 bnx2x_init_fill(bp
, TSEM_REG_PRAM
, 0, 8);
5065 bnx2x_init_block(bp
, TCM_BLOCK
, COMMON_STAGE
);
5066 bnx2x_init_block(bp
, UCM_BLOCK
, COMMON_STAGE
);
5067 bnx2x_init_block(bp
, CCM_BLOCK
, COMMON_STAGE
);
5068 bnx2x_init_block(bp
, XCM_BLOCK
, COMMON_STAGE
);
5070 bnx2x_read_dmae(bp
, XSEM_REG_PASSIVE_BUFFER
, 3);
5071 bnx2x_read_dmae(bp
, CSEM_REG_PASSIVE_BUFFER
, 3);
5072 bnx2x_read_dmae(bp
, TSEM_REG_PASSIVE_BUFFER
, 3);
5073 bnx2x_read_dmae(bp
, USEM_REG_PASSIVE_BUFFER
, 3);
5075 bnx2x_init_block(bp
, QM_BLOCK
, COMMON_STAGE
);
5077 if (CHIP_MODE_IS_4_PORT(bp
))
5078 bnx2x_init_block(bp
, QM_4PORT_BLOCK
, COMMON_STAGE
);
5080 /* QM queues pointers table */
5081 bnx2x_qm_init_ptr_table(bp
, bp
->qm_cid_count
, INITOP_SET
);
5083 /* soft reset pulse */
5084 REG_WR(bp
, QM_REG_SOFT_RESET
, 1);
5085 REG_WR(bp
, QM_REG_SOFT_RESET
, 0);
5088 bnx2x_init_block(bp
, TIMERS_BLOCK
, COMMON_STAGE
);
5091 bnx2x_init_block(bp
, DQ_BLOCK
, COMMON_STAGE
);
5092 REG_WR(bp
, DORQ_REG_DPM_CID_OFST
, BNX2X_DB_SHIFT
);
5094 if (!CHIP_REV_IS_SLOW(bp
)) {
5095 /* enable hw interrupt from doorbell Q */
5096 REG_WR(bp
, DORQ_REG_DORQ_INT_MASK
, 0);
5099 bnx2x_init_block(bp
, BRB1_BLOCK
, COMMON_STAGE
);
5100 if (CHIP_MODE_IS_4_PORT(bp
)) {
5101 REG_WR(bp
, BRB1_REG_FULL_LB_XOFF_THRESHOLD
, 248);
5102 REG_WR(bp
, BRB1_REG_FULL_LB_XON_THRESHOLD
, 328);
5105 bnx2x_init_block(bp
, PRS_BLOCK
, COMMON_STAGE
);
5106 REG_WR(bp
, PRS_REG_A_PRSU_20
, 0xf);
5109 REG_WR(bp
, PRS_REG_NIC_MODE
, 1);
5111 if (!CHIP_IS_E1(bp
))
5112 REG_WR(bp
, PRS_REG_E1HOV_MODE
, IS_MF_SD(bp
));
5114 if (CHIP_IS_E2(bp
)) {
5115 /* Bit-map indicating which L2 hdrs may appear after the
5116 basic Ethernet header */
5117 int has_ovlan
= IS_MF_SD(bp
);
5118 REG_WR(bp
, PRS_REG_HDRS_AFTER_BASIC
, (has_ovlan
? 7 : 6));
5119 REG_WR(bp
, PRS_REG_MUST_HAVE_HDRS
, (has_ovlan
? 1 : 0));
5122 bnx2x_init_block(bp
, TSDM_BLOCK
, COMMON_STAGE
);
5123 bnx2x_init_block(bp
, CSDM_BLOCK
, COMMON_STAGE
);
5124 bnx2x_init_block(bp
, USDM_BLOCK
, COMMON_STAGE
);
5125 bnx2x_init_block(bp
, XSDM_BLOCK
, COMMON_STAGE
);
/* zero the four storm fast memories */
5127 bnx2x_init_fill(bp
, TSEM_REG_FAST_MEMORY
, 0, STORM_INTMEM_SIZE(bp
));
5128 bnx2x_init_fill(bp
, USEM_REG_FAST_MEMORY
, 0, STORM_INTMEM_SIZE(bp
));
5129 bnx2x_init_fill(bp
, CSEM_REG_FAST_MEMORY
, 0, STORM_INTMEM_SIZE(bp
));
5130 bnx2x_init_fill(bp
, XSEM_REG_FAST_MEMORY
, 0, STORM_INTMEM_SIZE(bp
));
5132 bnx2x_init_block(bp
, TSEM_BLOCK
, COMMON_STAGE
);
5133 bnx2x_init_block(bp
, USEM_BLOCK
, COMMON_STAGE
);
5134 bnx2x_init_block(bp
, CSEM_BLOCK
, COMMON_STAGE
);
5135 bnx2x_init_block(bp
, XSEM_BLOCK
, COMMON_STAGE
);
5137 if (CHIP_MODE_IS_4_PORT(bp
))
5138 bnx2x_init_block(bp
, XSEM_4PORT_BLOCK
, COMMON_STAGE
);
/* NOTE(review): the values written to RESET_REG_1_CLEAR/SET here
 * (original 5142, 5144-5145) were lost in extraction */
5141 REG_WR(bp
, GRCBASE_MISC
+ MISC_REGISTERS_RESET_REG_1_CLEAR
,
5143 REG_WR(bp
, GRCBASE_MISC
+ MISC_REGISTERS_RESET_REG_1_SET
,
5146 bnx2x_init_block(bp
, UPB_BLOCK
, COMMON_STAGE
);
5147 bnx2x_init_block(bp
, XPB_BLOCK
, COMMON_STAGE
);
5148 bnx2x_init_block(bp
, PBF_BLOCK
, COMMON_STAGE
);
5150 if (CHIP_IS_E2(bp
)) {
5151 int has_ovlan
= IS_MF_SD(bp
);
5152 REG_WR(bp
, PBF_REG_HDRS_AFTER_BASIC
, (has_ovlan
? 7 : 6));
5153 REG_WR(bp
, PBF_REG_MUST_HAVE_HDRS
, (has_ovlan
? 1 : 0));
/* searcher (SRC): randomize the RSS key registers under soft reset */
5156 REG_WR(bp
, SRC_REG_SOFT_RST
, 1);
5157 for (i
= SRC_REG_KEYRSS0_0
; i
<= SRC_REG_KEYRSS1_9
; i
+= 4)
5158 REG_WR(bp
, i
, random32());
5160 bnx2x_init_block(bp
, SRCH_BLOCK
, COMMON_STAGE
);
/* fixed searcher hash keys */
5162 REG_WR(bp
, SRC_REG_KEYSEARCH_0
, 0x63285672);
5163 REG_WR(bp
, SRC_REG_KEYSEARCH_1
, 0x24b8f2cc);
5164 REG_WR(bp
, SRC_REG_KEYSEARCH_2
, 0x223aef9b);
5165 REG_WR(bp
, SRC_REG_KEYSEARCH_3
, 0x26001e3a);
5166 REG_WR(bp
, SRC_REG_KEYSEARCH_4
, 0x7ae91116);
5167 REG_WR(bp
, SRC_REG_KEYSEARCH_5
, 0x5ce5230b);
5168 REG_WR(bp
, SRC_REG_KEYSEARCH_6
, 0x298d8adf);
5169 REG_WR(bp
, SRC_REG_KEYSEARCH_7
, 0x6eb0ff09);
5170 REG_WR(bp
, SRC_REG_KEYSEARCH_8
, 0x1830f82f);
5171 REG_WR(bp
, SRC_REG_KEYSEARCH_9
, 0x01e46be7);
5173 REG_WR(bp
, SRC_REG_SOFT_RST
, 0);
5175 if (sizeof(union cdu_context
) != 1024)
5176 /* we currently assume that a context is 1024 bytes */
5177 dev_alert(&bp
->pdev
->dev
, "please adjust the size "
5178 "of cdu_context(%ld)\n",
5179 (long)sizeof(union cdu_context
));
5181 bnx2x_init_block(bp
, CDU_BLOCK
, COMMON_STAGE
);
5182 val
= (4 << 24) + (0 << 12) + 1024;
5183 REG_WR(bp
, CDU_REG_CDU_GLOBAL_PARAMS
, val
);
5185 bnx2x_init_block(bp
, CFC_BLOCK
, COMMON_STAGE
);
5186 REG_WR(bp
, CFC_REG_INIT_REG
, 0x7FF);
5187 /* enable context validation interrupt from CFC */
5188 REG_WR(bp
, CFC_REG_CFC_INT_MASK
, 0);
5190 /* set the thresholds to prevent CFC/CDU race */
5191 REG_WR(bp
, CFC_REG_DEBUG0
, 0x20020000);
5193 bnx2x_init_block(bp
, HC_BLOCK
, COMMON_STAGE
);
5195 if (CHIP_IS_E2(bp
) && BP_NOMCP(bp
))
5196 REG_WR(bp
, IGU_REG_RESET_MEMORIES
, 0x36);
5198 bnx2x_init_block(bp
, IGU_BLOCK
, COMMON_STAGE
);
5199 bnx2x_init_block(bp
, MISC_AEU_BLOCK
, COMMON_STAGE
);
5201 bnx2x_init_block(bp
, PXPCS_BLOCK
, COMMON_STAGE
);
5202 /* Reset PCIE errors for debug */
5203 REG_WR(bp
, 0x2814, 0xffffffff);
5204 REG_WR(bp
, 0x3820, 0xffffffff);
5206 if (CHIP_IS_E2(bp
)) {
5207 REG_WR(bp
, PCICFG_OFFSET
+ PXPCS_TL_CONTROL_5
,
5208 (PXPCS_TL_CONTROL_5_ERR_UNSPPORT1
|
5209 PXPCS_TL_CONTROL_5_ERR_UNSPPORT
));
5210 REG_WR(bp
, PCICFG_OFFSET
+ PXPCS_TL_FUNC345_STAT
,
5211 (PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT4
|
5212 PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT3
|
5213 PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT2
));
5214 REG_WR(bp
, PCICFG_OFFSET
+ PXPCS_TL_FUNC678_STAT
,
5215 (PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT7
|
5216 PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT6
|
5217 PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT5
));
5220 bnx2x_init_block(bp
, EMAC0_BLOCK
, COMMON_STAGE
);
5221 bnx2x_init_block(bp
, EMAC1_BLOCK
, COMMON_STAGE
);
5222 bnx2x_init_block(bp
, DBU_BLOCK
, COMMON_STAGE
);
5223 bnx2x_init_block(bp
, DBG_BLOCK
, COMMON_STAGE
);
5225 bnx2x_init_block(bp
, NIG_BLOCK
, COMMON_STAGE
);
5226 if (!CHIP_IS_E1(bp
)) {
5227 REG_WR(bp
, NIG_REG_LLH_MF_MODE
, IS_MF(bp
));
5228 REG_WR(bp
, NIG_REG_LLH_E1HOV_MODE
, IS_MF_SD(bp
));
5230 if (CHIP_IS_E2(bp
)) {
5231 /* Bit-map indicating which L2 hdrs may appear after the
5232 basic Ethernet header */
5233 REG_WR(bp
, NIG_REG_P0_HDRS_AFTER_BASIC
, (IS_MF_SD(bp
) ? 7 : 6));
5236 if (CHIP_REV_IS_SLOW(bp
))
5239 /* finish CFC init */
5240 val
= reg_poll(bp
, CFC_REG_LL_INIT_DONE
, 1, 100, 10);
5242 BNX2X_ERR("CFC LL_INIT failed\n");
5245 val
= reg_poll(bp
, CFC_REG_AC_INIT_DONE
, 1, 100, 10);
5247 BNX2X_ERR("CFC AC_INIT failed\n");
5250 val
= reg_poll(bp
, CFC_REG_CAM_INIT_DONE
, 1, 100, 10);
5252 BNX2X_ERR("CFC CAM_INIT failed\n");
5255 REG_WR(bp
, CFC_REG_DEBUG0
, 0);
5257 if (CHIP_IS_E1(bp
)) {
5258 /* read NIG statistic
5259 to see if this is our first up since powerup */
5260 bnx2x_read_dmae(bp
, NIG_REG_STAT2_BRB_OCTET
, 2);
5261 val
= *bnx2x_sp(bp
, wb_data
[0]);
5263 /* do internal memory self test */
5264 if ((val
== 0) && bnx2x_int_mem_test(bp
)) {
5265 BNX2X_ERR("internal mem self test failed\n");
5270 bp
->port
.need_hw_lock
= bnx2x_hw_lock_required(bp
,
5271 bp
->common
.shmem_base
,
5272 bp
->common
.shmem2_base
);
5274 bnx2x_setup_fan_failure_detection(bp
);
5276 /* clear PXP2 attentions */
5277 REG_RD(bp
, PXP2_REG_PXP2_INT_STS_CLR_0
);
5279 enable_blocks_attention(bp
);
5280 if (CHIP_PARITY_SUPPORTED(bp
))
5281 enable_blocks_parity(bp
);
5283 if (!BP_NOMCP(bp
)) {
5284 /* In E2 2-PORT mode, same ext phy is used for the two paths */
5285 if ((load_code
== FW_MSG_CODE_DRV_LOAD_COMMON_CHIP
) ||
5287 u32 shmem_base
[2], shmem2_base
[2];
5288 shmem_base
[0] = bp
->common
.shmem_base
;
5289 shmem2_base
[0] = bp
->common
.shmem2_base
;
/* slot [1] of each array is the other path's shmem (E2 only) */
5290 if (CHIP_IS_E2(bp
)) {
5292 SHMEM2_RD(bp
, other_shmem_base_addr
);
5294 SHMEM2_RD(bp
, other_shmem2_base_addr
);
5296 bnx2x_acquire_phy_lock(bp
);
5297 bnx2x_common_init_phy(bp
, shmem_base
, shmem2_base
,
5298 bp
->common
.chip_id
);
5299 bnx2x_release_phy_lock(bp
);
5302 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
/*
 * bnx2x_init_hw_port - per-port HW init (PORT0/PORT1 stage): per-port
 * block init, BRB pause thresholds scaled by MTU/port count, PBF
 * no-pause credits, NIG/AEU attention masks and fan-failure attention
 * wiring for this port.
 * NOTE(review): several original lines (local declarations, some branch
 * bodies, closing braces) were lost in extraction; code kept verbatim.
 */
5307 static int bnx2x_init_hw_port(struct bnx2x
*bp
)
5309 int port
= BP_PORT(bp
);
5310 int init_stage
= port
? PORT1_STAGE
: PORT0_STAGE
;
5314 DP(BNX2X_MSG_MCP
, "starting port init port %d\n", port
);
5316 REG_WR(bp
, NIG_REG_MASK_INTERRUPT_PORT0
+ port
*4, 0);
5318 bnx2x_init_block(bp
, PXP_BLOCK
, init_stage
);
5319 bnx2x_init_block(bp
, PXP2_BLOCK
, init_stage
);
5321 /* Timers bug workaround: disables the pf_master bit in pglue at
5322 * common phase, we need to enable it here before any dmae access are
5323 * attempted. Therefore we manually added the enable-master to the
5324 * port phase (it also happens in the function phase)
5327 REG_WR(bp
, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER
, 1);
5329 bnx2x_init_block(bp
, TCM_BLOCK
, init_stage
);
5330 bnx2x_init_block(bp
, UCM_BLOCK
, init_stage
);
5331 bnx2x_init_block(bp
, CCM_BLOCK
, init_stage
);
5332 bnx2x_init_block(bp
, XCM_BLOCK
, init_stage
);
5334 /* QM cid (connection) count */
5335 bnx2x_qm_init_cid_count(bp
, bp
->qm_cid_count
, INITOP_SET
);
5338 bnx2x_init_block(bp
, TIMERS_BLOCK
, init_stage
);
5339 REG_WR(bp
, TM_REG_LIN0_SCAN_TIME
+ port
*4, 20);
5340 REG_WR(bp
, TM_REG_LIN0_MAX_ACTIVE_CID
+ port
*4, 31);
5343 bnx2x_init_block(bp
, DQ_BLOCK
, init_stage
);
5345 if (CHIP_MODE_IS_4_PORT(bp
))
5346 bnx2x_init_block(bp
, QM_4PORT_BLOCK
, init_stage
);
5348 if (CHIP_IS_E1(bp
) || CHIP_IS_E1H(bp
)) {
5349 bnx2x_init_block(bp
, BRB1_BLOCK
, init_stage
);
5350 if (CHIP_REV_IS_SLOW(bp
) && CHIP_IS_E1(bp
)) {
5351 /* no pause for emulation and FPGA */
/* BRB pause thresholds scale with MTU and the one-port flag */
5356 low
= ((bp
->flags
& ONE_PORT_FLAG
) ? 160 : 246);
5357 else if (bp
->dev
->mtu
> 4096) {
5358 if (bp
->flags
& ONE_PORT_FLAG
)
5362 /* (24*1024 + val*4)/256 */
5363 low
= 96 + (val
/64) +
5364 ((val
% 64) ? 1 : 0);
5367 low
= ((bp
->flags
& ONE_PORT_FLAG
) ? 80 : 160);
5368 high
= low
+ 56; /* 14*1024/256 */
5370 REG_WR(bp
, BRB1_REG_PAUSE_LOW_THRESHOLD_0
+ port
*4, low
);
5371 REG_WR(bp
, BRB1_REG_PAUSE_HIGH_THRESHOLD_0
+ port
*4, high
);
5374 if (CHIP_MODE_IS_4_PORT(bp
)) {
5375 REG_WR(bp
, BRB1_REG_PAUSE_0_XOFF_THRESHOLD_0
+ port
*8, 248);
5376 REG_WR(bp
, BRB1_REG_PAUSE_0_XON_THRESHOLD_0
+ port
*8, 328);
5377 REG_WR(bp
, (BP_PORT(bp
) ? BRB1_REG_MAC_GUARANTIED_1
:
5378 BRB1_REG_MAC_GUARANTIED_0
), 40);
5381 bnx2x_init_block(bp
, PRS_BLOCK
, init_stage
);
5383 bnx2x_init_block(bp
, TSDM_BLOCK
, init_stage
);
5384 bnx2x_init_block(bp
, CSDM_BLOCK
, init_stage
);
5385 bnx2x_init_block(bp
, USDM_BLOCK
, init_stage
);
5386 bnx2x_init_block(bp
, XSDM_BLOCK
, init_stage
);
5388 bnx2x_init_block(bp
, TSEM_BLOCK
, init_stage
);
5389 bnx2x_init_block(bp
, USEM_BLOCK
, init_stage
);
5390 bnx2x_init_block(bp
, CSEM_BLOCK
, init_stage
);
5391 bnx2x_init_block(bp
, XSEM_BLOCK
, init_stage
);
5392 if (CHIP_MODE_IS_4_PORT(bp
))
5393 bnx2x_init_block(bp
, XSEM_4PORT_BLOCK
, init_stage
);
5395 bnx2x_init_block(bp
, UPB_BLOCK
, init_stage
);
5396 bnx2x_init_block(bp
, XPB_BLOCK
, init_stage
);
5398 bnx2x_init_block(bp
, PBF_BLOCK
, init_stage
);
5400 if (!CHIP_IS_E2(bp
)) {
5401 /* configure PBF to work without PAUSE mtu 9000 */
5402 REG_WR(bp
, PBF_REG_P0_PAUSE_ENABLE
+ port
*4, 0);
5404 /* update threshold */
5405 REG_WR(bp
, PBF_REG_P0_ARB_THRSH
+ port
*4, (9040/16));
5406 /* update init credit */
5407 REG_WR(bp
, PBF_REG_P0_INIT_CRD
+ port
*4, (9040/16) + 553 - 22);
/* pulse INIT_P0 to apply the new credit */
5410 REG_WR(bp
, PBF_REG_INIT_P0
+ port
*4, 1);
5412 REG_WR(bp
, PBF_REG_INIT_P0
+ port
*4, 0);
5416 bnx2x_init_block(bp
, SRCH_BLOCK
, init_stage
);
5418 bnx2x_init_block(bp
, CDU_BLOCK
, init_stage
);
5419 bnx2x_init_block(bp
, CFC_BLOCK
, init_stage
);
5421 if (CHIP_IS_E1(bp
)) {
5422 REG_WR(bp
, HC_REG_LEADING_EDGE_0
+ port
*8, 0);
5423 REG_WR(bp
, HC_REG_TRAILING_EDGE_0
+ port
*8, 0);
5425 bnx2x_init_block(bp
, HC_BLOCK
, init_stage
);
5427 bnx2x_init_block(bp
, IGU_BLOCK
, init_stage
);
5429 bnx2x_init_block(bp
, MISC_AEU_BLOCK
, init_stage
);
5430 /* init aeu_mask_attn_func_0/1:
5431 * - SF mode: bits 3-7 are masked. only bits 0-2 are in use
5432 * - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
5433 * bits 4-7 are used for "per vn group attention" */
5434 REG_WR(bp
, MISC_REG_AEU_MASK_ATTN_FUNC_0
+ port
*4,
5435 (IS_MF(bp
) ? 0xF7 : 0x7));
5437 bnx2x_init_block(bp
, PXPCS_BLOCK
, init_stage
);
5438 bnx2x_init_block(bp
, EMAC0_BLOCK
, init_stage
);
5439 bnx2x_init_block(bp
, EMAC1_BLOCK
, init_stage
);
5440 bnx2x_init_block(bp
, DBU_BLOCK
, init_stage
);
5441 bnx2x_init_block(bp
, DBG_BLOCK
, init_stage
);
5443 bnx2x_init_block(bp
, NIG_BLOCK
, init_stage
);
5445 REG_WR(bp
, NIG_REG_XGXS_SERDES0_MODE_SEL
+ port
*4, 1);
5447 if (!CHIP_IS_E1(bp
)) {
5448 /* 0x2 disable mf_ov, 0x1 enable */
5449 REG_WR(bp
, NIG_REG_LLH0_BRB1_DRV_MASK_MF
+ port
*4,
5450 (IS_MF_SD(bp
) ? 0x1 : 0x2));
5452 if (CHIP_IS_E2(bp
)) {
/* NOTE(review): the 'val' assignments inside each mf_mode case
 * (original 5456-5462) were lost in extraction */
5454 switch (bp
->mf_mode
) {
5455 case MULTI_FUNCTION_SD
:
5458 case MULTI_FUNCTION_SI
:
5463 REG_WR(bp
, (BP_PORT(bp
) ? NIG_REG_LLH1_CLS_TYPE
:
5464 NIG_REG_LLH0_CLS_TYPE
), val
);
5467 REG_WR(bp
, NIG_REG_LLFC_ENABLE_0
+ port
*4, 0);
5468 REG_WR(bp
, NIG_REG_LLFC_OUT_EN_0
+ port
*4, 0);
5469 REG_WR(bp
, NIG_REG_PAUSE_ENABLE_0
+ port
*4, 1);
5473 bnx2x_init_block(bp
, MCP_BLOCK
, init_stage
);
5474 bnx2x_init_block(bp
, DMAE_BLOCK
, init_stage
);
5475 bp
->port
.need_hw_lock
= bnx2x_hw_lock_required(bp
,
5476 bp
->common
.shmem_base
,
5477 bp
->common
.shmem2_base
);
5478 if (bnx2x_fan_failure_det_req(bp
, bp
->common
.shmem_base
,
5479 bp
->common
.shmem2_base
, port
)) {
/* unmask the SPIO5 input in this port's AEU enable register so a
 * fan failure raises an attention */
5480 u32 reg_addr
= (port
? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0
:
5481 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0
);
5482 val
= REG_RD(bp
, reg_addr
);
5483 val
|= AEU_INPUTS_ATTN_BITS_SPIO5
;
5484 REG_WR(bp
, reg_addr
, val
);
5486 bnx2x__link_reset(bp
);
/*
 * bnx2x_ilt_wr - write one ILT (internal lookup table) entry: the DMA
 * address is split into two 32-bit halves (ONCHIP_ADDR1/2) and written
 * with a wide-bus write.  The register base differs per chip revision
 * (PXP2_REG_RQ_ONCHIP_AT vs ..._AT_B0); the selecting condition
 * (original lines 5493-5497) was lost in extraction.
 */
5491 static void bnx2x_ilt_wr(struct bnx2x
*bp
, u32 index
, dma_addr_t addr
)
5496 reg
= PXP2_REG_RQ_ONCHIP_AT
+ index
*8;
5498 reg
= PXP2_REG_RQ_ONCHIP_AT_B0
+ index
*8;
5500 bnx2x_wb_wr(bp
, reg
, ONCHIP_ADDR1(addr
), ONCHIP_ADDR2(addr
));
/* Clear the given IGU status block on behalf of the PF: thin wrapper
 * around the generic helper with the PF (not VF) flavour selected. */
5503 static inline void bnx2x_igu_clear_sb(struct bnx2x
*bp
, u8 idu_sb_id
)
5505 bnx2x_igu_clear_sb_gen(bp
, idu_sb_id
, true /*PF*/);
5508 static inline void bnx2x_clear_func_ilt(struct bnx2x
*bp
, u32 func
)
5510 u32 i
, base
= FUNC_ILT_BASE(func
);
5511 for (i
= base
; i
< base
+ ILT_PER_FUNC
; i
++)
5512 bnx2x_ilt_wr(bp
, i
, 0);
/*
 * bnx2x_init_hw_func - per-function HW init (FUNC0_STAGE + func): map
 * this PF's context pages into the CDU ILT lines, set up the searcher
 * T1/T2 tables, run FUNC-stage init on every block, clear the IGU/HC
 * producer memory for this function's status blocks, and on E1x scrub
 * the HC main memory to clear false parity.
 * NOTE(review): several original lines (local declarations, #ifdef
 * BCM_CNIC open, msleep() waits, returns, closing braces) were lost in
 * extraction; surviving code kept verbatim.
 */
5515 static int bnx2x_init_hw_func(struct bnx2x
*bp
)
5517 int port
= BP_PORT(bp
);
5518 int func
= BP_FUNC(bp
);
5519 struct bnx2x_ilt
*ilt
= BP_ILT(bp
);
5522 u32 main_mem_base
, main_mem_size
, main_mem_prty_clr
;
5523 int i
, main_mem_width
;
5525 DP(BNX2X_MSG_MCP
, "starting func init func %d\n", func
);
5527 /* set MSI reconfigure capability */
5528 if (bp
->common
.int_block
== INT_BLOCK_HC
) {
5529 addr
= (port
? HC_REG_CONFIG_1
: HC_REG_CONFIG_0
);
5530 val
= REG_RD(bp
, addr
);
5531 val
|= HC_CONFIG_0_REG_MSI_ATTN_EN_0
;
5532 REG_WR(bp
, addr
, val
);
/* map this PF's context memory into the CDU client's ILT lines */
5536 cdu_ilt_start
= ilt
->clients
[ILT_CLIENT_CDU
].start
;
5538 for (i
= 0; i
< L2_ILT_LINES(bp
); i
++) {
5539 ilt
->lines
[cdu_ilt_start
+ i
].page
=
5540 bp
->context
.vcxt
+ (ILT_PAGE_CIDS
* i
);
5541 ilt
->lines
[cdu_ilt_start
+ i
].page_mapping
=
5542 bp
->context
.cxt_mapping
+ (CDU_ILT_PAGE_SZ
* i
);
5543 /* cdu ilt pages are allocated manually so there's no need to
5546 bnx2x_ilt_init_op(bp
, INITOP_SET
);
5549 bnx2x_src_init_t2(bp
, bp
->t2
, bp
->t2_mapping
, SRC_CONN_NUM
);
5551 /* T1 hash bits value determines the T1 number of entries */
5552 REG_WR(bp
, SRC_REG_NUMBER_HASH_BITS0
+ port
*4, SRC_HASH_BITS
);
5557 REG_WR(bp
, PRS_REG_NIC_MODE
, 1);
5558 #endif /* BCM_CNIC */
5560 if (CHIP_IS_E2(bp
)) {
5561 u32 pf_conf
= IGU_PF_CONF_FUNC_EN
;
5563 /* Turn on a single ISR mode in IGU if driver is going to use
5566 if (!(bp
->flags
& USING_MSIX_FLAG
))
5567 pf_conf
|= IGU_PF_CONF_SINGLE_ISR_EN
;
5569 * Timers workaround bug: function init part.
5570 * Need to wait 20msec after initializing ILT,
5571 * needed to make sure there are no requests in
5572 * one of the PXP internal queues with "old" ILT addresses
5576 * Master enable - Due to WB DMAE writes performed before this
5577 * register is re-initialized as part of the regular function
5580 REG_WR(bp
, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER
, 1);
5581 /* Enable the function in IGU */
5582 REG_WR(bp
, IGU_REG_PF_CONFIGURATION
, pf_conf
);
5587 bnx2x_init_block(bp
, PGLUE_B_BLOCK
, FUNC0_STAGE
+ func
);
/* clear this PF's sticky PGLUE error latch */
5590 REG_WR(bp
, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR
, func
);
5592 bnx2x_init_block(bp
, MISC_BLOCK
, FUNC0_STAGE
+ func
);
5593 bnx2x_init_block(bp
, TCM_BLOCK
, FUNC0_STAGE
+ func
);
5594 bnx2x_init_block(bp
, UCM_BLOCK
, FUNC0_STAGE
+ func
);
5595 bnx2x_init_block(bp
, CCM_BLOCK
, FUNC0_STAGE
+ func
);
5596 bnx2x_init_block(bp
, XCM_BLOCK
, FUNC0_STAGE
+ func
);
5597 bnx2x_init_block(bp
, TSEM_BLOCK
, FUNC0_STAGE
+ func
);
5598 bnx2x_init_block(bp
, USEM_BLOCK
, FUNC0_STAGE
+ func
);
5599 bnx2x_init_block(bp
, CSEM_BLOCK
, FUNC0_STAGE
+ func
);
5600 bnx2x_init_block(bp
, XSEM_BLOCK
, FUNC0_STAGE
+ func
);
5602 if (CHIP_IS_E2(bp
)) {
5603 REG_WR(bp
, BAR_XSTRORM_INTMEM
+ XSTORM_PATH_ID_OFFSET
,
5605 REG_WR(bp
, BAR_CSTRORM_INTMEM
+ CSTORM_PATH_ID_OFFSET
,
5609 if (CHIP_MODE_IS_4_PORT(bp
))
5610 bnx2x_init_block(bp
, XSEM_4PORT_BLOCK
, FUNC0_STAGE
+ func
);
5613 REG_WR(bp
, QM_REG_PF_EN
, 1);
5615 bnx2x_init_block(bp
, QM_BLOCK
, FUNC0_STAGE
+ func
);
5617 if (CHIP_MODE_IS_4_PORT(bp
))
5618 bnx2x_init_block(bp
, QM_4PORT_BLOCK
, FUNC0_STAGE
+ func
);
5620 bnx2x_init_block(bp
, TIMERS_BLOCK
, FUNC0_STAGE
+ func
);
5621 bnx2x_init_block(bp
, DQ_BLOCK
, FUNC0_STAGE
+ func
);
5622 bnx2x_init_block(bp
, BRB1_BLOCK
, FUNC0_STAGE
+ func
);
5623 bnx2x_init_block(bp
, PRS_BLOCK
, FUNC0_STAGE
+ func
);
5624 bnx2x_init_block(bp
, TSDM_BLOCK
, FUNC0_STAGE
+ func
);
5625 bnx2x_init_block(bp
, CSDM_BLOCK
, FUNC0_STAGE
+ func
);
5626 bnx2x_init_block(bp
, USDM_BLOCK
, FUNC0_STAGE
+ func
);
5627 bnx2x_init_block(bp
, XSDM_BLOCK
, FUNC0_STAGE
+ func
);
5628 bnx2x_init_block(bp
, UPB_BLOCK
, FUNC0_STAGE
+ func
);
5629 bnx2x_init_block(bp
, XPB_BLOCK
, FUNC0_STAGE
+ func
);
5630 bnx2x_init_block(bp
, PBF_BLOCK
, FUNC0_STAGE
+ func
);
5632 REG_WR(bp
, PBF_REG_DISABLE_PF
, 0);
5634 bnx2x_init_block(bp
, CDU_BLOCK
, FUNC0_STAGE
+ func
);
5636 bnx2x_init_block(bp
, CFC_BLOCK
, FUNC0_STAGE
+ func
);
5639 REG_WR(bp
, CFC_REG_WEAK_ENABLE_PF
, 1);
5642 REG_WR(bp
, NIG_REG_LLH0_FUNC_EN
+ port
*8, 1);
5643 REG_WR(bp
, NIG_REG_LLH0_FUNC_VLAN_ID
+ port
*8, bp
->mf_ov
);
5646 bnx2x_init_block(bp
, MISC_AEU_BLOCK
, FUNC0_STAGE
+ func
);
5648 /* HC init per function */
5649 if (bp
->common
.int_block
== INT_BLOCK_HC
) {
5650 if (CHIP_IS_E1H(bp
)) {
5651 REG_WR(bp
, MISC_REG_AEU_GENERAL_ATTN_12
+ func
*4, 0);
5653 REG_WR(bp
, HC_REG_LEADING_EDGE_0
+ port
*8, 0);
5654 REG_WR(bp
, HC_REG_TRAILING_EDGE_0
+ port
*8, 0);
5656 bnx2x_init_block(bp
, HC_BLOCK
, FUNC0_STAGE
+ func
);
5659 int num_segs
, sb_idx
, prod_offset
;
5661 REG_WR(bp
, MISC_REG_AEU_GENERAL_ATTN_12
+ func
*4, 0);
5663 if (CHIP_IS_E2(bp
)) {
5664 REG_WR(bp
, IGU_REG_LEADING_EDGE_LATCH
, 0);
5665 REG_WR(bp
, IGU_REG_TRAILING_EDGE_LATCH
, 0);
5668 bnx2x_init_block(bp
, IGU_BLOCK
, FUNC0_STAGE
+ func
);
5670 if (CHIP_IS_E2(bp
)) {
5674 * E2 mode: address 0-135 match to the mapping memory;
5675 * 136 - PF0 default prod; 137 - PF1 default prod;
5676 * 138 - PF2 default prod; 139 - PF3 default prod;
5677 * 140 - PF0 attn prod; 141 - PF1 attn prod;
5678 * 142 - PF2 attn prod; 143 - PF3 attn prod;
5681 * E1.5 mode - In backward compatible mode;
5682 * for non default SB; each even line in the memory
5683 * holds the U producer and each odd line hold
5684 * the C producer. The first 128 producers are for
5685 * NDSB (PF0 - 0-31; PF1 - 32-63 and so on). The last 20
5686 * producers are for the DSB for each PF.
5687 * Each PF has five segments: (the order inside each
5688 * segment is PF0; PF1; PF2; PF3) - 128-131 U prods;
5689 * 132-135 C prods; 136-139 X prods; 140-143 T prods;
5690 * 144-147 attn prods;
5692 /* non-default-status-blocks */
5693 num_segs
= CHIP_INT_MODE_IS_BC(bp
) ?
5694 IGU_BC_NDSB_NUM_SEGS
: IGU_NORM_NDSB_NUM_SEGS
;
5695 for (sb_idx
= 0; sb_idx
< bp
->igu_sb_cnt
; sb_idx
++) {
5696 prod_offset
= (bp
->igu_base_sb
+ sb_idx
) *
/* zero each producer/consumer pair for this status block */
5699 for (i
= 0; i
< num_segs
; i
++) {
5700 addr
= IGU_REG_PROD_CONS_MEMORY
+
5701 (prod_offset
+ i
) * 4;
5702 REG_WR(bp
, addr
, 0);
5704 /* send consumer update with value 0 */
5705 bnx2x_ack_sb(bp
, bp
->igu_base_sb
+ sb_idx
,
5706 USTORM_ID
, 0, IGU_INT_NOP
, 1);
5707 bnx2x_igu_clear_sb(bp
,
5708 bp
->igu_base_sb
+ sb_idx
);
5711 /* default-status-blocks */
5712 num_segs
= CHIP_INT_MODE_IS_BC(bp
) ?
5713 IGU_BC_DSB_NUM_SEGS
: IGU_NORM_DSB_NUM_SEGS
;
5715 if (CHIP_MODE_IS_4_PORT(bp
))
5716 dsb_idx
= BP_FUNC(bp
);
5718 dsb_idx
= BP_E1HVN(bp
);
5720 prod_offset
= (CHIP_INT_MODE_IS_BC(bp
) ?
5721 IGU_BC_BASE_DSB_PROD
+ dsb_idx
:
5722 IGU_NORM_BASE_DSB_PROD
+ dsb_idx
);
5724 for (i
= 0; i
< (num_segs
* E1HVN_MAX
);
5726 addr
= IGU_REG_PROD_CONS_MEMORY
+
5727 (prod_offset
+ i
)*4;
5728 REG_WR(bp
, addr
, 0);
5730 /* send consumer update with 0 */
5731 if (CHIP_INT_MODE_IS_BC(bp
)) {
5732 bnx2x_ack_sb(bp
, bp
->igu_dsb_id
,
5733 USTORM_ID
, 0, IGU_INT_NOP
, 1);
5734 bnx2x_ack_sb(bp
, bp
->igu_dsb_id
,
5735 CSTORM_ID
, 0, IGU_INT_NOP
, 1);
5736 bnx2x_ack_sb(bp
, bp
->igu_dsb_id
,
5737 XSTORM_ID
, 0, IGU_INT_NOP
, 1);
5738 bnx2x_ack_sb(bp
, bp
->igu_dsb_id
,
5739 TSTORM_ID
, 0, IGU_INT_NOP
, 1);
5740 bnx2x_ack_sb(bp
, bp
->igu_dsb_id
,
5741 ATTENTION_ID
, 0, IGU_INT_NOP
, 1);
5743 bnx2x_ack_sb(bp
, bp
->igu_dsb_id
,
5744 USTORM_ID
, 0, IGU_INT_NOP
, 1);
5745 bnx2x_ack_sb(bp
, bp
->igu_dsb_id
,
5746 ATTENTION_ID
, 0, IGU_INT_NOP
, 1);
5748 bnx2x_igu_clear_sb(bp
, bp
->igu_dsb_id
);
5750 /* !!! these should become driver const once
5751 rf-tool supports split-68 const */
5752 REG_WR(bp
, IGU_REG_SB_INT_BEFORE_MASK_LSB
, 0);
5753 REG_WR(bp
, IGU_REG_SB_INT_BEFORE_MASK_MSB
, 0);
5754 REG_WR(bp
, IGU_REG_SB_MASK_LSB
, 0);
5755 REG_WR(bp
, IGU_REG_SB_MASK_MSB
, 0);
5756 REG_WR(bp
, IGU_REG_PBA_STATUS_LSB
, 0);
5757 REG_WR(bp
, IGU_REG_PBA_STATUS_MSB
, 0);
5761 /* Reset PCIE errors for debug */
5762 REG_WR(bp
, 0x2114, 0xffffffff);
5763 REG_WR(bp
, 0x2120, 0xffffffff);
5765 bnx2x_init_block(bp
, EMAC0_BLOCK
, FUNC0_STAGE
+ func
);
5766 bnx2x_init_block(bp
, EMAC1_BLOCK
, FUNC0_STAGE
+ func
);
5767 bnx2x_init_block(bp
, DBU_BLOCK
, FUNC0_STAGE
+ func
);
5768 bnx2x_init_block(bp
, DBG_BLOCK
, FUNC0_STAGE
+ func
);
5769 bnx2x_init_block(bp
, MCP_BLOCK
, FUNC0_STAGE
+ func
);
5770 bnx2x_init_block(bp
, DMAE_BLOCK
, FUNC0_STAGE
+ func
);
5772 if (CHIP_IS_E1x(bp
)) {
5773 main_mem_size
= HC_REG_MAIN_MEMORY_SIZE
/ 2; /*dwords*/
5774 main_mem_base
= HC_REG_MAIN_MEMORY
+
5775 BP_PORT(bp
) * (main_mem_size
* 4);
5776 main_mem_prty_clr
= HC_REG_HC_PRTY_STS_CLR
;
5779 val
= REG_RD(bp
, main_mem_prty_clr
);
5781 DP(BNX2X_MSG_MCP
, "Hmmm... Parity errors in HC "
5783 "function init (0x%x)!\n", val
);
5785 /* Clear "false" parity errors in MSI-X table */
5786 for (i
= main_mem_base
;
5787 i
< main_mem_base
+ main_mem_size
* 4;
5788 i
+= main_mem_width
) {
/* DMAE read then write-back of each row clears the latched parity */
5789 bnx2x_read_dmae(bp
, i
, main_mem_width
/ 4);
5790 bnx2x_write_dmae(bp
, bnx2x_sp_mapping(bp
, wb_data
),
5791 i
, main_mem_width
/ 4);
5793 /* Clear HC parity attention */
5794 REG_RD(bp
, main_mem_prty_clr
);
5797 bnx2x_phy_probe(&bp
->link_params
);
/*
 * bnx2x_init_hw - top-level HW init entry: dispatch on the MCP-supplied
 * load_code to the common/port/function init stages (COMMON also runs
 * the PORT and FUNCTION stages on fall-through in the full source),
 * then latch the driver-pulse sequence from shmem.
 * NOTE(review): the rc checks, switch 'break's, default label and
 * return statement (original 5812-5839, 5848-5853) were lost in
 * extraction; code kept verbatim.
 */
5802 int bnx2x_init_hw(struct bnx2x
*bp
, u32 load_code
)
5806 DP(BNX2X_MSG_MCP
, "function %d load_code %x\n",
5807 BP_ABS_FUNC(bp
), load_code
);
5810 mutex_init(&bp
->dmae_mutex
);
/* gunzip buffer is needed by the init blobs loaded below */
5811 rc
= bnx2x_gunzip_init(bp
);
5815 switch (load_code
) {
5816 case FW_MSG_CODE_DRV_LOAD_COMMON
:
5817 case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP
:
5818 rc
= bnx2x_init_hw_common(bp
, load_code
);
5823 case FW_MSG_CODE_DRV_LOAD_PORT
:
5824 rc
= bnx2x_init_hw_port(bp
);
5829 case FW_MSG_CODE_DRV_LOAD_FUNCTION
:
5830 rc
= bnx2x_init_hw_func(bp
);
5836 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code
);
5840 if (!BP_NOMCP(bp
)) {
5841 int mb_idx
= BP_FW_MB_IDX(bp
);
/* record the current driver-pulse sequence from the func mailbox */
5843 bp
->fw_drv_pulse_wr_seq
=
5844 (SHMEM_RD(bp
, func_mb
[mb_idx
].drv_pulse_mb
) &
5845 DRV_PULSE_SEQ_MASK
);
5846 DP(BNX2X_MSG_MCP
, "drv_pulse 0x%x\n", bp
->fw_drv_pulse_wr_seq
);
5850 bnx2x_gunzip_end(bp
);
5855 void bnx2x_free_mem(struct bnx2x
*bp
)
5858 #define BNX2X_PCI_FREE(x, y, size) \
5861 dma_free_coherent(&bp->pdev->dev, size, (void *)x, y); \
5867 #define BNX2X_FREE(x) \
5879 for_each_queue(bp
, i
) {
5882 BNX2X_PCI_FREE(bnx2x_fp(bp
, i
, status_blk
.e2_sb
),
5883 bnx2x_fp(bp
, i
, status_blk_mapping
),
5884 sizeof(struct host_hc_status_block_e2
));
5886 BNX2X_PCI_FREE(bnx2x_fp(bp
, i
, status_blk
.e1x_sb
),
5887 bnx2x_fp(bp
, i
, status_blk_mapping
),
5888 sizeof(struct host_hc_status_block_e1x
));
5891 for_each_queue(bp
, i
) {
5893 /* fastpath rx rings: rx_buf rx_desc rx_comp */
5894 BNX2X_FREE(bnx2x_fp(bp
, i
, rx_buf_ring
));
5895 BNX2X_PCI_FREE(bnx2x_fp(bp
, i
, rx_desc_ring
),
5896 bnx2x_fp(bp
, i
, rx_desc_mapping
),
5897 sizeof(struct eth_rx_bd
) * NUM_RX_BD
);
5899 BNX2X_PCI_FREE(bnx2x_fp(bp
, i
, rx_comp_ring
),
5900 bnx2x_fp(bp
, i
, rx_comp_mapping
),
5901 sizeof(struct eth_fast_path_rx_cqe
) *
5905 BNX2X_FREE(bnx2x_fp(bp
, i
, rx_page_ring
));
5906 BNX2X_PCI_FREE(bnx2x_fp(bp
, i
, rx_sge_ring
),
5907 bnx2x_fp(bp
, i
, rx_sge_mapping
),
5908 BCM_PAGE_SIZE
* NUM_RX_SGE_PAGES
);
5911 for_each_queue(bp
, i
) {
5913 /* fastpath tx rings: tx_buf tx_desc */
5914 BNX2X_FREE(bnx2x_fp(bp
, i
, tx_buf_ring
));
5915 BNX2X_PCI_FREE(bnx2x_fp(bp
, i
, tx_desc_ring
),
5916 bnx2x_fp(bp
, i
, tx_desc_mapping
),
5917 sizeof(union eth_tx_bd_types
) * NUM_TX_BD
);
5919 /* end of fastpath */
5921 BNX2X_PCI_FREE(bp
->def_status_blk
, bp
->def_status_blk_mapping
,
5922 sizeof(struct host_sp_status_block
));
5924 BNX2X_PCI_FREE(bp
->slowpath
, bp
->slowpath_mapping
,
5925 sizeof(struct bnx2x_slowpath
));
5927 BNX2X_PCI_FREE(bp
->context
.vcxt
, bp
->context
.cxt_mapping
,
5930 bnx2x_ilt_mem_op(bp
, ILT_MEMOP_FREE
);
5932 BNX2X_FREE(bp
->ilt
->lines
);
5936 BNX2X_PCI_FREE(bp
->cnic_sb
.e2_sb
, bp
->cnic_sb_mapping
,
5937 sizeof(struct host_hc_status_block_e2
));
5939 BNX2X_PCI_FREE(bp
->cnic_sb
.e1x_sb
, bp
->cnic_sb_mapping
,
5940 sizeof(struct host_hc_status_block_e1x
));
5942 BNX2X_PCI_FREE(bp
->t2
, bp
->t2_mapping
, SRC_T2_SZ
);
5945 BNX2X_PCI_FREE(bp
->spq
, bp
->spq_mapping
, BCM_PAGE_SIZE
);
5947 BNX2X_PCI_FREE(bp
->eq_ring
, bp
->eq_mapping
,
5948 BCM_PAGE_SIZE
* NUM_EQ_PAGES
);
5950 #undef BNX2X_PCI_FREE
5954 static inline void set_sb_shortcuts(struct bnx2x
*bp
, int index
)
5956 union host_hc_status_block status_blk
= bnx2x_fp(bp
, index
, status_blk
);
5957 if (CHIP_IS_E2(bp
)) {
5958 bnx2x_fp(bp
, index
, sb_index_values
) =
5959 (__le16
*)status_blk
.e2_sb
->sb
.index_values
;
5960 bnx2x_fp(bp
, index
, sb_running_index
) =
5961 (__le16
*)status_blk
.e2_sb
->sb
.running_index
;
5963 bnx2x_fp(bp
, index
, sb_index_values
) =
5964 (__le16
*)status_blk
.e1x_sb
->sb
.index_values
;
5965 bnx2x_fp(bp
, index
, sb_running_index
) =
5966 (__le16
*)status_blk
.e1x_sb
->sb
.running_index
;
5970 int bnx2x_alloc_mem(struct bnx2x
*bp
)
5972 #define BNX2X_PCI_ALLOC(x, y, size) \
5974 x = dma_alloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \
5976 goto alloc_mem_err; \
5977 memset(x, 0, size); \
5980 #define BNX2X_ALLOC(x, size) \
5982 x = kzalloc(size, GFP_KERNEL); \
5984 goto alloc_mem_err; \
5991 for_each_queue(bp
, i
) {
5992 union host_hc_status_block
*sb
= &bnx2x_fp(bp
, i
, status_blk
);
5993 bnx2x_fp(bp
, i
, bp
) = bp
;
5996 BNX2X_PCI_ALLOC(sb
->e2_sb
,
5997 &bnx2x_fp(bp
, i
, status_blk_mapping
),
5998 sizeof(struct host_hc_status_block_e2
));
6000 BNX2X_PCI_ALLOC(sb
->e1x_sb
,
6001 &bnx2x_fp(bp
, i
, status_blk_mapping
),
6002 sizeof(struct host_hc_status_block_e1x
));
6004 set_sb_shortcuts(bp
, i
);
6007 for_each_queue(bp
, i
) {
6009 /* fastpath rx rings: rx_buf rx_desc rx_comp */
6010 BNX2X_ALLOC(bnx2x_fp(bp
, i
, rx_buf_ring
),
6011 sizeof(struct sw_rx_bd
) * NUM_RX_BD
);
6012 BNX2X_PCI_ALLOC(bnx2x_fp(bp
, i
, rx_desc_ring
),
6013 &bnx2x_fp(bp
, i
, rx_desc_mapping
),
6014 sizeof(struct eth_rx_bd
) * NUM_RX_BD
);
6016 BNX2X_PCI_ALLOC(bnx2x_fp(bp
, i
, rx_comp_ring
),
6017 &bnx2x_fp(bp
, i
, rx_comp_mapping
),
6018 sizeof(struct eth_fast_path_rx_cqe
) *
6022 BNX2X_ALLOC(bnx2x_fp(bp
, i
, rx_page_ring
),
6023 sizeof(struct sw_rx_page
) * NUM_RX_SGE
);
6024 BNX2X_PCI_ALLOC(bnx2x_fp(bp
, i
, rx_sge_ring
),
6025 &bnx2x_fp(bp
, i
, rx_sge_mapping
),
6026 BCM_PAGE_SIZE
* NUM_RX_SGE_PAGES
);
6029 for_each_queue(bp
, i
) {
6031 /* fastpath tx rings: tx_buf tx_desc */
6032 BNX2X_ALLOC(bnx2x_fp(bp
, i
, tx_buf_ring
),
6033 sizeof(struct sw_tx_bd
) * NUM_TX_BD
);
6034 BNX2X_PCI_ALLOC(bnx2x_fp(bp
, i
, tx_desc_ring
),
6035 &bnx2x_fp(bp
, i
, tx_desc_mapping
),
6036 sizeof(union eth_tx_bd_types
) * NUM_TX_BD
);
6038 /* end of fastpath */
6042 BNX2X_PCI_ALLOC(bp
->cnic_sb
.e2_sb
, &bp
->cnic_sb_mapping
,
6043 sizeof(struct host_hc_status_block_e2
));
6045 BNX2X_PCI_ALLOC(bp
->cnic_sb
.e1x_sb
, &bp
->cnic_sb_mapping
,
6046 sizeof(struct host_hc_status_block_e1x
));
6048 /* allocate searcher T2 table */
6049 BNX2X_PCI_ALLOC(bp
->t2
, &bp
->t2_mapping
, SRC_T2_SZ
);
6053 BNX2X_PCI_ALLOC(bp
->def_status_blk
, &bp
->def_status_blk_mapping
,
6054 sizeof(struct host_sp_status_block
));
6056 BNX2X_PCI_ALLOC(bp
->slowpath
, &bp
->slowpath_mapping
,
6057 sizeof(struct bnx2x_slowpath
));
6059 bp
->context
.size
= sizeof(union cdu_context
) * bp
->l2_cid_count
;
6061 BNX2X_PCI_ALLOC(bp
->context
.vcxt
, &bp
->context
.cxt_mapping
,
6064 BNX2X_ALLOC(bp
->ilt
->lines
, sizeof(struct ilt_line
) * ILT_MAX_LINES
);
6066 if (bnx2x_ilt_mem_op(bp
, ILT_MEMOP_ALLOC
))
6069 /* Slow path ring */
6070 BNX2X_PCI_ALLOC(bp
->spq
, &bp
->spq_mapping
, BCM_PAGE_SIZE
);
6073 BNX2X_PCI_ALLOC(bp
->eq_ring
, &bp
->eq_mapping
,
6074 BCM_PAGE_SIZE
* NUM_EQ_PAGES
);
6081 #undef BNX2X_PCI_ALLOC
6086 * Init service functions
6088 static int bnx2x_wait_ramrod(struct bnx2x
*bp
, int state
, int idx
,
6089 int *state_p
, int flags
);
6091 int bnx2x_func_start(struct bnx2x
*bp
)
6093 bnx2x_sp_post(bp
, RAMROD_CMD_ID_COMMON_FUNCTION_START
, 0, 0, 0, 1);
6095 /* Wait for completion */
6096 return bnx2x_wait_ramrod(bp
, BNX2X_STATE_FUNC_STARTED
, 0, &(bp
->state
),
6097 WAIT_RAMROD_COMMON
);
6100 static int bnx2x_func_stop(struct bnx2x
*bp
)
6102 bnx2x_sp_post(bp
, RAMROD_CMD_ID_COMMON_FUNCTION_STOP
, 0, 0, 0, 1);
6104 /* Wait for completion */
6105 return bnx2x_wait_ramrod(bp
, BNX2X_STATE_CLOSING_WAIT4_UNLOAD
,
6106 0, &(bp
->state
), WAIT_RAMROD_COMMON
);
6110 * Sets a MAC in a CAM for a few L2 Clients for E1x chips
6112 * @param bp driver descriptor
6113 * @param set set or clear an entry (1 or 0)
6114 * @param mac pointer to a buffer containing a MAC
6115 * @param cl_bit_vec bit vector of clients to register a MAC for
6116 * @param cam_offset offset in a CAM to use
6117 * @param is_bcast is the set MAC a broadcast address (for E1 only)
6119 static void bnx2x_set_mac_addr_gen(struct bnx2x
*bp
, int set
, u8
*mac
,
6120 u32 cl_bit_vec
, u8 cam_offset
,
6123 struct mac_configuration_cmd
*config
=
6124 (struct mac_configuration_cmd
*)bnx2x_sp(bp
, mac_config
);
6125 int ramrod_flags
= WAIT_RAMROD_COMMON
;
6127 bp
->set_mac_pending
= 1;
6130 config
->hdr
.length
= 1;
6131 config
->hdr
.offset
= cam_offset
;
6132 config
->hdr
.client_id
= 0xff;
6133 config
->hdr
.reserved1
= 0;
6136 config
->config_table
[0].msb_mac_addr
=
6137 swab16(*(u16
*)&mac
[0]);
6138 config
->config_table
[0].middle_mac_addr
=
6139 swab16(*(u16
*)&mac
[2]);
6140 config
->config_table
[0].lsb_mac_addr
=
6141 swab16(*(u16
*)&mac
[4]);
6142 config
->config_table
[0].clients_bit_vector
=
6143 cpu_to_le32(cl_bit_vec
);
6144 config
->config_table
[0].vlan_id
= 0;
6145 config
->config_table
[0].pf_id
= BP_FUNC(bp
);
6147 SET_FLAG(config
->config_table
[0].flags
,
6148 MAC_CONFIGURATION_ENTRY_ACTION_TYPE
,
6149 T_ETH_MAC_COMMAND_SET
);
6151 SET_FLAG(config
->config_table
[0].flags
,
6152 MAC_CONFIGURATION_ENTRY_ACTION_TYPE
,
6153 T_ETH_MAC_COMMAND_INVALIDATE
);
6156 SET_FLAG(config
->config_table
[0].flags
,
6157 MAC_CONFIGURATION_ENTRY_BROADCAST
, 1);
6159 DP(NETIF_MSG_IFUP
, "%s MAC (%04x:%04x:%04x) PF_ID %d CLID mask %d\n",
6160 (set
? "setting" : "clearing"),
6161 config
->config_table
[0].msb_mac_addr
,
6162 config
->config_table
[0].middle_mac_addr
,
6163 config
->config_table
[0].lsb_mac_addr
, BP_FUNC(bp
), cl_bit_vec
);
6165 bnx2x_sp_post(bp
, RAMROD_CMD_ID_COMMON_SET_MAC
, 0,
6166 U64_HI(bnx2x_sp_mapping(bp
, mac_config
)),
6167 U64_LO(bnx2x_sp_mapping(bp
, mac_config
)), 1);
6169 /* Wait for a completion */
6170 bnx2x_wait_ramrod(bp
, 0, 0, &bp
->set_mac_pending
, ramrod_flags
);
6173 static int bnx2x_wait_ramrod(struct bnx2x
*bp
, int state
, int idx
,
6174 int *state_p
, int flags
)
6176 /* can take a while if any port is running */
6178 u8 poll
= flags
& WAIT_RAMROD_POLL
;
6179 u8 common
= flags
& WAIT_RAMROD_COMMON
;
6181 DP(NETIF_MSG_IFUP
, "%s for state to become %x on IDX [%d]\n",
6182 poll
? "polling" : "waiting", state
, idx
);
6190 bnx2x_rx_int(bp
->fp
, 10);
6191 /* if index is different from 0
6192 * the reply for some commands will
6193 * be on the non default queue
6196 bnx2x_rx_int(&bp
->fp
[idx
], 10);
6200 mb(); /* state is changed by bnx2x_sp_event() */
6201 if (*state_p
== state
) {
6202 #ifdef BNX2X_STOP_ON_ERROR
6203 DP(NETIF_MSG_IFUP
, "exit (cnt %d)\n", 5000 - cnt
);
6215 BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
6216 poll
? "polling" : "waiting", state
, idx
);
6217 #ifdef BNX2X_STOP_ON_ERROR
6224 static u8
bnx2x_e1h_cam_offset(struct bnx2x
*bp
, u8 rel_offset
)
6226 if (CHIP_IS_E1H(bp
))
6227 return E1H_FUNC_MAX
* rel_offset
+ BP_FUNC(bp
);
6228 else if (CHIP_MODE_IS_4_PORT(bp
))
6229 return BP_FUNC(bp
) * 32 + rel_offset
;
6231 return BP_VN(bp
) * 32 + rel_offset
;
6235 * LLH CAM line allocations: currently only iSCSI and ETH macs are
6236 * relevant. In addition, current implementation is tuned for a
6239 * When multiple unicast ETH MACs PF configuration in switch
6240 * independent mode is required (NetQ, multiple netdev MACs,
6241 * etc.), consider better utilisation of 16 per function MAC
6242 * entries in the LLH memory.
6245 LLH_CAM_ISCSI_ETH_LINE
= 0,
6247 LLH_CAM_MAX_PF_LINE
= NIG_REG_LLH1_FUNC_MEM_SIZE
6250 static void bnx2x_set_mac_in_nig(struct bnx2x
*bp
,
6252 unsigned char *dev_addr
,
6256 u32 mem_offset
, ena_offset
, mem_index
;
6259 * 0..7 - goes to MEM
6260 * 8..15 - goes to MEM2
6263 if (!IS_MF_SI(bp
) || index
> LLH_CAM_MAX_PF_LINE
)
6266 /* calculate memory start offset according to the mapping
6267 * and index in the memory */
6268 if (index
< NIG_LLH_FUNC_MEM_MAX_OFFSET
) {
6269 mem_offset
= BP_PORT(bp
) ? NIG_REG_LLH1_FUNC_MEM
:
6270 NIG_REG_LLH0_FUNC_MEM
;
6271 ena_offset
= BP_PORT(bp
) ? NIG_REG_LLH1_FUNC_MEM_ENABLE
:
6272 NIG_REG_LLH0_FUNC_MEM_ENABLE
;
6275 mem_offset
= BP_PORT(bp
) ? NIG_REG_P1_LLH_FUNC_MEM2
:
6276 NIG_REG_P0_LLH_FUNC_MEM2
;
6277 ena_offset
= BP_PORT(bp
) ? NIG_REG_P1_LLH_FUNC_MEM2_ENABLE
:
6278 NIG_REG_P0_LLH_FUNC_MEM2_ENABLE
;
6279 mem_index
= index
- NIG_LLH_FUNC_MEM_MAX_OFFSET
;
6283 /* LLH_FUNC_MEM is a u64 WB register */
6284 mem_offset
+= 8*mem_index
;
6286 wb_data
[0] = ((dev_addr
[2] << 24) | (dev_addr
[3] << 16) |
6287 (dev_addr
[4] << 8) | dev_addr
[5]);
6288 wb_data
[1] = ((dev_addr
[0] << 8) | dev_addr
[1]);
6290 REG_WR_DMAE(bp
, mem_offset
, wb_data
, 2);
6293 /* enable/disable the entry */
6294 REG_WR(bp
, ena_offset
+ 4*mem_index
, set
);
6298 void bnx2x_set_eth_mac(struct bnx2x
*bp
, int set
)
6300 u8 cam_offset
= (CHIP_IS_E1(bp
) ? (BP_PORT(bp
) ? 32 : 0) :
6301 bnx2x_e1h_cam_offset(bp
, CAM_ETH_LINE
));
6303 /* networking MAC */
6304 bnx2x_set_mac_addr_gen(bp
, set
, bp
->dev
->dev_addr
,
6305 (1 << bp
->fp
->cl_id
), cam_offset
, 0);
6307 bnx2x_set_mac_in_nig(bp
, set
, bp
->dev
->dev_addr
, LLH_CAM_ETH_LINE
);
6309 if (CHIP_IS_E1(bp
)) {
6311 u8 bcast
[ETH_ALEN
] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
6312 bnx2x_set_mac_addr_gen(bp
, set
, bcast
, 0, cam_offset
+ 1, 1);
6315 static void bnx2x_set_e1_mc_list(struct bnx2x
*bp
, u8 offset
)
6318 struct net_device
*dev
= bp
->dev
;
6319 struct netdev_hw_addr
*ha
;
6320 struct mac_configuration_cmd
*config_cmd
= bnx2x_sp(bp
, mcast_config
);
6321 dma_addr_t config_cmd_map
= bnx2x_sp_mapping(bp
, mcast_config
);
6323 netdev_for_each_mc_addr(ha
, dev
) {
6325 config_cmd
->config_table
[i
].msb_mac_addr
=
6326 swab16(*(u16
*)&bnx2x_mc_addr(ha
)[0]);
6327 config_cmd
->config_table
[i
].middle_mac_addr
=
6328 swab16(*(u16
*)&bnx2x_mc_addr(ha
)[2]);
6329 config_cmd
->config_table
[i
].lsb_mac_addr
=
6330 swab16(*(u16
*)&bnx2x_mc_addr(ha
)[4]);
6332 config_cmd
->config_table
[i
].vlan_id
= 0;
6333 config_cmd
->config_table
[i
].pf_id
= BP_FUNC(bp
);
6334 config_cmd
->config_table
[i
].clients_bit_vector
=
6335 cpu_to_le32(1 << BP_L_ID(bp
));
6337 SET_FLAG(config_cmd
->config_table
[i
].flags
,
6338 MAC_CONFIGURATION_ENTRY_ACTION_TYPE
,
6339 T_ETH_MAC_COMMAND_SET
);
6342 "setting MCAST[%d] (%04x:%04x:%04x)\n", i
,
6343 config_cmd
->config_table
[i
].msb_mac_addr
,
6344 config_cmd
->config_table
[i
].middle_mac_addr
,
6345 config_cmd
->config_table
[i
].lsb_mac_addr
);
6348 old
= config_cmd
->hdr
.length
;
6350 for (; i
< old
; i
++) {
6351 if (CAM_IS_INVALID(config_cmd
->
6353 /* already invalidated */
6357 SET_FLAG(config_cmd
->config_table
[i
].flags
,
6358 MAC_CONFIGURATION_ENTRY_ACTION_TYPE
,
6359 T_ETH_MAC_COMMAND_INVALIDATE
);
6363 config_cmd
->hdr
.length
= i
;
6364 config_cmd
->hdr
.offset
= offset
;
6365 config_cmd
->hdr
.client_id
= 0xff;
6366 config_cmd
->hdr
.reserved1
= 0;
6368 bp
->set_mac_pending
= 1;
6371 bnx2x_sp_post(bp
, RAMROD_CMD_ID_COMMON_SET_MAC
, 0,
6372 U64_HI(config_cmd_map
), U64_LO(config_cmd_map
), 1);
6374 static void bnx2x_invlidate_e1_mc_list(struct bnx2x
*bp
)
6377 struct mac_configuration_cmd
*config_cmd
= bnx2x_sp(bp
, mcast_config
);
6378 dma_addr_t config_cmd_map
= bnx2x_sp_mapping(bp
, mcast_config
);
6379 int ramrod_flags
= WAIT_RAMROD_COMMON
;
6381 bp
->set_mac_pending
= 1;
6384 for (i
= 0; i
< config_cmd
->hdr
.length
; i
++)
6385 SET_FLAG(config_cmd
->config_table
[i
].flags
,
6386 MAC_CONFIGURATION_ENTRY_ACTION_TYPE
,
6387 T_ETH_MAC_COMMAND_INVALIDATE
);
6389 bnx2x_sp_post(bp
, RAMROD_CMD_ID_COMMON_SET_MAC
, 0,
6390 U64_HI(config_cmd_map
), U64_LO(config_cmd_map
), 1);
6392 /* Wait for a completion */
6393 bnx2x_wait_ramrod(bp
, 0, 0, &bp
->set_mac_pending
,
6400 * Set iSCSI MAC(s) at the next enties in the CAM after the ETH
6401 * MAC(s). This function will wait until the ramdord completion
6404 * @param bp driver handle
6405 * @param set set or clear the CAM entry
6407 * @return 0 if cussess, -ENODEV if ramrod doesn't return.
6409 static int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x
*bp
, int set
)
6411 u8 cam_offset
= (CHIP_IS_E1(bp
) ? ((BP_PORT(bp
) ? 32 : 0) + 2) :
6412 bnx2x_e1h_cam_offset(bp
, CAM_ISCSI_ETH_LINE
));
6413 u32 iscsi_l2_cl_id
= BNX2X_ISCSI_ETH_CL_ID
;
6414 u32 cl_bit_vec
= (1 << iscsi_l2_cl_id
);
6416 /* Send a SET_MAC ramrod */
6417 bnx2x_set_mac_addr_gen(bp
, set
, bp
->iscsi_mac
, cl_bit_vec
,
6420 bnx2x_set_mac_in_nig(bp
, set
, bp
->iscsi_mac
, LLH_CAM_ISCSI_ETH_LINE
);
6425 static void bnx2x_fill_cl_init_data(struct bnx2x
*bp
,
6426 struct bnx2x_client_init_params
*params
,
6428 struct client_init_ramrod_data
*data
)
6430 /* Clear the buffer */
6431 memset(data
, 0, sizeof(*data
));
6434 data
->general
.client_id
= params
->rxq_params
.cl_id
;
6435 data
->general
.statistics_counter_id
= params
->rxq_params
.stat_id
;
6436 data
->general
.statistics_en_flg
=
6437 (params
->rxq_params
.flags
& QUEUE_FLG_STATS
) ? 1 : 0;
6438 data
->general
.activate_flg
= activate
;
6439 data
->general
.sp_client_id
= params
->rxq_params
.spcl_id
;
6442 data
->rx
.tpa_en_flg
=
6443 (params
->rxq_params
.flags
& QUEUE_FLG_TPA
) ? 1 : 0;
6444 data
->rx
.vmqueue_mode_en_flg
= 0;
6445 data
->rx
.cache_line_alignment_log_size
=
6446 params
->rxq_params
.cache_line_log
;
6447 data
->rx
.enable_dynamic_hc
=
6448 (params
->rxq_params
.flags
& QUEUE_FLG_DHC
) ? 1 : 0;
6449 data
->rx
.max_sges_for_packet
= params
->rxq_params
.max_sges_pkt
;
6450 data
->rx
.client_qzone_id
= params
->rxq_params
.cl_qzone_id
;
6451 data
->rx
.max_agg_size
= params
->rxq_params
.tpa_agg_sz
;
6453 /* We don't set drop flags */
6454 data
->rx
.drop_ip_cs_err_flg
= 0;
6455 data
->rx
.drop_tcp_cs_err_flg
= 0;
6456 data
->rx
.drop_ttl0_flg
= 0;
6457 data
->rx
.drop_udp_cs_err_flg
= 0;
6459 data
->rx
.inner_vlan_removal_enable_flg
=
6460 (params
->rxq_params
.flags
& QUEUE_FLG_VLAN
) ? 1 : 0;
6461 data
->rx
.outer_vlan_removal_enable_flg
=
6462 (params
->rxq_params
.flags
& QUEUE_FLG_OV
) ? 1 : 0;
6463 data
->rx
.status_block_id
= params
->rxq_params
.fw_sb_id
;
6464 data
->rx
.rx_sb_index_number
= params
->rxq_params
.sb_cq_index
;
6465 data
->rx
.bd_buff_size
= cpu_to_le16(params
->rxq_params
.buf_sz
);
6466 data
->rx
.sge_buff_size
= cpu_to_le16(params
->rxq_params
.sge_buf_sz
);
6467 data
->rx
.mtu
= cpu_to_le16(params
->rxq_params
.mtu
);
6468 data
->rx
.bd_page_base
.lo
=
6469 cpu_to_le32(U64_LO(params
->rxq_params
.dscr_map
));
6470 data
->rx
.bd_page_base
.hi
=
6471 cpu_to_le32(U64_HI(params
->rxq_params
.dscr_map
));
6472 data
->rx
.sge_page_base
.lo
=
6473 cpu_to_le32(U64_LO(params
->rxq_params
.sge_map
));
6474 data
->rx
.sge_page_base
.hi
=
6475 cpu_to_le32(U64_HI(params
->rxq_params
.sge_map
));
6476 data
->rx
.cqe_page_base
.lo
=
6477 cpu_to_le32(U64_LO(params
->rxq_params
.rcq_map
));
6478 data
->rx
.cqe_page_base
.hi
=
6479 cpu_to_le32(U64_HI(params
->rxq_params
.rcq_map
));
6480 data
->rx
.is_leading_rss
=
6481 (params
->ramrod_params
.flags
& CLIENT_IS_LEADING_RSS
) ? 1 : 0;
6482 data
->rx
.is_approx_mcast
= data
->rx
.is_leading_rss
;
6485 data
->tx
.enforce_security_flg
= 0; /* VF specific */
6486 data
->tx
.tx_status_block_id
= params
->txq_params
.fw_sb_id
;
6487 data
->tx
.tx_sb_index_number
= params
->txq_params
.sb_cq_index
;
6488 data
->tx
.mtu
= 0; /* VF specific */
6489 data
->tx
.tx_bd_page_base
.lo
=
6490 cpu_to_le32(U64_LO(params
->txq_params
.dscr_map
));
6491 data
->tx
.tx_bd_page_base
.hi
=
6492 cpu_to_le32(U64_HI(params
->txq_params
.dscr_map
));
6494 /* flow control data */
6495 data
->fc
.cqe_pause_thr_low
= cpu_to_le16(params
->pause
.rcq_th_lo
);
6496 data
->fc
.cqe_pause_thr_high
= cpu_to_le16(params
->pause
.rcq_th_hi
);
6497 data
->fc
.bd_pause_thr_low
= cpu_to_le16(params
->pause
.bd_th_lo
);
6498 data
->fc
.bd_pause_thr_high
= cpu_to_le16(params
->pause
.bd_th_hi
);
6499 data
->fc
.sge_pause_thr_low
= cpu_to_le16(params
->pause
.sge_th_lo
);
6500 data
->fc
.sge_pause_thr_high
= cpu_to_le16(params
->pause
.sge_th_hi
);
6501 data
->fc
.rx_cos_mask
= cpu_to_le16(params
->pause
.pri_map
);
6503 data
->fc
.safc_group_num
= params
->txq_params
.cos
;
6504 data
->fc
.safc_group_en_flg
=
6505 (params
->txq_params
.flags
& QUEUE_FLG_COS
) ? 1 : 0;
6506 data
->fc
.traffic_type
= LLFC_TRAFFIC_TYPE_NW
;
6509 static inline void bnx2x_set_ctx_validation(struct eth_context
*cxt
, u32 cid
)
6511 /* ustorm cxt validation */
6512 cxt
->ustorm_ag_context
.cdu_usage
=
6513 CDU_RSRVD_VALUE_TYPE_A(cid
, CDU_REGION_NUMBER_UCM_AG
,
6514 ETH_CONNECTION_TYPE
);
6515 /* xcontext validation */
6516 cxt
->xstorm_ag_context
.cdu_reserved
=
6517 CDU_RSRVD_VALUE_TYPE_A(cid
, CDU_REGION_NUMBER_XCM_AG
,
6518 ETH_CONNECTION_TYPE
);
6521 static int bnx2x_setup_fw_client(struct bnx2x
*bp
,
6522 struct bnx2x_client_init_params
*params
,
6524 struct client_init_ramrod_data
*data
,
6525 dma_addr_t data_mapping
)
6528 int ramrod
= RAMROD_CMD_ID_ETH_CLIENT_SETUP
;
6529 int ramrod_flags
= 0, rc
;
6531 /* HC and context validation values */
6532 hc_usec
= params
->txq_params
.hc_rate
?
6533 1000000 / params
->txq_params
.hc_rate
: 0;
6534 bnx2x_update_coalesce_sb_index(bp
,
6535 params
->txq_params
.fw_sb_id
,
6536 params
->txq_params
.sb_cq_index
,
6537 !(params
->txq_params
.flags
& QUEUE_FLG_HC
),
6540 *(params
->ramrod_params
.pstate
) = BNX2X_FP_STATE_OPENING
;
6542 hc_usec
= params
->rxq_params
.hc_rate
?
6543 1000000 / params
->rxq_params
.hc_rate
: 0;
6544 bnx2x_update_coalesce_sb_index(bp
,
6545 params
->rxq_params
.fw_sb_id
,
6546 params
->rxq_params
.sb_cq_index
,
6547 !(params
->rxq_params
.flags
& QUEUE_FLG_HC
),
6550 bnx2x_set_ctx_validation(params
->rxq_params
.cxt
,
6551 params
->rxq_params
.cid
);
6554 if (params
->txq_params
.flags
& QUEUE_FLG_STATS
)
6555 storm_memset_xstats_zero(bp
, BP_PORT(bp
),
6556 params
->txq_params
.stat_id
);
6558 if (params
->rxq_params
.flags
& QUEUE_FLG_STATS
) {
6559 storm_memset_ustats_zero(bp
, BP_PORT(bp
),
6560 params
->rxq_params
.stat_id
);
6561 storm_memset_tstats_zero(bp
, BP_PORT(bp
),
6562 params
->rxq_params
.stat_id
);
6565 /* Fill the ramrod data */
6566 bnx2x_fill_cl_init_data(bp
, params
, activate
, data
);
6570 * bnx2x_sp_post() takes a spin_lock thus no other explict memory
6571 * barrier except from mmiowb() is needed to impose a
6572 * proper ordering of memory operations.
6577 bnx2x_sp_post(bp
, ramrod
, params
->ramrod_params
.cid
,
6578 U64_HI(data_mapping
), U64_LO(data_mapping
), 0);
6580 /* Wait for completion */
6581 rc
= bnx2x_wait_ramrod(bp
, params
->ramrod_params
.state
,
6582 params
->ramrod_params
.index
,
6583 params
->ramrod_params
.pstate
,
6589 * Configure interrupt mode according to current configuration.
6590 * In case of MSI-X it will also try to enable MSI-X.
6596 static int __devinit
bnx2x_set_int_mode(struct bnx2x
*bp
)
6600 switch (bp
->int_mode
) {
6602 bnx2x_enable_msi(bp
);
6603 /* falling through... */
6606 DP(NETIF_MSG_IFUP
, "set number of queues to 1\n");
6609 /* Set number of queues according to bp->multi_mode value */
6610 bnx2x_set_num_queues(bp
);
6612 DP(NETIF_MSG_IFUP
, "set number of queues to %d\n",
6615 /* if we can't use MSI-X we only need one fp,
6616 * so try to enable MSI-X with the requested number of fp's
6617 * and fallback to MSI or legacy INTx with one fp
6619 rc
= bnx2x_enable_msix(bp
);
6621 /* failed to enable MSI-X */
6624 "Multi requested but failed to "
6625 "enable MSI-X (%d), "
6626 "set number of queues to %d\n",
6631 if (!(bp
->flags
& DISABLE_MSI_FLAG
))
6632 bnx2x_enable_msi(bp
);
6641 /* must be called prioir to any HW initializations */
6642 static inline u16
bnx2x_cid_ilt_lines(struct bnx2x
*bp
)
6644 return L2_ILT_LINES(bp
);
6647 void bnx2x_ilt_set_info(struct bnx2x
*bp
)
6649 struct ilt_client_info
*ilt_client
;
6650 struct bnx2x_ilt
*ilt
= BP_ILT(bp
);
6653 ilt
->start_line
= FUNC_ILT_BASE(BP_FUNC(bp
));
6654 DP(BNX2X_MSG_SP
, "ilt starts at line %d\n", ilt
->start_line
);
6657 ilt_client
= &ilt
->clients
[ILT_CLIENT_CDU
];
6658 ilt_client
->client_num
= ILT_CLIENT_CDU
;
6659 ilt_client
->page_size
= CDU_ILT_PAGE_SZ
;
6660 ilt_client
->flags
= ILT_CLIENT_SKIP_MEM
;
6661 ilt_client
->start
= line
;
6662 line
+= L2_ILT_LINES(bp
);
6664 line
+= CNIC_ILT_LINES
;
6666 ilt_client
->end
= line
- 1;
6668 DP(BNX2X_MSG_SP
, "ilt client[CDU]: start %d, end %d, psz 0x%x, "
6669 "flags 0x%x, hw psz %d\n",
6672 ilt_client
->page_size
,
6674 ilog2(ilt_client
->page_size
>> 12));
6677 if (QM_INIT(bp
->qm_cid_count
)) {
6678 ilt_client
= &ilt
->clients
[ILT_CLIENT_QM
];
6679 ilt_client
->client_num
= ILT_CLIENT_QM
;
6680 ilt_client
->page_size
= QM_ILT_PAGE_SZ
;
6681 ilt_client
->flags
= 0;
6682 ilt_client
->start
= line
;
6684 /* 4 bytes for each cid */
6685 line
+= DIV_ROUND_UP(bp
->qm_cid_count
* QM_QUEUES_PER_FUNC
* 4,
6688 ilt_client
->end
= line
- 1;
6690 DP(BNX2X_MSG_SP
, "ilt client[QM]: start %d, end %d, psz 0x%x, "
6691 "flags 0x%x, hw psz %d\n",
6694 ilt_client
->page_size
,
6696 ilog2(ilt_client
->page_size
>> 12));
6700 ilt_client
= &ilt
->clients
[ILT_CLIENT_SRC
];
6702 ilt_client
->client_num
= ILT_CLIENT_SRC
;
6703 ilt_client
->page_size
= SRC_ILT_PAGE_SZ
;
6704 ilt_client
->flags
= 0;
6705 ilt_client
->start
= line
;
6706 line
+= SRC_ILT_LINES
;
6707 ilt_client
->end
= line
- 1;
6709 DP(BNX2X_MSG_SP
, "ilt client[SRC]: start %d, end %d, psz 0x%x, "
6710 "flags 0x%x, hw psz %d\n",
6713 ilt_client
->page_size
,
6715 ilog2(ilt_client
->page_size
>> 12));
6718 ilt_client
->flags
= (ILT_CLIENT_SKIP_INIT
| ILT_CLIENT_SKIP_MEM
);
6722 ilt_client
= &ilt
->clients
[ILT_CLIENT_TM
];
6724 ilt_client
->client_num
= ILT_CLIENT_TM
;
6725 ilt_client
->page_size
= TM_ILT_PAGE_SZ
;
6726 ilt_client
->flags
= 0;
6727 ilt_client
->start
= line
;
6728 line
+= TM_ILT_LINES
;
6729 ilt_client
->end
= line
- 1;
6731 DP(BNX2X_MSG_SP
, "ilt client[TM]: start %d, end %d, psz 0x%x, "
6732 "flags 0x%x, hw psz %d\n",
6735 ilt_client
->page_size
,
6737 ilog2(ilt_client
->page_size
>> 12));
6740 ilt_client
->flags
= (ILT_CLIENT_SKIP_INIT
| ILT_CLIENT_SKIP_MEM
);
6744 int bnx2x_setup_client(struct bnx2x
*bp
, struct bnx2x_fastpath
*fp
,
6747 struct bnx2x_client_init_params params
= { {0} };
6750 bnx2x_ack_sb(bp
, fp
->igu_sb_id
, USTORM_ID
, 0,
6753 params
.ramrod_params
.pstate
= &fp
->state
;
6754 params
.ramrod_params
.state
= BNX2X_FP_STATE_OPEN
;
6755 params
.ramrod_params
.index
= fp
->index
;
6756 params
.ramrod_params
.cid
= fp
->cid
;
6759 params
.ramrod_params
.flags
|= CLIENT_IS_LEADING_RSS
;
6761 bnx2x_pf_rx_cl_prep(bp
, fp
, ¶ms
.pause
, ¶ms
.rxq_params
);
6763 bnx2x_pf_tx_cl_prep(bp
, fp
, ¶ms
.txq_params
);
6765 rc
= bnx2x_setup_fw_client(bp
, ¶ms
, 1,
6766 bnx2x_sp(bp
, client_init_data
),
6767 bnx2x_sp_mapping(bp
, client_init_data
));
6771 static int bnx2x_stop_fw_client(struct bnx2x
*bp
,
6772 struct bnx2x_client_ramrod_params
*p
)
6776 int poll_flag
= p
->poll
? WAIT_RAMROD_POLL
: 0;
6778 /* halt the connection */
6779 *p
->pstate
= BNX2X_FP_STATE_HALTING
;
6780 bnx2x_sp_post(bp
, RAMROD_CMD_ID_ETH_HALT
, p
->cid
, 0,
6783 /* Wait for completion */
6784 rc
= bnx2x_wait_ramrod(bp
, BNX2X_FP_STATE_HALTED
, p
->index
,
6785 p
->pstate
, poll_flag
);
6786 if (rc
) /* timeout */
6789 *p
->pstate
= BNX2X_FP_STATE_TERMINATING
;
6790 bnx2x_sp_post(bp
, RAMROD_CMD_ID_ETH_TERMINATE
, p
->cid
, 0,
6792 /* Wait for completion */
6793 rc
= bnx2x_wait_ramrod(bp
, BNX2X_FP_STATE_TERMINATED
, p
->index
,
6794 p
->pstate
, poll_flag
);
6795 if (rc
) /* timeout */
6799 /* delete cfc entry */
6800 bnx2x_sp_post(bp
, RAMROD_CMD_ID_COMMON_CFC_DEL
, p
->cid
, 0, 0, 1);
6802 /* Wait for completion */
6803 rc
= bnx2x_wait_ramrod(bp
, BNX2X_FP_STATE_CLOSED
, p
->index
,
6804 p
->pstate
, WAIT_RAMROD_COMMON
);
6808 static int bnx2x_stop_client(struct bnx2x
*bp
, int index
)
6810 struct bnx2x_client_ramrod_params client_stop
= {0};
6811 struct bnx2x_fastpath
*fp
= &bp
->fp
[index
];
6813 client_stop
.index
= index
;
6814 client_stop
.cid
= fp
->cid
;
6815 client_stop
.cl_id
= fp
->cl_id
;
6816 client_stop
.pstate
= &(fp
->state
);
6817 client_stop
.poll
= 0;
6819 return bnx2x_stop_fw_client(bp
, &client_stop
);
6823 static void bnx2x_reset_func(struct bnx2x
*bp
)
6825 int port
= BP_PORT(bp
);
6826 int func
= BP_FUNC(bp
);
6828 int pfunc_offset_fp
= offsetof(struct hc_sb_data
, p_func
) +
6830 offsetof(struct hc_status_block_data_e2
, common
) :
6831 offsetof(struct hc_status_block_data_e1x
, common
));
6832 int pfunc_offset_sp
= offsetof(struct hc_sp_status_block_data
, p_func
);
6833 int pfid_offset
= offsetof(struct pci_entity
, pf_id
);
6835 /* Disable the function in the FW */
6836 REG_WR8(bp
, BAR_XSTRORM_INTMEM
+ XSTORM_FUNC_EN_OFFSET(func
), 0);
6837 REG_WR8(bp
, BAR_CSTRORM_INTMEM
+ CSTORM_FUNC_EN_OFFSET(func
), 0);
6838 REG_WR8(bp
, BAR_TSTRORM_INTMEM
+ TSTORM_FUNC_EN_OFFSET(func
), 0);
6839 REG_WR8(bp
, BAR_USTRORM_INTMEM
+ USTORM_FUNC_EN_OFFSET(func
), 0);
6842 for_each_queue(bp
, i
) {
6843 struct bnx2x_fastpath
*fp
= &bp
->fp
[i
];
6845 BAR_CSTRORM_INTMEM
+
6846 CSTORM_STATUS_BLOCK_DATA_OFFSET(fp
->fw_sb_id
)
6847 + pfunc_offset_fp
+ pfid_offset
,
6848 HC_FUNCTION_DISABLED
);
6853 BAR_CSTRORM_INTMEM
+
6854 CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func
) +
6855 pfunc_offset_sp
+ pfid_offset
,
6856 HC_FUNCTION_DISABLED
);
6859 for (i
= 0; i
< XSTORM_SPQ_DATA_SIZE
/ 4; i
++)
6860 REG_WR(bp
, BAR_XSTRORM_INTMEM
+ XSTORM_SPQ_DATA_OFFSET(func
),
6864 if (bp
->common
.int_block
== INT_BLOCK_HC
) {
6865 REG_WR(bp
, HC_REG_LEADING_EDGE_0
+ port
*8, 0);
6866 REG_WR(bp
, HC_REG_TRAILING_EDGE_0
+ port
*8, 0);
6868 REG_WR(bp
, IGU_REG_LEADING_EDGE_LATCH
, 0);
6869 REG_WR(bp
, IGU_REG_TRAILING_EDGE_LATCH
, 0);
6873 /* Disable Timer scan */
6874 REG_WR(bp
, TM_REG_EN_LINEAR0_TIMER
+ port
*4, 0);
6876 * Wait for at least 10ms and up to 2 second for the timers scan to
6879 for (i
= 0; i
< 200; i
++) {
6881 if (!REG_RD(bp
, TM_REG_LIN0_SCAN_ON
+ port
*4))
6886 bnx2x_clear_func_ilt(bp
, func
);
6888 /* Timers workaround bug for E2: if this is vnic-3,
6889 * we need to set the entire ilt range for this timers.
6891 if (CHIP_IS_E2(bp
) && BP_VN(bp
) == 3) {
6892 struct ilt_client_info ilt_cli
;
6893 /* use dummy TM client */
6894 memset(&ilt_cli
, 0, sizeof(struct ilt_client_info
));
6896 ilt_cli
.end
= ILT_NUM_PAGE_ENTRIES
- 1;
6897 ilt_cli
.client_num
= ILT_CLIENT_TM
;
6899 bnx2x_ilt_boundry_init_op(bp
, &ilt_cli
, 0, INITOP_CLEAR
);
6902 /* this assumes that reset_port() called before reset_func()*/
6904 bnx2x_pf_disable(bp
);
6909 static void bnx2x_reset_port(struct bnx2x
*bp
)
6911 int port
= BP_PORT(bp
);
6914 REG_WR(bp
, NIG_REG_MASK_INTERRUPT_PORT0
+ port
*4, 0);
6916 /* Do not rcv packets to BRB */
6917 REG_WR(bp
, NIG_REG_LLH0_BRB1_DRV_MASK
+ port
*4, 0x0);
6918 /* Do not direct rcv packets that are not for MCP to the BRB */
6919 REG_WR(bp
, (port
? NIG_REG_LLH1_BRB1_NOT_MCP
:
6920 NIG_REG_LLH0_BRB1_NOT_MCP
), 0x0);
6923 REG_WR(bp
, MISC_REG_AEU_MASK_ATTN_FUNC_0
+ port
*4, 0);
6926 /* Check for BRB port occupancy */
6927 val
= REG_RD(bp
, BRB1_REG_PORT_NUM_OCC_BLOCKS_0
+ port
*4);
6929 DP(NETIF_MSG_IFDOWN
,
6930 "BRB1 is not empty %d blocks are occupied\n", val
);
6932 /* TODO: Close Doorbell port? */
6935 static void bnx2x_reset_chip(struct bnx2x
*bp
, u32 reset_code
)
6937 DP(BNX2X_MSG_MCP
, "function %d reset_code %x\n",
6938 BP_ABS_FUNC(bp
), reset_code
);
6940 switch (reset_code
) {
6941 case FW_MSG_CODE_DRV_UNLOAD_COMMON
:
6942 bnx2x_reset_port(bp
);
6943 bnx2x_reset_func(bp
);
6944 bnx2x_reset_common(bp
);
6947 case FW_MSG_CODE_DRV_UNLOAD_PORT
:
6948 bnx2x_reset_port(bp
);
6949 bnx2x_reset_func(bp
);
6952 case FW_MSG_CODE_DRV_UNLOAD_FUNCTION
:
6953 bnx2x_reset_func(bp
);
6957 BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code
);
6962 void bnx2x_chip_cleanup(struct bnx2x
*bp
, int unload_mode
)
6964 int port
= BP_PORT(bp
);
6968 /* Wait until tx fastpath tasks complete */
6969 for_each_queue(bp
, i
) {
6970 struct bnx2x_fastpath
*fp
= &bp
->fp
[i
];
6973 while (bnx2x_has_tx_work_unload(fp
)) {
6976 BNX2X_ERR("timeout waiting for queue[%d]\n",
6978 #ifdef BNX2X_STOP_ON_ERROR
6989 /* Give HW time to discard old tx messages */
6992 if (CHIP_IS_E1(bp
)) {
6993 /* invalidate mc list,
6994 * wait and poll (interrupts are off)
6996 bnx2x_invlidate_e1_mc_list(bp
);
6997 bnx2x_set_eth_mac(bp
, 0);
7000 REG_WR(bp
, NIG_REG_LLH0_FUNC_EN
+ port
*8, 0);
7002 bnx2x_set_eth_mac(bp
, 0);
7004 for (i
= 0; i
< MC_HASH_SIZE
; i
++)
7005 REG_WR(bp
, MC_HASH_OFFSET(bp
, i
), 0);
7009 /* Clear iSCSI L2 MAC */
7010 mutex_lock(&bp
->cnic_mutex
);
7011 if (bp
->cnic_flags
& BNX2X_CNIC_FLAG_MAC_SET
) {
7012 bnx2x_set_iscsi_eth_mac_addr(bp
, 0);
7013 bp
->cnic_flags
&= ~BNX2X_CNIC_FLAG_MAC_SET
;
7015 mutex_unlock(&bp
->cnic_mutex
);
7018 if (unload_mode
== UNLOAD_NORMAL
)
7019 reset_code
= DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS
;
7021 else if (bp
->flags
& NO_WOL_FLAG
)
7022 reset_code
= DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP
;
7025 u32 emac_base
= port
? GRCBASE_EMAC1
: GRCBASE_EMAC0
;
7026 u8
*mac_addr
= bp
->dev
->dev_addr
;
7028 /* The mac address is written to entries 1-4 to
7029 preserve entry 0 which is used by the PMF */
7030 u8 entry
= (BP_E1HVN(bp
) + 1)*8;
7032 val
= (mac_addr
[0] << 8) | mac_addr
[1];
7033 EMAC_WR(bp
, EMAC_REG_EMAC_MAC_MATCH
+ entry
, val
);
7035 val
= (mac_addr
[2] << 24) | (mac_addr
[3] << 16) |
7036 (mac_addr
[4] << 8) | mac_addr
[5];
7037 EMAC_WR(bp
, EMAC_REG_EMAC_MAC_MATCH
+ entry
+ 4, val
);
7039 reset_code
= DRV_MSG_CODE_UNLOAD_REQ_WOL_EN
;
7042 reset_code
= DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS
;
7044 /* Close multi and leading connections
7045 Completions for ramrods are collected in a synchronous way */
7046 for_each_queue(bp
, i
)
7048 if (bnx2x_stop_client(bp
, i
))
7049 #ifdef BNX2X_STOP_ON_ERROR
7055 rc
= bnx2x_func_stop(bp
);
7057 BNX2X_ERR("Function stop failed!\n");
7058 #ifdef BNX2X_STOP_ON_ERROR
7064 #ifndef BNX2X_STOP_ON_ERROR
7068 reset_code
= bnx2x_fw_command(bp
, reset_code
, 0);
7070 DP(NETIF_MSG_IFDOWN
, "NO MCP - load counts[%d] "
7071 "%d, %d, %d\n", BP_PATH(bp
),
7072 load_count
[BP_PATH(bp
)][0],
7073 load_count
[BP_PATH(bp
)][1],
7074 load_count
[BP_PATH(bp
)][2]);
7075 load_count
[BP_PATH(bp
)][0]--;
7076 load_count
[BP_PATH(bp
)][1 + port
]--;
7077 DP(NETIF_MSG_IFDOWN
, "NO MCP - new load counts[%d] "
7078 "%d, %d, %d\n", BP_PATH(bp
),
7079 load_count
[BP_PATH(bp
)][0], load_count
[BP_PATH(bp
)][1],
7080 load_count
[BP_PATH(bp
)][2]);
7081 if (load_count
[BP_PATH(bp
)][0] == 0)
7082 reset_code
= FW_MSG_CODE_DRV_UNLOAD_COMMON
;
7083 else if (load_count
[BP_PATH(bp
)][1 + port
] == 0)
7084 reset_code
= FW_MSG_CODE_DRV_UNLOAD_PORT
;
7086 reset_code
= FW_MSG_CODE_DRV_UNLOAD_FUNCTION
;
7089 if ((reset_code
== FW_MSG_CODE_DRV_UNLOAD_COMMON
) ||
7090 (reset_code
== FW_MSG_CODE_DRV_UNLOAD_PORT
))
7091 bnx2x__link_reset(bp
);
7093 /* Disable HW interrupts, NAPI */
7094 bnx2x_netif_stop(bp
, 1);
7099 /* Reset the chip */
7100 bnx2x_reset_chip(bp
, reset_code
);
7102 /* Report UNLOAD_DONE to MCP */
7104 bnx2x_fw_command(bp
, DRV_MSG_CODE_UNLOAD_DONE
, 0);
7108 void bnx2x_disable_close_the_gate(struct bnx2x
*bp
)
7112 DP(NETIF_MSG_HW
, "Disabling \"close the gates\"\n");
7114 if (CHIP_IS_E1(bp
)) {
7115 int port
= BP_PORT(bp
);
7116 u32 addr
= port
? MISC_REG_AEU_MASK_ATTN_FUNC_1
:
7117 MISC_REG_AEU_MASK_ATTN_FUNC_0
;
7119 val
= REG_RD(bp
, addr
);
7121 REG_WR(bp
, addr
, val
);
7122 } else if (CHIP_IS_E1H(bp
)) {
7123 val
= REG_RD(bp
, MISC_REG_AEU_GENERAL_MASK
);
7124 val
&= ~(MISC_AEU_GENERAL_MASK_REG_AEU_PXP_CLOSE_MASK
|
7125 MISC_AEU_GENERAL_MASK_REG_AEU_NIG_CLOSE_MASK
);
7126 REG_WR(bp
, MISC_REG_AEU_GENERAL_MASK
, val
);
7130 /* Close gates #2, #3 and #4: */
7131 static void bnx2x_set_234_gates(struct bnx2x
*bp
, bool close
)
7135 /* Gates #2 and #4a are closed/opened for "not E1" only */
7136 if (!CHIP_IS_E1(bp
)) {
7138 val
= REG_RD(bp
, PXP_REG_HST_DISCARD_DOORBELLS
);
7139 REG_WR(bp
, PXP_REG_HST_DISCARD_DOORBELLS
,
7140 close
? (val
| 0x1) : (val
& (~(u32
)1)));
7142 val
= REG_RD(bp
, PXP_REG_HST_DISCARD_INTERNAL_WRITES
);
7143 REG_WR(bp
, PXP_REG_HST_DISCARD_INTERNAL_WRITES
,
7144 close
? (val
| 0x1) : (val
& (~(u32
)1)));
7148 addr
= BP_PORT(bp
) ? HC_REG_CONFIG_1
: HC_REG_CONFIG_0
;
7149 val
= REG_RD(bp
, addr
);
7150 REG_WR(bp
, addr
, (!close
) ? (val
| 0x1) : (val
& (~(u32
)1)));
7152 DP(NETIF_MSG_HW
, "%s gates #2, #3 and #4\n",
7153 close
? "closing" : "opening");
7157 #define SHARED_MF_CLP_MAGIC 0x80000000 /* `magic' bit */
7159 static void bnx2x_clp_reset_prep(struct bnx2x
*bp
, u32
*magic_val
)
7161 /* Do some magic... */
7162 u32 val
= MF_CFG_RD(bp
, shared_mf_config
.clp_mb
);
7163 *magic_val
= val
& SHARED_MF_CLP_MAGIC
;
7164 MF_CFG_WR(bp
, shared_mf_config
.clp_mb
, val
| SHARED_MF_CLP_MAGIC
);
7167 /* Restore the value of the `magic' bit.
7169 * @param pdev Device handle.
7170 * @param magic_val Old value of the `magic' bit.
7172 static void bnx2x_clp_reset_done(struct bnx2x
*bp
, u32 magic_val
)
7174 /* Restore the `magic' bit value... */
7175 u32 val
= MF_CFG_RD(bp
, shared_mf_config
.clp_mb
);
7176 MF_CFG_WR(bp
, shared_mf_config
.clp_mb
,
7177 (val
& (~SHARED_MF_CLP_MAGIC
)) | magic_val
);
7181 * Prepares for MCP reset: takes care of CLP configurations.
7184 * @param magic_val Old value of 'magic' bit.
7186 static void bnx2x_reset_mcp_prep(struct bnx2x
*bp
, u32
*magic_val
)
7189 u32 validity_offset
;
7191 DP(NETIF_MSG_HW
, "Starting\n");
7193 /* Set `magic' bit in order to save MF config */
7194 if (!CHIP_IS_E1(bp
))
7195 bnx2x_clp_reset_prep(bp
, magic_val
);
7197 /* Get shmem offset */
7198 shmem
= REG_RD(bp
, MISC_REG_SHARED_MEM_ADDR
);
7199 validity_offset
= offsetof(struct shmem_region
, validity_map
[0]);
7201 /* Clear validity map flags */
7203 REG_WR(bp
, shmem
+ validity_offset
, 0);
#define MCP_TIMEOUT      5000	/* 5 seconds (in ms) */
#define MCP_ONE_TIMEOUT  100	/* 100 ms */

/* Waits for MCP_ONE_TIMEOUT or MCP_ONE_TIMEOUT*10,
 * depending on the HW type.
 *
 * @param bp Driver handle.
 */
static inline void bnx2x_mcp_wait_one(struct bnx2x *bp)
{
	/* special handling for emulation and FPGA,
	   wait 10 times longer */
	if (CHIP_REV_IS_SLOW(bp))
		msleep(MCP_ONE_TIMEOUT*10);
	else
		msleep(MCP_ONE_TIMEOUT);
}
7224 static int bnx2x_reset_mcp_comp(struct bnx2x
*bp
, u32 magic_val
)
7226 u32 shmem
, cnt
, validity_offset
, val
;
7231 /* Get shmem offset */
7232 shmem
= REG_RD(bp
, MISC_REG_SHARED_MEM_ADDR
);
7234 BNX2X_ERR("Shmem 0 return failure\n");
7239 validity_offset
= offsetof(struct shmem_region
, validity_map
[0]);
7241 /* Wait for MCP to come up */
7242 for (cnt
= 0; cnt
< (MCP_TIMEOUT
/ MCP_ONE_TIMEOUT
); cnt
++) {
7243 /* TBD: its best to check validity map of last port.
7244 * currently checks on port 0.
7246 val
= REG_RD(bp
, shmem
+ validity_offset
);
7247 DP(NETIF_MSG_HW
, "shmem 0x%x validity map(0x%x)=0x%x\n", shmem
,
7248 shmem
+ validity_offset
, val
);
7250 /* check that shared memory is valid. */
7251 if ((val
& (SHR_MEM_VALIDITY_DEV_INFO
| SHR_MEM_VALIDITY_MB
))
7252 == (SHR_MEM_VALIDITY_DEV_INFO
| SHR_MEM_VALIDITY_MB
))
7255 bnx2x_mcp_wait_one(bp
);
7258 DP(NETIF_MSG_HW
, "Cnt=%d Shmem validity map 0x%x\n", cnt
, val
);
7260 /* Check that shared memory is valid. This indicates that MCP is up. */
7261 if ((val
& (SHR_MEM_VALIDITY_DEV_INFO
| SHR_MEM_VALIDITY_MB
)) !=
7262 (SHR_MEM_VALIDITY_DEV_INFO
| SHR_MEM_VALIDITY_MB
)) {
7263 BNX2X_ERR("Shmem signature not present. MCP is not up !!\n");
7269 /* Restore the `magic' bit value */
7270 if (!CHIP_IS_E1(bp
))
7271 bnx2x_clp_reset_done(bp
, magic_val
);
7276 static void bnx2x_pxp_prep(struct bnx2x
*bp
)
7278 if (!CHIP_IS_E1(bp
)) {
7279 REG_WR(bp
, PXP2_REG_RD_START_INIT
, 0);
7280 REG_WR(bp
, PXP2_REG_RQ_RBC_DONE
, 0);
7281 REG_WR(bp
, PXP2_REG_RQ_CFG_DONE
, 0);
7287 * Reset the whole chip except for:
7289 * - PCI Glue, PSWHST, PXP/PXP2 RF (all controlled by
7292 * - MISC (including AEU)
7296 static void bnx2x_process_kill_chip_reset(struct bnx2x
*bp
)
7298 u32 not_reset_mask1
, reset_mask1
, not_reset_mask2
, reset_mask2
;
7301 MISC_REGISTERS_RESET_REG_1_RST_HC
|
7302 MISC_REGISTERS_RESET_REG_1_RST_PXPV
|
7303 MISC_REGISTERS_RESET_REG_1_RST_PXP
;
7306 MISC_REGISTERS_RESET_REG_2_RST_MDIO
|
7307 MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE
|
7308 MISC_REGISTERS_RESET_REG_2_RST_EMAC1_HARD_CORE
|
7309 MISC_REGISTERS_RESET_REG_2_RST_MISC_CORE
|
7310 MISC_REGISTERS_RESET_REG_2_RST_RBCN
|
7311 MISC_REGISTERS_RESET_REG_2_RST_GRC
|
7312 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_REG_HARD_CORE
|
7313 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_HARD_CORE_RST_B
;
7315 reset_mask1
= 0xffffffff;
7318 reset_mask2
= 0xffff;
7320 reset_mask2
= 0x1ffff;
7322 REG_WR(bp
, GRCBASE_MISC
+ MISC_REGISTERS_RESET_REG_1_CLEAR
,
7323 reset_mask1
& (~not_reset_mask1
));
7324 REG_WR(bp
, GRCBASE_MISC
+ MISC_REGISTERS_RESET_REG_2_CLEAR
,
7325 reset_mask2
& (~not_reset_mask2
));
7330 REG_WR(bp
, GRCBASE_MISC
+ MISC_REGISTERS_RESET_REG_1_SET
, reset_mask1
);
7331 REG_WR(bp
, GRCBASE_MISC
+ MISC_REGISTERS_RESET_REG_2_SET
, reset_mask2
);
7335 static int bnx2x_process_kill(struct bnx2x
*bp
)
7339 u32 sr_cnt
, blk_cnt
, port_is_idle_0
, port_is_idle_1
, pgl_exp_rom2
;
7342 /* Empty the Tetris buffer, wait for 1s */
7344 sr_cnt
= REG_RD(bp
, PXP2_REG_RD_SR_CNT
);
7345 blk_cnt
= REG_RD(bp
, PXP2_REG_RD_BLK_CNT
);
7346 port_is_idle_0
= REG_RD(bp
, PXP2_REG_RD_PORT_IS_IDLE_0
);
7347 port_is_idle_1
= REG_RD(bp
, PXP2_REG_RD_PORT_IS_IDLE_1
);
7348 pgl_exp_rom2
= REG_RD(bp
, PXP2_REG_PGL_EXP_ROM2
);
7349 if ((sr_cnt
== 0x7e) && (blk_cnt
== 0xa0) &&
7350 ((port_is_idle_0
& 0x1) == 0x1) &&
7351 ((port_is_idle_1
& 0x1) == 0x1) &&
7352 (pgl_exp_rom2
== 0xffffffff))
7355 } while (cnt
-- > 0);
7358 DP(NETIF_MSG_HW
, "Tetris buffer didn't get empty or there"
7360 " outstanding read requests after 1s!\n");
7361 DP(NETIF_MSG_HW
, "sr_cnt=0x%08x, blk_cnt=0x%08x,"
7362 " port_is_idle_0=0x%08x,"
7363 " port_is_idle_1=0x%08x, pgl_exp_rom2=0x%08x\n",
7364 sr_cnt
, blk_cnt
, port_is_idle_0
, port_is_idle_1
,
7371 /* Close gates #2, #3 and #4 */
7372 bnx2x_set_234_gates(bp
, true);
7374 /* TBD: Indicate that "process kill" is in progress to MCP */
7376 /* Clear "unprepared" bit */
7377 REG_WR(bp
, MISC_REG_UNPREPARED
, 0);
7380 /* Make sure all is written to the chip before the reset */
7383 /* Wait for 1ms to empty GLUE and PCI-E core queues,
7384 * PSWHST, GRC and PSWRD Tetris buffer.
7388 /* Prepare to chip reset: */
7390 bnx2x_reset_mcp_prep(bp
, &val
);
7396 /* reset the chip */
7397 bnx2x_process_kill_chip_reset(bp
);
7400 /* Recover after reset: */
7402 if (bnx2x_reset_mcp_comp(bp
, val
))
7408 /* Open the gates #2, #3 and #4 */
7409 bnx2x_set_234_gates(bp
, false);
7411 /* TBD: IGU/AEU preparation bring back the AEU/IGU to a
7412 * reset state, re-enable attentions. */
7417 static int bnx2x_leader_reset(struct bnx2x
*bp
)
7420 /* Try to recover after the failure */
7421 if (bnx2x_process_kill(bp
)) {
7422 printk(KERN_ERR
"%s: Something bad had happen! Aii!\n",
7425 goto exit_leader_reset
;
7428 /* Clear "reset is in progress" bit and update the driver state */
7429 bnx2x_set_reset_done(bp
);
7430 bp
->recovery_state
= BNX2X_RECOVERY_DONE
;
7434 bnx2x_release_hw_lock(bp
, HW_LOCK_RESOURCE_RESERVED_08
);
7439 /* Assumption: runs under rtnl lock. This together with the fact
7440 * that it's called only from bnx2x_reset_task() ensure that it
7441 * will never be called when netif_running(bp->dev) is false.
7443 static void bnx2x_parity_recover(struct bnx2x
*bp
)
7445 DP(NETIF_MSG_HW
, "Handling parity\n");
7447 switch (bp
->recovery_state
) {
7448 case BNX2X_RECOVERY_INIT
:
7449 DP(NETIF_MSG_HW
, "State is BNX2X_RECOVERY_INIT\n");
7450 /* Try to get a LEADER_LOCK HW lock */
7451 if (bnx2x_trylock_hw_lock(bp
,
7452 HW_LOCK_RESOURCE_RESERVED_08
))
7455 /* Stop the driver */
7456 /* If interface has been removed - break */
7457 if (bnx2x_nic_unload(bp
, UNLOAD_RECOVERY
))
7460 bp
->recovery_state
= BNX2X_RECOVERY_WAIT
;
7461 /* Ensure "is_leader" and "recovery_state"
7462 * update values are seen on other CPUs
7467 case BNX2X_RECOVERY_WAIT
:
7468 DP(NETIF_MSG_HW
, "State is BNX2X_RECOVERY_WAIT\n");
7469 if (bp
->is_leader
) {
7470 u32 load_counter
= bnx2x_get_load_cnt(bp
);
7472 /* Wait until all other functions get
7475 schedule_delayed_work(&bp
->reset_task
,
7479 /* If all other functions got down -
7480 * try to bring the chip back to
7481 * normal. In any case it's an exit
7482 * point for a leader.
7484 if (bnx2x_leader_reset(bp
) ||
7485 bnx2x_nic_load(bp
, LOAD_NORMAL
)) {
7486 printk(KERN_ERR
"%s: Recovery "
7487 "has failed. Power cycle is "
7488 "needed.\n", bp
->dev
->name
);
7489 /* Disconnect this device */
7490 netif_device_detach(bp
->dev
);
7491 /* Block ifup for all function
7492 * of this ASIC until
7493 * "process kill" or power
7496 bnx2x_set_reset_in_progress(bp
);
7497 /* Shut down the power */
7498 bnx2x_set_power_state(bp
,
7505 } else { /* non-leader */
7506 if (!bnx2x_reset_is_done(bp
)) {
7507 /* Try to get a LEADER_LOCK HW lock as
7508 * long as a former leader may have
7509 * been unloaded by the user or
7510 * released a leadership by another
7513 if (bnx2x_trylock_hw_lock(bp
,
7514 HW_LOCK_RESOURCE_RESERVED_08
)) {
7515 /* I'm a leader now! Restart a
7522 schedule_delayed_work(&bp
->reset_task
,
7526 } else { /* A leader has completed
7527 * the "process kill". It's an exit
7528 * point for a non-leader.
7530 bnx2x_nic_load(bp
, LOAD_NORMAL
);
7531 bp
->recovery_state
=
7532 BNX2X_RECOVERY_DONE
;
7543 /* bnx2x_nic_unload() flushes the bnx2x_wq, thus reset task is
7544 * scheduled on a general queue in order to prevent a dead lock.
7546 static void bnx2x_reset_task(struct work_struct
*work
)
7548 struct bnx2x
*bp
= container_of(work
, struct bnx2x
, reset_task
.work
);
7550 #ifdef BNX2X_STOP_ON_ERROR
7551 BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
7552 " so reset not done to allow debug dump,\n"
7553 KERN_ERR
" you will need to reboot when done\n");
7559 if (!netif_running(bp
->dev
))
7560 goto reset_task_exit
;
7562 if (unlikely(bp
->recovery_state
!= BNX2X_RECOVERY_DONE
))
7563 bnx2x_parity_recover(bp
);
7565 bnx2x_nic_unload(bp
, UNLOAD_NORMAL
);
7566 bnx2x_nic_load(bp
, LOAD_NORMAL
);
7573 /* end of nic load/unload */
7576 * Init service functions
7579 static u32
bnx2x_get_pretend_reg(struct bnx2x
*bp
)
7581 u32 base
= PXP2_REG_PGL_PRETEND_FUNC_F0
;
7582 u32 stride
= PXP2_REG_PGL_PRETEND_FUNC_F1
- base
;
7583 return base
+ (BP_ABS_FUNC(bp
)) * stride
;
7586 static void bnx2x_undi_int_disable_e1h(struct bnx2x
*bp
)
7588 u32 reg
= bnx2x_get_pretend_reg(bp
);
7590 /* Flush all outstanding writes */
7593 /* Pretend to be function 0 */
7595 REG_RD(bp
, reg
); /* Flush the GRC transaction (in the chip) */
7597 /* From now we are in the "like-E1" mode */
7598 bnx2x_int_disable(bp
);
7600 /* Flush all outstanding writes */
7603 /* Restore the original function */
7604 REG_WR(bp
, reg
, BP_ABS_FUNC(bp
));
/* Disable interrupts, choosing the per-chip method. */
static inline void bnx2x_undi_int_disable(struct bnx2x *bp)
{
	if (CHIP_IS_E1(bp))
		bnx2x_int_disable(bp);
	else
		bnx2x_undi_int_disable_e1h(bp);
}
7616 static void __devinit
bnx2x_undi_unload(struct bnx2x
*bp
)
7620 /* Check if there is any driver already loaded */
7621 val
= REG_RD(bp
, MISC_REG_UNPREPARED
);
7623 /* Check if it is the UNDI driver
7624 * UNDI driver initializes CID offset for normal bell to 0x7
7626 bnx2x_acquire_hw_lock(bp
, HW_LOCK_RESOURCE_UNDI
);
7627 val
= REG_RD(bp
, DORQ_REG_NORM_CID_OFST
);
7629 u32 reset_code
= DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS
;
7630 /* save our pf_num */
7631 int orig_pf_num
= bp
->pf_num
;
7635 /* clear the UNDI indication */
7636 REG_WR(bp
, DORQ_REG_NORM_CID_OFST
, 0);
7638 BNX2X_DEV_INFO("UNDI is active! reset device\n");
7640 /* try unload UNDI on port 0 */
7643 (SHMEM_RD(bp
, func_mb
[bp
->pf_num
].drv_mb_header
) &
7644 DRV_MSG_SEQ_NUMBER_MASK
);
7645 reset_code
= bnx2x_fw_command(bp
, reset_code
, 0);
7647 /* if UNDI is loaded on the other port */
7648 if (reset_code
!= FW_MSG_CODE_DRV_UNLOAD_COMMON
) {
7650 /* send "DONE" for previous unload */
7651 bnx2x_fw_command(bp
,
7652 DRV_MSG_CODE_UNLOAD_DONE
, 0);
7654 /* unload UNDI on port 1 */
7657 (SHMEM_RD(bp
, func_mb
[bp
->pf_num
].drv_mb_header
) &
7658 DRV_MSG_SEQ_NUMBER_MASK
);
7659 reset_code
= DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS
;
7661 bnx2x_fw_command(bp
, reset_code
, 0);
7664 /* now it's safe to release the lock */
7665 bnx2x_release_hw_lock(bp
, HW_LOCK_RESOURCE_UNDI
);
7667 bnx2x_undi_int_disable(bp
);
7669 /* close input traffic and wait for it */
7670 /* Do not rcv packets to BRB */
7672 (BP_PORT(bp
) ? NIG_REG_LLH1_BRB1_DRV_MASK
:
7673 NIG_REG_LLH0_BRB1_DRV_MASK
), 0x0);
7674 /* Do not direct rcv packets that are not for MCP to
7677 (BP_PORT(bp
) ? NIG_REG_LLH1_BRB1_NOT_MCP
:
7678 NIG_REG_LLH0_BRB1_NOT_MCP
), 0x0);
7681 (BP_PORT(bp
) ? MISC_REG_AEU_MASK_ATTN_FUNC_1
:
7682 MISC_REG_AEU_MASK_ATTN_FUNC_0
), 0);
7685 /* save NIG port swap info */
7686 swap_val
= REG_RD(bp
, NIG_REG_PORT_SWAP
);
7687 swap_en
= REG_RD(bp
, NIG_REG_STRAP_OVERRIDE
);
7690 GRCBASE_MISC
+ MISC_REGISTERS_RESET_REG_1_CLEAR
,
7693 GRCBASE_MISC
+ MISC_REGISTERS_RESET_REG_2_CLEAR
,
7695 /* take the NIG out of reset and restore swap values */
7697 GRCBASE_MISC
+ MISC_REGISTERS_RESET_REG_1_SET
,
7698 MISC_REGISTERS_RESET_REG_1_RST_NIG
);
7699 REG_WR(bp
, NIG_REG_PORT_SWAP
, swap_val
);
7700 REG_WR(bp
, NIG_REG_STRAP_OVERRIDE
, swap_en
);
7702 /* send unload done to the MCP */
7703 bnx2x_fw_command(bp
, DRV_MSG_CODE_UNLOAD_DONE
, 0);
7705 /* restore our func and fw_seq */
7706 bp
->pf_num
= orig_pf_num
;
7708 (SHMEM_RD(bp
, func_mb
[bp
->pf_num
].drv_mb_header
) &
7709 DRV_MSG_SEQ_NUMBER_MASK
);
7711 bnx2x_release_hw_lock(bp
, HW_LOCK_RESOURCE_UNDI
);
7715 static void __devinit
bnx2x_get_common_hwinfo(struct bnx2x
*bp
)
7717 u32 val
, val2
, val3
, val4
, id
;
7720 /* Get the chip revision id and number. */
7721 /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
7722 val
= REG_RD(bp
, MISC_REG_CHIP_NUM
);
7723 id
= ((val
& 0xffff) << 16);
7724 val
= REG_RD(bp
, MISC_REG_CHIP_REV
);
7725 id
|= ((val
& 0xf) << 12);
7726 val
= REG_RD(bp
, MISC_REG_CHIP_METAL
);
7727 id
|= ((val
& 0xff) << 4);
7728 val
= REG_RD(bp
, MISC_REG_BOND_ID
);
7730 bp
->common
.chip_id
= id
;
7732 /* Set doorbell size */
7733 bp
->db_size
= (1 << BNX2X_DB_SHIFT
);
7735 if (CHIP_IS_E2(bp
)) {
7736 val
= REG_RD(bp
, MISC_REG_PORT4MODE_EN_OVWR
);
7738 val
= REG_RD(bp
, MISC_REG_PORT4MODE_EN
);
7740 val
= (val
>> 1) & 1;
7741 BNX2X_DEV_INFO("chip is in %s\n", val
? "4_PORT_MODE" :
7743 bp
->common
.chip_port_mode
= val
? CHIP_4_PORT_MODE
:
7746 if (CHIP_MODE_IS_4_PORT(bp
))
7747 bp
->pfid
= (bp
->pf_num
>> 1); /* 0..3 */
7749 bp
->pfid
= (bp
->pf_num
& 0x6); /* 0, 2, 4, 6 */
7751 bp
->common
.chip_port_mode
= CHIP_PORT_MODE_NONE
; /* N/A */
7752 bp
->pfid
= bp
->pf_num
; /* 0..7 */
7756 * set base FW non-default (fast path) status block id, this value is
7757 * used to initialize the fw_sb_id saved on the fp/queue structure to
7758 * determine the id used by the FW.
7760 if (CHIP_IS_E1x(bp
))
7761 bp
->base_fw_ndsb
= BP_PORT(bp
) * FP_SB_MAX_E1x
;
7763 bp
->base_fw_ndsb
= BP_PORT(bp
) * FP_SB_MAX_E2
;
7765 bp
->link_params
.chip_id
= bp
->common
.chip_id
;
7766 BNX2X_DEV_INFO("chip ID is 0x%x\n", id
);
7768 val
= (REG_RD(bp
, 0x2874) & 0x55);
7769 if ((bp
->common
.chip_id
& 0x1) ||
7770 (CHIP_IS_E1(bp
) && val
) || (CHIP_IS_E1H(bp
) && (val
== 0x55))) {
7771 bp
->flags
|= ONE_PORT_FLAG
;
7772 BNX2X_DEV_INFO("single port device\n");
7775 val
= REG_RD(bp
, MCP_REG_MCPR_NVM_CFG4
);
7776 bp
->common
.flash_size
= (NVRAM_1MB_SIZE
<<
7777 (val
& MCPR_NVM_CFG4_FLASH_SIZE
));
7778 BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
7779 bp
->common
.flash_size
, bp
->common
.flash_size
);
7781 bp
->common
.shmem_base
= REG_RD(bp
, MISC_REG_SHARED_MEM_ADDR
);
7782 bp
->common
.shmem2_base
= REG_RD(bp
, (BP_PATH(bp
) ?
7783 MISC_REG_GENERIC_CR_1
:
7784 MISC_REG_GENERIC_CR_0
));
7785 bp
->link_params
.shmem_base
= bp
->common
.shmem_base
;
7786 bp
->link_params
.shmem2_base
= bp
->common
.shmem2_base
;
7787 BNX2X_DEV_INFO("shmem offset 0x%x shmem2 offset 0x%x\n",
7788 bp
->common
.shmem_base
, bp
->common
.shmem2_base
);
7790 if (!bp
->common
.shmem_base
) {
7791 BNX2X_DEV_INFO("MCP not active\n");
7792 bp
->flags
|= NO_MCP_FLAG
;
7796 val
= SHMEM_RD(bp
, validity_map
[BP_PORT(bp
)]);
7797 if ((val
& (SHR_MEM_VALIDITY_DEV_INFO
| SHR_MEM_VALIDITY_MB
))
7798 != (SHR_MEM_VALIDITY_DEV_INFO
| SHR_MEM_VALIDITY_MB
))
7799 BNX2X_ERR("BAD MCP validity signature\n");
7801 bp
->common
.hw_config
= SHMEM_RD(bp
, dev_info
.shared_hw_config
.config
);
7802 BNX2X_DEV_INFO("hw_config 0x%08x\n", bp
->common
.hw_config
);
7804 bp
->link_params
.hw_led_mode
= ((bp
->common
.hw_config
&
7805 SHARED_HW_CFG_LED_MODE_MASK
) >>
7806 SHARED_HW_CFG_LED_MODE_SHIFT
);
7808 bp
->link_params
.feature_config_flags
= 0;
7809 val
= SHMEM_RD(bp
, dev_info
.shared_feature_config
.config
);
7810 if (val
& SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED
)
7811 bp
->link_params
.feature_config_flags
|=
7812 FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED
;
7814 bp
->link_params
.feature_config_flags
&=
7815 ~FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED
;
7817 val
= SHMEM_RD(bp
, dev_info
.bc_rev
) >> 8;
7818 bp
->common
.bc_ver
= val
;
7819 BNX2X_DEV_INFO("bc_ver %X\n", val
);
7820 if (val
< BNX2X_BC_VER
) {
7821 /* for now only warn
7822 * later we might need to enforce this */
7823 BNX2X_ERR("This driver needs bc_ver %X but found %X, "
7824 "please upgrade BC\n", BNX2X_BC_VER
, val
);
7826 bp
->link_params
.feature_config_flags
|=
7827 (val
>= REQ_BC_VER_4_VRFY_FIRST_PHY_OPT_MDL
) ?
7828 FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY
: 0;
7830 bp
->link_params
.feature_config_flags
|=
7831 (val
>= REQ_BC_VER_4_VRFY_SPECIFIC_PHY_OPT_MDL
) ?
7832 FEATURE_CONFIG_BC_SUPPORTS_DUAL_PHY_OPT_MDL_VRFY
: 0;
7834 if (BP_E1HVN(bp
) == 0) {
7835 pci_read_config_word(bp
->pdev
, bp
->pm_cap
+ PCI_PM_PMC
, &pmc
);
7836 bp
->flags
|= (pmc
& PCI_PM_CAP_PME_D3cold
) ? 0 : NO_WOL_FLAG
;
7838 /* no WOL capability for E1HVN != 0 */
7839 bp
->flags
|= NO_WOL_FLAG
;
7841 BNX2X_DEV_INFO("%sWoL capable\n",
7842 (bp
->flags
& NO_WOL_FLAG
) ? "not " : "");
7844 val
= SHMEM_RD(bp
, dev_info
.shared_hw_config
.part_num
);
7845 val2
= SHMEM_RD(bp
, dev_info
.shared_hw_config
.part_num
[4]);
7846 val3
= SHMEM_RD(bp
, dev_info
.shared_hw_config
.part_num
[8]);
7847 val4
= SHMEM_RD(bp
, dev_info
.shared_hw_config
.part_num
[12]);
7849 dev_info(&bp
->pdev
->dev
, "part number %X-%X-%X-%X\n",
7850 val
, val2
, val3
, val4
);
7853 #define IGU_FID(val) GET_FIELD((val), IGU_REG_MAPPING_MEMORY_FID)
7854 #define IGU_VEC(val) GET_FIELD((val), IGU_REG_MAPPING_MEMORY_VECTOR)
7856 static void __devinit
bnx2x_get_igu_cam_info(struct bnx2x
*bp
)
7858 int pfid
= BP_FUNC(bp
);
7859 int vn
= BP_E1HVN(bp
);
7864 bp
->igu_base_sb
= 0xff;
7866 if (CHIP_INT_MODE_IS_BC(bp
)) {
7867 bp
->igu_sb_cnt
= min_t(u8
, FP_SB_MAX_E1x
,
7870 bp
->igu_base_sb
= (CHIP_MODE_IS_4_PORT(bp
) ? pfid
: vn
) *
7873 bp
->igu_dsb_id
= E1HVN_MAX
* FP_SB_MAX_E1x
+
7874 (CHIP_MODE_IS_4_PORT(bp
) ? pfid
: vn
);
7879 /* IGU in normal mode - read CAM */
7880 for (igu_sb_id
= 0; igu_sb_id
< IGU_REG_MAPPING_MEMORY_SIZE
;
7882 val
= REG_RD(bp
, IGU_REG_MAPPING_MEMORY
+ igu_sb_id
* 4);
7883 if (!(val
& IGU_REG_MAPPING_MEMORY_VALID
))
7886 if ((fid
& IGU_FID_ENCODE_IS_PF
)) {
7887 if ((fid
& IGU_FID_PF_NUM_MASK
) != pfid
)
7889 if (IGU_VEC(val
) == 0)
7890 /* default status block */
7891 bp
->igu_dsb_id
= igu_sb_id
;
7893 if (bp
->igu_base_sb
== 0xff)
7894 bp
->igu_base_sb
= igu_sb_id
;
7899 bp
->igu_sb_cnt
= min_t(u8
, bp
->igu_sb_cnt
, bp
->l2_cid_count
);
7900 if (bp
->igu_sb_cnt
== 0)
7901 BNX2X_ERR("CAM configuration error\n");
7904 static void __devinit
bnx2x_link_settings_supported(struct bnx2x
*bp
,
7907 int cfg_size
= 0, idx
, port
= BP_PORT(bp
);
7909 /* Aggregation of supported attributes of all external phys */
7910 bp
->port
.supported
[0] = 0;
7911 bp
->port
.supported
[1] = 0;
7912 switch (bp
->link_params
.num_phys
) {
7914 bp
->port
.supported
[0] = bp
->link_params
.phy
[INT_PHY
].supported
;
7918 bp
->port
.supported
[0] = bp
->link_params
.phy
[EXT_PHY1
].supported
;
7922 if (bp
->link_params
.multi_phy_config
&
7923 PORT_HW_CFG_PHY_SWAPPED_ENABLED
) {
7924 bp
->port
.supported
[1] =
7925 bp
->link_params
.phy
[EXT_PHY1
].supported
;
7926 bp
->port
.supported
[0] =
7927 bp
->link_params
.phy
[EXT_PHY2
].supported
;
7929 bp
->port
.supported
[0] =
7930 bp
->link_params
.phy
[EXT_PHY1
].supported
;
7931 bp
->port
.supported
[1] =
7932 bp
->link_params
.phy
[EXT_PHY2
].supported
;
7938 if (!(bp
->port
.supported
[0] || bp
->port
.supported
[1])) {
7939 BNX2X_ERR("NVRAM config error. BAD phy config."
7940 "PHY1 config 0x%x, PHY2 config 0x%x\n",
7942 dev_info
.port_hw_config
[port
].external_phy_config
),
7944 dev_info
.port_hw_config
[port
].external_phy_config2
));
7948 switch (switch_cfg
) {
7950 bp
->port
.phy_addr
= REG_RD(bp
, NIG_REG_SERDES0_CTRL_PHY_ADDR
+
7952 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp
->port
.phy_addr
);
7955 case SWITCH_CFG_10G
:
7956 bp
->port
.phy_addr
= REG_RD(bp
, NIG_REG_XGXS0_CTRL_PHY_ADDR
+
7958 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp
->port
.phy_addr
);
7962 BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
7963 bp
->port
.link_config
[0]);
7966 /* mask what we support according to speed_cap_mask per configuration */
7967 for (idx
= 0; idx
< cfg_size
; idx
++) {
7968 if (!(bp
->link_params
.speed_cap_mask
[idx
] &
7969 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF
))
7970 bp
->port
.supported
[idx
] &= ~SUPPORTED_10baseT_Half
;
7972 if (!(bp
->link_params
.speed_cap_mask
[idx
] &
7973 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL
))
7974 bp
->port
.supported
[idx
] &= ~SUPPORTED_10baseT_Full
;
7976 if (!(bp
->link_params
.speed_cap_mask
[idx
] &
7977 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF
))
7978 bp
->port
.supported
[idx
] &= ~SUPPORTED_100baseT_Half
;
7980 if (!(bp
->link_params
.speed_cap_mask
[idx
] &
7981 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL
))
7982 bp
->port
.supported
[idx
] &= ~SUPPORTED_100baseT_Full
;
7984 if (!(bp
->link_params
.speed_cap_mask
[idx
] &
7985 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G
))
7986 bp
->port
.supported
[idx
] &= ~(SUPPORTED_1000baseT_Half
|
7987 SUPPORTED_1000baseT_Full
);
7989 if (!(bp
->link_params
.speed_cap_mask
[idx
] &
7990 PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G
))
7991 bp
->port
.supported
[idx
] &= ~SUPPORTED_2500baseX_Full
;
7993 if (!(bp
->link_params
.speed_cap_mask
[idx
] &
7994 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G
))
7995 bp
->port
.supported
[idx
] &= ~SUPPORTED_10000baseT_Full
;
7999 BNX2X_DEV_INFO("supported 0x%x 0x%x\n", bp
->port
.supported
[0],
8000 bp
->port
.supported
[1]);
8003 static void __devinit
bnx2x_link_settings_requested(struct bnx2x
*bp
)
8005 u32 link_config
, idx
, cfg_size
= 0;
8006 bp
->port
.advertising
[0] = 0;
8007 bp
->port
.advertising
[1] = 0;
8008 switch (bp
->link_params
.num_phys
) {
8017 for (idx
= 0; idx
< cfg_size
; idx
++) {
8018 bp
->link_params
.req_duplex
[idx
] = DUPLEX_FULL
;
8019 link_config
= bp
->port
.link_config
[idx
];
8020 switch (link_config
& PORT_FEATURE_LINK_SPEED_MASK
) {
8021 case PORT_FEATURE_LINK_SPEED_AUTO
:
8022 if (bp
->port
.supported
[idx
] & SUPPORTED_Autoneg
) {
8023 bp
->link_params
.req_line_speed
[idx
] =
8025 bp
->port
.advertising
[idx
] |=
8026 bp
->port
.supported
[idx
];
8028 /* force 10G, no AN */
8029 bp
->link_params
.req_line_speed
[idx
] =
8031 bp
->port
.advertising
[idx
] |=
8032 (ADVERTISED_10000baseT_Full
|
8038 case PORT_FEATURE_LINK_SPEED_10M_FULL
:
8039 if (bp
->port
.supported
[idx
] & SUPPORTED_10baseT_Full
) {
8040 bp
->link_params
.req_line_speed
[idx
] =
8042 bp
->port
.advertising
[idx
] |=
8043 (ADVERTISED_10baseT_Full
|
8046 BNX2X_ERROR("NVRAM config error. "
8047 "Invalid link_config 0x%x"
8048 " speed_cap_mask 0x%x\n",
8050 bp
->link_params
.speed_cap_mask
[idx
]);
8055 case PORT_FEATURE_LINK_SPEED_10M_HALF
:
8056 if (bp
->port
.supported
[idx
] & SUPPORTED_10baseT_Half
) {
8057 bp
->link_params
.req_line_speed
[idx
] =
8059 bp
->link_params
.req_duplex
[idx
] =
8061 bp
->port
.advertising
[idx
] |=
8062 (ADVERTISED_10baseT_Half
|
8065 BNX2X_ERROR("NVRAM config error. "
8066 "Invalid link_config 0x%x"
8067 " speed_cap_mask 0x%x\n",
8069 bp
->link_params
.speed_cap_mask
[idx
]);
8074 case PORT_FEATURE_LINK_SPEED_100M_FULL
:
8075 if (bp
->port
.supported
[idx
] &
8076 SUPPORTED_100baseT_Full
) {
8077 bp
->link_params
.req_line_speed
[idx
] =
8079 bp
->port
.advertising
[idx
] |=
8080 (ADVERTISED_100baseT_Full
|
8083 BNX2X_ERROR("NVRAM config error. "
8084 "Invalid link_config 0x%x"
8085 " speed_cap_mask 0x%x\n",
8087 bp
->link_params
.speed_cap_mask
[idx
]);
8092 case PORT_FEATURE_LINK_SPEED_100M_HALF
:
8093 if (bp
->port
.supported
[idx
] &
8094 SUPPORTED_100baseT_Half
) {
8095 bp
->link_params
.req_line_speed
[idx
] =
8097 bp
->link_params
.req_duplex
[idx
] =
8099 bp
->port
.advertising
[idx
] |=
8100 (ADVERTISED_100baseT_Half
|
8103 BNX2X_ERROR("NVRAM config error. "
8104 "Invalid link_config 0x%x"
8105 " speed_cap_mask 0x%x\n",
8107 bp
->link_params
.speed_cap_mask
[idx
]);
8112 case PORT_FEATURE_LINK_SPEED_1G
:
8113 if (bp
->port
.supported
[idx
] &
8114 SUPPORTED_1000baseT_Full
) {
8115 bp
->link_params
.req_line_speed
[idx
] =
8117 bp
->port
.advertising
[idx
] |=
8118 (ADVERTISED_1000baseT_Full
|
8121 BNX2X_ERROR("NVRAM config error. "
8122 "Invalid link_config 0x%x"
8123 " speed_cap_mask 0x%x\n",
8125 bp
->link_params
.speed_cap_mask
[idx
]);
8130 case PORT_FEATURE_LINK_SPEED_2_5G
:
8131 if (bp
->port
.supported
[idx
] &
8132 SUPPORTED_2500baseX_Full
) {
8133 bp
->link_params
.req_line_speed
[idx
] =
8135 bp
->port
.advertising
[idx
] |=
8136 (ADVERTISED_2500baseX_Full
|
8139 BNX2X_ERROR("NVRAM config error. "
8140 "Invalid link_config 0x%x"
8141 " speed_cap_mask 0x%x\n",
8143 bp
->link_params
.speed_cap_mask
[idx
]);
8148 case PORT_FEATURE_LINK_SPEED_10G_CX4
:
8149 case PORT_FEATURE_LINK_SPEED_10G_KX4
:
8150 case PORT_FEATURE_LINK_SPEED_10G_KR
:
8151 if (bp
->port
.supported
[idx
] &
8152 SUPPORTED_10000baseT_Full
) {
8153 bp
->link_params
.req_line_speed
[idx
] =
8155 bp
->port
.advertising
[idx
] |=
8156 (ADVERTISED_10000baseT_Full
|
8159 BNX2X_ERROR("NVRAM config error. "
8160 "Invalid link_config 0x%x"
8161 " speed_cap_mask 0x%x\n",
8163 bp
->link_params
.speed_cap_mask
[idx
]);
8169 BNX2X_ERROR("NVRAM config error. "
8170 "BAD link speed link_config 0x%x\n",
8172 bp
->link_params
.req_line_speed
[idx
] =
8174 bp
->port
.advertising
[idx
] =
8175 bp
->port
.supported
[idx
];
8179 bp
->link_params
.req_flow_ctrl
[idx
] = (link_config
&
8180 PORT_FEATURE_FLOW_CONTROL_MASK
);
8181 if ((bp
->link_params
.req_flow_ctrl
[idx
] ==
8182 BNX2X_FLOW_CTRL_AUTO
) &&
8183 !(bp
->port
.supported
[idx
] & SUPPORTED_Autoneg
)) {
8184 bp
->link_params
.req_flow_ctrl
[idx
] =
8185 BNX2X_FLOW_CTRL_NONE
;
8188 BNX2X_DEV_INFO("req_line_speed %d req_duplex %d req_flow_ctrl"
8189 " 0x%x advertising 0x%x\n",
8190 bp
->link_params
.req_line_speed
[idx
],
8191 bp
->link_params
.req_duplex
[idx
],
8192 bp
->link_params
.req_flow_ctrl
[idx
],
8193 bp
->port
.advertising
[idx
]);
8197 static void __devinit
bnx2x_set_mac_buf(u8
*mac_buf
, u32 mac_lo
, u16 mac_hi
)
8199 mac_hi
= cpu_to_be16(mac_hi
);
8200 mac_lo
= cpu_to_be32(mac_lo
);
8201 memcpy(mac_buf
, &mac_hi
, sizeof(mac_hi
));
8202 memcpy(mac_buf
+ sizeof(mac_hi
), &mac_lo
, sizeof(mac_lo
));
8205 static void __devinit
bnx2x_get_port_hwinfo(struct bnx2x
*bp
)
8207 int port
= BP_PORT(bp
);
8209 u32 ext_phy_type
, ext_phy_config
;
8211 bp
->link_params
.bp
= bp
;
8212 bp
->link_params
.port
= port
;
8214 bp
->link_params
.lane_config
=
8215 SHMEM_RD(bp
, dev_info
.port_hw_config
[port
].lane_config
);
8217 bp
->link_params
.speed_cap_mask
[0] =
8219 dev_info
.port_hw_config
[port
].speed_capability_mask
);
8220 bp
->link_params
.speed_cap_mask
[1] =
8222 dev_info
.port_hw_config
[port
].speed_capability_mask2
);
8223 bp
->port
.link_config
[0] =
8224 SHMEM_RD(bp
, dev_info
.port_feature_config
[port
].link_config
);
8226 bp
->port
.link_config
[1] =
8227 SHMEM_RD(bp
, dev_info
.port_feature_config
[port
].link_config2
);
8229 bp
->link_params
.multi_phy_config
=
8230 SHMEM_RD(bp
, dev_info
.port_hw_config
[port
].multi_phy_config
);
8231 /* If the device is capable of WoL, set the default state according
8234 config
= SHMEM_RD(bp
, dev_info
.port_feature_config
[port
].config
);
8235 bp
->wol
= (!(bp
->flags
& NO_WOL_FLAG
) &&
8236 (config
& PORT_FEATURE_WOL_ENABLED
));
8238 BNX2X_DEV_INFO("lane_config 0x%08x "
8239 "speed_cap_mask0 0x%08x link_config0 0x%08x\n",
8240 bp
->link_params
.lane_config
,
8241 bp
->link_params
.speed_cap_mask
[0],
8242 bp
->port
.link_config
[0]);
8244 bp
->link_params
.switch_cfg
= (bp
->port
.link_config
[0] &
8245 PORT_FEATURE_CONNECTED_SWITCH_MASK
);
8246 bnx2x_phy_probe(&bp
->link_params
);
8247 bnx2x_link_settings_supported(bp
, bp
->link_params
.switch_cfg
);
8249 bnx2x_link_settings_requested(bp
);
8252 * If connected directly, work with the internal PHY, otherwise, work
8253 * with the external PHY
8257 dev_info
.port_hw_config
[port
].external_phy_config
);
8258 ext_phy_type
= XGXS_EXT_PHY_TYPE(ext_phy_config
);
8259 if (ext_phy_type
== PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT
)
8260 bp
->mdio
.prtad
= bp
->port
.phy_addr
;
8262 else if ((ext_phy_type
!= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE
) &&
8263 (ext_phy_type
!= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN
))
8265 XGXS_EXT_PHY_ADDR(ext_phy_config
);
8268 static void __devinit
bnx2x_get_mac_hwinfo(struct bnx2x
*bp
)
8271 int func
= BP_ABS_FUNC(bp
);
8272 int port
= BP_PORT(bp
);
8275 BNX2X_ERROR("warning: random MAC workaround active\n");
8276 random_ether_addr(bp
->dev
->dev_addr
);
8277 } else if (IS_MF(bp
)) {
8278 val2
= MF_CFG_RD(bp
, func_mf_config
[func
].mac_upper
);
8279 val
= MF_CFG_RD(bp
, func_mf_config
[func
].mac_lower
);
8280 if ((val2
!= FUNC_MF_CFG_UPPERMAC_DEFAULT
) &&
8281 (val
!= FUNC_MF_CFG_LOWERMAC_DEFAULT
))
8282 bnx2x_set_mac_buf(bp
->dev
->dev_addr
, val
, val2
);
8285 /* iSCSI NPAR MAC */
8287 u32 cfg
= MF_CFG_RD(bp
, func_ext_config
[func
].func_cfg
);
8288 if (cfg
& MACP_FUNC_CFG_FLAGS_ISCSI_OFFLOAD
) {
8289 val2
= MF_CFG_RD(bp
, func_ext_config
[func
].
8290 iscsi_mac_addr_upper
);
8291 val
= MF_CFG_RD(bp
, func_ext_config
[func
].
8292 iscsi_mac_addr_lower
);
8293 bnx2x_set_mac_buf(bp
->iscsi_mac
, val
, val2
);
8298 /* in SF read MACs from port configuration */
8299 val2
= SHMEM_RD(bp
, dev_info
.port_hw_config
[port
].mac_upper
);
8300 val
= SHMEM_RD(bp
, dev_info
.port_hw_config
[port
].mac_lower
);
8301 bnx2x_set_mac_buf(bp
->dev
->dev_addr
, val
, val2
);
8304 val2
= SHMEM_RD(bp
, dev_info
.port_hw_config
[port
].
8306 val
= SHMEM_RD(bp
, dev_info
.port_hw_config
[port
].
8308 bnx2x_set_mac_buf(bp
->iscsi_mac
, val
, val2
);
8312 memcpy(bp
->link_params
.mac_addr
, bp
->dev
->dev_addr
, ETH_ALEN
);
8313 memcpy(bp
->dev
->perm_addr
, bp
->dev
->dev_addr
, ETH_ALEN
);
8317 static int __devinit
bnx2x_get_hwinfo(struct bnx2x
*bp
)
8319 int /*abs*/func
= BP_ABS_FUNC(bp
);
8324 bnx2x_get_common_hwinfo(bp
);
8326 if (CHIP_IS_E1x(bp
)) {
8327 bp
->common
.int_block
= INT_BLOCK_HC
;
8329 bp
->igu_dsb_id
= DEF_SB_IGU_ID
;
8330 bp
->igu_base_sb
= 0;
8331 bp
->igu_sb_cnt
= min_t(u8
, FP_SB_MAX_E1x
, bp
->l2_cid_count
);
8333 bp
->common
.int_block
= INT_BLOCK_IGU
;
8334 val
= REG_RD(bp
, IGU_REG_BLOCK_CONFIGURATION
);
8335 if (val
& IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN
) {
8336 DP(NETIF_MSG_PROBE
, "IGU Backward Compatible Mode\n");
8337 bp
->common
.int_block
|= INT_BLOCK_MODE_BW_COMP
;
8339 DP(NETIF_MSG_PROBE
, "IGU Normal Mode\n");
8341 bnx2x_get_igu_cam_info(bp
);
8344 DP(NETIF_MSG_PROBE
, "igu_dsb_id %d igu_base_sb %d igu_sb_cnt %d\n",
8345 bp
->igu_dsb_id
, bp
->igu_base_sb
, bp
->igu_sb_cnt
);
8348 * Initialize MF configuration
8356 if (!CHIP_IS_E1(bp
) && !BP_NOMCP(bp
)) {
8358 "shmem2base 0x%x, size %d, mfcfg offset %d\n",
8359 bp
->common
.shmem2_base
, SHMEM2_RD(bp
, size
),
8360 (u32
)offsetof(struct shmem2_region
, mf_cfg_addr
));
8361 if (SHMEM2_HAS(bp
, mf_cfg_addr
))
8362 bp
->common
.mf_cfg_base
= SHMEM2_RD(bp
, mf_cfg_addr
);
8364 bp
->common
.mf_cfg_base
= bp
->common
.shmem_base
+
8365 offsetof(struct shmem_region
, func_mb
) +
8366 E1H_FUNC_MAX
* sizeof(struct drv_func_mb
);
8368 * get mf configuration:
8369 * 1. existance of MF configuration
8370 * 2. MAC address must be legal (check only upper bytes)
8371 * for Switch-Independent mode;
8372 * OVLAN must be legal for Switch-Dependent mode
8373 * 3. SF_MODE configures specific MF mode
8375 if (bp
->common
.mf_cfg_base
!= SHMEM_MF_CFG_ADDR_NONE
) {
8376 /* get mf configuration */
8378 dev_info
.shared_feature_config
.config
);
8379 val
&= SHARED_FEAT_CFG_FORCE_SF_MODE_MASK
;
8382 case SHARED_FEAT_CFG_FORCE_SF_MODE_SWITCH_INDEPT
:
8383 val
= MF_CFG_RD(bp
, func_mf_config
[func
].
8385 /* check for legal mac (upper bytes)*/
8386 if (val
!= 0xffff) {
8387 bp
->mf_mode
= MULTI_FUNCTION_SI
;
8388 bp
->mf_config
[vn
] = MF_CFG_RD(bp
,
8389 func_mf_config
[func
].config
);
8391 DP(NETIF_MSG_PROBE
, "illegal MAC "
8392 "address for SI\n");
8394 case SHARED_FEAT_CFG_FORCE_SF_MODE_MF_ALLOWED
:
8395 /* get OV configuration */
8397 func_mf_config
[FUNC_0
].e1hov_tag
);
8398 val
&= FUNC_MF_CFG_E1HOV_TAG_MASK
;
8400 if (val
!= FUNC_MF_CFG_E1HOV_TAG_DEFAULT
) {
8401 bp
->mf_mode
= MULTI_FUNCTION_SD
;
8402 bp
->mf_config
[vn
] = MF_CFG_RD(bp
,
8403 func_mf_config
[func
].config
);
8405 DP(NETIF_MSG_PROBE
, "illegal OV for "
8409 /* Unknown configuration: reset mf_config */
8410 bp
->mf_config
[vn
] = 0;
8411 DP(NETIF_MSG_PROBE
, "Unkown MF mode 0x%x\n",
8416 BNX2X_DEV_INFO("%s function mode\n",
8417 IS_MF(bp
) ? "multi" : "single");
8419 switch (bp
->mf_mode
) {
8420 case MULTI_FUNCTION_SD
:
8421 val
= MF_CFG_RD(bp
, func_mf_config
[func
].e1hov_tag
) &
8422 FUNC_MF_CFG_E1HOV_TAG_MASK
;
8423 if (val
!= FUNC_MF_CFG_E1HOV_TAG_DEFAULT
) {
8425 BNX2X_DEV_INFO("MF OV for func %d is %d"
8426 " (0x%04x)\n", func
,
8427 bp
->mf_ov
, bp
->mf_ov
);
8429 BNX2X_ERR("No valid MF OV for func %d,"
8430 " aborting\n", func
);
8434 case MULTI_FUNCTION_SI
:
8435 BNX2X_DEV_INFO("func %d is in MF "
8436 "switch-independent mode\n", func
);
8440 BNX2X_ERR("VN %d in single function mode,"
8449 /* adjust igu_sb_cnt to MF for E1x */
8450 if (CHIP_IS_E1x(bp
) && IS_MF(bp
))
8451 bp
->igu_sb_cnt
/= E1HVN_MAX
;
8454 * adjust E2 sb count: to be removed when FW will support
8455 * more then 16 L2 clients
8457 #define MAX_L2_CLIENTS 16
8459 bp
->igu_sb_cnt
= min_t(u8
, bp
->igu_sb_cnt
,
8460 MAX_L2_CLIENTS
/ (IS_MF(bp
) ? 4 : 1));
8462 if (!BP_NOMCP(bp
)) {
8463 bnx2x_get_port_hwinfo(bp
);
8466 (SHMEM_RD(bp
, func_mb
[BP_FW_MB_IDX(bp
)].drv_mb_header
) &
8467 DRV_MSG_SEQ_NUMBER_MASK
);
8468 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp
->fw_seq
);
8471 /* Get MAC addresses */
8472 bnx2x_get_mac_hwinfo(bp
);
8477 static void __devinit
bnx2x_read_fwinfo(struct bnx2x
*bp
)
8479 int cnt
, i
, block_end
, rodi
;
8480 char vpd_data
[BNX2X_VPD_LEN
+1];
8481 char str_id_reg
[VENDOR_ID_LEN
+1];
8482 char str_id_cap
[VENDOR_ID_LEN
+1];
8485 cnt
= pci_read_vpd(bp
->pdev
, 0, BNX2X_VPD_LEN
, vpd_data
);
8486 memset(bp
->fw_ver
, 0, sizeof(bp
->fw_ver
));
8488 if (cnt
< BNX2X_VPD_LEN
)
8491 i
= pci_vpd_find_tag(vpd_data
, 0, BNX2X_VPD_LEN
,
8492 PCI_VPD_LRDT_RO_DATA
);
8497 block_end
= i
+ PCI_VPD_LRDT_TAG_SIZE
+
8498 pci_vpd_lrdt_size(&vpd_data
[i
]);
8500 i
+= PCI_VPD_LRDT_TAG_SIZE
;
8502 if (block_end
> BNX2X_VPD_LEN
)
8505 rodi
= pci_vpd_find_info_keyword(vpd_data
, i
, block_end
,
8506 PCI_VPD_RO_KEYWORD_MFR_ID
);
8510 len
= pci_vpd_info_field_size(&vpd_data
[rodi
]);
8512 if (len
!= VENDOR_ID_LEN
)
8515 rodi
+= PCI_VPD_INFO_FLD_HDR_SIZE
;
8517 /* vendor specific info */
8518 snprintf(str_id_reg
, VENDOR_ID_LEN
+ 1, "%04x", PCI_VENDOR_ID_DELL
);
8519 snprintf(str_id_cap
, VENDOR_ID_LEN
+ 1, "%04X", PCI_VENDOR_ID_DELL
);
8520 if (!strncmp(str_id_reg
, &vpd_data
[rodi
], VENDOR_ID_LEN
) ||
8521 !strncmp(str_id_cap
, &vpd_data
[rodi
], VENDOR_ID_LEN
)) {
8523 rodi
= pci_vpd_find_info_keyword(vpd_data
, i
, block_end
,
8524 PCI_VPD_RO_KEYWORD_VENDOR0
);
8526 len
= pci_vpd_info_field_size(&vpd_data
[rodi
]);
8528 rodi
+= PCI_VPD_INFO_FLD_HDR_SIZE
;
8530 if (len
< 32 && (len
+ rodi
) <= BNX2X_VPD_LEN
) {
8531 memcpy(bp
->fw_ver
, &vpd_data
[rodi
], len
);
8532 bp
->fw_ver
[len
] = ' ';
8541 static int __devinit
bnx2x_init_bp(struct bnx2x
*bp
)
8547 /* Disable interrupt handling until HW is initialized */
8548 atomic_set(&bp
->intr_sem
, 1);
8549 smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
8551 mutex_init(&bp
->port
.phy_mutex
);
8552 mutex_init(&bp
->fw_mb_mutex
);
8553 spin_lock_init(&bp
->stats_lock
);
8555 mutex_init(&bp
->cnic_mutex
);
8558 INIT_DELAYED_WORK(&bp
->sp_task
, bnx2x_sp_task
);
8559 INIT_DELAYED_WORK(&bp
->reset_task
, bnx2x_reset_task
);
8561 rc
= bnx2x_get_hwinfo(bp
);
8564 rc
= bnx2x_alloc_mem_bp(bp
);
8566 bnx2x_read_fwinfo(bp
);
8570 /* need to reset chip if undi was active */
8572 bnx2x_undi_unload(bp
);
8574 if (CHIP_REV_IS_FPGA(bp
))
8575 dev_err(&bp
->pdev
->dev
, "FPGA detected\n");
8577 if (BP_NOMCP(bp
) && (func
== 0))
8578 dev_err(&bp
->pdev
->dev
, "MCP disabled, "
8579 "must load devices in order!\n");
8581 /* Set multi queue mode */
8582 if ((multi_mode
!= ETH_RSS_MODE_DISABLED
) &&
8583 ((int_mode
== INT_MODE_INTx
) || (int_mode
== INT_MODE_MSI
))) {
8584 dev_err(&bp
->pdev
->dev
, "Multi disabled since int_mode "
8585 "requested is not MSI-X\n");
8586 multi_mode
= ETH_RSS_MODE_DISABLED
;
8588 bp
->multi_mode
= multi_mode
;
8589 bp
->int_mode
= int_mode
;
8591 bp
->dev
->features
|= NETIF_F_GRO
;
8595 bp
->flags
&= ~TPA_ENABLE_FLAG
;
8596 bp
->dev
->features
&= ~NETIF_F_LRO
;
8598 bp
->flags
|= TPA_ENABLE_FLAG
;
8599 bp
->dev
->features
|= NETIF_F_LRO
;
8601 bp
->disable_tpa
= disable_tpa
;
8604 bp
->dropless_fc
= 0;
8606 bp
->dropless_fc
= dropless_fc
;
8610 bp
->tx_ring_size
= MAX_TX_AVAIL
;
8614 /* make sure that the numbers are in the right granularity */
8615 bp
->tx_ticks
= (50 / BNX2X_BTR
) * BNX2X_BTR
;
8616 bp
->rx_ticks
= (25 / BNX2X_BTR
) * BNX2X_BTR
;
8618 timer_interval
= (CHIP_REV_IS_SLOW(bp
) ? 5*HZ
: HZ
);
8619 bp
->current_interval
= (poll
? poll
: timer_interval
);
8621 init_timer(&bp
->timer
);
8622 bp
->timer
.expires
= jiffies
+ bp
->current_interval
;
8623 bp
->timer
.data
= (unsigned long) bp
;
8624 bp
->timer
.function
= bnx2x_timer
;
8630 /****************************************************************************
8631 * General service functions
8632 ****************************************************************************/
8634 /* called with rtnl_lock */
8635 static int bnx2x_open(struct net_device
*dev
)
8637 struct bnx2x
*bp
= netdev_priv(dev
);
8639 netif_carrier_off(dev
);
8641 bnx2x_set_power_state(bp
, PCI_D0
);
8643 if (!bnx2x_reset_is_done(bp
)) {
8645 /* Reset MCP mail box sequence if there is on going
8650 /* If it's the first function to load and reset done
8651 * is still not cleared it may mean that. We don't
8652 * check the attention state here because it may have
8653 * already been cleared by a "common" reset but we
8654 * shell proceed with "process kill" anyway.
8656 if ((bnx2x_get_load_cnt(bp
) == 0) &&
8657 bnx2x_trylock_hw_lock(bp
,
8658 HW_LOCK_RESOURCE_RESERVED_08
) &&
8659 (!bnx2x_leader_reset(bp
))) {
8660 DP(NETIF_MSG_HW
, "Recovered in open\n");
8664 bnx2x_set_power_state(bp
, PCI_D3hot
);
8666 printk(KERN_ERR
"%s: Recovery flow hasn't been properly"
8667 " completed yet. Try again later. If u still see this"
8668 " message after a few retries then power cycle is"
8669 " required.\n", bp
->dev
->name
);
8675 bp
->recovery_state
= BNX2X_RECOVERY_DONE
;
8677 return bnx2x_nic_load(bp
, LOAD_OPEN
);
8680 /* called with rtnl_lock */
8681 static int bnx2x_close(struct net_device
*dev
)
8683 struct bnx2x
*bp
= netdev_priv(dev
);
8685 /* Unload the driver, release IRQs */
8686 bnx2x_nic_unload(bp
, UNLOAD_CLOSE
);
8687 bnx2x_set_power_state(bp
, PCI_D3hot
);
8692 /* called with netif_tx_lock from dev_mcast.c */
8693 void bnx2x_set_rx_mode(struct net_device
*dev
)
8695 struct bnx2x
*bp
= netdev_priv(dev
);
8696 u32 rx_mode
= BNX2X_RX_MODE_NORMAL
;
8697 int port
= BP_PORT(bp
);
8699 if (bp
->state
!= BNX2X_STATE_OPEN
) {
8700 DP(NETIF_MSG_IFUP
, "state is %x, returning\n", bp
->state
);
8704 DP(NETIF_MSG_IFUP
, "dev->flags = %x\n", dev
->flags
);
8706 if (dev
->flags
& IFF_PROMISC
)
8707 rx_mode
= BNX2X_RX_MODE_PROMISC
;
8708 else if ((dev
->flags
& IFF_ALLMULTI
) ||
8709 ((netdev_mc_count(dev
) > BNX2X_MAX_MULTICAST
) &&
8711 rx_mode
= BNX2X_RX_MODE_ALLMULTI
;
8712 else { /* some multicasts */
8713 if (CHIP_IS_E1(bp
)) {
8715 * set mc list, do not wait as wait implies sleep
8716 * and set_rx_mode can be invoked from non-sleepable
8719 u8 offset
= (CHIP_REV_IS_SLOW(bp
) ?
8720 BNX2X_MAX_EMUL_MULTI
*(1 + port
) :
8721 BNX2X_MAX_MULTICAST
*(1 + port
));
8723 bnx2x_set_e1_mc_list(bp
, offset
);
8725 /* Accept one or more multicasts */
8726 struct netdev_hw_addr
*ha
;
8727 u32 mc_filter
[MC_HASH_SIZE
];
8728 u32 crc
, bit
, regidx
;
8731 memset(mc_filter
, 0, 4 * MC_HASH_SIZE
);
8733 netdev_for_each_mc_addr(ha
, dev
) {
8734 DP(NETIF_MSG_IFUP
, "Adding mcast MAC: %pM\n",
8737 crc
= crc32c_le(0, bnx2x_mc_addr(ha
),
8739 bit
= (crc
>> 24) & 0xff;
8742 mc_filter
[regidx
] |= (1 << bit
);
8745 for (i
= 0; i
< MC_HASH_SIZE
; i
++)
8746 REG_WR(bp
, MC_HASH_OFFSET(bp
, i
),
8751 bp
->rx_mode
= rx_mode
;
8752 bnx2x_set_storm_rx_mode(bp
);
8755 /* called with rtnl_lock */
8756 static int bnx2x_mdio_read(struct net_device
*netdev
, int prtad
,
8757 int devad
, u16 addr
)
8759 struct bnx2x
*bp
= netdev_priv(netdev
);
8763 DP(NETIF_MSG_LINK
, "mdio_read: prtad 0x%x, devad 0x%x, addr 0x%x\n",
8764 prtad
, devad
, addr
);
8766 /* The HW expects different devad if CL22 is used */
8767 devad
= (devad
== MDIO_DEVAD_NONE
) ? DEFAULT_PHY_DEV_ADDR
: devad
;
8769 bnx2x_acquire_phy_lock(bp
);
8770 rc
= bnx2x_phy_read(&bp
->link_params
, prtad
, devad
, addr
, &value
);
8771 bnx2x_release_phy_lock(bp
);
8772 DP(NETIF_MSG_LINK
, "mdio_read_val 0x%x rc = 0x%x\n", value
, rc
);
8779 /* called with rtnl_lock */
8780 static int bnx2x_mdio_write(struct net_device
*netdev
, int prtad
, int devad
,
8781 u16 addr
, u16 value
)
8783 struct bnx2x
*bp
= netdev_priv(netdev
);
8786 DP(NETIF_MSG_LINK
, "mdio_write: prtad 0x%x, devad 0x%x, addr 0x%x,"
8787 " value 0x%x\n", prtad
, devad
, addr
, value
);
8789 /* The HW expects different devad if CL22 is used */
8790 devad
= (devad
== MDIO_DEVAD_NONE
) ? DEFAULT_PHY_DEV_ADDR
: devad
;
8792 bnx2x_acquire_phy_lock(bp
);
8793 rc
= bnx2x_phy_write(&bp
->link_params
, prtad
, devad
, addr
, value
);
8794 bnx2x_release_phy_lock(bp
);
8798 /* called with rtnl_lock */
8799 static int bnx2x_ioctl(struct net_device
*dev
, struct ifreq
*ifr
, int cmd
)
8801 struct bnx2x
*bp
= netdev_priv(dev
);
8802 struct mii_ioctl_data
*mdio
= if_mii(ifr
);
8804 DP(NETIF_MSG_LINK
, "ioctl: phy id 0x%x, reg 0x%x, val_in 0x%x\n",
8805 mdio
->phy_id
, mdio
->reg_num
, mdio
->val_in
);
8807 if (!netif_running(dev
))
8810 return mdio_mii_ioctl(&bp
->mdio
, mdio
, cmd
);
8813 #ifdef CONFIG_NET_POLL_CONTROLLER
8814 static void poll_bnx2x(struct net_device
*dev
)
8816 struct bnx2x
*bp
= netdev_priv(dev
);
8818 disable_irq(bp
->pdev
->irq
);
8819 bnx2x_interrupt(bp
->pdev
->irq
, dev
);
8820 enable_irq(bp
->pdev
->irq
);
8824 static const struct net_device_ops bnx2x_netdev_ops
= {
8825 .ndo_open
= bnx2x_open
,
8826 .ndo_stop
= bnx2x_close
,
8827 .ndo_start_xmit
= bnx2x_start_xmit
,
8828 .ndo_set_multicast_list
= bnx2x_set_rx_mode
,
8829 .ndo_set_mac_address
= bnx2x_change_mac_addr
,
8830 .ndo_validate_addr
= eth_validate_addr
,
8831 .ndo_do_ioctl
= bnx2x_ioctl
,
8832 .ndo_change_mtu
= bnx2x_change_mtu
,
8833 .ndo_tx_timeout
= bnx2x_tx_timeout
,
8834 #ifdef CONFIG_NET_POLL_CONTROLLER
8835 .ndo_poll_controller
= poll_bnx2x
,
8839 static int __devinit
bnx2x_init_dev(struct pci_dev
*pdev
,
8840 struct net_device
*dev
)
8845 SET_NETDEV_DEV(dev
, &pdev
->dev
);
8846 bp
= netdev_priv(dev
);
8851 bp
->pf_num
= PCI_FUNC(pdev
->devfn
);
8853 rc
= pci_enable_device(pdev
);
8855 dev_err(&bp
->pdev
->dev
,
8856 "Cannot enable PCI device, aborting\n");
8860 if (!(pci_resource_flags(pdev
, 0) & IORESOURCE_MEM
)) {
8861 dev_err(&bp
->pdev
->dev
,
8862 "Cannot find PCI device base address, aborting\n");
8864 goto err_out_disable
;
8867 if (!(pci_resource_flags(pdev
, 2) & IORESOURCE_MEM
)) {
8868 dev_err(&bp
->pdev
->dev
, "Cannot find second PCI device"
8869 " base address, aborting\n");
8871 goto err_out_disable
;
8874 if (atomic_read(&pdev
->enable_cnt
) == 1) {
8875 rc
= pci_request_regions(pdev
, DRV_MODULE_NAME
);
8877 dev_err(&bp
->pdev
->dev
,
8878 "Cannot obtain PCI resources, aborting\n");
8879 goto err_out_disable
;
8882 pci_set_master(pdev
);
8883 pci_save_state(pdev
);
8886 bp
->pm_cap
= pci_find_capability(pdev
, PCI_CAP_ID_PM
);
8887 if (bp
->pm_cap
== 0) {
8888 dev_err(&bp
->pdev
->dev
,
8889 "Cannot find power management capability, aborting\n");
8891 goto err_out_release
;
8894 bp
->pcie_cap
= pci_find_capability(pdev
, PCI_CAP_ID_EXP
);
8895 if (bp
->pcie_cap
== 0) {
8896 dev_err(&bp
->pdev
->dev
,
8897 "Cannot find PCI Express capability, aborting\n");
8899 goto err_out_release
;
8902 if (dma_set_mask(&pdev
->dev
, DMA_BIT_MASK(64)) == 0) {
8903 bp
->flags
|= USING_DAC_FLAG
;
8904 if (dma_set_coherent_mask(&pdev
->dev
, DMA_BIT_MASK(64)) != 0) {
8905 dev_err(&bp
->pdev
->dev
, "dma_set_coherent_mask"
8906 " failed, aborting\n");
8908 goto err_out_release
;
8911 } else if (dma_set_mask(&pdev
->dev
, DMA_BIT_MASK(32)) != 0) {
8912 dev_err(&bp
->pdev
->dev
,
8913 "System does not support DMA, aborting\n");
8915 goto err_out_release
;
8918 dev
->mem_start
= pci_resource_start(pdev
, 0);
8919 dev
->base_addr
= dev
->mem_start
;
8920 dev
->mem_end
= pci_resource_end(pdev
, 0);
8922 dev
->irq
= pdev
->irq
;
8924 bp
->regview
= pci_ioremap_bar(pdev
, 0);
8926 dev_err(&bp
->pdev
->dev
,
8927 "Cannot map register space, aborting\n");
8929 goto err_out_release
;
8932 bp
->doorbells
= ioremap_nocache(pci_resource_start(pdev
, 2),
8933 min_t(u64
, BNX2X_DB_SIZE(bp
),
8934 pci_resource_len(pdev
, 2)));
8935 if (!bp
->doorbells
) {
8936 dev_err(&bp
->pdev
->dev
,
8937 "Cannot map doorbell space, aborting\n");
8942 bnx2x_set_power_state(bp
, PCI_D0
);
8944 /* clean indirect addresses */
8945 pci_write_config_dword(bp
->pdev
, PCICFG_GRC_ADDRESS
,
8946 PCICFG_VENDOR_ID_OFFSET
);
8947 REG_WR(bp
, PXP2_REG_PGL_ADDR_88_F0
+ BP_PORT(bp
)*16, 0);
8948 REG_WR(bp
, PXP2_REG_PGL_ADDR_8C_F0
+ BP_PORT(bp
)*16, 0);
8949 REG_WR(bp
, PXP2_REG_PGL_ADDR_90_F0
+ BP_PORT(bp
)*16, 0);
8950 REG_WR(bp
, PXP2_REG_PGL_ADDR_94_F0
+ BP_PORT(bp
)*16, 0);
8952 /* Reset the load counter */
8953 bnx2x_clear_load_cnt(bp
);
8955 dev
->watchdog_timeo
= TX_TIMEOUT
;
8957 dev
->netdev_ops
= &bnx2x_netdev_ops
;
8958 bnx2x_set_ethtool_ops(dev
);
8959 dev
->features
|= NETIF_F_SG
;
8960 dev
->features
|= NETIF_F_HW_CSUM
;
8961 if (bp
->flags
& USING_DAC_FLAG
)
8962 dev
->features
|= NETIF_F_HIGHDMA
;
8963 dev
->features
|= (NETIF_F_TSO
| NETIF_F_TSO_ECN
);
8964 dev
->features
|= NETIF_F_TSO6
;
8965 dev
->features
|= (NETIF_F_HW_VLAN_TX
| NETIF_F_HW_VLAN_RX
);
8967 dev
->vlan_features
|= NETIF_F_SG
;
8968 dev
->vlan_features
|= NETIF_F_HW_CSUM
;
8969 if (bp
->flags
& USING_DAC_FLAG
)
8970 dev
->vlan_features
|= NETIF_F_HIGHDMA
;
8971 dev
->vlan_features
|= (NETIF_F_TSO
| NETIF_F_TSO_ECN
);
8972 dev
->vlan_features
|= NETIF_F_TSO6
;
8974 /* get_port_hwinfo() will set prtad and mmds properly */
8975 bp
->mdio
.prtad
= MDIO_PRTAD_NONE
;
8977 bp
->mdio
.mode_support
= MDIO_SUPPORTS_C45
| MDIO_EMULATE_C22
;
8979 bp
->mdio
.mdio_read
= bnx2x_mdio_read
;
8980 bp
->mdio
.mdio_write
= bnx2x_mdio_write
;
8986 iounmap(bp
->regview
);
8989 if (bp
->doorbells
) {
8990 iounmap(bp
->doorbells
);
8991 bp
->doorbells
= NULL
;
8995 if (atomic_read(&pdev
->enable_cnt
) == 1)
8996 pci_release_regions(pdev
);
8999 pci_disable_device(pdev
);
9000 pci_set_drvdata(pdev
, NULL
);
9006 static void __devinit
bnx2x_get_pcie_width_speed(struct bnx2x
*bp
,
9007 int *width
, int *speed
)
9009 u32 val
= REG_RD(bp
, PCICFG_OFFSET
+ PCICFG_LINK_CONTROL
);
9011 *width
= (val
& PCICFG_LINK_WIDTH
) >> PCICFG_LINK_WIDTH_SHIFT
;
9013 /* return value of 1=2.5GHz 2=5GHz */
9014 *speed
= (val
& PCICFG_LINK_SPEED
) >> PCICFG_LINK_SPEED_SHIFT
;
9017 static int bnx2x_check_firmware(struct bnx2x
*bp
)
9019 const struct firmware
*firmware
= bp
->firmware
;
9020 struct bnx2x_fw_file_hdr
*fw_hdr
;
9021 struct bnx2x_fw_file_section
*sections
;
9022 u32 offset
, len
, num_ops
;
9027 if (firmware
->size
< sizeof(struct bnx2x_fw_file_hdr
))
9030 fw_hdr
= (struct bnx2x_fw_file_hdr
*)firmware
->data
;
9031 sections
= (struct bnx2x_fw_file_section
*)fw_hdr
;
9033 /* Make sure none of the offsets and sizes make us read beyond
9034 * the end of the firmware data */
9035 for (i
= 0; i
< sizeof(*fw_hdr
) / sizeof(*sections
); i
++) {
9036 offset
= be32_to_cpu(sections
[i
].offset
);
9037 len
= be32_to_cpu(sections
[i
].len
);
9038 if (offset
+ len
> firmware
->size
) {
9039 dev_err(&bp
->pdev
->dev
,
9040 "Section %d length is out of bounds\n", i
);
9045 /* Likewise for the init_ops offsets */
9046 offset
= be32_to_cpu(fw_hdr
->init_ops_offsets
.offset
);
9047 ops_offsets
= (u16
*)(firmware
->data
+ offset
);
9048 num_ops
= be32_to_cpu(fw_hdr
->init_ops
.len
) / sizeof(struct raw_op
);
9050 for (i
= 0; i
< be32_to_cpu(fw_hdr
->init_ops_offsets
.len
) / 2; i
++) {
9051 if (be16_to_cpu(ops_offsets
[i
]) > num_ops
) {
9052 dev_err(&bp
->pdev
->dev
,
9053 "Section offset %d is out of bounds\n", i
);
9058 /* Check FW version */
9059 offset
= be32_to_cpu(fw_hdr
->fw_version
.offset
);
9060 fw_ver
= firmware
->data
+ offset
;
9061 if ((fw_ver
[0] != BCM_5710_FW_MAJOR_VERSION
) ||
9062 (fw_ver
[1] != BCM_5710_FW_MINOR_VERSION
) ||
9063 (fw_ver
[2] != BCM_5710_FW_REVISION_VERSION
) ||
9064 (fw_ver
[3] != BCM_5710_FW_ENGINEERING_VERSION
)) {
9065 dev_err(&bp
->pdev
->dev
,
9066 "Bad FW version:%d.%d.%d.%d. Should be %d.%d.%d.%d\n",
9067 fw_ver
[0], fw_ver
[1], fw_ver
[2],
9068 fw_ver
[3], BCM_5710_FW_MAJOR_VERSION
,
9069 BCM_5710_FW_MINOR_VERSION
,
9070 BCM_5710_FW_REVISION_VERSION
,
9071 BCM_5710_FW_ENGINEERING_VERSION
);
9078 static inline void be32_to_cpu_n(const u8
*_source
, u8
*_target
, u32 n
)
9080 const __be32
*source
= (const __be32
*)_source
;
9081 u32
*target
= (u32
*)_target
;
9084 for (i
= 0; i
< n
/4; i
++)
9085 target
[i
] = be32_to_cpu(source
[i
]);
9089 Ops array is stored in the following format:
9090 {op(8bit), offset(24bit, big endian), data(32bit, big endian)}
9092 static inline void bnx2x_prep_ops(const u8
*_source
, u8
*_target
, u32 n
)
9094 const __be32
*source
= (const __be32
*)_source
;
9095 struct raw_op
*target
= (struct raw_op
*)_target
;
9098 for (i
= 0, j
= 0; i
< n
/8; i
++, j
+= 2) {
9099 tmp
= be32_to_cpu(source
[j
]);
9100 target
[i
].op
= (tmp
>> 24) & 0xff;
9101 target
[i
].offset
= tmp
& 0xffffff;
9102 target
[i
].raw_data
= be32_to_cpu(source
[j
+ 1]);
9107 * IRO array is stored in the following format:
9108 * {base(24bit), m1(16bit), m2(16bit), m3(16bit), size(16bit) }
9110 static inline void bnx2x_prep_iro(const u8
*_source
, u8
*_target
, u32 n
)
9112 const __be32
*source
= (const __be32
*)_source
;
9113 struct iro
*target
= (struct iro
*)_target
;
9116 for (i
= 0, j
= 0; i
< n
/sizeof(struct iro
); i
++) {
9117 target
[i
].base
= be32_to_cpu(source
[j
]);
9119 tmp
= be32_to_cpu(source
[j
]);
9120 target
[i
].m1
= (tmp
>> 16) & 0xffff;
9121 target
[i
].m2
= tmp
& 0xffff;
9123 tmp
= be32_to_cpu(source
[j
]);
9124 target
[i
].m3
= (tmp
>> 16) & 0xffff;
9125 target
[i
].size
= tmp
& 0xffff;
9130 static inline void be16_to_cpu_n(const u8
*_source
, u8
*_target
, u32 n
)
9132 const __be16
*source
= (const __be16
*)_source
;
9133 u16
*target
= (u16
*)_target
;
9136 for (i
= 0; i
< n
/2; i
++)
9137 target
[i
] = be16_to_cpu(source
[i
]);
9140 #define BNX2X_ALLOC_AND_SET(arr, lbl, func) \
9142 u32 len = be32_to_cpu(fw_hdr->arr.len); \
9143 bp->arr = kmalloc(len, GFP_KERNEL); \
9145 pr_err("Failed to allocate %d bytes for "#arr"\n", len); \
9148 func(bp->firmware->data + be32_to_cpu(fw_hdr->arr.offset), \
9149 (u8 *)bp->arr, len); \
9152 int bnx2x_init_firmware(struct bnx2x
*bp
)
9154 const char *fw_file_name
;
9155 struct bnx2x_fw_file_hdr
*fw_hdr
;
9159 fw_file_name
= FW_FILE_NAME_E1
;
9160 else if (CHIP_IS_E1H(bp
))
9161 fw_file_name
= FW_FILE_NAME_E1H
;
9162 else if (CHIP_IS_E2(bp
))
9163 fw_file_name
= FW_FILE_NAME_E2
;
9165 BNX2X_ERR("Unsupported chip revision\n");
9169 BNX2X_DEV_INFO("Loading %s\n", fw_file_name
);
9171 rc
= request_firmware(&bp
->firmware
, fw_file_name
, &bp
->pdev
->dev
);
9173 BNX2X_ERR("Can't load firmware file %s\n", fw_file_name
);
9174 goto request_firmware_exit
;
9177 rc
= bnx2x_check_firmware(bp
);
9179 BNX2X_ERR("Corrupt firmware file %s\n", fw_file_name
);
9180 goto request_firmware_exit
;
9183 fw_hdr
= (struct bnx2x_fw_file_hdr
*)bp
->firmware
->data
;
9185 /* Initialize the pointers to the init arrays */
9187 BNX2X_ALLOC_AND_SET(init_data
, request_firmware_exit
, be32_to_cpu_n
);
9190 BNX2X_ALLOC_AND_SET(init_ops
, init_ops_alloc_err
, bnx2x_prep_ops
);
9193 BNX2X_ALLOC_AND_SET(init_ops_offsets
, init_offsets_alloc_err
,
9196 /* STORMs firmware */
9197 INIT_TSEM_INT_TABLE_DATA(bp
) = bp
->firmware
->data
+
9198 be32_to_cpu(fw_hdr
->tsem_int_table_data
.offset
);
9199 INIT_TSEM_PRAM_DATA(bp
) = bp
->firmware
->data
+
9200 be32_to_cpu(fw_hdr
->tsem_pram_data
.offset
);
9201 INIT_USEM_INT_TABLE_DATA(bp
) = bp
->firmware
->data
+
9202 be32_to_cpu(fw_hdr
->usem_int_table_data
.offset
);
9203 INIT_USEM_PRAM_DATA(bp
) = bp
->firmware
->data
+
9204 be32_to_cpu(fw_hdr
->usem_pram_data
.offset
);
9205 INIT_XSEM_INT_TABLE_DATA(bp
) = bp
->firmware
->data
+
9206 be32_to_cpu(fw_hdr
->xsem_int_table_data
.offset
);
9207 INIT_XSEM_PRAM_DATA(bp
) = bp
->firmware
->data
+
9208 be32_to_cpu(fw_hdr
->xsem_pram_data
.offset
);
9209 INIT_CSEM_INT_TABLE_DATA(bp
) = bp
->firmware
->data
+
9210 be32_to_cpu(fw_hdr
->csem_int_table_data
.offset
);
9211 INIT_CSEM_PRAM_DATA(bp
) = bp
->firmware
->data
+
9212 be32_to_cpu(fw_hdr
->csem_pram_data
.offset
);
9214 BNX2X_ALLOC_AND_SET(iro_arr
, iro_alloc_err
, bnx2x_prep_iro
);
9219 kfree(bp
->init_ops_offsets
);
9220 init_offsets_alloc_err
:
9221 kfree(bp
->init_ops
);
9223 kfree(bp
->init_data
);
9224 request_firmware_exit
:
9225 release_firmware(bp
->firmware
);
9230 static inline int bnx2x_set_qm_cid_count(struct bnx2x
*bp
, int l2_cid_count
)
9232 int cid_count
= L2_FP_COUNT(l2_cid_count
);
9235 cid_count
+= CNIC_CID_MAX
;
9237 return roundup(cid_count
, QM_CID_ROUND
);
9240 static int __devinit
bnx2x_init_one(struct pci_dev
*pdev
,
9241 const struct pci_device_id
*ent
)
9243 struct net_device
*dev
= NULL
;
9245 int pcie_width
, pcie_speed
;
9248 switch (ent
->driver_data
) {
9252 cid_count
= FP_SB_MAX_E1x
;
9257 cid_count
= FP_SB_MAX_E2
;
9261 pr_err("Unknown board_type (%ld), aborting\n",
9266 cid_count
+= CNIC_CONTEXT_USE
;
9268 /* dev zeroed in init_etherdev */
9269 dev
= alloc_etherdev_mq(sizeof(*bp
), cid_count
);
9271 dev_err(&pdev
->dev
, "Cannot allocate net device\n");
9275 bp
= netdev_priv(dev
);
9276 bp
->msg_enable
= debug
;
9278 pci_set_drvdata(pdev
, dev
);
9280 bp
->l2_cid_count
= cid_count
;
9282 rc
= bnx2x_init_dev(pdev
, dev
);
9288 rc
= bnx2x_init_bp(bp
);
9292 /* calc qm_cid_count */
9293 bp
->qm_cid_count
= bnx2x_set_qm_cid_count(bp
, cid_count
);
9295 /* Configure interupt mode: try to enable MSI-X/MSI if
9296 * needed, set bp->num_queues appropriately.
9298 bnx2x_set_int_mode(bp
);
9300 /* Add all NAPI objects */
9301 bnx2x_add_all_napi(bp
);
9303 rc
= register_netdev(dev
);
9305 dev_err(&pdev
->dev
, "Cannot register net device\n");
9309 bnx2x_get_pcie_width_speed(bp
, &pcie_width
, &pcie_speed
);
9311 netdev_info(dev
, "%s (%c%d) PCI-E x%d %s found at mem %lx,"
9312 " IRQ %d, ", board_info
[ent
->driver_data
].name
,
9313 (CHIP_REV(bp
) >> 12) + 'A', (CHIP_METAL(bp
) >> 4),
9315 ((!CHIP_IS_E2(bp
) && pcie_speed
== 2) ||
9316 (CHIP_IS_E2(bp
) && pcie_speed
== 1)) ?
9317 "5GHz (Gen2)" : "2.5GHz",
9318 dev
->base_addr
, bp
->pdev
->irq
);
9319 pr_cont("node addr %pM\n", dev
->dev_addr
);
9325 iounmap(bp
->regview
);
9328 iounmap(bp
->doorbells
);
9332 if (atomic_read(&pdev
->enable_cnt
) == 1)
9333 pci_release_regions(pdev
);
9335 pci_disable_device(pdev
);
9336 pci_set_drvdata(pdev
, NULL
);
9341 static void __devexit
bnx2x_remove_one(struct pci_dev
*pdev
)
9343 struct net_device
*dev
= pci_get_drvdata(pdev
);
9347 dev_err(&pdev
->dev
, "BAD net device from bnx2x_init_one\n");
9350 bp
= netdev_priv(dev
);
9352 unregister_netdev(dev
);
9354 /* Delete all NAPI objects */
9355 bnx2x_del_all_napi(bp
);
9357 /* Disable MSI/MSI-X */
9358 bnx2x_disable_msi(bp
);
9360 /* Make sure RESET task is not scheduled before continuing */
9361 cancel_delayed_work_sync(&bp
->reset_task
);
9364 iounmap(bp
->regview
);
9367 iounmap(bp
->doorbells
);
9369 bnx2x_free_mem_bp(bp
);
9373 if (atomic_read(&pdev
->enable_cnt
) == 1)
9374 pci_release_regions(pdev
);
9376 pci_disable_device(pdev
);
9377 pci_set_drvdata(pdev
, NULL
);
9380 static int bnx2x_eeh_nic_unload(struct bnx2x
*bp
)
9384 bp
->state
= BNX2X_STATE_ERROR
;
9386 bp
->rx_mode
= BNX2X_RX_MODE_NONE
;
9388 bnx2x_netif_stop(bp
, 0);
9389 netif_carrier_off(bp
->dev
);
9391 del_timer_sync(&bp
->timer
);
9392 bp
->stats_state
= STATS_STATE_DISABLED
;
9393 DP(BNX2X_MSG_STATS
, "stats_state - DISABLED\n");
9398 /* Free SKBs, SGEs, TPA pool and driver internals */
9399 bnx2x_free_skbs(bp
);
9401 for_each_queue(bp
, i
)
9402 bnx2x_free_rx_sge_range(bp
, bp
->fp
+ i
, NUM_RX_SGE
);
9406 bp
->state
= BNX2X_STATE_CLOSED
;
9411 static void bnx2x_eeh_recover(struct bnx2x
*bp
)
9415 mutex_init(&bp
->port
.phy_mutex
);
9417 bp
->common
.shmem_base
= REG_RD(bp
, MISC_REG_SHARED_MEM_ADDR
);
9418 bp
->link_params
.shmem_base
= bp
->common
.shmem_base
;
9419 BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp
->common
.shmem_base
);
9421 if (!bp
->common
.shmem_base
||
9422 (bp
->common
.shmem_base
< 0xA0000) ||
9423 (bp
->common
.shmem_base
>= 0xC0000)) {
9424 BNX2X_DEV_INFO("MCP not active\n");
9425 bp
->flags
|= NO_MCP_FLAG
;
9429 val
= SHMEM_RD(bp
, validity_map
[BP_PORT(bp
)]);
9430 if ((val
& (SHR_MEM_VALIDITY_DEV_INFO
| SHR_MEM_VALIDITY_MB
))
9431 != (SHR_MEM_VALIDITY_DEV_INFO
| SHR_MEM_VALIDITY_MB
))
9432 BNX2X_ERR("BAD MCP validity signature\n");
9434 if (!BP_NOMCP(bp
)) {
9436 (SHMEM_RD(bp
, func_mb
[BP_FW_MB_IDX(bp
)].drv_mb_header
) &
9437 DRV_MSG_SEQ_NUMBER_MASK
);
9438 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp
->fw_seq
);
9443 * bnx2x_io_error_detected - called when PCI error is detected
9444 * @pdev: Pointer to PCI device
9445 * @state: The current pci connection state
9447 * This function is called after a PCI bus error affecting
9448 * this device has been detected.
9450 static pci_ers_result_t
bnx2x_io_error_detected(struct pci_dev
*pdev
,
9451 pci_channel_state_t state
)
9453 struct net_device
*dev
= pci_get_drvdata(pdev
);
9454 struct bnx2x
*bp
= netdev_priv(dev
);
9458 netif_device_detach(dev
);
9460 if (state
== pci_channel_io_perm_failure
) {
9462 return PCI_ERS_RESULT_DISCONNECT
;
9465 if (netif_running(dev
))
9466 bnx2x_eeh_nic_unload(bp
);
9468 pci_disable_device(pdev
);
9472 /* Request a slot reset */
9473 return PCI_ERS_RESULT_NEED_RESET
;
9477 * bnx2x_io_slot_reset - called after the PCI bus has been reset
9478 * @pdev: Pointer to PCI device
9480 * Restart the card from scratch, as if from a cold-boot.
9482 static pci_ers_result_t
bnx2x_io_slot_reset(struct pci_dev
*pdev
)
9484 struct net_device
*dev
= pci_get_drvdata(pdev
);
9485 struct bnx2x
*bp
= netdev_priv(dev
);
9489 if (pci_enable_device(pdev
)) {
9491 "Cannot re-enable PCI device after reset\n");
9493 return PCI_ERS_RESULT_DISCONNECT
;
9496 pci_set_master(pdev
);
9497 pci_restore_state(pdev
);
9499 if (netif_running(dev
))
9500 bnx2x_set_power_state(bp
, PCI_D0
);
9504 return PCI_ERS_RESULT_RECOVERED
;
9508 * bnx2x_io_resume - called when traffic can start flowing again
9509 * @pdev: Pointer to PCI device
9511 * This callback is called when the error recovery driver tells us that
9512 * its OK to resume normal operation.
9514 static void bnx2x_io_resume(struct pci_dev
*pdev
)
9516 struct net_device
*dev
= pci_get_drvdata(pdev
);
9517 struct bnx2x
*bp
= netdev_priv(dev
);
9519 if (bp
->recovery_state
!= BNX2X_RECOVERY_DONE
) {
9520 printk(KERN_ERR
"Handling parity error recovery. "
9521 "Try again later\n");
9527 bnx2x_eeh_recover(bp
);
9529 if (netif_running(dev
))
9530 bnx2x_nic_load(bp
, LOAD_NORMAL
);
9532 netif_device_attach(dev
);
9537 static struct pci_error_handlers bnx2x_err_handler
= {
9538 .error_detected
= bnx2x_io_error_detected
,
9539 .slot_reset
= bnx2x_io_slot_reset
,
9540 .resume
= bnx2x_io_resume
,
/* PCI driver descriptor registered in bnx2x_init() */
static struct pci_driver bnx2x_pci_driver = {
	.name        = DRV_MODULE_NAME,
	.id_table    = bnx2x_pci_tbl,
	.probe       = bnx2x_init_one,
	.remove      = __devexit_p(bnx2x_remove_one),
	.suspend     = bnx2x_suspend,
	.resume      = bnx2x_resume,
	.err_handler = &bnx2x_err_handler,
};
/* Module entry point: create the driver's dedicated single-threaded
 * workqueue and register the PCI driver.  Returns 0 or a negative errno;
 * on registration failure the workqueue is torn down again.
 */
static int __init bnx2x_init(void)
{
	int ret;

	pr_info("%s", version);

	bnx2x_wq = create_singlethread_workqueue("bnx2x");
	if (bnx2x_wq == NULL) {
		pr_err("Cannot create workqueue\n");
		return -ENOMEM;
	}

	ret = pci_register_driver(&bnx2x_pci_driver);
	if (ret) {
		pr_err("Cannot register driver\n");
		/* Undo the workqueue allocation on failure */
		destroy_workqueue(bnx2x_wq);
	}

	return ret;
}
/* Module exit point: unregister the PCI driver and destroy the
 * workqueue created in bnx2x_init().
 */
static void __exit bnx2x_cleanup(void)
{
	pci_unregister_driver(&bnx2x_pci_driver);

	destroy_workqueue(bnx2x_wq);
}
/* Module entry/exit hooks */
module_init(bnx2x_init);
module_exit(bnx2x_cleanup);
/* count denotes the number of new completions we have seen */
static void bnx2x_cnic_sp_post(struct bnx2x *bp, int count)
{
	struct eth_spe *spe;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return;
#endif

	spin_lock_bh(&bp->spq_lock);
	BUG_ON(bp->cnic_spq_pending < count);
	/* Retire the completions we were told about */
	bp->cnic_spq_pending -= count;

	/* Drain the CNIC kwqe backlog onto the slow-path queue for as
	 * long as the per-type credit checks below allow.
	 */
	for (; bp->cnic_kwq_pending; bp->cnic_kwq_pending--) {
		u16 type = (le16_to_cpu(bp->cnic_kwq_cons->hdr.type)
				& SPE_HDR_CONN_TYPE) >>
				SPE_HDR_CONN_TYPE_SHIFT;

		/* Set validation for iSCSI L2 client before sending SETUP
		 * ramrod
		 */
		if (type == ETH_CONNECTION_TYPE) {
			u8 cmd = (le32_to_cpu(bp->cnic_kwq_cons->
					      hdr.conn_and_cmd_data) >>
				  SPE_HDR_CMD_ID_SHIFT) & 0xff;

			if (cmd == RAMROD_CMD_ID_ETH_CLIENT_SETUP)
				bnx2x_set_ctx_validation(&bp->context.
					vcxt[BNX2X_ISCSI_ETH_CID].eth,
					HW_CID(bp, BNX2X_ISCSI_ETH_CID));
		}

		/* There may be not more than 8 L2 and COMMON SPEs and not more
		 * than 8 L5 SPEs in the air.
		 */
		if ((type == NONE_CONNECTION_TYPE) ||
		    (type == ETH_CONNECTION_TYPE)) {
			/* L2/COMMON entries consume generic SPQ credit */
			if (!atomic_read(&bp->spq_left))
				break;
			else
				atomic_dec(&bp->spq_left);
		} else if (type == ISCSI_CONNECTION_TYPE) {
			/* L5 entries are bounded by the CNIC kwqe limit */
			if (bp->cnic_spq_pending >=
			    bp->cnic_eth_dev.max_kwqe_pending)
				break;
			else
				bp->cnic_spq_pending++;
		} else {
			BNX2X_ERR("Unknown SPE type: %d\n", type);
			break;
		}

		/* Copy the kwqe into the next slow-path queue element */
		spe = bnx2x_sp_get_next(bp);
		*spe = *bp->cnic_kwq_cons;

		DP(NETIF_MSG_TIMER, "pending on SPQ %d, on KWQ %d count %d\n",
		   bp->cnic_spq_pending, bp->cnic_kwq_pending, count);

		/* Advance the consumer, wrapping the circular kwqe ring */
		if (bp->cnic_kwq_cons == bp->cnic_kwq_last)
			bp->cnic_kwq_cons = bp->cnic_kwq;
		else
			bp->cnic_kwq_cons++;
	}
	bnx2x_sp_prod_update(bp);
	spin_unlock_bh(&bp->spq_lock);
}
/* CNIC callback: queue up to @count 16-byte kwqes onto the driver's
 * circular kwqe ring.  Returns the number actually accepted; the ring
 * caps out at MAX_SP_DESC_CNT pending entries.
 */
static int bnx2x_cnic_sp_queue(struct net_device *dev,
			       struct kwqe_16 *kwqes[], u32 count)
{
	struct bnx2x *bp = netdev_priv(dev);
	int i;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return -EIO;
#endif

	spin_lock_bh(&bp->spq_lock);

	for (i = 0; i < count; i++) {
		struct eth_spe *spe = (struct eth_spe *)kwqes[i];

		/* Ring full - stop and report how many were taken */
		if (bp->cnic_kwq_pending == MAX_SP_DESC_CNT)
			break;

		*bp->cnic_kwq_prod = *spe;

		bp->cnic_kwq_pending++;

		DP(NETIF_MSG_TIMER, "L5 SPQE %x %x %x:%x pos %d\n",
		   spe->hdr.conn_and_cmd_data, spe->hdr.type,
		   spe->data.update_data_addr.hi,
		   spe->data.update_data_addr.lo,
		   bp->cnic_kwq_pending);

		/* Advance the producer, wrapping the circular ring */
		if (bp->cnic_kwq_prod == bp->cnic_kwq_last)
			bp->cnic_kwq_prod = bp->cnic_kwq;
		else
			bp->cnic_kwq_prod++;
	}

	spin_unlock_bh(&bp->spq_lock);

	/* Kick the posting logic if SPQ credit is still available */
	if (bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending)
		bnx2x_cnic_sp_post(bp, 0);

	return i;
}
/* Deliver a control event to the registered CNIC driver (process
 * context; takes cnic_mutex, so it may sleep).  Returns the CNIC
 * handler's result, or 0 when no CNIC driver is registered.
 */
static int bnx2x_cnic_ctl_send(struct bnx2x *bp, struct cnic_ctl_info *ctl)
{
	struct cnic_ops *c_ops;
	int rc = 0;

	mutex_lock(&bp->cnic_mutex);
	c_ops = bp->cnic_ops;
	if (c_ops)
		rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
	mutex_unlock(&bp->cnic_mutex);

	return rc;
}
/* Same as bnx2x_cnic_ctl_send() but safe from BH context: dereferences
 * bp->cnic_ops under RCU instead of taking the (sleeping) cnic_mutex.
 */
static int bnx2x_cnic_ctl_send_bh(struct bnx2x *bp, struct cnic_ctl_info *ctl)
{
	struct cnic_ops *c_ops;
	int rc = 0;

	rcu_read_lock();
	c_ops = rcu_dereference(bp->cnic_ops);
	if (c_ops)
		rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
	rcu_read_unlock();

	return rc;
}
/*
 * for commands that have no data
 */
int bnx2x_cnic_notify(struct bnx2x *bp, int cmd)
{
	/* Zero-filled control block; only the command code is set */
	struct cnic_ctl_info ctl = {0};

	ctl.cmd = cmd;

	return bnx2x_cnic_ctl_send(bp, &ctl);
}
/* Forward a completion for connection @cid to the CNIC driver and then
 * account for it on the slow-path queue.
 */
static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid)
{
	struct cnic_ctl_info ctl;

	/* first we tell CNIC and only then we count this as a completion */
	ctl.cmd = CNIC_CTL_COMPLETION_CMD;
	ctl.data.comp.cid = cid;

	bnx2x_cnic_ctl_send_bh(bp, &ctl);
	bnx2x_cnic_sp_post(bp, 0);
}
/* Dispatch a control request issued by the CNIC driver.  Returns 0 on
 * success or -EINVAL for an unknown command.
 */
static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc = 0;

	switch (ctl->cmd) {
	case DRV_CTL_CTXTBL_WR_CMD: {
		/* Write one context-table (ILT) entry */
		u32 index = ctl->data.io.offset;
		dma_addr_t addr = ctl->data.io.dma_addr;

		bnx2x_ilt_wr(bp, index, addr);
		break;
	}

	case DRV_CTL_RET_L5_SPQ_CREDIT_CMD: {
		/* CNIC returns L5 SPQ credit; drain any kwqe backlog */
		int count = ctl->data.credit.credit_count;

		bnx2x_cnic_sp_post(bp, count);
		break;
	}

	/* rtnl_lock is held.  */
	case DRV_CTL_START_L2_CMD: {
		u32 cli = ctl->data.ring.client_id;

		/* Set iSCSI MAC address */
		bnx2x_set_iscsi_eth_mac_addr(bp, 1);

		mmiowb();
		barrier();

		/* Start accepting on iSCSI L2 ring. Accept all multicasts
		 * because it's the only way for UIO Client to accept
		 * multicasts (in non-promiscuous mode only one Client per
		 * function will receive multicast packets (leading in our
		 * case).
		 */
		bnx2x_rxq_set_mac_filters(bp, cli,
			BNX2X_ACCEPT_UNICAST |
			BNX2X_ACCEPT_BROADCAST |
			BNX2X_ACCEPT_ALL_MULTICAST);
		storm_memset_mac_filters(bp, &bp->mac_filters, BP_FUNC(bp));

		break;
	}

	/* rtnl_lock is held.  */
	case DRV_CTL_STOP_L2_CMD: {
		u32 cli = ctl->data.ring.client_id;

		/* Stop accepting on iSCSI L2 ring */
		bnx2x_rxq_set_mac_filters(bp, cli, BNX2X_ACCEPT_NONE);
		storm_memset_mac_filters(bp, &bp->mac_filters, BP_FUNC(bp));

		mmiowb();
		barrier();

		/* Unset iSCSI L2 MAC */
		bnx2x_set_iscsi_eth_mac_addr(bp, 0);
		break;
	}

	case DRV_CTL_RET_L2_SPQ_CREDIT_CMD: {
		/* CNIC returns L2 SPQ credit; full barriers around the
		 * atomic add keep the update ordered for other readers
		 * of spq_left.
		 */
		int count = ctl->data.credit.credit_count;

		smp_mb__before_atomic_inc();
		atomic_add(count, &bp->spq_left);
		smp_mb__after_atomic_inc();
		break;
	}

	default:
		BNX2X_ERR("unknown command %x\n", ctl->cmd);
		rc = -EINVAL;
	}

	return rc;
}
/* Fill in the CNIC interrupt/status-block information from this PF's
 * current interrupt mode (MSI-X vs INTx) and chip type.
 */
void bnx2x_setup_cnic_irq_info(struct bnx2x *bp)
{
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	if (bp->flags & USING_MSIX_FLAG) {
		cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
		cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
		/* CNIC is given the second MSI-X vector */
		cp->irq_arr[0].vector = bp->msix_table[1].vector;
	} else {
		cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
		cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
	}
	/* The status-block layout differs between E2 and E1x chips */
	if (CHIP_IS_E2(bp))
		cp->irq_arr[0].status_blk = (void *)bp->cnic_sb.e2_sb;
	else
		cp->irq_arr[0].status_blk = (void *)bp->cnic_sb.e1x_sb;

	cp->irq_arr[0].status_blk_num = CNIC_SB_ID(bp);
	cp->irq_arr[0].status_blk_num2 = CNIC_IGU_SB_ID(bp);
	/* Second entry points at the default status block */
	cp->irq_arr[1].status_blk = bp->def_status_blk;
	cp->irq_arr[1].status_blk_num = DEF_SB_ID;
	cp->irq_arr[1].status_blk_num2 = DEF_SB_IGU_ID;

	cp->num_irq = 2;
}
/* CNIC callback: register a CNIC driver with this bnx2x device.
 * Allocates the kwqe ring, fills in the shared irq info and publishes
 * @ops via RCU.  Returns 0 or a negative errno.
 */
static int bnx2x_register_cnic(struct net_device *dev, struct cnic_ops *ops,
			       void *data)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	if (ops == NULL)
		return -EINVAL;

	/* NOTE(review): intr_sem != 0 appears to mean interrupts are
	 * currently masked (e.g. during reset); registration is refused
	 * then - confirm against the intr_sem users elsewhere.
	 */
	if (atomic_read(&bp->intr_sem) != 0)
		return -EBUSY;

	bp->cnic_kwq = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!bp->cnic_kwq)
		return -ENOMEM;

	/* Empty circular ring: cons == prod; last marks the wrap point */
	bp->cnic_kwq_cons = bp->cnic_kwq;
	bp->cnic_kwq_prod = bp->cnic_kwq;
	bp->cnic_kwq_last = bp->cnic_kwq + MAX_SP_DESC_CNT;

	bp->cnic_spq_pending = 0;
	bp->cnic_kwq_pending = 0;

	bp->cnic_data = data;

	cp->num_irq = 0;
	cp->drv_state = CNIC_DRV_STATE_REGD;
	cp->iro_arr = bp->iro_arr;

	bnx2x_setup_cnic_irq_info(bp);

	/* Publish the ops pointer last so RCU readers (see
	 * bnx2x_cnic_ctl_send_bh()) observe fully initialized state.
	 */
	rcu_assign_pointer(bp->cnic_ops, ops);

	return 0;
}
/* CNIC callback: unregister the CNIC driver.  Clears the iSCSI MAC if
 * it was set, unpublishes cnic_ops, and frees the kwqe ring once all
 * RCU readers are done with it.
 */
static int bnx2x_unregister_cnic(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	mutex_lock(&bp->cnic_mutex);
	if (bp->cnic_flags & BNX2X_CNIC_FLAG_MAC_SET) {
		bp->cnic_flags &= ~BNX2X_CNIC_FLAG_MAC_SET;
		bnx2x_set_iscsi_eth_mac_addr(bp, 0);
	}
	cp->drv_state = 0;
	rcu_assign_pointer(bp->cnic_ops, NULL);
	mutex_unlock(&bp->cnic_mutex);
	/* Wait for in-flight RCU readers (bnx2x_cnic_ctl_send_bh) before
	 * freeing resources they might still reference.
	 */
	synchronize_rcu();
	kfree(bp->cnic_kwq);
	bp->cnic_kwq = NULL;

	return 0;
}
/* Probe entry used by the CNIC driver: export this device's hardware
 * resources and driver callbacks through the shared cnic_eth_dev
 * structure and return it.
 */
struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	cp->drv_owner = THIS_MODULE;
	cp->chip_id = CHIP_ID(bp);
	cp->pdev = bp->pdev;
	cp->io_base = bp->regview;
	cp->io_base2 = bp->doorbells;
	/* Matches the 8-SPE in-flight limit enforced in bnx2x_cnic_sp_post() */
	cp->max_kwqe_pending = 8;
	cp->ctx_blk_size = CDU_ILT_PAGE_SZ;
	/* CNIC context lines live right after the L2 CID lines in the ILT */
	cp->ctx_tbl_offset = FUNC_ILT_BASE(BP_FUNC(bp)) +
			     bnx2x_cid_ilt_lines(bp);
	cp->ctx_tbl_len = CNIC_ILT_LINES;
	cp->starting_cid = bnx2x_cid_ilt_lines(bp) * ILT_PAGE_CIDS;
	cp->drv_submit_kwqes_16 = bnx2x_cnic_sp_queue;
	cp->drv_ctl = bnx2x_drv_ctl;
	cp->drv_register_cnic = bnx2x_register_cnic;
	cp->drv_unregister_cnic = bnx2x_unregister_cnic;
	cp->iscsi_l2_client_id = BNX2X_ISCSI_ETH_CL_ID;
	cp->iscsi_l2_cid = BNX2X_ISCSI_ETH_CID;

	DP(BNX2X_MSG_SP, "page_size %d, tbl_offset %d, tbl_lines %d, "
			 "starting cid %d\n",
	   cp->ctx_blk_size,
	   cp->ctx_tbl_offset,
	   cp->ctx_tbl_len,
	   cp->starting_cid);

	return cp;
}
EXPORT_SYMBOL(bnx2x_cnic_probe);
9943 #endif /* BCM_CNIC */