bnx2x: move msix table initialization to probe()
drivers/net/bnx2x/bnx2x_main.c
1 /* bnx2x_main.c: Broadcom Everest network driver.
3 * Copyright (c) 2007-2010 Broadcom Corporation
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
9 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
10 * Written by: Eliezer Tamir
11 * Based on code from Michael Chan's bnx2 driver
12 * UDP CSUM errata workaround by Arik Gendelman
13 * Slowpath and fastpath rework by Vladislav Zolotarov
14 * Statistics and Link management by Yitchak Gertner
18 #include <linux/module.h>
19 #include <linux/moduleparam.h>
20 #include <linux/kernel.h>
21 #include <linux/device.h> /* for dev_info() */
22 #include <linux/timer.h>
23 #include <linux/errno.h>
24 #include <linux/ioport.h>
25 #include <linux/slab.h>
26 #include <linux/interrupt.h>
27 #include <linux/pci.h>
28 #include <linux/init.h>
29 #include <linux/netdevice.h>
30 #include <linux/etherdevice.h>
31 #include <linux/skbuff.h>
32 #include <linux/dma-mapping.h>
33 #include <linux/bitops.h>
34 #include <linux/irq.h>
35 #include <linux/delay.h>
36 #include <asm/byteorder.h>
37 #include <linux/time.h>
38 #include <linux/ethtool.h>
39 #include <linux/mii.h>
40 #include <linux/if_vlan.h>
41 #include <net/ip.h>
42 #include <net/tcp.h>
43 #include <net/checksum.h>
44 #include <net/ip6_checksum.h>
45 #include <linux/workqueue.h>
46 #include <linux/crc32.h>
47 #include <linux/crc32c.h>
48 #include <linux/prefetch.h>
49 #include <linux/zlib.h>
50 #include <linux/io.h>
51 #include <linux/stringify.h>
53 #define BNX2X_MAIN
54 #include "bnx2x.h"
55 #include "bnx2x_init.h"
56 #include "bnx2x_init_ops.h"
57 #include "bnx2x_cmn.h"
60 #include <linux/firmware.h>
61 #include "bnx2x_fw_file_hdr.h"
62 /* FW files */
63 #define FW_FILE_VERSION \
64 __stringify(BCM_5710_FW_MAJOR_VERSION) "." \
65 __stringify(BCM_5710_FW_MINOR_VERSION) "." \
66 __stringify(BCM_5710_FW_REVISION_VERSION) "." \
67 __stringify(BCM_5710_FW_ENGINEERING_VERSION)
68 #define FW_FILE_NAME_E1 "bnx2x/bnx2x-e1-" FW_FILE_VERSION ".fw"
69 #define FW_FILE_NAME_E1H "bnx2x/bnx2x-e1h-" FW_FILE_VERSION ".fw"
70 #define FW_FILE_NAME_E2 "bnx2x/bnx2x-e2-" FW_FILE_VERSION ".fw"
72 /* Time in jiffies before concluding the transmitter is hung */
73 #define TX_TIMEOUT (5*HZ)
75 static char version[] __devinitdata =
76 "Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
77 DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
79 MODULE_AUTHOR("Eliezer Tamir");
80 MODULE_DESCRIPTION("Broadcom NetXtreme II "
81 "BCM57710/57711/57711E/57712/57712E Driver");
82 MODULE_LICENSE("GPL");
83 MODULE_VERSION(DRV_MODULE_VERSION);
84 MODULE_FIRMWARE(FW_FILE_NAME_E1);
85 MODULE_FIRMWARE(FW_FILE_NAME_E1H);
86 MODULE_FIRMWARE(FW_FILE_NAME_E2);
88 static int multi_mode = 1;
89 module_param(multi_mode, int, 0);
90 MODULE_PARM_DESC(multi_mode, " Multi queue mode "
91 "(0 Disable; 1 Enable (default))");
93 int num_queues;
94 module_param(num_queues, int, 0);
95 MODULE_PARM_DESC(num_queues, " Number of queues for multi_mode=1"
96 " (default is as a number of CPUs)");
98 static int disable_tpa;
99 module_param(disable_tpa, int, 0);
100 MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");
102 static int int_mode;
103 module_param(int_mode, int, 0);
104 MODULE_PARM_DESC(int_mode, " Force interrupt mode other than MSI-X "
105 "(1 INT#x; 2 MSI)");
107 static int dropless_fc;
108 module_param(dropless_fc, int, 0);
109 MODULE_PARM_DESC(dropless_fc, " Pause on exhausted host ring");
111 static int poll;
112 module_param(poll, int, 0);
113 MODULE_PARM_DESC(poll, " Use polling (for debug)");
115 static int mrrs = -1;
116 module_param(mrrs, int, 0);
117 MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");
119 static int debug;
120 module_param(debug, int, 0);
121 MODULE_PARM_DESC(debug, " Default debug msglevel");
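/*
 * Illustrative only (not part of the driver): these parameters are given at
 * module load time, e.g. roughly
 *
 *   modprobe bnx2x multi_mode=1 num_queues=4 int_mode=0 disable_tpa=0
 *
 * All of them are registered with permission 0, so no sysfs entries are
 * created for them and they cannot be changed after the module is loaded.
 */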
123 static struct workqueue_struct *bnx2x_wq;
125 enum bnx2x_board_type {
126 BCM57710 = 0,
127 BCM57711 = 1,
128 BCM57711E = 2,
129 BCM57712 = 3,
130 BCM57712E = 4
133 /* indexed by board_type, above */
134 static struct {
135 char *name;
136 } board_info[] __devinitdata = {
137 { "Broadcom NetXtreme II BCM57710 XGb" },
138 { "Broadcom NetXtreme II BCM57711 XGb" },
139 { "Broadcom NetXtreme II BCM57711E XGb" },
140 { "Broadcom NetXtreme II BCM57712 XGb" },
141 { "Broadcom NetXtreme II BCM57712E XGb" }
144 #ifndef PCI_DEVICE_ID_NX2_57712
145 #define PCI_DEVICE_ID_NX2_57712 0x1662
146 #endif
147 #ifndef PCI_DEVICE_ID_NX2_57712E
148 #define PCI_DEVICE_ID_NX2_57712E 0x1663
149 #endif
151 static DEFINE_PCI_DEVICE_TABLE(bnx2x_pci_tbl) = {
152 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57710), BCM57710 },
153 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711), BCM57711 },
154 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711E), BCM57711E },
155 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57712), BCM57712 },
156 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57712E), BCM57712E },
157 { 0 }
160 MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);
162 /****************************************************************************
163 * General service functions
164 ****************************************************************************/
166 static inline void __storm_memset_dma_mapping(struct bnx2x *bp,
167 u32 addr, dma_addr_t mapping)
169 REG_WR(bp, addr, U64_LO(mapping));
170 REG_WR(bp, addr + 4, U64_HI(mapping));
173 static inline void __storm_memset_fill(struct bnx2x *bp,
174 u32 addr, size_t size, u32 val)
176 int i;
177 for (i = 0; i < size/4; i++)
178 REG_WR(bp, addr + (i * 4), val);
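/*
 * The storm_memset_*() helpers below all follow the same pattern: compute an
 * offset inside one of the STORM internal memories (BAR_[XTCU]STRORM_INTMEM)
 * and then write a value, a zeroed area or a whole structure there dword by
 * dword via __storm_memset_fill(), __storm_memset_struct() or
 * __storm_memset_dma_mapping().
 */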
181 static inline void storm_memset_ustats_zero(struct bnx2x *bp,
182 u8 port, u16 stat_id)
184 size_t size = sizeof(struct ustorm_per_client_stats);
186 u32 addr = BAR_USTRORM_INTMEM +
187 USTORM_PER_COUNTER_ID_STATS_OFFSET(port, stat_id);
189 __storm_memset_fill(bp, addr, size, 0);
192 static inline void storm_memset_tstats_zero(struct bnx2x *bp,
193 u8 port, u16 stat_id)
195 size_t size = sizeof(struct tstorm_per_client_stats);
197 u32 addr = BAR_TSTRORM_INTMEM +
198 TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, stat_id);
200 __storm_memset_fill(bp, addr, size, 0);
203 static inline void storm_memset_xstats_zero(struct bnx2x *bp,
204 u8 port, u16 stat_id)
206 size_t size = sizeof(struct xstorm_per_client_stats);
208 u32 addr = BAR_XSTRORM_INTMEM +
209 XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, stat_id);
211 __storm_memset_fill(bp, addr, size, 0);
215 static inline void storm_memset_spq_addr(struct bnx2x *bp,
216 dma_addr_t mapping, u16 abs_fid)
218 u32 addr = XSEM_REG_FAST_MEMORY +
219 XSTORM_SPQ_PAGE_BASE_OFFSET(abs_fid);
221 __storm_memset_dma_mapping(bp, addr, mapping);
224 static inline void storm_memset_ov(struct bnx2x *bp, u16 ov, u16 abs_fid)
226 REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(abs_fid), ov);
229 static inline void storm_memset_func_cfg(struct bnx2x *bp,
230 struct tstorm_eth_function_common_config *tcfg,
231 u16 abs_fid)
233 size_t size = sizeof(struct tstorm_eth_function_common_config);
235 u32 addr = BAR_TSTRORM_INTMEM +
236 TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(abs_fid);
238 __storm_memset_struct(bp, addr, size, (u32 *)tcfg);
241 static inline void storm_memset_xstats_flags(struct bnx2x *bp,
242 struct stats_indication_flags *flags,
243 u16 abs_fid)
245 size_t size = sizeof(struct stats_indication_flags);
247 u32 addr = BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(abs_fid);
249 __storm_memset_struct(bp, addr, size, (u32 *)flags);
252 static inline void storm_memset_tstats_flags(struct bnx2x *bp,
253 struct stats_indication_flags *flags,
254 u16 abs_fid)
256 size_t size = sizeof(struct stats_indication_flags);
258 u32 addr = BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(abs_fid);
260 __storm_memset_struct(bp, addr, size, (u32 *)flags);
263 static inline void storm_memset_ustats_flags(struct bnx2x *bp,
264 struct stats_indication_flags *flags,
265 u16 abs_fid)
267 size_t size = sizeof(struct stats_indication_flags);
269 u32 addr = BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(abs_fid);
271 __storm_memset_struct(bp, addr, size, (u32 *)flags);
274 static inline void storm_memset_cstats_flags(struct bnx2x *bp,
275 struct stats_indication_flags *flags,
276 u16 abs_fid)
278 size_t size = sizeof(struct stats_indication_flags);
280 u32 addr = BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(abs_fid);
282 __storm_memset_struct(bp, addr, size, (u32 *)flags);
285 static inline void storm_memset_xstats_addr(struct bnx2x *bp,
286 dma_addr_t mapping, u16 abs_fid)
288 u32 addr = BAR_XSTRORM_INTMEM +
289 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(abs_fid);
291 __storm_memset_dma_mapping(bp, addr, mapping);
294 static inline void storm_memset_tstats_addr(struct bnx2x *bp,
295 dma_addr_t mapping, u16 abs_fid)
297 u32 addr = BAR_TSTRORM_INTMEM +
298 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(abs_fid);
300 __storm_memset_dma_mapping(bp, addr, mapping);
303 static inline void storm_memset_ustats_addr(struct bnx2x *bp,
304 dma_addr_t mapping, u16 abs_fid)
306 u32 addr = BAR_USTRORM_INTMEM +
307 USTORM_ETH_STATS_QUERY_ADDR_OFFSET(abs_fid);
309 __storm_memset_dma_mapping(bp, addr, mapping);
312 static inline void storm_memset_cstats_addr(struct bnx2x *bp,
313 dma_addr_t mapping, u16 abs_fid)
315 u32 addr = BAR_CSTRORM_INTMEM +
316 CSTORM_ETH_STATS_QUERY_ADDR_OFFSET(abs_fid);
318 __storm_memset_dma_mapping(bp, addr, mapping);
321 static inline void storm_memset_vf_to_pf(struct bnx2x *bp, u16 abs_fid,
322 u16 pf_id)
324 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_VF_TO_PF_OFFSET(abs_fid),
325 pf_id);
326 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_VF_TO_PF_OFFSET(abs_fid),
327 pf_id);
328 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_VF_TO_PF_OFFSET(abs_fid),
329 pf_id);
330 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_VF_TO_PF_OFFSET(abs_fid),
331 pf_id);
334 static inline void storm_memset_func_en(struct bnx2x *bp, u16 abs_fid,
335 u8 enable)
337 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(abs_fid),
338 enable);
339 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(abs_fid),
340 enable);
341 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(abs_fid),
342 enable);
343 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(abs_fid),
344 enable);
347 static inline void storm_memset_eq_data(struct bnx2x *bp,
348 struct event_ring_data *eq_data,
349 u16 pfid)
351 size_t size = sizeof(struct event_ring_data);
353 u32 addr = BAR_CSTRORM_INTMEM + CSTORM_EVENT_RING_DATA_OFFSET(pfid);
355 __storm_memset_struct(bp, addr, size, (u32 *)eq_data);
358 static inline void storm_memset_eq_prod(struct bnx2x *bp, u16 eq_prod,
359 u16 pfid)
361 u32 addr = BAR_CSTRORM_INTMEM + CSTORM_EVENT_RING_PROD_OFFSET(pfid);
362 REG_WR16(bp, addr, eq_prod);
365 static inline void storm_memset_hc_timeout(struct bnx2x *bp, u8 port,
366 u16 fw_sb_id, u8 sb_index,
367 u8 ticks)
370 int index_offset = CHIP_IS_E2(bp) ?
371 offsetof(struct hc_status_block_data_e2, index_data) :
372 offsetof(struct hc_status_block_data_e1x, index_data);
373 u32 addr = BAR_CSTRORM_INTMEM +
374 CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id) +
375 index_offset +
376 sizeof(struct hc_index_data)*sb_index +
377 offsetof(struct hc_index_data, timeout);
378 REG_WR8(bp, addr, ticks);
379 DP(NETIF_MSG_HW, "port %x fw_sb_id %d sb_index %d ticks %d\n",
380 port, fw_sb_id, sb_index, ticks);
382 static inline void storm_memset_hc_disable(struct bnx2x *bp, u8 port,
383 u16 fw_sb_id, u8 sb_index,
384 u8 disable)
386 u32 enable_flag = disable ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT);
387 int index_offset = CHIP_IS_E2(bp) ?
388 offsetof(struct hc_status_block_data_e2, index_data) :
389 offsetof(struct hc_status_block_data_e1x, index_data);
390 u32 addr = BAR_CSTRORM_INTMEM +
391 CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id) +
392 index_offset +
393 sizeof(struct hc_index_data)*sb_index +
394 offsetof(struct hc_index_data, flags);
395 u16 flags = REG_RD16(bp, addr);
396 /* clear and set */
397 flags &= ~HC_INDEX_DATA_HC_ENABLED;
398 flags |= enable_flag;
399 REG_WR16(bp, addr, flags);
400 DP(NETIF_MSG_HW, "port %x fw_sb_id %d sb_index %d disable %d\n",
401 port, fw_sb_id, sb_index, disable);
404 /* used only at init
405 * locking is done by mcp
407 void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
409 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
410 pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
411 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
412 PCICFG_VENDOR_ID_OFFSET);
415 static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
417 u32 val;
419 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
420 pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
421 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
422 PCICFG_VENDOR_ID_OFFSET);
424 return val;
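/*
 * The two helpers above implement indirect GRC register access through PCI
 * config space: the GRC address goes into PCICFG_GRC_ADDRESS, the data is
 * then written to or read from PCICFG_GRC_DATA, and PCICFG_GRC_ADDRESS is
 * restored to PCICFG_VENDOR_ID_OFFSET so a stray config cycle does not hit a
 * device register.  A rough read-modify-write sketch (placeholder names):
 *
 *   u32 val = bnx2x_reg_rd_ind(bp, some_grc_addr);
 *   bnx2x_reg_wr_ind(bp, some_grc_addr, val | some_bit);
 */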
427 #define DMAE_DP_SRC_GRC "grc src_addr [%08x]"
428 #define DMAE_DP_SRC_PCI "pci src_addr [%x:%08x]"
429 #define DMAE_DP_DST_GRC "grc dst_addr [%08x]"
430 #define DMAE_DP_DST_PCI "pci dst_addr [%x:%08x]"
431 #define DMAE_DP_DST_NONE "dst_addr [none]"
433 void bnx2x_dp_dmae(struct bnx2x *bp, struct dmae_command *dmae, int msglvl)
435 u32 src_type = dmae->opcode & DMAE_COMMAND_SRC;
437 switch (dmae->opcode & DMAE_COMMAND_DST) {
438 case DMAE_CMD_DST_PCI:
439 if (src_type == DMAE_CMD_SRC_PCI)
440 DP(msglvl, "DMAE: opcode 0x%08x\n"
441 "src [%x:%08x], len [%d*4], dst [%x:%08x]\n"
442 "comp_addr [%x:%08x], comp_val 0x%08x\n",
443 dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
444 dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo,
445 dmae->comp_addr_hi, dmae->comp_addr_lo,
446 dmae->comp_val);
447 else
448 DP(msglvl, "DMAE: opcode 0x%08x\n"
449 "src [%08x], len [%d*4], dst [%x:%08x]\n"
450 "comp_addr [%x:%08x], comp_val 0x%08x\n",
451 dmae->opcode, dmae->src_addr_lo >> 2,
452 dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo,
453 dmae->comp_addr_hi, dmae->comp_addr_lo,
454 dmae->comp_val);
455 break;
456 case DMAE_CMD_DST_GRC:
457 if (src_type == DMAE_CMD_SRC_PCI)
458 DP(msglvl, "DMAE: opcode 0x%08x\n"
459 "src [%x:%08x], len [%d*4], dst_addr [%08x]\n"
460 "comp_addr [%x:%08x], comp_val 0x%08x\n",
461 dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
462 dmae->len, dmae->dst_addr_lo >> 2,
463 dmae->comp_addr_hi, dmae->comp_addr_lo,
464 dmae->comp_val);
465 else
466 DP(msglvl, "DMAE: opcode 0x%08x\n"
467 "src [%08x], len [%d*4], dst [%08x]\n"
468 "comp_addr [%x:%08x], comp_val 0x%08x\n",
469 dmae->opcode, dmae->src_addr_lo >> 2,
470 dmae->len, dmae->dst_addr_lo >> 2,
471 dmae->comp_addr_hi, dmae->comp_addr_lo,
472 dmae->comp_val);
473 break;
474 default:
475 if (src_type == DMAE_CMD_SRC_PCI)
476 DP(msglvl, "DMAE: opcode 0x%08x\n"
477 DP_LEVEL "src_addr [%x:%08x] len [%d * 4] "
478 "dst_addr [none]\n"
479 DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
480 dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
481 dmae->len, dmae->comp_addr_hi, dmae->comp_addr_lo,
482 dmae->comp_val);
483 else
484 DP(msglvl, "DMAE: opcode 0x%08x\n"
485 DP_LEVEL "src_addr [%08x] len [%d * 4] "
486 "dst_addr [none]\n"
487 DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
488 dmae->opcode, dmae->src_addr_lo >> 2,
489 dmae->len, dmae->comp_addr_hi, dmae->comp_addr_lo,
490 dmae->comp_val);
491 break;
496 const u32 dmae_reg_go_c[] = {
497 DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
498 DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
499 DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
500 DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
503 /* copy command into DMAE command memory and set DMAE command go */
504 void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae, int idx)
506 u32 cmd_offset;
507 int i;
509 cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
510 for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
511 REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));
513 DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
514 idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
516 REG_WR(bp, dmae_reg_go_c[idx], 1);
519 u32 bnx2x_dmae_opcode_add_comp(u32 opcode, u8 comp_type)
521 return opcode | ((comp_type << DMAE_COMMAND_C_DST_SHIFT) |
522 DMAE_CMD_C_ENABLE);
525 u32 bnx2x_dmae_opcode_clr_src_reset(u32 opcode)
527 return opcode & ~DMAE_CMD_SRC_RESET;
530 u32 bnx2x_dmae_opcode(struct bnx2x *bp, u8 src_type, u8 dst_type,
531 bool with_comp, u8 comp_type)
533 u32 opcode = 0;
535 opcode |= ((src_type << DMAE_COMMAND_SRC_SHIFT) |
536 (dst_type << DMAE_COMMAND_DST_SHIFT));
538 opcode |= (DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET);
540 opcode |= (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0);
541 opcode |= ((BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT) |
542 (BP_E1HVN(bp) << DMAE_COMMAND_DST_VN_SHIFT));
543 opcode |= (DMAE_COM_SET_ERR << DMAE_COMMAND_ERR_POLICY_SHIFT);
545 #ifdef __BIG_ENDIAN
546 opcode |= DMAE_CMD_ENDIANITY_B_DW_SWAP;
547 #else
548 opcode |= DMAE_CMD_ENDIANITY_DW_SWAP;
549 #endif
550 if (with_comp)
551 opcode = bnx2x_dmae_opcode_add_comp(opcode, comp_type);
552 return opcode;
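/*
 * bnx2x_dmae_opcode() packs the DMAE command word from the fields set above:
 * source/destination type, port and VN identification, error policy and
 * endianity, plus an optional completion type added by
 * bnx2x_dmae_opcode_add_comp().  For example, a PCI->GRC opcode that
 * completes to PCI memory is built (roughly) as
 *
 *   opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_PCI, DMAE_DST_GRC,
 *                              true, DMAE_COMP_PCI);
 *
 * which is what bnx2x_prep_dmae_with_comp() below does for the write path.
 */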
555 void bnx2x_prep_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae,
556 u8 src_type, u8 dst_type)
558 memset(dmae, 0, sizeof(struct dmae_command));
560 /* set the opcode */
561 dmae->opcode = bnx2x_dmae_opcode(bp, src_type, dst_type,
562 true, DMAE_COMP_PCI);
564 /* fill in the completion parameters */
565 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
566 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
567 dmae->comp_val = DMAE_COMP_VAL;
570 /* issue a DMAE command over the init channel and wait for completion */
571 int bnx2x_issue_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae)
573 u32 *wb_comp = bnx2x_sp(bp, wb_comp);
574 int cnt = CHIP_REV_IS_SLOW(bp) ? (400000) : 40;
575 int rc = 0;
577 DP(BNX2X_MSG_OFF, "data before [0x%08x 0x%08x 0x%08x 0x%08x]\n",
578 bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
579 bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
581 /* lock the dmae channel */
582 mutex_lock(&bp->dmae_mutex);
584 /* reset completion */
585 *wb_comp = 0;
587 /* post the command on the channel used for initializations */
588 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
590 /* wait for completion */
591 udelay(5);
592 while ((*wb_comp & ~DMAE_PCI_ERR_FLAG) != DMAE_COMP_VAL) {
593 DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);
595 if (!cnt) {
596 BNX2X_ERR("DMAE timeout!\n");
597 rc = DMAE_TIMEOUT;
598 goto unlock;
600 cnt--;
601 udelay(50);
603 if (*wb_comp & DMAE_PCI_ERR_FLAG) {
604 BNX2X_ERR("DMAE PCI error!\n");
605 rc = DMAE_PCI_ERROR;
608 DP(BNX2X_MSG_OFF, "data after [0x%08x 0x%08x 0x%08x 0x%08x]\n",
609 bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
610 bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
612 unlock:
613 mutex_unlock(&bp->dmae_mutex);
614 return rc;
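/*
 * Typical DMAE usage in this file (see bnx2x_write_dmae()/bnx2x_read_dmae()
 * below): prepare the command with bnx2x_prep_dmae_with_comp(), fill in the
 * source/destination addresses and the length in dwords, then post it with
 * bnx2x_issue_dmae_with_comp(), which serializes on dmae_mutex and polls the
 * wb_comp word until DMAE_COMP_VAL shows up or the loop times out.
 */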
617 void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
618 u32 len32)
620 struct dmae_command dmae;
622 if (!bp->dmae_ready) {
623 u32 *data = bnx2x_sp(bp, wb_data[0]);
625 DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x len32 %d)"
626 " using indirect\n", dst_addr, len32);
627 bnx2x_init_ind_wr(bp, dst_addr, data, len32);
628 return;
631 /* set opcode and fixed command fields */
632 bnx2x_prep_dmae_with_comp(bp, &dmae, DMAE_SRC_PCI, DMAE_DST_GRC);
634 /* fill in addresses and len */
635 dmae.src_addr_lo = U64_LO(dma_addr);
636 dmae.src_addr_hi = U64_HI(dma_addr);
637 dmae.dst_addr_lo = dst_addr >> 2;
638 dmae.dst_addr_hi = 0;
639 dmae.len = len32;
641 bnx2x_dp_dmae(bp, &dmae, BNX2X_MSG_OFF);
643 /* issue the command and wait for completion */
644 bnx2x_issue_dmae_with_comp(bp, &dmae);
647 void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
649 struct dmae_command dmae;
651 if (!bp->dmae_ready) {
652 u32 *data = bnx2x_sp(bp, wb_data[0]);
653 int i;
655 DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x len32 %d)"
656 " using indirect\n", src_addr, len32);
657 for (i = 0; i < len32; i++)
658 data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
659 return;
662 /* set opcode and fixed command fields */
663 bnx2x_prep_dmae_with_comp(bp, &dmae, DMAE_SRC_GRC, DMAE_DST_PCI);
665 /* fill in addresses and len */
666 dmae.src_addr_lo = src_addr >> 2;
667 dmae.src_addr_hi = 0;
668 dmae.dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
669 dmae.dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
670 dmae.len = len32;
672 bnx2x_dp_dmae(bp, &dmae, BNX2X_MSG_OFF);
674 /* issue the command and wait for completion */
675 bnx2x_issue_dmae_with_comp(bp, &dmae);
678 void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
679 u32 addr, u32 len)
681 int dmae_wr_max = DMAE_LEN32_WR_MAX(bp);
682 int offset = 0;
684 while (len > dmae_wr_max) {
685 bnx2x_write_dmae(bp, phys_addr + offset,
686 addr + offset, dmae_wr_max);
687 offset += dmae_wr_max * 4;
688 len -= dmae_wr_max;
691 bnx2x_write_dmae(bp, phys_addr + offset, addr + offset, len);
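/*
 * bnx2x_write_dmae_phys_len() just splits a long write into chunks of at most
 * DMAE_LEN32_WR_MAX dwords.  With illustrative numbers only: if the maximum
 * were 0x2000 dwords, a 0x5000-dword write would be issued as
 * 0x2000 + 0x2000 + 0x1000, the byte offset advancing by dmae_wr_max * 4
 * after each chunk.
 */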
694 /* used only for slowpath so not inlined */
695 static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
697 u32 wb_write[2];
699 wb_write[0] = val_hi;
700 wb_write[1] = val_lo;
701 REG_WR_DMAE(bp, reg, wb_write, 2);
704 #ifdef USE_WB_RD
705 static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
707 u32 wb_data[2];
709 REG_RD_DMAE(bp, reg, wb_data, 2);
711 return HILO_U64(wb_data[0], wb_data[1]);
713 #endif
715 static int bnx2x_mc_assert(struct bnx2x *bp)
717 char last_idx;
718 int i, rc = 0;
719 u32 row0, row1, row2, row3;
721 /* XSTORM */
722 last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
723 XSTORM_ASSERT_LIST_INDEX_OFFSET);
724 if (last_idx)
725 BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
727 /* print the asserts */
728 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
730 row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
731 XSTORM_ASSERT_LIST_OFFSET(i));
732 row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
733 XSTORM_ASSERT_LIST_OFFSET(i) + 4);
734 row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
735 XSTORM_ASSERT_LIST_OFFSET(i) + 8);
736 row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
737 XSTORM_ASSERT_LIST_OFFSET(i) + 12);
739 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
740 BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
741 " 0x%08x 0x%08x 0x%08x\n",
742 i, row3, row2, row1, row0);
743 rc++;
744 } else {
745 break;
749 /* TSTORM */
750 last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
751 TSTORM_ASSERT_LIST_INDEX_OFFSET);
752 if (last_idx)
753 BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
755 /* print the asserts */
756 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
758 row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
759 TSTORM_ASSERT_LIST_OFFSET(i));
760 row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
761 TSTORM_ASSERT_LIST_OFFSET(i) + 4);
762 row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
763 TSTORM_ASSERT_LIST_OFFSET(i) + 8);
764 row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
765 TSTORM_ASSERT_LIST_OFFSET(i) + 12);
767 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
768 BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
769 " 0x%08x 0x%08x 0x%08x\n",
770 i, row3, row2, row1, row0);
771 rc++;
772 } else {
773 break;
777 /* CSTORM */
778 last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
779 CSTORM_ASSERT_LIST_INDEX_OFFSET);
780 if (last_idx)
781 BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
783 /* print the asserts */
784 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
786 row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
787 CSTORM_ASSERT_LIST_OFFSET(i));
788 row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
789 CSTORM_ASSERT_LIST_OFFSET(i) + 4);
790 row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
791 CSTORM_ASSERT_LIST_OFFSET(i) + 8);
792 row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
793 CSTORM_ASSERT_LIST_OFFSET(i) + 12);
795 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
796 BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
797 " 0x%08x 0x%08x 0x%08x\n",
798 i, row3, row2, row1, row0);
799 rc++;
800 } else {
801 break;
805 /* USTORM */
806 last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
807 USTORM_ASSERT_LIST_INDEX_OFFSET);
808 if (last_idx)
809 BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
811 /* print the asserts */
812 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
814 row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
815 USTORM_ASSERT_LIST_OFFSET(i));
816 row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
817 USTORM_ASSERT_LIST_OFFSET(i) + 4);
818 row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
819 USTORM_ASSERT_LIST_OFFSET(i) + 8);
820 row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
821 USTORM_ASSERT_LIST_OFFSET(i) + 12);
823 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
824 BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
825 " 0x%08x 0x%08x 0x%08x\n",
826 i, row3, row2, row1, row0);
827 rc++;
828 } else {
829 break;
833 return rc;
836 static void bnx2x_fw_dump(struct bnx2x *bp)
838 u32 addr;
839 u32 mark, offset;
840 __be32 data[9];
841 int word;
842 u32 trace_shmem_base;
843 if (BP_NOMCP(bp)) {
844 BNX2X_ERR("NO MCP - can not dump\n");
845 return;
848 if (BP_PATH(bp) == 0)
849 trace_shmem_base = bp->common.shmem_base;
850 else
851 trace_shmem_base = SHMEM2_RD(bp, other_shmem_base_addr);
852 addr = trace_shmem_base - 0x0800 + 4;
853 mark = REG_RD(bp, addr);
854 mark = (CHIP_IS_E1x(bp) ? MCP_REG_MCPR_SCRATCH : MCP_A_REG_MCPR_SCRATCH)
855 + ((mark + 0x3) & ~0x3) - 0x08000000;
856 pr_err("begin fw dump (mark 0x%x)\n", mark);
858 pr_err("");
859 for (offset = mark; offset <= trace_shmem_base; offset += 0x8*4) {
860 for (word = 0; word < 8; word++)
861 data[word] = htonl(REG_RD(bp, offset + 4*word));
862 data[8] = 0x0;
863 pr_cont("%s", (char *)data);
865 for (offset = addr + 4; offset <= mark; offset += 0x8*4) {
866 for (word = 0; word < 8; word++)
867 data[word] = htonl(REG_RD(bp, offset + 4*word));
868 data[8] = 0x0;
869 pr_cont("%s", (char *)data);
871 pr_err("end of fw dump\n");
874 void bnx2x_panic_dump(struct bnx2x *bp)
876 int i;
877 u16 j;
878 struct hc_sp_status_block_data sp_sb_data;
879 int func = BP_FUNC(bp);
880 #ifdef BNX2X_STOP_ON_ERROR
881 u16 start = 0, end = 0;
882 #endif
884 bp->stats_state = STATS_STATE_DISABLED;
885 DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
887 BNX2X_ERR("begin crash dump -----------------\n");
889 /* Indices */
890 /* Common */
891 BNX2X_ERR("def_idx(0x%x) def_att_idx(0x%x) attn_state(0x%x)"
892 " spq_prod_idx(0x%x)\n",
893 bp->def_idx, bp->def_att_idx,
894 bp->attn_state, bp->spq_prod_idx);
895 BNX2X_ERR("DSB: attn bits(0x%x) ack(0x%x) id(0x%x) idx(0x%x)\n",
896 bp->def_status_blk->atten_status_block.attn_bits,
897 bp->def_status_blk->atten_status_block.attn_bits_ack,
898 bp->def_status_blk->atten_status_block.status_block_id,
899 bp->def_status_blk->atten_status_block.attn_bits_index);
900 BNX2X_ERR(" def (");
901 for (i = 0; i < HC_SP_SB_MAX_INDICES; i++)
902 pr_cont("0x%x%s",
903 bp->def_status_blk->sp_sb.index_values[i],
904 (i == HC_SP_SB_MAX_INDICES - 1) ? ") " : " ");
906 for (i = 0; i < sizeof(struct hc_sp_status_block_data)/sizeof(u32); i++)
907 *((u32 *)&sp_sb_data + i) = REG_RD(bp, BAR_CSTRORM_INTMEM +
908 CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func) +
909 i*sizeof(u32));
911 pr_cont("igu_sb_id(0x%x) igu_seg_id (0x%x) "
912 "pf_id(0x%x) vnic_id(0x%x) "
913 "vf_id(0x%x) vf_valid (0x%x)\n",
914 sp_sb_data.igu_sb_id,
915 sp_sb_data.igu_seg_id,
916 sp_sb_data.p_func.pf_id,
917 sp_sb_data.p_func.vnic_id,
918 sp_sb_data.p_func.vf_id,
919 sp_sb_data.p_func.vf_valid);
922 for_each_queue(bp, i) {
923 struct bnx2x_fastpath *fp = &bp->fp[i];
924 int loop;
925 struct hc_status_block_data_e2 sb_data_e2;
926 struct hc_status_block_data_e1x sb_data_e1x;
927 struct hc_status_block_sm *hc_sm_p =
928 CHIP_IS_E2(bp) ?
929 sb_data_e2.common.state_machine :
930 sb_data_e1x.common.state_machine;
931 struct hc_index_data *hc_index_p =
932 CHIP_IS_E2(bp) ?
933 sb_data_e2.index_data :
934 sb_data_e1x.index_data;
935 int data_size;
936 u32 *sb_data_p;
938 /* Rx */
939 BNX2X_ERR("fp%d: rx_bd_prod(0x%x) rx_bd_cons(0x%x)"
940 " rx_comp_prod(0x%x)"
941 " rx_comp_cons(0x%x) *rx_cons_sb(0x%x)\n",
942 i, fp->rx_bd_prod, fp->rx_bd_cons,
943 fp->rx_comp_prod,
944 fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
945 BNX2X_ERR(" rx_sge_prod(0x%x) last_max_sge(0x%x)"
946 " fp_hc_idx(0x%x)\n",
947 fp->rx_sge_prod, fp->last_max_sge,
948 le16_to_cpu(fp->fp_hc_idx));
950 /* Tx */
951 BNX2X_ERR("fp%d: tx_pkt_prod(0x%x) tx_pkt_cons(0x%x)"
952 " tx_bd_prod(0x%x) tx_bd_cons(0x%x)"
953 " *tx_cons_sb(0x%x)\n",
954 i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
955 fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
957 loop = CHIP_IS_E2(bp) ?
958 HC_SB_MAX_INDICES_E2 : HC_SB_MAX_INDICES_E1X;
960 /* host sb data */
962 BNX2X_ERR(" run indexes (");
963 for (j = 0; j < HC_SB_MAX_SM; j++)
964 pr_cont("0x%x%s",
965 fp->sb_running_index[j],
966 (j == HC_SB_MAX_SM - 1) ? ")" : " ");
968 BNX2X_ERR(" indexes (");
969 for (j = 0; j < loop; j++)
970 pr_cont("0x%x%s",
971 fp->sb_index_values[j],
972 (j == loop - 1) ? ")" : " ");
973 /* fw sb data */
974 data_size = CHIP_IS_E2(bp) ?
975 sizeof(struct hc_status_block_data_e2) :
976 sizeof(struct hc_status_block_data_e1x);
977 data_size /= sizeof(u32);
978 sb_data_p = CHIP_IS_E2(bp) ?
979 (u32 *)&sb_data_e2 :
980 (u32 *)&sb_data_e1x;
981 /* copy sb data in here */
982 for (j = 0; j < data_size; j++)
983 *(sb_data_p + j) = REG_RD(bp, BAR_CSTRORM_INTMEM +
984 CSTORM_STATUS_BLOCK_DATA_OFFSET(fp->fw_sb_id) +
985 j * sizeof(u32));
987 if (CHIP_IS_E2(bp)) {
988 pr_cont("pf_id(0x%x) vf_id (0x%x) vf_valid(0x%x) "
989 "vnic_id(0x%x) same_igu_sb_1b(0x%x)\n",
990 sb_data_e2.common.p_func.pf_id,
991 sb_data_e2.common.p_func.vf_id,
992 sb_data_e2.common.p_func.vf_valid,
993 sb_data_e2.common.p_func.vnic_id,
994 sb_data_e2.common.same_igu_sb_1b);
995 } else {
996 pr_cont("pf_id(0x%x) vf_id (0x%x) vf_valid(0x%x) "
997 "vnic_id(0x%x) same_igu_sb_1b(0x%x)\n",
998 sb_data_e1x.common.p_func.pf_id,
999 sb_data_e1x.common.p_func.vf_id,
1000 sb_data_e1x.common.p_func.vf_valid,
1001 sb_data_e1x.common.p_func.vnic_id,
1002 sb_data_e1x.common.same_igu_sb_1b);
1005 /* SB_SMs data */
1006 for (j = 0; j < HC_SB_MAX_SM; j++) {
1007 pr_cont("SM[%d] __flags (0x%x) "
1008 "igu_sb_id (0x%x) igu_seg_id(0x%x) "
1009 "time_to_expire (0x%x) "
1010 "timer_value(0x%x)\n", j,
1011 hc_sm_p[j].__flags,
1012 hc_sm_p[j].igu_sb_id,
1013 hc_sm_p[j].igu_seg_id,
1014 hc_sm_p[j].time_to_expire,
1015 hc_sm_p[j].timer_value);
1018 /* Indices data */
1019 for (j = 0; j < loop; j++) {
1020 pr_cont("INDEX[%d] flags (0x%x) "
1021 "timeout (0x%x)\n", j,
1022 hc_index_p[j].flags,
1023 hc_index_p[j].timeout);
1027 #ifdef BNX2X_STOP_ON_ERROR
1028 /* Rings */
1029 /* Rx */
1030 for_each_queue(bp, i) {
1031 struct bnx2x_fastpath *fp = &bp->fp[i];
1033 start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
1034 end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
1035 for (j = start; j != end; j = RX_BD(j + 1)) {
1036 u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
1037 struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];
1039 BNX2X_ERR("fp%d: rx_bd[%x]=[%x:%x] sw_bd=[%p]\n",
1040 i, j, rx_bd[1], rx_bd[0], sw_bd->skb);
1043 start = RX_SGE(fp->rx_sge_prod);
1044 end = RX_SGE(fp->last_max_sge);
1045 for (j = start; j != end; j = RX_SGE(j + 1)) {
1046 u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
1047 struct sw_rx_page *sw_page = &fp->rx_page_ring[j];
1049 BNX2X_ERR("fp%d: rx_sge[%x]=[%x:%x] sw_page=[%p]\n",
1050 i, j, rx_sge[1], rx_sge[0], sw_page->page);
1053 start = RCQ_BD(fp->rx_comp_cons - 10);
1054 end = RCQ_BD(fp->rx_comp_cons + 503);
1055 for (j = start; j != end; j = RCQ_BD(j + 1)) {
1056 u32 *cqe = (u32 *)&fp->rx_comp_ring[j];
1058 BNX2X_ERR("fp%d: cqe[%x]=[%x:%x:%x:%x]\n",
1059 i, j, cqe[0], cqe[1], cqe[2], cqe[3]);
1063 /* Tx */
1064 for_each_queue(bp, i) {
1065 struct bnx2x_fastpath *fp = &bp->fp[i];
1067 start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
1068 end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
1069 for (j = start; j != end; j = TX_BD(j + 1)) {
1070 struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];
1072 BNX2X_ERR("fp%d: packet[%x]=[%p,%x]\n",
1073 i, j, sw_bd->skb, sw_bd->first_bd);
1076 start = TX_BD(fp->tx_bd_cons - 10);
1077 end = TX_BD(fp->tx_bd_cons + 254);
1078 for (j = start; j != end; j = TX_BD(j + 1)) {
1079 u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];
1081 BNX2X_ERR("fp%d: tx_bd[%x]=[%x:%x:%x:%x]\n",
1082 i, j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
1085 #endif
1086 bnx2x_fw_dump(bp);
1087 bnx2x_mc_assert(bp);
1088 BNX2X_ERR("end crash dump -----------------\n");
1091 static void bnx2x_hc_int_enable(struct bnx2x *bp)
1093 int port = BP_PORT(bp);
1094 u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
1095 u32 val = REG_RD(bp, addr);
1096 int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
1097 int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;
1099 if (msix) {
1100 val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
1101 HC_CONFIG_0_REG_INT_LINE_EN_0);
1102 val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
1103 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
1104 } else if (msi) {
1105 val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
1106 val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
1107 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
1108 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
1109 } else {
1110 val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
1111 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
1112 HC_CONFIG_0_REG_INT_LINE_EN_0 |
1113 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
1115 DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
1116 val, port, addr);
1118 REG_WR(bp, addr, val);
1120 val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
1123 DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x) mode %s\n",
1124 val, port, addr, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));
1126 REG_WR(bp, addr, val);
1128 * Ensure that HC_CONFIG is written before leading/trailing edge config
1130 mmiowb();
1131 barrier();
1133 if (!CHIP_IS_E1(bp)) {
1134 /* init leading/trailing edge */
1135 if (IS_MF(bp)) {
1136 val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
1137 if (bp->port.pmf)
1138 /* enable nig and gpio3 attention */
1139 val |= 0x1100;
1140 } else
1141 val = 0xffff;
1143 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
1144 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
1147 /* Make sure that interrupts are indeed enabled from here on */
1148 mmiowb();
1151 static void bnx2x_igu_int_enable(struct bnx2x *bp)
1153 u32 val;
1154 int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
1155 int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;
1157 val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);
1159 if (msix) {
1160 val &= ~(IGU_PF_CONF_INT_LINE_EN |
1161 IGU_PF_CONF_SINGLE_ISR_EN);
1162 val |= (IGU_PF_CONF_FUNC_EN |
1163 IGU_PF_CONF_MSI_MSIX_EN |
1164 IGU_PF_CONF_ATTN_BIT_EN);
1165 } else if (msi) {
1166 val &= ~IGU_PF_CONF_INT_LINE_EN;
1167 val |= (IGU_PF_CONF_FUNC_EN |
1168 IGU_PF_CONF_MSI_MSIX_EN |
1169 IGU_PF_CONF_ATTN_BIT_EN |
1170 IGU_PF_CONF_SINGLE_ISR_EN);
1171 } else {
1172 val &= ~IGU_PF_CONF_MSI_MSIX_EN;
1173 val |= (IGU_PF_CONF_FUNC_EN |
1174 IGU_PF_CONF_INT_LINE_EN |
1175 IGU_PF_CONF_ATTN_BIT_EN |
1176 IGU_PF_CONF_SINGLE_ISR_EN);
1179 DP(NETIF_MSG_INTR, "write 0x%x to IGU mode %s\n",
1180 val, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));
1182 REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
1184 barrier();
1186 /* init leading/trailing edge */
1187 if (IS_MF(bp)) {
1188 val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
1189 if (bp->port.pmf)
1190 /* enable nig and gpio3 attention */
1191 val |= 0x1100;
1192 } else
1193 val = 0xffff;
1195 REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, val);
1196 REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, val);
1198 /* Make sure that interrupts are indeed enabled from here on */
1199 mmiowb();
1202 void bnx2x_int_enable(struct bnx2x *bp)
1204 if (bp->common.int_block == INT_BLOCK_HC)
1205 bnx2x_hc_int_enable(bp);
1206 else
1207 bnx2x_igu_int_enable(bp);
1210 static void bnx2x_hc_int_disable(struct bnx2x *bp)
1212 int port = BP_PORT(bp);
1213 u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
1214 u32 val = REG_RD(bp, addr);
1216 val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
1217 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
1218 HC_CONFIG_0_REG_INT_LINE_EN_0 |
1219 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
1221 DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
1222 val, port, addr);
1224 /* flush all outstanding writes */
1225 mmiowb();
1227 REG_WR(bp, addr, val);
1228 if (REG_RD(bp, addr) != val)
1229 BNX2X_ERR("BUG! proper val not read from IGU!\n");
1232 static void bnx2x_igu_int_disable(struct bnx2x *bp)
1234 u32 val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);
1236 val &= ~(IGU_PF_CONF_MSI_MSIX_EN |
1237 IGU_PF_CONF_INT_LINE_EN |
1238 IGU_PF_CONF_ATTN_BIT_EN);
1240 DP(NETIF_MSG_INTR, "write %x to IGU\n", val);
1242 /* flush all outstanding writes */
1243 mmiowb();
1245 REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
1246 if (REG_RD(bp, IGU_REG_PF_CONFIGURATION) != val)
1247 BNX2X_ERR("BUG! proper val not read from IGU!\n");
1250 void bnx2x_int_disable(struct bnx2x *bp)
1252 if (bp->common.int_block == INT_BLOCK_HC)
1253 bnx2x_hc_int_disable(bp);
1254 else
1255 bnx2x_igu_int_disable(bp);
1258 void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
1260 int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
1261 int i, offset;
1263 /* disable interrupt handling */
1264 atomic_inc(&bp->intr_sem);
1265 smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
1267 if (disable_hw)
1268 /* prevent the HW from sending interrupts */
1269 bnx2x_int_disable(bp);
1271 /* make sure all ISRs are done */
1272 if (msix) {
1273 synchronize_irq(bp->msix_table[0].vector);
1274 offset = 1;
1275 #ifdef BCM_CNIC
1276 offset++;
1277 #endif
1278 for_each_queue(bp, i)
1279 synchronize_irq(bp->msix_table[i + offset].vector);
1280 } else
1281 synchronize_irq(bp->pdev->irq);
1283 /* make sure sp_task is not running */
1284 cancel_delayed_work(&bp->sp_task);
1285 flush_workqueue(bnx2x_wq);
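/*
 * bnx2x_int_disable_sync() is the "quiesce interrupts" path: it bumps
 * intr_sem so the ISRs bail out early, optionally masks the HW (HC or IGU),
 * waits for any ISR still running on another CPU via synchronize_irq() - one
 * vector per fastpath queue in MSI-X mode, the single line otherwise - and
 * finally makes sure the slowpath work is not left running on bnx2x_wq.
 */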
1288 /* fast path */
1291 * General service functions
1294 /* Return true if the lock was successfully acquired */
1295 static bool bnx2x_trylock_hw_lock(struct bnx2x *bp, u32 resource)
1297 u32 lock_status;
1298 u32 resource_bit = (1 << resource);
1299 int func = BP_FUNC(bp);
1300 u32 hw_lock_control_reg;
1302 DP(NETIF_MSG_HW, "Trying to take a lock on resource %d\n", resource);
1304 /* Validating that the resource is within range */
1305 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1306 DP(NETIF_MSG_HW,
1307 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1308 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1309 return false;
1312 if (func <= 5)
1313 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1314 else
1315 hw_lock_control_reg =
1316 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1318 /* Try to acquire the lock */
1319 REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
1320 lock_status = REG_RD(bp, hw_lock_control_reg);
1321 if (lock_status & resource_bit)
1322 return true;
1324 DP(NETIF_MSG_HW, "Failed to get a lock on resource %d\n", resource);
1325 return false;
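/*
 * The HW lock is a per-function resource bitmask in the
 * MISC_REG_DRIVER_CONTROL_* registers: writing the resource bit to the "+ 4"
 * offset tries to take the lock, and reading the control register back tells
 * whether the bit actually stuck.  bnx2x_trylock_hw_lock() makes a single
 * attempt, bnx2x_acquire_hw_lock() further down retries for about 5 seconds,
 * and bnx2x_release_hw_lock() writes the bit to the control register itself
 * to drop it.
 */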
1329 #ifdef BCM_CNIC
1330 static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid);
1331 #endif
1333 void bnx2x_sp_event(struct bnx2x_fastpath *fp,
1334 union eth_rx_cqe *rr_cqe)
1336 struct bnx2x *bp = fp->bp;
1337 int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
1338 int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);
1340 DP(BNX2X_MSG_SP,
1341 "fp %d cid %d got ramrod #%d state is %x type is %d\n",
1342 fp->index, cid, command, bp->state,
1343 rr_cqe->ramrod_cqe.ramrod_type);
1345 switch (command | fp->state) {
1346 case (RAMROD_CMD_ID_ETH_CLIENT_SETUP | BNX2X_FP_STATE_OPENING):
1347 DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n", cid);
1348 fp->state = BNX2X_FP_STATE_OPEN;
1349 break;
1351 case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
1352 DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n", cid);
1353 fp->state = BNX2X_FP_STATE_HALTED;
1354 break;
1356 case (RAMROD_CMD_ID_ETH_TERMINATE | BNX2X_FP_STATE_TERMINATING):
1357 DP(NETIF_MSG_IFDOWN, "got MULTI[%d] terminate ramrod\n", cid);
1358 fp->state = BNX2X_FP_STATE_TERMINATED;
1359 break;
1361 default:
1362 BNX2X_ERR("unexpected MC reply (%d) "
1363 "fp[%d] state is %x\n",
1364 command, fp->index, fp->state);
1365 break;
1368 smp_mb__before_atomic_inc();
1369 atomic_inc(&bp->spq_left);
1370 /* push the change in fp->state and towards the memory */
1371 smp_wmb();
1373 return;
1376 irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
1378 struct bnx2x *bp = netdev_priv(dev_instance);
1379 u16 status = bnx2x_ack_int(bp);
1380 u16 mask;
1381 int i;
1383 /* Return here if interrupt is shared and it's not for us */
1384 if (unlikely(status == 0)) {
1385 DP(NETIF_MSG_INTR, "not our interrupt!\n");
1386 return IRQ_NONE;
1388 DP(NETIF_MSG_INTR, "got an interrupt status 0x%x\n", status);
1390 /* Return here if interrupt is disabled */
1391 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1392 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1393 return IRQ_HANDLED;
1396 #ifdef BNX2X_STOP_ON_ERROR
1397 if (unlikely(bp->panic))
1398 return IRQ_HANDLED;
1399 #endif
1401 for_each_queue(bp, i) {
1402 struct bnx2x_fastpath *fp = &bp->fp[i];
1404 mask = 0x2 << (fp->index + CNIC_CONTEXT_USE);
1405 if (status & mask) {
1406 /* Handle Rx and Tx according to SB id */
1407 prefetch(fp->rx_cons_sb);
1408 prefetch(fp->tx_cons_sb);
1409 prefetch(&fp->sb_running_index[SM_RX_ID]);
1410 napi_schedule(&bnx2x_fp(bp, fp->index, napi));
1411 status &= ~mask;
1415 #ifdef BCM_CNIC
1416 mask = 0x2;
1417 if (status & (mask | 0x1)) {
1418 struct cnic_ops *c_ops = NULL;
1420 rcu_read_lock();
1421 c_ops = rcu_dereference(bp->cnic_ops);
1422 if (c_ops)
1423 c_ops->cnic_handler(bp->cnic_data, NULL);
1424 rcu_read_unlock();
1426 status &= ~mask;
1428 #endif
1430 if (unlikely(status & 0x1)) {
1431 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
1433 status &= ~0x1;
1434 if (!status)
1435 return IRQ_HANDLED;
1438 if (unlikely(status))
1439 DP(NETIF_MSG_INTR, "got an unknown interrupt! (status 0x%x)\n",
1440 status);
1442 return IRQ_HANDLED;
1445 /* end of fast path */
1448 /* Link */
1451 * General service functions
1454 int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
1456 u32 lock_status;
1457 u32 resource_bit = (1 << resource);
1458 int func = BP_FUNC(bp);
1459 u32 hw_lock_control_reg;
1460 int cnt;
1462 /* Validating that the resource is within range */
1463 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1464 DP(NETIF_MSG_HW,
1465 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1466 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1467 return -EINVAL;
1470 if (func <= 5) {
1471 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1472 } else {
1473 hw_lock_control_reg =
1474 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1477 /* Validating that the resource is not already taken */
1478 lock_status = REG_RD(bp, hw_lock_control_reg);
1479 if (lock_status & resource_bit) {
1480 DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
1481 lock_status, resource_bit);
1482 return -EEXIST;
1485 /* Try for 5 seconds, every 5ms */
1486 for (cnt = 0; cnt < 1000; cnt++) {
1487 /* Try to acquire the lock */
1488 REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
1489 lock_status = REG_RD(bp, hw_lock_control_reg);
1490 if (lock_status & resource_bit)
1491 return 0;
1493 msleep(5);
1495 DP(NETIF_MSG_HW, "Timeout\n");
1496 return -EAGAIN;
1499 int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
1501 u32 lock_status;
1502 u32 resource_bit = (1 << resource);
1503 int func = BP_FUNC(bp);
1504 u32 hw_lock_control_reg;
1506 DP(NETIF_MSG_HW, "Releasing a lock on resource %d\n", resource);
1508 /* Validating that the resource is within range */
1509 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1510 DP(NETIF_MSG_HW,
1511 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1512 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1513 return -EINVAL;
1516 if (func <= 5) {
1517 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1518 } else {
1519 hw_lock_control_reg =
1520 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1523 /* Validating that the resource is currently taken */
1524 lock_status = REG_RD(bp, hw_lock_control_reg);
1525 if (!(lock_status & resource_bit)) {
1526 DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
1527 lock_status, resource_bit);
1528 return -EFAULT;
1531 REG_WR(bp, hw_lock_control_reg, resource_bit);
1532 return 0;
1536 int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
1538 /* The GPIO should be swapped if swap register is set and active */
1539 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1540 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1541 int gpio_shift = gpio_num +
1542 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1543 u32 gpio_mask = (1 << gpio_shift);
1544 u32 gpio_reg;
1545 int value;
1547 if (gpio_num > MISC_REGISTERS_GPIO_3) {
1548 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1549 return -EINVAL;
1552 /* read GPIO value */
1553 gpio_reg = REG_RD(bp, MISC_REG_GPIO);
1555 /* get the requested pin value */
1556 if ((gpio_reg & gpio_mask) == gpio_mask)
1557 value = 1;
1558 else
1559 value = 0;
1561 DP(NETIF_MSG_LINK, "pin %d value 0x%x\n", gpio_num, value);
1563 return value;
1566 int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
1568 /* The GPIO should be swapped if swap register is set and active */
1569 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1570 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1571 int gpio_shift = gpio_num +
1572 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1573 u32 gpio_mask = (1 << gpio_shift);
1574 u32 gpio_reg;
1576 if (gpio_num > MISC_REGISTERS_GPIO_3) {
1577 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1578 return -EINVAL;
1581 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1582 /* read GPIO and mask except the float bits */
1583 gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
1585 switch (mode) {
1586 case MISC_REGISTERS_GPIO_OUTPUT_LOW:
1587 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
1588 gpio_num, gpio_shift);
1589 /* clear FLOAT and set CLR */
1590 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1591 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
1592 break;
1594 case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
1595 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
1596 gpio_num, gpio_shift);
1597 /* clear FLOAT and set SET */
1598 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1599 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
1600 break;
1602 case MISC_REGISTERS_GPIO_INPUT_HI_Z:
1603 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
1604 gpio_num, gpio_shift);
1605 /* set FLOAT */
1606 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1607 break;
1609 default:
1610 break;
1613 REG_WR(bp, MISC_REG_GPIO, gpio_reg);
1614 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1616 return 0;
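/*
 * Illustrative only: driving a pin through the helper above, e.g.
 *
 *   bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
 *                  MISC_REGISTERS_GPIO_OUTPUT_HIGH, BP_PORT(bp));
 *
 * The helper accounts for the port swap, takes HW_LOCK_RESOURCE_GPIO around
 * the read-modify-write of MISC_REG_GPIO, and clears the FLOAT bit whenever
 * the pin is driven as an output.
 */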
1619 int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
1621 /* The GPIO should be swapped if swap register is set and active */
1622 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1623 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1624 int gpio_shift = gpio_num +
1625 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1626 u32 gpio_mask = (1 << gpio_shift);
1627 u32 gpio_reg;
1629 if (gpio_num > MISC_REGISTERS_GPIO_3) {
1630 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1631 return -EINVAL;
1634 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1635 /* read GPIO int */
1636 gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);
1638 switch (mode) {
1639 case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
1640 DP(NETIF_MSG_LINK, "Clear GPIO INT %d (shift %d) -> "
1641 "output low\n", gpio_num, gpio_shift);
1642 /* clear SET and set CLR */
1643 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
1644 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
1645 break;
1647 case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
1648 DP(NETIF_MSG_LINK, "Set GPIO INT %d (shift %d) -> "
1649 "output high\n", gpio_num, gpio_shift);
1650 /* clear CLR and set SET */
1651 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
1652 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
1653 break;
1655 default:
1656 break;
1659 REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
1660 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1662 return 0;
1665 static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
1667 u32 spio_mask = (1 << spio_num);
1668 u32 spio_reg;
1670 if ((spio_num < MISC_REGISTERS_SPIO_4) ||
1671 (spio_num > MISC_REGISTERS_SPIO_7)) {
1672 BNX2X_ERR("Invalid SPIO %d\n", spio_num);
1673 return -EINVAL;
1676 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
1677 /* read SPIO and mask except the float bits */
1678 spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);
1680 switch (mode) {
1681 case MISC_REGISTERS_SPIO_OUTPUT_LOW:
1682 DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
1683 /* clear FLOAT and set CLR */
1684 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1685 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
1686 break;
1688 case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
1689 DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
1690 /* clear FLOAT and set SET */
1691 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1692 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
1693 break;
1695 case MISC_REGISTERS_SPIO_INPUT_HI_Z:
1696 DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
1697 /* set FLOAT */
1698 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1699 break;
1701 default:
1702 break;
1705 REG_WR(bp, MISC_REG_SPIO, spio_reg);
1706 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
1708 return 0;
1711 int bnx2x_get_link_cfg_idx(struct bnx2x *bp)
1713 u32 sel_phy_idx = 0;
1714 if (bp->link_vars.link_up) {
1715 sel_phy_idx = EXT_PHY1;
1716 /* In case link is SERDES, check if the EXT_PHY2 is the one */
1717 if ((bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) &&
1718 (bp->link_params.phy[EXT_PHY2].supported & SUPPORTED_FIBRE))
1719 sel_phy_idx = EXT_PHY2;
1720 } else {
1722 switch (bnx2x_phy_selection(&bp->link_params)) {
1723 case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
1724 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY:
1725 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
1726 sel_phy_idx = EXT_PHY1;
1727 break;
1728 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY:
1729 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
1730 sel_phy_idx = EXT_PHY2;
1731 break;
1735 * The selected active PHY is always after swapping (in case PHY
1736 * swapping is enabled). So when swapping is enabled, we need to reverse
1737 * the configuration
1740 if (bp->link_params.multi_phy_config &
1741 PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
1742 if (sel_phy_idx == EXT_PHY1)
1743 sel_phy_idx = EXT_PHY2;
1744 else if (sel_phy_idx == EXT_PHY2)
1745 sel_phy_idx = EXT_PHY1;
1747 return LINK_CONFIG_IDX(sel_phy_idx);
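/*
 * bnx2x_get_link_cfg_idx() picks which external PHY the current link
 * configuration refers to: when the link is up it follows the actual link
 * (a SERDES link with a fibre-capable EXT_PHY2 selects EXT_PHY2), otherwise
 * it follows the PHY selection from the port HW configuration; in both cases
 * the result is inverted if PHY swapping is enabled in multi_phy_config.
 */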
1750 void bnx2x_calc_fc_adv(struct bnx2x *bp)
1752 u8 cfg_idx = bnx2x_get_link_cfg_idx(bp);
1753 switch (bp->link_vars.ieee_fc &
1754 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
1755 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
1756 bp->port.advertising[cfg_idx] &= ~(ADVERTISED_Asym_Pause |
1757 ADVERTISED_Pause);
1758 break;
1760 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
1761 bp->port.advertising[cfg_idx] |= (ADVERTISED_Asym_Pause |
1762 ADVERTISED_Pause);
1763 break;
1765 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
1766 bp->port.advertising[cfg_idx] |= ADVERTISED_Asym_Pause;
1767 break;
1769 default:
1770 bp->port.advertising[cfg_idx] &= ~(ADVERTISED_Asym_Pause |
1771 ADVERTISED_Pause);
1772 break;
1777 u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
1779 if (!BP_NOMCP(bp)) {
1780 u8 rc;
1781 int cfx_idx = bnx2x_get_link_cfg_idx(bp);
1782 u16 req_line_speed = bp->link_params.req_line_speed[cfx_idx];
1783 /* Initialize link parameters structure variables */
1784 /* It is recommended to turn off RX FC for jumbo frames
1785 for better performance */
1786 if ((CHIP_IS_E1x(bp)) && (bp->dev->mtu > 5000))
1787 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
1788 else
1789 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
1791 bnx2x_acquire_phy_lock(bp);
1793 if (load_mode == LOAD_DIAG) {
1794 bp->link_params.loopback_mode = LOOPBACK_XGXS;
1795 bp->link_params.req_line_speed[cfx_idx] = SPEED_10000;
1798 rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
1800 bnx2x_release_phy_lock(bp);
1802 bnx2x_calc_fc_adv(bp);
1804 if (CHIP_REV_IS_SLOW(bp) && bp->link_vars.link_up) {
1805 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
1806 bnx2x_link_report(bp);
1808 bp->link_params.req_line_speed[cfx_idx] = req_line_speed;
1809 return rc;
1811 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
1812 return -EINVAL;
1815 void bnx2x_link_set(struct bnx2x *bp)
1817 if (!BP_NOMCP(bp)) {
1818 bnx2x_acquire_phy_lock(bp);
1819 bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
1820 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
1821 bnx2x_release_phy_lock(bp);
1823 bnx2x_calc_fc_adv(bp);
1824 } else
1825 BNX2X_ERR("Bootcode is missing - can not set link\n");
1828 static void bnx2x__link_reset(struct bnx2x *bp)
1830 if (!BP_NOMCP(bp)) {
1831 bnx2x_acquire_phy_lock(bp);
1832 bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
1833 bnx2x_release_phy_lock(bp);
1834 } else
1835 BNX2X_ERR("Bootcode is missing - can not reset link\n");
1838 u8 bnx2x_link_test(struct bnx2x *bp, u8 is_serdes)
1840 u8 rc = 0;
1842 if (!BP_NOMCP(bp)) {
1843 bnx2x_acquire_phy_lock(bp);
1844 rc = bnx2x_test_link(&bp->link_params, &bp->link_vars,
1845 is_serdes);
1846 bnx2x_release_phy_lock(bp);
1847 } else
1848 BNX2X_ERR("Bootcode is missing - can not test link\n");
1850 return rc;
1853 static void bnx2x_init_port_minmax(struct bnx2x *bp)
1855 u32 r_param = bp->link_vars.line_speed / 8;
1856 u32 fair_periodic_timeout_usec;
1857 u32 t_fair;
1859 memset(&(bp->cmng.rs_vars), 0,
1860 sizeof(struct rate_shaping_vars_per_port));
1861 memset(&(bp->cmng.fair_vars), 0, sizeof(struct fairness_vars_per_port));
1863 /* 100 usec in SDM ticks = 25 since each tick is 4 usec */
1864 bp->cmng.rs_vars.rs_periodic_timeout = RS_PERIODIC_TIMEOUT_USEC / 4;
1866 /* this is the threshold below which no timer arming will occur
1867 the 1.25 coefficient is there to make the threshold a little bigger
1868 than the real time, to compensate for timer inaccuracy */
1869 bp->cmng.rs_vars.rs_threshold =
1870 (RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;
1872 /* resolution of fairness timer */
1873 fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
1874 /* for 10G it is 1000usec. for 1G it is 10000usec. */
1875 t_fair = T_FAIR_COEF / bp->link_vars.line_speed;
1877 /* this is the threshold below which we won't arm the timer anymore */
1878 bp->cmng.fair_vars.fair_threshold = QM_ARB_BYTES;
1880 /* we multiply by 1e3/8 to get bytes/msec.
1881 We don't want the credits to pass a credit
1882 of the t_fair*FAIR_MEM (algorithm resolution) */
1883 bp->cmng.fair_vars.upper_bound = r_param * t_fair * FAIR_MEM;
1884 /* since each tick is 4 usec */
1885 bp->cmng.fair_vars.fairness_timeout = fair_periodic_timeout_usec / 4;
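/*
 * A rough worked example for the values above (assuming a 10G link, i.e.
 * line_speed = 10000 Mbps, so r_param = 1250 bytes/usec): the rate-shaping
 * threshold becomes RS_PERIODIC_TIMEOUT_USEC * 1250 * 5 / 4 bytes, the
 * fairness resolution is QM_ARB_BYTES / 1250 usec, and t_fair is
 * T_FAIR_COEF / 10000, matching the "for 10G it is 1000usec" comment above.
 */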
1888 /* Calculates the sum of vn_min_rates.
1889 It's needed for further normalizing of the min_rates.
1890 Returns:
1891 sum of vn_min_rates.
1893 0 - if all the min_rates are 0.
1894 In the latter case the fairness algorithm should be deactivated.
1895 If not all min_rates are zero then those that are zeroes will be set to 1.
1897 static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
1899 int all_zero = 1;
1900 int vn;
1902 bp->vn_weight_sum = 0;
1903 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
1904 u32 vn_cfg = bp->mf_config[vn];
1905 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
1906 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
1908 /* Skip hidden vns */
1909 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
1910 continue;
1912 /* If min rate is zero - set it to 1 */
1913 if (!vn_min_rate)
1914 vn_min_rate = DEF_MIN_RATE;
1915 else
1916 all_zero = 0;
1918 bp->vn_weight_sum += vn_min_rate;
1921 /* ... only if all min rates are zeros - disable fairness */
1922 if (all_zero) {
1923 bp->cmng.flags.cmng_enables &=
1924 ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
1925 DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
1926 " fairness will be disabled\n");
1927 } else
1928 bp->cmng.flags.cmng_enables |=
1929 CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
1932 static void bnx2x_init_vn_minmax(struct bnx2x *bp, int vn)
1934 struct rate_shaping_vars_per_vn m_rs_vn;
1935 struct fairness_vars_per_vn m_fair_vn;
1936 u32 vn_cfg = bp->mf_config[vn];
1937 int func = 2*vn + BP_PORT(bp);
1938 u16 vn_min_rate, vn_max_rate;
1939 int i;
1941 /* If function is hidden - set min and max to zeroes */
1942 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
1943 vn_min_rate = 0;
1944 vn_max_rate = 0;
1946 } else {
1947 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
1948 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
1949 /* If min rate is zero - set it to 1 */
1950 if (bp->vn_weight_sum && (vn_min_rate == 0))
1951 vn_min_rate = DEF_MIN_RATE;
1952 vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
1953 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
1955 DP(NETIF_MSG_IFUP,
1956 "func %d: vn_min_rate %d vn_max_rate %d vn_weight_sum %d\n",
1957 func, vn_min_rate, vn_max_rate, bp->vn_weight_sum);
1959 memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
1960 memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));
1962 /* global vn counter - maximal Mbps for this vn */
1963 m_rs_vn.vn_counter.rate = vn_max_rate;
1965 /* quota - number of bytes transmitted in this period */
1966 m_rs_vn.vn_counter.quota =
1967 (vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;
1969 if (bp->vn_weight_sum) {
1970 /* credit for each period of the fairness algorithm:
1971 number of bytes in T_FAIR (the vns share the port rate).
1972 vn_weight_sum should not be larger than 10000, thus
1973 T_FAIR_COEF / (8 * vn_weight_sum) will always be greater
1974 than zero */
1975 m_fair_vn.vn_credit_delta =
1976 max_t(u32, (vn_min_rate * (T_FAIR_COEF /
1977 (8 * bp->vn_weight_sum))),
1978 (bp->cmng.fair_vars.fair_threshold * 2));
1979 DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta %d\n",
1980 m_fair_vn.vn_credit_delta);
1983 /* Store it to internal memory */
1984 for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
1985 REG_WR(bp, BAR_XSTRORM_INTMEM +
1986 XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
1987 ((u32 *)(&m_rs_vn))[i]);
1989 for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
1990 REG_WR(bp, BAR_XSTRORM_INTMEM +
1991 XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
1992 ((u32 *)(&m_fair_vn))[i]);
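/* Editor's note - illustrative example (not part of the driver).
 * For a vnic whose MAX_BW field is 100 (i.e. vn_max_rate = 10000 Mbps)
 * and RS_PERIODIC_TIMEOUT_USEC = 100, the quota written above is
 *
 *   quota = 10000 * 100 / 8 = 125000 bytes per rate-shaping period,
 *
 * exactly the amount of data a 10 Gbps stream transmits in 100 usec.
 * vn_credit_delta is clamped from below to twice the fairness
 * threshold so a vnic with a small min rate still receives enough
 * credit per period to make forward progress.
 */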
1994 static int bnx2x_get_cmng_fns_mode(struct bnx2x *bp)
1996 if (CHIP_REV_IS_SLOW(bp))
1997 return CMNG_FNS_NONE;
1998 if (IS_MF(bp))
1999 return CMNG_FNS_MINMAX;
2001 return CMNG_FNS_NONE;
2004 static void bnx2x_read_mf_cfg(struct bnx2x *bp)
2006 int vn;
2008 if (BP_NOMCP(bp))
2009 return; /* what should be the default value in this case? */
2011 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2012 int /*abs*/func = 2*vn + BP_PORT(bp);
2013 bp->mf_config[vn] =
2014 MF_CFG_RD(bp, func_mf_config[func].config);
2018 static void bnx2x_cmng_fns_init(struct bnx2x *bp, u8 read_cfg, u8 cmng_type)
2021 if (cmng_type == CMNG_FNS_MINMAX) {
2022 int vn;
2024 /* clear cmng_enables */
2025 bp->cmng.flags.cmng_enables = 0;
2027 /* read mf conf from shmem */
2028 if (read_cfg)
2029 bnx2x_read_mf_cfg(bp);
2031 /* Init rate shaping and fairness contexts */
2032 bnx2x_init_port_minmax(bp);
2034 /* vn_weight_sum and enable fairness if not 0 */
2035 bnx2x_calc_vn_weight_sum(bp);
2037 /* calculate and set min-max rate for each vn */
2038 for (vn = VN_0; vn < E1HVN_MAX; vn++)
2039 bnx2x_init_vn_minmax(bp, vn);
2041 /* always enable rate shaping and fairness */
2042 bp->cmng.flags.cmng_enables |=
2043 CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
2044 if (!bp->vn_weight_sum)
2045 DP(NETIF_MSG_IFUP, "All MIN values are zeroes,"
2046 " fairness will be disabled\n");
2047 return;
2050 /* rate shaping and fairness are disabled */
2051 DP(NETIF_MSG_IFUP,
2052 "rate shaping and fairness are disabled\n");
2055 static inline void bnx2x_link_sync_notify(struct bnx2x *bp)
2057 int port = BP_PORT(bp);
2058 int func;
2059 int vn;
2061 /* Set the attention towards other drivers on the same port */
2062 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2063 if (vn == BP_E1HVN(bp))
2064 continue;
2066 func = ((vn << 1) | port);
2067 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2068 (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2072 /* This function is called upon link interrupt */
2073 static void bnx2x_link_attn(struct bnx2x *bp)
2075 u32 prev_link_status = bp->link_vars.link_status;
2076 /* Make sure that we are synced with the current statistics */
2077 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2079 bnx2x_link_update(&bp->link_params, &bp->link_vars);
2081 if (bp->link_vars.link_up) {
2083 /* dropless flow control */
2084 if (!CHIP_IS_E1(bp) && bp->dropless_fc) {
2085 int port = BP_PORT(bp);
2086 u32 pause_enabled = 0;
2088 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
2089 pause_enabled = 1;
2091 REG_WR(bp, BAR_USTRORM_INTMEM +
2092 USTORM_ETH_PAUSE_ENABLED_OFFSET(port),
2093 pause_enabled);
2096 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
2097 struct host_port_stats *pstats;
2099 pstats = bnx2x_sp(bp, port_stats);
2100 /* reset old bmac stats */
2101 memset(&(pstats->mac_stx[0]), 0,
2102 sizeof(struct mac_stx));
2104 if (bp->state == BNX2X_STATE_OPEN)
2105 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2108 /* indicate link status only if link status actually changed */
2109 if (prev_link_status != bp->link_vars.link_status)
2110 bnx2x_link_report(bp);
2112 if (IS_MF(bp))
2113 bnx2x_link_sync_notify(bp);
2115 if (bp->link_vars.link_up && bp->link_vars.line_speed) {
2116 int cmng_fns = bnx2x_get_cmng_fns_mode(bp);
2118 if (cmng_fns != CMNG_FNS_NONE) {
2119 bnx2x_cmng_fns_init(bp, false, cmng_fns);
2120 storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
2121 } else
2122 /* rate shaping and fairness are disabled */
2123 DP(NETIF_MSG_IFUP,
2124 "single function mode without fairness\n");
2128 void bnx2x__link_status_update(struct bnx2x *bp)
2130 if ((bp->state != BNX2X_STATE_OPEN) || (bp->flags & MF_FUNC_DIS))
2131 return;
2133 bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
2135 if (bp->link_vars.link_up)
2136 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2137 else
2138 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2140 /* the link status update could be the result of a DCC event
2141 hence re-read the shmem mf configuration */
2142 bnx2x_read_mf_cfg(bp);
2144 /* indicate link status */
2145 bnx2x_link_report(bp);
2148 static void bnx2x_pmf_update(struct bnx2x *bp)
2150 int port = BP_PORT(bp);
2151 u32 val;
2153 bp->port.pmf = 1;
2154 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2156 /* enable nig attention */
2157 val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
2158 if (bp->common.int_block == INT_BLOCK_HC) {
2159 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
2160 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
2161 } else if (CHIP_IS_E2(bp)) {
2162 REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, val);
2163 REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, val);
2166 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
2169 /* end of Link */
2171 /* slow path */
2174 * General service functions
2177 /* send the MCP a request, block until there is a reply */
2178 u32 bnx2x_fw_command(struct bnx2x *bp, u32 command, u32 param)
2180 int mb_idx = BP_FW_MB_IDX(bp);
2181 u32 seq = ++bp->fw_seq;
2182 u32 rc = 0;
2183 u32 cnt = 1;
2184 u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
2186 mutex_lock(&bp->fw_mb_mutex);
2187 SHMEM_WR(bp, func_mb[mb_idx].drv_mb_param, param);
2188 SHMEM_WR(bp, func_mb[mb_idx].drv_mb_header, (command | seq));
2190 DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
2192 do {
2193 /* let the FW do its magic ... */
2194 msleep(delay);
2196 rc = SHMEM_RD(bp, func_mb[mb_idx].fw_mb_header);
2198 /* Give the FW up to 5 seconds (500*10ms) */
2199 } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 500));
2201 DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
2202 cnt*delay, rc, seq);
2204 /* is this a reply to our command? */
2205 if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK))
2206 rc &= FW_MSG_CODE_MASK;
2207 else {
2208 /* FW BUG! */
2209 BNX2X_ERR("FW failed to respond!\n");
2210 bnx2x_fw_dump(bp);
2211 rc = 0;
2213 mutex_unlock(&bp->fw_mb_mutex);
2215 return rc;
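/* Editor's note - the handshake above in a nutshell (illustrative only,
 * not part of the driver): the driver bumps a per-function sequence
 * number, writes "command | seq" into the shmem mailbox header and then
 * polls the firmware's header until the low bits echo the same sequence:
 *
 *   seq = ++bp->fw_seq;
 *   SHMEM_WR(bp, func_mb[mb_idx].drv_mb_header, command | seq);
 *   do {
 *           msleep(delay);
 *           rc = SHMEM_RD(bp, func_mb[mb_idx].fw_mb_header);
 *   } while ((rc & FW_MSG_SEQ_NUMBER_MASK) != seq && cnt++ < 500);
 *
 * A matching sequence means the upper bits (FW_MSG_CODE_MASK) carry the
 * reply; no match after ~5 s means the MCP did not respond and 0 is
 * returned.
 */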
2218 /* must be called under rtnl_lock */
2219 void bnx2x_rxq_set_mac_filters(struct bnx2x *bp, u16 cl_id, u32 filters)
2221 u32 mask = (1 << cl_id);
2223 /* initial setting is BNX2X_ACCEPT_NONE */
2224 u8 drop_all_ucast = 1, drop_all_bcast = 1, drop_all_mcast = 1;
2225 u8 accp_all_ucast = 0, accp_all_bcast = 0, accp_all_mcast = 0;
2226 u8 unmatched_unicast = 0;
2228 if (filters & BNX2X_PROMISCUOUS_MODE) {
2229 /* promiscuous - accept all, drop none */
2230 drop_all_ucast = drop_all_bcast = drop_all_mcast = 0;
2231 accp_all_ucast = accp_all_bcast = accp_all_mcast = 1;
2233 if (filters & BNX2X_ACCEPT_UNICAST) {
2234 /* accept matched ucast */
2235 drop_all_ucast = 0;
2237 if (filters & BNX2X_ACCEPT_MULTICAST) {
2238 /* accept matched mcast */
2239 drop_all_mcast = 0;
2241 if (filters & BNX2X_ACCEPT_ALL_UNICAST) {
2242 /* accept all ucast */
2243 drop_all_ucast = 0;
2244 accp_all_ucast = 1;
2246 if (filters & BNX2X_ACCEPT_ALL_MULTICAST) {
2247 /* accept all mcast */
2248 drop_all_mcast = 0;
2249 accp_all_mcast = 1;
2251 if (filters & BNX2X_ACCEPT_BROADCAST) {
2252 /* accept (all) bcast */
2253 drop_all_bcast = 0;
2254 accp_all_bcast = 1;
2257 bp->mac_filters.ucast_drop_all = drop_all_ucast ?
2258 bp->mac_filters.ucast_drop_all | mask :
2259 bp->mac_filters.ucast_drop_all & ~mask;
2261 bp->mac_filters.mcast_drop_all = drop_all_mcast ?
2262 bp->mac_filters.mcast_drop_all | mask :
2263 bp->mac_filters.mcast_drop_all & ~mask;
2265 bp->mac_filters.bcast_drop_all = drop_all_bcast ?
2266 bp->mac_filters.bcast_drop_all | mask :
2267 bp->mac_filters.bcast_drop_all & ~mask;
2269 bp->mac_filters.ucast_accept_all = accp_all_ucast ?
2270 bp->mac_filters.ucast_accept_all | mask :
2271 bp->mac_filters.ucast_accept_all & ~mask;
2273 bp->mac_filters.mcast_accept_all = accp_all_mcast ?
2274 bp->mac_filters.mcast_accept_all | mask :
2275 bp->mac_filters.mcast_accept_all & ~mask;
2277 bp->mac_filters.bcast_accept_all = accp_all_bcast ?
2278 bp->mac_filters.bcast_accept_all | mask :
2279 bp->mac_filters.bcast_accept_all & ~mask;
2281 bp->mac_filters.unmatched_unicast = unmatched_unicast ?
2282 bp->mac_filters.unmatched_unicast | mask :
2283 bp->mac_filters.unmatched_unicast & ~mask;
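/* Editor's note - illustrative example (not part of the driver).
 * Each mac_filters word holds one bit per client; the ternaries above
 * simply set or clear bit 'cl_id'.  E.g. calling this with cl_id = 3 and
 * filters = BNX2X_ACCEPT_UNICAST | BNX2X_ACCEPT_BROADCAST clears bit 3
 * in ucast_drop_all and bcast_drop_all, sets bit 3 in bcast_accept_all,
 * and sets bit 3 in mcast_drop_all (multicast was not requested).
 */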
2286 void bnx2x_func_init(struct bnx2x *bp, struct bnx2x_func_init_params *p)
2288 if (FUNC_CONFIG(p->func_flgs)) {
2289 struct tstorm_eth_function_common_config tcfg = {0};
2291 /* tpa */
2292 if (p->func_flgs & FUNC_FLG_TPA)
2293 tcfg.config_flags |=
2294 TSTORM_ETH_FUNCTION_COMMON_CONFIG_ENABLE_TPA;
2296 /* set rss flags */
2297 if (p->func_flgs & FUNC_FLG_RSS) {
2298 u16 rss_flgs = (p->rss->mode <<
2299 TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_MODE_SHIFT);
2301 if (p->rss->cap & RSS_IPV4_CAP)
2302 rss_flgs |= RSS_IPV4_CAP_MASK;
2303 if (p->rss->cap & RSS_IPV4_TCP_CAP)
2304 rss_flgs |= RSS_IPV4_TCP_CAP_MASK;
2305 if (p->rss->cap & RSS_IPV6_CAP)
2306 rss_flgs |= RSS_IPV6_CAP_MASK;
2307 if (p->rss->cap & RSS_IPV6_TCP_CAP)
2308 rss_flgs |= RSS_IPV6_TCP_CAP_MASK;
2310 tcfg.config_flags |= rss_flgs;
2311 tcfg.rss_result_mask = p->rss->result_mask;
2315 storm_memset_func_cfg(bp, &tcfg, p->func_id);
2318 /* Enable the function in the FW */
2319 storm_memset_vf_to_pf(bp, p->func_id, p->pf_id);
2320 storm_memset_func_en(bp, p->func_id, 1);
2322 /* statistics */
2323 if (p->func_flgs & FUNC_FLG_STATS) {
2324 struct stats_indication_flags stats_flags = {0};
2325 stats_flags.collect_eth = 1;
2327 storm_memset_xstats_flags(bp, &stats_flags, p->func_id);
2328 storm_memset_xstats_addr(bp, p->fw_stat_map, p->func_id);
2330 storm_memset_tstats_flags(bp, &stats_flags, p->func_id);
2331 storm_memset_tstats_addr(bp, p->fw_stat_map, p->func_id);
2333 storm_memset_ustats_flags(bp, &stats_flags, p->func_id);
2334 storm_memset_ustats_addr(bp, p->fw_stat_map, p->func_id);
2336 storm_memset_cstats_flags(bp, &stats_flags, p->func_id);
2337 storm_memset_cstats_addr(bp, p->fw_stat_map, p->func_id);
2340 /* spq */
2341 if (p->func_flgs & FUNC_FLG_SPQ) {
2342 storm_memset_spq_addr(bp, p->spq_map, p->func_id);
2343 REG_WR(bp, XSEM_REG_FAST_MEMORY +
2344 XSTORM_SPQ_PROD_OFFSET(p->func_id), p->spq_prod);
2348 static inline u16 bnx2x_get_cl_flags(struct bnx2x *bp,
2349 struct bnx2x_fastpath *fp)
2351 u16 flags = 0;
2353 /* calculate queue flags */
2354 flags |= QUEUE_FLG_CACHE_ALIGN;
2355 flags |= QUEUE_FLG_HC;
2356 flags |= IS_MF(bp) ? QUEUE_FLG_OV : 0;
2358 #ifdef BCM_VLAN
2359 flags |= QUEUE_FLG_VLAN;
2360 DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
2361 #endif
2363 if (!fp->disable_tpa)
2364 flags |= QUEUE_FLG_TPA;
2366 flags |= QUEUE_FLG_STATS;
2368 return flags;
2371 static void bnx2x_pf_rx_cl_prep(struct bnx2x *bp,
2372 struct bnx2x_fastpath *fp, struct rxq_pause_params *pause,
2373 struct bnx2x_rxq_init_params *rxq_init)
2375 u16 max_sge = 0;
2376 u16 sge_sz = 0;
2377 u16 tpa_agg_size = 0;
2379 /* calculate queue flags */
2380 u16 flags = bnx2x_get_cl_flags(bp, fp);
2382 if (!fp->disable_tpa) {
2383 pause->sge_th_hi = 250;
2384 pause->sge_th_lo = 150;
2385 tpa_agg_size = min_t(u32,
2386 (min_t(u32, 8, MAX_SKB_FRAGS) *
2387 SGE_PAGE_SIZE * PAGES_PER_SGE), 0xffff);
2388 max_sge = SGE_PAGE_ALIGN(bp->dev->mtu) >>
2389 SGE_PAGE_SHIFT;
2390 max_sge = ((max_sge + PAGES_PER_SGE - 1) &
2391 (~(PAGES_PER_SGE-1))) >> PAGES_PER_SGE_SHIFT;
2392 sge_sz = (u16)min_t(u32, SGE_PAGE_SIZE * PAGES_PER_SGE,
2393 0xffff);
2396 /* pause - not for e1 */
2397 if (!CHIP_IS_E1(bp)) {
2398 pause->bd_th_hi = 350;
2399 pause->bd_th_lo = 250;
2400 pause->rcq_th_hi = 350;
2401 pause->rcq_th_lo = 250;
2402 pause->sge_th_hi = 0;
2403 pause->sge_th_lo = 0;
2404 pause->pri_map = 1;
2407 /* rxq setup */
2408 rxq_init->flags = flags;
2409 rxq_init->cxt = &bp->context.vcxt[fp->cid].eth;
2410 rxq_init->dscr_map = fp->rx_desc_mapping;
2411 rxq_init->sge_map = fp->rx_sge_mapping;
2412 rxq_init->rcq_map = fp->rx_comp_mapping;
2413 rxq_init->rcq_np_map = fp->rx_comp_mapping + BCM_PAGE_SIZE;
2414 rxq_init->mtu = bp->dev->mtu;
2415 rxq_init->buf_sz = bp->rx_buf_size;
2416 rxq_init->cl_qzone_id = fp->cl_qzone_id;
2417 rxq_init->cl_id = fp->cl_id;
2418 rxq_init->spcl_id = fp->cl_id;
2419 rxq_init->stat_id = fp->cl_id;
2420 rxq_init->tpa_agg_sz = tpa_agg_size;
2421 rxq_init->sge_buf_sz = sge_sz;
2422 rxq_init->max_sges_pkt = max_sge;
2423 rxq_init->cache_line_log = BNX2X_RX_ALIGN_SHIFT;
2424 rxq_init->fw_sb_id = fp->fw_sb_id;
2426 rxq_init->sb_cq_index = U_SB_ETH_RX_CQ_INDEX;
2428 rxq_init->cid = HW_CID(bp, fp->cid);
2430 rxq_init->hc_rate = bp->rx_ticks ? (1000000 / bp->rx_ticks) : 0;
2433 static void bnx2x_pf_tx_cl_prep(struct bnx2x *bp,
2434 struct bnx2x_fastpath *fp, struct bnx2x_txq_init_params *txq_init)
2436 u16 flags = bnx2x_get_cl_flags(bp, fp);
2438 txq_init->flags = flags;
2439 txq_init->cxt = &bp->context.vcxt[fp->cid].eth;
2440 txq_init->dscr_map = fp->tx_desc_mapping;
2441 txq_init->stat_id = fp->cl_id;
2442 txq_init->cid = HW_CID(bp, fp->cid);
2443 txq_init->sb_cq_index = C_SB_ETH_TX_CQ_INDEX;
2444 txq_init->traffic_type = LLFC_TRAFFIC_TYPE_NW;
2445 txq_init->fw_sb_id = fp->fw_sb_id;
2446 txq_init->hc_rate = bp->tx_ticks ? (1000000 / bp->tx_ticks) : 0;
2449 void bnx2x_pf_init(struct bnx2x *bp)
2451 struct bnx2x_func_init_params func_init = {0};
2452 struct bnx2x_rss_params rss = {0};
2453 struct event_ring_data eq_data = { {0} };
2454 u16 flags;
2456 /* pf specific setups */
2457 if (!CHIP_IS_E1(bp))
2458 storm_memset_ov(bp, bp->mf_ov, BP_FUNC(bp));
2460 if (CHIP_IS_E2(bp)) {
2461 /* reset IGU PF statistics: MSIX + ATTN */
2462 /* PF */
2463 REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT +
2464 BNX2X_IGU_STAS_MSG_VF_CNT*4 +
2465 (CHIP_MODE_IS_4_PORT(bp) ?
2466 BP_FUNC(bp) : BP_VN(bp))*4, 0);
2467 /* ATTN */
2468 REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT +
2469 BNX2X_IGU_STAS_MSG_VF_CNT*4 +
2470 BNX2X_IGU_STAS_MSG_PF_CNT*4 +
2471 (CHIP_MODE_IS_4_PORT(bp) ?
2472 BP_FUNC(bp) : BP_VN(bp))*4, 0);
2475 /* function setup flags */
2476 flags = (FUNC_FLG_STATS | FUNC_FLG_LEADING | FUNC_FLG_SPQ);
2478 if (CHIP_IS_E1x(bp))
2479 flags |= (bp->flags & TPA_ENABLE_FLAG) ? FUNC_FLG_TPA : 0;
2480 else
2481 flags |= FUNC_FLG_TPA;
2484 * Although RSS is meaningless when there is a single HW queue, we
2485 * still need it enabled in order to have HW Rx hash generated.
2487 * if (is_eth_multi(bp))
2488 * flags |= FUNC_FLG_RSS;
2491 /* function setup */
2492 if (flags & FUNC_FLG_RSS) {
2493 rss.cap = (RSS_IPV4_CAP | RSS_IPV4_TCP_CAP |
2494 RSS_IPV6_CAP | RSS_IPV6_TCP_CAP);
2495 rss.mode = bp->multi_mode;
2496 rss.result_mask = MULTI_MASK;
2497 func_init.rss = &rss;
2500 func_init.func_flgs = flags;
2501 func_init.pf_id = BP_FUNC(bp);
2502 func_init.func_id = BP_FUNC(bp);
2503 func_init.fw_stat_map = bnx2x_sp_mapping(bp, fw_stats);
2504 func_init.spq_map = bp->spq_mapping;
2505 func_init.spq_prod = bp->spq_prod_idx;
2507 bnx2x_func_init(bp, &func_init);
2509 memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));
2512 Congestion management values depend on the link rate.
2513 There is no active link, so the initial link rate is set to 10 Gbps.
2514 When the link comes up, the congestion management values are
2515 re-calculated according to the actual link rate.
2517 bp->link_vars.line_speed = SPEED_10000;
2518 bnx2x_cmng_fns_init(bp, true, bnx2x_get_cmng_fns_mode(bp));
2520 /* Only the PMF sets the HW */
2521 if (bp->port.pmf)
2522 storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
2524 /* no rx until link is up */
2525 bp->rx_mode = BNX2X_RX_MODE_NONE;
2526 bnx2x_set_storm_rx_mode(bp);
2528 /* init Event Queue */
2529 eq_data.base_addr.hi = U64_HI(bp->eq_mapping);
2530 eq_data.base_addr.lo = U64_LO(bp->eq_mapping);
2531 eq_data.producer = bp->eq_prod;
2532 eq_data.index_id = HC_SP_INDEX_EQ_CONS;
2533 eq_data.sb_id = DEF_SB_ID;
2534 storm_memset_eq_data(bp, &eq_data, BP_FUNC(bp));
2538 static void bnx2x_e1h_disable(struct bnx2x *bp)
2540 int port = BP_PORT(bp);
2542 netif_tx_disable(bp->dev);
2544 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
2546 netif_carrier_off(bp->dev);
2549 static void bnx2x_e1h_enable(struct bnx2x *bp)
2551 int port = BP_PORT(bp);
2553 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
2555 /* Tx queues should only be re-enabled */
2556 netif_tx_wake_all_queues(bp->dev);
2559 * Should not call netif_carrier_on since it will be called if the link
2560 * is up when checking for link state
2564 static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event)
2566 DP(BNX2X_MSG_MCP, "dcc_event 0x%x\n", dcc_event);
2568 if (dcc_event & DRV_STATUS_DCC_DISABLE_ENABLE_PF) {
2571 * This is the only place besides the function initialization
2572 * where the bp->flags can change so it is done without any
2573 * locks
2575 if (bp->mf_config[BP_VN(bp)] & FUNC_MF_CFG_FUNC_DISABLED) {
2576 DP(NETIF_MSG_IFDOWN, "mf_cfg function disabled\n");
2577 bp->flags |= MF_FUNC_DIS;
2579 bnx2x_e1h_disable(bp);
2580 } else {
2581 DP(NETIF_MSG_IFUP, "mf_cfg function enabled\n");
2582 bp->flags &= ~MF_FUNC_DIS;
2584 bnx2x_e1h_enable(bp);
2586 dcc_event &= ~DRV_STATUS_DCC_DISABLE_ENABLE_PF;
2588 if (dcc_event & DRV_STATUS_DCC_BANDWIDTH_ALLOCATION) {
2590 bnx2x_cmng_fns_init(bp, true, CMNG_FNS_MINMAX);
2591 bnx2x_link_sync_notify(bp);
2592 storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
2593 dcc_event &= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION;
2596 /* Report results to MCP */
2597 if (dcc_event)
2598 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_FAILURE, 0);
2599 else
2600 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK, 0);
2603 /* must be called under the spq lock */
2604 static inline struct eth_spe *bnx2x_sp_get_next(struct bnx2x *bp)
2606 struct eth_spe *next_spe = bp->spq_prod_bd;
2608 if (bp->spq_prod_bd == bp->spq_last_bd) {
2609 bp->spq_prod_bd = bp->spq;
2610 bp->spq_prod_idx = 0;
2611 DP(NETIF_MSG_TIMER, "end of spq\n");
2612 } else {
2613 bp->spq_prod_bd++;
2614 bp->spq_prod_idx++;
2616 return next_spe;
2619 /* must be called under the spq lock */
2620 static inline void bnx2x_sp_prod_update(struct bnx2x *bp)
2622 int func = BP_FUNC(bp);
2624 /* Make sure that BD data is updated before writing the producer */
2625 wmb();
2627 REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
2628 bp->spq_prod_idx);
2629 mmiowb();
2632 /* the slow path queue is odd since completions arrive on the fastpath ring */
2633 int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
2634 u32 data_hi, u32 data_lo, int common)
2636 struct eth_spe *spe;
2637 u16 type;
2639 #ifdef BNX2X_STOP_ON_ERROR
2640 if (unlikely(bp->panic))
2641 return -EIO;
2642 #endif
2644 spin_lock_bh(&bp->spq_lock);
2646 if (!atomic_read(&bp->spq_left)) {
2647 BNX2X_ERR("BUG! SPQ ring full!\n");
2648 spin_unlock_bh(&bp->spq_lock);
2649 bnx2x_panic();
2650 return -EBUSY;
2653 spe = bnx2x_sp_get_next(bp);
2655 /* CID needs the port number to be encoded in it */
2656 spe->hdr.conn_and_cmd_data =
2657 cpu_to_le32((command << SPE_HDR_CMD_ID_SHIFT) |
2658 HW_CID(bp, cid));
2660 if (common)
2661 /* Common ramrods:
2662 * FUNC_START, FUNC_STOP, CFC_DEL, STATS, SET_MAC
2663 * TRAFFIC_STOP, TRAFFIC_START
2665 type = (NONE_CONNECTION_TYPE << SPE_HDR_CONN_TYPE_SHIFT)
2666 & SPE_HDR_CONN_TYPE;
2667 else
2668 /* ETH ramrods: SETUP, HALT */
2669 type = (ETH_CONNECTION_TYPE << SPE_HDR_CONN_TYPE_SHIFT)
2670 & SPE_HDR_CONN_TYPE;
2672 type |= ((BP_FUNC(bp) << SPE_HDR_FUNCTION_ID_SHIFT) &
2673 SPE_HDR_FUNCTION_ID);
2675 spe->hdr.type = cpu_to_le16(type);
2677 spe->data.update_data_addr.hi = cpu_to_le32(data_hi);
2678 spe->data.update_data_addr.lo = cpu_to_le32(data_lo);
2680 /* stats ramrod has its own slot on the spq */
2681 if (command != RAMROD_CMD_ID_COMMON_STAT_QUERY)
2682 /* It's ok if the actual decrement is issued towards the memory
2683 * somewhere between the spin_lock and spin_unlock. Thus no
2684 * more explicit memory barrier is needed.
2686 atomic_dec(&bp->spq_left);
2688 DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
2689 "SPQE[%x] (%x:%x) command %d hw_cid %x data (%x:%x) "
2690 "type(0x%x) left %x\n",
2691 bp->spq_prod_idx, (u32)U64_HI(bp->spq_mapping),
2692 (u32)(U64_LO(bp->spq_mapping) +
2693 (void *)bp->spq_prod_bd - (void *)bp->spq), command,
2694 HW_CID(bp, cid), data_hi, data_lo, type, atomic_read(&bp->spq_left));
2696 bnx2x_sp_prod_update(bp);
2697 spin_unlock_bh(&bp->spq_lock);
2698 return 0;
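/* Editor's note - illustrative summary (not part of the driver).
 * The slow-path queue is a simple ring: bnx2x_sp_get_next() advances
 * spq_prod_bd/spq_prod_idx and wraps both back to the start once the
 * last BD is reached, while spq_left tracks the free entries.  Every
 * post except STAT_QUERY consumes one credit here, and bnx2x_eq_int()
 * returns the credits once the matching completions are seen on the
 * event queue.
 */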
2701 /* acquire split MCP access lock register */
2702 static int bnx2x_acquire_alr(struct bnx2x *bp)
2704 u32 j, val;
2705 int rc = 0;
2707 might_sleep();
2708 for (j = 0; j < 1000; j++) {
2709 val = (1UL << 31);
2710 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2711 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
2712 if (val & (1L << 31))
2713 break;
2715 msleep(5);
2717 if (!(val & (1L << 31))) {
2718 BNX2X_ERR("Cannot acquire MCP access lock register\n");
2719 rc = -EBUSY;
2722 return rc;
2725 /* release split MCP access lock register */
2726 static void bnx2x_release_alr(struct bnx2x *bp)
2728 REG_WR(bp, GRCBASE_MCP + 0x9c, 0);
2731 #define BNX2X_DEF_SB_ATT_IDX 0x0001
2732 #define BNX2X_DEF_SB_IDX 0x0002
2734 static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
2736 struct host_sp_status_block *def_sb = bp->def_status_blk;
2737 u16 rc = 0;
2739 barrier(); /* status block is written to by the chip */
2740 if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
2741 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
2742 rc |= BNX2X_DEF_SB_ATT_IDX;
2745 if (bp->def_idx != def_sb->sp_sb.running_index) {
2746 bp->def_idx = def_sb->sp_sb.running_index;
2747 rc |= BNX2X_DEF_SB_IDX;
2750 /* Do not reorder: index reads should complete before handling */
2751 barrier();
2752 return rc;
2756 * slow path service functions
2759 static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
2761 int port = BP_PORT(bp);
2762 u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2763 MISC_REG_AEU_MASK_ATTN_FUNC_0;
2764 u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
2765 NIG_REG_MASK_INTERRUPT_PORT0;
2766 u32 aeu_mask;
2767 u32 nig_mask = 0;
2768 u32 reg_addr;
2770 if (bp->attn_state & asserted)
2771 BNX2X_ERR("IGU ERROR\n");
2773 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2774 aeu_mask = REG_RD(bp, aeu_addr);
2776 DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n",
2777 aeu_mask, asserted);
2778 aeu_mask &= ~(asserted & 0x3ff);
2779 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
2781 REG_WR(bp, aeu_addr, aeu_mask);
2782 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2784 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2785 bp->attn_state |= asserted;
2786 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2788 if (asserted & ATTN_HARD_WIRED_MASK) {
2789 if (asserted & ATTN_NIG_FOR_FUNC) {
2791 bnx2x_acquire_phy_lock(bp);
2793 /* save nig interrupt mask */
2794 nig_mask = REG_RD(bp, nig_int_mask_addr);
2795 REG_WR(bp, nig_int_mask_addr, 0);
2797 bnx2x_link_attn(bp);
2799 /* handle unicore attn? */
2801 if (asserted & ATTN_SW_TIMER_4_FUNC)
2802 DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
2804 if (asserted & GPIO_2_FUNC)
2805 DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
2807 if (asserted & GPIO_3_FUNC)
2808 DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
2810 if (asserted & GPIO_4_FUNC)
2811 DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
2813 if (port == 0) {
2814 if (asserted & ATTN_GENERAL_ATTN_1) {
2815 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
2816 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
2818 if (asserted & ATTN_GENERAL_ATTN_2) {
2819 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
2820 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
2822 if (asserted & ATTN_GENERAL_ATTN_3) {
2823 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
2824 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
2826 } else {
2827 if (asserted & ATTN_GENERAL_ATTN_4) {
2828 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
2829 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
2831 if (asserted & ATTN_GENERAL_ATTN_5) {
2832 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
2833 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
2835 if (asserted & ATTN_GENERAL_ATTN_6) {
2836 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
2837 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
2841 } /* if hardwired */
2843 if (bp->common.int_block == INT_BLOCK_HC)
2844 reg_addr = (HC_REG_COMMAND_REG + port*32 +
2845 COMMAND_REG_ATTN_BITS_SET);
2846 else
2847 reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_SET_UPPER*8);
2849 DP(NETIF_MSG_HW, "about to mask 0x%08x at %s addr 0x%x\n", asserted,
2850 (bp->common.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr);
2851 REG_WR(bp, reg_addr, asserted);
2853 /* now set back the mask */
2854 if (asserted & ATTN_NIG_FOR_FUNC) {
2855 REG_WR(bp, nig_int_mask_addr, nig_mask);
2856 bnx2x_release_phy_lock(bp);
2860 static inline void bnx2x_fan_failure(struct bnx2x *bp)
2862 int port = BP_PORT(bp);
2863 u32 ext_phy_config;
2864 /* mark the failure */
2865 ext_phy_config =
2866 SHMEM_RD(bp,
2867 dev_info.port_hw_config[port].external_phy_config);
2869 ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
2870 ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
2871 SHMEM_WR(bp, dev_info.port_hw_config[port].external_phy_config,
2872 ext_phy_config);
2874 /* log the failure */
2875 netdev_err(bp->dev, "Fan Failure on Network Controller has caused"
2876 " the driver to shutdown the card to prevent permanent"
2877 " damage. Please contact OEM Support for assistance\n");
2880 static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
2882 int port = BP_PORT(bp);
2883 int reg_offset;
2884 u32 val;
2886 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
2887 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
2889 if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
2891 val = REG_RD(bp, reg_offset);
2892 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
2893 REG_WR(bp, reg_offset, val);
2895 BNX2X_ERR("SPIO5 hw attention\n");
2897 /* Fan failure attention */
2898 bnx2x_hw_reset_phy(&bp->link_params);
2899 bnx2x_fan_failure(bp);
2902 if (attn & (AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 |
2903 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1)) {
2904 bnx2x_acquire_phy_lock(bp);
2905 bnx2x_handle_module_detect_int(&bp->link_params);
2906 bnx2x_release_phy_lock(bp);
2909 if (attn & HW_INTERRUT_ASSERT_SET_0) {
2911 val = REG_RD(bp, reg_offset);
2912 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
2913 REG_WR(bp, reg_offset, val);
2915 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
2916 (u32)(attn & HW_INTERRUT_ASSERT_SET_0));
2917 bnx2x_panic();
2921 static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
2923 u32 val;
2925 if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {
2927 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
2928 BNX2X_ERR("DB hw attention 0x%x\n", val);
2929 /* DORQ discard attention */
2930 if (val & 0x2)
2931 BNX2X_ERR("FATAL error from DORQ\n");
2934 if (attn & HW_INTERRUT_ASSERT_SET_1) {
2936 int port = BP_PORT(bp);
2937 int reg_offset;
2939 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
2940 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
2942 val = REG_RD(bp, reg_offset);
2943 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
2944 REG_WR(bp, reg_offset, val);
2946 BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
2947 (u32)(attn & HW_INTERRUT_ASSERT_SET_1));
2948 bnx2x_panic();
2952 static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
2954 u32 val;
2956 if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
2958 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
2959 BNX2X_ERR("CFC hw attention 0x%x\n", val);
2960 /* CFC error attention */
2961 if (val & 0x2)
2962 BNX2X_ERR("FATAL error from CFC\n");
2965 if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
2967 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
2968 BNX2X_ERR("PXP hw attention 0x%x\n", val);
2969 /* RQ_USDMDP_FIFO_OVERFLOW */
2970 if (val & 0x18000)
2971 BNX2X_ERR("FATAL error from PXP\n");
2972 if (CHIP_IS_E2(bp)) {
2973 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_1);
2974 BNX2X_ERR("PXP hw attention-1 0x%x\n", val);
2978 if (attn & HW_INTERRUT_ASSERT_SET_2) {
2980 int port = BP_PORT(bp);
2981 int reg_offset;
2983 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
2984 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
2986 val = REG_RD(bp, reg_offset);
2987 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
2988 REG_WR(bp, reg_offset, val);
2990 BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
2991 (u32)(attn & HW_INTERRUT_ASSERT_SET_2));
2992 bnx2x_panic();
2996 static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
2998 u32 val;
3000 if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
3002 if (attn & BNX2X_PMF_LINK_ASSERT) {
3003 int func = BP_FUNC(bp);
3005 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
3006 bp->mf_config[BP_VN(bp)] = MF_CFG_RD(bp,
3007 func_mf_config[BP_ABS_FUNC(bp)].config);
3008 val = SHMEM_RD(bp,
3009 func_mb[BP_FW_MB_IDX(bp)].drv_status);
3010 if (val & DRV_STATUS_DCC_EVENT_MASK)
3011 bnx2x_dcc_event(bp,
3012 (val & DRV_STATUS_DCC_EVENT_MASK));
3013 bnx2x__link_status_update(bp);
3014 if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF))
3015 bnx2x_pmf_update(bp);
3017 } else if (attn & BNX2X_MC_ASSERT_BITS) {
3019 BNX2X_ERR("MC assert!\n");
3020 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
3021 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
3022 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
3023 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
3024 bnx2x_panic();
3026 } else if (attn & BNX2X_MCP_ASSERT) {
3028 BNX2X_ERR("MCP assert!\n");
3029 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
3030 bnx2x_fw_dump(bp);
3032 } else
3033 BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
3036 if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
3037 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
3038 if (attn & BNX2X_GRC_TIMEOUT) {
3039 val = CHIP_IS_E1(bp) ? 0 :
3040 REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN);
3041 BNX2X_ERR("GRC time-out 0x%08x\n", val);
3043 if (attn & BNX2X_GRC_RSV) {
3044 val = CHIP_IS_E1(bp) ? 0 :
3045 REG_RD(bp, MISC_REG_GRC_RSV_ATTN);
3046 BNX2X_ERR("GRC reserved 0x%08x\n", val);
3048 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
3052 #define BNX2X_MISC_GEN_REG MISC_REG_GENERIC_POR_1
3053 #define LOAD_COUNTER_BITS 16 /* Number of bits for load counter */
3054 #define LOAD_COUNTER_MASK (((u32)0x1 << LOAD_COUNTER_BITS) - 1)
3055 #define RESET_DONE_FLAG_MASK (~LOAD_COUNTER_MASK)
3056 #define RESET_DONE_FLAG_SHIFT LOAD_COUNTER_BITS
3057 #define CHIP_PARITY_SUPPORTED(bp) (CHIP_IS_E1(bp) || CHIP_IS_E1H(bp))
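/* Editor's note - illustrative layout sketch (not part of the driver).
 * BNX2X_MISC_GEN_REG is shared between the functions on the chip:
 *
 *   bits  0..15 - load counter (incremented/decremented per function load)
 *   bits 16..31 - RESET_DONE_FLAG_MASK; the helpers below toggle bit 16
 *                 (set = reset in progress, all clear = reset done)
 *
 * e.g. a register value of 0x00010002 means two functions are loaded
 * and a recovery reset is currently in progress.
 */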
3059 * should be run under rtnl lock
3061 static inline void bnx2x_set_reset_done(struct bnx2x *bp)
3063 u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3064 val &= ~(1 << RESET_DONE_FLAG_SHIFT);
3065 REG_WR(bp, BNX2X_MISC_GEN_REG, val);
3066 barrier();
3067 mmiowb();
3071 * should be run under rtnl lock
3073 static inline void bnx2x_set_reset_in_progress(struct bnx2x *bp)
3075 u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3076 val |= (1 << 16);
3077 REG_WR(bp, BNX2X_MISC_GEN_REG, val);
3078 barrier();
3079 mmiowb();
3083 * should be run under rtnl lock
3085 bool bnx2x_reset_is_done(struct bnx2x *bp)
3087 u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3088 DP(NETIF_MSG_HW, "GEN_REG_VAL=0x%08x\n", val);
3089 return (val & RESET_DONE_FLAG_MASK) ? false : true;
3093 * should be run under rtnl lock
3095 inline void bnx2x_inc_load_cnt(struct bnx2x *bp)
3097 u32 val1, val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3099 DP(NETIF_MSG_HW, "Old GEN_REG_VAL=0x%08x\n", val);
3101 val1 = ((val & LOAD_COUNTER_MASK) + 1) & LOAD_COUNTER_MASK;
3102 REG_WR(bp, BNX2X_MISC_GEN_REG, (val & RESET_DONE_FLAG_MASK) | val1);
3103 barrier();
3104 mmiowb();
3108 * should be run under rtnl lock
3110 u32 bnx2x_dec_load_cnt(struct bnx2x *bp)
3112 u32 val1, val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3114 DP(NETIF_MSG_HW, "Old GEN_REG_VAL=0x%08x\n", val);
3116 val1 = ((val & LOAD_COUNTER_MASK) - 1) & LOAD_COUNTER_MASK;
3117 REG_WR(bp, BNX2X_MISC_GEN_REG, (val & RESET_DONE_FLAG_MASK) | val1);
3118 barrier();
3119 mmiowb();
3121 return val1;
3125 * should be run under rtnl lock
3127 static inline u32 bnx2x_get_load_cnt(struct bnx2x *bp)
3129 return REG_RD(bp, BNX2X_MISC_GEN_REG) & LOAD_COUNTER_MASK;
3132 static inline void bnx2x_clear_load_cnt(struct bnx2x *bp)
3134 u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3135 REG_WR(bp, BNX2X_MISC_GEN_REG, val & (~LOAD_COUNTER_MASK));
3138 static inline void _print_next_block(int idx, const char *blk)
3140 if (idx)
3141 pr_cont(", ");
3142 pr_cont("%s", blk);
3145 static inline int bnx2x_print_blocks_with_parity0(u32 sig, int par_num)
3147 int i = 0;
3148 u32 cur_bit = 0;
3149 for (i = 0; sig; i++) {
3150 cur_bit = ((u32)0x1 << i);
3151 if (sig & cur_bit) {
3152 switch (cur_bit) {
3153 case AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR:
3154 _print_next_block(par_num++, "BRB");
3155 break;
3156 case AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR:
3157 _print_next_block(par_num++, "PARSER");
3158 break;
3159 case AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR:
3160 _print_next_block(par_num++, "TSDM");
3161 break;
3162 case AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR:
3163 _print_next_block(par_num++, "SEARCHER");
3164 break;
3165 case AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR:
3166 _print_next_block(par_num++, "TSEMI");
3167 break;
3170 /* Clear the bit */
3171 sig &= ~cur_bit;
3175 return par_num;
3178 static inline int bnx2x_print_blocks_with_parity1(u32 sig, int par_num)
3180 int i = 0;
3181 u32 cur_bit = 0;
3182 for (i = 0; sig; i++) {
3183 cur_bit = ((u32)0x1 << i);
3184 if (sig & cur_bit) {
3185 switch (cur_bit) {
3186 case AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR:
3187 _print_next_block(par_num++, "PBCLIENT");
3188 break;
3189 case AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR:
3190 _print_next_block(par_num++, "QM");
3191 break;
3192 case AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR:
3193 _print_next_block(par_num++, "XSDM");
3194 break;
3195 case AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR:
3196 _print_next_block(par_num++, "XSEMI");
3197 break;
3198 case AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR:
3199 _print_next_block(par_num++, "DOORBELLQ");
3200 break;
3201 case AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR:
3202 _print_next_block(par_num++, "VAUX PCI CORE");
3203 break;
3204 case AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR:
3205 _print_next_block(par_num++, "DEBUG");
3206 break;
3207 case AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR:
3208 _print_next_block(par_num++, "USDM");
3209 break;
3210 case AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR:
3211 _print_next_block(par_num++, "USEMI");
3212 break;
3213 case AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR:
3214 _print_next_block(par_num++, "UPB");
3215 break;
3216 case AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR:
3217 _print_next_block(par_num++, "CSDM");
3218 break;
3221 /* Clear the bit */
3222 sig &= ~cur_bit;
3226 return par_num;
3229 static inline int bnx2x_print_blocks_with_parity2(u32 sig, int par_num)
3231 int i = 0;
3232 u32 cur_bit = 0;
3233 for (i = 0; sig; i++) {
3234 cur_bit = ((u32)0x1 << i);
3235 if (sig & cur_bit) {
3236 switch (cur_bit) {
3237 case AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR:
3238 _print_next_block(par_num++, "CSEMI");
3239 break;
3240 case AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR:
3241 _print_next_block(par_num++, "PXP");
3242 break;
3243 case AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR:
3244 _print_next_block(par_num++,
3245 "PXPPCICLOCKCLIENT");
3246 break;
3247 case AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR:
3248 _print_next_block(par_num++, "CFC");
3249 break;
3250 case AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR:
3251 _print_next_block(par_num++, "CDU");
3252 break;
3253 case AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR:
3254 _print_next_block(par_num++, "IGU");
3255 break;
3256 case AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR:
3257 _print_next_block(par_num++, "MISC");
3258 break;
3261 /* Clear the bit */
3262 sig &= ~cur_bit;
3266 return par_num;
3269 static inline int bnx2x_print_blocks_with_parity3(u32 sig, int par_num)
3271 int i = 0;
3272 u32 cur_bit = 0;
3273 for (i = 0; sig; i++) {
3274 cur_bit = ((u32)0x1 << i);
3275 if (sig & cur_bit) {
3276 switch (cur_bit) {
3277 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY:
3278 _print_next_block(par_num++, "MCP ROM");
3279 break;
3280 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY:
3281 _print_next_block(par_num++, "MCP UMP RX");
3282 break;
3283 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY:
3284 _print_next_block(par_num++, "MCP UMP TX");
3285 break;
3286 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY:
3287 _print_next_block(par_num++, "MCP SCPAD");
3288 break;
3291 /* Clear the bit */
3292 sig &= ~cur_bit;
3296 return par_num;
3299 static inline bool bnx2x_parity_attn(struct bnx2x *bp, u32 sig0, u32 sig1,
3300 u32 sig2, u32 sig3)
3302 if ((sig0 & HW_PRTY_ASSERT_SET_0) || (sig1 & HW_PRTY_ASSERT_SET_1) ||
3303 (sig2 & HW_PRTY_ASSERT_SET_2) || (sig3 & HW_PRTY_ASSERT_SET_3)) {
3304 int par_num = 0;
3305 DP(NETIF_MSG_HW, "Was parity error: HW block parity attention: "
3306 "[0]:0x%08x [1]:0x%08x "
3307 "[2]:0x%08x [3]:0x%08x\n",
3308 sig0 & HW_PRTY_ASSERT_SET_0,
3309 sig1 & HW_PRTY_ASSERT_SET_1,
3310 sig2 & HW_PRTY_ASSERT_SET_2,
3311 sig3 & HW_PRTY_ASSERT_SET_3);
3312 printk(KERN_ERR"%s: Parity errors detected in blocks: ",
3313 bp->dev->name);
3314 par_num = bnx2x_print_blocks_with_parity0(
3315 sig0 & HW_PRTY_ASSERT_SET_0, par_num);
3316 par_num = bnx2x_print_blocks_with_parity1(
3317 sig1 & HW_PRTY_ASSERT_SET_1, par_num);
3318 par_num = bnx2x_print_blocks_with_parity2(
3319 sig2 & HW_PRTY_ASSERT_SET_2, par_num);
3320 par_num = bnx2x_print_blocks_with_parity3(
3321 sig3 & HW_PRTY_ASSERT_SET_3, par_num);
3322 printk("\n");
3323 return true;
3324 } else
3325 return false;
3328 bool bnx2x_chk_parity_attn(struct bnx2x *bp)
3330 struct attn_route attn;
3331 int port = BP_PORT(bp);
3333 attn.sig[0] = REG_RD(bp,
3334 MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 +
3335 port*4);
3336 attn.sig[1] = REG_RD(bp,
3337 MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 +
3338 port*4);
3339 attn.sig[2] = REG_RD(bp,
3340 MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 +
3341 port*4);
3342 attn.sig[3] = REG_RD(bp,
3343 MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 +
3344 port*4);
3346 return bnx2x_parity_attn(bp, attn.sig[0], attn.sig[1], attn.sig[2],
3347 attn.sig[3]);
3351 static inline void bnx2x_attn_int_deasserted4(struct bnx2x *bp, u32 attn)
3353 u32 val;
3354 if (attn & AEU_INPUTS_ATTN_BITS_PGLUE_HW_INTERRUPT) {
3356 val = REG_RD(bp, PGLUE_B_REG_PGLUE_B_INT_STS_CLR);
3357 BNX2X_ERR("PGLUE hw attention 0x%x\n", val);
3358 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR)
3359 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3360 "ADDRESS_ERROR\n");
3361 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR)
3362 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3363 "INCORRECT_RCV_BEHAVIOR\n");
3364 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN)
3365 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3366 "WAS_ERROR_ATTN\n");
3367 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_VF_LENGTH_VIOLATION_ATTN)
3368 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3369 "VF_LENGTH_VIOLATION_ATTN\n");
3370 if (val &
3371 PGLUE_B_PGLUE_B_INT_STS_REG_VF_GRC_SPACE_VIOLATION_ATTN)
3372 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3373 "VF_GRC_SPACE_VIOLATION_ATTN\n");
3374 if (val &
3375 PGLUE_B_PGLUE_B_INT_STS_REG_VF_MSIX_BAR_VIOLATION_ATTN)
3376 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3377 "VF_MSIX_BAR_VIOLATION_ATTN\n");
3378 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_ERROR_ATTN)
3379 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3380 "TCPL_ERROR_ATTN\n");
3381 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_IN_TWO_RCBS_ATTN)
3382 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3383 "TCPL_IN_TWO_RCBS_ATTN\n");
3384 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW)
3385 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3386 "CSSNOOP_FIFO_OVERFLOW\n");
3388 if (attn & AEU_INPUTS_ATTN_BITS_ATC_HW_INTERRUPT) {
3389 val = REG_RD(bp, ATC_REG_ATC_INT_STS_CLR);
3390 BNX2X_ERR("ATC hw attention 0x%x\n", val);
3391 if (val & ATC_ATC_INT_STS_REG_ADDRESS_ERROR)
3392 BNX2X_ERR("ATC_ATC_INT_STS_REG_ADDRESS_ERROR\n");
3393 if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_TO_NOT_PEND)
3394 BNX2X_ERR("ATC_ATC_INT_STS_REG"
3395 "_ATC_TCPL_TO_NOT_PEND\n");
3396 if (val & ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS)
3397 BNX2X_ERR("ATC_ATC_INT_STS_REG_"
3398 "ATC_GPA_MULTIPLE_HITS\n");
3399 if (val & ATC_ATC_INT_STS_REG_ATC_RCPL_TO_EMPTY_CNT)
3400 BNX2X_ERR("ATC_ATC_INT_STS_REG_"
3401 "ATC_RCPL_TO_EMPTY_CNT\n");
3402 if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR)
3403 BNX2X_ERR("ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR\n");
3404 if (val & ATC_ATC_INT_STS_REG_ATC_IREQ_LESS_THAN_STU)
3405 BNX2X_ERR("ATC_ATC_INT_STS_REG_"
3406 "ATC_IREQ_LESS_THAN_STU\n");
3409 if (attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR |
3410 AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)) {
3411 BNX2X_ERR("FATAL parity attention set4 0x%x\n",
3412 (u32)(attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR |
3413 AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)));
3418 static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
3420 struct attn_route attn, *group_mask;
3421 int port = BP_PORT(bp);
3422 int index;
3423 u32 reg_addr;
3424 u32 val;
3425 u32 aeu_mask;
3427 /* need to take the HW lock because the MCP or the other port might also
3428 try to handle this event */
3429 bnx2x_acquire_alr(bp);
3431 if (bnx2x_chk_parity_attn(bp)) {
3432 bp->recovery_state = BNX2X_RECOVERY_INIT;
3433 bnx2x_set_reset_in_progress(bp);
3434 schedule_delayed_work(&bp->reset_task, 0);
3435 /* Disable HW interrupts */
3436 bnx2x_int_disable(bp);
3437 bnx2x_release_alr(bp);
3438 /* In case of parity errors don't handle attentions so that
3439 * other functions would "see" the parity errors as well.
3441 return;
3444 attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
3445 attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
3446 attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
3447 attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
3448 if (CHIP_IS_E2(bp))
3449 attn.sig[4] =
3450 REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_5_FUNC_0 + port*4);
3451 else
3452 attn.sig[4] = 0;
3454 DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x %08x\n",
3455 attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3], attn.sig[4]);
3457 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
3458 if (deasserted & (1 << index)) {
3459 group_mask = &bp->attn_group[index];
3461 DP(NETIF_MSG_HW, "group[%d]: %08x %08x "
3462 "%08x %08x %08x\n",
3463 index,
3464 group_mask->sig[0], group_mask->sig[1],
3465 group_mask->sig[2], group_mask->sig[3],
3466 group_mask->sig[4]);
3468 bnx2x_attn_int_deasserted4(bp,
3469 attn.sig[4] & group_mask->sig[4]);
3470 bnx2x_attn_int_deasserted3(bp,
3471 attn.sig[3] & group_mask->sig[3]);
3472 bnx2x_attn_int_deasserted1(bp,
3473 attn.sig[1] & group_mask->sig[1]);
3474 bnx2x_attn_int_deasserted2(bp,
3475 attn.sig[2] & group_mask->sig[2]);
3476 bnx2x_attn_int_deasserted0(bp,
3477 attn.sig[0] & group_mask->sig[0]);
3481 bnx2x_release_alr(bp);
3483 if (bp->common.int_block == INT_BLOCK_HC)
3484 reg_addr = (HC_REG_COMMAND_REG + port*32 +
3485 COMMAND_REG_ATTN_BITS_CLR);
3486 else
3487 reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_CLR_UPPER*8);
3489 val = ~deasserted;
3490 DP(NETIF_MSG_HW, "about to mask 0x%08x at %s addr 0x%x\n", val,
3491 (bp->common.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr);
3492 REG_WR(bp, reg_addr, val);
3494 if (~bp->attn_state & deasserted)
3495 BNX2X_ERR("IGU ERROR\n");
3497 reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
3498 MISC_REG_AEU_MASK_ATTN_FUNC_0;
3500 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
3501 aeu_mask = REG_RD(bp, reg_addr);
3503 DP(NETIF_MSG_HW, "aeu_mask %x newly deasserted %x\n",
3504 aeu_mask, deasserted);
3505 aeu_mask |= (deasserted & 0x3ff);
3506 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
3508 REG_WR(bp, reg_addr, aeu_mask);
3509 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
3511 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
3512 bp->attn_state &= ~deasserted;
3513 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
3516 static void bnx2x_attn_int(struct bnx2x *bp)
3518 /* read local copy of bits */
3519 u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
3520 attn_bits);
3521 u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
3522 attn_bits_ack);
3523 u32 attn_state = bp->attn_state;
3525 /* look for changed bits */
3526 u32 asserted = attn_bits & ~attn_ack & ~attn_state;
3527 u32 deasserted = ~attn_bits & attn_ack & attn_state;
3529 DP(NETIF_MSG_HW,
3530 "attn_bits %x attn_ack %x asserted %x deasserted %x\n",
3531 attn_bits, attn_ack, asserted, deasserted);
3533 if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
3534 BNX2X_ERR("BAD attention state\n");
3536 /* handle bits that were raised */
3537 if (asserted)
3538 bnx2x_attn_int_asserted(bp, asserted);
3540 if (deasserted)
3541 bnx2x_attn_int_deasserted(bp, deasserted);
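/* Editor's note - illustrative example (not part of the driver).
 * "asserted" are attention bits that are newly raised (set in attn_bits,
 * not yet acked and not yet recorded in attn_state); "deasserted" are
 * bits that have dropped (clear in attn_bits, still acked and still in
 * attn_state).  E.g. attn_bits = 0x5, attn_ack = 0x1, attn_state = 0x1
 * gives asserted = 0x4 and deasserted = 0x0.
 */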
3544 static inline void bnx2x_update_eq_prod(struct bnx2x *bp, u16 prod)
3546 /* No memory barriers */
3547 storm_memset_eq_prod(bp, prod, BP_FUNC(bp));
3548 mmiowb(); /* keep prod updates ordered */
3551 #ifdef BCM_CNIC
3552 static int bnx2x_cnic_handle_cfc_del(struct bnx2x *bp, u32 cid,
3553 union event_ring_elem *elem)
3555 if (!bp->cnic_eth_dev.starting_cid ||
3556 cid < bp->cnic_eth_dev.starting_cid)
3557 return 1;
3559 DP(BNX2X_MSG_SP, "got delete ramrod for CNIC CID %d\n", cid);
3561 if (unlikely(elem->message.data.cfc_del_event.error)) {
3562 BNX2X_ERR("got delete ramrod for CNIC CID %d with error!\n",
3563 cid);
3564 bnx2x_panic_dump(bp);
3566 bnx2x_cnic_cfc_comp(bp, cid);
3567 return 0;
3569 #endif
3571 static void bnx2x_eq_int(struct bnx2x *bp)
3573 u16 hw_cons, sw_cons, sw_prod;
3574 union event_ring_elem *elem;
3575 u32 cid;
3576 u8 opcode;
3577 int spqe_cnt = 0;
3579 hw_cons = le16_to_cpu(*bp->eq_cons_sb);
3581 /* The hw_cons range is 1-255, 257 - the sw_cons range is 0-254, 256.
3582 * When we get to the next page we need to adjust so that the loop
3583 * condition below will be met. The next element is the size of a
3584 * regular element, hence we increment by 1
3586 if ((hw_cons & EQ_DESC_MAX_PAGE) == EQ_DESC_MAX_PAGE)
3587 hw_cons++;
3589 /* This function never runs in parallel with itself for a
3590 * specific bp, thus there is no need for a "paired" read memory
3591 * barrier here.
3593 sw_cons = bp->eq_cons;
3594 sw_prod = bp->eq_prod;
3596 DP(BNX2X_MSG_SP, "EQ: hw_cons %u sw_cons %u bp->spq_left %u\n",
3597 hw_cons, sw_cons, atomic_read(&bp->spq_left));
3599 for (; sw_cons != hw_cons;
3600 sw_prod = NEXT_EQ_IDX(sw_prod), sw_cons = NEXT_EQ_IDX(sw_cons)) {
3603 elem = &bp->eq_ring[EQ_DESC(sw_cons)];
3605 cid = SW_CID(elem->message.data.cfc_del_event.cid);
3606 opcode = elem->message.opcode;
3609 /* handle eq element */
3610 switch (opcode) {
3611 case EVENT_RING_OPCODE_STAT_QUERY:
3612 DP(NETIF_MSG_TIMER, "got statistics comp event\n");
3613 /* nothing to do with stats comp */
3614 continue;
3616 case EVENT_RING_OPCODE_CFC_DEL:
3617 /* handle according to cid range */
3619 * we may want to verify here that the bp state is
3620 * HALTING
3622 DP(NETIF_MSG_IFDOWN,
3623 "got delete ramrod for MULTI[%d]\n", cid);
3624 #ifdef BCM_CNIC
3625 if (!bnx2x_cnic_handle_cfc_del(bp, cid, elem))
3626 goto next_spqe;
3627 #endif
3628 bnx2x_fp(bp, cid, state) =
3629 BNX2X_FP_STATE_CLOSED;
3631 goto next_spqe;
3634 switch (opcode | bp->state) {
3635 case (EVENT_RING_OPCODE_FUNCTION_START |
3636 BNX2X_STATE_OPENING_WAIT4_PORT):
3637 DP(NETIF_MSG_IFUP, "got setup ramrod\n");
3638 bp->state = BNX2X_STATE_FUNC_STARTED;
3639 break;
3641 case (EVENT_RING_OPCODE_FUNCTION_STOP |
3642 BNX2X_STATE_CLOSING_WAIT4_HALT):
3643 DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
3644 bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
3645 break;
3647 case (EVENT_RING_OPCODE_SET_MAC | BNX2X_STATE_OPEN):
3648 case (EVENT_RING_OPCODE_SET_MAC | BNX2X_STATE_DIAG):
3649 DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
3650 bp->set_mac_pending = 0;
3651 break;
3653 case (EVENT_RING_OPCODE_SET_MAC |
3654 BNX2X_STATE_CLOSING_WAIT4_HALT):
3655 DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
3656 bp->set_mac_pending = 0;
3657 break;
3658 default:
3659 /* unknown event log error and continue */
3660 BNX2X_ERR("Unknown EQ event %d\n",
3661 elem->message.opcode);
3663 next_spqe:
3664 spqe_cnt++;
3665 } /* for */
3667 smp_mb__before_atomic_inc();
3668 atomic_add(spqe_cnt, &bp->spq_left);
3670 bp->eq_cons = sw_cons;
3671 bp->eq_prod = sw_prod;
3672 /* Make sure that above mem writes were issued towards the memory */
3673 smp_wmb();
3675 /* update producer */
3676 bnx2x_update_eq_prod(bp, bp->eq_prod);
3679 static void bnx2x_sp_task(struct work_struct *work)
3681 struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
3682 u16 status;
3684 /* Return here if interrupt is disabled */
3685 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
3686 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
3687 return;
3690 status = bnx2x_update_dsb_idx(bp);
3691 /* if (status == 0) */
3692 /* BNX2X_ERR("spurious slowpath interrupt!\n"); */
3694 DP(NETIF_MSG_INTR, "got a slowpath interrupt (status 0x%x)\n", status);
3696 /* HW attentions */
3697 if (status & BNX2X_DEF_SB_ATT_IDX) {
3698 bnx2x_attn_int(bp);
3699 status &= ~BNX2X_DEF_SB_ATT_IDX;
3702 /* SP events: STAT_QUERY and others */
3703 if (status & BNX2X_DEF_SB_IDX) {
3705 /* Handle EQ completions */
3706 bnx2x_eq_int(bp);
3708 bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID,
3709 le16_to_cpu(bp->def_idx), IGU_INT_NOP, 1);
3711 status &= ~BNX2X_DEF_SB_IDX;
3714 if (unlikely(status))
3715 DP(NETIF_MSG_INTR, "got an unknown interrupt! (status 0x%x)\n",
3716 status);
3718 bnx2x_ack_sb(bp, bp->igu_dsb_id, ATTENTION_ID,
3719 le16_to_cpu(bp->def_att_idx), IGU_INT_ENABLE, 1);
3722 irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
3724 struct net_device *dev = dev_instance;
3725 struct bnx2x *bp = netdev_priv(dev);
3727 /* Return here if interrupt is disabled */
3728 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
3729 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
3730 return IRQ_HANDLED;
3733 bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID, 0,
3734 IGU_INT_DISABLE, 0);
3736 #ifdef BNX2X_STOP_ON_ERROR
3737 if (unlikely(bp->panic))
3738 return IRQ_HANDLED;
3739 #endif
3741 #ifdef BCM_CNIC
3743 struct cnic_ops *c_ops;
3745 rcu_read_lock();
3746 c_ops = rcu_dereference(bp->cnic_ops);
3747 if (c_ops)
3748 c_ops->cnic_handler(bp->cnic_data, NULL);
3749 rcu_read_unlock();
3751 #endif
3752 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
3754 return IRQ_HANDLED;
3757 /* end of slow path */
3759 static void bnx2x_timer(unsigned long data)
3761 struct bnx2x *bp = (struct bnx2x *) data;
3763 if (!netif_running(bp->dev))
3764 return;
3766 if (atomic_read(&bp->intr_sem) != 0)
3767 goto timer_restart;
3769 if (poll) {
3770 struct bnx2x_fastpath *fp = &bp->fp[0];
3771 int rc;
3773 bnx2x_tx_int(fp);
3774 rc = bnx2x_rx_int(fp, 1000);
3777 if (!BP_NOMCP(bp)) {
3778 int mb_idx = BP_FW_MB_IDX(bp);
3779 u32 drv_pulse;
3780 u32 mcp_pulse;
3782 ++bp->fw_drv_pulse_wr_seq;
3783 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
3784 /* TBD - add SYSTEM_TIME */
3785 drv_pulse = bp->fw_drv_pulse_wr_seq;
3786 SHMEM_WR(bp, func_mb[mb_idx].drv_pulse_mb, drv_pulse);
3788 mcp_pulse = (SHMEM_RD(bp, func_mb[mb_idx].mcp_pulse_mb) &
3789 MCP_PULSE_SEQ_MASK);
3790 /* The delta between driver pulse and mcp response
3791 * should be 1 (before mcp response) or 0 (after mcp response)
3793 if ((drv_pulse != mcp_pulse) &&
3794 (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
3795 /* someone lost a heartbeat... */
3796 BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
3797 drv_pulse, mcp_pulse);
3801 if (bp->state == BNX2X_STATE_OPEN)
3802 bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
3804 timer_restart:
3805 mod_timer(&bp->timer, jiffies + bp->current_interval);
3808 /* end of Statistics */
3810 /* nic init */
3813 * nic init service functions
3816 static inline void bnx2x_fill(struct bnx2x *bp, u32 addr, int fill, u32 len)
3818 u32 i;
3819 if (!(len%4) && !(addr%4))
3820 for (i = 0; i < len; i += 4)
3821 REG_WR(bp, addr + i, fill);
3822 else
3823 for (i = 0; i < len; i++)
3824 REG_WR8(bp, addr + i, fill);
3828 /* helper: writes FP SP data to FW - data_size in dwords */
3829 static inline void bnx2x_wr_fp_sb_data(struct bnx2x *bp,
3830 int fw_sb_id,
3831 u32 *sb_data_p,
3832 u32 data_size)
3834 int index;
3835 for (index = 0; index < data_size; index++)
3836 REG_WR(bp, BAR_CSTRORM_INTMEM +
3837 CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id) +
3838 sizeof(u32)*index,
3839 *(sb_data_p + index));
3842 static inline void bnx2x_zero_fp_sb(struct bnx2x *bp, int fw_sb_id)
3844 u32 *sb_data_p;
3845 u32 data_size = 0;
3846 struct hc_status_block_data_e2 sb_data_e2;
3847 struct hc_status_block_data_e1x sb_data_e1x;
3849 /* disable the function first */
3850 if (CHIP_IS_E2(bp)) {
3851 memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2));
3852 sb_data_e2.common.p_func.pf_id = HC_FUNCTION_DISABLED;
3853 sb_data_e2.common.p_func.vf_id = HC_FUNCTION_DISABLED;
3854 sb_data_e2.common.p_func.vf_valid = false;
3855 sb_data_p = (u32 *)&sb_data_e2;
3856 data_size = sizeof(struct hc_status_block_data_e2)/sizeof(u32);
3857 } else {
3858 memset(&sb_data_e1x, 0,
3859 sizeof(struct hc_status_block_data_e1x));
3860 sb_data_e1x.common.p_func.pf_id = HC_FUNCTION_DISABLED;
3861 sb_data_e1x.common.p_func.vf_id = HC_FUNCTION_DISABLED;
3862 sb_data_e1x.common.p_func.vf_valid = false;
3863 sb_data_p = (u32 *)&sb_data_e1x;
3864 data_size = sizeof(struct hc_status_block_data_e1x)/sizeof(u32);
3866 bnx2x_wr_fp_sb_data(bp, fw_sb_id, sb_data_p, data_size);
3868 bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
3869 CSTORM_STATUS_BLOCK_OFFSET(fw_sb_id), 0,
3870 CSTORM_STATUS_BLOCK_SIZE);
3871 bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
3872 CSTORM_SYNC_BLOCK_OFFSET(fw_sb_id), 0,
3873 CSTORM_SYNC_BLOCK_SIZE);
3876 /* helper: writes SP SB data to FW */
3877 static inline void bnx2x_wr_sp_sb_data(struct bnx2x *bp,
3878 struct hc_sp_status_block_data *sp_sb_data)
3880 int func = BP_FUNC(bp);
3881 int i;
3882 for (i = 0; i < sizeof(struct hc_sp_status_block_data)/sizeof(u32); i++)
3883 REG_WR(bp, BAR_CSTRORM_INTMEM +
3884 CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func) +
3885 i*sizeof(u32),
3886 *((u32 *)sp_sb_data + i));
3889 static inline void bnx2x_zero_sp_sb(struct bnx2x *bp)
3891 int func = BP_FUNC(bp);
3892 struct hc_sp_status_block_data sp_sb_data;
3893 memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data));
3895 sp_sb_data.p_func.pf_id = HC_FUNCTION_DISABLED;
3896 sp_sb_data.p_func.vf_id = HC_FUNCTION_DISABLED;
3897 sp_sb_data.p_func.vf_valid = false;
3899 bnx2x_wr_sp_sb_data(bp, &sp_sb_data);
3901 bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
3902 CSTORM_SP_STATUS_BLOCK_OFFSET(func), 0,
3903 CSTORM_SP_STATUS_BLOCK_SIZE);
3904 bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
3905 CSTORM_SP_SYNC_BLOCK_OFFSET(func), 0,
3906 CSTORM_SP_SYNC_BLOCK_SIZE);
3911 static inline
3912 void bnx2x_setup_ndsb_state_machine(struct hc_status_block_sm *hc_sm,
3913 int igu_sb_id, int igu_seg_id)
3915 hc_sm->igu_sb_id = igu_sb_id;
3916 hc_sm->igu_seg_id = igu_seg_id;
3917 hc_sm->timer_value = 0xFF;
3918 hc_sm->time_to_expire = 0xFFFFFFFF;
3921 void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid,
3922 u8 vf_valid, int fw_sb_id, int igu_sb_id)
3924 int igu_seg_id;
3926 struct hc_status_block_data_e2 sb_data_e2;
3927 struct hc_status_block_data_e1x sb_data_e1x;
3928 struct hc_status_block_sm *hc_sm_p;
3929 struct hc_index_data *hc_index_p;
3930 int data_size;
3931 u32 *sb_data_p;
3933 if (CHIP_INT_MODE_IS_BC(bp))
3934 igu_seg_id = HC_SEG_ACCESS_NORM;
3935 else
3936 igu_seg_id = IGU_SEG_ACCESS_NORM;
3938 bnx2x_zero_fp_sb(bp, fw_sb_id);
3940 if (CHIP_IS_E2(bp)) {
3941 memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2));
3942 sb_data_e2.common.p_func.pf_id = BP_FUNC(bp);
3943 sb_data_e2.common.p_func.vf_id = vfid;
3944 sb_data_e2.common.p_func.vf_valid = vf_valid;
3945 sb_data_e2.common.p_func.vnic_id = BP_VN(bp);
3946 sb_data_e2.common.same_igu_sb_1b = true;
3947 sb_data_e2.common.host_sb_addr.hi = U64_HI(mapping);
3948 sb_data_e2.common.host_sb_addr.lo = U64_LO(mapping);
3949 hc_sm_p = sb_data_e2.common.state_machine;
3950 hc_index_p = sb_data_e2.index_data;
3951 sb_data_p = (u32 *)&sb_data_e2;
3952 data_size = sizeof(struct hc_status_block_data_e2)/sizeof(u32);
3953 } else {
3954 memset(&sb_data_e1x, 0,
3955 sizeof(struct hc_status_block_data_e1x));
3956 sb_data_e1x.common.p_func.pf_id = BP_FUNC(bp);
3957 sb_data_e1x.common.p_func.vf_id = 0xff;
3958 sb_data_e1x.common.p_func.vf_valid = false;
3959 sb_data_e1x.common.p_func.vnic_id = BP_VN(bp);
3960 sb_data_e1x.common.same_igu_sb_1b = true;
3961 sb_data_e1x.common.host_sb_addr.hi = U64_HI(mapping);
3962 sb_data_e1x.common.host_sb_addr.lo = U64_LO(mapping);
3963 hc_sm_p = sb_data_e1x.common.state_machine;
3964 hc_index_p = sb_data_e1x.index_data;
3965 sb_data_p = (u32 *)&sb_data_e1x;
3966 data_size = sizeof(struct hc_status_block_data_e1x)/sizeof(u32);
3969 bnx2x_setup_ndsb_state_machine(&hc_sm_p[SM_RX_ID],
3970 igu_sb_id, igu_seg_id);
3971 bnx2x_setup_ndsb_state_machine(&hc_sm_p[SM_TX_ID],
3972 igu_sb_id, igu_seg_id);
3974 DP(NETIF_MSG_HW, "Init FW SB %d\n", fw_sb_id);
3976 /* write indices to HW */
3977 bnx2x_wr_fp_sb_data(bp, fw_sb_id, sb_data_p, data_size);
3980 static void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u16 fw_sb_id,
3981 u8 sb_index, u8 disable, u16 usec)
3983 int port = BP_PORT(bp);
3984 u8 ticks = usec / BNX2X_BTR;
3986 storm_memset_hc_timeout(bp, port, fw_sb_id, sb_index, ticks);
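/* A zero coalescing interval implicitly disables this status block index:
 * the line below keeps an explicit disable request and otherwise derives
 * the flag from usec.
 */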
3988 disable = disable ? 1 : (usec ? 0 : 1);
3989 storm_memset_hc_disable(bp, port, fw_sb_id, sb_index, disable);
3992 static void bnx2x_update_coalesce_sb(struct bnx2x *bp, u16 fw_sb_id,
3993 u16 tx_usec, u16 rx_usec)
3995 bnx2x_update_coalesce_sb_index(bp, fw_sb_id, U_SB_ETH_RX_CQ_INDEX,
3996 false, rx_usec);
3997 bnx2x_update_coalesce_sb_index(bp, fw_sb_id, C_SB_ETH_TX_CQ_INDEX,
3998 false, tx_usec);
4001 static void bnx2x_init_def_sb(struct bnx2x *bp)
4003 struct host_sp_status_block *def_sb = bp->def_status_blk;
4004 dma_addr_t mapping = bp->def_status_blk_mapping;
4005 int igu_sp_sb_index;
4006 int igu_seg_id;
4007 int port = BP_PORT(bp);
4008 int func = BP_FUNC(bp);
4009 int reg_offset;
4010 u64 section;
4011 int index;
4012 struct hc_sp_status_block_data sp_sb_data;
4013 memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data));
4015 if (CHIP_INT_MODE_IS_BC(bp)) {
4016 igu_sp_sb_index = DEF_SB_IGU_ID;
4017 igu_seg_id = HC_SEG_ACCESS_DEF;
4018 } else {
4019 igu_sp_sb_index = bp->igu_dsb_id;
4020 igu_seg_id = IGU_SEG_ACCESS_DEF;
4023 /* ATTN */
4024 section = ((u64)mapping) + offsetof(struct host_sp_status_block,
4025 atten_status_block);
4026 def_sb->atten_status_block.status_block_id = igu_sp_sb_index;
4028 bp->attn_state = 0;
4030 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
4031 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
4032 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
4033 int sindex;
4034 /* take care of sig[0]..sig[3]; sig[4] is handled separately below */
4035 for (sindex = 0; sindex < 4; sindex++)
4036 bp->attn_group[index].sig[sindex] =
4037 REG_RD(bp, reg_offset + sindex*0x4 + 0x10*index);
4039 if (CHIP_IS_E2(bp))
4041 * enable5 is separate from the rest of the registers,
4042 * and therefore the address skip is 4
4043 * and not 16 between the different groups
4045 bp->attn_group[index].sig[4] = REG_RD(bp,
4046 reg_offset + 0x10 + 0x4*index);
4047 else
4048 bp->attn_group[index].sig[4] = 0;
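/* Worked example (illustrative): sig[sindex] of attention group 'index'
 * is read from reg_offset + sindex*0x4 + index*0x10, so e.g. group 2,
 * sig[1] lives at reg_offset + 0x24.
 */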
4051 if (bp->common.int_block == INT_BLOCK_HC) {
4052 reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
4053 HC_REG_ATTN_MSG0_ADDR_L);
4055 REG_WR(bp, reg_offset, U64_LO(section));
4056 REG_WR(bp, reg_offset + 4, U64_HI(section));
4057 } else if (CHIP_IS_E2(bp)) {
4058 REG_WR(bp, IGU_REG_ATTN_MSG_ADDR_L, U64_LO(section));
4059 REG_WR(bp, IGU_REG_ATTN_MSG_ADDR_H, U64_HI(section));
4062 section = ((u64)mapping) + offsetof(struct host_sp_status_block,
4063 sp_sb);
4065 bnx2x_zero_sp_sb(bp);
4067 sp_sb_data.host_sb_addr.lo = U64_LO(section);
4068 sp_sb_data.host_sb_addr.hi = U64_HI(section);
4069 sp_sb_data.igu_sb_id = igu_sp_sb_index;
4070 sp_sb_data.igu_seg_id = igu_seg_id;
4071 sp_sb_data.p_func.pf_id = func;
4072 sp_sb_data.p_func.vnic_id = BP_VN(bp);
4073 sp_sb_data.p_func.vf_id = 0xff;
4075 bnx2x_wr_sp_sb_data(bp, &sp_sb_data);
4077 bp->stats_pending = 0;
4078 bp->set_mac_pending = 0;
4080 bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID, 0, IGU_INT_ENABLE, 0);
4083 void bnx2x_update_coalesce(struct bnx2x *bp)
4085 int i;
4087 for_each_queue(bp, i)
4088 bnx2x_update_coalesce_sb(bp, bp->fp[i].fw_sb_id,
4089 bp->rx_ticks, bp->tx_ticks);
4092 static void bnx2x_init_sp_ring(struct bnx2x *bp)
4094 spin_lock_init(&bp->spq_lock);
4095 atomic_set(&bp->spq_left, MAX_SPQ_PENDING);
4097 bp->spq_prod_idx = 0;
4098 bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
4099 bp->spq_prod_bd = bp->spq;
4100 bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
4103 static void bnx2x_init_eq_ring(struct bnx2x *bp)
4105 int i;
4106 for (i = 1; i <= NUM_EQ_PAGES; i++) {
4107 union event_ring_elem *elem =
4108 &bp->eq_ring[EQ_DESC_CNT_PAGE * i - 1];
4110 elem->next_page.addr.hi =
4111 cpu_to_le32(U64_HI(bp->eq_mapping +
4112 BCM_PAGE_SIZE * (i % NUM_EQ_PAGES)));
4113 elem->next_page.addr.lo =
4114 cpu_to_le32(U64_LO(bp->eq_mapping +
4115 BCM_PAGE_SIZE*(i % NUM_EQ_PAGES)));
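/* The last descriptor of each EQ page is used as a link element pointing
 * at the next page; for i == NUM_EQ_PAGES the (i % NUM_EQ_PAGES) term
 * wraps the final page back to the first, forming a circular event queue.
 */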
4117 bp->eq_cons = 0;
4118 bp->eq_prod = NUM_EQ_DESC;
4119 bp->eq_cons_sb = BNX2X_EQ_INDEX;
4122 static void bnx2x_init_ind_table(struct bnx2x *bp)
4124 int func = BP_FUNC(bp);
4125 int i;
4127 if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
4128 return;
4130 DP(NETIF_MSG_IFUP,
4131 "Initializing indirection table multi_mode %d\n", bp->multi_mode);
4132 for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
4133 REG_WR8(bp, BAR_TSTRORM_INTMEM +
4134 TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
4135 bp->fp->cl_id + (i % bp->num_queues));
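/* Illustrative: with e.g. 4 queues and a leading client id C, the table
 * entries cycle C, C+1, C+2, C+3, C, ... so RSS hash buckets are spread
 * round-robin over the enabled queues.
 */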
4138 void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
4140 int mode = bp->rx_mode;
4141 u16 cl_id;
4143 /* All but management unicast packets should pass to the host as well */
4144 u32 llh_mask =
4145 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_BRCST |
4146 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_MLCST |
4147 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_VLAN |
4148 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_NO_VLAN;
4150 switch (mode) {
4151 case BNX2X_RX_MODE_NONE: /* no Rx */
4152 cl_id = BP_L_ID(bp);
4153 bnx2x_rxq_set_mac_filters(bp, cl_id, BNX2X_ACCEPT_NONE);
4154 break;
4156 case BNX2X_RX_MODE_NORMAL:
4157 cl_id = BP_L_ID(bp);
4158 bnx2x_rxq_set_mac_filters(bp, cl_id,
4159 BNX2X_ACCEPT_UNICAST |
4160 BNX2X_ACCEPT_BROADCAST |
4161 BNX2X_ACCEPT_MULTICAST);
4162 break;
4164 case BNX2X_RX_MODE_ALLMULTI:
4165 cl_id = BP_L_ID(bp);
4166 bnx2x_rxq_set_mac_filters(bp, cl_id,
4167 BNX2X_ACCEPT_UNICAST |
4168 BNX2X_ACCEPT_BROADCAST |
4169 BNX2X_ACCEPT_ALL_MULTICAST);
4170 break;
4172 case BNX2X_RX_MODE_PROMISC:
4173 cl_id = BP_L_ID(bp);
4174 bnx2x_rxq_set_mac_filters(bp, cl_id, BNX2X_PROMISCUOUS_MODE);
4176 /* pass management unicast packets as well */
4177 llh_mask |= NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_UNCST;
4178 break;
4180 default:
4181 BNX2X_ERR("BAD rx mode (%d)\n", mode);
4182 break;
4185 REG_WR(bp,
4186 BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
4187 NIG_REG_LLH0_BRB1_DRV_MASK,
4188 llh_mask);
4190 DP(NETIF_MSG_IFUP, "rx mode %d\n"
4191 "drop_ucast 0x%x\ndrop_mcast 0x%x\ndrop_bcast 0x%x\n"
4192 "accp_ucast 0x%x\naccp_mcast 0x%x\naccp_bcast 0x%x\n", mode,
4193 bp->mac_filters.ucast_drop_all,
4194 bp->mac_filters.mcast_drop_all,
4195 bp->mac_filters.bcast_drop_all,
4196 bp->mac_filters.ucast_accept_all,
4197 bp->mac_filters.mcast_accept_all,
4198 bp->mac_filters.bcast_accept_all
4201 storm_memset_mac_filters(bp, &bp->mac_filters, BP_FUNC(bp));
4204 static void bnx2x_init_internal_common(struct bnx2x *bp)
4206 int i;
4208 if (!CHIP_IS_E1(bp)) {
4210 /* xstorm needs to know whether to add ovlan to packets or not;
4211 * in switch-independent mode we write 0 here... */
4212 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
4213 bp->mf_mode);
4214 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
4215 bp->mf_mode);
4216 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
4217 bp->mf_mode);
4218 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
4219 bp->mf_mode);
4222 /* Zero this manually as its initialization is
4223 currently missing in the initTool */
4224 for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
4225 REG_WR(bp, BAR_USTRORM_INTMEM +
4226 USTORM_AGG_DATA_OFFSET + i * 4, 0);
4227 if (CHIP_IS_E2(bp)) {
4228 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_IGU_MODE_OFFSET,
4229 CHIP_INT_MODE_IS_BC(bp) ?
4230 HC_IGU_BC_MODE : HC_IGU_NBC_MODE);
4234 static void bnx2x_init_internal_port(struct bnx2x *bp)
4236 /* port */
4239 static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
4241 switch (load_code) {
4242 case FW_MSG_CODE_DRV_LOAD_COMMON:
4243 case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP:
4244 bnx2x_init_internal_common(bp);
4245 /* no break */
4247 case FW_MSG_CODE_DRV_LOAD_PORT:
4248 bnx2x_init_internal_port(bp);
4249 /* no break */
4251 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
4252 /* internal memory per function is
4253 initialized inside bnx2x_pf_init */
4254 break;
4256 default:
4257 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
4258 break;
4262 static void bnx2x_init_fp_sb(struct bnx2x *bp, int fp_idx)
4264 struct bnx2x_fastpath *fp = &bp->fp[fp_idx];
4266 fp->state = BNX2X_FP_STATE_CLOSED;
4268 fp->index = fp->cid = fp_idx;
4269 fp->cl_id = BP_L_ID(bp) + fp_idx;
4270 fp->fw_sb_id = bp->base_fw_ndsb + fp->cl_id + CNIC_CONTEXT_USE;
4271 fp->igu_sb_id = bp->igu_base_sb + fp_idx + CNIC_CONTEXT_USE;
4272 /* qZone id equals the FW (per path) client id */
4273 fp->cl_qzone_id = fp->cl_id +
4274 BP_PORT(bp)*(CHIP_IS_E2(bp) ? ETH_MAX_RX_CLIENTS_E2 :
4275 ETH_MAX_RX_CLIENTS_E1H);
4276 /* init shortcut */
4277 fp->ustorm_rx_prods_offset = CHIP_IS_E2(bp) ?
4278 USTORM_RX_PRODS_E2_OFFSET(fp->cl_qzone_id) :
4279 USTORM_RX_PRODS_E1X_OFFSET(BP_PORT(bp), fp->cl_id);
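/* Illustrative example: for fp_idx == 2 the assignments above yield
 * cl_id == BP_L_ID(bp) + 2,
 * fw_sb_id == bp->base_fw_ndsb + cl_id + CNIC_CONTEXT_USE and
 * igu_sb_id == bp->igu_base_sb + 2 + CNIC_CONTEXT_USE.
 */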
4280 /* Set up SB indices */
4281 fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
4282 fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
4284 DP(NETIF_MSG_IFUP, "queue[%d]: bnx2x_init_sb(%p,%p) "
4285 "cl_id %d fw_sb %d igu_sb %d\n",
4286 fp_idx, bp, fp->status_blk.e1x_sb, fp->cl_id, fp->fw_sb_id,
4287 fp->igu_sb_id);
4288 bnx2x_init_sb(bp, fp->status_blk_mapping, BNX2X_VF_ID_INVALID, false,
4289 fp->fw_sb_id, fp->igu_sb_id);
4291 bnx2x_update_fpsb_idx(fp);
4294 void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
4296 int i;
4298 for_each_queue(bp, i)
4299 bnx2x_init_fp_sb(bp, i);
4300 #ifdef BCM_CNIC
4302 bnx2x_init_sb(bp, bp->cnic_sb_mapping,
4303 BNX2X_VF_ID_INVALID, false,
4304 CNIC_SB_ID(bp), CNIC_IGU_SB_ID(bp));
4306 #endif
4308 /* ensure status block indices were read */
4309 rmb();
4311 bnx2x_init_def_sb(bp);
4312 bnx2x_update_dsb_idx(bp);
4313 bnx2x_init_rx_rings(bp);
4314 bnx2x_init_tx_rings(bp);
4315 bnx2x_init_sp_ring(bp);
4316 bnx2x_init_eq_ring(bp);
4317 bnx2x_init_internal(bp, load_code);
4318 bnx2x_pf_init(bp);
4319 bnx2x_init_ind_table(bp);
4320 bnx2x_stats_init(bp);
4322 /* At this point, we are ready for interrupts */
4323 atomic_set(&bp->intr_sem, 0);
4325 /* flush all before enabling interrupts */
4326 mb();
4327 mmiowb();
4329 bnx2x_int_enable(bp);
4331 /* Check for SPIO5 */
4332 bnx2x_attn_int_deasserted0(bp,
4333 REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + BP_PORT(bp)*4) &
4334 AEU_INPUTS_ATTN_BITS_SPIO5);
4337 /* end of nic init */
4340 * gzip service functions
4343 static int bnx2x_gunzip_init(struct bnx2x *bp)
4345 bp->gunzip_buf = dma_alloc_coherent(&bp->pdev->dev, FW_BUF_SIZE,
4346 &bp->gunzip_mapping, GFP_KERNEL);
4347 if (bp->gunzip_buf == NULL)
4348 goto gunzip_nomem1;
4350 bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
4351 if (bp->strm == NULL)
4352 goto gunzip_nomem2;
4354 bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
4355 GFP_KERNEL);
4356 if (bp->strm->workspace == NULL)
4357 goto gunzip_nomem3;
4359 return 0;
4361 gunzip_nomem3:
4362 kfree(bp->strm);
4363 bp->strm = NULL;
4365 gunzip_nomem2:
4366 dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
4367 bp->gunzip_mapping);
4368 bp->gunzip_buf = NULL;
4370 gunzip_nomem1:
4371 netdev_err(bp->dev, "Cannot allocate firmware buffer for"
4372 " decompression\n");
4373 return -ENOMEM;
4376 static void bnx2x_gunzip_end(struct bnx2x *bp)
4378 kfree(bp->strm->workspace);
4380 kfree(bp->strm);
4381 bp->strm = NULL;
4383 if (bp->gunzip_buf) {
4384 dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
4385 bp->gunzip_mapping);
4386 bp->gunzip_buf = NULL;
4390 static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len)
4392 int n, rc;
4394 /* check gzip header */
4395 if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED)) {
4396 BNX2X_ERR("Bad gzip header\n");
4397 return -EINVAL;
4400 n = 10;
4402 #define FNAME 0x8
4404 if (zbuf[3] & FNAME)
4405 while ((zbuf[n++] != 0) && (n < len));
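/* The fixed gzip header is 10 bytes (ID bytes 0x1f 0x8b, method 8 ==
 * Z_DEFLATED, flags, mtime, xfl, os); if the FNAME flag (bit 3 of the
 * flags byte) is set, a NUL-terminated file name follows and is skipped
 * by the loop above. The negative window bits passed to
 * zlib_inflateInit2() below select a raw deflate stream with no header.
 */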
4407 bp->strm->next_in = (typeof(bp->strm->next_in))zbuf + n;
4408 bp->strm->avail_in = len - n;
4409 bp->strm->next_out = bp->gunzip_buf;
4410 bp->strm->avail_out = FW_BUF_SIZE;
4412 rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
4413 if (rc != Z_OK)
4414 return rc;
4416 rc = zlib_inflate(bp->strm, Z_FINISH);
4417 if ((rc != Z_OK) && (rc != Z_STREAM_END))
4418 netdev_err(bp->dev, "Firmware decompression error: %s\n",
4419 bp->strm->msg);
4421 bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
4422 if (bp->gunzip_outlen & 0x3)
4423 netdev_err(bp->dev, "Firmware decompression error:"
4424 " gunzip_outlen (%d) not aligned\n",
4425 bp->gunzip_outlen);
4426 bp->gunzip_outlen >>= 2;
4428 zlib_inflateEnd(bp->strm);
4430 if (rc == Z_STREAM_END)
4431 return 0;
4433 return rc;
4436 /* nic load/unload */
4439 * General service functions
4442 /* send a NIG loopback debug packet */
4443 static void bnx2x_lb_pckt(struct bnx2x *bp)
4445 u32 wb_write[3];
4447 /* Ethernet source and destination addresses */
4448 wb_write[0] = 0x55555555;
4449 wb_write[1] = 0x55555555;
4450 wb_write[2] = 0x20; /* SOP */
4451 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
4453 /* NON-IP protocol */
4454 wb_write[0] = 0x09000000;
4455 wb_write[1] = 0x55555555;
4456 wb_write[2] = 0x10; /* EOP, eop_bvalid = 0 */
4457 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
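/* Note (illustrative): the two 3-dword DMAE writes above inject one
 * minimal debug frame into the NIG loopback interface - the first beat
 * carries the addresses with the 0x20 start-of-packet marker, the second
 * a non-IP type word with the 0x10 end-of-packet marker.
 */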
4460 /* Some of the internal memories
4461 * are not directly readable from the driver;
4462 * to test them we send debug packets
4464 static int bnx2x_int_mem_test(struct bnx2x *bp)
4466 int factor;
4467 int count, i;
4468 u32 val = 0;
4470 if (CHIP_REV_IS_FPGA(bp))
4471 factor = 120;
4472 else if (CHIP_REV_IS_EMUL(bp))
4473 factor = 200;
4474 else
4475 factor = 1;
4477 /* Disable inputs of parser neighbor blocks */
4478 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
4479 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
4480 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
4481 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
4483 /* Write 0 to parser credits for CFC search request */
4484 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
4486 /* send Ethernet packet */
4487 bnx2x_lb_pckt(bp);
4489 /* TODO: do we need to reset the NIG statistics? */
4490 /* Wait until NIG register shows 1 packet of size 0x10 */
4491 count = 1000 * factor;
4492 while (count) {
4494 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
4495 val = *bnx2x_sp(bp, wb_data[0]);
4496 if (val == 0x10)
4497 break;
4499 msleep(10);
4500 count--;
4502 if (val != 0x10) {
4503 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
4504 return -1;
4507 /* Wait until PRS register shows 1 packet */
4508 count = 1000 * factor;
4509 while (count) {
4510 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
4511 if (val == 1)
4512 break;
4514 msleep(10);
4515 count--;
4517 if (val != 0x1) {
4518 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
4519 return -2;
4522 /* Reset and init BRB, PRS */
4523 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
4524 msleep(50);
4525 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
4526 msleep(50);
4527 bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
4528 bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
4530 DP(NETIF_MSG_HW, "part2\n");
4532 /* Disable inputs of parser neighbor blocks */
4533 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
4534 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
4535 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
4536 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
4538 /* Write 0 to parser credits for CFC search request */
4539 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
4541 /* send 10 Ethernet packets */
4542 for (i = 0; i < 10; i++)
4543 bnx2x_lb_pckt(bp);
4545 /* Wait until NIG register shows 10 + 1
4546 packets of size 11*0x10 = 0xb0 */
4547 count = 1000 * factor;
4548 while (count) {
4550 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
4551 val = *bnx2x_sp(bp, wb_data[0]);
4552 if (val == 0xb0)
4553 break;
4555 msleep(10);
4556 count--;
4558 if (val != 0xb0) {
4559 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
4560 return -3;
4563 /* Wait until PRS register shows 2 packets */
4564 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
4565 if (val != 2)
4566 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
4568 /* Write 1 to parser credits for CFC search request */
4569 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
4571 /* Wait until PRS register shows 3 packets */
4572 msleep(10 * factor);
4573 /* The PRS register should now show 3 packets */
4574 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
4575 if (val != 3)
4576 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
4578 /* clear NIG EOP FIFO */
4579 for (i = 0; i < 11; i++)
4580 REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
4581 val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
4582 if (val != 1) {
4583 BNX2X_ERR("clear of NIG failed\n");
4584 return -4;
4587 /* Reset and init BRB, PRS, NIG */
4588 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
4589 msleep(50);
4590 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
4591 msleep(50);
4592 bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
4593 bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
4594 #ifndef BCM_CNIC
4595 /* set NIC mode */
4596 REG_WR(bp, PRS_REG_NIC_MODE, 1);
4597 #endif
4599 /* Enable inputs of parser neighbor blocks */
4600 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
4601 REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
4602 REG_WR(bp, CFC_REG_DEBUG0, 0x0);
4603 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);
4605 DP(NETIF_MSG_HW, "done\n");
4607 return 0; /* OK */
4610 static void enable_blocks_attention(struct bnx2x *bp)
4612 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
4613 if (CHIP_IS_E2(bp))
4614 REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0x40);
4615 else
4616 REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
4617 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
4618 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
4620 * mask read length error interrupts in brb for parser
4621 * (parsing unit and 'checksum and crc' unit)
4622 * these errors are legal (PU reads fixed length and CAC can cause
4623 * read length error on truncated packets)
4625 REG_WR(bp, BRB1_REG_BRB1_INT_MASK, 0xFC00);
4626 REG_WR(bp, QM_REG_QM_INT_MASK, 0);
4627 REG_WR(bp, TM_REG_TM_INT_MASK, 0);
4628 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
4629 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
4630 REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
4631 /* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
4632 /* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
4633 REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
4634 REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
4635 REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
4636 /* REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
4637 /* REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
4638 REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
4639 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
4640 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
4641 REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
4642 /* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
4643 /* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
4644 if (CHIP_REV_IS_FPGA(bp))
4645 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
4646 else if (CHIP_IS_E2(bp))
4647 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0,
4648 (PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_OF
4649 | PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_AFT
4650 | PXP2_PXP2_INT_MASK_0_REG_PGL_PCIE_ATTN
4651 | PXP2_PXP2_INT_MASK_0_REG_PGL_READ_BLOCKED
4652 | PXP2_PXP2_INT_MASK_0_REG_PGL_WRITE_BLOCKED));
4653 else
4654 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
4655 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
4656 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
4657 REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
4658 /* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
4659 /* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
4660 REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
4661 REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
4662 /* REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
4663 REG_WR(bp, PBF_REG_PBF_INT_MASK, 0X18); /* bit 3,4 masked */
4666 static const struct {
4667 u32 addr;
4668 u32 mask;
4669 } bnx2x_parity_mask[] = {
4670 {PXP_REG_PXP_PRTY_MASK, 0x3ffffff},
4671 {PXP2_REG_PXP2_PRTY_MASK_0, 0xffffffff},
4672 {PXP2_REG_PXP2_PRTY_MASK_1, 0x7f},
4673 {HC_REG_HC_PRTY_MASK, 0x7},
4674 {MISC_REG_MISC_PRTY_MASK, 0x1},
4675 {QM_REG_QM_PRTY_MASK, 0x0},
4676 {DORQ_REG_DORQ_PRTY_MASK, 0x0},
4677 {GRCBASE_UPB + PB_REG_PB_PRTY_MASK, 0x0},
4678 {GRCBASE_XPB + PB_REG_PB_PRTY_MASK, 0x0},
4679 {SRC_REG_SRC_PRTY_MASK, 0x4}, /* bit 2 */
4680 {CDU_REG_CDU_PRTY_MASK, 0x0},
4681 {CFC_REG_CFC_PRTY_MASK, 0x0},
4682 {DBG_REG_DBG_PRTY_MASK, 0x0},
4683 {DMAE_REG_DMAE_PRTY_MASK, 0x0},
4684 {BRB1_REG_BRB1_PRTY_MASK, 0x0},
4685 {PRS_REG_PRS_PRTY_MASK, (1<<6)},/* bit 6 */
4686 {TSDM_REG_TSDM_PRTY_MASK, 0x18},/* bit 3,4 */
4687 {CSDM_REG_CSDM_PRTY_MASK, 0x8}, /* bit 3 */
4688 {USDM_REG_USDM_PRTY_MASK, 0x38},/* bit 3,4,5 */
4689 {XSDM_REG_XSDM_PRTY_MASK, 0x8}, /* bit 3 */
4690 {TSEM_REG_TSEM_PRTY_MASK_0, 0x0},
4691 {TSEM_REG_TSEM_PRTY_MASK_1, 0x0},
4692 {USEM_REG_USEM_PRTY_MASK_0, 0x0},
4693 {USEM_REG_USEM_PRTY_MASK_1, 0x0},
4694 {CSEM_REG_CSEM_PRTY_MASK_0, 0x0},
4695 {CSEM_REG_CSEM_PRTY_MASK_1, 0x0},
4696 {XSEM_REG_XSEM_PRTY_MASK_0, 0x0},
4697 {XSEM_REG_XSEM_PRTY_MASK_1, 0x0}
4700 static void enable_blocks_parity(struct bnx2x *bp)
4702 int i;
4704 for (i = 0; i < ARRAY_SIZE(bnx2x_parity_mask); i++)
4705 REG_WR(bp, bnx2x_parity_mask[i].addr,
4706 bnx2x_parity_mask[i].mask);
4710 static void bnx2x_reset_common(struct bnx2x *bp)
4712 /* reset_common */
4713 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
4714 0xd3ffff7f);
4715 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
4718 static void bnx2x_init_pxp(struct bnx2x *bp)
4720 u16 devctl;
4721 int r_order, w_order;
4723 pci_read_config_word(bp->pdev,
4724 bp->pcie_cap + PCI_EXP_DEVCTL, &devctl);
4725 DP(NETIF_MSG_HW, "read 0x%x from devctl\n", devctl);
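/* In the PCIe Device Control register the Max_Payload_Size code sits in
 * bits 7:5 and the Max_Read_Request_Size code in bits 14:12; both encode
 * a size of 128 << code bytes, so e.g. code 2 means 512 bytes.
 */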
4726 w_order = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
4727 if (bp->mrrs == -1)
4728 r_order = ((devctl & PCI_EXP_DEVCTL_READRQ) >> 12);
4729 else {
4730 DP(NETIF_MSG_HW, "force read order to %d\n", bp->mrrs);
4731 r_order = bp->mrrs;
4734 bnx2x_init_pxp_arb(bp, r_order, w_order);
4737 static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp)
4739 int is_required;
4740 u32 val;
4741 int port;
4743 if (BP_NOMCP(bp))
4744 return;
4746 is_required = 0;
4747 val = SHMEM_RD(bp, dev_info.shared_hw_config.config2) &
4748 SHARED_HW_CFG_FAN_FAILURE_MASK;
4750 if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED)
4751 is_required = 1;
4754 * The fan failure mechanism is usually related to the PHY type since
4755 * the power consumption of the board is affected by the PHY. Currently,
4756 * fan is required for most designs with SFX7101, BCM8727 and BCM8481.
4758 else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE)
4759 for (port = PORT_0; port < PORT_MAX; port++) {
4760 is_required |=
4761 bnx2x_fan_failure_det_req(
4763 bp->common.shmem_base,
4764 bp->common.shmem2_base,
4765 port);
4768 DP(NETIF_MSG_HW, "fan detection setting: %d\n", is_required);
4770 if (is_required == 0)
4771 return;
4773 /* Fan failure is indicated by SPIO 5 */
4774 bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
4775 MISC_REGISTERS_SPIO_INPUT_HI_Z);
4777 /* set to active low mode */
4778 val = REG_RD(bp, MISC_REG_SPIO_INT);
4779 val |= ((1 << MISC_REGISTERS_SPIO_5) <<
4780 MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
4781 REG_WR(bp, MISC_REG_SPIO_INT, val);
4783 /* enable interrupt to signal the IGU */
4784 val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
4785 val |= (1 << MISC_REGISTERS_SPIO_5);
4786 REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
4789 static void bnx2x_pretend_func(struct bnx2x *bp, u8 pretend_func_num)
4791 u32 offset = 0;
4793 if (CHIP_IS_E1(bp))
4794 return;
4795 if (CHIP_IS_E1H(bp) && (pretend_func_num >= E1H_FUNC_MAX))
4796 return;
4798 switch (BP_ABS_FUNC(bp)) {
4799 case 0:
4800 offset = PXP2_REG_PGL_PRETEND_FUNC_F0;
4801 break;
4802 case 1:
4803 offset = PXP2_REG_PGL_PRETEND_FUNC_F1;
4804 break;
4805 case 2:
4806 offset = PXP2_REG_PGL_PRETEND_FUNC_F2;
4807 break;
4808 case 3:
4809 offset = PXP2_REG_PGL_PRETEND_FUNC_F3;
4810 break;
4811 case 4:
4812 offset = PXP2_REG_PGL_PRETEND_FUNC_F4;
4813 break;
4814 case 5:
4815 offset = PXP2_REG_PGL_PRETEND_FUNC_F5;
4816 break;
4817 case 6:
4818 offset = PXP2_REG_PGL_PRETEND_FUNC_F6;
4819 break;
4820 case 7:
4821 offset = PXP2_REG_PGL_PRETEND_FUNC_F7;
4822 break;
4823 default:
4824 return;
4827 REG_WR(bp, offset, pretend_func_num);
4828 REG_RD(bp, offset);
4829 DP(NETIF_MSG_HW, "Pretending to func %d\n", pretend_func_num);
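/* Illustrative usage note: after the pretend register is written, GRC
 * accesses from this PCI function are issued on behalf of
 * pretend_func_num; callers restore normal operation with
 *   bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
 * as done in the timers-bug workarounds below.
 */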
4832 static void bnx2x_pf_disable(struct bnx2x *bp)
4834 u32 val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);
4835 val &= ~IGU_PF_CONF_FUNC_EN;
4837 REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
4838 REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0);
4839 REG_WR(bp, CFC_REG_WEAK_ENABLE_PF, 0);
4842 static int bnx2x_init_hw_common(struct bnx2x *bp, u32 load_code)
4844 u32 val, i;
4846 DP(BNX2X_MSG_MCP, "starting common init func %d\n", BP_ABS_FUNC(bp));
4848 bnx2x_reset_common(bp);
4849 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
4850 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);
4852 bnx2x_init_block(bp, MISC_BLOCK, COMMON_STAGE);
4853 if (!CHIP_IS_E1(bp))
4854 REG_WR(bp, MISC_REG_E1HMF_MODE, IS_MF(bp));
4856 if (CHIP_IS_E2(bp)) {
4857 u8 fid;
4860 * In 4-port or 2-port mode we need to turn off master-enable
4861 * for everyone first and then turn it back on for ourselves.
4862 * So, regardless of multi-function mode, we always disable it
4863 * for all functions on the given path, which means 0,2,4,6 for
4864 * path 0 and 1,3,5,7 for path 1
4866 for (fid = BP_PATH(bp); fid < E2_FUNC_MAX*2; fid += 2) {
4867 if (fid == BP_ABS_FUNC(bp)) {
4868 REG_WR(bp,
4869 PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER,
4871 continue;
4874 bnx2x_pretend_func(bp, fid);
4875 /* clear pf enable */
4876 bnx2x_pf_disable(bp);
4877 bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
4881 bnx2x_init_block(bp, PXP_BLOCK, COMMON_STAGE);
4882 if (CHIP_IS_E1(bp)) {
4883 /* enable HW interrupt from PXP on USDM overflow
4884 bit 16 on INT_MASK_0 */
4885 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
4888 bnx2x_init_block(bp, PXP2_BLOCK, COMMON_STAGE);
4889 bnx2x_init_pxp(bp);
4891 #ifdef __BIG_ENDIAN
4892 REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
4893 REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
4894 REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
4895 REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
4896 REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
4897 /* make sure this value is 0 */
4898 REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);
4900 /* REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
4901 REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
4902 REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
4903 REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
4904 REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
4905 #endif
4907 bnx2x_ilt_init_page_size(bp, INITOP_SET);
4910 if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
4911 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
4913 /* let the HW do its magic ... */
4914 msleep(100);
4915 /* finish PXP init */
4916 val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
4917 if (val != 1) {
4918 BNX2X_ERR("PXP2 CFG failed\n");
4919 return -EBUSY;
4921 val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
4922 if (val != 1) {
4923 BNX2X_ERR("PXP2 RD_INIT failed\n");
4924 return -EBUSY;
4927 /* Timers bug workaround E2 only. We need to set the entire ILT to
4928 * have entries with value "0" and valid bit on.
4929 * This needs to be done by the first PF that is loaded in a path
4930 * (i.e. common phase)
4932 if (CHIP_IS_E2(bp)) {
4933 struct ilt_client_info ilt_cli;
4934 struct bnx2x_ilt ilt;
4935 memset(&ilt_cli, 0, sizeof(struct ilt_client_info));
4936 memset(&ilt, 0, sizeof(struct bnx2x_ilt));
4938 /* initialize dummy TM client */
4939 ilt_cli.start = 0;
4940 ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1;
4941 ilt_cli.client_num = ILT_CLIENT_TM;
4943 /* Step 1: set zeroes to all ilt page entries with valid bit on
4944 * Step 2: set the timers first/last ilt entry to point
4945 * to the entire range to prevent ILT range error for 3rd/4th
4946 * vnic (this code assumes existence of the vnic)
4948 * both steps are performed by a call to bnx2x_ilt_client_init_op()
4949 * with the dummy TM client
4951 * we must use pretend since PXP2_REG_RQ_##blk##_FIRST_ILT
4952 * and its counterpart are split registers
4954 bnx2x_pretend_func(bp, (BP_PATH(bp) + 6));
4955 bnx2x_ilt_client_init_op_ilt(bp, &ilt, &ilt_cli, INITOP_CLEAR);
4956 bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
4958 REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN, BNX2X_PXP_DRAM_ALIGN);
4959 REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN_RD, BNX2X_PXP_DRAM_ALIGN);
4960 REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN_SEL, 1);
4964 REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
4965 REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
4967 if (CHIP_IS_E2(bp)) {
4968 int factor = CHIP_REV_IS_EMUL(bp) ? 1000 :
4969 (CHIP_REV_IS_FPGA(bp) ? 400 : 0);
4970 bnx2x_init_block(bp, PGLUE_B_BLOCK, COMMON_STAGE);
4972 bnx2x_init_block(bp, ATC_BLOCK, COMMON_STAGE);
4974 /* let the HW do its magic ... */
4975 do {
4976 msleep(200);
4977 val = REG_RD(bp, ATC_REG_ATC_INIT_DONE);
4978 } while (factor-- && (val != 1));
4980 if (val != 1) {
4981 BNX2X_ERR("ATC_INIT failed\n");
4982 return -EBUSY;
4986 bnx2x_init_block(bp, DMAE_BLOCK, COMMON_STAGE);
4988 /* clean the DMAE memory */
4989 bp->dmae_ready = 1;
4990 bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);
4992 bnx2x_init_block(bp, TCM_BLOCK, COMMON_STAGE);
4993 bnx2x_init_block(bp, UCM_BLOCK, COMMON_STAGE);
4994 bnx2x_init_block(bp, CCM_BLOCK, COMMON_STAGE);
4995 bnx2x_init_block(bp, XCM_BLOCK, COMMON_STAGE);
4997 bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
4998 bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
4999 bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
5000 bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
5002 bnx2x_init_block(bp, QM_BLOCK, COMMON_STAGE);
5004 if (CHIP_MODE_IS_4_PORT(bp))
5005 bnx2x_init_block(bp, QM_4PORT_BLOCK, COMMON_STAGE);
5006 /* QM queues pointers table */
5007 bnx2x_qm_init_ptr_table(bp, bp->qm_cid_count, INITOP_SET);
5009 /* soft reset pulse */
5010 REG_WR(bp, QM_REG_SOFT_RESET, 1);
5011 REG_WR(bp, QM_REG_SOFT_RESET, 0);
5013 #ifdef BCM_CNIC
5014 bnx2x_init_block(bp, TIMERS_BLOCK, COMMON_STAGE);
5015 #endif
5017 bnx2x_init_block(bp, DQ_BLOCK, COMMON_STAGE);
5018 REG_WR(bp, DORQ_REG_DPM_CID_OFST, BNX2X_DB_SHIFT);
5020 if (!CHIP_REV_IS_SLOW(bp)) {
5021 /* enable hw interrupt from doorbell Q */
5022 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5025 bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
5026 if (CHIP_MODE_IS_4_PORT(bp)) {
5027 REG_WR(bp, BRB1_REG_FULL_LB_XOFF_THRESHOLD, 248);
5028 REG_WR(bp, BRB1_REG_FULL_LB_XON_THRESHOLD, 328);
5031 bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
5032 REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
5033 #ifndef BCM_CNIC
5034 /* set NIC mode */
5035 REG_WR(bp, PRS_REG_NIC_MODE, 1);
5036 #endif
5037 if (!CHIP_IS_E1(bp))
5038 REG_WR(bp, PRS_REG_E1HOV_MODE, IS_MF(bp));
5039 if (CHIP_IS_E2(bp)) {
5040 /* Bit-map indicating which L2 hdrs may appear after the
5041 basic Ethernet header */
5042 int has_ovlan = IS_MF(bp);
5043 REG_WR(bp, PRS_REG_HDRS_AFTER_BASIC, (has_ovlan ? 7 : 6));
5044 REG_WR(bp, PRS_REG_MUST_HAVE_HDRS, (has_ovlan ? 1 : 0));
5047 bnx2x_init_block(bp, TSDM_BLOCK, COMMON_STAGE);
5048 bnx2x_init_block(bp, CSDM_BLOCK, COMMON_STAGE);
5049 bnx2x_init_block(bp, USDM_BLOCK, COMMON_STAGE);
5050 bnx2x_init_block(bp, XSDM_BLOCK, COMMON_STAGE);
5052 bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
5053 bnx2x_init_fill(bp, USEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
5054 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
5055 bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
5057 bnx2x_init_block(bp, TSEM_BLOCK, COMMON_STAGE);
5058 bnx2x_init_block(bp, USEM_BLOCK, COMMON_STAGE);
5059 bnx2x_init_block(bp, CSEM_BLOCK, COMMON_STAGE);
5060 bnx2x_init_block(bp, XSEM_BLOCK, COMMON_STAGE);
5062 if (CHIP_MODE_IS_4_PORT(bp))
5063 bnx2x_init_block(bp, XSEM_4PORT_BLOCK, COMMON_STAGE);
5065 /* sync semi rtc */
5066 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5067 0x80000000);
5068 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
5069 0x80000000);
5071 bnx2x_init_block(bp, UPB_BLOCK, COMMON_STAGE);
5072 bnx2x_init_block(bp, XPB_BLOCK, COMMON_STAGE);
5073 bnx2x_init_block(bp, PBF_BLOCK, COMMON_STAGE);
5075 if (CHIP_IS_E2(bp)) {
5076 int has_ovlan = IS_MF(bp);
5077 REG_WR(bp, PBF_REG_HDRS_AFTER_BASIC, (has_ovlan ? 7 : 6));
5078 REG_WR(bp, PBF_REG_MUST_HAVE_HDRS, (has_ovlan ? 1 : 0));
5081 REG_WR(bp, SRC_REG_SOFT_RST, 1);
5082 for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4)
5083 REG_WR(bp, i, random32());
5084 bnx2x_init_block(bp, SRCH_BLOCK, COMMON_STAGE);
5085 #ifdef BCM_CNIC
5086 REG_WR(bp, SRC_REG_KEYSEARCH_0, 0x63285672);
5087 REG_WR(bp, SRC_REG_KEYSEARCH_1, 0x24b8f2cc);
5088 REG_WR(bp, SRC_REG_KEYSEARCH_2, 0x223aef9b);
5089 REG_WR(bp, SRC_REG_KEYSEARCH_3, 0x26001e3a);
5090 REG_WR(bp, SRC_REG_KEYSEARCH_4, 0x7ae91116);
5091 REG_WR(bp, SRC_REG_KEYSEARCH_5, 0x5ce5230b);
5092 REG_WR(bp, SRC_REG_KEYSEARCH_6, 0x298d8adf);
5093 REG_WR(bp, SRC_REG_KEYSEARCH_7, 0x6eb0ff09);
5094 REG_WR(bp, SRC_REG_KEYSEARCH_8, 0x1830f82f);
5095 REG_WR(bp, SRC_REG_KEYSEARCH_9, 0x01e46be7);
5096 #endif
5097 REG_WR(bp, SRC_REG_SOFT_RST, 0);
5099 if (sizeof(union cdu_context) != 1024)
5100 /* we currently assume that a context is 1024 bytes */
5101 dev_alert(&bp->pdev->dev, "please adjust the size "
5102 "of cdu_context(%ld)\n",
5103 (long)sizeof(union cdu_context));
5105 bnx2x_init_block(bp, CDU_BLOCK, COMMON_STAGE);
5106 val = (4 << 24) + (0 << 12) + 1024;
5107 REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
5109 bnx2x_init_block(bp, CFC_BLOCK, COMMON_STAGE);
5110 REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
5111 /* enable context validation interrupt from CFC */
5112 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
5114 /* set the thresholds to prevent CFC/CDU race */
5115 REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);
5117 bnx2x_init_block(bp, HC_BLOCK, COMMON_STAGE);
5119 if (CHIP_IS_E2(bp) && BP_NOMCP(bp))
5120 REG_WR(bp, IGU_REG_RESET_MEMORIES, 0x36);
5122 bnx2x_init_block(bp, IGU_BLOCK, COMMON_STAGE);
5123 bnx2x_init_block(bp, MISC_AEU_BLOCK, COMMON_STAGE);
5125 bnx2x_init_block(bp, PXPCS_BLOCK, COMMON_STAGE);
5126 /* Reset PCIE errors for debug */
5127 REG_WR(bp, 0x2814, 0xffffffff);
5128 REG_WR(bp, 0x3820, 0xffffffff);
5130 if (CHIP_IS_E2(bp)) {
5131 REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_CONTROL_5,
5132 (PXPCS_TL_CONTROL_5_ERR_UNSPPORT1 |
5133 PXPCS_TL_CONTROL_5_ERR_UNSPPORT));
5134 REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_FUNC345_STAT,
5135 (PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT4 |
5136 PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT3 |
5137 PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT2));
5138 REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_FUNC678_STAT,
5139 (PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT7 |
5140 PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT6 |
5141 PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT5));
5144 bnx2x_init_block(bp, EMAC0_BLOCK, COMMON_STAGE);
5145 bnx2x_init_block(bp, EMAC1_BLOCK, COMMON_STAGE);
5146 bnx2x_init_block(bp, DBU_BLOCK, COMMON_STAGE);
5147 bnx2x_init_block(bp, DBG_BLOCK, COMMON_STAGE);
5149 bnx2x_init_block(bp, NIG_BLOCK, COMMON_STAGE);
5150 if (!CHIP_IS_E1(bp)) {
5151 REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_MF(bp));
5152 REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_MF(bp));
5154 if (CHIP_IS_E2(bp)) {
5155 /* Bit-map indicating which L2 hdrs may appear after the
5156 basic Ethernet header */
5157 REG_WR(bp, NIG_REG_P0_HDRS_AFTER_BASIC, (IS_MF(bp) ? 7 : 6));
5160 if (CHIP_REV_IS_SLOW(bp))
5161 msleep(200);
5163 /* finish CFC init */
5164 val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
5165 if (val != 1) {
5166 BNX2X_ERR("CFC LL_INIT failed\n");
5167 return -EBUSY;
5169 val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
5170 if (val != 1) {
5171 BNX2X_ERR("CFC AC_INIT failed\n");
5172 return -EBUSY;
5174 val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
5175 if (val != 1) {
5176 BNX2X_ERR("CFC CAM_INIT failed\n");
5177 return -EBUSY;
5179 REG_WR(bp, CFC_REG_DEBUG0, 0);
5181 if (CHIP_IS_E1(bp)) {
5182 /* read NIG statistic
5183 to see if this is our first bring-up since power-up */
5184 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5185 val = *bnx2x_sp(bp, wb_data[0]);
5187 /* do internal memory self test */
5188 if ((val == 0) && bnx2x_int_mem_test(bp)) {
5189 BNX2X_ERR("internal mem self test failed\n");
5190 return -EBUSY;
5194 bp->port.need_hw_lock = bnx2x_hw_lock_required(bp,
5195 bp->common.shmem_base,
5196 bp->common.shmem2_base);
5198 bnx2x_setup_fan_failure_detection(bp);
5200 /* clear PXP2 attentions */
5201 REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
5203 enable_blocks_attention(bp);
5204 if (CHIP_PARITY_SUPPORTED(bp))
5205 enable_blocks_parity(bp);
5207 if (!BP_NOMCP(bp)) {
5208 /* In E2 2-PORT mode, same ext phy is used for the two paths */
5209 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) ||
5210 CHIP_IS_E1x(bp)) {
5211 u32 shmem_base[2], shmem2_base[2];
5212 shmem_base[0] = bp->common.shmem_base;
5213 shmem2_base[0] = bp->common.shmem2_base;
5214 if (CHIP_IS_E2(bp)) {
5215 shmem_base[1] =
5216 SHMEM2_RD(bp, other_shmem_base_addr);
5217 shmem2_base[1] =
5218 SHMEM2_RD(bp, other_shmem2_base_addr);
5220 bnx2x_acquire_phy_lock(bp);
5221 bnx2x_common_init_phy(bp, shmem_base, shmem2_base,
5222 bp->common.chip_id);
5223 bnx2x_release_phy_lock(bp);
5225 } else
5226 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
5228 return 0;
5231 static int bnx2x_init_hw_port(struct bnx2x *bp)
5233 int port = BP_PORT(bp);
5234 int init_stage = port ? PORT1_STAGE : PORT0_STAGE;
5235 u32 low, high;
5236 u32 val;
5238 DP(BNX2X_MSG_MCP, "starting port init port %d\n", port);
5240 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
5242 bnx2x_init_block(bp, PXP_BLOCK, init_stage);
5243 bnx2x_init_block(bp, PXP2_BLOCK, init_stage);
5245 /* Timers bug workaround: the common phase disables the pf_master
5246 * bit in PGLUE; we need to enable it here before any DMAE accesses
5247 * are attempted. Therefore the enable-master is added manually to
5248 * the port phase (it also happens in the function phase)
5250 if (CHIP_IS_E2(bp))
5251 REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
5253 bnx2x_init_block(bp, TCM_BLOCK, init_stage);
5254 bnx2x_init_block(bp, UCM_BLOCK, init_stage);
5255 bnx2x_init_block(bp, CCM_BLOCK, init_stage);
5256 bnx2x_init_block(bp, XCM_BLOCK, init_stage);
5258 /* QM cid (connection) count */
5259 bnx2x_qm_init_cid_count(bp, bp->qm_cid_count, INITOP_SET);
5261 #ifdef BCM_CNIC
5262 bnx2x_init_block(bp, TIMERS_BLOCK, init_stage);
5263 REG_WR(bp, TM_REG_LIN0_SCAN_TIME + port*4, 20);
5264 REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31);
5265 #endif
5267 bnx2x_init_block(bp, DQ_BLOCK, init_stage);
5269 if (CHIP_MODE_IS_4_PORT(bp))
5270 bnx2x_init_block(bp, QM_4PORT_BLOCK, init_stage);
5272 if (CHIP_IS_E1(bp) || CHIP_IS_E1H(bp)) {
5273 bnx2x_init_block(bp, BRB1_BLOCK, init_stage);
5274 if (CHIP_REV_IS_SLOW(bp) && CHIP_IS_E1(bp)) {
5275 /* no pause for emulation and FPGA */
5276 low = 0;
5277 high = 513;
5278 } else {
5279 if (IS_MF(bp))
5280 low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
5281 else if (bp->dev->mtu > 4096) {
5282 if (bp->flags & ONE_PORT_FLAG)
5283 low = 160;
5284 else {
5285 val = bp->dev->mtu;
5286 /* (24*1024 + val*4)/256 */
5287 low = 96 + (val/64) +
5288 ((val % 64) ? 1 : 0);
5290 } else
5291 low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160);
5292 high = low + 56; /* 14*1024/256 */
5294 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
5295 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);
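/* Worked example (illustrative): the thresholds are in 256-byte units;
 * for mtu == 9000 the formula gives low = 96 + 140 + 1 = 237
 * (~ (24*1024 + 9000*4)/256) and high = low + 56 = 293.
 */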
5298 if (CHIP_MODE_IS_4_PORT(bp)) {
5299 REG_WR(bp, BRB1_REG_PAUSE_0_XOFF_THRESHOLD_0 + port*8, 248);
5300 REG_WR(bp, BRB1_REG_PAUSE_0_XON_THRESHOLD_0 + port*8, 328);
5301 REG_WR(bp, (BP_PORT(bp) ? BRB1_REG_MAC_GUARANTIED_1 :
5302 BRB1_REG_MAC_GUARANTIED_0), 40);
5305 bnx2x_init_block(bp, PRS_BLOCK, init_stage);
5307 bnx2x_init_block(bp, TSDM_BLOCK, init_stage);
5308 bnx2x_init_block(bp, CSDM_BLOCK, init_stage);
5309 bnx2x_init_block(bp, USDM_BLOCK, init_stage);
5310 bnx2x_init_block(bp, XSDM_BLOCK, init_stage);
5312 bnx2x_init_block(bp, TSEM_BLOCK, init_stage);
5313 bnx2x_init_block(bp, USEM_BLOCK, init_stage);
5314 bnx2x_init_block(bp, CSEM_BLOCK, init_stage);
5315 bnx2x_init_block(bp, XSEM_BLOCK, init_stage);
5316 if (CHIP_MODE_IS_4_PORT(bp))
5317 bnx2x_init_block(bp, XSEM_4PORT_BLOCK, init_stage);
5319 bnx2x_init_block(bp, UPB_BLOCK, init_stage);
5320 bnx2x_init_block(bp, XPB_BLOCK, init_stage);
5322 bnx2x_init_block(bp, PBF_BLOCK, init_stage);
5324 if (!CHIP_IS_E2(bp)) {
5325 /* configure PBF to work without PAUSE mtu 9000 */
5326 REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
5328 /* update threshold */
5329 REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
5330 /* update init credit */
5331 REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
5333 /* probe changes */
5334 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
5335 udelay(50);
5336 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
5339 #ifdef BCM_CNIC
5340 bnx2x_init_block(bp, SRCH_BLOCK, init_stage);
5341 #endif
5342 bnx2x_init_block(bp, CDU_BLOCK, init_stage);
5343 bnx2x_init_block(bp, CFC_BLOCK, init_stage);
5345 if (CHIP_IS_E1(bp)) {
5346 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
5347 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
5349 bnx2x_init_block(bp, HC_BLOCK, init_stage);
5351 bnx2x_init_block(bp, IGU_BLOCK, init_stage);
5353 bnx2x_init_block(bp, MISC_AEU_BLOCK, init_stage);
5354 /* init aeu_mask_attn_func_0/1:
5355 * - SF mode: bits 3-7 are masked. only bits 0-2 are in use
5356 * - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
5357 * bits 4-7 are used for "per vn group attention" */
5358 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
5359 (IS_MF(bp) ? 0xF7 : 0x7));
5361 bnx2x_init_block(bp, PXPCS_BLOCK, init_stage);
5362 bnx2x_init_block(bp, EMAC0_BLOCK, init_stage);
5363 bnx2x_init_block(bp, EMAC1_BLOCK, init_stage);
5364 bnx2x_init_block(bp, DBU_BLOCK, init_stage);
5365 bnx2x_init_block(bp, DBG_BLOCK, init_stage);
5367 bnx2x_init_block(bp, NIG_BLOCK, init_stage);
5369 REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
5371 if (!CHIP_IS_E1(bp)) {
5372 /* 0x2 disable mf_ov, 0x1 enable */
5373 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
5374 (IS_MF(bp) ? 0x1 : 0x2));
5376 if (CHIP_IS_E2(bp)) {
5377 val = 0;
5378 switch (bp->mf_mode) {
5379 case MULTI_FUNCTION_SD:
5380 val = 1;
5381 break;
5382 case MULTI_FUNCTION_SI:
5383 val = 2;
5384 break;
5387 REG_WR(bp, (BP_PORT(bp) ? NIG_REG_LLH1_CLS_TYPE :
5388 NIG_REG_LLH0_CLS_TYPE), val);
5391 REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
5392 REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
5393 REG_WR(bp, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
5397 bnx2x_init_block(bp, MCP_BLOCK, init_stage);
5398 bnx2x_init_block(bp, DMAE_BLOCK, init_stage);
5399 bp->port.need_hw_lock = bnx2x_hw_lock_required(bp,
5400 bp->common.shmem_base,
5401 bp->common.shmem2_base);
5402 if (bnx2x_fan_failure_det_req(bp, bp->common.shmem_base,
5403 bp->common.shmem2_base, port)) {
5404 u32 reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
5405 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
5406 val = REG_RD(bp, reg_addr);
5407 val |= AEU_INPUTS_ATTN_BITS_SPIO5;
5408 REG_WR(bp, reg_addr, val);
5410 bnx2x__link_reset(bp);
5412 return 0;
5415 static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
5417 int reg;
5419 if (CHIP_IS_E1(bp))
5420 reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
5421 else
5422 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
5424 bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
5427 static inline void bnx2x_igu_clear_sb(struct bnx2x *bp, u8 idu_sb_id)
5429 bnx2x_igu_clear_sb_gen(bp, idu_sb_id, true /*PF*/);
5432 static inline void bnx2x_clear_func_ilt(struct bnx2x *bp, u32 func)
5434 u32 i, base = FUNC_ILT_BASE(func);
5435 for (i = base; i < base + ILT_PER_FUNC; i++)
5436 bnx2x_ilt_wr(bp, i, 0);
5439 static int bnx2x_init_hw_func(struct bnx2x *bp)
5441 int port = BP_PORT(bp);
5442 int func = BP_FUNC(bp);
5443 struct bnx2x_ilt *ilt = BP_ILT(bp);
5444 u16 cdu_ilt_start;
5445 u32 addr, val;
5446 int i;
5448 DP(BNX2X_MSG_MCP, "starting func init func %d\n", func);
5450 /* set MSI reconfigure capability */
5451 if (bp->common.int_block == INT_BLOCK_HC) {
5452 addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
5453 val = REG_RD(bp, addr);
5454 val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
5455 REG_WR(bp, addr, val);
5458 ilt = BP_ILT(bp);
5459 cdu_ilt_start = ilt->clients[ILT_CLIENT_CDU].start;
5461 for (i = 0; i < L2_ILT_LINES(bp); i++) {
5462 ilt->lines[cdu_ilt_start + i].page =
5463 bp->context.vcxt + (ILT_PAGE_CIDS * i);
5464 ilt->lines[cdu_ilt_start + i].page_mapping =
5465 bp->context.cxt_mapping + (CDU_ILT_PAGE_SZ * i);
5466 /* cdu ilt pages are allocated manually so there's no need to
5467 set the size */
5469 bnx2x_ilt_init_op(bp, INITOP_SET);
5470 #ifdef BCM_CNIC
5471 bnx2x_src_init_t2(bp, bp->t2, bp->t2_mapping, SRC_CONN_NUM);
5473 /* T1 hash bits value determines the T1 number of entries */
5474 REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + port*4, SRC_HASH_BITS);
5475 #endif
5477 #ifndef BCM_CNIC
5478 /* set NIC mode */
5479 REG_WR(bp, PRS_REG_NIC_MODE, 1);
5480 #endif /* BCM_CNIC */
5482 if (CHIP_IS_E2(bp)) {
5483 u32 pf_conf = IGU_PF_CONF_FUNC_EN;
5485 /* Turn on a single ISR mode in IGU if driver is going to use
5486 * INT#x or MSI
5488 if (!(bp->flags & USING_MSIX_FLAG))
5489 pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN;
5491 * Timers bug workaround: function init part.
5492 * Need to wait 20msec after initializing ILT,
5493 * to make sure there are no requests in
5494 * one of the PXP internal queues with "old" ILT addresses
5496 msleep(20);
5498 * Master enable - needed because WB DMAE writes are performed
5499 * before this register is re-initialized as part of the regular
5500 * function init
5502 REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
5503 /* Enable the function in IGU */
5504 REG_WR(bp, IGU_REG_PF_CONFIGURATION, pf_conf);
5507 bp->dmae_ready = 1;
5509 bnx2x_init_block(bp, PGLUE_B_BLOCK, FUNC0_STAGE + func);
5511 if (CHIP_IS_E2(bp))
5512 REG_WR(bp, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR, func);
5514 bnx2x_init_block(bp, MISC_BLOCK, FUNC0_STAGE + func);
5515 bnx2x_init_block(bp, TCM_BLOCK, FUNC0_STAGE + func);
5516 bnx2x_init_block(bp, UCM_BLOCK, FUNC0_STAGE + func);
5517 bnx2x_init_block(bp, CCM_BLOCK, FUNC0_STAGE + func);
5518 bnx2x_init_block(bp, XCM_BLOCK, FUNC0_STAGE + func);
5519 bnx2x_init_block(bp, TSEM_BLOCK, FUNC0_STAGE + func);
5520 bnx2x_init_block(bp, USEM_BLOCK, FUNC0_STAGE + func);
5521 bnx2x_init_block(bp, CSEM_BLOCK, FUNC0_STAGE + func);
5522 bnx2x_init_block(bp, XSEM_BLOCK, FUNC0_STAGE + func);
5524 if (CHIP_IS_E2(bp)) {
5525 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_PATH_ID_OFFSET,
5526 BP_PATH(bp));
5527 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_PATH_ID_OFFSET,
5528 BP_PATH(bp));
5531 if (CHIP_MODE_IS_4_PORT(bp))
5532 bnx2x_init_block(bp, XSEM_4PORT_BLOCK, FUNC0_STAGE + func);
5534 if (CHIP_IS_E2(bp))
5535 REG_WR(bp, QM_REG_PF_EN, 1);
5537 bnx2x_init_block(bp, QM_BLOCK, FUNC0_STAGE + func);
5539 if (CHIP_MODE_IS_4_PORT(bp))
5540 bnx2x_init_block(bp, QM_4PORT_BLOCK, FUNC0_STAGE + func);
5542 bnx2x_init_block(bp, TIMERS_BLOCK, FUNC0_STAGE + func);
5543 bnx2x_init_block(bp, DQ_BLOCK, FUNC0_STAGE + func);
5544 bnx2x_init_block(bp, BRB1_BLOCK, FUNC0_STAGE + func);
5545 bnx2x_init_block(bp, PRS_BLOCK, FUNC0_STAGE + func);
5546 bnx2x_init_block(bp, TSDM_BLOCK, FUNC0_STAGE + func);
5547 bnx2x_init_block(bp, CSDM_BLOCK, FUNC0_STAGE + func);
5548 bnx2x_init_block(bp, USDM_BLOCK, FUNC0_STAGE + func);
5549 bnx2x_init_block(bp, XSDM_BLOCK, FUNC0_STAGE + func);
5550 bnx2x_init_block(bp, UPB_BLOCK, FUNC0_STAGE + func);
5551 bnx2x_init_block(bp, XPB_BLOCK, FUNC0_STAGE + func);
5552 bnx2x_init_block(bp, PBF_BLOCK, FUNC0_STAGE + func);
5553 if (CHIP_IS_E2(bp))
5554 REG_WR(bp, PBF_REG_DISABLE_PF, 0);
5556 bnx2x_init_block(bp, CDU_BLOCK, FUNC0_STAGE + func);
5558 bnx2x_init_block(bp, CFC_BLOCK, FUNC0_STAGE + func);
5560 if (CHIP_IS_E2(bp))
5561 REG_WR(bp, CFC_REG_WEAK_ENABLE_PF, 1);
5563 if (IS_MF(bp)) {
5564 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
5565 REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->mf_ov);
5568 bnx2x_init_block(bp, MISC_AEU_BLOCK, FUNC0_STAGE + func);
5570 /* HC init per function */
5571 if (bp->common.int_block == INT_BLOCK_HC) {
5572 if (CHIP_IS_E1H(bp)) {
5573 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
5575 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
5576 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
5578 bnx2x_init_block(bp, HC_BLOCK, FUNC0_STAGE + func);
5580 } else {
5581 int num_segs, sb_idx, prod_offset;
5583 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
5585 if (CHIP_IS_E2(bp)) {
5586 REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, 0);
5587 REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, 0);
5590 bnx2x_init_block(bp, IGU_BLOCK, FUNC0_STAGE + func);
5592 if (CHIP_IS_E2(bp)) {
5593 int dsb_idx = 0;
5595 * Producer memory:
5596 * E2 mode: address 0-135 match to the mapping memory;
5597 * 136 - PF0 default prod; 137 - PF1 default prod;
5598 * 138 - PF2 default prod; 139 - PF3 default prod;
5599 * 140 - PF0 attn prod; 141 - PF1 attn prod;
5600 * 142 - PF2 attn prod; 143 - PF3 attn prod;
5601 * 144-147 reserved.
5603 * E1.5 mode - in backward compatible mode:
5604 * for non-default SBs, each even line in the memory
5605 * holds the U producer and each odd line holds
5606 * the C producer. The first 128 producers are for
5607 * NDSBs (PF0 - 0-31; PF1 - 32-63 and so on). The last 20
5608 * producers are for the DSB of each PF.
5609 * Each PF has five segments: (the order inside each
5610 * segment is PF0; PF1; PF2; PF3) - 128-131 U prods;
5611 * 132-135 C prods; 136-139 X prods; 140-143 T prods;
5612 * 144-147 attn prods;
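/* Illustrative: each producer entry is one dword at
 * IGU_REG_PROD_CONS_MEMORY + entry*4, so e.g. in E2 mode PF2's attention
 * producer (entry 142 in the map above) sits at offset 0x238 from the
 * base; the loops below zero exactly these entries.
 */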
5614 /* non-default-status-blocks */
5615 num_segs = CHIP_INT_MODE_IS_BC(bp) ?
5616 IGU_BC_NDSB_NUM_SEGS : IGU_NORM_NDSB_NUM_SEGS;
5617 for (sb_idx = 0; sb_idx < bp->igu_sb_cnt; sb_idx++) {
5618 prod_offset = (bp->igu_base_sb + sb_idx) *
5619 num_segs;
5621 for (i = 0; i < num_segs; i++) {
5622 addr = IGU_REG_PROD_CONS_MEMORY +
5623 (prod_offset + i) * 4;
5624 REG_WR(bp, addr, 0);
5626 /* send consumer update with value 0 */
5627 bnx2x_ack_sb(bp, bp->igu_base_sb + sb_idx,
5628 USTORM_ID, 0, IGU_INT_NOP, 1);
5629 bnx2x_igu_clear_sb(bp,
5630 bp->igu_base_sb + sb_idx);
5633 /* default-status-blocks */
5634 num_segs = CHIP_INT_MODE_IS_BC(bp) ?
5635 IGU_BC_DSB_NUM_SEGS : IGU_NORM_DSB_NUM_SEGS;
5637 if (CHIP_MODE_IS_4_PORT(bp))
5638 dsb_idx = BP_FUNC(bp);
5639 else
5640 dsb_idx = BP_E1HVN(bp);
5642 prod_offset = (CHIP_INT_MODE_IS_BC(bp) ?
5643 IGU_BC_BASE_DSB_PROD + dsb_idx :
5644 IGU_NORM_BASE_DSB_PROD + dsb_idx);
5646 for (i = 0; i < (num_segs * E1HVN_MAX);
5647 i += E1HVN_MAX) {
5648 addr = IGU_REG_PROD_CONS_MEMORY +
5649 (prod_offset + i)*4;
5650 REG_WR(bp, addr, 0);
5652 /* send consumer update with 0 */
5653 if (CHIP_INT_MODE_IS_BC(bp)) {
5654 bnx2x_ack_sb(bp, bp->igu_dsb_id,
5655 USTORM_ID, 0, IGU_INT_NOP, 1);
5656 bnx2x_ack_sb(bp, bp->igu_dsb_id,
5657 CSTORM_ID, 0, IGU_INT_NOP, 1);
5658 bnx2x_ack_sb(bp, bp->igu_dsb_id,
5659 XSTORM_ID, 0, IGU_INT_NOP, 1);
5660 bnx2x_ack_sb(bp, bp->igu_dsb_id,
5661 TSTORM_ID, 0, IGU_INT_NOP, 1);
5662 bnx2x_ack_sb(bp, bp->igu_dsb_id,
5663 ATTENTION_ID, 0, IGU_INT_NOP, 1);
5664 } else {
5665 bnx2x_ack_sb(bp, bp->igu_dsb_id,
5666 USTORM_ID, 0, IGU_INT_NOP, 1);
5667 bnx2x_ack_sb(bp, bp->igu_dsb_id,
5668 ATTENTION_ID, 0, IGU_INT_NOP, 1);
5670 bnx2x_igu_clear_sb(bp, bp->igu_dsb_id);
5672 /* !!! these should become driver const once
5673 rf-tool supports split-68 const */
5674 REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_LSB, 0);
5675 REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_MSB, 0);
5676 REG_WR(bp, IGU_REG_SB_MASK_LSB, 0);
5677 REG_WR(bp, IGU_REG_SB_MASK_MSB, 0);
5678 REG_WR(bp, IGU_REG_PBA_STATUS_LSB, 0);
5679 REG_WR(bp, IGU_REG_PBA_STATUS_MSB, 0);
5683 /* Reset PCIE errors for debug */
5684 REG_WR(bp, 0x2114, 0xffffffff);
5685 REG_WR(bp, 0x2120, 0xffffffff);
5687 bnx2x_init_block(bp, EMAC0_BLOCK, FUNC0_STAGE + func);
5688 bnx2x_init_block(bp, EMAC1_BLOCK, FUNC0_STAGE + func);
5689 bnx2x_init_block(bp, DBU_BLOCK, FUNC0_STAGE + func);
5690 bnx2x_init_block(bp, DBG_BLOCK, FUNC0_STAGE + func);
5691 bnx2x_init_block(bp, MCP_BLOCK, FUNC0_STAGE + func);
5692 bnx2x_init_block(bp, DMAE_BLOCK, FUNC0_STAGE + func);
5694 bnx2x_phy_probe(&bp->link_params);
5695 return 0;
5698 int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
5700 int rc = 0;
5702 DP(BNX2X_MSG_MCP, "function %d load_code %x\n",
5703 BP_ABS_FUNC(bp), load_code);
5705 bp->dmae_ready = 0;
5706 mutex_init(&bp->dmae_mutex);
5707 rc = bnx2x_gunzip_init(bp);
5708 if (rc)
5709 return rc;
5711 switch (load_code) {
5712 case FW_MSG_CODE_DRV_LOAD_COMMON:
5713 case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP:
5714 rc = bnx2x_init_hw_common(bp, load_code);
5715 if (rc)
5716 goto init_hw_err;
5717 /* no break */
5719 case FW_MSG_CODE_DRV_LOAD_PORT:
5720 rc = bnx2x_init_hw_port(bp);
5721 if (rc)
5722 goto init_hw_err;
5723 /* no break */
5725 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
5726 rc = bnx2x_init_hw_func(bp);
5727 if (rc)
5728 goto init_hw_err;
5729 break;
5731 default:
5732 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
5733 break;
5736 if (!BP_NOMCP(bp)) {
5737 int mb_idx = BP_FW_MB_IDX(bp);
5739 bp->fw_drv_pulse_wr_seq =
5740 (SHMEM_RD(bp, func_mb[mb_idx].drv_pulse_mb) &
5741 DRV_PULSE_SEQ_MASK);
5742 DP(BNX2X_MSG_MCP, "drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
5745 init_hw_err:
5746 bnx2x_gunzip_end(bp);
5748 return rc;
5751 void bnx2x_free_mem(struct bnx2x *bp)
5754 #define BNX2X_PCI_FREE(x, y, size) \
5755 do { \
5756 if (x) { \
5757 dma_free_coherent(&bp->pdev->dev, size, (void *)x, y); \
5758 x = NULL; \
5759 y = 0; \
5761 } while (0)
5763 #define BNX2X_FREE(x) \
5764 do { \
5765 if (x) { \
5766 kfree((void *)x); \
5767 x = NULL; \
5769 } while (0)
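/* Both helpers check the pointer before freeing and then clear the
 * pointer (and DMA handle), so repeated or partial teardown does not
 * double-free.
 */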
5771 int i;
5773 /* fastpath */
5774 /* Common */
5775 for_each_queue(bp, i) {
5776 /* status blocks */
5777 if (CHIP_IS_E2(bp))
5778 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk.e2_sb),
5779 bnx2x_fp(bp, i, status_blk_mapping),
5780 sizeof(struct host_hc_status_block_e2));
5781 else
5782 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk.e1x_sb),
5783 bnx2x_fp(bp, i, status_blk_mapping),
5784 sizeof(struct host_hc_status_block_e1x));
5786 /* Rx */
5787 for_each_queue(bp, i) {
5789 /* fastpath rx rings: rx_buf rx_desc rx_comp */
5790 BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
5791 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
5792 bnx2x_fp(bp, i, rx_desc_mapping),
5793 sizeof(struct eth_rx_bd) * NUM_RX_BD);
5795 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
5796 bnx2x_fp(bp, i, rx_comp_mapping),
5797 sizeof(struct eth_fast_path_rx_cqe) *
5798 NUM_RCQ_BD);
5800 /* SGE ring */
5801 BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
5802 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
5803 bnx2x_fp(bp, i, rx_sge_mapping),
5804 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
5806 /* Tx */
5807 for_each_queue(bp, i) {
5809 /* fastpath tx rings: tx_buf tx_desc */
5810 BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
5811 BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
5812 bnx2x_fp(bp, i, tx_desc_mapping),
5813 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
5815 /* end of fastpath */
5817 BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
5818 sizeof(struct host_sp_status_block));
5820 BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
5821 sizeof(struct bnx2x_slowpath));
5823 BNX2X_PCI_FREE(bp->context.vcxt, bp->context.cxt_mapping,
5824 bp->context.size);
5826 bnx2x_ilt_mem_op(bp, ILT_MEMOP_FREE);
5828 BNX2X_FREE(bp->ilt->lines);
5829 #ifdef BCM_CNIC
5830 if (CHIP_IS_E2(bp))
5831 BNX2X_PCI_FREE(bp->cnic_sb.e2_sb, bp->cnic_sb_mapping,
5832 sizeof(struct host_hc_status_block_e2));
5833 else
5834 BNX2X_PCI_FREE(bp->cnic_sb.e1x_sb, bp->cnic_sb_mapping,
5835 sizeof(struct host_hc_status_block_e1x));
5836 BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, SRC_T2_SZ);
5837 #endif
5838 BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
5840 BNX2X_PCI_FREE(bp->eq_ring, bp->eq_mapping,
5841 BCM_PAGE_SIZE * NUM_EQ_PAGES);
5843 #undef BNX2X_PCI_FREE
5844 #undef BNX2X_FREE
5847 static inline void set_sb_shortcuts(struct bnx2x *bp, int index)
5849 union host_hc_status_block status_blk = bnx2x_fp(bp, index, status_blk);
5850 if (CHIP_IS_E2(bp)) {
5851 bnx2x_fp(bp, index, sb_index_values) =
5852 (__le16 *)status_blk.e2_sb->sb.index_values;
5853 bnx2x_fp(bp, index, sb_running_index) =
5854 (__le16 *)status_blk.e2_sb->sb.running_index;
5855 } else {
5856 bnx2x_fp(bp, index, sb_index_values) =
5857 (__le16 *)status_blk.e1x_sb->sb.index_values;
5858 bnx2x_fp(bp, index, sb_running_index) =
5859 (__le16 *)status_blk.e1x_sb->sb.running_index;
5863 int bnx2x_alloc_mem(struct bnx2x *bp)
5866 #define BNX2X_PCI_ALLOC(x, y, size) \
5867 do { \
5868 x = dma_alloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \
5869 if (x == NULL) \
5870 goto alloc_mem_err; \
5871 memset(x, 0, size); \
5872 } while (0)
5874 #define BNX2X_ALLOC(x, size) \
5875 do { \
5876 x = kzalloc(size, GFP_KERNEL); \
5877 if (x == NULL) \
5878 goto alloc_mem_err; \
5879 } while (0)
5881 int i;
5883 /* fastpath */
5884 /* Common */
5885 for_each_queue(bp, i) {
5886 union host_hc_status_block *sb = &bnx2x_fp(bp, i, status_blk);
5887 bnx2x_fp(bp, i, bp) = bp;
5888 /* status blocks */
5889 if (CHIP_IS_E2(bp))
5890 BNX2X_PCI_ALLOC(sb->e2_sb,
5891 &bnx2x_fp(bp, i, status_blk_mapping),
5892 sizeof(struct host_hc_status_block_e2));
5893 else
5894 BNX2X_PCI_ALLOC(sb->e1x_sb,
5895 &bnx2x_fp(bp, i, status_blk_mapping),
5896 sizeof(struct host_hc_status_block_e1x));
5898 set_sb_shortcuts(bp, i);
5900 /* Rx */
5901 for_each_queue(bp, i) {
5903 /* fastpath rx rings: rx_buf rx_desc rx_comp */
5904 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
5905 sizeof(struct sw_rx_bd) * NUM_RX_BD);
5906 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
5907 &bnx2x_fp(bp, i, rx_desc_mapping),
5908 sizeof(struct eth_rx_bd) * NUM_RX_BD);
5910 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
5911 &bnx2x_fp(bp, i, rx_comp_mapping),
5912 sizeof(struct eth_fast_path_rx_cqe) *
5913 NUM_RCQ_BD);
5915 /* SGE ring */
5916 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
5917 sizeof(struct sw_rx_page) * NUM_RX_SGE);
5918 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
5919 &bnx2x_fp(bp, i, rx_sge_mapping),
5920 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
5922 /* Tx */
5923 for_each_queue(bp, i) {
5925 /* fastpath tx rings: tx_buf tx_desc */
5926 BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
5927 sizeof(struct sw_tx_bd) * NUM_TX_BD);
5928 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
5929 &bnx2x_fp(bp, i, tx_desc_mapping),
5930 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
5932 /* end of fastpath */
5934 #ifdef BCM_CNIC
5935 if (CHIP_IS_E2(bp))
5936 BNX2X_PCI_ALLOC(bp->cnic_sb.e2_sb, &bp->cnic_sb_mapping,
5937 sizeof(struct host_hc_status_block_e2));
5938 else
5939 BNX2X_PCI_ALLOC(bp->cnic_sb.e1x_sb, &bp->cnic_sb_mapping,
5940 sizeof(struct host_hc_status_block_e1x));
5942 /* allocate searcher T2 table */
5943 BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, SRC_T2_SZ);
5944 #endif
5947 BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
5948 sizeof(struct host_sp_status_block));
5950 BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
5951 sizeof(struct bnx2x_slowpath));
5953 bp->context.size = sizeof(union cdu_context) * bp->l2_cid_count;
5954 BNX2X_PCI_ALLOC(bp->context.vcxt, &bp->context.cxt_mapping,
5955 bp->context.size);
5957 BNX2X_ALLOC(bp->ilt->lines, sizeof(struct ilt_line) * ILT_MAX_LINES);
5959 if (bnx2x_ilt_mem_op(bp, ILT_MEMOP_ALLOC))
5960 goto alloc_mem_err;
5962 /* Slow path ring */
5963 BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);
5965 /* EQ */
5966 BNX2X_PCI_ALLOC(bp->eq_ring, &bp->eq_mapping,
5967 BCM_PAGE_SIZE * NUM_EQ_PAGES);
5968 return 0;
5970 alloc_mem_err:
5971 bnx2x_free_mem(bp);
5972 return -ENOMEM;
5974 #undef BNX2X_PCI_ALLOC
5975 #undef BNX2X_ALLOC
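/*
 * Illustrative sketch, not part of the driver: the BNX2X_PCI_ALLOC/
 * BNX2X_ALLOC macros above jump to a single error label on failure, and
 * bnx2x_free_mem() can then be called unconditionally because every
 * BNX2X_PCI_FREE/BNX2X_FREE checks for NULL and clears the pointer it
 * frees.  A minimal stand-alone version of the same pattern, with
 * hypothetical names, looks like this:
 */
struct example_buffers {		/* hypothetical container */
	void *a;
	void *b;
};

static void example_free(struct example_buffers *eb)
{
	/* safe even when only some of the members were allocated */
	kfree(eb->a);
	eb->a = NULL;
	kfree(eb->b);
	eb->b = NULL;
}

static int example_alloc(struct example_buffers *eb, size_t sz)
{
	eb->a = kzalloc(sz, GFP_KERNEL);
	if (eb->a == NULL)
		goto alloc_err;
	eb->b = kzalloc(sz, GFP_KERNEL);
	if (eb->b == NULL)
		goto alloc_err;
	return 0;

alloc_err:
	example_free(eb);
	return -ENOMEM;
}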
5979 * Init service functions
5981 int bnx2x_func_start(struct bnx2x *bp)
5983 bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_START, 0, 0, 0, 1);
5985 /* Wait for completion */
5986 return bnx2x_wait_ramrod(bp, BNX2X_STATE_FUNC_STARTED, 0, &(bp->state),
5987 WAIT_RAMROD_COMMON);
5990 int bnx2x_func_stop(struct bnx2x *bp)
5992 bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_STOP, 0, 0, 0, 1);
5994 /* Wait for completion */
5995 return bnx2x_wait_ramrod(bp, BNX2X_STATE_CLOSING_WAIT4_UNLOAD,
5996 0, &(bp->state), WAIT_RAMROD_COMMON);
6000 * Sets a MAC in a CAM for a few L2 Clients for E1x chip
6002 * @param bp driver descriptor
6003 * @param set set or clear an entry (1 or 0)
6004 * @param mac pointer to a buffer containing a MAC
6005 * @param cl_bit_vec bit vector of clients to register a MAC for
6006 * @param cam_offset offset in a CAM to use
6007 * @param is_bcast is the set MAC a broadcast address (for E1 only)
6009 static void bnx2x_set_mac_addr_gen(struct bnx2x *bp, int set, u8 *mac,
6010 u32 cl_bit_vec, u8 cam_offset,
6011 u8 is_bcast)
6013 struct mac_configuration_cmd *config =
6014 (struct mac_configuration_cmd *)bnx2x_sp(bp, mac_config);
6015 int ramrod_flags = WAIT_RAMROD_COMMON;
6017 bp->set_mac_pending = 1;
6018 smp_wmb();
6020 config->hdr.length = 1;
6021 config->hdr.offset = cam_offset;
6022 config->hdr.client_id = 0xff;
6023 config->hdr.reserved1 = 0;
6025 /* primary MAC */
6026 config->config_table[0].msb_mac_addr =
6027 swab16(*(u16 *)&mac[0]);
6028 config->config_table[0].middle_mac_addr =
6029 swab16(*(u16 *)&mac[2]);
6030 config->config_table[0].lsb_mac_addr =
6031 swab16(*(u16 *)&mac[4]);
6032 config->config_table[0].clients_bit_vector =
6033 cpu_to_le32(cl_bit_vec);
6034 config->config_table[0].vlan_id = 0;
6035 config->config_table[0].pf_id = BP_FUNC(bp);
6036 if (set)
6037 SET_FLAG(config->config_table[0].flags,
6038 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
6039 T_ETH_MAC_COMMAND_SET);
6040 else
6041 SET_FLAG(config->config_table[0].flags,
6042 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
6043 T_ETH_MAC_COMMAND_INVALIDATE);
6045 if (is_bcast)
6046 SET_FLAG(config->config_table[0].flags,
6047 MAC_CONFIGURATION_ENTRY_BROADCAST, 1);
6049 DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x) PF_ID %d CLID mask %d\n",
6050 (set ? "setting" : "clearing"),
6051 config->config_table[0].msb_mac_addr,
6052 config->config_table[0].middle_mac_addr,
6053 config->config_table[0].lsb_mac_addr, BP_FUNC(bp), cl_bit_vec);
6055 bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
6056 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
6057 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 1);
6059 /* Wait for a completion */
6060 bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, ramrod_flags);
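/*
 * Illustrative sketch, not part of the driver: how the 6-byte MAC above is
 * split into the three 16-bit CAM words.  On a little-endian host a MAC of
 * 00:10:18:aa:bb:cc yields msb=0x0010, middle=0x18aa, lsb=0xbbcc.  The
 * helper name is hypothetical.
 */
static inline void example_mac_to_cam_words(const u8 *mac,
					    u16 *msb, u16 *mid, u16 *lsb)
{
	*msb = swab16(*(const u16 *)&mac[0]);	/* bytes 0-1 */
	*mid = swab16(*(const u16 *)&mac[2]);	/* bytes 2-3 */
	*lsb = swab16(*(const u16 *)&mac[4]);	/* bytes 4-5 */
}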
6064 int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
6065 int *state_p, int flags)
6067 /* can take a while if any port is running */
6068 int cnt = 5000;
6069 u8 poll = flags & WAIT_RAMROD_POLL;
6070 u8 common = flags & WAIT_RAMROD_COMMON;
6072 DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
6073 poll ? "polling" : "waiting", state, idx);
6075 might_sleep();
6076 while (cnt--) {
6077 if (poll) {
6078 if (common)
6079 bnx2x_eq_int(bp);
6080 else {
6081 bnx2x_rx_int(bp->fp, 10);
6082 /* if index is different from 0
6083 * the reply for some commands will
6084 * be on the non default queue
6086 if (idx)
6087 bnx2x_rx_int(&bp->fp[idx], 10);
6091 mb(); /* state is changed by bnx2x_sp_event() */
6092 if (*state_p == state) {
6093 #ifdef BNX2X_STOP_ON_ERROR
6094 DP(NETIF_MSG_IFUP, "exit (cnt %d)\n", 5000 - cnt);
6095 #endif
6096 return 0;
6099 msleep(1);
6101 if (bp->panic)
6102 return -EIO;
6105 /* timeout! */
6106 BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
6107 poll ? "polling" : "waiting", state, idx);
6108 #ifdef BNX2X_STOP_ON_ERROR
6109 bnx2x_panic();
6110 #endif
6112 return -EBUSY;
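/*
 * Illustrative usage sketch with a hypothetical caller name: the canonical
 * sequence around bnx2x_wait_ramrod() is "mark pending, post the ramrod,
 * wait for the completion handler to flip the state back".  With cnt = 5000
 * and msleep(1) per iteration the worst-case wait above is roughly 5 s.
 */
static int example_post_and_wait(struct bnx2x *bp)
{
	bp->set_mac_pending = 1;	/* cleared by the slowpath event */
	smp_wmb();

	bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
		      U64_HI(bnx2x_sp_mapping(bp, mac_config)),
		      U64_LO(bnx2x_sp_mapping(bp, mac_config)), 1);

	/* wait until set_mac_pending drops back to 0 */
	return bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending,
				 WAIT_RAMROD_COMMON);
}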
6115 u8 bnx2x_e1h_cam_offset(struct bnx2x *bp, u8 rel_offset)
6117 if (CHIP_IS_E1H(bp))
6118 return E1H_FUNC_MAX * rel_offset + BP_FUNC(bp);
6119 else if (CHIP_MODE_IS_4_PORT(bp))
6120 return BP_FUNC(bp) * 32 + rel_offset;
6121 else
6122 return BP_VN(bp) * 32 + rel_offset;
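/*
 * Illustrative worked example, not part of the driver: with E1H_FUNC_MAX
 * of 8 functions, function 3 asking for relative line 2 maps to CAM offset
 * 8 * 2 + 3 = 19; on a 4-port chip the same request from function 3 maps
 * to 3 * 32 + 2 = 98.  A generic form of the E1H calculation, with a
 * hypothetical name:
 */
static inline u8 example_e1h_cam_offset(u8 func, u8 rel_offset)
{
	return E1H_FUNC_MAX * rel_offset + func;
}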
6125 void bnx2x_set_eth_mac(struct bnx2x *bp, int set)
6127 u8 cam_offset = (CHIP_IS_E1(bp) ? (BP_PORT(bp) ? 32 : 0) :
6128 bnx2x_e1h_cam_offset(bp, CAM_ETH_LINE));
6130 /* networking MAC */
6131 bnx2x_set_mac_addr_gen(bp, set, bp->dev->dev_addr,
6132 (1 << bp->fp->cl_id), cam_offset , 0);
6134 if (CHIP_IS_E1(bp)) {
6135 /* broadcast MAC */
6136 u8 bcast[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
6137 bnx2x_set_mac_addr_gen(bp, set, bcast, 0, cam_offset + 1, 1);
6140 static void bnx2x_set_e1_mc_list(struct bnx2x *bp, u8 offset)
6142 int i = 0, old;
6143 struct net_device *dev = bp->dev;
6144 struct netdev_hw_addr *ha;
6145 struct mac_configuration_cmd *config_cmd = bnx2x_sp(bp, mcast_config);
6146 dma_addr_t config_cmd_map = bnx2x_sp_mapping(bp, mcast_config);
6148 netdev_for_each_mc_addr(ha, dev) {
6149 /* copy mac */
6150 config_cmd->config_table[i].msb_mac_addr =
6151 swab16(*(u16 *)&bnx2x_mc_addr(ha)[0]);
6152 config_cmd->config_table[i].middle_mac_addr =
6153 swab16(*(u16 *)&bnx2x_mc_addr(ha)[2]);
6154 config_cmd->config_table[i].lsb_mac_addr =
6155 swab16(*(u16 *)&bnx2x_mc_addr(ha)[4]);
6157 config_cmd->config_table[i].vlan_id = 0;
6158 config_cmd->config_table[i].pf_id = BP_FUNC(bp);
6159 config_cmd->config_table[i].clients_bit_vector =
6160 cpu_to_le32(1 << BP_L_ID(bp));
6162 SET_FLAG(config_cmd->config_table[i].flags,
6163 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
6164 T_ETH_MAC_COMMAND_SET);
6166 DP(NETIF_MSG_IFUP,
6167 "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
6168 config_cmd->config_table[i].msb_mac_addr,
6169 config_cmd->config_table[i].middle_mac_addr,
6170 config_cmd->config_table[i].lsb_mac_addr);
6171 i++;
6173 old = config_cmd->hdr.length;
6174 if (old > i) {
6175 for (; i < old; i++) {
6176 if (CAM_IS_INVALID(config_cmd->
6177 config_table[i])) {
6178 /* already invalidated */
6179 break;
6181 /* invalidate */
6182 SET_FLAG(config_cmd->config_table[i].flags,
6183 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
6184 T_ETH_MAC_COMMAND_INVALIDATE);
6188 config_cmd->hdr.length = i;
6189 config_cmd->hdr.offset = offset;
6190 config_cmd->hdr.client_id = 0xff;
6191 config_cmd->hdr.reserved1 = 0;
6193 bp->set_mac_pending = 1;
6194 smp_wmb();
6196 bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
6197 U64_HI(config_cmd_map), U64_LO(config_cmd_map), 1);
6199 static void bnx2x_invlidate_e1_mc_list(struct bnx2x *bp)
6201 int i;
6202 struct mac_configuration_cmd *config_cmd = bnx2x_sp(bp, mcast_config);
6203 dma_addr_t config_cmd_map = bnx2x_sp_mapping(bp, mcast_config);
6204 int ramrod_flags = WAIT_RAMROD_COMMON;
6206 bp->set_mac_pending = 1;
6207 smp_wmb();
6209 for (i = 0; i < config_cmd->hdr.length; i++)
6210 SET_FLAG(config_cmd->config_table[i].flags,
6211 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
6212 T_ETH_MAC_COMMAND_INVALIDATE);
6214 bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
6215 U64_HI(config_cmd_map), U64_LO(config_cmd_map), 1);
6217 /* Wait for a completion */
6218 bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending,
6219 ramrod_flags);
6224 #ifdef BCM_CNIC
6226 * Set iSCSI MAC(s) at the next entries in the CAM after the ETH
6227 * MAC(s). This function will wait until the ramrod completion
6228 * returns.
6230 * @param bp driver handle
6231 * @param set set or clear the CAM entry
6233 * @return 0 if success, -ENODEV if the ramrod doesn't return.
6235 int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp, int set)
6237 u8 cam_offset = (CHIP_IS_E1(bp) ? ((BP_PORT(bp) ? 32 : 0) + 2) :
6238 bnx2x_e1h_cam_offset(bp, CAM_ISCSI_ETH_LINE));
6239 u32 iscsi_l2_cl_id = BNX2X_ISCSI_ETH_CL_ID;
6240 u32 cl_bit_vec = (1 << iscsi_l2_cl_id);
6242 /* Send a SET_MAC ramrod */
6243 bnx2x_set_mac_addr_gen(bp, set, bp->iscsi_mac, cl_bit_vec,
6244 cam_offset, 0);
6245 return 0;
6247 #endif
6249 static void bnx2x_fill_cl_init_data(struct bnx2x *bp,
6250 struct bnx2x_client_init_params *params,
6251 u8 activate,
6252 struct client_init_ramrod_data *data)
6254 /* Clear the buffer */
6255 memset(data, 0, sizeof(*data));
6257 /* general */
6258 data->general.client_id = params->rxq_params.cl_id;
6259 data->general.statistics_counter_id = params->rxq_params.stat_id;
6260 data->general.statistics_en_flg =
6261 (params->rxq_params.flags & QUEUE_FLG_STATS) ? 1 : 0;
6262 data->general.activate_flg = activate;
6263 data->general.sp_client_id = params->rxq_params.spcl_id;
6265 /* Rx data */
6266 data->rx.tpa_en_flg =
6267 (params->rxq_params.flags & QUEUE_FLG_TPA) ? 1 : 0;
6268 data->rx.vmqueue_mode_en_flg = 0;
6269 data->rx.cache_line_alignment_log_size =
6270 params->rxq_params.cache_line_log;
6271 data->rx.enable_dynamic_hc =
6272 (params->rxq_params.flags & QUEUE_FLG_DHC) ? 1 : 0;
6273 data->rx.max_sges_for_packet = params->rxq_params.max_sges_pkt;
6274 data->rx.client_qzone_id = params->rxq_params.cl_qzone_id;
6275 data->rx.max_agg_size = params->rxq_params.tpa_agg_sz;
6277 /* We don't set drop flags */
6278 data->rx.drop_ip_cs_err_flg = 0;
6279 data->rx.drop_tcp_cs_err_flg = 0;
6280 data->rx.drop_ttl0_flg = 0;
6281 data->rx.drop_udp_cs_err_flg = 0;
6283 data->rx.inner_vlan_removal_enable_flg =
6284 (params->rxq_params.flags & QUEUE_FLG_VLAN) ? 1 : 0;
6285 data->rx.outer_vlan_removal_enable_flg =
6286 (params->rxq_params.flags & QUEUE_FLG_OV) ? 1 : 0;
6287 data->rx.status_block_id = params->rxq_params.fw_sb_id;
6288 data->rx.rx_sb_index_number = params->rxq_params.sb_cq_index;
6289 data->rx.bd_buff_size = cpu_to_le16(params->rxq_params.buf_sz);
6290 data->rx.sge_buff_size = cpu_to_le16(params->rxq_params.sge_buf_sz);
6291 data->rx.mtu = cpu_to_le16(params->rxq_params.mtu);
6292 data->rx.bd_page_base.lo =
6293 cpu_to_le32(U64_LO(params->rxq_params.dscr_map));
6294 data->rx.bd_page_base.hi =
6295 cpu_to_le32(U64_HI(params->rxq_params.dscr_map));
6296 data->rx.sge_page_base.lo =
6297 cpu_to_le32(U64_LO(params->rxq_params.sge_map));
6298 data->rx.sge_page_base.hi =
6299 cpu_to_le32(U64_HI(params->rxq_params.sge_map));
6300 data->rx.cqe_page_base.lo =
6301 cpu_to_le32(U64_LO(params->rxq_params.rcq_map));
6302 data->rx.cqe_page_base.hi =
6303 cpu_to_le32(U64_HI(params->rxq_params.rcq_map));
6304 data->rx.is_leading_rss =
6305 (params->ramrod_params.flags & CLIENT_IS_LEADING_RSS) ? 1 : 0;
6306 data->rx.is_approx_mcast = data->rx.is_leading_rss;
6308 /* Tx data */
6309 data->tx.enforce_security_flg = 0; /* VF specific */
6310 data->tx.tx_status_block_id = params->txq_params.fw_sb_id;
6311 data->tx.tx_sb_index_number = params->txq_params.sb_cq_index;
6312 data->tx.mtu = 0; /* VF specific */
6313 data->tx.tx_bd_page_base.lo =
6314 cpu_to_le32(U64_LO(params->txq_params.dscr_map));
6315 data->tx.tx_bd_page_base.hi =
6316 cpu_to_le32(U64_HI(params->txq_params.dscr_map));
6318 /* flow control data */
6319 data->fc.cqe_pause_thr_low = cpu_to_le16(params->pause.rcq_th_lo);
6320 data->fc.cqe_pause_thr_high = cpu_to_le16(params->pause.rcq_th_hi);
6321 data->fc.bd_pause_thr_low = cpu_to_le16(params->pause.bd_th_lo);
6322 data->fc.bd_pause_thr_high = cpu_to_le16(params->pause.bd_th_hi);
6323 data->fc.sge_pause_thr_low = cpu_to_le16(params->pause.sge_th_lo);
6324 data->fc.sge_pause_thr_high = cpu_to_le16(params->pause.sge_th_hi);
6325 data->fc.rx_cos_mask = cpu_to_le16(params->pause.pri_map);
6327 data->fc.safc_group_num = params->txq_params.cos;
6328 data->fc.safc_group_en_flg =
6329 (params->txq_params.flags & QUEUE_FLG_COS) ? 1 : 0;
6330 data->fc.traffic_type = LLFC_TRAFFIC_TYPE_NW;
6333 static inline void bnx2x_set_ctx_validation(struct eth_context *cxt, u32 cid)
6335 /* ustorm cxt validation */
6336 cxt->ustorm_ag_context.cdu_usage =
6337 CDU_RSRVD_VALUE_TYPE_A(cid, CDU_REGION_NUMBER_UCM_AG,
6338 ETH_CONNECTION_TYPE);
6339 /* xcontext validation */
6340 cxt->xstorm_ag_context.cdu_reserved =
6341 CDU_RSRVD_VALUE_TYPE_A(cid, CDU_REGION_NUMBER_XCM_AG,
6342 ETH_CONNECTION_TYPE);
6345 int bnx2x_setup_fw_client(struct bnx2x *bp,
6346 struct bnx2x_client_init_params *params,
6347 u8 activate,
6348 struct client_init_ramrod_data *data,
6349 dma_addr_t data_mapping)
6351 u16 hc_usec;
6352 int ramrod = RAMROD_CMD_ID_ETH_CLIENT_SETUP;
6353 int ramrod_flags = 0, rc;
6355 /* HC and context validation values */
6356 hc_usec = params->txq_params.hc_rate ?
6357 1000000 / params->txq_params.hc_rate : 0;
6358 bnx2x_update_coalesce_sb_index(bp,
6359 params->txq_params.fw_sb_id,
6360 params->txq_params.sb_cq_index,
6361 !(params->txq_params.flags & QUEUE_FLG_HC),
6362 hc_usec);
6364 *(params->ramrod_params.pstate) = BNX2X_FP_STATE_OPENING;
6366 hc_usec = params->rxq_params.hc_rate ?
6367 1000000 / params->rxq_params.hc_rate : 0;
6368 bnx2x_update_coalesce_sb_index(bp,
6369 params->rxq_params.fw_sb_id,
6370 params->rxq_params.sb_cq_index,
6371 !(params->rxq_params.flags & QUEUE_FLG_HC),
6372 hc_usec);
6374 bnx2x_set_ctx_validation(params->rxq_params.cxt,
6375 params->rxq_params.cid);
6377 /* zero stats */
6378 if (params->txq_params.flags & QUEUE_FLG_STATS)
6379 storm_memset_xstats_zero(bp, BP_PORT(bp),
6380 params->txq_params.stat_id);
6382 if (params->rxq_params.flags & QUEUE_FLG_STATS) {
6383 storm_memset_ustats_zero(bp, BP_PORT(bp),
6384 params->rxq_params.stat_id);
6385 storm_memset_tstats_zero(bp, BP_PORT(bp),
6386 params->rxq_params.stat_id);
6389 /* Fill the ramrod data */
6390 bnx2x_fill_cl_init_data(bp, params, activate, data);
6392 /* SETUP ramrod.
6394 * bnx2x_sp_post() takes a spin_lock, thus no other explicit memory
6395 * barrier except from mmiowb() is needed to impose a
6396 * proper ordering of memory operations.
6398 mmiowb();
6401 bnx2x_sp_post(bp, ramrod, params->ramrod_params.cid,
6402 U64_HI(data_mapping), U64_LO(data_mapping), 0);
6404 /* Wait for completion */
6405 rc = bnx2x_wait_ramrod(bp, params->ramrod_params.state,
6406 params->ramrod_params.index,
6407 params->ramrod_params.pstate,
6408 ramrod_flags);
6409 return rc;
6413 * Configure interrupt mode according to current configuration.
6414 * In case of MSI-X it will also try to enable MSI-X.
6416 * @param bp
6418 * @return int
6420 static int __devinit bnx2x_set_int_mode(struct bnx2x *bp)
6422 int rc = 0;
6424 switch (bp->int_mode) {
6425 case INT_MODE_MSI:
6426 bnx2x_enable_msi(bp);
6427 /* falling through... */
6428 case INT_MODE_INTx:
6429 bp->num_queues = 1;
6430 DP(NETIF_MSG_IFUP, "set number of queues to 1\n");
6431 break;
6432 default:
6433 /* Set number of queues according to bp->multi_mode value */
6434 bnx2x_set_num_queues(bp);
6436 DP(NETIF_MSG_IFUP, "set number of queues to %d\n",
6437 bp->num_queues);
6439 /* if we can't use MSI-X we only need one fp,
6440 * so try to enable MSI-X with the requested number of fp's
6441 * and fallback to MSI or legacy INTx with one fp
6443 rc = bnx2x_enable_msix(bp);
6444 if (rc) {
6445 /* failed to enable MSI-X */
6446 if (bp->multi_mode)
6447 DP(NETIF_MSG_IFUP,
6448 "Multi requested but failed to "
6449 "enable MSI-X (%d), "
6450 "set number of queues to %d\n",
6451 bp->num_queues, 1);
6453 bp->num_queues = 1;
6455 if (!(bp->flags & DISABLE_MSI_FLAG))
6456 bnx2x_enable_msi(bp);
6459 break;
6462 return rc;
6465 void bnx2x_ilt_set_info(struct bnx2x *bp)
6467 struct ilt_client_info *ilt_client;
6468 struct bnx2x_ilt *ilt = BP_ILT(bp);
6469 u16 line = 0;
6471 ilt->start_line = FUNC_ILT_BASE(BP_FUNC(bp));
6472 DP(BNX2X_MSG_SP, "ilt starts at line %d\n", ilt->start_line);
6474 /* CDU */
6475 ilt_client = &ilt->clients[ILT_CLIENT_CDU];
6476 ilt_client->client_num = ILT_CLIENT_CDU;
6477 ilt_client->page_size = CDU_ILT_PAGE_SZ;
6478 ilt_client->flags = ILT_CLIENT_SKIP_MEM;
6479 ilt_client->start = line;
6480 line += L2_ILT_LINES(bp);
6481 #ifdef BCM_CNIC
6482 line += CNIC_ILT_LINES;
6483 #endif
6484 ilt_client->end = line - 1;
6486 DP(BNX2X_MSG_SP, "ilt client[CDU]: start %d, end %d, psz 0x%x, "
6487 "flags 0x%x, hw psz %d\n",
6488 ilt_client->start,
6489 ilt_client->end,
6490 ilt_client->page_size,
6491 ilt_client->flags,
6492 ilog2(ilt_client->page_size >> 12));
6494 /* QM */
6495 if (QM_INIT(bp->qm_cid_count)) {
6496 ilt_client = &ilt->clients[ILT_CLIENT_QM];
6497 ilt_client->client_num = ILT_CLIENT_QM;
6498 ilt_client->page_size = QM_ILT_PAGE_SZ;
6499 ilt_client->flags = 0;
6500 ilt_client->start = line;
6502 /* 4 bytes for each cid */
6503 line += DIV_ROUND_UP(bp->qm_cid_count * QM_QUEUES_PER_FUNC * 4,
6504 QM_ILT_PAGE_SZ);
6506 ilt_client->end = line - 1;
6508 DP(BNX2X_MSG_SP, "ilt client[QM]: start %d, end %d, psz 0x%x, "
6509 "flags 0x%x, hw psz %d\n",
6510 ilt_client->start,
6511 ilt_client->end,
6512 ilt_client->page_size,
6513 ilt_client->flags,
6514 ilog2(ilt_client->page_size >> 12));
6517 /* SRC */
6518 ilt_client = &ilt->clients[ILT_CLIENT_SRC];
6519 #ifdef BCM_CNIC
6520 ilt_client->client_num = ILT_CLIENT_SRC;
6521 ilt_client->page_size = SRC_ILT_PAGE_SZ;
6522 ilt_client->flags = 0;
6523 ilt_client->start = line;
6524 line += SRC_ILT_LINES;
6525 ilt_client->end = line - 1;
6527 DP(BNX2X_MSG_SP, "ilt client[SRC]: start %d, end %d, psz 0x%x, "
6528 "flags 0x%x, hw psz %d\n",
6529 ilt_client->start,
6530 ilt_client->end,
6531 ilt_client->page_size,
6532 ilt_client->flags,
6533 ilog2(ilt_client->page_size >> 12));
6535 #else
6536 ilt_client->flags = (ILT_CLIENT_SKIP_INIT | ILT_CLIENT_SKIP_MEM);
6537 #endif
6539 /* TM */
6540 ilt_client = &ilt->clients[ILT_CLIENT_TM];
6541 #ifdef BCM_CNIC
6542 ilt_client->client_num = ILT_CLIENT_TM;
6543 ilt_client->page_size = TM_ILT_PAGE_SZ;
6544 ilt_client->flags = 0;
6545 ilt_client->start = line;
6546 line += TM_ILT_LINES;
6547 ilt_client->end = line - 1;
6549 DP(BNX2X_MSG_SP, "ilt client[TM]: start %d, end %d, psz 0x%x, "
6550 "flags 0x%x, hw psz %d\n",
6551 ilt_client->start,
6552 ilt_client->end,
6553 ilt_client->page_size,
6554 ilt_client->flags,
6555 ilog2(ilt_client->page_size >> 12));
6557 #else
6558 ilt_client->flags = (ILT_CLIENT_SKIP_INIT | ILT_CLIENT_SKIP_MEM);
6559 #endif
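/*
 * Illustrative sketch with a hypothetical helper: the QM block above
 * reserves one ILT line per QM_ILT_PAGE_SZ bytes of queue context, at
 * 4 bytes per CID per queue.  Assuming, for illustration, 16 queues per
 * function and a 4 KiB ILT page, 1024 CIDs need
 * DIV_ROUND_UP(1024 * 16 * 4, 4096) = 16 lines.
 */
static inline u16 example_qm_ilt_lines(u32 qm_cid_count)
{
	return DIV_ROUND_UP(qm_cid_count * QM_QUEUES_PER_FUNC * 4,
			    QM_ILT_PAGE_SZ);
}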
6561 int bnx2x_setup_client(struct bnx2x *bp, struct bnx2x_fastpath *fp,
6562 int is_leading)
6564 struct bnx2x_client_init_params params = { {0} };
6565 int rc;
6567 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0,
6568 IGU_INT_ENABLE, 0);
6570 params.ramrod_params.pstate = &fp->state;
6571 params.ramrod_params.state = BNX2X_FP_STATE_OPEN;
6572 params.ramrod_params.index = fp->index;
6573 params.ramrod_params.cid = fp->cid;
6575 if (is_leading)
6576 params.ramrod_params.flags |= CLIENT_IS_LEADING_RSS;
6578 bnx2x_pf_rx_cl_prep(bp, fp, &params.pause, &params.rxq_params);
6580 bnx2x_pf_tx_cl_prep(bp, fp, &params.txq_params);
6582 rc = bnx2x_setup_fw_client(bp, &params, 1,
6583 bnx2x_sp(bp, client_init_data),
6584 bnx2x_sp_mapping(bp, client_init_data));
6585 return rc;
6588 int bnx2x_stop_fw_client(struct bnx2x *bp, struct bnx2x_client_ramrod_params *p)
6590 int rc;
6592 int poll_flag = p->poll ? WAIT_RAMROD_POLL : 0;
6594 /* halt the connection */
6595 *p->pstate = BNX2X_FP_STATE_HALTING;
6596 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, p->cid, 0,
6597 p->cl_id, 0);
6599 /* Wait for completion */
6600 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, p->index,
6601 p->pstate, poll_flag);
6602 if (rc) /* timeout */
6603 return rc;
6605 *p->pstate = BNX2X_FP_STATE_TERMINATING;
6606 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_TERMINATE, p->cid, 0,
6607 p->cl_id, 0);
6608 /* Wait for completion */
6609 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_TERMINATED, p->index,
6610 p->pstate, poll_flag);
6611 if (rc) /* timeout */
6612 return rc;
6615 /* delete cfc entry */
6616 bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_CFC_DEL, p->cid, 0, 0, 1);
6618 /* Wait for completion */
6619 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, p->index,
6620 p->pstate, WAIT_RAMROD_COMMON);
6621 return rc;
6624 static int bnx2x_stop_client(struct bnx2x *bp, int index)
6626 struct bnx2x_client_ramrod_params client_stop = {0};
6627 struct bnx2x_fastpath *fp = &bp->fp[index];
6629 client_stop.index = index;
6630 client_stop.cid = fp->cid;
6631 client_stop.cl_id = fp->cl_id;
6632 client_stop.pstate = &(fp->state);
6633 client_stop.poll = 0;
6635 return bnx2x_stop_fw_client(bp, &client_stop);
6639 static void bnx2x_reset_func(struct bnx2x *bp)
6641 int port = BP_PORT(bp);
6642 int func = BP_FUNC(bp);
6643 int i;
6644 int pfunc_offset_fp = offsetof(struct hc_sb_data, p_func) +
6645 (CHIP_IS_E2(bp) ?
6646 offsetof(struct hc_status_block_data_e2, common) :
6647 offsetof(struct hc_status_block_data_e1x, common));
6648 int pfunc_offset_sp = offsetof(struct hc_sp_status_block_data, p_func);
6649 int pfid_offset = offsetof(struct pci_entity, pf_id);
6651 /* Disable the function in the FW */
6652 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(func), 0);
6653 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(func), 0);
6654 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(func), 0);
6655 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(func), 0);
6657 /* FP SBs */
6658 for_each_queue(bp, i) {
6659 struct bnx2x_fastpath *fp = &bp->fp[i];
6660 REG_WR8(bp,
6661 BAR_CSTRORM_INTMEM +
6662 CSTORM_STATUS_BLOCK_DATA_OFFSET(fp->fw_sb_id)
6663 + pfunc_offset_fp + pfid_offset,
6664 HC_FUNCTION_DISABLED);
6667 /* SP SB */
6668 REG_WR8(bp,
6669 BAR_CSTRORM_INTMEM +
6670 CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func) +
6671 pfunc_offset_sp + pfid_offset,
6672 HC_FUNCTION_DISABLED);
6675 for (i = 0; i < XSTORM_SPQ_DATA_SIZE / 4; i++)
6676 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_DATA_OFFSET(func), 0);
6679 /* Configure IGU */
6680 if (bp->common.int_block == INT_BLOCK_HC) {
6681 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
6682 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
6683 } else {
6684 REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, 0);
6685 REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, 0);
6688 #ifdef BCM_CNIC
6689 /* Disable Timer scan */
6690 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
6692 * Wait for at least 10 ms and up to 2 seconds for the timers scan to
6693 * complete
6695 for (i = 0; i < 200; i++) {
6696 msleep(10);
6697 if (!REG_RD(bp, TM_REG_LIN0_SCAN_ON + port*4))
6698 break;
6700 #endif
6701 /* Clear ILT */
6702 bnx2x_clear_func_ilt(bp, func);
6704 /* Timers bug workaround for E2: if this is vnic-3,
6705 * we need to set the entire ILT range for the timers (TM) client.
6707 if (CHIP_IS_E2(bp) && BP_VN(bp) == 3) {
6708 struct ilt_client_info ilt_cli;
6709 /* use dummy TM client */
6710 memset(&ilt_cli, 0, sizeof(struct ilt_client_info));
6711 ilt_cli.start = 0;
6712 ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1;
6713 ilt_cli.client_num = ILT_CLIENT_TM;
6715 bnx2x_ilt_boundry_init_op(bp, &ilt_cli, 0, INITOP_CLEAR);
6718 /* this assumes that reset_port() called before reset_func()*/
6719 if (CHIP_IS_E2(bp))
6720 bnx2x_pf_disable(bp);
6722 bp->dmae_ready = 0;
6725 static void bnx2x_reset_port(struct bnx2x *bp)
6727 int port = BP_PORT(bp);
6728 u32 val;
6730 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
6732 /* Do not rcv packets to BRB */
6733 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
6734 /* Do not direct rcv packets that are not for MCP to the BRB */
6735 REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
6736 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
6738 /* Configure AEU */
6739 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
6741 msleep(100);
6742 /* Check for BRB port occupancy */
6743 val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
6744 if (val)
6745 DP(NETIF_MSG_IFDOWN,
6746 "BRB1 is not empty %d blocks are occupied\n", val);
6748 /* TODO: Close Doorbell port? */
6751 static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
6753 DP(BNX2X_MSG_MCP, "function %d reset_code %x\n",
6754 BP_ABS_FUNC(bp), reset_code);
6756 switch (reset_code) {
6757 case FW_MSG_CODE_DRV_UNLOAD_COMMON:
6758 bnx2x_reset_port(bp);
6759 bnx2x_reset_func(bp);
6760 bnx2x_reset_common(bp);
6761 break;
6763 case FW_MSG_CODE_DRV_UNLOAD_PORT:
6764 bnx2x_reset_port(bp);
6765 bnx2x_reset_func(bp);
6766 break;
6768 case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
6769 bnx2x_reset_func(bp);
6770 break;
6772 default:
6773 BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
6774 break;
6778 void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode)
6780 int port = BP_PORT(bp);
6781 u32 reset_code = 0;
6782 int i, cnt, rc;
6784 /* Wait until tx fastpath tasks complete */
6785 for_each_queue(bp, i) {
6786 struct bnx2x_fastpath *fp = &bp->fp[i];
6788 cnt = 1000;
6789 while (bnx2x_has_tx_work_unload(fp)) {
6791 if (!cnt) {
6792 BNX2X_ERR("timeout waiting for queue[%d]\n",
6794 #ifdef BNX2X_STOP_ON_ERROR
6795 bnx2x_panic();
6796 return;
6797 #else
6798 break;
6799 #endif
6801 cnt--;
6802 msleep(1);
6805 /* Give HW time to discard old tx messages */
6806 msleep(1);
6808 if (CHIP_IS_E1(bp)) {
6809 /* invalidate mc list,
6810 * wait and poll (interrupts are off)
6812 bnx2x_invlidate_e1_mc_list(bp);
6813 bnx2x_set_eth_mac(bp, 0);
6815 } else {
6816 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
6818 bnx2x_set_eth_mac(bp, 0);
6820 for (i = 0; i < MC_HASH_SIZE; i++)
6821 REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
6824 #ifdef BCM_CNIC
6825 /* Clear iSCSI L2 MAC */
6826 mutex_lock(&bp->cnic_mutex);
6827 if (bp->cnic_flags & BNX2X_CNIC_FLAG_MAC_SET) {
6828 bnx2x_set_iscsi_eth_mac_addr(bp, 0);
6829 bp->cnic_flags &= ~BNX2X_CNIC_FLAG_MAC_SET;
6831 mutex_unlock(&bp->cnic_mutex);
6832 #endif
6834 if (unload_mode == UNLOAD_NORMAL)
6835 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
6837 else if (bp->flags & NO_WOL_FLAG)
6838 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
6840 else if (bp->wol) {
6841 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
6842 u8 *mac_addr = bp->dev->dev_addr;
6843 u32 val;
6844 /* The mac address is written to entries 1-4 to
6845 preserve entry 0 which is used by the PMF */
6846 u8 entry = (BP_E1HVN(bp) + 1)*8;
6848 val = (mac_addr[0] << 8) | mac_addr[1];
6849 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
6851 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
6852 (mac_addr[4] << 8) | mac_addr[5];
6853 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
6855 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
6857 } else
6858 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
6860 /* Close multi and leading connections
6861 Completions for ramrods are collected in a synchronous way */
6862 for_each_queue(bp, i)
6864 if (bnx2x_stop_client(bp, i))
6865 #ifdef BNX2X_STOP_ON_ERROR
6866 return;
6867 #else
6868 goto unload_error;
6869 #endif
6871 rc = bnx2x_func_stop(bp);
6872 if (rc) {
6873 BNX2X_ERR("Function stop failed!\n");
6874 #ifdef BNX2X_STOP_ON_ERROR
6875 return;
6876 #else
6877 goto unload_error;
6878 #endif
6880 #ifndef BNX2X_STOP_ON_ERROR
6881 unload_error:
6882 #endif
6883 if (!BP_NOMCP(bp))
6884 reset_code = bnx2x_fw_command(bp, reset_code, 0);
6885 else {
6886 DP(NETIF_MSG_IFDOWN, "NO MCP - load counts[%d] "
6887 "%d, %d, %d\n", BP_PATH(bp),
6888 load_count[BP_PATH(bp)][0],
6889 load_count[BP_PATH(bp)][1],
6890 load_count[BP_PATH(bp)][2]);
6891 load_count[BP_PATH(bp)][0]--;
6892 load_count[BP_PATH(bp)][1 + port]--;
6893 DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts[%d] "
6894 "%d, %d, %d\n", BP_PATH(bp),
6895 load_count[BP_PATH(bp)][0], load_count[BP_PATH(bp)][1],
6896 load_count[BP_PATH(bp)][2]);
6897 if (load_count[BP_PATH(bp)][0] == 0)
6898 reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
6899 else if (load_count[BP_PATH(bp)][1 + port] == 0)
6900 reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
6901 else
6902 reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
6905 if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
6906 (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
6907 bnx2x__link_reset(bp);
6909 /* Disable HW interrupts, NAPI */
6910 bnx2x_netif_stop(bp, 1);
6912 /* Release IRQs */
6913 bnx2x_free_irq(bp);
6915 /* Reset the chip */
6916 bnx2x_reset_chip(bp, reset_code);
6918 /* Report UNLOAD_DONE to MCP */
6919 if (!BP_NOMCP(bp))
6920 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
6924 void bnx2x_disable_close_the_gate(struct bnx2x *bp)
6926 u32 val;
6928 DP(NETIF_MSG_HW, "Disabling \"close the gates\"\n");
6930 if (CHIP_IS_E1(bp)) {
6931 int port = BP_PORT(bp);
6932 u32 addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
6933 MISC_REG_AEU_MASK_ATTN_FUNC_0;
6935 val = REG_RD(bp, addr);
6936 val &= ~(0x300);
6937 REG_WR(bp, addr, val);
6938 } else if (CHIP_IS_E1H(bp)) {
6939 val = REG_RD(bp, MISC_REG_AEU_GENERAL_MASK);
6940 val &= ~(MISC_AEU_GENERAL_MASK_REG_AEU_PXP_CLOSE_MASK |
6941 MISC_AEU_GENERAL_MASK_REG_AEU_NIG_CLOSE_MASK);
6942 REG_WR(bp, MISC_REG_AEU_GENERAL_MASK, val);
6947 /* Close gates #2, #3 and #4: */
6948 static void bnx2x_set_234_gates(struct bnx2x *bp, bool close)
6950 u32 val, addr;
6952 /* Gates #2 and #4a are closed/opened for "not E1" only */
6953 if (!CHIP_IS_E1(bp)) {
6954 /* #4 */
6955 val = REG_RD(bp, PXP_REG_HST_DISCARD_DOORBELLS);
6956 REG_WR(bp, PXP_REG_HST_DISCARD_DOORBELLS,
6957 close ? (val | 0x1) : (val & (~(u32)1)));
6958 /* #2 */
6959 val = REG_RD(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES);
6960 REG_WR(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES,
6961 close ? (val | 0x1) : (val & (~(u32)1)));
6964 /* #3 */
6965 addr = BP_PORT(bp) ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
6966 val = REG_RD(bp, addr);
6967 REG_WR(bp, addr, (!close) ? (val | 0x1) : (val & (~(u32)1)));
6969 DP(NETIF_MSG_HW, "%s gates #2, #3 and #4\n",
6970 close ? "closing" : "opening");
6971 mmiowb();
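/*
 * Illustrative sketch with a hypothetical name: the read-modify-write idiom
 * used above to set or clear bit 0 of a register depending on a flag.
 */
static inline void example_rmw_bit0(struct bnx2x *bp, u32 reg, bool set)
{
	u32 val = REG_RD(bp, reg);

	REG_WR(bp, reg, set ? (val | 0x1) : (val & ~(u32)0x1));
}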
6974 #define SHARED_MF_CLP_MAGIC 0x80000000 /* `magic' bit */
6976 static void bnx2x_clp_reset_prep(struct bnx2x *bp, u32 *magic_val)
6978 /* Do some magic... */
6979 u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
6980 *magic_val = val & SHARED_MF_CLP_MAGIC;
6981 MF_CFG_WR(bp, shared_mf_config.clp_mb, val | SHARED_MF_CLP_MAGIC);
6984 /* Restore the value of the `magic' bit.
6986 * @param bp driver handle.
6987 * @param magic_val Old value of the `magic' bit.
6989 static void bnx2x_clp_reset_done(struct bnx2x *bp, u32 magic_val)
6991 /* Restore the `magic' bit value... */
6992 /* u32 val = SHMEM_RD(bp, mf_cfg.shared_mf_config.clp_mb);
6993 SHMEM_WR(bp, mf_cfg.shared_mf_config.clp_mb,
6994 (val & (~SHARED_MF_CLP_MAGIC)) | magic_val); */
6995 u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
6996 MF_CFG_WR(bp, shared_mf_config.clp_mb,
6997 (val & (~SHARED_MF_CLP_MAGIC)) | magic_val);
7000 /* Prepares for MCP reset: takes care of CLP configurations.
7002 * @param bp
7003 * @param magic_val Old value of 'magic' bit.
7005 static void bnx2x_reset_mcp_prep(struct bnx2x *bp, u32 *magic_val)
7007 u32 shmem;
7008 u32 validity_offset;
7010 DP(NETIF_MSG_HW, "Starting\n");
7012 /* Set `magic' bit in order to save MF config */
7013 if (!CHIP_IS_E1(bp))
7014 bnx2x_clp_reset_prep(bp, magic_val);
7016 /* Get shmem offset */
7017 shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
7018 validity_offset = offsetof(struct shmem_region, validity_map[0]);
7020 /* Clear validity map flags */
7021 if (shmem > 0)
7022 REG_WR(bp, shmem + validity_offset, 0);
7025 #define MCP_TIMEOUT 5000 /* 5 seconds (in ms) */
7026 #define MCP_ONE_TIMEOUT 100 /* 100 ms */
7028 /* Waits for MCP_ONE_TIMEOUT or MCP_ONE_TIMEOUT*10,
7029 * depending on the HW type.
7031 * @param bp
7033 static inline void bnx2x_mcp_wait_one(struct bnx2x *bp)
7035 /* special handling for emulation and FPGA,
7036 wait 10 times longer */
7037 if (CHIP_REV_IS_SLOW(bp))
7038 msleep(MCP_ONE_TIMEOUT*10);
7039 else
7040 msleep(MCP_ONE_TIMEOUT);
7043 static int bnx2x_reset_mcp_comp(struct bnx2x *bp, u32 magic_val)
7045 u32 shmem, cnt, validity_offset, val;
7046 int rc = 0;
7048 msleep(100);
7050 /* Get shmem offset */
7051 shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
7052 if (shmem == 0) {
7053 BNX2X_ERR("Shmem 0 return failure\n");
7054 rc = -ENOTTY;
7055 goto exit_lbl;
7058 validity_offset = offsetof(struct shmem_region, validity_map[0]);
7060 /* Wait for MCP to come up */
7061 for (cnt = 0; cnt < (MCP_TIMEOUT / MCP_ONE_TIMEOUT); cnt++) {
7062 /* TBD: it's best to check the validity map of the last port;
7063 * currently this checks port 0.
7065 val = REG_RD(bp, shmem + validity_offset);
7066 DP(NETIF_MSG_HW, "shmem 0x%x validity map(0x%x)=0x%x\n", shmem,
7067 shmem + validity_offset, val);
7069 /* check that shared memory is valid. */
7070 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
7071 == (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
7072 break;
7074 bnx2x_mcp_wait_one(bp);
7077 DP(NETIF_MSG_HW, "Cnt=%d Shmem validity map 0x%x\n", cnt, val);
7079 /* Check that shared memory is valid. This indicates that MCP is up. */
7080 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) !=
7081 (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) {
7082 BNX2X_ERR("Shmem signature not present. MCP is not up !!\n");
7083 rc = -ENOTTY;
7084 goto exit_lbl;
7087 exit_lbl:
7088 /* Restore the `magic' bit value */
7089 if (!CHIP_IS_E1(bp))
7090 bnx2x_clp_reset_done(bp, magic_val);
7092 return rc;
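/*
 * Illustrative sketch, not part of the driver: the loop above polls the
 * shmem validity map up to MCP_TIMEOUT / MCP_ONE_TIMEOUT = 5000 / 100 = 50
 * times, i.e. roughly 5 s on real silicon and ~50 s on emulation/FPGA where
 * each step is stretched tenfold.  The per-iteration check boils down to a
 * mask test (hypothetical helper name):
 */
static inline bool example_shmem_valid(u32 validity)
{
	u32 mask = SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB;

	return (validity & mask) == mask;
}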
7095 static void bnx2x_pxp_prep(struct bnx2x *bp)
7097 if (!CHIP_IS_E1(bp)) {
7098 REG_WR(bp, PXP2_REG_RD_START_INIT, 0);
7099 REG_WR(bp, PXP2_REG_RQ_RBC_DONE, 0);
7100 REG_WR(bp, PXP2_REG_RQ_CFG_DONE, 0);
7101 mmiowb();
7106 * Reset the whole chip except for:
7107 * - PCIE core
7108 * - PCI Glue, PSWHST, PXP/PXP2 RF (all controlled by
7109 * one reset bit)
7110 * - IGU
7111 * - MISC (including AEU)
7112 * - GRC
7113 * - RBCN, RBCP
7115 static void bnx2x_process_kill_chip_reset(struct bnx2x *bp)
7117 u32 not_reset_mask1, reset_mask1, not_reset_mask2, reset_mask2;
7119 not_reset_mask1 =
7120 MISC_REGISTERS_RESET_REG_1_RST_HC |
7121 MISC_REGISTERS_RESET_REG_1_RST_PXPV |
7122 MISC_REGISTERS_RESET_REG_1_RST_PXP;
7124 not_reset_mask2 =
7125 MISC_REGISTERS_RESET_REG_2_RST_MDIO |
7126 MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE |
7127 MISC_REGISTERS_RESET_REG_2_RST_EMAC1_HARD_CORE |
7128 MISC_REGISTERS_RESET_REG_2_RST_MISC_CORE |
7129 MISC_REGISTERS_RESET_REG_2_RST_RBCN |
7130 MISC_REGISTERS_RESET_REG_2_RST_GRC |
7131 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_REG_HARD_CORE |
7132 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_HARD_CORE_RST_B;
7134 reset_mask1 = 0xffffffff;
7136 if (CHIP_IS_E1(bp))
7137 reset_mask2 = 0xffff;
7138 else
7139 reset_mask2 = 0x1ffff;
7141 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
7142 reset_mask1 & (~not_reset_mask1));
7143 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
7144 reset_mask2 & (~not_reset_mask2));
7146 barrier();
7147 mmiowb();
7149 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, reset_mask1);
7150 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, reset_mask2);
7151 mmiowb();
7154 static int bnx2x_process_kill(struct bnx2x *bp)
7156 int cnt = 1000;
7157 u32 val = 0;
7158 u32 sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1, pgl_exp_rom2;
7161 /* Empty the Tetris buffer, wait for 1s */
7162 do {
7163 sr_cnt = REG_RD(bp, PXP2_REG_RD_SR_CNT);
7164 blk_cnt = REG_RD(bp, PXP2_REG_RD_BLK_CNT);
7165 port_is_idle_0 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_0);
7166 port_is_idle_1 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_1);
7167 pgl_exp_rom2 = REG_RD(bp, PXP2_REG_PGL_EXP_ROM2);
7168 if ((sr_cnt == 0x7e) && (blk_cnt == 0xa0) &&
7169 ((port_is_idle_0 & 0x1) == 0x1) &&
7170 ((port_is_idle_1 & 0x1) == 0x1) &&
7171 (pgl_exp_rom2 == 0xffffffff))
7172 break;
7173 msleep(1);
7174 } while (cnt-- > 0);
7176 if (cnt <= 0) {
7177 DP(NETIF_MSG_HW, "Tetris buffer didn't get empty or there"
7178 " are still"
7179 " outstanding read requests after 1s!\n");
7180 DP(NETIF_MSG_HW, "sr_cnt=0x%08x, blk_cnt=0x%08x,"
7181 " port_is_idle_0=0x%08x,"
7182 " port_is_idle_1=0x%08x, pgl_exp_rom2=0x%08x\n",
7183 sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1,
7184 pgl_exp_rom2);
7185 return -EAGAIN;
7188 barrier();
7190 /* Close gates #2, #3 and #4 */
7191 bnx2x_set_234_gates(bp, true);
7193 /* TBD: Indicate that "process kill" is in progress to MCP */
7195 /* Clear "unprepared" bit */
7196 REG_WR(bp, MISC_REG_UNPREPARED, 0);
7197 barrier();
7199 /* Make sure all is written to the chip before the reset */
7200 mmiowb();
7202 /* Wait for 1ms to empty GLUE and PCI-E core queues,
7203 * PSWHST, GRC and PSWRD Tetris buffer.
7205 msleep(1);
7207 /* Prepare to chip reset: */
7208 /* MCP */
7209 bnx2x_reset_mcp_prep(bp, &val);
7211 /* PXP */
7212 bnx2x_pxp_prep(bp);
7213 barrier();
7215 /* reset the chip */
7216 bnx2x_process_kill_chip_reset(bp);
7217 barrier();
7219 /* Recover after reset: */
7220 /* MCP */
7221 if (bnx2x_reset_mcp_comp(bp, val))
7222 return -EAGAIN;
7224 /* PXP */
7225 bnx2x_pxp_prep(bp);
7227 /* Open the gates #2, #3 and #4 */
7228 bnx2x_set_234_gates(bp, false);
7230 /* TBD: IGU/AEU preparation - bring the AEU/IGU back to a
7231 * reset state, re-enable attentions. */
7233 return 0;
7236 static int bnx2x_leader_reset(struct bnx2x *bp)
7238 int rc = 0;
7239 /* Try to recover after the failure */
7240 if (bnx2x_process_kill(bp)) {
7241 printk(KERN_ERR "%s: Something bad happened! Aii!\n",
7242 bp->dev->name);
7243 rc = -EAGAIN;
7244 goto exit_leader_reset;
7247 /* Clear "reset is in progress" bit and update the driver state */
7248 bnx2x_set_reset_done(bp);
7249 bp->recovery_state = BNX2X_RECOVERY_DONE;
7251 exit_leader_reset:
7252 bp->is_leader = 0;
7253 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESERVED_08);
7254 smp_wmb();
7255 return rc;
7258 /* Assumption: runs under rtnl lock. This together with the fact
7259 * that it's called only from bnx2x_reset_task() ensures that it
7260 * will never be called when netif_running(bp->dev) is false.
7262 static void bnx2x_parity_recover(struct bnx2x *bp)
7264 DP(NETIF_MSG_HW, "Handling parity\n");
7265 while (1) {
7266 switch (bp->recovery_state) {
7267 case BNX2X_RECOVERY_INIT:
7268 DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_INIT\n");
7269 /* Try to get a LEADER_LOCK HW lock */
7270 if (bnx2x_trylock_hw_lock(bp,
7271 HW_LOCK_RESOURCE_RESERVED_08))
7272 bp->is_leader = 1;
7274 /* Stop the driver */
7275 /* If interface has been removed - break */
7276 if (bnx2x_nic_unload(bp, UNLOAD_RECOVERY))
7277 return;
7279 bp->recovery_state = BNX2X_RECOVERY_WAIT;
7280 /* Ensure "is_leader" and "recovery_state"
7281 * update values are seen on other CPUs
7283 smp_wmb();
7284 break;
7286 case BNX2X_RECOVERY_WAIT:
7287 DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_WAIT\n");
7288 if (bp->is_leader) {
7289 u32 load_counter = bnx2x_get_load_cnt(bp);
7290 if (load_counter) {
7291 /* Wait until all other functions get
7292 * down.
7294 schedule_delayed_work(&bp->reset_task,
7295 HZ/10);
7296 return;
7297 } else {
7298 /* If all other functions got down -
7299 * try to bring the chip back to
7300 * normal. In any case it's an exit
7301 * point for a leader.
7303 if (bnx2x_leader_reset(bp) ||
7304 bnx2x_nic_load(bp, LOAD_NORMAL)) {
7305 printk(KERN_ERR"%s: Recovery "
7306 "has failed. Power cycle is "
7307 "needed.\n", bp->dev->name);
7308 /* Disconnect this device */
7309 netif_device_detach(bp->dev);
7310 /* Block ifup for all function
7311 * of this ASIC until
7312 * "process kill" or power
7313 * cycle.
7315 bnx2x_set_reset_in_progress(bp);
7316 /* Shut down the power */
7317 bnx2x_set_power_state(bp,
7318 PCI_D3hot);
7319 return;
7322 return;
7324 } else { /* non-leader */
7325 if (!bnx2x_reset_is_done(bp)) {
7326 /* Try to get a LEADER_LOCK HW lock,
7327 * since a former leader may have
7328 * been unloaded by the user or
7329 * released leadership for some other
7330 * reason.
7332 if (bnx2x_trylock_hw_lock(bp,
7333 HW_LOCK_RESOURCE_RESERVED_08)) {
7334 /* I'm a leader now! Restart a
7335 * switch case.
7337 bp->is_leader = 1;
7338 break;
7341 schedule_delayed_work(&bp->reset_task,
7342 HZ/10);
7343 return;
7345 } else { /* A leader has completed
7346 * the "process kill". It's an exit
7347 * point for a non-leader.
7349 bnx2x_nic_load(bp, LOAD_NORMAL);
7350 bp->recovery_state =
7351 BNX2X_RECOVERY_DONE;
7352 smp_wmb();
7353 return;
7356 default:
7357 return;
7362 /* bnx2x_nic_unload() flushes the bnx2x_wq, so the reset task is
7363 * scheduled on a generic workqueue in order to prevent a deadlock.
7365 static void bnx2x_reset_task(struct work_struct *work)
7367 struct bnx2x *bp = container_of(work, struct bnx2x, reset_task.work);
7369 #ifdef BNX2X_STOP_ON_ERROR
7370 BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
7371 " so reset not done to allow debug dump,\n"
7372 KERN_ERR " you will need to reboot when done\n");
7373 return;
7374 #endif
7376 rtnl_lock();
7378 if (!netif_running(bp->dev))
7379 goto reset_task_exit;
7381 if (unlikely(bp->recovery_state != BNX2X_RECOVERY_DONE))
7382 bnx2x_parity_recover(bp);
7383 else {
7384 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
7385 bnx2x_nic_load(bp, LOAD_NORMAL);
7388 reset_task_exit:
7389 rtnl_unlock();
7392 /* end of nic load/unload */
7395 * Init service functions
7398 u32 bnx2x_get_pretend_reg(struct bnx2x *bp)
7400 u32 base = PXP2_REG_PGL_PRETEND_FUNC_F0;
7401 u32 stride = PXP2_REG_PGL_PRETEND_FUNC_F1 - base;
7402 return base + (BP_ABS_FUNC(bp)) * stride;
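/*
 * Illustrative worked example, not part of the driver: per-function
 * registers sit at a fixed stride, so the pretend register for absolute
 * function N is F0 + N * (F1 - F0).  A generic form of the calculation,
 * with hypothetical parameter names:
 */
static inline u32 example_per_func_reg(u32 func0_addr, u32 func1_addr,
				       int abs_func)
{
	u32 stride = func1_addr - func0_addr;

	return func0_addr + abs_func * stride;
}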
7405 static void bnx2x_undi_int_disable_e1h(struct bnx2x *bp)
7407 u32 reg = bnx2x_get_pretend_reg(bp);
7409 /* Flush all outstanding writes */
7410 mmiowb();
7412 /* Pretend to be function 0 */
7413 REG_WR(bp, reg, 0);
7414 REG_RD(bp, reg); /* Flush the GRC transaction (in the chip) */
7416 /* From now we are in the "like-E1" mode */
7417 bnx2x_int_disable(bp);
7419 /* Flush all outstanding writes */
7420 mmiowb();
7422 /* Restore the original function */
7423 REG_WR(bp, reg, BP_ABS_FUNC(bp));
7424 REG_RD(bp, reg);
7427 static inline void bnx2x_undi_int_disable(struct bnx2x *bp)
7429 if (CHIP_IS_E1(bp))
7430 bnx2x_int_disable(bp);
7431 else
7432 bnx2x_undi_int_disable_e1h(bp);
7435 static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
7437 u32 val;
7439 /* Check if there is any driver already loaded */
7440 val = REG_RD(bp, MISC_REG_UNPREPARED);
7441 if (val == 0x1) {
7442 /* Check if it is the UNDI driver
7443 * UNDI driver initializes CID offset for normal bell to 0x7
7445 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
7446 val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
7447 if (val == 0x7) {
7448 u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7449 /* save our pf_num */
7450 int orig_pf_num = bp->pf_num;
7451 u32 swap_en;
7452 u32 swap_val;
7454 /* clear the UNDI indication */
7455 REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
7457 BNX2X_DEV_INFO("UNDI is active! reset device\n");
7459 /* try unload UNDI on port 0 */
7460 bp->pf_num = 0;
7461 bp->fw_seq =
7462 (SHMEM_RD(bp, func_mb[bp->pf_num].drv_mb_header) &
7463 DRV_MSG_SEQ_NUMBER_MASK);
7464 reset_code = bnx2x_fw_command(bp, reset_code, 0);
7466 /* if UNDI is loaded on the other port */
7467 if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {
7469 /* send "DONE" for previous unload */
7470 bnx2x_fw_command(bp,
7471 DRV_MSG_CODE_UNLOAD_DONE, 0);
7473 /* unload UNDI on port 1 */
7474 bp->pf_num = 1;
7475 bp->fw_seq =
7476 (SHMEM_RD(bp, func_mb[bp->pf_num].drv_mb_header) &
7477 DRV_MSG_SEQ_NUMBER_MASK);
7478 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7480 bnx2x_fw_command(bp, reset_code, 0);
7483 /* now it's safe to release the lock */
7484 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
7486 bnx2x_undi_int_disable(bp);
7488 /* close input traffic and wait for it */
7489 /* Do not rcv packets to BRB */
7490 REG_WR(bp,
7491 (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
7492 NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
7493 /* Do not direct rcv packets that are not for MCP to
7494 * the BRB */
7495 REG_WR(bp,
7496 (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
7497 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
7498 /* clear AEU */
7499 REG_WR(bp,
7500 (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
7501 MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
7502 msleep(10);
7504 /* save NIG port swap info */
7505 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
7506 swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
7507 /* reset device */
7508 REG_WR(bp,
7509 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
7510 0xd3ffffff);
7511 REG_WR(bp,
7512 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
7513 0x1403);
7514 /* take the NIG out of reset and restore swap values */
7515 REG_WR(bp,
7516 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
7517 MISC_REGISTERS_RESET_REG_1_RST_NIG);
7518 REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
7519 REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);
7521 /* send unload done to the MCP */
7522 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
7524 /* restore our func and fw_seq */
7525 bp->pf_num = orig_pf_num;
7526 bp->fw_seq =
7527 (SHMEM_RD(bp, func_mb[bp->pf_num].drv_mb_header) &
7528 DRV_MSG_SEQ_NUMBER_MASK);
7530 } else
7531 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
7535 static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
7537 u32 val, val2, val3, val4, id;
7538 u16 pmc;
7540 /* Get the chip revision id and number. */
7541 /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
7542 val = REG_RD(bp, MISC_REG_CHIP_NUM);
7543 id = ((val & 0xffff) << 16);
7544 val = REG_RD(bp, MISC_REG_CHIP_REV);
7545 id |= ((val & 0xf) << 12);
7546 val = REG_RD(bp, MISC_REG_CHIP_METAL);
7547 id |= ((val & 0xff) << 4);
7548 val = REG_RD(bp, MISC_REG_BOND_ID);
7549 id |= (val & 0xf);
7550 bp->common.chip_id = id;
7552 /* Set doorbell size */
7553 bp->db_size = (1 << BNX2X_DB_SHIFT);
7555 if (CHIP_IS_E2(bp)) {
7556 val = REG_RD(bp, MISC_REG_PORT4MODE_EN_OVWR);
7557 if ((val & 1) == 0)
7558 val = REG_RD(bp, MISC_REG_PORT4MODE_EN);
7559 else
7560 val = (val >> 1) & 1;
7561 BNX2X_DEV_INFO("chip is in %s\n", val ? "4_PORT_MODE" :
7562 "2_PORT_MODE");
7563 bp->common.chip_port_mode = val ? CHIP_4_PORT_MODE :
7564 CHIP_2_PORT_MODE;
7566 if (CHIP_MODE_IS_4_PORT(bp))
7567 bp->pfid = (bp->pf_num >> 1); /* 0..3 */
7568 else
7569 bp->pfid = (bp->pf_num & 0x6); /* 0, 2, 4, 6 */
7570 } else {
7571 bp->common.chip_port_mode = CHIP_PORT_MODE_NONE; /* N/A */
7572 bp->pfid = bp->pf_num; /* 0..7 */
7576 * set base FW non-default (fast path) status block id, this value is
7577 * used to initialize the fw_sb_id saved on the fp/queue structure to
7578 * determine the id used by the FW.
7580 if (CHIP_IS_E1x(bp))
7581 bp->base_fw_ndsb = BP_PORT(bp) * FP_SB_MAX_E1x;
7582 else /* E2 */
7583 bp->base_fw_ndsb = BP_PORT(bp) * FP_SB_MAX_E2;
7585 bp->link_params.chip_id = bp->common.chip_id;
7586 BNX2X_DEV_INFO("chip ID is 0x%x\n", id);
7588 val = (REG_RD(bp, 0x2874) & 0x55);
7589 if ((bp->common.chip_id & 0x1) ||
7590 (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) {
7591 bp->flags |= ONE_PORT_FLAG;
7592 BNX2X_DEV_INFO("single port device\n");
7595 val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
7596 bp->common.flash_size = (NVRAM_1MB_SIZE <<
7597 (val & MCPR_NVM_CFG4_FLASH_SIZE));
7598 BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
7599 bp->common.flash_size, bp->common.flash_size);
7601 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
7602 bp->common.shmem2_base = REG_RD(bp, (BP_PATH(bp) ?
7603 MISC_REG_GENERIC_CR_1 :
7604 MISC_REG_GENERIC_CR_0));
7605 bp->link_params.shmem_base = bp->common.shmem_base;
7606 bp->link_params.shmem2_base = bp->common.shmem2_base;
7607 BNX2X_DEV_INFO("shmem offset 0x%x shmem2 offset 0x%x\n",
7608 bp->common.shmem_base, bp->common.shmem2_base);
7610 if (!bp->common.shmem_base) {
7611 BNX2X_DEV_INFO("MCP not active\n");
7612 bp->flags |= NO_MCP_FLAG;
7613 return;
7616 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
7617 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
7618 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
7619 BNX2X_ERR("BAD MCP validity signature\n");
7621 bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
7622 BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config);
7624 bp->link_params.hw_led_mode = ((bp->common.hw_config &
7625 SHARED_HW_CFG_LED_MODE_MASK) >>
7626 SHARED_HW_CFG_LED_MODE_SHIFT);
7628 bp->link_params.feature_config_flags = 0;
7629 val = SHMEM_RD(bp, dev_info.shared_feature_config.config);
7630 if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED)
7631 bp->link_params.feature_config_flags |=
7632 FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
7633 else
7634 bp->link_params.feature_config_flags &=
7635 ~FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
7637 val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
7638 bp->common.bc_ver = val;
7639 BNX2X_DEV_INFO("bc_ver %X\n", val);
7640 if (val < BNX2X_BC_VER) {
7641 /* for now only warn
7642 * later we might need to enforce this */
7643 BNX2X_ERR("This driver needs bc_ver %X but found %X, "
7644 "please upgrade BC\n", BNX2X_BC_VER, val);
7646 bp->link_params.feature_config_flags |=
7647 (val >= REQ_BC_VER_4_VRFY_FIRST_PHY_OPT_MDL) ?
7648 FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY : 0;
7649 bp->link_params.feature_config_flags |=
7650 (val >= REQ_BC_VER_4_VRFY_SPECIFIC_PHY_OPT_MDL) ?
7651 FEATURE_CONFIG_BC_SUPPORTS_DUAL_PHY_OPT_MDL_VRFY : 0;
7653 if (BP_E1HVN(bp) == 0) {
7654 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
7655 bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
7656 } else {
7657 /* no WOL capability for E1HVN != 0 */
7658 bp->flags |= NO_WOL_FLAG;
7660 BNX2X_DEV_INFO("%sWoL capable\n",
7661 (bp->flags & NO_WOL_FLAG) ? "not " : "");
7663 val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
7664 val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
7665 val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
7666 val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
7668 dev_info(&bp->pdev->dev, "part number %X-%X-%X-%X\n",
7669 val, val2, val3, val4);
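/*
 * Illustrative sketch with hypothetical helpers: bnx2x_get_common_hwinfo()
 * above packs chip_id as chip number in bits 31-16, revision in 15-12,
 * metal in 11-4 and bond_id in 3-0.  Unpacking it again looks like this:
 */
static inline u16 example_chip_num(u32 chip_id)   { return chip_id >> 16; }
static inline u8 example_chip_rev(u32 chip_id)    { return (chip_id >> 12) & 0xf; }
static inline u8 example_chip_metal(u32 chip_id)  { return (chip_id >> 4) & 0xff; }
static inline u8 example_chip_bond(u32 chip_id)   { return chip_id & 0xf; }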
7672 #define IGU_FID(val) GET_FIELD((val), IGU_REG_MAPPING_MEMORY_FID)
7673 #define IGU_VEC(val) GET_FIELD((val), IGU_REG_MAPPING_MEMORY_VECTOR)
7675 static void __devinit bnx2x_get_igu_cam_info(struct bnx2x *bp)
7677 int pfid = BP_FUNC(bp);
7678 int vn = BP_E1HVN(bp);
7679 int igu_sb_id;
7680 u32 val;
7681 u8 fid;
7683 bp->igu_base_sb = 0xff;
7684 bp->igu_sb_cnt = 0;
7685 if (CHIP_INT_MODE_IS_BC(bp)) {
7686 bp->igu_sb_cnt = min_t(u8, FP_SB_MAX_E1x,
7687 bp->l2_cid_count);
7689 bp->igu_base_sb = (CHIP_MODE_IS_4_PORT(bp) ? pfid : vn) *
7690 FP_SB_MAX_E1x;
7692 bp->igu_dsb_id = E1HVN_MAX * FP_SB_MAX_E1x +
7693 (CHIP_MODE_IS_4_PORT(bp) ? pfid : vn);
7695 return;
7698 /* IGU in normal mode - read CAM */
7699 for (igu_sb_id = 0; igu_sb_id < IGU_REG_MAPPING_MEMORY_SIZE;
7700 igu_sb_id++) {
7701 val = REG_RD(bp, IGU_REG_MAPPING_MEMORY + igu_sb_id * 4);
7702 if (!(val & IGU_REG_MAPPING_MEMORY_VALID))
7703 continue;
7704 fid = IGU_FID(val);
7705 if ((fid & IGU_FID_ENCODE_IS_PF)) {
7706 if ((fid & IGU_FID_PF_NUM_MASK) != pfid)
7707 continue;
7708 if (IGU_VEC(val) == 0)
7709 /* default status block */
7710 bp->igu_dsb_id = igu_sb_id;
7711 else {
7712 if (bp->igu_base_sb == 0xff)
7713 bp->igu_base_sb = igu_sb_id;
7714 bp->igu_sb_cnt++;
7718 bp->igu_sb_cnt = min_t(u8, bp->igu_sb_cnt, bp->l2_cid_count);
7719 if (bp->igu_sb_cnt == 0)
7720 BNX2X_ERR("CAM configuration error\n");
7723 static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
7724 u32 switch_cfg)
7726 int cfg_size = 0, idx, port = BP_PORT(bp);
7728 /* Aggregation of supported attributes of all external phys */
7729 bp->port.supported[0] = 0;
7730 bp->port.supported[1] = 0;
7731 switch (bp->link_params.num_phys) {
7732 case 1:
7733 bp->port.supported[0] = bp->link_params.phy[INT_PHY].supported;
7734 cfg_size = 1;
7735 break;
7736 case 2:
7737 bp->port.supported[0] = bp->link_params.phy[EXT_PHY1].supported;
7738 cfg_size = 1;
7739 break;
7740 case 3:
7741 if (bp->link_params.multi_phy_config &
7742 PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
7743 bp->port.supported[1] =
7744 bp->link_params.phy[EXT_PHY1].supported;
7745 bp->port.supported[0] =
7746 bp->link_params.phy[EXT_PHY2].supported;
7747 } else {
7748 bp->port.supported[0] =
7749 bp->link_params.phy[EXT_PHY1].supported;
7750 bp->port.supported[1] =
7751 bp->link_params.phy[EXT_PHY2].supported;
7753 cfg_size = 2;
7754 break;
7757 if (!(bp->port.supported[0] || bp->port.supported[1])) {
7758 BNX2X_ERR("NVRAM config error. BAD phy config."
7759 "PHY1 config 0x%x, PHY2 config 0x%x\n",
7760 SHMEM_RD(bp,
7761 dev_info.port_hw_config[port].external_phy_config),
7762 SHMEM_RD(bp,
7763 dev_info.port_hw_config[port].external_phy_config2));
7764 return;
7767 switch (switch_cfg) {
7768 case SWITCH_CFG_1G:
7769 bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
7770 port*0x10);
7771 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
7772 break;
7774 case SWITCH_CFG_10G:
7775 bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
7776 port*0x18);
7777 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
7779 break;
7781 default:
7782 BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
7783 bp->port.link_config[0]);
7784 return;
7786 /* mask what we support according to speed_cap_mask per configuration */
7787 for (idx = 0; idx < cfg_size; idx++) {
7788 if (!(bp->link_params.speed_cap_mask[idx] &
7789 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
7790 bp->port.supported[idx] &= ~SUPPORTED_10baseT_Half;
7792 if (!(bp->link_params.speed_cap_mask[idx] &
7793 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
7794 bp->port.supported[idx] &= ~SUPPORTED_10baseT_Full;
7796 if (!(bp->link_params.speed_cap_mask[idx] &
7797 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
7798 bp->port.supported[idx] &= ~SUPPORTED_100baseT_Half;
7800 if (!(bp->link_params.speed_cap_mask[idx] &
7801 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
7802 bp->port.supported[idx] &= ~SUPPORTED_100baseT_Full;
7804 if (!(bp->link_params.speed_cap_mask[idx] &
7805 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
7806 bp->port.supported[idx] &= ~(SUPPORTED_1000baseT_Half |
7807 SUPPORTED_1000baseT_Full);
7809 if (!(bp->link_params.speed_cap_mask[idx] &
7810 PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
7811 bp->port.supported[idx] &= ~SUPPORTED_2500baseX_Full;
7813 if (!(bp->link_params.speed_cap_mask[idx] &
7814 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
7815 bp->port.supported[idx] &= ~SUPPORTED_10000baseT_Full;
7819 BNX2X_DEV_INFO("supported 0x%x 0x%x\n", bp->port.supported[0],
7820 bp->port.supported[1]);
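/* Translate the NVRAM link_config word(s) into requested line speed,
 * duplex, flow control and advertised modes for each PHY configuration.
 */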
7823 static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
7825 u32 link_config, idx, cfg_size = 0;
7826 bp->port.advertising[0] = 0;
7827 bp->port.advertising[1] = 0;
7828 switch (bp->link_params.num_phys) {
7829 case 1:
7830 case 2:
7831 cfg_size = 1;
7832 break;
7833 case 3:
7834 cfg_size = 2;
7835 break;
7837 for (idx = 0; idx < cfg_size; idx++) {
7838 bp->link_params.req_duplex[idx] = DUPLEX_FULL;
7839 link_config = bp->port.link_config[idx];
7840 switch (link_config & PORT_FEATURE_LINK_SPEED_MASK) {
7841 case PORT_FEATURE_LINK_SPEED_AUTO:
7842 if (bp->port.supported[idx] & SUPPORTED_Autoneg) {
7843 bp->link_params.req_line_speed[idx] =
7844 SPEED_AUTO_NEG;
7845 bp->port.advertising[idx] |=
7846 bp->port.supported[idx];
7847 } else {
7848 /* force 10G, no AN */
7849 bp->link_params.req_line_speed[idx] =
7850 SPEED_10000;
7851 bp->port.advertising[idx] |=
7852 (ADVERTISED_10000baseT_Full |
7853 ADVERTISED_FIBRE);
7854 continue;
7856 break;
7858 case PORT_FEATURE_LINK_SPEED_10M_FULL:
7859 if (bp->port.supported[idx] & SUPPORTED_10baseT_Full) {
7860 bp->link_params.req_line_speed[idx] =
7861 SPEED_10;
7862 bp->port.advertising[idx] |=
7863 (ADVERTISED_10baseT_Full |
7864 ADVERTISED_TP);
7865 } else {
7866 BNX2X_ERROR("NVRAM config error. "
7867 "Invalid link_config 0x%x"
7868 " speed_cap_mask 0x%x\n",
7869 link_config,
7870 bp->link_params.speed_cap_mask[idx]);
7871 return;
7873 break;
7875 case PORT_FEATURE_LINK_SPEED_10M_HALF:
7876 if (bp->port.supported[idx] & SUPPORTED_10baseT_Half) {
7877 bp->link_params.req_line_speed[idx] =
7878 SPEED_10;
7879 bp->link_params.req_duplex[idx] =
7880 DUPLEX_HALF;
7881 bp->port.advertising[idx] |=
7882 (ADVERTISED_10baseT_Half |
7883 ADVERTISED_TP);
7884 } else {
7885 BNX2X_ERROR("NVRAM config error. "
7886 "Invalid link_config 0x%x"
7887 " speed_cap_mask 0x%x\n",
7888 link_config,
7889 bp->link_params.speed_cap_mask[idx]);
7890 return;
7892 break;
7894 case PORT_FEATURE_LINK_SPEED_100M_FULL:
7895 if (bp->port.supported[idx] & SUPPORTED_100baseT_Full) {
7896 bp->link_params.req_line_speed[idx] =
7897 SPEED_100;
7898 bp->port.advertising[idx] |=
7899 (ADVERTISED_100baseT_Full |
7900 ADVERTISED_TP);
7901 } else {
7902 BNX2X_ERROR("NVRAM config error. "
7903 "Invalid link_config 0x%x"
7904 " speed_cap_mask 0x%x\n",
7905 link_config,
7906 bp->link_params.speed_cap_mask[idx]);
7907 return;
7909 break;
7911 case PORT_FEATURE_LINK_SPEED_100M_HALF:
7912 if (bp->port.supported[idx] & SUPPORTED_100baseT_Half) {
7913 bp->link_params.req_line_speed[idx] = SPEED_100;
7914 bp->link_params.req_duplex[idx] = DUPLEX_HALF;
7915 bp->port.advertising[idx] |=
7916 (ADVERTISED_100baseT_Half |
7917 ADVERTISED_TP);
7918 } else {
7919 BNX2X_ERROR("NVRAM config error. "
7920 "Invalid link_config 0x%x"
7921 " speed_cap_mask 0x%x\n",
7922 link_config,
7923 bp->link_params.speed_cap_mask[idx]);
7924 return;
7926 break;
7928 case PORT_FEATURE_LINK_SPEED_1G:
7929 if (bp->port.supported[idx] &
7930 SUPPORTED_1000baseT_Full) {
7931 bp->link_params.req_line_speed[idx] =
7932 SPEED_1000;
7933 bp->port.advertising[idx] |=
7934 (ADVERTISED_1000baseT_Full |
7935 ADVERTISED_TP);
7936 } else {
7937 BNX2X_ERROR("NVRAM config error. "
7938 "Invalid link_config 0x%x"
7939 " speed_cap_mask 0x%x\n",
7940 link_config,
7941 bp->link_params.speed_cap_mask[idx]);
7942 return;
7944 break;
7946 case PORT_FEATURE_LINK_SPEED_2_5G:
7947 if (bp->port.supported[idx] &
7948 SUPPORTED_2500baseX_Full) {
7949 bp->link_params.req_line_speed[idx] =
7950 SPEED_2500;
7951 bp->port.advertising[idx] |=
7952 (ADVERTISED_2500baseX_Full |
7953 ADVERTISED_TP);
7954 } else {
7955 BNX2X_ERROR("NVRAM config error. "
7956 "Invalid link_config 0x%x"
7957 " speed_cap_mask 0x%x\n",
7958 link_config,
7959 bp->link_params.speed_cap_mask[idx]);
7960 return;
7962 break;
7964 case PORT_FEATURE_LINK_SPEED_10G_CX4:
7965 case PORT_FEATURE_LINK_SPEED_10G_KX4:
7966 case PORT_FEATURE_LINK_SPEED_10G_KR:
7967 if (bp->port.supported[idx] &
7968 SUPPORTED_10000baseT_Full) {
7969 bp->link_params.req_line_speed[idx] =
7970 SPEED_10000;
7971 bp->port.advertising[idx] |=
7972 (ADVERTISED_10000baseT_Full |
7973 ADVERTISED_FIBRE);
7974 } else {
7975 BNX2X_ERROR("NVRAM config error. "
7976 "Invalid link_config 0x%x"
7977 " speed_cap_mask 0x%x\n",
7978 link_config,
7979 bp->link_params.speed_cap_mask[idx]);
7980 return;
7982 break;
7984 default:
7985 BNX2X_ERROR("NVRAM config error. "
7986 "BAD link speed link_config 0x%x\n",
7987 link_config);
7988 bp->link_params.req_line_speed[idx] = SPEED_AUTO_NEG;
7989 bp->port.advertising[idx] = bp->port.supported[idx];
7990 break;
7993 bp->link_params.req_flow_ctrl[idx] = (link_config &
7994 PORT_FEATURE_FLOW_CONTROL_MASK);
7995 if ((bp->link_params.req_flow_ctrl[idx] ==
7996 BNX2X_FLOW_CTRL_AUTO) &&
7997 !(bp->port.supported[idx] & SUPPORTED_Autoneg)) {
7998 bp->link_params.req_flow_ctrl[idx] =
7999 BNX2X_FLOW_CTRL_NONE;
8002 BNX2X_DEV_INFO("req_line_speed %d req_duplex %d req_flow_ctrl"
8003 " 0x%x advertising 0x%x\n",
8004 bp->link_params.req_line_speed[idx],
8005 bp->link_params.req_duplex[idx],
8006 bp->link_params.req_flow_ctrl[idx],
8007 bp->port.advertising[idx]);
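/* Assemble a 6-byte MAC address from the hi (16-bit) and lo (32-bit) words
 * kept in shmem, converting to network byte order; e.g. mac_hi 0x0010 and
 * mac_lo 0x18012345 yield 00:10:18:01:23:45.
 */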
8011 static void __devinit bnx2x_set_mac_buf(u8 *mac_buf, u32 mac_lo, u16 mac_hi)
8013 mac_hi = cpu_to_be16(mac_hi);
8014 mac_lo = cpu_to_be32(mac_lo);
8015 memcpy(mac_buf, &mac_hi, sizeof(mac_hi));
8016 memcpy(mac_buf + sizeof(mac_hi), &mac_lo, sizeof(mac_lo));
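/* Read the per-port hardware info from shmem (lane config, speed capability
 * masks, link_config, WoL default, MAC addresses), probe the PHYs and derive
 * the supported and requested link settings.
 */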
8019 static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
8021 int port = BP_PORT(bp);
8022 u32 val, val2;
8023 u32 config;
8024 u32 ext_phy_type, ext_phy_config;
8026 bp->link_params.bp = bp;
8027 bp->link_params.port = port;
8029 bp->link_params.lane_config =
8030 SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
8032 bp->link_params.speed_cap_mask[0] =
8033 SHMEM_RD(bp,
8034 dev_info.port_hw_config[port].speed_capability_mask);
8035 bp->link_params.speed_cap_mask[1] =
8036 SHMEM_RD(bp,
8037 dev_info.port_hw_config[port].speed_capability_mask2);
8038 bp->port.link_config[0] =
8039 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
8041 bp->port.link_config[1] =
8042 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config2);
8044 bp->link_params.multi_phy_config =
8045 SHMEM_RD(bp, dev_info.port_hw_config[port].multi_phy_config);
8046 /* If the device is capable of WoL, set the default state according
8047 * to the HW
8049 config = SHMEM_RD(bp, dev_info.port_feature_config[port].config);
8050 bp->wol = (!(bp->flags & NO_WOL_FLAG) &&
8051 (config & PORT_FEATURE_WOL_ENABLED));
8053 BNX2X_DEV_INFO("lane_config 0x%08x "
8054 "speed_cap_mask0 0x%08x link_config0 0x%08x\n",
8055 bp->link_params.lane_config,
8056 bp->link_params.speed_cap_mask[0],
8057 bp->port.link_config[0]);
8059 bp->link_params.switch_cfg = (bp->port.link_config[0] &
8060 PORT_FEATURE_CONNECTED_SWITCH_MASK);
8061 bnx2x_phy_probe(&bp->link_params);
8062 bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);
8064 bnx2x_link_settings_requested(bp);
8067 * If connected directly, work with the internal PHY, otherwise, work
8068 * with the external PHY
8070 ext_phy_config =
8071 SHMEM_RD(bp,
8072 dev_info.port_hw_config[port].external_phy_config);
8073 ext_phy_type = XGXS_EXT_PHY_TYPE(ext_phy_config);
8074 if (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)
8075 bp->mdio.prtad = bp->port.phy_addr;
8077 else if ((ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) &&
8078 (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN))
8079 bp->mdio.prtad =
8080 XGXS_EXT_PHY_ADDR(ext_phy_config);
8082 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
8083 val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
8084 bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
8085 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
8086 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
8088 #ifdef BCM_CNIC
8089 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].iscsi_mac_upper);
8090 val = SHMEM_RD(bp, dev_info.port_hw_config[port].iscsi_mac_lower);
8091 bnx2x_set_mac_buf(bp->iscsi_mac, val, val2);
8092 #endif
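/* Gather function-level hardware info: interrupt block mode (HC vs. IGU),
 * IGU status block layout, multi-function configuration/OV tag, firmware
 * mailbox sequence and MAC addresses.
 */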
8095 static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
8097 int func = BP_ABS_FUNC(bp);
8098 int vn;
8099 u32 val, val2;
8100 int rc = 0;
8102 bnx2x_get_common_hwinfo(bp);
8104 if (CHIP_IS_E1x(bp)) {
8105 bp->common.int_block = INT_BLOCK_HC;
8107 bp->igu_dsb_id = DEF_SB_IGU_ID;
8108 bp->igu_base_sb = 0;
8109 bp->igu_sb_cnt = min_t(u8, FP_SB_MAX_E1x, bp->l2_cid_count);
8110 } else {
8111 bp->common.int_block = INT_BLOCK_IGU;
8112 val = REG_RD(bp, IGU_REG_BLOCK_CONFIGURATION);
8113 if (val & IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN) {
8114 DP(NETIF_MSG_PROBE, "IGU Backward Compatible Mode\n");
8115 bp->common.int_block |= INT_BLOCK_MODE_BW_COMP;
8116 } else
8117 DP(NETIF_MSG_PROBE, "IGU Normal Mode\n");
8119 bnx2x_get_igu_cam_info(bp);
8122 DP(NETIF_MSG_PROBE, "igu_dsb_id %d igu_base_sb %d igu_sb_cnt %d\n",
8123 bp->igu_dsb_id, bp->igu_base_sb, bp->igu_sb_cnt);
8126 * Initialize MF configuration
8129 bp->mf_ov = 0;
8130 bp->mf_mode = 0;
8131 vn = BP_E1HVN(bp);
8132 if (!CHIP_IS_E1(bp) && !BP_NOMCP(bp)) {
8133 if (SHMEM2_HAS(bp, mf_cfg_addr))
8134 bp->common.mf_cfg_base = SHMEM2_RD(bp, mf_cfg_addr);
8135 else
8136 bp->common.mf_cfg_base = bp->common.shmem_base +
8137 offsetof(struct shmem_region, func_mb) +
8138 E1H_FUNC_MAX * sizeof(struct drv_func_mb);
8139 bp->mf_config[vn] =
8140 MF_CFG_RD(bp, func_mf_config[func].config);
8142 val = (MF_CFG_RD(bp, func_mf_config[FUNC_0].e1hov_tag) &
8143 FUNC_MF_CFG_E1HOV_TAG_MASK);
8144 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT)
8145 bp->mf_mode = 1;
8146 BNX2X_DEV_INFO("%s function mode\n",
8147 IS_MF(bp) ? "multi" : "single");
8149 if (IS_MF(bp)) {
8150 val = (MF_CFG_RD(bp, func_mf_config[func].
8151 e1hov_tag) &
8152 FUNC_MF_CFG_E1HOV_TAG_MASK);
8153 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
8154 bp->mf_ov = val;
8155 BNX2X_DEV_INFO("MF OV for func %d is %d "
8156 "(0x%04x)\n",
8157 func, bp->mf_ov, bp->mf_ov);
8158 } else {
8159 BNX2X_ERROR("No valid MF OV for func %d,"
8160 " aborting\n", func);
8161 rc = -EPERM;
8163 } else {
8164 if (BP_VN(bp)) {
8165 BNX2X_ERROR("VN %d in single function mode,"
8166 " aborting\n", BP_E1HVN(bp));
8167 rc = -EPERM;
8172 /* adjust igu_sb_cnt to MF for E1x */
8173 if (CHIP_IS_E1x(bp) && IS_MF(bp))
8174 bp->igu_sb_cnt /= E1HVN_MAX;
8177 * adjust E2 sb count: to be removed once the FW supports
8178 * more than 16 L2 clients
8180 #define MAX_L2_CLIENTS 16
8181 if (CHIP_IS_E2(bp))
8182 bp->igu_sb_cnt = min_t(u8, bp->igu_sb_cnt,
8183 MAX_L2_CLIENTS / (IS_MF(bp) ? 4 : 1));
8185 if (!BP_NOMCP(bp)) {
8186 bnx2x_get_port_hwinfo(bp);
8188 bp->fw_seq =
8189 (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
8190 DRV_MSG_SEQ_NUMBER_MASK);
8191 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
8194 if (IS_MF(bp)) {
8195 val2 = MF_CFG_RD(bp, func_mf_config[func].mac_upper);
8196 val = MF_CFG_RD(bp, func_mf_config[func].mac_lower);
8197 if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
8198 (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
8199 bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
8200 bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
8201 bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
8202 bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
8203 bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
8204 bp->dev->dev_addr[5] = (u8)(val & 0xff);
8205 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
8206 ETH_ALEN);
8207 memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
8208 ETH_ALEN);
8211 return rc;
8214 if (BP_NOMCP(bp)) {
8215 /* only supposed to happen on emulation/FPGA */
8216 BNX2X_ERROR("warning: random MAC workaround active\n");
8217 random_ether_addr(bp->dev->dev_addr);
8218 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
8221 return rc;
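/* Parse the PCI VPD read-only section; on Dell-branded boards copy the
 * VENDOR0 keyword (OEM firmware version string) into bp->fw_ver.
 */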
8224 static void __devinit bnx2x_read_fwinfo(struct bnx2x *bp)
8226 int cnt, i, block_end, rodi;
8227 char vpd_data[BNX2X_VPD_LEN+1];
8228 char str_id_reg[VENDOR_ID_LEN+1];
8229 char str_id_cap[VENDOR_ID_LEN+1];
8230 u8 len;
8232 cnt = pci_read_vpd(bp->pdev, 0, BNX2X_VPD_LEN, vpd_data);
8233 memset(bp->fw_ver, 0, sizeof(bp->fw_ver));
8235 if (cnt < BNX2X_VPD_LEN)
8236 goto out_not_found;
8238 i = pci_vpd_find_tag(vpd_data, 0, BNX2X_VPD_LEN,
8239 PCI_VPD_LRDT_RO_DATA);
8240 if (i < 0)
8241 goto out_not_found;
8244 block_end = i + PCI_VPD_LRDT_TAG_SIZE +
8245 pci_vpd_lrdt_size(&vpd_data[i]);
8247 i += PCI_VPD_LRDT_TAG_SIZE;
8249 if (block_end > BNX2X_VPD_LEN)
8250 goto out_not_found;
8252 rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
8253 PCI_VPD_RO_KEYWORD_MFR_ID);
8254 if (rodi < 0)
8255 goto out_not_found;
8257 len = pci_vpd_info_field_size(&vpd_data[rodi]);
8259 if (len != VENDOR_ID_LEN)
8260 goto out_not_found;
8262 rodi += PCI_VPD_INFO_FLD_HDR_SIZE;
8264 /* vendor specific info */
8265 snprintf(str_id_reg, VENDOR_ID_LEN + 1, "%04x", PCI_VENDOR_ID_DELL);
8266 snprintf(str_id_cap, VENDOR_ID_LEN + 1, "%04X", PCI_VENDOR_ID_DELL);
8267 if (!strncmp(str_id_reg, &vpd_data[rodi], VENDOR_ID_LEN) ||
8268 !strncmp(str_id_cap, &vpd_data[rodi], VENDOR_ID_LEN)) {
8270 rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
8271 PCI_VPD_RO_KEYWORD_VENDOR0);
8272 if (rodi >= 0) {
8273 len = pci_vpd_info_field_size(&vpd_data[rodi]);
8275 rodi += PCI_VPD_INFO_FLD_HDR_SIZE;
8277 if (len < 32 && (len + rodi) <= BNX2X_VPD_LEN) {
8278 memcpy(bp->fw_ver, &vpd_data[rodi], len);
8279 bp->fw_ver[len] = ' ';
8282 return;
8284 out_not_found:
8285 return;
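/* One-time per-device software init: locks and work items, HW info, settings
 * derived from module parameters, coalescing defaults and the periodic timer.
 */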
8288 static int __devinit bnx2x_init_bp(struct bnx2x *bp)
8290 int func;
8291 int timer_interval;
8292 int rc;
8294 /* Disable interrupt handling until HW is initialized */
8295 atomic_set(&bp->intr_sem, 1);
8296 smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
8298 mutex_init(&bp->port.phy_mutex);
8299 mutex_init(&bp->fw_mb_mutex);
8300 spin_lock_init(&bp->stats_lock);
8301 #ifdef BCM_CNIC
8302 mutex_init(&bp->cnic_mutex);
8303 #endif
8305 INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
8306 INIT_DELAYED_WORK(&bp->reset_task, bnx2x_reset_task);
8308 rc = bnx2x_get_hwinfo(bp);
8310 if (!rc)
8311 rc = bnx2x_alloc_mem_bp(bp);
8313 bnx2x_read_fwinfo(bp);
8315 func = BP_FUNC(bp);
8317 /* need to reset chip if undi was active */
8318 if (!BP_NOMCP(bp))
8319 bnx2x_undi_unload(bp);
8321 if (CHIP_REV_IS_FPGA(bp))
8322 dev_err(&bp->pdev->dev, "FPGA detected\n");
8324 if (BP_NOMCP(bp) && (func == 0))
8325 dev_err(&bp->pdev->dev, "MCP disabled, "
8326 "must load devices in order!\n");
8328 /* Set multi queue mode */
8329 if ((multi_mode != ETH_RSS_MODE_DISABLED) &&
8330 ((int_mode == INT_MODE_INTx) || (int_mode == INT_MODE_MSI))) {
8331 dev_err(&bp->pdev->dev, "Multi queue disabled since the "
8332 "requested int_mode is not MSI-X\n");
8333 multi_mode = ETH_RSS_MODE_DISABLED;
8335 bp->multi_mode = multi_mode;
8336 bp->int_mode = int_mode;
8338 bp->dev->features |= NETIF_F_GRO;
8340 /* Set TPA flags */
8341 if (disable_tpa) {
8342 bp->flags &= ~TPA_ENABLE_FLAG;
8343 bp->dev->features &= ~NETIF_F_LRO;
8344 } else {
8345 bp->flags |= TPA_ENABLE_FLAG;
8346 bp->dev->features |= NETIF_F_LRO;
8348 bp->disable_tpa = disable_tpa;
8350 if (CHIP_IS_E1(bp))
8351 bp->dropless_fc = 0;
8352 else
8353 bp->dropless_fc = dropless_fc;
8355 bp->mrrs = mrrs;
8357 bp->tx_ring_size = MAX_TX_AVAIL;
8359 bp->rx_csum = 1;
8361 /* make sure that the numbers are in the right granularity */
8362 bp->tx_ticks = (50 / BNX2X_BTR) * BNX2X_BTR;
8363 bp->rx_ticks = (25 / BNX2X_BTR) * BNX2X_BTR;
8365 timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
8366 bp->current_interval = (poll ? poll : timer_interval);
8368 init_timer(&bp->timer);
8369 bp->timer.expires = jiffies + bp->current_interval;
8370 bp->timer.data = (unsigned long) bp;
8371 bp->timer.function = bnx2x_timer;
8373 return rc;
8377 /****************************************************************************
8378 * General service functions
8379 ****************************************************************************/
8381 /* called with rtnl_lock */
8382 static int bnx2x_open(struct net_device *dev)
8384 struct bnx2x *bp = netdev_priv(dev);
8386 netif_carrier_off(dev);
8388 bnx2x_set_power_state(bp, PCI_D0);
8390 if (!bnx2x_reset_is_done(bp)) {
8391 do {
8392 /* Reset the MCP mailbox sequence if there is an ongoing
8393 * recovery
8395 bp->fw_seq = 0;
8397 /* If this is the first function to load and reset-done
8398 * is still not cleared, a previous recovery most likely
8399 * did not complete. We don't check the attention state
8400 * here because it may have already been cleared by a
8401 * "common" reset, but we shall proceed with "process kill" anyway.
8403 if ((bnx2x_get_load_cnt(bp) == 0) &&
8404 bnx2x_trylock_hw_lock(bp,
8405 HW_LOCK_RESOURCE_RESERVED_08) &&
8406 (!bnx2x_leader_reset(bp))) {
8407 DP(NETIF_MSG_HW, "Recovered in open\n");
8408 break;
8411 bnx2x_set_power_state(bp, PCI_D3hot);
8413 printk(KERN_ERR "%s: Recovery flow hasn't been properly"
8414 " completed yet. Try again later. If you still see this"
8415 " message after a few retries then a power cycle is"
8416 " required.\n", bp->dev->name);
8418 return -EAGAIN;
8419 } while (0);
8422 bp->recovery_state = BNX2X_RECOVERY_DONE;
8424 return bnx2x_nic_load(bp, LOAD_OPEN);
8427 /* called with rtnl_lock */
8428 static int bnx2x_close(struct net_device *dev)
8430 struct bnx2x *bp = netdev_priv(dev);
8432 /* Unload the driver, release IRQs */
8433 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
8434 bnx2x_set_power_state(bp, PCI_D3hot);
8436 return 0;
8439 /* called with netif_tx_lock from dev_mcast.c */
8440 void bnx2x_set_rx_mode(struct net_device *dev)
8442 struct bnx2x *bp = netdev_priv(dev);
8443 u32 rx_mode = BNX2X_RX_MODE_NORMAL;
8444 int port = BP_PORT(bp);
8446 if (bp->state != BNX2X_STATE_OPEN) {
8447 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
8448 return;
8451 DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);
8453 if (dev->flags & IFF_PROMISC)
8454 rx_mode = BNX2X_RX_MODE_PROMISC;
8456 else if ((dev->flags & IFF_ALLMULTI) ||
8457 ((netdev_mc_count(dev) > BNX2X_MAX_MULTICAST) &&
8458 CHIP_IS_E1(bp)))
8459 rx_mode = BNX2X_RX_MODE_ALLMULTI;
8461 else { /* some multicasts */
8462 if (CHIP_IS_E1(bp)) {
8464 * set mc list, do not wait as wait implies sleep
8465 * and set_rx_mode can be invoked from non-sleepable
8466 * context
8468 u8 offset = (CHIP_REV_IS_SLOW(bp) ?
8469 BNX2X_MAX_EMUL_MULTI*(1 + port) :
8470 BNX2X_MAX_MULTICAST*(1 + port));
8472 bnx2x_set_e1_mc_list(bp, offset);
8473 } else { /* E1H */
8474 /* Accept one or more multicasts */
8475 struct netdev_hw_addr *ha;
8476 u32 mc_filter[MC_HASH_SIZE];
8477 u32 crc, bit, regidx;
8478 int i;
8480 memset(mc_filter, 0, 4 * MC_HASH_SIZE);
8482 netdev_for_each_mc_addr(ha, dev) {
8483 DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
8484 bnx2x_mc_addr(ha));
8486 crc = crc32c_le(0, bnx2x_mc_addr(ha),
8487 ETH_ALEN);
8488 bit = (crc >> 24) & 0xff;
8489 regidx = bit >> 5;
8490 bit &= 0x1f;
8491 mc_filter[regidx] |= (1 << bit);
8494 for (i = 0; i < MC_HASH_SIZE; i++)
8495 REG_WR(bp, MC_HASH_OFFSET(bp, i),
8496 mc_filter[i]);
8501 bp->rx_mode = rx_mode;
8502 bnx2x_set_storm_rx_mode(bp);
8506 /* called with rtnl_lock */
8507 static int bnx2x_mdio_read(struct net_device *netdev, int prtad,
8508 int devad, u16 addr)
8510 struct bnx2x *bp = netdev_priv(netdev);
8511 u16 value;
8512 int rc;
8514 DP(NETIF_MSG_LINK, "mdio_read: prtad 0x%x, devad 0x%x, addr 0x%x\n",
8515 prtad, devad, addr);
8517 /* The HW expects different devad if CL22 is used */
8518 devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
8520 bnx2x_acquire_phy_lock(bp);
8521 rc = bnx2x_phy_read(&bp->link_params, prtad, devad, addr, &value);
8522 bnx2x_release_phy_lock(bp);
8523 DP(NETIF_MSG_LINK, "mdio_read_val 0x%x rc = 0x%x\n", value, rc);
8525 if (!rc)
8526 rc = value;
8527 return rc;
8530 /* called with rtnl_lock */
8531 static int bnx2x_mdio_write(struct net_device *netdev, int prtad, int devad,
8532 u16 addr, u16 value)
8534 struct bnx2x *bp = netdev_priv(netdev);
8535 int rc;
8537 DP(NETIF_MSG_LINK, "mdio_write: prtad 0x%x, devad 0x%x, addr 0x%x,"
8538 " value 0x%x\n", prtad, devad, addr, value);
8540 /* The HW expects different devad if CL22 is used */
8541 devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
8543 bnx2x_acquire_phy_lock(bp);
8544 rc = bnx2x_phy_write(&bp->link_params, prtad, devad, addr, value);
8545 bnx2x_release_phy_lock(bp);
8546 return rc;
8549 /* called with rtnl_lock */
8550 static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
8552 struct bnx2x *bp = netdev_priv(dev);
8553 struct mii_ioctl_data *mdio = if_mii(ifr);
8555 DP(NETIF_MSG_LINK, "ioctl: phy id 0x%x, reg 0x%x, val_in 0x%x\n",
8556 mdio->phy_id, mdio->reg_num, mdio->val_in);
8558 if (!netif_running(dev))
8559 return -EAGAIN;
8561 return mdio_mii_ioctl(&bp->mdio, mdio, cmd);
8564 #ifdef CONFIG_NET_POLL_CONTROLLER
8565 static void poll_bnx2x(struct net_device *dev)
8567 struct bnx2x *bp = netdev_priv(dev);
8569 disable_irq(bp->pdev->irq);
8570 bnx2x_interrupt(bp->pdev->irq, dev);
8571 enable_irq(bp->pdev->irq);
8573 #endif
8575 static const struct net_device_ops bnx2x_netdev_ops = {
8576 .ndo_open = bnx2x_open,
8577 .ndo_stop = bnx2x_close,
8578 .ndo_start_xmit = bnx2x_start_xmit,
8579 .ndo_set_multicast_list = bnx2x_set_rx_mode,
8580 .ndo_set_mac_address = bnx2x_change_mac_addr,
8581 .ndo_validate_addr = eth_validate_addr,
8582 .ndo_do_ioctl = bnx2x_ioctl,
8583 .ndo_change_mtu = bnx2x_change_mtu,
8584 .ndo_tx_timeout = bnx2x_tx_timeout,
8585 #ifdef BCM_VLAN
8586 .ndo_vlan_rx_register = bnx2x_vlan_rx_register,
8587 #endif
8588 #ifdef CONFIG_NET_POLL_CONTROLLER
8589 .ndo_poll_controller = poll_bnx2x,
8590 #endif
8593 static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
8594 struct net_device *dev)
8596 struct bnx2x *bp;
8597 int rc;
8599 SET_NETDEV_DEV(dev, &pdev->dev);
8600 bp = netdev_priv(dev);
8602 bp->dev = dev;
8603 bp->pdev = pdev;
8604 bp->flags = 0;
8605 bp->pf_num = PCI_FUNC(pdev->devfn);
8607 rc = pci_enable_device(pdev);
8608 if (rc) {
8609 dev_err(&bp->pdev->dev,
8610 "Cannot enable PCI device, aborting\n");
8611 goto err_out;
8614 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
8615 dev_err(&bp->pdev->dev,
8616 "Cannot find PCI device base address, aborting\n");
8617 rc = -ENODEV;
8618 goto err_out_disable;
8621 if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
8622 dev_err(&bp->pdev->dev, "Cannot find second PCI device"
8623 " base address, aborting\n");
8624 rc = -ENODEV;
8625 goto err_out_disable;
8628 if (atomic_read(&pdev->enable_cnt) == 1) {
8629 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
8630 if (rc) {
8631 dev_err(&bp->pdev->dev,
8632 "Cannot obtain PCI resources, aborting\n");
8633 goto err_out_disable;
8636 pci_set_master(pdev);
8637 pci_save_state(pdev);
8640 bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
8641 if (bp->pm_cap == 0) {
8642 dev_err(&bp->pdev->dev,
8643 "Cannot find power management capability, aborting\n");
8644 rc = -EIO;
8645 goto err_out_release;
8648 bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
8649 if (bp->pcie_cap == 0) {
8650 dev_err(&bp->pdev->dev,
8651 "Cannot find PCI Express capability, aborting\n");
8652 rc = -EIO;
8653 goto err_out_release;
8656 if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) == 0) {
8657 bp->flags |= USING_DAC_FLAG;
8658 if (dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64)) != 0) {
8659 dev_err(&bp->pdev->dev, "dma_set_coherent_mask"
8660 " failed, aborting\n");
8661 rc = -EIO;
8662 goto err_out_release;
8665 } else if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)) != 0) {
8666 dev_err(&bp->pdev->dev,
8667 "System does not support DMA, aborting\n");
8668 rc = -EIO;
8669 goto err_out_release;
8672 dev->mem_start = pci_resource_start(pdev, 0);
8673 dev->base_addr = dev->mem_start;
8674 dev->mem_end = pci_resource_end(pdev, 0);
8676 dev->irq = pdev->irq;
8678 bp->regview = pci_ioremap_bar(pdev, 0);
8679 if (!bp->regview) {
8680 dev_err(&bp->pdev->dev,
8681 "Cannot map register space, aborting\n");
8682 rc = -ENOMEM;
8683 goto err_out_release;
8686 bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
8687 min_t(u64, BNX2X_DB_SIZE(bp),
8688 pci_resource_len(pdev, 2)));
8689 if (!bp->doorbells) {
8690 dev_err(&bp->pdev->dev,
8691 "Cannot map doorbell space, aborting\n");
8692 rc = -ENOMEM;
8693 goto err_out_unmap;
8696 bnx2x_set_power_state(bp, PCI_D0);
8698 /* clean indirect addresses */
8699 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
8700 PCICFG_VENDOR_ID_OFFSET);
8701 REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
8702 REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
8703 REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
8704 REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);
8706 /* Reset the load counter */
8707 bnx2x_clear_load_cnt(bp);
8709 dev->watchdog_timeo = TX_TIMEOUT;
8711 dev->netdev_ops = &bnx2x_netdev_ops;
8712 bnx2x_set_ethtool_ops(dev);
8713 dev->features |= NETIF_F_SG;
8714 dev->features |= NETIF_F_HW_CSUM;
8715 if (bp->flags & USING_DAC_FLAG)
8716 dev->features |= NETIF_F_HIGHDMA;
8717 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
8718 dev->features |= NETIF_F_TSO6;
8719 #ifdef BCM_VLAN
8720 dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
8721 bp->flags |= (HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
8723 dev->vlan_features |= NETIF_F_SG;
8724 dev->vlan_features |= NETIF_F_HW_CSUM;
8725 if (bp->flags & USING_DAC_FLAG)
8726 dev->vlan_features |= NETIF_F_HIGHDMA;
8727 dev->vlan_features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
8728 dev->vlan_features |= NETIF_F_TSO6;
8729 #endif
8731 /* get_port_hwinfo() will set prtad and mmds properly */
8732 bp->mdio.prtad = MDIO_PRTAD_NONE;
8733 bp->mdio.mmds = 0;
8734 bp->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
8735 bp->mdio.dev = dev;
8736 bp->mdio.mdio_read = bnx2x_mdio_read;
8737 bp->mdio.mdio_write = bnx2x_mdio_write;
8739 return 0;
8741 err_out_unmap:
8742 if (bp->regview) {
8743 iounmap(bp->regview);
8744 bp->regview = NULL;
8746 if (bp->doorbells) {
8747 iounmap(bp->doorbells);
8748 bp->doorbells = NULL;
8751 err_out_release:
8752 if (atomic_read(&pdev->enable_cnt) == 1)
8753 pci_release_regions(pdev);
8755 err_out_disable:
8756 pci_disable_device(pdev);
8757 pci_set_drvdata(pdev, NULL);
8759 err_out:
8760 return rc;
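/* Report the negotiated PCIe link width and speed (1 = 2.5GT/s, 2 = 5GT/s)
 * as read from the PCICFG link control register.
 */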
8763 static void __devinit bnx2x_get_pcie_width_speed(struct bnx2x *bp,
8764 int *width, int *speed)
8766 u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
8768 *width = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
8770 /* return value of 1=2.5GHz 2=5GHz */
8771 *speed = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
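/* Validate the loaded firmware file: every section must lie within the blob,
 * all init_ops offsets must be in range, and the FW version must match the
 * version this driver was built against.
 */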
8774 static int bnx2x_check_firmware(struct bnx2x *bp)
8776 const struct firmware *firmware = bp->firmware;
8777 struct bnx2x_fw_file_hdr *fw_hdr;
8778 struct bnx2x_fw_file_section *sections;
8779 u32 offset, len, num_ops;
8780 u16 *ops_offsets;
8781 int i;
8782 const u8 *fw_ver;
8784 if (firmware->size < sizeof(struct bnx2x_fw_file_hdr))
8785 return -EINVAL;
8787 fw_hdr = (struct bnx2x_fw_file_hdr *)firmware->data;
8788 sections = (struct bnx2x_fw_file_section *)fw_hdr;
8790 /* Make sure none of the offsets and sizes make us read beyond
8791 * the end of the firmware data */
8792 for (i = 0; i < sizeof(*fw_hdr) / sizeof(*sections); i++) {
8793 offset = be32_to_cpu(sections[i].offset);
8794 len = be32_to_cpu(sections[i].len);
8795 if (offset + len > firmware->size) {
8796 dev_err(&bp->pdev->dev,
8797 "Section %d length is out of bounds\n", i);
8798 return -EINVAL;
8802 /* Likewise for the init_ops offsets */
8803 offset = be32_to_cpu(fw_hdr->init_ops_offsets.offset);
8804 ops_offsets = (u16 *)(firmware->data + offset);
8805 num_ops = be32_to_cpu(fw_hdr->init_ops.len) / sizeof(struct raw_op);
8807 for (i = 0; i < be32_to_cpu(fw_hdr->init_ops_offsets.len) / 2; i++) {
8808 if (be16_to_cpu(ops_offsets[i]) > num_ops) {
8809 dev_err(&bp->pdev->dev,
8810 "Section offset %d is out of bounds\n", i);
8811 return -EINVAL;
8815 /* Check FW version */
8816 offset = be32_to_cpu(fw_hdr->fw_version.offset);
8817 fw_ver = firmware->data + offset;
8818 if ((fw_ver[0] != BCM_5710_FW_MAJOR_VERSION) ||
8819 (fw_ver[1] != BCM_5710_FW_MINOR_VERSION) ||
8820 (fw_ver[2] != BCM_5710_FW_REVISION_VERSION) ||
8821 (fw_ver[3] != BCM_5710_FW_ENGINEERING_VERSION)) {
8822 dev_err(&bp->pdev->dev,
8823 "Bad FW version:%d.%d.%d.%d. Should be %d.%d.%d.%d\n",
8824 fw_ver[0], fw_ver[1], fw_ver[2],
8825 fw_ver[3], BCM_5710_FW_MAJOR_VERSION,
8826 BCM_5710_FW_MINOR_VERSION,
8827 BCM_5710_FW_REVISION_VERSION,
8828 BCM_5710_FW_ENGINEERING_VERSION);
8829 return -EINVAL;
8832 return 0;
8835 static inline void be32_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
8837 const __be32 *source = (const __be32 *)_source;
8838 u32 *target = (u32 *)_target;
8839 u32 i;
8841 for (i = 0; i < n/4; i++)
8842 target[i] = be32_to_cpu(source[i]);
8846 Ops array is stored in the following format:
8847 {op(8bit), offset(24bit, big endian), data(32bit, big endian)}
8849 static inline void bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n)
8851 const __be32 *source = (const __be32 *)_source;
8852 struct raw_op *target = (struct raw_op *)_target;
8853 u32 i, j, tmp;
8855 for (i = 0, j = 0; i < n/8; i++, j += 2) {
8856 tmp = be32_to_cpu(source[j]);
8857 target[i].op = (tmp >> 24) & 0xff;
8858 target[i].offset = tmp & 0xffffff;
8859 target[i].raw_data = be32_to_cpu(source[j + 1]);
8864 * IRO array is stored in the following format:
8865 * {base(24bit), m1(16bit), m2(16bit), m3(16bit), size(16bit) }
8867 static inline void bnx2x_prep_iro(const u8 *_source, u8 *_target, u32 n)
8869 const __be32 *source = (const __be32 *)_source;
8870 struct iro *target = (struct iro *)_target;
8871 u32 i, j, tmp;
8873 for (i = 0, j = 0; i < n/sizeof(struct iro); i++) {
8874 target[i].base = be32_to_cpu(source[j]);
8875 j++;
8876 tmp = be32_to_cpu(source[j]);
8877 target[i].m1 = (tmp >> 16) & 0xffff;
8878 target[i].m2 = tmp & 0xffff;
8879 j++;
8880 tmp = be32_to_cpu(source[j]);
8881 target[i].m3 = (tmp >> 16) & 0xffff;
8882 target[i].size = tmp & 0xffff;
8883 j++;
8887 static inline void be16_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
8889 const __be16 *source = (const __be16 *)_source;
8890 u16 *target = (u16 *)_target;
8891 u32 i;
8893 for (i = 0; i < n/2; i++)
8894 target[i] = be16_to_cpu(source[i]);
8897 #define BNX2X_ALLOC_AND_SET(arr, lbl, func) \
8898 do { \
8899 u32 len = be32_to_cpu(fw_hdr->arr.len); \
8900 bp->arr = kmalloc(len, GFP_KERNEL); \
8901 if (!bp->arr) { \
8902 pr_err("Failed to allocate %d bytes for "#arr"\n", len); \
8903 goto lbl; \
8905 func(bp->firmware->data + be32_to_cpu(fw_hdr->arr.offset), \
8906 (u8 *)bp->arr, len); \
8907 } while (0)
8909 int bnx2x_init_firmware(struct bnx2x *bp)
8911 const char *fw_file_name;
8912 struct bnx2x_fw_file_hdr *fw_hdr;
8913 int rc;
8915 if (CHIP_IS_E1(bp))
8916 fw_file_name = FW_FILE_NAME_E1;
8917 else if (CHIP_IS_E1H(bp))
8918 fw_file_name = FW_FILE_NAME_E1H;
8919 else if (CHIP_IS_E2(bp))
8920 fw_file_name = FW_FILE_NAME_E2;
8921 else {
8922 BNX2X_ERR("Unsupported chip revision\n");
8923 return -EINVAL;
8926 BNX2X_DEV_INFO("Loading %s\n", fw_file_name);
8928 rc = request_firmware(&bp->firmware, fw_file_name, &bp->pdev->dev);
8929 if (rc) {
8930 BNX2X_ERR("Can't load firmware file %s\n", fw_file_name);
8931 goto request_firmware_exit;
8934 rc = bnx2x_check_firmware(bp);
8935 if (rc) {
8936 BNX2X_ERR("Corrupt firmware file %s\n", fw_file_name);
8937 goto request_firmware_exit;
8940 fw_hdr = (struct bnx2x_fw_file_hdr *)bp->firmware->data;
8942 /* Initialize the pointers to the init arrays */
8943 /* Blob */
8944 BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit, be32_to_cpu_n);
8946 /* Opcodes */
8947 BNX2X_ALLOC_AND_SET(init_ops, init_ops_alloc_err, bnx2x_prep_ops);
8949 /* Offsets */
8950 BNX2X_ALLOC_AND_SET(init_ops_offsets, init_offsets_alloc_err,
8951 be16_to_cpu_n);
8953 /* STORMs firmware */
8954 INIT_TSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
8955 be32_to_cpu(fw_hdr->tsem_int_table_data.offset);
8956 INIT_TSEM_PRAM_DATA(bp) = bp->firmware->data +
8957 be32_to_cpu(fw_hdr->tsem_pram_data.offset);
8958 INIT_USEM_INT_TABLE_DATA(bp) = bp->firmware->data +
8959 be32_to_cpu(fw_hdr->usem_int_table_data.offset);
8960 INIT_USEM_PRAM_DATA(bp) = bp->firmware->data +
8961 be32_to_cpu(fw_hdr->usem_pram_data.offset);
8962 INIT_XSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
8963 be32_to_cpu(fw_hdr->xsem_int_table_data.offset);
8964 INIT_XSEM_PRAM_DATA(bp) = bp->firmware->data +
8965 be32_to_cpu(fw_hdr->xsem_pram_data.offset);
8966 INIT_CSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
8967 be32_to_cpu(fw_hdr->csem_int_table_data.offset);
8968 INIT_CSEM_PRAM_DATA(bp) = bp->firmware->data +
8969 be32_to_cpu(fw_hdr->csem_pram_data.offset);
8970 /* IRO */
8971 BNX2X_ALLOC_AND_SET(iro_arr, iro_alloc_err, bnx2x_prep_iro);
8973 return 0;
8975 iro_alloc_err:
8976 kfree(bp->init_ops_offsets);
8977 init_offsets_alloc_err:
8978 kfree(bp->init_ops);
8979 init_ops_alloc_err:
8980 kfree(bp->init_data);
8981 request_firmware_exit:
8982 release_firmware(bp->firmware);
8984 return rc;
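/* Round the L2 (plus optional CNIC) connection count up to the QM CID
 * allocation granularity.
 */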
8987 static inline int bnx2x_set_qm_cid_count(struct bnx2x *bp, int l2_cid_count)
8989 int cid_count = L2_FP_COUNT(l2_cid_count);
8991 #ifdef BCM_CNIC
8992 cid_count += CNIC_CID_MAX;
8993 #endif
8994 return roundup(cid_count, QM_CID_ROUND);
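/* PCI probe entry point: allocate the net device, map the BARs, gather HW
 * info, register the netdev, then pick the interrupt mode and set up the
 * NAPI objects.
 */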
8996 static int __devinit bnx2x_init_one(struct pci_dev *pdev,
8997 const struct pci_device_id *ent)
8999 struct net_device *dev = NULL;
9000 struct bnx2x *bp;
9001 int pcie_width, pcie_speed;
9002 int rc, cid_count;
9004 switch (ent->driver_data) {
9005 case BCM57710:
9006 case BCM57711:
9007 case BCM57711E:
9008 cid_count = FP_SB_MAX_E1x;
9009 break;
9011 case BCM57712:
9012 case BCM57712E:
9013 cid_count = FP_SB_MAX_E2;
9014 break;
9016 default:
9017 pr_err("Unknown board_type (%ld), aborting\n",
9018 ent->driver_data);
9019 return -ENODEV;
9022 cid_count += CNIC_CONTEXT_USE;
9023 /* dev zeroed in init_etherdev */
9024 dev = alloc_etherdev_mq(sizeof(*bp), cid_count);
9025 if (!dev) {
9026 dev_err(&pdev->dev, "Cannot allocate net device\n");
9027 return -ENOMEM;
9030 bp = netdev_priv(dev);
9031 bp->msg_enable = debug;
9033 pci_set_drvdata(pdev, dev);
9035 bp->l2_cid_count = cid_count;
9037 rc = bnx2x_init_dev(pdev, dev);
9038 if (rc < 0) {
9039 free_netdev(dev);
9040 return rc;
9043 rc = bnx2x_init_bp(bp);
9044 if (rc)
9045 goto init_one_exit;
9047 /* calc qm_cid_count */
9048 bp->qm_cid_count = bnx2x_set_qm_cid_count(bp, cid_count);
9050 rc = register_netdev(dev);
9051 if (rc) {
9052 dev_err(&pdev->dev, "Cannot register net device\n");
9053 goto init_one_exit;
9056 /* Configure interrupt mode: try to enable MSI-X/MSI if
9057 * needed, and set bp->num_queues appropriately.
9059 bnx2x_set_int_mode(bp);
9061 /* Add all NAPI objects */
9062 bnx2x_add_all_napi(bp);
9064 bnx2x_get_pcie_width_speed(bp, &pcie_width, &pcie_speed);
9066 netdev_info(dev, "%s (%c%d) PCI-E x%d %s found at mem %lx,"
9067 " IRQ %d, ", board_info[ent->driver_data].name,
9068 (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
9069 pcie_width,
9070 ((!CHIP_IS_E2(bp) && pcie_speed == 2) ||
9071 (CHIP_IS_E2(bp) && pcie_speed == 1)) ?
9072 "5GHz (Gen2)" : "2.5GHz",
9073 dev->base_addr, bp->pdev->irq);
9074 pr_cont("node addr %pM\n", dev->dev_addr);
9076 return 0;
9078 init_one_exit:
9079 if (bp->regview)
9080 iounmap(bp->regview);
9082 if (bp->doorbells)
9083 iounmap(bp->doorbells);
9085 free_netdev(dev);
9087 if (atomic_read(&pdev->enable_cnt) == 1)
9088 pci_release_regions(pdev);
9090 pci_disable_device(pdev);
9091 pci_set_drvdata(pdev, NULL);
9093 return rc;
9096 static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
9098 struct net_device *dev = pci_get_drvdata(pdev);
9099 struct bnx2x *bp;
9101 if (!dev) {
9102 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
9103 return;
9105 bp = netdev_priv(dev);
9107 unregister_netdev(dev);
9109 /* Delete all NAPI objects */
9110 bnx2x_del_all_napi(bp);
9112 /* Disable MSI/MSI-X */
9113 bnx2x_disable_msi(bp);
9114 /* Make sure RESET task is not scheduled before continuing */
9115 cancel_delayed_work_sync(&bp->reset_task);
9117 if (bp->regview)
9118 iounmap(bp->regview);
9120 if (bp->doorbells)
9121 iounmap(bp->doorbells);
9123 bnx2x_free_mem_bp(bp);
9125 free_netdev(dev);
9127 if (atomic_read(&pdev->enable_cnt) == 1)
9128 pci_release_regions(pdev);
9130 pci_disable_device(pdev);
9131 pci_set_drvdata(pdev, NULL);
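/* Minimal teardown used by the PCI error handlers: stop the datapath,
 * release IRQs and free driver memory.
 */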
9134 static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
9136 int i;
9138 bp->state = BNX2X_STATE_ERROR;
9140 bp->rx_mode = BNX2X_RX_MODE_NONE;
9142 bnx2x_netif_stop(bp, 0);
9143 netif_carrier_off(bp->dev);
9145 del_timer_sync(&bp->timer);
9146 bp->stats_state = STATS_STATE_DISABLED;
9147 DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
9149 /* Release IRQs */
9150 bnx2x_free_irq(bp);
9152 /* Free SKBs, SGEs, TPA pool and driver internals */
9153 bnx2x_free_skbs(bp);
9155 for_each_queue(bp, i)
9156 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
9158 bnx2x_free_mem(bp);
9160 bp->state = BNX2X_STATE_CLOSED;
9162 return 0;
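/* Re-read the shmem base and MCP state after a slot reset so that a
 * subsequent reload can talk to the MCP again.
 */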
9165 static void bnx2x_eeh_recover(struct bnx2x *bp)
9167 u32 val;
9169 mutex_init(&bp->port.phy_mutex);
9171 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
9172 bp->link_params.shmem_base = bp->common.shmem_base;
9173 BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);
9175 if (!bp->common.shmem_base ||
9176 (bp->common.shmem_base < 0xA0000) ||
9177 (bp->common.shmem_base >= 0xC0000)) {
9178 BNX2X_DEV_INFO("MCP not active\n");
9179 bp->flags |= NO_MCP_FLAG;
9180 return;
9183 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
9184 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
9185 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
9186 BNX2X_ERR("BAD MCP validity signature\n");
9188 if (!BP_NOMCP(bp)) {
9189 bp->fw_seq =
9190 (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
9191 DRV_MSG_SEQ_NUMBER_MASK);
9192 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
9197 * bnx2x_io_error_detected - called when PCI error is detected
9198 * @pdev: Pointer to PCI device
9199 * @state: The current pci connection state
9201 * This function is called after a PCI bus error affecting
9202 * this device has been detected.
9204 static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
9205 pci_channel_state_t state)
9207 struct net_device *dev = pci_get_drvdata(pdev);
9208 struct bnx2x *bp = netdev_priv(dev);
9210 rtnl_lock();
9212 netif_device_detach(dev);
9214 if (state == pci_channel_io_perm_failure) {
9215 rtnl_unlock();
9216 return PCI_ERS_RESULT_DISCONNECT;
9219 if (netif_running(dev))
9220 bnx2x_eeh_nic_unload(bp);
9222 pci_disable_device(pdev);
9224 rtnl_unlock();
9226 /* Request a slot reset */
9227 return PCI_ERS_RESULT_NEED_RESET;
9231 * bnx2x_io_slot_reset - called after the PCI bus has been reset
9232 * @pdev: Pointer to PCI device
9234 * Restart the card from scratch, as if from a cold boot.
9236 static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
9238 struct net_device *dev = pci_get_drvdata(pdev);
9239 struct bnx2x *bp = netdev_priv(dev);
9241 rtnl_lock();
9243 if (pci_enable_device(pdev)) {
9244 dev_err(&pdev->dev,
9245 "Cannot re-enable PCI device after reset\n");
9246 rtnl_unlock();
9247 return PCI_ERS_RESULT_DISCONNECT;
9250 pci_set_master(pdev);
9251 pci_restore_state(pdev);
9253 if (netif_running(dev))
9254 bnx2x_set_power_state(bp, PCI_D0);
9256 rtnl_unlock();
9258 return PCI_ERS_RESULT_RECOVERED;
9262 * bnx2x_io_resume - called when traffic can start flowing again
9263 * @pdev: Pointer to PCI device
9265 * This callback is called when the error recovery driver tells us that
9266 * it's OK to resume normal operation.
9268 static void bnx2x_io_resume(struct pci_dev *pdev)
9270 struct net_device *dev = pci_get_drvdata(pdev);
9271 struct bnx2x *bp = netdev_priv(dev);
9273 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
9274 printk(KERN_ERR "Handling parity error recovery. "
9275 "Try again later\n");
9276 return;
9279 rtnl_lock();
9281 bnx2x_eeh_recover(bp);
9283 if (netif_running(dev))
9284 bnx2x_nic_load(bp, LOAD_NORMAL);
9286 netif_device_attach(dev);
9288 rtnl_unlock();
9291 static struct pci_error_handlers bnx2x_err_handler = {
9292 .error_detected = bnx2x_io_error_detected,
9293 .slot_reset = bnx2x_io_slot_reset,
9294 .resume = bnx2x_io_resume,
9297 static struct pci_driver bnx2x_pci_driver = {
9298 .name = DRV_MODULE_NAME,
9299 .id_table = bnx2x_pci_tbl,
9300 .probe = bnx2x_init_one,
9301 .remove = __devexit_p(bnx2x_remove_one),
9302 .suspend = bnx2x_suspend,
9303 .resume = bnx2x_resume,
9304 .err_handler = &bnx2x_err_handler,
9307 static int __init bnx2x_init(void)
9309 int ret;
9311 pr_info("%s", version);
9313 bnx2x_wq = create_singlethread_workqueue("bnx2x");
9314 if (bnx2x_wq == NULL) {
9315 pr_err("Cannot create workqueue\n");
9316 return -ENOMEM;
9319 ret = pci_register_driver(&bnx2x_pci_driver);
9320 if (ret) {
9321 pr_err("Cannot register driver\n");
9322 destroy_workqueue(bnx2x_wq);
9324 return ret;
9327 static void __exit bnx2x_cleanup(void)
9329 pci_unregister_driver(&bnx2x_pci_driver);
9331 destroy_workqueue(bnx2x_wq);
9334 module_init(bnx2x_init);
9335 module_exit(bnx2x_cleanup);
9337 #ifdef BCM_CNIC
9339 /* count denotes the number of new completions we have seen */
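/* Move queued CNIC KWQEs onto the slow-path queue as room frees up. */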
9340 static void bnx2x_cnic_sp_post(struct bnx2x *bp, int count)
9342 struct eth_spe *spe;
9344 #ifdef BNX2X_STOP_ON_ERROR
9345 if (unlikely(bp->panic))
9346 return;
9347 #endif
9349 spin_lock_bh(&bp->spq_lock);
9350 bp->cnic_spq_pending -= count;
9352 for (; bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending;
9353 bp->cnic_spq_pending++) {
9355 if (!bp->cnic_kwq_pending)
9356 break;
9358 spe = bnx2x_sp_get_next(bp);
9359 *spe = *bp->cnic_kwq_cons;
9361 bp->cnic_kwq_pending--;
9363 DP(NETIF_MSG_TIMER, "pending on SPQ %d, on KWQ %d count %d\n",
9364 bp->cnic_spq_pending, bp->cnic_kwq_pending, count);
9366 if (bp->cnic_kwq_cons == bp->cnic_kwq_last)
9367 bp->cnic_kwq_cons = bp->cnic_kwq;
9368 else
9369 bp->cnic_kwq_cons++;
9371 bnx2x_sp_prod_update(bp);
9372 spin_unlock_bh(&bp->spq_lock);
9375 static int bnx2x_cnic_sp_queue(struct net_device *dev,
9376 struct kwqe_16 *kwqes[], u32 count)
9378 struct bnx2x *bp = netdev_priv(dev);
9379 int i;
9381 #ifdef BNX2X_STOP_ON_ERROR
9382 if (unlikely(bp->panic))
9383 return -EIO;
9384 #endif
9386 spin_lock_bh(&bp->spq_lock);
9388 for (i = 0; i < count; i++) {
9389 struct eth_spe *spe = (struct eth_spe *)kwqes[i];
9391 if (bp->cnic_kwq_pending == MAX_SP_DESC_CNT)
9392 break;
9394 *bp->cnic_kwq_prod = *spe;
9396 bp->cnic_kwq_pending++;
9398 DP(NETIF_MSG_TIMER, "L5 SPQE %x %x %x:%x pos %d\n",
9399 spe->hdr.conn_and_cmd_data, spe->hdr.type,
9400 spe->data.update_data_addr.hi,
9401 spe->data.update_data_addr.lo,
9402 bp->cnic_kwq_pending);
9404 if (bp->cnic_kwq_prod == bp->cnic_kwq_last)
9405 bp->cnic_kwq_prod = bp->cnic_kwq;
9406 else
9407 bp->cnic_kwq_prod++;
9410 spin_unlock_bh(&bp->spq_lock);
9412 if (bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending)
9413 bnx2x_cnic_sp_post(bp, 0);
9415 return i;
9418 static int bnx2x_cnic_ctl_send(struct bnx2x *bp, struct cnic_ctl_info *ctl)
9420 struct cnic_ops *c_ops;
9421 int rc = 0;
9423 mutex_lock(&bp->cnic_mutex);
9424 c_ops = bp->cnic_ops;
9425 if (c_ops)
9426 rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
9427 mutex_unlock(&bp->cnic_mutex);
9429 return rc;
9432 static int bnx2x_cnic_ctl_send_bh(struct bnx2x *bp, struct cnic_ctl_info *ctl)
9434 struct cnic_ops *c_ops;
9435 int rc = 0;
9437 rcu_read_lock();
9438 c_ops = rcu_dereference(bp->cnic_ops);
9439 if (c_ops)
9440 rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
9441 rcu_read_unlock();
9443 return rc;
9447 * for commands that have no data
9449 int bnx2x_cnic_notify(struct bnx2x *bp, int cmd)
9451 struct cnic_ctl_info ctl = {0};
9453 ctl.cmd = cmd;
9455 return bnx2x_cnic_ctl_send(bp, &ctl);
9458 static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid)
9460 struct cnic_ctl_info ctl;
9462 /* first we tell CNIC and only then we count this as a completion */
9463 ctl.cmd = CNIC_CTL_COMPLETION_CMD;
9464 ctl.data.comp.cid = cid;
9466 bnx2x_cnic_ctl_send_bh(bp, &ctl);
9467 bnx2x_cnic_sp_post(bp, 1);
9470 static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)
9472 struct bnx2x *bp = netdev_priv(dev);
9473 int rc = 0;
9475 switch (ctl->cmd) {
9476 case DRV_CTL_CTXTBL_WR_CMD: {
9477 u32 index = ctl->data.io.offset;
9478 dma_addr_t addr = ctl->data.io.dma_addr;
9480 bnx2x_ilt_wr(bp, index, addr);
9481 break;
9484 case DRV_CTL_COMPLETION_CMD: {
9485 int count = ctl->data.comp.comp_count;
9487 bnx2x_cnic_sp_post(bp, count);
9488 break;
9491 /* rtnl_lock is held. */
9492 case DRV_CTL_START_L2_CMD: {
9493 u32 cli = ctl->data.ring.client_id;
9495 /* Set iSCSI MAC address */
9496 bnx2x_set_iscsi_eth_mac_addr(bp, 1);
9498 mmiowb();
9499 barrier();
9501 /* Start accepting on the iSCSI L2 ring. Accept all multicasts
9502 * because it's the only way for the UIO Client to accept
9503 * multicasts: in non-promiscuous mode only one Client per
9504 * function (the leading one in our case) will receive
9505 * multicast packets.
9507 bnx2x_rxq_set_mac_filters(bp, cli,
9508 BNX2X_ACCEPT_UNICAST |
9509 BNX2X_ACCEPT_BROADCAST |
9510 BNX2X_ACCEPT_ALL_MULTICAST);
9511 storm_memset_mac_filters(bp, &bp->mac_filters, BP_FUNC(bp));
9513 break;
9516 /* rtnl_lock is held. */
9517 case DRV_CTL_STOP_L2_CMD: {
9518 u32 cli = ctl->data.ring.client_id;
9520 /* Stop accepting on iSCSI L2 ring */
9521 bnx2x_rxq_set_mac_filters(bp, cli, BNX2X_ACCEPT_NONE);
9522 storm_memset_mac_filters(bp, &bp->mac_filters, BP_FUNC(bp));
9524 mmiowb();
9525 barrier();
9527 /* Unset iSCSI L2 MAC */
9528 bnx2x_set_iscsi_eth_mac_addr(bp, 0);
9529 break;
9532 default:
9533 BNX2X_ERR("unknown command %x\n", ctl->cmd);
9534 rc = -EINVAL;
9537 return rc;
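/* Tell CNIC which IRQ vector and status blocks to use, depending on whether
 * MSI-X is active.
 */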
9540 void bnx2x_setup_cnic_irq_info(struct bnx2x *bp)
9542 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
9544 if (bp->flags & USING_MSIX_FLAG) {
9545 cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
9546 cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
9547 cp->irq_arr[0].vector = bp->msix_table[1].vector;
9548 } else {
9549 cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
9550 cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
9552 if (CHIP_IS_E2(bp))
9553 cp->irq_arr[0].status_blk = (void *)bp->cnic_sb.e2_sb;
9554 else
9555 cp->irq_arr[0].status_blk = (void *)bp->cnic_sb.e1x_sb;
9557 cp->irq_arr[0].status_blk_num = CNIC_SB_ID(bp);
9558 cp->irq_arr[0].status_blk_num2 = CNIC_IGU_SB_ID(bp);
9559 cp->irq_arr[1].status_blk = bp->def_status_blk;
9560 cp->irq_arr[1].status_blk_num = DEF_SB_ID;
9561 cp->irq_arr[1].status_blk_num2 = DEF_SB_IGU_ID;
9563 cp->num_irq = 2;
9566 static int bnx2x_register_cnic(struct net_device *dev, struct cnic_ops *ops,
9567 void *data)
9569 struct bnx2x *bp = netdev_priv(dev);
9570 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
9572 if (ops == NULL)
9573 return -EINVAL;
9575 if (atomic_read(&bp->intr_sem) != 0)
9576 return -EBUSY;
9578 bp->cnic_kwq = kzalloc(PAGE_SIZE, GFP_KERNEL);
9579 if (!bp->cnic_kwq)
9580 return -ENOMEM;
9582 bp->cnic_kwq_cons = bp->cnic_kwq;
9583 bp->cnic_kwq_prod = bp->cnic_kwq;
9584 bp->cnic_kwq_last = bp->cnic_kwq + MAX_SP_DESC_CNT;
9586 bp->cnic_spq_pending = 0;
9587 bp->cnic_kwq_pending = 0;
9589 bp->cnic_data = data;
9591 cp->num_irq = 0;
9592 cp->drv_state = CNIC_DRV_STATE_REGD;
9593 cp->iro_arr = bp->iro_arr;
9595 bnx2x_init_sb(bp, bp->cnic_sb_mapping,
9596 BNX2X_VF_ID_INVALID, false,
9597 CNIC_SB_ID(bp), CNIC_IGU_SB_ID(bp));
9599 bnx2x_setup_cnic_irq_info(bp);
9600 bnx2x_set_iscsi_eth_mac_addr(bp, 1);
9601 bp->cnic_flags |= BNX2X_CNIC_FLAG_MAC_SET;
9602 rcu_assign_pointer(bp->cnic_ops, ops);
9604 return 0;
9607 static int bnx2x_unregister_cnic(struct net_device *dev)
9609 struct bnx2x *bp = netdev_priv(dev);
9610 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
9612 mutex_lock(&bp->cnic_mutex);
9613 if (bp->cnic_flags & BNX2X_CNIC_FLAG_MAC_SET) {
9614 bp->cnic_flags &= ~BNX2X_CNIC_FLAG_MAC_SET;
9615 bnx2x_set_iscsi_eth_mac_addr(bp, 0);
9617 cp->drv_state = 0;
9618 rcu_assign_pointer(bp->cnic_ops, NULL);
9619 mutex_unlock(&bp->cnic_mutex);
9620 synchronize_rcu();
9621 kfree(bp->cnic_kwq);
9622 bp->cnic_kwq = NULL;
9624 return 0;
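/* Fill and return the cnic_eth_dev structure through which the CNIC module
 * attaches to this device (BAR mappings, context table layout, callbacks).
 */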
9627 struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev)
9629 struct bnx2x *bp = netdev_priv(dev);
9630 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
9632 cp->drv_owner = THIS_MODULE;
9633 cp->chip_id = CHIP_ID(bp);
9634 cp->pdev = bp->pdev;
9635 cp->io_base = bp->regview;
9636 cp->io_base2 = bp->doorbells;
9637 cp->max_kwqe_pending = 8;
9638 cp->ctx_blk_size = CDU_ILT_PAGE_SZ;
9639 cp->ctx_tbl_offset = FUNC_ILT_BASE(BP_FUNC(bp)) + 1;
9640 cp->ctx_tbl_len = CNIC_ILT_LINES;
9641 cp->starting_cid = BCM_CNIC_CID_START;
9642 cp->drv_submit_kwqes_16 = bnx2x_cnic_sp_queue;
9643 cp->drv_ctl = bnx2x_drv_ctl;
9644 cp->drv_register_cnic = bnx2x_register_cnic;
9645 cp->drv_unregister_cnic = bnx2x_unregister_cnic;
9647 return cp;
9649 EXPORT_SYMBOL(bnx2x_cnic_probe);
9651 #endif /* BCM_CNIC */