[SCSI] bnx2i: Added support for the 57712(E) devices
[linux-2.6/btrfs-unstable.git] drivers/net/bnx2x/bnx2x_main.c
1 /* bnx2x_main.c: Broadcom Everest network driver.
3 * Copyright (c) 2007-2010 Broadcom Corporation
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
9 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
10 * Written by: Eliezer Tamir
11 * Based on code from Michael Chan's bnx2 driver
12 * UDP CSUM errata workaround by Arik Gendelman
13 * Slowpath and fastpath rework by Vladislav Zolotarov
14 * Statistics and Link management by Yitchak Gertner
18 #include <linux/module.h>
19 #include <linux/moduleparam.h>
20 #include <linux/kernel.h>
21 #include <linux/device.h> /* for dev_info() */
22 #include <linux/timer.h>
23 #include <linux/errno.h>
24 #include <linux/ioport.h>
25 #include <linux/slab.h>
26 #include <linux/interrupt.h>
27 #include <linux/pci.h>
28 #include <linux/init.h>
29 #include <linux/netdevice.h>
30 #include <linux/etherdevice.h>
31 #include <linux/skbuff.h>
32 #include <linux/dma-mapping.h>
33 #include <linux/bitops.h>
34 #include <linux/irq.h>
35 #include <linux/delay.h>
36 #include <asm/byteorder.h>
37 #include <linux/time.h>
38 #include <linux/ethtool.h>
39 #include <linux/mii.h>
40 #include <linux/if_vlan.h>
41 #include <net/ip.h>
42 #include <net/tcp.h>
43 #include <net/checksum.h>
44 #include <net/ip6_checksum.h>
45 #include <linux/workqueue.h>
46 #include <linux/crc32.h>
47 #include <linux/crc32c.h>
48 #include <linux/prefetch.h>
49 #include <linux/zlib.h>
50 #include <linux/io.h>
51 #include <linux/stringify.h>
53 #define BNX2X_MAIN
54 #include "bnx2x.h"
55 #include "bnx2x_init.h"
56 #include "bnx2x_init_ops.h"
57 #include "bnx2x_cmn.h"
58 #include "bnx2x_dcb.h"
60 #include <linux/firmware.h>
61 #include "bnx2x_fw_file_hdr.h"
62 /* FW files */
63 #define FW_FILE_VERSION \
64 __stringify(BCM_5710_FW_MAJOR_VERSION) "." \
65 __stringify(BCM_5710_FW_MINOR_VERSION) "." \
66 __stringify(BCM_5710_FW_REVISION_VERSION) "." \
67 __stringify(BCM_5710_FW_ENGINEERING_VERSION)
68 #define FW_FILE_NAME_E1 "bnx2x/bnx2x-e1-" FW_FILE_VERSION ".fw"
69 #define FW_FILE_NAME_E1H "bnx2x/bnx2x-e1h-" FW_FILE_VERSION ".fw"
70 #define FW_FILE_NAME_E2 "bnx2x/bnx2x-e2-" FW_FILE_VERSION ".fw"
72 /* Time in jiffies before concluding the transmitter is hung */
73 #define TX_TIMEOUT (5*HZ)
75 static char version[] __devinitdata =
76 "Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
77 DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
79 MODULE_AUTHOR("Eliezer Tamir");
80 MODULE_DESCRIPTION("Broadcom NetXtreme II "
81 "BCM57710/57711/57711E/57712/57712E Driver");
82 MODULE_LICENSE("GPL");
83 MODULE_VERSION(DRV_MODULE_VERSION);
84 MODULE_FIRMWARE(FW_FILE_NAME_E1);
85 MODULE_FIRMWARE(FW_FILE_NAME_E1H);
86 MODULE_FIRMWARE(FW_FILE_NAME_E2);
88 static int multi_mode = 1;
89 module_param(multi_mode, int, 0);
90 MODULE_PARM_DESC(multi_mode, " Multi queue mode "
91 "(0 Disable; 1 Enable (default))");
93 int num_queues;
94 module_param(num_queues, int, 0);
95 MODULE_PARM_DESC(num_queues, " Number of queues for multi_mode=1"
96 " (default is as a number of CPUs)");
98 static int disable_tpa;
99 module_param(disable_tpa, int, 0);
100 MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");
102 static int int_mode;
103 module_param(int_mode, int, 0);
104 MODULE_PARM_DESC(int_mode, " Force interrupt mode other than MSI-X "
105 "(1 INT#x; 2 MSI)");
107 static int dropless_fc;
108 module_param(dropless_fc, int, 0);
109 MODULE_PARM_DESC(dropless_fc, " Pause on exhausted host ring");
111 static int poll;
112 module_param(poll, int, 0);
113 MODULE_PARM_DESC(poll, " Use polling (for debug)");
115 static int mrrs = -1;
116 module_param(mrrs, int, 0);
117 MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");
119 static int debug;
120 module_param(debug, int, 0);
121 MODULE_PARM_DESC(debug, " Default debug msglevel");
123 static struct workqueue_struct *bnx2x_wq;
125 #ifdef BCM_CNIC
126 static u8 ALL_ENODE_MACS[] = {0x01, 0x10, 0x18, 0x01, 0x00, 0x01};
127 #endif
129 enum bnx2x_board_type {
130 BCM57710 = 0,
131 BCM57711 = 1,
132 BCM57711E = 2,
133 BCM57712 = 3,
134 BCM57712E = 4
137 /* indexed by board_type, above */
138 static struct {
139 char *name;
140 } board_info[] __devinitdata = {
141 { "Broadcom NetXtreme II BCM57710 XGb" },
142 { "Broadcom NetXtreme II BCM57711 XGb" },
143 { "Broadcom NetXtreme II BCM57711E XGb" },
144 { "Broadcom NetXtreme II BCM57712 XGb" },
145 { "Broadcom NetXtreme II BCM57712E XGb" }
148 static DEFINE_PCI_DEVICE_TABLE(bnx2x_pci_tbl) = {
149 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57710), BCM57710 },
150 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711), BCM57711 },
151 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711E), BCM57711E },
152 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57712), BCM57712 },
153 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57712E), BCM57712E },
154 { 0 }
157 MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);
159 /****************************************************************************
160 * General service functions
161 ****************************************************************************/
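/* Write a 64-bit DMA address into STORM internal memory as two
 * consecutive 32-bit registers (low dword at addr, high dword at addr + 4).
 */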
163 static inline void __storm_memset_dma_mapping(struct bnx2x *bp,
164 u32 addr, dma_addr_t mapping)
166 REG_WR(bp, addr, U64_LO(mapping));
167 REG_WR(bp, addr + 4, U64_HI(mapping));
170 static inline void __storm_memset_fill(struct bnx2x *bp,
171 u32 addr, size_t size, u32 val)
173 int i;
174 for (i = 0; i < size/4; i++)
175 REG_WR(bp, addr + (i * 4), val);
178 static inline void storm_memset_ustats_zero(struct bnx2x *bp,
179 u8 port, u16 stat_id)
181 size_t size = sizeof(struct ustorm_per_client_stats);
183 u32 addr = BAR_USTRORM_INTMEM +
184 USTORM_PER_COUNTER_ID_STATS_OFFSET(port, stat_id);
186 __storm_memset_fill(bp, addr, size, 0);
189 static inline void storm_memset_tstats_zero(struct bnx2x *bp,
190 u8 port, u16 stat_id)
192 size_t size = sizeof(struct tstorm_per_client_stats);
194 u32 addr = BAR_TSTRORM_INTMEM +
195 TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, stat_id);
197 __storm_memset_fill(bp, addr, size, 0);
200 static inline void storm_memset_xstats_zero(struct bnx2x *bp,
201 u8 port, u16 stat_id)
203 size_t size = sizeof(struct xstorm_per_client_stats);
205 u32 addr = BAR_XSTRORM_INTMEM +
206 XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, stat_id);
208 __storm_memset_fill(bp, addr, size, 0);
212 static inline void storm_memset_spq_addr(struct bnx2x *bp,
213 dma_addr_t mapping, u16 abs_fid)
215 u32 addr = XSEM_REG_FAST_MEMORY +
216 XSTORM_SPQ_PAGE_BASE_OFFSET(abs_fid);
218 __storm_memset_dma_mapping(bp, addr, mapping);
221 static inline void storm_memset_ov(struct bnx2x *bp, u16 ov, u16 abs_fid)
223 REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(abs_fid), ov);
226 static inline void storm_memset_func_cfg(struct bnx2x *bp,
227 struct tstorm_eth_function_common_config *tcfg,
228 u16 abs_fid)
230 size_t size = sizeof(struct tstorm_eth_function_common_config);
232 u32 addr = BAR_TSTRORM_INTMEM +
233 TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(abs_fid);
235 __storm_memset_struct(bp, addr, size, (u32 *)tcfg);
238 static inline void storm_memset_xstats_flags(struct bnx2x *bp,
239 struct stats_indication_flags *flags,
240 u16 abs_fid)
242 size_t size = sizeof(struct stats_indication_flags);
244 u32 addr = BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(abs_fid);
246 __storm_memset_struct(bp, addr, size, (u32 *)flags);
249 static inline void storm_memset_tstats_flags(struct bnx2x *bp,
250 struct stats_indication_flags *flags,
251 u16 abs_fid)
253 size_t size = sizeof(struct stats_indication_flags);
255 u32 addr = BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(abs_fid);
257 __storm_memset_struct(bp, addr, size, (u32 *)flags);
260 static inline void storm_memset_ustats_flags(struct bnx2x *bp,
261 struct stats_indication_flags *flags,
262 u16 abs_fid)
264 size_t size = sizeof(struct stats_indication_flags);
266 u32 addr = BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(abs_fid);
268 __storm_memset_struct(bp, addr, size, (u32 *)flags);
271 static inline void storm_memset_cstats_flags(struct bnx2x *bp,
272 struct stats_indication_flags *flags,
273 u16 abs_fid)
275 size_t size = sizeof(struct stats_indication_flags);
277 u32 addr = BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(abs_fid);
279 __storm_memset_struct(bp, addr, size, (u32 *)flags);
282 static inline void storm_memset_xstats_addr(struct bnx2x *bp,
283 dma_addr_t mapping, u16 abs_fid)
285 u32 addr = BAR_XSTRORM_INTMEM +
286 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(abs_fid);
288 __storm_memset_dma_mapping(bp, addr, mapping);
291 static inline void storm_memset_tstats_addr(struct bnx2x *bp,
292 dma_addr_t mapping, u16 abs_fid)
294 u32 addr = BAR_TSTRORM_INTMEM +
295 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(abs_fid);
297 __storm_memset_dma_mapping(bp, addr, mapping);
300 static inline void storm_memset_ustats_addr(struct bnx2x *bp,
301 dma_addr_t mapping, u16 abs_fid)
303 u32 addr = BAR_USTRORM_INTMEM +
304 USTORM_ETH_STATS_QUERY_ADDR_OFFSET(abs_fid);
306 __storm_memset_dma_mapping(bp, addr, mapping);
309 static inline void storm_memset_cstats_addr(struct bnx2x *bp,
310 dma_addr_t mapping, u16 abs_fid)
312 u32 addr = BAR_CSTRORM_INTMEM +
313 CSTORM_ETH_STATS_QUERY_ADDR_OFFSET(abs_fid);
315 __storm_memset_dma_mapping(bp, addr, mapping);
318 static inline void storm_memset_vf_to_pf(struct bnx2x *bp, u16 abs_fid,
319 u16 pf_id)
321 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_VF_TO_PF_OFFSET(abs_fid),
322 pf_id);
323 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_VF_TO_PF_OFFSET(abs_fid),
324 pf_id);
325 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_VF_TO_PF_OFFSET(abs_fid),
326 pf_id);
327 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_VF_TO_PF_OFFSET(abs_fid),
328 pf_id);
331 static inline void storm_memset_func_en(struct bnx2x *bp, u16 abs_fid,
332 u8 enable)
334 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(abs_fid),
335 enable);
336 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(abs_fid),
337 enable);
338 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(abs_fid),
339 enable);
340 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(abs_fid),
341 enable);
344 static inline void storm_memset_eq_data(struct bnx2x *bp,
345 struct event_ring_data *eq_data,
346 u16 pfid)
348 size_t size = sizeof(struct event_ring_data);
350 u32 addr = BAR_CSTRORM_INTMEM + CSTORM_EVENT_RING_DATA_OFFSET(pfid);
352 __storm_memset_struct(bp, addr, size, (u32 *)eq_data);
355 static inline void storm_memset_eq_prod(struct bnx2x *bp, u16 eq_prod,
356 u16 pfid)
358 u32 addr = BAR_CSTRORM_INTMEM + CSTORM_EVENT_RING_PROD_OFFSET(pfid);
359 REG_WR16(bp, addr, eq_prod);
362 static inline void storm_memset_hc_timeout(struct bnx2x *bp, u8 port,
363 u16 fw_sb_id, u8 sb_index,
364 u8 ticks)
367 int index_offset = CHIP_IS_E2(bp) ?
368 offsetof(struct hc_status_block_data_e2, index_data) :
369 offsetof(struct hc_status_block_data_e1x, index_data);
370 u32 addr = BAR_CSTRORM_INTMEM +
371 CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id) +
372 index_offset +
373 sizeof(struct hc_index_data)*sb_index +
374 offsetof(struct hc_index_data, timeout);
375 REG_WR8(bp, addr, ticks);
376 DP(NETIF_MSG_HW, "port %x fw_sb_id %d sb_index %d ticks %d\n",
377 port, fw_sb_id, sb_index, ticks);
379 static inline void storm_memset_hc_disable(struct bnx2x *bp, u8 port,
380 u16 fw_sb_id, u8 sb_index,
381 u8 disable)
383 u32 enable_flag = disable ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT);
384 int index_offset = CHIP_IS_E2(bp) ?
385 offsetof(struct hc_status_block_data_e2, index_data) :
386 offsetof(struct hc_status_block_data_e1x, index_data);
387 u32 addr = BAR_CSTRORM_INTMEM +
388 CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id) +
389 index_offset +
390 sizeof(struct hc_index_data)*sb_index +
391 offsetof(struct hc_index_data, flags);
392 u16 flags = REG_RD16(bp, addr);
393 /* clear and set */
394 flags &= ~HC_INDEX_DATA_HC_ENABLED;
395 flags |= enable_flag;
396 REG_WR16(bp, addr, flags);
397 DP(NETIF_MSG_HW, "port %x fw_sb_id %d sb_index %d disable %d\n",
398 port, fw_sb_id, sb_index, disable);
401 /* used only at init
402 * locking is done by mcp
404 static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
406 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
407 pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
408 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
409 PCICFG_VENDOR_ID_OFFSET);
412 static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
414 u32 val;
416 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
417 pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
418 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
419 PCICFG_VENDOR_ID_OFFSET);
421 return val;
424 #define DMAE_DP_SRC_GRC "grc src_addr [%08x]"
425 #define DMAE_DP_SRC_PCI "pci src_addr [%x:%08x]"
426 #define DMAE_DP_DST_GRC "grc dst_addr [%08x]"
427 #define DMAE_DP_DST_PCI "pci dst_addr [%x:%08x]"
428 #define DMAE_DP_DST_NONE "dst_addr [none]"
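/* Dump a DMAE command (opcode, source/destination addresses, length and
 * completion parameters) to the debug log, formatted by src/dst type.
 */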
430 static void bnx2x_dp_dmae(struct bnx2x *bp, struct dmae_command *dmae,
431 int msglvl)
433 u32 src_type = dmae->opcode & DMAE_COMMAND_SRC;
435 switch (dmae->opcode & DMAE_COMMAND_DST) {
436 case DMAE_CMD_DST_PCI:
437 if (src_type == DMAE_CMD_SRC_PCI)
438 DP(msglvl, "DMAE: opcode 0x%08x\n"
439 "src [%x:%08x], len [%d*4], dst [%x:%08x]\n"
440 "comp_addr [%x:%08x], comp_val 0x%08x\n",
441 dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
442 dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo,
443 dmae->comp_addr_hi, dmae->comp_addr_lo,
444 dmae->comp_val);
445 else
446 DP(msglvl, "DMAE: opcode 0x%08x\n"
447 "src [%08x], len [%d*4], dst [%x:%08x]\n"
448 "comp_addr [%x:%08x], comp_val 0x%08x\n",
449 dmae->opcode, dmae->src_addr_lo >> 2,
450 dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo,
451 dmae->comp_addr_hi, dmae->comp_addr_lo,
452 dmae->comp_val);
453 break;
454 case DMAE_CMD_DST_GRC:
455 if (src_type == DMAE_CMD_SRC_PCI)
456 DP(msglvl, "DMAE: opcode 0x%08x\n"
457 "src [%x:%08x], len [%d*4], dst_addr [%08x]\n"
458 "comp_addr [%x:%08x], comp_val 0x%08x\n",
459 dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
460 dmae->len, dmae->dst_addr_lo >> 2,
461 dmae->comp_addr_hi, dmae->comp_addr_lo,
462 dmae->comp_val);
463 else
464 DP(msglvl, "DMAE: opcode 0x%08x\n"
465 "src [%08x], len [%d*4], dst [%08x]\n"
466 "comp_addr [%x:%08x], comp_val 0x%08x\n",
467 dmae->opcode, dmae->src_addr_lo >> 2,
468 dmae->len, dmae->dst_addr_lo >> 2,
469 dmae->comp_addr_hi, dmae->comp_addr_lo,
470 dmae->comp_val);
471 break;
472 default:
473 if (src_type == DMAE_CMD_SRC_PCI)
474 DP(msglvl, "DMAE: opcode 0x%08x\n"
475 DP_LEVEL "src_addr [%x:%08x] len [%d * 4] "
476 "dst_addr [none]\n"
477 DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
478 dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
479 dmae->len, dmae->comp_addr_hi, dmae->comp_addr_lo,
480 dmae->comp_val);
481 else
482 DP(msglvl, "DMAE: opcode 0x%08x\n"
483 DP_LEVEL "src_addr [%08x] len [%d * 4] "
484 "dst_addr [none]\n"
485 DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
486 dmae->opcode, dmae->src_addr_lo >> 2,
487 dmae->len, dmae->comp_addr_hi, dmae->comp_addr_lo,
488 dmae->comp_val);
489 break;
494 const u32 dmae_reg_go_c[] = {
495 DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
496 DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
497 DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
498 DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
501 /* copy command into DMAE command memory and set DMAE command go */
502 void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae, int idx)
504 u32 cmd_offset;
505 int i;
507 cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
508 for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
509 REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));
511 DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
512 idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
514 REG_WR(bp, dmae_reg_go_c[idx], 1);
517 u32 bnx2x_dmae_opcode_add_comp(u32 opcode, u8 comp_type)
519 return opcode | ((comp_type << DMAE_COMMAND_C_DST_SHIFT) |
520 DMAE_CMD_C_ENABLE);
523 u32 bnx2x_dmae_opcode_clr_src_reset(u32 opcode)
525 return opcode & ~DMAE_CMD_SRC_RESET;
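/* Build a DMAE opcode word from source/destination types, the current
 * port and VN, the error policy and host endianness; optionally append
 * a completion destination via bnx2x_dmae_opcode_add_comp().
 */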
528 u32 bnx2x_dmae_opcode(struct bnx2x *bp, u8 src_type, u8 dst_type,
529 bool with_comp, u8 comp_type)
531 u32 opcode = 0;
533 opcode |= ((src_type << DMAE_COMMAND_SRC_SHIFT) |
534 (dst_type << DMAE_COMMAND_DST_SHIFT));
536 opcode |= (DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET);
538 opcode |= (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0);
539 opcode |= ((BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT) |
540 (BP_E1HVN(bp) << DMAE_COMMAND_DST_VN_SHIFT));
541 opcode |= (DMAE_COM_SET_ERR << DMAE_COMMAND_ERR_POLICY_SHIFT);
543 #ifdef __BIG_ENDIAN
544 opcode |= DMAE_CMD_ENDIANITY_B_DW_SWAP;
545 #else
546 opcode |= DMAE_CMD_ENDIANITY_DW_SWAP;
547 #endif
548 if (with_comp)
549 opcode = bnx2x_dmae_opcode_add_comp(opcode, comp_type);
550 return opcode;
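/* Prepare a DMAE command that signals completion by writing DMAE_COMP_VAL
 * to the slowpath wb_comp location in host memory.
 */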
553 static void bnx2x_prep_dmae_with_comp(struct bnx2x *bp,
554 struct dmae_command *dmae,
555 u8 src_type, u8 dst_type)
557 memset(dmae, 0, sizeof(struct dmae_command));
559 /* set the opcode */
560 dmae->opcode = bnx2x_dmae_opcode(bp, src_type, dst_type,
561 true, DMAE_COMP_PCI);
563 /* fill in the completion parameters */
564 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
565 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
566 dmae->comp_val = DMAE_COMP_VAL;
569 /* issue a dmae command over the init-channel and wait for completion */
570 static int bnx2x_issue_dmae_with_comp(struct bnx2x *bp,
571 struct dmae_command *dmae)
573 u32 *wb_comp = bnx2x_sp(bp, wb_comp);
574 int cnt = CHIP_REV_IS_SLOW(bp) ? (400000) : 40;
575 int rc = 0;
577 DP(BNX2X_MSG_OFF, "data before [0x%08x 0x%08x 0x%08x 0x%08x]\n",
578 bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
579 bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
581 /* lock the dmae channel */
582 mutex_lock(&bp->dmae_mutex);
584 /* reset completion */
585 *wb_comp = 0;
587 /* post the command on the channel used for initializations */
588 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
590 /* wait for completion */
591 udelay(5);
592 while ((*wb_comp & ~DMAE_PCI_ERR_FLAG) != DMAE_COMP_VAL) {
593 DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);
595 if (!cnt) {
596 BNX2X_ERR("DMAE timeout!\n");
597 rc = DMAE_TIMEOUT;
598 goto unlock;
600 cnt--;
601 udelay(50);
603 if (*wb_comp & DMAE_PCI_ERR_FLAG) {
604 BNX2X_ERR("DMAE PCI error!\n");
605 rc = DMAE_PCI_ERROR;
608 DP(BNX2X_MSG_OFF, "data after [0x%08x 0x%08x 0x%08x 0x%08x]\n",
609 bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
610 bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
612 unlock:
613 mutex_unlock(&bp->dmae_mutex);
614 return rc;
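/* Copy len32 dwords from host memory (dma_addr) to the GRC address
 * dst_addr; falls back to indirect register writes while DMAE is not ready.
 */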
617 void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
618 u32 len32)
620 struct dmae_command dmae;
622 if (!bp->dmae_ready) {
623 u32 *data = bnx2x_sp(bp, wb_data[0]);
625 DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x len32 %d)"
626 " using indirect\n", dst_addr, len32);
627 bnx2x_init_ind_wr(bp, dst_addr, data, len32);
628 return;
631 /* set opcode and fixed command fields */
632 bnx2x_prep_dmae_with_comp(bp, &dmae, DMAE_SRC_PCI, DMAE_DST_GRC);
634 /* fill in addresses and len */
635 dmae.src_addr_lo = U64_LO(dma_addr);
636 dmae.src_addr_hi = U64_HI(dma_addr);
637 dmae.dst_addr_lo = dst_addr >> 2;
638 dmae.dst_addr_hi = 0;
639 dmae.len = len32;
641 bnx2x_dp_dmae(bp, &dmae, BNX2X_MSG_OFF);
643 /* issue the command and wait for completion */
644 bnx2x_issue_dmae_with_comp(bp, &dmae);
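/* Copy len32 dwords from the GRC address src_addr into the slowpath
 * wb_data buffer; falls back to indirect reads while DMAE is not ready.
 */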
647 void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
649 struct dmae_command dmae;
651 if (!bp->dmae_ready) {
652 u32 *data = bnx2x_sp(bp, wb_data[0]);
653 int i;
655 DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x len32 %d)"
656 " using indirect\n", src_addr, len32);
657 for (i = 0; i < len32; i++)
658 data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
659 return;
662 /* set opcode and fixed command fields */
663 bnx2x_prep_dmae_with_comp(bp, &dmae, DMAE_SRC_GRC, DMAE_DST_PCI);
665 /* fill in addresses and len */
666 dmae.src_addr_lo = src_addr >> 2;
667 dmae.src_addr_hi = 0;
668 dmae.dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
669 dmae.dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
670 dmae.len = len32;
672 bnx2x_dp_dmae(bp, &dmae, BNX2X_MSG_OFF);
674 /* issue the command and wait for completion */
675 bnx2x_issue_dmae_with_comp(bp, &dmae);
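/* Write a buffer that may exceed the DMAE write limit by splitting it
 * into chunks of at most DMAE_LEN32_WR_MAX dwords.
 */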
678 static void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
679 u32 addr, u32 len)
681 int dmae_wr_max = DMAE_LEN32_WR_MAX(bp);
682 int offset = 0;
684 while (len > dmae_wr_max) {
685 bnx2x_write_dmae(bp, phys_addr + offset,
686 addr + offset, dmae_wr_max);
687 offset += dmae_wr_max * 4;
688 len -= dmae_wr_max;
691 bnx2x_write_dmae(bp, phys_addr + offset, addr + offset, len);
694 /* used only for slowpath so not inlined */
695 static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
697 u32 wb_write[2];
699 wb_write[0] = val_hi;
700 wb_write[1] = val_lo;
701 REG_WR_DMAE(bp, reg, wb_write, 2);
704 #ifdef USE_WB_RD
705 static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
707 u32 wb_data[2];
709 REG_RD_DMAE(bp, reg, wb_data, 2);
711 return HILO_U64(wb_data[0], wb_data[1]);
713 #endif
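/* Scan the X/T/C/USTORM assert lists and print any recorded firmware
 * asserts; returns the number of asserts found.
 */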
715 static int bnx2x_mc_assert(struct bnx2x *bp)
717 char last_idx;
718 int i, rc = 0;
719 u32 row0, row1, row2, row3;
721 /* XSTORM */
722 last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
723 XSTORM_ASSERT_LIST_INDEX_OFFSET);
724 if (last_idx)
725 BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
727 /* print the asserts */
728 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
730 row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
731 XSTORM_ASSERT_LIST_OFFSET(i));
732 row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
733 XSTORM_ASSERT_LIST_OFFSET(i) + 4);
734 row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
735 XSTORM_ASSERT_LIST_OFFSET(i) + 8);
736 row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
737 XSTORM_ASSERT_LIST_OFFSET(i) + 12);
739 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
740 BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
741 " 0x%08x 0x%08x 0x%08x\n",
742 i, row3, row2, row1, row0);
743 rc++;
744 } else {
745 break;
749 /* TSTORM */
750 last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
751 TSTORM_ASSERT_LIST_INDEX_OFFSET);
752 if (last_idx)
753 BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
755 /* print the asserts */
756 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
758 row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
759 TSTORM_ASSERT_LIST_OFFSET(i));
760 row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
761 TSTORM_ASSERT_LIST_OFFSET(i) + 4);
762 row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
763 TSTORM_ASSERT_LIST_OFFSET(i) + 8);
764 row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
765 TSTORM_ASSERT_LIST_OFFSET(i) + 12);
767 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
768 BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
769 " 0x%08x 0x%08x 0x%08x\n",
770 i, row3, row2, row1, row0);
771 rc++;
772 } else {
773 break;
777 /* CSTORM */
778 last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
779 CSTORM_ASSERT_LIST_INDEX_OFFSET);
780 if (last_idx)
781 BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
783 /* print the asserts */
784 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
786 row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
787 CSTORM_ASSERT_LIST_OFFSET(i));
788 row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
789 CSTORM_ASSERT_LIST_OFFSET(i) + 4);
790 row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
791 CSTORM_ASSERT_LIST_OFFSET(i) + 8);
792 row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
793 CSTORM_ASSERT_LIST_OFFSET(i) + 12);
795 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
796 BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
797 " 0x%08x 0x%08x 0x%08x\n",
798 i, row3, row2, row1, row0);
799 rc++;
800 } else {
801 break;
805 /* USTORM */
806 last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
807 USTORM_ASSERT_LIST_INDEX_OFFSET);
808 if (last_idx)
809 BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
811 /* print the asserts */
812 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
814 row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
815 USTORM_ASSERT_LIST_OFFSET(i));
816 row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
817 USTORM_ASSERT_LIST_OFFSET(i) + 4);
818 row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
819 USTORM_ASSERT_LIST_OFFSET(i) + 8);
820 row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
821 USTORM_ASSERT_LIST_OFFSET(i) + 12);
823 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
824 BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
825 " 0x%08x 0x%08x 0x%08x\n",
826 i, row3, row2, row1, row0);
827 rc++;
828 } else {
829 break;
833 return rc;
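/* Dump the MCP trace buffer (located via shmem) to the kernel log;
 * does nothing when no MCP is present.
 */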
836 static void bnx2x_fw_dump(struct bnx2x *bp)
838 u32 addr;
839 u32 mark, offset;
840 __be32 data[9];
841 int word;
842 u32 trace_shmem_base;
843 if (BP_NOMCP(bp)) {
844 BNX2X_ERR("NO MCP - can not dump\n");
845 return;
848 if (BP_PATH(bp) == 0)
849 trace_shmem_base = bp->common.shmem_base;
850 else
851 trace_shmem_base = SHMEM2_RD(bp, other_shmem_base_addr);
852 addr = trace_shmem_base - 0x0800 + 4;
853 mark = REG_RD(bp, addr);
854 mark = (CHIP_IS_E1x(bp) ? MCP_REG_MCPR_SCRATCH : MCP_A_REG_MCPR_SCRATCH)
855 + ((mark + 0x3) & ~0x3) - 0x08000000;
856 pr_err("begin fw dump (mark 0x%x)\n", mark);
858 pr_err("");
859 for (offset = mark; offset <= trace_shmem_base; offset += 0x8*4) {
860 for (word = 0; word < 8; word++)
861 data[word] = htonl(REG_RD(bp, offset + 4*word));
862 data[8] = 0x0;
863 pr_cont("%s", (char *)data);
865 for (offset = addr + 4; offset <= mark; offset += 0x8*4) {
866 for (word = 0; word < 8; word++)
867 data[word] = htonl(REG_RD(bp, offset + 4*word));
868 data[8] = 0x0;
869 pr_cont("%s", (char *)data);
871 pr_err("end of fw dump\n");
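/* Print a full crash dump: default/fastpath indices, per-queue status
 * block data and, when BNX2X_STOP_ON_ERROR is set, the Rx/Tx rings,
 * followed by the firmware trace and the storm assert lists.
 */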
874 void bnx2x_panic_dump(struct bnx2x *bp)
876 int i;
877 u16 j;
878 struct hc_sp_status_block_data sp_sb_data;
879 int func = BP_FUNC(bp);
880 #ifdef BNX2X_STOP_ON_ERROR
881 u16 start = 0, end = 0;
882 #endif
884 bp->stats_state = STATS_STATE_DISABLED;
885 DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
887 BNX2X_ERR("begin crash dump -----------------\n");
889 /* Indices */
890 /* Common */
891 BNX2X_ERR("def_idx(0x%x) def_att_idx(0x%x) attn_state(0x%x)"
892 " spq_prod_idx(0x%x)\n",
893 bp->def_idx, bp->def_att_idx,
894 bp->attn_state, bp->spq_prod_idx);
895 BNX2X_ERR("DSB: attn bits(0x%x) ack(0x%x) id(0x%x) idx(0x%x)\n",
896 bp->def_status_blk->atten_status_block.attn_bits,
897 bp->def_status_blk->atten_status_block.attn_bits_ack,
898 bp->def_status_blk->atten_status_block.status_block_id,
899 bp->def_status_blk->atten_status_block.attn_bits_index);
900 BNX2X_ERR(" def (");
901 for (i = 0; i < HC_SP_SB_MAX_INDICES; i++)
902 pr_cont("0x%x%s",
903 bp->def_status_blk->sp_sb.index_values[i],
904 (i == HC_SP_SB_MAX_INDICES - 1) ? ") " : " ");
906 for (i = 0; i < sizeof(struct hc_sp_status_block_data)/sizeof(u32); i++)
907 *((u32 *)&sp_sb_data + i) = REG_RD(bp, BAR_CSTRORM_INTMEM +
908 CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func) +
909 i*sizeof(u32));
911 pr_cont("igu_sb_id(0x%x) igu_seg_id (0x%x) "
912 "pf_id(0x%x) vnic_id(0x%x) "
913 "vf_id(0x%x) vf_valid (0x%x)\n",
914 sp_sb_data.igu_sb_id,
915 sp_sb_data.igu_seg_id,
916 sp_sb_data.p_func.pf_id,
917 sp_sb_data.p_func.vnic_id,
918 sp_sb_data.p_func.vf_id,
919 sp_sb_data.p_func.vf_valid);
922 for_each_eth_queue(bp, i) {
923 struct bnx2x_fastpath *fp = &bp->fp[i];
924 int loop;
925 struct hc_status_block_data_e2 sb_data_e2;
926 struct hc_status_block_data_e1x sb_data_e1x;
927 struct hc_status_block_sm *hc_sm_p =
928 CHIP_IS_E2(bp) ?
929 sb_data_e2.common.state_machine :
930 sb_data_e1x.common.state_machine;
931 struct hc_index_data *hc_index_p =
932 CHIP_IS_E2(bp) ?
933 sb_data_e2.index_data :
934 sb_data_e1x.index_data;
935 int data_size;
936 u32 *sb_data_p;
938 /* Rx */
939 BNX2X_ERR("fp%d: rx_bd_prod(0x%x) rx_bd_cons(0x%x)"
940 " rx_comp_prod(0x%x)"
941 " rx_comp_cons(0x%x) *rx_cons_sb(0x%x)\n",
942 i, fp->rx_bd_prod, fp->rx_bd_cons,
943 fp->rx_comp_prod,
944 fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
945 BNX2X_ERR(" rx_sge_prod(0x%x) last_max_sge(0x%x)"
946 " fp_hc_idx(0x%x)\n",
947 fp->rx_sge_prod, fp->last_max_sge,
948 le16_to_cpu(fp->fp_hc_idx));
950 /* Tx */
951 BNX2X_ERR("fp%d: tx_pkt_prod(0x%x) tx_pkt_cons(0x%x)"
952 " tx_bd_prod(0x%x) tx_bd_cons(0x%x)"
953 " *tx_cons_sb(0x%x)\n",
954 i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
955 fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
957 loop = CHIP_IS_E2(bp) ?
958 HC_SB_MAX_INDICES_E2 : HC_SB_MAX_INDICES_E1X;
960 /* host sb data */
962 #ifdef BCM_CNIC
963 if (IS_FCOE_FP(fp))
964 continue;
965 #endif
966 BNX2X_ERR(" run indexes (");
967 for (j = 0; j < HC_SB_MAX_SM; j++)
968 pr_cont("0x%x%s",
969 fp->sb_running_index[j],
970 (j == HC_SB_MAX_SM - 1) ? ")" : " ");
972 BNX2X_ERR(" indexes (");
973 for (j = 0; j < loop; j++)
974 pr_cont("0x%x%s",
975 fp->sb_index_values[j],
976 (j == loop - 1) ? ")" : " ");
977 /* fw sb data */
978 data_size = CHIP_IS_E2(bp) ?
979 sizeof(struct hc_status_block_data_e2) :
980 sizeof(struct hc_status_block_data_e1x);
981 data_size /= sizeof(u32);
982 sb_data_p = CHIP_IS_E2(bp) ?
983 (u32 *)&sb_data_e2 :
984 (u32 *)&sb_data_e1x;
985 /* copy sb data in here */
986 for (j = 0; j < data_size; j++)
987 *(sb_data_p + j) = REG_RD(bp, BAR_CSTRORM_INTMEM +
988 CSTORM_STATUS_BLOCK_DATA_OFFSET(fp->fw_sb_id) +
989 j * sizeof(u32));
991 if (CHIP_IS_E2(bp)) {
992 pr_cont("pf_id(0x%x) vf_id (0x%x) vf_valid(0x%x) "
993 "vnic_id(0x%x) same_igu_sb_1b(0x%x)\n",
994 sb_data_e2.common.p_func.pf_id,
995 sb_data_e2.common.p_func.vf_id,
996 sb_data_e2.common.p_func.vf_valid,
997 sb_data_e2.common.p_func.vnic_id,
998 sb_data_e2.common.same_igu_sb_1b);
999 } else {
1000 pr_cont("pf_id(0x%x) vf_id (0x%x) vf_valid(0x%x) "
1001 "vnic_id(0x%x) same_igu_sb_1b(0x%x)\n",
1002 sb_data_e1x.common.p_func.pf_id,
1003 sb_data_e1x.common.p_func.vf_id,
1004 sb_data_e1x.common.p_func.vf_valid,
1005 sb_data_e1x.common.p_func.vnic_id,
1006 sb_data_e1x.common.same_igu_sb_1b);
1009 /* SB_SMs data */
1010 for (j = 0; j < HC_SB_MAX_SM; j++) {
1011 pr_cont("SM[%d] __flags (0x%x) "
1012 "igu_sb_id (0x%x) igu_seg_id(0x%x) "
1013 "time_to_expire (0x%x) "
1014 "timer_value(0x%x)\n", j,
1015 hc_sm_p[j].__flags,
1016 hc_sm_p[j].igu_sb_id,
1017 hc_sm_p[j].igu_seg_id,
1018 hc_sm_p[j].time_to_expire,
1019 hc_sm_p[j].timer_value);
1022 /* Indices data */
1023 for (j = 0; j < loop; j++) {
1024 pr_cont("INDEX[%d] flags (0x%x) "
1025 "timeout (0x%x)\n", j,
1026 hc_index_p[j].flags,
1027 hc_index_p[j].timeout);
1031 #ifdef BNX2X_STOP_ON_ERROR
1032 /* Rings */
1033 /* Rx */
1034 for_each_rx_queue(bp, i) {
1035 struct bnx2x_fastpath *fp = &bp->fp[i];
1037 start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
1038 end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
1039 for (j = start; j != end; j = RX_BD(j + 1)) {
1040 u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
1041 struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];
1043 BNX2X_ERR("fp%d: rx_bd[%x]=[%x:%x] sw_bd=[%p]\n",
1044 i, j, rx_bd[1], rx_bd[0], sw_bd->skb);
1047 start = RX_SGE(fp->rx_sge_prod);
1048 end = RX_SGE(fp->last_max_sge);
1049 for (j = start; j != end; j = RX_SGE(j + 1)) {
1050 u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
1051 struct sw_rx_page *sw_page = &fp->rx_page_ring[j];
1053 BNX2X_ERR("fp%d: rx_sge[%x]=[%x:%x] sw_page=[%p]\n",
1054 i, j, rx_sge[1], rx_sge[0], sw_page->page);
1057 start = RCQ_BD(fp->rx_comp_cons - 10);
1058 end = RCQ_BD(fp->rx_comp_cons + 503);
1059 for (j = start; j != end; j = RCQ_BD(j + 1)) {
1060 u32 *cqe = (u32 *)&fp->rx_comp_ring[j];
1062 BNX2X_ERR("fp%d: cqe[%x]=[%x:%x:%x:%x]\n",
1063 i, j, cqe[0], cqe[1], cqe[2], cqe[3]);
1067 /* Tx */
1068 for_each_tx_queue(bp, i) {
1069 struct bnx2x_fastpath *fp = &bp->fp[i];
1071 start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
1072 end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
1073 for (j = start; j != end; j = TX_BD(j + 1)) {
1074 struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];
1076 BNX2X_ERR("fp%d: packet[%x]=[%p,%x]\n",
1077 i, j, sw_bd->skb, sw_bd->first_bd);
1080 start = TX_BD(fp->tx_bd_cons - 10);
1081 end = TX_BD(fp->tx_bd_cons + 254);
1082 for (j = start; j != end; j = TX_BD(j + 1)) {
1083 u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];
1085 BNX2X_ERR("fp%d: tx_bd[%x]=[%x:%x:%x:%x]\n",
1086 i, j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
1089 #endif
1090 bnx2x_fw_dump(bp);
1091 bnx2x_mc_assert(bp);
1092 BNX2X_ERR("end crash dump -----------------\n");
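/* Enable interrupts through the HC block, configured for MSI-X, MSI or
 * INTx according to the driver flags, and set the leading/trailing edge
 * attention registers.
 */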
1095 static void bnx2x_hc_int_enable(struct bnx2x *bp)
1097 int port = BP_PORT(bp);
1098 u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
1099 u32 val = REG_RD(bp, addr);
1100 int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
1101 int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;
1103 if (msix) {
1104 val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
1105 HC_CONFIG_0_REG_INT_LINE_EN_0);
1106 val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
1107 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
1108 } else if (msi) {
1109 val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
1110 val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
1111 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
1112 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
1113 } else {
1114 val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
1115 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
1116 HC_CONFIG_0_REG_INT_LINE_EN_0 |
1117 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
1119 if (!CHIP_IS_E1(bp)) {
1120 DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
1121 val, port, addr);
1123 REG_WR(bp, addr, val);
1125 val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
1129 if (CHIP_IS_E1(bp))
1130 REG_WR(bp, HC_REG_INT_MASK + port*4, 0x1FFFF);
1132 DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x) mode %s\n",
1133 val, port, addr, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));
1135 REG_WR(bp, addr, val);
1137 * Ensure that HC_CONFIG is written before leading/trailing edge config
1139 mmiowb();
1140 barrier();
1142 if (!CHIP_IS_E1(bp)) {
1143 /* init leading/trailing edge */
1144 if (IS_MF(bp)) {
1145 val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
1146 if (bp->port.pmf)
1147 /* enable nig and gpio3 attention */
1148 val |= 0x1100;
1149 } else
1150 val = 0xffff;
1152 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
1153 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
1156 /* Make sure that interrupts are indeed enabled from here on */
1157 mmiowb();
1160 static void bnx2x_igu_int_enable(struct bnx2x *bp)
1162 u32 val;
1163 int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
1164 int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;
1166 val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);
1168 if (msix) {
1169 val &= ~(IGU_PF_CONF_INT_LINE_EN |
1170 IGU_PF_CONF_SINGLE_ISR_EN);
1171 val |= (IGU_PF_CONF_FUNC_EN |
1172 IGU_PF_CONF_MSI_MSIX_EN |
1173 IGU_PF_CONF_ATTN_BIT_EN);
1174 } else if (msi) {
1175 val &= ~IGU_PF_CONF_INT_LINE_EN;
1176 val |= (IGU_PF_CONF_FUNC_EN |
1177 IGU_PF_CONF_MSI_MSIX_EN |
1178 IGU_PF_CONF_ATTN_BIT_EN |
1179 IGU_PF_CONF_SINGLE_ISR_EN);
1180 } else {
1181 val &= ~IGU_PF_CONF_MSI_MSIX_EN;
1182 val |= (IGU_PF_CONF_FUNC_EN |
1183 IGU_PF_CONF_INT_LINE_EN |
1184 IGU_PF_CONF_ATTN_BIT_EN |
1185 IGU_PF_CONF_SINGLE_ISR_EN);
1188 DP(NETIF_MSG_INTR, "write 0x%x to IGU mode %s\n",
1189 val, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));
1191 REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
1193 barrier();
1195 /* init leading/trailing edge */
1196 if (IS_MF(bp)) {
1197 val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
1198 if (bp->port.pmf)
1199 /* enable nig and gpio3 attention */
1200 val |= 0x1100;
1201 } else
1202 val = 0xffff;
1204 REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, val);
1205 REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, val);
1207 /* Make sure that interrupts are indeed enabled from here on */
1208 mmiowb();
1211 void bnx2x_int_enable(struct bnx2x *bp)
1213 if (bp->common.int_block == INT_BLOCK_HC)
1214 bnx2x_hc_int_enable(bp);
1215 else
1216 bnx2x_igu_int_enable(bp);
1219 static void bnx2x_hc_int_disable(struct bnx2x *bp)
1221 int port = BP_PORT(bp);
1222 u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
1223 u32 val = REG_RD(bp, addr);
1226 * in E1 we must use only PCI configuration space to disable
1227 * MSI/MSIX capability
1228 * It's forbidden to disable IGU_PF_CONF_MSI_MSIX_EN in HC block
1230 if (CHIP_IS_E1(bp)) {
1231 /* Since IGU_PF_CONF_MSI_MSIX_EN still always on
1232 * Use mask register to prevent from HC sending interrupts
1233 * after we exit the function
1235 REG_WR(bp, HC_REG_INT_MASK + port*4, 0);
1237 val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
1238 HC_CONFIG_0_REG_INT_LINE_EN_0 |
1239 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
1240 } else
1241 val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
1242 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
1243 HC_CONFIG_0_REG_INT_LINE_EN_0 |
1244 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
1246 DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
1247 val, port, addr);
1249 /* flush all outstanding writes */
1250 mmiowb();
1252 REG_WR(bp, addr, val);
1253 if (REG_RD(bp, addr) != val)
1254 BNX2X_ERR("BUG! proper val not read from IGU!\n");
1257 static void bnx2x_igu_int_disable(struct bnx2x *bp)
1259 u32 val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);
1261 val &= ~(IGU_PF_CONF_MSI_MSIX_EN |
1262 IGU_PF_CONF_INT_LINE_EN |
1263 IGU_PF_CONF_ATTN_BIT_EN);
1265 DP(NETIF_MSG_INTR, "write %x to IGU\n", val);
1267 /* flush all outstanding writes */
1268 mmiowb();
1270 REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
1271 if (REG_RD(bp, IGU_REG_PF_CONFIGURATION) != val)
1272 BNX2X_ERR("BUG! proper val not read from IGU!\n");
1275 static void bnx2x_int_disable(struct bnx2x *bp)
1277 if (bp->common.int_block == INT_BLOCK_HC)
1278 bnx2x_hc_int_disable(bp);
1279 else
1280 bnx2x_igu_int_disable(bp);
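/* Stop interrupt handling: bump intr_sem, optionally mask interrupts in
 * the HW, wait for all ISRs to finish and make sure sp_task is not running.
 */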
1283 void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
1285 int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
1286 int i, offset;
1288 /* disable interrupt handling */
1289 atomic_inc(&bp->intr_sem);
1290 smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
1292 if (disable_hw)
1293 /* prevent the HW from sending interrupts */
1294 bnx2x_int_disable(bp);
1296 /* make sure all ISRs are done */
1297 if (msix) {
1298 synchronize_irq(bp->msix_table[0].vector);
1299 offset = 1;
1300 #ifdef BCM_CNIC
1301 offset++;
1302 #endif
1303 for_each_eth_queue(bp, i)
1304 synchronize_irq(bp->msix_table[i + offset].vector);
1305 } else
1306 synchronize_irq(bp->pdev->irq);
1308 /* make sure sp_task is not running */
1309 cancel_delayed_work(&bp->sp_task);
1310 flush_workqueue(bnx2x_wq);
1313 /* fast path */
1316 * General service functions
1319 /* Return true if succeeded to acquire the lock */
1320 static bool bnx2x_trylock_hw_lock(struct bnx2x *bp, u32 resource)
1322 u32 lock_status;
1323 u32 resource_bit = (1 << resource);
1324 int func = BP_FUNC(bp);
1325 u32 hw_lock_control_reg;
1327 DP(NETIF_MSG_HW, "Trying to take a lock on resource %d\n", resource);
1329 /* Validating that the resource is within range */
1330 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1331 DP(NETIF_MSG_HW,
1332 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1333 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1334 return false;
1337 if (func <= 5)
1338 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1339 else
1340 hw_lock_control_reg =
1341 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1343 /* Try to acquire the lock */
1344 REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
1345 lock_status = REG_RD(bp, hw_lock_control_reg);
1346 if (lock_status & resource_bit)
1347 return true;
1349 DP(NETIF_MSG_HW, "Failed to get a lock on resource %d\n", resource);
1350 return false;
1353 #ifdef BCM_CNIC
1354 static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid);
1355 #endif
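/* Handle a ramrod completion CQE on a fastpath ring: advance the fastpath
 * state machine and return the slowpath queue credit.
 */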
1357 void bnx2x_sp_event(struct bnx2x_fastpath *fp,
1358 union eth_rx_cqe *rr_cqe)
1360 struct bnx2x *bp = fp->bp;
1361 int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
1362 int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);
1364 DP(BNX2X_MSG_SP,
1365 "fp %d cid %d got ramrod #%d state is %x type is %d\n",
1366 fp->index, cid, command, bp->state,
1367 rr_cqe->ramrod_cqe.ramrod_type);
1369 switch (command | fp->state) {
1370 case (RAMROD_CMD_ID_ETH_CLIENT_SETUP | BNX2X_FP_STATE_OPENING):
1371 DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n", cid);
1372 fp->state = BNX2X_FP_STATE_OPEN;
1373 break;
1375 case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
1376 DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n", cid);
1377 fp->state = BNX2X_FP_STATE_HALTED;
1378 break;
1380 case (RAMROD_CMD_ID_ETH_TERMINATE | BNX2X_FP_STATE_TERMINATING):
1381 DP(NETIF_MSG_IFDOWN, "got MULTI[%d] terminate ramrod\n", cid);
1382 fp->state = BNX2X_FP_STATE_TERMINATED;
1383 break;
1385 default:
1386 BNX2X_ERR("unexpected MC reply (%d) "
1387 "fp[%d] state is %x\n",
1388 command, fp->index, fp->state);
1389 break;
1392 smp_mb__before_atomic_inc();
1393 atomic_inc(&bp->spq_left);
1394 /* push the change in fp->state and towards the memory */
1395 smp_wmb();
1397 return;
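/* INTx/MSI interrupt handler: ack the interrupt, schedule NAPI for every
 * fastpath with a pending status bit, forward CNIC events and queue the
 * slowpath task for the remaining bits.
 */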
1400 irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
1402 struct bnx2x *bp = netdev_priv(dev_instance);
1403 u16 status = bnx2x_ack_int(bp);
1404 u16 mask;
1405 int i;
1407 /* Return here if interrupt is shared and it's not for us */
1408 if (unlikely(status == 0)) {
1409 DP(NETIF_MSG_INTR, "not our interrupt!\n");
1410 return IRQ_NONE;
1412 DP(NETIF_MSG_INTR, "got an interrupt status 0x%x\n", status);
1414 /* Return here if interrupt is disabled */
1415 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1416 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1417 return IRQ_HANDLED;
1420 #ifdef BNX2X_STOP_ON_ERROR
1421 if (unlikely(bp->panic))
1422 return IRQ_HANDLED;
1423 #endif
1425 for_each_eth_queue(bp, i) {
1426 struct bnx2x_fastpath *fp = &bp->fp[i];
1428 mask = 0x2 << (fp->index + CNIC_CONTEXT_USE);
1429 if (status & mask) {
1430 /* Handle Rx and Tx according to SB id */
1431 prefetch(fp->rx_cons_sb);
1432 prefetch(fp->tx_cons_sb);
1433 prefetch(&fp->sb_running_index[SM_RX_ID]);
1434 napi_schedule(&bnx2x_fp(bp, fp->index, napi));
1435 status &= ~mask;
1439 #ifdef BCM_CNIC
1440 mask = 0x2;
1441 if (status & (mask | 0x1)) {
1442 struct cnic_ops *c_ops = NULL;
1444 rcu_read_lock();
1445 c_ops = rcu_dereference(bp->cnic_ops);
1446 if (c_ops)
1447 c_ops->cnic_handler(bp->cnic_data, NULL);
1448 rcu_read_unlock();
1450 status &= ~mask;
1452 #endif
1454 if (unlikely(status & 0x1)) {
1455 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
1457 status &= ~0x1;
1458 if (!status)
1459 return IRQ_HANDLED;
1462 if (unlikely(status))
1463 DP(NETIF_MSG_INTR, "got an unknown interrupt! (status 0x%x)\n",
1464 status);
1466 return IRQ_HANDLED;
1469 /* end of fast path */
1472 /* Link */
1475 * General service functions
1478 int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
1480 u32 lock_status;
1481 u32 resource_bit = (1 << resource);
1482 int func = BP_FUNC(bp);
1483 u32 hw_lock_control_reg;
1484 int cnt;
1486 /* Validating that the resource is within range */
1487 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1488 DP(NETIF_MSG_HW,
1489 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1490 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1491 return -EINVAL;
1494 if (func <= 5) {
1495 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1496 } else {
1497 hw_lock_control_reg =
1498 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1501 /* Validating that the resource is not already taken */
1502 lock_status = REG_RD(bp, hw_lock_control_reg);
1503 if (lock_status & resource_bit) {
1504 DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
1505 lock_status, resource_bit);
1506 return -EEXIST;
1509 /* Try for 5 seconds every 5ms */
1510 for (cnt = 0; cnt < 1000; cnt++) {
1511 /* Try to acquire the lock */
1512 REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
1513 lock_status = REG_RD(bp, hw_lock_control_reg);
1514 if (lock_status & resource_bit)
1515 return 0;
1517 msleep(5);
1519 DP(NETIF_MSG_HW, "Timeout\n");
1520 return -EAGAIN;
1523 int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
1525 u32 lock_status;
1526 u32 resource_bit = (1 << resource);
1527 int func = BP_FUNC(bp);
1528 u32 hw_lock_control_reg;
1530 DP(NETIF_MSG_HW, "Releasing a lock on resource %d\n", resource);
1532 /* Validating that the resource is within range */
1533 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1534 DP(NETIF_MSG_HW,
1535 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1536 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1537 return -EINVAL;
1540 if (func <= 5) {
1541 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1542 } else {
1543 hw_lock_control_reg =
1544 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1547 /* Validating that the resource is currently taken */
1548 lock_status = REG_RD(bp, hw_lock_control_reg);
1549 if (!(lock_status & resource_bit)) {
1550 DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
1551 lock_status, resource_bit);
1552 return -EFAULT;
1555 REG_WR(bp, hw_lock_control_reg, resource_bit);
1556 return 0;
1560 int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
1562 /* The GPIO should be swapped if swap register is set and active */
1563 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1564 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1565 int gpio_shift = gpio_num +
1566 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1567 u32 gpio_mask = (1 << gpio_shift);
1568 u32 gpio_reg;
1569 int value;
1571 if (gpio_num > MISC_REGISTERS_GPIO_3) {
1572 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1573 return -EINVAL;
1576 /* read GPIO value */
1577 gpio_reg = REG_RD(bp, MISC_REG_GPIO);
1579 /* get the requested pin value */
1580 if ((gpio_reg & gpio_mask) == gpio_mask)
1581 value = 1;
1582 else
1583 value = 0;
1585 DP(NETIF_MSG_LINK, "pin %d value 0x%x\n", gpio_num, value);
1587 return value;
1590 int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
1592 /* The GPIO should be swapped if swap register is set and active */
1593 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1594 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1595 int gpio_shift = gpio_num +
1596 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1597 u32 gpio_mask = (1 << gpio_shift);
1598 u32 gpio_reg;
1600 if (gpio_num > MISC_REGISTERS_GPIO_3) {
1601 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1602 return -EINVAL;
1605 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1606 /* read GPIO and mask except the float bits */
1607 gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
1609 switch (mode) {
1610 case MISC_REGISTERS_GPIO_OUTPUT_LOW:
1611 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
1612 gpio_num, gpio_shift);
1613 /* clear FLOAT and set CLR */
1614 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1615 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
1616 break;
1618 case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
1619 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
1620 gpio_num, gpio_shift);
1621 /* clear FLOAT and set SET */
1622 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1623 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
1624 break;
1626 case MISC_REGISTERS_GPIO_INPUT_HI_Z:
1627 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
1628 gpio_num, gpio_shift);
1629 /* set FLOAT */
1630 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1631 break;
1633 default:
1634 break;
1637 REG_WR(bp, MISC_REG_GPIO, gpio_reg);
1638 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1640 return 0;
1643 int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
1645 /* The GPIO should be swapped if swap register is set and active */
1646 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1647 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1648 int gpio_shift = gpio_num +
1649 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1650 u32 gpio_mask = (1 << gpio_shift);
1651 u32 gpio_reg;
1653 if (gpio_num > MISC_REGISTERS_GPIO_3) {
1654 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1655 return -EINVAL;
1658 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1659 /* read GPIO int */
1660 gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);
1662 switch (mode) {
1663 case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
1664 DP(NETIF_MSG_LINK, "Clear GPIO INT %d (shift %d) -> "
1665 "output low\n", gpio_num, gpio_shift);
1666 /* clear SET and set CLR */
1667 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
1668 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
1669 break;
1671 case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
1672 DP(NETIF_MSG_LINK, "Set GPIO INT %d (shift %d) -> "
1673 "output high\n", gpio_num, gpio_shift);
1674 /* clear CLR and set SET */
1675 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
1676 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
1677 break;
1679 default:
1680 break;
1683 REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
1684 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1686 return 0;
1689 static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
1691 u32 spio_mask = (1 << spio_num);
1692 u32 spio_reg;
1694 if ((spio_num < MISC_REGISTERS_SPIO_4) ||
1695 (spio_num > MISC_REGISTERS_SPIO_7)) {
1696 BNX2X_ERR("Invalid SPIO %d\n", spio_num);
1697 return -EINVAL;
1700 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
1701 /* read SPIO and mask except the float bits */
1702 spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);
1704 switch (mode) {
1705 case MISC_REGISTERS_SPIO_OUTPUT_LOW:
1706 DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
1707 /* clear FLOAT and set CLR */
1708 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1709 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
1710 break;
1712 case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
1713 DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
1714 /* clear FLOAT and set SET */
1715 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1716 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
1717 break;
1719 case MISC_REGISTERS_SPIO_INPUT_HI_Z:
1720 DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
1721 /* set FLOAT */
1722 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1723 break;
1725 default:
1726 break;
1729 REG_WR(bp, MISC_REG_SPIO, spio_reg);
1730 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
1732 return 0;
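/* Return the link configuration index of the currently selected external
 * PHY, taking PHY swapping into account.
 */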
1735 int bnx2x_get_link_cfg_idx(struct bnx2x *bp)
1737 u32 sel_phy_idx = 0;
1738 if (bp->link_vars.link_up) {
1739 sel_phy_idx = EXT_PHY1;
1740 /* In case link is SERDES, check if the EXT_PHY2 is the one */
1741 if ((bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) &&
1742 (bp->link_params.phy[EXT_PHY2].supported & SUPPORTED_FIBRE))
1743 sel_phy_idx = EXT_PHY2;
1744 } else {
1746 switch (bnx2x_phy_selection(&bp->link_params)) {
1747 case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
1748 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY:
1749 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
1750 sel_phy_idx = EXT_PHY1;
1751 break;
1752 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY:
1753 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
1754 sel_phy_idx = EXT_PHY2;
1755 break;
1759 * The selected active PHY is always after swapping (in case PHY
1760 * swapping is enabled). So when swapping is enabled, we need to reverse
1761 * the configuration
1764 if (bp->link_params.multi_phy_config &
1765 PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
1766 if (sel_phy_idx == EXT_PHY1)
1767 sel_phy_idx = EXT_PHY2;
1768 else if (sel_phy_idx == EXT_PHY2)
1769 sel_phy_idx = EXT_PHY1;
1771 return LINK_CONFIG_IDX(sel_phy_idx);
1774 void bnx2x_calc_fc_adv(struct bnx2x *bp)
1776 u8 cfg_idx = bnx2x_get_link_cfg_idx(bp);
1777 switch (bp->link_vars.ieee_fc &
1778 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
1779 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
1780 bp->port.advertising[cfg_idx] &= ~(ADVERTISED_Asym_Pause |
1781 ADVERTISED_Pause);
1782 break;
1784 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
1785 bp->port.advertising[cfg_idx] |= (ADVERTISED_Asym_Pause |
1786 ADVERTISED_Pause);
1787 break;
1789 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
1790 bp->port.advertising[cfg_idx] |= ADVERTISED_Asym_Pause;
1791 break;
1793 default:
1794 bp->port.advertising[cfg_idx] &= ~(ADVERTISED_Asym_Pause |
1795 ADVERTISED_Pause);
1796 break;
1800 u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
1802 if (!BP_NOMCP(bp)) {
1803 u8 rc;
1804 int cfx_idx = bnx2x_get_link_cfg_idx(bp);
1805 u16 req_line_speed = bp->link_params.req_line_speed[cfx_idx];
1806 /* Initialize link parameters structure variables */
1807 /* It is recommended to turn off RX FC for jumbo frames
1808 for better performance */
1809 if ((CHIP_IS_E1x(bp)) && (bp->dev->mtu > 5000))
1810 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
1811 else
1812 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
1814 bnx2x_acquire_phy_lock(bp);
1816 if (load_mode == LOAD_DIAG) {
1817 bp->link_params.loopback_mode = LOOPBACK_XGXS;
1818 bp->link_params.req_line_speed[cfx_idx] = SPEED_10000;
1821 rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
1823 bnx2x_release_phy_lock(bp);
1825 bnx2x_calc_fc_adv(bp);
1827 if (CHIP_REV_IS_SLOW(bp) && bp->link_vars.link_up) {
1828 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
1829 bnx2x_link_report(bp);
1831 bp->link_params.req_line_speed[cfx_idx] = req_line_speed;
1832 return rc;
1834 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
1835 return -EINVAL;
1838 void bnx2x_link_set(struct bnx2x *bp)
1840 if (!BP_NOMCP(bp)) {
1841 bnx2x_acquire_phy_lock(bp);
1842 bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
1843 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
1844 bnx2x_release_phy_lock(bp);
1846 bnx2x_calc_fc_adv(bp);
1847 } else
1848 BNX2X_ERR("Bootcode is missing - can not set link\n");
1851 static void bnx2x__link_reset(struct bnx2x *bp)
1853 if (!BP_NOMCP(bp)) {
1854 bnx2x_acquire_phy_lock(bp);
1855 bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
1856 bnx2x_release_phy_lock(bp);
1857 } else
1858 BNX2X_ERR("Bootcode is missing - can not reset link\n");
1861 u8 bnx2x_link_test(struct bnx2x *bp, u8 is_serdes)
1863 u8 rc = 0;
1865 if (!BP_NOMCP(bp)) {
1866 bnx2x_acquire_phy_lock(bp);
1867 rc = bnx2x_test_link(&bp->link_params, &bp->link_vars,
1868 is_serdes);
1869 bnx2x_release_phy_lock(bp);
1870 } else
1871 BNX2X_ERR("Bootcode is missing - can not test link\n");
1873 return rc;
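/* Initialize the per-port rate shaping and fairness timer parameters from
 * the current line speed.
 */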
1876 static void bnx2x_init_port_minmax(struct bnx2x *bp)
1878 u32 r_param = bp->link_vars.line_speed / 8;
1879 u32 fair_periodic_timeout_usec;
1880 u32 t_fair;
1882 memset(&(bp->cmng.rs_vars), 0,
1883 sizeof(struct rate_shaping_vars_per_port));
1884 memset(&(bp->cmng.fair_vars), 0, sizeof(struct fairness_vars_per_port));
1886 /* 100 usec in SDM ticks = 25 since each tick is 4 usec */
1887 bp->cmng.rs_vars.rs_periodic_timeout = RS_PERIODIC_TIMEOUT_USEC / 4;
1889 /* this is the threshold below which no timer arming will occur
1890 1.25 coefficient is for the threshold to be a little bigger
1891 than the real time, to compensate for timer inaccuracy */
1892 bp->cmng.rs_vars.rs_threshold =
1893 (RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;
1895 /* resolution of fairness timer */
1896 fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
1897 /* for 10G it is 1000usec. for 1G it is 10000usec. */
1898 t_fair = T_FAIR_COEF / bp->link_vars.line_speed;
1900 /* this is the threshold below which we won't arm the timer anymore */
1901 bp->cmng.fair_vars.fair_threshold = QM_ARB_BYTES;
1903 /* we multiply by 1e3/8 to get bytes/msec.
1904 We don't want the credits to pass a credit
1905 of the t_fair*FAIR_MEM (algorithm resolution) */
1906 bp->cmng.fair_vars.upper_bound = r_param * t_fair * FAIR_MEM;
1907 /* since each tick is 4 usec */
1908 bp->cmng.fair_vars.fairness_timeout = fair_periodic_timeout_usec / 4;
1911 /* Calculates the sum of vn_min_rates.
1912 It's needed for further normalizing of the min_rates.
1913 Returns:
1914 sum of vn_min_rates.
1916 0 - if all the min_rates are 0.
1917 In the latter case the fairness algorithm should be deactivated.
1918 If not all min_rates are zero then those that are zeroes will be set to 1.
1920 static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
1922 int all_zero = 1;
1923 int vn;
1925 bp->vn_weight_sum = 0;
1926 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
1927 u32 vn_cfg = bp->mf_config[vn];
1928 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
1929 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
1931 /* Skip hidden vns */
1932 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
1933 continue;
1935 /* If min rate is zero - set it to 1 */
1936 if (!vn_min_rate)
1937 vn_min_rate = DEF_MIN_RATE;
1938 else
1939 all_zero = 0;
1941 bp->vn_weight_sum += vn_min_rate;
1944 /* ... only if all min rates are zeros - disable fairness */
1945 if (all_zero) {
1946 bp->cmng.flags.cmng_enables &=
1947 ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
1948 DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
1949 " fairness will be disabled\n");
1950 } else
1951 bp->cmng.flags.cmng_enables |=
1952 CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
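/* Compute the per-VN rate shaping and fairness parameters from the MF
 * configuration and store them in XSTORM internal memory.
 */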
1955 static void bnx2x_init_vn_minmax(struct bnx2x *bp, int vn)
1957 struct rate_shaping_vars_per_vn m_rs_vn;
1958 struct fairness_vars_per_vn m_fair_vn;
1959 u32 vn_cfg = bp->mf_config[vn];
1960 int func = 2*vn + BP_PORT(bp);
1961 u16 vn_min_rate, vn_max_rate;
1962 int i;
1964 /* If function is hidden - set min and max to zeroes */
1965 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
1966 vn_min_rate = 0;
1967 vn_max_rate = 0;
1969 } else {
1970 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
1971 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
1972 /* If min rate is zero - set it to 1 */
1973 if (bp->vn_weight_sum && (vn_min_rate == 0))
1974 vn_min_rate = DEF_MIN_RATE;
1975 vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
1976 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
1979 DP(NETIF_MSG_IFUP,
1980 "func %d: vn_min_rate %d vn_max_rate %d vn_weight_sum %d\n",
1981 func, vn_min_rate, vn_max_rate, bp->vn_weight_sum);
1983 memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
1984 memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));
1986 /* global vn counter - maximal Mbps for this vn */
1987 m_rs_vn.vn_counter.rate = vn_max_rate;
1989 /* quota - number of bytes transmitted in this period */
1990 m_rs_vn.vn_counter.quota =
1991 (vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;
1993 if (bp->vn_weight_sum) {
1994 /* credit for each period of the fairness algorithm:
1995 number of bytes in T_FAIR (the vn share the port rate).
1996 vn_weight_sum should not be larger than 10000, thus
1997 T_FAIR_COEF / (8 * vn_weight_sum) will always be greater
1998 than zero */
1999 m_fair_vn.vn_credit_delta =
2000 max_t(u32, (vn_min_rate * (T_FAIR_COEF /
2001 (8 * bp->vn_weight_sum))),
2002 (bp->cmng.fair_vars.fair_threshold * 2));
2003 DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta %d\n",
2004 m_fair_vn.vn_credit_delta);
2007 /* Store it to internal memory */
2008 for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
2009 REG_WR(bp, BAR_XSTRORM_INTMEM +
2010 XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
2011 ((u32 *)(&m_rs_vn))[i]);
2013 for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
2014 REG_WR(bp, BAR_XSTRORM_INTMEM +
2015 XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
2016 ((u32 *)(&m_fair_vn))[i]);
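/*
 * Illustrative example (same percentage assumption as above): a VN
 * with MIN_BW = 10% and MAX_BW = 50% gets vn_min_rate = 1000 and
 * vn_max_rate = 5000, so its rate-shaping quota is
 * 5000 * RS_PERIODIC_TIMEOUT_USEC / 8 bytes per period and, with
 * vn_weight_sum = 10000, its fairness credit is
 * max(1000 * T_FAIR_COEF / 80000, 2 * fair_threshold).  The two loops
 * above then copy the prepared structures dword by dword into XSTORM
 * internal memory at this function's offsets.
 */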
2019 static int bnx2x_get_cmng_fns_mode(struct bnx2x *bp)
2021 if (CHIP_REV_IS_SLOW(bp))
2022 return CMNG_FNS_NONE;
2023 if (IS_MF(bp))
2024 return CMNG_FNS_MINMAX;
2026 return CMNG_FNS_NONE;
2029 static void bnx2x_read_mf_cfg(struct bnx2x *bp)
2031 int vn, n = (CHIP_MODE_IS_4_PORT(bp) ? 2 : 1);
2033 if (BP_NOMCP(bp))
2034 return; /* what should be the default value in this case */
2036 /* For 2 port configuration the absolute function number formula
2037 * is:
2038 * abs_func = 2 * vn + BP_PORT + BP_PATH
2040 * and there are 4 functions per port
2042 * For 4 port configuration it is
2043 * abs_func = 4 * vn + 2 * BP_PORT + BP_PATH
2045 * and there are 2 functions per port
2047 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2048 int /*abs*/func = n * (2 * vn + BP_PORT(bp)) + BP_PATH(bp);
2050 if (func >= E1H_FUNC_MAX)
2051 break;
2053 bp->mf_config[vn] =
2054 MF_CFG_RD(bp, func_mf_config[func].config);
2058 static void bnx2x_cmng_fns_init(struct bnx2x *bp, u8 read_cfg, u8 cmng_type)
2061 if (cmng_type == CMNG_FNS_MINMAX) {
2062 int vn;
2064 /* clear cmng_enables */
2065 bp->cmng.flags.cmng_enables = 0;
2067 /* read mf conf from shmem */
2068 if (read_cfg)
2069 bnx2x_read_mf_cfg(bp);
2071 /* Init rate shaping and fairness contexts */
2072 bnx2x_init_port_minmax(bp);
2074 /* calculate vn_weight_sum and enable fairness if it is not 0 */
2075 bnx2x_calc_vn_weight_sum(bp);
2077 /* calculate and set min-max rate for each vn */
2078 for (vn = VN_0; vn < E1HVN_MAX; vn++)
2079 bnx2x_init_vn_minmax(bp, vn);
2081 /* always enable rate shaping and fairness */
2082 bp->cmng.flags.cmng_enables |=
2083 CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
2084 if (!bp->vn_weight_sum)
2085 DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
2086 " fairness will be disabled\n");
2087 return;
2090 /* rate shaping and fairness are disabled */
2091 DP(NETIF_MSG_IFUP,
2092 "rate shaping and fairness are disabled\n");
2095 static inline void bnx2x_link_sync_notify(struct bnx2x *bp)
2097 int port = BP_PORT(bp);
2098 int func;
2099 int vn;
2101 /* Set the attention towards other drivers on the same port */
2102 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2103 if (vn == BP_E1HVN(bp))
2104 continue;
2106 func = ((vn << 1) | port);
2107 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2108 (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
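/*
 * The loop above signals every other VN sharing this port:
 * func = (vn << 1) | port is the absolute function number on a
 * two-port device, and writing 1 to that function's LINK_SYNC
 * general attention bit raises a link attention in the driver
 * owning that function.
 */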
2112 /* This function is called upon link interrupt */
2113 static void bnx2x_link_attn(struct bnx2x *bp)
2115 u32 prev_link_status = bp->link_vars.link_status;
2116 /* Make sure that we are synced with the current statistics */
2117 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2119 bnx2x_link_update(&bp->link_params, &bp->link_vars);
2121 if (bp->link_vars.link_up) {
2123 /* dropless flow control */
2124 if (!CHIP_IS_E1(bp) && bp->dropless_fc) {
2125 int port = BP_PORT(bp);
2126 u32 pause_enabled = 0;
2128 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
2129 pause_enabled = 1;
2131 REG_WR(bp, BAR_USTRORM_INTMEM +
2132 USTORM_ETH_PAUSE_ENABLED_OFFSET(port),
2133 pause_enabled);
2136 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
2137 struct host_port_stats *pstats;
2139 pstats = bnx2x_sp(bp, port_stats);
2140 /* reset old bmac stats */
2141 memset(&(pstats->mac_stx[0]), 0,
2142 sizeof(struct mac_stx));
2144 if (bp->state == BNX2X_STATE_OPEN)
2145 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2148 /* indicate link status only if link status actually changed */
2149 if (prev_link_status != bp->link_vars.link_status)
2150 bnx2x_link_report(bp);
2152 if (IS_MF(bp))
2153 bnx2x_link_sync_notify(bp);
2155 if (bp->link_vars.link_up && bp->link_vars.line_speed) {
2156 int cmng_fns = bnx2x_get_cmng_fns_mode(bp);
2158 if (cmng_fns != CMNG_FNS_NONE) {
2159 bnx2x_cmng_fns_init(bp, false, cmng_fns);
2160 storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
2161 } else
2162 /* rate shaping and fairness are disabled */
2163 DP(NETIF_MSG_IFUP,
2164 "single function mode without fairness\n");
2168 void bnx2x__link_status_update(struct bnx2x *bp)
2170 if ((bp->state != BNX2X_STATE_OPEN) || (bp->flags & MF_FUNC_DIS))
2171 return;
2173 bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
2175 if (bp->link_vars.link_up)
2176 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2177 else
2178 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2180 /* the link status update could be the result of a DCC event
2181 hence re-read the shmem mf configuration */
2182 bnx2x_read_mf_cfg(bp);
2184 /* indicate link status */
2185 bnx2x_link_report(bp);
2188 static void bnx2x_pmf_update(struct bnx2x *bp)
2190 int port = BP_PORT(bp);
2191 u32 val;
2193 bp->port.pmf = 1;
2194 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2196 /* enable nig attention */
2197 val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
2198 if (bp->common.int_block == INT_BLOCK_HC) {
2199 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
2200 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
2201 } else if (CHIP_IS_E2(bp)) {
2202 REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, val);
2203 REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, val);
2206 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
2209 /* end of Link */
2211 /* slow path */
2214 * General service functions
2217 /* send the MCP a request, block until there is a reply */
2218 u32 bnx2x_fw_command(struct bnx2x *bp, u32 command, u32 param)
2220 int mb_idx = BP_FW_MB_IDX(bp);
2221 u32 seq = ++bp->fw_seq;
2222 u32 rc = 0;
2223 u32 cnt = 1;
2224 u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
2226 mutex_lock(&bp->fw_mb_mutex);
2227 SHMEM_WR(bp, func_mb[mb_idx].drv_mb_param, param);
2228 SHMEM_WR(bp, func_mb[mb_idx].drv_mb_header, (command | seq));
2230 DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
2232 do {
2233 /* let the FW do its magic ... */
2234 msleep(delay);
2236 rc = SHMEM_RD(bp, func_mb[mb_idx].fw_mb_header);
2238 /* Give the FW up to 5 seconds (500*10ms) */
2239 } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 500));
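/*
 * Polling budget: 500 iterations of the msleep() above, i.e. roughly
 * 5 seconds with the normal 10 ms delay, or proportionally longer
 * with the 100 ms delay used on slow chip revisions.
 */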
2241 DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
2242 cnt*delay, rc, seq);
2244 /* is this a reply to our command? */
2245 if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK))
2246 rc &= FW_MSG_CODE_MASK;
2247 else {
2248 /* FW BUG! */
2249 BNX2X_ERR("FW failed to respond!\n");
2250 bnx2x_fw_dump(bp);
2251 rc = 0;
2253 mutex_unlock(&bp->fw_mb_mutex);
2255 return rc;
2258 static u8 stat_counter_valid(struct bnx2x *bp, struct bnx2x_fastpath *fp)
2260 #ifdef BCM_CNIC
2261 if (IS_FCOE_FP(fp) && IS_MF(bp))
2262 return false;
2263 #endif
2264 return true;
2267 /* must be called under rtnl_lock */
2268 static void bnx2x_rxq_set_mac_filters(struct bnx2x *bp, u16 cl_id, u32 filters)
2270 u32 mask = (1 << cl_id);
2272 /* initial setting is BNX2X_ACCEPT_NONE */
2273 u8 drop_all_ucast = 1, drop_all_bcast = 1, drop_all_mcast = 1;
2274 u8 accp_all_ucast = 0, accp_all_bcast = 0, accp_all_mcast = 0;
2275 u8 unmatched_unicast = 0;
2277 if (filters & BNX2X_ACCEPT_UNMATCHED_UCAST)
2278 unmatched_unicast = 1;
2280 if (filters & BNX2X_PROMISCUOUS_MODE) {
2281 /* promiscuous - accept all, drop none */
2282 drop_all_ucast = drop_all_bcast = drop_all_mcast = 0;
2283 accp_all_ucast = accp_all_bcast = accp_all_mcast = 1;
2284 if (IS_MF_SI(bp)) {
2286 * SI mode is defined to accept only unmatched
2287 * packets in promiscuous mode
2289 unmatched_unicast = 1;
2290 accp_all_ucast = 0;
2293 if (filters & BNX2X_ACCEPT_UNICAST) {
2294 /* accept matched ucast */
2295 drop_all_ucast = 0;
2297 if (filters & BNX2X_ACCEPT_MULTICAST) {
2298 /* accept matched mcast */
2299 drop_all_mcast = 0;
2300 if (IS_MF_SI(bp))
2301 /* since mcast addresses won't arrive with ovlan,
2302 * fw needs to accept all of them in
2303 * switch-independent mode */
2304 accp_all_mcast = 1;
2306 if (filters & BNX2X_ACCEPT_ALL_UNICAST) {
2307 /* accept all ucast */
2308 drop_all_ucast = 0;
2309 accp_all_ucast = 1;
2311 if (filters & BNX2X_ACCEPT_ALL_MULTICAST) {
2312 /* accept all mcast */
2313 drop_all_mcast = 0;
2314 accp_all_mcast = 1;
2316 if (filters & BNX2X_ACCEPT_BROADCAST) {
2317 /* accept (all) bcast */
2318 drop_all_bcast = 0;
2319 accp_all_bcast = 1;
2322 bp->mac_filters.ucast_drop_all = drop_all_ucast ?
2323 bp->mac_filters.ucast_drop_all | mask :
2324 bp->mac_filters.ucast_drop_all & ~mask;
2326 bp->mac_filters.mcast_drop_all = drop_all_mcast ?
2327 bp->mac_filters.mcast_drop_all | mask :
2328 bp->mac_filters.mcast_drop_all & ~mask;
2330 bp->mac_filters.bcast_drop_all = drop_all_bcast ?
2331 bp->mac_filters.bcast_drop_all | mask :
2332 bp->mac_filters.bcast_drop_all & ~mask;
2334 bp->mac_filters.ucast_accept_all = accp_all_ucast ?
2335 bp->mac_filters.ucast_accept_all | mask :
2336 bp->mac_filters.ucast_accept_all & ~mask;
2338 bp->mac_filters.mcast_accept_all = accp_all_mcast ?
2339 bp->mac_filters.mcast_accept_all | mask :
2340 bp->mac_filters.mcast_accept_all & ~mask;
2342 bp->mac_filters.bcast_accept_all = accp_all_bcast ?
2343 bp->mac_filters.bcast_accept_all | mask :
2344 bp->mac_filters.bcast_accept_all & ~mask;
2346 bp->mac_filters.unmatched_unicast = unmatched_unicast ?
2347 bp->mac_filters.unmatched_unicast | mask :
2348 bp->mac_filters.unmatched_unicast & ~mask;
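/*
 * Illustrative example: for cl_id = 3, mask = 0x8.  A request of
 * BNX2X_ACCEPT_NONE leaves all drop_all_* flags set, so bit 3 is
 * OR-ed into each *_drop_all word and cleared from each *_accept_all
 * word; only this client's filtering changes while the bits of the
 * other clients are preserved.
 */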
2351 static void bnx2x_func_init(struct bnx2x *bp, struct bnx2x_func_init_params *p)
2353 struct tstorm_eth_function_common_config tcfg = {0};
2354 u16 rss_flgs;
2356 /* tpa */
2357 if (p->func_flgs & FUNC_FLG_TPA)
2358 tcfg.config_flags |=
2359 TSTORM_ETH_FUNCTION_COMMON_CONFIG_ENABLE_TPA;
2361 /* set rss flags */
2362 rss_flgs = (p->rss->mode <<
2363 TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_MODE_SHIFT);
2365 if (p->rss->cap & RSS_IPV4_CAP)
2366 rss_flgs |= RSS_IPV4_CAP_MASK;
2367 if (p->rss->cap & RSS_IPV4_TCP_CAP)
2368 rss_flgs |= RSS_IPV4_TCP_CAP_MASK;
2369 if (p->rss->cap & RSS_IPV6_CAP)
2370 rss_flgs |= RSS_IPV6_CAP_MASK;
2371 if (p->rss->cap & RSS_IPV6_TCP_CAP)
2372 rss_flgs |= RSS_IPV6_TCP_CAP_MASK;
2374 tcfg.config_flags |= rss_flgs;
2375 tcfg.rss_result_mask = p->rss->result_mask;
2377 storm_memset_func_cfg(bp, &tcfg, p->func_id);
2379 /* Enable the function in the FW */
2380 storm_memset_vf_to_pf(bp, p->func_id, p->pf_id);
2381 storm_memset_func_en(bp, p->func_id, 1);
2383 /* statistics */
2384 if (p->func_flgs & FUNC_FLG_STATS) {
2385 struct stats_indication_flags stats_flags = {0};
2386 stats_flags.collect_eth = 1;
2388 storm_memset_xstats_flags(bp, &stats_flags, p->func_id);
2389 storm_memset_xstats_addr(bp, p->fw_stat_map, p->func_id);
2391 storm_memset_tstats_flags(bp, &stats_flags, p->func_id);
2392 storm_memset_tstats_addr(bp, p->fw_stat_map, p->func_id);
2394 storm_memset_ustats_flags(bp, &stats_flags, p->func_id);
2395 storm_memset_ustats_addr(bp, p->fw_stat_map, p->func_id);
2397 storm_memset_cstats_flags(bp, &stats_flags, p->func_id);
2398 storm_memset_cstats_addr(bp, p->fw_stat_map, p->func_id);
2401 /* spq */
2402 if (p->func_flgs & FUNC_FLG_SPQ) {
2403 storm_memset_spq_addr(bp, p->spq_map, p->func_id);
2404 REG_WR(bp, XSEM_REG_FAST_MEMORY +
2405 XSTORM_SPQ_PROD_OFFSET(p->func_id), p->spq_prod);
2409 static inline u16 bnx2x_get_cl_flags(struct bnx2x *bp,
2410 struct bnx2x_fastpath *fp)
2412 u16 flags = 0;
2414 /* calculate queue flags */
2415 flags |= QUEUE_FLG_CACHE_ALIGN;
2416 flags |= QUEUE_FLG_HC;
2417 flags |= IS_MF_SD(bp) ? QUEUE_FLG_OV : 0;
2419 flags |= QUEUE_FLG_VLAN;
2420 DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
2422 if (!fp->disable_tpa)
2423 flags |= QUEUE_FLG_TPA;
2425 flags = stat_counter_valid(bp, fp) ?
2426 (flags | QUEUE_FLG_STATS) : (flags & ~QUEUE_FLG_STATS);
2428 return flags;
2431 static void bnx2x_pf_rx_cl_prep(struct bnx2x *bp,
2432 struct bnx2x_fastpath *fp, struct rxq_pause_params *pause,
2433 struct bnx2x_rxq_init_params *rxq_init)
2435 u16 max_sge = 0;
2436 u16 sge_sz = 0;
2437 u16 tpa_agg_size = 0;
2439 /* calculate queue flags */
2440 u16 flags = bnx2x_get_cl_flags(bp, fp);
2442 if (!fp->disable_tpa) {
2443 pause->sge_th_hi = 250;
2444 pause->sge_th_lo = 150;
2445 tpa_agg_size = min_t(u32,
2446 (min_t(u32, 8, MAX_SKB_FRAGS) *
2447 SGE_PAGE_SIZE * PAGES_PER_SGE), 0xffff);
2448 max_sge = SGE_PAGE_ALIGN(bp->dev->mtu) >>
2449 SGE_PAGE_SHIFT;
2450 max_sge = ((max_sge + PAGES_PER_SGE - 1) &
2451 (~(PAGES_PER_SGE-1))) >> PAGES_PER_SGE_SHIFT;
2452 sge_sz = (u16)min_t(u32, SGE_PAGE_SIZE * PAGES_PER_SGE,
2453 0xffff);
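/*
 * Illustrative sizing example for the TPA parameters above (assuming
 * SGE_PAGE_SIZE = 4096 and PAGES_PER_SGE = 2; the real values come
 * from the headers): with an MTU of 9000, SGE_PAGE_ALIGN(9000) =
 * 12288, i.e. 3 pages, rounded up to 4 pages = 2 SGEs (max_sge),
 * sge_sz = 8192 and tpa_agg_size = min(8 * 8192, 0xffff) = 0xffff.
 */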
2456 /* pause - not for e1 */
2457 if (!CHIP_IS_E1(bp)) {
2458 pause->bd_th_hi = 350;
2459 pause->bd_th_lo = 250;
2460 pause->rcq_th_hi = 350;
2461 pause->rcq_th_lo = 250;
2462 pause->sge_th_hi = 0;
2463 pause->sge_th_lo = 0;
2464 pause->pri_map = 1;
2467 /* rxq setup */
2468 rxq_init->flags = flags;
2469 rxq_init->cxt = &bp->context.vcxt[fp->cid].eth;
2470 rxq_init->dscr_map = fp->rx_desc_mapping;
2471 rxq_init->sge_map = fp->rx_sge_mapping;
2472 rxq_init->rcq_map = fp->rx_comp_mapping;
2473 rxq_init->rcq_np_map = fp->rx_comp_mapping + BCM_PAGE_SIZE;
2474 rxq_init->mtu = bp->dev->mtu;
2475 rxq_init->buf_sz = bp->rx_buf_size;
2476 rxq_init->cl_qzone_id = fp->cl_qzone_id;
2477 rxq_init->cl_id = fp->cl_id;
2478 rxq_init->spcl_id = fp->cl_id;
2479 rxq_init->stat_id = fp->cl_id;
2480 rxq_init->tpa_agg_sz = tpa_agg_size;
2481 rxq_init->sge_buf_sz = sge_sz;
2482 rxq_init->max_sges_pkt = max_sge;
2483 rxq_init->cache_line_log = BNX2X_RX_ALIGN_SHIFT;
2484 rxq_init->fw_sb_id = fp->fw_sb_id;
2486 if (IS_FCOE_FP(fp))
2487 rxq_init->sb_cq_index = HC_SP_INDEX_ETH_FCOE_RX_CQ_CONS;
2488 else
2489 rxq_init->sb_cq_index = U_SB_ETH_RX_CQ_INDEX;
2491 rxq_init->cid = HW_CID(bp, fp->cid);
2493 rxq_init->hc_rate = bp->rx_ticks ? (1000000 / bp->rx_ticks) : 0;
2496 static void bnx2x_pf_tx_cl_prep(struct bnx2x *bp,
2497 struct bnx2x_fastpath *fp, struct bnx2x_txq_init_params *txq_init)
2499 u16 flags = bnx2x_get_cl_flags(bp, fp);
2501 txq_init->flags = flags;
2502 txq_init->cxt = &bp->context.vcxt[fp->cid].eth;
2503 txq_init->dscr_map = fp->tx_desc_mapping;
2504 txq_init->stat_id = fp->cl_id;
2505 txq_init->cid = HW_CID(bp, fp->cid);
2506 txq_init->sb_cq_index = C_SB_ETH_TX_CQ_INDEX;
2507 txq_init->traffic_type = LLFC_TRAFFIC_TYPE_NW;
2508 txq_init->fw_sb_id = fp->fw_sb_id;
2510 if (IS_FCOE_FP(fp)) {
2511 txq_init->sb_cq_index = HC_SP_INDEX_ETH_FCOE_TX_CQ_CONS;
2512 txq_init->traffic_type = LLFC_TRAFFIC_TYPE_FCOE;
2515 txq_init->hc_rate = bp->tx_ticks ? (1000000 / bp->tx_ticks) : 0;
2518 static void bnx2x_pf_init(struct bnx2x *bp)
2520 struct bnx2x_func_init_params func_init = {0};
2521 struct bnx2x_rss_params rss = {0};
2522 struct event_ring_data eq_data = { {0} };
2523 u16 flags;
2525 /* pf specific setups */
2526 if (!CHIP_IS_E1(bp))
2527 storm_memset_ov(bp, bp->mf_ov, BP_FUNC(bp));
2529 if (CHIP_IS_E2(bp)) {
2530 /* reset IGU PF statistics: MSIX + ATTN */
2531 /* PF */
2532 REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT +
2533 BNX2X_IGU_STAS_MSG_VF_CNT*4 +
2534 (CHIP_MODE_IS_4_PORT(bp) ?
2535 BP_FUNC(bp) : BP_VN(bp))*4, 0);
2536 /* ATTN */
2537 REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT +
2538 BNX2X_IGU_STAS_MSG_VF_CNT*4 +
2539 BNX2X_IGU_STAS_MSG_PF_CNT*4 +
2540 (CHIP_MODE_IS_4_PORT(bp) ?
2541 BP_FUNC(bp) : BP_VN(bp))*4, 0);
2544 /* function setup flags */
2545 flags = (FUNC_FLG_STATS | FUNC_FLG_LEADING | FUNC_FLG_SPQ);
2547 if (CHIP_IS_E1x(bp))
2548 flags |= (bp->flags & TPA_ENABLE_FLAG) ? FUNC_FLG_TPA : 0;
2549 else
2550 flags |= FUNC_FLG_TPA;
2552 /* function setup */
2555 * Although RSS is meaningless when there is a single HW queue we
2556 * still need it enabled in order for the HW Rx hash to be generated.
2558 rss.cap = (RSS_IPV4_CAP | RSS_IPV4_TCP_CAP |
2559 RSS_IPV6_CAP | RSS_IPV6_TCP_CAP);
2560 rss.mode = bp->multi_mode;
2561 rss.result_mask = MULTI_MASK;
2562 func_init.rss = &rss;
2564 func_init.func_flgs = flags;
2565 func_init.pf_id = BP_FUNC(bp);
2566 func_init.func_id = BP_FUNC(bp);
2567 func_init.fw_stat_map = bnx2x_sp_mapping(bp, fw_stats);
2568 func_init.spq_map = bp->spq_mapping;
2569 func_init.spq_prod = bp->spq_prod_idx;
2571 bnx2x_func_init(bp, &func_init);
2573 memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));
2576 Congestion management values depend on the link rate.
2577 There is no active link, so the initial link rate is set to 10 Gbps.
2578 When the link comes up, the congestion management values are
2579 re-calculated according to the actual link rate.
2581 bp->link_vars.line_speed = SPEED_10000;
2582 bnx2x_cmng_fns_init(bp, true, bnx2x_get_cmng_fns_mode(bp));
2584 /* Only the PMF sets the HW */
2585 if (bp->port.pmf)
2586 storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
2588 /* no rx until link is up */
2589 bp->rx_mode = BNX2X_RX_MODE_NONE;
2590 bnx2x_set_storm_rx_mode(bp);
2592 /* init Event Queue */
2593 eq_data.base_addr.hi = U64_HI(bp->eq_mapping);
2594 eq_data.base_addr.lo = U64_LO(bp->eq_mapping);
2595 eq_data.producer = bp->eq_prod;
2596 eq_data.index_id = HC_SP_INDEX_EQ_CONS;
2597 eq_data.sb_id = DEF_SB_ID;
2598 storm_memset_eq_data(bp, &eq_data, BP_FUNC(bp));
2602 static void bnx2x_e1h_disable(struct bnx2x *bp)
2604 int port = BP_PORT(bp);
2606 netif_tx_disable(bp->dev);
2608 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
2610 netif_carrier_off(bp->dev);
2613 static void bnx2x_e1h_enable(struct bnx2x *bp)
2615 int port = BP_PORT(bp);
2617 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
2619 /* Tx queues should only be re-enabled */
2620 netif_tx_wake_all_queues(bp->dev);
2623 * Should not call netif_carrier_on since it will be called if the link
2624 * is up when checking for link state
2628 /* called due to MCP event (on pmf):
2629 * reread new bandwidth configuration
2630 * configure FW
2631 * notify other functions about the change
2633 static inline void bnx2x_config_mf_bw(struct bnx2x *bp)
2635 if (bp->link_vars.link_up) {
2636 bnx2x_cmng_fns_init(bp, true, CMNG_FNS_MINMAX);
2637 bnx2x_link_sync_notify(bp);
2639 storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
2642 static inline void bnx2x_set_mf_bw(struct bnx2x *bp)
2644 bnx2x_config_mf_bw(bp);
2645 bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW_ACK, 0);
2648 static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event)
2650 DP(BNX2X_MSG_MCP, "dcc_event 0x%x\n", dcc_event);
2652 if (dcc_event & DRV_STATUS_DCC_DISABLE_ENABLE_PF) {
2655 * This is the only place besides the function initialization
2656 * where the bp->flags can change so it is done without any
2657 * locks
2659 if (bp->mf_config[BP_VN(bp)] & FUNC_MF_CFG_FUNC_DISABLED) {
2660 DP(NETIF_MSG_IFDOWN, "mf_cfg function disabled\n");
2661 bp->flags |= MF_FUNC_DIS;
2663 bnx2x_e1h_disable(bp);
2664 } else {
2665 DP(NETIF_MSG_IFUP, "mf_cfg function enabled\n");
2666 bp->flags &= ~MF_FUNC_DIS;
2668 bnx2x_e1h_enable(bp);
2670 dcc_event &= ~DRV_STATUS_DCC_DISABLE_ENABLE_PF;
2672 if (dcc_event & DRV_STATUS_DCC_BANDWIDTH_ALLOCATION) {
2673 bnx2x_config_mf_bw(bp);
2674 dcc_event &= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION;
2677 /* Report results to MCP */
2678 if (dcc_event)
2679 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_FAILURE, 0);
2680 else
2681 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK, 0);
2684 /* must be called under the spq lock */
2685 static inline struct eth_spe *bnx2x_sp_get_next(struct bnx2x *bp)
2687 struct eth_spe *next_spe = bp->spq_prod_bd;
2689 if (bp->spq_prod_bd == bp->spq_last_bd) {
2690 bp->spq_prod_bd = bp->spq;
2691 bp->spq_prod_idx = 0;
2692 DP(NETIF_MSG_TIMER, "end of spq\n");
2693 } else {
2694 bp->spq_prod_bd++;
2695 bp->spq_prod_idx++;
2697 return next_spe;
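/*
 * bnx2x_sp_get_next() above hands out the current producer BD and
 * advances the producer; when it reaches spq_last_bd it wraps back
 * to the first BD of the ring and restarts spq_prod_idx from 0.
 */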
2700 /* must be called under the spq lock */
2701 static inline void bnx2x_sp_prod_update(struct bnx2x *bp)
2703 int func = BP_FUNC(bp);
2705 /* Make sure that BD data is updated before writing the producer */
2706 wmb();
2708 REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
2709 bp->spq_prod_idx);
2710 mmiowb();
2713 /* the slow path queue is odd since completions arrive on the fastpath ring */
2714 int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
2715 u32 data_hi, u32 data_lo, int common)
2717 struct eth_spe *spe;
2718 u16 type;
2720 #ifdef BNX2X_STOP_ON_ERROR
2721 if (unlikely(bp->panic))
2722 return -EIO;
2723 #endif
2725 spin_lock_bh(&bp->spq_lock);
2727 if (!atomic_read(&bp->spq_left)) {
2728 BNX2X_ERR("BUG! SPQ ring full!\n");
2729 spin_unlock_bh(&bp->spq_lock);
2730 bnx2x_panic();
2731 return -EBUSY;
2734 spe = bnx2x_sp_get_next(bp);
2736 /* CID needs the port number to be encoded in it */
2737 spe->hdr.conn_and_cmd_data =
2738 cpu_to_le32((command << SPE_HDR_CMD_ID_SHIFT) |
2739 HW_CID(bp, cid));
2741 if (common)
2742 /* Common ramrods:
2743 * FUNC_START, FUNC_STOP, CFC_DEL, STATS, SET_MAC
2744 * TRAFFIC_STOP, TRAFFIC_START
2746 type = (NONE_CONNECTION_TYPE << SPE_HDR_CONN_TYPE_SHIFT)
2747 & SPE_HDR_CONN_TYPE;
2748 else
2749 /* ETH ramrods: SETUP, HALT */
2750 type = (ETH_CONNECTION_TYPE << SPE_HDR_CONN_TYPE_SHIFT)
2751 & SPE_HDR_CONN_TYPE;
2753 type |= ((BP_FUNC(bp) << SPE_HDR_FUNCTION_ID_SHIFT) &
2754 SPE_HDR_FUNCTION_ID);
2756 spe->hdr.type = cpu_to_le16(type);
2758 spe->data.update_data_addr.hi = cpu_to_le32(data_hi);
2759 spe->data.update_data_addr.lo = cpu_to_le32(data_lo);
2761 /* stats ramrod has its own slot on the spq */
2762 if (command != RAMROD_CMD_ID_COMMON_STAT_QUERY)
2763 /* It's ok if the actual decrement is issued towards the memory
2764 * somewhere between the spin_lock and spin_unlock. Thus no
2765 * further explicit memory barrier is needed.
2767 atomic_dec(&bp->spq_left);
2769 DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
2770 "SPQE[%x] (%x:%x) command %d hw_cid %x data (%x:%x) "
2771 "type(0x%x) left %x\n",
2772 bp->spq_prod_idx, (u32)U64_HI(bp->spq_mapping),
2773 (u32)(U64_LO(bp->spq_mapping) +
2774 (void *)bp->spq_prod_bd - (void *)bp->spq), command,
2775 HW_CID(bp, cid), data_hi, data_lo, type, atomic_read(&bp->spq_left));
2777 bnx2x_sp_prod_update(bp);
2778 spin_unlock_bh(&bp->spq_lock);
2779 return 0;
2782 /* acquire split MCP access lock register */
2783 static int bnx2x_acquire_alr(struct bnx2x *bp)
2785 u32 j, val;
2786 int rc = 0;
2788 might_sleep();
2789 for (j = 0; j < 1000; j++) {
2790 val = (1UL << 31);
2791 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2792 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
2793 if (val & (1L << 31))
2794 break;
2796 msleep(5);
2798 if (!(val & (1L << 31))) {
2799 BNX2X_ERR("Cannot acquire MCP access lock register\n");
2800 rc = -EBUSY;
2803 return rc;
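/*
 * The loop above polls for the lock for at most 1000 * 5 ms, i.e.
 * about 5 seconds; bit 31 of the register at GRCBASE_MCP + 0x9c reads
 * back as set only once this driver owns the split MCP access lock.
 */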
2806 /* release split MCP access lock register */
2807 static void bnx2x_release_alr(struct bnx2x *bp)
2809 REG_WR(bp, GRCBASE_MCP + 0x9c, 0);
2812 #define BNX2X_DEF_SB_ATT_IDX 0x0001
2813 #define BNX2X_DEF_SB_IDX 0x0002
2815 static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
2817 struct host_sp_status_block *def_sb = bp->def_status_blk;
2818 u16 rc = 0;
2820 barrier(); /* status block is written to by the chip */
2821 if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
2822 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
2823 rc |= BNX2X_DEF_SB_ATT_IDX;
2826 if (bp->def_idx != def_sb->sp_sb.running_index) {
2827 bp->def_idx = def_sb->sp_sb.running_index;
2828 rc |= BNX2X_DEF_SB_IDX;
2831 /* Do not reorder: index reads should complete before handling */
2832 barrier();
2833 return rc;
2837 * slow path service functions
2840 static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
2842 int port = BP_PORT(bp);
2843 u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2844 MISC_REG_AEU_MASK_ATTN_FUNC_0;
2845 u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
2846 NIG_REG_MASK_INTERRUPT_PORT0;
2847 u32 aeu_mask;
2848 u32 nig_mask = 0;
2849 u32 reg_addr;
2851 if (bp->attn_state & asserted)
2852 BNX2X_ERR("IGU ERROR\n");
2854 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2855 aeu_mask = REG_RD(bp, aeu_addr);
2857 DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n",
2858 aeu_mask, asserted);
2859 aeu_mask &= ~(asserted & 0x3ff);
2860 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
2862 REG_WR(bp, aeu_addr, aeu_mask);
2863 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2865 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2866 bp->attn_state |= asserted;
2867 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2869 if (asserted & ATTN_HARD_WIRED_MASK) {
2870 if (asserted & ATTN_NIG_FOR_FUNC) {
2872 bnx2x_acquire_phy_lock(bp);
2874 /* save nig interrupt mask */
2875 nig_mask = REG_RD(bp, nig_int_mask_addr);
2876 REG_WR(bp, nig_int_mask_addr, 0);
2878 bnx2x_link_attn(bp);
2880 /* handle unicore attn? */
2882 if (asserted & ATTN_SW_TIMER_4_FUNC)
2883 DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
2885 if (asserted & GPIO_2_FUNC)
2886 DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
2888 if (asserted & GPIO_3_FUNC)
2889 DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
2891 if (asserted & GPIO_4_FUNC)
2892 DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
2894 if (port == 0) {
2895 if (asserted & ATTN_GENERAL_ATTN_1) {
2896 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
2897 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
2899 if (asserted & ATTN_GENERAL_ATTN_2) {
2900 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
2901 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
2903 if (asserted & ATTN_GENERAL_ATTN_3) {
2904 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
2905 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
2907 } else {
2908 if (asserted & ATTN_GENERAL_ATTN_4) {
2909 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
2910 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
2912 if (asserted & ATTN_GENERAL_ATTN_5) {
2913 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
2914 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
2916 if (asserted & ATTN_GENERAL_ATTN_6) {
2917 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
2918 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
2922 } /* if hardwired */
2924 if (bp->common.int_block == INT_BLOCK_HC)
2925 reg_addr = (HC_REG_COMMAND_REG + port*32 +
2926 COMMAND_REG_ATTN_BITS_SET);
2927 else
2928 reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_SET_UPPER*8);
2930 DP(NETIF_MSG_HW, "about to mask 0x%08x at %s addr 0x%x\n", asserted,
2931 (bp->common.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr);
2932 REG_WR(bp, reg_addr, asserted);
2934 /* now set back the mask */
2935 if (asserted & ATTN_NIG_FOR_FUNC) {
2936 REG_WR(bp, nig_int_mask_addr, nig_mask);
2937 bnx2x_release_phy_lock(bp);
2941 static inline void bnx2x_fan_failure(struct bnx2x *bp)
2943 int port = BP_PORT(bp);
2944 u32 ext_phy_config;
2945 /* mark the failure */
2946 ext_phy_config =
2947 SHMEM_RD(bp,
2948 dev_info.port_hw_config[port].external_phy_config);
2950 ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
2951 ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
2952 SHMEM_WR(bp, dev_info.port_hw_config[port].external_phy_config,
2953 ext_phy_config);
2955 /* log the failure */
2956 netdev_err(bp->dev, "Fan Failure on Network Controller has caused"
2957 " the driver to shutdown the card to prevent permanent"
2958 " damage. Please contact OEM Support for assistance\n");
2961 static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
2963 int port = BP_PORT(bp);
2964 int reg_offset;
2965 u32 val;
2967 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
2968 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
2970 if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
2972 val = REG_RD(bp, reg_offset);
2973 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
2974 REG_WR(bp, reg_offset, val);
2976 BNX2X_ERR("SPIO5 hw attention\n");
2978 /* Fan failure attention */
2979 bnx2x_hw_reset_phy(&bp->link_params);
2980 bnx2x_fan_failure(bp);
2983 if (attn & (AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 |
2984 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1)) {
2985 bnx2x_acquire_phy_lock(bp);
2986 bnx2x_handle_module_detect_int(&bp->link_params);
2987 bnx2x_release_phy_lock(bp);
2990 if (attn & HW_INTERRUT_ASSERT_SET_0) {
2992 val = REG_RD(bp, reg_offset);
2993 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
2994 REG_WR(bp, reg_offset, val);
2996 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
2997 (u32)(attn & HW_INTERRUT_ASSERT_SET_0));
2998 bnx2x_panic();
3002 static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
3004 u32 val;
3006 if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {
3008 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
3009 BNX2X_ERR("DB hw attention 0x%x\n", val);
3010 /* DORQ discard attention */
3011 if (val & 0x2)
3012 BNX2X_ERR("FATAL error from DORQ\n");
3015 if (attn & HW_INTERRUT_ASSERT_SET_1) {
3017 int port = BP_PORT(bp);
3018 int reg_offset;
3020 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
3021 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
3023 val = REG_RD(bp, reg_offset);
3024 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
3025 REG_WR(bp, reg_offset, val);
3027 BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
3028 (u32)(attn & HW_INTERRUT_ASSERT_SET_1));
3029 bnx2x_panic();
3033 static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
3035 u32 val;
3037 if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
3039 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
3040 BNX2X_ERR("CFC hw attention 0x%x\n", val);
3041 /* CFC error attention */
3042 if (val & 0x2)
3043 BNX2X_ERR("FATAL error from CFC\n");
3046 if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
3048 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
3049 BNX2X_ERR("PXP hw attention 0x%x\n", val);
3050 /* RQ_USDMDP_FIFO_OVERFLOW */
3051 if (val & 0x18000)
3052 BNX2X_ERR("FATAL error from PXP\n");
3053 if (CHIP_IS_E2(bp)) {
3054 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_1);
3055 BNX2X_ERR("PXP hw attention-1 0x%x\n", val);
3059 if (attn & HW_INTERRUT_ASSERT_SET_2) {
3061 int port = BP_PORT(bp);
3062 int reg_offset;
3064 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
3065 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
3067 val = REG_RD(bp, reg_offset);
3068 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
3069 REG_WR(bp, reg_offset, val);
3071 BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
3072 (u32)(attn & HW_INTERRUT_ASSERT_SET_2));
3073 bnx2x_panic();
3077 static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
3079 u32 val;
3081 if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
3083 if (attn & BNX2X_PMF_LINK_ASSERT) {
3084 int func = BP_FUNC(bp);
3086 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
3087 bp->mf_config[BP_VN(bp)] = MF_CFG_RD(bp,
3088 func_mf_config[BP_ABS_FUNC(bp)].config);
3089 val = SHMEM_RD(bp,
3090 func_mb[BP_FW_MB_IDX(bp)].drv_status);
3091 if (val & DRV_STATUS_DCC_EVENT_MASK)
3092 bnx2x_dcc_event(bp,
3093 (val & DRV_STATUS_DCC_EVENT_MASK));
3095 if (val & DRV_STATUS_SET_MF_BW)
3096 bnx2x_set_mf_bw(bp);
3098 bnx2x__link_status_update(bp);
3099 if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF))
3100 bnx2x_pmf_update(bp);
3102 if (bp->port.pmf &&
3103 (val & DRV_STATUS_DCBX_NEGOTIATION_RESULTS) &&
3104 bp->dcbx_enabled > 0)
3105 /* start dcbx state machine */
3106 bnx2x_dcbx_set_params(bp,
3107 BNX2X_DCBX_STATE_NEG_RECEIVED);
3108 } else if (attn & BNX2X_MC_ASSERT_BITS) {
3110 BNX2X_ERR("MC assert!\n");
3111 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
3112 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
3113 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
3114 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
3115 bnx2x_panic();
3117 } else if (attn & BNX2X_MCP_ASSERT) {
3119 BNX2X_ERR("MCP assert!\n");
3120 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
3121 bnx2x_fw_dump(bp);
3123 } else
3124 BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
3127 if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
3128 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
3129 if (attn & BNX2X_GRC_TIMEOUT) {
3130 val = CHIP_IS_E1(bp) ? 0 :
3131 REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN);
3132 BNX2X_ERR("GRC time-out 0x%08x\n", val);
3134 if (attn & BNX2X_GRC_RSV) {
3135 val = CHIP_IS_E1(bp) ? 0 :
3136 REG_RD(bp, MISC_REG_GRC_RSV_ATTN);
3137 BNX2X_ERR("GRC reserved 0x%08x\n", val);
3139 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
3143 #define BNX2X_MISC_GEN_REG MISC_REG_GENERIC_POR_1
3144 #define LOAD_COUNTER_BITS 16 /* Number of bits for load counter */
3145 #define LOAD_COUNTER_MASK (((u32)0x1 << LOAD_COUNTER_BITS) - 1)
3146 #define RESET_DONE_FLAG_MASK (~LOAD_COUNTER_MASK)
3147 #define RESET_DONE_FLAG_SHIFT LOAD_COUNTER_BITS
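/*
 * Resulting layout of BNX2X_MISC_GEN_REG as used below: bits [15:0]
 * hold the driver load counter maintained by bnx2x_inc_load_cnt() and
 * bnx2x_dec_load_cnt(), and bit 16 is set by
 * bnx2x_set_reset_in_progress() while a recovery reset is running and
 * cleared again by bnx2x_set_reset_done().
 */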
3150 * should be run under rtnl lock
3152 static inline void bnx2x_set_reset_done(struct bnx2x *bp)
3154 u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3155 val &= ~(1 << RESET_DONE_FLAG_SHIFT);
3156 REG_WR(bp, BNX2X_MISC_GEN_REG, val);
3157 barrier();
3158 mmiowb();
3162 * should be run under rtnl lock
3164 static inline void bnx2x_set_reset_in_progress(struct bnx2x *bp)
3166 u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3167 val |= (1 << 16);
3168 REG_WR(bp, BNX2X_MISC_GEN_REG, val);
3169 barrier();
3170 mmiowb();
3174 * should be run under rtnl lock
3176 bool bnx2x_reset_is_done(struct bnx2x *bp)
3178 u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3179 DP(NETIF_MSG_HW, "GEN_REG_VAL=0x%08x\n", val);
3180 return (val & RESET_DONE_FLAG_MASK) ? false : true;
3184 * should be run under rtnl lock
3186 inline void bnx2x_inc_load_cnt(struct bnx2x *bp)
3188 u32 val1, val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3190 DP(NETIF_MSG_HW, "Old GEN_REG_VAL=0x%08x\n", val);
3192 val1 = ((val & LOAD_COUNTER_MASK) + 1) & LOAD_COUNTER_MASK;
3193 REG_WR(bp, BNX2X_MISC_GEN_REG, (val & RESET_DONE_FLAG_MASK) | val1);
3194 barrier();
3195 mmiowb();
3199 * should be run under rtnl lock
3201 u32 bnx2x_dec_load_cnt(struct bnx2x *bp)
3203 u32 val1, val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3205 DP(NETIF_MSG_HW, "Old GEN_REG_VAL=0x%08x\n", val);
3207 val1 = ((val & LOAD_COUNTER_MASK) - 1) & LOAD_COUNTER_MASK;
3208 REG_WR(bp, BNX2X_MISC_GEN_REG, (val & RESET_DONE_FLAG_MASK) | val1);
3209 barrier();
3210 mmiowb();
3212 return val1;
3216 * should be run under rtnl lock
3218 static inline u32 bnx2x_get_load_cnt(struct bnx2x *bp)
3220 return REG_RD(bp, BNX2X_MISC_GEN_REG) & LOAD_COUNTER_MASK;
3223 static inline void bnx2x_clear_load_cnt(struct bnx2x *bp)
3225 u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3226 REG_WR(bp, BNX2X_MISC_GEN_REG, val & (~LOAD_COUNTER_MASK));
3229 static inline void _print_next_block(int idx, const char *blk)
3231 if (idx)
3232 pr_cont(", ");
3233 pr_cont("%s", blk);
3236 static inline int bnx2x_print_blocks_with_parity0(u32 sig, int par_num)
3238 int i = 0;
3239 u32 cur_bit = 0;
3240 for (i = 0; sig; i++) {
3241 cur_bit = ((u32)0x1 << i);
3242 if (sig & cur_bit) {
3243 switch (cur_bit) {
3244 case AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR:
3245 _print_next_block(par_num++, "BRB");
3246 break;
3247 case AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR:
3248 _print_next_block(par_num++, "PARSER");
3249 break;
3250 case AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR:
3251 _print_next_block(par_num++, "TSDM");
3252 break;
3253 case AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR:
3254 _print_next_block(par_num++, "SEARCHER");
3255 break;
3256 case AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR:
3257 _print_next_block(par_num++, "TSEMI");
3258 break;
3261 /* Clear the bit */
3262 sig &= ~cur_bit;
3266 return par_num;
3269 static inline int bnx2x_print_blocks_with_parity1(u32 sig, int par_num)
3271 int i = 0;
3272 u32 cur_bit = 0;
3273 for (i = 0; sig; i++) {
3274 cur_bit = ((u32)0x1 << i);
3275 if (sig & cur_bit) {
3276 switch (cur_bit) {
3277 case AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR:
3278 _print_next_block(par_num++, "PBCLIENT");
3279 break;
3280 case AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR:
3281 _print_next_block(par_num++, "QM");
3282 break;
3283 case AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR:
3284 _print_next_block(par_num++, "XSDM");
3285 break;
3286 case AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR:
3287 _print_next_block(par_num++, "XSEMI");
3288 break;
3289 case AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR:
3290 _print_next_block(par_num++, "DOORBELLQ");
3291 break;
3292 case AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR:
3293 _print_next_block(par_num++, "VAUX PCI CORE");
3294 break;
3295 case AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR:
3296 _print_next_block(par_num++, "DEBUG");
3297 break;
3298 case AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR:
3299 _print_next_block(par_num++, "USDM");
3300 break;
3301 case AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR:
3302 _print_next_block(par_num++, "USEMI");
3303 break;
3304 case AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR:
3305 _print_next_block(par_num++, "UPB");
3306 break;
3307 case AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR:
3308 _print_next_block(par_num++, "CSDM");
3309 break;
3312 /* Clear the bit */
3313 sig &= ~cur_bit;
3317 return par_num;
3320 static inline int bnx2x_print_blocks_with_parity2(u32 sig, int par_num)
3322 int i = 0;
3323 u32 cur_bit = 0;
3324 for (i = 0; sig; i++) {
3325 cur_bit = ((u32)0x1 << i);
3326 if (sig & cur_bit) {
3327 switch (cur_bit) {
3328 case AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR:
3329 _print_next_block(par_num++, "CSEMI");
3330 break;
3331 case AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR:
3332 _print_next_block(par_num++, "PXP");
3333 break;
3334 case AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR:
3335 _print_next_block(par_num++,
3336 "PXPPCICLOCKCLIENT");
3337 break;
3338 case AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR:
3339 _print_next_block(par_num++, "CFC");
3340 break;
3341 case AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR:
3342 _print_next_block(par_num++, "CDU");
3343 break;
3344 case AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR:
3345 _print_next_block(par_num++, "IGU");
3346 break;
3347 case AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR:
3348 _print_next_block(par_num++, "MISC");
3349 break;
3352 /* Clear the bit */
3353 sig &= ~cur_bit;
3357 return par_num;
3360 static inline int bnx2x_print_blocks_with_parity3(u32 sig, int par_num)
3362 int i = 0;
3363 u32 cur_bit = 0;
3364 for (i = 0; sig; i++) {
3365 cur_bit = ((u32)0x1 << i);
3366 if (sig & cur_bit) {
3367 switch (cur_bit) {
3368 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY:
3369 _print_next_block(par_num++, "MCP ROM");
3370 break;
3371 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY:
3372 _print_next_block(par_num++, "MCP UMP RX");
3373 break;
3374 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY:
3375 _print_next_block(par_num++, "MCP UMP TX");
3376 break;
3377 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY:
3378 _print_next_block(par_num++, "MCP SCPAD");
3379 break;
3382 /* Clear the bit */
3383 sig &= ~cur_bit;
3387 return par_num;
3390 static inline bool bnx2x_parity_attn(struct bnx2x *bp, u32 sig0, u32 sig1,
3391 u32 sig2, u32 sig3)
3393 if ((sig0 & HW_PRTY_ASSERT_SET_0) || (sig1 & HW_PRTY_ASSERT_SET_1) ||
3394 (sig2 & HW_PRTY_ASSERT_SET_2) || (sig3 & HW_PRTY_ASSERT_SET_3)) {
3395 int par_num = 0;
3396 DP(NETIF_MSG_HW, "Was parity error: HW block parity attention: "
3397 "[0]:0x%08x [1]:0x%08x "
3398 "[2]:0x%08x [3]:0x%08x\n",
3399 sig0 & HW_PRTY_ASSERT_SET_0,
3400 sig1 & HW_PRTY_ASSERT_SET_1,
3401 sig2 & HW_PRTY_ASSERT_SET_2,
3402 sig3 & HW_PRTY_ASSERT_SET_3);
3403 printk(KERN_ERR"%s: Parity errors detected in blocks: ",
3404 bp->dev->name);
3405 par_num = bnx2x_print_blocks_with_parity0(
3406 sig0 & HW_PRTY_ASSERT_SET_0, par_num);
3407 par_num = bnx2x_print_blocks_with_parity1(
3408 sig1 & HW_PRTY_ASSERT_SET_1, par_num);
3409 par_num = bnx2x_print_blocks_with_parity2(
3410 sig2 & HW_PRTY_ASSERT_SET_2, par_num);
3411 par_num = bnx2x_print_blocks_with_parity3(
3412 sig3 & HW_PRTY_ASSERT_SET_3, par_num);
3413 printk("\n");
3414 return true;
3415 } else
3416 return false;
3419 bool bnx2x_chk_parity_attn(struct bnx2x *bp)
3421 struct attn_route attn;
3422 int port = BP_PORT(bp);
3424 attn.sig[0] = REG_RD(bp,
3425 MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 +
3426 port*4);
3427 attn.sig[1] = REG_RD(bp,
3428 MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 +
3429 port*4);
3430 attn.sig[2] = REG_RD(bp,
3431 MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 +
3432 port*4);
3433 attn.sig[3] = REG_RD(bp,
3434 MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 +
3435 port*4);
3437 return bnx2x_parity_attn(bp, attn.sig[0], attn.sig[1], attn.sig[2],
3438 attn.sig[3]);
3442 static inline void bnx2x_attn_int_deasserted4(struct bnx2x *bp, u32 attn)
3444 u32 val;
3445 if (attn & AEU_INPUTS_ATTN_BITS_PGLUE_HW_INTERRUPT) {
3447 val = REG_RD(bp, PGLUE_B_REG_PGLUE_B_INT_STS_CLR);
3448 BNX2X_ERR("PGLUE hw attention 0x%x\n", val);
3449 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR)
3450 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3451 "ADDRESS_ERROR\n");
3452 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR)
3453 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3454 "INCORRECT_RCV_BEHAVIOR\n");
3455 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN)
3456 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3457 "WAS_ERROR_ATTN\n");
3458 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_VF_LENGTH_VIOLATION_ATTN)
3459 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3460 "VF_LENGTH_VIOLATION_ATTN\n");
3461 if (val &
3462 PGLUE_B_PGLUE_B_INT_STS_REG_VF_GRC_SPACE_VIOLATION_ATTN)
3463 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3464 "VF_GRC_SPACE_VIOLATION_ATTN\n");
3465 if (val &
3466 PGLUE_B_PGLUE_B_INT_STS_REG_VF_MSIX_BAR_VIOLATION_ATTN)
3467 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3468 "VF_MSIX_BAR_VIOLATION_ATTN\n");
3469 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_ERROR_ATTN)
3470 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3471 "TCPL_ERROR_ATTN\n");
3472 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_IN_TWO_RCBS_ATTN)
3473 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3474 "TCPL_IN_TWO_RCBS_ATTN\n");
3475 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW)
3476 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3477 "CSSNOOP_FIFO_OVERFLOW\n");
3479 if (attn & AEU_INPUTS_ATTN_BITS_ATC_HW_INTERRUPT) {
3480 val = REG_RD(bp, ATC_REG_ATC_INT_STS_CLR);
3481 BNX2X_ERR("ATC hw attention 0x%x\n", val);
3482 if (val & ATC_ATC_INT_STS_REG_ADDRESS_ERROR)
3483 BNX2X_ERR("ATC_ATC_INT_STS_REG_ADDRESS_ERROR\n");
3484 if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_TO_NOT_PEND)
3485 BNX2X_ERR("ATC_ATC_INT_STS_REG"
3486 "_ATC_TCPL_TO_NOT_PEND\n");
3487 if (val & ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS)
3488 BNX2X_ERR("ATC_ATC_INT_STS_REG_"
3489 "ATC_GPA_MULTIPLE_HITS\n");
3490 if (val & ATC_ATC_INT_STS_REG_ATC_RCPL_TO_EMPTY_CNT)
3491 BNX2X_ERR("ATC_ATC_INT_STS_REG_"
3492 "ATC_RCPL_TO_EMPTY_CNT\n");
3493 if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR)
3494 BNX2X_ERR("ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR\n");
3495 if (val & ATC_ATC_INT_STS_REG_ATC_IREQ_LESS_THAN_STU)
3496 BNX2X_ERR("ATC_ATC_INT_STS_REG_"
3497 "ATC_IREQ_LESS_THAN_STU\n");
3500 if (attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR |
3501 AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)) {
3502 BNX2X_ERR("FATAL parity attention set4 0x%x\n",
3503 (u32)(attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR |
3504 AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)));
3509 static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
3511 struct attn_route attn, *group_mask;
3512 int port = BP_PORT(bp);
3513 int index;
3514 u32 reg_addr;
3515 u32 val;
3516 u32 aeu_mask;
3518 /* need to take the HW lock because the MCP or the other port might
3519 also try to handle this event */
3520 bnx2x_acquire_alr(bp);
3522 if (CHIP_PARITY_ENABLED(bp) && bnx2x_chk_parity_attn(bp)) {
3523 bp->recovery_state = BNX2X_RECOVERY_INIT;
3524 bnx2x_set_reset_in_progress(bp);
3525 schedule_delayed_work(&bp->reset_task, 0);
3526 /* Disable HW interrupts */
3527 bnx2x_int_disable(bp);
3528 bnx2x_release_alr(bp);
3529 /* In case of parity errors don't handle attentions so that
3530 * other functions can also "see" the parity errors.
3532 return;
3535 attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
3536 attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
3537 attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
3538 attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
3539 if (CHIP_IS_E2(bp))
3540 attn.sig[4] =
3541 REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_5_FUNC_0 + port*4);
3542 else
3543 attn.sig[4] = 0;
3545 DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x %08x\n",
3546 attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3], attn.sig[4]);
3548 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
3549 if (deasserted & (1 << index)) {
3550 group_mask = &bp->attn_group[index];
3552 DP(NETIF_MSG_HW, "group[%d]: %08x %08x "
3553 "%08x %08x %08x\n",
3554 index,
3555 group_mask->sig[0], group_mask->sig[1],
3556 group_mask->sig[2], group_mask->sig[3],
3557 group_mask->sig[4]);
3559 bnx2x_attn_int_deasserted4(bp,
3560 attn.sig[4] & group_mask->sig[4]);
3561 bnx2x_attn_int_deasserted3(bp,
3562 attn.sig[3] & group_mask->sig[3]);
3563 bnx2x_attn_int_deasserted1(bp,
3564 attn.sig[1] & group_mask->sig[1]);
3565 bnx2x_attn_int_deasserted2(bp,
3566 attn.sig[2] & group_mask->sig[2]);
3567 bnx2x_attn_int_deasserted0(bp,
3568 attn.sig[0] & group_mask->sig[0]);
3572 bnx2x_release_alr(bp);
3574 if (bp->common.int_block == INT_BLOCK_HC)
3575 reg_addr = (HC_REG_COMMAND_REG + port*32 +
3576 COMMAND_REG_ATTN_BITS_CLR);
3577 else
3578 reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_CLR_UPPER*8);
3580 val = ~deasserted;
3581 DP(NETIF_MSG_HW, "about to mask 0x%08x at %s addr 0x%x\n", val,
3582 (bp->common.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr);
3583 REG_WR(bp, reg_addr, val);
3585 if (~bp->attn_state & deasserted)
3586 BNX2X_ERR("IGU ERROR\n");
3588 reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
3589 MISC_REG_AEU_MASK_ATTN_FUNC_0;
3591 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
3592 aeu_mask = REG_RD(bp, reg_addr);
3594 DP(NETIF_MSG_HW, "aeu_mask %x newly deasserted %x\n",
3595 aeu_mask, deasserted);
3596 aeu_mask |= (deasserted & 0x3ff);
3597 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
3599 REG_WR(bp, reg_addr, aeu_mask);
3600 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
3602 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
3603 bp->attn_state &= ~deasserted;
3604 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
3607 static void bnx2x_attn_int(struct bnx2x *bp)
3609 /* read local copy of bits */
3610 u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
3611 attn_bits);
3612 u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
3613 attn_bits_ack);
3614 u32 attn_state = bp->attn_state;
3616 /* look for changed bits */
3617 u32 asserted = attn_bits & ~attn_ack & ~attn_state;
3618 u32 deasserted = ~attn_bits & attn_ack & attn_state;
3620 DP(NETIF_MSG_HW,
3621 "attn_bits %x attn_ack %x asserted %x deasserted %x\n",
3622 attn_bits, attn_ack, asserted, deasserted);
3624 if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
3625 BNX2X_ERR("BAD attention state\n");
3627 /* handle bits that were raised */
3628 if (asserted)
3629 bnx2x_attn_int_asserted(bp, asserted);
3631 if (deasserted)
3632 bnx2x_attn_int_deasserted(bp, deasserted);
3635 static inline void bnx2x_update_eq_prod(struct bnx2x *bp, u16 prod)
3637 /* No memory barriers */
3638 storm_memset_eq_prod(bp, prod, BP_FUNC(bp));
3639 mmiowb(); /* keep prod updates ordered */
3642 #ifdef BCM_CNIC
3643 static int bnx2x_cnic_handle_cfc_del(struct bnx2x *bp, u32 cid,
3644 union event_ring_elem *elem)
3646 if (!bp->cnic_eth_dev.starting_cid ||
3647 cid < bp->cnic_eth_dev.starting_cid)
3648 return 1;
3650 DP(BNX2X_MSG_SP, "got delete ramrod for CNIC CID %d\n", cid);
3652 if (unlikely(elem->message.data.cfc_del_event.error)) {
3653 BNX2X_ERR("got delete ramrod for CNIC CID %d with error!\n",
3654 cid);
3655 bnx2x_panic_dump(bp);
3657 bnx2x_cnic_cfc_comp(bp, cid);
3658 return 0;
3660 #endif
3662 static void bnx2x_eq_int(struct bnx2x *bp)
3664 u16 hw_cons, sw_cons, sw_prod;
3665 union event_ring_elem *elem;
3666 u32 cid;
3667 u8 opcode;
3668 int spqe_cnt = 0;
3670 hw_cons = le16_to_cpu(*bp->eq_cons_sb);
3672 /* The hw_cons range is 1-255, 257 - the sw_cons range is 0-254, 256.
3673 * When we get the next-page element we need to adjust so the loop
3674 * condition below will be met. The next element is the size of a
3675 * regular element, hence the increment by 1
3677 if ((hw_cons & EQ_DESC_MAX_PAGE) == EQ_DESC_MAX_PAGE)
3678 hw_cons++;
3680 /* This function may never run in parallel with itself for a
3681 * specific bp, thus there is no need for a "paired" read memory
3682 * barrier here.
3684 sw_cons = bp->eq_cons;
3685 sw_prod = bp->eq_prod;
3687 DP(BNX2X_MSG_SP, "EQ: hw_cons %u sw_cons %u bp->spq_left %u\n",
3688 hw_cons, sw_cons, atomic_read(&bp->spq_left));
3690 for (; sw_cons != hw_cons;
3691 sw_prod = NEXT_EQ_IDX(sw_prod), sw_cons = NEXT_EQ_IDX(sw_cons)) {
3694 elem = &bp->eq_ring[EQ_DESC(sw_cons)];
3696 cid = SW_CID(elem->message.data.cfc_del_event.cid);
3697 opcode = elem->message.opcode;
3700 /* handle eq element */
3701 switch (opcode) {
3702 case EVENT_RING_OPCODE_STAT_QUERY:
3703 DP(NETIF_MSG_TIMER, "got statistics comp event\n");
3704 /* nothing to do with stats comp */
3705 continue;
3707 case EVENT_RING_OPCODE_CFC_DEL:
3708 /* handle according to cid range */
3710 * we may want to verify here that the bp state is
3711 * HALTING
3713 DP(NETIF_MSG_IFDOWN,
3714 "got delete ramrod for MULTI[%d]\n", cid);
3715 #ifdef BCM_CNIC
3716 if (!bnx2x_cnic_handle_cfc_del(bp, cid, elem))
3717 goto next_spqe;
3718 if (cid == BNX2X_FCOE_ETH_CID)
3719 bnx2x_fcoe(bp, state) = BNX2X_FP_STATE_CLOSED;
3720 else
3721 #endif
3722 bnx2x_fp(bp, cid, state) =
3723 BNX2X_FP_STATE_CLOSED;
3725 goto next_spqe;
3727 case EVENT_RING_OPCODE_STOP_TRAFFIC:
3728 DP(NETIF_MSG_IFUP, "got STOP TRAFFIC\n");
3729 bnx2x_dcbx_set_params(bp, BNX2X_DCBX_STATE_TX_PAUSED);
3730 goto next_spqe;
3731 case EVENT_RING_OPCODE_START_TRAFFIC:
3732 DP(NETIF_MSG_IFUP, "got START TRAFFIC\n");
3733 bnx2x_dcbx_set_params(bp, BNX2X_DCBX_STATE_TX_RELEASED);
3734 goto next_spqe;
3737 switch (opcode | bp->state) {
3738 case (EVENT_RING_OPCODE_FUNCTION_START |
3739 BNX2X_STATE_OPENING_WAIT4_PORT):
3740 DP(NETIF_MSG_IFUP, "got setup ramrod\n");
3741 bp->state = BNX2X_STATE_FUNC_STARTED;
3742 break;
3744 case (EVENT_RING_OPCODE_FUNCTION_STOP |
3745 BNX2X_STATE_CLOSING_WAIT4_HALT):
3746 DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
3747 bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
3748 break;
3750 case (EVENT_RING_OPCODE_SET_MAC | BNX2X_STATE_OPEN):
3751 case (EVENT_RING_OPCODE_SET_MAC | BNX2X_STATE_DIAG):
3752 DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
3753 bp->set_mac_pending = 0;
3754 break;
3756 case (EVENT_RING_OPCODE_SET_MAC |
3757 BNX2X_STATE_CLOSING_WAIT4_HALT):
3758 DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
3759 bp->set_mac_pending = 0;
3760 break;
3761 default:
3762 /* unknown event - log an error and continue */
3763 BNX2X_ERR("Unknown EQ event %d\n",
3764 elem->message.opcode);
3766 next_spqe:
3767 spqe_cnt++;
3768 } /* for */
3770 smp_mb__before_atomic_inc();
3771 atomic_add(spqe_cnt, &bp->spq_left);
3773 bp->eq_cons = sw_cons;
3774 bp->eq_prod = sw_prod;
3775 /* Make sure that the above memory writes have been issued */
3776 smp_wmb();
3778 /* update producer */
3779 bnx2x_update_eq_prod(bp, bp->eq_prod);
3782 static void bnx2x_sp_task(struct work_struct *work)
3784 struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
3785 u16 status;
3787 /* Return here if interrupt is disabled */
3788 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
3789 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
3790 return;
3793 status = bnx2x_update_dsb_idx(bp);
3794 /* if (status == 0) */
3795 /* BNX2X_ERR("spurious slowpath interrupt!\n"); */
3797 DP(NETIF_MSG_INTR, "got a slowpath interrupt (status 0x%x)\n", status);
3799 /* HW attentions */
3800 if (status & BNX2X_DEF_SB_ATT_IDX) {
3801 bnx2x_attn_int(bp);
3802 status &= ~BNX2X_DEF_SB_ATT_IDX;
3805 /* SP events: STAT_QUERY and others */
3806 if (status & BNX2X_DEF_SB_IDX) {
3807 #ifdef BCM_CNIC
3808 struct bnx2x_fastpath *fp = bnx2x_fcoe_fp(bp);
3810 if ((!NO_FCOE(bp)) &&
3811 (bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp)))
3812 napi_schedule(&bnx2x_fcoe(bp, napi));
3813 #endif
3814 /* Handle EQ completions */
3815 bnx2x_eq_int(bp);
3817 bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID,
3818 le16_to_cpu(bp->def_idx), IGU_INT_NOP, 1);
3820 status &= ~BNX2X_DEF_SB_IDX;
3823 if (unlikely(status))
3824 DP(NETIF_MSG_INTR, "got an unknown interrupt! (status 0x%x)\n",
3825 status);
3827 bnx2x_ack_sb(bp, bp->igu_dsb_id, ATTENTION_ID,
3828 le16_to_cpu(bp->def_att_idx), IGU_INT_ENABLE, 1);
3831 irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
3833 struct net_device *dev = dev_instance;
3834 struct bnx2x *bp = netdev_priv(dev);
3836 /* Return here if interrupt is disabled */
3837 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
3838 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
3839 return IRQ_HANDLED;
3842 bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID, 0,
3843 IGU_INT_DISABLE, 0);
3845 #ifdef BNX2X_STOP_ON_ERROR
3846 if (unlikely(bp->panic))
3847 return IRQ_HANDLED;
3848 #endif
3850 #ifdef BCM_CNIC
3852 struct cnic_ops *c_ops;
3854 rcu_read_lock();
3855 c_ops = rcu_dereference(bp->cnic_ops);
3856 if (c_ops)
3857 c_ops->cnic_handler(bp->cnic_data, NULL);
3858 rcu_read_unlock();
3860 #endif
3861 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
3863 return IRQ_HANDLED;
3866 /* end of slow path */
3868 static void bnx2x_timer(unsigned long data)
3870 struct bnx2x *bp = (struct bnx2x *) data;
3872 if (!netif_running(bp->dev))
3873 return;
3875 if (atomic_read(&bp->intr_sem) != 0)
3876 goto timer_restart;
3878 if (poll) {
3879 struct bnx2x_fastpath *fp = &bp->fp[0];
3880 int rc;
3882 bnx2x_tx_int(fp);
3883 rc = bnx2x_rx_int(fp, 1000);
3886 if (!BP_NOMCP(bp)) {
3887 int mb_idx = BP_FW_MB_IDX(bp);
3888 u32 drv_pulse;
3889 u32 mcp_pulse;
3891 ++bp->fw_drv_pulse_wr_seq;
3892 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
3893 /* TBD - add SYSTEM_TIME */
3894 drv_pulse = bp->fw_drv_pulse_wr_seq;
3895 SHMEM_WR(bp, func_mb[mb_idx].drv_pulse_mb, drv_pulse);
3897 mcp_pulse = (SHMEM_RD(bp, func_mb[mb_idx].mcp_pulse_mb) &
3898 MCP_PULSE_SEQ_MASK);
3899 /* The delta between driver pulse and mcp response
3900 * should be 1 (before mcp response) or 0 (after mcp response)
3902 if ((drv_pulse != mcp_pulse) &&
3903 (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
3904 /* someone lost a heartbeat... */
3905 BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
3906 drv_pulse, mcp_pulse);
3910 if (bp->state == BNX2X_STATE_OPEN)
3911 bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
3913 timer_restart:
3914 mod_timer(&bp->timer, jiffies + bp->current_interval);
3917 /* end of Statistics */
3919 /* nic init */
3922 * nic init service functions
3925 static inline void bnx2x_fill(struct bnx2x *bp, u32 addr, int fill, u32 len)
3927 u32 i;
3928 if (!(len%4) && !(addr%4))
3929 for (i = 0; i < len; i += 4)
3930 REG_WR(bp, addr + i, fill);
3931 else
3932 for (i = 0; i < len; i++)
3933 REG_WR8(bp, addr + i, fill);
3937 /* helper: writes FP SP data to FW - data_size in dwords */
3938 static inline void bnx2x_wr_fp_sb_data(struct bnx2x *bp,
3939 int fw_sb_id,
3940 u32 *sb_data_p,
3941 u32 data_size)
3943 int index;
3944 for (index = 0; index < data_size; index++)
3945 REG_WR(bp, BAR_CSTRORM_INTMEM +
3946 CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id) +
3947 sizeof(u32)*index,
3948 *(sb_data_p + index));
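/* helper: marks the given fastpath status block as disabled in the FW
 * (pf_id/vf_id set to HC_FUNCTION_DISABLED) and zeroes its CSTORM status
 * and sync block areas */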
3951 static inline void bnx2x_zero_fp_sb(struct bnx2x *bp, int fw_sb_id)
3953 u32 *sb_data_p;
3954 u32 data_size = 0;
3955 struct hc_status_block_data_e2 sb_data_e2;
3956 struct hc_status_block_data_e1x sb_data_e1x;
3958 /* disable the function first */
3959 if (CHIP_IS_E2(bp)) {
3960 memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2));
3961 sb_data_e2.common.p_func.pf_id = HC_FUNCTION_DISABLED;
3962 sb_data_e2.common.p_func.vf_id = HC_FUNCTION_DISABLED;
3963 sb_data_e2.common.p_func.vf_valid = false;
3964 sb_data_p = (u32 *)&sb_data_e2;
3965 data_size = sizeof(struct hc_status_block_data_e2)/sizeof(u32);
3966 } else {
3967 memset(&sb_data_e1x, 0,
3968 sizeof(struct hc_status_block_data_e1x));
3969 sb_data_e1x.common.p_func.pf_id = HC_FUNCTION_DISABLED;
3970 sb_data_e1x.common.p_func.vf_id = HC_FUNCTION_DISABLED;
3971 sb_data_e1x.common.p_func.vf_valid = false;
3972 sb_data_p = (u32 *)&sb_data_e1x;
3973 data_size = sizeof(struct hc_status_block_data_e1x)/sizeof(u32);
3975 bnx2x_wr_fp_sb_data(bp, fw_sb_id, sb_data_p, data_size);
3977 bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
3978 CSTORM_STATUS_BLOCK_OFFSET(fw_sb_id), 0,
3979 CSTORM_STATUS_BLOCK_SIZE);
3980 bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
3981 CSTORM_SYNC_BLOCK_OFFSET(fw_sb_id), 0,
3982 CSTORM_SYNC_BLOCK_SIZE);
3985 /* helper: writes SP SB data to FW */
3986 static inline void bnx2x_wr_sp_sb_data(struct bnx2x *bp,
3987 struct hc_sp_status_block_data *sp_sb_data)
3989 int func = BP_FUNC(bp);
3990 int i;
3991 for (i = 0; i < sizeof(struct hc_sp_status_block_data)/sizeof(u32); i++)
3992 REG_WR(bp, BAR_CSTRORM_INTMEM +
3993 CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func) +
3994 i*sizeof(u32),
3995 *((u32 *)sp_sb_data + i));
3998 static inline void bnx2x_zero_sp_sb(struct bnx2x *bp)
4000 int func = BP_FUNC(bp);
4001 struct hc_sp_status_block_data sp_sb_data;
4002 memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data));
4004 sp_sb_data.p_func.pf_id = HC_FUNCTION_DISABLED;
4005 sp_sb_data.p_func.vf_id = HC_FUNCTION_DISABLED;
4006 sp_sb_data.p_func.vf_valid = false;
4008 bnx2x_wr_sp_sb_data(bp, &sp_sb_data);
4010 bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
4011 CSTORM_SP_STATUS_BLOCK_OFFSET(func), 0,
4012 CSTORM_SP_STATUS_BLOCK_SIZE);
4013 bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
4014 CSTORM_SP_SYNC_BLOCK_OFFSET(func), 0,
4015 CSTORM_SP_SYNC_BLOCK_SIZE);
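/* helper: binds a non-default status block state machine to its IGU SB and
 * access segment and initializes its timer fields to their idle values
 * (timer_value 0xFF, time_to_expire 0xFFFFFFFF) */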
4020 static inline
4021 void bnx2x_setup_ndsb_state_machine(struct hc_status_block_sm *hc_sm,
4022 int igu_sb_id, int igu_seg_id)
4024 hc_sm->igu_sb_id = igu_sb_id;
4025 hc_sm->igu_seg_id = igu_seg_id;
4026 hc_sm->timer_value = 0xFF;
4027 hc_sm->time_to_expire = 0xFFFFFFFF;
4030 static void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid,
4031 u8 vf_valid, int fw_sb_id, int igu_sb_id)
4033 int igu_seg_id;
4035 struct hc_status_block_data_e2 sb_data_e2;
4036 struct hc_status_block_data_e1x sb_data_e1x;
4037 struct hc_status_block_sm *hc_sm_p;
4038 struct hc_index_data *hc_index_p;
4039 int data_size;
4040 u32 *sb_data_p;
4042 if (CHIP_INT_MODE_IS_BC(bp))
4043 igu_seg_id = HC_SEG_ACCESS_NORM;
4044 else
4045 igu_seg_id = IGU_SEG_ACCESS_NORM;
4047 bnx2x_zero_fp_sb(bp, fw_sb_id);
4049 if (CHIP_IS_E2(bp)) {
4050 memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2));
4051 sb_data_e2.common.p_func.pf_id = BP_FUNC(bp);
4052 sb_data_e2.common.p_func.vf_id = vfid;
4053 sb_data_e2.common.p_func.vf_valid = vf_valid;
4054 sb_data_e2.common.p_func.vnic_id = BP_VN(bp);
4055 sb_data_e2.common.same_igu_sb_1b = true;
4056 sb_data_e2.common.host_sb_addr.hi = U64_HI(mapping);
4057 sb_data_e2.common.host_sb_addr.lo = U64_LO(mapping);
4058 hc_sm_p = sb_data_e2.common.state_machine;
4059 hc_index_p = sb_data_e2.index_data;
4060 sb_data_p = (u32 *)&sb_data_e2;
4061 data_size = sizeof(struct hc_status_block_data_e2)/sizeof(u32);
4062 } else {
4063 memset(&sb_data_e1x, 0,
4064 sizeof(struct hc_status_block_data_e1x));
4065 sb_data_e1x.common.p_func.pf_id = BP_FUNC(bp);
4066 sb_data_e1x.common.p_func.vf_id = 0xff;
4067 sb_data_e1x.common.p_func.vf_valid = false;
4068 sb_data_e1x.common.p_func.vnic_id = BP_VN(bp);
4069 sb_data_e1x.common.same_igu_sb_1b = true;
4070 sb_data_e1x.common.host_sb_addr.hi = U64_HI(mapping);
4071 sb_data_e1x.common.host_sb_addr.lo = U64_LO(mapping);
4072 hc_sm_p = sb_data_e1x.common.state_machine;
4073 hc_index_p = sb_data_e1x.index_data;
4074 sb_data_p = (u32 *)&sb_data_e1x;
4075 data_size = sizeof(struct hc_status_block_data_e1x)/sizeof(u32);
4078 bnx2x_setup_ndsb_state_machine(&hc_sm_p[SM_RX_ID],
4079 igu_sb_id, igu_seg_id);
4080 bnx2x_setup_ndsb_state_machine(&hc_sm_p[SM_TX_ID],
4081 igu_sb_id, igu_seg_id);
4083 DP(NETIF_MSG_HW, "Init FW SB %d\n", fw_sb_id);
4085 /* write indices to HW */
4086 bnx2x_wr_fp_sb_data(bp, fw_sb_id, sb_data_p, data_size);
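/* helper: converts the requested interrupt coalescing interval from usec to
 * HC timer ticks (BNX2X_BTR usec per tick) and programs the per-index
 * timeout and disable flags; a zero interval implicitly disables coalescing */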
4089 static void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u16 fw_sb_id,
4090 u8 sb_index, u8 disable, u16 usec)
4092 int port = BP_PORT(bp);
4093 u8 ticks = usec / BNX2X_BTR;
4095 storm_memset_hc_timeout(bp, port, fw_sb_id, sb_index, ticks);
4097 disable = disable ? 1 : (usec ? 0 : 1);
4098 storm_memset_hc_disable(bp, port, fw_sb_id, sb_index, disable);
4101 static void bnx2x_update_coalesce_sb(struct bnx2x *bp, u16 fw_sb_id,
4102 u16 tx_usec, u16 rx_usec)
4104 bnx2x_update_coalesce_sb_index(bp, fw_sb_id, U_SB_ETH_RX_CQ_INDEX,
4105 false, rx_usec);
4106 bnx2x_update_coalesce_sb_index(bp, fw_sb_id, C_SB_ETH_TX_CQ_INDEX,
4107 false, tx_usec);
4110 static void bnx2x_init_def_sb(struct bnx2x *bp)
4112 struct host_sp_status_block *def_sb = bp->def_status_blk;
4113 dma_addr_t mapping = bp->def_status_blk_mapping;
4114 int igu_sp_sb_index;
4115 int igu_seg_id;
4116 int port = BP_PORT(bp);
4117 int func = BP_FUNC(bp);
4118 int reg_offset;
4119 u64 section;
4120 int index;
4121 struct hc_sp_status_block_data sp_sb_data;
4122 memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data));
4124 if (CHIP_INT_MODE_IS_BC(bp)) {
4125 igu_sp_sb_index = DEF_SB_IGU_ID;
4126 igu_seg_id = HC_SEG_ACCESS_DEF;
4127 } else {
4128 igu_sp_sb_index = bp->igu_dsb_id;
4129 igu_seg_id = IGU_SEG_ACCESS_DEF;
4132 /* ATTN */
4133 section = ((u64)mapping) + offsetof(struct host_sp_status_block,
4134 atten_status_block);
4135 def_sb->atten_status_block.status_block_id = igu_sp_sb_index;
4137 bp->attn_state = 0;
4139 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
4140 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
4141 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
4142 int sindex;
4143 /* take care of sig[0]..sig[4] */
4144 for (sindex = 0; sindex < 4; sindex++)
4145 bp->attn_group[index].sig[sindex] =
4146 REG_RD(bp, reg_offset + sindex*0x4 + 0x10*index);
4148 if (CHIP_IS_E2(bp))
4150 * enable5 is separate from the rest of the registers,
4151 * and therefore the address skip is 4
4152 * and not 16 between the different groups
4154 bp->attn_group[index].sig[4] = REG_RD(bp,
4155 reg_offset + 0x10 + 0x4*index);
4156 else
4157 bp->attn_group[index].sig[4] = 0;
4160 if (bp->common.int_block == INT_BLOCK_HC) {
4161 reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
4162 HC_REG_ATTN_MSG0_ADDR_L);
4164 REG_WR(bp, reg_offset, U64_LO(section));
4165 REG_WR(bp, reg_offset + 4, U64_HI(section));
4166 } else if (CHIP_IS_E2(bp)) {
4167 REG_WR(bp, IGU_REG_ATTN_MSG_ADDR_L, U64_LO(section));
4168 REG_WR(bp, IGU_REG_ATTN_MSG_ADDR_H, U64_HI(section));
4171 section = ((u64)mapping) + offsetof(struct host_sp_status_block,
4172 sp_sb);
4174 bnx2x_zero_sp_sb(bp);
4176 sp_sb_data.host_sb_addr.lo = U64_LO(section);
4177 sp_sb_data.host_sb_addr.hi = U64_HI(section);
4178 sp_sb_data.igu_sb_id = igu_sp_sb_index;
4179 sp_sb_data.igu_seg_id = igu_seg_id;
4180 sp_sb_data.p_func.pf_id = func;
4181 sp_sb_data.p_func.vnic_id = BP_VN(bp);
4182 sp_sb_data.p_func.vf_id = 0xff;
4184 bnx2x_wr_sp_sb_data(bp, &sp_sb_data);
4186 bp->stats_pending = 0;
4187 bp->set_mac_pending = 0;
4189 bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID, 0, IGU_INT_ENABLE, 0);
4192 void bnx2x_update_coalesce(struct bnx2x *bp)
4194 int i;
4196 for_each_eth_queue(bp, i)
4197 bnx2x_update_coalesce_sb(bp, bp->fp[i].fw_sb_id,
4198 bp->rx_ticks, bp->tx_ticks);
4201 static void bnx2x_init_sp_ring(struct bnx2x *bp)
4203 spin_lock_init(&bp->spq_lock);
4204 atomic_set(&bp->spq_left, MAX_SPQ_PENDING);
4206 bp->spq_prod_idx = 0;
4207 bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
4208 bp->spq_prod_bd = bp->spq;
4209 bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
4212 static void bnx2x_init_eq_ring(struct bnx2x *bp)
4214 int i;
4215 for (i = 1; i <= NUM_EQ_PAGES; i++) {
4216 union event_ring_elem *elem =
4217 &bp->eq_ring[EQ_DESC_CNT_PAGE * i - 1];
4219 elem->next_page.addr.hi =
4220 cpu_to_le32(U64_HI(bp->eq_mapping +
4221 BCM_PAGE_SIZE * (i % NUM_EQ_PAGES)));
4222 elem->next_page.addr.lo =
4223 cpu_to_le32(U64_LO(bp->eq_mapping +
4224 BCM_PAGE_SIZE*(i % NUM_EQ_PAGES)));
4226 bp->eq_cons = 0;
4227 bp->eq_prod = NUM_EQ_DESC;
4228 bp->eq_cons_sb = BNX2X_EQ_INDEX;
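/* helper: programs the per-function TSTORM RSS indirection table (skipped
 * when RSS is disabled); entries are spread round-robin over the ethernet
 * client ids, excluding the non-ethernet contexts */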
4231 static void bnx2x_init_ind_table(struct bnx2x *bp)
4233 int func = BP_FUNC(bp);
4234 int i;
4236 if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
4237 return;
4239 DP(NETIF_MSG_IFUP,
4240 "Initializing indirection table multi_mode %d\n", bp->multi_mode);
4241 for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
4242 REG_WR8(bp, BAR_TSTRORM_INTMEM +
4243 TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
4244 bp->fp->cl_id + (i % (bp->num_queues -
4245 NONE_ETH_CONTEXT_USE)));
4248 void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
4250 int mode = bp->rx_mode;
4251 int port = BP_PORT(bp);
4252 u16 cl_id;
4253 u32 def_q_filters = 0;
4255 /* All but management unicast packets should pass to the host as well */
4256 u32 llh_mask =
4257 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_BRCST |
4258 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_MLCST |
4259 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_VLAN |
4260 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_NO_VLAN;
4262 switch (mode) {
4263 case BNX2X_RX_MODE_NONE: /* no Rx */
4264 def_q_filters = BNX2X_ACCEPT_NONE;
4265 #ifdef BCM_CNIC
4266 if (!NO_FCOE(bp)) {
4267 cl_id = bnx2x_fcoe(bp, cl_id);
4268 bnx2x_rxq_set_mac_filters(bp, cl_id, BNX2X_ACCEPT_NONE);
4270 #endif
4271 break;
4273 case BNX2X_RX_MODE_NORMAL:
4274 def_q_filters |= BNX2X_ACCEPT_UNICAST | BNX2X_ACCEPT_BROADCAST |
4275 BNX2X_ACCEPT_MULTICAST;
4276 #ifdef BCM_CNIC
4277 cl_id = bnx2x_fcoe(bp, cl_id);
4278 bnx2x_rxq_set_mac_filters(bp, cl_id, BNX2X_ACCEPT_UNICAST |
4279 BNX2X_ACCEPT_MULTICAST);
4280 #endif
4281 break;
4283 case BNX2X_RX_MODE_ALLMULTI:
4284 def_q_filters |= BNX2X_ACCEPT_UNICAST | BNX2X_ACCEPT_BROADCAST |
4285 BNX2X_ACCEPT_ALL_MULTICAST;
4286 #ifdef BCM_CNIC
4287 cl_id = bnx2x_fcoe(bp, cl_id);
4288 bnx2x_rxq_set_mac_filters(bp, cl_id, BNX2X_ACCEPT_UNICAST |
4289 BNX2X_ACCEPT_MULTICAST);
4290 #endif
4291 break;
4293 case BNX2X_RX_MODE_PROMISC:
4294 def_q_filters |= BNX2X_PROMISCUOUS_MODE;
4295 #ifdef BCM_CNIC
4296 cl_id = bnx2x_fcoe(bp, cl_id);
4297 bnx2x_rxq_set_mac_filters(bp, cl_id, BNX2X_ACCEPT_UNICAST |
4298 BNX2X_ACCEPT_MULTICAST);
4299 #endif
4300 /* pass management unicast packets as well */
4301 llh_mask |= NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_UNCST;
4302 break;
4304 default:
4305 BNX2X_ERR("BAD rx mode (%d)\n", mode);
4306 break;
4309 cl_id = BP_L_ID(bp);
4310 bnx2x_rxq_set_mac_filters(bp, cl_id, def_q_filters);
4312 REG_WR(bp,
4313 (port ? NIG_REG_LLH1_BRB1_DRV_MASK :
4314 NIG_REG_LLH0_BRB1_DRV_MASK), llh_mask);
4316 DP(NETIF_MSG_IFUP, "rx mode %d\n"
4317 "drop_ucast 0x%x\ndrop_mcast 0x%x\ndrop_bcast 0x%x\n"
4318 "accp_ucast 0x%x\naccp_mcast 0x%x\naccp_bcast 0x%x\n"
4319 "unmatched_ucast 0x%x\n", mode,
4320 bp->mac_filters.ucast_drop_all,
4321 bp->mac_filters.mcast_drop_all,
4322 bp->mac_filters.bcast_drop_all,
4323 bp->mac_filters.ucast_accept_all,
4324 bp->mac_filters.mcast_accept_all,
4325 bp->mac_filters.bcast_accept_all,
4326 bp->mac_filters.unmatched_unicast
4329 storm_memset_mac_filters(bp, &bp->mac_filters, BP_FUNC(bp));
4332 static void bnx2x_init_internal_common(struct bnx2x *bp)
4334 int i;
4336 if (!CHIP_IS_E1(bp)) {
4338 /* xstorm needs to know whether to add ovlan to packets or not,
4339 * in switch-independent mode we'll write 0 here... */
4340 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
4341 bp->mf_mode);
4342 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
4343 bp->mf_mode);
4344 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
4345 bp->mf_mode);
4346 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
4347 bp->mf_mode);
4350 if (IS_MF_SI(bp))
4352 * In switch independent mode, the TSTORM needs to accept
4353 * packets that failed classification, since approximate match
4354 * mac addresses aren't written to NIG LLH
4356 REG_WR8(bp, BAR_TSTRORM_INTMEM +
4357 TSTORM_ACCEPT_CLASSIFY_FAILED_OFFSET, 2);
4359 /* Zero this manually as its initialization is
4360 currently missing in the initTool */
4361 for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
4362 REG_WR(bp, BAR_USTRORM_INTMEM +
4363 USTORM_AGG_DATA_OFFSET + i * 4, 0);
4364 if (CHIP_IS_E2(bp)) {
4365 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_IGU_MODE_OFFSET,
4366 CHIP_INT_MODE_IS_BC(bp) ?
4367 HC_IGU_BC_MODE : HC_IGU_NBC_MODE);
4371 static void bnx2x_init_internal_port(struct bnx2x *bp)
4373 /* port */
4374 bnx2x_dcb_init_intmem_pfc(bp);
4377 static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
4379 switch (load_code) {
4380 case FW_MSG_CODE_DRV_LOAD_COMMON:
4381 case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP:
4382 bnx2x_init_internal_common(bp);
4383 /* no break */
4385 case FW_MSG_CODE_DRV_LOAD_PORT:
4386 bnx2x_init_internal_port(bp);
4387 /* no break */
4389 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
4390 /* internal memory per function is
4391 initialized inside bnx2x_pf_init */
4392 break;
4394 default:
4395 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
4396 break;
4400 static void bnx2x_init_fp_sb(struct bnx2x *bp, int fp_idx)
4402 struct bnx2x_fastpath *fp = &bp->fp[fp_idx];
4404 fp->state = BNX2X_FP_STATE_CLOSED;
4406 fp->index = fp->cid = fp_idx;
4407 fp->cl_id = BP_L_ID(bp) + fp_idx;
4408 fp->fw_sb_id = bp->base_fw_ndsb + fp->cl_id + CNIC_CONTEXT_USE;
4409 fp->igu_sb_id = bp->igu_base_sb + fp_idx + CNIC_CONTEXT_USE;
4410 /* qZone id equals to FW (per path) client id */
4411 fp->cl_qzone_id = fp->cl_id +
4412 BP_PORT(bp)*(CHIP_IS_E2(bp) ? ETH_MAX_RX_CLIENTS_E2 :
4413 ETH_MAX_RX_CLIENTS_E1H);
4414 /* init shortcut */
4415 fp->ustorm_rx_prods_offset = CHIP_IS_E2(bp) ?
4416 USTORM_RX_PRODS_E2_OFFSET(fp->cl_qzone_id) :
4417 USTORM_RX_PRODS_E1X_OFFSET(BP_PORT(bp), fp->cl_id);
4418 /* Set up SB indices */
4419 fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
4420 fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
4422 DP(NETIF_MSG_IFUP, "queue[%d]: bnx2x_init_sb(%p,%p) "
4423 "cl_id %d fw_sb %d igu_sb %d\n",
4424 fp_idx, bp, fp->status_blk.e1x_sb, fp->cl_id, fp->fw_sb_id,
4425 fp->igu_sb_id);
4426 bnx2x_init_sb(bp, fp->status_blk_mapping, BNX2X_VF_ID_INVALID, false,
4427 fp->fw_sb_id, fp->igu_sb_id);
4429 bnx2x_update_fpsb_idx(fp);
4432 void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
4434 int i;
4436 for_each_eth_queue(bp, i)
4437 bnx2x_init_fp_sb(bp, i);
4438 #ifdef BCM_CNIC
4439 if (!NO_FCOE(bp))
4440 bnx2x_init_fcoe_fp(bp);
4442 bnx2x_init_sb(bp, bp->cnic_sb_mapping,
4443 BNX2X_VF_ID_INVALID, false,
4444 CNIC_SB_ID(bp), CNIC_IGU_SB_ID(bp));
4446 #endif
4448 /* ensure status block indices were read */
4449 rmb();
4451 bnx2x_init_def_sb(bp);
4452 bnx2x_update_dsb_idx(bp);
4453 bnx2x_init_rx_rings(bp);
4454 bnx2x_init_tx_rings(bp);
4455 bnx2x_init_sp_ring(bp);
4456 bnx2x_init_eq_ring(bp);
4457 bnx2x_init_internal(bp, load_code);
4458 bnx2x_pf_init(bp);
4459 bnx2x_init_ind_table(bp);
4460 bnx2x_stats_init(bp);
4462 /* At this point, we are ready for interrupts */
4463 atomic_set(&bp->intr_sem, 0);
4465 /* flush all before enabling interrupts */
4466 mb();
4467 mmiowb();
4469 bnx2x_int_enable(bp);
4471 /* Check for SPIO5 */
4472 bnx2x_attn_int_deasserted0(bp,
4473 REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + BP_PORT(bp)*4) &
4474 AEU_INPUTS_ATTN_BITS_SPIO5);
4477 /* end of nic init */
4480 * gzip service functions
4483 static int bnx2x_gunzip_init(struct bnx2x *bp)
4485 bp->gunzip_buf = dma_alloc_coherent(&bp->pdev->dev, FW_BUF_SIZE,
4486 &bp->gunzip_mapping, GFP_KERNEL);
4487 if (bp->gunzip_buf == NULL)
4488 goto gunzip_nomem1;
4490 bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
4491 if (bp->strm == NULL)
4492 goto gunzip_nomem2;
4494 bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
4495 GFP_KERNEL);
4496 if (bp->strm->workspace == NULL)
4497 goto gunzip_nomem3;
4499 return 0;
4501 gunzip_nomem3:
4502 kfree(bp->strm);
4503 bp->strm = NULL;
4505 gunzip_nomem2:
4506 dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
4507 bp->gunzip_mapping);
4508 bp->gunzip_buf = NULL;
4510 gunzip_nomem1:
4511 netdev_err(bp->dev, "Cannot allocate firmware buffer for"
4512 " un-compression\n");
4513 return -ENOMEM;
4516 static void bnx2x_gunzip_end(struct bnx2x *bp)
4518 kfree(bp->strm->workspace);
4519 kfree(bp->strm);
4520 bp->strm = NULL;
4522 if (bp->gunzip_buf) {
4523 dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
4524 bp->gunzip_mapping);
4525 bp->gunzip_buf = NULL;
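/* decompresses a gzip image into bp->gunzip_buf: checks the gzip magic and
 * deflate method bytes, skips the 10-byte header (plus the NUL-terminated
 * original file name when the FNAME flag is set) and inflates the raw
 * deflate stream (-MAX_WBITS selects raw mode, i.e. no zlib header) */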
4529 static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len)
4531 int n, rc;
4533 /* check gzip header */
4534 if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED)) {
4535 BNX2X_ERR("Bad gzip header\n");
4536 return -EINVAL;
4539 n = 10;
4541 #define FNAME 0x8
4543 if (zbuf[3] & FNAME)
4544 while ((zbuf[n++] != 0) && (n < len));
4546 bp->strm->next_in = (typeof(bp->strm->next_in))zbuf + n;
4547 bp->strm->avail_in = len - n;
4548 bp->strm->next_out = bp->gunzip_buf;
4549 bp->strm->avail_out = FW_BUF_SIZE;
4551 rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
4552 if (rc != Z_OK)
4553 return rc;
4555 rc = zlib_inflate(bp->strm, Z_FINISH);
4556 if ((rc != Z_OK) && (rc != Z_STREAM_END))
4557 netdev_err(bp->dev, "Firmware decompression error: %s\n",
4558 bp->strm->msg);
4560 bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
4561 if (bp->gunzip_outlen & 0x3)
4562 netdev_err(bp->dev, "Firmware decompression error:"
4563 " gunzip_outlen (%d) not aligned\n",
4564 bp->gunzip_outlen);
4565 bp->gunzip_outlen >>= 2;
4567 zlib_inflateEnd(bp->strm);
4569 if (rc == Z_STREAM_END)
4570 return 0;
4572 return rc;
4575 /* nic load/unload */
4578 * General service functions
4581 /* send a NIG loopback debug packet */
4582 static void bnx2x_lb_pckt(struct bnx2x *bp)
4584 u32 wb_write[3];
4586 /* Ethernet source and destination addresses */
4587 wb_write[0] = 0x55555555;
4588 wb_write[1] = 0x55555555;
4589 wb_write[2] = 0x20; /* SOP */
4590 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
4592 /* NON-IP protocol */
4593 wb_write[0] = 0x09000000;
4594 wb_write[1] = 0x55555555;
4595 wb_write[2] = 0x10; /* EOP, eop_bvalid = 0 */
4596 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
4599 /* Some of the internal memories
4600 * are not directly readable from the driver;
4601 * to test them we send debug packets.
4603 static int bnx2x_int_mem_test(struct bnx2x *bp)
4605 int factor;
4606 int count, i;
4607 u32 val = 0;
4609 if (CHIP_REV_IS_FPGA(bp))
4610 factor = 120;
4611 else if (CHIP_REV_IS_EMUL(bp))
4612 factor = 200;
4613 else
4614 factor = 1;
4616 /* Disable inputs of parser neighbor blocks */
4617 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
4618 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
4619 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
4620 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
4622 /* Write 0 to parser credits for CFC search request */
4623 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
4625 /* send Ethernet packet */
4626 bnx2x_lb_pckt(bp);
4628 /* TODO do I reset NIG statistics? */
4629 /* Wait until NIG register shows 1 packet of size 0x10 */
4630 count = 1000 * factor;
4631 while (count) {
4633 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
4634 val = *bnx2x_sp(bp, wb_data[0]);
4635 if (val == 0x10)
4636 break;
4638 msleep(10);
4639 count--;
4641 if (val != 0x10) {
4642 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
4643 return -1;
4646 /* Wait until PRS register shows 1 packet */
4647 count = 1000 * factor;
4648 while (count) {
4649 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
4650 if (val == 1)
4651 break;
4653 msleep(10);
4654 count--;
4656 if (val != 0x1) {
4657 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
4658 return -2;
4661 /* Reset and init BRB, PRS */
4662 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
4663 msleep(50);
4664 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
4665 msleep(50);
4666 bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
4667 bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
4669 DP(NETIF_MSG_HW, "part2\n");
4671 /* Disable inputs of parser neighbor blocks */
4672 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
4673 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
4674 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
4675 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
4677 /* Write 0 to parser credits for CFC search request */
4678 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
4680 /* send 10 Ethernet packets */
4681 for (i = 0; i < 10; i++)
4682 bnx2x_lb_pckt(bp);
4684 /* Wait until NIG register shows 10 + 1
4685 packets of size 11*0x10 = 0xb0 */
4686 count = 1000 * factor;
4687 while (count) {
4689 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
4690 val = *bnx2x_sp(bp, wb_data[0]);
4691 if (val == 0xb0)
4692 break;
4694 msleep(10);
4695 count--;
4697 if (val != 0xb0) {
4698 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
4699 return -3;
4702 /* Wait until PRS register shows 2 packets */
4703 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
4704 if (val != 2)
4705 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
4707 /* Write 1 to parser credits for CFC search request */
4708 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
4710 /* Wait until PRS register shows 3 packets */
4711 msleep(10 * factor);
4712 /* Wait until NIG register shows 1 packet of size 0x10 */
4713 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
4714 if (val != 3)
4715 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
4717 /* clear NIG EOP FIFO */
4718 for (i = 0; i < 11; i++)
4719 REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
4720 val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
4721 if (val != 1) {
4722 BNX2X_ERR("clear of NIG failed\n");
4723 return -4;
4726 /* Reset and init BRB, PRS, NIG */
4727 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
4728 msleep(50);
4729 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
4730 msleep(50);
4731 bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
4732 bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
4733 #ifndef BCM_CNIC
4734 /* set NIC mode */
4735 REG_WR(bp, PRS_REG_NIC_MODE, 1);
4736 #endif
4738 /* Enable inputs of parser neighbor blocks */
4739 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
4740 REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
4741 REG_WR(bp, CFC_REG_DEBUG0, 0x0);
4742 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);
4744 DP(NETIF_MSG_HW, "done\n");
4746 return 0; /* OK */
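/* unmask (enable) the attention interrupt sources of most HW blocks; a few
 * masks deliberately stay non-zero to keep known-benign error sources quiet,
 * e.g. BRB read-length errors on truncated packets and PBF bits 3-4 */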
4749 static void bnx2x_enable_blocks_attention(struct bnx2x *bp)
4751 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
4752 if (CHIP_IS_E2(bp))
4753 REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0x40);
4754 else
4755 REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
4756 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
4757 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
4759 * mask read length error interrupts in BRB for the parser
4760 * (parsing unit and 'checksum and crc' unit);
4761 * these errors are legal (the PU reads a fixed length and the CAC can
4762 * cause a read length error on truncated packets)
4764 REG_WR(bp, BRB1_REG_BRB1_INT_MASK, 0xFC00);
4765 REG_WR(bp, QM_REG_QM_INT_MASK, 0);
4766 REG_WR(bp, TM_REG_TM_INT_MASK, 0);
4767 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
4768 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
4769 REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
4770 /* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
4771 /* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
4772 REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
4773 REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
4774 REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
4775 /* REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
4776 /* REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
4777 REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
4778 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
4779 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
4780 REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
4781 /* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
4782 /* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
4784 if (CHIP_REV_IS_FPGA(bp))
4785 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
4786 else if (CHIP_IS_E2(bp))
4787 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0,
4788 (PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_OF
4789 | PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_AFT
4790 | PXP2_PXP2_INT_MASK_0_REG_PGL_PCIE_ATTN
4791 | PXP2_PXP2_INT_MASK_0_REG_PGL_READ_BLOCKED
4792 | PXP2_PXP2_INT_MASK_0_REG_PGL_WRITE_BLOCKED));
4793 else
4794 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
4795 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
4796 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
4797 REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
4798 /* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
4799 /* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
4800 REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
4801 REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
4802 /* REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
4803 REG_WR(bp, PBF_REG_PBF_INT_MASK, 0x18); /* bit 3,4 masked */
4806 static void bnx2x_reset_common(struct bnx2x *bp)
4808 /* reset_common */
4809 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
4810 0xd3ffff7f);
4811 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
4814 static void bnx2x_init_pxp(struct bnx2x *bp)
4816 u16 devctl;
4817 int r_order, w_order;
4819 pci_read_config_word(bp->pdev,
4820 bp->pcie_cap + PCI_EXP_DEVCTL, &devctl);
4821 DP(NETIF_MSG_HW, "read 0x%x from devctl\n", devctl);
4822 w_order = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
4823 if (bp->mrrs == -1)
4824 r_order = ((devctl & PCI_EXP_DEVCTL_READRQ) >> 12);
4825 else {
4826 DP(NETIF_MSG_HW, "force read order to %d\n", bp->mrrs);
4827 r_order = bp->mrrs;
4830 bnx2x_init_pxp_arb(bp, r_order, w_order);
4833 static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp)
4835 int is_required;
4836 u32 val;
4837 int port;
4839 if (BP_NOMCP(bp))
4840 return;
4842 is_required = 0;
4843 val = SHMEM_RD(bp, dev_info.shared_hw_config.config2) &
4844 SHARED_HW_CFG_FAN_FAILURE_MASK;
4846 if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED)
4847 is_required = 1;
4850 * The fan failure mechanism is usually related to the PHY type since
4851 * the power consumption of the board is affected by the PHY. Currently,
4852 * a fan is required for most designs with SFX7101, BCM8727 and BCM8481.
4854 else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE)
4855 for (port = PORT_0; port < PORT_MAX; port++) {
4856 is_required |=
4857 bnx2x_fan_failure_det_req(
4859 bp->common.shmem_base,
4860 bp->common.shmem2_base,
4861 port);
4864 DP(NETIF_MSG_HW, "fan detection setting: %d\n", is_required);
4866 if (is_required == 0)
4867 return;
4869 /* Fan failure is indicated by SPIO 5 */
4870 bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
4871 MISC_REGISTERS_SPIO_INPUT_HI_Z);
4873 /* set to active low mode */
4874 val = REG_RD(bp, MISC_REG_SPIO_INT);
4875 val |= ((1 << MISC_REGISTERS_SPIO_5) <<
4876 MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
4877 REG_WR(bp, MISC_REG_SPIO_INT, val);
4879 /* enable interrupt to signal the IGU */
4880 val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
4881 val |= (1 << MISC_REGISTERS_SPIO_5);
4882 REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
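/* helper: makes the PXP treat subsequent GRC accesses from this PCI function
 * as if they were issued by pretend_func_num; the read-back of the pretend
 * register below guarantees the new value has taken effect before returning */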
4885 static void bnx2x_pretend_func(struct bnx2x *bp, u8 pretend_func_num)
4887 u32 offset = 0;
4889 if (CHIP_IS_E1(bp))
4890 return;
4891 if (CHIP_IS_E1H(bp) && (pretend_func_num >= E1H_FUNC_MAX))
4892 return;
4894 switch (BP_ABS_FUNC(bp)) {
4895 case 0:
4896 offset = PXP2_REG_PGL_PRETEND_FUNC_F0;
4897 break;
4898 case 1:
4899 offset = PXP2_REG_PGL_PRETEND_FUNC_F1;
4900 break;
4901 case 2:
4902 offset = PXP2_REG_PGL_PRETEND_FUNC_F2;
4903 break;
4904 case 3:
4905 offset = PXP2_REG_PGL_PRETEND_FUNC_F3;
4906 break;
4907 case 4:
4908 offset = PXP2_REG_PGL_PRETEND_FUNC_F4;
4909 break;
4910 case 5:
4911 offset = PXP2_REG_PGL_PRETEND_FUNC_F5;
4912 break;
4913 case 6:
4914 offset = PXP2_REG_PGL_PRETEND_FUNC_F6;
4915 break;
4916 case 7:
4917 offset = PXP2_REG_PGL_PRETEND_FUNC_F7;
4918 break;
4919 default:
4920 return;
4923 REG_WR(bp, offset, pretend_func_num);
4924 REG_RD(bp, offset);
4925 DP(NETIF_MSG_HW, "Pretending to func %d\n", pretend_func_num);
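/* helper: quiesces this PF - clears its function-enable bit in the IGU,
 * drops its PCIe master enable in PGLUE_B and clears its CFC weak enable */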
4928 static void bnx2x_pf_disable(struct bnx2x *bp)
4930 u32 val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);
4931 val &= ~IGU_PF_CONF_FUNC_EN;
4933 REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
4934 REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0);
4935 REG_WR(bp, CFC_REG_WEAK_ENABLE_PF, 0);
4938 static int bnx2x_init_hw_common(struct bnx2x *bp, u32 load_code)
4940 u32 val, i;
4942 DP(BNX2X_MSG_MCP, "starting common init func %d\n", BP_ABS_FUNC(bp));
4944 bnx2x_reset_common(bp);
4945 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
4946 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);
4948 bnx2x_init_block(bp, MISC_BLOCK, COMMON_STAGE);
4949 if (!CHIP_IS_E1(bp))
4950 REG_WR(bp, MISC_REG_E1HMF_MODE, IS_MF(bp));
4952 if (CHIP_IS_E2(bp)) {
4953 u8 fid;
4956 * In 4-port or 2-port mode we need to turn off master-enable
4957 * for everyone; after that, turn it back on for self.
4958 * So, regardless of multi-function mode, always disable it
4959 * for all functions on the given path, i.e. 0,2,4,6 for
4960 * path 0 and 1,3,5,7 for path 1
4962 for (fid = BP_PATH(bp); fid < E2_FUNC_MAX*2; fid += 2) {
4963 if (fid == BP_ABS_FUNC(bp)) {
4964 REG_WR(bp,
4965 PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER,
4967 continue;
4970 bnx2x_pretend_func(bp, fid);
4971 /* clear pf enable */
4972 bnx2x_pf_disable(bp);
4973 bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
4977 bnx2x_init_block(bp, PXP_BLOCK, COMMON_STAGE);
4978 if (CHIP_IS_E1(bp)) {
4979 /* enable HW interrupt from PXP on USDM overflow
4980 bit 16 on INT_MASK_0 */
4981 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
4984 bnx2x_init_block(bp, PXP2_BLOCK, COMMON_STAGE);
4985 bnx2x_init_pxp(bp);
4987 #ifdef __BIG_ENDIAN
4988 REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
4989 REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
4990 REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
4991 REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
4992 REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
4993 /* make sure this value is 0 */
4994 REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);
4996 /* REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
4997 REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
4998 REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
4999 REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
5000 REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
5001 #endif
5003 bnx2x_ilt_init_page_size(bp, INITOP_SET);
5005 if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
5006 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
5008 /* let the HW do its magic ... */
5009 msleep(100);
5010 /* finish PXP init */
5011 val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
5012 if (val != 1) {
5013 BNX2X_ERR("PXP2 CFG failed\n");
5014 return -EBUSY;
5016 val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
5017 if (val != 1) {
5018 BNX2X_ERR("PXP2 RD_INIT failed\n");
5019 return -EBUSY;
5022 /* Timers bug workaround E2 only. We need to set the entire ILT to
5023 * have entries with value "0" and valid bit on.
5024 * This needs to be done by the first PF that is loaded in a path
5025 * (i.e. common phase)
5027 if (CHIP_IS_E2(bp)) {
5028 struct ilt_client_info ilt_cli;
5029 struct bnx2x_ilt ilt;
5030 memset(&ilt_cli, 0, sizeof(struct ilt_client_info));
5031 memset(&ilt, 0, sizeof(struct bnx2x_ilt));
5033 /* initialize dummy TM client */
5034 ilt_cli.start = 0;
5035 ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1;
5036 ilt_cli.client_num = ILT_CLIENT_TM;
5038 /* Step 1: set zeroes to all ilt page entries with valid bit on
5039 * Step 2: set the timers first/last ilt entry to point
5040 * to the entire range to prevent ILT range error for 3rd/4th
5041 * vnic (this code assumes existence of the vnic)
5043 * both steps performed by call to bnx2x_ilt_client_init_op()
5044 * with dummy TM client
5046 * we must use pretend since PXP2_REG_RQ_##blk##_FIRST_ILT
5047 * and its brother are split registers
5049 bnx2x_pretend_func(bp, (BP_PATH(bp) + 6));
5050 bnx2x_ilt_client_init_op_ilt(bp, &ilt, &ilt_cli, INITOP_CLEAR);
5051 bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
5053 REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN, BNX2X_PXP_DRAM_ALIGN);
5054 REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN_RD, BNX2X_PXP_DRAM_ALIGN);
5055 REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN_SEL, 1);
5059 REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
5060 REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
5062 if (CHIP_IS_E2(bp)) {
5063 int factor = CHIP_REV_IS_EMUL(bp) ? 1000 :
5064 (CHIP_REV_IS_FPGA(bp) ? 400 : 0);
5065 bnx2x_init_block(bp, PGLUE_B_BLOCK, COMMON_STAGE);
5067 bnx2x_init_block(bp, ATC_BLOCK, COMMON_STAGE);
5069 /* let the HW do its magic ... */
5070 do {
5071 msleep(200);
5072 val = REG_RD(bp, ATC_REG_ATC_INIT_DONE);
5073 } while (factor-- && (val != 1));
5075 if (val != 1) {
5076 BNX2X_ERR("ATC_INIT failed\n");
5077 return -EBUSY;
5081 bnx2x_init_block(bp, DMAE_BLOCK, COMMON_STAGE);
5083 /* clean the DMAE memory */
5084 bp->dmae_ready = 1;
5085 bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);
5087 bnx2x_init_block(bp, TCM_BLOCK, COMMON_STAGE);
5088 bnx2x_init_block(bp, UCM_BLOCK, COMMON_STAGE);
5089 bnx2x_init_block(bp, CCM_BLOCK, COMMON_STAGE);
5090 bnx2x_init_block(bp, XCM_BLOCK, COMMON_STAGE);
5092 bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
5093 bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
5094 bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
5095 bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
5097 bnx2x_init_block(bp, QM_BLOCK, COMMON_STAGE);
5099 if (CHIP_MODE_IS_4_PORT(bp))
5100 bnx2x_init_block(bp, QM_4PORT_BLOCK, COMMON_STAGE);
5102 /* QM queues pointers table */
5103 bnx2x_qm_init_ptr_table(bp, bp->qm_cid_count, INITOP_SET);
5105 /* soft reset pulse */
5106 REG_WR(bp, QM_REG_SOFT_RESET, 1);
5107 REG_WR(bp, QM_REG_SOFT_RESET, 0);
5109 #ifdef BCM_CNIC
5110 bnx2x_init_block(bp, TIMERS_BLOCK, COMMON_STAGE);
5111 #endif
5113 bnx2x_init_block(bp, DQ_BLOCK, COMMON_STAGE);
5114 REG_WR(bp, DORQ_REG_DPM_CID_OFST, BNX2X_DB_SHIFT);
5116 if (!CHIP_REV_IS_SLOW(bp)) {
5117 /* enable hw interrupt from doorbell Q */
5118 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5121 bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
5122 if (CHIP_MODE_IS_4_PORT(bp)) {
5123 REG_WR(bp, BRB1_REG_FULL_LB_XOFF_THRESHOLD, 248);
5124 REG_WR(bp, BRB1_REG_FULL_LB_XON_THRESHOLD, 328);
5127 bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
5128 REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
5129 #ifndef BCM_CNIC
5130 /* set NIC mode */
5131 REG_WR(bp, PRS_REG_NIC_MODE, 1);
5132 #endif
5133 if (!CHIP_IS_E1(bp))
5134 REG_WR(bp, PRS_REG_E1HOV_MODE, IS_MF_SD(bp));
5136 if (CHIP_IS_E2(bp)) {
5137 /* Bit-map indicating which L2 hdrs may appear after the
5138 basic Ethernet header */
5139 int has_ovlan = IS_MF_SD(bp);
5140 REG_WR(bp, PRS_REG_HDRS_AFTER_BASIC, (has_ovlan ? 7 : 6));
5141 REG_WR(bp, PRS_REG_MUST_HAVE_HDRS, (has_ovlan ? 1 : 0));
5144 bnx2x_init_block(bp, TSDM_BLOCK, COMMON_STAGE);
5145 bnx2x_init_block(bp, CSDM_BLOCK, COMMON_STAGE);
5146 bnx2x_init_block(bp, USDM_BLOCK, COMMON_STAGE);
5147 bnx2x_init_block(bp, XSDM_BLOCK, COMMON_STAGE);
5149 bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
5150 bnx2x_init_fill(bp, USEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
5151 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
5152 bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
5154 bnx2x_init_block(bp, TSEM_BLOCK, COMMON_STAGE);
5155 bnx2x_init_block(bp, USEM_BLOCK, COMMON_STAGE);
5156 bnx2x_init_block(bp, CSEM_BLOCK, COMMON_STAGE);
5157 bnx2x_init_block(bp, XSEM_BLOCK, COMMON_STAGE);
5159 if (CHIP_MODE_IS_4_PORT(bp))
5160 bnx2x_init_block(bp, XSEM_4PORT_BLOCK, COMMON_STAGE);
5162 /* sync semi rtc */
5163 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5164 0x80000000);
5165 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
5166 0x80000000);
5168 bnx2x_init_block(bp, UPB_BLOCK, COMMON_STAGE);
5169 bnx2x_init_block(bp, XPB_BLOCK, COMMON_STAGE);
5170 bnx2x_init_block(bp, PBF_BLOCK, COMMON_STAGE);
5172 if (CHIP_IS_E2(bp)) {
5173 int has_ovlan = IS_MF_SD(bp);
5174 REG_WR(bp, PBF_REG_HDRS_AFTER_BASIC, (has_ovlan ? 7 : 6));
5175 REG_WR(bp, PBF_REG_MUST_HAVE_HDRS, (has_ovlan ? 1 : 0));
5178 REG_WR(bp, SRC_REG_SOFT_RST, 1);
5179 for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4)
5180 REG_WR(bp, i, random32());
5182 bnx2x_init_block(bp, SRCH_BLOCK, COMMON_STAGE);
5183 #ifdef BCM_CNIC
5184 REG_WR(bp, SRC_REG_KEYSEARCH_0, 0x63285672);
5185 REG_WR(bp, SRC_REG_KEYSEARCH_1, 0x24b8f2cc);
5186 REG_WR(bp, SRC_REG_KEYSEARCH_2, 0x223aef9b);
5187 REG_WR(bp, SRC_REG_KEYSEARCH_3, 0x26001e3a);
5188 REG_WR(bp, SRC_REG_KEYSEARCH_4, 0x7ae91116);
5189 REG_WR(bp, SRC_REG_KEYSEARCH_5, 0x5ce5230b);
5190 REG_WR(bp, SRC_REG_KEYSEARCH_6, 0x298d8adf);
5191 REG_WR(bp, SRC_REG_KEYSEARCH_7, 0x6eb0ff09);
5192 REG_WR(bp, SRC_REG_KEYSEARCH_8, 0x1830f82f);
5193 REG_WR(bp, SRC_REG_KEYSEARCH_9, 0x01e46be7);
5194 #endif
5195 REG_WR(bp, SRC_REG_SOFT_RST, 0);
5197 if (sizeof(union cdu_context) != 1024)
5198 /* we currently assume that a context is 1024 bytes */
5199 dev_alert(&bp->pdev->dev, "please adjust the size "
5200 "of cdu_context(%ld)\n",
5201 (long)sizeof(union cdu_context));
5203 bnx2x_init_block(bp, CDU_BLOCK, COMMON_STAGE);
5204 val = (4 << 24) + (0 << 12) + 1024;
5205 REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
5207 bnx2x_init_block(bp, CFC_BLOCK, COMMON_STAGE);
5208 REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
5209 /* enable context validation interrupt from CFC */
5210 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
5212 /* set the thresholds to prevent CFC/CDU race */
5213 REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);
5215 bnx2x_init_block(bp, HC_BLOCK, COMMON_STAGE);
5217 if (CHIP_IS_E2(bp) && BP_NOMCP(bp))
5218 REG_WR(bp, IGU_REG_RESET_MEMORIES, 0x36);
5220 bnx2x_init_block(bp, IGU_BLOCK, COMMON_STAGE);
5221 bnx2x_init_block(bp, MISC_AEU_BLOCK, COMMON_STAGE);
5223 bnx2x_init_block(bp, PXPCS_BLOCK, COMMON_STAGE);
5224 /* Reset PCIE errors for debug */
5225 REG_WR(bp, 0x2814, 0xffffffff);
5226 REG_WR(bp, 0x3820, 0xffffffff);
5228 if (CHIP_IS_E2(bp)) {
5229 REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_CONTROL_5,
5230 (PXPCS_TL_CONTROL_5_ERR_UNSPPORT1 |
5231 PXPCS_TL_CONTROL_5_ERR_UNSPPORT));
5232 REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_FUNC345_STAT,
5233 (PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT4 |
5234 PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT3 |
5235 PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT2));
5236 REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_FUNC678_STAT,
5237 (PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT7 |
5238 PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT6 |
5239 PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT5));
5242 bnx2x_init_block(bp, EMAC0_BLOCK, COMMON_STAGE);
5243 bnx2x_init_block(bp, EMAC1_BLOCK, COMMON_STAGE);
5244 bnx2x_init_block(bp, DBU_BLOCK, COMMON_STAGE);
5245 bnx2x_init_block(bp, DBG_BLOCK, COMMON_STAGE);
5247 bnx2x_init_block(bp, NIG_BLOCK, COMMON_STAGE);
5248 if (!CHIP_IS_E1(bp)) {
5249 REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_MF(bp));
5250 REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_MF_SD(bp));
5252 if (CHIP_IS_E2(bp)) {
5253 /* Bit-map indicating which L2 hdrs may appear after the
5254 basic Ethernet header */
5255 REG_WR(bp, NIG_REG_P0_HDRS_AFTER_BASIC, (IS_MF_SD(bp) ? 7 : 6));
5258 if (CHIP_REV_IS_SLOW(bp))
5259 msleep(200);
5261 /* finish CFC init */
5262 val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
5263 if (val != 1) {
5264 BNX2X_ERR("CFC LL_INIT failed\n");
5265 return -EBUSY;
5267 val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
5268 if (val != 1) {
5269 BNX2X_ERR("CFC AC_INIT failed\n");
5270 return -EBUSY;
5272 val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
5273 if (val != 1) {
5274 BNX2X_ERR("CFC CAM_INIT failed\n");
5275 return -EBUSY;
5277 REG_WR(bp, CFC_REG_DEBUG0, 0);
5279 if (CHIP_IS_E1(bp)) {
5280 /* read NIG statistic
5281 to see if this is our first up since powerup */
5282 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5283 val = *bnx2x_sp(bp, wb_data[0]);
5285 /* do internal memory self test */
5286 if ((val == 0) && bnx2x_int_mem_test(bp)) {
5287 BNX2X_ERR("internal mem self test failed\n");
5288 return -EBUSY;
5292 bp->port.need_hw_lock = bnx2x_hw_lock_required(bp,
5293 bp->common.shmem_base,
5294 bp->common.shmem2_base);
5296 bnx2x_setup_fan_failure_detection(bp);
5298 /* clear PXP2 attentions */
5299 REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
5301 bnx2x_enable_blocks_attention(bp);
5302 if (CHIP_PARITY_ENABLED(bp))
5303 bnx2x_enable_blocks_parity(bp);
5305 if (!BP_NOMCP(bp)) {
5306 /* In E2 2-PORT mode, same ext phy is used for the two paths */
5307 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) ||
5308 CHIP_IS_E1x(bp)) {
5309 u32 shmem_base[2], shmem2_base[2];
5310 shmem_base[0] = bp->common.shmem_base;
5311 shmem2_base[0] = bp->common.shmem2_base;
5312 if (CHIP_IS_E2(bp)) {
5313 shmem_base[1] =
5314 SHMEM2_RD(bp, other_shmem_base_addr);
5315 shmem2_base[1] =
5316 SHMEM2_RD(bp, other_shmem2_base_addr);
5318 bnx2x_acquire_phy_lock(bp);
5319 bnx2x_common_init_phy(bp, shmem_base, shmem2_base,
5320 bp->common.chip_id);
5321 bnx2x_release_phy_lock(bp);
5323 } else
5324 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
5326 return 0;
5329 static int bnx2x_init_hw_port(struct bnx2x *bp)
5331 int port = BP_PORT(bp);
5332 int init_stage = port ? PORT1_STAGE : PORT0_STAGE;
5333 u32 low, high;
5334 u32 val;
5336 DP(BNX2X_MSG_MCP, "starting port init port %d\n", port);
5338 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
5340 bnx2x_init_block(bp, PXP_BLOCK, init_stage);
5341 bnx2x_init_block(bp, PXP2_BLOCK, init_stage);
5343 /* The Timers bug workaround disables the pf_master bit in PGLUE at the
5344 * common phase; we need to enable it here before any DMAE access is
5345 * attempted. Therefore we manually added the enable-master to the
5346 * port phase (it also happens in the function phase)
5348 if (CHIP_IS_E2(bp))
5349 REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
5351 bnx2x_init_block(bp, TCM_BLOCK, init_stage);
5352 bnx2x_init_block(bp, UCM_BLOCK, init_stage);
5353 bnx2x_init_block(bp, CCM_BLOCK, init_stage);
5354 bnx2x_init_block(bp, XCM_BLOCK, init_stage);
5356 /* QM cid (connection) count */
5357 bnx2x_qm_init_cid_count(bp, bp->qm_cid_count, INITOP_SET);
5359 #ifdef BCM_CNIC
5360 bnx2x_init_block(bp, TIMERS_BLOCK, init_stage);
5361 REG_WR(bp, TM_REG_LIN0_SCAN_TIME + port*4, 20);
5362 REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31);
5363 #endif
5365 bnx2x_init_block(bp, DQ_BLOCK, init_stage);
5367 if (CHIP_MODE_IS_4_PORT(bp))
5368 bnx2x_init_block(bp, QM_4PORT_BLOCK, init_stage);
5370 if (CHIP_IS_E1(bp) || CHIP_IS_E1H(bp)) {
5371 bnx2x_init_block(bp, BRB1_BLOCK, init_stage);
5372 if (CHIP_REV_IS_SLOW(bp) && CHIP_IS_E1(bp)) {
5373 /* no pause for emulation and FPGA */
5374 low = 0;
5375 high = 513;
5376 } else {
5377 if (IS_MF(bp))
5378 low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
5379 else if (bp->dev->mtu > 4096) {
5380 if (bp->flags & ONE_PORT_FLAG)
5381 low = 160;
5382 else {
5383 val = bp->dev->mtu;
5384 /* (24*1024 + val*4)/256 */
5385 low = 96 + (val/64) +
5386 ((val % 64) ? 1 : 0);
5388 } else
5389 low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160);
5390 high = low + 56; /* 14*1024/256 */
5392 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
5393 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);
5396 if (CHIP_MODE_IS_4_PORT(bp)) {
5397 REG_WR(bp, BRB1_REG_PAUSE_0_XOFF_THRESHOLD_0 + port*8, 248);
5398 REG_WR(bp, BRB1_REG_PAUSE_0_XON_THRESHOLD_0 + port*8, 328);
5399 REG_WR(bp, (BP_PORT(bp) ? BRB1_REG_MAC_GUARANTIED_1 :
5400 BRB1_REG_MAC_GUARANTIED_0), 40);
5403 bnx2x_init_block(bp, PRS_BLOCK, init_stage);
5405 bnx2x_init_block(bp, TSDM_BLOCK, init_stage);
5406 bnx2x_init_block(bp, CSDM_BLOCK, init_stage);
5407 bnx2x_init_block(bp, USDM_BLOCK, init_stage);
5408 bnx2x_init_block(bp, XSDM_BLOCK, init_stage);
5410 bnx2x_init_block(bp, TSEM_BLOCK, init_stage);
5411 bnx2x_init_block(bp, USEM_BLOCK, init_stage);
5412 bnx2x_init_block(bp, CSEM_BLOCK, init_stage);
5413 bnx2x_init_block(bp, XSEM_BLOCK, init_stage);
5414 if (CHIP_MODE_IS_4_PORT(bp))
5415 bnx2x_init_block(bp, XSEM_4PORT_BLOCK, init_stage);
5417 bnx2x_init_block(bp, UPB_BLOCK, init_stage);
5418 bnx2x_init_block(bp, XPB_BLOCK, init_stage);
5420 bnx2x_init_block(bp, PBF_BLOCK, init_stage);
5422 if (!CHIP_IS_E2(bp)) {
5423 /* configure PBF to work without PAUSE (MTU 9000) */
5424 REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
5426 /* update threshold */
5427 REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
5428 /* update init credit */
5429 REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
5431 /* probe changes */
5432 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
5433 udelay(50);
5434 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
5437 #ifdef BCM_CNIC
5438 bnx2x_init_block(bp, SRCH_BLOCK, init_stage);
5439 #endif
5440 bnx2x_init_block(bp, CDU_BLOCK, init_stage);
5441 bnx2x_init_block(bp, CFC_BLOCK, init_stage);
5443 if (CHIP_IS_E1(bp)) {
5444 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
5445 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
5447 bnx2x_init_block(bp, HC_BLOCK, init_stage);
5449 bnx2x_init_block(bp, IGU_BLOCK, init_stage);
5451 bnx2x_init_block(bp, MISC_AEU_BLOCK, init_stage);
5452 /* init aeu_mask_attn_func_0/1:
5453 * - SF mode: bits 3-7 are masked. only bits 0-2 are in use
5454 * - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
5455 * bits 4-7 are used for "per vn group attention" */
5456 val = IS_MF(bp) ? 0xF7 : 0x7;
5457 /* Enable DCBX attention for all but E1 */
5458 val |= CHIP_IS_E1(bp) ? 0 : 0x10;
5459 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, val);
5461 bnx2x_init_block(bp, PXPCS_BLOCK, init_stage);
5462 bnx2x_init_block(bp, EMAC0_BLOCK, init_stage);
5463 bnx2x_init_block(bp, EMAC1_BLOCK, init_stage);
5464 bnx2x_init_block(bp, DBU_BLOCK, init_stage);
5465 bnx2x_init_block(bp, DBG_BLOCK, init_stage);
5467 bnx2x_init_block(bp, NIG_BLOCK, init_stage);
5469 REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
5471 if (!CHIP_IS_E1(bp)) {
5472 /* 0x2 disable mf_ov, 0x1 enable */
5473 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
5474 (IS_MF_SD(bp) ? 0x1 : 0x2));
5476 if (CHIP_IS_E2(bp)) {
5477 val = 0;
5478 switch (bp->mf_mode) {
5479 case MULTI_FUNCTION_SD:
5480 val = 1;
5481 break;
5482 case MULTI_FUNCTION_SI:
5483 val = 2;
5484 break;
5487 REG_WR(bp, (BP_PORT(bp) ? NIG_REG_LLH1_CLS_TYPE :
5488 NIG_REG_LLH0_CLS_TYPE), val);
5491 REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
5492 REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
5493 REG_WR(bp, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
5497 bnx2x_init_block(bp, MCP_BLOCK, init_stage);
5498 bnx2x_init_block(bp, DMAE_BLOCK, init_stage);
5499 bp->port.need_hw_lock = bnx2x_hw_lock_required(bp,
5500 bp->common.shmem_base,
5501 bp->common.shmem2_base);
5502 if (bnx2x_fan_failure_det_req(bp, bp->common.shmem_base,
5503 bp->common.shmem2_base, port)) {
5504 u32 reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
5505 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
5506 val = REG_RD(bp, reg_addr);
5507 val |= AEU_INPUTS_ATTN_BITS_SPIO5;
5508 REG_WR(bp, reg_addr, val);
5510 bnx2x__link_reset(bp);
5512 return 0;
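/* helper: writes one on-chip address translation (ILT) entry as a wide-bus
 * 64-bit pair; E1 uses PXP2_REG_RQ_ONCHIP_AT while later chips use the
 * _B0 variant of the table */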
5515 static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
5517 int reg;
5519 if (CHIP_IS_E1(bp))
5520 reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
5521 else
5522 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
5524 bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
5527 static inline void bnx2x_igu_clear_sb(struct bnx2x *bp, u8 idu_sb_id)
5529 bnx2x_igu_clear_sb_gen(bp, idu_sb_id, true /*PF*/);
5532 static inline void bnx2x_clear_func_ilt(struct bnx2x *bp, u32 func)
5534 u32 i, base = FUNC_ILT_BASE(func);
5535 for (i = base; i < base + ILT_PER_FUNC; i++)
5536 bnx2x_ilt_wr(bp, i, 0);
5539 static int bnx2x_init_hw_func(struct bnx2x *bp)
5541 int port = BP_PORT(bp);
5542 int func = BP_FUNC(bp);
5543 struct bnx2x_ilt *ilt = BP_ILT(bp);
5544 u16 cdu_ilt_start;
5545 u32 addr, val;
5546 u32 main_mem_base, main_mem_size, main_mem_prty_clr;
5547 int i, main_mem_width;
5549 DP(BNX2X_MSG_MCP, "starting func init func %d\n", func);
5551 /* set MSI reconfigure capability */
5552 if (bp->common.int_block == INT_BLOCK_HC) {
5553 addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
5554 val = REG_RD(bp, addr);
5555 val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
5556 REG_WR(bp, addr, val);
5559 ilt = BP_ILT(bp);
5560 cdu_ilt_start = ilt->clients[ILT_CLIENT_CDU].start;
5562 for (i = 0; i < L2_ILT_LINES(bp); i++) {
5563 ilt->lines[cdu_ilt_start + i].page =
5564 bp->context.vcxt + (ILT_PAGE_CIDS * i);
5565 ilt->lines[cdu_ilt_start + i].page_mapping =
5566 bp->context.cxt_mapping + (CDU_ILT_PAGE_SZ * i);
5567 /* cdu ilt pages are allocated manually so there's no need to
5568 set the size */
5570 bnx2x_ilt_init_op(bp, INITOP_SET);
5572 #ifdef BCM_CNIC
5573 bnx2x_src_init_t2(bp, bp->t2, bp->t2_mapping, SRC_CONN_NUM);
5575 /* T1 hash bits value determines the T1 number of entries */
5576 REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + port*4, SRC_HASH_BITS);
5577 #endif
5579 #ifndef BCM_CNIC
5580 /* set NIC mode */
5581 REG_WR(bp, PRS_REG_NIC_MODE, 1);
5582 #endif /* BCM_CNIC */
5584 if (CHIP_IS_E2(bp)) {
5585 u32 pf_conf = IGU_PF_CONF_FUNC_EN;
5587 /* Turn on a single ISR mode in IGU if driver is going to use
5588 * INT#x or MSI
5590 if (!(bp->flags & USING_MSIX_FLAG))
5591 pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN;
5593 * Timers workaround bug: function init part.
5594 * Need to wait 20msec after initializing ILT,
5595 * needed to make sure there are no requests in
5596 * one of the PXP internal queues with "old" ILT addresses
5598 msleep(20);
5600 * Master enable - Due to WB DMAE writes performed before this
5601 * register is re-initialized as part of the regular function
5602 * init
5604 REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
5605 /* Enable the function in IGU */
5606 REG_WR(bp, IGU_REG_PF_CONFIGURATION, pf_conf);
5609 bp->dmae_ready = 1;
5611 bnx2x_init_block(bp, PGLUE_B_BLOCK, FUNC0_STAGE + func);
5613 if (CHIP_IS_E2(bp))
5614 REG_WR(bp, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR, func);
5616 bnx2x_init_block(bp, MISC_BLOCK, FUNC0_STAGE + func);
5617 bnx2x_init_block(bp, TCM_BLOCK, FUNC0_STAGE + func);
5618 bnx2x_init_block(bp, UCM_BLOCK, FUNC0_STAGE + func);
5619 bnx2x_init_block(bp, CCM_BLOCK, FUNC0_STAGE + func);
5620 bnx2x_init_block(bp, XCM_BLOCK, FUNC0_STAGE + func);
5621 bnx2x_init_block(bp, TSEM_BLOCK, FUNC0_STAGE + func);
5622 bnx2x_init_block(bp, USEM_BLOCK, FUNC0_STAGE + func);
5623 bnx2x_init_block(bp, CSEM_BLOCK, FUNC0_STAGE + func);
5624 bnx2x_init_block(bp, XSEM_BLOCK, FUNC0_STAGE + func);
5626 if (CHIP_IS_E2(bp)) {
5627 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_PATH_ID_OFFSET,
5628 BP_PATH(bp));
5629 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_PATH_ID_OFFSET,
5630 BP_PATH(bp));
5633 if (CHIP_MODE_IS_4_PORT(bp))
5634 bnx2x_init_block(bp, XSEM_4PORT_BLOCK, FUNC0_STAGE + func);
5636 if (CHIP_IS_E2(bp))
5637 REG_WR(bp, QM_REG_PF_EN, 1);
5639 bnx2x_init_block(bp, QM_BLOCK, FUNC0_STAGE + func);
5641 if (CHIP_MODE_IS_4_PORT(bp))
5642 bnx2x_init_block(bp, QM_4PORT_BLOCK, FUNC0_STAGE + func);
5644 bnx2x_init_block(bp, TIMERS_BLOCK, FUNC0_STAGE + func);
5645 bnx2x_init_block(bp, DQ_BLOCK, FUNC0_STAGE + func);
5646 bnx2x_init_block(bp, BRB1_BLOCK, FUNC0_STAGE + func);
5647 bnx2x_init_block(bp, PRS_BLOCK, FUNC0_STAGE + func);
5648 bnx2x_init_block(bp, TSDM_BLOCK, FUNC0_STAGE + func);
5649 bnx2x_init_block(bp, CSDM_BLOCK, FUNC0_STAGE + func);
5650 bnx2x_init_block(bp, USDM_BLOCK, FUNC0_STAGE + func);
5651 bnx2x_init_block(bp, XSDM_BLOCK, FUNC0_STAGE + func);
5652 bnx2x_init_block(bp, UPB_BLOCK, FUNC0_STAGE + func);
5653 bnx2x_init_block(bp, XPB_BLOCK, FUNC0_STAGE + func);
5654 bnx2x_init_block(bp, PBF_BLOCK, FUNC0_STAGE + func);
5655 if (CHIP_IS_E2(bp))
5656 REG_WR(bp, PBF_REG_DISABLE_PF, 0);
5658 bnx2x_init_block(bp, CDU_BLOCK, FUNC0_STAGE + func);
5660 bnx2x_init_block(bp, CFC_BLOCK, FUNC0_STAGE + func);
5662 if (CHIP_IS_E2(bp))
5663 REG_WR(bp, CFC_REG_WEAK_ENABLE_PF, 1);
5665 if (IS_MF(bp)) {
5666 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
5667 REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->mf_ov);
5670 bnx2x_init_block(bp, MISC_AEU_BLOCK, FUNC0_STAGE + func);
5672 /* HC init per function */
5673 if (bp->common.int_block == INT_BLOCK_HC) {
5674 if (CHIP_IS_E1H(bp)) {
5675 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
5677 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
5678 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
5680 bnx2x_init_block(bp, HC_BLOCK, FUNC0_STAGE + func);
5682 } else {
5683 int num_segs, sb_idx, prod_offset;
5685 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
5687 if (CHIP_IS_E2(bp)) {
5688 REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, 0);
5689 REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, 0);
5692 bnx2x_init_block(bp, IGU_BLOCK, FUNC0_STAGE + func);
5694 if (CHIP_IS_E2(bp)) {
5695 int dsb_idx = 0;
5697 * Producer memory:
5698 * E2 mode: address 0-135 match to the mapping memory;
5699 * 136 - PF0 default prod; 137 - PF1 default prod;
5700 * 138 - PF2 default prod; 139 - PF3 default prod;
5701 * 140 - PF0 attn prod; 141 - PF1 attn prod;
5702 * 142 - PF2 attn prod; 143 - PF3 attn prod;
5703 * 144-147 reserved.
5705 * E1.5 mode - In backward compatible mode;
5706 * for non default SB; each even line in the memory
5707 * holds the U producer and each odd line hold
5708 * the C producer. The first 128 producers are for
5709 * NDSB (PF0 - 0-31; PF1 - 32-63 and so on). The last 20
5710 * producers are for the DSB for each PF.
5711 * Each PF has five segments: (the order inside each
5712 * segment is PF0; PF1; PF2; PF3) - 128-131 U prods;
5713 * 132-135 C prods; 136-139 X prods; 140-143 T prods;
5714 * 144-147 attn prods;
5716 /* non-default-status-blocks */
5717 num_segs = CHIP_INT_MODE_IS_BC(bp) ?
5718 IGU_BC_NDSB_NUM_SEGS : IGU_NORM_NDSB_NUM_SEGS;
5719 for (sb_idx = 0; sb_idx < bp->igu_sb_cnt; sb_idx++) {
5720 prod_offset = (bp->igu_base_sb + sb_idx) *
5721 num_segs;
5723 for (i = 0; i < num_segs; i++) {
5724 addr = IGU_REG_PROD_CONS_MEMORY +
5725 (prod_offset + i) * 4;
5726 REG_WR(bp, addr, 0);
5728 /* send consumer update with value 0 */
5729 bnx2x_ack_sb(bp, bp->igu_base_sb + sb_idx,
5730 USTORM_ID, 0, IGU_INT_NOP, 1);
5731 bnx2x_igu_clear_sb(bp,
5732 bp->igu_base_sb + sb_idx);
5735 /* default-status-blocks */
5736 num_segs = CHIP_INT_MODE_IS_BC(bp) ?
5737 IGU_BC_DSB_NUM_SEGS : IGU_NORM_DSB_NUM_SEGS;
5739 if (CHIP_MODE_IS_4_PORT(bp))
5740 dsb_idx = BP_FUNC(bp);
5741 else
5742 dsb_idx = BP_E1HVN(bp);
5744 prod_offset = (CHIP_INT_MODE_IS_BC(bp) ?
5745 IGU_BC_BASE_DSB_PROD + dsb_idx :
5746 IGU_NORM_BASE_DSB_PROD + dsb_idx);
5748 for (i = 0; i < (num_segs * E1HVN_MAX);
5749 i += E1HVN_MAX) {
5750 addr = IGU_REG_PROD_CONS_MEMORY +
5751 (prod_offset + i)*4;
5752 REG_WR(bp, addr, 0);
5754 /* send consumer update with 0 */
5755 if (CHIP_INT_MODE_IS_BC(bp)) {
5756 bnx2x_ack_sb(bp, bp->igu_dsb_id,
5757 USTORM_ID, 0, IGU_INT_NOP, 1);
5758 bnx2x_ack_sb(bp, bp->igu_dsb_id,
5759 CSTORM_ID, 0, IGU_INT_NOP, 1);
5760 bnx2x_ack_sb(bp, bp->igu_dsb_id,
5761 XSTORM_ID, 0, IGU_INT_NOP, 1);
5762 bnx2x_ack_sb(bp, bp->igu_dsb_id,
5763 TSTORM_ID, 0, IGU_INT_NOP, 1);
5764 bnx2x_ack_sb(bp, bp->igu_dsb_id,
5765 ATTENTION_ID, 0, IGU_INT_NOP, 1);
5766 } else {
5767 bnx2x_ack_sb(bp, bp->igu_dsb_id,
5768 USTORM_ID, 0, IGU_INT_NOP, 1);
5769 bnx2x_ack_sb(bp, bp->igu_dsb_id,
5770 ATTENTION_ID, 0, IGU_INT_NOP, 1);
5772 bnx2x_igu_clear_sb(bp, bp->igu_dsb_id);
5774 /* !!! these should become driver const once
5775 rf-tool supports split-68 const */
5776 REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_LSB, 0);
5777 REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_MSB, 0);
5778 REG_WR(bp, IGU_REG_SB_MASK_LSB, 0);
5779 REG_WR(bp, IGU_REG_SB_MASK_MSB, 0);
5780 REG_WR(bp, IGU_REG_PBA_STATUS_LSB, 0);
5781 REG_WR(bp, IGU_REG_PBA_STATUS_MSB, 0);
5785 /* Reset PCIE errors for debug */
5786 REG_WR(bp, 0x2114, 0xffffffff);
5787 REG_WR(bp, 0x2120, 0xffffffff);
5789 bnx2x_init_block(bp, EMAC0_BLOCK, FUNC0_STAGE + func);
5790 bnx2x_init_block(bp, EMAC1_BLOCK, FUNC0_STAGE + func);
5791 bnx2x_init_block(bp, DBU_BLOCK, FUNC0_STAGE + func);
5792 bnx2x_init_block(bp, DBG_BLOCK, FUNC0_STAGE + func);
5793 bnx2x_init_block(bp, MCP_BLOCK, FUNC0_STAGE + func);
5794 bnx2x_init_block(bp, DMAE_BLOCK, FUNC0_STAGE + func);
5796 if (CHIP_IS_E1x(bp)) {
5797 main_mem_size = HC_REG_MAIN_MEMORY_SIZE / 2; /*dwords*/
5798 main_mem_base = HC_REG_MAIN_MEMORY +
5799 BP_PORT(bp) * (main_mem_size * 4);
5800 main_mem_prty_clr = HC_REG_HC_PRTY_STS_CLR;
5801 main_mem_width = 8;
5803 val = REG_RD(bp, main_mem_prty_clr);
5804 if (val)
5805 DP(BNX2X_MSG_MCP, "Hmmm... Parity errors in HC "
5806 "block during "
5807 "function init (0x%x)!\n", val);
5809 /* Clear "false" parity errors in MSI-X table */
5810 for (i = main_mem_base;
5811 i < main_mem_base + main_mem_size * 4;
5812 i += main_mem_width) {
5813 bnx2x_read_dmae(bp, i, main_mem_width / 4);
5814 bnx2x_write_dmae(bp, bnx2x_sp_mapping(bp, wb_data),
5815 i, main_mem_width / 4);
5817 /* Clear HC parity attention */
5818 REG_RD(bp, main_mem_prty_clr);
5821 bnx2x_phy_probe(&bp->link_params);
5823 return 0;
5826 int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
5828 int rc = 0;
5830 DP(BNX2X_MSG_MCP, "function %d load_code %x\n",
5831 BP_ABS_FUNC(bp), load_code);
5833 bp->dmae_ready = 0;
5834 mutex_init(&bp->dmae_mutex);
5835 rc = bnx2x_gunzip_init(bp);
5836 if (rc)
5837 return rc;
5839 switch (load_code) {
5840 case FW_MSG_CODE_DRV_LOAD_COMMON:
5841 case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP:
5842 rc = bnx2x_init_hw_common(bp, load_code);
5843 if (rc)
5844 goto init_hw_err;
5845 /* no break */
5847 case FW_MSG_CODE_DRV_LOAD_PORT:
5848 rc = bnx2x_init_hw_port(bp);
5849 if (rc)
5850 goto init_hw_err;
5851 /* no break */
5853 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
5854 rc = bnx2x_init_hw_func(bp);
5855 if (rc)
5856 goto init_hw_err;
5857 break;
5859 default:
5860 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
5861 break;
5864 if (!BP_NOMCP(bp)) {
5865 int mb_idx = BP_FW_MB_IDX(bp);
5867 bp->fw_drv_pulse_wr_seq =
5868 (SHMEM_RD(bp, func_mb[mb_idx].drv_pulse_mb) &
5869 DRV_PULSE_SEQ_MASK);
5870 DP(BNX2X_MSG_MCP, "drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
5873 init_hw_err:
5874 bnx2x_gunzip_end(bp);
5876 return rc;
5879 void bnx2x_free_mem(struct bnx2x *bp)
5882 #define BNX2X_PCI_FREE(x, y, size) \
5883 do { \
5884 if (x) { \
5885 dma_free_coherent(&bp->pdev->dev, size, (void *)x, y); \
5886 x = NULL; \
5887 y = 0; \
5889 } while (0)
5891 #define BNX2X_FREE(x) \
5892 do { \
5893 if (x) { \
5894 kfree((void *)x); \
5895 x = NULL; \
5897 } while (0)
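/* Usage note (added): a call such as BNX2X_PCI_FREE(bp->spq, bp->spq_mapping,
 * BCM_PAGE_SIZE), as done further below, releases the coherent DMA area and
 * then NULLs the pointer and zeroes the mapping, so invoking the macro again
 * on the same object is a harmless no-op.
 */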
5899 int i;
5901 /* fastpath */
5902 /* Common */
5903 for_each_queue(bp, i) {
5904 #ifdef BCM_CNIC
5905 /* FCoE client uses default status block */
5906 if (IS_FCOE_IDX(i)) {
5907 union host_hc_status_block *sb =
5908 &bnx2x_fp(bp, i, status_blk);
5909 memset(sb, 0, sizeof(union host_hc_status_block));
5910 bnx2x_fp(bp, i, status_blk_mapping) = 0;
5911 } else {
5912 #endif
5913 /* status blocks */
5914 if (CHIP_IS_E2(bp))
5915 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk.e2_sb),
5916 bnx2x_fp(bp, i, status_blk_mapping),
5917 sizeof(struct host_hc_status_block_e2));
5918 else
5919 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk.e1x_sb),
5920 bnx2x_fp(bp, i, status_blk_mapping),
5921 sizeof(struct host_hc_status_block_e1x));
5922 #ifdef BCM_CNIC
5924 #endif
5926 /* Rx */
5927 for_each_rx_queue(bp, i) {
5929 /* fastpath rx rings: rx_buf rx_desc rx_comp */
5930 BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
5931 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
5932 bnx2x_fp(bp, i, rx_desc_mapping),
5933 sizeof(struct eth_rx_bd) * NUM_RX_BD);
5935 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
5936 bnx2x_fp(bp, i, rx_comp_mapping),
5937 sizeof(struct eth_fast_path_rx_cqe) *
5938 NUM_RCQ_BD);
5940 /* SGE ring */
5941 BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
5942 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
5943 bnx2x_fp(bp, i, rx_sge_mapping),
5944 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
5946 /* Tx */
5947 for_each_tx_queue(bp, i) {
5949 /* fastpath tx rings: tx_buf tx_desc */
5950 BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
5951 BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
5952 bnx2x_fp(bp, i, tx_desc_mapping),
5953 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
5955 /* end of fastpath */
5957 BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
5958 sizeof(struct host_sp_status_block));
5960 BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
5961 sizeof(struct bnx2x_slowpath));
5963 BNX2X_PCI_FREE(bp->context.vcxt, bp->context.cxt_mapping,
5964 bp->context.size);
5966 bnx2x_ilt_mem_op(bp, ILT_MEMOP_FREE);
5968 BNX2X_FREE(bp->ilt->lines);
5970 #ifdef BCM_CNIC
5971 if (CHIP_IS_E2(bp))
5972 BNX2X_PCI_FREE(bp->cnic_sb.e2_sb, bp->cnic_sb_mapping,
5973 sizeof(struct host_hc_status_block_e2));
5974 else
5975 BNX2X_PCI_FREE(bp->cnic_sb.e1x_sb, bp->cnic_sb_mapping,
5976 sizeof(struct host_hc_status_block_e1x));
5978 BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, SRC_T2_SZ);
5979 #endif
5981 BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
5983 BNX2X_PCI_FREE(bp->eq_ring, bp->eq_mapping,
5984 BCM_PAGE_SIZE * NUM_EQ_PAGES);
5986 #undef BNX2X_PCI_FREE
5987 #undef BNX2X_FREE
5990 static inline void set_sb_shortcuts(struct bnx2x *bp, int index)
5992 union host_hc_status_block status_blk = bnx2x_fp(bp, index, status_blk);
5993 if (CHIP_IS_E2(bp)) {
5994 bnx2x_fp(bp, index, sb_index_values) =
5995 (__le16 *)status_blk.e2_sb->sb.index_values;
5996 bnx2x_fp(bp, index, sb_running_index) =
5997 (__le16 *)status_blk.e2_sb->sb.running_index;
5998 } else {
5999 bnx2x_fp(bp, index, sb_index_values) =
6000 (__le16 *)status_blk.e1x_sb->sb.index_values;
6001 bnx2x_fp(bp, index, sb_running_index) =
6002 (__le16 *)status_blk.e1x_sb->sb.running_index;
6006 int bnx2x_alloc_mem(struct bnx2x *bp)
6008 #define BNX2X_PCI_ALLOC(x, y, size) \
6009 do { \
6010 x = dma_alloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \
6011 if (x == NULL) \
6012 goto alloc_mem_err; \
6013 memset(x, 0, size); \
6014 } while (0)
6016 #define BNX2X_ALLOC(x, size) \
6017 do { \
6018 x = kzalloc(size, GFP_KERNEL); \
6019 if (x == NULL) \
6020 goto alloc_mem_err; \
6021 } while (0)
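/* Usage note (added): e.g. BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping,
 * BCM_PAGE_SIZE) allocates one zeroed coherent page and stores its DMA
 * address in bp->spq_mapping; on failure it jumps to alloc_mem_err, where
 * bnx2x_free_mem() undoes any allocations made so far and -ENOMEM is
 * returned.
 */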
6023 int i;
6025 /* fastpath */
6026 /* Common */
6027 for_each_queue(bp, i) {
6028 union host_hc_status_block *sb = &bnx2x_fp(bp, i, status_blk);
6029 bnx2x_fp(bp, i, bp) = bp;
6030 /* status blocks */
6031 #ifdef BCM_CNIC
6032 if (!IS_FCOE_IDX(i)) {
6033 #endif
6034 if (CHIP_IS_E2(bp))
6035 BNX2X_PCI_ALLOC(sb->e2_sb,
6036 &bnx2x_fp(bp, i, status_blk_mapping),
6037 sizeof(struct host_hc_status_block_e2));
6038 else
6039 BNX2X_PCI_ALLOC(sb->e1x_sb,
6040 &bnx2x_fp(bp, i, status_blk_mapping),
6041 sizeof(struct host_hc_status_block_e1x));
6042 #ifdef BCM_CNIC
6044 #endif
6045 set_sb_shortcuts(bp, i);
6047 /* Rx */
6048 for_each_queue(bp, i) {
6050 /* fastpath rx rings: rx_buf rx_desc rx_comp */
6051 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
6052 sizeof(struct sw_rx_bd) * NUM_RX_BD);
6053 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
6054 &bnx2x_fp(bp, i, rx_desc_mapping),
6055 sizeof(struct eth_rx_bd) * NUM_RX_BD);
6057 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
6058 &bnx2x_fp(bp, i, rx_comp_mapping),
6059 sizeof(struct eth_fast_path_rx_cqe) *
6060 NUM_RCQ_BD);
6062 /* SGE ring */
6063 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
6064 sizeof(struct sw_rx_page) * NUM_RX_SGE);
6065 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
6066 &bnx2x_fp(bp, i, rx_sge_mapping),
6067 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
6069 /* Tx */
6070 for_each_queue(bp, i) {
6072 /* fastpath tx rings: tx_buf tx_desc */
6073 BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
6074 sizeof(struct sw_tx_bd) * NUM_TX_BD);
6075 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
6076 &bnx2x_fp(bp, i, tx_desc_mapping),
6077 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
6079 /* end of fastpath */
6081 #ifdef BCM_CNIC
6082 if (CHIP_IS_E2(bp))
6083 BNX2X_PCI_ALLOC(bp->cnic_sb.e2_sb, &bp->cnic_sb_mapping,
6084 sizeof(struct host_hc_status_block_e2));
6085 else
6086 BNX2X_PCI_ALLOC(bp->cnic_sb.e1x_sb, &bp->cnic_sb_mapping,
6087 sizeof(struct host_hc_status_block_e1x));
6089 /* allocate searcher T2 table */
6090 BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, SRC_T2_SZ);
6091 #endif
6094 BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
6095 sizeof(struct host_sp_status_block));
6097 BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
6098 sizeof(struct bnx2x_slowpath));
6100 bp->context.size = sizeof(union cdu_context) * bp->l2_cid_count;
6102 BNX2X_PCI_ALLOC(bp->context.vcxt, &bp->context.cxt_mapping,
6103 bp->context.size);
6105 BNX2X_ALLOC(bp->ilt->lines, sizeof(struct ilt_line) * ILT_MAX_LINES);
6107 if (bnx2x_ilt_mem_op(bp, ILT_MEMOP_ALLOC))
6108 goto alloc_mem_err;
6110 /* Slow path ring */
6111 BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);
6113 /* EQ */
6114 BNX2X_PCI_ALLOC(bp->eq_ring, &bp->eq_mapping,
6115 BCM_PAGE_SIZE * NUM_EQ_PAGES);
6116 return 0;
6118 alloc_mem_err:
6119 bnx2x_free_mem(bp);
6120 return -ENOMEM;
6122 #undef BNX2X_PCI_ALLOC
6123 #undef BNX2X_ALLOC
6127 * Init service functions
6129 static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
6130 int *state_p, int flags);
6132 int bnx2x_func_start(struct bnx2x *bp)
6134 bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_START, 0, 0, 0, 1);
6136 /* Wait for completion */
6137 return bnx2x_wait_ramrod(bp, BNX2X_STATE_FUNC_STARTED, 0, &(bp->state),
6138 WAIT_RAMROD_COMMON);
6141 static int bnx2x_func_stop(struct bnx2x *bp)
6143 bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_STOP, 0, 0, 0, 1);
6145 /* Wait for completion */
6146 return bnx2x_wait_ramrod(bp, BNX2X_STATE_CLOSING_WAIT4_UNLOAD,
6147 0, &(bp->state), WAIT_RAMROD_COMMON);
6151 * Sets a MAC in a CAM for a few L2 Clients for E1x chips
6153 * @param bp driver descriptor
6154 * @param set set or clear an entry (1 or 0)
6155 * @param mac pointer to a buffer containing a MAC
6156 * @param cl_bit_vec bit vector of clients to register a MAC for
6157 * @param cam_offset offset in a CAM to use
6158 * @param is_bcast is the set MAC a broadcast address (for E1 only)
6160 static void bnx2x_set_mac_addr_gen(struct bnx2x *bp, int set, const u8 *mac,
6161 u32 cl_bit_vec, u8 cam_offset,
6162 u8 is_bcast)
6164 struct mac_configuration_cmd *config =
6165 (struct mac_configuration_cmd *)bnx2x_sp(bp, mac_config);
6166 int ramrod_flags = WAIT_RAMROD_COMMON;
6168 bp->set_mac_pending = 1;
6169 smp_wmb();
6171 config->hdr.length = 1;
6172 config->hdr.offset = cam_offset;
6173 config->hdr.client_id = 0xff;
6174 config->hdr.reserved1 = 0;
6176 /* primary MAC */
6177 config->config_table[0].msb_mac_addr =
6178 swab16(*(u16 *)&mac[0]);
6179 config->config_table[0].middle_mac_addr =
6180 swab16(*(u16 *)&mac[2]);
6181 config->config_table[0].lsb_mac_addr =
6182 swab16(*(u16 *)&mac[4]);
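/* Illustrative packing (added): for a MAC of 00:10:18:aa:bb:cc on a
 * little-endian host, the swab16() calls above yield msb 0x0010,
 * middle 0x18aa and lsb 0xbbcc, i.e. the DP message below prints
 * "0010:18aa:bbcc".
 */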
6183 config->config_table[0].clients_bit_vector =
6184 cpu_to_le32(cl_bit_vec);
6185 config->config_table[0].vlan_id = 0;
6186 config->config_table[0].pf_id = BP_FUNC(bp);
6187 if (set)
6188 SET_FLAG(config->config_table[0].flags,
6189 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
6190 T_ETH_MAC_COMMAND_SET);
6191 else
6192 SET_FLAG(config->config_table[0].flags,
6193 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
6194 T_ETH_MAC_COMMAND_INVALIDATE);
6196 if (is_bcast)
6197 SET_FLAG(config->config_table[0].flags,
6198 MAC_CONFIGURATION_ENTRY_BROADCAST, 1);
6200 DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x) PF_ID %d CLID mask %d\n",
6201 (set ? "setting" : "clearing"),
6202 config->config_table[0].msb_mac_addr,
6203 config->config_table[0].middle_mac_addr,
6204 config->config_table[0].lsb_mac_addr, BP_FUNC(bp), cl_bit_vec);
6206 bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
6207 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
6208 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 1);
6210 /* Wait for a completion */
6211 bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, ramrod_flags);
6214 static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
6215 int *state_p, int flags)
6217 /* can take a while if any port is running */
6218 int cnt = 5000;
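/* Note (added): 5000 iterations with an msleep(1) per pass give a budget of
 * roughly five seconds (longer in practice, depending on scheduling). */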
6219 u8 poll = flags & WAIT_RAMROD_POLL;
6220 u8 common = flags & WAIT_RAMROD_COMMON;
6222 DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
6223 poll ? "polling" : "waiting", state, idx);
6225 might_sleep();
6226 while (cnt--) {
6227 if (poll) {
6228 if (common)
6229 bnx2x_eq_int(bp);
6230 else {
6231 bnx2x_rx_int(bp->fp, 10);
6232 /* if index is different from 0
6233 * the reply for some commands will
6234 * be on the non default queue
6236 if (idx)
6237 bnx2x_rx_int(&bp->fp[idx], 10);
6241 mb(); /* state is changed by bnx2x_sp_event() */
6242 if (*state_p == state) {
6243 #ifdef BNX2X_STOP_ON_ERROR
6244 DP(NETIF_MSG_IFUP, "exit (cnt %d)\n", 5000 - cnt);
6245 #endif
6246 return 0;
6249 msleep(1);
6251 if (bp->panic)
6252 return -EIO;
6255 /* timeout! */
6256 BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
6257 poll ? "polling" : "waiting", state, idx);
6258 #ifdef BNX2X_STOP_ON_ERROR
6259 bnx2x_panic();
6260 #endif
6262 return -EBUSY;
6265 static u8 bnx2x_e1h_cam_offset(struct bnx2x *bp, u8 rel_offset)
6267 if (CHIP_IS_E1H(bp))
6268 return E1H_FUNC_MAX * rel_offset + BP_FUNC(bp);
6269 else if (CHIP_MODE_IS_4_PORT(bp))
6270 return BP_FUNC(bp) * 32 + rel_offset;
6271 else
6272 return BP_VN(bp) * 32 + rel_offset;
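/* Illustrative example (added): on an E1H chip, assuming E1H_FUNC_MAX is 8,
 * function 2 with rel_offset 1 maps to CAM offset 8 * 1 + 2 = 10; in 4-port
 * mode, function 1 with rel_offset 1 maps to 1 * 32 + 1 = 33.
 */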
6276 * LLH CAM line allocations: currently only iSCSI and ETH macs are
6277 * relevant. In addition, current implementation is tuned for a
6278 * single ETH MAC.
6280 * When a PF configuration with multiple unicast ETH MACs is
6281 * required in switch-independent mode (NetQ, multiple netdev
6282 * MACs, etc.), consider making better use of the 16 per-function
6283 * MAC entries in the LLH memory.
6285 enum {
6286 LLH_CAM_ISCSI_ETH_LINE = 0,
6287 LLH_CAM_ETH_LINE,
6288 LLH_CAM_MAX_PF_LINE = NIG_REG_LLH1_FUNC_MEM_SIZE
6291 static void bnx2x_set_mac_in_nig(struct bnx2x *bp,
6292 int set,
6293 unsigned char *dev_addr,
6294 int index)
6296 u32 wb_data[2];
6297 u32 mem_offset, ena_offset, mem_index;
6299 * indexes mapping:
6300 * 0..7 - goes to MEM
6301 * 8..15 - goes to MEM2
6304 if (!IS_MF_SI(bp) || index > LLH_CAM_MAX_PF_LINE)
6305 return;
6307 /* calculate memory start offset according to the mapping
6308 * and index in the memory */
6309 if (index < NIG_LLH_FUNC_MEM_MAX_OFFSET) {
6310 mem_offset = BP_PORT(bp) ? NIG_REG_LLH1_FUNC_MEM :
6311 NIG_REG_LLH0_FUNC_MEM;
6312 ena_offset = BP_PORT(bp) ? NIG_REG_LLH1_FUNC_MEM_ENABLE :
6313 NIG_REG_LLH0_FUNC_MEM_ENABLE;
6314 mem_index = index;
6315 } else {
6316 mem_offset = BP_PORT(bp) ? NIG_REG_P1_LLH_FUNC_MEM2 :
6317 NIG_REG_P0_LLH_FUNC_MEM2;
6318 ena_offset = BP_PORT(bp) ? NIG_REG_P1_LLH_FUNC_MEM2_ENABLE :
6319 NIG_REG_P0_LLH_FUNC_MEM2_ENABLE;
6320 mem_index = index - NIG_LLH_FUNC_MEM_MAX_OFFSET;
6323 if (set) {
6324 /* LLH_FUNC_MEM is a u64 WB register */
6325 mem_offset += 8*mem_index;
6327 wb_data[0] = ((dev_addr[2] << 24) | (dev_addr[3] << 16) |
6328 (dev_addr[4] << 8) | dev_addr[5]);
6329 wb_data[1] = ((dev_addr[0] << 8) | dev_addr[1]);
6331 REG_WR_DMAE(bp, mem_offset, wb_data, 2);
6334 /* enable/disable the entry */
6335 REG_WR(bp, ena_offset + 4*mem_index, set);
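/* Illustrative packing (added): for a MAC of 00:10:18:aa:bb:cc the wide-bus
 * write above is wb_data[0] = 0x18aabbcc and wb_data[1] = 0x00000010; each
 * LLH_FUNC_MEM entry is a 64-bit register, hence the 8 * mem_index offset.
 */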
6339 void bnx2x_set_eth_mac(struct bnx2x *bp, int set)
6341 u8 cam_offset = (CHIP_IS_E1(bp) ? (BP_PORT(bp) ? 32 : 0) :
6342 bnx2x_e1h_cam_offset(bp, CAM_ETH_LINE));
6344 /* networking MAC */
6345 bnx2x_set_mac_addr_gen(bp, set, bp->dev->dev_addr,
6346 (1 << bp->fp->cl_id), cam_offset , 0);
6348 bnx2x_set_mac_in_nig(bp, set, bp->dev->dev_addr, LLH_CAM_ETH_LINE);
6350 if (CHIP_IS_E1(bp)) {
6351 /* broadcast MAC */
6352 static const u8 bcast[ETH_ALEN] = {
6353 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
6355 bnx2x_set_mac_addr_gen(bp, set, bcast, 0, cam_offset + 1, 1);
6358 static void bnx2x_set_e1_mc_list(struct bnx2x *bp, u8 offset)
6360 int i = 0, old;
6361 struct net_device *dev = bp->dev;
6362 struct netdev_hw_addr *ha;
6363 struct mac_configuration_cmd *config_cmd = bnx2x_sp(bp, mcast_config);
6364 dma_addr_t config_cmd_map = bnx2x_sp_mapping(bp, mcast_config);
6366 netdev_for_each_mc_addr(ha, dev) {
6367 /* copy mac */
6368 config_cmd->config_table[i].msb_mac_addr =
6369 swab16(*(u16 *)&bnx2x_mc_addr(ha)[0]);
6370 config_cmd->config_table[i].middle_mac_addr =
6371 swab16(*(u16 *)&bnx2x_mc_addr(ha)[2]);
6372 config_cmd->config_table[i].lsb_mac_addr =
6373 swab16(*(u16 *)&bnx2x_mc_addr(ha)[4]);
6375 config_cmd->config_table[i].vlan_id = 0;
6376 config_cmd->config_table[i].pf_id = BP_FUNC(bp);
6377 config_cmd->config_table[i].clients_bit_vector =
6378 cpu_to_le32(1 << BP_L_ID(bp));
6380 SET_FLAG(config_cmd->config_table[i].flags,
6381 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
6382 T_ETH_MAC_COMMAND_SET);
6384 DP(NETIF_MSG_IFUP,
6385 "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
6386 config_cmd->config_table[i].msb_mac_addr,
6387 config_cmd->config_table[i].middle_mac_addr,
6388 config_cmd->config_table[i].lsb_mac_addr);
6389 i++;
6391 old = config_cmd->hdr.length;
6392 if (old > i) {
6393 for (; i < old; i++) {
6394 if (CAM_IS_INVALID(config_cmd->
6395 config_table[i])) {
6396 /* already invalidated */
6397 break;
6399 /* invalidate */
6400 SET_FLAG(config_cmd->config_table[i].flags,
6401 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
6402 T_ETH_MAC_COMMAND_INVALIDATE);
6406 config_cmd->hdr.length = i;
6407 config_cmd->hdr.offset = offset;
6408 config_cmd->hdr.client_id = 0xff;
6409 config_cmd->hdr.reserved1 = 0;
6411 bp->set_mac_pending = 1;
6412 smp_wmb();
6414 bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
6415 U64_HI(config_cmd_map), U64_LO(config_cmd_map), 1);
6417 static void bnx2x_invlidate_e1_mc_list(struct bnx2x *bp)
6419 int i;
6420 struct mac_configuration_cmd *config_cmd = bnx2x_sp(bp, mcast_config);
6421 dma_addr_t config_cmd_map = bnx2x_sp_mapping(bp, mcast_config);
6422 int ramrod_flags = WAIT_RAMROD_COMMON;
6424 bp->set_mac_pending = 1;
6425 smp_wmb();
6427 for (i = 0; i < config_cmd->hdr.length; i++)
6428 SET_FLAG(config_cmd->config_table[i].flags,
6429 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
6430 T_ETH_MAC_COMMAND_INVALIDATE);
6432 bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
6433 U64_HI(config_cmd_map), U64_LO(config_cmd_map), 1);
6435 /* Wait for a completion */
6436 bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending,
6437 ramrod_flags);
6441 #ifdef BCM_CNIC
6443 * Set iSCSI MAC(s) at the next entries in the CAM after the ETH
6444 * MAC(s). This function will wait until the ramrod completion
6445 * returns.
6447 * @param bp driver handle
6448 * @param set set or clear the CAM entry
6450 * @return 0 on success, -ENODEV if the ramrod doesn't return.
6452 static int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp, int set)
6454 u8 cam_offset = (CHIP_IS_E1(bp) ? ((BP_PORT(bp) ? 32 : 0) + 2) :
6455 bnx2x_e1h_cam_offset(bp, CAM_ISCSI_ETH_LINE));
6456 u32 iscsi_l2_cl_id = BNX2X_ISCSI_ETH_CL_ID +
6457 BP_E1HVN(bp) * NONE_ETH_CONTEXT_USE;
6458 u32 cl_bit_vec = (1 << iscsi_l2_cl_id);
6460 /* Send a SET_MAC ramrod */
6461 bnx2x_set_mac_addr_gen(bp, set, bp->iscsi_mac, cl_bit_vec,
6462 cam_offset, 0);
6464 bnx2x_set_mac_in_nig(bp, set, bp->iscsi_mac, LLH_CAM_ISCSI_ETH_LINE);
6466 return 0;
6470 * Set FCoE L2 MAC(s) at the next entries in the CAM after the
6471 * ETH MAC(s). This function will wait until the ramrod
6472 * completion returns.
6474 * @param bp driver handle
6475 * @param set set or clear the CAM entry
6477 * @return 0 on success, -ENODEV if the ramrod doesn't return.
6479 int bnx2x_set_fip_eth_mac_addr(struct bnx2x *bp, int set)
6481 u32 cl_bit_vec = (1 << bnx2x_fcoe(bp, cl_id));
6483 * CAM allocation for E1H
6484 * eth unicasts: by func number
6485 * iscsi: by func number
6486 * fip unicast: by func number
6487 * fip multicast: by func number
6489 bnx2x_set_mac_addr_gen(bp, set, bp->fip_mac,
6490 cl_bit_vec, bnx2x_e1h_cam_offset(bp, CAM_FIP_ETH_LINE), 0);
6492 return 0;
6495 int bnx2x_set_all_enode_macs(struct bnx2x *bp, int set)
6497 u32 cl_bit_vec = (1 << bnx2x_fcoe(bp, cl_id));
6500 * CAM allocation for E1H
6501 * eth unicasts: by func number
6502 * iscsi: by func number
6503 * fip unicast: by func number
6504 * fip multicast: by func number
6506 bnx2x_set_mac_addr_gen(bp, set, ALL_ENODE_MACS, cl_bit_vec,
6507 bnx2x_e1h_cam_offset(bp, CAM_FIP_MCAST_LINE), 0);
6509 return 0;
6511 #endif
6513 static void bnx2x_fill_cl_init_data(struct bnx2x *bp,
6514 struct bnx2x_client_init_params *params,
6515 u8 activate,
6516 struct client_init_ramrod_data *data)
6518 /* Clear the buffer */
6519 memset(data, 0, sizeof(*data));
6521 /* general */
6522 data->general.client_id = params->rxq_params.cl_id;
6523 data->general.statistics_counter_id = params->rxq_params.stat_id;
6524 data->general.statistics_en_flg =
6525 (params->rxq_params.flags & QUEUE_FLG_STATS) ? 1 : 0;
6526 data->general.is_fcoe_flg =
6527 (params->ramrod_params.flags & CLIENT_IS_FCOE) ? 1 : 0;
6528 data->general.activate_flg = activate;
6529 data->general.sp_client_id = params->rxq_params.spcl_id;
6531 /* Rx data */
6532 data->rx.tpa_en_flg =
6533 (params->rxq_params.flags & QUEUE_FLG_TPA) ? 1 : 0;
6534 data->rx.vmqueue_mode_en_flg = 0;
6535 data->rx.cache_line_alignment_log_size =
6536 params->rxq_params.cache_line_log;
6537 data->rx.enable_dynamic_hc =
6538 (params->rxq_params.flags & QUEUE_FLG_DHC) ? 1 : 0;
6539 data->rx.max_sges_for_packet = params->rxq_params.max_sges_pkt;
6540 data->rx.client_qzone_id = params->rxq_params.cl_qzone_id;
6541 data->rx.max_agg_size = params->rxq_params.tpa_agg_sz;
6543 /* We don't set drop flags */
6544 data->rx.drop_ip_cs_err_flg = 0;
6545 data->rx.drop_tcp_cs_err_flg = 0;
6546 data->rx.drop_ttl0_flg = 0;
6547 data->rx.drop_udp_cs_err_flg = 0;
6549 data->rx.inner_vlan_removal_enable_flg =
6550 (params->rxq_params.flags & QUEUE_FLG_VLAN) ? 1 : 0;
6551 data->rx.outer_vlan_removal_enable_flg =
6552 (params->rxq_params.flags & QUEUE_FLG_OV) ? 1 : 0;
6553 data->rx.status_block_id = params->rxq_params.fw_sb_id;
6554 data->rx.rx_sb_index_number = params->rxq_params.sb_cq_index;
6555 data->rx.bd_buff_size = cpu_to_le16(params->rxq_params.buf_sz);
6556 data->rx.sge_buff_size = cpu_to_le16(params->rxq_params.sge_buf_sz);
6557 data->rx.mtu = cpu_to_le16(params->rxq_params.mtu);
6558 data->rx.bd_page_base.lo =
6559 cpu_to_le32(U64_LO(params->rxq_params.dscr_map));
6560 data->rx.bd_page_base.hi =
6561 cpu_to_le32(U64_HI(params->rxq_params.dscr_map));
6562 data->rx.sge_page_base.lo =
6563 cpu_to_le32(U64_LO(params->rxq_params.sge_map));
6564 data->rx.sge_page_base.hi =
6565 cpu_to_le32(U64_HI(params->rxq_params.sge_map));
6566 data->rx.cqe_page_base.lo =
6567 cpu_to_le32(U64_LO(params->rxq_params.rcq_map));
6568 data->rx.cqe_page_base.hi =
6569 cpu_to_le32(U64_HI(params->rxq_params.rcq_map));
6570 data->rx.is_leading_rss =
6571 (params->ramrod_params.flags & CLIENT_IS_LEADING_RSS) ? 1 : 0;
6572 data->rx.is_approx_mcast = data->rx.is_leading_rss;
6574 /* Tx data */
6575 data->tx.enforce_security_flg = 0; /* VF specific */
6576 data->tx.tx_status_block_id = params->txq_params.fw_sb_id;
6577 data->tx.tx_sb_index_number = params->txq_params.sb_cq_index;
6578 data->tx.mtu = 0; /* VF specific */
6579 data->tx.tx_bd_page_base.lo =
6580 cpu_to_le32(U64_LO(params->txq_params.dscr_map));
6581 data->tx.tx_bd_page_base.hi =
6582 cpu_to_le32(U64_HI(params->txq_params.dscr_map));
6584 /* flow control data */
6585 data->fc.cqe_pause_thr_low = cpu_to_le16(params->pause.rcq_th_lo);
6586 data->fc.cqe_pause_thr_high = cpu_to_le16(params->pause.rcq_th_hi);
6587 data->fc.bd_pause_thr_low = cpu_to_le16(params->pause.bd_th_lo);
6588 data->fc.bd_pause_thr_high = cpu_to_le16(params->pause.bd_th_hi);
6589 data->fc.sge_pause_thr_low = cpu_to_le16(params->pause.sge_th_lo);
6590 data->fc.sge_pause_thr_high = cpu_to_le16(params->pause.sge_th_hi);
6591 data->fc.rx_cos_mask = cpu_to_le16(params->pause.pri_map);
6593 data->fc.safc_group_num = params->txq_params.cos;
6594 data->fc.safc_group_en_flg =
6595 (params->txq_params.flags & QUEUE_FLG_COS) ? 1 : 0;
6596 data->fc.traffic_type =
6597 (params->ramrod_params.flags & CLIENT_IS_FCOE) ?
6598 LLFC_TRAFFIC_TYPE_FCOE : LLFC_TRAFFIC_TYPE_NW;
6601 static inline void bnx2x_set_ctx_validation(struct eth_context *cxt, u32 cid)
6603 /* ustorm cxt validation */
6604 cxt->ustorm_ag_context.cdu_usage =
6605 CDU_RSRVD_VALUE_TYPE_A(cid, CDU_REGION_NUMBER_UCM_AG,
6606 ETH_CONNECTION_TYPE);
6607 /* xcontext validation */
6608 cxt->xstorm_ag_context.cdu_reserved =
6609 CDU_RSRVD_VALUE_TYPE_A(cid, CDU_REGION_NUMBER_XCM_AG,
6610 ETH_CONNECTION_TYPE);
6613 static int bnx2x_setup_fw_client(struct bnx2x *bp,
6614 struct bnx2x_client_init_params *params,
6615 u8 activate,
6616 struct client_init_ramrod_data *data,
6617 dma_addr_t data_mapping)
6619 u16 hc_usec;
6620 int ramrod = RAMROD_CMD_ID_ETH_CLIENT_SETUP;
6621 int ramrod_flags = 0, rc;
6623 /* HC and context validation values */
6624 hc_usec = params->txq_params.hc_rate ?
6625 1000000 / params->txq_params.hc_rate : 0;
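/* Note (added): hc_rate is in status-block updates per second, so e.g. an
 * hc_rate of 50000 gives a 20 usec coalescing period; a rate of 0 leaves
 * hc_usec at 0. The same conversion is applied to the Rx queue below. */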
6626 bnx2x_update_coalesce_sb_index(bp,
6627 params->txq_params.fw_sb_id,
6628 params->txq_params.sb_cq_index,
6629 !(params->txq_params.flags & QUEUE_FLG_HC),
6630 hc_usec);
6632 *(params->ramrod_params.pstate) = BNX2X_FP_STATE_OPENING;
6634 hc_usec = params->rxq_params.hc_rate ?
6635 1000000 / params->rxq_params.hc_rate : 0;
6636 bnx2x_update_coalesce_sb_index(bp,
6637 params->rxq_params.fw_sb_id,
6638 params->rxq_params.sb_cq_index,
6639 !(params->rxq_params.flags & QUEUE_FLG_HC),
6640 hc_usec);
6642 bnx2x_set_ctx_validation(params->rxq_params.cxt,
6643 params->rxq_params.cid);
6645 /* zero stats */
6646 if (params->txq_params.flags & QUEUE_FLG_STATS)
6647 storm_memset_xstats_zero(bp, BP_PORT(bp),
6648 params->txq_params.stat_id);
6650 if (params->rxq_params.flags & QUEUE_FLG_STATS) {
6651 storm_memset_ustats_zero(bp, BP_PORT(bp),
6652 params->rxq_params.stat_id);
6653 storm_memset_tstats_zero(bp, BP_PORT(bp),
6654 params->rxq_params.stat_id);
6657 /* Fill the ramrod data */
6658 bnx2x_fill_cl_init_data(bp, params, activate, data);
6660 /* SETUP ramrod.
6662 * bnx2x_sp_post() takes a spin_lock, thus no explicit memory
6663 * barrier apart from mmiowb() is needed to impose a
6664 * proper ordering of memory operations.
6666 mmiowb();
6669 bnx2x_sp_post(bp, ramrod, params->ramrod_params.cid,
6670 U64_HI(data_mapping), U64_LO(data_mapping), 0);
6672 /* Wait for completion */
6673 rc = bnx2x_wait_ramrod(bp, params->ramrod_params.state,
6674 params->ramrod_params.index,
6675 params->ramrod_params.pstate,
6676 ramrod_flags);
6677 return rc;
6681 * Configure interrupt mode according to current configuration.
6682 * In case of MSI-X it will also try to enable MSI-X.
6684 * @param bp
6686 * @return int
6688 static int __devinit bnx2x_set_int_mode(struct bnx2x *bp)
6690 int rc = 0;
6692 switch (bp->int_mode) {
6693 case INT_MODE_MSI:
6694 bnx2x_enable_msi(bp);
6695 /* falling through... */
6696 case INT_MODE_INTx:
6697 bp->num_queues = 1 + NONE_ETH_CONTEXT_USE;
6698 DP(NETIF_MSG_IFUP, "set number of queues to 1\n");
6699 break;
6700 default:
6701 /* Set number of queues according to bp->multi_mode value */
6702 bnx2x_set_num_queues(bp);
6704 DP(NETIF_MSG_IFUP, "set number of queues to %d\n",
6705 bp->num_queues);
6707 /* if we can't use MSI-X we only need one fp,
6708 * so try to enable MSI-X with the requested number of fp's
6709 * and fallback to MSI or legacy INTx with one fp
6711 rc = bnx2x_enable_msix(bp);
6712 if (rc) {
6713 /* failed to enable MSI-X */
6714 if (bp->multi_mode)
6715 DP(NETIF_MSG_IFUP,
6716 "Multi requested but failed to "
6717 "enable MSI-X (%d), "
6718 "set number of queues to %d\n",
6719 bp->num_queues,
6720 1 + NONE_ETH_CONTEXT_USE);
6721 bp->num_queues = 1 + NONE_ETH_CONTEXT_USE;
6723 if (!(bp->flags & DISABLE_MSI_FLAG))
6724 bnx2x_enable_msi(bp);
6727 break;
6730 return rc;
6733 /* must be called prior to any HW initializations */
6734 static inline u16 bnx2x_cid_ilt_lines(struct bnx2x *bp)
6736 return L2_ILT_LINES(bp);
6739 void bnx2x_ilt_set_info(struct bnx2x *bp)
6741 struct ilt_client_info *ilt_client;
6742 struct bnx2x_ilt *ilt = BP_ILT(bp);
6743 u16 line = 0;
6745 ilt->start_line = FUNC_ILT_BASE(BP_FUNC(bp));
6746 DP(BNX2X_MSG_SP, "ilt starts at line %d\n", ilt->start_line);
6748 /* CDU */
6749 ilt_client = &ilt->clients[ILT_CLIENT_CDU];
6750 ilt_client->client_num = ILT_CLIENT_CDU;
6751 ilt_client->page_size = CDU_ILT_PAGE_SZ;
6752 ilt_client->flags = ILT_CLIENT_SKIP_MEM;
6753 ilt_client->start = line;
6754 line += L2_ILT_LINES(bp);
6755 #ifdef BCM_CNIC
6756 line += CNIC_ILT_LINES;
6757 #endif
6758 ilt_client->end = line - 1;
6760 DP(BNX2X_MSG_SP, "ilt client[CDU]: start %d, end %d, psz 0x%x, "
6761 "flags 0x%x, hw psz %d\n",
6762 ilt_client->start,
6763 ilt_client->end,
6764 ilt_client->page_size,
6765 ilt_client->flags,
6766 ilog2(ilt_client->page_size >> 12));
6768 /* QM */
6769 if (QM_INIT(bp->qm_cid_count)) {
6770 ilt_client = &ilt->clients[ILT_CLIENT_QM];
6771 ilt_client->client_num = ILT_CLIENT_QM;
6772 ilt_client->page_size = QM_ILT_PAGE_SZ;
6773 ilt_client->flags = 0;
6774 ilt_client->start = line;
6776 /* 4 bytes for each cid */
6777 line += DIV_ROUND_UP(bp->qm_cid_count * QM_QUEUES_PER_FUNC * 4,
6778 QM_ILT_PAGE_SZ);
6780 ilt_client->end = line - 1;
6782 DP(BNX2X_MSG_SP, "ilt client[QM]: start %d, end %d, psz 0x%x, "
6783 "flags 0x%x, hw psz %d\n",
6784 ilt_client->start,
6785 ilt_client->end,
6786 ilt_client->page_size,
6787 ilt_client->flags,
6788 ilog2(ilt_client->page_size >> 12));
6791 /* SRC */
6792 ilt_client = &ilt->clients[ILT_CLIENT_SRC];
6793 #ifdef BCM_CNIC
6794 ilt_client->client_num = ILT_CLIENT_SRC;
6795 ilt_client->page_size = SRC_ILT_PAGE_SZ;
6796 ilt_client->flags = 0;
6797 ilt_client->start = line;
6798 line += SRC_ILT_LINES;
6799 ilt_client->end = line - 1;
6801 DP(BNX2X_MSG_SP, "ilt client[SRC]: start %d, end %d, psz 0x%x, "
6802 "flags 0x%x, hw psz %d\n",
6803 ilt_client->start,
6804 ilt_client->end,
6805 ilt_client->page_size,
6806 ilt_client->flags,
6807 ilog2(ilt_client->page_size >> 12));
6809 #else
6810 ilt_client->flags = (ILT_CLIENT_SKIP_INIT | ILT_CLIENT_SKIP_MEM);
6811 #endif
6813 /* TM */
6814 ilt_client = &ilt->clients[ILT_CLIENT_TM];
6815 #ifdef BCM_CNIC
6816 ilt_client->client_num = ILT_CLIENT_TM;
6817 ilt_client->page_size = TM_ILT_PAGE_SZ;
6818 ilt_client->flags = 0;
6819 ilt_client->start = line;
6820 line += TM_ILT_LINES;
6821 ilt_client->end = line - 1;
6823 DP(BNX2X_MSG_SP, "ilt client[TM]: start %d, end %d, psz 0x%x, "
6824 "flags 0x%x, hw psz %d\n",
6825 ilt_client->start,
6826 ilt_client->end,
6827 ilt_client->page_size,
6828 ilt_client->flags,
6829 ilog2(ilt_client->page_size >> 12));
6831 #else
6832 ilt_client->flags = (ILT_CLIENT_SKIP_INIT | ILT_CLIENT_SKIP_MEM);
6833 #endif
6836 int bnx2x_setup_client(struct bnx2x *bp, struct bnx2x_fastpath *fp,
6837 int is_leading)
6839 struct bnx2x_client_init_params params = { {0} };
6840 int rc;
6842 /* reset IGU state skip FCoE L2 queue */
6843 if (!IS_FCOE_FP(fp))
6844 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0,
6845 IGU_INT_ENABLE, 0);
6847 params.ramrod_params.pstate = &fp->state;
6848 params.ramrod_params.state = BNX2X_FP_STATE_OPEN;
6849 params.ramrod_params.index = fp->index;
6850 params.ramrod_params.cid = fp->cid;
6852 #ifdef BCM_CNIC
6853 if (IS_FCOE_FP(fp))
6854 params.ramrod_params.flags |= CLIENT_IS_FCOE;
6856 #endif
6858 if (is_leading)
6859 params.ramrod_params.flags |= CLIENT_IS_LEADING_RSS;
6861 bnx2x_pf_rx_cl_prep(bp, fp, &params.pause, &params.rxq_params);
6863 bnx2x_pf_tx_cl_prep(bp, fp, &params.txq_params);
6865 rc = bnx2x_setup_fw_client(bp, &params, 1,
6866 bnx2x_sp(bp, client_init_data),
6867 bnx2x_sp_mapping(bp, client_init_data));
6868 return rc;
6871 static int bnx2x_stop_fw_client(struct bnx2x *bp,
6872 struct bnx2x_client_ramrod_params *p)
6874 int rc;
6876 int poll_flag = p->poll ? WAIT_RAMROD_POLL : 0;
6878 /* halt the connection */
6879 *p->pstate = BNX2X_FP_STATE_HALTING;
6880 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, p->cid, 0,
6881 p->cl_id, 0);
6883 /* Wait for completion */
6884 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, p->index,
6885 p->pstate, poll_flag);
6886 if (rc) /* timeout */
6887 return rc;
6889 *p->pstate = BNX2X_FP_STATE_TERMINATING;
6890 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_TERMINATE, p->cid, 0,
6891 p->cl_id, 0);
6892 /* Wait for completion */
6893 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_TERMINATED, p->index,
6894 p->pstate, poll_flag);
6895 if (rc) /* timeout */
6896 return rc;
6899 /* delete cfc entry */
6900 bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_CFC_DEL, p->cid, 0, 0, 1);
6902 /* Wait for completion */
6903 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, p->index,
6904 p->pstate, WAIT_RAMROD_COMMON);
6905 return rc;
6908 static int bnx2x_stop_client(struct bnx2x *bp, int index)
6910 struct bnx2x_client_ramrod_params client_stop = {0};
6911 struct bnx2x_fastpath *fp = &bp->fp[index];
6913 client_stop.index = index;
6914 client_stop.cid = fp->cid;
6915 client_stop.cl_id = fp->cl_id;
6916 client_stop.pstate = &(fp->state);
6917 client_stop.poll = 0;
6919 return bnx2x_stop_fw_client(bp, &client_stop);
6923 static void bnx2x_reset_func(struct bnx2x *bp)
6925 int port = BP_PORT(bp);
6926 int func = BP_FUNC(bp);
6927 int i;
6928 int pfunc_offset_fp = offsetof(struct hc_sb_data, p_func) +
6929 (CHIP_IS_E2(bp) ?
6930 offsetof(struct hc_status_block_data_e2, common) :
6931 offsetof(struct hc_status_block_data_e1x, common));
6932 int pfunc_offset_sp = offsetof(struct hc_sp_status_block_data, p_func);
6933 int pfid_offset = offsetof(struct pci_entity, pf_id);
6935 /* Disable the function in the FW */
6936 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(func), 0);
6937 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(func), 0);
6938 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(func), 0);
6939 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(func), 0);
6941 /* FP SBs */
6942 for_each_eth_queue(bp, i) {
6943 struct bnx2x_fastpath *fp = &bp->fp[i];
6944 REG_WR8(bp,
6945 BAR_CSTRORM_INTMEM +
6946 CSTORM_STATUS_BLOCK_DATA_OFFSET(fp->fw_sb_id)
6947 + pfunc_offset_fp + pfid_offset,
6948 HC_FUNCTION_DISABLED);
6951 /* SP SB */
6952 REG_WR8(bp,
6953 BAR_CSTRORM_INTMEM +
6954 CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func) +
6955 pfunc_offset_sp + pfid_offset,
6956 HC_FUNCTION_DISABLED);
6959 for (i = 0; i < XSTORM_SPQ_DATA_SIZE / 4; i++)
6960 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_DATA_OFFSET(func),
6963 /* Configure IGU */
6964 if (bp->common.int_block == INT_BLOCK_HC) {
6965 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
6966 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
6967 } else {
6968 REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, 0);
6969 REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, 0);
6972 #ifdef BCM_CNIC
6973 /* Disable Timer scan */
6974 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
6976 * Wait for at least 10ms and up to 2 seconds for the timers scan to
6977 * complete
6979 for (i = 0; i < 200; i++) {
6980 msleep(10);
6981 if (!REG_RD(bp, TM_REG_LIN0_SCAN_ON + port*4))
6982 break;
6984 #endif
6985 /* Clear ILT */
6986 bnx2x_clear_func_ilt(bp, func);
6988 /* Timers workaround for an E2 bug: if this is vnic-3,
6989 * we need to set the entire ILT range for these timers.
6991 if (CHIP_IS_E2(bp) && BP_VN(bp) == 3) {
6992 struct ilt_client_info ilt_cli;
6993 /* use dummy TM client */
6994 memset(&ilt_cli, 0, sizeof(struct ilt_client_info));
6995 ilt_cli.start = 0;
6996 ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1;
6997 ilt_cli.client_num = ILT_CLIENT_TM;
6999 bnx2x_ilt_boundry_init_op(bp, &ilt_cli, 0, INITOP_CLEAR);
7002 /* this assumes that reset_port() is called before reset_func() */
7003 if (CHIP_IS_E2(bp))
7004 bnx2x_pf_disable(bp);
7006 bp->dmae_ready = 0;
7009 static void bnx2x_reset_port(struct bnx2x *bp)
7011 int port = BP_PORT(bp);
7012 u32 val;
7014 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
7016 /* Do not rcv packets to BRB */
7017 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
7018 /* Do not direct rcv packets that are not for MCP to the BRB */
7019 REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
7020 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
7022 /* Configure AEU */
7023 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
7025 msleep(100);
7026 /* Check for BRB port occupancy */
7027 val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
7028 if (val)
7029 DP(NETIF_MSG_IFDOWN,
7030 "BRB1 is not empty %d blocks are occupied\n", val);
7032 /* TODO: Close Doorbell port? */
7035 static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
7037 DP(BNX2X_MSG_MCP, "function %d reset_code %x\n",
7038 BP_ABS_FUNC(bp), reset_code);
7040 switch (reset_code) {
7041 case FW_MSG_CODE_DRV_UNLOAD_COMMON:
7042 bnx2x_reset_port(bp);
7043 bnx2x_reset_func(bp);
7044 bnx2x_reset_common(bp);
7045 break;
7047 case FW_MSG_CODE_DRV_UNLOAD_PORT:
7048 bnx2x_reset_port(bp);
7049 bnx2x_reset_func(bp);
7050 break;
7052 case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
7053 bnx2x_reset_func(bp);
7054 break;
7056 default:
7057 BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
7058 break;
7062 #ifdef BCM_CNIC
7063 static inline void bnx2x_del_fcoe_eth_macs(struct bnx2x *bp)
7065 if (bp->flags & FCOE_MACS_SET) {
7066 if (!IS_MF_SD(bp))
7067 bnx2x_set_fip_eth_mac_addr(bp, 0);
7069 bnx2x_set_all_enode_macs(bp, 0);
7071 bp->flags &= ~FCOE_MACS_SET;
7074 #endif
7076 void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode)
7078 int port = BP_PORT(bp);
7079 u32 reset_code = 0;
7080 int i, cnt, rc;
7082 /* Wait until tx fastpath tasks complete */
7083 for_each_tx_queue(bp, i) {
7084 struct bnx2x_fastpath *fp = &bp->fp[i];
7086 cnt = 1000;
7087 while (bnx2x_has_tx_work_unload(fp)) {
7089 if (!cnt) {
7090 BNX2X_ERR("timeout waiting for queue[%d]\n",
7092 #ifdef BNX2X_STOP_ON_ERROR
7093 bnx2x_panic();
7094 return -EBUSY;
7095 #else
7096 break;
7097 #endif
7099 cnt--;
7100 msleep(1);
7103 /* Give HW time to discard old tx messages */
7104 msleep(1);
7106 if (CHIP_IS_E1(bp)) {
7107 /* invalidate mc list,
7108 * wait and poll (interrupts are off)
7110 bnx2x_invlidate_e1_mc_list(bp);
7111 bnx2x_set_eth_mac(bp, 0);
7113 } else {
7114 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
7116 bnx2x_set_eth_mac(bp, 0);
7118 for (i = 0; i < MC_HASH_SIZE; i++)
7119 REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
7122 #ifdef BCM_CNIC
7123 bnx2x_del_fcoe_eth_macs(bp);
7124 #endif
7126 if (unload_mode == UNLOAD_NORMAL)
7127 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7129 else if (bp->flags & NO_WOL_FLAG)
7130 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
7132 else if (bp->wol) {
7133 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
7134 u8 *mac_addr = bp->dev->dev_addr;
7135 u32 val;
7136 /* The mac address is written to entries 1-4 to
7137 preserve entry 0 which is used by the PMF */
7138 u8 entry = (BP_E1HVN(bp) + 1)*8;
7140 val = (mac_addr[0] << 8) | mac_addr[1];
7141 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
7143 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
7144 (mac_addr[4] << 8) | mac_addr[5];
7145 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
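/* Illustrative example (added): for BP_E1HVN(bp) == 2 the entry above is
 * (2 + 1) * 8 = 24, so the two halves of the MAC land at
 * EMAC_REG_EMAC_MAC_MATCH + 24 and + 28; for a MAC starting with 00:10 the
 * first word written is 0x00000010.
 */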
7147 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
7149 } else
7150 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7152 /* Close multi and leading connections.
7153 Completions for ramrods are collected in a synchronous way */
7154 for_each_queue(bp, i)
7156 if (bnx2x_stop_client(bp, i))
7157 #ifdef BNX2X_STOP_ON_ERROR
7158 return;
7159 #else
7160 goto unload_error;
7161 #endif
7163 rc = bnx2x_func_stop(bp);
7164 if (rc) {
7165 BNX2X_ERR("Function stop failed!\n");
7166 #ifdef BNX2X_STOP_ON_ERROR
7167 return;
7168 #else
7169 goto unload_error;
7170 #endif
7172 #ifndef BNX2X_STOP_ON_ERROR
7173 unload_error:
7174 #endif
7175 if (!BP_NOMCP(bp))
7176 reset_code = bnx2x_fw_command(bp, reset_code, 0);
7177 else {
7178 DP(NETIF_MSG_IFDOWN, "NO MCP - load counts[%d] "
7179 "%d, %d, %d\n", BP_PATH(bp),
7180 load_count[BP_PATH(bp)][0],
7181 load_count[BP_PATH(bp)][1],
7182 load_count[BP_PATH(bp)][2]);
7183 load_count[BP_PATH(bp)][0]--;
7184 load_count[BP_PATH(bp)][1 + port]--;
7185 DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts[%d] "
7186 "%d, %d, %d\n", BP_PATH(bp),
7187 load_count[BP_PATH(bp)][0], load_count[BP_PATH(bp)][1],
7188 load_count[BP_PATH(bp)][2]);
7189 if (load_count[BP_PATH(bp)][0] == 0)
7190 reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
7191 else if (load_count[BP_PATH(bp)][1 + port] == 0)
7192 reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
7193 else
7194 reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
7197 if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
7198 (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
7199 bnx2x__link_reset(bp);
7201 /* Disable HW interrupts, NAPI */
7202 bnx2x_netif_stop(bp, 1);
7204 /* Release IRQs */
7205 bnx2x_free_irq(bp);
7207 /* Reset the chip */
7208 bnx2x_reset_chip(bp, reset_code);
7210 /* Report UNLOAD_DONE to MCP */
7211 if (!BP_NOMCP(bp))
7212 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
7216 void bnx2x_disable_close_the_gate(struct bnx2x *bp)
7218 u32 val;
7220 DP(NETIF_MSG_HW, "Disabling \"close the gates\"\n");
7222 if (CHIP_IS_E1(bp)) {
7223 int port = BP_PORT(bp);
7224 u32 addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
7225 MISC_REG_AEU_MASK_ATTN_FUNC_0;
7227 val = REG_RD(bp, addr);
7228 val &= ~(0x300);
7229 REG_WR(bp, addr, val);
7230 } else if (CHIP_IS_E1H(bp)) {
7231 val = REG_RD(bp, MISC_REG_AEU_GENERAL_MASK);
7232 val &= ~(MISC_AEU_GENERAL_MASK_REG_AEU_PXP_CLOSE_MASK |
7233 MISC_AEU_GENERAL_MASK_REG_AEU_NIG_CLOSE_MASK);
7234 REG_WR(bp, MISC_REG_AEU_GENERAL_MASK, val);
7238 /* Close gates #2, #3 and #4: */
7239 static void bnx2x_set_234_gates(struct bnx2x *bp, bool close)
7241 u32 val, addr;
7243 /* Gates #2 and #4a are closed/opened for "not E1" only */
7244 if (!CHIP_IS_E1(bp)) {
7245 /* #4 */
7246 val = REG_RD(bp, PXP_REG_HST_DISCARD_DOORBELLS);
7247 REG_WR(bp, PXP_REG_HST_DISCARD_DOORBELLS,
7248 close ? (val | 0x1) : (val & (~(u32)1)));
7249 /* #2 */
7250 val = REG_RD(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES);
7251 REG_WR(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES,
7252 close ? (val | 0x1) : (val & (~(u32)1)));
7255 /* #3 */
7256 addr = BP_PORT(bp) ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
7257 val = REG_RD(bp, addr);
7258 REG_WR(bp, addr, (!close) ? (val | 0x1) : (val & (~(u32)1)));
7260 DP(NETIF_MSG_HW, "%s gates #2, #3 and #4\n",
7261 close ? "closing" : "opening");
7262 mmiowb();
7265 #define SHARED_MF_CLP_MAGIC 0x80000000 /* `magic' bit */
7267 static void bnx2x_clp_reset_prep(struct bnx2x *bp, u32 *magic_val)
7269 /* Do some magic... */
7270 u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
7271 *magic_val = val & SHARED_MF_CLP_MAGIC;
7272 MF_CFG_WR(bp, shared_mf_config.clp_mb, val | SHARED_MF_CLP_MAGIC);
7275 /* Restore the value of the `magic' bit.
7277 * @param bp driver handle.
7278 * @param magic_val Old value of the `magic' bit.
7280 static void bnx2x_clp_reset_done(struct bnx2x *bp, u32 magic_val)
7282 /* Restore the `magic' bit value... */
7283 u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
7284 MF_CFG_WR(bp, shared_mf_config.clp_mb,
7285 (val & (~SHARED_MF_CLP_MAGIC)) | magic_val);
7289 * Prepares for MCP reset: takes care of CLP configurations.
7291 * @param bp
7292 * @param magic_val Old value of 'magic' bit.
7294 static void bnx2x_reset_mcp_prep(struct bnx2x *bp, u32 *magic_val)
7296 u32 shmem;
7297 u32 validity_offset;
7299 DP(NETIF_MSG_HW, "Starting\n");
7301 /* Set `magic' bit in order to save MF config */
7302 if (!CHIP_IS_E1(bp))
7303 bnx2x_clp_reset_prep(bp, magic_val);
7305 /* Get shmem offset */
7306 shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
7307 validity_offset = offsetof(struct shmem_region, validity_map[0]);
7309 /* Clear validity map flags */
7310 if (shmem > 0)
7311 REG_WR(bp, shmem + validity_offset, 0);
7314 #define MCP_TIMEOUT 5000 /* 5 seconds (in ms) */
7315 #define MCP_ONE_TIMEOUT 100 /* 100 ms */
7317 /* Waits for MCP_ONE_TIMEOUT or MCP_ONE_TIMEOUT*10,
7318 * depending on the HW type.
7320 * @param bp
7322 static inline void bnx2x_mcp_wait_one(struct bnx2x *bp)
7324 /* special handling for emulation and FPGA,
7325 wait 10 times longer */
7326 if (CHIP_REV_IS_SLOW(bp))
7327 msleep(MCP_ONE_TIMEOUT*10);
7328 else
7329 msleep(MCP_ONE_TIMEOUT);
7332 static int bnx2x_reset_mcp_comp(struct bnx2x *bp, u32 magic_val)
7334 u32 shmem, cnt, validity_offset, val;
7335 int rc = 0;
7337 msleep(100);
7339 /* Get shmem offset */
7340 shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
7341 if (shmem == 0) {
7342 BNX2X_ERR("Shmem 0 return failure\n");
7343 rc = -ENOTTY;
7344 goto exit_lbl;
7347 validity_offset = offsetof(struct shmem_region, validity_map[0]);
7349 /* Wait for MCP to come up */
7350 for (cnt = 0; cnt < (MCP_TIMEOUT / MCP_ONE_TIMEOUT); cnt++) {
7351 /* TBD: it's best to check the validity map of the last port.
7352 * Currently it checks port 0.
7354 val = REG_RD(bp, shmem + validity_offset);
7355 DP(NETIF_MSG_HW, "shmem 0x%x validity map(0x%x)=0x%x\n", shmem,
7356 shmem + validity_offset, val);
7358 /* check that shared memory is valid. */
7359 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
7360 == (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
7361 break;
7363 bnx2x_mcp_wait_one(bp);
7366 DP(NETIF_MSG_HW, "Cnt=%d Shmem validity map 0x%x\n", cnt, val);
7368 /* Check that shared memory is valid. This indicates that MCP is up. */
7369 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) !=
7370 (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) {
7371 BNX2X_ERR("Shmem signature not present. MCP is not up !!\n");
7372 rc = -ENOTTY;
7373 goto exit_lbl;
7376 exit_lbl:
7377 /* Restore the `magic' bit value */
7378 if (!CHIP_IS_E1(bp))
7379 bnx2x_clp_reset_done(bp, magic_val);
7381 return rc;
7384 static void bnx2x_pxp_prep(struct bnx2x *bp)
7386 if (!CHIP_IS_E1(bp)) {
7387 REG_WR(bp, PXP2_REG_RD_START_INIT, 0);
7388 REG_WR(bp, PXP2_REG_RQ_RBC_DONE, 0);
7389 REG_WR(bp, PXP2_REG_RQ_CFG_DONE, 0);
7390 mmiowb();
7395 * Reset the whole chip except for:
7396 * - PCIE core
7397 * - PCI Glue, PSWHST, PXP/PXP2 RF (all controlled by
7398 * one reset bit)
7399 * - IGU
7400 * - MISC (including AEU)
7401 * - GRC
7402 * - RBCN, RBCP
7404 static void bnx2x_process_kill_chip_reset(struct bnx2x *bp)
7406 u32 not_reset_mask1, reset_mask1, not_reset_mask2, reset_mask2;
7408 not_reset_mask1 =
7409 MISC_REGISTERS_RESET_REG_1_RST_HC |
7410 MISC_REGISTERS_RESET_REG_1_RST_PXPV |
7411 MISC_REGISTERS_RESET_REG_1_RST_PXP;
7413 not_reset_mask2 =
7414 MISC_REGISTERS_RESET_REG_2_RST_MDIO |
7415 MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE |
7416 MISC_REGISTERS_RESET_REG_2_RST_EMAC1_HARD_CORE |
7417 MISC_REGISTERS_RESET_REG_2_RST_MISC_CORE |
7418 MISC_REGISTERS_RESET_REG_2_RST_RBCN |
7419 MISC_REGISTERS_RESET_REG_2_RST_GRC |
7420 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_REG_HARD_CORE |
7421 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_HARD_CORE_RST_B;
7423 reset_mask1 = 0xffffffff;
7425 if (CHIP_IS_E1(bp))
7426 reset_mask2 = 0xffff;
7427 else
7428 reset_mask2 = 0x1ffff;
7430 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
7431 reset_mask1 & (~not_reset_mask1));
7432 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
7433 reset_mask2 & (~not_reset_mask2));
7435 barrier();
7436 mmiowb();
7438 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, reset_mask1);
7439 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, reset_mask2);
7440 mmiowb();
7443 static int bnx2x_process_kill(struct bnx2x *bp)
7445 int cnt = 1000;
7446 u32 val = 0;
7447 u32 sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1, pgl_exp_rom2;
7450 /* Empty the Tetris buffer, wait for 1s */
7451 do {
7452 sr_cnt = REG_RD(bp, PXP2_REG_RD_SR_CNT);
7453 blk_cnt = REG_RD(bp, PXP2_REG_RD_BLK_CNT);
7454 port_is_idle_0 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_0);
7455 port_is_idle_1 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_1);
7456 pgl_exp_rom2 = REG_RD(bp, PXP2_REG_PGL_EXP_ROM2);
7457 if ((sr_cnt == 0x7e) && (blk_cnt == 0xa0) &&
7458 ((port_is_idle_0 & 0x1) == 0x1) &&
7459 ((port_is_idle_1 & 0x1) == 0x1) &&
7460 (pgl_exp_rom2 == 0xffffffff))
7461 break;
7462 msleep(1);
7463 } while (cnt-- > 0);
7465 if (cnt <= 0) {
7466 DP(NETIF_MSG_HW, "Tetris buffer didn't get empty or there"
7467 " are still"
7468 " outstanding read requests after 1s!\n");
7469 DP(NETIF_MSG_HW, "sr_cnt=0x%08x, blk_cnt=0x%08x,"
7470 " port_is_idle_0=0x%08x,"
7471 " port_is_idle_1=0x%08x, pgl_exp_rom2=0x%08x\n",
7472 sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1,
7473 pgl_exp_rom2);
7474 return -EAGAIN;
7477 barrier();
7479 /* Close gates #2, #3 and #4 */
7480 bnx2x_set_234_gates(bp, true);
7482 /* TBD: Indicate that "process kill" is in progress to MCP */
7484 /* Clear "unprepared" bit */
7485 REG_WR(bp, MISC_REG_UNPREPARED, 0);
7486 barrier();
7488 /* Make sure all is written to the chip before the reset */
7489 mmiowb();
7491 /* Wait for 1ms to empty GLUE and PCI-E core queues,
7492 * PSWHST, GRC and PSWRD Tetris buffer.
7494 msleep(1);
7496 /* Prepare for chip reset: */
7497 /* MCP */
7498 bnx2x_reset_mcp_prep(bp, &val);
7500 /* PXP */
7501 bnx2x_pxp_prep(bp);
7502 barrier();
7504 /* reset the chip */
7505 bnx2x_process_kill_chip_reset(bp);
7506 barrier();
7508 /* Recover after reset: */
7509 /* MCP */
7510 if (bnx2x_reset_mcp_comp(bp, val))
7511 return -EAGAIN;
7513 /* PXP */
7514 bnx2x_pxp_prep(bp);
7516 /* Open the gates #2, #3 and #4 */
7517 bnx2x_set_234_gates(bp, false);
7519 /* TBD: IGU/AEU preparation bring back the AEU/IGU to a
7520 * reset state, re-enable attentions. */
7522 return 0;
7525 static int bnx2x_leader_reset(struct bnx2x *bp)
7527 int rc = 0;
7528 /* Try to recover after the failure */
7529 if (bnx2x_process_kill(bp)) {
7530 printk(KERN_ERR "%s: Something bad has happened! Aii!\n",
7531 bp->dev->name);
7532 rc = -EAGAIN;
7533 goto exit_leader_reset;
7536 /* Clear "reset is in progress" bit and update the driver state */
7537 bnx2x_set_reset_done(bp);
7538 bp->recovery_state = BNX2X_RECOVERY_DONE;
7540 exit_leader_reset:
7541 bp->is_leader = 0;
7542 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESERVED_08);
7543 smp_wmb();
7544 return rc;
7547 /* Assumption: runs under rtnl lock. This together with the fact
7548 * that it's called only from bnx2x_reset_task() ensure that it
7549 * will never be called when netif_running(bp->dev) is false.
7551 static void bnx2x_parity_recover(struct bnx2x *bp)
7553 DP(NETIF_MSG_HW, "Handling parity\n");
7554 while (1) {
7555 switch (bp->recovery_state) {
7556 case BNX2X_RECOVERY_INIT:
7557 DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_INIT\n");
7558 /* Try to get a LEADER_LOCK HW lock */
7559 if (bnx2x_trylock_hw_lock(bp,
7560 HW_LOCK_RESOURCE_RESERVED_08))
7561 bp->is_leader = 1;
7563 /* Stop the driver */
7564 /* If interface has been removed - break */
7565 if (bnx2x_nic_unload(bp, UNLOAD_RECOVERY))
7566 return;
7568 bp->recovery_state = BNX2X_RECOVERY_WAIT;
7569 /* Ensure "is_leader" and "recovery_state"
7570 * update values are seen on other CPUs
7572 smp_wmb();
7573 break;
7575 case BNX2X_RECOVERY_WAIT:
7576 DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_WAIT\n");
7577 if (bp->is_leader) {
7578 u32 load_counter = bnx2x_get_load_cnt(bp);
7579 if (load_counter) {
7580 /* Wait until all other functions get
7581 * down.
7583 schedule_delayed_work(&bp->reset_task,
7584 HZ/10);
7585 return;
7586 } else {
7587 /* If all other functions got down -
7588 * try to bring the chip back to
7589 * normal. In any case it's an exit
7590 * point for a leader.
7592 if (bnx2x_leader_reset(bp) ||
7593 bnx2x_nic_load(bp, LOAD_NORMAL)) {
7594 printk(KERN_ERR"%s: Recovery "
7595 "has failed. Power cycle is "
7596 "needed.\n", bp->dev->name);
7597 /* Disconnect this device */
7598 netif_device_detach(bp->dev);
7599 /* Block ifup for all function
7600 * of this ASIC until
7601 * "process kill" or power
7602 * cycle.
7604 bnx2x_set_reset_in_progress(bp);
7605 /* Shut down the power */
7606 bnx2x_set_power_state(bp,
7607 PCI_D3hot);
7608 return;
7611 return;
7613 } else { /* non-leader */
7614 if (!bnx2x_reset_is_done(bp)) {
7615 /* Try to get a LEADER_LOCK HW lock as
7616 * long as a former leader may have
7617 * been unloaded by the user or
7618 * released leadership for another
7619 * reason.
7621 if (bnx2x_trylock_hw_lock(bp,
7622 HW_LOCK_RESOURCE_RESERVED_08)) {
7623 /* I'm a leader now! Restart a
7624 * switch case.
7626 bp->is_leader = 1;
7627 break;
7630 schedule_delayed_work(&bp->reset_task,
7631 HZ/10);
7632 return;
7634 } else { /* A leader has completed
7635 * the "process kill". It's an exit
7636 * point for a non-leader.
7638 bnx2x_nic_load(bp, LOAD_NORMAL);
7639 bp->recovery_state =
7640 BNX2X_RECOVERY_DONE;
7641 smp_wmb();
7642 return;
7645 default:
7646 return;
7651 /* bnx2x_nic_unload() flushes the bnx2x_wq, thus the reset task is
7652 * scheduled on a general queue in order to prevent a deadlock.
7654 static void bnx2x_reset_task(struct work_struct *work)
7656 struct bnx2x *bp = container_of(work, struct bnx2x, reset_task.work);
7658 #ifdef BNX2X_STOP_ON_ERROR
7659 BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
7660 " so reset not done to allow debug dump,\n"
7661 KERN_ERR " you will need to reboot when done\n");
7662 return;
7663 #endif
7665 rtnl_lock();
7667 if (!netif_running(bp->dev))
7668 goto reset_task_exit;
7670 if (unlikely(bp->recovery_state != BNX2X_RECOVERY_DONE))
7671 bnx2x_parity_recover(bp);
7672 else {
7673 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
7674 bnx2x_nic_load(bp, LOAD_NORMAL);
7677 reset_task_exit:
7678 rtnl_unlock();
7681 /* end of nic load/unload */
7684 * Init service functions
7687 static u32 bnx2x_get_pretend_reg(struct bnx2x *bp)
7689 u32 base = PXP2_REG_PGL_PRETEND_FUNC_F0;
7690 u32 stride = PXP2_REG_PGL_PRETEND_FUNC_F1 - base;
7691 return base + (BP_ABS_FUNC(bp)) * stride;
7694 static void bnx2x_undi_int_disable_e1h(struct bnx2x *bp)
7696 u32 reg = bnx2x_get_pretend_reg(bp);
7698 /* Flush all outstanding writes */
7699 mmiowb();
7701 /* Pretend to be function 0 */
7702 REG_WR(bp, reg, 0);
7703 REG_RD(bp, reg); /* Flush the GRC transaction (in the chip) */
7705 /* From now we are in the "like-E1" mode */
7706 bnx2x_int_disable(bp);
7708 /* Flush all outstanding writes */
7709 mmiowb();
7711 /* Restore the original function */
7712 REG_WR(bp, reg, BP_ABS_FUNC(bp));
7713 REG_RD(bp, reg);
7716 static inline void bnx2x_undi_int_disable(struct bnx2x *bp)
7718 if (CHIP_IS_E1(bp))
7719 bnx2x_int_disable(bp);
7720 else
7721 bnx2x_undi_int_disable_e1h(bp);
7724 static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
7726 u32 val;
7728 /* Check if there is any driver already loaded */
7729 val = REG_RD(bp, MISC_REG_UNPREPARED);
7730 if (val == 0x1) {
7731 /* Check if it is the UNDI driver
7732 * UNDI driver initializes CID offset for normal bell to 0x7
7734 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
7735 val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
7736 if (val == 0x7) {
7737 u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7738 /* save our pf_num */
7739 int orig_pf_num = bp->pf_num;
7740 u32 swap_en;
7741 u32 swap_val;
7743 /* clear the UNDI indication */
7744 REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
7746 BNX2X_DEV_INFO("UNDI is active! reset device\n");
7748 /* try unload UNDI on port 0 */
7749 bp->pf_num = 0;
7750 bp->fw_seq =
7751 (SHMEM_RD(bp, func_mb[bp->pf_num].drv_mb_header) &
7752 DRV_MSG_SEQ_NUMBER_MASK);
7753 reset_code = bnx2x_fw_command(bp, reset_code, 0);
7755 /* if UNDI is loaded on the other port */
7756 if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {
7758 /* send "DONE" for previous unload */
7759 bnx2x_fw_command(bp,
7760 DRV_MSG_CODE_UNLOAD_DONE, 0);
7762 /* unload UNDI on port 1 */
7763 bp->pf_num = 1;
7764 bp->fw_seq =
7765 (SHMEM_RD(bp, func_mb[bp->pf_num].drv_mb_header) &
7766 DRV_MSG_SEQ_NUMBER_MASK);
7767 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7769 bnx2x_fw_command(bp, reset_code, 0);
7772 /* now it's safe to release the lock */
7773 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
7775 bnx2x_undi_int_disable(bp);
7777 /* close input traffic and wait for it */
7778 /* Do not rcv packets to BRB */
7779 REG_WR(bp,
7780 (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
7781 NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
7782 /* Do not direct rcv packets that are not for MCP to
7783 * the BRB */
7784 REG_WR(bp,
7785 (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
7786 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
7787 /* clear AEU */
7788 REG_WR(bp,
7789 (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
7790 MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
7791 msleep(10);
7793 /* save NIG port swap info */
7794 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
7795 swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
7796 /* reset device */
7797 REG_WR(bp,
7798 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
7799 0xd3ffffff);
7800 REG_WR(bp,
7801 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
7802 0x1403);
7803 /* take the NIG out of reset and restore swap values */
7804 REG_WR(bp,
7805 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
7806 MISC_REGISTERS_RESET_REG_1_RST_NIG);
7807 REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
7808 REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);
7810 /* send unload done to the MCP */
7811 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
7813 /* restore our func and fw_seq */
7814 bp->pf_num = orig_pf_num;
7815 bp->fw_seq =
7816 (SHMEM_RD(bp, func_mb[bp->pf_num].drv_mb_header) &
7817 DRV_MSG_SEQ_NUMBER_MASK);
7818 } else
7819 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
7823 static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
7825 u32 val, val2, val3, val4, id;
7826 u16 pmc;
7828 /* Get the chip revision id and number. */
7829 /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
7830 val = REG_RD(bp, MISC_REG_CHIP_NUM);
7831 id = ((val & 0xffff) << 16);
7832 val = REG_RD(bp, MISC_REG_CHIP_REV);
7833 id |= ((val & 0xf) << 12);
7834 val = REG_RD(bp, MISC_REG_CHIP_METAL);
7835 id |= ((val & 0xff) << 4);
7836 val = REG_RD(bp, MISC_REG_BOND_ID);
7837 id |= (val & 0xf);
7838 bp->common.chip_id = id;
7840 /* Set doorbell size */
7841 bp->db_size = (1 << BNX2X_DB_SHIFT);
7843 if (CHIP_IS_E2(bp)) {
7844 val = REG_RD(bp, MISC_REG_PORT4MODE_EN_OVWR);
7845 if ((val & 1) == 0)
7846 val = REG_RD(bp, MISC_REG_PORT4MODE_EN);
7847 else
7848 val = (val >> 1) & 1;
7849 BNX2X_DEV_INFO("chip is in %s\n", val ? "4_PORT_MODE" :
7850 "2_PORT_MODE");
7851 bp->common.chip_port_mode = val ? CHIP_4_PORT_MODE :
7852 CHIP_2_PORT_MODE;
7854 if (CHIP_MODE_IS_4_PORT(bp))
7855 bp->pfid = (bp->pf_num >> 1); /* 0..3 */
7856 else
7857 bp->pfid = (bp->pf_num & 0x6); /* 0, 2, 4, 6 */
7858 } else {
7859 bp->common.chip_port_mode = CHIP_PORT_MODE_NONE; /* N/A */
7860 bp->pfid = bp->pf_num; /* 0..7 */
7864 * set base FW non-default (fast path) status block id; this value is
7865 * used to initialize the fw_sb_id saved on the fp/queue structure to
7866 * determine the id used by the FW.
7868 if (CHIP_IS_E1x(bp))
7869 bp->base_fw_ndsb = BP_PORT(bp) * FP_SB_MAX_E1x;
7870 else /* E2 */
7871 bp->base_fw_ndsb = BP_PORT(bp) * FP_SB_MAX_E2;
7873 bp->link_params.chip_id = bp->common.chip_id;
7874 BNX2X_DEV_INFO("chip ID is 0x%x\n", id);
7876 val = (REG_RD(bp, 0x2874) & 0x55);
7877 if ((bp->common.chip_id & 0x1) ||
7878 (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) {
7879 bp->flags |= ONE_PORT_FLAG;
7880 BNX2X_DEV_INFO("single port device\n");
7883 val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
7884 bp->common.flash_size = (NVRAM_1MB_SIZE <<
7885 (val & MCPR_NVM_CFG4_FLASH_SIZE));
7886 BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
7887 bp->common.flash_size, bp->common.flash_size);
7889 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
7890 bp->common.shmem2_base = REG_RD(bp, (BP_PATH(bp) ?
7891 MISC_REG_GENERIC_CR_1 :
7892 MISC_REG_GENERIC_CR_0));
7893 bp->link_params.shmem_base = bp->common.shmem_base;
7894 bp->link_params.shmem2_base = bp->common.shmem2_base;
7895 BNX2X_DEV_INFO("shmem offset 0x%x shmem2 offset 0x%x\n",
7896 bp->common.shmem_base, bp->common.shmem2_base);
7898 if (!bp->common.shmem_base) {
7899 BNX2X_DEV_INFO("MCP not active\n");
7900 bp->flags |= NO_MCP_FLAG;
7901 return;
7904 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
7905 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
7906 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
7907 BNX2X_ERR("BAD MCP validity signature\n");
7909 bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
7910 BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config);
7912 bp->link_params.hw_led_mode = ((bp->common.hw_config &
7913 SHARED_HW_CFG_LED_MODE_MASK) >>
7914 SHARED_HW_CFG_LED_MODE_SHIFT);
7916 bp->link_params.feature_config_flags = 0;
7917 val = SHMEM_RD(bp, dev_info.shared_feature_config.config);
7918 if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED)
7919 bp->link_params.feature_config_flags |=
7920 FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
7921 else
7922 bp->link_params.feature_config_flags &=
7923 ~FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
7925 val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
7926 bp->common.bc_ver = val;
7927 BNX2X_DEV_INFO("bc_ver %X\n", val);
7928 if (val < BNX2X_BC_VER) {
7929 /* for now only warn
7930 * later we might need to enforce this */
7931 BNX2X_ERR("This driver needs bc_ver %X but found %X, "
7932 "please upgrade BC\n", BNX2X_BC_VER, val);
7934 bp->link_params.feature_config_flags |=
7935 (val >= REQ_BC_VER_4_VRFY_FIRST_PHY_OPT_MDL) ?
7936 FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY : 0;
7938 bp->link_params.feature_config_flags |=
7939 (val >= REQ_BC_VER_4_VRFY_SPECIFIC_PHY_OPT_MDL) ?
7940 FEATURE_CONFIG_BC_SUPPORTS_DUAL_PHY_OPT_MDL_VRFY : 0;
7942 if (BP_E1HVN(bp) == 0) {
7943 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
7944 bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
7945 } else {
7946 /* no WOL capability for E1HVN != 0 */
7947 bp->flags |= NO_WOL_FLAG;
7949 BNX2X_DEV_INFO("%sWoL capable\n",
7950 (bp->flags & NO_WOL_FLAG) ? "not " : "");
7952 val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
7953 val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
7954 val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
7955 val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
7957 dev_info(&bp->pdev->dev, "part number %X-%X-%X-%X\n",
7958 val, val2, val3, val4);
7961 #define IGU_FID(val) GET_FIELD((val), IGU_REG_MAPPING_MEMORY_FID)
7962 #define IGU_VEC(val) GET_FIELD((val), IGU_REG_MAPPING_MEMORY_VECTOR)
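/* Determine the default and fast-path status block ids assigned to this
 * function, either from the fixed backward-compatible layout or by
 * scanning the IGU CAM in normal mode.
 */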
7964 static void __devinit bnx2x_get_igu_cam_info(struct bnx2x *bp)
7966 int pfid = BP_FUNC(bp);
7967 int vn = BP_E1HVN(bp);
7968 int igu_sb_id;
7969 u32 val;
7970 u8 fid;
7972 bp->igu_base_sb = 0xff;
7973 bp->igu_sb_cnt = 0;
7974 if (CHIP_INT_MODE_IS_BC(bp)) {
7975 bp->igu_sb_cnt = min_t(u8, FP_SB_MAX_E1x,
7976 NUM_IGU_SB_REQUIRED(bp->l2_cid_count));
7978 bp->igu_base_sb = (CHIP_MODE_IS_4_PORT(bp) ? pfid : vn) *
7979 FP_SB_MAX_E1x;
7981 bp->igu_dsb_id = E1HVN_MAX * FP_SB_MAX_E1x +
7982 (CHIP_MODE_IS_4_PORT(bp) ? pfid : vn);
7984 return;
7987 /* IGU in normal mode - read CAM */
7988 for (igu_sb_id = 0; igu_sb_id < IGU_REG_MAPPING_MEMORY_SIZE;
7989 igu_sb_id++) {
7990 val = REG_RD(bp, IGU_REG_MAPPING_MEMORY + igu_sb_id * 4);
7991 if (!(val & IGU_REG_MAPPING_MEMORY_VALID))
7992 continue;
7993 fid = IGU_FID(val);
7994 if ((fid & IGU_FID_ENCODE_IS_PF)) {
7995 if ((fid & IGU_FID_PF_NUM_MASK) != pfid)
7996 continue;
7997 if (IGU_VEC(val) == 0)
7998 /* default status block */
7999 bp->igu_dsb_id = igu_sb_id;
8000 else {
8001 if (bp->igu_base_sb == 0xff)
8002 bp->igu_base_sb = igu_sb_id;
8003 bp->igu_sb_cnt++;
8007 bp->igu_sb_cnt = min_t(u8, bp->igu_sb_cnt,
8008 NUM_IGU_SB_REQUIRED(bp->l2_cid_count));
8009 if (bp->igu_sb_cnt == 0)
8010 BNX2X_ERR("CAM configuration error\n");
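/* Build the set of link modes this port supports by aggregating the
 * capabilities of the configured PHYs and trimming them with the NVRAM
 * speed capability mask.
 */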
8013 static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
8014 u32 switch_cfg)
8016 int cfg_size = 0, idx, port = BP_PORT(bp);
8018 /* Aggregation of supported attributes of all external phys */
8019 bp->port.supported[0] = 0;
8020 bp->port.supported[1] = 0;
8021 switch (bp->link_params.num_phys) {
8022 case 1:
8023 bp->port.supported[0] = bp->link_params.phy[INT_PHY].supported;
8024 cfg_size = 1;
8025 break;
8026 case 2:
8027 bp->port.supported[0] = bp->link_params.phy[EXT_PHY1].supported;
8028 cfg_size = 1;
8029 break;
8030 case 3:
8031 if (bp->link_params.multi_phy_config &
8032 PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
8033 bp->port.supported[1] =
8034 bp->link_params.phy[EXT_PHY1].supported;
8035 bp->port.supported[0] =
8036 bp->link_params.phy[EXT_PHY2].supported;
8037 } else {
8038 bp->port.supported[0] =
8039 bp->link_params.phy[EXT_PHY1].supported;
8040 bp->port.supported[1] =
8041 bp->link_params.phy[EXT_PHY2].supported;
8043 cfg_size = 2;
8044 break;
8047 if (!(bp->port.supported[0] || bp->port.supported[1])) {
8048 BNX2X_ERR("NVRAM config error. BAD phy config."
8049 "PHY1 config 0x%x, PHY2 config 0x%x\n",
8050 SHMEM_RD(bp,
8051 dev_info.port_hw_config[port].external_phy_config),
8052 SHMEM_RD(bp,
8053 dev_info.port_hw_config[port].external_phy_config2));
8054 return;
8057 switch (switch_cfg) {
8058 case SWITCH_CFG_1G:
8059 bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
8060 port*0x10);
8061 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
8062 break;
8064 case SWITCH_CFG_10G:
8065 bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
8066 port*0x18);
8067 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
8068 break;
8070 default:
8071 BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
8072 bp->port.link_config[0]);
8073 return;
8075 /* mask what we support according to speed_cap_mask per configuration */
8076 for (idx = 0; idx < cfg_size; idx++) {
8077 if (!(bp->link_params.speed_cap_mask[idx] &
8078 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
8079 bp->port.supported[idx] &= ~SUPPORTED_10baseT_Half;
8081 if (!(bp->link_params.speed_cap_mask[idx] &
8082 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
8083 bp->port.supported[idx] &= ~SUPPORTED_10baseT_Full;
8085 if (!(bp->link_params.speed_cap_mask[idx] &
8086 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
8087 bp->port.supported[idx] &= ~SUPPORTED_100baseT_Half;
8089 if (!(bp->link_params.speed_cap_mask[idx] &
8090 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
8091 bp->port.supported[idx] &= ~SUPPORTED_100baseT_Full;
8093 if (!(bp->link_params.speed_cap_mask[idx] &
8094 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
8095 bp->port.supported[idx] &= ~(SUPPORTED_1000baseT_Half |
8096 SUPPORTED_1000baseT_Full);
8098 if (!(bp->link_params.speed_cap_mask[idx] &
8099 PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
8100 bp->port.supported[idx] &= ~SUPPORTED_2500baseX_Full;
8102 if (!(bp->link_params.speed_cap_mask[idx] &
8103 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
8104 bp->port.supported[idx] &= ~SUPPORTED_10000baseT_Full;
8108 BNX2X_DEV_INFO("supported 0x%x 0x%x\n", bp->port.supported[0],
8109 bp->port.supported[1]);
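/* Translate the NVRAM link_config words into the requested line speed,
 * duplex, flow control and advertised modes for each configuration.
 */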
8112 static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
8114 u32 link_config, idx, cfg_size = 0;
8115 bp->port.advertising[0] = 0;
8116 bp->port.advertising[1] = 0;
8117 switch (bp->link_params.num_phys) {
8118 case 1:
8119 case 2:
8120 cfg_size = 1;
8121 break;
8122 case 3:
8123 cfg_size = 2;
8124 break;
8126 for (idx = 0; idx < cfg_size; idx++) {
8127 bp->link_params.req_duplex[idx] = DUPLEX_FULL;
8128 link_config = bp->port.link_config[idx];
8129 switch (link_config & PORT_FEATURE_LINK_SPEED_MASK) {
8130 case PORT_FEATURE_LINK_SPEED_AUTO:
8131 if (bp->port.supported[idx] & SUPPORTED_Autoneg) {
8132 bp->link_params.req_line_speed[idx] =
8133 SPEED_AUTO_NEG;
8134 bp->port.advertising[idx] |=
8135 bp->port.supported[idx];
8136 } else {
8137 /* force 10G, no AN */
8138 bp->link_params.req_line_speed[idx] =
8139 SPEED_10000;
8140 bp->port.advertising[idx] |=
8141 (ADVERTISED_10000baseT_Full |
8142 ADVERTISED_FIBRE);
8143 continue;
8145 break;
8147 case PORT_FEATURE_LINK_SPEED_10M_FULL:
8148 if (bp->port.supported[idx] & SUPPORTED_10baseT_Full) {
8149 bp->link_params.req_line_speed[idx] =
8150 SPEED_10;
8151 bp->port.advertising[idx] |=
8152 (ADVERTISED_10baseT_Full |
8153 ADVERTISED_TP);
8154 } else {
8155 BNX2X_ERROR("NVRAM config error. "
8156 "Invalid link_config 0x%x"
8157 " speed_cap_mask 0x%x\n",
8158 link_config,
8159 bp->link_params.speed_cap_mask[idx]);
8160 return;
8162 break;
8164 case PORT_FEATURE_LINK_SPEED_10M_HALF:
8165 if (bp->port.supported[idx] & SUPPORTED_10baseT_Half) {
8166 bp->link_params.req_line_speed[idx] =
8167 SPEED_10;
8168 bp->link_params.req_duplex[idx] =
8169 DUPLEX_HALF;
8170 bp->port.advertising[idx] |=
8171 (ADVERTISED_10baseT_Half |
8172 ADVERTISED_TP);
8173 } else {
8174 BNX2X_ERROR("NVRAM config error. "
8175 "Invalid link_config 0x%x"
8176 " speed_cap_mask 0x%x\n",
8177 link_config,
8178 bp->link_params.speed_cap_mask[idx]);
8179 return;
8181 break;
8183 case PORT_FEATURE_LINK_SPEED_100M_FULL:
8184 if (bp->port.supported[idx] &
8185 SUPPORTED_100baseT_Full) {
8186 bp->link_params.req_line_speed[idx] =
8187 SPEED_100;
8188 bp->port.advertising[idx] |=
8189 (ADVERTISED_100baseT_Full |
8190 ADVERTISED_TP);
8191 } else {
8192 BNX2X_ERROR("NVRAM config error. "
8193 "Invalid link_config 0x%x"
8194 " speed_cap_mask 0x%x\n",
8195 link_config,
8196 bp->link_params.speed_cap_mask[idx]);
8197 return;
8199 break;
8201 case PORT_FEATURE_LINK_SPEED_100M_HALF:
8202 if (bp->port.supported[idx] &
8203 SUPPORTED_100baseT_Half) {
8204 bp->link_params.req_line_speed[idx] =
8205 SPEED_100;
8206 bp->link_params.req_duplex[idx] =
8207 DUPLEX_HALF;
8208 bp->port.advertising[idx] |=
8209 (ADVERTISED_100baseT_Half |
8210 ADVERTISED_TP);
8211 } else {
8212 BNX2X_ERROR("NVRAM config error. "
8213 "Invalid link_config 0x%x"
8214 " speed_cap_mask 0x%x\n",
8215 link_config,
8216 bp->link_params.speed_cap_mask[idx]);
8217 return;
8219 break;
8221 case PORT_FEATURE_LINK_SPEED_1G:
8222 if (bp->port.supported[idx] &
8223 SUPPORTED_1000baseT_Full) {
8224 bp->link_params.req_line_speed[idx] =
8225 SPEED_1000;
8226 bp->port.advertising[idx] |=
8227 (ADVERTISED_1000baseT_Full |
8228 ADVERTISED_TP);
8229 } else {
8230 BNX2X_ERROR("NVRAM config error. "
8231 "Invalid link_config 0x%x"
8232 " speed_cap_mask 0x%x\n",
8233 link_config,
8234 bp->link_params.speed_cap_mask[idx]);
8235 return;
8237 break;
8239 case PORT_FEATURE_LINK_SPEED_2_5G:
8240 if (bp->port.supported[idx] &
8241 SUPPORTED_2500baseX_Full) {
8242 bp->link_params.req_line_speed[idx] =
8243 SPEED_2500;
8244 bp->port.advertising[idx] |=
8245 (ADVERTISED_2500baseX_Full |
8246 ADVERTISED_TP);
8247 } else {
8248 BNX2X_ERROR("NVRAM config error. "
8249 "Invalid link_config 0x%x"
8250 " speed_cap_mask 0x%x\n",
8251 link_config,
8252 bp->link_params.speed_cap_mask[idx]);
8253 return;
8255 break;
8257 case PORT_FEATURE_LINK_SPEED_10G_CX4:
8258 case PORT_FEATURE_LINK_SPEED_10G_KX4:
8259 case PORT_FEATURE_LINK_SPEED_10G_KR:
8260 if (bp->port.supported[idx] &
8261 SUPPORTED_10000baseT_Full) {
8262 bp->link_params.req_line_speed[idx] =
8263 SPEED_10000;
8264 bp->port.advertising[idx] |=
8265 (ADVERTISED_10000baseT_Full |
8266 ADVERTISED_FIBRE);
8267 } else {
8268 BNX2X_ERROR("NVRAM config error. "
8269 "Invalid link_config 0x%x"
8270 " speed_cap_mask 0x%x\n",
8271 link_config,
8272 bp->link_params.speed_cap_mask[idx]);
8273 return;
8275 break;
8277 default:
8278 BNX2X_ERROR("NVRAM config error. "
8279 "BAD link speed link_config 0x%x\n",
8280 link_config);
8281 bp->link_params.req_line_speed[idx] =
8282 SPEED_AUTO_NEG;
8283 bp->port.advertising[idx] =
8284 bp->port.supported[idx];
8285 break;
8288 bp->link_params.req_flow_ctrl[idx] = (link_config &
8289 PORT_FEATURE_FLOW_CONTROL_MASK);
8290 if ((bp->link_params.req_flow_ctrl[idx] ==
8291 BNX2X_FLOW_CTRL_AUTO) &&
8292 !(bp->port.supported[idx] & SUPPORTED_Autoneg)) {
8293 bp->link_params.req_flow_ctrl[idx] =
8294 BNX2X_FLOW_CTRL_NONE;
8297 BNX2X_DEV_INFO("req_line_speed %d req_duplex %d req_flow_ctrl"
8298 " 0x%x advertising 0x%x\n",
8299 bp->link_params.req_line_speed[idx],
8300 bp->link_params.req_duplex[idx],
8301 bp->link_params.req_flow_ctrl[idx],
8302 bp->port.advertising[idx]);
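/* Assemble a MAC address, in network byte order, from the upper 16 bits
 * and lower 32 bits read from shared memory.
 */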
8306 static void __devinit bnx2x_set_mac_buf(u8 *mac_buf, u32 mac_lo, u16 mac_hi)
8308 mac_hi = cpu_to_be16(mac_hi);
8309 mac_lo = cpu_to_be32(mac_lo);
8310 memcpy(mac_buf, &mac_hi, sizeof(mac_hi));
8311 memcpy(mac_buf + sizeof(mac_hi), &mac_lo, sizeof(mac_lo));
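/* Read the per-port hardware configuration from shared memory: lane
 * config, speed capabilities, link and multi-PHY configuration and the
 * default WoL state; then probe the PHYs and derive the link settings.
 */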
8314 static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
8316 int port = BP_PORT(bp);
8317 u32 config;
8318 u32 ext_phy_type, ext_phy_config;
8320 bp->link_params.bp = bp;
8321 bp->link_params.port = port;
8323 bp->link_params.lane_config =
8324 SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
8326 bp->link_params.speed_cap_mask[0] =
8327 SHMEM_RD(bp,
8328 dev_info.port_hw_config[port].speed_capability_mask);
8329 bp->link_params.speed_cap_mask[1] =
8330 SHMEM_RD(bp,
8331 dev_info.port_hw_config[port].speed_capability_mask2);
8332 bp->port.link_config[0] =
8333 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
8335 bp->port.link_config[1] =
8336 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config2);
8338 bp->link_params.multi_phy_config =
8339 SHMEM_RD(bp, dev_info.port_hw_config[port].multi_phy_config);
8340 /* If the device is capable of WoL, set the default state according
8341 * to the HW
8343 config = SHMEM_RD(bp, dev_info.port_feature_config[port].config);
8344 bp->wol = (!(bp->flags & NO_WOL_FLAG) &&
8345 (config & PORT_FEATURE_WOL_ENABLED));
8347 BNX2X_DEV_INFO("lane_config 0x%08x "
8348 "speed_cap_mask0 0x%08x link_config0 0x%08x\n",
8349 bp->link_params.lane_config,
8350 bp->link_params.speed_cap_mask[0],
8351 bp->port.link_config[0]);
8353 bp->link_params.switch_cfg = (bp->port.link_config[0] &
8354 PORT_FEATURE_CONNECTED_SWITCH_MASK);
8355 bnx2x_phy_probe(&bp->link_params);
8356 bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);
8358 bnx2x_link_settings_requested(bp);
8361 * If connected directly, work with the internal PHY, otherwise, work
8362 * with the external PHY
8364 ext_phy_config =
8365 SHMEM_RD(bp,
8366 dev_info.port_hw_config[port].external_phy_config);
8367 ext_phy_type = XGXS_EXT_PHY_TYPE(ext_phy_config);
8368 if (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)
8369 bp->mdio.prtad = bp->port.phy_addr;
8371 else if ((ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) &&
8372 (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN))
8373 bp->mdio.prtad =
8374 XGXS_EXT_PHY_ADDR(ext_phy_config);
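/* Obtain the Ethernet MAC address (and, with CNIC, the iSCSI/FCoE MACs)
 * from the multi-function or port configuration in shared memory, or fall
 * back to a random address when the MCP is not available.
 */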
8377 static void __devinit bnx2x_get_mac_hwinfo(struct bnx2x *bp)
8379 u32 val, val2;
8380 int func = BP_ABS_FUNC(bp);
8381 int port = BP_PORT(bp);
8383 if (BP_NOMCP(bp)) {
8384 BNX2X_ERROR("warning: random MAC workaround active\n");
8385 random_ether_addr(bp->dev->dev_addr);
8386 } else if (IS_MF(bp)) {
8387 val2 = MF_CFG_RD(bp, func_mf_config[func].mac_upper);
8388 val = MF_CFG_RD(bp, func_mf_config[func].mac_lower);
8389 if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
8390 (val != FUNC_MF_CFG_LOWERMAC_DEFAULT))
8391 bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
8393 #ifdef BCM_CNIC
8394 /* iSCSI NPAR MAC */
8395 if (IS_MF_SI(bp)) {
8396 u32 cfg = MF_CFG_RD(bp, func_ext_config[func].func_cfg);
8397 if (cfg & MACP_FUNC_CFG_FLAGS_ISCSI_OFFLOAD) {
8398 val2 = MF_CFG_RD(bp, func_ext_config[func].
8399 iscsi_mac_addr_upper);
8400 val = MF_CFG_RD(bp, func_ext_config[func].
8401 iscsi_mac_addr_lower);
8402 bnx2x_set_mac_buf(bp->iscsi_mac, val, val2);
8405 #endif
8406 } else {
8407 /* in SF read MACs from port configuration */
8408 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
8409 val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
8410 bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
8412 #ifdef BCM_CNIC
8413 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].
8414 iscsi_mac_upper);
8415 val = SHMEM_RD(bp, dev_info.port_hw_config[port].
8416 iscsi_mac_lower);
8417 bnx2x_set_mac_buf(bp->iscsi_mac, val, val2);
8418 #endif
8421 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
8422 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
8424 #ifdef BCM_CNIC
8425 /* Inform the upper layers about FCoE MAC */
8426 if (!CHIP_IS_E1x(bp)) {
8427 if (IS_MF_SD(bp))
8428 memcpy(bp->fip_mac, bp->dev->dev_addr,
8429 sizeof(bp->fip_mac));
8430 else
8431 memcpy(bp->fip_mac, bp->iscsi_mac,
8432 sizeof(bp->fip_mac));
8434 #endif
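/* Top-level hardware discovery: common chip info, interrupt block and IGU
 * layout, multi-function mode, per-port settings and MAC addresses.
 */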
8437 static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
8439 int /*abs*/func = BP_ABS_FUNC(bp);
8440 int vn, port;
8441 u32 val = 0;
8442 int rc = 0;
8444 bnx2x_get_common_hwinfo(bp);
8446 if (CHIP_IS_E1x(bp)) {
8447 bp->common.int_block = INT_BLOCK_HC;
8449 bp->igu_dsb_id = DEF_SB_IGU_ID;
8450 bp->igu_base_sb = 0;
8451 bp->igu_sb_cnt = min_t(u8, FP_SB_MAX_E1x,
8452 NUM_IGU_SB_REQUIRED(bp->l2_cid_count));
8453 } else {
8454 bp->common.int_block = INT_BLOCK_IGU;
8455 val = REG_RD(bp, IGU_REG_BLOCK_CONFIGURATION);
8456 if (val & IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN) {
8457 DP(NETIF_MSG_PROBE, "IGU Backward Compatible Mode\n");
8458 bp->common.int_block |= INT_BLOCK_MODE_BW_COMP;
8459 } else
8460 DP(NETIF_MSG_PROBE, "IGU Normal Mode\n");
8462 bnx2x_get_igu_cam_info(bp);
8465 DP(NETIF_MSG_PROBE, "igu_dsb_id %d igu_base_sb %d igu_sb_cnt %d\n",
8466 bp->igu_dsb_id, bp->igu_base_sb, bp->igu_sb_cnt);
8469 * Initialize MF configuration
8472 bp->mf_ov = 0;
8473 bp->mf_mode = 0;
8474 vn = BP_E1HVN(bp);
8475 port = BP_PORT(bp);
8477 if (!CHIP_IS_E1(bp) && !BP_NOMCP(bp)) {
8478 DP(NETIF_MSG_PROBE,
8479 "shmem2base 0x%x, size %d, mfcfg offset %d\n",
8480 bp->common.shmem2_base, SHMEM2_RD(bp, size),
8481 (u32)offsetof(struct shmem2_region, mf_cfg_addr));
8482 if (SHMEM2_HAS(bp, mf_cfg_addr))
8483 bp->common.mf_cfg_base = SHMEM2_RD(bp, mf_cfg_addr);
8484 else
8485 bp->common.mf_cfg_base = bp->common.shmem_base +
8486 offsetof(struct shmem_region, func_mb) +
8487 E1H_FUNC_MAX * sizeof(struct drv_func_mb);
8489 * get mf configuration:
8490 * 1. existence of MF configuration
8491 * 2. MAC address must be legal (check only upper bytes)
8492 * for Switch-Independent mode;
8493 * OVLAN must be legal for Switch-Dependent mode
8494 * 3. SF_MODE configures specific MF mode
8496 if (bp->common.mf_cfg_base != SHMEM_MF_CFG_ADDR_NONE) {
8497 /* get mf configuration */
8498 val = SHMEM_RD(bp,
8499 dev_info.shared_feature_config.config);
8500 val &= SHARED_FEAT_CFG_FORCE_SF_MODE_MASK;
8502 switch (val) {
8503 case SHARED_FEAT_CFG_FORCE_SF_MODE_SWITCH_INDEPT:
8504 val = MF_CFG_RD(bp, func_mf_config[func].
8505 mac_upper);
8506 /* check for legal mac (upper bytes)*/
8507 if (val != 0xffff) {
8508 bp->mf_mode = MULTI_FUNCTION_SI;
8509 bp->mf_config[vn] = MF_CFG_RD(bp,
8510 func_mf_config[func].config);
8511 } else
8512 DP(NETIF_MSG_PROBE, "illegal MAC "
8513 "address for SI\n");
8514 break;
8515 case SHARED_FEAT_CFG_FORCE_SF_MODE_MF_ALLOWED:
8516 /* get OV configuration */
8517 val = MF_CFG_RD(bp,
8518 func_mf_config[FUNC_0].e1hov_tag);
8519 val &= FUNC_MF_CFG_E1HOV_TAG_MASK;
8521 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
8522 bp->mf_mode = MULTI_FUNCTION_SD;
8523 bp->mf_config[vn] = MF_CFG_RD(bp,
8524 func_mf_config[func].config);
8525 } else
8526 DP(NETIF_MSG_PROBE, "illegal OV for "
8527 "SD\n");
8528 break;
8529 default:
8530 /* Unknown configuration: reset mf_config */
8531 bp->mf_config[vn] = 0;
8532 DP(NETIF_MSG_PROBE, "Unknown MF mode 0x%x\n",
8533 val);
8537 BNX2X_DEV_INFO("%s function mode\n",
8538 IS_MF(bp) ? "multi" : "single");
8540 switch (bp->mf_mode) {
8541 case MULTI_FUNCTION_SD:
8542 val = MF_CFG_RD(bp, func_mf_config[func].e1hov_tag) &
8543 FUNC_MF_CFG_E1HOV_TAG_MASK;
8544 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
8545 bp->mf_ov = val;
8546 BNX2X_DEV_INFO("MF OV for func %d is %d"
8547 " (0x%04x)\n", func,
8548 bp->mf_ov, bp->mf_ov);
8549 } else {
8550 BNX2X_ERR("No valid MF OV for func %d,"
8551 " aborting\n", func);
8552 rc = -EPERM;
8554 break;
8555 case MULTI_FUNCTION_SI:
8556 BNX2X_DEV_INFO("func %d is in MF "
8557 "switch-independent mode\n", func);
8558 break;
8559 default:
8560 if (vn) {
8561 BNX2X_ERR("VN %d in single function mode,"
8562 " aborting\n", vn);
8563 rc = -EPERM;
8565 break;
8570 /* adjust igu_sb_cnt to MF for E1x */
8571 if (CHIP_IS_E1x(bp) && IS_MF(bp))
8572 bp->igu_sb_cnt /= E1HVN_MAX;
8575 * adjust E2 sb count: to be removed once the FW supports
8576 * more than 16 L2 clients
8578 #define MAX_L2_CLIENTS 16
8579 if (CHIP_IS_E2(bp))
8580 bp->igu_sb_cnt = min_t(u8, bp->igu_sb_cnt,
8581 MAX_L2_CLIENTS / (IS_MF(bp) ? 4 : 1));
8583 if (!BP_NOMCP(bp)) {
8584 bnx2x_get_port_hwinfo(bp);
8586 bp->fw_seq =
8587 (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
8588 DRV_MSG_SEQ_NUMBER_MASK);
8589 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
8592 /* Get MAC addresses */
8593 bnx2x_get_mac_hwinfo(bp);
8595 return rc;
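/* Look for a vendor-specific firmware version string in the read-only
 * section of the PCI VPD and, if found, copy it into bp->fw_ver.
 */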
8598 static void __devinit bnx2x_read_fwinfo(struct bnx2x *bp)
8600 int cnt, i, block_end, rodi;
8601 char vpd_data[BNX2X_VPD_LEN+1];
8602 char str_id_reg[VENDOR_ID_LEN+1];
8603 char str_id_cap[VENDOR_ID_LEN+1];
8604 u8 len;
8606 cnt = pci_read_vpd(bp->pdev, 0, BNX2X_VPD_LEN, vpd_data);
8607 memset(bp->fw_ver, 0, sizeof(bp->fw_ver));
8609 if (cnt < BNX2X_VPD_LEN)
8610 goto out_not_found;
8612 i = pci_vpd_find_tag(vpd_data, 0, BNX2X_VPD_LEN,
8613 PCI_VPD_LRDT_RO_DATA);
8614 if (i < 0)
8615 goto out_not_found;
8618 block_end = i + PCI_VPD_LRDT_TAG_SIZE +
8619 pci_vpd_lrdt_size(&vpd_data[i]);
8621 i += PCI_VPD_LRDT_TAG_SIZE;
8623 if (block_end > BNX2X_VPD_LEN)
8624 goto out_not_found;
8626 rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
8627 PCI_VPD_RO_KEYWORD_MFR_ID);
8628 if (rodi < 0)
8629 goto out_not_found;
8631 len = pci_vpd_info_field_size(&vpd_data[rodi]);
8633 if (len != VENDOR_ID_LEN)
8634 goto out_not_found;
8636 rodi += PCI_VPD_INFO_FLD_HDR_SIZE;
8638 /* vendor specific info */
8639 snprintf(str_id_reg, VENDOR_ID_LEN + 1, "%04x", PCI_VENDOR_ID_DELL);
8640 snprintf(str_id_cap, VENDOR_ID_LEN + 1, "%04X", PCI_VENDOR_ID_DELL);
8641 if (!strncmp(str_id_reg, &vpd_data[rodi], VENDOR_ID_LEN) ||
8642 !strncmp(str_id_cap, &vpd_data[rodi], VENDOR_ID_LEN)) {
8644 rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
8645 PCI_VPD_RO_KEYWORD_VENDOR0);
8646 if (rodi >= 0) {
8647 len = pci_vpd_info_field_size(&vpd_data[rodi]);
8649 rodi += PCI_VPD_INFO_FLD_HDR_SIZE;
8651 if (len < 32 && (len + rodi) <= BNX2X_VPD_LEN) {
8652 memcpy(bp->fw_ver, &vpd_data[rodi], len);
8653 bp->fw_ver[len] = ' ';
8656 return;
8658 out_not_found:
8659 return;
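/* One-time driver state initialization: locks and work items, hardware
 * discovery, UNDI cleanup, module-parameter derived settings, coalescing
 * defaults and the periodic timer.
 */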
8662 static int __devinit bnx2x_init_bp(struct bnx2x *bp)
8664 int func;
8665 int timer_interval;
8666 int rc;
8668 /* Disable interrupt handling until HW is initialized */
8669 atomic_set(&bp->intr_sem, 1);
8670 smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
8672 mutex_init(&bp->port.phy_mutex);
8673 mutex_init(&bp->fw_mb_mutex);
8674 spin_lock_init(&bp->stats_lock);
8675 #ifdef BCM_CNIC
8676 mutex_init(&bp->cnic_mutex);
8677 #endif
8679 INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
8680 INIT_DELAYED_WORK(&bp->reset_task, bnx2x_reset_task);
8682 rc = bnx2x_get_hwinfo(bp);
8684 if (!rc)
8685 rc = bnx2x_alloc_mem_bp(bp);
8687 bnx2x_read_fwinfo(bp);
8689 func = BP_FUNC(bp);
8691 /* need to reset chip if undi was active */
8692 if (!BP_NOMCP(bp))
8693 bnx2x_undi_unload(bp);
8695 if (CHIP_REV_IS_FPGA(bp))
8696 dev_err(&bp->pdev->dev, "FPGA detected\n");
8698 if (BP_NOMCP(bp) && (func == 0))
8699 dev_err(&bp->pdev->dev, "MCP disabled, "
8700 "must load devices in order!\n");
8702 bp->multi_mode = multi_mode;
8703 bp->int_mode = int_mode;
8705 bp->dev->features |= NETIF_F_GRO;
8707 /* Set TPA flags */
8708 if (disable_tpa) {
8709 bp->flags &= ~TPA_ENABLE_FLAG;
8710 bp->dev->features &= ~NETIF_F_LRO;
8711 } else {
8712 bp->flags |= TPA_ENABLE_FLAG;
8713 bp->dev->features |= NETIF_F_LRO;
8715 bp->disable_tpa = disable_tpa;
8717 if (CHIP_IS_E1(bp))
8718 bp->dropless_fc = 0;
8719 else
8720 bp->dropless_fc = dropless_fc;
8722 bp->mrrs = mrrs;
8724 bp->tx_ring_size = MAX_TX_AVAIL;
8726 bp->rx_csum = 1;
8728 /* make sure that the numbers are in the right granularity */
8729 bp->tx_ticks = (50 / BNX2X_BTR) * BNX2X_BTR;
8730 bp->rx_ticks = (25 / BNX2X_BTR) * BNX2X_BTR;
8732 timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
8733 bp->current_interval = (poll ? poll : timer_interval);
8735 init_timer(&bp->timer);
8736 bp->timer.expires = jiffies + bp->current_interval;
8737 bp->timer.data = (unsigned long) bp;
8738 bp->timer.function = bnx2x_timer;
8740 bnx2x_dcbx_set_state(bp, true, BNX2X_DCBX_ENABLED_ON_NEG_ON);
8741 bnx2x_dcbx_init_params(bp);
8743 return rc;
8747 /****************************************************************************
8748 * General service functions
8749 ****************************************************************************/
8751 /* called with rtnl_lock */
8752 static int bnx2x_open(struct net_device *dev)
8754 struct bnx2x *bp = netdev_priv(dev);
8756 netif_carrier_off(dev);
8758 bnx2x_set_power_state(bp, PCI_D0);
8760 if (!bnx2x_reset_is_done(bp)) {
8761 do {
8762 /* Reset MCP mailbox sequence if there is an ongoing
8763 * recovery
8765 bp->fw_seq = 0;
8767 /* If it's the first function to load and "reset done" is
8768 * still not cleared, it may mean the previous recovery flow
8769 * never completed. We don't check the attention state here
8770 * because it may have already been cleared by a "common"
8771 * reset, but we shall proceed with "process kill" anyway.
8773 if ((bnx2x_get_load_cnt(bp) == 0) &&
8774 bnx2x_trylock_hw_lock(bp,
8775 HW_LOCK_RESOURCE_RESERVED_08) &&
8776 (!bnx2x_leader_reset(bp))) {
8777 DP(NETIF_MSG_HW, "Recovered in open\n");
8778 break;
8781 bnx2x_set_power_state(bp, PCI_D3hot);
8783 printk(KERN_ERR "%s: Recovery flow hasn't been properly"
8784 " completed yet. Try again later. If you still see this"
8785 " message after a few retries then a power cycle is"
8786 " required.\n", bp->dev->name);
8788 return -EAGAIN;
8789 } while (0);
8792 bp->recovery_state = BNX2X_RECOVERY_DONE;
8794 return bnx2x_nic_load(bp, LOAD_OPEN);
8797 /* called with rtnl_lock */
8798 static int bnx2x_close(struct net_device *dev)
8800 struct bnx2x *bp = netdev_priv(dev);
8802 /* Unload the driver, release IRQs */
8803 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
8804 bnx2x_set_power_state(bp, PCI_D3hot);
8806 return 0;
8809 /* called with netif_tx_lock from dev_mcast.c */
8810 void bnx2x_set_rx_mode(struct net_device *dev)
8812 struct bnx2x *bp = netdev_priv(dev);
8813 u32 rx_mode = BNX2X_RX_MODE_NORMAL;
8814 int port = BP_PORT(bp);
8816 if (bp->state != BNX2X_STATE_OPEN) {
8817 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
8818 return;
8821 DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);
8823 if (dev->flags & IFF_PROMISC)
8824 rx_mode = BNX2X_RX_MODE_PROMISC;
8825 else if ((dev->flags & IFF_ALLMULTI) ||
8826 ((netdev_mc_count(dev) > BNX2X_MAX_MULTICAST) &&
8827 CHIP_IS_E1(bp)))
8828 rx_mode = BNX2X_RX_MODE_ALLMULTI;
8829 else { /* some multicasts */
8830 if (CHIP_IS_E1(bp)) {
8832 * set mc list, do not wait as wait implies sleep
8833 * and set_rx_mode can be invoked from non-sleepable
8834 * context
8836 u8 offset = (CHIP_REV_IS_SLOW(bp) ?
8837 BNX2X_MAX_EMUL_MULTI*(1 + port) :
8838 BNX2X_MAX_MULTICAST*(1 + port));
8840 bnx2x_set_e1_mc_list(bp, offset);
8841 } else { /* E1H */
8842 /* Accept one or more multicasts */
8843 struct netdev_hw_addr *ha;
8844 u32 mc_filter[MC_HASH_SIZE];
8845 u32 crc, bit, regidx;
8846 int i;
8848 memset(mc_filter, 0, 4 * MC_HASH_SIZE);
8850 netdev_for_each_mc_addr(ha, dev) {
8851 DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
8852 bnx2x_mc_addr(ha));
8854 crc = crc32c_le(0, bnx2x_mc_addr(ha),
8855 ETH_ALEN);
8856 bit = (crc >> 24) & 0xff;
8857 regidx = bit >> 5;
8858 bit &= 0x1f;
8859 mc_filter[regidx] |= (1 << bit);
8862 for (i = 0; i < MC_HASH_SIZE; i++)
8863 REG_WR(bp, MC_HASH_OFFSET(bp, i),
8864 mc_filter[i]);
8868 bp->rx_mode = rx_mode;
8869 bnx2x_set_storm_rx_mode(bp);
8872 /* called with rtnl_lock */
8873 static int bnx2x_mdio_read(struct net_device *netdev, int prtad,
8874 int devad, u16 addr)
8876 struct bnx2x *bp = netdev_priv(netdev);
8877 u16 value;
8878 int rc;
8880 DP(NETIF_MSG_LINK, "mdio_read: prtad 0x%x, devad 0x%x, addr 0x%x\n",
8881 prtad, devad, addr);
8883 /* The HW expects different devad if CL22 is used */
8884 devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
8886 bnx2x_acquire_phy_lock(bp);
8887 rc = bnx2x_phy_read(&bp->link_params, prtad, devad, addr, &value);
8888 bnx2x_release_phy_lock(bp);
8889 DP(NETIF_MSG_LINK, "mdio_read_val 0x%x rc = 0x%x\n", value, rc);
8891 if (!rc)
8892 rc = value;
8893 return rc;
8896 /* called with rtnl_lock */
8897 static int bnx2x_mdio_write(struct net_device *netdev, int prtad, int devad,
8898 u16 addr, u16 value)
8900 struct bnx2x *bp = netdev_priv(netdev);
8901 int rc;
8903 DP(NETIF_MSG_LINK, "mdio_write: prtad 0x%x, devad 0x%x, addr 0x%x,"
8904 " value 0x%x\n", prtad, devad, addr, value);
8906 /* The HW expects different devad if CL22 is used */
8907 devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
8909 bnx2x_acquire_phy_lock(bp);
8910 rc = bnx2x_phy_write(&bp->link_params, prtad, devad, addr, value);
8911 bnx2x_release_phy_lock(bp);
8912 return rc;
8915 /* called with rtnl_lock */
8916 static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
8918 struct bnx2x *bp = netdev_priv(dev);
8919 struct mii_ioctl_data *mdio = if_mii(ifr);
8921 DP(NETIF_MSG_LINK, "ioctl: phy id 0x%x, reg 0x%x, val_in 0x%x\n",
8922 mdio->phy_id, mdio->reg_num, mdio->val_in);
8924 if (!netif_running(dev))
8925 return -EAGAIN;
8927 return mdio_mii_ioctl(&bp->mdio, mdio, cmd);
8930 #ifdef CONFIG_NET_POLL_CONTROLLER
8931 static void poll_bnx2x(struct net_device *dev)
8933 struct bnx2x *bp = netdev_priv(dev);
8935 disable_irq(bp->pdev->irq);
8936 bnx2x_interrupt(bp->pdev->irq, dev);
8937 enable_irq(bp->pdev->irq);
8939 #endif
8941 static const struct net_device_ops bnx2x_netdev_ops = {
8942 .ndo_open = bnx2x_open,
8943 .ndo_stop = bnx2x_close,
8944 .ndo_start_xmit = bnx2x_start_xmit,
8945 .ndo_select_queue = bnx2x_select_queue,
8946 .ndo_set_multicast_list = bnx2x_set_rx_mode,
8947 .ndo_set_mac_address = bnx2x_change_mac_addr,
8948 .ndo_validate_addr = eth_validate_addr,
8949 .ndo_do_ioctl = bnx2x_ioctl,
8950 .ndo_change_mtu = bnx2x_change_mtu,
8951 .ndo_tx_timeout = bnx2x_tx_timeout,
8952 #ifdef CONFIG_NET_POLL_CONTROLLER
8953 .ndo_poll_controller = poll_bnx2x,
8954 #endif
8957 static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
8958 struct net_device *dev)
8960 struct bnx2x *bp;
8961 int rc;
8963 SET_NETDEV_DEV(dev, &pdev->dev);
8964 bp = netdev_priv(dev);
8966 bp->dev = dev;
8967 bp->pdev = pdev;
8968 bp->flags = 0;
8969 bp->pf_num = PCI_FUNC(pdev->devfn);
8971 rc = pci_enable_device(pdev);
8972 if (rc) {
8973 dev_err(&bp->pdev->dev,
8974 "Cannot enable PCI device, aborting\n");
8975 goto err_out;
8978 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
8979 dev_err(&bp->pdev->dev,
8980 "Cannot find PCI device base address, aborting\n");
8981 rc = -ENODEV;
8982 goto err_out_disable;
8985 if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
8986 dev_err(&bp->pdev->dev, "Cannot find second PCI device"
8987 " base address, aborting\n");
8988 rc = -ENODEV;
8989 goto err_out_disable;
8992 if (atomic_read(&pdev->enable_cnt) == 1) {
8993 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
8994 if (rc) {
8995 dev_err(&bp->pdev->dev,
8996 "Cannot obtain PCI resources, aborting\n");
8997 goto err_out_disable;
9000 pci_set_master(pdev);
9001 pci_save_state(pdev);
9004 bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
9005 if (bp->pm_cap == 0) {
9006 dev_err(&bp->pdev->dev,
9007 "Cannot find power management capability, aborting\n");
9008 rc = -EIO;
9009 goto err_out_release;
9012 bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
9013 if (bp->pcie_cap == 0) {
9014 dev_err(&bp->pdev->dev,
9015 "Cannot find PCI Express capability, aborting\n");
9016 rc = -EIO;
9017 goto err_out_release;
9020 if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) == 0) {
9021 bp->flags |= USING_DAC_FLAG;
9022 if (dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64)) != 0) {
9023 dev_err(&bp->pdev->dev, "dma_set_coherent_mask"
9024 " failed, aborting\n");
9025 rc = -EIO;
9026 goto err_out_release;
9029 } else if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)) != 0) {
9030 dev_err(&bp->pdev->dev,
9031 "System does not support DMA, aborting\n");
9032 rc = -EIO;
9033 goto err_out_release;
9036 dev->mem_start = pci_resource_start(pdev, 0);
9037 dev->base_addr = dev->mem_start;
9038 dev->mem_end = pci_resource_end(pdev, 0);
9040 dev->irq = pdev->irq;
9042 bp->regview = pci_ioremap_bar(pdev, 0);
9043 if (!bp->regview) {
9044 dev_err(&bp->pdev->dev,
9045 "Cannot map register space, aborting\n");
9046 rc = -ENOMEM;
9047 goto err_out_release;
9050 bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
9051 min_t(u64, BNX2X_DB_SIZE(bp),
9052 pci_resource_len(pdev, 2)));
9053 if (!bp->doorbells) {
9054 dev_err(&bp->pdev->dev,
9055 "Cannot map doorbell space, aborting\n");
9056 rc = -ENOMEM;
9057 goto err_out_unmap;
9060 bnx2x_set_power_state(bp, PCI_D0);
9062 /* clean indirect addresses */
9063 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
9064 PCICFG_VENDOR_ID_OFFSET);
9065 REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
9066 REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
9067 REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
9068 REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);
9070 /* Reset the load counter */
9071 bnx2x_clear_load_cnt(bp);
9073 dev->watchdog_timeo = TX_TIMEOUT;
9075 dev->netdev_ops = &bnx2x_netdev_ops;
9076 bnx2x_set_ethtool_ops(dev);
9077 dev->features |= NETIF_F_SG;
9078 dev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
9079 if (bp->flags & USING_DAC_FLAG)
9080 dev->features |= NETIF_F_HIGHDMA;
9081 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
9082 dev->features |= NETIF_F_TSO6;
9083 dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
9085 dev->vlan_features |= NETIF_F_SG;
9086 dev->vlan_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
9087 if (bp->flags & USING_DAC_FLAG)
9088 dev->vlan_features |= NETIF_F_HIGHDMA;
9089 dev->vlan_features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
9090 dev->vlan_features |= NETIF_F_TSO6;
9092 #ifdef BCM_DCB
9093 dev->dcbnl_ops = &bnx2x_dcbnl_ops;
9094 #endif
9096 /* get_port_hwinfo() will set prtad and mmds properly */
9097 bp->mdio.prtad = MDIO_PRTAD_NONE;
9098 bp->mdio.mmds = 0;
9099 bp->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
9100 bp->mdio.dev = dev;
9101 bp->mdio.mdio_read = bnx2x_mdio_read;
9102 bp->mdio.mdio_write = bnx2x_mdio_write;
9104 return 0;
9106 err_out_unmap:
9107 if (bp->regview) {
9108 iounmap(bp->regview);
9109 bp->regview = NULL;
9111 if (bp->doorbells) {
9112 iounmap(bp->doorbells);
9113 bp->doorbells = NULL;
9116 err_out_release:
9117 if (atomic_read(&pdev->enable_cnt) == 1)
9118 pci_release_regions(pdev);
9120 err_out_disable:
9121 pci_disable_device(pdev);
9122 pci_set_drvdata(pdev, NULL);
9124 err_out:
9125 return rc;
9128 static void __devinit bnx2x_get_pcie_width_speed(struct bnx2x *bp,
9129 int *width, int *speed)
9131 u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
9133 *width = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
9135 /* return value of 1=2.5GHz 2=5GHz */
9136 *speed = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
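/* Validate the firmware file: every section and init_ops offset must lie
 * within the blob, and the embedded version must match the version the
 * driver was built against.
 */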
9139 static int bnx2x_check_firmware(struct bnx2x *bp)
9141 const struct firmware *firmware = bp->firmware;
9142 struct bnx2x_fw_file_hdr *fw_hdr;
9143 struct bnx2x_fw_file_section *sections;
9144 u32 offset, len, num_ops;
9145 u16 *ops_offsets;
9146 int i;
9147 const u8 *fw_ver;
9149 if (firmware->size < sizeof(struct bnx2x_fw_file_hdr))
9150 return -EINVAL;
9152 fw_hdr = (struct bnx2x_fw_file_hdr *)firmware->data;
9153 sections = (struct bnx2x_fw_file_section *)fw_hdr;
9155 /* Make sure none of the offsets and sizes make us read beyond
9156 * the end of the firmware data */
9157 for (i = 0; i < sizeof(*fw_hdr) / sizeof(*sections); i++) {
9158 offset = be32_to_cpu(sections[i].offset);
9159 len = be32_to_cpu(sections[i].len);
9160 if (offset + len > firmware->size) {
9161 dev_err(&bp->pdev->dev,
9162 "Section %d length is out of bounds\n", i);
9163 return -EINVAL;
9167 /* Likewise for the init_ops offsets */
9168 offset = be32_to_cpu(fw_hdr->init_ops_offsets.offset);
9169 ops_offsets = (u16 *)(firmware->data + offset);
9170 num_ops = be32_to_cpu(fw_hdr->init_ops.len) / sizeof(struct raw_op);
9172 for (i = 0; i < be32_to_cpu(fw_hdr->init_ops_offsets.len) / 2; i++) {
9173 if (be16_to_cpu(ops_offsets[i]) > num_ops) {
9174 dev_err(&bp->pdev->dev,
9175 "Section offset %d is out of bounds\n", i);
9176 return -EINVAL;
9180 /* Check FW version */
9181 offset = be32_to_cpu(fw_hdr->fw_version.offset);
9182 fw_ver = firmware->data + offset;
9183 if ((fw_ver[0] != BCM_5710_FW_MAJOR_VERSION) ||
9184 (fw_ver[1] != BCM_5710_FW_MINOR_VERSION) ||
9185 (fw_ver[2] != BCM_5710_FW_REVISION_VERSION) ||
9186 (fw_ver[3] != BCM_5710_FW_ENGINEERING_VERSION)) {
9187 dev_err(&bp->pdev->dev,
9188 "Bad FW version:%d.%d.%d.%d. Should be %d.%d.%d.%d\n",
9189 fw_ver[0], fw_ver[1], fw_ver[2],
9190 fw_ver[3], BCM_5710_FW_MAJOR_VERSION,
9191 BCM_5710_FW_MINOR_VERSION,
9192 BCM_5710_FW_REVISION_VERSION,
9193 BCM_5710_FW_ENGINEERING_VERSION);
9194 return -EINVAL;
9197 return 0;
9200 static inline void be32_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
9202 const __be32 *source = (const __be32 *)_source;
9203 u32 *target = (u32 *)_target;
9204 u32 i;
9206 for (i = 0; i < n/4; i++)
9207 target[i] = be32_to_cpu(source[i]);
9211 Ops array is stored in the following format:
9212 {op(8bit), offset(24bit, big endian), data(32bit, big endian)}
9214 static inline void bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n)
9216 const __be32 *source = (const __be32 *)_source;
9217 struct raw_op *target = (struct raw_op *)_target;
9218 u32 i, j, tmp;
9220 for (i = 0, j = 0; i < n/8; i++, j += 2) {
9221 tmp = be32_to_cpu(source[j]);
9222 target[i].op = (tmp >> 24) & 0xff;
9223 target[i].offset = tmp & 0xffffff;
9224 target[i].raw_data = be32_to_cpu(source[j + 1]);
9229 * IRO array is stored in the following format:
9230 * {base(24bit), m1(16bit), m2(16bit), m3(16bit), size(16bit) }
9232 static inline void bnx2x_prep_iro(const u8 *_source, u8 *_target, u32 n)
9234 const __be32 *source = (const __be32 *)_source;
9235 struct iro *target = (struct iro *)_target;
9236 u32 i, j, tmp;
9238 for (i = 0, j = 0; i < n/sizeof(struct iro); i++) {
9239 target[i].base = be32_to_cpu(source[j]);
9240 j++;
9241 tmp = be32_to_cpu(source[j]);
9242 target[i].m1 = (tmp >> 16) & 0xffff;
9243 target[i].m2 = tmp & 0xffff;
9244 j++;
9245 tmp = be32_to_cpu(source[j]);
9246 target[i].m3 = (tmp >> 16) & 0xffff;
9247 target[i].size = tmp & 0xffff;
9248 j++;
9252 static inline void be16_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
9254 const __be16 *source = (const __be16 *)_source;
9255 u16 *target = (u16 *)_target;
9256 u32 i;
9258 for (i = 0; i < n/2; i++)
9259 target[i] = be16_to_cpu(source[i]);
9262 #define BNX2X_ALLOC_AND_SET(arr, lbl, func) \
9263 do { \
9264 u32 len = be32_to_cpu(fw_hdr->arr.len); \
9265 bp->arr = kmalloc(len, GFP_KERNEL); \
9266 if (!bp->arr) { \
9267 pr_err("Failed to allocate %d bytes for "#arr"\n", len); \
9268 goto lbl; \
9270 func(bp->firmware->data + be32_to_cpu(fw_hdr->arr.offset), \
9271 (u8 *)bp->arr, len); \
9272 } while (0)
9274 int bnx2x_init_firmware(struct bnx2x *bp)
9276 const char *fw_file_name;
9277 struct bnx2x_fw_file_hdr *fw_hdr;
9278 int rc;
9280 if (CHIP_IS_E1(bp))
9281 fw_file_name = FW_FILE_NAME_E1;
9282 else if (CHIP_IS_E1H(bp))
9283 fw_file_name = FW_FILE_NAME_E1H;
9284 else if (CHIP_IS_E2(bp))
9285 fw_file_name = FW_FILE_NAME_E2;
9286 else {
9287 BNX2X_ERR("Unsupported chip revision\n");
9288 return -EINVAL;
9291 BNX2X_DEV_INFO("Loading %s\n", fw_file_name);
9293 rc = request_firmware(&bp->firmware, fw_file_name, &bp->pdev->dev);
9294 if (rc) {
9295 BNX2X_ERR("Can't load firmware file %s\n", fw_file_name);
9296 goto request_firmware_exit;
9299 rc = bnx2x_check_firmware(bp);
9300 if (rc) {
9301 BNX2X_ERR("Corrupt firmware file %s\n", fw_file_name);
9302 goto request_firmware_exit;
9305 fw_hdr = (struct bnx2x_fw_file_hdr *)bp->firmware->data;
9307 /* Initialize the pointers to the init arrays */
9308 /* Blob */
9309 BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit, be32_to_cpu_n);
9311 /* Opcodes */
9312 BNX2X_ALLOC_AND_SET(init_ops, init_ops_alloc_err, bnx2x_prep_ops);
9314 /* Offsets */
9315 BNX2X_ALLOC_AND_SET(init_ops_offsets, init_offsets_alloc_err,
9316 be16_to_cpu_n);
9318 /* STORMs firmware */
9319 INIT_TSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
9320 be32_to_cpu(fw_hdr->tsem_int_table_data.offset);
9321 INIT_TSEM_PRAM_DATA(bp) = bp->firmware->data +
9322 be32_to_cpu(fw_hdr->tsem_pram_data.offset);
9323 INIT_USEM_INT_TABLE_DATA(bp) = bp->firmware->data +
9324 be32_to_cpu(fw_hdr->usem_int_table_data.offset);
9325 INIT_USEM_PRAM_DATA(bp) = bp->firmware->data +
9326 be32_to_cpu(fw_hdr->usem_pram_data.offset);
9327 INIT_XSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
9328 be32_to_cpu(fw_hdr->xsem_int_table_data.offset);
9329 INIT_XSEM_PRAM_DATA(bp) = bp->firmware->data +
9330 be32_to_cpu(fw_hdr->xsem_pram_data.offset);
9331 INIT_CSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
9332 be32_to_cpu(fw_hdr->csem_int_table_data.offset);
9333 INIT_CSEM_PRAM_DATA(bp) = bp->firmware->data +
9334 be32_to_cpu(fw_hdr->csem_pram_data.offset);
9335 /* IRO */
9336 BNX2X_ALLOC_AND_SET(iro_arr, iro_alloc_err, bnx2x_prep_iro);
9338 return 0;
9340 iro_alloc_err:
9341 kfree(bp->init_ops_offsets);
9342 init_offsets_alloc_err:
9343 kfree(bp->init_ops);
9344 init_ops_alloc_err:
9345 kfree(bp->init_data);
9346 request_firmware_exit:
9347 release_firmware(bp->firmware);
9349 return rc;
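/* Round the number of QM contexts (L2 plus, with CNIC, the CNIC CIDs) up
 * to the QM allocation granularity.
 */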
9352 static inline int bnx2x_set_qm_cid_count(struct bnx2x *bp, int l2_cid_count)
9354 int cid_count = L2_FP_COUNT(l2_cid_count);
9356 #ifdef BCM_CNIC
9357 cid_count += CNIC_CID_MAX;
9358 #endif
9359 return roundup(cid_count, QM_CID_ROUND);
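/* PCI probe entry point: allocate the net_device, map the device BARs,
 * initialize driver state, pick the interrupt mode and register the
 * interface with the network stack.
 */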
9362 static int __devinit bnx2x_init_one(struct pci_dev *pdev,
9363 const struct pci_device_id *ent)
9365 struct net_device *dev = NULL;
9366 struct bnx2x *bp;
9367 int pcie_width, pcie_speed;
9368 int rc, cid_count;
9370 switch (ent->driver_data) {
9371 case BCM57710:
9372 case BCM57711:
9373 case BCM57711E:
9374 cid_count = FP_SB_MAX_E1x;
9375 break;
9377 case BCM57712:
9378 case BCM57712E:
9379 cid_count = FP_SB_MAX_E2;
9380 break;
9382 default:
9383 pr_err("Unknown board_type (%ld), aborting\n",
9384 ent->driver_data);
9385 return -ENODEV;
9388 cid_count += NONE_ETH_CONTEXT_USE + CNIC_CONTEXT_USE;
9390 /* dev zeroed in init_etherdev */
9391 dev = alloc_etherdev_mq(sizeof(*bp), cid_count);
9392 if (!dev) {
9393 dev_err(&pdev->dev, "Cannot allocate net device\n");
9394 return -ENOMEM;
9397 bp = netdev_priv(dev);
9398 bp->msg_enable = debug;
9400 pci_set_drvdata(pdev, dev);
9402 bp->l2_cid_count = cid_count;
9404 rc = bnx2x_init_dev(pdev, dev);
9405 if (rc < 0) {
9406 free_netdev(dev);
9407 return rc;
9410 rc = bnx2x_init_bp(bp);
9411 if (rc)
9412 goto init_one_exit;
9414 /* calc qm_cid_count */
9415 bp->qm_cid_count = bnx2x_set_qm_cid_count(bp, cid_count);
9417 #ifdef BCM_CNIC
9418 /* disable FCOE L2 queue for E1x*/
9419 if (CHIP_IS_E1x(bp))
9420 bp->flags |= NO_FCOE_FLAG;
9422 #endif
9424 /* Configure interrupt mode: try to enable MSI-X/MSI if
9425 * needed, set bp->num_queues appropriately.
9427 bnx2x_set_int_mode(bp);
9429 /* Add all NAPI objects */
9430 bnx2x_add_all_napi(bp);
9432 rc = register_netdev(dev);
9433 if (rc) {
9434 dev_err(&pdev->dev, "Cannot register net device\n");
9435 goto init_one_exit;
9438 #ifdef BCM_CNIC
9439 if (!NO_FCOE(bp)) {
9440 /* Add storage MAC address */
9441 rtnl_lock();
9442 dev_addr_add(bp->dev, bp->fip_mac, NETDEV_HW_ADDR_T_SAN);
9443 rtnl_unlock();
9445 #endif
9447 bnx2x_get_pcie_width_speed(bp, &pcie_width, &pcie_speed);
9449 netdev_info(dev, "%s (%c%d) PCI-E x%d %s found at mem %lx,"
9450 " IRQ %d, ", board_info[ent->driver_data].name,
9451 (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
9452 pcie_width,
9453 ((!CHIP_IS_E2(bp) && pcie_speed == 2) ||
9454 (CHIP_IS_E2(bp) && pcie_speed == 1)) ?
9455 "5GHz (Gen2)" : "2.5GHz",
9456 dev->base_addr, bp->pdev->irq);
9457 pr_cont("node addr %pM\n", dev->dev_addr);
9459 return 0;
9461 init_one_exit:
9462 if (bp->regview)
9463 iounmap(bp->regview);
9465 if (bp->doorbells)
9466 iounmap(bp->doorbells);
9468 free_netdev(dev);
9470 if (atomic_read(&pdev->enable_cnt) == 1)
9471 pci_release_regions(pdev);
9473 pci_disable_device(pdev);
9474 pci_set_drvdata(pdev, NULL);
9476 return rc;
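/* PCI remove: unregister the interface, release NAPI and interrupt
 * resources, unmap the BARs and power the device down.
 */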
9479 static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
9481 struct net_device *dev = pci_get_drvdata(pdev);
9482 struct bnx2x *bp;
9484 if (!dev) {
9485 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
9486 return;
9488 bp = netdev_priv(dev);
9490 #ifdef BCM_CNIC
9491 /* Delete storage MAC address */
9492 if (!NO_FCOE(bp)) {
9493 rtnl_lock();
9494 dev_addr_del(bp->dev, bp->fip_mac, NETDEV_HW_ADDR_T_SAN);
9495 rtnl_unlock();
9497 #endif
9499 unregister_netdev(dev);
9501 /* Delete all NAPI objects */
9502 bnx2x_del_all_napi(bp);
9504 /* Power on: we can't let PCI layer write to us while we are in D3 */
9505 bnx2x_set_power_state(bp, PCI_D0);
9507 /* Disable MSI/MSI-X */
9508 bnx2x_disable_msi(bp);
9510 /* Power off */
9511 bnx2x_set_power_state(bp, PCI_D3hot);
9513 /* Make sure RESET task is not scheduled before continuing */
9514 cancel_delayed_work_sync(&bp->reset_task);
9516 if (bp->regview)
9517 iounmap(bp->regview);
9519 if (bp->doorbells)
9520 iounmap(bp->doorbells);
9522 bnx2x_free_mem_bp(bp);
9524 free_netdev(dev);
9526 if (atomic_read(&pdev->enable_cnt) == 1)
9527 pci_release_regions(pdev);
9529 pci_disable_device(pdev);
9530 pci_set_drvdata(pdev, NULL);
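/* Lightweight unload used by the PCI error (EEH) handlers: stop the
 * interface, release IRQs and free driver memory.
 */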
9533 static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
9535 int i;
9537 bp->state = BNX2X_STATE_ERROR;
9539 bp->rx_mode = BNX2X_RX_MODE_NONE;
9541 bnx2x_netif_stop(bp, 0);
9542 netif_carrier_off(bp->dev);
9544 del_timer_sync(&bp->timer);
9545 bp->stats_state = STATS_STATE_DISABLED;
9546 DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
9548 /* Release IRQs */
9549 bnx2x_free_irq(bp);
9551 /* Free SKBs, SGEs, TPA pool and driver internals */
9552 bnx2x_free_skbs(bp);
9554 for_each_rx_queue(bp, i)
9555 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
9557 bnx2x_free_mem(bp);
9559 bp->state = BNX2X_STATE_CLOSED;
9561 return 0;
9564 static void bnx2x_eeh_recover(struct bnx2x *bp)
9566 u32 val;
9568 mutex_init(&bp->port.phy_mutex);
9570 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
9571 bp->link_params.shmem_base = bp->common.shmem_base;
9572 BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);
9574 if (!bp->common.shmem_base ||
9575 (bp->common.shmem_base < 0xA0000) ||
9576 (bp->common.shmem_base >= 0xC0000)) {
9577 BNX2X_DEV_INFO("MCP not active\n");
9578 bp->flags |= NO_MCP_FLAG;
9579 return;
9582 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
9583 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
9584 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
9585 BNX2X_ERR("BAD MCP validity signature\n");
9587 if (!BP_NOMCP(bp)) {
9588 bp->fw_seq =
9589 (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
9590 DRV_MSG_SEQ_NUMBER_MASK);
9591 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
9596 * bnx2x_io_error_detected - called when PCI error is detected
9597 * @pdev: Pointer to PCI device
9598 * @state: The current pci connection state
9600 * This function is called after a PCI bus error affecting
9601 * this device has been detected.
9603 static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
9604 pci_channel_state_t state)
9606 struct net_device *dev = pci_get_drvdata(pdev);
9607 struct bnx2x *bp = netdev_priv(dev);
9609 rtnl_lock();
9611 netif_device_detach(dev);
9613 if (state == pci_channel_io_perm_failure) {
9614 rtnl_unlock();
9615 return PCI_ERS_RESULT_DISCONNECT;
9618 if (netif_running(dev))
9619 bnx2x_eeh_nic_unload(bp);
9621 pci_disable_device(pdev);
9623 rtnl_unlock();
9625 /* Request a slot reset */
9626 return PCI_ERS_RESULT_NEED_RESET;
9630 * bnx2x_io_slot_reset - called after the PCI bus has been reset
9631 * @pdev: Pointer to PCI device
9633 * Restart the card from scratch, as if from a cold-boot.
9635 static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
9637 struct net_device *dev = pci_get_drvdata(pdev);
9638 struct bnx2x *bp = netdev_priv(dev);
9640 rtnl_lock();
9642 if (pci_enable_device(pdev)) {
9643 dev_err(&pdev->dev,
9644 "Cannot re-enable PCI device after reset\n");
9645 rtnl_unlock();
9646 return PCI_ERS_RESULT_DISCONNECT;
9649 pci_set_master(pdev);
9650 pci_restore_state(pdev);
9652 if (netif_running(dev))
9653 bnx2x_set_power_state(bp, PCI_D0);
9655 rtnl_unlock();
9657 return PCI_ERS_RESULT_RECOVERED;
9661 * bnx2x_io_resume - called when traffic can start flowing again
9662 * @pdev: Pointer to PCI device
9664 * This callback is called when the error recovery driver tells us that
9665 * it's OK to resume normal operation.
9667 static void bnx2x_io_resume(struct pci_dev *pdev)
9669 struct net_device *dev = pci_get_drvdata(pdev);
9670 struct bnx2x *bp = netdev_priv(dev);
9672 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
9673 printk(KERN_ERR "Handling parity error recovery. "
9674 "Try again later\n");
9675 return;
9678 rtnl_lock();
9680 bnx2x_eeh_recover(bp);
9682 if (netif_running(dev))
9683 bnx2x_nic_load(bp, LOAD_NORMAL);
9685 netif_device_attach(dev);
9687 rtnl_unlock();
9690 static struct pci_error_handlers bnx2x_err_handler = {
9691 .error_detected = bnx2x_io_error_detected,
9692 .slot_reset = bnx2x_io_slot_reset,
9693 .resume = bnx2x_io_resume,
9696 static struct pci_driver bnx2x_pci_driver = {
9697 .name = DRV_MODULE_NAME,
9698 .id_table = bnx2x_pci_tbl,
9699 .probe = bnx2x_init_one,
9700 .remove = __devexit_p(bnx2x_remove_one),
9701 .suspend = bnx2x_suspend,
9702 .resume = bnx2x_resume,
9703 .err_handler = &bnx2x_err_handler,
9706 static int __init bnx2x_init(void)
9708 int ret;
9710 pr_info("%s", version);
9712 bnx2x_wq = create_singlethread_workqueue("bnx2x");
9713 if (bnx2x_wq == NULL) {
9714 pr_err("Cannot create workqueue\n");
9715 return -ENOMEM;
9718 ret = pci_register_driver(&bnx2x_pci_driver);
9719 if (ret) {
9720 pr_err("Cannot register driver\n");
9721 destroy_workqueue(bnx2x_wq);
9723 return ret;
9726 static void __exit bnx2x_cleanup(void)
9728 pci_unregister_driver(&bnx2x_pci_driver);
9730 destroy_workqueue(bnx2x_wq);
9733 module_init(bnx2x_init);
9734 module_exit(bnx2x_cleanup);
9736 #ifdef BCM_CNIC
9738 /* count denotes the number of new completions we have seen */
9739 static void bnx2x_cnic_sp_post(struct bnx2x *bp, int count)
9741 struct eth_spe *spe;
9743 #ifdef BNX2X_STOP_ON_ERROR
9744 if (unlikely(bp->panic))
9745 return;
9746 #endif
9748 spin_lock_bh(&bp->spq_lock);
9749 BUG_ON(bp->cnic_spq_pending < count);
9750 bp->cnic_spq_pending -= count;
9753 for (; bp->cnic_kwq_pending; bp->cnic_kwq_pending--) {
9754 u16 type = (le16_to_cpu(bp->cnic_kwq_cons->hdr.type)
9755 & SPE_HDR_CONN_TYPE) >>
9756 SPE_HDR_CONN_TYPE_SHIFT;
9758 /* Set validation for iSCSI L2 client before sending SETUP
9759 * ramrod
9761 if (type == ETH_CONNECTION_TYPE) {
9762 u8 cmd = (le32_to_cpu(bp->cnic_kwq_cons->
9763 hdr.conn_and_cmd_data) >>
9764 SPE_HDR_CMD_ID_SHIFT) & 0xff;
9766 if (cmd == RAMROD_CMD_ID_ETH_CLIENT_SETUP)
9767 bnx2x_set_ctx_validation(&bp->context.
9768 vcxt[BNX2X_ISCSI_ETH_CID].eth,
9769 HW_CID(bp, BNX2X_ISCSI_ETH_CID));
9772 /* There may not be more than 8 L2 and COMMON SPEs and not more
9773 * than 8 L5 SPEs in the air.
9775 if ((type == NONE_CONNECTION_TYPE) ||
9776 (type == ETH_CONNECTION_TYPE)) {
9777 if (!atomic_read(&bp->spq_left))
9778 break;
9779 else
9780 atomic_dec(&bp->spq_left);
9781 } else if ((type == ISCSI_CONNECTION_TYPE) ||
9782 (type == FCOE_CONNECTION_TYPE)) {
9783 if (bp->cnic_spq_pending >=
9784 bp->cnic_eth_dev.max_kwqe_pending)
9785 break;
9786 else
9787 bp->cnic_spq_pending++;
9788 } else {
9789 BNX2X_ERR("Unknown SPE type: %d\n", type);
9790 bnx2x_panic();
9791 break;
9794 spe = bnx2x_sp_get_next(bp);
9795 *spe = *bp->cnic_kwq_cons;
9797 DP(NETIF_MSG_TIMER, "pending on SPQ %d, on KWQ %d count %d\n",
9798 bp->cnic_spq_pending, bp->cnic_kwq_pending, count);
9800 if (bp->cnic_kwq_cons == bp->cnic_kwq_last)
9801 bp->cnic_kwq_cons = bp->cnic_kwq;
9802 else
9803 bp->cnic_kwq_cons++;
9805 bnx2x_sp_prod_update(bp);
9806 spin_unlock_bh(&bp->spq_lock);
9809 static int bnx2x_cnic_sp_queue(struct net_device *dev,
9810 struct kwqe_16 *kwqes[], u32 count)
9812 struct bnx2x *bp = netdev_priv(dev);
9813 int i;
9815 #ifdef BNX2X_STOP_ON_ERROR
9816 if (unlikely(bp->panic))
9817 return -EIO;
9818 #endif
9820 spin_lock_bh(&bp->spq_lock);
9822 for (i = 0; i < count; i++) {
9823 struct eth_spe *spe = (struct eth_spe *)kwqes[i];
9825 if (bp->cnic_kwq_pending == MAX_SP_DESC_CNT)
9826 break;
9828 *bp->cnic_kwq_prod = *spe;
9830 bp->cnic_kwq_pending++;
9832 DP(NETIF_MSG_TIMER, "L5 SPQE %x %x %x:%x pos %d\n",
9833 spe->hdr.conn_and_cmd_data, spe->hdr.type,
9834 spe->data.update_data_addr.hi,
9835 spe->data.update_data_addr.lo,
9836 bp->cnic_kwq_pending);
9838 if (bp->cnic_kwq_prod == bp->cnic_kwq_last)
9839 bp->cnic_kwq_prod = bp->cnic_kwq;
9840 else
9841 bp->cnic_kwq_prod++;
9844 spin_unlock_bh(&bp->spq_lock);
9846 if (bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending)
9847 bnx2x_cnic_sp_post(bp, 0);
9849 return i;
9852 static int bnx2x_cnic_ctl_send(struct bnx2x *bp, struct cnic_ctl_info *ctl)
9854 struct cnic_ops *c_ops;
9855 int rc = 0;
9857 mutex_lock(&bp->cnic_mutex);
9858 c_ops = bp->cnic_ops;
9859 if (c_ops)
9860 rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
9861 mutex_unlock(&bp->cnic_mutex);
9863 return rc;
9866 static int bnx2x_cnic_ctl_send_bh(struct bnx2x *bp, struct cnic_ctl_info *ctl)
9868 struct cnic_ops *c_ops;
9869 int rc = 0;
9871 rcu_read_lock();
9872 c_ops = rcu_dereference(bp->cnic_ops);
9873 if (c_ops)
9874 rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
9875 rcu_read_unlock();
9877 return rc;
9881 * for commands that have no data
9883 int bnx2x_cnic_notify(struct bnx2x *bp, int cmd)
9885 struct cnic_ctl_info ctl = {0};
9887 ctl.cmd = cmd;
9889 return bnx2x_cnic_ctl_send(bp, &ctl);
9892 static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid)
9894 struct cnic_ctl_info ctl;
9896 /* first we tell CNIC and only then we count this as a completion */
9897 ctl.cmd = CNIC_CTL_COMPLETION_CMD;
9898 ctl.data.comp.cid = cid;
9900 bnx2x_cnic_ctl_send_bh(bp, &ctl);
9901 bnx2x_cnic_sp_post(bp, 0);
static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc = 0;

	switch (ctl->cmd) {
	case DRV_CTL_CTXTBL_WR_CMD: {
		u32 index = ctl->data.io.offset;
		dma_addr_t addr = ctl->data.io.dma_addr;

		bnx2x_ilt_wr(bp, index, addr);
		break;
	}

	case DRV_CTL_RET_L5_SPQ_CREDIT_CMD: {
		int count = ctl->data.credit.credit_count;

		bnx2x_cnic_sp_post(bp, count);
		break;
	}

	/* rtnl_lock is held.  */
	case DRV_CTL_START_L2_CMD: {
		u32 cli = ctl->data.ring.client_id;

		/* Clear the FCoE FIP and ALL ENODE MAC addresses first */
		bnx2x_del_fcoe_eth_macs(bp);

		/* Set iSCSI MAC address */
		bnx2x_set_iscsi_eth_mac_addr(bp, 1);

		mmiowb();
		barrier();

		/* Start accepting on the iSCSI L2 ring.  Accept all
		 * multicasts because that is the only way for the UIO
		 * Client to receive them (in non-promiscuous mode only one
		 * Client per function receives multicast packets, the
		 * leading Client in our case).
		 */
		bnx2x_rxq_set_mac_filters(bp, cli,
			BNX2X_ACCEPT_UNICAST |
			BNX2X_ACCEPT_BROADCAST |
			BNX2X_ACCEPT_ALL_MULTICAST);
		storm_memset_mac_filters(bp, &bp->mac_filters, BP_FUNC(bp));

		break;
	}

	/* rtnl_lock is held.  */
	case DRV_CTL_STOP_L2_CMD: {
		u32 cli = ctl->data.ring.client_id;

		/* Stop accepting on the iSCSI L2 ring */
		bnx2x_rxq_set_mac_filters(bp, cli, BNX2X_ACCEPT_NONE);
		storm_memset_mac_filters(bp, &bp->mac_filters, BP_FUNC(bp));

		mmiowb();
		barrier();

		/* Unset iSCSI L2 MAC */
		bnx2x_set_iscsi_eth_mac_addr(bp, 0);
		break;
	}
	case DRV_CTL_RET_L2_SPQ_CREDIT_CMD: {
		int count = ctl->data.credit.credit_count;

		smp_mb__before_atomic_inc();
		atomic_add(count, &bp->spq_left);
		smp_mb__after_atomic_inc();
		break;
	}

	default:
		BNX2X_ERR("unknown command %x\n", ctl->cmd);
		rc = -EINVAL;
	}

	return rc;
}
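
/* Describe the interrupt resources CNIC should use: the CNIC status block
 * (driven by MSI-X vector 1 when MSI-X is enabled) plus the default status
 * block as the second entry of irq_arr[].
 */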
void bnx2x_setup_cnic_irq_info(struct bnx2x *bp)
{
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	if (bp->flags & USING_MSIX_FLAG) {
		cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
		cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
		cp->irq_arr[0].vector = bp->msix_table[1].vector;
	} else {
		cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
		cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
	}
	if (CHIP_IS_E2(bp))
		cp->irq_arr[0].status_blk = (void *)bp->cnic_sb.e2_sb;
	else
		cp->irq_arr[0].status_blk = (void *)bp->cnic_sb.e1x_sb;

	cp->irq_arr[0].status_blk_num = CNIC_SB_ID(bp);
	cp->irq_arr[0].status_blk_num2 = CNIC_IGU_SB_ID(bp);
	cp->irq_arr[1].status_blk = bp->def_status_blk;
	cp->irq_arr[1].status_blk_num = DEF_SB_ID;
	cp->irq_arr[1].status_blk_num2 = DEF_SB_IGU_ID;

	cp->num_irq = 2;
}
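
/* bnx2x_register_cnic - attach a CNIC driver to this device.  Allocates
 * the one-page KWQE staging buffer, resets its producer/consumer pointers
 * and publishes @ops with rcu_assign_pointer() so slow-path processing can
 * start delivering events to CNIC.
 */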
static int bnx2x_register_cnic(struct net_device *dev, struct cnic_ops *ops,
			       void *data)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	if (ops == NULL)
		return -EINVAL;

	if (atomic_read(&bp->intr_sem) != 0)
		return -EBUSY;

	bp->cnic_kwq = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!bp->cnic_kwq)
		return -ENOMEM;

	bp->cnic_kwq_cons = bp->cnic_kwq;
	bp->cnic_kwq_prod = bp->cnic_kwq;
	bp->cnic_kwq_last = bp->cnic_kwq + MAX_SP_DESC_CNT;

	bp->cnic_spq_pending = 0;
	bp->cnic_kwq_pending = 0;

	bp->cnic_data = data;

	cp->num_irq = 0;
	cp->drv_state = CNIC_DRV_STATE_REGD;
	cp->iro_arr = bp->iro_arr;

	bnx2x_setup_cnic_irq_info(bp);

	rcu_assign_pointer(bp->cnic_ops, ops);

	return 0;
}
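
/* Detach the CNIC driver: clear cnic_ops under cnic_mutex, wait for RCU
 * readers (bnx2x_cnic_ctl_send_bh()) to drain, then free the KWQE buffer.
 */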
static int bnx2x_unregister_cnic(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	mutex_lock(&bp->cnic_mutex);
	cp->drv_state = 0;
	rcu_assign_pointer(bp->cnic_ops, NULL);
	mutex_unlock(&bp->cnic_mutex);
	synchronize_rcu();
	kfree(bp->cnic_kwq);
	bp->cnic_kwq = NULL;

	return 0;
}
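
/* bnx2x_cnic_probe - fill in and return the cnic_eth_dev descriptor the
 * CNIC module uses for iSCSI/FCoE offload: register and doorbell BARs,
 * context-table geometry, starting CID and the driver callbacks defined
 * above.
 */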
struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	cp->drv_owner = THIS_MODULE;
	cp->chip_id = CHIP_ID(bp);
	cp->pdev = bp->pdev;
	cp->io_base = bp->regview;
	cp->io_base2 = bp->doorbells;
	cp->max_kwqe_pending = 8;
	cp->ctx_blk_size = CDU_ILT_PAGE_SZ;
	cp->ctx_tbl_offset = FUNC_ILT_BASE(BP_FUNC(bp)) +
			     bnx2x_cid_ilt_lines(bp);
	cp->ctx_tbl_len = CNIC_ILT_LINES;
	cp->starting_cid = bnx2x_cid_ilt_lines(bp) * ILT_PAGE_CIDS;
	cp->drv_submit_kwqes_16 = bnx2x_cnic_sp_queue;
	cp->drv_ctl = bnx2x_drv_ctl;
	cp->drv_register_cnic = bnx2x_register_cnic;
	cp->drv_unregister_cnic = bnx2x_unregister_cnic;
	cp->fcoe_init_cid = BNX2X_FCOE_ETH_CID;
	cp->iscsi_l2_client_id = BNX2X_ISCSI_ETH_CL_ID +
		BP_E1HVN(bp) * NONE_ETH_CONTEXT_USE;
	cp->iscsi_l2_cid = BNX2X_ISCSI_ETH_CID;

	DP(BNX2X_MSG_SP, "page_size %d, tbl_offset %d, tbl_lines %d, "
			 "starting cid %d\n",
	   cp->ctx_blk_size,
	   cp->ctx_tbl_offset,
	   cp->ctx_tbl_len,
	   cp->starting_cid);
	return cp;
}
EXPORT_SYMBOL(bnx2x_cnic_probe);
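
/* A minimal, hypothetical sketch of the consumer side of this interface.
 * The real consumer is the cnic module and its exact call sequence may
 * differ; "my_ops", "my_data", "kwqes" and "n" are placeholders:
 *
 *	struct cnic_eth_dev *ethdev = bnx2x_cnic_probe(netdev);
 *
 *	if (ethdev && !ethdev->drv_register_cnic(netdev, my_ops, my_data))
 *		ethdev->drv_submit_kwqes_16(netdev, kwqes, n);
 */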

#endif /* BCM_CNIC */