/* release/src-rt-6.x.4708/linux/linux-2.6.36/drivers/net/bnx2x/bnx2x_main.c */
3 #include <linux/module.h>
4 #include <linux/moduleparam.h>
5 #include <linux/kernel.h>
6 #include <linux/device.h> /* for dev_info() */
7 #include <linux/timer.h>
8 #include <linux/errno.h>
9 #include <linux/ioport.h>
10 #include <linux/slab.h>
11 #include <linux/vmalloc.h>
12 #include <linux/interrupt.h>
13 #include <linux/pci.h>
14 #include <linux/init.h>
15 #include <linux/netdevice.h>
16 #include <linux/etherdevice.h>
17 #include <linux/skbuff.h>
18 #include <linux/dma-mapping.h>
19 #include <linux/bitops.h>
20 #include <linux/irq.h>
21 #include <linux/delay.h>
22 #include <asm/byteorder.h>
23 #include <linux/time.h>
24 #include <linux/ethtool.h>
25 #include <linux/mii.h>
26 #include <linux/if_vlan.h>
27 #include <net/ip.h>
28 #include <net/tcp.h>
29 #include <net/checksum.h>
30 #include <net/ip6_checksum.h>
31 #include <linux/workqueue.h>
32 #include <linux/crc32.h>
33 #include <linux/crc32c.h>
34 #include <linux/prefetch.h>
35 #include <linux/zlib.h>
36 #include <linux/io.h>
37 #include <linux/stringify.h>
39 #define BNX2X_MAIN
40 #include "bnx2x.h"
41 #include "bnx2x_init.h"
42 #include "bnx2x_init_ops.h"
43 #include "bnx2x_cmn.h"
46 #include <linux/firmware.h>
47 #include "bnx2x_fw_file_hdr.h"
48 /* FW files */
49 #define FW_FILE_VERSION \
50 __stringify(BCM_5710_FW_MAJOR_VERSION) "." \
51 __stringify(BCM_5710_FW_MINOR_VERSION) "." \
52 __stringify(BCM_5710_FW_REVISION_VERSION) "." \
53 __stringify(BCM_5710_FW_ENGINEERING_VERSION)
54 #define FW_FILE_NAME_E1 "bnx2x-e1-" FW_FILE_VERSION ".fw"
55 #define FW_FILE_NAME_E1H "bnx2x-e1h-" FW_FILE_VERSION ".fw"
57 /* Time in jiffies before concluding the transmitter is hung */
58 #define TX_TIMEOUT (5*HZ)
60 static char version[] __devinitdata =
61 "Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
62 DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
64 MODULE_AUTHOR("Eliezer Tamir");
65 MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710/57711/57711E Driver");
66 MODULE_LICENSE("GPL");
67 MODULE_VERSION(DRV_MODULE_VERSION);
68 MODULE_FIRMWARE(FW_FILE_NAME_E1);
69 MODULE_FIRMWARE(FW_FILE_NAME_E1H);
71 static int multi_mode = 1;
72 module_param(multi_mode, int, 0);
73 MODULE_PARM_DESC(multi_mode, " Multi queue mode "
74 "(0 Disable; 1 Enable (default))");
76 static int num_queues;
77 module_param(num_queues, int, 0);
78 MODULE_PARM_DESC(num_queues, " Number of queues for multi_mode=1"
79 " (default is the number of CPUs)");
81 static int disable_tpa;
82 module_param(disable_tpa, int, 0);
83 MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");
85 static int int_mode;
86 module_param(int_mode, int, 0);
87 MODULE_PARM_DESC(int_mode, " Force interrupt mode other than MSI-X "
88 "(1 INT#x; 2 MSI)");
90 static int dropless_fc;
91 module_param(dropless_fc, int, 0);
92 MODULE_PARM_DESC(dropless_fc, " Pause on exhausted host ring");
94 static int poll;
95 module_param(poll, int, 0);
96 MODULE_PARM_DESC(poll, " Use polling (for debug)");
98 static int mrrs = -1;
99 module_param(mrrs, int, 0);
100 MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");
102 static int debug;
103 module_param(debug, int, 0);
104 MODULE_PARM_DESC(debug, " Default debug msglevel");
106 static struct workqueue_struct *bnx2x_wq;
108 enum bnx2x_board_type {
109 BCM57710 = 0,
110 BCM57711 = 1,
111 BCM57711E = 2,
112 };
114 /* indexed by board_type, above */
115 static struct {
116 char *name;
117 } board_info[] __devinitdata = {
118 { "Broadcom NetXtreme II BCM57710 XGb" },
119 { "Broadcom NetXtreme II BCM57711 XGb" },
120 { "Broadcom NetXtreme II BCM57711E XGb" }
124 static DEFINE_PCI_DEVICE_TABLE(bnx2x_pci_tbl) = {
125 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57710), BCM57710 },
126 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711), BCM57711 },
127 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711E), BCM57711E },
128 { 0 }
129 };
131 MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);
133 /****************************************************************************
134 * General service functions
135 ****************************************************************************/
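/*
 * The two helpers below access device registers indirectly through the PCI
 * configuration-space GRC window (PCICFG_GRC_ADDRESS/PCICFG_GRC_DATA),
 * restoring the window to PCICFG_VENDOR_ID_OFFSET when done.
 */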
137 /* used only at init
138 * locking is done by mcp
139 */
140 void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
142 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
143 pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
144 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
145 PCICFG_VENDOR_ID_OFFSET);
148 static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
150 u32 val;
152 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
153 pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
154 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
155 PCICFG_VENDOR_ID_OFFSET);
157 return val;
160 const u32 dmae_reg_go_c[] = {
161 DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
162 DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
163 DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
164 DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
167 /* copy command into DMAE command memory and set DMAE command go */
168 void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae, int idx)
170 u32 cmd_offset;
171 int i;
173 cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
174 for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
175 REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));
177 DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
178 idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
180 REG_WR(bp, dmae_reg_go_c[idx], 1);
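/*
 * bnx2x_write_dmae() copies len32 dwords from host memory at dma_addr to
 * the GRC address dst_addr using a DMAE command.  If DMAE is not ready yet
 * it falls back to indirect writes; otherwise it posts the command under
 * dmae_mutex and polls the wb_comp word until DMAE_COMP_VAL is seen or the
 * retry count expires.
 */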
183 void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
184 u32 len32)
186 struct dmae_command dmae;
187 u32 *wb_comp = bnx2x_sp(bp, wb_comp);
188 int cnt = 200;
190 if (!bp->dmae_ready) {
191 u32 *data = bnx2x_sp(bp, wb_data[0]);
193 DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x len32 %d)"
194 " using indirect\n", dst_addr, len32);
195 bnx2x_init_ind_wr(bp, dst_addr, data, len32);
196 return;
199 memset(&dmae, 0, sizeof(struct dmae_command));
201 dmae.opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
202 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
203 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
204 #ifdef __BIG_ENDIAN
205 DMAE_CMD_ENDIANITY_B_DW_SWAP |
206 #else
207 DMAE_CMD_ENDIANITY_DW_SWAP |
208 #endif
209 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
210 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
211 dmae.src_addr_lo = U64_LO(dma_addr);
212 dmae.src_addr_hi = U64_HI(dma_addr);
213 dmae.dst_addr_lo = dst_addr >> 2;
214 dmae.dst_addr_hi = 0;
215 dmae.len = len32;
216 dmae.comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
217 dmae.comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
218 dmae.comp_val = DMAE_COMP_VAL;
220 DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
221 DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
222 "dst_addr [%x:%08x (%08x)]\n"
223 DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
224 dmae.opcode, dmae.src_addr_hi, dmae.src_addr_lo,
225 dmae.len, dmae.dst_addr_hi, dmae.dst_addr_lo, dst_addr,
226 dmae.comp_addr_hi, dmae.comp_addr_lo, dmae.comp_val);
227 DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
228 bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
229 bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
231 mutex_lock(&bp->dmae_mutex);
233 *wb_comp = 0;
235 bnx2x_post_dmae(bp, &dmae, INIT_DMAE_C(bp));
237 udelay(5);
239 while (*wb_comp != DMAE_COMP_VAL) {
240 DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);
242 if (!cnt) {
243 BNX2X_ERR("DMAE timeout!\n");
244 break;
246 cnt--;
247 /* adjust delay for emulation/FPGA */
248 if (CHIP_REV_IS_SLOW(bp))
249 msleep(100);
250 else
251 udelay(5);
254 mutex_unlock(&bp->dmae_mutex);
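/*
 * bnx2x_read_dmae() is the mirror operation: GRC to the slowpath wb_data[]
 * buffer, with the same indirect fallback and completion polling.
 */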
257 void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
259 struct dmae_command dmae;
260 u32 *wb_comp = bnx2x_sp(bp, wb_comp);
261 int cnt = 200;
263 if (!bp->dmae_ready) {
264 u32 *data = bnx2x_sp(bp, wb_data[0]);
265 int i;
267 DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x len32 %d)"
268 " using indirect\n", src_addr, len32);
269 for (i = 0; i < len32; i++)
270 data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
271 return;
274 memset(&dmae, 0, sizeof(struct dmae_command));
276 dmae.opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
277 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
278 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
279 #ifdef __BIG_ENDIAN
280 DMAE_CMD_ENDIANITY_B_DW_SWAP |
281 #else
282 DMAE_CMD_ENDIANITY_DW_SWAP |
283 #endif
284 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
285 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
286 dmae.src_addr_lo = src_addr >> 2;
287 dmae.src_addr_hi = 0;
288 dmae.dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
289 dmae.dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
290 dmae.len = len32;
291 dmae.comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
292 dmae.comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
293 dmae.comp_val = DMAE_COMP_VAL;
295 DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
296 DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
297 "dst_addr [%x:%08x (%08x)]\n"
298 DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
299 dmae.opcode, dmae.src_addr_hi, dmae.src_addr_lo,
300 dmae.len, dmae.dst_addr_hi, dmae.dst_addr_lo, src_addr,
301 dmae.comp_addr_hi, dmae.comp_addr_lo, dmae.comp_val);
303 mutex_lock(&bp->dmae_mutex);
305 memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
306 *wb_comp = 0;
308 bnx2x_post_dmae(bp, &dmae, INIT_DMAE_C(bp));
310 udelay(5);
312 while (*wb_comp != DMAE_COMP_VAL) {
314 if (!cnt) {
315 BNX2X_ERR("DMAE timeout!\n");
316 break;
318 cnt--;
319 /* adjust delay for emulation/FPGA */
320 if (CHIP_REV_IS_SLOW(bp))
321 msleep(100);
322 else
323 udelay(5);
325 DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
326 bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
327 bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
329 mutex_unlock(&bp->dmae_mutex);
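/*
 * Writes longer than DMAE_LEN32_WR_MAX(bp) dwords are split into
 * maximum-sized DMAE commands, with the remainder sent last.
 */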
332 void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
333 u32 addr, u32 len)
335 int dmae_wr_max = DMAE_LEN32_WR_MAX(bp);
336 int offset = 0;
338 while (len > dmae_wr_max) {
339 bnx2x_write_dmae(bp, phys_addr + offset,
340 addr + offset, dmae_wr_max);
341 offset += dmae_wr_max * 4;
342 len -= dmae_wr_max;
345 bnx2x_write_dmae(bp, phys_addr + offset, addr + offset, len);
348 /* used only for slowpath so not inlined */
349 static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
351 u32 wb_write[2];
353 wb_write[0] = val_hi;
354 wb_write[1] = val_lo;
355 REG_WR_DMAE(bp, reg, wb_write, 2);
358 #ifdef USE_WB_RD
359 static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
361 u32 wb_data[2];
363 REG_RD_DMAE(bp, reg, wb_data, 2);
365 return HILO_U64(wb_data[0], wb_data[1]);
367 #endif
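/*
 * bnx2x_mc_assert() scans the XSTORM/TSTORM/CSTORM/USTORM assert lists in
 * internal memory, prints every valid entry and returns the number of
 * asserts found.
 */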
369 static int bnx2x_mc_assert(struct bnx2x *bp)
371 char last_idx;
372 int i, rc = 0;
373 u32 row0, row1, row2, row3;
375 /* XSTORM */
376 last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
377 XSTORM_ASSERT_LIST_INDEX_OFFSET);
378 if (last_idx)
379 BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
381 /* print the asserts */
382 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
384 row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
385 XSTORM_ASSERT_LIST_OFFSET(i));
386 row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
387 XSTORM_ASSERT_LIST_OFFSET(i) + 4);
388 row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
389 XSTORM_ASSERT_LIST_OFFSET(i) + 8);
390 row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
391 XSTORM_ASSERT_LIST_OFFSET(i) + 12);
393 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
394 BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
395 " 0x%08x 0x%08x 0x%08x\n",
396 i, row3, row2, row1, row0);
397 rc++;
398 } else {
399 break;
403 /* TSTORM */
404 last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
405 TSTORM_ASSERT_LIST_INDEX_OFFSET);
406 if (last_idx)
407 BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
409 /* print the asserts */
410 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
412 row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
413 TSTORM_ASSERT_LIST_OFFSET(i));
414 row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
415 TSTORM_ASSERT_LIST_OFFSET(i) + 4);
416 row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
417 TSTORM_ASSERT_LIST_OFFSET(i) + 8);
418 row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
419 TSTORM_ASSERT_LIST_OFFSET(i) + 12);
421 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
422 BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
423 " 0x%08x 0x%08x 0x%08x\n",
424 i, row3, row2, row1, row0);
425 rc++;
426 } else {
427 break;
431 /* CSTORM */
432 last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
433 CSTORM_ASSERT_LIST_INDEX_OFFSET);
434 if (last_idx)
435 BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
437 /* print the asserts */
438 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
440 row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
441 CSTORM_ASSERT_LIST_OFFSET(i));
442 row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
443 CSTORM_ASSERT_LIST_OFFSET(i) + 4);
444 row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
445 CSTORM_ASSERT_LIST_OFFSET(i) + 8);
446 row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
447 CSTORM_ASSERT_LIST_OFFSET(i) + 12);
449 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
450 BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
451 " 0x%08x 0x%08x 0x%08x\n",
452 i, row3, row2, row1, row0);
453 rc++;
454 } else {
455 break;
459 /* USTORM */
460 last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
461 USTORM_ASSERT_LIST_INDEX_OFFSET);
462 if (last_idx)
463 BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
465 /* print the asserts */
466 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
468 row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
469 USTORM_ASSERT_LIST_OFFSET(i));
470 row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
471 USTORM_ASSERT_LIST_OFFSET(i) + 4);
472 row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
473 USTORM_ASSERT_LIST_OFFSET(i) + 8);
474 row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
475 USTORM_ASSERT_LIST_OFFSET(i) + 12);
477 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
478 BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
479 " 0x%08x 0x%08x 0x%08x\n",
480 i, row3, row2, row1, row0);
481 rc++;
482 } else {
483 break;
487 return rc;
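/*
 * bnx2x_fw_dump() prints the MCP scratchpad trace buffer; it does nothing
 * when the device has no MCP (BP_NOMCP).
 */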
490 static void bnx2x_fw_dump(struct bnx2x *bp)
492 u32 addr;
493 u32 mark, offset;
494 __be32 data[9];
495 int word;
497 if (BP_NOMCP(bp)) {
498 BNX2X_ERR("NO MCP - can not dump\n");
499 return;
502 addr = bp->common.shmem_base - 0x0800 + 4;
503 mark = REG_RD(bp, addr);
504 mark = MCP_REG_MCPR_SCRATCH + ((mark + 0x3) & ~0x3) - 0x08000000;
505 pr_err("begin fw dump (mark 0x%x)\n", mark);
507 pr_err("");
508 for (offset = mark; offset <= bp->common.shmem_base; offset += 0x8*4) {
509 for (word = 0; word < 8; word++)
510 data[word] = htonl(REG_RD(bp, offset + 4*word));
511 data[8] = 0x0;
512 pr_cont("%s", (char *)data);
514 for (offset = addr + 4; offset <= mark; offset += 0x8*4) {
515 for (word = 0; word < 8; word++)
516 data[word] = htonl(REG_RD(bp, offset + 4*word));
517 data[8] = 0x0;
518 pr_cont("%s", (char *)data);
520 pr_err("end of fw dump\n");
523 void bnx2x_panic_dump(struct bnx2x *bp)
525 int i;
526 u16 j, start, end;
528 bp->stats_state = STATS_STATE_DISABLED;
529 DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
531 BNX2X_ERR("begin crash dump -----------------\n");
533 /* Indices */
534 /* Common */
535 BNX2X_ERR("def_c_idx(0x%x) def_u_idx(0x%x) def_x_idx(0x%x)"
536 " def_t_idx(0x%x) def_att_idx(0x%x) attn_state(0x%x)"
537 " spq_prod_idx(0x%x)\n",
538 bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
539 bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);
541 /* Rx */
542 for_each_queue(bp, i) {
543 struct bnx2x_fastpath *fp = &bp->fp[i];
545 BNX2X_ERR("fp%d: rx_bd_prod(0x%x) rx_bd_cons(0x%x)"
546 " *rx_bd_cons_sb(0x%x) rx_comp_prod(0x%x)"
547 " rx_comp_cons(0x%x) *rx_cons_sb(0x%x)\n",
548 i, fp->rx_bd_prod, fp->rx_bd_cons,
549 le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
550 fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
551 BNX2X_ERR(" rx_sge_prod(0x%x) last_max_sge(0x%x)"
552 " fp_u_idx(0x%x) *sb_u_idx(0x%x)\n",
553 fp->rx_sge_prod, fp->last_max_sge,
554 le16_to_cpu(fp->fp_u_idx),
555 fp->status_blk->u_status_block.status_block_index);
558 /* Tx */
559 for_each_queue(bp, i) {
560 struct bnx2x_fastpath *fp = &bp->fp[i];
562 BNX2X_ERR("fp%d: tx_pkt_prod(0x%x) tx_pkt_cons(0x%x)"
563 " tx_bd_prod(0x%x) tx_bd_cons(0x%x)"
564 " *tx_cons_sb(0x%x)\n",
565 i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
566 fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
567 BNX2X_ERR(" fp_c_idx(0x%x) *sb_c_idx(0x%x)"
568 " tx_db_prod(0x%x)\n", le16_to_cpu(fp->fp_c_idx),
569 fp->status_blk->c_status_block.status_block_index,
570 fp->tx_db.data.prod);
573 /* Rings */
574 /* Rx */
575 for_each_queue(bp, i) {
576 struct bnx2x_fastpath *fp = &bp->fp[i];
578 start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
579 end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
580 for (j = start; j != end; j = RX_BD(j + 1)) {
581 u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
582 struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];
584 BNX2X_ERR("fp%d: rx_bd[%x]=[%x:%x] sw_bd=[%p]\n",
585 i, j, rx_bd[1], rx_bd[0], sw_bd->skb);
588 start = RX_SGE(fp->rx_sge_prod);
589 end = RX_SGE(fp->last_max_sge);
590 for (j = start; j != end; j = RX_SGE(j + 1)) {
591 u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
592 struct sw_rx_page *sw_page = &fp->rx_page_ring[j];
594 BNX2X_ERR("fp%d: rx_sge[%x]=[%x:%x] sw_page=[%p]\n",
595 i, j, rx_sge[1], rx_sge[0], sw_page->page);
598 start = RCQ_BD(fp->rx_comp_cons - 10);
599 end = RCQ_BD(fp->rx_comp_cons + 503);
600 for (j = start; j != end; j = RCQ_BD(j + 1)) {
601 u32 *cqe = (u32 *)&fp->rx_comp_ring[j];
603 BNX2X_ERR("fp%d: cqe[%x]=[%x:%x:%x:%x]\n",
604 i, j, cqe[0], cqe[1], cqe[2], cqe[3]);
608 /* Tx */
609 for_each_queue(bp, i) {
610 struct bnx2x_fastpath *fp = &bp->fp[i];
612 start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
613 end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
614 for (j = start; j != end; j = TX_BD(j + 1)) {
615 struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];
617 BNX2X_ERR("fp%d: packet[%x]=[%p,%x]\n",
618 i, j, sw_bd->skb, sw_bd->first_bd);
621 start = TX_BD(fp->tx_bd_cons - 10);
622 end = TX_BD(fp->tx_bd_cons + 254);
623 for (j = start; j != end; j = TX_BD(j + 1)) {
624 u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];
626 BNX2X_ERR("fp%d: tx_bd[%x]=[%x:%x:%x:%x]\n",
627 i, j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
631 bnx2x_fw_dump(bp);
632 bnx2x_mc_assert(bp);
633 BNX2X_ERR("end crash dump -----------------\n");
636 void bnx2x_int_enable(struct bnx2x *bp)
638 int port = BP_PORT(bp);
639 u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
640 u32 val = REG_RD(bp, addr);
641 int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
642 int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;
644 if (msix) {
645 val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
646 HC_CONFIG_0_REG_INT_LINE_EN_0);
647 val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
648 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
649 } else if (msi) {
650 val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
651 val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
652 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
653 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
654 } else {
655 val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
656 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
657 HC_CONFIG_0_REG_INT_LINE_EN_0 |
658 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
660 DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
661 val, port, addr);
663 REG_WR(bp, addr, val);
665 val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
668 DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x) mode %s\n",
669 val, port, addr, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));
671 REG_WR(bp, addr, val);
672 /*
673 * Ensure that HC_CONFIG is written before leading/trailing edge config
674 */
675 mmiowb();
676 barrier();
678 if (CHIP_IS_E1H(bp)) {
679 /* init leading/trailing edge */
680 if (IS_E1HMF(bp)) {
681 val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
682 if (bp->port.pmf)
683 /* enable nig and gpio3 attention */
684 val |= 0x1100;
685 } else
686 val = 0xffff;
688 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
689 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
692 /* Make sure that interrupts are indeed enabled from here on */
693 mmiowb();
696 static void bnx2x_int_disable(struct bnx2x *bp)
698 int port = BP_PORT(bp);
699 u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
700 u32 val = REG_RD(bp, addr);
702 val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
703 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
704 HC_CONFIG_0_REG_INT_LINE_EN_0 |
705 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
707 DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
708 val, port, addr);
710 /* flush all outstanding writes */
711 mmiowb();
713 REG_WR(bp, addr, val);
714 if (REG_RD(bp, addr) != val)
715 BNX2X_ERR("BUG! proper val not read from IGU!\n");
718 void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
720 int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
721 int i, offset;
723 /* disable interrupt handling */
724 atomic_inc(&bp->intr_sem);
725 smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
727 if (disable_hw)
728 /* prevent the HW from sending interrupts */
729 bnx2x_int_disable(bp);
731 /* make sure all ISRs are done */
732 if (msix) {
733 synchronize_irq(bp->msix_table[0].vector);
734 offset = 1;
735 #ifdef BCM_CNIC
736 offset++;
737 #endif
738 for_each_queue(bp, i)
739 synchronize_irq(bp->msix_table[i + offset].vector);
740 } else
741 synchronize_irq(bp->pdev->irq);
743 /* make sure sp_task is not running */
744 cancel_delayed_work(&bp->sp_task);
745 flush_workqueue(bnx2x_wq);
748 /* fast path */
750 /*
751 * General service functions
752 */
754 /* Return true if succeeded to acquire the lock */
755 static bool bnx2x_trylock_hw_lock(struct bnx2x *bp, u32 resource)
757 u32 lock_status;
758 u32 resource_bit = (1 << resource);
759 int func = BP_FUNC(bp);
760 u32 hw_lock_control_reg;
762 DP(NETIF_MSG_HW, "Trying to take a lock on resource %d\n", resource);
764 /* Validating that the resource is within range */
765 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
766 DP(NETIF_MSG_HW,
767 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
768 resource, HW_LOCK_MAX_RESOURCE_VALUE);
769 return -EINVAL;
772 if (func <= 5)
773 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
774 else
775 hw_lock_control_reg =
776 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
778 /* Try to acquire the lock */
779 REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
780 lock_status = REG_RD(bp, hw_lock_control_reg);
781 if (lock_status & resource_bit)
782 return true;
784 DP(NETIF_MSG_HW, "Failed to get a lock on resource %d\n", resource);
785 return false;
789 #ifdef BCM_CNIC
790 static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid);
791 #endif
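/*
 * bnx2x_sp_event() handles ramrod completions; they arrive on the fastpath
 * RCQ even though the requests are posted on the slow path queue.  The
 * handler advances the per-fastpath or global state machine and returns the
 * SPQ credit.
 */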
793 void bnx2x_sp_event(struct bnx2x_fastpath *fp,
794 union eth_rx_cqe *rr_cqe)
796 struct bnx2x *bp = fp->bp;
797 int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
798 int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);
800 DP(BNX2X_MSG_SP,
801 "fp %d cid %d got ramrod #%d state is %x type is %d\n",
802 fp->index, cid, command, bp->state,
803 rr_cqe->ramrod_cqe.ramrod_type);
805 bp->spq_left++;
807 if (fp->index) {
808 switch (command | fp->state) {
809 case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
810 BNX2X_FP_STATE_OPENING):
811 DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
812 cid);
813 fp->state = BNX2X_FP_STATE_OPEN;
814 break;
816 case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
817 DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
818 cid);
819 fp->state = BNX2X_FP_STATE_HALTED;
820 break;
822 default:
823 BNX2X_ERR("unexpected MC reply (%d) "
824 "fp[%d] state is %x\n",
825 command, fp->index, fp->state);
826 break;
828 mb(); /* force bnx2x_wait_ramrod() to see the change */
829 return;
832 switch (command | bp->state) {
833 case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
834 DP(NETIF_MSG_IFUP, "got setup ramrod\n");
835 bp->state = BNX2X_STATE_OPEN;
836 break;
838 case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
839 DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
840 bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
841 fp->state = BNX2X_FP_STATE_HALTED;
842 break;
844 case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
845 DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
846 bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
847 break;
849 #ifdef BCM_CNIC
850 case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_OPEN):
851 DP(NETIF_MSG_IFDOWN, "got delete ramrod for CID %d\n", cid);
852 bnx2x_cnic_cfc_comp(bp, cid);
853 break;
854 #endif
856 case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
857 case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
858 DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
859 bp->set_mac_pending--;
860 smp_wmb();
861 break;
863 case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
864 DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
865 bp->set_mac_pending--;
866 smp_wmb();
867 break;
869 default:
870 BNX2X_ERR("unexpected MC reply (%d) bp->state is %x\n",
871 command, bp->state);
872 break;
874 mb(); /* force bnx2x_wait_ramrod() to see the change */
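/*
 * bnx2x_interrupt() is the INTx/MSI handler.  bnx2x_ack_int() returns a
 * status bitmask: per-queue bits (0x2 << sb_id) schedule NAPI on the
 * matching fastpath, bit 0 kicks the slow path work item.
 */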
877 irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
879 struct bnx2x *bp = netdev_priv(dev_instance);
880 u16 status = bnx2x_ack_int(bp);
881 u16 mask;
882 int i;
884 /* Return here if interrupt is shared and it's not for us */
885 if (unlikely(status == 0)) {
886 DP(NETIF_MSG_INTR, "not our interrupt!\n");
887 return IRQ_NONE;
889 DP(NETIF_MSG_INTR, "got an interrupt status 0x%x\n", status);
891 /* Return here if interrupt is disabled */
892 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
893 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
894 return IRQ_HANDLED;
897 #ifdef BNX2X_STOP_ON_ERROR
898 if (unlikely(bp->panic))
899 return IRQ_HANDLED;
900 #endif
902 for (i = 0; i < BNX2X_NUM_QUEUES(bp); i++) {
903 struct bnx2x_fastpath *fp = &bp->fp[i];
905 mask = 0x2 << fp->sb_id;
906 if (status & mask) {
907 /* Handle Rx and Tx according to SB id */
908 prefetch(fp->rx_cons_sb);
909 prefetch(&fp->status_blk->u_status_block.
910 status_block_index);
911 prefetch(fp->tx_cons_sb);
912 prefetch(&fp->status_blk->c_status_block.
913 status_block_index);
914 napi_schedule(&bnx2x_fp(bp, fp->index, napi));
915 status &= ~mask;
919 #ifdef BCM_CNIC
920 mask = 0x2 << CNIC_SB_ID(bp);
921 if (status & (mask | 0x1)) {
922 struct cnic_ops *c_ops = NULL;
924 rcu_read_lock();
925 c_ops = rcu_dereference(bp->cnic_ops);
926 if (c_ops)
927 c_ops->cnic_handler(bp->cnic_data, NULL);
928 rcu_read_unlock();
930 status &= ~mask;
932 #endif
934 if (unlikely(status & 0x1)) {
935 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
937 status &= ~0x1;
938 if (!status)
939 return IRQ_HANDLED;
942 if (unlikely(status))
943 DP(NETIF_MSG_INTR, "got an unknown interrupt! (status 0x%x)\n",
944 status);
946 return IRQ_HANDLED;
949 /* end of fast path */
952 /* Link */
954 /*
955 * General service functions
956 */
958 int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
960 u32 lock_status;
961 u32 resource_bit = (1 << resource);
962 int func = BP_FUNC(bp);
963 u32 hw_lock_control_reg;
964 int cnt;
966 /* Validating that the resource is within range */
967 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
968 DP(NETIF_MSG_HW,
969 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
970 resource, HW_LOCK_MAX_RESOURCE_VALUE);
971 return -EINVAL;
974 if (func <= 5) {
975 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
976 } else {
977 hw_lock_control_reg =
978 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
981 /* Validating that the resource is not already taken */
982 lock_status = REG_RD(bp, hw_lock_control_reg);
983 if (lock_status & resource_bit) {
984 DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
985 lock_status, resource_bit);
986 return -EEXIST;
989 /* Try for 5 seconds every 5ms */
990 for (cnt = 0; cnt < 1000; cnt++) {
991 /* Try to acquire the lock */
992 REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
993 lock_status = REG_RD(bp, hw_lock_control_reg);
994 if (lock_status & resource_bit)
995 return 0;
997 msleep(5);
999 DP(NETIF_MSG_HW, "Timeout\n");
1000 return -EAGAIN;
1003 int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
1005 u32 lock_status;
1006 u32 resource_bit = (1 << resource);
1007 int func = BP_FUNC(bp);
1008 u32 hw_lock_control_reg;
1010 DP(NETIF_MSG_HW, "Releasing a lock on resource %d\n", resource);
1012 /* Validating that the resource is within range */
1013 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1014 DP(NETIF_MSG_HW,
1015 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1016 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1017 return -EINVAL;
1020 if (func <= 5) {
1021 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1022 } else {
1023 hw_lock_control_reg =
1024 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1027 /* Validating that the resource is currently taken */
1028 lock_status = REG_RD(bp, hw_lock_control_reg);
1029 if (!(lock_status & resource_bit)) {
1030 DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
1031 lock_status, resource_bit);
1032 return -EFAULT;
1035 REG_WR(bp, hw_lock_control_reg, resource_bit);
1036 return 0;
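/*
 * GPIO helpers: the requested port is XORed with the NIG port-swap strap so
 * each port always addresses its own pins, and pin numbers above
 * MISC_REGISTERS_GPIO_3 are rejected.
 */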
1040 int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
1042 /* The GPIO should be swapped if swap register is set and active */
1043 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1044 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1045 int gpio_shift = gpio_num +
1046 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1047 u32 gpio_mask = (1 << gpio_shift);
1048 u32 gpio_reg;
1049 int value;
1051 if (gpio_num > MISC_REGISTERS_GPIO_3) {
1052 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1053 return -EINVAL;
1056 /* read GPIO value */
1057 gpio_reg = REG_RD(bp, MISC_REG_GPIO);
1059 /* get the requested pin value */
1060 if ((gpio_reg & gpio_mask) == gpio_mask)
1061 value = 1;
1062 else
1063 value = 0;
1065 DP(NETIF_MSG_LINK, "pin %d value 0x%x\n", gpio_num, value);
1067 return value;
1070 int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
1072 /* The GPIO should be swapped if swap register is set and active */
1073 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1074 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1075 int gpio_shift = gpio_num +
1076 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1077 u32 gpio_mask = (1 << gpio_shift);
1078 u32 gpio_reg;
1080 if (gpio_num > MISC_REGISTERS_GPIO_3) {
1081 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1082 return -EINVAL;
1085 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1086 /* read GPIO and mask except the float bits */
1087 gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
1089 switch (mode) {
1090 case MISC_REGISTERS_GPIO_OUTPUT_LOW:
1091 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
1092 gpio_num, gpio_shift);
1093 /* clear FLOAT and set CLR */
1094 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1095 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
1096 break;
1098 case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
1099 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
1100 gpio_num, gpio_shift);
1101 /* clear FLOAT and set SET */
1102 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1103 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
1104 break;
1106 case MISC_REGISTERS_GPIO_INPUT_HI_Z:
1107 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
1108 gpio_num, gpio_shift);
1109 /* set FLOAT */
1110 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1111 break;
1113 default:
1114 break;
1117 REG_WR(bp, MISC_REG_GPIO, gpio_reg);
1118 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1120 return 0;
1123 int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
1125 /* The GPIO should be swapped if swap register is set and active */
1126 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1127 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1128 int gpio_shift = gpio_num +
1129 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1130 u32 gpio_mask = (1 << gpio_shift);
1131 u32 gpio_reg;
1133 if (gpio_num > MISC_REGISTERS_GPIO_3) {
1134 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1135 return -EINVAL;
1138 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1139 /* read GPIO int */
1140 gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);
1142 switch (mode) {
1143 case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
1144 DP(NETIF_MSG_LINK, "Clear GPIO INT %d (shift %d) -> "
1145 "output low\n", gpio_num, gpio_shift);
1146 /* clear SET and set CLR */
1147 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
1148 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
1149 break;
1151 case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
1152 DP(NETIF_MSG_LINK, "Set GPIO INT %d (shift %d) -> "
1153 "output high\n", gpio_num, gpio_shift);
1154 /* clear CLR and set SET */
1155 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
1156 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
1157 break;
1159 default:
1160 break;
1163 REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
1164 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1166 return 0;
1169 static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
1171 u32 spio_mask = (1 << spio_num);
1172 u32 spio_reg;
1174 if ((spio_num < MISC_REGISTERS_SPIO_4) ||
1175 (spio_num > MISC_REGISTERS_SPIO_7)) {
1176 BNX2X_ERR("Invalid SPIO %d\n", spio_num);
1177 return -EINVAL;
1180 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
1181 /* read SPIO and mask except the float bits */
1182 spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);
1184 switch (mode) {
1185 case MISC_REGISTERS_SPIO_OUTPUT_LOW:
1186 DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
1187 /* clear FLOAT and set CLR */
1188 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1189 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
1190 break;
1192 case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
1193 DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
1194 /* clear FLOAT and set SET */
1195 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1196 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
1197 break;
1199 case MISC_REGISTERS_SPIO_INPUT_HI_Z:
1200 DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
1201 /* set FLOAT */
1202 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1203 break;
1205 default:
1206 break;
1209 REG_WR(bp, MISC_REG_SPIO, spio_reg);
1210 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
1212 return 0;
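/*
 * bnx2x_calc_fc_adv() translates the negotiated IEEE pause bits in
 * link_vars.ieee_fc into the ADVERTISED_Pause/ADVERTISED_Asym_Pause bits of
 * bp->port.advertising.
 */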
1215 void bnx2x_calc_fc_adv(struct bnx2x *bp)
1217 switch (bp->link_vars.ieee_fc &
1218 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
1219 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
1220 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
1221 ADVERTISED_Pause);
1222 break;
1224 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
1225 bp->port.advertising |= (ADVERTISED_Asym_Pause |
1226 ADVERTISED_Pause);
1227 break;
1229 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
1230 bp->port.advertising |= ADVERTISED_Asym_Pause;
1231 break;
1233 default:
1234 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
1235 ADVERTISED_Pause);
1236 break;
1241 u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
1243 if (!BP_NOMCP(bp)) {
1244 u8 rc;
1246 /* Initialize link parameters structure variables */
1247 /* It is recommended to turn off RX FC for jumbo frames
1248 for better performance */
1249 if (bp->dev->mtu > 5000)
1250 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
1251 else
1252 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
1254 bnx2x_acquire_phy_lock(bp);
1256 if (load_mode == LOAD_DIAG)
1257 bp->link_params.loopback_mode = LOOPBACK_XGXS_10;
1259 rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
1261 bnx2x_release_phy_lock(bp);
1263 bnx2x_calc_fc_adv(bp);
1265 if (CHIP_REV_IS_SLOW(bp) && bp->link_vars.link_up) {
1266 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
1267 bnx2x_link_report(bp);
1270 return rc;
1272 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
1273 return -EINVAL;
1276 void bnx2x_link_set(struct bnx2x *bp)
1278 if (!BP_NOMCP(bp)) {
1279 bnx2x_acquire_phy_lock(bp);
1280 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
1281 bnx2x_release_phy_lock(bp);
1283 bnx2x_calc_fc_adv(bp);
1284 } else
1285 BNX2X_ERR("Bootcode is missing - can not set link\n");
1288 static void bnx2x__link_reset(struct bnx2x *bp)
1290 if (!BP_NOMCP(bp)) {
1291 bnx2x_acquire_phy_lock(bp);
1292 bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
1293 bnx2x_release_phy_lock(bp);
1294 } else
1295 BNX2X_ERR("Bootcode is missing - can not reset link\n");
1298 u8 bnx2x_link_test(struct bnx2x *bp)
1300 u8 rc = 0;
1302 if (!BP_NOMCP(bp)) {
1303 bnx2x_acquire_phy_lock(bp);
1304 rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
1305 bnx2x_release_phy_lock(bp);
1306 } else
1307 BNX2X_ERR("Bootcode is missing - can not test link\n");
1309 return rc;
1312 static void bnx2x_init_port_minmax(struct bnx2x *bp)
1314 u32 r_param = bp->link_vars.line_speed / 8;
1315 u32 fair_periodic_timeout_usec;
1316 u32 t_fair;
1318 memset(&(bp->cmng.rs_vars), 0,
1319 sizeof(struct rate_shaping_vars_per_port));
1320 memset(&(bp->cmng.fair_vars), 0, sizeof(struct fairness_vars_per_port));
1322 /* 100 usec in SDM ticks = 25 since each tick is 4 usec */
1323 bp->cmng.rs_vars.rs_periodic_timeout = RS_PERIODIC_TIMEOUT_USEC / 4;
1325 /* this is the threshold below which no timer arming will occur
1326 1.25 coefficient is for the threshold to be a little bigger
1327 than the real time, to compensate for timer inaccuracy */
1328 bp->cmng.rs_vars.rs_threshold =
1329 (RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;
1331 /* resolution of fairness timer */
1332 fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
1333 /* for 10G it is 1000usec. for 1G it is 10000usec. */
1334 t_fair = T_FAIR_COEF / bp->link_vars.line_speed;
1336 /* this is the threshold below which we won't arm the timer anymore */
1337 bp->cmng.fair_vars.fair_threshold = QM_ARB_BYTES;
1339 /* we multiply by 1e3/8 to get bytes/msec.
1340 We don't want the credits to pass a credit
1341 of the t_fair*FAIR_MEM (algorithm resolution) */
1342 bp->cmng.fair_vars.upper_bound = r_param * t_fair * FAIR_MEM;
1343 /* since each tick is 4 usec */
1344 bp->cmng.fair_vars.fairness_timeout = fair_periodic_timeout_usec / 4;
1347 /* Calculates the sum of vn_min_rates.
1348 It's needed for further normalizing of the min_rates.
1349 Returns:
1350 sum of vn_min_rates.
1351 or
1352 0 - if all the min_rates are 0.
1353 In the latter case the fairness algorithm should be deactivated.
1354 If not all min_rates are zero then those that are zeroes will be set to 1.
1355 */
1356 static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
1358 int all_zero = 1;
1359 int port = BP_PORT(bp);
1360 int vn;
1362 bp->vn_weight_sum = 0;
1363 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
1364 int func = 2*vn + port;
1365 u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
1366 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
1367 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
1369 /* Skip hidden vns */
1370 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
1371 continue;
1373 /* If min rate is zero - set it to 1 */
1374 if (!vn_min_rate)
1375 vn_min_rate = DEF_MIN_RATE;
1376 else
1377 all_zero = 0;
1379 bp->vn_weight_sum += vn_min_rate;
1382 /* ... only if all min rates are zeros - disable fairness */
1383 if (all_zero) {
1384 bp->cmng.flags.cmng_enables &=
1385 ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
1386 DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
1387 " fairness will be disabled\n");
1388 } else
1389 bp->cmng.flags.cmng_enables |=
1390 CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
1393 static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func)
1395 struct rate_shaping_vars_per_vn m_rs_vn;
1396 struct fairness_vars_per_vn m_fair_vn;
1397 u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
1398 u16 vn_min_rate, vn_max_rate;
1399 int i;
1401 /* If function is hidden - set min and max to zeroes */
1402 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
1403 vn_min_rate = 0;
1404 vn_max_rate = 0;
1406 } else {
1407 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
1408 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
1409 /* If min rate is zero - set it to 1 */
1410 if (!vn_min_rate)
1411 vn_min_rate = DEF_MIN_RATE;
1412 vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
1413 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
1415 DP(NETIF_MSG_IFUP,
1416 "func %d: vn_min_rate %d vn_max_rate %d vn_weight_sum %d\n",
1417 func, vn_min_rate, vn_max_rate, bp->vn_weight_sum);
1419 memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
1420 memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));
1422 /* global vn counter - maximal Mbps for this vn */
1423 m_rs_vn.vn_counter.rate = vn_max_rate;
1425 /* quota - number of bytes transmitted in this period */
1426 m_rs_vn.vn_counter.quota =
1427 (vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;
1429 if (bp->vn_weight_sum) {
1430 /* credit for each period of the fairness algorithm:
1431 number of bytes in T_FAIR (the vn share the port rate).
1432 vn_weight_sum should not be larger than 10000, thus
1433 T_FAIR_COEF / (8 * vn_weight_sum) will always be greater
1434 than zero */
1435 m_fair_vn.vn_credit_delta =
1436 max_t(u32, (vn_min_rate * (T_FAIR_COEF /
1437 (8 * bp->vn_weight_sum))),
1438 (bp->cmng.fair_vars.fair_threshold * 2));
1439 DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta %d\n",
1440 m_fair_vn.vn_credit_delta);
1443 /* Store it to internal memory */
1444 for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
1445 REG_WR(bp, BAR_XSTRORM_INTMEM +
1446 XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
1447 ((u32 *)(&m_rs_vn))[i]);
1449 for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
1450 REG_WR(bp, BAR_XSTRORM_INTMEM +
1451 XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
1452 ((u32 *)(&m_fair_vn))[i]);
1456 /* This function is called upon link interrupt */
1457 static void bnx2x_link_attn(struct bnx2x *bp)
1459 u32 prev_link_status = bp->link_vars.link_status;
1460 /* Make sure that we are synced with the current statistics */
1461 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
1463 bnx2x_link_update(&bp->link_params, &bp->link_vars);
1465 if (bp->link_vars.link_up) {
1467 /* dropless flow control */
1468 if (CHIP_IS_E1H(bp) && bp->dropless_fc) {
1469 int port = BP_PORT(bp);
1470 u32 pause_enabled = 0;
1472 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
1473 pause_enabled = 1;
1475 REG_WR(bp, BAR_USTRORM_INTMEM +
1476 USTORM_ETH_PAUSE_ENABLED_OFFSET(port),
1477 pause_enabled);
1480 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
1481 struct host_port_stats *pstats;
1483 pstats = bnx2x_sp(bp, port_stats);
1484 /* reset old bmac stats */
1485 memset(&(pstats->mac_stx[0]), 0,
1486 sizeof(struct mac_stx));
1488 if (bp->state == BNX2X_STATE_OPEN)
1489 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
1492 /* indicate link status only if link status actually changed */
1493 if (prev_link_status != bp->link_vars.link_status)
1494 bnx2x_link_report(bp);
1496 if (IS_E1HMF(bp)) {
1497 int port = BP_PORT(bp);
1498 int func;
1499 int vn;
1501 /* Set the attention towards other drivers on the same port */
1502 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
1503 if (vn == BP_E1HVN(bp))
1504 continue;
1506 func = ((vn << 1) | port);
1507 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
1508 (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
1511 if (bp->link_vars.link_up) {
1512 int i;
1514 /* Init rate shaping and fairness contexts */
1515 bnx2x_init_port_minmax(bp);
1517 for (vn = VN_0; vn < E1HVN_MAX; vn++)
1518 bnx2x_init_vn_minmax(bp, 2*vn + port);
1520 /* Store it to internal memory */
1521 for (i = 0;
1522 i < sizeof(struct cmng_struct_per_port) / 4; i++)
1523 REG_WR(bp, BAR_XSTRORM_INTMEM +
1524 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
1525 ((u32 *)(&bp->cmng))[i]);
1530 void bnx2x__link_status_update(struct bnx2x *bp)
1532 if ((bp->state != BNX2X_STATE_OPEN) || (bp->flags & MF_FUNC_DIS))
1533 return;
1535 bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
1537 if (bp->link_vars.link_up)
1538 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
1539 else
1540 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
1542 bnx2x_calc_vn_weight_sum(bp);
1544 /* indicate link status */
1545 bnx2x_link_report(bp);
1548 static void bnx2x_pmf_update(struct bnx2x *bp)
1550 int port = BP_PORT(bp);
1551 u32 val;
1553 bp->port.pmf = 1;
1554 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
1556 /* enable nig attention */
1557 val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
1558 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
1559 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
1561 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
1564 /* end of Link */
1566 /* slow path */
1568 /*
1569 * General service functions
1570 */
1572 /* send the MCP a request, block until there is a reply */
1573 u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
1575 int func = BP_FUNC(bp);
1576 u32 seq = ++bp->fw_seq;
1577 u32 rc = 0;
1578 u32 cnt = 1;
1579 u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
1581 mutex_lock(&bp->fw_mb_mutex);
1582 SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
1583 DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
1585 do {
1586 /* let the FW do its magic ... */
1587 msleep(delay);
1589 rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);
1591 /* Give the FW up to 5 seconds (500*10ms) */
1592 } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 500));
1594 DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
1595 cnt*delay, rc, seq);
1597 /* is this a reply to our command? */
1598 if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK))
1599 rc &= FW_MSG_CODE_MASK;
1600 else {
1601 /* FW BUG! */
1602 BNX2X_ERR("FW failed to respond!\n");
1603 bnx2x_fw_dump(bp);
1604 rc = 0;
1606 mutex_unlock(&bp->fw_mb_mutex);
1608 return rc;
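/*
 * bnx2x_e1h_disable()/bnx2x_e1h_enable() stop or resume traffic for this
 * E1H function by toggling its NIG_REG_LLH0_FUNC_EN bit and the netdev Tx
 * queues.
 */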
1611 static void bnx2x_e1h_disable(struct bnx2x *bp)
1613 int port = BP_PORT(bp);
1615 netif_tx_disable(bp->dev);
1617 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
1619 netif_carrier_off(bp->dev);
1622 static void bnx2x_e1h_enable(struct bnx2x *bp)
1624 int port = BP_PORT(bp);
1626 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
1628 /* Tx queue should be only reenabled */
1629 netif_tx_wake_all_queues(bp->dev);
1631 /*
1632 * Should not call netif_carrier_on since it will be called if the link
1633 * is up when checking for link state
1634 */
1637 static void bnx2x_update_min_max(struct bnx2x *bp)
1639 int port = BP_PORT(bp);
1640 int vn, i;
1642 /* Init rate shaping and fairness contexts */
1643 bnx2x_init_port_minmax(bp);
1645 bnx2x_calc_vn_weight_sum(bp);
1647 for (vn = VN_0; vn < E1HVN_MAX; vn++)
1648 bnx2x_init_vn_minmax(bp, 2*vn + port);
1650 if (bp->port.pmf) {
1651 int func;
1653 /* Set the attention towards other drivers on the same port */
1654 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
1655 if (vn == BP_E1HVN(bp))
1656 continue;
1658 func = ((vn << 1) | port);
1659 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
1660 (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
1663 /* Store it to internal memory */
1664 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
1665 REG_WR(bp, BAR_XSTRORM_INTMEM +
1666 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
1667 ((u32 *)(&bp->cmng))[i]);
1671 static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event)
1673 DP(BNX2X_MSG_MCP, "dcc_event 0x%x\n", dcc_event);
1675 if (dcc_event & DRV_STATUS_DCC_DISABLE_ENABLE_PF) {
1677 /*
1678 * This is the only place besides the function initialization
1679 * where the bp->flags can change so it is done without any
1680 * locks
1681 */
1682 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
1683 DP(NETIF_MSG_IFDOWN, "mf_cfg function disabled\n");
1684 bp->flags |= MF_FUNC_DIS;
1686 bnx2x_e1h_disable(bp);
1687 } else {
1688 DP(NETIF_MSG_IFUP, "mf_cfg function enabled\n");
1689 bp->flags &= ~MF_FUNC_DIS;
1691 bnx2x_e1h_enable(bp);
1693 dcc_event &= ~DRV_STATUS_DCC_DISABLE_ENABLE_PF;
1695 if (dcc_event & DRV_STATUS_DCC_BANDWIDTH_ALLOCATION) {
1697 bnx2x_update_min_max(bp);
1698 dcc_event &= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION;
1701 /* Report results to MCP */
1702 if (dcc_event)
1703 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_FAILURE);
1704 else
1705 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK);
1708 /* must be called under the spq lock */
1709 static inline struct eth_spe *bnx2x_sp_get_next(struct bnx2x *bp)
1711 struct eth_spe *next_spe = bp->spq_prod_bd;
1713 if (bp->spq_prod_bd == bp->spq_last_bd) {
1714 bp->spq_prod_bd = bp->spq;
1715 bp->spq_prod_idx = 0;
1716 DP(NETIF_MSG_TIMER, "end of spq\n");
1717 } else {
1718 bp->spq_prod_bd++;
1719 bp->spq_prod_idx++;
1721 return next_spe;
1724 /* must be called under the spq lock */
1725 static inline void bnx2x_sp_prod_update(struct bnx2x *bp)
1727 int func = BP_FUNC(bp);
1729 /* Make sure that BD data is updated before writing the producer */
1730 wmb();
1732 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
1733 bp->spq_prod_idx);
1734 mmiowb();
1737 /* the slow path queue is odd since completions arrive on the fastpath ring */
1738 int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
1739 u32 data_hi, u32 data_lo, int common)
1741 struct eth_spe *spe;
1743 #ifdef BNX2X_STOP_ON_ERROR
1744 if (unlikely(bp->panic))
1745 return -EIO;
1746 #endif
1748 spin_lock_bh(&bp->spq_lock);
1750 if (!bp->spq_left) {
1751 BNX2X_ERR("BUG! SPQ ring full!\n");
1752 spin_unlock_bh(&bp->spq_lock);
1753 bnx2x_panic();
1754 return -EBUSY;
1757 spe = bnx2x_sp_get_next(bp);
1759 /* CID needs port number to be encoded in it */
1760 spe->hdr.conn_and_cmd_data =
1761 cpu_to_le32((command << SPE_HDR_CMD_ID_SHIFT) |
1762 HW_CID(bp, cid));
1763 spe->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
1764 if (common)
1765 spe->hdr.type |=
1766 cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));
1768 spe->data.mac_config_addr.hi = cpu_to_le32(data_hi);
1769 spe->data.mac_config_addr.lo = cpu_to_le32(data_lo);
1771 bp->spq_left--;
1773 DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
1774 "SPQE[%x] (%x:%x) command %d hw_cid %x data (%x:%x) left %x\n",
1775 bp->spq_prod_idx, (u32)U64_HI(bp->spq_mapping),
1776 (u32)(U64_LO(bp->spq_mapping) +
1777 (void *)bp->spq_prod_bd - (void *)bp->spq), command,
1778 HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);
1780 bnx2x_sp_prod_update(bp);
1781 spin_unlock_bh(&bp->spq_lock);
1782 return 0;
1785 /* acquire split MCP access lock register */
1786 static int bnx2x_acquire_alr(struct bnx2x *bp)
1788 u32 j, val;
1789 int rc = 0;
1791 might_sleep();
1792 for (j = 0; j < 1000; j++) {
1793 val = (1UL << 31);
1794 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
1795 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
1796 if (val & (1L << 31))
1797 break;
1799 msleep(5);
1801 if (!(val & (1L << 31))) {
1802 BNX2X_ERR("Cannot acquire MCP access lock register\n");
1803 rc = -EBUSY;
1806 return rc;
1809 /* release split MCP access lock register */
1810 static void bnx2x_release_alr(struct bnx2x *bp)
1812 REG_WR(bp, GRCBASE_MCP + 0x9c, 0);
1815 static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
1817 struct host_def_status_block *def_sb = bp->def_status_blk;
1818 u16 rc = 0;
1820 barrier(); /* status block is written to by the chip */
1821 if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
1822 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
1823 rc |= 1;
1825 if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
1826 bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
1827 rc |= 2;
1829 if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
1830 bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
1831 rc |= 4;
1833 if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
1834 bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
1835 rc |= 8;
1837 if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
1838 bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
1839 rc |= 16;
1841 return rc;
1844 /*
1845 * slow path service functions
1846 */
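/*
 * bnx2x_attn_int_asserted() masks the newly asserted bits in the AEU,
 * records them in bp->attn_state, handles the hard-wired NIG/GPIO/timer
 * attentions and finally writes the asserted set to the HC command
 * register.
 */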
1848 static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
1850 int port = BP_PORT(bp);
1851 u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
1852 COMMAND_REG_ATTN_BITS_SET);
1853 u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
1854 MISC_REG_AEU_MASK_ATTN_FUNC_0;
1855 u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
1856 NIG_REG_MASK_INTERRUPT_PORT0;
1857 u32 aeu_mask;
1858 u32 nig_mask = 0;
1860 if (bp->attn_state & asserted)
1861 BNX2X_ERR("IGU ERROR\n");
1863 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
1864 aeu_mask = REG_RD(bp, aeu_addr);
1866 DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n",
1867 aeu_mask, asserted);
1868 aeu_mask &= ~(asserted & 0x3ff);
1869 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
1871 REG_WR(bp, aeu_addr, aeu_mask);
1872 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
1874 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
1875 bp->attn_state |= asserted;
1876 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
1878 if (asserted & ATTN_HARD_WIRED_MASK) {
1879 if (asserted & ATTN_NIG_FOR_FUNC) {
1881 bnx2x_acquire_phy_lock(bp);
1883 /* save nig interrupt mask */
1884 nig_mask = REG_RD(bp, nig_int_mask_addr);
1885 REG_WR(bp, nig_int_mask_addr, 0);
1887 bnx2x_link_attn(bp);
1889 /* handle unicore attn? */
1891 if (asserted & ATTN_SW_TIMER_4_FUNC)
1892 DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
1894 if (asserted & GPIO_2_FUNC)
1895 DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
1897 if (asserted & GPIO_3_FUNC)
1898 DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
1900 if (asserted & GPIO_4_FUNC)
1901 DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
1903 if (port == 0) {
1904 if (asserted & ATTN_GENERAL_ATTN_1) {
1905 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
1906 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
1908 if (asserted & ATTN_GENERAL_ATTN_2) {
1909 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
1910 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
1912 if (asserted & ATTN_GENERAL_ATTN_3) {
1913 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
1914 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
1916 } else {
1917 if (asserted & ATTN_GENERAL_ATTN_4) {
1918 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
1919 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
1921 if (asserted & ATTN_GENERAL_ATTN_5) {
1922 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
1923 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
1925 if (asserted & ATTN_GENERAL_ATTN_6) {
1926 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
1927 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
1931 } /* if hardwired */
1933 DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
1934 asserted, hc_addr);
1935 REG_WR(bp, hc_addr, asserted);
1937 /* now set back the mask */
1938 if (asserted & ATTN_NIG_FOR_FUNC) {
1939 REG_WR(bp, nig_int_mask_addr, nig_mask);
1940 bnx2x_release_phy_lock(bp);
1944 static inline void bnx2x_fan_failure(struct bnx2x *bp)
1946 int port = BP_PORT(bp);
1948 /* mark the failure */
1949 bp->link_params.ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
1950 bp->link_params.ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
1951 SHMEM_WR(bp, dev_info.port_hw_config[port].external_phy_config,
1952 bp->link_params.ext_phy_config);
1954 /* log the failure */
1955 netdev_err(bp->dev, "Fan Failure on Network Controller has caused"
1956 " the driver to shutdown the card to prevent permanent"
1957 " damage. Please contact OEM Support for assistance\n");
1960 static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
1962 int port = BP_PORT(bp);
1963 int reg_offset;
1964 u32 val, swap_val, swap_override;
1966 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
1967 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
1969 if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
1971 val = REG_RD(bp, reg_offset);
1972 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
1973 REG_WR(bp, reg_offset, val);
1975 BNX2X_ERR("SPIO5 hw attention\n");
1977 /* Fan failure attention */
1978 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
1979 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
1980 /* Low power mode is controlled by GPIO 2 */
1981 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
1982 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
1983 /* The PHY reset is controlled by GPIO 1 */
1984 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
1985 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
1986 break;
1988 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
1989 /* The PHY reset is controlled by GPIO 1 */
1990 /* fake the port number to cancel the swap done in
1991 set_gpio() */
1992 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
1993 swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
1994 port = (swap_val && swap_override) ^ 1;
1995 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
1996 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
1997 break;
1999 default:
2000 break;
2002 bnx2x_fan_failure(bp);
2005 if (attn & (AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 |
2006 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1)) {
2007 bnx2x_acquire_phy_lock(bp);
2008 bnx2x_handle_module_detect_int(&bp->link_params);
2009 bnx2x_release_phy_lock(bp);
2012 if (attn & HW_INTERRUT_ASSERT_SET_0) {
2014 val = REG_RD(bp, reg_offset);
2015 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
2016 REG_WR(bp, reg_offset, val);
2018 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
2019 (u32)(attn & HW_INTERRUT_ASSERT_SET_0));
2020 bnx2x_panic();
2024 static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
2026 u32 val;
2028 if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {
2030 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
2031 BNX2X_ERR("DB hw attention 0x%x\n", val);
2032 /* DORQ discard attention */
2033 if (val & 0x2)
2034 BNX2X_ERR("FATAL error from DORQ\n");
2037 if (attn & HW_INTERRUT_ASSERT_SET_1) {
2039 int port = BP_PORT(bp);
2040 int reg_offset;
2042 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
2043 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
2045 val = REG_RD(bp, reg_offset);
2046 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
2047 REG_WR(bp, reg_offset, val);
2049 BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
2050 (u32)(attn & HW_INTERRUT_ASSERT_SET_1));
2051 bnx2x_panic();
2055 static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
2057 u32 val;
2059 if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
2061 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
2062 BNX2X_ERR("CFC hw attention 0x%x\n", val);
2063 /* CFC error attention */
2064 if (val & 0x2)
2065 BNX2X_ERR("FATAL error from CFC\n");
2068 if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
2070 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
2071 BNX2X_ERR("PXP hw attention 0x%x\n", val);
2072 /* RQ_USDMDP_FIFO_OVERFLOW */
2073 if (val & 0x18000)
2074 BNX2X_ERR("FATAL error from PXP\n");
2077 if (attn & HW_INTERRUT_ASSERT_SET_2) {
2079 int port = BP_PORT(bp);
2080 int reg_offset;
2082 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
2083 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
2085 val = REG_RD(bp, reg_offset);
2086 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
2087 REG_WR(bp, reg_offset, val);
2089 BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
2090 (u32)(attn & HW_INTERRUT_ASSERT_SET_2));
2091 bnx2x_panic();
2095 static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
2097 u32 val;
2099 if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
2101 if (attn & BNX2X_PMF_LINK_ASSERT) {
2102 int func = BP_FUNC(bp);
2104 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
2105 bp->mf_config = SHMEM_RD(bp,
2106 mf_cfg.func_mf_config[func].config);
2107 val = SHMEM_RD(bp, func_mb[func].drv_status);
2108 if (val & DRV_STATUS_DCC_EVENT_MASK)
2109 bnx2x_dcc_event(bp,
2110 (val & DRV_STATUS_DCC_EVENT_MASK));
2111 bnx2x__link_status_update(bp);
2112 if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF))
2113 bnx2x_pmf_update(bp);
2115 } else if (attn & BNX2X_MC_ASSERT_BITS) {
2117 BNX2X_ERR("MC assert!\n");
2118 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
2119 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
2120 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
2121 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
2122 bnx2x_panic();
2124 } else if (attn & BNX2X_MCP_ASSERT) {
2126 BNX2X_ERR("MCP assert!\n");
2127 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
2128 bnx2x_fw_dump(bp);
2130 } else
2131 BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
2134 if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
2135 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
2136 if (attn & BNX2X_GRC_TIMEOUT) {
2137 val = CHIP_IS_E1H(bp) ?
2138 REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
2139 BNX2X_ERR("GRC time-out 0x%08x\n", val);
2141 if (attn & BNX2X_GRC_RSV) {
2142 val = CHIP_IS_E1H(bp) ?
2143 REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
2144 BNX2X_ERR("GRC reserved 0x%08x\n", val);
2146 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
2150 #define BNX2X_MISC_GEN_REG MISC_REG_GENERIC_POR_1
2151 #define LOAD_COUNTER_BITS 16 /* Number of bits for load counter */
2152 #define LOAD_COUNTER_MASK (((u32)0x1 << LOAD_COUNTER_BITS) - 1)
2153 #define RESET_DONE_FLAG_MASK (~LOAD_COUNTER_MASK)
2154 #define RESET_DONE_FLAG_SHIFT LOAD_COUNTER_BITS
2155 #define CHIP_PARITY_SUPPORTED(bp) (CHIP_IS_E1(bp) || CHIP_IS_E1H(bp))
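/* A short note on the recovery helpers below (inferred from the macros and
 * code, not from the original comments): the BNX2X_MISC_GEN_REG scratch
 * register holds a load counter in its low 16 bits (how many functions
 * currently have the device loaded) and uses the bits above it (bit 16 in
 * practice) as a "reset in progress" flag, so bnx2x_reset_is_done() returns
 * true only while those bits are clear.
 */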
2157 /* should be run under rtnl lock */
2159 static inline void bnx2x_set_reset_done(struct bnx2x *bp)
2161 u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
2162 val &= ~(1 << RESET_DONE_FLAG_SHIFT);
2163 REG_WR(bp, BNX2X_MISC_GEN_REG, val);
2164 barrier();
2165 mmiowb();
2169 /* should be run under rtnl lock */
2171 static inline void bnx2x_set_reset_in_progress(struct bnx2x *bp)
2173 u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
2174 val |= (1 << 16);
2175 REG_WR(bp, BNX2X_MISC_GEN_REG, val);
2176 barrier();
2177 mmiowb();
2181 /* should be run under rtnl lock */
2183 bool bnx2x_reset_is_done(struct bnx2x *bp)
2185 u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
2186 DP(NETIF_MSG_HW, "GEN_REG_VAL=0x%08x\n", val);
2187 return (val & RESET_DONE_FLAG_MASK) ? false : true;
2191 /* should be run under rtnl lock */
2193 inline void bnx2x_inc_load_cnt(struct bnx2x *bp)
2195 u32 val1, val = REG_RD(bp, BNX2X_MISC_GEN_REG);
2197 DP(NETIF_MSG_HW, "Old GEN_REG_VAL=0x%08x\n", val);
2199 val1 = ((val & LOAD_COUNTER_MASK) + 1) & LOAD_COUNTER_MASK;
2200 REG_WR(bp, BNX2X_MISC_GEN_REG, (val & RESET_DONE_FLAG_MASK) | val1);
2201 barrier();
2202 mmiowb();
2206 /* should be run under rtnl lock */
2208 u32 bnx2x_dec_load_cnt(struct bnx2x *bp)
2210 u32 val1, val = REG_RD(bp, BNX2X_MISC_GEN_REG);
2212 DP(NETIF_MSG_HW, "Old GEN_REG_VAL=0x%08x\n", val);
2214 val1 = ((val & LOAD_COUNTER_MASK) - 1) & LOAD_COUNTER_MASK;
2215 REG_WR(bp, BNX2X_MISC_GEN_REG, (val & RESET_DONE_FLAG_MASK) | val1);
2216 barrier();
2217 mmiowb();
2219 return val1;
2223 /* should be run under rtnl lock */
2225 static inline u32 bnx2x_get_load_cnt(struct bnx2x *bp)
2227 return REG_RD(bp, BNX2X_MISC_GEN_REG) & LOAD_COUNTER_MASK;
2230 static inline void bnx2x_clear_load_cnt(struct bnx2x *bp)
2232 u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
2233 REG_WR(bp, BNX2X_MISC_GEN_REG, val & (~LOAD_COUNTER_MASK));
2236 static inline void _print_next_block(int idx, const char *blk)
2238 if (idx)
2239 pr_cont(", ");
2240 pr_cont("%s", blk);
2243 static inline int bnx2x_print_blocks_with_parity0(u32 sig, int par_num)
2245 int i = 0;
2246 u32 cur_bit = 0;
2247 for (i = 0; sig; i++) {
2248 cur_bit = ((u32)0x1 << i);
2249 if (sig & cur_bit) {
2250 switch (cur_bit) {
2251 case AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR:
2252 _print_next_block(par_num++, "BRB");
2253 break;
2254 case AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR:
2255 _print_next_block(par_num++, "PARSER");
2256 break;
2257 case AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR:
2258 _print_next_block(par_num++, "TSDM");
2259 break;
2260 case AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR:
2261 _print_next_block(par_num++, "SEARCHER");
2262 break;
2263 case AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR:
2264 _print_next_block(par_num++, "TSEMI");
2265 break;
2268 /* Clear the bit */
2269 sig &= ~cur_bit;
2273 return par_num;
2276 static inline int bnx2x_print_blocks_with_parity1(u32 sig, int par_num)
2278 int i = 0;
2279 u32 cur_bit = 0;
2280 for (i = 0; sig; i++) {
2281 cur_bit = ((u32)0x1 << i);
2282 if (sig & cur_bit) {
2283 switch (cur_bit) {
2284 case AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR:
2285 _print_next_block(par_num++, "PBCLIENT");
2286 break;
2287 case AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR:
2288 _print_next_block(par_num++, "QM");
2289 break;
2290 case AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR:
2291 _print_next_block(par_num++, "XSDM");
2292 break;
2293 case AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR:
2294 _print_next_block(par_num++, "XSEMI");
2295 break;
2296 case AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR:
2297 _print_next_block(par_num++, "DOORBELLQ");
2298 break;
2299 case AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR:
2300 _print_next_block(par_num++, "VAUX PCI CORE");
2301 break;
2302 case AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR:
2303 _print_next_block(par_num++, "DEBUG");
2304 break;
2305 case AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR:
2306 _print_next_block(par_num++, "USDM");
2307 break;
2308 case AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR:
2309 _print_next_block(par_num++, "USEMI");
2310 break;
2311 case AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR:
2312 _print_next_block(par_num++, "UPB");
2313 break;
2314 case AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR:
2315 _print_next_block(par_num++, "CSDM");
2316 break;
2319 /* Clear the bit */
2320 sig &= ~cur_bit;
2324 return par_num;
2327 static inline int bnx2x_print_blocks_with_parity2(u32 sig, int par_num)
2329 int i = 0;
2330 u32 cur_bit = 0;
2331 for (i = 0; sig; i++) {
2332 cur_bit = ((u32)0x1 << i);
2333 if (sig & cur_bit) {
2334 switch (cur_bit) {
2335 case AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR:
2336 _print_next_block(par_num++, "CSEMI");
2337 break;
2338 case AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR:
2339 _print_next_block(par_num++, "PXP");
2340 break;
2341 case AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR:
2342 _print_next_block(par_num++,
2343 "PXPPCICLOCKCLIENT");
2344 break;
2345 case AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR:
2346 _print_next_block(par_num++, "CFC");
2347 break;
2348 case AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR:
2349 _print_next_block(par_num++, "CDU");
2350 break;
2351 case AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR:
2352 _print_next_block(par_num++, "IGU");
2353 break;
2354 case AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR:
2355 _print_next_block(par_num++, "MISC");
2356 break;
2359 /* Clear the bit */
2360 sig &= ~cur_bit;
2364 return par_num;
2367 static inline int bnx2x_print_blocks_with_parity3(u32 sig, int par_num)
2369 int i = 0;
2370 u32 cur_bit = 0;
2371 for (i = 0; sig; i++) {
2372 cur_bit = ((u32)0x1 << i);
2373 if (sig & cur_bit) {
2374 switch (cur_bit) {
2375 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY:
2376 _print_next_block(par_num++, "MCP ROM");
2377 break;
2378 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY:
2379 _print_next_block(par_num++, "MCP UMP RX");
2380 break;
2381 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY:
2382 _print_next_block(par_num++, "MCP UMP TX");
2383 break;
2384 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY:
2385 _print_next_block(par_num++, "MCP SCPAD");
2386 break;
2389 /* Clear the bit */
2390 sig &= ~cur_bit;
2394 return par_num;
2397 static inline bool bnx2x_parity_attn(struct bnx2x *bp, u32 sig0, u32 sig1,
2398 u32 sig2, u32 sig3)
2400 if ((sig0 & HW_PRTY_ASSERT_SET_0) || (sig1 & HW_PRTY_ASSERT_SET_1) ||
2401 (sig2 & HW_PRTY_ASSERT_SET_2) || (sig3 & HW_PRTY_ASSERT_SET_3)) {
2402 int par_num = 0;
2403 DP(NETIF_MSG_HW, "Was parity error: HW block parity attention: "
2404 "[0]:0x%08x [1]:0x%08x "
2405 "[2]:0x%08x [3]:0x%08x\n",
2406 sig0 & HW_PRTY_ASSERT_SET_0,
2407 sig1 & HW_PRTY_ASSERT_SET_1,
2408 sig2 & HW_PRTY_ASSERT_SET_2,
2409 sig3 & HW_PRTY_ASSERT_SET_3);
2410 printk(KERN_ERR"%s: Parity errors detected in blocks: ",
2411 bp->dev->name);
2412 par_num = bnx2x_print_blocks_with_parity0(
2413 sig0 & HW_PRTY_ASSERT_SET_0, par_num);
2414 par_num = bnx2x_print_blocks_with_parity1(
2415 sig1 & HW_PRTY_ASSERT_SET_1, par_num);
2416 par_num = bnx2x_print_blocks_with_parity2(
2417 sig2 & HW_PRTY_ASSERT_SET_2, par_num);
2418 par_num = bnx2x_print_blocks_with_parity3(
2419 sig3 & HW_PRTY_ASSERT_SET_3, par_num);
2420 printk("\n");
2421 return true;
2422 } else
2423 return false;
2426 bool bnx2x_chk_parity_attn(struct bnx2x *bp)
2428 struct attn_route attn;
2429 int port = BP_PORT(bp);
2431 attn.sig[0] = REG_RD(bp,
2432 MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 +
2433 port*4);
2434 attn.sig[1] = REG_RD(bp,
2435 MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 +
2436 port*4);
2437 attn.sig[2] = REG_RD(bp,
2438 MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 +
2439 port*4);
2440 attn.sig[3] = REG_RD(bp,
2441 MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 +
2442 port*4);
2444 return bnx2x_parity_attn(bp, attn.sig[0], attn.sig[1], attn.sig[2],
2445 attn.sig[3]);
2448 static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
2450 struct attn_route attn, *group_mask;
2451 int port = BP_PORT(bp);
2452 int index;
2453 u32 reg_addr;
2454 u32 val;
2455 u32 aeu_mask;
2457 /* need to take HW lock because MCP or other port might also
2458 try to handle this event */
2459 bnx2x_acquire_alr(bp);
2461 if (bnx2x_chk_parity_attn(bp)) {
2462 bp->recovery_state = BNX2X_RECOVERY_INIT;
2463 bnx2x_set_reset_in_progress(bp);
2464 schedule_delayed_work(&bp->reset_task, 0);
2465 /* Disable HW interrupts */
2466 bnx2x_int_disable(bp);
2467 bnx2x_release_alr(bp);
2468 /* In case of parity errors don't handle attentions so that
2469 * other function would "see" parity errors. */
2471 return;
2474 attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
2475 attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
2476 attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
2477 attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
2478 DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
2479 attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);
2481 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
2482 if (deasserted & (1 << index)) {
2483 group_mask = &bp->attn_group[index];
2485 DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
2486 index, group_mask->sig[0], group_mask->sig[1],
2487 group_mask->sig[2], group_mask->sig[3]);
2489 bnx2x_attn_int_deasserted3(bp,
2490 attn.sig[3] & group_mask->sig[3]);
2491 bnx2x_attn_int_deasserted1(bp,
2492 attn.sig[1] & group_mask->sig[1]);
2493 bnx2x_attn_int_deasserted2(bp,
2494 attn.sig[2] & group_mask->sig[2]);
2495 bnx2x_attn_int_deasserted0(bp,
2496 attn.sig[0] & group_mask->sig[0]);
2500 bnx2x_release_alr(bp);
2502 reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);
2504 val = ~deasserted;
2505 DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2506 val, reg_addr);
2507 REG_WR(bp, reg_addr, val);
2509 if (~bp->attn_state & deasserted)
2510 BNX2X_ERR("IGU ERROR\n");
2512 reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2513 MISC_REG_AEU_MASK_ATTN_FUNC_0;
2515 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2516 aeu_mask = REG_RD(bp, reg_addr);
2518 DP(NETIF_MSG_HW, "aeu_mask %x newly deasserted %x\n",
2519 aeu_mask, deasserted);
2520 aeu_mask |= (deasserted & 0x3ff);
2521 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
2523 REG_WR(bp, reg_addr, aeu_mask);
2524 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2526 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2527 bp->attn_state &= ~deasserted;
2528 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2531 static void bnx2x_attn_int(struct bnx2x *bp)
2533 /* read local copy of bits */
2534 u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
2535 attn_bits);
2536 u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
2537 attn_bits_ack);
2538 u32 attn_state = bp->attn_state;
2540 /* look for changed bits */
2541 u32 asserted = attn_bits & ~attn_ack & ~attn_state;
2542 u32 deasserted = ~attn_bits & attn_ack & attn_state;
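/* In other words: "asserted" holds attention bits that are newly set in the
 * status block but not yet acked or tracked in attn_state, while "deasserted"
 * holds bits that were tracked and have now cleared; each set gets its own
 * handling path below.
 */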
2544 DP(NETIF_MSG_HW,
2545 "attn_bits %x attn_ack %x asserted %x deasserted %x\n",
2546 attn_bits, attn_ack, asserted, deasserted);
2548 if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
2549 BNX2X_ERR("BAD attention state\n");
2551 /* handle bits that were raised */
2552 if (asserted)
2553 bnx2x_attn_int_asserted(bp, asserted);
2555 if (deasserted)
2556 bnx2x_attn_int_deasserted(bp, deasserted);
2559 static void bnx2x_sp_task(struct work_struct *work)
2561 struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
2562 u16 status;
2564 /* Return here if interrupt is disabled */
2565 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
2566 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
2567 return;
2570 status = bnx2x_update_dsb_idx(bp);
2571 /* if (status == 0) */
2572 /* BNX2X_ERR("spurious slowpath interrupt!\n"); */
2574 DP(NETIF_MSG_INTR, "got a slowpath interrupt (status 0x%x)\n", status);
2576 /* HW attentions */
2577 if (status & 0x1) {
2578 bnx2x_attn_int(bp);
2579 status &= ~0x1;
2582 /* CStorm events: STAT_QUERY */
2583 if (status & 0x2) {
2584 DP(BNX2X_MSG_SP, "CStorm events: STAT_QUERY\n");
2585 status &= ~0x2;
2588 if (unlikely(status))
2589 DP(NETIF_MSG_INTR, "got an unknown interrupt! (status 0x%x)\n",
2590 status);
2592 bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, le16_to_cpu(bp->def_att_idx),
2593 IGU_INT_NOP, 1);
2594 bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
2595 IGU_INT_NOP, 1);
2596 bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
2597 IGU_INT_NOP, 1);
2598 bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
2599 IGU_INT_NOP, 1);
2600 bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
2601 IGU_INT_ENABLE, 1);
2604 irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
2606 struct net_device *dev = dev_instance;
2607 struct bnx2x *bp = netdev_priv(dev);
2609 /* Return here if interrupt is disabled */
2610 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
2611 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
2612 return IRQ_HANDLED;
2615 bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, 0, IGU_INT_DISABLE, 0);
2617 #ifdef BNX2X_STOP_ON_ERROR
2618 if (unlikely(bp->panic))
2619 return IRQ_HANDLED;
2620 #endif
2622 #ifdef BCM_CNIC
2624 struct cnic_ops *c_ops;
2626 rcu_read_lock();
2627 c_ops = rcu_dereference(bp->cnic_ops);
2628 if (c_ops)
2629 c_ops->cnic_handler(bp->cnic_data, NULL);
2630 rcu_read_unlock();
2632 #endif
2633 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
2635 return IRQ_HANDLED;
2638 /* end of slow path */
2640 static void bnx2x_timer(unsigned long data)
2642 struct bnx2x *bp = (struct bnx2x *) data;
2644 if (!netif_running(bp->dev))
2645 return;
2647 if (atomic_read(&bp->intr_sem) != 0)
2648 goto timer_restart;
2650 if (poll) {
2651 struct bnx2x_fastpath *fp = &bp->fp[0];
2652 int rc;
2654 bnx2x_tx_int(fp);
2655 rc = bnx2x_rx_int(fp, 1000);
2658 if (!BP_NOMCP(bp)) {
2659 int func = BP_FUNC(bp);
2660 u32 drv_pulse;
2661 u32 mcp_pulse;
2663 ++bp->fw_drv_pulse_wr_seq;
2664 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
2665 /* TBD - add SYSTEM_TIME */
2666 drv_pulse = bp->fw_drv_pulse_wr_seq;
2667 SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);
2669 mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
2670 MCP_PULSE_SEQ_MASK);
2671 /* The delta between driver pulse and mcp response
2672 * should be 1 (before mcp response) or 0 (after mcp response) */
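/* For example, if the driver has just written sequence 0x12, a healthy MCP
 * will report either 0x11 (response still pending) or 0x12 (already
 * responded); any other value means a missed heartbeat.
 */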
2674 if ((drv_pulse != mcp_pulse) &&
2675 (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
2676 /* someone lost a heartbeat... */
2677 BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
2678 drv_pulse, mcp_pulse);
2682 if (bp->state == BNX2X_STATE_OPEN)
2683 bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
2685 timer_restart:
2686 mod_timer(&bp->timer, jiffies + bp->current_interval);
2689 /* end of Statistics */
2691 /* nic init */
2694 /* nic init service functions */
2697 static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
2699 int port = BP_PORT(bp);
2701 /* "CSTORM" */
2702 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
2703 CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), 0,
2704 CSTORM_SB_STATUS_BLOCK_U_SIZE / 4);
2705 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
2706 CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), 0,
2707 CSTORM_SB_STATUS_BLOCK_C_SIZE / 4);
2710 void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
2711 dma_addr_t mapping, int sb_id)
2713 int port = BP_PORT(bp);
2714 int func = BP_FUNC(bp);
2715 int index;
2716 u64 section;
2718 /* USTORM */
2719 section = ((u64)mapping) + offsetof(struct host_status_block,
2720 u_status_block);
2721 sb->u_status_block.status_block_id = sb_id;
2723 REG_WR(bp, BAR_CSTRORM_INTMEM +
2724 CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id), U64_LO(section));
2725 REG_WR(bp, BAR_CSTRORM_INTMEM +
2726 ((CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id)) + 4),
2727 U64_HI(section));
2728 REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_USB_FUNC_OFF +
2729 CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), func);
2731 for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
2732 REG_WR16(bp, BAR_CSTRORM_INTMEM +
2733 CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id, index), 1);
2735 /* CSTORM */
2736 section = ((u64)mapping) + offsetof(struct host_status_block,
2737 c_status_block);
2738 sb->c_status_block.status_block_id = sb_id;
2740 REG_WR(bp, BAR_CSTRORM_INTMEM +
2741 CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id), U64_LO(section));
2742 REG_WR(bp, BAR_CSTRORM_INTMEM +
2743 ((CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id)) + 4),
2744 U64_HI(section));
2745 REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
2746 CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), func);
2748 for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
2749 REG_WR16(bp, BAR_CSTRORM_INTMEM +
2750 CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id, index), 1);
2752 bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
2755 static void bnx2x_zero_def_sb(struct bnx2x *bp)
2757 int func = BP_FUNC(bp);
2759 bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY +
2760 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
2761 sizeof(struct tstorm_def_status_block)/4);
2762 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
2763 CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), 0,
2764 sizeof(struct cstorm_def_status_block_u)/4);
2765 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
2766 CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), 0,
2767 sizeof(struct cstorm_def_status_block_c)/4);
2768 bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY +
2769 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
2770 sizeof(struct xstorm_def_status_block)/4);
2773 static void bnx2x_init_def_sb(struct bnx2x *bp,
2774 struct host_def_status_block *def_sb,
2775 dma_addr_t mapping, int sb_id)
2777 int port = BP_PORT(bp);
2778 int func = BP_FUNC(bp);
2779 int index, val, reg_offset;
2780 u64 section;
2782 /* ATTN */
2783 section = ((u64)mapping) + offsetof(struct host_def_status_block,
2784 atten_status_block);
2785 def_sb->atten_status_block.status_block_id = sb_id;
2787 bp->attn_state = 0;
2789 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
2790 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
2792 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
2793 bp->attn_group[index].sig[0] = REG_RD(bp,
2794 reg_offset + 0x10*index);
2795 bp->attn_group[index].sig[1] = REG_RD(bp,
2796 reg_offset + 0x4 + 0x10*index);
2797 bp->attn_group[index].sig[2] = REG_RD(bp,
2798 reg_offset + 0x8 + 0x10*index);
2799 bp->attn_group[index].sig[3] = REG_RD(bp,
2800 reg_offset + 0xc + 0x10*index);
2803 reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
2804 HC_REG_ATTN_MSG0_ADDR_L);
2806 REG_WR(bp, reg_offset, U64_LO(section));
2807 REG_WR(bp, reg_offset + 4, U64_HI(section));
2809 reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);
2811 val = REG_RD(bp, reg_offset);
2812 val |= sb_id;
2813 REG_WR(bp, reg_offset, val);
2815 /* USTORM */
2816 section = ((u64)mapping) + offsetof(struct host_def_status_block,
2817 u_def_status_block);
2818 def_sb->u_def_status_block.status_block_id = sb_id;
2820 REG_WR(bp, BAR_CSTRORM_INTMEM +
2821 CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func), U64_LO(section));
2822 REG_WR(bp, BAR_CSTRORM_INTMEM +
2823 ((CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func)) + 4),
2824 U64_HI(section));
2825 REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_USB_FUNC_OFF +
2826 CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), func);
2828 for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
2829 REG_WR16(bp, BAR_CSTRORM_INTMEM +
2830 CSTORM_DEF_SB_HC_DISABLE_U_OFFSET(func, index), 1);
2832 /* CSTORM */
2833 section = ((u64)mapping) + offsetof(struct host_def_status_block,
2834 c_def_status_block);
2835 def_sb->c_def_status_block.status_block_id = sb_id;
2837 REG_WR(bp, BAR_CSTRORM_INTMEM +
2838 CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func), U64_LO(section));
2839 REG_WR(bp, BAR_CSTRORM_INTMEM +
2840 ((CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func)) + 4),
2841 U64_HI(section));
2842 REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
2843 CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), func);
2845 for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
2846 REG_WR16(bp, BAR_CSTRORM_INTMEM +
2847 CSTORM_DEF_SB_HC_DISABLE_C_OFFSET(func, index), 1);
2849 /* TSTORM */
2850 section = ((u64)mapping) + offsetof(struct host_def_status_block,
2851 t_def_status_block);
2852 def_sb->t_def_status_block.status_block_id = sb_id;
2854 REG_WR(bp, BAR_TSTRORM_INTMEM +
2855 TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
2856 REG_WR(bp, BAR_TSTRORM_INTMEM +
2857 ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
2858 U64_HI(section));
2859 REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
2860 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
2862 for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
2863 REG_WR16(bp, BAR_TSTRORM_INTMEM +
2864 TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
2866 /* XSTORM */
2867 section = ((u64)mapping) + offsetof(struct host_def_status_block,
2868 x_def_status_block);
2869 def_sb->x_def_status_block.status_block_id = sb_id;
2871 REG_WR(bp, BAR_XSTRORM_INTMEM +
2872 XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
2873 REG_WR(bp, BAR_XSTRORM_INTMEM +
2874 ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
2875 U64_HI(section));
2876 REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
2877 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
2879 for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
2880 REG_WR16(bp, BAR_XSTRORM_INTMEM +
2881 XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
2883 bp->stats_pending = 0;
2884 bp->set_mac_pending = 0;
2886 bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
2889 void bnx2x_update_coalesce(struct bnx2x *bp)
2891 int port = BP_PORT(bp);
2892 int i;
2894 for_each_queue(bp, i) {
2895 int sb_id = bp->fp[i].sb_id;
2897 /* HC_INDEX_U_ETH_RX_CQ_CONS */
2898 REG_WR8(bp, BAR_CSTRORM_INTMEM +
2899 CSTORM_SB_HC_TIMEOUT_U_OFFSET(port, sb_id,
2900 U_SB_ETH_RX_CQ_INDEX),
2901 bp->rx_ticks/(4 * BNX2X_BTR));
2902 REG_WR16(bp, BAR_CSTRORM_INTMEM +
2903 CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id,
2904 U_SB_ETH_RX_CQ_INDEX),
2905 (bp->rx_ticks/(4 * BNX2X_BTR)) ? 0 : 1);
2907 /* HC_INDEX_C_ETH_TX_CQ_CONS */
2908 REG_WR8(bp, BAR_CSTRORM_INTMEM +
2909 CSTORM_SB_HC_TIMEOUT_C_OFFSET(port, sb_id,
2910 C_SB_ETH_TX_CQ_INDEX),
2911 bp->tx_ticks/(4 * BNX2X_BTR));
2912 REG_WR16(bp, BAR_CSTRORM_INTMEM +
2913 CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id,
2914 C_SB_ETH_TX_CQ_INDEX),
2915 (bp->tx_ticks/(4 * BNX2X_BTR)) ? 0 : 1);
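/* Note: a timeout that computes to zero effectively disables coalescing for
 * that index, which is why the HC_DISABLE field is written as 1 in that case.
 * The divisor (4 * BNX2X_BTR) is assumed here to translate the
 * microsecond-based rx_ticks/tx_ticks settings into the host coalescing
 * block's tick units.
 */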
2919 static void bnx2x_init_sp_ring(struct bnx2x *bp)
2921 int func = BP_FUNC(bp);
2923 spin_lock_init(&bp->spq_lock);
2925 bp->spq_left = MAX_SPQ_PENDING;
2926 bp->spq_prod_idx = 0;
2927 bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
2928 bp->spq_prod_bd = bp->spq;
2929 bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
2931 REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
2932 U64_LO(bp->spq_mapping));
2933 REG_WR(bp,
2934 XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
2935 U64_HI(bp->spq_mapping));
2937 REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
2938 bp->spq_prod_idx);
2941 static void bnx2x_init_context(struct bnx2x *bp)
2943 int i;
2945 /* Rx */
2946 for_each_queue(bp, i) {
2947 struct eth_context *context = bnx2x_sp(bp, context[i].eth);
2948 struct bnx2x_fastpath *fp = &bp->fp[i];
2949 u8 cl_id = fp->cl_id;
2951 context->ustorm_st_context.common.sb_index_numbers =
2952 BNX2X_RX_SB_INDEX_NUM;
2953 context->ustorm_st_context.common.clientId = cl_id;
2954 context->ustorm_st_context.common.status_block_id = fp->sb_id;
2955 context->ustorm_st_context.common.flags =
2956 (USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT |
2957 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_STATISTICS);
2958 context->ustorm_st_context.common.statistics_counter_id =
2959 cl_id;
2960 context->ustorm_st_context.common.mc_alignment_log_size =
2961 BNX2X_RX_ALIGN_SHIFT;
2962 context->ustorm_st_context.common.bd_buff_size =
2963 bp->rx_buf_size;
2964 context->ustorm_st_context.common.bd_page_base_hi =
2965 U64_HI(fp->rx_desc_mapping);
2966 context->ustorm_st_context.common.bd_page_base_lo =
2967 U64_LO(fp->rx_desc_mapping);
2968 if (!fp->disable_tpa) {
2969 context->ustorm_st_context.common.flags |=
2970 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA;
2971 context->ustorm_st_context.common.sge_buff_size =
2972 (u16)min_t(u32, SGE_PAGE_SIZE*PAGES_PER_SGE,
2973 0xffff);
2974 context->ustorm_st_context.common.sge_page_base_hi =
2975 U64_HI(fp->rx_sge_mapping);
2976 context->ustorm_st_context.common.sge_page_base_lo =
2977 U64_LO(fp->rx_sge_mapping);
2979 context->ustorm_st_context.common.max_sges_for_packet =
2980 SGE_PAGE_ALIGN(bp->dev->mtu) >> SGE_PAGE_SHIFT;
2981 context->ustorm_st_context.common.max_sges_for_packet =
2982 ((context->ustorm_st_context.common.
2983 max_sges_for_packet + PAGES_PER_SGE - 1) &
2984 (~(PAGES_PER_SGE - 1))) >> PAGES_PER_SGE_SHIFT;
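/* max_sges_for_packet is first computed as the number of SGE pages an
 * MTU-sized frame needs, then rounded up to a whole number of SGEs
 * (PAGES_PER_SGE pages each).
 */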
2987 context->ustorm_ag_context.cdu_usage =
2988 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
2989 CDU_REGION_NUMBER_UCM_AG,
2990 ETH_CONNECTION_TYPE);
2992 context->xstorm_ag_context.cdu_reserved =
2993 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
2994 CDU_REGION_NUMBER_XCM_AG,
2995 ETH_CONNECTION_TYPE);
2998 /* Tx */
2999 for_each_queue(bp, i) {
3000 struct bnx2x_fastpath *fp = &bp->fp[i];
3001 struct eth_context *context =
3002 bnx2x_sp(bp, context[i].eth);
3004 context->cstorm_st_context.sb_index_number =
3005 C_SB_ETH_TX_CQ_INDEX;
3006 context->cstorm_st_context.status_block_id = fp->sb_id;
3008 context->xstorm_st_context.tx_bd_page_base_hi =
3009 U64_HI(fp->tx_desc_mapping);
3010 context->xstorm_st_context.tx_bd_page_base_lo =
3011 U64_LO(fp->tx_desc_mapping);
3012 context->xstorm_st_context.statistics_data = (fp->cl_id |
3013 XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);
3017 static void bnx2x_init_ind_table(struct bnx2x *bp)
3019 int func = BP_FUNC(bp);
3020 int i;
3022 if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
3023 return;
3025 DP(NETIF_MSG_IFUP,
3026 "Initializing indirection table multi_mode %d\n", bp->multi_mode);
3027 for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
3028 REG_WR8(bp, BAR_TSTRORM_INTMEM +
3029 TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
3030 bp->fp->cl_id + (i % bp->num_queues));
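/* The table is filled round-robin, so RSS bucket i is served by client
 * (leading cl_id + i % num_queues); with a single queue every bucket simply
 * points at the leading client.
 */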
3033 void bnx2x_set_client_config(struct bnx2x *bp)
3035 struct tstorm_eth_client_config tstorm_client = {0};
3036 int port = BP_PORT(bp);
3037 int i;
3039 tstorm_client.mtu = bp->dev->mtu;
3040 tstorm_client.config_flags =
3041 (TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE |
3042 TSTORM_ETH_CLIENT_CONFIG_E1HOV_REM_ENABLE);
3043 #ifdef BCM_VLAN
3044 if (bp->rx_mode && bp->vlgrp && (bp->flags & HW_VLAN_RX_FLAG)) {
3045 tstorm_client.config_flags |=
3046 TSTORM_ETH_CLIENT_CONFIG_VLAN_REM_ENABLE;
3047 DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
3049 #endif
3051 for_each_queue(bp, i) {
3052 tstorm_client.statistics_counter_id = bp->fp[i].cl_id;
3054 REG_WR(bp, BAR_TSTRORM_INTMEM +
3055 TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
3056 ((u32 *)&tstorm_client)[0]);
3057 REG_WR(bp, BAR_TSTRORM_INTMEM +
3058 TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
3059 ((u32 *)&tstorm_client)[1]);
3062 DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
3063 ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
3066 void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
3068 struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
3069 int mode = bp->rx_mode;
3070 int mask = bp->rx_mode_cl_mask;
3071 int func = BP_FUNC(bp);
3072 int port = BP_PORT(bp);
3073 int i;
3074 /* All but management unicast packets should pass to the host as well */
3075 u32 llh_mask =
3076 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_BRCST |
3077 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_MLCST |
3078 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_VLAN |
3079 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_NO_VLAN;
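/* Management unicast (UNCST) is deliberately left out of the default mask
 * here; it is added only for promiscuous mode further down.
 */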
3081 DP(NETIF_MSG_IFUP, "rx mode %d mask 0x%x\n", mode, mask);
3083 switch (mode) {
3084 case BNX2X_RX_MODE_NONE: /* no Rx */
3085 tstorm_mac_filter.ucast_drop_all = mask;
3086 tstorm_mac_filter.mcast_drop_all = mask;
3087 tstorm_mac_filter.bcast_drop_all = mask;
3088 break;
3090 case BNX2X_RX_MODE_NORMAL:
3091 tstorm_mac_filter.bcast_accept_all = mask;
3092 break;
3094 case BNX2X_RX_MODE_ALLMULTI:
3095 tstorm_mac_filter.mcast_accept_all = mask;
3096 tstorm_mac_filter.bcast_accept_all = mask;
3097 break;
3099 case BNX2X_RX_MODE_PROMISC:
3100 tstorm_mac_filter.ucast_accept_all = mask;
3101 tstorm_mac_filter.mcast_accept_all = mask;
3102 tstorm_mac_filter.bcast_accept_all = mask;
3103 /* pass management unicast packets as well */
3104 llh_mask |= NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_UNCST;
3105 break;
3107 default:
3108 BNX2X_ERR("BAD rx mode (%d)\n", mode);
3109 break;
3112 REG_WR(bp,
3113 (port ? NIG_REG_LLH1_BRB1_DRV_MASK : NIG_REG_LLH0_BRB1_DRV_MASK),
3114 llh_mask);
3116 for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
3117 REG_WR(bp, BAR_TSTRORM_INTMEM +
3118 TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
3119 ((u32 *)&tstorm_mac_filter)[i]);
3121 /* DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
3122 ((u32 *)&tstorm_mac_filter)[i]); */
3125 if (mode != BNX2X_RX_MODE_NONE)
3126 bnx2x_set_client_config(bp);
3129 static void bnx2x_init_internal_common(struct bnx2x *bp)
3131 int i;
3133 /* Zero this manually as its initialization is
3134 currently missing in the initTool */
3135 for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
3136 REG_WR(bp, BAR_USTRORM_INTMEM +
3137 USTORM_AGG_DATA_OFFSET + i * 4, 0);
3140 static void bnx2x_init_internal_port(struct bnx2x *bp)
3142 int port = BP_PORT(bp);
3144 REG_WR(bp,
3145 BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_U_OFFSET(port), BNX2X_BTR);
3146 REG_WR(bp,
3147 BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_C_OFFSET(port), BNX2X_BTR);
3148 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
3149 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
3152 static void bnx2x_init_internal_func(struct bnx2x *bp)
3154 struct tstorm_eth_function_common_config tstorm_config = {0};
3155 struct stats_indication_flags stats_flags = {0};
3156 int port = BP_PORT(bp);
3157 int func = BP_FUNC(bp);
3158 int i, j;
3159 u32 offset;
3160 u16 max_agg_size;
3162 tstorm_config.config_flags = RSS_FLAGS(bp);
3164 if (is_multi(bp))
3165 tstorm_config.rss_result_mask = MULTI_MASK;
3167 /* Enable TPA if needed */
3168 if (bp->flags & TPA_ENABLE_FLAG)
3169 tstorm_config.config_flags |=
3170 TSTORM_ETH_FUNCTION_COMMON_CONFIG_ENABLE_TPA;
3172 if (IS_E1HMF(bp))
3173 tstorm_config.config_flags |=
3174 TSTORM_ETH_FUNCTION_COMMON_CONFIG_E1HOV_IN_CAM;
3176 tstorm_config.leading_client_id = BP_L_ID(bp);
3178 REG_WR(bp, BAR_TSTRORM_INTMEM +
3179 TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
3180 (*(u32 *)&tstorm_config));
3182 bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
3183 bp->rx_mode_cl_mask = (1 << BP_L_ID(bp));
3184 bnx2x_set_storm_rx_mode(bp);
3186 for_each_queue(bp, i) {
3187 u8 cl_id = bp->fp[i].cl_id;
3189 /* reset xstorm per client statistics */
3190 offset = BAR_XSTRORM_INTMEM +
3191 XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
3192 for (j = 0;
3193 j < sizeof(struct xstorm_per_client_stats) / 4; j++)
3194 REG_WR(bp, offset + j*4, 0);
3196 /* reset tstorm per client statistics */
3197 offset = BAR_TSTRORM_INTMEM +
3198 TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
3199 for (j = 0;
3200 j < sizeof(struct tstorm_per_client_stats) / 4; j++)
3201 REG_WR(bp, offset + j*4, 0);
3203 /* reset ustorm per client statistics */
3204 offset = BAR_USTRORM_INTMEM +
3205 USTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
3206 for (j = 0;
3207 j < sizeof(struct ustorm_per_client_stats) / 4; j++)
3208 REG_WR(bp, offset + j*4, 0);
3211 /* Init statistics related context */
3212 stats_flags.collect_eth = 1;
3214 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
3215 ((u32 *)&stats_flags)[0]);
3216 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
3217 ((u32 *)&stats_flags)[1]);
3219 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
3220 ((u32 *)&stats_flags)[0]);
3221 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
3222 ((u32 *)&stats_flags)[1]);
3224 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func),
3225 ((u32 *)&stats_flags)[0]);
3226 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func) + 4,
3227 ((u32 *)&stats_flags)[1]);
3229 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
3230 ((u32 *)&stats_flags)[0]);
3231 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
3232 ((u32 *)&stats_flags)[1]);
3234 REG_WR(bp, BAR_XSTRORM_INTMEM +
3235 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
3236 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
3237 REG_WR(bp, BAR_XSTRORM_INTMEM +
3238 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
3239 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
3241 REG_WR(bp, BAR_TSTRORM_INTMEM +
3242 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
3243 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
3244 REG_WR(bp, BAR_TSTRORM_INTMEM +
3245 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
3246 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
3248 REG_WR(bp, BAR_USTRORM_INTMEM +
3249 USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
3250 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
3251 REG_WR(bp, BAR_USTRORM_INTMEM +
3252 USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
3253 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
3255 if (CHIP_IS_E1H(bp)) {
3256 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
3257 IS_E1HMF(bp));
3258 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
3259 IS_E1HMF(bp));
3260 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
3261 IS_E1HMF(bp));
3262 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
3263 IS_E1HMF(bp));
3265 REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
3266 bp->e1hov);
3269 /* Init CQ ring mapping and aggregation size, the FW limit is 8 frags */
3270 max_agg_size = min_t(u32, (min_t(u32, 8, MAX_SKB_FRAGS) *
3271 SGE_PAGE_SIZE * PAGES_PER_SGE), 0xffff);
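/* Worked example of the cap above: the per-client aggregation size is the
 * smaller of min(8, MAX_SKB_FRAGS) SGE buffers (each covering PAGES_PER_SGE
 * pages of SGE_PAGE_SIZE bytes) and 0xffff, i.e. it is bounded both by the
 * 8-fragment firmware limit and by the 16-bit field it is written into below.
 */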
3272 for_each_queue(bp, i) {
3273 struct bnx2x_fastpath *fp = &bp->fp[i];
3275 REG_WR(bp, BAR_USTRORM_INTMEM +
3276 USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id),
3277 U64_LO(fp->rx_comp_mapping));
3278 REG_WR(bp, BAR_USTRORM_INTMEM +
3279 USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id) + 4,
3280 U64_HI(fp->rx_comp_mapping));
3282 /* Next page */
3283 REG_WR(bp, BAR_USTRORM_INTMEM +
3284 USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id),
3285 U64_LO(fp->rx_comp_mapping + BCM_PAGE_SIZE));
3286 REG_WR(bp, BAR_USTRORM_INTMEM +
3287 USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id) + 4,
3288 U64_HI(fp->rx_comp_mapping + BCM_PAGE_SIZE));
3290 REG_WR16(bp, BAR_USTRORM_INTMEM +
3291 USTORM_MAX_AGG_SIZE_OFFSET(port, fp->cl_id),
3292 max_agg_size);
3295 /* dropless flow control */
3296 if (CHIP_IS_E1H(bp)) {
3297 struct ustorm_eth_rx_pause_data_e1h rx_pause = {0};
3299 rx_pause.bd_thr_low = 250;
3300 rx_pause.cqe_thr_low = 250;
3301 rx_pause.cos = 1;
3302 rx_pause.sge_thr_low = 0;
3303 rx_pause.bd_thr_high = 350;
3304 rx_pause.cqe_thr_high = 350;
3305 rx_pause.sge_thr_high = 0;
3307 for_each_queue(bp, i) {
3308 struct bnx2x_fastpath *fp = &bp->fp[i];
3310 if (!fp->disable_tpa) {
3311 rx_pause.sge_thr_low = 150;
3312 rx_pause.sge_thr_high = 250;
3316 offset = BAR_USTRORM_INTMEM +
3317 USTORM_ETH_RING_PAUSE_DATA_OFFSET(port,
3318 fp->cl_id);
3319 for (j = 0;
3320 j < sizeof(struct ustorm_eth_rx_pause_data_e1h)/4;
3321 j++)
3322 REG_WR(bp, offset + j*4,
3323 ((u32 *)&rx_pause)[j]);
3327 memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));
3329 /* Init rate shaping and fairness contexts */
3330 if (IS_E1HMF(bp)) {
3331 int vn;
3333 /* During init there is no active link;
3334 until link is up, set the link rate to 10Gbps */
3335 bp->link_vars.line_speed = SPEED_10000;
3336 bnx2x_init_port_minmax(bp);
3338 if (!BP_NOMCP(bp))
3339 bp->mf_config =
3340 SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
3341 bnx2x_calc_vn_weight_sum(bp);
3343 for (vn = VN_0; vn < E1HVN_MAX; vn++)
3344 bnx2x_init_vn_minmax(bp, 2*vn + port);
3346 /* Enable rate shaping and fairness */
3347 bp->cmng.flags.cmng_enables |=
3348 CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
3350 } else {
3351 /* rate shaping and fairness are disabled */
3352 DP(NETIF_MSG_IFUP,
3353 "single function mode minmax will be disabled\n");
3357 /* Store cmng structures to internal memory */
3358 if (bp->port.pmf)
3359 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
3360 REG_WR(bp, BAR_XSTRORM_INTMEM +
3361 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
3362 ((u32 *)(&bp->cmng))[i]);
3365 static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
3367 switch (load_code) {
3368 case FW_MSG_CODE_DRV_LOAD_COMMON:
3369 bnx2x_init_internal_common(bp);
3370 /* no break */
3372 case FW_MSG_CODE_DRV_LOAD_PORT:
3373 bnx2x_init_internal_port(bp);
3374 /* no break */
3376 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
3377 bnx2x_init_internal_func(bp);
3378 break;
3380 default:
3381 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
3382 break;
3386 void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
3388 int i;
3390 for_each_queue(bp, i) {
3391 struct bnx2x_fastpath *fp = &bp->fp[i];
3393 fp->bp = bp;
3394 fp->state = BNX2X_FP_STATE_CLOSED;
3395 fp->index = i;
3396 fp->cl_id = BP_L_ID(bp) + i;
3397 #ifdef BCM_CNIC
3398 fp->sb_id = fp->cl_id + 1;
3399 #else
3400 fp->sb_id = fp->cl_id;
3401 #endif
3402 DP(NETIF_MSG_IFUP,
3403 "queue[%d]: bnx2x_init_sb(%p,%p) cl_id %d sb %d\n",
3404 i, bp, fp->status_blk, fp->cl_id, fp->sb_id);
3405 bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping,
3406 fp->sb_id);
3407 bnx2x_update_fpsb_idx(fp);
3410 /* ensure status block indices were read */
3411 rmb();
3414 bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping,
3415 DEF_SB_ID);
3416 bnx2x_update_dsb_idx(bp);
3417 bnx2x_update_coalesce(bp);
3418 bnx2x_init_rx_rings(bp);
3419 bnx2x_init_tx_ring(bp);
3420 bnx2x_init_sp_ring(bp);
3421 bnx2x_init_context(bp);
3422 bnx2x_init_internal(bp, load_code);
3423 bnx2x_init_ind_table(bp);
3424 bnx2x_stats_init(bp);
3426 /* At this point, we are ready for interrupts */
3427 atomic_set(&bp->intr_sem, 0);
3429 /* flush all before enabling interrupts */
3430 mb();
3431 mmiowb();
3433 bnx2x_int_enable(bp);
3435 /* Check for SPIO5 */
3436 bnx2x_attn_int_deasserted0(bp,
3437 REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + BP_PORT(bp)*4) &
3438 AEU_INPUTS_ATTN_BITS_SPIO5);
3441 /* end of nic init */
3444 /* gzip service functions */
3447 static int bnx2x_gunzip_init(struct bnx2x *bp)
3449 bp->gunzip_buf = dma_alloc_coherent(&bp->pdev->dev, FW_BUF_SIZE,
3450 &bp->gunzip_mapping, GFP_KERNEL);
3451 if (bp->gunzip_buf == NULL)
3452 goto gunzip_nomem1;
3454 bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
3455 if (bp->strm == NULL)
3456 goto gunzip_nomem2;
3458 bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
3459 GFP_KERNEL);
3460 if (bp->strm->workspace == NULL)
3461 goto gunzip_nomem3;
3463 return 0;
3465 gunzip_nomem3:
3466 kfree(bp->strm);
3467 bp->strm = NULL;
3469 gunzip_nomem2:
3470 dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
3471 bp->gunzip_mapping);
3472 bp->gunzip_buf = NULL;
3474 gunzip_nomem1:
3475 netdev_err(bp->dev, "Cannot allocate firmware buffer for"
3476 " un-compression\n");
3477 return -ENOMEM;
3480 static void bnx2x_gunzip_end(struct bnx2x *bp)
3482 kfree(bp->strm->workspace);
3484 kfree(bp->strm);
3485 bp->strm = NULL;
3487 if (bp->gunzip_buf) {
3488 dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
3489 bp->gunzip_mapping);
3490 bp->gunzip_buf = NULL;
3494 static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len)
3496 int n, rc;
3498 /* check gzip header */
3499 if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED)) {
3500 BNX2X_ERR("Bad gzip header\n");
3501 return -EINVAL;
3504 n = 10;
3506 #define FNAME 0x8
3508 if (zbuf[3] & FNAME)
3509 while ((zbuf[n++] != 0) && (n < len));
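/* A gzip stream begins with a 10-byte fixed header; if the FNAME flag (bit 3
 * of the flags byte, zbuf[3]) is set, a NUL-terminated original file name
 * follows, which the loop above skips. Decompression is then done in raw
 * deflate mode (negative window bits) because the gzip wrapper has already
 * been consumed by hand.
 */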
3511 bp->strm->next_in = (typeof(bp->strm->next_in))zbuf + n;
3512 bp->strm->avail_in = len - n;
3513 bp->strm->next_out = bp->gunzip_buf;
3514 bp->strm->avail_out = FW_BUF_SIZE;
3516 rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
3517 if (rc != Z_OK)
3518 return rc;
3520 rc = zlib_inflate(bp->strm, Z_FINISH);
3521 if ((rc != Z_OK) && (rc != Z_STREAM_END))
3522 netdev_err(bp->dev, "Firmware decompression error: %s\n",
3523 bp->strm->msg);
3525 bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
3526 if (bp->gunzip_outlen & 0x3)
3527 netdev_err(bp->dev, "Firmware decompression error:"
3528 " gunzip_outlen (%d) not aligned\n",
3529 bp->gunzip_outlen);
3530 bp->gunzip_outlen >>= 2;
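/* From here on gunzip_outlen is kept in 32-bit words rather than bytes
 * (hence the warning above if the decompressed image is not dword-aligned).
 */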
3532 zlib_inflateEnd(bp->strm);
3534 if (rc == Z_STREAM_END)
3535 return 0;
3537 return rc;
3540 /* nic load/unload */
3543 /* General service functions */
3546 /* send a NIG loopback debug packet */
3547 static void bnx2x_lb_pckt(struct bnx2x *bp)
3549 u32 wb_write[3];
3551 /* Ethernet source and destination addresses */
3552 wb_write[0] = 0x55555555;
3553 wb_write[1] = 0x55555555;
3554 wb_write[2] = 0x20; /* SOP */
3555 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
3557 /* NON-IP protocol */
3558 wb_write[0] = 0x09000000;
3559 wb_write[1] = 0x55555555;
3560 wb_write[2] = 0x10; /* EOP, eop_bvalid = 0 */
3561 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
3564 /* some of the internal memories
3565 * are not directly readable from the driver
3566 * to test them we send debug packets */
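/* Roughly, the test below runs in two passes: first a single loopback packet
 * is pushed through with the parser's CFC search credits forced to zero, then
 * ten more are sent and the credit is restored, polling the NIG and PRS
 * packet counters at each step to confirm the packets actually flowed through
 * the internal memories.
 */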
3568 static int bnx2x_int_mem_test(struct bnx2x *bp)
3570 int factor;
3571 int count, i;
3572 u32 val = 0;
3574 if (CHIP_REV_IS_FPGA(bp))
3575 factor = 120;
3576 else if (CHIP_REV_IS_EMUL(bp))
3577 factor = 200;
3578 else
3579 factor = 1;
3581 DP(NETIF_MSG_HW, "start part1\n");
3583 /* Disable inputs of parser neighbor blocks */
3584 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
3585 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
3586 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
3587 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
3589 /* Write 0 to parser credits for CFC search request */
3590 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
3592 /* send Ethernet packet */
3593 bnx2x_lb_pckt(bp);
3595 /* TODO: should the NIG statistics be reset here? */
3596 /* Wait until NIG register shows 1 packet of size 0x10 */
3597 count = 1000 * factor;
3598 while (count) {
3600 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
3601 val = *bnx2x_sp(bp, wb_data[0]);
3602 if (val == 0x10)
3603 break;
3605 msleep(10);
3606 count--;
3608 if (val != 0x10) {
3609 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
3610 return -1;
3613 /* Wait until PRS register shows 1 packet */
3614 count = 1000 * factor;
3615 while (count) {
3616 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
3617 if (val == 1)
3618 break;
3620 msleep(10);
3621 count--;
3623 if (val != 0x1) {
3624 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
3625 return -2;
3628 /* Reset and init BRB, PRS */
3629 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
3630 msleep(50);
3631 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
3632 msleep(50);
3633 bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
3634 bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
3636 DP(NETIF_MSG_HW, "part2\n");
3638 /* Disable inputs of parser neighbor blocks */
3639 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
3640 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
3641 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
3642 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
3644 /* Write 0 to parser credits for CFC search request */
3645 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
3647 /* send 10 Ethernet packets */
3648 for (i = 0; i < 10; i++)
3649 bnx2x_lb_pckt(bp);
3651 /* Wait until NIG register shows 10 + 1
3652 packets of size 11*0x10 = 0xb0 */
3653 count = 1000 * factor;
3654 while (count) {
3656 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
3657 val = *bnx2x_sp(bp, wb_data[0]);
3658 if (val == 0xb0)
3659 break;
3661 msleep(10);
3662 count--;
3664 if (val != 0xb0) {
3665 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
3666 return -3;
3669 /* Wait until PRS register shows 2 packets */
3670 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
3671 if (val != 2)
3672 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
3674 /* Write 1 to parser credits for CFC search request */
3675 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
3677 /* Wait until PRS register shows 3 packets */
3678 msleep(10 * factor);
3679 /* Wait until NIG register shows 1 packet of size 0x10 */
3680 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
3681 if (val != 3)
3682 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
3684 /* clear NIG EOP FIFO */
3685 for (i = 0; i < 11; i++)
3686 REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
3687 val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
3688 if (val != 1) {
3689 BNX2X_ERR("clear of NIG failed\n");
3690 return -4;
3693 /* Reset and init BRB, PRS, NIG */
3694 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
3695 msleep(50);
3696 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
3697 msleep(50);
3698 bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
3699 bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
3700 #ifndef BCM_CNIC
3701 /* set NIC mode */
3702 REG_WR(bp, PRS_REG_NIC_MODE, 1);
3703 #endif
3705 /* Enable inputs of parser neighbor blocks */
3706 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
3707 REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
3708 REG_WR(bp, CFC_REG_DEBUG0, 0x0);
3709 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);
3711 DP(NETIF_MSG_HW, "done\n");
3713 return 0; /* OK */
3716 static void enable_blocks_attention(struct bnx2x *bp)
3718 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
3719 REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
3720 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
3721 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
3722 REG_WR(bp, QM_REG_QM_INT_MASK, 0);
3723 REG_WR(bp, TM_REG_TM_INT_MASK, 0);
3724 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
3725 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
3726 REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
3727 /* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
3728 /* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
3729 REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
3730 REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
3731 REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
3732 /* REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
3733 /* REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
3734 REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
3735 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
3736 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
3737 REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
3738 /* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
3739 /* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
3740 if (CHIP_REV_IS_FPGA(bp))
3741 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
3742 else
3743 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
3744 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
3745 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
3746 REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
3747 /* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
3748 /* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
3749 REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
3750 REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
3751 /* REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
3752 REG_WR(bp, PBF_REG_PBF_INT_MASK, 0X18); /* bit 3,4 masked */
3755 static const struct {
3756 u32 addr;
3757 u32 mask;
3758 } bnx2x_parity_mask[] = {
3759 {PXP_REG_PXP_PRTY_MASK, 0xffffffff},
3760 {PXP2_REG_PXP2_PRTY_MASK_0, 0xffffffff},
3761 {PXP2_REG_PXP2_PRTY_MASK_1, 0xffffffff},
3762 {HC_REG_HC_PRTY_MASK, 0xffffffff},
3763 {MISC_REG_MISC_PRTY_MASK, 0xffffffff},
3764 {QM_REG_QM_PRTY_MASK, 0x0},
3765 {DORQ_REG_DORQ_PRTY_MASK, 0x0},
3766 {GRCBASE_UPB + PB_REG_PB_PRTY_MASK, 0x0},
3767 {GRCBASE_XPB + PB_REG_PB_PRTY_MASK, 0x0},
3768 {SRC_REG_SRC_PRTY_MASK, 0x4}, /* bit 2 */
3769 {CDU_REG_CDU_PRTY_MASK, 0x0},
3770 {CFC_REG_CFC_PRTY_MASK, 0x0},
3771 {DBG_REG_DBG_PRTY_MASK, 0x0},
3772 {DMAE_REG_DMAE_PRTY_MASK, 0x0},
3773 {BRB1_REG_BRB1_PRTY_MASK, 0x0},
3774 {PRS_REG_PRS_PRTY_MASK, (1<<6)},/* bit 6 */
3775 {TSDM_REG_TSDM_PRTY_MASK, 0x18},/* bit 3,4 */
3776 {CSDM_REG_CSDM_PRTY_MASK, 0x8}, /* bit 3 */
3777 {USDM_REG_USDM_PRTY_MASK, 0x38},/* bit 3,4,5 */
3778 {XSDM_REG_XSDM_PRTY_MASK, 0x8}, /* bit 3 */
3779 {TSEM_REG_TSEM_PRTY_MASK_0, 0x0},
3780 {TSEM_REG_TSEM_PRTY_MASK_1, 0x0},
3781 {USEM_REG_USEM_PRTY_MASK_0, 0x0},
3782 {USEM_REG_USEM_PRTY_MASK_1, 0x0},
3783 {CSEM_REG_CSEM_PRTY_MASK_0, 0x0},
3784 {CSEM_REG_CSEM_PRTY_MASK_1, 0x0},
3785 {XSEM_REG_XSEM_PRTY_MASK_0, 0x0},
3786 {XSEM_REG_XSEM_PRTY_MASK_1, 0x0}
3789 static void enable_blocks_parity(struct bnx2x *bp)
3791 int i, mask_arr_len =
3792 sizeof(bnx2x_parity_mask)/(sizeof(bnx2x_parity_mask[0]));
3794 for (i = 0; i < mask_arr_len; i++)
3795 REG_WR(bp, bnx2x_parity_mask[i].addr,
3796 bnx2x_parity_mask[i].mask);
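/* As with the interrupt masks above, a set bit in these PRTY_MASK values
 * keeps the corresponding parity source masked, so the non-zero entries in
 * the table (see the bit comments there) stay disabled while everything else
 * is enabled.
 */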
3800 static void bnx2x_reset_common(struct bnx2x *bp)
3802 /* reset_common */
3803 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
3804 0xd3ffff7f);
3805 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
3808 static void bnx2x_init_pxp(struct bnx2x *bp)
3810 u16 devctl;
3811 int r_order, w_order;
3813 pci_read_config_word(bp->pdev,
3814 bp->pcie_cap + PCI_EXP_DEVCTL, &devctl);
3815 DP(NETIF_MSG_HW, "read 0x%x from devctl\n", devctl);
3816 w_order = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
3817 if (bp->mrrs == -1)
3818 r_order = ((devctl & PCI_EXP_DEVCTL_READRQ) >> 12);
3819 else {
3820 DP(NETIF_MSG_HW, "force read order to %d\n", bp->mrrs);
3821 r_order = bp->mrrs;
3824 bnx2x_init_pxp_arb(bp, r_order, w_order);
3827 static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp)
3829 int is_required;
3830 u32 val;
3831 int port;
3833 if (BP_NOMCP(bp))
3834 return;
3836 is_required = 0;
3837 val = SHMEM_RD(bp, dev_info.shared_hw_config.config2) &
3838 SHARED_HW_CFG_FAN_FAILURE_MASK;
3840 if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED)
3841 is_required = 1;
3844 /* The fan failure mechanism is usually related to the PHY type since
3845 * the power consumption of the board is affected by the PHY. Currently,
3846 * a fan is required for most designs with SFX7101, BCM8727 and BCM8481. */
3848 else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE)
3849 for (port = PORT_0; port < PORT_MAX; port++) {
3850 u32 phy_type =
3851 SHMEM_RD(bp, dev_info.port_hw_config[port].
3852 external_phy_config) &
3853 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
3854 is_required |=
3855 ((phy_type ==
3856 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) ||
3857 (phy_type ==
3858 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727) ||
3859 (phy_type ==
3860 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481));
3863 DP(NETIF_MSG_HW, "fan detection setting: %d\n", is_required);
3865 if (is_required == 0)
3866 return;
3868 /* Fan failure is indicated by SPIO 5 */
3869 bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
3870 MISC_REGISTERS_SPIO_INPUT_HI_Z);
3872 /* set to active low mode */
3873 val = REG_RD(bp, MISC_REG_SPIO_INT);
3874 val |= ((1 << MISC_REGISTERS_SPIO_5) <<
3875 MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
3876 REG_WR(bp, MISC_REG_SPIO_INT, val);
3878 /* enable interrupt to signal the IGU */
3879 val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
3880 val |= (1 << MISC_REGISTERS_SPIO_5);
3881 REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
3884 static int bnx2x_init_common(struct bnx2x *bp)
3886 u32 val, i;
3887 #ifdef BCM_CNIC
3888 u32 wb_write[2];
3889 #endif
3891 DP(BNX2X_MSG_MCP, "starting common init func %d\n", BP_FUNC(bp));
3893 bnx2x_reset_common(bp);
3894 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
3895 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);
3897 bnx2x_init_block(bp, MISC_BLOCK, COMMON_STAGE);
3898 if (CHIP_IS_E1H(bp))
3899 REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp));
3901 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
3902 msleep(30);
3903 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);
3905 bnx2x_init_block(bp, PXP_BLOCK, COMMON_STAGE);
3906 if (CHIP_IS_E1(bp)) {
3907 /* enable HW interrupt from PXP on USDM overflow
3908 bit 16 on INT_MASK_0 */
3909 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
3912 bnx2x_init_block(bp, PXP2_BLOCK, COMMON_STAGE);
3913 bnx2x_init_pxp(bp);
3915 #ifdef __BIG_ENDIAN
3916 REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
3917 REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
3918 REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
3919 REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
3920 REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
3921 /* make sure this value is 0 */
3922 REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);
3924 /* REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
3925 REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
3926 REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
3927 REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
3928 REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
3929 #endif
3931 REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
3932 #ifdef BCM_CNIC
3933 REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
3934 REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
3935 REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
3936 #endif
3938 if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
3939 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
3941 /* let the HW do its magic ... */
3942 msleep(100);
3943 /* finish PXP init */
3944 val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
3945 if (val != 1) {
3946 BNX2X_ERR("PXP2 CFG failed\n");
3947 return -EBUSY;
3949 val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
3950 if (val != 1) {
3951 BNX2X_ERR("PXP2 RD_INIT failed\n");
3952 return -EBUSY;
3955 REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
3956 REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
3958 bnx2x_init_block(bp, DMAE_BLOCK, COMMON_STAGE);
3960 /* clean the DMAE memory */
3961 bp->dmae_ready = 1;
3962 bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);
3964 bnx2x_init_block(bp, TCM_BLOCK, COMMON_STAGE);
3965 bnx2x_init_block(bp, UCM_BLOCK, COMMON_STAGE);
3966 bnx2x_init_block(bp, CCM_BLOCK, COMMON_STAGE);
3967 bnx2x_init_block(bp, XCM_BLOCK, COMMON_STAGE);
3969 bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
3970 bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
3971 bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
3972 bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
3974 bnx2x_init_block(bp, QM_BLOCK, COMMON_STAGE);
3976 #ifdef BCM_CNIC
3977 wb_write[0] = 0;
3978 wb_write[1] = 0;
3979 for (i = 0; i < 64; i++) {
3980 REG_WR(bp, QM_REG_BASEADDR + i*4, 1024 * 4 * (i%16));
3981 bnx2x_init_ind_wr(bp, QM_REG_PTRTBL + i*8, wb_write, 2);
3983 if (CHIP_IS_E1H(bp)) {
3984 REG_WR(bp, QM_REG_BASEADDR_EXT_A + i*4, 1024*4*(i%16));
3985 bnx2x_init_ind_wr(bp, QM_REG_PTRTBL_EXT_A + i*8,
3986 wb_write, 2);
3989 #endif
3990 /* soft reset pulse */
3991 REG_WR(bp, QM_REG_SOFT_RESET, 1);
3992 REG_WR(bp, QM_REG_SOFT_RESET, 0);
3994 #ifdef BCM_CNIC
3995 bnx2x_init_block(bp, TIMERS_BLOCK, COMMON_STAGE);
3996 #endif
3998 bnx2x_init_block(bp, DQ_BLOCK, COMMON_STAGE);
3999 REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT);
4000 if (!CHIP_REV_IS_SLOW(bp)) {
4001 /* enable hw interrupt from doorbell Q */
4002 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
4005 bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
4006 bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
4007 REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
4008 #ifndef BCM_CNIC
4009 /* set NIC mode */
4010 REG_WR(bp, PRS_REG_NIC_MODE, 1);
4011 #endif
4012 if (CHIP_IS_E1H(bp))
4013 REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));
4015 bnx2x_init_block(bp, TSDM_BLOCK, COMMON_STAGE);
4016 bnx2x_init_block(bp, CSDM_BLOCK, COMMON_STAGE);
4017 bnx2x_init_block(bp, USDM_BLOCK, COMMON_STAGE);
4018 bnx2x_init_block(bp, XSDM_BLOCK, COMMON_STAGE);
4020 bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
4021 bnx2x_init_fill(bp, USEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
4022 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
4023 bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
4025 bnx2x_init_block(bp, TSEM_BLOCK, COMMON_STAGE);
4026 bnx2x_init_block(bp, USEM_BLOCK, COMMON_STAGE);
4027 bnx2x_init_block(bp, CSEM_BLOCK, COMMON_STAGE);
4028 bnx2x_init_block(bp, XSEM_BLOCK, COMMON_STAGE);
4030 /* sync semi rtc */
4031 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
4032 0x80000000);
4033 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
4034 0x80000000);
4036 bnx2x_init_block(bp, UPB_BLOCK, COMMON_STAGE);
4037 bnx2x_init_block(bp, XPB_BLOCK, COMMON_STAGE);
4038 bnx2x_init_block(bp, PBF_BLOCK, COMMON_STAGE);
4040 REG_WR(bp, SRC_REG_SOFT_RST, 1);
4041 for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4)
4042 REG_WR(bp, i, random32());
4043 bnx2x_init_block(bp, SRCH_BLOCK, COMMON_STAGE);
4044 #ifdef BCM_CNIC
4045 REG_WR(bp, SRC_REG_KEYSEARCH_0, 0x63285672);
4046 REG_WR(bp, SRC_REG_KEYSEARCH_1, 0x24b8f2cc);
4047 REG_WR(bp, SRC_REG_KEYSEARCH_2, 0x223aef9b);
4048 REG_WR(bp, SRC_REG_KEYSEARCH_3, 0x26001e3a);
4049 REG_WR(bp, SRC_REG_KEYSEARCH_4, 0x7ae91116);
4050 REG_WR(bp, SRC_REG_KEYSEARCH_5, 0x5ce5230b);
4051 REG_WR(bp, SRC_REG_KEYSEARCH_6, 0x298d8adf);
4052 REG_WR(bp, SRC_REG_KEYSEARCH_7, 0x6eb0ff09);
4053 REG_WR(bp, SRC_REG_KEYSEARCH_8, 0x1830f82f);
4054 REG_WR(bp, SRC_REG_KEYSEARCH_9, 0x01e46be7);
4055 #endif
4056 REG_WR(bp, SRC_REG_SOFT_RST, 0);
4058 if (sizeof(union cdu_context) != 1024)
4059 /* we currently assume that a context is 1024 bytes */
4060 dev_alert(&bp->pdev->dev, "please adjust the size "
4061 "of cdu_context(%ld)\n",
4062 (long)sizeof(union cdu_context));
4064 bnx2x_init_block(bp, CDU_BLOCK, COMMON_STAGE);
4065 val = (4 << 24) + (0 << 12) + 1024;
4066 REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
4068 bnx2x_init_block(bp, CFC_BLOCK, COMMON_STAGE);
4069 REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
4070 /* enable context validation interrupt from CFC */
4071 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
4073 /* set the thresholds to prevent CFC/CDU race */
4074 REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);
4076 bnx2x_init_block(bp, HC_BLOCK, COMMON_STAGE);
4077 bnx2x_init_block(bp, MISC_AEU_BLOCK, COMMON_STAGE);
4079 bnx2x_init_block(bp, PXPCS_BLOCK, COMMON_STAGE);
4080 /* Reset PCIE errors for debug */
4081 REG_WR(bp, 0x2814, 0xffffffff);
4082 REG_WR(bp, 0x3820, 0xffffffff);
4084 bnx2x_init_block(bp, EMAC0_BLOCK, COMMON_STAGE);
4085 bnx2x_init_block(bp, EMAC1_BLOCK, COMMON_STAGE);
4086 bnx2x_init_block(bp, DBU_BLOCK, COMMON_STAGE);
4087 bnx2x_init_block(bp, DBG_BLOCK, COMMON_STAGE);
4089 bnx2x_init_block(bp, NIG_BLOCK, COMMON_STAGE);
4090 if (CHIP_IS_E1H(bp)) {
4091 REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp));
4092 REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp));
4095 if (CHIP_REV_IS_SLOW(bp))
4096 msleep(200);
4098 /* finish CFC init */
4099 val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
4100 if (val != 1) {
4101 BNX2X_ERR("CFC LL_INIT failed\n");
4102 return -EBUSY;
4104 val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
4105 if (val != 1) {
4106 BNX2X_ERR("CFC AC_INIT failed\n");
4107 return -EBUSY;
4109 val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
4110 if (val != 1) {
4111 BNX2X_ERR("CFC CAM_INIT failed\n");
4112 return -EBUSY;
4114 REG_WR(bp, CFC_REG_DEBUG0, 0);
4116 /* read NIG statistic
4117 to see if this is our first up since powerup */
4118 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
4119 val = *bnx2x_sp(bp, wb_data[0]);
4121 /* do internal memory self test */
4122 if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) {
4123 BNX2X_ERR("internal mem self test failed\n");
4124 return -EBUSY;
4127 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
4128 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
4129 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
4130 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
4131 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
4132 bp->port.need_hw_lock = 1;
4133 break;
4135 default:
4136 break;
4139 bnx2x_setup_fan_failure_detection(bp);
4141 /* clear PXP2 attentions */
4142 REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
4144 enable_blocks_attention(bp);
4145 if (CHIP_PARITY_SUPPORTED(bp))
4146 enable_blocks_parity(bp);
4148 if (!BP_NOMCP(bp)) {
4149 bnx2x_acquire_phy_lock(bp);
4150 bnx2x_common_init_phy(bp, bp->common.shmem_base);
4151 bnx2x_release_phy_lock(bp);
4152 } else
4153 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
4155 return 0;
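/*
 * Per-port HW init: programs the port-indexed block instances (BRB
 * pause thresholds, PBF credits, HC, NIG, AEU attention masks) and
 * hooks up the external PHY attentions (GPIO3/SPIO5) for this port.
 */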
4158 static int bnx2x_init_port(struct bnx2x *bp)
4160 int port = BP_PORT(bp);
4161 int init_stage = port ? PORT1_STAGE : PORT0_STAGE;
4162 u32 low, high;
4163 u32 val;
4165 DP(BNX2X_MSG_MCP, "starting port init port %d\n", port);
4167 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
4169 bnx2x_init_block(bp, PXP_BLOCK, init_stage);
4170 bnx2x_init_block(bp, PXP2_BLOCK, init_stage);
4172 bnx2x_init_block(bp, TCM_BLOCK, init_stage);
4173 bnx2x_init_block(bp, UCM_BLOCK, init_stage);
4174 bnx2x_init_block(bp, CCM_BLOCK, init_stage);
4175 bnx2x_init_block(bp, XCM_BLOCK, init_stage);
4177 #ifdef BCM_CNIC
4178 REG_WR(bp, QM_REG_CONNNUM_0 + port*4, 1024/16 - 1);
4180 bnx2x_init_block(bp, TIMERS_BLOCK, init_stage);
4181 REG_WR(bp, TM_REG_LIN0_SCAN_TIME + port*4, 20);
4182 REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31);
4183 #endif
4185 bnx2x_init_block(bp, DQ_BLOCK, init_stage);
4187 bnx2x_init_block(bp, BRB1_BLOCK, init_stage);
4188 if (CHIP_REV_IS_SLOW(bp) && !CHIP_IS_E1H(bp)) {
4189 /* no pause for emulation and FPGA */
4190 low = 0;
4191 high = 513;
4192 } else {
4193 if (IS_E1HMF(bp))
4194 low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
4195 else if (bp->dev->mtu > 4096) {
4196 if (bp->flags & ONE_PORT_FLAG)
4197 low = 160;
4198 else {
4199 val = bp->dev->mtu;
4200 /* (24*1024 + val*4)/256 */
4201 low = 96 + (val/64) + ((val % 64) ? 1 : 0);
4203 } else
4204 low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160);
4205 high = low + 56; /* 14*1024/256 */
4207 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
4208 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);
4211 bnx2x_init_block(bp, PRS_BLOCK, init_stage);
4213 bnx2x_init_block(bp, TSDM_BLOCK, init_stage);
4214 bnx2x_init_block(bp, CSDM_BLOCK, init_stage);
4215 bnx2x_init_block(bp, USDM_BLOCK, init_stage);
4216 bnx2x_init_block(bp, XSDM_BLOCK, init_stage);
4218 bnx2x_init_block(bp, TSEM_BLOCK, init_stage);
4219 bnx2x_init_block(bp, USEM_BLOCK, init_stage);
4220 bnx2x_init_block(bp, CSEM_BLOCK, init_stage);
4221 bnx2x_init_block(bp, XSEM_BLOCK, init_stage);
4223 bnx2x_init_block(bp, UPB_BLOCK, init_stage);
4224 bnx2x_init_block(bp, XPB_BLOCK, init_stage);
4226 bnx2x_init_block(bp, PBF_BLOCK, init_stage);
4228 /* configure PBF to work without PAUSE mtu 9000 */
4229 REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
4231 /* update threshold */
4232 REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
4233 /* update init credit */
4234 REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
4236 /* probe changes */
4237 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
4238 msleep(5);
4239 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
4241 #ifdef BCM_CNIC
4242 bnx2x_init_block(bp, SRCH_BLOCK, init_stage);
4243 #endif
4244 bnx2x_init_block(bp, CDU_BLOCK, init_stage);
4245 bnx2x_init_block(bp, CFC_BLOCK, init_stage);
4247 if (CHIP_IS_E1(bp)) {
4248 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
4249 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
4251 bnx2x_init_block(bp, HC_BLOCK, init_stage);
4253 bnx2x_init_block(bp, MISC_AEU_BLOCK, init_stage);
4254 /* init aeu_mask_attn_func_0/1:
4255 * - SF mode: bits 3-7 are masked. only bits 0-2 are in use
4256 * - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
4257 * bits 4-7 are used for "per vn group attention" */
4258 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
4259 (IS_E1HMF(bp) ? 0xF7 : 0x7));
4261 bnx2x_init_block(bp, PXPCS_BLOCK, init_stage);
4262 bnx2x_init_block(bp, EMAC0_BLOCK, init_stage);
4263 bnx2x_init_block(bp, EMAC1_BLOCK, init_stage);
4264 bnx2x_init_block(bp, DBU_BLOCK, init_stage);
4265 bnx2x_init_block(bp, DBG_BLOCK, init_stage);
4267 bnx2x_init_block(bp, NIG_BLOCK, init_stage);
4269 REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
4271 if (CHIP_IS_E1H(bp)) {
4272 /* 0x2 disable e1hov, 0x1 enable */
4273 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
4274 (IS_E1HMF(bp) ? 0x1 : 0x2));
4277 REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
4278 REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
4279 REG_WR(bp, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
4283 bnx2x_init_block(bp, MCP_BLOCK, init_stage);
4284 bnx2x_init_block(bp, DMAE_BLOCK, init_stage);
4286 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
4287 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
4289 u32 swap_val, swap_override, aeu_gpio_mask, offset;
4291 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
4292 MISC_REGISTERS_GPIO_INPUT_HI_Z, port);
4294 /* The GPIO should be swapped if the swap register is
4295 set and active */
4296 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
4297 swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
4299 /* Select function upon port-swap configuration */
4300 if (port == 0) {
4301 offset = MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0;
4302 aeu_gpio_mask = (swap_val && swap_override) ?
4303 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1 :
4304 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0;
4305 } else {
4306 offset = MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0;
4307 aeu_gpio_mask = (swap_val && swap_override) ?
4308 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 :
4309 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1;
4311 val = REG_RD(bp, offset);
4312 /* add GPIO3 to group */
4313 val |= aeu_gpio_mask;
4314 REG_WR(bp, offset, val);
4316 bp->port.need_hw_lock = 1;
4317 break;
4319 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
4320 bp->port.need_hw_lock = 1;
4321 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
4322 /* add SPIO 5 to group 0 */
4324 u32 reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
4325 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
4326 val = REG_RD(bp, reg_addr);
4327 val |= AEU_INPUTS_ATTN_BITS_SPIO5;
4328 REG_WR(bp, reg_addr, val);
4330 break;
4331 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
4332 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
4333 bp->port.need_hw_lock = 1;
4334 break;
4335 default:
4336 break;
4339 bnx2x__link_reset(bp);
4341 return 0;
4344 #define ILT_PER_FUNC (768/2)
4345 #define FUNC_ILT_BASE(func) (func * ILT_PER_FUNC)
4346 /* the phys address is shifted right 12 bits and has a
4347 1=valid bit added to the 53rd bit
4348 then since this is a wide register(TM)
4349 we split it into two 32 bit writes
4350 */
4351 #define ONCHIP_ADDR1(x) ((u32)(((u64)x >> 12) & 0xFFFFFFFF))
4352 #define ONCHIP_ADDR2(x) ((u32)((1 << 20) | ((u64)x >> 44)))
4353 #define PXP_ONE_ILT(x) (((x) << 10) | x)
4354 #define PXP_ILT_RANGE(f, l) (((l) << 10) | f)
4356 #ifdef BCM_CNIC
4357 #define CNIC_ILT_LINES 127
4358 #define CNIC_CTX_PER_ILT 16
4359 #else
4360 #define CNIC_ILT_LINES 0
4361 #endif
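/* Write a single ILT entry: the shifted physical address is split into
 * two 32-bit halves (see ONCHIP_ADDR1/2 above) and written through the
 * wide-bus ONCHIP_AT register, whose base differs between E1 and E1H. */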
4363 static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
4365 int reg;
4367 if (CHIP_IS_E1H(bp))
4368 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
4369 else /* E1 */
4370 reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
4372 bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
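/*
 * Per-function HW init: enables the MSI attention/reconfigure bit in
 * HC_REG_CONFIG, maps this function's ILT lines (the SP context, plus
 * the CNIC timers/QM/searcher tables when BCM_CNIC is set) and does
 * the E1H-only per-function CM/SEM/NIG and HC setup.
 */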
4375 static int bnx2x_init_func(struct bnx2x *bp)
4377 int port = BP_PORT(bp);
4378 int func = BP_FUNC(bp);
4379 u32 addr, val;
4380 int i;
4382 DP(BNX2X_MSG_MCP, "starting func init func %d\n", func);
4384 /* set MSI reconfigure capability */
4385 addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
4386 val = REG_RD(bp, addr);
4387 val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
4388 REG_WR(bp, addr, val);
4390 i = FUNC_ILT_BASE(func);
4392 bnx2x_ilt_wr(bp, i, bnx2x_sp_mapping(bp, context));
4393 if (CHIP_IS_E1H(bp)) {
4394 REG_WR(bp, PXP2_REG_RQ_CDU_FIRST_ILT, i);
4395 REG_WR(bp, PXP2_REG_RQ_CDU_LAST_ILT, i + CNIC_ILT_LINES);
4396 } else /* E1 */
4397 REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4,
4398 PXP_ILT_RANGE(i, i + CNIC_ILT_LINES));
4400 #ifdef BCM_CNIC
4401 i += 1 + CNIC_ILT_LINES;
4402 bnx2x_ilt_wr(bp, i, bp->timers_mapping);
4403 if (CHIP_IS_E1(bp))
4404 REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));
4405 else {
4406 REG_WR(bp, PXP2_REG_RQ_TM_FIRST_ILT, i);
4407 REG_WR(bp, PXP2_REG_RQ_TM_LAST_ILT, i);
4410 i++;
4411 bnx2x_ilt_wr(bp, i, bp->qm_mapping);
4412 if (CHIP_IS_E1(bp))
4413 REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));
4414 else {
4415 REG_WR(bp, PXP2_REG_RQ_QM_FIRST_ILT, i);
4416 REG_WR(bp, PXP2_REG_RQ_QM_LAST_ILT, i);
4419 i++;
4420 bnx2x_ilt_wr(bp, i, bp->t1_mapping);
4421 if (CHIP_IS_E1(bp))
4422 REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
4423 else {
4424 REG_WR(bp, PXP2_REG_RQ_SRC_FIRST_ILT, i);
4425 REG_WR(bp, PXP2_REG_RQ_SRC_LAST_ILT, i);
4428 /* tell the searcher where the T2 table is */
4429 REG_WR(bp, SRC_REG_COUNTFREE0 + port*4, 16*1024/64);
4431 bnx2x_wb_wr(bp, SRC_REG_FIRSTFREE0 + port*16,
4432 U64_LO(bp->t2_mapping), U64_HI(bp->t2_mapping));
4434 bnx2x_wb_wr(bp, SRC_REG_LASTFREE0 + port*16,
4435 U64_LO((u64)bp->t2_mapping + 16*1024 - 64),
4436 U64_HI((u64)bp->t2_mapping + 16*1024 - 64));
4438 REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + port*4, 10);
4439 #endif
4441 if (CHIP_IS_E1H(bp)) {
4442 bnx2x_init_block(bp, MISC_BLOCK, FUNC0_STAGE + func);
4443 bnx2x_init_block(bp, TCM_BLOCK, FUNC0_STAGE + func);
4444 bnx2x_init_block(bp, UCM_BLOCK, FUNC0_STAGE + func);
4445 bnx2x_init_block(bp, CCM_BLOCK, FUNC0_STAGE + func);
4446 bnx2x_init_block(bp, XCM_BLOCK, FUNC0_STAGE + func);
4447 bnx2x_init_block(bp, TSEM_BLOCK, FUNC0_STAGE + func);
4448 bnx2x_init_block(bp, USEM_BLOCK, FUNC0_STAGE + func);
4449 bnx2x_init_block(bp, CSEM_BLOCK, FUNC0_STAGE + func);
4450 bnx2x_init_block(bp, XSEM_BLOCK, FUNC0_STAGE + func);
4452 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
4453 REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov);
4456 /* HC init per function */
4457 if (CHIP_IS_E1H(bp)) {
4458 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
4460 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
4461 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
4463 bnx2x_init_block(bp, HC_BLOCK, FUNC0_STAGE + func);
4465 /* Reset PCIE errors for debug */
4466 REG_WR(bp, 0x2114, 0xffffffff);
4467 REG_WR(bp, 0x2120, 0xffffffff);
4469 return 0;
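/*
 * Top-level HW init entry point.  The MCP load_code says how much of
 * the chip this driver instance owns: the COMMON case falls through to
 * PORT, which falls through to FUNCTION, so a COMMON load initializes
 * all three levels.
 */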
4472 int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
4474 int i, rc = 0;
4476 DP(BNX2X_MSG_MCP, "function %d load_code %x\n",
4477 BP_FUNC(bp), load_code);
4479 bp->dmae_ready = 0;
4480 mutex_init(&bp->dmae_mutex);
4481 rc = bnx2x_gunzip_init(bp);
4482 if (rc)
4483 return rc;
4485 switch (load_code) {
4486 case FW_MSG_CODE_DRV_LOAD_COMMON:
4487 rc = bnx2x_init_common(bp);
4488 if (rc)
4489 goto init_hw_err;
4490 /* no break */
4492 case FW_MSG_CODE_DRV_LOAD_PORT:
4493 bp->dmae_ready = 1;
4494 rc = bnx2x_init_port(bp);
4495 if (rc)
4496 goto init_hw_err;
4497 /* no break */
4499 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
4500 bp->dmae_ready = 1;
4501 rc = bnx2x_init_func(bp);
4502 if (rc)
4503 goto init_hw_err;
4504 break;
4506 default:
4507 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
4508 break;
4511 if (!BP_NOMCP(bp)) {
4512 int func = BP_FUNC(bp);
4514 bp->fw_drv_pulse_wr_seq =
4515 (SHMEM_RD(bp, func_mb[func].drv_pulse_mb) &
4516 DRV_PULSE_SEQ_MASK);
4517 DP(BNX2X_MSG_MCP, "drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
4520 /* this needs to be done before gunzip end */
4521 bnx2x_zero_def_sb(bp);
4522 for_each_queue(bp, i)
4523 bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
4524 #ifdef BCM_CNIC
4525 bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
4526 #endif
4528 init_hw_err:
4529 bnx2x_gunzip_end(bp);
4531 return rc;
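/*
 * Release everything bnx2x_alloc_mem() obtained: per-queue status
 * blocks and rx/tx rings (DMA-coherent or vmalloc'ed), the default
 * status block, the slowpath area, the CNIC tables and the SPQ ring.
 */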
4534 void bnx2x_free_mem(struct bnx2x *bp)
4537 #define BNX2X_PCI_FREE(x, y, size) \
4538 do { \
4539 if (x) { \
4540 dma_free_coherent(&bp->pdev->dev, size, x, y); \
4541 x = NULL; \
4542 y = 0; \
4544 } while (0)
4546 #define BNX2X_FREE(x) \
4547 do { \
4548 if (x) { \
4549 vfree(x); \
4550 x = NULL; \
4552 } while (0)
4554 int i;
4556 /* fastpath */
4557 /* Common */
4558 for_each_queue(bp, i) {
4560 /* status blocks */
4561 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
4562 bnx2x_fp(bp, i, status_blk_mapping),
4563 sizeof(struct host_status_block));
4565 /* Rx */
4566 for_each_queue(bp, i) {
4568 /* fastpath rx rings: rx_buf rx_desc rx_comp */
4569 BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
4570 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
4571 bnx2x_fp(bp, i, rx_desc_mapping),
4572 sizeof(struct eth_rx_bd) * NUM_RX_BD);
4574 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
4575 bnx2x_fp(bp, i, rx_comp_mapping),
4576 sizeof(struct eth_fast_path_rx_cqe) *
4577 NUM_RCQ_BD);
4579 /* SGE ring */
4580 BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
4581 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
4582 bnx2x_fp(bp, i, rx_sge_mapping),
4583 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
4585 /* Tx */
4586 for_each_queue(bp, i) {
4588 /* fastpath tx rings: tx_buf tx_desc */
4589 BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
4590 BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
4591 bnx2x_fp(bp, i, tx_desc_mapping),
4592 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
4594 /* end of fastpath */
4596 BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
4597 sizeof(struct host_def_status_block));
4599 BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
4600 sizeof(struct bnx2x_slowpath));
4602 #ifdef BCM_CNIC
4603 BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
4604 BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
4605 BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
4606 BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
4607 BNX2X_PCI_FREE(bp->cnic_sb, bp->cnic_sb_mapping,
4608 sizeof(struct host_status_block));
4609 #endif
4610 BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
4612 #undef BNX2X_PCI_FREE
4613 #undef BNX2X_FREE
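/*
 * Allocate all driver memory: DMA-coherent areas via BNX2X_PCI_ALLOC
 * and host-only rings via BNX2X_ALLOC (vmalloc).  On any failure the
 * partial allocation is rolled back and -ENOMEM is returned.
 */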
4616 int bnx2x_alloc_mem(struct bnx2x *bp)
4619 #define BNX2X_PCI_ALLOC(x, y, size) \
4620 do { \
4621 x = dma_alloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \
4622 if (x == NULL) \
4623 goto alloc_mem_err; \
4624 memset(x, 0, size); \
4625 } while (0)
4627 #define BNX2X_ALLOC(x, size) \
4628 do { \
4629 x = vmalloc(size); \
4630 if (x == NULL) \
4631 goto alloc_mem_err; \
4632 memset(x, 0, size); \
4633 } while (0)
4635 int i;
4637 /* fastpath */
4638 /* Common */
4639 for_each_queue(bp, i) {
4640 bnx2x_fp(bp, i, bp) = bp;
4642 /* status blocks */
4643 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
4644 &bnx2x_fp(bp, i, status_blk_mapping),
4645 sizeof(struct host_status_block));
4647 /* Rx */
4648 for_each_queue(bp, i) {
4650 /* fastpath rx rings: rx_buf rx_desc rx_comp */
4651 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
4652 sizeof(struct sw_rx_bd) * NUM_RX_BD);
4653 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
4654 &bnx2x_fp(bp, i, rx_desc_mapping),
4655 sizeof(struct eth_rx_bd) * NUM_RX_BD);
4657 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
4658 &bnx2x_fp(bp, i, rx_comp_mapping),
4659 sizeof(struct eth_fast_path_rx_cqe) *
4660 NUM_RCQ_BD);
4662 /* SGE ring */
4663 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
4664 sizeof(struct sw_rx_page) * NUM_RX_SGE);
4665 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
4666 &bnx2x_fp(bp, i, rx_sge_mapping),
4667 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
4669 /* Tx */
4670 for_each_queue(bp, i) {
4672 /* fastpath tx rings: tx_buf tx_desc */
4673 BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
4674 sizeof(struct sw_tx_bd) * NUM_TX_BD);
4675 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
4676 &bnx2x_fp(bp, i, tx_desc_mapping),
4677 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
4679 /* end of fastpath */
4681 BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
4682 sizeof(struct host_def_status_block));
4684 BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
4685 sizeof(struct bnx2x_slowpath));
4687 #ifdef BCM_CNIC
4688 BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);
4690 /* allocate searcher T2 table
4691 we allocate 1/4 of alloc num for T2
4692 (which is not entered into the ILT) */
4693 BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);
4695 /* Initialize T2 (for 1024 connections) */
4696 for (i = 0; i < 16*1024; i += 64)
4697 *(u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;
4699 /* Timer block array (8*MAX_CONN) phys uncached for now 1024 conns */
4700 BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);
4702 /* QM queues (128*MAX_CONN) */
4703 BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);
4705 BNX2X_PCI_ALLOC(bp->cnic_sb, &bp->cnic_sb_mapping,
4706 sizeof(struct host_status_block));
4707 #endif
4709 /* Slow path ring */
4710 BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);
4712 return 0;
4714 alloc_mem_err:
4715 bnx2x_free_mem(bp);
4716 return -ENOMEM;
4718 #undef BNX2X_PCI_ALLOC
4719 #undef BNX2X_ALLOC
4724 * Init service functions
4728 * Sets a MAC in a CAM for a few L2 Clients for E1 chip
4730 * @param bp driver descriptor
4731 * @param set set or clear an entry (1 or 0)
4732 * @param mac pointer to a buffer containing a MAC
4733 * @param cl_bit_vec bit vector of clients to register a MAC for
4734 * @param cam_offset offset in a CAM to use
4735 * @param with_bcast set broadcast MAC as well
4737 static void bnx2x_set_mac_addr_e1_gen(struct bnx2x *bp, int set, u8 *mac,
4738 u32 cl_bit_vec, u8 cam_offset,
4739 u8 with_bcast)
4741 struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
4742 int port = BP_PORT(bp);
4744 /* CAM allocation
4745 * unicasts 0-31:port0 32-63:port1
4746 * multicast 64-127:port0 128-191:port1
4748 config->hdr.length = 1 + (with_bcast ? 1 : 0);
4749 config->hdr.offset = cam_offset;
4750 config->hdr.client_id = 0xff;
4751 config->hdr.reserved1 = 0;
4753 /* primary MAC */
4754 config->config_table[0].cam_entry.msb_mac_addr =
4755 swab16(*(u16 *)&mac[0]);
4756 config->config_table[0].cam_entry.middle_mac_addr =
4757 swab16(*(u16 *)&mac[2]);
4758 config->config_table[0].cam_entry.lsb_mac_addr =
4759 swab16(*(u16 *)&mac[4]);
4760 config->config_table[0].cam_entry.flags = cpu_to_le16(port);
4761 if (set)
4762 config->config_table[0].target_table_entry.flags = 0;
4763 else
4764 CAM_INVALIDATE(config->config_table[0]);
4765 config->config_table[0].target_table_entry.clients_bit_vector =
4766 cpu_to_le32(cl_bit_vec);
4767 config->config_table[0].target_table_entry.vlan_id = 0;
4769 DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)\n",
4770 (set ? "setting" : "clearing"),
4771 config->config_table[0].cam_entry.msb_mac_addr,
4772 config->config_table[0].cam_entry.middle_mac_addr,
4773 config->config_table[0].cam_entry.lsb_mac_addr);
4775 /* broadcast */
4776 if (with_bcast) {
4777 config->config_table[1].cam_entry.msb_mac_addr =
4778 cpu_to_le16(0xffff);
4779 config->config_table[1].cam_entry.middle_mac_addr =
4780 cpu_to_le16(0xffff);
4781 config->config_table[1].cam_entry.lsb_mac_addr =
4782 cpu_to_le16(0xffff);
4783 config->config_table[1].cam_entry.flags = cpu_to_le16(port);
4784 if (set)
4785 config->config_table[1].target_table_entry.flags =
4786 TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
4787 else
4788 CAM_INVALIDATE(config->config_table[1]);
4789 config->config_table[1].target_table_entry.clients_bit_vector =
4790 cpu_to_le32(cl_bit_vec);
4791 config->config_table[1].target_table_entry.vlan_id = 0;
4794 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
4795 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
4796 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
4800 * Sets a MAC in a CAM for a few L2 Clients for E1H chip
4802 * @param bp driver descriptor
4803 * @param set set or clear an entry (1 or 0)
4804 * @param mac pointer to a buffer containing a MAC
4805 * @param cl_bit_vec bit vector of clients to register a MAC for
4806 * @param cam_offset offset in a CAM to use
4808 static void bnx2x_set_mac_addr_e1h_gen(struct bnx2x *bp, int set, u8 *mac,
4809 u32 cl_bit_vec, u8 cam_offset)
4811 struct mac_configuration_cmd_e1h *config =
4812 (struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config);
4814 config->hdr.length = 1;
4815 config->hdr.offset = cam_offset;
4816 config->hdr.client_id = 0xff;
4817 config->hdr.reserved1 = 0;
4819 /* primary MAC */
4820 config->config_table[0].msb_mac_addr =
4821 swab16(*(u16 *)&mac[0]);
4822 config->config_table[0].middle_mac_addr =
4823 swab16(*(u16 *)&mac[2]);
4824 config->config_table[0].lsb_mac_addr =
4825 swab16(*(u16 *)&mac[4]);
4826 config->config_table[0].clients_bit_vector =
4827 cpu_to_le32(cl_bit_vec);
4828 config->config_table[0].vlan_id = 0;
4829 config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov);
4830 if (set)
4831 config->config_table[0].flags = BP_PORT(bp);
4832 else
4833 config->config_table[0].flags =
4834 MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE;
4836 DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x) E1HOV %d CLID mask %d\n",
4837 (set ? "setting" : "clearing"),
4838 config->config_table[0].msb_mac_addr,
4839 config->config_table[0].middle_mac_addr,
4840 config->config_table[0].lsb_mac_addr, bp->e1hov, cl_bit_vec);
4842 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
4843 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
4844 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
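/*
 * Poll or sleep until *state_p reaches the expected ramrod completion
 * state (it is updated from bnx2x_sp_event()).  Up to 5000 iterations
 * of 1 ms; in poll mode the rx completions are processed in-line.
 */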
4847 static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
4848 int *state_p, int poll)
4850 /* can take a while if any port is running */
4851 int cnt = 5000;
4853 DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
4854 poll ? "polling" : "waiting", state, idx);
4856 might_sleep();
4857 while (cnt--) {
4858 if (poll) {
4859 bnx2x_rx_int(bp->fp, 10);
4860 /* if index is different from 0
4861 * the reply for some commands will
4862 * be on the non default queue
4864 if (idx)
4865 bnx2x_rx_int(&bp->fp[idx], 10);
4868 mb(); /* state is changed by bnx2x_sp_event() */
4869 if (*state_p == state) {
4870 #ifdef BNX2X_STOP_ON_ERROR
4871 DP(NETIF_MSG_IFUP, "exit (cnt %d)\n", 5000 - cnt);
4872 #endif
4873 return 0;
4876 msleep(1);
4878 if (bp->panic)
4879 return -EIO;
4882 /* timeout! */
4883 BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
4884 poll ? "polling" : "waiting", state, idx);
4885 #ifdef BNX2X_STOP_ON_ERROR
4886 bnx2x_panic();
4887 #endif
4889 return -EBUSY;
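/* Program (or, when !set, invalidate) this function's primary Ethernet
 * MAC in the E1H CAM and wait for the SET_MAC ramrod to complete. */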
4892 void bnx2x_set_eth_mac_addr_e1h(struct bnx2x *bp, int set)
4894 bp->set_mac_pending++;
4895 smp_wmb();
4897 bnx2x_set_mac_addr_e1h_gen(bp, set, bp->dev->dev_addr,
4898 (1 << bp->fp->cl_id), BP_FUNC(bp));
4900 /* Wait for a completion */
4901 bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
4904 void bnx2x_set_eth_mac_addr_e1(struct bnx2x *bp, int set)
4906 bp->set_mac_pending++;
4907 smp_wmb();
4909 bnx2x_set_mac_addr_e1_gen(bp, set, bp->dev->dev_addr,
4910 (1 << bp->fp->cl_id), (BP_PORT(bp) ? 32 : 0),
4913 /* Wait for a completion */
4914 bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
4917 #ifdef BCM_CNIC
4919 * Set iSCSI MAC(s) at the next entries in the CAM after the ETH
4920 * MAC(s). This function will wait until the ramrod completion
4921 * returns.
4923 * @param bp driver handle
4924 * @param set set or clear the CAM entry
4926 * @return 0 on success, -ENODEV if ramrod doesn't return.
4928 int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp, int set)
4930 u32 cl_bit_vec = (1 << BCM_ISCSI_ETH_CL_ID);
4932 bp->set_mac_pending++;
4933 smp_wmb();
4935 /* Send a SET_MAC ramrod */
4936 if (CHIP_IS_E1(bp))
4937 bnx2x_set_mac_addr_e1_gen(bp, set, bp->iscsi_mac,
4938 cl_bit_vec, (BP_PORT(bp) ? 32 : 0) + 2,
4940 else
4941 /* CAM allocation for E1H
4942 * unicasts: by func number
4943 * multicast: 20+FUNC*20, 20 each
4945 bnx2x_set_mac_addr_e1h_gen(bp, set, bp->iscsi_mac,
4946 cl_bit_vec, E1H_FUNC_MAX + BP_FUNC(bp));
4948 /* Wait for a completion when setting */
4949 bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
4951 return 0;
4953 #endif
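/* Bring up the leading (default) connection: ack the IGU status block
 * and post the PORT_SETUP ramrod, then wait for BNX2X_STATE_OPEN. */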
4955 int bnx2x_setup_leading(struct bnx2x *bp)
4957 int rc;
4959 /* reset IGU state */
4960 bnx2x_ack_sb(bp, bp->fp[0].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4962 /* SETUP ramrod */
4963 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);
4965 /* Wait for completion */
4966 rc = bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0);
4968 return rc;
4971 int bnx2x_setup_multi(struct bnx2x *bp, int index)
4973 struct bnx2x_fastpath *fp = &bp->fp[index];
4975 /* reset IGU state */
4976 bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4978 /* SETUP ramrod */
4979 fp->state = BNX2X_FP_STATE_OPENING;
4980 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0,
4981 fp->cl_id, 0);
4983 /* Wait for completion */
4984 return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
4985 &(fp->state), 0);
4989 void bnx2x_set_num_queues_msix(struct bnx2x *bp)
4992 switch (bp->multi_mode) {
4993 case ETH_RSS_MODE_DISABLED:
4994 bp->num_queues = 1;
4995 break;
4997 case ETH_RSS_MODE_REGULAR:
4998 if (num_queues)
4999 bp->num_queues = min_t(u32, num_queues,
5000 BNX2X_MAX_QUEUES(bp));
5001 else
5002 bp->num_queues = min_t(u32, num_online_cpus(),
5003 BNX2X_MAX_QUEUES(bp));
5004 break;
5007 default:
5008 bp->num_queues = 1;
5009 break;
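/* Tear down a non-leading connection: post the HALT ramrod and wait for
 * HALTED, then post CFC_DEL and wait for the queue to report CLOSED. */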
5015 static int bnx2x_stop_multi(struct bnx2x *bp, int index)
5017 struct bnx2x_fastpath *fp = &bp->fp[index];
5018 int rc;
5020 /* halt the connection */
5021 fp->state = BNX2X_FP_STATE_HALTING;
5022 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, fp->cl_id, 0);
5024 /* Wait for completion */
5025 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
5026 &(fp->state), 1);
5027 if (rc) /* timeout */
5028 return rc;
5030 /* delete cfc entry */
5031 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);
5033 /* Wait for completion */
5034 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index,
5035 &(fp->state), 1);
5036 return rc;
5039 static int bnx2x_stop_leading(struct bnx2x *bp)
5041 __le16 dsb_sp_prod_idx;
5042 /* if the other port is handling traffic,
5043 this can take a lot of time */
5044 int cnt = 500;
5045 int rc;
5047 might_sleep();
5049 /* Send HALT ramrod */
5050 bp->fp[0].state = BNX2X_FP_STATE_HALTING;
5051 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, bp->fp->cl_id, 0);
5053 /* Wait for completion */
5054 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
5055 &(bp->fp[0].state), 1);
5056 if (rc) /* timeout */
5057 return rc;
5059 dsb_sp_prod_idx = *bp->dsb_sp_prod;
5061 /* Send PORT_DELETE ramrod */
5062 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);
5064 /* Wait for completion to arrive on default status block
5065 we are going to reset the chip anyway
5066 so there is not much to do if this times out
5068 while (dsb_sp_prod_idx == *bp->dsb_sp_prod) {
5069 if (!cnt) {
5070 DP(NETIF_MSG_IFDOWN, "timeout waiting for port del "
5071 "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
5072 *bp->dsb_sp_prod, dsb_sp_prod_idx);
5073 #ifdef BNX2X_STOP_ON_ERROR
5074 bnx2x_panic();
5075 #endif
5076 rc = -EBUSY;
5077 break;
5079 cnt--;
5080 msleep(1);
5081 rmb(); /* Refresh the dsb_sp_prod */
5083 bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
5084 bp->fp[0].state = BNX2X_FP_STATE_CLOSED;
5086 return rc;
5089 static void bnx2x_reset_func(struct bnx2x *bp)
5091 int port = BP_PORT(bp);
5092 int func = BP_FUNC(bp);
5093 int base, i;
5095 /* Configure IGU */
5096 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
5097 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
5099 #ifdef BCM_CNIC
5100 /* Disable Timer scan */
5101 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
5103 * Wait for at least 10ms and up to 2 second for the timers scan to
5104 * complete
5106 for (i = 0; i < 200; i++) {
5107 msleep(10);
5108 if (!REG_RD(bp, TM_REG_LIN0_SCAN_ON + port*4))
5109 break;
5111 #endif
5112 /* Clear ILT */
5113 base = FUNC_ILT_BASE(func);
5114 for (i = base; i < base + ILT_PER_FUNC; i++)
5115 bnx2x_ilt_wr(bp, i, 0);
5118 static void bnx2x_reset_port(struct bnx2x *bp)
5120 int port = BP_PORT(bp);
5121 u32 val;
5123 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
5125 /* Do not rcv packets to BRB */
5126 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
5127 /* Do not direct rcv packets that are not for MCP to the BRB */
5128 REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
5129 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
5131 /* Configure AEU */
5132 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
5134 msleep(100);
5135 /* Check for BRB port occupancy */
5136 val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
5137 if (val)
5138 DP(NETIF_MSG_IFDOWN,
5139 "BRB1 is not empty %d blocks are occupied\n", val);
5141 /* TODO: Close Doorbell port? */
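/* Reset as much of the chip as the MCP reset_code allows: UNLOAD_COMMON
 * resets port, function and common blocks, UNLOAD_PORT resets port and
 * function, UNLOAD_FUNCTION resets only the function. */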
5144 static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
5146 DP(BNX2X_MSG_MCP, "function %d reset_code %x\n",
5147 BP_FUNC(bp), reset_code);
5149 switch (reset_code) {
5150 case FW_MSG_CODE_DRV_UNLOAD_COMMON:
5151 bnx2x_reset_port(bp);
5152 bnx2x_reset_func(bp);
5153 bnx2x_reset_common(bp);
5154 break;
5156 case FW_MSG_CODE_DRV_UNLOAD_PORT:
5157 bnx2x_reset_port(bp);
5158 bnx2x_reset_func(bp);
5159 break;
5161 case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
5162 bnx2x_reset_func(bp);
5163 break;
5165 default:
5166 BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
5167 break;
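/*
 * Graceful shutdown on unload: drain the tx queues, clear the MAC CAM
 * (E1) or the LLH/MC hash entries (E1H), pick a WoL-dependent unload
 * request code for the MCP, close all connections, reset the chip and
 * finally report UNLOAD_DONE.
 */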
5171 void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode)
5173 int port = BP_PORT(bp);
5174 u32 reset_code = 0;
5175 int i, cnt, rc;
5177 /* Wait until tx fastpath tasks complete */
5178 for_each_queue(bp, i) {
5179 struct bnx2x_fastpath *fp = &bp->fp[i];
5181 cnt = 1000;
5182 while (bnx2x_has_tx_work_unload(fp)) {
5184 bnx2x_tx_int(fp);
5185 if (!cnt) {
5186 BNX2X_ERR("timeout waiting for queue[%d]\n",
5188 #ifdef BNX2X_STOP_ON_ERROR
5189 bnx2x_panic();
5190 return -EBUSY;
5191 #else
5192 break;
5193 #endif
5195 cnt--;
5196 msleep(1);
5199 /* Give HW time to discard old tx messages */
5200 msleep(1);
5202 if (CHIP_IS_E1(bp)) {
5203 struct mac_configuration_cmd *config =
5204 bnx2x_sp(bp, mcast_config);
5206 bnx2x_set_eth_mac_addr_e1(bp, 0);
5208 for (i = 0; i < config->hdr.length; i++)
5209 CAM_INVALIDATE(config->config_table[i]);
5211 config->hdr.length = i;
5212 if (CHIP_REV_IS_SLOW(bp))
5213 config->hdr.offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
5214 else
5215 config->hdr.offset = BNX2X_MAX_MULTICAST*(1 + port);
5216 config->hdr.client_id = bp->fp->cl_id;
5217 config->hdr.reserved1 = 0;
5219 bp->set_mac_pending++;
5220 smp_wmb();
5222 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
5223 U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
5224 U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);
5226 } else { /* E1H */
5227 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
5229 bnx2x_set_eth_mac_addr_e1h(bp, 0);
5231 for (i = 0; i < MC_HASH_SIZE; i++)
5232 REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
5234 REG_WR(bp, MISC_REG_E1HMF_MODE, 0);
5236 #ifdef BCM_CNIC
5237 /* Clear iSCSI L2 MAC */
5238 mutex_lock(&bp->cnic_mutex);
5239 if (bp->cnic_flags & BNX2X_CNIC_FLAG_MAC_SET) {
5240 bnx2x_set_iscsi_eth_mac_addr(bp, 0);
5241 bp->cnic_flags &= ~BNX2X_CNIC_FLAG_MAC_SET;
5243 mutex_unlock(&bp->cnic_mutex);
5244 #endif
5246 if (unload_mode == UNLOAD_NORMAL)
5247 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
5249 else if (bp->flags & NO_WOL_FLAG)
5250 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
5252 else if (bp->wol) {
5253 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
5254 u8 *mac_addr = bp->dev->dev_addr;
5255 u32 val;
5256 /* The mac address is written to entries 1-4 to
5257 preserve entry 0 which is used by the PMF */
5258 u8 entry = (BP_E1HVN(bp) + 1)*8;
5260 val = (mac_addr[0] << 8) | mac_addr[1];
5261 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
5263 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
5264 (mac_addr[4] << 8) | mac_addr[5];
5265 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
5267 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
5269 } else
5270 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
5272 /* Close multi and leading connections
5273 Completions for ramrods are collected in a synchronous way */
5274 for_each_nondefault_queue(bp, i)
5275 if (bnx2x_stop_multi(bp, i))
5276 goto unload_error;
5278 rc = bnx2x_stop_leading(bp);
5279 if (rc) {
5280 BNX2X_ERR("Stop leading failed!\n");
5281 #ifdef BNX2X_STOP_ON_ERROR
5282 return -EBUSY;
5283 #else
5284 goto unload_error;
5285 #endif
5288 unload_error:
5289 if (!BP_NOMCP(bp))
5290 reset_code = bnx2x_fw_command(bp, reset_code);
5291 else {
5292 DP(NETIF_MSG_IFDOWN, "NO MCP - load counts %d, %d, %d\n",
5293 load_count[0], load_count[1], load_count[2]);
5294 load_count[0]--;
5295 load_count[1 + port]--;
5296 DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts %d, %d, %d\n",
5297 load_count[0], load_count[1], load_count[2]);
5298 if (load_count[0] == 0)
5299 reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
5300 else if (load_count[1 + port] == 0)
5301 reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
5302 else
5303 reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
5306 if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
5307 (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
5308 bnx2x__link_reset(bp);
5310 /* Reset the chip */
5311 bnx2x_reset_chip(bp, reset_code);
5313 /* Report UNLOAD_DONE to MCP */
5314 if (!BP_NOMCP(bp))
5315 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
5319 void bnx2x_disable_close_the_gate(struct bnx2x *bp)
5321 u32 val;
5323 DP(NETIF_MSG_HW, "Disabling \"close the gates\"\n");
5325 if (CHIP_IS_E1(bp)) {
5326 int port = BP_PORT(bp);
5327 u32 addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
5328 MISC_REG_AEU_MASK_ATTN_FUNC_0;
5330 val = REG_RD(bp, addr);
5331 val &= ~(0x300);
5332 REG_WR(bp, addr, val);
5333 } else if (CHIP_IS_E1H(bp)) {
5334 val = REG_RD(bp, MISC_REG_AEU_GENERAL_MASK);
5335 val &= ~(MISC_AEU_GENERAL_MASK_REG_AEU_PXP_CLOSE_MASK |
5336 MISC_AEU_GENERAL_MASK_REG_AEU_NIG_CLOSE_MASK);
5337 REG_WR(bp, MISC_REG_AEU_GENERAL_MASK, val);
5342 /* Close gates #2, #3 and #4: */
5343 static void bnx2x_set_234_gates(struct bnx2x *bp, bool close)
5345 u32 val, addr;
5347 /* Gates #2 and #4a are closed/opened for "not E1" only */
5348 if (!CHIP_IS_E1(bp)) {
5349 /* #4 */
5350 val = REG_RD(bp, PXP_REG_HST_DISCARD_DOORBELLS);
5351 REG_WR(bp, PXP_REG_HST_DISCARD_DOORBELLS,
5352 close ? (val | 0x1) : (val & (~(u32)1)));
5353 /* #2 */
5354 val = REG_RD(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES);
5355 REG_WR(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES,
5356 close ? (val | 0x1) : (val & (~(u32)1)));
5359 /* #3 */
5360 addr = BP_PORT(bp) ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
5361 val = REG_RD(bp, addr);
5362 REG_WR(bp, addr, (!close) ? (val | 0x1) : (val & (~(u32)1)));
5364 DP(NETIF_MSG_HW, "%s gates #2, #3 and #4\n",
5365 close ? "closing" : "opening");
5366 mmiowb();
5369 #define SHARED_MF_CLP_MAGIC 0x80000000 /* `magic' bit */
5371 static void bnx2x_clp_reset_prep(struct bnx2x *bp, u32 *magic_val)
5373 /* Do some magic... */
5374 u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
5375 *magic_val = val & SHARED_MF_CLP_MAGIC;
5376 MF_CFG_WR(bp, shared_mf_config.clp_mb, val | SHARED_MF_CLP_MAGIC);
5379 /* Restore the value of the `magic' bit.
5381 * @param bp Driver handle.
5382 * @param magic_val Old value of the `magic' bit.
5384 static void bnx2x_clp_reset_done(struct bnx2x *bp, u32 magic_val)
5386 /* Restore the `magic' bit value... */
5387 /* u32 val = SHMEM_RD(bp, mf_cfg.shared_mf_config.clp_mb);
5388 SHMEM_WR(bp, mf_cfg.shared_mf_config.clp_mb,
5389 (val & (~SHARED_MF_CLP_MAGIC)) | magic_val); */
5390 u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
5391 MF_CFG_WR(bp, shared_mf_config.clp_mb,
5392 (val & (~SHARED_MF_CLP_MAGIC)) | magic_val);
5395 /* Prepares for MCP reset: takes care of CLP configurations.
5397 * @param bp
5398 * @param magic_val Old value of 'magic' bit.
5400 static void bnx2x_reset_mcp_prep(struct bnx2x *bp, u32 *magic_val)
5402 u32 shmem;
5403 u32 validity_offset;
5405 DP(NETIF_MSG_HW, "Starting\n");
5407 /* Set `magic' bit in order to save MF config */
5408 if (!CHIP_IS_E1(bp))
5409 bnx2x_clp_reset_prep(bp, magic_val);
5411 /* Get shmem offset */
5412 shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
5413 validity_offset = offsetof(struct shmem_region, validity_map[0]);
5415 /* Clear validity map flags */
5416 if (shmem > 0)
5417 REG_WR(bp, shmem + validity_offset, 0);
5420 #define MCP_TIMEOUT 5000 /* 5 seconds (in ms) */
5421 #define MCP_ONE_TIMEOUT 100 /* 100 ms */
5423 /* Waits for MCP_ONE_TIMEOUT or MCP_ONE_TIMEOUT*10,
5424 * depending on the HW type.
5426 * @param bp
5428 static inline void bnx2x_mcp_wait_one(struct bnx2x *bp)
5430 /* special handling for emulation and FPGA,
5431 wait 10 times longer */
5432 if (CHIP_REV_IS_SLOW(bp))
5433 msleep(MCP_ONE_TIMEOUT*10);
5434 else
5435 msleep(MCP_ONE_TIMEOUT);
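/* Wait (up to MCP_TIMEOUT) for the MCP to repopulate the shmem validity
 * map after the reset, then restore the CLP `magic' bit on non-E1. */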
5438 static int bnx2x_reset_mcp_comp(struct bnx2x *bp, u32 magic_val)
5440 u32 shmem, cnt, validity_offset, val;
5441 int rc = 0;
5443 msleep(100);
5445 /* Get shmem offset */
5446 shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
5447 if (shmem == 0) {
5448 BNX2X_ERR("Shmem 0 return failure\n");
5449 rc = -ENOTTY;
5450 goto exit_lbl;
5453 validity_offset = offsetof(struct shmem_region, validity_map[0]);
5455 /* Wait for MCP to come up */
5456 for (cnt = 0; cnt < (MCP_TIMEOUT / MCP_ONE_TIMEOUT); cnt++) {
5457 /* TBD: it's best to check validity map of last port.
5458 * currently checks on port 0.
5460 val = REG_RD(bp, shmem + validity_offset);
5461 DP(NETIF_MSG_HW, "shmem 0x%x validity map(0x%x)=0x%x\n", shmem,
5462 shmem + validity_offset, val);
5464 /* check that shared memory is valid. */
5465 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
5466 == (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
5467 break;
5469 bnx2x_mcp_wait_one(bp);
5472 DP(NETIF_MSG_HW, "Cnt=%d Shmem validity map 0x%x\n", cnt, val);
5474 /* Check that shared memory is valid. This indicates that MCP is up. */
5475 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) !=
5476 (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) {
5477 BNX2X_ERR("Shmem signature not present. MCP is not up !!\n");
5478 rc = -ENOTTY;
5479 goto exit_lbl;
5482 exit_lbl:
5483 /* Restore the `magic' bit value */
5484 if (!CHIP_IS_E1(bp))
5485 bnx2x_clp_reset_done(bp, magic_val);
5487 return rc;
5490 static void bnx2x_pxp_prep(struct bnx2x *bp)
5492 if (!CHIP_IS_E1(bp)) {
5493 REG_WR(bp, PXP2_REG_RD_START_INIT, 0);
5494 REG_WR(bp, PXP2_REG_RQ_RBC_DONE, 0);
5495 REG_WR(bp, PXP2_REG_RQ_CFG_DONE, 0);
5496 mmiowb();
5501 * Reset the whole chip except for:
5502 * - PCIE core
5503 * - PCI Glue, PSWHST, PXP/PXP2 RF (all controlled by
5504 * one reset bit)
5505 * - IGU
5506 * - MISC (including AEU)
5507 * - GRC
5508 * - RBCN, RBCP
5510 static void bnx2x_process_kill_chip_reset(struct bnx2x *bp)
5512 u32 not_reset_mask1, reset_mask1, not_reset_mask2, reset_mask2;
5514 not_reset_mask1 =
5515 MISC_REGISTERS_RESET_REG_1_RST_HC |
5516 MISC_REGISTERS_RESET_REG_1_RST_PXPV |
5517 MISC_REGISTERS_RESET_REG_1_RST_PXP;
5519 not_reset_mask2 =
5520 MISC_REGISTERS_RESET_REG_2_RST_MDIO |
5521 MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE |
5522 MISC_REGISTERS_RESET_REG_2_RST_EMAC1_HARD_CORE |
5523 MISC_REGISTERS_RESET_REG_2_RST_MISC_CORE |
5524 MISC_REGISTERS_RESET_REG_2_RST_RBCN |
5525 MISC_REGISTERS_RESET_REG_2_RST_GRC |
5526 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_REG_HARD_CORE |
5527 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_HARD_CORE_RST_B;
5529 reset_mask1 = 0xffffffff;
5531 if (CHIP_IS_E1(bp))
5532 reset_mask2 = 0xffff;
5533 else
5534 reset_mask2 = 0x1ffff;
5536 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5537 reset_mask1 & (~not_reset_mask1));
5538 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
5539 reset_mask2 & (~not_reset_mask2));
5541 barrier();
5542 mmiowb();
5544 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, reset_mask1);
5545 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, reset_mask2);
5546 mmiowb();
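/*
 * "Process kill" - the hard recovery path: wait for the PXP Tetris
 * buffer to drain, close gates #2-#4, reset the whole chip except the
 * blocks listed above, wait for the MCP to come back up and re-open
 * the gates.
 */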
5549 static int bnx2x_process_kill(struct bnx2x *bp)
5551 int cnt = 1000;
5552 u32 val = 0;
5553 u32 sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1, pgl_exp_rom2;
5556 /* Empty the Tetris buffer, wait for 1s */
5557 do {
5558 sr_cnt = REG_RD(bp, PXP2_REG_RD_SR_CNT);
5559 blk_cnt = REG_RD(bp, PXP2_REG_RD_BLK_CNT);
5560 port_is_idle_0 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_0);
5561 port_is_idle_1 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_1);
5562 pgl_exp_rom2 = REG_RD(bp, PXP2_REG_PGL_EXP_ROM2);
5563 if ((sr_cnt == 0x7e) && (blk_cnt == 0xa0) &&
5564 ((port_is_idle_0 & 0x1) == 0x1) &&
5565 ((port_is_idle_1 & 0x1) == 0x1) &&
5566 (pgl_exp_rom2 == 0xffffffff))
5567 break;
5568 msleep(1);
5569 } while (cnt-- > 0);
5571 if (cnt <= 0) {
5572 DP(NETIF_MSG_HW, "Tetris buffer didn't get empty or there"
5573 " are still"
5574 " outstanding read requests after 1s!\n");
5575 DP(NETIF_MSG_HW, "sr_cnt=0x%08x, blk_cnt=0x%08x,"
5576 " port_is_idle_0=0x%08x,"
5577 " port_is_idle_1=0x%08x, pgl_exp_rom2=0x%08x\n",
5578 sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1,
5579 pgl_exp_rom2);
5580 return -EAGAIN;
5583 barrier();
5585 /* Close gates #2, #3 and #4 */
5586 bnx2x_set_234_gates(bp, true);
5588 /* TBD: Indicate that "process kill" is in progress to MCP */
5590 /* Clear "unprepared" bit */
5591 REG_WR(bp, MISC_REG_UNPREPARED, 0);
5592 barrier();
5594 /* Make sure all is written to the chip before the reset */
5595 mmiowb();
5597 /* Wait for 1ms to empty GLUE and PCI-E core queues,
5598 * PSWHST, GRC and PSWRD Tetris buffer.
5600 msleep(1);
5602 /* Prepare to chip reset: */
5603 /* MCP */
5604 bnx2x_reset_mcp_prep(bp, &val);
5606 /* PXP */
5607 bnx2x_pxp_prep(bp);
5608 barrier();
5610 /* reset the chip */
5611 bnx2x_process_kill_chip_reset(bp);
5612 barrier();
5614 /* Recover after reset: */
5615 /* MCP */
5616 if (bnx2x_reset_mcp_comp(bp, val))
5617 return -EAGAIN;
5619 /* PXP */
5620 bnx2x_pxp_prep(bp);
5622 /* Open the gates #2, #3 and #4 */
5623 bnx2x_set_234_gates(bp, false);
5625 /* TBD: IGU/AEU preparation bring back the AEU/IGU to a
5626 * reset state, re-enable attentions. */
5628 return 0;
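/* Leader side of the parity recovery: run the "process kill" sequence
 * and, on success, clear the reset-in-progress flag before releasing
 * the leader lock. */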
5631 static int bnx2x_leader_reset(struct bnx2x *bp)
5633 int rc = 0;
5634 /* Try to recover after the failure */
5635 if (bnx2x_process_kill(bp)) {
5636 printk(KERN_ERR "%s: Something bad has happened! Aii!\n",
5637 bp->dev->name);
5638 rc = -EAGAIN;
5639 goto exit_leader_reset;
5642 /* Clear "reset is in progress" bit and update the driver state */
5643 bnx2x_set_reset_done(bp);
5644 bp->recovery_state = BNX2X_RECOVERY_DONE;
5646 exit_leader_reset:
5647 bp->is_leader = 0;
5648 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESERVED_08);
5649 smp_wmb();
5650 return rc;
5653 /* Assumption: runs under rtnl lock. This together with the fact
5654 * that it's called only from bnx2x_reset_task() ensures that it
5655 * will never be called when netif_running(bp->dev) is false.
5657 static void bnx2x_parity_recover(struct bnx2x *bp)
5659 DP(NETIF_MSG_HW, "Handling parity\n");
5660 while (1) {
5661 switch (bp->recovery_state) {
5662 case BNX2X_RECOVERY_INIT:
5663 DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_INIT\n");
5664 /* Try to get a LEADER_LOCK HW lock */
5665 if (bnx2x_trylock_hw_lock(bp,
5666 HW_LOCK_RESOURCE_RESERVED_08))
5667 bp->is_leader = 1;
5669 /* Stop the driver */
5670 /* If interface has been removed - break */
5671 if (bnx2x_nic_unload(bp, UNLOAD_RECOVERY))
5672 return;
5674 bp->recovery_state = BNX2X_RECOVERY_WAIT;
5675 /* Ensure "is_leader" and "recovery_state"
5676 * update values are seen on other CPUs
5678 smp_wmb();
5679 break;
5681 case BNX2X_RECOVERY_WAIT:
5682 DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_WAIT\n");
5683 if (bp->is_leader) {
5684 u32 load_counter = bnx2x_get_load_cnt(bp);
5685 if (load_counter) {
5686 /* Wait until all other functions get
5687 * down.
5689 schedule_delayed_work(&bp->reset_task,
5690 HZ/10);
5691 return;
5692 } else {
5693 /* If all other functions got down -
5694 * try to bring the chip back to
5695 * normal. In any case it's an exit
5696 * point for a leader.
5698 if (bnx2x_leader_reset(bp) ||
5699 bnx2x_nic_load(bp, LOAD_NORMAL)) {
5700 printk(KERN_ERR"%s: Recovery "
5701 "has failed. Power cycle is "
5702 "needed.\n", bp->dev->name);
5703 /* Disconnect this device */
5704 netif_device_detach(bp->dev);
5705 /* Block ifup for all function
5706 * of this ASIC until
5707 * "process kill" or power
5708 * cycle.
5710 bnx2x_set_reset_in_progress(bp);
5711 /* Shut down the power */
5712 bnx2x_set_power_state(bp,
5713 PCI_D3hot);
5714 return;
5717 return;
5719 } else { /* non-leader */
5720 if (!bnx2x_reset_is_done(bp)) {
5721 /* Try to get a LEADER_LOCK HW lock as
5722 * long as a former leader may have
5723 * been unloaded by the user or
5724 * released a leadership by another
5725 * reason.
5727 if (bnx2x_trylock_hw_lock(bp,
5728 HW_LOCK_RESOURCE_RESERVED_08)) {
5729 /* I'm a leader now! Restart a
5730 * switch case.
5732 bp->is_leader = 1;
5733 break;
5736 schedule_delayed_work(&bp->reset_task,
5737 HZ/10);
5738 return;
5740 } else { /* A leader has completed
5741 * the "process kill". It's an exit
5742 * point for a non-leader.
5744 bnx2x_nic_load(bp, LOAD_NORMAL);
5745 bp->recovery_state =
5746 BNX2X_RECOVERY_DONE;
5747 smp_wmb();
5748 return;
5751 default:
5752 return;
5757 /* bnx2x_nic_unload() flushes the bnx2x_wq, thus reset task is
5758 * scheduled on a general queue in order to prevent a deadlock.
5760 static void bnx2x_reset_task(struct work_struct *work)
5762 struct bnx2x *bp = container_of(work, struct bnx2x, reset_task.work);
5764 #ifdef BNX2X_STOP_ON_ERROR
5765 BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
5766 " so reset not done to allow debug dump,\n"
5767 KERN_ERR " you will need to reboot when done\n");
5768 return;
5769 #endif
5771 rtnl_lock();
5773 if (!netif_running(bp->dev))
5774 goto reset_task_exit;
5776 if (unlikely(bp->recovery_state != BNX2X_RECOVERY_DONE))
5777 bnx2x_parity_recover(bp);
5778 else {
5779 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
5780 bnx2x_nic_load(bp, LOAD_NORMAL);
5783 reset_task_exit:
5784 rtnl_unlock();
5787 /* end of nic load/unload */
5790 * Init service functions
5793 static inline u32 bnx2x_get_pretend_reg(struct bnx2x *bp, int func)
5795 switch (func) {
5796 case 0: return PXP2_REG_PGL_PRETEND_FUNC_F0;
5797 case 1: return PXP2_REG_PGL_PRETEND_FUNC_F1;
5798 case 2: return PXP2_REG_PGL_PRETEND_FUNC_F2;
5799 case 3: return PXP2_REG_PGL_PRETEND_FUNC_F3;
5800 case 4: return PXP2_REG_PGL_PRETEND_FUNC_F4;
5801 case 5: return PXP2_REG_PGL_PRETEND_FUNC_F5;
5802 case 6: return PXP2_REG_PGL_PRETEND_FUNC_F6;
5803 case 7: return PXP2_REG_PGL_PRETEND_FUNC_F7;
5804 default:
5805 BNX2X_ERR("Unsupported function index: %d\n", func);
5806 return (u32)(-1);
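/* Disable HC interrupts while pretending to be function 0, so that the
 * "like-E1" interrupt-disable sequence also works on E1H multi-function
 * devices; the pretend register is restored afterwards. */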
5810 static void bnx2x_undi_int_disable_e1h(struct bnx2x *bp, int orig_func)
5812 u32 reg = bnx2x_get_pretend_reg(bp, orig_func), new_val;
5814 /* Flush all outstanding writes */
5815 mmiowb();
5817 /* Pretend to be function 0 */
5818 REG_WR(bp, reg, 0);
5819 /* Flush the GRC transaction (in the chip) */
5820 new_val = REG_RD(bp, reg);
5821 if (new_val != 0) {
5822 BNX2X_ERR("Hmmm... Pretend register wasn't updated: (0,%d)!\n",
5823 new_val);
5824 BUG();
5827 /* From now we are in the "like-E1" mode */
5828 bnx2x_int_disable(bp);
5830 /* Flush all outstanding writes */
5831 mmiowb();
5833 /* Restore the original function settings */
5834 REG_WR(bp, reg, orig_func);
5835 new_val = REG_RD(bp, reg);
5836 if (new_val != orig_func) {
5837 BNX2X_ERR("Hmmm... Pretend register wasn't updated: (%d,%d)!\n",
5838 orig_func, new_val);
5839 BUG();
5843 static inline void bnx2x_undi_int_disable(struct bnx2x *bp, int func)
5845 if (CHIP_IS_E1H(bp))
5846 bnx2x_undi_int_disable_e1h(bp, func);
5847 else
5848 bnx2x_int_disable(bp);
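/*
 * If a pre-boot UNDI driver left the device initialized (normal bell
 * CID offset == 0x7), unload it cleanly: issue unload requests for both
 * ports, quiesce the BRB, reset the chip while preserving the NIG
 * port-swap straps, and restore this function's fw_seq.
 */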
5851 static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
5853 u32 val;
5855 /* Check if there is any driver already loaded */
5856 val = REG_RD(bp, MISC_REG_UNPREPARED);
5857 if (val == 0x1) {
5858 /* Check if it is the UNDI driver
5859 * UNDI driver initializes CID offset for normal bell to 0x7
5861 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
5862 val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
5863 if (val == 0x7) {
5864 u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
5865 /* save our func */
5866 int func = BP_FUNC(bp);
5867 u32 swap_en;
5868 u32 swap_val;
5870 /* clear the UNDI indication */
5871 REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
5873 BNX2X_DEV_INFO("UNDI is active! reset device\n");
5875 /* try unload UNDI on port 0 */
5876 bp->func = 0;
5877 bp->fw_seq =
5878 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
5879 DRV_MSG_SEQ_NUMBER_MASK);
5880 reset_code = bnx2x_fw_command(bp, reset_code);
5882 /* if UNDI is loaded on the other port */
5883 if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {
5885 /* send "DONE" for previous unload */
5886 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
5888 /* unload UNDI on port 1 */
5889 bp->func = 1;
5890 bp->fw_seq =
5891 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
5892 DRV_MSG_SEQ_NUMBER_MASK);
5893 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
5895 bnx2x_fw_command(bp, reset_code);
5898 /* now it's safe to release the lock */
5899 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
5901 bnx2x_undi_int_disable(bp, func);
5903 /* close input traffic and wait for it */
5904 /* Do not rcv packets to BRB */
5905 REG_WR(bp,
5906 (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
5907 NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
5908 /* Do not direct rcv packets that are not for MCP to
5909 * the BRB */
5910 REG_WR(bp,
5911 (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
5912 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
5913 /* clear AEU */
5914 REG_WR(bp,
5915 (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
5916 MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
5917 msleep(10);
5919 /* save NIG port swap info */
5920 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
5921 swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
5922 /* reset device */
5923 REG_WR(bp,
5924 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5925 0xd3ffffff);
5926 REG_WR(bp,
5927 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
5928 0x1403);
5929 /* take the NIG out of reset and restore swap values */
5930 REG_WR(bp,
5931 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
5932 MISC_REGISTERS_RESET_REG_1_RST_NIG);
5933 REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
5934 REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);
5936 /* send unload done to the MCP */
5937 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
5939 /* restore our func and fw_seq */
5940 bp->func = func;
5941 bp->fw_seq =
5942 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
5943 DRV_MSG_SEQ_NUMBER_MASK);
5945 } else
5946 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
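/*
 * Read the chip id, flash size, shmem base and bootcode version from
 * the hardware and shared memory into bp->common; flags the device as
 * MCP-less or WoL-incapable where appropriate and warns if the
 * bootcode is older than BNX2X_BC_VER.
 */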
5950 static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
5952 u32 val, val2, val3, val4, id;
5953 u16 pmc;
5955 /* Get the chip revision id and number. */
5956 /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
5957 val = REG_RD(bp, MISC_REG_CHIP_NUM);
5958 id = ((val & 0xffff) << 16);
5959 val = REG_RD(bp, MISC_REG_CHIP_REV);
5960 id |= ((val & 0xf) << 12);
5961 val = REG_RD(bp, MISC_REG_CHIP_METAL);
5962 id |= ((val & 0xff) << 4);
5963 val = REG_RD(bp, MISC_REG_BOND_ID);
5964 id |= (val & 0xf);
5965 bp->common.chip_id = id;
5966 bp->link_params.chip_id = bp->common.chip_id;
5967 BNX2X_DEV_INFO("chip ID is 0x%x\n", id);
5969 val = (REG_RD(bp, 0x2874) & 0x55);
5970 if ((bp->common.chip_id & 0x1) ||
5971 (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) {
5972 bp->flags |= ONE_PORT_FLAG;
5973 BNX2X_DEV_INFO("single port device\n");
5976 val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
5977 bp->common.flash_size = (NVRAM_1MB_SIZE <<
5978 (val & MCPR_NVM_CFG4_FLASH_SIZE));
5979 BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
5980 bp->common.flash_size, bp->common.flash_size);
5982 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
5983 bp->common.shmem2_base = REG_RD(bp, MISC_REG_GENERIC_CR_0);
5984 bp->link_params.shmem_base = bp->common.shmem_base;
5985 BNX2X_DEV_INFO("shmem offset 0x%x shmem2 offset 0x%x\n",
5986 bp->common.shmem_base, bp->common.shmem2_base);
5988 if (!bp->common.shmem_base ||
5989 (bp->common.shmem_base < 0xA0000) ||
5990 (bp->common.shmem_base >= 0xC0000)) {
5991 BNX2X_DEV_INFO("MCP not active\n");
5992 bp->flags |= NO_MCP_FLAG;
5993 return;
5996 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
5997 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
5998 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
5999 BNX2X_ERROR("BAD MCP validity signature\n");
6001 bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
6002 BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config);
6004 bp->link_params.hw_led_mode = ((bp->common.hw_config &
6005 SHARED_HW_CFG_LED_MODE_MASK) >>
6006 SHARED_HW_CFG_LED_MODE_SHIFT);
6008 bp->link_params.feature_config_flags = 0;
6009 val = SHMEM_RD(bp, dev_info.shared_feature_config.config);
6010 if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED)
6011 bp->link_params.feature_config_flags |=
6012 FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
6013 else
6014 bp->link_params.feature_config_flags &=
6015 ~FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
6017 val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
6018 bp->common.bc_ver = val;
6019 BNX2X_DEV_INFO("bc_ver %X\n", val);
6020 if (val < BNX2X_BC_VER) {
6021 /* for now only warn
6022 * later we might need to enforce this */
6023 BNX2X_ERROR("This driver needs bc_ver %X but found %X, "
6024 "please upgrade BC\n", BNX2X_BC_VER, val);
6026 bp->link_params.feature_config_flags |=
6027 (val >= REQ_BC_VER_4_VRFY_OPT_MDL) ?
6028 FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY : 0;
6030 if (BP_E1HVN(bp) == 0) {
6031 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
6032 bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
6033 } else {
6034 /* no WOL capability for E1HVN != 0 */
6035 bp->flags |= NO_WOL_FLAG;
6037 BNX2X_DEV_INFO("%sWoL capable\n",
6038 (bp->flags & NO_WOL_FLAG) ? "not " : "");
6040 val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
6041 val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
6042 val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
6043 val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
6045 dev_info(&bp->pdev->dev, "part number %X-%X-%X-%X\n",
6046 val, val2, val3, val4);
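/* Build bp->port.supported from the switch configuration and the external
 * PHY type, then mask it with the NVRAM speed capability mask.
 */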
6049 static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
6050 u32 switch_cfg)
6052 int port = BP_PORT(bp);
6053 u32 ext_phy_type;
6055 switch (switch_cfg) {
6056 case SWITCH_CFG_1G:
6057 BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg);
6059 ext_phy_type =
6060 SERDES_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
6061 switch (ext_phy_type) {
6062 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
6063 BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
6064 ext_phy_type);
6066 bp->port.supported |= (SUPPORTED_10baseT_Half |
6067 SUPPORTED_10baseT_Full |
6068 SUPPORTED_100baseT_Half |
6069 SUPPORTED_100baseT_Full |
6070 SUPPORTED_1000baseT_Full |
6071 SUPPORTED_2500baseX_Full |
6072 SUPPORTED_TP |
6073 SUPPORTED_FIBRE |
6074 SUPPORTED_Autoneg |
6075 SUPPORTED_Pause |
6076 SUPPORTED_Asym_Pause);
6077 break;
6079 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
6080 BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n",
6081 ext_phy_type);
6083 bp->port.supported |= (SUPPORTED_10baseT_Half |
6084 SUPPORTED_10baseT_Full |
6085 SUPPORTED_100baseT_Half |
6086 SUPPORTED_100baseT_Full |
6087 SUPPORTED_1000baseT_Full |
6088 SUPPORTED_TP |
6089 SUPPORTED_FIBRE |
6090 SUPPORTED_Autoneg |
6091 SUPPORTED_Pause |
6092 SUPPORTED_Asym_Pause);
6093 break;
6095 default:
6096 BNX2X_ERR("NVRAM config error. "
6097 "BAD SerDes ext_phy_config 0x%x\n",
6098 bp->link_params.ext_phy_config);
6099 return;
6102 bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
6103 port*0x10);
6104 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
6105 break;
6107 case SWITCH_CFG_10G:
6108 BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg);
6110 ext_phy_type =
6111 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
6112 switch (ext_phy_type) {
6113 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
6114 BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
6115 ext_phy_type);
6117 bp->port.supported |= (SUPPORTED_10baseT_Half |
6118 SUPPORTED_10baseT_Full |
6119 SUPPORTED_100baseT_Half |
6120 SUPPORTED_100baseT_Full |
6121 SUPPORTED_1000baseT_Full |
6122 SUPPORTED_2500baseX_Full |
6123 SUPPORTED_10000baseT_Full |
6124 SUPPORTED_TP |
6125 SUPPORTED_FIBRE |
6126 SUPPORTED_Autoneg |
6127 SUPPORTED_Pause |
6128 SUPPORTED_Asym_Pause);
6129 break;
6131 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
6132 BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n",
6133 ext_phy_type);
6135 bp->port.supported |= (SUPPORTED_10000baseT_Full |
6136 SUPPORTED_1000baseT_Full |
6137 SUPPORTED_FIBRE |
6138 SUPPORTED_Autoneg |
6139 SUPPORTED_Pause |
6140 SUPPORTED_Asym_Pause);
6141 break;
6143 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
6144 BNX2X_DEV_INFO("ext_phy_type 0x%x (8073)\n",
6145 ext_phy_type);
6147 bp->port.supported |= (SUPPORTED_10000baseT_Full |
6148 SUPPORTED_2500baseX_Full |
6149 SUPPORTED_1000baseT_Full |
6150 SUPPORTED_FIBRE |
6151 SUPPORTED_Autoneg |
6152 SUPPORTED_Pause |
6153 SUPPORTED_Asym_Pause);
6154 break;
6156 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
6157 BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n",
6158 ext_phy_type);
6160 bp->port.supported |= (SUPPORTED_10000baseT_Full |
6161 SUPPORTED_FIBRE |
6162 SUPPORTED_Pause |
6163 SUPPORTED_Asym_Pause);
6164 break;
6166 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
6167 BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n",
6168 ext_phy_type);
6170 bp->port.supported |= (SUPPORTED_10000baseT_Full |
6171 SUPPORTED_1000baseT_Full |
6172 SUPPORTED_FIBRE |
6173 SUPPORTED_Pause |
6174 SUPPORTED_Asym_Pause);
6175 break;
6177 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
6178 BNX2X_DEV_INFO("ext_phy_type 0x%x (8726)\n",
6179 ext_phy_type);
6181 bp->port.supported |= (SUPPORTED_10000baseT_Full |
6182 SUPPORTED_1000baseT_Full |
6183 SUPPORTED_Autoneg |
6184 SUPPORTED_FIBRE |
6185 SUPPORTED_Pause |
6186 SUPPORTED_Asym_Pause);
6187 break;
6189 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
6190 BNX2X_DEV_INFO("ext_phy_type 0x%x (8727)\n",
6191 ext_phy_type);
6193 bp->port.supported |= (SUPPORTED_10000baseT_Full |
6194 SUPPORTED_1000baseT_Full |
6195 SUPPORTED_Autoneg |
6196 SUPPORTED_FIBRE |
6197 SUPPORTED_Pause |
6198 SUPPORTED_Asym_Pause);
6199 break;
6201 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
6202 BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n",
6203 ext_phy_type);
6205 bp->port.supported |= (SUPPORTED_10000baseT_Full |
6206 SUPPORTED_TP |
6207 SUPPORTED_Autoneg |
6208 SUPPORTED_Pause |
6209 SUPPORTED_Asym_Pause);
6210 break;
6212 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
6213 BNX2X_DEV_INFO("ext_phy_type 0x%x (BCM8481)\n",
6214 ext_phy_type);
6216 bp->port.supported |= (SUPPORTED_10baseT_Half |
6217 SUPPORTED_10baseT_Full |
6218 SUPPORTED_100baseT_Half |
6219 SUPPORTED_100baseT_Full |
6220 SUPPORTED_1000baseT_Full |
6221 SUPPORTED_10000baseT_Full |
6222 SUPPORTED_TP |
6223 SUPPORTED_Autoneg |
6224 SUPPORTED_Pause |
6225 SUPPORTED_Asym_Pause);
6226 break;
6228 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
6229 BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
6230 bp->link_params.ext_phy_config);
6231 break;
6233 default:
6234 BNX2X_ERR("NVRAM config error. "
6235 "BAD XGXS ext_phy_config 0x%x\n",
6236 bp->link_params.ext_phy_config);
6237 return;
6240 bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
6241 port*0x18);
6242 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
6244 break;
6246 default:
6247 BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
6248 bp->port.link_config);
6249 return;
6251 bp->link_params.phy_addr = bp->port.phy_addr;
6253 /* mask what we support according to speed_cap_mask */
6254 if (!(bp->link_params.speed_cap_mask &
6255 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
6256 bp->port.supported &= ~SUPPORTED_10baseT_Half;
6258 if (!(bp->link_params.speed_cap_mask &
6259 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
6260 bp->port.supported &= ~SUPPORTED_10baseT_Full;
6262 if (!(bp->link_params.speed_cap_mask &
6263 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
6264 bp->port.supported &= ~SUPPORTED_100baseT_Half;
6266 if (!(bp->link_params.speed_cap_mask &
6267 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
6268 bp->port.supported &= ~SUPPORTED_100baseT_Full;
6270 if (!(bp->link_params.speed_cap_mask &
6271 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
6272 bp->port.supported &= ~(SUPPORTED_1000baseT_Half |
6273 SUPPORTED_1000baseT_Full);
6275 if (!(bp->link_params.speed_cap_mask &
6276 PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
6277 bp->port.supported &= ~SUPPORTED_2500baseX_Full;
6279 if (!(bp->link_params.speed_cap_mask &
6280 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
6281 bp->port.supported &= ~SUPPORTED_10000baseT_Full;
6283 BNX2X_DEV_INFO("supported 0x%x\n", bp->port.supported);
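/* Translate the NVRAM link_config into the requested line speed, duplex,
 * advertised modes and flow control settings.
 */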
6286 static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
6288 bp->link_params.req_duplex = DUPLEX_FULL;
6290 switch (bp->port.link_config & PORT_FEATURE_LINK_SPEED_MASK) {
6291 case PORT_FEATURE_LINK_SPEED_AUTO:
6292 if (bp->port.supported & SUPPORTED_Autoneg) {
6293 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
6294 bp->port.advertising = bp->port.supported;
6295 } else {
6296 u32 ext_phy_type =
6297 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
6299 if ((ext_phy_type ==
6300 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
6301 (ext_phy_type ==
6302 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706)) {
6303 /* force 10G, no AN */
6304 bp->link_params.req_line_speed = SPEED_10000;
6305 bp->port.advertising =
6306 (ADVERTISED_10000baseT_Full |
6307 ADVERTISED_FIBRE);
6308 break;
6310 BNX2X_ERR("NVRAM config error. "
6311 "Invalid link_config 0x%x"
6312 " Autoneg not supported\n",
6313 bp->port.link_config);
6314 return;
6316 break;
6318 case PORT_FEATURE_LINK_SPEED_10M_FULL:
6319 if (bp->port.supported & SUPPORTED_10baseT_Full) {
6320 bp->link_params.req_line_speed = SPEED_10;
6321 bp->port.advertising = (ADVERTISED_10baseT_Full |
6322 ADVERTISED_TP);
6323 } else {
6324 BNX2X_ERROR("NVRAM config error. "
6325 "Invalid link_config 0x%x"
6326 " speed_cap_mask 0x%x\n",
6327 bp->port.link_config,
6328 bp->link_params.speed_cap_mask);
6329 return;
6331 break;
6333 case PORT_FEATURE_LINK_SPEED_10M_HALF:
6334 if (bp->port.supported & SUPPORTED_10baseT_Half) {
6335 bp->link_params.req_line_speed = SPEED_10;
6336 bp->link_params.req_duplex = DUPLEX_HALF;
6337 bp->port.advertising = (ADVERTISED_10baseT_Half |
6338 ADVERTISED_TP);
6339 } else {
6340 BNX2X_ERROR("NVRAM config error. "
6341 "Invalid link_config 0x%x"
6342 " speed_cap_mask 0x%x\n",
6343 bp->port.link_config,
6344 bp->link_params.speed_cap_mask);
6345 return;
6347 break;
6349 case PORT_FEATURE_LINK_SPEED_100M_FULL:
6350 if (bp->port.supported & SUPPORTED_100baseT_Full) {
6351 bp->link_params.req_line_speed = SPEED_100;
6352 bp->port.advertising = (ADVERTISED_100baseT_Full |
6353 ADVERTISED_TP);
6354 } else {
6355 BNX2X_ERROR("NVRAM config error. "
6356 "Invalid link_config 0x%x"
6357 " speed_cap_mask 0x%x\n",
6358 bp->port.link_config,
6359 bp->link_params.speed_cap_mask);
6360 return;
6362 break;
6364 case PORT_FEATURE_LINK_SPEED_100M_HALF:
6365 if (bp->port.supported & SUPPORTED_100baseT_Half) {
6366 bp->link_params.req_line_speed = SPEED_100;
6367 bp->link_params.req_duplex = DUPLEX_HALF;
6368 bp->port.advertising = (ADVERTISED_100baseT_Half |
6369 ADVERTISED_TP);
6370 } else {
6371 BNX2X_ERROR("NVRAM config error. "
6372 "Invalid link_config 0x%x"
6373 " speed_cap_mask 0x%x\n",
6374 bp->port.link_config,
6375 bp->link_params.speed_cap_mask);
6376 return;
6378 break;
6380 case PORT_FEATURE_LINK_SPEED_1G:
6381 if (bp->port.supported & SUPPORTED_1000baseT_Full) {
6382 bp->link_params.req_line_speed = SPEED_1000;
6383 bp->port.advertising = (ADVERTISED_1000baseT_Full |
6384 ADVERTISED_TP);
6385 } else {
6386 BNX2X_ERROR("NVRAM config error. "
6387 "Invalid link_config 0x%x"
6388 " speed_cap_mask 0x%x\n",
6389 bp->port.link_config,
6390 bp->link_params.speed_cap_mask);
6391 return;
6393 break;
6395 case PORT_FEATURE_LINK_SPEED_2_5G:
6396 if (bp->port.supported & SUPPORTED_2500baseX_Full) {
6397 bp->link_params.req_line_speed = SPEED_2500;
6398 bp->port.advertising = (ADVERTISED_2500baseX_Full |
6399 ADVERTISED_TP);
6400 } else {
6401 BNX2X_ERROR("NVRAM config error. "
6402 "Invalid link_config 0x%x"
6403 " speed_cap_mask 0x%x\n",
6404 bp->port.link_config,
6405 bp->link_params.speed_cap_mask);
6406 return;
6408 break;
6410 case PORT_FEATURE_LINK_SPEED_10G_CX4:
6411 case PORT_FEATURE_LINK_SPEED_10G_KX4:
6412 case PORT_FEATURE_LINK_SPEED_10G_KR:
6413 if (bp->port.supported & SUPPORTED_10000baseT_Full) {
6414 bp->link_params.req_line_speed = SPEED_10000;
6415 bp->port.advertising = (ADVERTISED_10000baseT_Full |
6416 ADVERTISED_FIBRE);
6417 } else {
6418 BNX2X_ERROR("NVRAM config error. "
6419 "Invalid link_config 0x%x"
6420 " speed_cap_mask 0x%x\n",
6421 bp->port.link_config,
6422 bp->link_params.speed_cap_mask);
6423 return;
6425 break;
6427 default:
6428 BNX2X_ERROR("NVRAM config error. "
6429 "BAD link speed link_config 0x%x\n",
6430 bp->port.link_config);
6431 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
6432 bp->port.advertising = bp->port.supported;
6433 break;
6436 bp->link_params.req_flow_ctrl = (bp->port.link_config &
6437 PORT_FEATURE_FLOW_CONTROL_MASK);
6438 if ((bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
6439 !(bp->port.supported & SUPPORTED_Autoneg))
6440 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
6442 BNX2X_DEV_INFO("req_line_speed %d req_duplex %d req_flow_ctrl 0x%x"
6443 " advertising 0x%x\n",
6444 bp->link_params.req_line_speed,
6445 bp->link_params.req_duplex,
6446 bp->link_params.req_flow_ctrl, bp->port.advertising);
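/* Assemble a 6-byte MAC address from the 16-bit upper and 32-bit lower
 * words stored in shared memory (kept in network byte order).
 */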
6449 static void __devinit bnx2x_set_mac_buf(u8 *mac_buf, u32 mac_lo, u16 mac_hi)
6451 mac_hi = cpu_to_be16(mac_hi);
6452 mac_lo = cpu_to_be32(mac_lo);
6453 memcpy(mac_buf, &mac_hi, sizeof(mac_hi));
6454 memcpy(mac_buf + sizeof(mac_hi), &mac_lo, sizeof(mac_lo));
6457 static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
6459 int port = BP_PORT(bp);
6460 u32 val, val2;
6461 u32 config;
6462 u16 i;
6463 u32 ext_phy_type;
6465 bp->link_params.bp = bp;
6466 bp->link_params.port = port;
6468 bp->link_params.lane_config =
6469 SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
6470 bp->link_params.ext_phy_config =
6471 SHMEM_RD(bp,
6472 dev_info.port_hw_config[port].external_phy_config);
6473 /* BCM8727_NOC => BCM8727 no over current */
6474 if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
6475 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727_NOC) {
6476 bp->link_params.ext_phy_config &=
6477 ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
6478 bp->link_params.ext_phy_config |=
6479 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727;
6480 bp->link_params.feature_config_flags |=
6481 FEATURE_CONFIG_BCM8727_NOC;
6484 bp->link_params.speed_cap_mask =
6485 SHMEM_RD(bp,
6486 dev_info.port_hw_config[port].speed_capability_mask);
6488 bp->port.link_config =
6489 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
6491 /* Get the 4 lanes xgxs config rx and tx */
6492 for (i = 0; i < 2; i++) {
6493 val = SHMEM_RD(bp,
6494 dev_info.port_hw_config[port].xgxs_config_rx[i<<1]);
6495 bp->link_params.xgxs_config_rx[i << 1] = ((val>>16) & 0xffff);
6496 bp->link_params.xgxs_config_rx[(i << 1) + 1] = (val & 0xffff);
6498 val = SHMEM_RD(bp,
6499 dev_info.port_hw_config[port].xgxs_config_tx[i<<1]);
6500 bp->link_params.xgxs_config_tx[i << 1] = ((val>>16) & 0xffff);
6501 bp->link_params.xgxs_config_tx[(i << 1) + 1] = (val & 0xffff);
6504 /* If the device is capable of WoL, set the default state according
6505 	 * to the HW */
6507 config = SHMEM_RD(bp, dev_info.port_feature_config[port].config);
6508 bp->wol = (!(bp->flags & NO_WOL_FLAG) &&
6509 (config & PORT_FEATURE_WOL_ENABLED));
6511 BNX2X_DEV_INFO("lane_config 0x%08x ext_phy_config 0x%08x"
6512 " speed_cap_mask 0x%08x link_config 0x%08x\n",
6513 bp->link_params.lane_config,
6514 bp->link_params.ext_phy_config,
6515 bp->link_params.speed_cap_mask, bp->port.link_config);
6517 bp->link_params.switch_cfg |= (bp->port.link_config &
6518 PORT_FEATURE_CONNECTED_SWITCH_MASK);
6519 bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);
6521 bnx2x_link_settings_requested(bp);
6524 	/* If connected directly, work with the internal PHY, otherwise, work
6525 	 * with the external PHY */
6527 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
6528 if (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)
6529 bp->mdio.prtad = bp->link_params.phy_addr;
6531 else if ((ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) &&
6532 (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN))
6533 bp->mdio.prtad =
6534 XGXS_EXT_PHY_ADDR(bp->link_params.ext_phy_config);
6536 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
6537 val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
6538 bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
6539 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
6540 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
6542 #ifdef BCM_CNIC
6543 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].iscsi_mac_upper);
6544 val = SHMEM_RD(bp, dev_info.port_hw_config[port].iscsi_mac_lower);
6545 bnx2x_set_mac_buf(bp->iscsi_mac, val, val2);
6546 #endif
6549 static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
6551 int func = BP_FUNC(bp);
6552 u32 val, val2;
6553 int rc = 0;
6555 bnx2x_get_common_hwinfo(bp);
6557 bp->e1hov = 0;
6558 bp->e1hmf = 0;
6559 if (CHIP_IS_E1H(bp) && !BP_NOMCP(bp)) {
6560 bp->mf_config =
6561 SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
6563 val = (SHMEM_RD(bp, mf_cfg.func_mf_config[FUNC_0].e1hov_tag) &
6564 FUNC_MF_CFG_E1HOV_TAG_MASK);
6565 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT)
6566 bp->e1hmf = 1;
6567 BNX2X_DEV_INFO("%s function mode\n",
6568 IS_E1HMF(bp) ? "multi" : "single");
6570 if (IS_E1HMF(bp)) {
6571 val = (SHMEM_RD(bp, mf_cfg.func_mf_config[func].
6572 e1hov_tag) &
6573 FUNC_MF_CFG_E1HOV_TAG_MASK);
6574 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
6575 bp->e1hov = val;
6576 BNX2X_DEV_INFO("E1HOV for func %d is %d "
6577 "(0x%04x)\n",
6578 func, bp->e1hov, bp->e1hov);
6579 } else {
6580 BNX2X_ERROR("No valid E1HOV for func %d,"
6581 " aborting\n", func);
6582 rc = -EPERM;
6584 } else {
6585 if (BP_E1HVN(bp)) {
6586 BNX2X_ERROR("VN %d in single function mode,"
6587 " aborting\n", BP_E1HVN(bp));
6588 rc = -EPERM;
6593 if (!BP_NOMCP(bp)) {
6594 bnx2x_get_port_hwinfo(bp);
6596 bp->fw_seq = (SHMEM_RD(bp, func_mb[func].drv_mb_header) &
6597 DRV_MSG_SEQ_NUMBER_MASK);
6598 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
6601 if (IS_E1HMF(bp)) {
6602 val2 = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_upper);
6603 val = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_lower);
6604 if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
6605 (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
6606 bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
6607 bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
6608 bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
6609 bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
6610 bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
6611 bp->dev->dev_addr[5] = (u8)(val & 0xff);
6612 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
6613 ETH_ALEN);
6614 memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
6615 ETH_ALEN);
6618 return rc;
6621 if (BP_NOMCP(bp)) {
6622 /* only supposed to happen on emulation/FPGA */
6623 BNX2X_ERROR("warning: random MAC workaround active\n");
6624 random_ether_addr(bp->dev->dev_addr);
6625 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
6628 return rc;
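/* Look for an OEM (Dell) firmware version string in the read-only section
 * of the PCI VPD and, if present, copy it into bp->fw_ver.
 */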
6631 static void __devinit bnx2x_read_fwinfo(struct bnx2x *bp)
6633 int cnt, i, block_end, rodi;
6634 char vpd_data[BNX2X_VPD_LEN+1];
6635 char str_id_reg[VENDOR_ID_LEN+1];
6636 char str_id_cap[VENDOR_ID_LEN+1];
6637 u8 len;
6639 cnt = pci_read_vpd(bp->pdev, 0, BNX2X_VPD_LEN, vpd_data);
6640 memset(bp->fw_ver, 0, sizeof(bp->fw_ver));
6642 if (cnt < BNX2X_VPD_LEN)
6643 goto out_not_found;
6645 i = pci_vpd_find_tag(vpd_data, 0, BNX2X_VPD_LEN,
6646 PCI_VPD_LRDT_RO_DATA);
6647 if (i < 0)
6648 goto out_not_found;
6651 block_end = i + PCI_VPD_LRDT_TAG_SIZE +
6652 pci_vpd_lrdt_size(&vpd_data[i]);
6654 i += PCI_VPD_LRDT_TAG_SIZE;
6656 if (block_end > BNX2X_VPD_LEN)
6657 goto out_not_found;
6659 rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
6660 PCI_VPD_RO_KEYWORD_MFR_ID);
6661 if (rodi < 0)
6662 goto out_not_found;
6664 len = pci_vpd_info_field_size(&vpd_data[rodi]);
6666 if (len != VENDOR_ID_LEN)
6667 goto out_not_found;
6669 rodi += PCI_VPD_INFO_FLD_HDR_SIZE;
6671 /* vendor specific info */
6672 snprintf(str_id_reg, VENDOR_ID_LEN + 1, "%04x", PCI_VENDOR_ID_DELL);
6673 snprintf(str_id_cap, VENDOR_ID_LEN + 1, "%04X", PCI_VENDOR_ID_DELL);
6674 if (!strncmp(str_id_reg, &vpd_data[rodi], VENDOR_ID_LEN) ||
6675 !strncmp(str_id_cap, &vpd_data[rodi], VENDOR_ID_LEN)) {
6677 rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
6678 PCI_VPD_RO_KEYWORD_VENDOR0);
6679 if (rodi >= 0) {
6680 len = pci_vpd_info_field_size(&vpd_data[rodi]);
6682 rodi += PCI_VPD_INFO_FLD_HDR_SIZE;
6684 if (len < 32 && (len + rodi) <= BNX2X_VPD_LEN) {
6685 memcpy(bp->fw_ver, &vpd_data[rodi], len);
6686 bp->fw_ver[len] = ' ';
6689 return;
6691 out_not_found:
6692 return;
6695 static int __devinit bnx2x_init_bp(struct bnx2x *bp)
6697 int func = BP_FUNC(bp);
6698 int timer_interval;
6699 int rc;
6701 /* Disable interrupt handling until HW is initialized */
6702 atomic_set(&bp->intr_sem, 1);
6703 smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
6705 mutex_init(&bp->port.phy_mutex);
6706 mutex_init(&bp->fw_mb_mutex);
6707 spin_lock_init(&bp->stats_lock);
6708 #ifdef BCM_CNIC
6709 mutex_init(&bp->cnic_mutex);
6710 #endif
6712 INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
6713 INIT_DELAYED_WORK(&bp->reset_task, bnx2x_reset_task);
6715 rc = bnx2x_get_hwinfo(bp);
6717 bnx2x_read_fwinfo(bp);
6718 /* need to reset chip if undi was active */
6719 if (!BP_NOMCP(bp))
6720 bnx2x_undi_unload(bp);
6722 if (CHIP_REV_IS_FPGA(bp))
6723 dev_err(&bp->pdev->dev, "FPGA detected\n");
6725 if (BP_NOMCP(bp) && (func == 0))
6726 dev_err(&bp->pdev->dev, "MCP disabled, "
6727 "must load devices in order!\n");
6729 /* Set multi queue mode */
6730 if ((multi_mode != ETH_RSS_MODE_DISABLED) &&
6731 ((int_mode == INT_MODE_INTx) || (int_mode == INT_MODE_MSI))) {
6732 dev_err(&bp->pdev->dev, "Multi disabled since int_mode "
6733 "requested is not MSI-X\n");
6734 multi_mode = ETH_RSS_MODE_DISABLED;
6736 bp->multi_mode = multi_mode;
6737 bp->int_mode = int_mode;
6739 bp->dev->features |= NETIF_F_GRO;
6741 /* Set TPA flags */
6742 if (disable_tpa) {
6743 bp->flags &= ~TPA_ENABLE_FLAG;
6744 bp->dev->features &= ~NETIF_F_LRO;
6745 } else {
6746 bp->flags |= TPA_ENABLE_FLAG;
6747 bp->dev->features |= NETIF_F_LRO;
6749 bp->disable_tpa = disable_tpa;
6751 if (CHIP_IS_E1(bp))
6752 bp->dropless_fc = 0;
6753 else
6754 bp->dropless_fc = dropless_fc;
6756 bp->mrrs = mrrs;
6758 bp->tx_ring_size = MAX_TX_AVAIL;
6759 bp->rx_ring_size = MAX_RX_AVAIL;
6761 bp->rx_csum = 1;
6763 /* make sure that the numbers are in the right granularity */
6764 bp->tx_ticks = (50 / (4 * BNX2X_BTR)) * (4 * BNX2X_BTR);
6765 bp->rx_ticks = (25 / (4 * BNX2X_BTR)) * (4 * BNX2X_BTR);
6767 timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
6768 bp->current_interval = (poll ? poll : timer_interval);
6770 init_timer(&bp->timer);
6771 bp->timer.expires = jiffies + bp->current_interval;
6772 bp->timer.data = (unsigned long) bp;
6773 bp->timer.function = bnx2x_timer;
6775 return rc;
6779 /****************************************************************************
6780 * General service functions
6781 ****************************************************************************/
6783 /* called with rtnl_lock */
6784 static int bnx2x_open(struct net_device *dev)
6786 struct bnx2x *bp = netdev_priv(dev);
6788 netif_carrier_off(dev);
6790 bnx2x_set_power_state(bp, PCI_D0);
6792 if (!bnx2x_reset_is_done(bp)) {
6793 do {
6794 			/* Reset the MCP mailbox sequence if there is an ongoing
6795 			 * recovery */
6797 bp->fw_seq = 0;
6799 			/* If it's the first function to load and "reset done" is
6800 			 * still not cleared, it may mean a previous recovery has not
6801 			 * completed. We don't check the attention state here because
6802 			 * it may have already been cleared by a "common" reset, but
6803 			 * we shall proceed with "process kill" anyway. */
6805 if ((bnx2x_get_load_cnt(bp) == 0) &&
6806 bnx2x_trylock_hw_lock(bp,
6807 HW_LOCK_RESOURCE_RESERVED_08) &&
6808 (!bnx2x_leader_reset(bp))) {
6809 DP(NETIF_MSG_HW, "Recovered in open\n");
6810 break;
6813 bnx2x_set_power_state(bp, PCI_D3hot);
6815 			printk(KERN_ERR "%s: Recovery flow hasn't been properly"
6816 			" completed yet. Try again later. If you still see this"
6817 			" message after a few retries then a power cycle is"
6818 			" required.\n", bp->dev->name);
6820 return -EAGAIN;
6821 } while (0);
6824 bp->recovery_state = BNX2X_RECOVERY_DONE;
6826 return bnx2x_nic_load(bp, LOAD_OPEN);
6829 /* called with rtnl_lock */
6830 static int bnx2x_close(struct net_device *dev)
6832 struct bnx2x *bp = netdev_priv(dev);
6834 /* Unload the driver, release IRQs */
6835 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
6836 bnx2x_set_power_state(bp, PCI_D3hot);
6838 return 0;
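/* Program the RX filtering mode: promiscuous, all-multicast, or an explicit
 * multicast list (CAM entries on E1, a CRC32c hash filter on E1H).
 */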
6841 /* called with netif_tx_lock from dev_mcast.c */
6842 void bnx2x_set_rx_mode(struct net_device *dev)
6844 struct bnx2x *bp = netdev_priv(dev);
6845 u32 rx_mode = BNX2X_RX_MODE_NORMAL;
6846 int port = BP_PORT(bp);
6848 if (bp->state != BNX2X_STATE_OPEN) {
6849 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
6850 return;
6853 DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);
6855 if (dev->flags & IFF_PROMISC)
6856 rx_mode = BNX2X_RX_MODE_PROMISC;
6858 else if ((dev->flags & IFF_ALLMULTI) ||
6859 ((netdev_mc_count(dev) > BNX2X_MAX_MULTICAST) &&
6860 CHIP_IS_E1(bp)))
6861 rx_mode = BNX2X_RX_MODE_ALLMULTI;
6863 else { /* some multicasts */
6864 if (CHIP_IS_E1(bp)) {
6865 int i, old, offset;
6866 struct netdev_hw_addr *ha;
6867 struct mac_configuration_cmd *config =
6868 bnx2x_sp(bp, mcast_config);
6870 i = 0;
6871 netdev_for_each_mc_addr(ha, dev) {
6872 config->config_table[i].
6873 cam_entry.msb_mac_addr =
6874 swab16(*(u16 *)&ha->addr[0]);
6875 config->config_table[i].
6876 cam_entry.middle_mac_addr =
6877 swab16(*(u16 *)&ha->addr[2]);
6878 config->config_table[i].
6879 cam_entry.lsb_mac_addr =
6880 swab16(*(u16 *)&ha->addr[4]);
6881 config->config_table[i].cam_entry.flags =
6882 cpu_to_le16(port);
6883 config->config_table[i].
6884 target_table_entry.flags = 0;
6885 config->config_table[i].target_table_entry.
6886 clients_bit_vector =
6887 cpu_to_le32(1 << BP_L_ID(bp));
6888 config->config_table[i].
6889 target_table_entry.vlan_id = 0;
6891 DP(NETIF_MSG_IFUP,
6892 "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
6893 config->config_table[i].
6894 cam_entry.msb_mac_addr,
6895 config->config_table[i].
6896 cam_entry.middle_mac_addr,
6897 config->config_table[i].
6898 cam_entry.lsb_mac_addr);
6899 i++;
6901 old = config->hdr.length;
6902 if (old > i) {
6903 for (; i < old; i++) {
6904 if (CAM_IS_INVALID(config->
6905 config_table[i])) {
6906 /* already invalidated */
6907 break;
6909 /* invalidate */
6910 CAM_INVALIDATE(config->
6911 config_table[i]);
6915 if (CHIP_REV_IS_SLOW(bp))
6916 offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
6917 else
6918 offset = BNX2X_MAX_MULTICAST*(1 + port);
6920 config->hdr.length = i;
6921 config->hdr.offset = offset;
6922 config->hdr.client_id = bp->fp->cl_id;
6923 config->hdr.reserved1 = 0;
6925 bp->set_mac_pending++;
6926 smp_wmb();
6928 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6929 U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
6930 U64_LO(bnx2x_sp_mapping(bp, mcast_config)),
6932 } else { /* E1H */
6933 /* Accept one or more multicasts */
6934 struct netdev_hw_addr *ha;
6935 u32 mc_filter[MC_HASH_SIZE];
6936 u32 crc, bit, regidx;
6937 int i;
6939 memset(mc_filter, 0, 4 * MC_HASH_SIZE);
6941 netdev_for_each_mc_addr(ha, dev) {
6942 DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
6943 ha->addr);
6945 crc = crc32c_le(0, ha->addr, ETH_ALEN);
6946 bit = (crc >> 24) & 0xff;
6947 regidx = bit >> 5;
6948 bit &= 0x1f;
6949 mc_filter[regidx] |= (1 << bit);
6952 for (i = 0; i < MC_HASH_SIZE; i++)
6953 REG_WR(bp, MC_HASH_OFFSET(bp, i),
6954 mc_filter[i]);
6958 bp->rx_mode = rx_mode;
6959 bnx2x_set_storm_rx_mode(bp);
6963 /* called with rtnl_lock */
6964 static int bnx2x_mdio_read(struct net_device *netdev, int prtad,
6965 int devad, u16 addr)
6967 struct bnx2x *bp = netdev_priv(netdev);
6968 u16 value;
6969 int rc;
6970 u32 phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
6972 DP(NETIF_MSG_LINK, "mdio_read: prtad 0x%x, devad 0x%x, addr 0x%x\n",
6973 prtad, devad, addr);
6975 if (prtad != bp->mdio.prtad) {
6976 		DP(NETIF_MSG_LINK, "prtad mismatch (cmd:0x%x != bp:0x%x)\n",
6977 prtad, bp->mdio.prtad);
6978 return -EINVAL;
6981 /* The HW expects different devad if CL22 is used */
6982 devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
6984 bnx2x_acquire_phy_lock(bp);
6985 rc = bnx2x_cl45_read(bp, BP_PORT(bp), phy_type, prtad,
6986 devad, addr, &value);
6987 bnx2x_release_phy_lock(bp);
6988 DP(NETIF_MSG_LINK, "mdio_read_val 0x%x rc = 0x%x\n", value, rc);
6990 if (!rc)
6991 rc = value;
6992 return rc;
6995 /* called with rtnl_lock */
6996 static int bnx2x_mdio_write(struct net_device *netdev, int prtad, int devad,
6997 u16 addr, u16 value)
6999 struct bnx2x *bp = netdev_priv(netdev);
7000 u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7001 int rc;
7003 DP(NETIF_MSG_LINK, "mdio_write: prtad 0x%x, devad 0x%x, addr 0x%x,"
7004 " value 0x%x\n", prtad, devad, addr, value);
7006 if (prtad != bp->mdio.prtad) {
7007 		DP(NETIF_MSG_LINK, "prtad mismatch (cmd:0x%x != bp:0x%x)\n",
7008 prtad, bp->mdio.prtad);
7009 return -EINVAL;
7012 /* The HW expects different devad if CL22 is used */
7013 devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
7015 bnx2x_acquire_phy_lock(bp);
7016 rc = bnx2x_cl45_write(bp, BP_PORT(bp), ext_phy_type, prtad,
7017 devad, addr, value);
7018 bnx2x_release_phy_lock(bp);
7019 return rc;
7022 /* called with rtnl_lock */
7023 static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
7025 struct bnx2x *bp = netdev_priv(dev);
7026 struct mii_ioctl_data *mdio = if_mii(ifr);
7028 DP(NETIF_MSG_LINK, "ioctl: phy id 0x%x, reg 0x%x, val_in 0x%x\n",
7029 mdio->phy_id, mdio->reg_num, mdio->val_in);
7031 if (!netif_running(dev))
7032 return -EAGAIN;
7034 return mdio_mii_ioctl(&bp->mdio, mdio, cmd);
7037 #ifdef CONFIG_NET_POLL_CONTROLLER
7038 static void poll_bnx2x(struct net_device *dev)
7040 struct bnx2x *bp = netdev_priv(dev);
7042 disable_irq(bp->pdev->irq);
7043 bnx2x_interrupt(bp->pdev->irq, dev);
7044 enable_irq(bp->pdev->irq);
7046 #endif
7048 static const struct net_device_ops bnx2x_netdev_ops = {
7049 .ndo_open = bnx2x_open,
7050 .ndo_stop = bnx2x_close,
7051 .ndo_start_xmit = bnx2x_start_xmit,
7052 .ndo_set_multicast_list = bnx2x_set_rx_mode,
7053 .ndo_set_mac_address = bnx2x_change_mac_addr,
7054 .ndo_validate_addr = eth_validate_addr,
7055 .ndo_do_ioctl = bnx2x_ioctl,
7056 .ndo_change_mtu = bnx2x_change_mtu,
7057 .ndo_tx_timeout = bnx2x_tx_timeout,
7058 #ifdef BCM_VLAN
7059 .ndo_vlan_rx_register = bnx2x_vlan_rx_register,
7060 #endif
7061 #ifdef CONFIG_NET_POLL_CONTROLLER
7062 .ndo_poll_controller = poll_bnx2x,
7063 #endif
7066 static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
7067 struct net_device *dev)
7069 struct bnx2x *bp;
7070 int rc;
7072 SET_NETDEV_DEV(dev, &pdev->dev);
7073 bp = netdev_priv(dev);
7075 bp->dev = dev;
7076 bp->pdev = pdev;
7077 bp->flags = 0;
7078 bp->func = PCI_FUNC(pdev->devfn);
7080 rc = pci_enable_device(pdev);
7081 if (rc) {
7082 dev_err(&bp->pdev->dev,
7083 "Cannot enable PCI device, aborting\n");
7084 goto err_out;
7087 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
7088 dev_err(&bp->pdev->dev,
7089 "Cannot find PCI device base address, aborting\n");
7090 rc = -ENODEV;
7091 goto err_out_disable;
7094 if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
7095 dev_err(&bp->pdev->dev, "Cannot find second PCI device"
7096 " base address, aborting\n");
7097 rc = -ENODEV;
7098 goto err_out_disable;
7101 if (atomic_read(&pdev->enable_cnt) == 1) {
7102 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
7103 if (rc) {
7104 dev_err(&bp->pdev->dev,
7105 "Cannot obtain PCI resources, aborting\n");
7106 goto err_out_disable;
7109 pci_set_master(pdev);
7110 pci_save_state(pdev);
7113 bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
7114 if (bp->pm_cap == 0) {
7115 dev_err(&bp->pdev->dev,
7116 "Cannot find power management capability, aborting\n");
7117 rc = -EIO;
7118 goto err_out_release;
7121 bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
7122 if (bp->pcie_cap == 0) {
7123 dev_err(&bp->pdev->dev,
7124 "Cannot find PCI Express capability, aborting\n");
7125 rc = -EIO;
7126 goto err_out_release;
7129 if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) == 0) {
7130 bp->flags |= USING_DAC_FLAG;
7131 if (dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64)) != 0) {
7132 dev_err(&bp->pdev->dev, "dma_set_coherent_mask"
7133 " failed, aborting\n");
7134 rc = -EIO;
7135 goto err_out_release;
7138 } else if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)) != 0) {
7139 dev_err(&bp->pdev->dev,
7140 "System does not support DMA, aborting\n");
7141 rc = -EIO;
7142 goto err_out_release;
7145 dev->mem_start = pci_resource_start(pdev, 0);
7146 dev->base_addr = dev->mem_start;
7147 dev->mem_end = pci_resource_end(pdev, 0);
7149 dev->irq = pdev->irq;
7151 bp->regview = pci_ioremap_bar(pdev, 0);
7152 if (!bp->regview) {
7153 dev_err(&bp->pdev->dev,
7154 "Cannot map register space, aborting\n");
7155 rc = -ENOMEM;
7156 goto err_out_release;
7159 bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
7160 min_t(u64, BNX2X_DB_SIZE,
7161 pci_resource_len(pdev, 2)));
7162 if (!bp->doorbells) {
7163 dev_err(&bp->pdev->dev,
7164 "Cannot map doorbell space, aborting\n");
7165 rc = -ENOMEM;
7166 goto err_out_unmap;
7169 bnx2x_set_power_state(bp, PCI_D0);
7171 /* clean indirect addresses */
7172 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
7173 PCICFG_VENDOR_ID_OFFSET);
7174 REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
7175 REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
7176 REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
7177 REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);
7179 /* Reset the load counter */
7180 bnx2x_clear_load_cnt(bp);
7182 dev->watchdog_timeo = TX_TIMEOUT;
7184 dev->netdev_ops = &bnx2x_netdev_ops;
7185 bnx2x_set_ethtool_ops(dev);
7186 dev->features |= NETIF_F_SG;
7187 dev->features |= NETIF_F_HW_CSUM;
7188 if (bp->flags & USING_DAC_FLAG)
7189 dev->features |= NETIF_F_HIGHDMA;
7190 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
7191 dev->features |= NETIF_F_TSO6;
7192 #ifdef BCM_VLAN
7193 dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
7194 bp->flags |= (HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
7196 dev->vlan_features |= NETIF_F_SG;
7197 dev->vlan_features |= NETIF_F_HW_CSUM;
7198 if (bp->flags & USING_DAC_FLAG)
7199 dev->vlan_features |= NETIF_F_HIGHDMA;
7200 dev->vlan_features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
7201 dev->vlan_features |= NETIF_F_TSO6;
7202 #endif
7204 /* get_port_hwinfo() will set prtad and mmds properly */
7205 bp->mdio.prtad = MDIO_PRTAD_NONE;
7206 bp->mdio.mmds = 0;
7207 bp->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
7208 bp->mdio.dev = dev;
7209 bp->mdio.mdio_read = bnx2x_mdio_read;
7210 bp->mdio.mdio_write = bnx2x_mdio_write;
7212 return 0;
7214 err_out_unmap:
7215 if (bp->regview) {
7216 iounmap(bp->regview);
7217 bp->regview = NULL;
7219 if (bp->doorbells) {
7220 iounmap(bp->doorbells);
7221 bp->doorbells = NULL;
7224 err_out_release:
7225 if (atomic_read(&pdev->enable_cnt) == 1)
7226 pci_release_regions(pdev);
7228 err_out_disable:
7229 pci_disable_device(pdev);
7230 pci_set_drvdata(pdev, NULL);
7232 err_out:
7233 return rc;
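/* Read the negotiated PCIe link width and speed (1 = 2.5GHz, 2 = 5GHz)
 * from the device's link control/status register.
 */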
7236 static void __devinit bnx2x_get_pcie_width_speed(struct bnx2x *bp,
7237 int *width, int *speed)
7239 u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
7241 *width = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
7243 /* return value of 1=2.5GHz 2=5GHz */
7244 *speed = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
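/* Sanity-check the firmware file: every section must fit within the file,
 * the init_ops offsets must be in range and the embedded FW version must
 * match the version this driver was built against.
 */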
7247 static int __devinit bnx2x_check_firmware(struct bnx2x *bp)
7249 const struct firmware *firmware = bp->firmware;
7250 struct bnx2x_fw_file_hdr *fw_hdr;
7251 struct bnx2x_fw_file_section *sections;
7252 u32 offset, len, num_ops;
7253 u16 *ops_offsets;
7254 int i;
7255 const u8 *fw_ver;
7257 if (firmware->size < sizeof(struct bnx2x_fw_file_hdr))
7258 return -EINVAL;
7260 fw_hdr = (struct bnx2x_fw_file_hdr *)firmware->data;
7261 sections = (struct bnx2x_fw_file_section *)fw_hdr;
7263 /* Make sure none of the offsets and sizes make us read beyond
7264 * the end of the firmware data */
7265 for (i = 0; i < sizeof(*fw_hdr) / sizeof(*sections); i++) {
7266 offset = be32_to_cpu(sections[i].offset);
7267 len = be32_to_cpu(sections[i].len);
7268 if (offset + len > firmware->size) {
7269 dev_err(&bp->pdev->dev,
7270 "Section %d length is out of bounds\n", i);
7271 return -EINVAL;
7275 /* Likewise for the init_ops offsets */
7276 offset = be32_to_cpu(fw_hdr->init_ops_offsets.offset);
7277 ops_offsets = (u16 *)(firmware->data + offset);
7278 num_ops = be32_to_cpu(fw_hdr->init_ops.len) / sizeof(struct raw_op);
7280 for (i = 0; i < be32_to_cpu(fw_hdr->init_ops_offsets.len) / 2; i++) {
7281 if (be16_to_cpu(ops_offsets[i]) > num_ops) {
7282 dev_err(&bp->pdev->dev,
7283 "Section offset %d is out of bounds\n", i);
7284 return -EINVAL;
7288 /* Check FW version */
7289 offset = be32_to_cpu(fw_hdr->fw_version.offset);
7290 fw_ver = firmware->data + offset;
7291 if ((fw_ver[0] != BCM_5710_FW_MAJOR_VERSION) ||
7292 (fw_ver[1] != BCM_5710_FW_MINOR_VERSION) ||
7293 (fw_ver[2] != BCM_5710_FW_REVISION_VERSION) ||
7294 (fw_ver[3] != BCM_5710_FW_ENGINEERING_VERSION)) {
7295 dev_err(&bp->pdev->dev,
7296 "Bad FW version:%d.%d.%d.%d. Should be %d.%d.%d.%d\n",
7297 fw_ver[0], fw_ver[1], fw_ver[2],
7298 fw_ver[3], BCM_5710_FW_MAJOR_VERSION,
7299 BCM_5710_FW_MINOR_VERSION,
7300 BCM_5710_FW_REVISION_VERSION,
7301 BCM_5710_FW_ENGINEERING_VERSION);
7302 return -EINVAL;
7305 return 0;
7308 static inline void be32_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
7310 const __be32 *source = (const __be32 *)_source;
7311 u32 *target = (u32 *)_target;
7312 u32 i;
7314 for (i = 0; i < n/4; i++)
7315 target[i] = be32_to_cpu(source[i]);
7319 /* Ops array is stored in the following format:
7320  * {op(8bit), offset(24bit, big endian), data(32bit, big endian)} */
7322 static inline void bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n)
7324 const __be32 *source = (const __be32 *)_source;
7325 struct raw_op *target = (struct raw_op *)_target;
7326 u32 i, j, tmp;
7328 for (i = 0, j = 0; i < n/8; i++, j += 2) {
7329 tmp = be32_to_cpu(source[j]);
7330 target[i].op = (tmp >> 24) & 0xff;
7331 target[i].offset = tmp & 0xffffff;
7332 target[i].raw_data = be32_to_cpu(source[j + 1]);
7336 static inline void be16_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
7338 const __be16 *source = (const __be16 *)_source;
7339 u16 *target = (u16 *)_target;
7340 u32 i;
7342 for (i = 0; i < n/2; i++)
7343 target[i] = be16_to_cpu(source[i]);
7346 #define BNX2X_ALLOC_AND_SET(arr, lbl, func) \
7347 do { \
7348 u32 len = be32_to_cpu(fw_hdr->arr.len); \
7349 bp->arr = kmalloc(len, GFP_KERNEL); \
7350 if (!bp->arr) { \
7351 pr_err("Failed to allocate %d bytes for "#arr"\n", len); \
7352 goto lbl; \
7354 func(bp->firmware->data + be32_to_cpu(fw_hdr->arr.offset), \
7355 (u8 *)bp->arr, len); \
7356 } while (0)
7358 static int __devinit bnx2x_init_firmware(struct bnx2x *bp, struct device *dev)
7360 const char *fw_file_name;
7361 struct bnx2x_fw_file_hdr *fw_hdr;
7362 int rc;
7364 if (CHIP_IS_E1(bp))
7365 fw_file_name = FW_FILE_NAME_E1;
7366 else if (CHIP_IS_E1H(bp))
7367 fw_file_name = FW_FILE_NAME_E1H;
7368 else {
7369 dev_err(dev, "Unsupported chip revision\n");
7370 return -EINVAL;
7373 dev_info(dev, "Loading %s\n", fw_file_name);
7375 rc = request_firmware(&bp->firmware, fw_file_name, dev);
7376 if (rc) {
7377 dev_err(dev, "Can't load firmware file %s\n", fw_file_name);
7378 goto request_firmware_exit;
7381 rc = bnx2x_check_firmware(bp);
7382 if (rc) {
7383 dev_err(dev, "Corrupt firmware file %s\n", fw_file_name);
7384 goto request_firmware_exit;
7387 fw_hdr = (struct bnx2x_fw_file_hdr *)bp->firmware->data;
7389 /* Initialize the pointers to the init arrays */
7390 /* Blob */
7391 BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit, be32_to_cpu_n);
7393 /* Opcodes */
7394 BNX2X_ALLOC_AND_SET(init_ops, init_ops_alloc_err, bnx2x_prep_ops);
7396 /* Offsets */
7397 BNX2X_ALLOC_AND_SET(init_ops_offsets, init_offsets_alloc_err,
7398 be16_to_cpu_n);
7400 /* STORMs firmware */
7401 INIT_TSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
7402 be32_to_cpu(fw_hdr->tsem_int_table_data.offset);
7403 INIT_TSEM_PRAM_DATA(bp) = bp->firmware->data +
7404 be32_to_cpu(fw_hdr->tsem_pram_data.offset);
7405 INIT_USEM_INT_TABLE_DATA(bp) = bp->firmware->data +
7406 be32_to_cpu(fw_hdr->usem_int_table_data.offset);
7407 INIT_USEM_PRAM_DATA(bp) = bp->firmware->data +
7408 be32_to_cpu(fw_hdr->usem_pram_data.offset);
7409 INIT_XSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
7410 be32_to_cpu(fw_hdr->xsem_int_table_data.offset);
7411 INIT_XSEM_PRAM_DATA(bp) = bp->firmware->data +
7412 be32_to_cpu(fw_hdr->xsem_pram_data.offset);
7413 INIT_CSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
7414 be32_to_cpu(fw_hdr->csem_int_table_data.offset);
7415 INIT_CSEM_PRAM_DATA(bp) = bp->firmware->data +
7416 be32_to_cpu(fw_hdr->csem_pram_data.offset);
7418 return 0;
7420 init_offsets_alloc_err:
7421 kfree(bp->init_ops);
7422 init_ops_alloc_err:
7423 kfree(bp->init_data);
7424 request_firmware_exit:
7425 release_firmware(bp->firmware);
7427 return rc;
7431 static int __devinit bnx2x_init_one(struct pci_dev *pdev,
7432 const struct pci_device_id *ent)
7434 struct net_device *dev = NULL;
7435 struct bnx2x *bp;
7436 int pcie_width, pcie_speed;
7437 int rc;
7439 /* dev zeroed in init_etherdev */
7440 dev = alloc_etherdev_mq(sizeof(*bp), MAX_CONTEXT);
7441 if (!dev) {
7442 dev_err(&pdev->dev, "Cannot allocate net device\n");
7443 return -ENOMEM;
7446 bp = netdev_priv(dev);
7447 bp->msg_enable = debug;
7449 pci_set_drvdata(pdev, dev);
7451 rc = bnx2x_init_dev(pdev, dev);
7452 if (rc < 0) {
7453 free_netdev(dev);
7454 return rc;
7457 rc = bnx2x_init_bp(bp);
7458 if (rc)
7459 goto init_one_exit;
7461 /* Set init arrays */
7462 rc = bnx2x_init_firmware(bp, &pdev->dev);
7463 if (rc) {
7464 dev_err(&pdev->dev, "Error loading firmware\n");
7465 goto init_one_exit;
7468 rc = register_netdev(dev);
7469 if (rc) {
7470 dev_err(&pdev->dev, "Cannot register net device\n");
7471 goto init_one_exit;
7474 bnx2x_get_pcie_width_speed(bp, &pcie_width, &pcie_speed);
7475 netdev_info(dev, "%s (%c%d) PCI-E x%d %s found at mem %lx,"
7476 " IRQ %d, ", board_info[ent->driver_data].name,
7477 (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
7478 pcie_width, (pcie_speed == 2) ? "5GHz (Gen2)" : "2.5GHz",
7479 dev->base_addr, bp->pdev->irq);
7480 pr_cont("node addr %pM\n", dev->dev_addr);
7482 return 0;
7484 init_one_exit:
7485 if (bp->regview)
7486 iounmap(bp->regview);
7488 if (bp->doorbells)
7489 iounmap(bp->doorbells);
7491 free_netdev(dev);
7493 if (atomic_read(&pdev->enable_cnt) == 1)
7494 pci_release_regions(pdev);
7496 pci_disable_device(pdev);
7497 pci_set_drvdata(pdev, NULL);
7499 return rc;
7502 static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
7504 struct net_device *dev = pci_get_drvdata(pdev);
7505 struct bnx2x *bp;
7507 if (!dev) {
7508 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
7509 return;
7511 bp = netdev_priv(dev);
7513 unregister_netdev(dev);
7515 /* Make sure RESET task is not scheduled before continuing */
7516 cancel_delayed_work_sync(&bp->reset_task);
7518 kfree(bp->init_ops_offsets);
7519 kfree(bp->init_ops);
7520 kfree(bp->init_data);
7521 release_firmware(bp->firmware);
7523 if (bp->regview)
7524 iounmap(bp->regview);
7526 if (bp->doorbells)
7527 iounmap(bp->doorbells);
7529 free_netdev(dev);
7531 if (atomic_read(&pdev->enable_cnt) == 1)
7532 pci_release_regions(pdev);
7534 pci_disable_device(pdev);
7535 pci_set_drvdata(pdev, NULL);
7538 static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
7540 int i;
7542 bp->state = BNX2X_STATE_ERROR;
7544 bp->rx_mode = BNX2X_RX_MODE_NONE;
7546 bnx2x_netif_stop(bp, 0);
7547 netif_carrier_off(bp->dev);
7549 del_timer_sync(&bp->timer);
7550 bp->stats_state = STATS_STATE_DISABLED;
7551 DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
7553 /* Release IRQs */
7554 bnx2x_free_irq(bp, false);
7556 if (CHIP_IS_E1(bp)) {
7557 struct mac_configuration_cmd *config =
7558 bnx2x_sp(bp, mcast_config);
7560 for (i = 0; i < config->hdr.length; i++)
7561 CAM_INVALIDATE(config->config_table[i]);
7564 /* Free SKBs, SGEs, TPA pool and driver internals */
7565 bnx2x_free_skbs(bp);
7566 for_each_queue(bp, i)
7567 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
7568 for_each_queue(bp, i)
7569 netif_napi_del(&bnx2x_fp(bp, i, napi));
7570 bnx2x_free_mem(bp);
7572 bp->state = BNX2X_STATE_CLOSED;
7574 return 0;
7577 static void bnx2x_eeh_recover(struct bnx2x *bp)
7579 u32 val;
7581 mutex_init(&bp->port.phy_mutex);
7583 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
7584 bp->link_params.shmem_base = bp->common.shmem_base;
7585 BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);
7587 if (!bp->common.shmem_base ||
7588 (bp->common.shmem_base < 0xA0000) ||
7589 (bp->common.shmem_base >= 0xC0000)) {
7590 BNX2X_DEV_INFO("MCP not active\n");
7591 bp->flags |= NO_MCP_FLAG;
7592 return;
7595 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
7596 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
7597 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
7598 BNX2X_ERR("BAD MCP validity signature\n");
7600 if (!BP_NOMCP(bp)) {
7601 bp->fw_seq = (SHMEM_RD(bp, func_mb[BP_FUNC(bp)].drv_mb_header)
7602 & DRV_MSG_SEQ_NUMBER_MASK);
7603 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
7608 /* bnx2x_io_error_detected - called when PCI error is detected
7609  * @pdev: Pointer to PCI device
7610  * @state: The current pci connection state
7612  * This function is called after a PCI bus error affecting
7613  * this device has been detected. */
7615 static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
7616 pci_channel_state_t state)
7618 struct net_device *dev = pci_get_drvdata(pdev);
7619 struct bnx2x *bp = netdev_priv(dev);
7621 rtnl_lock();
7623 netif_device_detach(dev);
7625 if (state == pci_channel_io_perm_failure) {
7626 rtnl_unlock();
7627 return PCI_ERS_RESULT_DISCONNECT;
7630 if (netif_running(dev))
7631 bnx2x_eeh_nic_unload(bp);
7633 pci_disable_device(pdev);
7635 rtnl_unlock();
7637 /* Request a slot reset */
7638 return PCI_ERS_RESULT_NEED_RESET;
7642 /* bnx2x_io_slot_reset - called after the PCI bus has been reset
7643  * @pdev: Pointer to PCI device
7645  * Restart the card from scratch, as if from a cold-boot. */
7647 static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
7649 struct net_device *dev = pci_get_drvdata(pdev);
7650 struct bnx2x *bp = netdev_priv(dev);
7652 rtnl_lock();
7654 if (pci_enable_device(pdev)) {
7655 dev_err(&pdev->dev,
7656 "Cannot re-enable PCI device after reset\n");
7657 rtnl_unlock();
7658 return PCI_ERS_RESULT_DISCONNECT;
7661 pci_set_master(pdev);
7662 pci_restore_state(pdev);
7664 if (netif_running(dev))
7665 bnx2x_set_power_state(bp, PCI_D0);
7667 rtnl_unlock();
7669 return PCI_ERS_RESULT_RECOVERED;
7673 /* bnx2x_io_resume - called when traffic can start flowing again
7674  * @pdev: Pointer to PCI device
7676  * This callback is called when the error recovery driver tells us that
7677  * it's OK to resume normal operation. */
7679 static void bnx2x_io_resume(struct pci_dev *pdev)
7681 struct net_device *dev = pci_get_drvdata(pdev);
7682 struct bnx2x *bp = netdev_priv(dev);
7684 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
7685 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
7686 return;
7689 rtnl_lock();
7691 bnx2x_eeh_recover(bp);
7693 if (netif_running(dev))
7694 bnx2x_nic_load(bp, LOAD_NORMAL);
7696 netif_device_attach(dev);
7698 rtnl_unlock();
7701 static struct pci_error_handlers bnx2x_err_handler = {
7702 .error_detected = bnx2x_io_error_detected,
7703 .slot_reset = bnx2x_io_slot_reset,
7704 .resume = bnx2x_io_resume,
7707 static struct pci_driver bnx2x_pci_driver = {
7708 .name = DRV_MODULE_NAME,
7709 .id_table = bnx2x_pci_tbl,
7710 .probe = bnx2x_init_one,
7711 .remove = __devexit_p(bnx2x_remove_one),
7712 .suspend = bnx2x_suspend,
7713 .resume = bnx2x_resume,
7714 .err_handler = &bnx2x_err_handler,
7717 static int __init bnx2x_init(void)
7719 int ret;
7721 pr_info("%s", version);
7723 bnx2x_wq = create_singlethread_workqueue("bnx2x");
7724 if (bnx2x_wq == NULL) {
7725 pr_err("Cannot create workqueue\n");
7726 return -ENOMEM;
7729 ret = pci_register_driver(&bnx2x_pci_driver);
7730 if (ret) {
7731 pr_err("Cannot register driver\n");
7732 destroy_workqueue(bnx2x_wq);
7734 return ret;
7737 static void __exit bnx2x_cleanup(void)
7739 pci_unregister_driver(&bnx2x_pci_driver);
7741 destroy_workqueue(bnx2x_wq);
7744 module_init(bnx2x_init);
7745 module_exit(bnx2x_cleanup);
7747 #ifdef BCM_CNIC
7749 /* count denotes the number of new completions we have seen */
7750 static void bnx2x_cnic_sp_post(struct bnx2x *bp, int count)
7752 struct eth_spe *spe;
7754 #ifdef BNX2X_STOP_ON_ERROR
7755 if (unlikely(bp->panic))
7756 return;
7757 #endif
7759 spin_lock_bh(&bp->spq_lock);
7760 bp->cnic_spq_pending -= count;
7762 for (; bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending;
7763 bp->cnic_spq_pending++) {
7765 if (!bp->cnic_kwq_pending)
7766 break;
7768 spe = bnx2x_sp_get_next(bp);
7769 *spe = *bp->cnic_kwq_cons;
7771 bp->cnic_kwq_pending--;
7773 DP(NETIF_MSG_TIMER, "pending on SPQ %d, on KWQ %d count %d\n",
7774 bp->cnic_spq_pending, bp->cnic_kwq_pending, count);
7776 if (bp->cnic_kwq_cons == bp->cnic_kwq_last)
7777 bp->cnic_kwq_cons = bp->cnic_kwq;
7778 else
7779 bp->cnic_kwq_cons++;
7781 bnx2x_sp_prod_update(bp);
7782 spin_unlock_bh(&bp->spq_lock);
7785 static int bnx2x_cnic_sp_queue(struct net_device *dev,
7786 struct kwqe_16 *kwqes[], u32 count)
7788 struct bnx2x *bp = netdev_priv(dev);
7789 int i;
7791 #ifdef BNX2X_STOP_ON_ERROR
7792 if (unlikely(bp->panic))
7793 return -EIO;
7794 #endif
7796 spin_lock_bh(&bp->spq_lock);
7798 for (i = 0; i < count; i++) {
7799 struct eth_spe *spe = (struct eth_spe *)kwqes[i];
7801 if (bp->cnic_kwq_pending == MAX_SP_DESC_CNT)
7802 break;
7804 *bp->cnic_kwq_prod = *spe;
7806 bp->cnic_kwq_pending++;
7808 DP(NETIF_MSG_TIMER, "L5 SPQE %x %x %x:%x pos %d\n",
7809 spe->hdr.conn_and_cmd_data, spe->hdr.type,
7810 spe->data.mac_config_addr.hi,
7811 spe->data.mac_config_addr.lo,
7812 bp->cnic_kwq_pending);
7814 if (bp->cnic_kwq_prod == bp->cnic_kwq_last)
7815 bp->cnic_kwq_prod = bp->cnic_kwq;
7816 else
7817 bp->cnic_kwq_prod++;
7820 spin_unlock_bh(&bp->spq_lock);
7822 if (bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending)
7823 bnx2x_cnic_sp_post(bp, 0);
7825 return i;
7828 static int bnx2x_cnic_ctl_send(struct bnx2x *bp, struct cnic_ctl_info *ctl)
7830 struct cnic_ops *c_ops;
7831 int rc = 0;
7833 mutex_lock(&bp->cnic_mutex);
7834 c_ops = bp->cnic_ops;
7835 if (c_ops)
7836 rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
7837 mutex_unlock(&bp->cnic_mutex);
7839 return rc;
7842 static int bnx2x_cnic_ctl_send_bh(struct bnx2x *bp, struct cnic_ctl_info *ctl)
7844 struct cnic_ops *c_ops;
7845 int rc = 0;
7847 rcu_read_lock();
7848 c_ops = rcu_dereference(bp->cnic_ops);
7849 if (c_ops)
7850 rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
7851 rcu_read_unlock();
7853 return rc;
7857 /* for commands that have no data */
7859 int bnx2x_cnic_notify(struct bnx2x *bp, int cmd)
7861 struct cnic_ctl_info ctl = {0};
7863 ctl.cmd = cmd;
7865 return bnx2x_cnic_ctl_send(bp, &ctl);
7868 static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid)
7870 struct cnic_ctl_info ctl;
7872 /* first we tell CNIC and only then we count this as a completion */
7873 ctl.cmd = CNIC_CTL_COMPLETION_CMD;
7874 ctl.data.comp.cid = cid;
7876 bnx2x_cnic_ctl_send_bh(bp, &ctl);
7877 bnx2x_cnic_sp_post(bp, 1);
7880 static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)
7882 struct bnx2x *bp = netdev_priv(dev);
7883 int rc = 0;
7885 switch (ctl->cmd) {
7886 case DRV_CTL_CTXTBL_WR_CMD: {
7887 u32 index = ctl->data.io.offset;
7888 dma_addr_t addr = ctl->data.io.dma_addr;
7890 bnx2x_ilt_wr(bp, index, addr);
7891 break;
7894 case DRV_CTL_COMPLETION_CMD: {
7895 int count = ctl->data.comp.comp_count;
7897 bnx2x_cnic_sp_post(bp, count);
7898 break;
7901 /* rtnl_lock is held. */
7902 case DRV_CTL_START_L2_CMD: {
7903 u32 cli = ctl->data.ring.client_id;
7905 bp->rx_mode_cl_mask |= (1 << cli);
7906 bnx2x_set_storm_rx_mode(bp);
7907 break;
7910 /* rtnl_lock is held. */
7911 case DRV_CTL_STOP_L2_CMD: {
7912 u32 cli = ctl->data.ring.client_id;
7914 bp->rx_mode_cl_mask &= ~(1 << cli);
7915 bnx2x_set_storm_rx_mode(bp);
7916 break;
7919 default:
7920 BNX2X_ERR("unknown command %x\n", ctl->cmd);
7921 rc = -EINVAL;
7924 return rc;
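/* Describe the IRQ resources handed over to the CNIC driver: with MSI-X the
 * second vector of the table is used for CNIC, along with its status block.
 */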
7927 void bnx2x_setup_cnic_irq_info(struct bnx2x *bp)
7929 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
7931 if (bp->flags & USING_MSIX_FLAG) {
7932 cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
7933 cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
7934 cp->irq_arr[0].vector = bp->msix_table[1].vector;
7935 } else {
7936 cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
7937 cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
7939 cp->irq_arr[0].status_blk = bp->cnic_sb;
7940 cp->irq_arr[0].status_blk_num = CNIC_SB_ID(bp);
7941 cp->irq_arr[1].status_blk = bp->def_status_blk;
7942 cp->irq_arr[1].status_blk_num = DEF_SB_ID;
7944 cp->num_irq = 2;
7947 static int bnx2x_register_cnic(struct net_device *dev, struct cnic_ops *ops,
7948 void *data)
7950 struct bnx2x *bp = netdev_priv(dev);
7951 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
7953 if (ops == NULL)
7954 return -EINVAL;
7956 if (atomic_read(&bp->intr_sem) != 0)
7957 return -EBUSY;
7959 bp->cnic_kwq = kzalloc(PAGE_SIZE, GFP_KERNEL);
7960 if (!bp->cnic_kwq)
7961 return -ENOMEM;
7963 bp->cnic_kwq_cons = bp->cnic_kwq;
7964 bp->cnic_kwq_prod = bp->cnic_kwq;
7965 bp->cnic_kwq_last = bp->cnic_kwq + MAX_SP_DESC_CNT;
7967 bp->cnic_spq_pending = 0;
7968 bp->cnic_kwq_pending = 0;
7970 bp->cnic_data = data;
7972 cp->num_irq = 0;
7973 cp->drv_state = CNIC_DRV_STATE_REGD;
7975 bnx2x_init_sb(bp, bp->cnic_sb, bp->cnic_sb_mapping, CNIC_SB_ID(bp));
7977 bnx2x_setup_cnic_irq_info(bp);
7978 bnx2x_set_iscsi_eth_mac_addr(bp, 1);
7979 bp->cnic_flags |= BNX2X_CNIC_FLAG_MAC_SET;
7980 rcu_assign_pointer(bp->cnic_ops, ops);
7982 return 0;
7985 static int bnx2x_unregister_cnic(struct net_device *dev)
7987 struct bnx2x *bp = netdev_priv(dev);
7988 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
7990 mutex_lock(&bp->cnic_mutex);
7991 if (bp->cnic_flags & BNX2X_CNIC_FLAG_MAC_SET) {
7992 bp->cnic_flags &= ~BNX2X_CNIC_FLAG_MAC_SET;
7993 bnx2x_set_iscsi_eth_mac_addr(bp, 0);
7995 cp->drv_state = 0;
7996 rcu_assign_pointer(bp->cnic_ops, NULL);
7997 mutex_unlock(&bp->cnic_mutex);
7998 synchronize_rcu();
7999 kfree(bp->cnic_kwq);
8000 bp->cnic_kwq = NULL;
8002 return 0;
8005 struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev)
8007 struct bnx2x *bp = netdev_priv(dev);
8008 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
8010 cp->drv_owner = THIS_MODULE;
8011 cp->chip_id = CHIP_ID(bp);
8012 cp->pdev = bp->pdev;
8013 cp->io_base = bp->regview;
8014 cp->io_base2 = bp->doorbells;
8015 cp->max_kwqe_pending = 8;
8016 cp->ctx_blk_size = CNIC_CTX_PER_ILT * sizeof(union cdu_context);
8017 cp->ctx_tbl_offset = FUNC_ILT_BASE(BP_FUNC(bp)) + 1;
8018 cp->ctx_tbl_len = CNIC_ILT_LINES;
8019 cp->starting_cid = BCM_CNIC_CID_START;
8020 cp->drv_submit_kwqes_16 = bnx2x_cnic_sp_queue;
8021 cp->drv_ctl = bnx2x_drv_ctl;
8022 cp->drv_register_cnic = bnx2x_register_cnic;
8023 cp->drv_unregister_cnic = bnx2x_unregister_cnic;
8025 return cp;
8027 EXPORT_SYMBOL(bnx2x_cnic_probe);
8029 #endif /* BCM_CNIC */