bnx2x: clean up in case of error in bnx2x_init_hw()
[linux-2.6.git] / drivers / net / bnx2x_main.c
blob16685a153c1012c9663d5113adc8cccf962a21ee
1 /* bnx2x_main.c: Broadcom Everest network driver.
3 * Copyright (c) 2007-2009 Broadcom Corporation
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
9 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
10 * Written by: Eliezer Tamir
11 * Based on code from Michael Chan's bnx2 driver
12 * UDP CSUM errata workaround by Arik Gendelman
13 * Slowpath and fastpath rework by Vladislav Zolotarov
14 * Statistics and Link management by Yitchak Gertner
18 #include <linux/module.h>
19 #include <linux/moduleparam.h>
20 #include <linux/kernel.h>
21 #include <linux/device.h> /* for dev_info() */
22 #include <linux/timer.h>
23 #include <linux/errno.h>
24 #include <linux/ioport.h>
25 #include <linux/slab.h>
26 #include <linux/vmalloc.h>
27 #include <linux/interrupt.h>
28 #include <linux/pci.h>
29 #include <linux/init.h>
30 #include <linux/netdevice.h>
31 #include <linux/etherdevice.h>
32 #include <linux/skbuff.h>
33 #include <linux/dma-mapping.h>
34 #include <linux/bitops.h>
35 #include <linux/irq.h>
36 #include <linux/delay.h>
37 #include <asm/byteorder.h>
38 #include <linux/time.h>
39 #include <linux/ethtool.h>
40 #include <linux/mii.h>
41 #include <linux/if_vlan.h>
42 #include <net/ip.h>
43 #include <net/tcp.h>
44 #include <net/checksum.h>
45 #include <net/ip6_checksum.h>
46 #include <linux/workqueue.h>
47 #include <linux/crc32.h>
48 #include <linux/crc32c.h>
49 #include <linux/prefetch.h>
50 #include <linux/zlib.h>
51 #include <linux/io.h>
52 #include <linux/stringify.h>
55 #include "bnx2x.h"
56 #include "bnx2x_init.h"
57 #include "bnx2x_init_ops.h"
58 #include "bnx2x_dump.h"
60 #define DRV_MODULE_VERSION "1.52.1-5"
61 #define DRV_MODULE_RELDATE "2009/11/09"
62 #define BNX2X_BC_VER 0x040200
64 #include <linux/firmware.h>
65 #include "bnx2x_fw_file_hdr.h"
66 /* FW files */
67 #define FW_FILE_VERSION \
68 __stringify(BCM_5710_FW_MAJOR_VERSION) "." \
69 __stringify(BCM_5710_FW_MINOR_VERSION) "." \
70 __stringify(BCM_5710_FW_REVISION_VERSION) "." \
71 __stringify(BCM_5710_FW_ENGINEERING_VERSION)
72 #define FW_FILE_NAME_E1 "bnx2x-e1-" FW_FILE_VERSION ".fw"
73 #define FW_FILE_NAME_E1H "bnx2x-e1h-" FW_FILE_VERSION ".fw"
75 /* Time in jiffies before concluding the transmitter is hung */
76 #define TX_TIMEOUT (5*HZ)
78 static char version[] __devinitdata =
79 "Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
80 DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
82 MODULE_AUTHOR("Eliezer Tamir");
83 MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710/57711/57711E Driver");
84 MODULE_LICENSE("GPL");
85 MODULE_VERSION(DRV_MODULE_VERSION);
86 MODULE_FIRMWARE(FW_FILE_NAME_E1);
87 MODULE_FIRMWARE(FW_FILE_NAME_E1H);
89 static int multi_mode = 1;
90 module_param(multi_mode, int, 0);
91 MODULE_PARM_DESC(multi_mode, " Multi queue mode "
92 "(0 Disable; 1 Enable (default))");
94 static int num_queues;
95 module_param(num_queues, int, 0);
96 MODULE_PARM_DESC(num_queues, " Number of queues for multi_mode=1"
97 " (default is as a number of CPUs)");
99 static int disable_tpa;
100 module_param(disable_tpa, int, 0);
101 MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");
103 static int int_mode;
104 module_param(int_mode, int, 0);
105 MODULE_PARM_DESC(int_mode, " Force interrupt mode (1 INT#x; 2 MSI)");
107 static int dropless_fc;
108 module_param(dropless_fc, int, 0);
109 MODULE_PARM_DESC(dropless_fc, " Pause on exhausted host ring");
111 static int poll;
112 module_param(poll, int, 0);
113 MODULE_PARM_DESC(poll, " Use polling (for debug)");
115 static int mrrs = -1;
116 module_param(mrrs, int, 0);
117 MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");
119 static int debug;
120 module_param(debug, int, 0);
121 MODULE_PARM_DESC(debug, " Default debug msglevel");
123 static int load_count[3]; /* 0-common, 1-port0, 2-port1 */
125 static struct workqueue_struct *bnx2x_wq;
127 enum bnx2x_board_type {
128 BCM57710 = 0,
129 BCM57711 = 1,
130 BCM57711E = 2,
133 /* indexed by board_type, above */
134 static struct {
135 char *name;
136 } board_info[] __devinitdata = {
137 { "Broadcom NetXtreme II BCM57710 XGb" },
138 { "Broadcom NetXtreme II BCM57711 XGb" },
139 { "Broadcom NetXtreme II BCM57711E XGb" }
143 static DEFINE_PCI_DEVICE_TABLE(bnx2x_pci_tbl) = {
144 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57710), BCM57710 },
145 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711), BCM57711 },
146 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711E), BCM57711E },
147 { 0 }
150 MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);
152 /****************************************************************************
153 * General service functions
154 ****************************************************************************/
156 /* used only at init
157 * locking is done by mcp
159 void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
161 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
162 pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
163 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
164 PCICFG_VENDOR_ID_OFFSET);
167 static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
169 u32 val;
171 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
172 pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
173 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
174 PCICFG_VENDOR_ID_OFFSET);
176 return val;
179 static const u32 dmae_reg_go_c[] = {
180 DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
181 DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
182 DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
183 DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
186 /* copy command into DMAE command memory and set DMAE command go */
187 static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
188 int idx)
190 u32 cmd_offset;
191 int i;
193 cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
194 for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
195 REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));
197 DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
198 idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
200 REG_WR(bp, dmae_reg_go_c[idx], 1);
203 void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
204 u32 len32)
206 struct dmae_command dmae;
207 u32 *wb_comp = bnx2x_sp(bp, wb_comp);
208 int cnt = 200;
210 if (!bp->dmae_ready) {
211 u32 *data = bnx2x_sp(bp, wb_data[0]);
213 DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x len32 %d)"
214 " using indirect\n", dst_addr, len32);
215 bnx2x_init_ind_wr(bp, dst_addr, data, len32);
216 return;
219 memset(&dmae, 0, sizeof(struct dmae_command));
221 dmae.opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
222 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
223 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
224 #ifdef __BIG_ENDIAN
225 DMAE_CMD_ENDIANITY_B_DW_SWAP |
226 #else
227 DMAE_CMD_ENDIANITY_DW_SWAP |
228 #endif
229 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
230 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
231 dmae.src_addr_lo = U64_LO(dma_addr);
232 dmae.src_addr_hi = U64_HI(dma_addr);
233 dmae.dst_addr_lo = dst_addr >> 2;
234 dmae.dst_addr_hi = 0;
235 dmae.len = len32;
236 dmae.comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
237 dmae.comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
238 dmae.comp_val = DMAE_COMP_VAL;
240 DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
241 DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
242 "dst_addr [%x:%08x (%08x)]\n"
243 DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
244 dmae.opcode, dmae.src_addr_hi, dmae.src_addr_lo,
245 dmae.len, dmae.dst_addr_hi, dmae.dst_addr_lo, dst_addr,
246 dmae.comp_addr_hi, dmae.comp_addr_lo, dmae.comp_val);
247 DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
248 bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
249 bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
251 mutex_lock(&bp->dmae_mutex);
253 *wb_comp = 0;
255 bnx2x_post_dmae(bp, &dmae, INIT_DMAE_C(bp));
257 udelay(5);
259 while (*wb_comp != DMAE_COMP_VAL) {
260 DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);
262 if (!cnt) {
263 BNX2X_ERR("DMAE timeout!\n");
264 break;
266 cnt--;
267 /* adjust delay for emulation/FPGA */
268 if (CHIP_REV_IS_SLOW(bp))
269 msleep(100);
270 else
271 udelay(5);
274 mutex_unlock(&bp->dmae_mutex);
277 void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
279 struct dmae_command dmae;
280 u32 *wb_comp = bnx2x_sp(bp, wb_comp);
281 int cnt = 200;
283 if (!bp->dmae_ready) {
284 u32 *data = bnx2x_sp(bp, wb_data[0]);
285 int i;
287 DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x len32 %d)"
288 " using indirect\n", src_addr, len32);
289 for (i = 0; i < len32; i++)
290 data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
291 return;
294 memset(&dmae, 0, sizeof(struct dmae_command));
296 dmae.opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
297 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
298 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
299 #ifdef __BIG_ENDIAN
300 DMAE_CMD_ENDIANITY_B_DW_SWAP |
301 #else
302 DMAE_CMD_ENDIANITY_DW_SWAP |
303 #endif
304 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
305 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
306 dmae.src_addr_lo = src_addr >> 2;
307 dmae.src_addr_hi = 0;
308 dmae.dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
309 dmae.dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
310 dmae.len = len32;
311 dmae.comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
312 dmae.comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
313 dmae.comp_val = DMAE_COMP_VAL;
315 DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
316 DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
317 "dst_addr [%x:%08x (%08x)]\n"
318 DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
319 dmae.opcode, dmae.src_addr_hi, dmae.src_addr_lo,
320 dmae.len, dmae.dst_addr_hi, dmae.dst_addr_lo, src_addr,
321 dmae.comp_addr_hi, dmae.comp_addr_lo, dmae.comp_val);
323 mutex_lock(&bp->dmae_mutex);
325 memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
326 *wb_comp = 0;
328 bnx2x_post_dmae(bp, &dmae, INIT_DMAE_C(bp));
330 udelay(5);
332 while (*wb_comp != DMAE_COMP_VAL) {
334 if (!cnt) {
335 BNX2X_ERR("DMAE timeout!\n");
336 break;
338 cnt--;
339 /* adjust delay for emulation/FPGA */
340 if (CHIP_REV_IS_SLOW(bp))
341 msleep(100);
342 else
343 udelay(5);
345 DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
346 bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
347 bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
349 mutex_unlock(&bp->dmae_mutex);
352 void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
353 u32 addr, u32 len)
355 int offset = 0;
357 while (len > DMAE_LEN32_WR_MAX) {
358 bnx2x_write_dmae(bp, phys_addr + offset,
359 addr + offset, DMAE_LEN32_WR_MAX);
360 offset += DMAE_LEN32_WR_MAX * 4;
361 len -= DMAE_LEN32_WR_MAX;
364 bnx2x_write_dmae(bp, phys_addr + offset, addr + offset, len);
367 /* used only for slowpath so not inlined */
368 static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
370 u32 wb_write[2];
372 wb_write[0] = val_hi;
373 wb_write[1] = val_lo;
374 REG_WR_DMAE(bp, reg, wb_write, 2);
377 #ifdef USE_WB_RD
378 static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
380 u32 wb_data[2];
382 REG_RD_DMAE(bp, reg, wb_data, 2);
384 return HILO_U64(wb_data[0], wb_data[1]);
386 #endif
388 static int bnx2x_mc_assert(struct bnx2x *bp)
390 char last_idx;
391 int i, rc = 0;
392 u32 row0, row1, row2, row3;
394 /* XSTORM */
395 last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
396 XSTORM_ASSERT_LIST_INDEX_OFFSET);
397 if (last_idx)
398 BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
400 /* print the asserts */
401 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
403 row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
404 XSTORM_ASSERT_LIST_OFFSET(i));
405 row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
406 XSTORM_ASSERT_LIST_OFFSET(i) + 4);
407 row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
408 XSTORM_ASSERT_LIST_OFFSET(i) + 8);
409 row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
410 XSTORM_ASSERT_LIST_OFFSET(i) + 12);
412 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
413 BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
414 " 0x%08x 0x%08x 0x%08x\n",
415 i, row3, row2, row1, row0);
416 rc++;
417 } else {
418 break;
422 /* TSTORM */
423 last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
424 TSTORM_ASSERT_LIST_INDEX_OFFSET);
425 if (last_idx)
426 BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
428 /* print the asserts */
429 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
431 row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
432 TSTORM_ASSERT_LIST_OFFSET(i));
433 row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
434 TSTORM_ASSERT_LIST_OFFSET(i) + 4);
435 row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
436 TSTORM_ASSERT_LIST_OFFSET(i) + 8);
437 row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
438 TSTORM_ASSERT_LIST_OFFSET(i) + 12);
440 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
441 BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
442 " 0x%08x 0x%08x 0x%08x\n",
443 i, row3, row2, row1, row0);
444 rc++;
445 } else {
446 break;
450 /* CSTORM */
451 last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
452 CSTORM_ASSERT_LIST_INDEX_OFFSET);
453 if (last_idx)
454 BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
456 /* print the asserts */
457 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
459 row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
460 CSTORM_ASSERT_LIST_OFFSET(i));
461 row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
462 CSTORM_ASSERT_LIST_OFFSET(i) + 4);
463 row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
464 CSTORM_ASSERT_LIST_OFFSET(i) + 8);
465 row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
466 CSTORM_ASSERT_LIST_OFFSET(i) + 12);
468 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
469 BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
470 " 0x%08x 0x%08x 0x%08x\n",
471 i, row3, row2, row1, row0);
472 rc++;
473 } else {
474 break;
478 /* USTORM */
479 last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
480 USTORM_ASSERT_LIST_INDEX_OFFSET);
481 if (last_idx)
482 BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
484 /* print the asserts */
485 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
487 row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
488 USTORM_ASSERT_LIST_OFFSET(i));
489 row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
490 USTORM_ASSERT_LIST_OFFSET(i) + 4);
491 row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
492 USTORM_ASSERT_LIST_OFFSET(i) + 8);
493 row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
494 USTORM_ASSERT_LIST_OFFSET(i) + 12);
496 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
497 BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
498 " 0x%08x 0x%08x 0x%08x\n",
499 i, row3, row2, row1, row0);
500 rc++;
501 } else {
502 break;
506 return rc;
509 static void bnx2x_fw_dump(struct bnx2x *bp)
511 u32 mark, offset;
512 __be32 data[9];
513 int word;
515 mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104);
516 mark = ((mark + 0x3) & ~0x3);
517 printk(KERN_ERR PFX "begin fw dump (mark 0x%x)\n", mark);
519 printk(KERN_ERR PFX);
520 for (offset = mark - 0x08000000; offset <= 0xF900; offset += 0x8*4) {
521 for (word = 0; word < 8; word++)
522 data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
523 offset + 4*word));
524 data[8] = 0x0;
525 printk(KERN_CONT "%s", (char *)data);
527 for (offset = 0xF108; offset <= mark - 0x08000000; offset += 0x8*4) {
528 for (word = 0; word < 8; word++)
529 data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
530 offset + 4*word));
531 data[8] = 0x0;
532 printk(KERN_CONT "%s", (char *)data);
534 printk(KERN_ERR PFX "end of fw dump\n");
537 static void bnx2x_panic_dump(struct bnx2x *bp)
539 int i;
540 u16 j, start, end;
542 bp->stats_state = STATS_STATE_DISABLED;
543 DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
545 BNX2X_ERR("begin crash dump -----------------\n");
547 /* Indices */
548 /* Common */
549 BNX2X_ERR("def_c_idx(%u) def_u_idx(%u) def_x_idx(%u)"
550 " def_t_idx(%u) def_att_idx(%u) attn_state(%u)"
551 " spq_prod_idx(%u)\n",
552 bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
553 bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);
555 /* Rx */
556 for_each_queue(bp, i) {
557 struct bnx2x_fastpath *fp = &bp->fp[i];
559 BNX2X_ERR("fp%d: rx_bd_prod(%x) rx_bd_cons(%x)"
560 " *rx_bd_cons_sb(%x) rx_comp_prod(%x)"
561 " rx_comp_cons(%x) *rx_cons_sb(%x)\n",
562 i, fp->rx_bd_prod, fp->rx_bd_cons,
563 le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
564 fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
565 BNX2X_ERR(" rx_sge_prod(%x) last_max_sge(%x)"
566 " fp_u_idx(%x) *sb_u_idx(%x)\n",
567 fp->rx_sge_prod, fp->last_max_sge,
568 le16_to_cpu(fp->fp_u_idx),
569 fp->status_blk->u_status_block.status_block_index);
572 /* Tx */
573 for_each_queue(bp, i) {
574 struct bnx2x_fastpath *fp = &bp->fp[i];
576 BNX2X_ERR("fp%d: tx_pkt_prod(%x) tx_pkt_cons(%x)"
577 " tx_bd_prod(%x) tx_bd_cons(%x) *tx_cons_sb(%x)\n",
578 i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
579 fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
580 BNX2X_ERR(" fp_c_idx(%x) *sb_c_idx(%x)"
581 " tx_db_prod(%x)\n", le16_to_cpu(fp->fp_c_idx),
582 fp->status_blk->c_status_block.status_block_index,
583 fp->tx_db.data.prod);
586 /* Rings */
587 /* Rx */
588 for_each_queue(bp, i) {
589 struct bnx2x_fastpath *fp = &bp->fp[i];
591 start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
592 end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
593 for (j = start; j != end; j = RX_BD(j + 1)) {
594 u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
595 struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];
597 BNX2X_ERR("fp%d: rx_bd[%x]=[%x:%x] sw_bd=[%p]\n",
598 i, j, rx_bd[1], rx_bd[0], sw_bd->skb);
601 start = RX_SGE(fp->rx_sge_prod);
602 end = RX_SGE(fp->last_max_sge);
603 for (j = start; j != end; j = RX_SGE(j + 1)) {
604 u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
605 struct sw_rx_page *sw_page = &fp->rx_page_ring[j];
607 BNX2X_ERR("fp%d: rx_sge[%x]=[%x:%x] sw_page=[%p]\n",
608 i, j, rx_sge[1], rx_sge[0], sw_page->page);
611 start = RCQ_BD(fp->rx_comp_cons - 10);
612 end = RCQ_BD(fp->rx_comp_cons + 503);
613 for (j = start; j != end; j = RCQ_BD(j + 1)) {
614 u32 *cqe = (u32 *)&fp->rx_comp_ring[j];
616 BNX2X_ERR("fp%d: cqe[%x]=[%x:%x:%x:%x]\n",
617 i, j, cqe[0], cqe[1], cqe[2], cqe[3]);
621 /* Tx */
622 for_each_queue(bp, i) {
623 struct bnx2x_fastpath *fp = &bp->fp[i];
625 start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
626 end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
627 for (j = start; j != end; j = TX_BD(j + 1)) {
628 struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];
630 BNX2X_ERR("fp%d: packet[%x]=[%p,%x]\n",
631 i, j, sw_bd->skb, sw_bd->first_bd);
634 start = TX_BD(fp->tx_bd_cons - 10);
635 end = TX_BD(fp->tx_bd_cons + 254);
636 for (j = start; j != end; j = TX_BD(j + 1)) {
637 u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];
639 BNX2X_ERR("fp%d: tx_bd[%x]=[%x:%x:%x:%x]\n",
640 i, j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
644 bnx2x_fw_dump(bp);
645 bnx2x_mc_assert(bp);
646 BNX2X_ERR("end crash dump -----------------\n");
649 static void bnx2x_int_enable(struct bnx2x *bp)
651 int port = BP_PORT(bp);
652 u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
653 u32 val = REG_RD(bp, addr);
654 int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
655 int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;
657 if (msix) {
658 val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
659 HC_CONFIG_0_REG_INT_LINE_EN_0);
660 val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
661 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
662 } else if (msi) {
663 val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
664 val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
665 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
666 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
667 } else {
668 val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
669 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
670 HC_CONFIG_0_REG_INT_LINE_EN_0 |
671 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
673 DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
674 val, port, addr);
676 REG_WR(bp, addr, val);
678 val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
681 DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x) mode %s\n",
682 val, port, addr, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));
684 REG_WR(bp, addr, val);
686 * Ensure that HC_CONFIG is written before leading/trailing edge config
688 mmiowb();
689 barrier();
691 if (CHIP_IS_E1H(bp)) {
692 /* init leading/trailing edge */
693 if (IS_E1HMF(bp)) {
694 val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
695 if (bp->port.pmf)
696 /* enable nig and gpio3 attention */
697 val |= 0x1100;
698 } else
699 val = 0xffff;
701 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
702 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
705 /* Make sure that interrupts are indeed enabled from here on */
706 mmiowb();
709 static void bnx2x_int_disable(struct bnx2x *bp)
711 int port = BP_PORT(bp);
712 u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
713 u32 val = REG_RD(bp, addr);
715 val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
716 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
717 HC_CONFIG_0_REG_INT_LINE_EN_0 |
718 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
720 DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
721 val, port, addr);
723 /* flush all outstanding writes */
724 mmiowb();
726 REG_WR(bp, addr, val);
727 if (REG_RD(bp, addr) != val)
728 BNX2X_ERR("BUG! proper val not read from IGU!\n");
731 static void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
733 int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
734 int i, offset;
736 /* disable interrupt handling */
737 atomic_inc(&bp->intr_sem);
738 smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
740 if (disable_hw)
741 /* prevent the HW from sending interrupts */
742 bnx2x_int_disable(bp);
744 /* make sure all ISRs are done */
745 if (msix) {
746 synchronize_irq(bp->msix_table[0].vector);
747 offset = 1;
748 #ifdef BCM_CNIC
749 offset++;
750 #endif
751 for_each_queue(bp, i)
752 synchronize_irq(bp->msix_table[i + offset].vector);
753 } else
754 synchronize_irq(bp->pdev->irq);
756 /* make sure sp_task is not running */
757 cancel_delayed_work(&bp->sp_task);
758 flush_workqueue(bnx2x_wq);
761 /* fast path */
764 * General service functions
767 static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
768 u8 storm, u16 index, u8 op, u8 update)
770 u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
771 COMMAND_REG_INT_ACK);
772 struct igu_ack_register igu_ack;
774 igu_ack.status_block_index = index;
775 igu_ack.sb_id_and_flags =
776 ((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
777 (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
778 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
779 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));
781 DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n",
782 (*(u32 *)&igu_ack), hc_addr);
783 REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));
785 /* Make sure that ACK is written */
786 mmiowb();
787 barrier();
790 static inline void bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
792 struct host_status_block *fpsb = fp->status_blk;
794 barrier(); /* status block is written to by the chip */
795 fp->fp_c_idx = fpsb->c_status_block.status_block_index;
796 fp->fp_u_idx = fpsb->u_status_block.status_block_index;
799 static u16 bnx2x_ack_int(struct bnx2x *bp)
801 u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
802 COMMAND_REG_SIMD_MASK);
803 u32 result = REG_RD(bp, hc_addr);
805 DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
806 result, hc_addr);
808 return result;
813 * fast path service functions
816 static inline int bnx2x_has_tx_work_unload(struct bnx2x_fastpath *fp)
818 /* Tell compiler that consumer and producer can change */
819 barrier();
820 return (fp->tx_pkt_prod != fp->tx_pkt_cons);
823 /* free skb in the packet ring at pos idx
824 * return idx of last bd freed
826 static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
827 u16 idx)
829 struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
830 struct eth_tx_start_bd *tx_start_bd;
831 struct eth_tx_bd *tx_data_bd;
832 struct sk_buff *skb = tx_buf->skb;
833 u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
834 int nbd;
836 /* prefetch skb end pointer to speedup dev_kfree_skb() */
837 prefetch(&skb->end);
839 DP(BNX2X_MSG_OFF, "pkt_idx %d buff @(%p)->skb %p\n",
840 idx, tx_buf, skb);
842 /* unmap first bd */
843 DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
844 tx_start_bd = &fp->tx_desc_ring[bd_idx].start_bd;
845 pci_unmap_single(bp->pdev, BD_UNMAP_ADDR(tx_start_bd),
846 BD_UNMAP_LEN(tx_start_bd), PCI_DMA_TODEVICE);
848 nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
849 #ifdef BNX2X_STOP_ON_ERROR
850 if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
851 BNX2X_ERR("BAD nbd!\n");
852 bnx2x_panic();
854 #endif
855 new_cons = nbd + tx_buf->first_bd;
857 /* Get the next bd */
858 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
860 /* Skip a parse bd... */
861 --nbd;
862 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
864 /* ...and the TSO split header bd since they have no mapping */
865 if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
866 --nbd;
867 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
870 /* now free frags */
871 while (nbd > 0) {
873 DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
874 tx_data_bd = &fp->tx_desc_ring[bd_idx].reg_bd;
875 pci_unmap_page(bp->pdev, BD_UNMAP_ADDR(tx_data_bd),
876 BD_UNMAP_LEN(tx_data_bd), PCI_DMA_TODEVICE);
877 if (--nbd)
878 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
881 /* release skb */
882 WARN_ON(!skb);
883 dev_kfree_skb(skb);
884 tx_buf->first_bd = 0;
885 tx_buf->skb = NULL;
887 return new_cons;
890 static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
892 s16 used;
893 u16 prod;
894 u16 cons;
896 barrier(); /* Tell compiler that prod and cons can change */
897 prod = fp->tx_bd_prod;
898 cons = fp->tx_bd_cons;
900 /* NUM_TX_RINGS = number of "next-page" entries
901 It will be used as a threshold */
902 used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;
904 #ifdef BNX2X_STOP_ON_ERROR
905 WARN_ON(used < 0);
906 WARN_ON(used > fp->bp->tx_ring_size);
907 WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL);
908 #endif
910 return (s16)(fp->bp->tx_ring_size) - used;
913 static inline int bnx2x_has_tx_work(struct bnx2x_fastpath *fp)
915 u16 hw_cons;
917 /* Tell compiler that status block fields can change */
918 barrier();
919 hw_cons = le16_to_cpu(*fp->tx_cons_sb);
920 return hw_cons != fp->tx_pkt_cons;
923 static int bnx2x_tx_int(struct bnx2x_fastpath *fp)
925 struct bnx2x *bp = fp->bp;
926 struct netdev_queue *txq;
927 u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
929 #ifdef BNX2X_STOP_ON_ERROR
930 if (unlikely(bp->panic))
931 return -1;
932 #endif
934 txq = netdev_get_tx_queue(bp->dev, fp->index);
935 hw_cons = le16_to_cpu(*fp->tx_cons_sb);
936 sw_cons = fp->tx_pkt_cons;
938 while (sw_cons != hw_cons) {
939 u16 pkt_cons;
941 pkt_cons = TX_BD(sw_cons);
943 /* prefetch(bp->tx_buf_ring[pkt_cons].skb); */
945 DP(NETIF_MSG_TX_DONE, "hw_cons %u sw_cons %u pkt_cons %u\n",
946 hw_cons, sw_cons, pkt_cons);
948 /* if (NEXT_TX_IDX(sw_cons) != hw_cons) {
949 rmb();
950 prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
953 bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
954 sw_cons++;
957 fp->tx_pkt_cons = sw_cons;
958 fp->tx_bd_cons = bd_cons;
960 /* TBD need a thresh? */
961 if (unlikely(netif_tx_queue_stopped(txq))) {
963 /* Need to make the tx_bd_cons update visible to start_xmit()
964 * before checking for netif_tx_queue_stopped(). Without the
965 * memory barrier, there is a small possibility that
966 * start_xmit() will miss it and cause the queue to be stopped
967 * forever.
969 smp_mb();
971 if ((netif_tx_queue_stopped(txq)) &&
972 (bp->state == BNX2X_STATE_OPEN) &&
973 (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
974 netif_tx_wake_queue(txq);
976 return 0;
979 #ifdef BCM_CNIC
980 static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid);
981 #endif
983 static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
984 union eth_rx_cqe *rr_cqe)
986 struct bnx2x *bp = fp->bp;
987 int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
988 int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);
990 DP(BNX2X_MSG_SP,
991 "fp %d cid %d got ramrod #%d state is %x type is %d\n",
992 fp->index, cid, command, bp->state,
993 rr_cqe->ramrod_cqe.ramrod_type);
995 bp->spq_left++;
997 if (fp->index) {
998 switch (command | fp->state) {
999 case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
1000 BNX2X_FP_STATE_OPENING):
1001 DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
1002 cid);
1003 fp->state = BNX2X_FP_STATE_OPEN;
1004 break;
1006 case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
1007 DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
1008 cid);
1009 fp->state = BNX2X_FP_STATE_HALTED;
1010 break;
1012 default:
1013 BNX2X_ERR("unexpected MC reply (%d) "
1014 "fp->state is %x\n", command, fp->state);
1015 break;
1017 mb(); /* force bnx2x_wait_ramrod() to see the change */
1018 return;
1021 switch (command | bp->state) {
1022 case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
1023 DP(NETIF_MSG_IFUP, "got setup ramrod\n");
1024 bp->state = BNX2X_STATE_OPEN;
1025 break;
1027 case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
1028 DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
1029 bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
1030 fp->state = BNX2X_FP_STATE_HALTED;
1031 break;
1033 case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
1034 DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
1035 bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
1036 break;
1038 #ifdef BCM_CNIC
1039 case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_OPEN):
1040 DP(NETIF_MSG_IFDOWN, "got delete ramrod for CID %d\n", cid);
1041 bnx2x_cnic_cfc_comp(bp, cid);
1042 break;
1043 #endif
1045 case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
1046 case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
1047 DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
1048 bp->set_mac_pending--;
1049 smp_wmb();
1050 break;
1052 case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
1053 DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
1054 bp->set_mac_pending--;
1055 smp_wmb();
1056 break;
1058 default:
1059 BNX2X_ERR("unexpected MC reply (%d) bp->state is %x\n",
1060 command, bp->state);
1061 break;
1063 mb(); /* force bnx2x_wait_ramrod() to see the change */
1066 static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
1067 struct bnx2x_fastpath *fp, u16 index)
1069 struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
1070 struct page *page = sw_buf->page;
1071 struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
1073 /* Skip "next page" elements */
1074 if (!page)
1075 return;
1077 pci_unmap_page(bp->pdev, pci_unmap_addr(sw_buf, mapping),
1078 SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
1079 __free_pages(page, PAGES_PER_SGE_SHIFT);
1081 sw_buf->page = NULL;
1082 sge->addr_hi = 0;
1083 sge->addr_lo = 0;
1086 static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
1087 struct bnx2x_fastpath *fp, int last)
1089 int i;
1091 for (i = 0; i < last; i++)
1092 bnx2x_free_rx_sge(bp, fp, i);
1095 static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
1096 struct bnx2x_fastpath *fp, u16 index)
1098 struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
1099 struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
1100 struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
1101 dma_addr_t mapping;
1103 if (unlikely(page == NULL))
1104 return -ENOMEM;
1106 mapping = pci_map_page(bp->pdev, page, 0, SGE_PAGE_SIZE*PAGES_PER_SGE,
1107 PCI_DMA_FROMDEVICE);
1108 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
1109 __free_pages(page, PAGES_PER_SGE_SHIFT);
1110 return -ENOMEM;
1113 sw_buf->page = page;
1114 pci_unmap_addr_set(sw_buf, mapping, mapping);
1116 sge->addr_hi = cpu_to_le32(U64_HI(mapping));
1117 sge->addr_lo = cpu_to_le32(U64_LO(mapping));
1119 return 0;
1122 static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
1123 struct bnx2x_fastpath *fp, u16 index)
1125 struct sk_buff *skb;
1126 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
1127 struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
1128 dma_addr_t mapping;
1130 skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
1131 if (unlikely(skb == NULL))
1132 return -ENOMEM;
1134 mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_size,
1135 PCI_DMA_FROMDEVICE);
1136 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
1137 dev_kfree_skb(skb);
1138 return -ENOMEM;
1141 rx_buf->skb = skb;
1142 pci_unmap_addr_set(rx_buf, mapping, mapping);
1144 rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
1145 rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
1147 return 0;
1150 /* note that we are not allocating a new skb,
1151 * we are just moving one from cons to prod
1152 * we are not creating a new mapping,
1153 * so there is no need to check for dma_mapping_error().
1155 static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
1156 struct sk_buff *skb, u16 cons, u16 prod)
1158 struct bnx2x *bp = fp->bp;
1159 struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
1160 struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
1161 struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
1162 struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
1164 pci_dma_sync_single_for_device(bp->pdev,
1165 pci_unmap_addr(cons_rx_buf, mapping),
1166 RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
1168 prod_rx_buf->skb = cons_rx_buf->skb;
1169 pci_unmap_addr_set(prod_rx_buf, mapping,
1170 pci_unmap_addr(cons_rx_buf, mapping));
1171 *prod_bd = *cons_bd;
1174 static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
1175 u16 idx)
1177 u16 last_max = fp->last_max_sge;
1179 if (SUB_S16(idx, last_max) > 0)
1180 fp->last_max_sge = idx;
1183 static void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
1185 int i, j;
1187 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
1188 int idx = RX_SGE_CNT * i - 1;
1190 for (j = 0; j < 2; j++) {
1191 SGE_MASK_CLEAR_BIT(fp, idx);
1192 idx--;
1197 static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
1198 struct eth_fast_path_rx_cqe *fp_cqe)
1200 struct bnx2x *bp = fp->bp;
1201 u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
1202 le16_to_cpu(fp_cqe->len_on_bd)) >>
1203 SGE_PAGE_SHIFT;
1204 u16 last_max, last_elem, first_elem;
1205 u16 delta = 0;
1206 u16 i;
1208 if (!sge_len)
1209 return;
1211 /* First mark all used pages */
1212 for (i = 0; i < sge_len; i++)
1213 SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i])));
1215 DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
1216 sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));
1218 /* Here we assume that the last SGE index is the biggest */
1219 prefetch((void *)(fp->sge_mask));
1220 bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));
1222 last_max = RX_SGE(fp->last_max_sge);
1223 last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
1224 first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;
1226 /* If ring is not full */
1227 if (last_elem + 1 != first_elem)
1228 last_elem++;
1230 /* Now update the prod */
1231 for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
1232 if (likely(fp->sge_mask[i]))
1233 break;
1235 fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
1236 delta += RX_SGE_MASK_ELEM_SZ;
1239 if (delta > 0) {
1240 fp->rx_sge_prod += delta;
1241 /* clear page-end entries */
1242 bnx2x_clear_sge_mask_next_elems(fp);
1245 DP(NETIF_MSG_RX_STATUS,
1246 "fp->last_max_sge = %d fp->rx_sge_prod = %d\n",
1247 fp->last_max_sge, fp->rx_sge_prod);
1250 static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
1252 /* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
1253 memset(fp->sge_mask, 0xff,
1254 (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));
1256 /* Clear the two last indices in the page to 1:
1257 these are the indices that correspond to the "next" element,
1258 hence will never be indicated and should be removed from
1259 the calculations. */
1260 bnx2x_clear_sge_mask_next_elems(fp);
1263 static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
1264 struct sk_buff *skb, u16 cons, u16 prod)
1266 struct bnx2x *bp = fp->bp;
1267 struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
1268 struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
1269 struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
1270 dma_addr_t mapping;
1272 /* move empty skb from pool to prod and map it */
1273 prod_rx_buf->skb = fp->tpa_pool[queue].skb;
1274 mapping = pci_map_single(bp->pdev, fp->tpa_pool[queue].skb->data,
1275 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
1276 pci_unmap_addr_set(prod_rx_buf, mapping, mapping);
1278 /* move partial skb from cons to pool (don't unmap yet) */
1279 fp->tpa_pool[queue] = *cons_rx_buf;
1281 /* mark bin state as start - print error if current state != stop */
1282 if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
1283 BNX2X_ERR("start of bin not in stop [%d]\n", queue);
1285 fp->tpa_state[queue] = BNX2X_TPA_START;
1287 /* point prod_bd to new skb */
1288 prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
1289 prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
1291 #ifdef BNX2X_STOP_ON_ERROR
1292 fp->tpa_queue_used |= (1 << queue);
1293 #ifdef __powerpc64__
1294 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
1295 #else
1296 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
1297 #endif
1298 fp->tpa_queue_used);
1299 #endif
1302 static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
1303 struct sk_buff *skb,
1304 struct eth_fast_path_rx_cqe *fp_cqe,
1305 u16 cqe_idx)
1307 struct sw_rx_page *rx_pg, old_rx_pg;
1308 u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
1309 u32 i, frag_len, frag_size, pages;
1310 int err;
1311 int j;
1313 frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
1314 pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;
1316 /* This is needed in order to enable forwarding support */
1317 if (frag_size)
1318 skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE,
1319 max(frag_size, (u32)len_on_bd));
1321 #ifdef BNX2X_STOP_ON_ERROR
1322 if (pages >
1323 min((u32)8, (u32)MAX_SKB_FRAGS) * SGE_PAGE_SIZE * PAGES_PER_SGE) {
1324 BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
1325 pages, cqe_idx);
1326 BNX2X_ERR("fp_cqe->pkt_len = %d fp_cqe->len_on_bd = %d\n",
1327 fp_cqe->pkt_len, len_on_bd);
1328 bnx2x_panic();
1329 return -EINVAL;
1331 #endif
1333 /* Run through the SGL and compose the fragmented skb */
1334 for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
1335 u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j]));
1337 /* FW gives the indices of the SGE as if the ring is an array
1338 (meaning that "next" element will consume 2 indices) */
1339 frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
1340 rx_pg = &fp->rx_page_ring[sge_idx];
1341 old_rx_pg = *rx_pg;
1343 /* If we fail to allocate a substitute page, we simply stop
1344 where we are and drop the whole packet */
1345 err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
1346 if (unlikely(err)) {
1347 fp->eth_q_stats.rx_skb_alloc_failed++;
1348 return err;
1351 /* Unmap the page as we r going to pass it to the stack */
1352 pci_unmap_page(bp->pdev, pci_unmap_addr(&old_rx_pg, mapping),
1353 SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
1355 /* Add one frag and update the appropriate fields in the skb */
1356 skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
1358 skb->data_len += frag_len;
1359 skb->truesize += frag_len;
1360 skb->len += frag_len;
1362 frag_size -= frag_len;
1365 return 0;
1368 static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
1369 u16 queue, int pad, int len, union eth_rx_cqe *cqe,
1370 u16 cqe_idx)
1372 struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
1373 struct sk_buff *skb = rx_buf->skb;
1374 /* alloc new skb */
1375 struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
1377 /* Unmap skb in the pool anyway, as we are going to change
1378 pool entry status to BNX2X_TPA_STOP even if new skb allocation
1379 fails. */
1380 pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
1381 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
1383 if (likely(new_skb)) {
1384 /* fix ip xsum and give it to the stack */
1385 /* (no need to map the new skb) */
1386 #ifdef BCM_VLAN
1387 int is_vlan_cqe =
1388 (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
1389 PARSING_FLAGS_VLAN);
1390 int is_not_hwaccel_vlan_cqe =
1391 (is_vlan_cqe && (!(bp->flags & HW_VLAN_RX_FLAG)));
1392 #endif
1394 prefetch(skb);
1395 prefetch(((char *)(skb)) + 128);
1397 #ifdef BNX2X_STOP_ON_ERROR
1398 if (pad + len > bp->rx_buf_size) {
1399 BNX2X_ERR("skb_put is about to fail... "
1400 "pad %d len %d rx_buf_size %d\n",
1401 pad, len, bp->rx_buf_size);
1402 bnx2x_panic();
1403 return;
1405 #endif
1407 skb_reserve(skb, pad);
1408 skb_put(skb, len);
1410 skb->protocol = eth_type_trans(skb, bp->dev);
1411 skb->ip_summed = CHECKSUM_UNNECESSARY;
1414 struct iphdr *iph;
1416 iph = (struct iphdr *)skb->data;
1417 #ifdef BCM_VLAN
1418 /* If there is no Rx VLAN offloading -
1419 take VLAN tag into an account */
1420 if (unlikely(is_not_hwaccel_vlan_cqe))
1421 iph = (struct iphdr *)((u8 *)iph + VLAN_HLEN);
1422 #endif
1423 iph->check = 0;
1424 iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
1427 if (!bnx2x_fill_frag_skb(bp, fp, skb,
1428 &cqe->fast_path_cqe, cqe_idx)) {
1429 #ifdef BCM_VLAN
1430 if ((bp->vlgrp != NULL) && is_vlan_cqe &&
1431 (!is_not_hwaccel_vlan_cqe))
1432 vlan_hwaccel_receive_skb(skb, bp->vlgrp,
1433 le16_to_cpu(cqe->fast_path_cqe.
1434 vlan_tag));
1435 else
1436 #endif
1437 netif_receive_skb(skb);
1438 } else {
1439 DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
1440 " - dropping packet!\n");
1441 dev_kfree_skb(skb);
1445 /* put new skb in bin */
1446 fp->tpa_pool[queue].skb = new_skb;
1448 } else {
1449 /* else drop the packet and keep the buffer in the bin */
1450 DP(NETIF_MSG_RX_STATUS,
1451 "Failed to allocate new skb - dropping packet!\n");
1452 fp->eth_q_stats.rx_skb_alloc_failed++;
1455 fp->tpa_state[queue] = BNX2X_TPA_STOP;
1458 static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
1459 struct bnx2x_fastpath *fp,
1460 u16 bd_prod, u16 rx_comp_prod,
1461 u16 rx_sge_prod)
1463 struct ustorm_eth_rx_producers rx_prods = {0};
1464 int i;
1466 /* Update producers */
1467 rx_prods.bd_prod = bd_prod;
1468 rx_prods.cqe_prod = rx_comp_prod;
1469 rx_prods.sge_prod = rx_sge_prod;
1472 * Make sure that the BD and SGE data is updated before updating the
1473 * producers since FW might read the BD/SGE right after the producer
1474 * is updated.
1475 * This is only applicable for weak-ordered memory model archs such
1476 * as IA-64. The following barrier is also mandatory since FW will
1477 * assumes BDs must have buffers.
1479 wmb();
1481 for (i = 0; i < sizeof(struct ustorm_eth_rx_producers)/4; i++)
1482 REG_WR(bp, BAR_USTRORM_INTMEM +
1483 USTORM_RX_PRODS_OFFSET(BP_PORT(bp), fp->cl_id) + i*4,
1484 ((u32 *)&rx_prods)[i]);
1486 mmiowb(); /* keep prod updates ordered */
1488 DP(NETIF_MSG_RX_STATUS,
1489 "queue[%d]: wrote bd_prod %u cqe_prod %u sge_prod %u\n",
1490 fp->index, bd_prod, rx_comp_prod, rx_sge_prod);
1493 static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
1495 struct bnx2x *bp = fp->bp;
1496 u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
1497 u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
1498 int rx_pkt = 0;
1500 #ifdef BNX2X_STOP_ON_ERROR
1501 if (unlikely(bp->panic))
1502 return 0;
1503 #endif
1505 /* CQ "next element" is of the size of the regular element,
1506 that's why it's ok here */
1507 hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
1508 if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
1509 hw_comp_cons++;
1511 bd_cons = fp->rx_bd_cons;
1512 bd_prod = fp->rx_bd_prod;
1513 bd_prod_fw = bd_prod;
1514 sw_comp_cons = fp->rx_comp_cons;
1515 sw_comp_prod = fp->rx_comp_prod;
1517 /* Memory barrier necessary as speculative reads of the rx
1518 * buffer can be ahead of the index in the status block
1520 rmb();
1522 DP(NETIF_MSG_RX_STATUS,
1523 "queue[%d]: hw_comp_cons %u sw_comp_cons %u\n",
1524 fp->index, hw_comp_cons, sw_comp_cons);
1526 while (sw_comp_cons != hw_comp_cons) {
1527 struct sw_rx_bd *rx_buf = NULL;
1528 struct sk_buff *skb;
1529 union eth_rx_cqe *cqe;
1530 u8 cqe_fp_flags;
1531 u16 len, pad;
1533 comp_ring_cons = RCQ_BD(sw_comp_cons);
1534 bd_prod = RX_BD(bd_prod);
1535 bd_cons = RX_BD(bd_cons);
1537 /* Prefetch the page containing the BD descriptor
1538 at producer's index. It will be needed when new skb is
1539 allocated */
1540 prefetch((void *)(PAGE_ALIGN((unsigned long)
1541 (&fp->rx_desc_ring[bd_prod])) -
1542 PAGE_SIZE + 1));
1544 cqe = &fp->rx_comp_ring[comp_ring_cons];
1545 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
1547 DP(NETIF_MSG_RX_STATUS, "CQE type %x err %x status %x"
1548 " queue %x vlan %x len %u\n", CQE_TYPE(cqe_fp_flags),
1549 cqe_fp_flags, cqe->fast_path_cqe.status_flags,
1550 le32_to_cpu(cqe->fast_path_cqe.rss_hash_result),
1551 le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
1552 le16_to_cpu(cqe->fast_path_cqe.pkt_len));
1554 /* is this a slowpath msg? */
1555 if (unlikely(CQE_TYPE(cqe_fp_flags))) {
1556 bnx2x_sp_event(fp, cqe);
1557 goto next_cqe;
1559 /* this is an rx packet */
1560 } else {
1561 rx_buf = &fp->rx_buf_ring[bd_cons];
1562 skb = rx_buf->skb;
1563 prefetch(skb);
1564 prefetch((u8 *)skb + 256);
1565 len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
1566 pad = cqe->fast_path_cqe.placement_offset;
1568 /* If CQE is marked both TPA_START and TPA_END
1569 it is a non-TPA CQE */
1570 if ((!fp->disable_tpa) &&
1571 (TPA_TYPE(cqe_fp_flags) !=
1572 (TPA_TYPE_START | TPA_TYPE_END))) {
1573 u16 queue = cqe->fast_path_cqe.queue_index;
1575 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
1576 DP(NETIF_MSG_RX_STATUS,
1577 "calling tpa_start on queue %d\n",
1578 queue);
1580 bnx2x_tpa_start(fp, queue, skb,
1581 bd_cons, bd_prod);
1582 goto next_rx;
1585 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
1586 DP(NETIF_MSG_RX_STATUS,
1587 "calling tpa_stop on queue %d\n",
1588 queue);
1590 if (!BNX2X_RX_SUM_FIX(cqe))
1591 BNX2X_ERR("STOP on none TCP "
1592 "data\n");
1594 /* This is a size of the linear data
1595 on this skb */
1596 len = le16_to_cpu(cqe->fast_path_cqe.
1597 len_on_bd);
1598 bnx2x_tpa_stop(bp, fp, queue, pad,
1599 len, cqe, comp_ring_cons);
1600 #ifdef BNX2X_STOP_ON_ERROR
1601 if (bp->panic)
1602 return 0;
1603 #endif
1605 bnx2x_update_sge_prod(fp,
1606 &cqe->fast_path_cqe);
1607 goto next_cqe;
1611 pci_dma_sync_single_for_device(bp->pdev,
1612 pci_unmap_addr(rx_buf, mapping),
1613 pad + RX_COPY_THRESH,
1614 PCI_DMA_FROMDEVICE);
1615 prefetch(skb);
1616 prefetch(((char *)(skb)) + 128);
1618 /* is this an error packet? */
1619 if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
1620 DP(NETIF_MSG_RX_ERR,
1621 "ERROR flags %x rx packet %u\n",
1622 cqe_fp_flags, sw_comp_cons);
1623 fp->eth_q_stats.rx_err_discard_pkt++;
1624 goto reuse_rx;
1627 /* Since we don't have a jumbo ring
1628 * copy small packets if mtu > 1500
1630 if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
1631 (len <= RX_COPY_THRESH)) {
1632 struct sk_buff *new_skb;
1634 new_skb = netdev_alloc_skb(bp->dev,
1635 len + pad);
1636 if (new_skb == NULL) {
1637 DP(NETIF_MSG_RX_ERR,
1638 "ERROR packet dropped "
1639 "because of alloc failure\n");
1640 fp->eth_q_stats.rx_skb_alloc_failed++;
1641 goto reuse_rx;
1644 /* aligned copy */
1645 skb_copy_from_linear_data_offset(skb, pad,
1646 new_skb->data + pad, len);
1647 skb_reserve(new_skb, pad);
1648 skb_put(new_skb, len);
1650 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1652 skb = new_skb;
1654 } else
1655 if (likely(bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0)) {
1656 pci_unmap_single(bp->pdev,
1657 pci_unmap_addr(rx_buf, mapping),
1658 bp->rx_buf_size,
1659 PCI_DMA_FROMDEVICE);
1660 skb_reserve(skb, pad);
1661 skb_put(skb, len);
1663 } else {
1664 DP(NETIF_MSG_RX_ERR,
1665 "ERROR packet dropped because "
1666 "of alloc failure\n");
1667 fp->eth_q_stats.rx_skb_alloc_failed++;
1668 reuse_rx:
1669 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1670 goto next_rx;
1673 skb->protocol = eth_type_trans(skb, bp->dev);
1675 skb->ip_summed = CHECKSUM_NONE;
1676 if (bp->rx_csum) {
1677 if (likely(BNX2X_RX_CSUM_OK(cqe)))
1678 skb->ip_summed = CHECKSUM_UNNECESSARY;
1679 else
1680 fp->eth_q_stats.hw_csum_err++;
1684 skb_record_rx_queue(skb, fp->index);
1686 #ifdef BCM_VLAN
1687 if ((bp->vlgrp != NULL) && (bp->flags & HW_VLAN_RX_FLAG) &&
1688 (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
1689 PARSING_FLAGS_VLAN))
1690 vlan_hwaccel_receive_skb(skb, bp->vlgrp,
1691 le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
1692 else
1693 #endif
1694 netif_receive_skb(skb);
1697 next_rx:
1698 rx_buf->skb = NULL;
1700 bd_cons = NEXT_RX_IDX(bd_cons);
1701 bd_prod = NEXT_RX_IDX(bd_prod);
1702 bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
1703 rx_pkt++;
1704 next_cqe:
1705 sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
1706 sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
1708 if (rx_pkt == budget)
1709 break;
1710 } /* while */
1712 fp->rx_bd_cons = bd_cons;
1713 fp->rx_bd_prod = bd_prod_fw;
1714 fp->rx_comp_cons = sw_comp_cons;
1715 fp->rx_comp_prod = sw_comp_prod;
1717 /* Update producers */
1718 bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
1719 fp->rx_sge_prod);
1721 fp->rx_pkt += rx_pkt;
1722 fp->rx_calls++;
1724 return rx_pkt;
1727 static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
1729 struct bnx2x_fastpath *fp = fp_cookie;
1730 struct bnx2x *bp = fp->bp;
1732 /* Return here if interrupt is disabled */
1733 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1734 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1735 return IRQ_HANDLED;
1738 DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
1739 fp->index, fp->sb_id);
1740 bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
1742 #ifdef BNX2X_STOP_ON_ERROR
1743 if (unlikely(bp->panic))
1744 return IRQ_HANDLED;
1745 #endif
1747 /* Handle Rx and Tx according to MSI-X vector */
1748 prefetch(fp->rx_cons_sb);
1749 prefetch(fp->tx_cons_sb);
1750 prefetch(&fp->status_blk->u_status_block.status_block_index);
1751 prefetch(&fp->status_blk->c_status_block.status_block_index);
1752 napi_schedule(&bnx2x_fp(bp, fp->index, napi));
1754 return IRQ_HANDLED;
1757 static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
1759 struct bnx2x *bp = netdev_priv(dev_instance);
1760 u16 status = bnx2x_ack_int(bp);
1761 u16 mask;
1762 int i;
1764 /* Return here if interrupt is shared and it's not for us */
1765 if (unlikely(status == 0)) {
1766 DP(NETIF_MSG_INTR, "not our interrupt!\n");
1767 return IRQ_NONE;
1769 DP(NETIF_MSG_INTR, "got an interrupt status 0x%x\n", status);
1771 /* Return here if interrupt is disabled */
1772 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1773 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1774 return IRQ_HANDLED;
1777 #ifdef BNX2X_STOP_ON_ERROR
1778 if (unlikely(bp->panic))
1779 return IRQ_HANDLED;
1780 #endif
1782 for (i = 0; i < BNX2X_NUM_QUEUES(bp); i++) {
1783 struct bnx2x_fastpath *fp = &bp->fp[i];
1785 mask = 0x2 << fp->sb_id;
1786 if (status & mask) {
1787 /* Handle Rx and Tx according to SB id */
1788 prefetch(fp->rx_cons_sb);
1789 prefetch(&fp->status_blk->u_status_block.
1790 status_block_index);
1791 prefetch(fp->tx_cons_sb);
1792 prefetch(&fp->status_blk->c_status_block.
1793 status_block_index);
1794 napi_schedule(&bnx2x_fp(bp, fp->index, napi));
1795 status &= ~mask;
1799 #ifdef BCM_CNIC
1800 mask = 0x2 << CNIC_SB_ID(bp);
1801 if (status & (mask | 0x1)) {
1802 struct cnic_ops *c_ops = NULL;
1804 rcu_read_lock();
1805 c_ops = rcu_dereference(bp->cnic_ops);
1806 if (c_ops)
1807 c_ops->cnic_handler(bp->cnic_data, NULL);
1808 rcu_read_unlock();
1810 status &= ~mask;
1812 #endif
1814 if (unlikely(status & 0x1)) {
1815 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
1817 status &= ~0x1;
1818 if (!status)
1819 return IRQ_HANDLED;
1822 if (status)
1823 DP(NETIF_MSG_INTR, "got an unknown interrupt! (status %u)\n",
1824 status);
1826 return IRQ_HANDLED;
1829 /* end of fast path */
1831 static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);
1833 /* Link */
1836 * General service functions
1839 static int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
1841 u32 lock_status;
1842 u32 resource_bit = (1 << resource);
1843 int func = BP_FUNC(bp);
1844 u32 hw_lock_control_reg;
1845 int cnt;
1847 /* Validating that the resource is within range */
1848 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1849 DP(NETIF_MSG_HW,
1850 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1851 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1852 return -EINVAL;
1855 if (func <= 5) {
1856 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1857 } else {
1858 hw_lock_control_reg =
1859 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1862 /* Validating that the resource is not already taken */
1863 lock_status = REG_RD(bp, hw_lock_control_reg);
1864 if (lock_status & resource_bit) {
1865 DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
1866 lock_status, resource_bit);
1867 return -EEXIST;
1870 /* Try for 5 second every 5ms */
1871 for (cnt = 0; cnt < 1000; cnt++) {
1872 /* Try to acquire the lock */
1873 REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
1874 lock_status = REG_RD(bp, hw_lock_control_reg);
1875 if (lock_status & resource_bit)
1876 return 0;
1878 msleep(5);
1880 DP(NETIF_MSG_HW, "Timeout\n");
1881 return -EAGAIN;
1884 static int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
1886 u32 lock_status;
1887 u32 resource_bit = (1 << resource);
1888 int func = BP_FUNC(bp);
1889 u32 hw_lock_control_reg;
1891 /* Validating that the resource is within range */
1892 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1893 DP(NETIF_MSG_HW,
1894 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1895 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1896 return -EINVAL;
1899 if (func <= 5) {
1900 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1901 } else {
1902 hw_lock_control_reg =
1903 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1906 /* Validating that the resource is currently taken */
1907 lock_status = REG_RD(bp, hw_lock_control_reg);
1908 if (!(lock_status & resource_bit)) {
1909 DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
1910 lock_status, resource_bit);
1911 return -EFAULT;
1914 REG_WR(bp, hw_lock_control_reg, resource_bit);
1915 return 0;
1918 /* HW Lock for shared dual port PHYs */
1919 static void bnx2x_acquire_phy_lock(struct bnx2x *bp)
1921 mutex_lock(&bp->port.phy_mutex);
1923 if (bp->port.need_hw_lock)
1924 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
1927 static void bnx2x_release_phy_lock(struct bnx2x *bp)
1929 if (bp->port.need_hw_lock)
1930 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
1932 mutex_unlock(&bp->port.phy_mutex);
1935 int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
1937 /* The GPIO should be swapped if swap register is set and active */
1938 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1939 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1940 int gpio_shift = gpio_num +
1941 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1942 u32 gpio_mask = (1 << gpio_shift);
1943 u32 gpio_reg;
1944 int value;
1946 if (gpio_num > MISC_REGISTERS_GPIO_3) {
1947 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1948 return -EINVAL;
1951 /* read GPIO value */
1952 gpio_reg = REG_RD(bp, MISC_REG_GPIO);
1954 /* get the requested pin value */
1955 if ((gpio_reg & gpio_mask) == gpio_mask)
1956 value = 1;
1957 else
1958 value = 0;
1960 DP(NETIF_MSG_LINK, "pin %d value 0x%x\n", gpio_num, value);
1962 return value;
1965 int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
1967 /* The GPIO should be swapped if swap register is set and active */
1968 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1969 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1970 int gpio_shift = gpio_num +
1971 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1972 u32 gpio_mask = (1 << gpio_shift);
1973 u32 gpio_reg;
1975 if (gpio_num > MISC_REGISTERS_GPIO_3) {
1976 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1977 return -EINVAL;
1980 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1981 /* read GPIO and mask except the float bits */
1982 gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
1984 switch (mode) {
1985 case MISC_REGISTERS_GPIO_OUTPUT_LOW:
1986 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
1987 gpio_num, gpio_shift);
1988 /* clear FLOAT and set CLR */
1989 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1990 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
1991 break;
1993 case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
1994 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
1995 gpio_num, gpio_shift);
1996 /* clear FLOAT and set SET */
1997 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1998 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
1999 break;
2001 case MISC_REGISTERS_GPIO_INPUT_HI_Z:
2002 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
2003 gpio_num, gpio_shift);
2004 /* set FLOAT */
2005 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
2006 break;
2008 default:
2009 break;
2012 REG_WR(bp, MISC_REG_GPIO, gpio_reg);
2013 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2015 return 0;
2018 int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
2020 /* The GPIO should be swapped if swap register is set and active */
2021 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
2022 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
2023 int gpio_shift = gpio_num +
2024 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
2025 u32 gpio_mask = (1 << gpio_shift);
2026 u32 gpio_reg;
2028 if (gpio_num > MISC_REGISTERS_GPIO_3) {
2029 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
2030 return -EINVAL;
2033 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2034 /* read GPIO int */
2035 gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);
2037 switch (mode) {
2038 case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
2039 DP(NETIF_MSG_LINK, "Clear GPIO INT %d (shift %d) -> "
2040 "output low\n", gpio_num, gpio_shift);
2041 /* clear SET and set CLR */
2042 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
2043 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
2044 break;
2046 case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
2047 DP(NETIF_MSG_LINK, "Set GPIO INT %d (shift %d) -> "
2048 "output high\n", gpio_num, gpio_shift);
2049 /* clear CLR and set SET */
2050 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
2051 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
2052 break;
2054 default:
2055 break;
2058 REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
2059 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2061 return 0;
2064 static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
2066 u32 spio_mask = (1 << spio_num);
2067 u32 spio_reg;
2069 if ((spio_num < MISC_REGISTERS_SPIO_4) ||
2070 (spio_num > MISC_REGISTERS_SPIO_7)) {
2071 BNX2X_ERR("Invalid SPIO %d\n", spio_num);
2072 return -EINVAL;
2075 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
2076 /* read SPIO and mask except the float bits */
2077 spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);
2079 switch (mode) {
2080 case MISC_REGISTERS_SPIO_OUTPUT_LOW:
2081 DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
2082 /* clear FLOAT and set CLR */
2083 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2084 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
2085 break;
2087 case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
2088 DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
2089 /* clear FLOAT and set SET */
2090 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2091 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
2092 break;
2094 case MISC_REGISTERS_SPIO_INPUT_HI_Z:
2095 DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
2096 /* set FLOAT */
2097 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2098 break;
2100 default:
2101 break;
2104 REG_WR(bp, MISC_REG_SPIO, spio_reg);
2105 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
2107 return 0;
2110 static void bnx2x_calc_fc_adv(struct bnx2x *bp)
2112 switch (bp->link_vars.ieee_fc &
2113 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
2114 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
2115 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
2116 ADVERTISED_Pause);
2117 break;
2119 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
2120 bp->port.advertising |= (ADVERTISED_Asym_Pause |
2121 ADVERTISED_Pause);
2122 break;
2124 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
2125 bp->port.advertising |= ADVERTISED_Asym_Pause;
2126 break;
2128 default:
2129 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
2130 ADVERTISED_Pause);
2131 break;
2135 static void bnx2x_link_report(struct bnx2x *bp)
2137 if (bp->flags & MF_FUNC_DIS) {
2138 netif_carrier_off(bp->dev);
2139 printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
2140 return;
2143 if (bp->link_vars.link_up) {
2144 u16 line_speed;
2146 if (bp->state == BNX2X_STATE_OPEN)
2147 netif_carrier_on(bp->dev);
2148 printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);
2150 line_speed = bp->link_vars.line_speed;
2151 if (IS_E1HMF(bp)) {
2152 u16 vn_max_rate;
2154 vn_max_rate =
2155 ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
2156 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
2157 if (vn_max_rate < line_speed)
2158 line_speed = vn_max_rate;
2160 printk("%d Mbps ", line_speed);
2162 if (bp->link_vars.duplex == DUPLEX_FULL)
2163 printk("full duplex");
2164 else
2165 printk("half duplex");
2167 if (bp->link_vars.flow_ctrl != BNX2X_FLOW_CTRL_NONE) {
2168 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) {
2169 printk(", receive ");
2170 if (bp->link_vars.flow_ctrl &
2171 BNX2X_FLOW_CTRL_TX)
2172 printk("& transmit ");
2173 } else {
2174 printk(", transmit ");
2176 printk("flow control ON");
2178 printk("\n");
2180 } else { /* link_down */
2181 netif_carrier_off(bp->dev);
2182 printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
2186 static u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
2188 if (!BP_NOMCP(bp)) {
2189 u8 rc;
2191 /* Initialize link parameters structure variables */
2192 /* It is recommended to turn off RX FC for jumbo frames
2193 for better performance */
2194 if (bp->dev->mtu > 5000)
2195 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
2196 else
2197 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
2199 bnx2x_acquire_phy_lock(bp);
2201 if (load_mode == LOAD_DIAG)
2202 bp->link_params.loopback_mode = LOOPBACK_XGXS_10;
2204 rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
2206 bnx2x_release_phy_lock(bp);
2208 bnx2x_calc_fc_adv(bp);
2210 if (CHIP_REV_IS_SLOW(bp) && bp->link_vars.link_up) {
2211 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2212 bnx2x_link_report(bp);
2215 return rc;
2217 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
2218 return -EINVAL;
2221 static void bnx2x_link_set(struct bnx2x *bp)
2223 if (!BP_NOMCP(bp)) {
2224 bnx2x_acquire_phy_lock(bp);
2225 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
2226 bnx2x_release_phy_lock(bp);
2228 bnx2x_calc_fc_adv(bp);
2229 } else
2230 BNX2X_ERR("Bootcode is missing - can not set link\n");
2233 static void bnx2x__link_reset(struct bnx2x *bp)
2235 if (!BP_NOMCP(bp)) {
2236 bnx2x_acquire_phy_lock(bp);
2237 bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
2238 bnx2x_release_phy_lock(bp);
2239 } else
2240 BNX2X_ERR("Bootcode is missing - can not reset link\n");
2243 static u8 bnx2x_link_test(struct bnx2x *bp)
2245 u8 rc;
2247 bnx2x_acquire_phy_lock(bp);
2248 rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
2249 bnx2x_release_phy_lock(bp);
2251 return rc;
2254 static void bnx2x_init_port_minmax(struct bnx2x *bp)
2256 u32 r_param = bp->link_vars.line_speed / 8;
2257 u32 fair_periodic_timeout_usec;
2258 u32 t_fair;
2260 memset(&(bp->cmng.rs_vars), 0,
2261 sizeof(struct rate_shaping_vars_per_port));
2262 memset(&(bp->cmng.fair_vars), 0, sizeof(struct fairness_vars_per_port));
2264 /* 100 usec in SDM ticks = 25 since each tick is 4 usec */
2265 bp->cmng.rs_vars.rs_periodic_timeout = RS_PERIODIC_TIMEOUT_USEC / 4;
2267 /* this is the threshold below which no timer arming will occur
2268 1.25 coefficient is for the threshold to be a little bigger
2269 than the real time, to compensate for timer in-accuracy */
2270 bp->cmng.rs_vars.rs_threshold =
2271 (RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;
2273 /* resolution of fairness timer */
2274 fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
2275 /* for 10G it is 1000usec. for 1G it is 10000usec. */
2276 t_fair = T_FAIR_COEF / bp->link_vars.line_speed;
2278 /* this is the threshold below which we won't arm the timer anymore */
2279 bp->cmng.fair_vars.fair_threshold = QM_ARB_BYTES;
2281 /* we multiply by 1e3/8 to get bytes/msec.
2282 We don't want the credits to pass a credit
2283 of the t_fair*FAIR_MEM (algorithm resolution) */
2284 bp->cmng.fair_vars.upper_bound = r_param * t_fair * FAIR_MEM;
2285 /* since each tick is 4 usec */
2286 bp->cmng.fair_vars.fairness_timeout = fair_periodic_timeout_usec / 4;
2289 /* Calculates the sum of vn_min_rates.
2290 It's needed for further normalizing of the min_rates.
2291 Returns:
2292 sum of vn_min_rates.
2294 0 - if all the min_rates are 0.
2295 In the later case fainess algorithm should be deactivated.
2296 If not all min_rates are zero then those that are zeroes will be set to 1.
2298 static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
2300 int all_zero = 1;
2301 int port = BP_PORT(bp);
2302 int vn;
2304 bp->vn_weight_sum = 0;
2305 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2306 int func = 2*vn + port;
2307 u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2308 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2309 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2311 /* Skip hidden vns */
2312 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
2313 continue;
2315 /* If min rate is zero - set it to 1 */
2316 if (!vn_min_rate)
2317 vn_min_rate = DEF_MIN_RATE;
2318 else
2319 all_zero = 0;
2321 bp->vn_weight_sum += vn_min_rate;
2324 /* ... only if all min rates are zeros - disable fairness */
2325 if (all_zero) {
2326 bp->cmng.flags.cmng_enables &=
2327 ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
2328 DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
2329 " fairness will be disabled\n");
2330 } else
2331 bp->cmng.flags.cmng_enables |=
2332 CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
2335 static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func)
2337 struct rate_shaping_vars_per_vn m_rs_vn;
2338 struct fairness_vars_per_vn m_fair_vn;
2339 u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2340 u16 vn_min_rate, vn_max_rate;
2341 int i;
2343 /* If function is hidden - set min and max to zeroes */
2344 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
2345 vn_min_rate = 0;
2346 vn_max_rate = 0;
2348 } else {
2349 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2350 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2351 /* If min rate is zero - set it to 1 */
2352 if (!vn_min_rate)
2353 vn_min_rate = DEF_MIN_RATE;
2354 vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
2355 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
2357 DP(NETIF_MSG_IFUP,
2358 "func %d: vn_min_rate %d vn_max_rate %d vn_weight_sum %d\n",
2359 func, vn_min_rate, vn_max_rate, bp->vn_weight_sum);
2361 memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
2362 memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));
2364 /* global vn counter - maximal Mbps for this vn */
2365 m_rs_vn.vn_counter.rate = vn_max_rate;
2367 /* quota - number of bytes transmitted in this period */
2368 m_rs_vn.vn_counter.quota =
2369 (vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;
2371 if (bp->vn_weight_sum) {
2372 /* credit for each period of the fairness algorithm:
2373 number of bytes in T_FAIR (the vn share the port rate).
2374 vn_weight_sum should not be larger than 10000, thus
2375 T_FAIR_COEF / (8 * vn_weight_sum) will always be greater
2376 than zero */
2377 m_fair_vn.vn_credit_delta =
2378 max((u32)(vn_min_rate * (T_FAIR_COEF /
2379 (8 * bp->vn_weight_sum))),
2380 (u32)(bp->cmng.fair_vars.fair_threshold * 2));
2381 DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta=%d\n",
2382 m_fair_vn.vn_credit_delta);
2385 /* Store it to internal memory */
2386 for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
2387 REG_WR(bp, BAR_XSTRORM_INTMEM +
2388 XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
2389 ((u32 *)(&m_rs_vn))[i]);
2391 for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
2392 REG_WR(bp, BAR_XSTRORM_INTMEM +
2393 XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
2394 ((u32 *)(&m_fair_vn))[i]);
2398 /* This function is called upon link interrupt */
2399 static void bnx2x_link_attn(struct bnx2x *bp)
2401 /* Make sure that we are synced with the current statistics */
2402 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2404 bnx2x_link_update(&bp->link_params, &bp->link_vars);
2406 if (bp->link_vars.link_up) {
2408 /* dropless flow control */
2409 if (CHIP_IS_E1H(bp) && bp->dropless_fc) {
2410 int port = BP_PORT(bp);
2411 u32 pause_enabled = 0;
2413 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
2414 pause_enabled = 1;
2416 REG_WR(bp, BAR_USTRORM_INTMEM +
2417 USTORM_ETH_PAUSE_ENABLED_OFFSET(port),
2418 pause_enabled);
2421 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
2422 struct host_port_stats *pstats;
2424 pstats = bnx2x_sp(bp, port_stats);
2425 /* reset old bmac stats */
2426 memset(&(pstats->mac_stx[0]), 0,
2427 sizeof(struct mac_stx));
2429 if (bp->state == BNX2X_STATE_OPEN)
2430 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2433 /* indicate link status */
2434 bnx2x_link_report(bp);
2436 if (IS_E1HMF(bp)) {
2437 int port = BP_PORT(bp);
2438 int func;
2439 int vn;
2441 /* Set the attention towards other drivers on the same port */
2442 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2443 if (vn == BP_E1HVN(bp))
2444 continue;
2446 func = ((vn << 1) | port);
2447 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2448 (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2451 if (bp->link_vars.link_up) {
2452 int i;
2454 /* Init rate shaping and fairness contexts */
2455 bnx2x_init_port_minmax(bp);
2457 for (vn = VN_0; vn < E1HVN_MAX; vn++)
2458 bnx2x_init_vn_minmax(bp, 2*vn + port);
2460 /* Store it to internal memory */
2461 for (i = 0;
2462 i < sizeof(struct cmng_struct_per_port) / 4; i++)
2463 REG_WR(bp, BAR_XSTRORM_INTMEM +
2464 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
2465 ((u32 *)(&bp->cmng))[i]);
2470 static void bnx2x__link_status_update(struct bnx2x *bp)
2472 if ((bp->state != BNX2X_STATE_OPEN) || (bp->flags & MF_FUNC_DIS))
2473 return;
2475 bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
2477 if (bp->link_vars.link_up)
2478 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2479 else
2480 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2482 bnx2x_calc_vn_weight_sum(bp);
2484 /* indicate link status */
2485 bnx2x_link_report(bp);
2488 static void bnx2x_pmf_update(struct bnx2x *bp)
2490 int port = BP_PORT(bp);
2491 u32 val;
2493 bp->port.pmf = 1;
2494 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2496 /* enable nig attention */
2497 val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
2498 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
2499 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
2501 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
2504 /* end of Link */
2506 /* slow path */
2509 * General service functions
2512 /* send the MCP a request, block until there is a reply */
2513 u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
2515 int func = BP_FUNC(bp);
2516 u32 seq = ++bp->fw_seq;
2517 u32 rc = 0;
2518 u32 cnt = 1;
2519 u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
2521 mutex_lock(&bp->fw_mb_mutex);
2522 SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
2523 DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
2525 do {
2526 /* let the FW do it's magic ... */
2527 msleep(delay);
2529 rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);
2531 /* Give the FW up to 5 second (500*10ms) */
2532 } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 500));
2534 DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
2535 cnt*delay, rc, seq);
2537 /* is this a reply to our command? */
2538 if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK))
2539 rc &= FW_MSG_CODE_MASK;
2540 else {
2541 /* FW BUG! */
2542 BNX2X_ERR("FW failed to respond!\n");
2543 bnx2x_fw_dump(bp);
2544 rc = 0;
2546 mutex_unlock(&bp->fw_mb_mutex);
2548 return rc;
2551 static void bnx2x_set_storm_rx_mode(struct bnx2x *bp);
2552 static void bnx2x_set_eth_mac_addr_e1h(struct bnx2x *bp, int set);
2553 static void bnx2x_set_rx_mode(struct net_device *dev);
2555 static void bnx2x_e1h_disable(struct bnx2x *bp)
2557 int port = BP_PORT(bp);
2559 netif_tx_disable(bp->dev);
2561 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
2563 netif_carrier_off(bp->dev);
2566 static void bnx2x_e1h_enable(struct bnx2x *bp)
2568 int port = BP_PORT(bp);
2570 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
2572 /* Tx queue should be only reenabled */
2573 netif_tx_wake_all_queues(bp->dev);
2576 * Should not call netif_carrier_on since it will be called if the link
2577 * is up when checking for link state
2581 static void bnx2x_update_min_max(struct bnx2x *bp)
2583 int port = BP_PORT(bp);
2584 int vn, i;
2586 /* Init rate shaping and fairness contexts */
2587 bnx2x_init_port_minmax(bp);
2589 bnx2x_calc_vn_weight_sum(bp);
2591 for (vn = VN_0; vn < E1HVN_MAX; vn++)
2592 bnx2x_init_vn_minmax(bp, 2*vn + port);
2594 if (bp->port.pmf) {
2595 int func;
2597 /* Set the attention towards other drivers on the same port */
2598 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2599 if (vn == BP_E1HVN(bp))
2600 continue;
2602 func = ((vn << 1) | port);
2603 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2604 (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2607 /* Store it to internal memory */
2608 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
2609 REG_WR(bp, BAR_XSTRORM_INTMEM +
2610 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
2611 ((u32 *)(&bp->cmng))[i]);
2615 static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event)
2617 DP(BNX2X_MSG_MCP, "dcc_event 0x%x\n", dcc_event);
2619 if (dcc_event & DRV_STATUS_DCC_DISABLE_ENABLE_PF) {
2622 * This is the only place besides the function initialization
2623 * where the bp->flags can change so it is done without any
2624 * locks
2626 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
2627 DP(NETIF_MSG_IFDOWN, "mf_cfg function disabled\n");
2628 bp->flags |= MF_FUNC_DIS;
2630 bnx2x_e1h_disable(bp);
2631 } else {
2632 DP(NETIF_MSG_IFUP, "mf_cfg function enabled\n");
2633 bp->flags &= ~MF_FUNC_DIS;
2635 bnx2x_e1h_enable(bp);
2637 dcc_event &= ~DRV_STATUS_DCC_DISABLE_ENABLE_PF;
2639 if (dcc_event & DRV_STATUS_DCC_BANDWIDTH_ALLOCATION) {
2641 bnx2x_update_min_max(bp);
2642 dcc_event &= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION;
2645 /* Report results to MCP */
2646 if (dcc_event)
2647 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_FAILURE);
2648 else
2649 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK);
2652 /* must be called under the spq lock */
2653 static inline struct eth_spe *bnx2x_sp_get_next(struct bnx2x *bp)
2655 struct eth_spe *next_spe = bp->spq_prod_bd;
2657 if (bp->spq_prod_bd == bp->spq_last_bd) {
2658 bp->spq_prod_bd = bp->spq;
2659 bp->spq_prod_idx = 0;
2660 DP(NETIF_MSG_TIMER, "end of spq\n");
2661 } else {
2662 bp->spq_prod_bd++;
2663 bp->spq_prod_idx++;
2665 return next_spe;
2668 /* must be called under the spq lock */
2669 static inline void bnx2x_sp_prod_update(struct bnx2x *bp)
2671 int func = BP_FUNC(bp);
2673 /* Make sure that BD data is updated before writing the producer */
2674 wmb();
2676 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
2677 bp->spq_prod_idx);
2678 mmiowb();
2681 /* the slow path queue is odd since completions arrive on the fastpath ring */
2682 static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
2683 u32 data_hi, u32 data_lo, int common)
2685 struct eth_spe *spe;
2687 DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
2688 "SPQE (%x:%x) command %d hw_cid %x data (%x:%x) left %x\n",
2689 (u32)U64_HI(bp->spq_mapping), (u32)(U64_LO(bp->spq_mapping) +
2690 (void *)bp->spq_prod_bd - (void *)bp->spq), command,
2691 HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);
2693 #ifdef BNX2X_STOP_ON_ERROR
2694 if (unlikely(bp->panic))
2695 return -EIO;
2696 #endif
2698 spin_lock_bh(&bp->spq_lock);
2700 if (!bp->spq_left) {
2701 BNX2X_ERR("BUG! SPQ ring full!\n");
2702 spin_unlock_bh(&bp->spq_lock);
2703 bnx2x_panic();
2704 return -EBUSY;
2707 spe = bnx2x_sp_get_next(bp);
2709 /* CID needs port number to be encoded int it */
2710 spe->hdr.conn_and_cmd_data =
2711 cpu_to_le32(((command << SPE_HDR_CMD_ID_SHIFT) |
2712 HW_CID(bp, cid)));
2713 spe->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
2714 if (common)
2715 spe->hdr.type |=
2716 cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));
2718 spe->data.mac_config_addr.hi = cpu_to_le32(data_hi);
2719 spe->data.mac_config_addr.lo = cpu_to_le32(data_lo);
2721 bp->spq_left--;
2723 bnx2x_sp_prod_update(bp);
2724 spin_unlock_bh(&bp->spq_lock);
2725 return 0;
2728 /* acquire split MCP access lock register */
2729 static int bnx2x_acquire_alr(struct bnx2x *bp)
2731 u32 i, j, val;
2732 int rc = 0;
2734 might_sleep();
2735 i = 100;
2736 for (j = 0; j < i*10; j++) {
2737 val = (1UL << 31);
2738 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2739 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
2740 if (val & (1L << 31))
2741 break;
2743 msleep(5);
2745 if (!(val & (1L << 31))) {
2746 BNX2X_ERR("Cannot acquire MCP access lock register\n");
2747 rc = -EBUSY;
2750 return rc;
2753 /* release split MCP access lock register */
2754 static void bnx2x_release_alr(struct bnx2x *bp)
2756 u32 val = 0;
2758 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2761 static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
2763 struct host_def_status_block *def_sb = bp->def_status_blk;
2764 u16 rc = 0;
2766 barrier(); /* status block is written to by the chip */
2767 if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
2768 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
2769 rc |= 1;
2771 if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
2772 bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
2773 rc |= 2;
2775 if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
2776 bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
2777 rc |= 4;
2779 if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
2780 bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
2781 rc |= 8;
2783 if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
2784 bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
2785 rc |= 16;
2787 return rc;
2791 * slow path service functions
2794 static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
2796 int port = BP_PORT(bp);
2797 u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
2798 COMMAND_REG_ATTN_BITS_SET);
2799 u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2800 MISC_REG_AEU_MASK_ATTN_FUNC_0;
2801 u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
2802 NIG_REG_MASK_INTERRUPT_PORT0;
2803 u32 aeu_mask;
2804 u32 nig_mask = 0;
2806 if (bp->attn_state & asserted)
2807 BNX2X_ERR("IGU ERROR\n");
2809 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2810 aeu_mask = REG_RD(bp, aeu_addr);
2812 DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n",
2813 aeu_mask, asserted);
2814 aeu_mask &= ~(asserted & 0xff);
2815 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
2817 REG_WR(bp, aeu_addr, aeu_mask);
2818 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2820 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2821 bp->attn_state |= asserted;
2822 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2824 if (asserted & ATTN_HARD_WIRED_MASK) {
2825 if (asserted & ATTN_NIG_FOR_FUNC) {
2827 bnx2x_acquire_phy_lock(bp);
2829 /* save nig interrupt mask */
2830 nig_mask = REG_RD(bp, nig_int_mask_addr);
2831 REG_WR(bp, nig_int_mask_addr, 0);
2833 bnx2x_link_attn(bp);
2835 /* handle unicore attn? */
2837 if (asserted & ATTN_SW_TIMER_4_FUNC)
2838 DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
2840 if (asserted & GPIO_2_FUNC)
2841 DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
2843 if (asserted & GPIO_3_FUNC)
2844 DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
2846 if (asserted & GPIO_4_FUNC)
2847 DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
2849 if (port == 0) {
2850 if (asserted & ATTN_GENERAL_ATTN_1) {
2851 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
2852 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
2854 if (asserted & ATTN_GENERAL_ATTN_2) {
2855 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
2856 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
2858 if (asserted & ATTN_GENERAL_ATTN_3) {
2859 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
2860 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
2862 } else {
2863 if (asserted & ATTN_GENERAL_ATTN_4) {
2864 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
2865 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
2867 if (asserted & ATTN_GENERAL_ATTN_5) {
2868 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
2869 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
2871 if (asserted & ATTN_GENERAL_ATTN_6) {
2872 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
2873 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
2877 } /* if hardwired */
2879 DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2880 asserted, hc_addr);
2881 REG_WR(bp, hc_addr, asserted);
2883 /* now set back the mask */
2884 if (asserted & ATTN_NIG_FOR_FUNC) {
2885 REG_WR(bp, nig_int_mask_addr, nig_mask);
2886 bnx2x_release_phy_lock(bp);
2890 static inline void bnx2x_fan_failure(struct bnx2x *bp)
2892 int port = BP_PORT(bp);
2894 /* mark the failure */
2895 bp->link_params.ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
2896 bp->link_params.ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
2897 SHMEM_WR(bp, dev_info.port_hw_config[port].external_phy_config,
2898 bp->link_params.ext_phy_config);
2900 /* log the failure */
2901 printk(KERN_ERR PFX "Fan Failure on Network Controller %s has caused"
2902 " the driver to shutdown the card to prevent permanent"
2903 " damage. Please contact Dell Support for assistance\n",
2904 bp->dev->name);
2907 static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
2909 int port = BP_PORT(bp);
2910 int reg_offset;
2911 u32 val, swap_val, swap_override;
2913 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
2914 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
2916 if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
2918 val = REG_RD(bp, reg_offset);
2919 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
2920 REG_WR(bp, reg_offset, val);
2922 BNX2X_ERR("SPIO5 hw attention\n");
2924 /* Fan failure attention */
2925 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
2926 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
2927 /* Low power mode is controlled by GPIO 2 */
2928 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
2929 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2930 /* The PHY reset is controlled by GPIO 1 */
2931 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2932 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2933 break;
2935 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
2936 /* The PHY reset is controlled by GPIO 1 */
2937 /* fake the port number to cancel the swap done in
2938 set_gpio() */
2939 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
2940 swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
2941 port = (swap_val && swap_override) ^ 1;
2942 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2943 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2944 break;
2946 default:
2947 break;
2949 bnx2x_fan_failure(bp);
2952 if (attn & (AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 |
2953 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1)) {
2954 bnx2x_acquire_phy_lock(bp);
2955 bnx2x_handle_module_detect_int(&bp->link_params);
2956 bnx2x_release_phy_lock(bp);
2959 if (attn & HW_INTERRUT_ASSERT_SET_0) {
2961 val = REG_RD(bp, reg_offset);
2962 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
2963 REG_WR(bp, reg_offset, val);
2965 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
2966 (u32)(attn & HW_INTERRUT_ASSERT_SET_0));
2967 bnx2x_panic();
2971 static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
2973 u32 val;
2975 if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {
2977 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
2978 BNX2X_ERR("DB hw attention 0x%x\n", val);
2979 /* DORQ discard attention */
2980 if (val & 0x2)
2981 BNX2X_ERR("FATAL error from DORQ\n");
2984 if (attn & HW_INTERRUT_ASSERT_SET_1) {
2986 int port = BP_PORT(bp);
2987 int reg_offset;
2989 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
2990 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
2992 val = REG_RD(bp, reg_offset);
2993 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
2994 REG_WR(bp, reg_offset, val);
2996 BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
2997 (u32)(attn & HW_INTERRUT_ASSERT_SET_1));
2998 bnx2x_panic();
3002 static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
3004 u32 val;
3006 if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
3008 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
3009 BNX2X_ERR("CFC hw attention 0x%x\n", val);
3010 /* CFC error attention */
3011 if (val & 0x2)
3012 BNX2X_ERR("FATAL error from CFC\n");
3015 if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
3017 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
3018 BNX2X_ERR("PXP hw attention 0x%x\n", val);
3019 /* RQ_USDMDP_FIFO_OVERFLOW */
3020 if (val & 0x18000)
3021 BNX2X_ERR("FATAL error from PXP\n");
3024 if (attn & HW_INTERRUT_ASSERT_SET_2) {
3026 int port = BP_PORT(bp);
3027 int reg_offset;
3029 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
3030 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
3032 val = REG_RD(bp, reg_offset);
3033 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
3034 REG_WR(bp, reg_offset, val);
3036 BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
3037 (u32)(attn & HW_INTERRUT_ASSERT_SET_2));
3038 bnx2x_panic();
3042 static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
3044 u32 val;
3046 if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
3048 if (attn & BNX2X_PMF_LINK_ASSERT) {
3049 int func = BP_FUNC(bp);
3051 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
3052 bp->mf_config = SHMEM_RD(bp,
3053 mf_cfg.func_mf_config[func].config);
3054 val = SHMEM_RD(bp, func_mb[func].drv_status);
3055 if (val & DRV_STATUS_DCC_EVENT_MASK)
3056 bnx2x_dcc_event(bp,
3057 (val & DRV_STATUS_DCC_EVENT_MASK));
3058 bnx2x__link_status_update(bp);
3059 if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF))
3060 bnx2x_pmf_update(bp);
3062 } else if (attn & BNX2X_MC_ASSERT_BITS) {
3064 BNX2X_ERR("MC assert!\n");
3065 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
3066 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
3067 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
3068 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
3069 bnx2x_panic();
3071 } else if (attn & BNX2X_MCP_ASSERT) {
3073 BNX2X_ERR("MCP assert!\n");
3074 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
3075 bnx2x_fw_dump(bp);
3077 } else
3078 BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
3081 if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
3082 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
3083 if (attn & BNX2X_GRC_TIMEOUT) {
3084 val = CHIP_IS_E1H(bp) ?
3085 REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
3086 BNX2X_ERR("GRC time-out 0x%08x\n", val);
3088 if (attn & BNX2X_GRC_RSV) {
3089 val = CHIP_IS_E1H(bp) ?
3090 REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
3091 BNX2X_ERR("GRC reserved 0x%08x\n", val);
3093 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
3097 static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
3099 struct attn_route attn;
3100 struct attn_route group_mask;
3101 int port = BP_PORT(bp);
3102 int index;
3103 u32 reg_addr;
3104 u32 val;
3105 u32 aeu_mask;
3107 /* need to take HW lock because MCP or other port might also
3108 try to handle this event */
3109 bnx2x_acquire_alr(bp);
3111 attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
3112 attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
3113 attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
3114 attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
3115 DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
3116 attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);
3118 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
3119 if (deasserted & (1 << index)) {
3120 group_mask = bp->attn_group[index];
3122 DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
3123 index, group_mask.sig[0], group_mask.sig[1],
3124 group_mask.sig[2], group_mask.sig[3]);
3126 bnx2x_attn_int_deasserted3(bp,
3127 attn.sig[3] & group_mask.sig[3]);
3128 bnx2x_attn_int_deasserted1(bp,
3129 attn.sig[1] & group_mask.sig[1]);
3130 bnx2x_attn_int_deasserted2(bp,
3131 attn.sig[2] & group_mask.sig[2]);
3132 bnx2x_attn_int_deasserted0(bp,
3133 attn.sig[0] & group_mask.sig[0]);
3135 if ((attn.sig[0] & group_mask.sig[0] &
3136 HW_PRTY_ASSERT_SET_0) ||
3137 (attn.sig[1] & group_mask.sig[1] &
3138 HW_PRTY_ASSERT_SET_1) ||
3139 (attn.sig[2] & group_mask.sig[2] &
3140 HW_PRTY_ASSERT_SET_2))
3141 BNX2X_ERR("FATAL HW block parity attention\n");
3145 bnx2x_release_alr(bp);
3147 reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);
3149 val = ~deasserted;
3150 DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
3151 val, reg_addr);
3152 REG_WR(bp, reg_addr, val);
3154 if (~bp->attn_state & deasserted)
3155 BNX2X_ERR("IGU ERROR\n");
3157 reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
3158 MISC_REG_AEU_MASK_ATTN_FUNC_0;
3160 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
3161 aeu_mask = REG_RD(bp, reg_addr);
3163 DP(NETIF_MSG_HW, "aeu_mask %x newly deasserted %x\n",
3164 aeu_mask, deasserted);
3165 aeu_mask |= (deasserted & 0xff);
3166 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
3168 REG_WR(bp, reg_addr, aeu_mask);
3169 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
3171 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
3172 bp->attn_state &= ~deasserted;
3173 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
3176 static void bnx2x_attn_int(struct bnx2x *bp)
3178 /* read local copy of bits */
3179 u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
3180 attn_bits);
3181 u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
3182 attn_bits_ack);
3183 u32 attn_state = bp->attn_state;
3185 /* look for changed bits */
3186 u32 asserted = attn_bits & ~attn_ack & ~attn_state;
3187 u32 deasserted = ~attn_bits & attn_ack & attn_state;
3189 DP(NETIF_MSG_HW,
3190 "attn_bits %x attn_ack %x asserted %x deasserted %x\n",
3191 attn_bits, attn_ack, asserted, deasserted);
3193 if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
3194 BNX2X_ERR("BAD attention state\n");
3196 /* handle bits that were raised */
3197 if (asserted)
3198 bnx2x_attn_int_asserted(bp, asserted);
3200 if (deasserted)
3201 bnx2x_attn_int_deasserted(bp, deasserted);
3204 static void bnx2x_sp_task(struct work_struct *work)
3206 struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
3207 u16 status;
3210 /* Return here if interrupt is disabled */
3211 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
3212 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
3213 return;
3216 status = bnx2x_update_dsb_idx(bp);
3217 /* if (status == 0) */
3218 /* BNX2X_ERR("spurious slowpath interrupt!\n"); */
3220 DP(NETIF_MSG_INTR, "got a slowpath interrupt (updated %x)\n", status);
3222 /* HW attentions */
3223 if (status & 0x1)
3224 bnx2x_attn_int(bp);
3226 bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, le16_to_cpu(bp->def_att_idx),
3227 IGU_INT_NOP, 1);
3228 bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
3229 IGU_INT_NOP, 1);
3230 bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
3231 IGU_INT_NOP, 1);
3232 bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
3233 IGU_INT_NOP, 1);
3234 bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
3235 IGU_INT_ENABLE, 1);
3239 static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
3241 struct net_device *dev = dev_instance;
3242 struct bnx2x *bp = netdev_priv(dev);
3244 /* Return here if interrupt is disabled */
3245 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
3246 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
3247 return IRQ_HANDLED;
3250 bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, 0, IGU_INT_DISABLE, 0);
3252 #ifdef BNX2X_STOP_ON_ERROR
3253 if (unlikely(bp->panic))
3254 return IRQ_HANDLED;
3255 #endif
3257 #ifdef BCM_CNIC
3259 struct cnic_ops *c_ops;
3261 rcu_read_lock();
3262 c_ops = rcu_dereference(bp->cnic_ops);
3263 if (c_ops)
3264 c_ops->cnic_handler(bp->cnic_data, NULL);
3265 rcu_read_unlock();
3267 #endif
3268 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
3270 return IRQ_HANDLED;
3273 /* end of slow path */
3275 /* Statistics */
3277 /****************************************************************************
3278 * Macros
3279 ****************************************************************************/
3281 /* sum[hi:lo] += add[hi:lo] */
3282 #define ADD_64(s_hi, a_hi, s_lo, a_lo) \
3283 do { \
3284 s_lo += a_lo; \
3285 s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
3286 } while (0)
3288 /* difference = minuend - subtrahend */
3289 #define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
3290 do { \
3291 if (m_lo < s_lo) { \
3292 /* underflow */ \
3293 d_hi = m_hi - s_hi; \
3294 if (d_hi > 0) { \
3295 /* we can 'loan' 1 */ \
3296 d_hi--; \
3297 d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
3298 } else { \
3299 /* m_hi <= s_hi */ \
3300 d_hi = 0; \
3301 d_lo = 0; \
3303 } else { \
3304 /* m_lo >= s_lo */ \
3305 if (m_hi < s_hi) { \
3306 d_hi = 0; \
3307 d_lo = 0; \
3308 } else { \
3309 /* m_hi >= s_hi */ \
3310 d_hi = m_hi - s_hi; \
3311 d_lo = m_lo - s_lo; \
3314 } while (0)
3316 #define UPDATE_STAT64(s, t) \
3317 do { \
3318 DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
3319 diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
3320 pstats->mac_stx[0].t##_hi = new->s##_hi; \
3321 pstats->mac_stx[0].t##_lo = new->s##_lo; \
3322 ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
3323 pstats->mac_stx[1].t##_lo, diff.lo); \
3324 } while (0)
3326 #define UPDATE_STAT64_NIG(s, t) \
3327 do { \
3328 DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
3329 diff.lo, new->s##_lo, old->s##_lo); \
3330 ADD_64(estats->t##_hi, diff.hi, \
3331 estats->t##_lo, diff.lo); \
3332 } while (0)
3334 /* sum[hi:lo] += add */
3335 #define ADD_EXTEND_64(s_hi, s_lo, a) \
3336 do { \
3337 s_lo += a; \
3338 s_hi += (s_lo < a) ? 1 : 0; \
3339 } while (0)
3341 #define UPDATE_EXTEND_STAT(s) \
3342 do { \
3343 ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
3344 pstats->mac_stx[1].s##_lo, \
3345 new->s); \
3346 } while (0)
3348 #define UPDATE_EXTEND_TSTAT(s, t) \
3349 do { \
3350 diff = le32_to_cpu(tclient->s) - le32_to_cpu(old_tclient->s); \
3351 old_tclient->s = tclient->s; \
3352 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3353 } while (0)
3355 #define UPDATE_EXTEND_USTAT(s, t) \
3356 do { \
3357 diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
3358 old_uclient->s = uclient->s; \
3359 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3360 } while (0)
3362 #define UPDATE_EXTEND_XSTAT(s, t) \
3363 do { \
3364 diff = le32_to_cpu(xclient->s) - le32_to_cpu(old_xclient->s); \
3365 old_xclient->s = xclient->s; \
3366 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3367 } while (0)
3369 /* minuend -= subtrahend */
3370 #define SUB_64(m_hi, s_hi, m_lo, s_lo) \
3371 do { \
3372 DIFF_64(m_hi, m_hi, s_hi, m_lo, m_lo, s_lo); \
3373 } while (0)
3375 /* minuend[hi:lo] -= subtrahend */
3376 #define SUB_EXTEND_64(m_hi, m_lo, s) \
3377 do { \
3378 SUB_64(m_hi, 0, m_lo, s); \
3379 } while (0)
3381 #define SUB_EXTEND_USTAT(s, t) \
3382 do { \
3383 diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
3384 SUB_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3385 } while (0)
3388 * General service functions
3391 static inline long bnx2x_hilo(u32 *hiref)
3393 u32 lo = *(hiref + 1);
3394 #if (BITS_PER_LONG == 64)
3395 u32 hi = *hiref;
3397 return HILO_U64(hi, lo);
3398 #else
3399 return lo;
3400 #endif
3404 * Init service functions
3407 static void bnx2x_storm_stats_post(struct bnx2x *bp)
3409 if (!bp->stats_pending) {
3410 struct eth_query_ramrod_data ramrod_data = {0};
3411 int i, rc;
3413 ramrod_data.drv_counter = bp->stats_counter++;
3414 ramrod_data.collect_port = bp->port.pmf ? 1 : 0;
3415 for_each_queue(bp, i)
3416 ramrod_data.ctr_id_vector |= (1 << bp->fp[i].cl_id);
3418 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
3419 ((u32 *)&ramrod_data)[1],
3420 ((u32 *)&ramrod_data)[0], 0);
3421 if (rc == 0) {
3422 /* stats ramrod has it's own slot on the spq */
3423 bp->spq_left++;
3424 bp->stats_pending = 1;
3429 static void bnx2x_hw_stats_post(struct bnx2x *bp)
3431 struct dmae_command *dmae = &bp->stats_dmae;
3432 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3434 *stats_comp = DMAE_COMP_VAL;
3435 if (CHIP_REV_IS_SLOW(bp))
3436 return;
3438 /* loader */
3439 if (bp->executer_idx) {
3440 int loader_idx = PMF_DMAE_C(bp);
3442 memset(dmae, 0, sizeof(struct dmae_command));
3444 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3445 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3446 DMAE_CMD_DST_RESET |
3447 #ifdef __BIG_ENDIAN
3448 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3449 #else
3450 DMAE_CMD_ENDIANITY_DW_SWAP |
3451 #endif
3452 (BP_PORT(bp) ? DMAE_CMD_PORT_1 :
3453 DMAE_CMD_PORT_0) |
3454 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3455 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
3456 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
3457 dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
3458 sizeof(struct dmae_command) *
3459 (loader_idx + 1)) >> 2;
3460 dmae->dst_addr_hi = 0;
3461 dmae->len = sizeof(struct dmae_command) >> 2;
3462 if (CHIP_IS_E1(bp))
3463 dmae->len--;
3464 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
3465 dmae->comp_addr_hi = 0;
3466 dmae->comp_val = 1;
3468 *stats_comp = 0;
3469 bnx2x_post_dmae(bp, dmae, loader_idx);
3471 } else if (bp->func_stx) {
3472 *stats_comp = 0;
3473 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
3477 static int bnx2x_stats_comp(struct bnx2x *bp)
3479 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3480 int cnt = 10;
3482 might_sleep();
3483 while (*stats_comp != DMAE_COMP_VAL) {
3484 if (!cnt) {
3485 BNX2X_ERR("timeout waiting for stats finished\n");
3486 break;
3488 cnt--;
3489 msleep(1);
3491 return 1;
3495 * Statistics service functions
3498 static void bnx2x_stats_pmf_update(struct bnx2x *bp)
3500 struct dmae_command *dmae;
3501 u32 opcode;
3502 int loader_idx = PMF_DMAE_C(bp);
3503 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3505 /* sanity */
3506 if (!IS_E1HMF(bp) || !bp->port.pmf || !bp->port.port_stx) {
3507 BNX2X_ERR("BUG!\n");
3508 return;
3511 bp->executer_idx = 0;
3513 opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3514 DMAE_CMD_C_ENABLE |
3515 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3516 #ifdef __BIG_ENDIAN
3517 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3518 #else
3519 DMAE_CMD_ENDIANITY_DW_SWAP |
3520 #endif
3521 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3522 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3524 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3525 dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
3526 dmae->src_addr_lo = bp->port.port_stx >> 2;
3527 dmae->src_addr_hi = 0;
3528 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3529 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3530 dmae->len = DMAE_LEN32_RD_MAX;
3531 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3532 dmae->comp_addr_hi = 0;
3533 dmae->comp_val = 1;
3535 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3536 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3537 dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
3538 dmae->src_addr_hi = 0;
3539 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
3540 DMAE_LEN32_RD_MAX * 4);
3541 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
3542 DMAE_LEN32_RD_MAX * 4);
3543 dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX;
3544 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3545 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3546 dmae->comp_val = DMAE_COMP_VAL;
3548 *stats_comp = 0;
3549 bnx2x_hw_stats_post(bp);
3550 bnx2x_stats_comp(bp);
3553 static void bnx2x_port_stats_init(struct bnx2x *bp)
3555 struct dmae_command *dmae;
3556 int port = BP_PORT(bp);
3557 int vn = BP_E1HVN(bp);
3558 u32 opcode;
3559 int loader_idx = PMF_DMAE_C(bp);
3560 u32 mac_addr;
3561 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3563 /* sanity */
3564 if (!bp->link_vars.link_up || !bp->port.pmf) {
3565 BNX2X_ERR("BUG!\n");
3566 return;
3569 bp->executer_idx = 0;
3571 /* MCP */
3572 opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3573 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3574 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3575 #ifdef __BIG_ENDIAN
3576 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3577 #else
3578 DMAE_CMD_ENDIANITY_DW_SWAP |
3579 #endif
3580 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3581 (vn << DMAE_CMD_E1HVN_SHIFT));
3583 if (bp->port.port_stx) {
3585 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3586 dmae->opcode = opcode;
3587 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3588 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3589 dmae->dst_addr_lo = bp->port.port_stx >> 2;
3590 dmae->dst_addr_hi = 0;
3591 dmae->len = sizeof(struct host_port_stats) >> 2;
3592 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3593 dmae->comp_addr_hi = 0;
3594 dmae->comp_val = 1;
3597 if (bp->func_stx) {
3599 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3600 dmae->opcode = opcode;
3601 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3602 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3603 dmae->dst_addr_lo = bp->func_stx >> 2;
3604 dmae->dst_addr_hi = 0;
3605 dmae->len = sizeof(struct host_func_stats) >> 2;
3606 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3607 dmae->comp_addr_hi = 0;
3608 dmae->comp_val = 1;
3611 /* MAC */
3612 opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3613 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3614 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3615 #ifdef __BIG_ENDIAN
3616 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3617 #else
3618 DMAE_CMD_ENDIANITY_DW_SWAP |
3619 #endif
3620 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3621 (vn << DMAE_CMD_E1HVN_SHIFT));
3623 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
3625 mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
3626 NIG_REG_INGRESS_BMAC0_MEM);
3628 /* BIGMAC_REGISTER_TX_STAT_GTPKT ..
3629 BIGMAC_REGISTER_TX_STAT_GTBYT */
3630 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3631 dmae->opcode = opcode;
3632 dmae->src_addr_lo = (mac_addr +
3633 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3634 dmae->src_addr_hi = 0;
3635 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3636 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3637 dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
3638 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3639 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3640 dmae->comp_addr_hi = 0;
3641 dmae->comp_val = 1;
3643 /* BIGMAC_REGISTER_RX_STAT_GR64 ..
3644 BIGMAC_REGISTER_RX_STAT_GRIPJ */
3645 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3646 dmae->opcode = opcode;
3647 dmae->src_addr_lo = (mac_addr +
3648 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3649 dmae->src_addr_hi = 0;
3650 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3651 offsetof(struct bmac_stats, rx_stat_gr64_lo));
3652 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3653 offsetof(struct bmac_stats, rx_stat_gr64_lo));
3654 dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
3655 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3656 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3657 dmae->comp_addr_hi = 0;
3658 dmae->comp_val = 1;
3660 } else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {
3662 mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);
3664 /* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
3665 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3666 dmae->opcode = opcode;
3667 dmae->src_addr_lo = (mac_addr +
3668 EMAC_REG_EMAC_RX_STAT_AC) >> 2;
3669 dmae->src_addr_hi = 0;
3670 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3671 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3672 dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
3673 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3674 dmae->comp_addr_hi = 0;
3675 dmae->comp_val = 1;
3677 /* EMAC_REG_EMAC_RX_STAT_AC_28 */
3678 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3679 dmae->opcode = opcode;
3680 dmae->src_addr_lo = (mac_addr +
3681 EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
3682 dmae->src_addr_hi = 0;
3683 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3684 offsetof(struct emac_stats, rx_stat_falsecarriererrors));
3685 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3686 offsetof(struct emac_stats, rx_stat_falsecarriererrors));
3687 dmae->len = 1;
3688 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3689 dmae->comp_addr_hi = 0;
3690 dmae->comp_val = 1;
3692 /* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
3693 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3694 dmae->opcode = opcode;
3695 dmae->src_addr_lo = (mac_addr +
3696 EMAC_REG_EMAC_TX_STAT_AC) >> 2;
3697 dmae->src_addr_hi = 0;
3698 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3699 offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
3700 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3701 offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
3702 dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
3703 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3704 dmae->comp_addr_hi = 0;
3705 dmae->comp_val = 1;
3708 /* NIG */
3709 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3710 dmae->opcode = opcode;
3711 dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
3712 NIG_REG_STAT0_BRB_DISCARD) >> 2;
3713 dmae->src_addr_hi = 0;
3714 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
3715 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
3716 dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;
3717 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3718 dmae->comp_addr_hi = 0;
3719 dmae->comp_val = 1;
3721 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3722 dmae->opcode = opcode;
3723 dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
3724 NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
3725 dmae->src_addr_hi = 0;
3726 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3727 offsetof(struct nig_stats, egress_mac_pkt0_lo));
3728 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3729 offsetof(struct nig_stats, egress_mac_pkt0_lo));
3730 dmae->len = (2*sizeof(u32)) >> 2;
3731 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3732 dmae->comp_addr_hi = 0;
3733 dmae->comp_val = 1;
3735 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3736 dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3737 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3738 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3739 #ifdef __BIG_ENDIAN
3740 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3741 #else
3742 DMAE_CMD_ENDIANITY_DW_SWAP |
3743 #endif
3744 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3745 (vn << DMAE_CMD_E1HVN_SHIFT));
3746 dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
3747 NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
3748 dmae->src_addr_hi = 0;
3749 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3750 offsetof(struct nig_stats, egress_mac_pkt1_lo));
3751 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3752 offsetof(struct nig_stats, egress_mac_pkt1_lo));
3753 dmae->len = (2*sizeof(u32)) >> 2;
3754 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3755 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3756 dmae->comp_val = DMAE_COMP_VAL;
3758 *stats_comp = 0;
3761 static void bnx2x_func_stats_init(struct bnx2x *bp)
3763 struct dmae_command *dmae = &bp->stats_dmae;
3764 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3766 /* sanity */
3767 if (!bp->func_stx) {
3768 BNX2X_ERR("BUG!\n");
3769 return;
3772 bp->executer_idx = 0;
3773 memset(dmae, 0, sizeof(struct dmae_command));
3775 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3776 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3777 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3778 #ifdef __BIG_ENDIAN
3779 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3780 #else
3781 DMAE_CMD_ENDIANITY_DW_SWAP |
3782 #endif
3783 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3784 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3785 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3786 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3787 dmae->dst_addr_lo = bp->func_stx >> 2;
3788 dmae->dst_addr_hi = 0;
3789 dmae->len = sizeof(struct host_func_stats) >> 2;
3790 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3791 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3792 dmae->comp_val = DMAE_COMP_VAL;
3794 *stats_comp = 0;
3797 static void bnx2x_stats_start(struct bnx2x *bp)
3799 if (bp->port.pmf)
3800 bnx2x_port_stats_init(bp);
3802 else if (bp->func_stx)
3803 bnx2x_func_stats_init(bp);
3805 bnx2x_hw_stats_post(bp);
3806 bnx2x_storm_stats_post(bp);
3809 static void bnx2x_stats_pmf_start(struct bnx2x *bp)
3811 bnx2x_stats_comp(bp);
3812 bnx2x_stats_pmf_update(bp);
3813 bnx2x_stats_start(bp);
3816 static void bnx2x_stats_restart(struct bnx2x *bp)
3818 bnx2x_stats_comp(bp);
3819 bnx2x_stats_start(bp);
3822 static void bnx2x_bmac_stats_update(struct bnx2x *bp)
3824 struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac_stats);
3825 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3826 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3827 struct {
3828 u32 lo;
3829 u32 hi;
3830 } diff;
3832 UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
3833 UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
3834 UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
3835 UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
3836 UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
3837 UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
3838 UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
3839 UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
3840 UPDATE_STAT64(rx_stat_grxpf, rx_stat_bmac_xpf);
3841 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
3842 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
3843 UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
3844 UPDATE_STAT64(tx_stat_gt127,
3845 tx_stat_etherstatspkts65octetsto127octets);
3846 UPDATE_STAT64(tx_stat_gt255,
3847 tx_stat_etherstatspkts128octetsto255octets);
3848 UPDATE_STAT64(tx_stat_gt511,
3849 tx_stat_etherstatspkts256octetsto511octets);
3850 UPDATE_STAT64(tx_stat_gt1023,
3851 tx_stat_etherstatspkts512octetsto1023octets);
3852 UPDATE_STAT64(tx_stat_gt1518,
3853 tx_stat_etherstatspkts1024octetsto1522octets);
3854 UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047);
3855 UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095);
3856 UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216);
3857 UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383);
3858 UPDATE_STAT64(tx_stat_gterr,
3859 tx_stat_dot3statsinternalmactransmiterrors);
3860 UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl);
3862 estats->pause_frames_received_hi =
3863 pstats->mac_stx[1].rx_stat_bmac_xpf_hi;
3864 estats->pause_frames_received_lo =
3865 pstats->mac_stx[1].rx_stat_bmac_xpf_lo;
3867 estats->pause_frames_sent_hi =
3868 pstats->mac_stx[1].tx_stat_outxoffsent_hi;
3869 estats->pause_frames_sent_lo =
3870 pstats->mac_stx[1].tx_stat_outxoffsent_lo;
3873 static void bnx2x_emac_stats_update(struct bnx2x *bp)
3875 struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
3876 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3877 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3879 UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
3880 UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
3881 UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
3882 UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
3883 UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
3884 UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
3885 UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
3886 UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
3887 UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
3888 UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
3889 UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
3890 UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
3891 UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
3892 UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
3893 UPDATE_EXTEND_STAT(tx_stat_outxonsent);
3894 UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
3895 UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
3896 UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
3897 UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
3898 UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
3899 UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
3900 UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
3901 UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
3902 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
3903 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
3904 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
3905 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
3906 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
3907 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
3908 UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
3909 UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);
3911 estats->pause_frames_received_hi =
3912 pstats->mac_stx[1].rx_stat_xonpauseframesreceived_hi;
3913 estats->pause_frames_received_lo =
3914 pstats->mac_stx[1].rx_stat_xonpauseframesreceived_lo;
3915 ADD_64(estats->pause_frames_received_hi,
3916 pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_hi,
3917 estats->pause_frames_received_lo,
3918 pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_lo);
3920 estats->pause_frames_sent_hi =
3921 pstats->mac_stx[1].tx_stat_outxonsent_hi;
3922 estats->pause_frames_sent_lo =
3923 pstats->mac_stx[1].tx_stat_outxonsent_lo;
3924 ADD_64(estats->pause_frames_sent_hi,
3925 pstats->mac_stx[1].tx_stat_outxoffsent_hi,
3926 estats->pause_frames_sent_lo,
3927 pstats->mac_stx[1].tx_stat_outxoffsent_lo);
3930 static int bnx2x_hw_stats_update(struct bnx2x *bp)
3932 struct nig_stats *new = bnx2x_sp(bp, nig_stats);
3933 struct nig_stats *old = &(bp->port.old_nig_stats);
3934 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3935 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3936 struct {
3937 u32 lo;
3938 u32 hi;
3939 } diff;
3940 u32 nig_timer_max;
3942 if (bp->link_vars.mac_type == MAC_TYPE_BMAC)
3943 bnx2x_bmac_stats_update(bp);
3945 else if (bp->link_vars.mac_type == MAC_TYPE_EMAC)
3946 bnx2x_emac_stats_update(bp);
3948 else { /* unreached */
3949 BNX2X_ERR("stats updated by DMAE but no MAC active\n");
3950 return -1;
3953 ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
3954 new->brb_discard - old->brb_discard);
3955 ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
3956 new->brb_truncate - old->brb_truncate);
3958 UPDATE_STAT64_NIG(egress_mac_pkt0,
3959 etherstatspkts1024octetsto1522octets);
3960 UPDATE_STAT64_NIG(egress_mac_pkt1, etherstatspktsover1522octets);
3962 memcpy(old, new, sizeof(struct nig_stats));
3964 memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
3965 sizeof(struct mac_stx));
3966 estats->brb_drop_hi = pstats->brb_drop_hi;
3967 estats->brb_drop_lo = pstats->brb_drop_lo;
3969 pstats->host_port_stats_start = ++pstats->host_port_stats_end;
3971 nig_timer_max = SHMEM_RD(bp, port_mb[BP_PORT(bp)].stat_nig_timer);
3972 if (nig_timer_max != estats->nig_timer_max) {
3973 estats->nig_timer_max = nig_timer_max;
3974 BNX2X_ERR("NIG timer max (%u)\n", estats->nig_timer_max);
3977 return 0;
3980 static int bnx2x_storm_stats_update(struct bnx2x *bp)
3982 struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
3983 struct tstorm_per_port_stats *tport =
3984 &stats->tstorm_common.port_statistics;
3985 struct host_func_stats *fstats = bnx2x_sp(bp, func_stats);
3986 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3987 int i;
3989 memcpy(&(fstats->total_bytes_received_hi),
3990 &(bnx2x_sp(bp, func_stats_base)->total_bytes_received_hi),
3991 sizeof(struct host_func_stats) - 2*sizeof(u32));
3992 estats->error_bytes_received_hi = 0;
3993 estats->error_bytes_received_lo = 0;
3994 estats->etherstatsoverrsizepkts_hi = 0;
3995 estats->etherstatsoverrsizepkts_lo = 0;
3996 estats->no_buff_discard_hi = 0;
3997 estats->no_buff_discard_lo = 0;
3999 for_each_queue(bp, i) {
4000 struct bnx2x_fastpath *fp = &bp->fp[i];
4001 int cl_id = fp->cl_id;
4002 struct tstorm_per_client_stats *tclient =
4003 &stats->tstorm_common.client_statistics[cl_id];
4004 struct tstorm_per_client_stats *old_tclient = &fp->old_tclient;
4005 struct ustorm_per_client_stats *uclient =
4006 &stats->ustorm_common.client_statistics[cl_id];
4007 struct ustorm_per_client_stats *old_uclient = &fp->old_uclient;
4008 struct xstorm_per_client_stats *xclient =
4009 &stats->xstorm_common.client_statistics[cl_id];
4010 struct xstorm_per_client_stats *old_xclient = &fp->old_xclient;
4011 struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
4012 u32 diff;
4014 /* are storm stats valid? */
4015 if ((u16)(le16_to_cpu(xclient->stats_counter) + 1) !=
4016 bp->stats_counter) {
4017 DP(BNX2X_MSG_STATS, "[%d] stats not updated by xstorm"
4018 " xstorm counter (%d) != stats_counter (%d)\n",
4019 i, xclient->stats_counter, bp->stats_counter);
4020 return -1;
4022 if ((u16)(le16_to_cpu(tclient->stats_counter) + 1) !=
4023 bp->stats_counter) {
4024 DP(BNX2X_MSG_STATS, "[%d] stats not updated by tstorm"
4025 " tstorm counter (%d) != stats_counter (%d)\n",
4026 i, tclient->stats_counter, bp->stats_counter);
4027 return -2;
4029 if ((u16)(le16_to_cpu(uclient->stats_counter) + 1) !=
4030 bp->stats_counter) {
4031 DP(BNX2X_MSG_STATS, "[%d] stats not updated by ustorm"
4032 " ustorm counter (%d) != stats_counter (%d)\n",
4033 i, uclient->stats_counter, bp->stats_counter);
4034 return -4;
4037 qstats->total_bytes_received_hi =
4038 le32_to_cpu(tclient->rcv_broadcast_bytes.hi);
4039 qstats->total_bytes_received_lo =
4040 le32_to_cpu(tclient->rcv_broadcast_bytes.lo);
4042 ADD_64(qstats->total_bytes_received_hi,
4043 le32_to_cpu(tclient->rcv_multicast_bytes.hi),
4044 qstats->total_bytes_received_lo,
4045 le32_to_cpu(tclient->rcv_multicast_bytes.lo));
4047 ADD_64(qstats->total_bytes_received_hi,
4048 le32_to_cpu(tclient->rcv_unicast_bytes.hi),
4049 qstats->total_bytes_received_lo,
4050 le32_to_cpu(tclient->rcv_unicast_bytes.lo));
4052 qstats->valid_bytes_received_hi =
4053 qstats->total_bytes_received_hi;
4054 qstats->valid_bytes_received_lo =
4055 qstats->total_bytes_received_lo;
4057 qstats->error_bytes_received_hi =
4058 le32_to_cpu(tclient->rcv_error_bytes.hi);
4059 qstats->error_bytes_received_lo =
4060 le32_to_cpu(tclient->rcv_error_bytes.lo);
4062 ADD_64(qstats->total_bytes_received_hi,
4063 qstats->error_bytes_received_hi,
4064 qstats->total_bytes_received_lo,
4065 qstats->error_bytes_received_lo);
4067 UPDATE_EXTEND_TSTAT(rcv_unicast_pkts,
4068 total_unicast_packets_received);
4069 UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
4070 total_multicast_packets_received);
4071 UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
4072 total_broadcast_packets_received);
4073 UPDATE_EXTEND_TSTAT(packets_too_big_discard,
4074 etherstatsoverrsizepkts);
4075 UPDATE_EXTEND_TSTAT(no_buff_discard, no_buff_discard);
4077 SUB_EXTEND_USTAT(ucast_no_buff_pkts,
4078 total_unicast_packets_received);
4079 SUB_EXTEND_USTAT(mcast_no_buff_pkts,
4080 total_multicast_packets_received);
4081 SUB_EXTEND_USTAT(bcast_no_buff_pkts,
4082 total_broadcast_packets_received);
4083 UPDATE_EXTEND_USTAT(ucast_no_buff_pkts, no_buff_discard);
4084 UPDATE_EXTEND_USTAT(mcast_no_buff_pkts, no_buff_discard);
4085 UPDATE_EXTEND_USTAT(bcast_no_buff_pkts, no_buff_discard);
4087 qstats->total_bytes_transmitted_hi =
4088 le32_to_cpu(xclient->unicast_bytes_sent.hi);
4089 qstats->total_bytes_transmitted_lo =
4090 le32_to_cpu(xclient->unicast_bytes_sent.lo);
4092 ADD_64(qstats->total_bytes_transmitted_hi,
4093 le32_to_cpu(xclient->multicast_bytes_sent.hi),
4094 qstats->total_bytes_transmitted_lo,
4095 le32_to_cpu(xclient->multicast_bytes_sent.lo));
4097 ADD_64(qstats->total_bytes_transmitted_hi,
4098 le32_to_cpu(xclient->broadcast_bytes_sent.hi),
4099 qstats->total_bytes_transmitted_lo,
4100 le32_to_cpu(xclient->broadcast_bytes_sent.lo));
4102 UPDATE_EXTEND_XSTAT(unicast_pkts_sent,
4103 total_unicast_packets_transmitted);
4104 UPDATE_EXTEND_XSTAT(multicast_pkts_sent,
4105 total_multicast_packets_transmitted);
4106 UPDATE_EXTEND_XSTAT(broadcast_pkts_sent,
4107 total_broadcast_packets_transmitted);
4109 old_tclient->checksum_discard = tclient->checksum_discard;
4110 old_tclient->ttl0_discard = tclient->ttl0_discard;
4112 ADD_64(fstats->total_bytes_received_hi,
4113 qstats->total_bytes_received_hi,
4114 fstats->total_bytes_received_lo,
4115 qstats->total_bytes_received_lo);
4116 ADD_64(fstats->total_bytes_transmitted_hi,
4117 qstats->total_bytes_transmitted_hi,
4118 fstats->total_bytes_transmitted_lo,
4119 qstats->total_bytes_transmitted_lo);
4120 ADD_64(fstats->total_unicast_packets_received_hi,
4121 qstats->total_unicast_packets_received_hi,
4122 fstats->total_unicast_packets_received_lo,
4123 qstats->total_unicast_packets_received_lo);
4124 ADD_64(fstats->total_multicast_packets_received_hi,
4125 qstats->total_multicast_packets_received_hi,
4126 fstats->total_multicast_packets_received_lo,
4127 qstats->total_multicast_packets_received_lo);
4128 ADD_64(fstats->total_broadcast_packets_received_hi,
4129 qstats->total_broadcast_packets_received_hi,
4130 fstats->total_broadcast_packets_received_lo,
4131 qstats->total_broadcast_packets_received_lo);
4132 ADD_64(fstats->total_unicast_packets_transmitted_hi,
4133 qstats->total_unicast_packets_transmitted_hi,
4134 fstats->total_unicast_packets_transmitted_lo,
4135 qstats->total_unicast_packets_transmitted_lo);
4136 ADD_64(fstats->total_multicast_packets_transmitted_hi,
4137 qstats->total_multicast_packets_transmitted_hi,
4138 fstats->total_multicast_packets_transmitted_lo,
4139 qstats->total_multicast_packets_transmitted_lo);
4140 ADD_64(fstats->total_broadcast_packets_transmitted_hi,
4141 qstats->total_broadcast_packets_transmitted_hi,
4142 fstats->total_broadcast_packets_transmitted_lo,
4143 qstats->total_broadcast_packets_transmitted_lo);
4144 ADD_64(fstats->valid_bytes_received_hi,
4145 qstats->valid_bytes_received_hi,
4146 fstats->valid_bytes_received_lo,
4147 qstats->valid_bytes_received_lo);
4149 ADD_64(estats->error_bytes_received_hi,
4150 qstats->error_bytes_received_hi,
4151 estats->error_bytes_received_lo,
4152 qstats->error_bytes_received_lo);
4153 ADD_64(estats->etherstatsoverrsizepkts_hi,
4154 qstats->etherstatsoverrsizepkts_hi,
4155 estats->etherstatsoverrsizepkts_lo,
4156 qstats->etherstatsoverrsizepkts_lo);
4157 ADD_64(estats->no_buff_discard_hi, qstats->no_buff_discard_hi,
4158 estats->no_buff_discard_lo, qstats->no_buff_discard_lo);
4161 ADD_64(fstats->total_bytes_received_hi,
4162 estats->rx_stat_ifhcinbadoctets_hi,
4163 fstats->total_bytes_received_lo,
4164 estats->rx_stat_ifhcinbadoctets_lo);
4166 memcpy(estats, &(fstats->total_bytes_received_hi),
4167 sizeof(struct host_func_stats) - 2*sizeof(u32));
4169 ADD_64(estats->etherstatsoverrsizepkts_hi,
4170 estats->rx_stat_dot3statsframestoolong_hi,
4171 estats->etherstatsoverrsizepkts_lo,
4172 estats->rx_stat_dot3statsframestoolong_lo);
4173 ADD_64(estats->error_bytes_received_hi,
4174 estats->rx_stat_ifhcinbadoctets_hi,
4175 estats->error_bytes_received_lo,
4176 estats->rx_stat_ifhcinbadoctets_lo);
4178 if (bp->port.pmf) {
4179 estats->mac_filter_discard =
4180 le32_to_cpu(tport->mac_filter_discard);
4181 estats->xxoverflow_discard =
4182 le32_to_cpu(tport->xxoverflow_discard);
4183 estats->brb_truncate_discard =
4184 le32_to_cpu(tport->brb_truncate_discard);
4185 estats->mac_discard = le32_to_cpu(tport->mac_discard);
4188 fstats->host_func_stats_start = ++fstats->host_func_stats_end;
4190 bp->stats_pending = 0;
4192 return 0;
4195 static void bnx2x_net_stats_update(struct bnx2x *bp)
4197 struct bnx2x_eth_stats *estats = &bp->eth_stats;
4198 struct net_device_stats *nstats = &bp->dev->stats;
4199 int i;
4201 nstats->rx_packets =
4202 bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
4203 bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
4204 bnx2x_hilo(&estats->total_broadcast_packets_received_hi);
4206 nstats->tx_packets =
4207 bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
4208 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
4209 bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);
4211 nstats->rx_bytes = bnx2x_hilo(&estats->total_bytes_received_hi);
4213 nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);
4215 nstats->rx_dropped = estats->mac_discard;
4216 for_each_queue(bp, i)
4217 nstats->rx_dropped +=
4218 le32_to_cpu(bp->fp[i].old_tclient.checksum_discard);
4220 nstats->tx_dropped = 0;
4222 nstats->multicast =
4223 bnx2x_hilo(&estats->total_multicast_packets_received_hi);
4225 nstats->collisions =
4226 bnx2x_hilo(&estats->tx_stat_etherstatscollisions_hi);
4228 nstats->rx_length_errors =
4229 bnx2x_hilo(&estats->rx_stat_etherstatsundersizepkts_hi) +
4230 bnx2x_hilo(&estats->etherstatsoverrsizepkts_hi);
4231 nstats->rx_over_errors = bnx2x_hilo(&estats->brb_drop_hi) +
4232 bnx2x_hilo(&estats->brb_truncate_hi);
4233 nstats->rx_crc_errors =
4234 bnx2x_hilo(&estats->rx_stat_dot3statsfcserrors_hi);
4235 nstats->rx_frame_errors =
4236 bnx2x_hilo(&estats->rx_stat_dot3statsalignmenterrors_hi);
4237 nstats->rx_fifo_errors = bnx2x_hilo(&estats->no_buff_discard_hi);
4238 nstats->rx_missed_errors = estats->xxoverflow_discard;
4240 nstats->rx_errors = nstats->rx_length_errors +
4241 nstats->rx_over_errors +
4242 nstats->rx_crc_errors +
4243 nstats->rx_frame_errors +
4244 nstats->rx_fifo_errors +
4245 nstats->rx_missed_errors;
4247 nstats->tx_aborted_errors =
4248 bnx2x_hilo(&estats->tx_stat_dot3statslatecollisions_hi) +
4249 bnx2x_hilo(&estats->tx_stat_dot3statsexcessivecollisions_hi);
4250 nstats->tx_carrier_errors =
4251 bnx2x_hilo(&estats->rx_stat_dot3statscarriersenseerrors_hi);
4252 nstats->tx_fifo_errors = 0;
4253 nstats->tx_heartbeat_errors = 0;
4254 nstats->tx_window_errors = 0;
4256 nstats->tx_errors = nstats->tx_aborted_errors +
4257 nstats->tx_carrier_errors +
4258 bnx2x_hilo(&estats->tx_stat_dot3statsinternalmactransmiterrors_hi);
4261 static void bnx2x_drv_stats_update(struct bnx2x *bp)
4263 struct bnx2x_eth_stats *estats = &bp->eth_stats;
4264 int i;
4266 estats->driver_xoff = 0;
4267 estats->rx_err_discard_pkt = 0;
4268 estats->rx_skb_alloc_failed = 0;
4269 estats->hw_csum_err = 0;
4270 for_each_queue(bp, i) {
4271 struct bnx2x_eth_q_stats *qstats = &bp->fp[i].eth_q_stats;
4273 estats->driver_xoff += qstats->driver_xoff;
4274 estats->rx_err_discard_pkt += qstats->rx_err_discard_pkt;
4275 estats->rx_skb_alloc_failed += qstats->rx_skb_alloc_failed;
4276 estats->hw_csum_err += qstats->hw_csum_err;
4280 static void bnx2x_stats_update(struct bnx2x *bp)
4282 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4284 if (*stats_comp != DMAE_COMP_VAL)
4285 return;
4287 if (bp->port.pmf)
4288 bnx2x_hw_stats_update(bp);
4290 if (bnx2x_storm_stats_update(bp) && (bp->stats_pending++ == 3)) {
4291 BNX2X_ERR("storm stats were not updated for 3 times\n");
4292 bnx2x_panic();
4293 return;
4296 bnx2x_net_stats_update(bp);
4297 bnx2x_drv_stats_update(bp);
4299 if (bp->msglevel & NETIF_MSG_TIMER) {
4300 struct bnx2x_fastpath *fp0_rx = bp->fp;
4301 struct bnx2x_fastpath *fp0_tx = bp->fp;
4302 struct tstorm_per_client_stats *old_tclient =
4303 &bp->fp->old_tclient;
4304 struct bnx2x_eth_q_stats *qstats = &bp->fp->eth_q_stats;
4305 struct bnx2x_eth_stats *estats = &bp->eth_stats;
4306 struct net_device_stats *nstats = &bp->dev->stats;
4307 int i;
4309 printk(KERN_DEBUG "%s:\n", bp->dev->name);
4310 printk(KERN_DEBUG " tx avail (%4x) tx hc idx (%x)"
4311 " tx pkt (%lx)\n",
4312 bnx2x_tx_avail(fp0_tx),
4313 le16_to_cpu(*fp0_tx->tx_cons_sb), nstats->tx_packets);
4314 printk(KERN_DEBUG " rx usage (%4x) rx hc idx (%x)"
4315 " rx pkt (%lx)\n",
4316 (u16)(le16_to_cpu(*fp0_rx->rx_cons_sb) -
4317 fp0_rx->rx_comp_cons),
4318 le16_to_cpu(*fp0_rx->rx_cons_sb), nstats->rx_packets);
4319 printk(KERN_DEBUG " %s (Xoff events %u) brb drops %u "
4320 "brb truncate %u\n",
4321 (netif_queue_stopped(bp->dev) ? "Xoff" : "Xon"),
4322 qstats->driver_xoff,
4323 estats->brb_drop_lo, estats->brb_truncate_lo);
4324 printk(KERN_DEBUG "tstats: checksum_discard %u "
4325 "packets_too_big_discard %lu no_buff_discard %lu "
4326 "mac_discard %u mac_filter_discard %u "
4327 "xxovrflow_discard %u brb_truncate_discard %u "
4328 "ttl0_discard %u\n",
4329 le32_to_cpu(old_tclient->checksum_discard),
4330 bnx2x_hilo(&qstats->etherstatsoverrsizepkts_hi),
4331 bnx2x_hilo(&qstats->no_buff_discard_hi),
4332 estats->mac_discard, estats->mac_filter_discard,
4333 estats->xxoverflow_discard, estats->brb_truncate_discard,
4334 le32_to_cpu(old_tclient->ttl0_discard));
4336 for_each_queue(bp, i) {
4337 printk(KERN_DEBUG "[%d]: %lu\t%lu\t%lu\n", i,
4338 bnx2x_fp(bp, i, tx_pkt),
4339 bnx2x_fp(bp, i, rx_pkt),
4340 bnx2x_fp(bp, i, rx_calls));
4344 bnx2x_hw_stats_post(bp);
4345 bnx2x_storm_stats_post(bp);
4348 static void bnx2x_port_stats_stop(struct bnx2x *bp)
4350 struct dmae_command *dmae;
4351 u32 opcode;
4352 int loader_idx = PMF_DMAE_C(bp);
4353 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4355 bp->executer_idx = 0;
4357 opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
4358 DMAE_CMD_C_ENABLE |
4359 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4360 #ifdef __BIG_ENDIAN
4361 DMAE_CMD_ENDIANITY_B_DW_SWAP |
4362 #else
4363 DMAE_CMD_ENDIANITY_DW_SWAP |
4364 #endif
4365 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4366 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
4368 if (bp->port.port_stx) {
4370 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4371 if (bp->func_stx)
4372 dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
4373 else
4374 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
4375 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
4376 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
4377 dmae->dst_addr_lo = bp->port.port_stx >> 2;
4378 dmae->dst_addr_hi = 0;
4379 dmae->len = sizeof(struct host_port_stats) >> 2;
4380 if (bp->func_stx) {
4381 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4382 dmae->comp_addr_hi = 0;
4383 dmae->comp_val = 1;
4384 } else {
4385 dmae->comp_addr_lo =
4386 U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4387 dmae->comp_addr_hi =
4388 U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4389 dmae->comp_val = DMAE_COMP_VAL;
4391 *stats_comp = 0;
4395 if (bp->func_stx) {
4397 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4398 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
4399 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
4400 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
4401 dmae->dst_addr_lo = bp->func_stx >> 2;
4402 dmae->dst_addr_hi = 0;
4403 dmae->len = sizeof(struct host_func_stats) >> 2;
4404 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4405 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4406 dmae->comp_val = DMAE_COMP_VAL;
4408 *stats_comp = 0;
4412 static void bnx2x_stats_stop(struct bnx2x *bp)
4414 int update = 0;
4416 bnx2x_stats_comp(bp);
4418 if (bp->port.pmf)
4419 update = (bnx2x_hw_stats_update(bp) == 0);
4421 update |= (bnx2x_storm_stats_update(bp) == 0);
4423 if (update) {
4424 bnx2x_net_stats_update(bp);
4426 if (bp->port.pmf)
4427 bnx2x_port_stats_stop(bp);
4429 bnx2x_hw_stats_post(bp);
4430 bnx2x_stats_comp(bp);
4434 static void bnx2x_stats_do_nothing(struct bnx2x *bp)
4438 static const struct {
4439 void (*action)(struct bnx2x *bp);
4440 enum bnx2x_stats_state next_state;
4441 } bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
4442 /* state event */
4444 /* DISABLED PMF */ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
4445 /* LINK_UP */ {bnx2x_stats_start, STATS_STATE_ENABLED},
4446 /* UPDATE */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
4447 /* STOP */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
4450 /* ENABLED PMF */ {bnx2x_stats_pmf_start, STATS_STATE_ENABLED},
4451 /* LINK_UP */ {bnx2x_stats_restart, STATS_STATE_ENABLED},
4452 /* UPDATE */ {bnx2x_stats_update, STATS_STATE_ENABLED},
4453 /* STOP */ {bnx2x_stats_stop, STATS_STATE_DISABLED}
4457 static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
4459 enum bnx2x_stats_state state = bp->stats_state;
4461 bnx2x_stats_stm[state][event].action(bp);
4462 bp->stats_state = bnx2x_stats_stm[state][event].next_state;
4464 /* Make sure the state has been "changed" */
4465 smp_wmb();
4467 if ((event != STATS_EVENT_UPDATE) || (bp->msglevel & NETIF_MSG_TIMER))
4468 DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
4469 state, event, bp->stats_state);
4472 static void bnx2x_port_stats_base_init(struct bnx2x *bp)
4474 struct dmae_command *dmae;
4475 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4477 /* sanity */
4478 if (!bp->port.pmf || !bp->port.port_stx) {
4479 BNX2X_ERR("BUG!\n");
4480 return;
4483 bp->executer_idx = 0;
4485 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4486 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
4487 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
4488 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4489 #ifdef __BIG_ENDIAN
4490 DMAE_CMD_ENDIANITY_B_DW_SWAP |
4491 #else
4492 DMAE_CMD_ENDIANITY_DW_SWAP |
4493 #endif
4494 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4495 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
4496 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
4497 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
4498 dmae->dst_addr_lo = bp->port.port_stx >> 2;
4499 dmae->dst_addr_hi = 0;
4500 dmae->len = sizeof(struct host_port_stats) >> 2;
4501 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4502 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4503 dmae->comp_val = DMAE_COMP_VAL;
4505 *stats_comp = 0;
4506 bnx2x_hw_stats_post(bp);
4507 bnx2x_stats_comp(bp);
4510 static void bnx2x_func_stats_base_init(struct bnx2x *bp)
4512 int vn, vn_max = IS_E1HMF(bp) ? E1HVN_MAX : E1VN_MAX;
4513 int port = BP_PORT(bp);
4514 int func;
4515 u32 func_stx;
4517 /* sanity */
4518 if (!bp->port.pmf || !bp->func_stx) {
4519 BNX2X_ERR("BUG!\n");
4520 return;
4523 /* save our func_stx */
4524 func_stx = bp->func_stx;
4526 for (vn = VN_0; vn < vn_max; vn++) {
4527 func = 2*vn + port;
4529 bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
4530 bnx2x_func_stats_init(bp);
4531 bnx2x_hw_stats_post(bp);
4532 bnx2x_stats_comp(bp);
4535 /* restore our func_stx */
4536 bp->func_stx = func_stx;
4539 static void bnx2x_func_stats_base_update(struct bnx2x *bp)
4541 struct dmae_command *dmae = &bp->stats_dmae;
4542 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4544 /* sanity */
4545 if (!bp->func_stx) {
4546 BNX2X_ERR("BUG!\n");
4547 return;
4550 bp->executer_idx = 0;
4551 memset(dmae, 0, sizeof(struct dmae_command));
4553 dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
4554 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
4555 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4556 #ifdef __BIG_ENDIAN
4557 DMAE_CMD_ENDIANITY_B_DW_SWAP |
4558 #else
4559 DMAE_CMD_ENDIANITY_DW_SWAP |
4560 #endif
4561 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4562 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
4563 dmae->src_addr_lo = bp->func_stx >> 2;
4564 dmae->src_addr_hi = 0;
4565 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats_base));
4566 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats_base));
4567 dmae->len = sizeof(struct host_func_stats) >> 2;
4568 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4569 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4570 dmae->comp_val = DMAE_COMP_VAL;
4572 *stats_comp = 0;
4573 bnx2x_hw_stats_post(bp);
4574 bnx2x_stats_comp(bp);
4577 static void bnx2x_stats_init(struct bnx2x *bp)
4579 int port = BP_PORT(bp);
4580 int func = BP_FUNC(bp);
4581 int i;
4583 bp->stats_pending = 0;
4584 bp->executer_idx = 0;
4585 bp->stats_counter = 0;
4587 /* port and func stats for management */
4588 if (!BP_NOMCP(bp)) {
4589 bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
4590 bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
4592 } else {
4593 bp->port.port_stx = 0;
4594 bp->func_stx = 0;
4596 DP(BNX2X_MSG_STATS, "port_stx 0x%x func_stx 0x%x\n",
4597 bp->port.port_stx, bp->func_stx);
4599 /* port stats */
4600 memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
4601 bp->port.old_nig_stats.brb_discard =
4602 REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
4603 bp->port.old_nig_stats.brb_truncate =
4604 REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
4605 REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
4606 &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
4607 REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
4608 &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);
4610 /* function stats */
4611 for_each_queue(bp, i) {
4612 struct bnx2x_fastpath *fp = &bp->fp[i];
4614 memset(&fp->old_tclient, 0,
4615 sizeof(struct tstorm_per_client_stats));
4616 memset(&fp->old_uclient, 0,
4617 sizeof(struct ustorm_per_client_stats));
4618 memset(&fp->old_xclient, 0,
4619 sizeof(struct xstorm_per_client_stats));
4620 memset(&fp->eth_q_stats, 0, sizeof(struct bnx2x_eth_q_stats));
4623 memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
4624 memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats));
4626 bp->stats_state = STATS_STATE_DISABLED;
4628 if (bp->port.pmf) {
4629 if (bp->port.port_stx)
4630 bnx2x_port_stats_base_init(bp);
4632 if (bp->func_stx)
4633 bnx2x_func_stats_base_init(bp);
4635 } else if (bp->func_stx)
4636 bnx2x_func_stats_base_update(bp);
4639 static void bnx2x_timer(unsigned long data)
4641 struct bnx2x *bp = (struct bnx2x *) data;
4643 if (!netif_running(bp->dev))
4644 return;
4646 if (atomic_read(&bp->intr_sem) != 0)
4647 goto timer_restart;
4649 if (poll) {
4650 struct bnx2x_fastpath *fp = &bp->fp[0];
4651 int rc;
4653 bnx2x_tx_int(fp);
4654 rc = bnx2x_rx_int(fp, 1000);
4657 if (!BP_NOMCP(bp)) {
4658 int func = BP_FUNC(bp);
4659 u32 drv_pulse;
4660 u32 mcp_pulse;
4662 ++bp->fw_drv_pulse_wr_seq;
4663 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
4664 /* TBD - add SYSTEM_TIME */
4665 drv_pulse = bp->fw_drv_pulse_wr_seq;
4666 SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);
4668 mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
4669 MCP_PULSE_SEQ_MASK);
4670 /* The delta between driver pulse and mcp response
4671 * should be 1 (before mcp response) or 0 (after mcp response)
4673 if ((drv_pulse != mcp_pulse) &&
4674 (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
4675 /* someone lost a heartbeat... */
4676 BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
4677 drv_pulse, mcp_pulse);
4681 if (bp->state == BNX2X_STATE_OPEN)
4682 bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
4684 timer_restart:
4685 mod_timer(&bp->timer, jiffies + bp->current_interval);
4688 /* end of Statistics */
4690 /* nic init */
4693 * nic init service functions
4696 static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
4698 int port = BP_PORT(bp);
4700 /* "CSTORM" */
4701 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
4702 CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), 0,
4703 CSTORM_SB_STATUS_BLOCK_U_SIZE / 4);
4704 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
4705 CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), 0,
4706 CSTORM_SB_STATUS_BLOCK_C_SIZE / 4);
4709 static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
4710 dma_addr_t mapping, int sb_id)
4712 int port = BP_PORT(bp);
4713 int func = BP_FUNC(bp);
4714 int index;
4715 u64 section;
4717 /* USTORM */
4718 section = ((u64)mapping) + offsetof(struct host_status_block,
4719 u_status_block);
4720 sb->u_status_block.status_block_id = sb_id;
4722 REG_WR(bp, BAR_CSTRORM_INTMEM +
4723 CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id), U64_LO(section));
4724 REG_WR(bp, BAR_CSTRORM_INTMEM +
4725 ((CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id)) + 4),
4726 U64_HI(section));
4727 REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_USB_FUNC_OFF +
4728 CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), func);
4730 for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
4731 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4732 CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id, index), 1);
4734 /* CSTORM */
4735 section = ((u64)mapping) + offsetof(struct host_status_block,
4736 c_status_block);
4737 sb->c_status_block.status_block_id = sb_id;
4739 REG_WR(bp, BAR_CSTRORM_INTMEM +
4740 CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id), U64_LO(section));
4741 REG_WR(bp, BAR_CSTRORM_INTMEM +
4742 ((CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id)) + 4),
4743 U64_HI(section));
4744 REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
4745 CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), func);
4747 for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
4748 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4749 CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id, index), 1);
4751 bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4754 static void bnx2x_zero_def_sb(struct bnx2x *bp)
4756 int func = BP_FUNC(bp);
4758 bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY +
4759 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4760 sizeof(struct tstorm_def_status_block)/4);
4761 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
4762 CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), 0,
4763 sizeof(struct cstorm_def_status_block_u)/4);
4764 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
4765 CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), 0,
4766 sizeof(struct cstorm_def_status_block_c)/4);
4767 bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY +
4768 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4769 sizeof(struct xstorm_def_status_block)/4);
4772 static void bnx2x_init_def_sb(struct bnx2x *bp,
4773 struct host_def_status_block *def_sb,
4774 dma_addr_t mapping, int sb_id)
4776 int port = BP_PORT(bp);
4777 int func = BP_FUNC(bp);
4778 int index, val, reg_offset;
4779 u64 section;
4781 /* ATTN */
4782 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4783 atten_status_block);
4784 def_sb->atten_status_block.status_block_id = sb_id;
4786 bp->attn_state = 0;
4788 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
4789 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
4791 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
4792 bp->attn_group[index].sig[0] = REG_RD(bp,
4793 reg_offset + 0x10*index);
4794 bp->attn_group[index].sig[1] = REG_RD(bp,
4795 reg_offset + 0x4 + 0x10*index);
4796 bp->attn_group[index].sig[2] = REG_RD(bp,
4797 reg_offset + 0x8 + 0x10*index);
4798 bp->attn_group[index].sig[3] = REG_RD(bp,
4799 reg_offset + 0xc + 0x10*index);
4802 reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
4803 HC_REG_ATTN_MSG0_ADDR_L);
4805 REG_WR(bp, reg_offset, U64_LO(section));
4806 REG_WR(bp, reg_offset + 4, U64_HI(section));
4808 reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);
4810 val = REG_RD(bp, reg_offset);
4811 val |= sb_id;
4812 REG_WR(bp, reg_offset, val);
4814 /* USTORM */
4815 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4816 u_def_status_block);
4817 def_sb->u_def_status_block.status_block_id = sb_id;
4819 REG_WR(bp, BAR_CSTRORM_INTMEM +
4820 CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func), U64_LO(section));
4821 REG_WR(bp, BAR_CSTRORM_INTMEM +
4822 ((CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func)) + 4),
4823 U64_HI(section));
4824 REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_USB_FUNC_OFF +
4825 CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), func);
4827 for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
4828 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4829 CSTORM_DEF_SB_HC_DISABLE_U_OFFSET(func, index), 1);
4831 /* CSTORM */
4832 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4833 c_def_status_block);
4834 def_sb->c_def_status_block.status_block_id = sb_id;
4836 REG_WR(bp, BAR_CSTRORM_INTMEM +
4837 CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func), U64_LO(section));
4838 REG_WR(bp, BAR_CSTRORM_INTMEM +
4839 ((CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func)) + 4),
4840 U64_HI(section));
4841 REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
4842 CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), func);
4844 for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
4845 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4846 CSTORM_DEF_SB_HC_DISABLE_C_OFFSET(func, index), 1);
4848 /* TSTORM */
4849 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4850 t_def_status_block);
4851 def_sb->t_def_status_block.status_block_id = sb_id;
4853 REG_WR(bp, BAR_TSTRORM_INTMEM +
4854 TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4855 REG_WR(bp, BAR_TSTRORM_INTMEM +
4856 ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4857 U64_HI(section));
4858 REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
4859 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4861 for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
4862 REG_WR16(bp, BAR_TSTRORM_INTMEM +
4863 TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4865 /* XSTORM */
4866 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4867 x_def_status_block);
4868 def_sb->x_def_status_block.status_block_id = sb_id;
4870 REG_WR(bp, BAR_XSTRORM_INTMEM +
4871 XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4872 REG_WR(bp, BAR_XSTRORM_INTMEM +
4873 ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4874 U64_HI(section));
4875 REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
4876 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4878 for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
4879 REG_WR16(bp, BAR_XSTRORM_INTMEM +
4880 XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4882 bp->stats_pending = 0;
4883 bp->set_mac_pending = 0;
4885 bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4888 static void bnx2x_update_coalesce(struct bnx2x *bp)
4890 int port = BP_PORT(bp);
4891 int i;
4893 for_each_queue(bp, i) {
4894 int sb_id = bp->fp[i].sb_id;
4896 /* HC_INDEX_U_ETH_RX_CQ_CONS */
4897 REG_WR8(bp, BAR_CSTRORM_INTMEM +
4898 CSTORM_SB_HC_TIMEOUT_U_OFFSET(port, sb_id,
4899 U_SB_ETH_RX_CQ_INDEX),
4900 bp->rx_ticks/(4 * BNX2X_BTR));
4901 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4902 CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id,
4903 U_SB_ETH_RX_CQ_INDEX),
4904 (bp->rx_ticks/(4 * BNX2X_BTR)) ? 0 : 1);
4906 /* HC_INDEX_C_ETH_TX_CQ_CONS */
4907 REG_WR8(bp, BAR_CSTRORM_INTMEM +
4908 CSTORM_SB_HC_TIMEOUT_C_OFFSET(port, sb_id,
4909 C_SB_ETH_TX_CQ_INDEX),
4910 bp->tx_ticks/(4 * BNX2X_BTR));
4911 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4912 CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id,
4913 C_SB_ETH_TX_CQ_INDEX),
4914 (bp->tx_ticks/(4 * BNX2X_BTR)) ? 0 : 1);
4918 static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
4919 struct bnx2x_fastpath *fp, int last)
4921 int i;
4923 for (i = 0; i < last; i++) {
4924 struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]);
4925 struct sk_buff *skb = rx_buf->skb;
4927 if (skb == NULL) {
4928 DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
4929 continue;
4932 if (fp->tpa_state[i] == BNX2X_TPA_START)
4933 pci_unmap_single(bp->pdev,
4934 pci_unmap_addr(rx_buf, mapping),
4935 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
4937 dev_kfree_skb(skb);
4938 rx_buf->skb = NULL;
4942 static void bnx2x_init_rx_rings(struct bnx2x *bp)
4944 int func = BP_FUNC(bp);
4945 int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
4946 ETH_MAX_AGGREGATION_QUEUES_E1H;
4947 u16 ring_prod, cqe_ring_prod;
4948 int i, j;
4950 bp->rx_buf_size = bp->dev->mtu + ETH_OVREHEAD + BNX2X_RX_ALIGN;
4951 DP(NETIF_MSG_IFUP,
4952 "mtu %d rx_buf_size %d\n", bp->dev->mtu, bp->rx_buf_size);
4954 if (bp->flags & TPA_ENABLE_FLAG) {
4956 for_each_queue(bp, j) {
4957 struct bnx2x_fastpath *fp = &bp->fp[j];
4959 for (i = 0; i < max_agg_queues; i++) {
4960 fp->tpa_pool[i].skb =
4961 netdev_alloc_skb(bp->dev, bp->rx_buf_size);
4962 if (!fp->tpa_pool[i].skb) {
4963 BNX2X_ERR("Failed to allocate TPA "
4964 "skb pool for queue[%d] - "
4965 "disabling TPA on this "
4966 "queue!\n", j);
4967 bnx2x_free_tpa_pool(bp, fp, i);
4968 fp->disable_tpa = 1;
4969 break;
4971 pci_unmap_addr_set((struct sw_rx_bd *)
4972 &bp->fp->tpa_pool[i],
4973 mapping, 0);
4974 fp->tpa_state[i] = BNX2X_TPA_STOP;
4979 for_each_queue(bp, j) {
4980 struct bnx2x_fastpath *fp = &bp->fp[j];
4982 fp->rx_bd_cons = 0;
4983 fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
4984 fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX;
4986 /* "next page" elements initialization */
4987 /* SGE ring */
4988 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
4989 struct eth_rx_sge *sge;
4991 sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
4992 sge->addr_hi =
4993 cpu_to_le32(U64_HI(fp->rx_sge_mapping +
4994 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4995 sge->addr_lo =
4996 cpu_to_le32(U64_LO(fp->rx_sge_mapping +
4997 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
5000 bnx2x_init_sge_ring_bit_mask(fp);
5002 /* RX BD ring */
5003 for (i = 1; i <= NUM_RX_RINGS; i++) {
5004 struct eth_rx_bd *rx_bd;
5006 rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
5007 rx_bd->addr_hi =
5008 cpu_to_le32(U64_HI(fp->rx_desc_mapping +
5009 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
5010 rx_bd->addr_lo =
5011 cpu_to_le32(U64_LO(fp->rx_desc_mapping +
5012 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
5015 /* CQ ring */
5016 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
5017 struct eth_rx_cqe_next_page *nextpg;
5019 nextpg = (struct eth_rx_cqe_next_page *)
5020 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
5021 nextpg->addr_hi =
5022 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
5023 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
5024 nextpg->addr_lo =
5025 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
5026 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
5029 /* Allocate SGEs and initialize the ring elements */
5030 for (i = 0, ring_prod = 0;
5031 i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
5033 if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
5034 BNX2X_ERR("was only able to allocate "
5035 "%d rx sges\n", i);
5036 BNX2X_ERR("disabling TPA for queue[%d]\n", j);
5037 /* Cleanup already allocated elements */
5038 bnx2x_free_rx_sge_range(bp, fp, ring_prod);
5039 bnx2x_free_tpa_pool(bp, fp, max_agg_queues);
5040 fp->disable_tpa = 1;
5041 ring_prod = 0;
5042 break;
5044 ring_prod = NEXT_SGE_IDX(ring_prod);
5046 fp->rx_sge_prod = ring_prod;
5048 /* Allocate BDs and initialize BD ring */
5049 fp->rx_comp_cons = 0;
5050 cqe_ring_prod = ring_prod = 0;
5051 for (i = 0; i < bp->rx_ring_size; i++) {
5052 if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
5053 BNX2X_ERR("was only able to allocate "
5054 "%d rx skbs on queue[%d]\n", i, j);
5055 fp->eth_q_stats.rx_skb_alloc_failed++;
5056 break;
5058 ring_prod = NEXT_RX_IDX(ring_prod);
5059 cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
5060 WARN_ON(ring_prod <= i);
5063 fp->rx_bd_prod = ring_prod;
5064 /* must not have more available CQEs than BDs */
5065 fp->rx_comp_prod = min((u16)(NUM_RCQ_RINGS*RCQ_DESC_CNT),
5066 cqe_ring_prod);
5067 fp->rx_pkt = fp->rx_calls = 0;
5069 /* Warning!
5070 * this will generate an interrupt (to the TSTORM)
5071 * must only be done after chip is initialized
5073 bnx2x_update_rx_prod(bp, fp, ring_prod, fp->rx_comp_prod,
5074 fp->rx_sge_prod);
5075 if (j != 0)
5076 continue;
5078 REG_WR(bp, BAR_USTRORM_INTMEM +
5079 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
5080 U64_LO(fp->rx_comp_mapping));
5081 REG_WR(bp, BAR_USTRORM_INTMEM +
5082 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
5083 U64_HI(fp->rx_comp_mapping));
5087 static void bnx2x_init_tx_ring(struct bnx2x *bp)
5089 int i, j;
5091 for_each_queue(bp, j) {
5092 struct bnx2x_fastpath *fp = &bp->fp[j];
5094 for (i = 1; i <= NUM_TX_RINGS; i++) {
5095 struct eth_tx_next_bd *tx_next_bd =
5096 &fp->tx_desc_ring[TX_DESC_CNT * i - 1].next_bd;
5098 tx_next_bd->addr_hi =
5099 cpu_to_le32(U64_HI(fp->tx_desc_mapping +
5100 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
5101 tx_next_bd->addr_lo =
5102 cpu_to_le32(U64_LO(fp->tx_desc_mapping +
5103 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
5106 fp->tx_db.data.header.header = DOORBELL_HDR_DB_TYPE;
5107 fp->tx_db.data.zero_fill1 = 0;
5108 fp->tx_db.data.prod = 0;
5110 fp->tx_pkt_prod = 0;
5111 fp->tx_pkt_cons = 0;
5112 fp->tx_bd_prod = 0;
5113 fp->tx_bd_cons = 0;
5114 fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
5115 fp->tx_pkt = 0;
5119 static void bnx2x_init_sp_ring(struct bnx2x *bp)
5121 int func = BP_FUNC(bp);
5123 spin_lock_init(&bp->spq_lock);
5125 bp->spq_left = MAX_SPQ_PENDING;
5126 bp->spq_prod_idx = 0;
5127 bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
5128 bp->spq_prod_bd = bp->spq;
5129 bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
5131 REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
5132 U64_LO(bp->spq_mapping));
5133 REG_WR(bp,
5134 XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
5135 U64_HI(bp->spq_mapping));
5137 REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
5138 bp->spq_prod_idx);
5141 static void bnx2x_init_context(struct bnx2x *bp)
5143 int i;
5145 /* Rx */
5146 for_each_queue(bp, i) {
5147 struct eth_context *context = bnx2x_sp(bp, context[i].eth);
5148 struct bnx2x_fastpath *fp = &bp->fp[i];
5149 u8 cl_id = fp->cl_id;
5151 context->ustorm_st_context.common.sb_index_numbers =
5152 BNX2X_RX_SB_INDEX_NUM;
5153 context->ustorm_st_context.common.clientId = cl_id;
5154 context->ustorm_st_context.common.status_block_id = fp->sb_id;
5155 context->ustorm_st_context.common.flags =
5156 (USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT |
5157 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_STATISTICS);
5158 context->ustorm_st_context.common.statistics_counter_id =
5159 cl_id;
5160 context->ustorm_st_context.common.mc_alignment_log_size =
5161 BNX2X_RX_ALIGN_SHIFT;
5162 context->ustorm_st_context.common.bd_buff_size =
5163 bp->rx_buf_size;
5164 context->ustorm_st_context.common.bd_page_base_hi =
5165 U64_HI(fp->rx_desc_mapping);
5166 context->ustorm_st_context.common.bd_page_base_lo =
5167 U64_LO(fp->rx_desc_mapping);
5168 if (!fp->disable_tpa) {
5169 context->ustorm_st_context.common.flags |=
5170 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA;
5171 context->ustorm_st_context.common.sge_buff_size =
5172 (u16)min((u32)SGE_PAGE_SIZE*PAGES_PER_SGE,
5173 (u32)0xffff);
5174 context->ustorm_st_context.common.sge_page_base_hi =
5175 U64_HI(fp->rx_sge_mapping);
5176 context->ustorm_st_context.common.sge_page_base_lo =
5177 U64_LO(fp->rx_sge_mapping);
5179 context->ustorm_st_context.common.max_sges_for_packet =
5180 SGE_PAGE_ALIGN(bp->dev->mtu) >> SGE_PAGE_SHIFT;
5181 context->ustorm_st_context.common.max_sges_for_packet =
5182 ((context->ustorm_st_context.common.
5183 max_sges_for_packet + PAGES_PER_SGE - 1) &
5184 (~(PAGES_PER_SGE - 1))) >> PAGES_PER_SGE_SHIFT;
5187 context->ustorm_ag_context.cdu_usage =
5188 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
5189 CDU_REGION_NUMBER_UCM_AG,
5190 ETH_CONNECTION_TYPE);
5192 context->xstorm_ag_context.cdu_reserved =
5193 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
5194 CDU_REGION_NUMBER_XCM_AG,
5195 ETH_CONNECTION_TYPE);
5198 /* Tx */
5199 for_each_queue(bp, i) {
5200 struct bnx2x_fastpath *fp = &bp->fp[i];
5201 struct eth_context *context =
5202 bnx2x_sp(bp, context[i].eth);
5204 context->cstorm_st_context.sb_index_number =
5205 C_SB_ETH_TX_CQ_INDEX;
5206 context->cstorm_st_context.status_block_id = fp->sb_id;
5208 context->xstorm_st_context.tx_bd_page_base_hi =
5209 U64_HI(fp->tx_desc_mapping);
5210 context->xstorm_st_context.tx_bd_page_base_lo =
5211 U64_LO(fp->tx_desc_mapping);
5212 context->xstorm_st_context.statistics_data = (fp->cl_id |
5213 XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);
5217 static void bnx2x_init_ind_table(struct bnx2x *bp)
5219 int func = BP_FUNC(bp);
5220 int i;
5222 if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
5223 return;
5225 DP(NETIF_MSG_IFUP,
5226 "Initializing indirection table multi_mode %d\n", bp->multi_mode);
5227 for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
5228 REG_WR8(bp, BAR_TSTRORM_INTMEM +
5229 TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
5230 bp->fp->cl_id + (i % bp->num_queues));
5233 static void bnx2x_set_client_config(struct bnx2x *bp)
5235 struct tstorm_eth_client_config tstorm_client = {0};
5236 int port = BP_PORT(bp);
5237 int i;
5239 tstorm_client.mtu = bp->dev->mtu;
5240 tstorm_client.config_flags =
5241 (TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE |
5242 TSTORM_ETH_CLIENT_CONFIG_E1HOV_REM_ENABLE);
5243 #ifdef BCM_VLAN
5244 if (bp->rx_mode && bp->vlgrp && (bp->flags & HW_VLAN_RX_FLAG)) {
5245 tstorm_client.config_flags |=
5246 TSTORM_ETH_CLIENT_CONFIG_VLAN_REM_ENABLE;
5247 DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
5249 #endif
5251 for_each_queue(bp, i) {
5252 tstorm_client.statistics_counter_id = bp->fp[i].cl_id;
5254 REG_WR(bp, BAR_TSTRORM_INTMEM +
5255 TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
5256 ((u32 *)&tstorm_client)[0]);
5257 REG_WR(bp, BAR_TSTRORM_INTMEM +
5258 TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
5259 ((u32 *)&tstorm_client)[1]);
5262 DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
5263 ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
5266 static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
5268 struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
5269 int mode = bp->rx_mode;
5270 int mask = bp->rx_mode_cl_mask;
5271 int func = BP_FUNC(bp);
5272 int port = BP_PORT(bp);
5273 int i;
5274 /* All but management unicast packets should pass to the host as well */
5275 u32 llh_mask =
5276 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_BRCST |
5277 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_MLCST |
5278 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_VLAN |
5279 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_NO_VLAN;
5281 DP(NETIF_MSG_IFUP, "rx mode %d mask 0x%x\n", mode, mask);
5283 switch (mode) {
5284 case BNX2X_RX_MODE_NONE: /* no Rx */
5285 tstorm_mac_filter.ucast_drop_all = mask;
5286 tstorm_mac_filter.mcast_drop_all = mask;
5287 tstorm_mac_filter.bcast_drop_all = mask;
5288 break;
5290 case BNX2X_RX_MODE_NORMAL:
5291 tstorm_mac_filter.bcast_accept_all = mask;
5292 break;
5294 case BNX2X_RX_MODE_ALLMULTI:
5295 tstorm_mac_filter.mcast_accept_all = mask;
5296 tstorm_mac_filter.bcast_accept_all = mask;
5297 break;
5299 case BNX2X_RX_MODE_PROMISC:
5300 tstorm_mac_filter.ucast_accept_all = mask;
5301 tstorm_mac_filter.mcast_accept_all = mask;
5302 tstorm_mac_filter.bcast_accept_all = mask;
5303 /* pass management unicast packets as well */
5304 llh_mask |= NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_UNCST;
5305 break;
5307 default:
5308 BNX2X_ERR("BAD rx mode (%d)\n", mode);
5309 break;
5312 REG_WR(bp,
5313 (port ? NIG_REG_LLH1_BRB1_DRV_MASK : NIG_REG_LLH0_BRB1_DRV_MASK),
5314 llh_mask);
5316 for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
5317 REG_WR(bp, BAR_TSTRORM_INTMEM +
5318 TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
5319 ((u32 *)&tstorm_mac_filter)[i]);
5321 /* DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
5322 ((u32 *)&tstorm_mac_filter)[i]); */
5325 if (mode != BNX2X_RX_MODE_NONE)
5326 bnx2x_set_client_config(bp);
5329 static void bnx2x_init_internal_common(struct bnx2x *bp)
5331 int i;
5333 /* Zero this manually as its initialization is
5334 currently missing in the initTool */
5335 for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
5336 REG_WR(bp, BAR_USTRORM_INTMEM +
5337 USTORM_AGG_DATA_OFFSET + i * 4, 0);
5340 static void bnx2x_init_internal_port(struct bnx2x *bp)
5342 int port = BP_PORT(bp);
5344 REG_WR(bp,
5345 BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_U_OFFSET(port), BNX2X_BTR);
5346 REG_WR(bp,
5347 BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_C_OFFSET(port), BNX2X_BTR);
5348 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
5349 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
5352 static void bnx2x_init_internal_func(struct bnx2x *bp)
5354 struct tstorm_eth_function_common_config tstorm_config = {0};
5355 struct stats_indication_flags stats_flags = {0};
5356 int port = BP_PORT(bp);
5357 int func = BP_FUNC(bp);
5358 int i, j;
5359 u32 offset;
5360 u16 max_agg_size;
5362 if (is_multi(bp)) {
5363 tstorm_config.config_flags = MULTI_FLAGS(bp);
5364 tstorm_config.rss_result_mask = MULTI_MASK;
5367 /* Enable TPA if needed */
5368 if (bp->flags & TPA_ENABLE_FLAG)
5369 tstorm_config.config_flags |=
5370 TSTORM_ETH_FUNCTION_COMMON_CONFIG_ENABLE_TPA;
5372 if (IS_E1HMF(bp))
5373 tstorm_config.config_flags |=
5374 TSTORM_ETH_FUNCTION_COMMON_CONFIG_E1HOV_IN_CAM;
5376 tstorm_config.leading_client_id = BP_L_ID(bp);
5378 REG_WR(bp, BAR_TSTRORM_INTMEM +
5379 TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
5380 (*(u32 *)&tstorm_config));
5382 bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
5383 bp->rx_mode_cl_mask = (1 << BP_L_ID(bp));
5384 bnx2x_set_storm_rx_mode(bp);
5386 for_each_queue(bp, i) {
5387 u8 cl_id = bp->fp[i].cl_id;
5389 /* reset xstorm per client statistics */
5390 offset = BAR_XSTRORM_INTMEM +
5391 XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
5392 for (j = 0;
5393 j < sizeof(struct xstorm_per_client_stats) / 4; j++)
5394 REG_WR(bp, offset + j*4, 0);
5396 /* reset tstorm per client statistics */
5397 offset = BAR_TSTRORM_INTMEM +
5398 TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
5399 for (j = 0;
5400 j < sizeof(struct tstorm_per_client_stats) / 4; j++)
5401 REG_WR(bp, offset + j*4, 0);
5403 /* reset ustorm per client statistics */
5404 offset = BAR_USTRORM_INTMEM +
5405 USTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
5406 for (j = 0;
5407 j < sizeof(struct ustorm_per_client_stats) / 4; j++)
5408 REG_WR(bp, offset + j*4, 0);
5411 /* Init statistics related context */
5412 stats_flags.collect_eth = 1;
5414 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
5415 ((u32 *)&stats_flags)[0]);
5416 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
5417 ((u32 *)&stats_flags)[1]);
5419 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
5420 ((u32 *)&stats_flags)[0]);
5421 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
5422 ((u32 *)&stats_flags)[1]);
5424 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func),
5425 ((u32 *)&stats_flags)[0]);
5426 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func) + 4,
5427 ((u32 *)&stats_flags)[1]);
5429 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
5430 ((u32 *)&stats_flags)[0]);
5431 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
5432 ((u32 *)&stats_flags)[1]);
5434 REG_WR(bp, BAR_XSTRORM_INTMEM +
5435 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
5436 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
5437 REG_WR(bp, BAR_XSTRORM_INTMEM +
5438 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5439 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5441 REG_WR(bp, BAR_TSTRORM_INTMEM +
5442 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
5443 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
5444 REG_WR(bp, BAR_TSTRORM_INTMEM +
5445 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5446 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5448 REG_WR(bp, BAR_USTRORM_INTMEM +
5449 USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
5450 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
5451 REG_WR(bp, BAR_USTRORM_INTMEM +
5452 USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5453 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5455 if (CHIP_IS_E1H(bp)) {
5456 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
5457 IS_E1HMF(bp));
5458 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
5459 IS_E1HMF(bp));
5460 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
5461 IS_E1HMF(bp));
5462 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
5463 IS_E1HMF(bp));
5465 REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
5466 bp->e1hov);
5469 /* Init CQ ring mapping and aggregation size, the FW limit is 8 frags */
5470 max_agg_size =
5471 min((u32)(min((u32)8, (u32)MAX_SKB_FRAGS) *
5472 SGE_PAGE_SIZE * PAGES_PER_SGE),
5473 (u32)0xffff);
5474 for_each_queue(bp, i) {
5475 struct bnx2x_fastpath *fp = &bp->fp[i];
5477 REG_WR(bp, BAR_USTRORM_INTMEM +
5478 USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id),
5479 U64_LO(fp->rx_comp_mapping));
5480 REG_WR(bp, BAR_USTRORM_INTMEM +
5481 USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id) + 4,
5482 U64_HI(fp->rx_comp_mapping));
5484 /* Next page */
5485 REG_WR(bp, BAR_USTRORM_INTMEM +
5486 USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id),
5487 U64_LO(fp->rx_comp_mapping + BCM_PAGE_SIZE));
5488 REG_WR(bp, BAR_USTRORM_INTMEM +
5489 USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id) + 4,
5490 U64_HI(fp->rx_comp_mapping + BCM_PAGE_SIZE));
5492 REG_WR16(bp, BAR_USTRORM_INTMEM +
5493 USTORM_MAX_AGG_SIZE_OFFSET(port, fp->cl_id),
5494 max_agg_size);
5497 /* dropless flow control */
5498 if (CHIP_IS_E1H(bp)) {
5499 struct ustorm_eth_rx_pause_data_e1h rx_pause = {0};
5501 rx_pause.bd_thr_low = 250;
5502 rx_pause.cqe_thr_low = 250;
5503 rx_pause.cos = 1;
5504 rx_pause.sge_thr_low = 0;
5505 rx_pause.bd_thr_high = 350;
5506 rx_pause.cqe_thr_high = 350;
5507 rx_pause.sge_thr_high = 0;
5509 for_each_queue(bp, i) {
5510 struct bnx2x_fastpath *fp = &bp->fp[i];
5512 if (!fp->disable_tpa) {
5513 rx_pause.sge_thr_low = 150;
5514 rx_pause.sge_thr_high = 250;
5518 offset = BAR_USTRORM_INTMEM +
5519 USTORM_ETH_RING_PAUSE_DATA_OFFSET(port,
5520 fp->cl_id);
5521 for (j = 0;
5522 j < sizeof(struct ustorm_eth_rx_pause_data_e1h)/4;
5523 j++)
5524 REG_WR(bp, offset + j*4,
5525 ((u32 *)&rx_pause)[j]);
5529 memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));
5531 /* Init rate shaping and fairness contexts */
5532 if (IS_E1HMF(bp)) {
5533 int vn;
5535 /* During init there is no active link
5536 Until link is up, set link rate to 10Gbps */
5537 bp->link_vars.line_speed = SPEED_10000;
5538 bnx2x_init_port_minmax(bp);
5540 if (!BP_NOMCP(bp))
5541 bp->mf_config =
5542 SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
5543 bnx2x_calc_vn_weight_sum(bp);
5545 for (vn = VN_0; vn < E1HVN_MAX; vn++)
5546 bnx2x_init_vn_minmax(bp, 2*vn + port);
5548 /* Enable rate shaping and fairness */
5549 bp->cmng.flags.cmng_enables |=
5550 CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
5552 } else {
5553 /* rate shaping and fairness are disabled */
5554 DP(NETIF_MSG_IFUP,
5555 "single function mode minmax will be disabled\n");
5559 /* Store it to internal memory */
5560 if (bp->port.pmf)
5561 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
5562 REG_WR(bp, BAR_XSTRORM_INTMEM +
5563 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
5564 ((u32 *)(&bp->cmng))[i]);
5567 static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
5569 switch (load_code) {
5570 case FW_MSG_CODE_DRV_LOAD_COMMON:
5571 bnx2x_init_internal_common(bp);
5572 /* no break */
5574 case FW_MSG_CODE_DRV_LOAD_PORT:
5575 bnx2x_init_internal_port(bp);
5576 /* no break */
5578 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
5579 bnx2x_init_internal_func(bp);
5580 break;
5582 default:
5583 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
5584 break;
5588 static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
5590 int i;
5592 for_each_queue(bp, i) {
5593 struct bnx2x_fastpath *fp = &bp->fp[i];
5595 fp->bp = bp;
5596 fp->state = BNX2X_FP_STATE_CLOSED;
5597 fp->index = i;
5598 fp->cl_id = BP_L_ID(bp) + i;
5599 #ifdef BCM_CNIC
5600 fp->sb_id = fp->cl_id + 1;
5601 #else
5602 fp->sb_id = fp->cl_id;
5603 #endif
5604 DP(NETIF_MSG_IFUP,
5605 "queue[%d]: bnx2x_init_sb(%p,%p) cl_id %d sb %d\n",
5606 i, bp, fp->status_blk, fp->cl_id, fp->sb_id);
5607 bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping,
5608 fp->sb_id);
5609 bnx2x_update_fpsb_idx(fp);
5612 /* ensure status block indices were read */
5613 rmb();
5616 bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping,
5617 DEF_SB_ID);
5618 bnx2x_update_dsb_idx(bp);
5619 bnx2x_update_coalesce(bp);
5620 bnx2x_init_rx_rings(bp);
5621 bnx2x_init_tx_ring(bp);
5622 bnx2x_init_sp_ring(bp);
5623 bnx2x_init_context(bp);
5624 bnx2x_init_internal(bp, load_code);
5625 bnx2x_init_ind_table(bp);
5626 bnx2x_stats_init(bp);
5628 /* At this point, we are ready for interrupts */
5629 atomic_set(&bp->intr_sem, 0);
5631 /* flush all before enabling interrupts */
5632 mb();
5633 mmiowb();
5635 bnx2x_int_enable(bp);
5637 /* Check for SPIO5 */
5638 bnx2x_attn_int_deasserted0(bp,
5639 REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + BP_PORT(bp)*4) &
5640 AEU_INPUTS_ATTN_BITS_SPIO5);
5643 /* end of nic init */
5646 * gzip service functions
5649 static int bnx2x_gunzip_init(struct bnx2x *bp)
5651 bp->gunzip_buf = pci_alloc_consistent(bp->pdev, FW_BUF_SIZE,
5652 &bp->gunzip_mapping);
5653 if (bp->gunzip_buf == NULL)
5654 goto gunzip_nomem1;
5656 bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
5657 if (bp->strm == NULL)
5658 goto gunzip_nomem2;
5660 bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
5661 GFP_KERNEL);
5662 if (bp->strm->workspace == NULL)
5663 goto gunzip_nomem3;
5665 return 0;
5667 gunzip_nomem3:
5668 kfree(bp->strm);
5669 bp->strm = NULL;
5671 gunzip_nomem2:
5672 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
5673 bp->gunzip_mapping);
5674 bp->gunzip_buf = NULL;
5676 gunzip_nomem1:
5677 printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for"
5678 " un-compression\n", bp->dev->name);
5679 return -ENOMEM;
5682 static void bnx2x_gunzip_end(struct bnx2x *bp)
5684 kfree(bp->strm->workspace);
5686 kfree(bp->strm);
5687 bp->strm = NULL;
5689 if (bp->gunzip_buf) {
5690 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
5691 bp->gunzip_mapping);
5692 bp->gunzip_buf = NULL;
5696 static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len)
5698 int n, rc;
5700 /* check gzip header */
5701 if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED)) {
5702 BNX2X_ERR("Bad gzip header\n");
5703 return -EINVAL;
5706 n = 10;
5708 #define FNAME 0x8
5710 if (zbuf[3] & FNAME)
5711 while ((zbuf[n++] != 0) && (n < len));
5713 bp->strm->next_in = (typeof(bp->strm->next_in))zbuf + n;
5714 bp->strm->avail_in = len - n;
5715 bp->strm->next_out = bp->gunzip_buf;
5716 bp->strm->avail_out = FW_BUF_SIZE;
5718 rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
5719 if (rc != Z_OK)
5720 return rc;
5722 rc = zlib_inflate(bp->strm, Z_FINISH);
5723 if ((rc != Z_OK) && (rc != Z_STREAM_END))
5724 printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
5725 bp->dev->name, bp->strm->msg);
5727 bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
5728 if (bp->gunzip_outlen & 0x3)
5729 printk(KERN_ERR PFX "%s: Firmware decompression error:"
5730 " gunzip_outlen (%d) not aligned\n",
5731 bp->dev->name, bp->gunzip_outlen);
5732 bp->gunzip_outlen >>= 2;
5734 zlib_inflateEnd(bp->strm);
5736 if (rc == Z_STREAM_END)
5737 return 0;
5739 return rc;
5742 /* nic load/unload */
5745 * General service functions
5748 /* send a NIG loopback debug packet */
5749 static void bnx2x_lb_pckt(struct bnx2x *bp)
5751 u32 wb_write[3];
5753 /* Ethernet source and destination addresses */
5754 wb_write[0] = 0x55555555;
5755 wb_write[1] = 0x55555555;
5756 wb_write[2] = 0x20; /* SOP */
5757 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
5759 /* NON-IP protocol */
5760 wb_write[0] = 0x09000000;
5761 wb_write[1] = 0x55555555;
5762 wb_write[2] = 0x10; /* EOP, eop_bvalid = 0 */
5763 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
5766 /* some of the internal memories
5767 * are not directly readable from the driver
5768 * to test them we send debug packets
5770 static int bnx2x_int_mem_test(struct bnx2x *bp)
5772 int factor;
5773 int count, i;
5774 u32 val = 0;
5776 if (CHIP_REV_IS_FPGA(bp))
5777 factor = 120;
5778 else if (CHIP_REV_IS_EMUL(bp))
5779 factor = 200;
5780 else
5781 factor = 1;
5783 DP(NETIF_MSG_HW, "start part1\n");
5785 /* Disable inputs of parser neighbor blocks */
5786 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
5787 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
5788 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
5789 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
5791 /* Write 0 to parser credits for CFC search request */
5792 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
5794 /* send Ethernet packet */
5795 bnx2x_lb_pckt(bp);
5797 /* TODO do i reset NIG statistic? */
5798 /* Wait until NIG register shows 1 packet of size 0x10 */
5799 count = 1000 * factor;
5800 while (count) {
5802 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5803 val = *bnx2x_sp(bp, wb_data[0]);
5804 if (val == 0x10)
5805 break;
5807 msleep(10);
5808 count--;
5810 if (val != 0x10) {
5811 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
5812 return -1;
5815 /* Wait until PRS register shows 1 packet */
5816 count = 1000 * factor;
5817 while (count) {
5818 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5819 if (val == 1)
5820 break;
5822 msleep(10);
5823 count--;
5825 if (val != 0x1) {
5826 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5827 return -2;
5830 /* Reset and init BRB, PRS */
5831 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
5832 msleep(50);
5833 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
5834 msleep(50);
5835 bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
5836 bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
5838 DP(NETIF_MSG_HW, "part2\n");
5840 /* Disable inputs of parser neighbor blocks */
5841 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
5842 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
5843 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
5844 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
5846 /* Write 0 to parser credits for CFC search request */
5847 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
5849 /* send 10 Ethernet packets */
5850 for (i = 0; i < 10; i++)
5851 bnx2x_lb_pckt(bp);
5853 /* Wait until NIG register shows 10 + 1
5854 packets of size 11*0x10 = 0xb0 */
5855 count = 1000 * factor;
5856 while (count) {
5858 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5859 val = *bnx2x_sp(bp, wb_data[0]);
5860 if (val == 0xb0)
5861 break;
5863 msleep(10);
5864 count--;
5866 if (val != 0xb0) {
5867 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
5868 return -3;
5871 /* Wait until PRS register shows 2 packets */
5872 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5873 if (val != 2)
5874 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5876 /* Write 1 to parser credits for CFC search request */
5877 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
5879 /* Wait until PRS register shows 3 packets */
5880 msleep(10 * factor);
5881 /* Wait until NIG register shows 1 packet of size 0x10 */
5882 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5883 if (val != 3)
5884 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5886 /* clear NIG EOP FIFO */
5887 for (i = 0; i < 11; i++)
5888 REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
5889 val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
5890 if (val != 1) {
5891 BNX2X_ERR("clear of NIG failed\n");
5892 return -4;
5895 /* Reset and init BRB, PRS, NIG */
5896 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
5897 msleep(50);
5898 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
5899 msleep(50);
5900 bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
5901 bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
5902 #ifndef BCM_CNIC
5903 /* set NIC mode */
5904 REG_WR(bp, PRS_REG_NIC_MODE, 1);
5905 #endif
5907 /* Enable inputs of parser neighbor blocks */
5908 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
5909 REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
5910 REG_WR(bp, CFC_REG_DEBUG0, 0x0);
5911 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);
5913 DP(NETIF_MSG_HW, "done\n");
5915 return 0; /* OK */
5918 static void enable_blocks_attention(struct bnx2x *bp)
5920 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5921 REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
5922 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5923 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
5924 REG_WR(bp, QM_REG_QM_INT_MASK, 0);
5925 REG_WR(bp, TM_REG_TM_INT_MASK, 0);
5926 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
5927 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
5928 REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
5929 /* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
5930 /* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
5931 REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
5932 REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
5933 REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
5934 /* REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
5935 /* REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
5936 REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
5937 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
5938 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
5939 REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
5940 /* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
5941 /* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
5942 if (CHIP_REV_IS_FPGA(bp))
5943 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
5944 else
5945 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
5946 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
5947 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
5948 REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
5949 /* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
5950 /* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
5951 REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
5952 REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
5953 /* REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
5954 REG_WR(bp, PBF_REG_PBF_INT_MASK, 0X18); /* bit 3,4 masked */
5958 static void bnx2x_reset_common(struct bnx2x *bp)
5960 /* reset_common */
5961 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5962 0xd3ffff7f);
5963 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
5966 static void bnx2x_init_pxp(struct bnx2x *bp)
5968 u16 devctl;
5969 int r_order, w_order;
5971 pci_read_config_word(bp->pdev,
5972 bp->pcie_cap + PCI_EXP_DEVCTL, &devctl);
5973 DP(NETIF_MSG_HW, "read 0x%x from devctl\n", devctl);
5974 w_order = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
5975 if (bp->mrrs == -1)
5976 r_order = ((devctl & PCI_EXP_DEVCTL_READRQ) >> 12);
5977 else {
5978 DP(NETIF_MSG_HW, "force read order to %d\n", bp->mrrs);
5979 r_order = bp->mrrs;
5982 bnx2x_init_pxp_arb(bp, r_order, w_order);
5985 static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp)
5987 u32 val;
5988 u8 port;
5989 u8 is_required = 0;
5991 val = SHMEM_RD(bp, dev_info.shared_hw_config.config2) &
5992 SHARED_HW_CFG_FAN_FAILURE_MASK;
5994 if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED)
5995 is_required = 1;
5998 * The fan failure mechanism is usually related to the PHY type since
5999 * the power consumption of the board is affected by the PHY. Currently,
6000 * fan is required for most designs with SFX7101, BCM8727 and BCM8481.
6002 else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE)
6003 for (port = PORT_0; port < PORT_MAX; port++) {
6004 u32 phy_type =
6005 SHMEM_RD(bp, dev_info.port_hw_config[port].
6006 external_phy_config) &
6007 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
6008 is_required |=
6009 ((phy_type ==
6010 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) ||
6011 (phy_type ==
6012 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727) ||
6013 (phy_type ==
6014 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481));
6017 DP(NETIF_MSG_HW, "fan detection setting: %d\n", is_required);
6019 if (is_required == 0)
6020 return;
6022 /* Fan failure is indicated by SPIO 5 */
6023 bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
6024 MISC_REGISTERS_SPIO_INPUT_HI_Z);
6026 /* set to active low mode */
6027 val = REG_RD(bp, MISC_REG_SPIO_INT);
6028 val |= ((1 << MISC_REGISTERS_SPIO_5) <<
6029 MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
6030 REG_WR(bp, MISC_REG_SPIO_INT, val);
6032 /* enable interrupt to signal the IGU */
6033 val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
6034 val |= (1 << MISC_REGISTERS_SPIO_5);
6035 REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
6038 static int bnx2x_init_common(struct bnx2x *bp)
6040 u32 val, i;
6041 #ifdef BCM_CNIC
6042 u32 wb_write[2];
6043 #endif
6045 DP(BNX2X_MSG_MCP, "starting common init func %d\n", BP_FUNC(bp));
6047 bnx2x_reset_common(bp);
6048 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
6049 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);
6051 bnx2x_init_block(bp, MISC_BLOCK, COMMON_STAGE);
6052 if (CHIP_IS_E1H(bp))
6053 REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp));
6055 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
6056 msleep(30);
6057 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);
6059 bnx2x_init_block(bp, PXP_BLOCK, COMMON_STAGE);
6060 if (CHIP_IS_E1(bp)) {
6061 /* enable HW interrupt from PXP on USDM overflow
6062 bit 16 on INT_MASK_0 */
6063 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
6066 bnx2x_init_block(bp, PXP2_BLOCK, COMMON_STAGE);
6067 bnx2x_init_pxp(bp);
6069 #ifdef __BIG_ENDIAN
6070 REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
6071 REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
6072 REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
6073 REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
6074 REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
6075 /* make sure this value is 0 */
6076 REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);
6078 /* REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
6079 REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
6080 REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
6081 REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
6082 REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
6083 #endif
6085 REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
6086 #ifdef BCM_CNIC
6087 REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
6088 REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
6089 REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
6090 #endif
6092 if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
6093 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
6095 /* let the HW do it's magic ... */
6096 msleep(100);
6097 /* finish PXP init */
6098 val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
6099 if (val != 1) {
6100 BNX2X_ERR("PXP2 CFG failed\n");
6101 return -EBUSY;
6103 val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
6104 if (val != 1) {
6105 BNX2X_ERR("PXP2 RD_INIT failed\n");
6106 return -EBUSY;
6109 REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
6110 REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
6112 bnx2x_init_block(bp, DMAE_BLOCK, COMMON_STAGE);
6114 /* clean the DMAE memory */
6115 bp->dmae_ready = 1;
6116 bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);
6118 bnx2x_init_block(bp, TCM_BLOCK, COMMON_STAGE);
6119 bnx2x_init_block(bp, UCM_BLOCK, COMMON_STAGE);
6120 bnx2x_init_block(bp, CCM_BLOCK, COMMON_STAGE);
6121 bnx2x_init_block(bp, XCM_BLOCK, COMMON_STAGE);
6123 bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
6124 bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
6125 bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
6126 bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
6128 bnx2x_init_block(bp, QM_BLOCK, COMMON_STAGE);
6130 #ifdef BCM_CNIC
6131 wb_write[0] = 0;
6132 wb_write[1] = 0;
6133 for (i = 0; i < 64; i++) {
6134 REG_WR(bp, QM_REG_BASEADDR + i*4, 1024 * 4 * (i%16));
6135 bnx2x_init_ind_wr(bp, QM_REG_PTRTBL + i*8, wb_write, 2);
6137 if (CHIP_IS_E1H(bp)) {
6138 REG_WR(bp, QM_REG_BASEADDR_EXT_A + i*4, 1024*4*(i%16));
6139 bnx2x_init_ind_wr(bp, QM_REG_PTRTBL_EXT_A + i*8,
6140 wb_write, 2);
6143 #endif
6144 /* soft reset pulse */
6145 REG_WR(bp, QM_REG_SOFT_RESET, 1);
6146 REG_WR(bp, QM_REG_SOFT_RESET, 0);
6148 #ifdef BCM_CNIC
6149 bnx2x_init_block(bp, TIMERS_BLOCK, COMMON_STAGE);
6150 #endif
6152 bnx2x_init_block(bp, DQ_BLOCK, COMMON_STAGE);
6153 REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT);
6154 if (!CHIP_REV_IS_SLOW(bp)) {
6155 /* enable hw interrupt from doorbell Q */
6156 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
6159 bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
6160 bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
6161 REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
6162 #ifndef BCM_CNIC
6163 /* set NIC mode */
6164 REG_WR(bp, PRS_REG_NIC_MODE, 1);
6165 #endif
6166 if (CHIP_IS_E1H(bp))
6167 REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));
6169 bnx2x_init_block(bp, TSDM_BLOCK, COMMON_STAGE);
6170 bnx2x_init_block(bp, CSDM_BLOCK, COMMON_STAGE);
6171 bnx2x_init_block(bp, USDM_BLOCK, COMMON_STAGE);
6172 bnx2x_init_block(bp, XSDM_BLOCK, COMMON_STAGE);
6174 bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
6175 bnx2x_init_fill(bp, USEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
6176 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
6177 bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
6179 bnx2x_init_block(bp, TSEM_BLOCK, COMMON_STAGE);
6180 bnx2x_init_block(bp, USEM_BLOCK, COMMON_STAGE);
6181 bnx2x_init_block(bp, CSEM_BLOCK, COMMON_STAGE);
6182 bnx2x_init_block(bp, XSEM_BLOCK, COMMON_STAGE);
6184 /* sync semi rtc */
6185 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
6186 0x80000000);
6187 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
6188 0x80000000);
6190 bnx2x_init_block(bp, UPB_BLOCK, COMMON_STAGE);
6191 bnx2x_init_block(bp, XPB_BLOCK, COMMON_STAGE);
6192 bnx2x_init_block(bp, PBF_BLOCK, COMMON_STAGE);
6194 REG_WR(bp, SRC_REG_SOFT_RST, 1);
6195 for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4) {
6196 REG_WR(bp, i, 0xc0cac01a);
6197 /* TODO: replace with something meaningful */
6199 bnx2x_init_block(bp, SRCH_BLOCK, COMMON_STAGE);
6200 #ifdef BCM_CNIC
6201 REG_WR(bp, SRC_REG_KEYSEARCH_0, 0x63285672);
6202 REG_WR(bp, SRC_REG_KEYSEARCH_1, 0x24b8f2cc);
6203 REG_WR(bp, SRC_REG_KEYSEARCH_2, 0x223aef9b);
6204 REG_WR(bp, SRC_REG_KEYSEARCH_3, 0x26001e3a);
6205 REG_WR(bp, SRC_REG_KEYSEARCH_4, 0x7ae91116);
6206 REG_WR(bp, SRC_REG_KEYSEARCH_5, 0x5ce5230b);
6207 REG_WR(bp, SRC_REG_KEYSEARCH_6, 0x298d8adf);
6208 REG_WR(bp, SRC_REG_KEYSEARCH_7, 0x6eb0ff09);
6209 REG_WR(bp, SRC_REG_KEYSEARCH_8, 0x1830f82f);
6210 REG_WR(bp, SRC_REG_KEYSEARCH_9, 0x01e46be7);
6211 #endif
6212 REG_WR(bp, SRC_REG_SOFT_RST, 0);
6214 if (sizeof(union cdu_context) != 1024)
6215 /* we currently assume that a context is 1024 bytes */
6216 printk(KERN_ALERT PFX "please adjust the size of"
6217 " cdu_context(%ld)\n", (long)sizeof(union cdu_context));
6219 bnx2x_init_block(bp, CDU_BLOCK, COMMON_STAGE);
6220 val = (4 << 24) + (0 << 12) + 1024;
6221 REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
6223 bnx2x_init_block(bp, CFC_BLOCK, COMMON_STAGE);
6224 REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
6225 /* enable context validation interrupt from CFC */
6226 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
6228 /* set the thresholds to prevent CFC/CDU race */
6229 REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);
6231 bnx2x_init_block(bp, HC_BLOCK, COMMON_STAGE);
6232 bnx2x_init_block(bp, MISC_AEU_BLOCK, COMMON_STAGE);
6234 bnx2x_init_block(bp, PXPCS_BLOCK, COMMON_STAGE);
6235 /* Reset PCIE errors for debug */
6236 REG_WR(bp, 0x2814, 0xffffffff);
6237 REG_WR(bp, 0x3820, 0xffffffff);
6239 bnx2x_init_block(bp, EMAC0_BLOCK, COMMON_STAGE);
6240 bnx2x_init_block(bp, EMAC1_BLOCK, COMMON_STAGE);
6241 bnx2x_init_block(bp, DBU_BLOCK, COMMON_STAGE);
6242 bnx2x_init_block(bp, DBG_BLOCK, COMMON_STAGE);
6244 bnx2x_init_block(bp, NIG_BLOCK, COMMON_STAGE);
6245 if (CHIP_IS_E1H(bp)) {
6246 REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp));
6247 REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp));
6250 if (CHIP_REV_IS_SLOW(bp))
6251 msleep(200);
6253 /* finish CFC init */
6254 val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
6255 if (val != 1) {
6256 BNX2X_ERR("CFC LL_INIT failed\n");
6257 return -EBUSY;
6259 val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
6260 if (val != 1) {
6261 BNX2X_ERR("CFC AC_INIT failed\n");
6262 return -EBUSY;
6264 val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
6265 if (val != 1) {
6266 BNX2X_ERR("CFC CAM_INIT failed\n");
6267 return -EBUSY;
6269 REG_WR(bp, CFC_REG_DEBUG0, 0);
6271 /* read NIG statistic
6272 to see if this is our first up since powerup */
6273 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
6274 val = *bnx2x_sp(bp, wb_data[0]);
6276 /* do internal memory self test */
6277 if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) {
6278 BNX2X_ERR("internal mem self test failed\n");
6279 return -EBUSY;
6282 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
6283 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
6284 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
6285 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
6286 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
6287 bp->port.need_hw_lock = 1;
6288 break;
6290 default:
6291 break;
6294 bnx2x_setup_fan_failure_detection(bp);
6296 /* clear PXP2 attentions */
6297 REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
6299 enable_blocks_attention(bp);
6301 if (!BP_NOMCP(bp)) {
6302 bnx2x_acquire_phy_lock(bp);
6303 bnx2x_common_init_phy(bp, bp->common.shmem_base);
6304 bnx2x_release_phy_lock(bp);
6305 } else
6306 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
6308 return 0;
6311 static int bnx2x_init_port(struct bnx2x *bp)
6313 int port = BP_PORT(bp);
6314 int init_stage = port ? PORT1_STAGE : PORT0_STAGE;
6315 u32 low, high;
6316 u32 val;
6318 DP(BNX2X_MSG_MCP, "starting port init port %x\n", port);
6320 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
6322 bnx2x_init_block(bp, PXP_BLOCK, init_stage);
6323 bnx2x_init_block(bp, PXP2_BLOCK, init_stage);
6325 bnx2x_init_block(bp, TCM_BLOCK, init_stage);
6326 bnx2x_init_block(bp, UCM_BLOCK, init_stage);
6327 bnx2x_init_block(bp, CCM_BLOCK, init_stage);
6328 bnx2x_init_block(bp, XCM_BLOCK, init_stage);
6330 #ifdef BCM_CNIC
6331 REG_WR(bp, QM_REG_CONNNUM_0 + port*4, 1024/16 - 1);
6333 bnx2x_init_block(bp, TIMERS_BLOCK, init_stage);
6334 REG_WR(bp, TM_REG_LIN0_SCAN_TIME + port*4, 20);
6335 REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31);
6336 #endif
6337 bnx2x_init_block(bp, DQ_BLOCK, init_stage);
6339 bnx2x_init_block(bp, BRB1_BLOCK, init_stage);
6340 if (CHIP_REV_IS_SLOW(bp) && !CHIP_IS_E1H(bp)) {
6341 /* no pause for emulation and FPGA */
6342 low = 0;
6343 high = 513;
6344 } else {
6345 if (IS_E1HMF(bp))
6346 low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
6347 else if (bp->dev->mtu > 4096) {
6348 if (bp->flags & ONE_PORT_FLAG)
6349 low = 160;
6350 else {
6351 val = bp->dev->mtu;
6352 /* (24*1024 + val*4)/256 */
6353 low = 96 + (val/64) + ((val % 64) ? 1 : 0);
6355 } else
6356 low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160);
6357 high = low + 56; /* 14*1024/256 */
6359 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
6360 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);
6363 bnx2x_init_block(bp, PRS_BLOCK, init_stage);
6365 bnx2x_init_block(bp, TSDM_BLOCK, init_stage);
6366 bnx2x_init_block(bp, CSDM_BLOCK, init_stage);
6367 bnx2x_init_block(bp, USDM_BLOCK, init_stage);
6368 bnx2x_init_block(bp, XSDM_BLOCK, init_stage);
6370 bnx2x_init_block(bp, TSEM_BLOCK, init_stage);
6371 bnx2x_init_block(bp, USEM_BLOCK, init_stage);
6372 bnx2x_init_block(bp, CSEM_BLOCK, init_stage);
6373 bnx2x_init_block(bp, XSEM_BLOCK, init_stage);
6375 bnx2x_init_block(bp, UPB_BLOCK, init_stage);
6376 bnx2x_init_block(bp, XPB_BLOCK, init_stage);
6378 bnx2x_init_block(bp, PBF_BLOCK, init_stage);
6380 /* configure PBF to work without PAUSE mtu 9000 */
6381 REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
6383 /* update threshold */
6384 REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
6385 /* update init credit */
6386 REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
6388 /* probe changes */
6389 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
6390 msleep(5);
6391 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
6393 #ifdef BCM_CNIC
6394 bnx2x_init_block(bp, SRCH_BLOCK, init_stage);
6395 #endif
6396 bnx2x_init_block(bp, CDU_BLOCK, init_stage);
6397 bnx2x_init_block(bp, CFC_BLOCK, init_stage);
6399 if (CHIP_IS_E1(bp)) {
6400 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
6401 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
6403 bnx2x_init_block(bp, HC_BLOCK, init_stage);
6405 bnx2x_init_block(bp, MISC_AEU_BLOCK, init_stage);
6406 /* init aeu_mask_attn_func_0/1:
6407 * - SF mode: bits 3-7 are masked. only bits 0-2 are in use
6408 * - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
6409 * bits 4-7 are used for "per vn group attention" */
6410 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
6411 (IS_E1HMF(bp) ? 0xF7 : 0x7));
6413 bnx2x_init_block(bp, PXPCS_BLOCK, init_stage);
6414 bnx2x_init_block(bp, EMAC0_BLOCK, init_stage);
6415 bnx2x_init_block(bp, EMAC1_BLOCK, init_stage);
6416 bnx2x_init_block(bp, DBU_BLOCK, init_stage);
6417 bnx2x_init_block(bp, DBG_BLOCK, init_stage);
6419 bnx2x_init_block(bp, NIG_BLOCK, init_stage);
6421 REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
6423 if (CHIP_IS_E1H(bp)) {
6424 /* 0x2 disable e1hov, 0x1 enable */
6425 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
6426 (IS_E1HMF(bp) ? 0x1 : 0x2));
6429 REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
6430 REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
6431 REG_WR(bp, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
6435 bnx2x_init_block(bp, MCP_BLOCK, init_stage);
6436 bnx2x_init_block(bp, DMAE_BLOCK, init_stage);
6438 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
6439 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
6441 u32 swap_val, swap_override, aeu_gpio_mask, offset;
6443 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
6444 MISC_REGISTERS_GPIO_INPUT_HI_Z, port);
6446 /* The GPIO should be swapped if the swap register is
6447 set and active */
6448 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
6449 swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
6451 /* Select function upon port-swap configuration */
6452 if (port == 0) {
6453 offset = MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0;
6454 aeu_gpio_mask = (swap_val && swap_override) ?
6455 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1 :
6456 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0;
6457 } else {
6458 offset = MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0;
6459 aeu_gpio_mask = (swap_val && swap_override) ?
6460 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 :
6461 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1;
6463 val = REG_RD(bp, offset);
6464 /* add GPIO3 to group */
6465 val |= aeu_gpio_mask;
6466 REG_WR(bp, offset, val);
6468 break;
6470 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
6471 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
6472 /* add SPIO 5 to group 0 */
6474 u32 reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
6475 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
6476 val = REG_RD(bp, reg_addr);
6477 val |= AEU_INPUTS_ATTN_BITS_SPIO5;
6478 REG_WR(bp, reg_addr, val);
6480 break;
6482 default:
6483 break;
6486 bnx2x__link_reset(bp);
6488 return 0;
6491 #define ILT_PER_FUNC (768/2)
6492 #define FUNC_ILT_BASE(func) (func * ILT_PER_FUNC)
6493 /* the phys address is shifted right 12 bits and has an added
6494 1=valid bit added to the 53rd bit
6495 then since this is a wide register(TM)
6496 we split it into two 32 bit writes
6498 #define ONCHIP_ADDR1(x) ((u32)(((u64)x >> 12) & 0xFFFFFFFF))
6499 #define ONCHIP_ADDR2(x) ((u32)((1 << 20) | ((u64)x >> 44)))
6500 #define PXP_ONE_ILT(x) (((x) << 10) | x)
6501 #define PXP_ILT_RANGE(f, l) (((l) << 10) | f)
6503 #ifdef BCM_CNIC
6504 #define CNIC_ILT_LINES 127
6505 #define CNIC_CTX_PER_ILT 16
6506 #else
6507 #define CNIC_ILT_LINES 0
6508 #endif
6510 static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
6512 int reg;
6514 if (CHIP_IS_E1H(bp))
6515 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
6516 else /* E1 */
6517 reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
6519 bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
6522 static int bnx2x_init_func(struct bnx2x *bp)
6524 int port = BP_PORT(bp);
6525 int func = BP_FUNC(bp);
6526 u32 addr, val;
6527 int i;
6529 DP(BNX2X_MSG_MCP, "starting func init func %x\n", func);
6531 /* set MSI reconfigure capability */
6532 addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
6533 val = REG_RD(bp, addr);
6534 val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
6535 REG_WR(bp, addr, val);
6537 i = FUNC_ILT_BASE(func);
6539 bnx2x_ilt_wr(bp, i, bnx2x_sp_mapping(bp, context));
6540 if (CHIP_IS_E1H(bp)) {
6541 REG_WR(bp, PXP2_REG_RQ_CDU_FIRST_ILT, i);
6542 REG_WR(bp, PXP2_REG_RQ_CDU_LAST_ILT, i + CNIC_ILT_LINES);
6543 } else /* E1 */
6544 REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4,
6545 PXP_ILT_RANGE(i, i + CNIC_ILT_LINES));
6547 #ifdef BCM_CNIC
6548 i += 1 + CNIC_ILT_LINES;
6549 bnx2x_ilt_wr(bp, i, bp->timers_mapping);
6550 if (CHIP_IS_E1(bp))
6551 REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));
6552 else {
6553 REG_WR(bp, PXP2_REG_RQ_TM_FIRST_ILT, i);
6554 REG_WR(bp, PXP2_REG_RQ_TM_LAST_ILT, i);
6557 i++;
6558 bnx2x_ilt_wr(bp, i, bp->qm_mapping);
6559 if (CHIP_IS_E1(bp))
6560 REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));
6561 else {
6562 REG_WR(bp, PXP2_REG_RQ_QM_FIRST_ILT, i);
6563 REG_WR(bp, PXP2_REG_RQ_QM_LAST_ILT, i);
6566 i++;
6567 bnx2x_ilt_wr(bp, i, bp->t1_mapping);
6568 if (CHIP_IS_E1(bp))
6569 REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
6570 else {
6571 REG_WR(bp, PXP2_REG_RQ_SRC_FIRST_ILT, i);
6572 REG_WR(bp, PXP2_REG_RQ_SRC_LAST_ILT, i);
6575 /* tell the searcher where the T2 table is */
6576 REG_WR(bp, SRC_REG_COUNTFREE0 + port*4, 16*1024/64);
6578 bnx2x_wb_wr(bp, SRC_REG_FIRSTFREE0 + port*16,
6579 U64_LO(bp->t2_mapping), U64_HI(bp->t2_mapping));
6581 bnx2x_wb_wr(bp, SRC_REG_LASTFREE0 + port*16,
6582 U64_LO((u64)bp->t2_mapping + 16*1024 - 64),
6583 U64_HI((u64)bp->t2_mapping + 16*1024 - 64));
6585 REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + port*4, 10);
6586 #endif
6588 if (CHIP_IS_E1H(bp)) {
6589 bnx2x_init_block(bp, MISC_BLOCK, FUNC0_STAGE + func);
6590 bnx2x_init_block(bp, TCM_BLOCK, FUNC0_STAGE + func);
6591 bnx2x_init_block(bp, UCM_BLOCK, FUNC0_STAGE + func);
6592 bnx2x_init_block(bp, CCM_BLOCK, FUNC0_STAGE + func);
6593 bnx2x_init_block(bp, XCM_BLOCK, FUNC0_STAGE + func);
6594 bnx2x_init_block(bp, TSEM_BLOCK, FUNC0_STAGE + func);
6595 bnx2x_init_block(bp, USEM_BLOCK, FUNC0_STAGE + func);
6596 bnx2x_init_block(bp, CSEM_BLOCK, FUNC0_STAGE + func);
6597 bnx2x_init_block(bp, XSEM_BLOCK, FUNC0_STAGE + func);
6599 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
6600 REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov);
6603 /* HC init per function */
6604 if (CHIP_IS_E1H(bp)) {
6605 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
6607 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
6608 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
6610 bnx2x_init_block(bp, HC_BLOCK, FUNC0_STAGE + func);
6612 /* Reset PCIE errors for debug */
6613 REG_WR(bp, 0x2114, 0xffffffff);
6614 REG_WR(bp, 0x2120, 0xffffffff);
6616 return 0;
6619 static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
6621 int i, rc = 0;
6623 DP(BNX2X_MSG_MCP, "function %d load_code %x\n",
6624 BP_FUNC(bp), load_code);
6626 bp->dmae_ready = 0;
6627 mutex_init(&bp->dmae_mutex);
6628 rc = bnx2x_gunzip_init(bp);
6629 if (rc)
6630 return rc;
6632 switch (load_code) {
6633 case FW_MSG_CODE_DRV_LOAD_COMMON:
6634 rc = bnx2x_init_common(bp);
6635 if (rc)
6636 goto init_hw_err;
6637 /* no break */
6639 case FW_MSG_CODE_DRV_LOAD_PORT:
6640 bp->dmae_ready = 1;
6641 rc = bnx2x_init_port(bp);
6642 if (rc)
6643 goto init_hw_err;
6644 /* no break */
6646 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
6647 bp->dmae_ready = 1;
6648 rc = bnx2x_init_func(bp);
6649 if (rc)
6650 goto init_hw_err;
6651 break;
6653 default:
6654 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
6655 break;
6658 if (!BP_NOMCP(bp)) {
6659 int func = BP_FUNC(bp);
6661 bp->fw_drv_pulse_wr_seq =
6662 (SHMEM_RD(bp, func_mb[func].drv_pulse_mb) &
6663 DRV_PULSE_SEQ_MASK);
6664 DP(BNX2X_MSG_MCP, "drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
6667 /* this needs to be done before gunzip end */
6668 bnx2x_zero_def_sb(bp);
6669 for_each_queue(bp, i)
6670 bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
6671 #ifdef BCM_CNIC
6672 bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
6673 #endif
6675 init_hw_err:
6676 bnx2x_gunzip_end(bp);
6678 return rc;
6681 static void bnx2x_free_mem(struct bnx2x *bp)
6684 #define BNX2X_PCI_FREE(x, y, size) \
6685 do { \
6686 if (x) { \
6687 pci_free_consistent(bp->pdev, size, x, y); \
6688 x = NULL; \
6689 y = 0; \
6691 } while (0)
6693 #define BNX2X_FREE(x) \
6694 do { \
6695 if (x) { \
6696 vfree(x); \
6697 x = NULL; \
6699 } while (0)
6701 int i;
6703 /* fastpath */
6704 /* Common */
6705 for_each_queue(bp, i) {
6707 /* status blocks */
6708 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
6709 bnx2x_fp(bp, i, status_blk_mapping),
6710 sizeof(struct host_status_block));
6712 /* Rx */
6713 for_each_queue(bp, i) {
6715 /* fastpath rx rings: rx_buf rx_desc rx_comp */
6716 BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
6717 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
6718 bnx2x_fp(bp, i, rx_desc_mapping),
6719 sizeof(struct eth_rx_bd) * NUM_RX_BD);
6721 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
6722 bnx2x_fp(bp, i, rx_comp_mapping),
6723 sizeof(struct eth_fast_path_rx_cqe) *
6724 NUM_RCQ_BD);
6726 /* SGE ring */
6727 BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
6728 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
6729 bnx2x_fp(bp, i, rx_sge_mapping),
6730 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
6732 /* Tx */
6733 for_each_queue(bp, i) {
6735 /* fastpath tx rings: tx_buf tx_desc */
6736 BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
6737 BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
6738 bnx2x_fp(bp, i, tx_desc_mapping),
6739 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
6741 /* end of fastpath */
6743 BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
6744 sizeof(struct host_def_status_block));
6746 BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
6747 sizeof(struct bnx2x_slowpath));
6749 #ifdef BCM_CNIC
6750 BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
6751 BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
6752 BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
6753 BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
6754 BNX2X_PCI_FREE(bp->cnic_sb, bp->cnic_sb_mapping,
6755 sizeof(struct host_status_block));
6756 #endif
6757 BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
6759 #undef BNX2X_PCI_FREE
6760 #undef BNX2X_KFREE
6763 static int bnx2x_alloc_mem(struct bnx2x *bp)
6766 #define BNX2X_PCI_ALLOC(x, y, size) \
6767 do { \
6768 x = pci_alloc_consistent(bp->pdev, size, y); \
6769 if (x == NULL) \
6770 goto alloc_mem_err; \
6771 memset(x, 0, size); \
6772 } while (0)
6774 #define BNX2X_ALLOC(x, size) \
6775 do { \
6776 x = vmalloc(size); \
6777 if (x == NULL) \
6778 goto alloc_mem_err; \
6779 memset(x, 0, size); \
6780 } while (0)
6782 int i;
6784 /* fastpath */
6785 /* Common */
6786 for_each_queue(bp, i) {
6787 bnx2x_fp(bp, i, bp) = bp;
6789 /* status blocks */
6790 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
6791 &bnx2x_fp(bp, i, status_blk_mapping),
6792 sizeof(struct host_status_block));
6794 /* Rx */
6795 for_each_queue(bp, i) {
6797 /* fastpath rx rings: rx_buf rx_desc rx_comp */
6798 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
6799 sizeof(struct sw_rx_bd) * NUM_RX_BD);
6800 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
6801 &bnx2x_fp(bp, i, rx_desc_mapping),
6802 sizeof(struct eth_rx_bd) * NUM_RX_BD);
6804 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
6805 &bnx2x_fp(bp, i, rx_comp_mapping),
6806 sizeof(struct eth_fast_path_rx_cqe) *
6807 NUM_RCQ_BD);
6809 /* SGE ring */
6810 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
6811 sizeof(struct sw_rx_page) * NUM_RX_SGE);
6812 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
6813 &bnx2x_fp(bp, i, rx_sge_mapping),
6814 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
6816 /* Tx */
6817 for_each_queue(bp, i) {
6819 /* fastpath tx rings: tx_buf tx_desc */
6820 BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
6821 sizeof(struct sw_tx_bd) * NUM_TX_BD);
6822 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
6823 &bnx2x_fp(bp, i, tx_desc_mapping),
6824 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
6826 /* end of fastpath */
6828 BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
6829 sizeof(struct host_def_status_block));
6831 BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
6832 sizeof(struct bnx2x_slowpath));
6834 #ifdef BCM_CNIC
6835 BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);
6837 /* allocate searcher T2 table
6838 we allocate 1/4 of alloc num for T2
6839 (which is not entered into the ILT) */
6840 BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);
6842 /* Initialize T2 (for 1024 connections) */
6843 for (i = 0; i < 16*1024; i += 64)
6844 *(u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;
6846 /* Timer block array (8*MAX_CONN) phys uncached for now 1024 conns */
6847 BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);
6849 /* QM queues (128*MAX_CONN) */
6850 BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);
6852 BNX2X_PCI_ALLOC(bp->cnic_sb, &bp->cnic_sb_mapping,
6853 sizeof(struct host_status_block));
6854 #endif
6856 /* Slow path ring */
6857 BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);
6859 return 0;
6861 alloc_mem_err:
6862 bnx2x_free_mem(bp);
6863 return -ENOMEM;
6865 #undef BNX2X_PCI_ALLOC
6866 #undef BNX2X_ALLOC
6869 static void bnx2x_free_tx_skbs(struct bnx2x *bp)
6871 int i;
6873 for_each_queue(bp, i) {
6874 struct bnx2x_fastpath *fp = &bp->fp[i];
6876 u16 bd_cons = fp->tx_bd_cons;
6877 u16 sw_prod = fp->tx_pkt_prod;
6878 u16 sw_cons = fp->tx_pkt_cons;
6880 while (sw_cons != sw_prod) {
6881 bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
6882 sw_cons++;
6887 static void bnx2x_free_rx_skbs(struct bnx2x *bp)
6889 int i, j;
6891 for_each_queue(bp, j) {
6892 struct bnx2x_fastpath *fp = &bp->fp[j];
6894 for (i = 0; i < NUM_RX_BD; i++) {
6895 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
6896 struct sk_buff *skb = rx_buf->skb;
6898 if (skb == NULL)
6899 continue;
6901 pci_unmap_single(bp->pdev,
6902 pci_unmap_addr(rx_buf, mapping),
6903 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
6905 rx_buf->skb = NULL;
6906 dev_kfree_skb(skb);
6908 if (!fp->disable_tpa)
6909 bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
6910 ETH_MAX_AGGREGATION_QUEUES_E1 :
6911 ETH_MAX_AGGREGATION_QUEUES_E1H);
6915 static void bnx2x_free_skbs(struct bnx2x *bp)
6917 bnx2x_free_tx_skbs(bp);
6918 bnx2x_free_rx_skbs(bp);
6921 static void bnx2x_free_msix_irqs(struct bnx2x *bp)
6923 int i, offset = 1;
6925 free_irq(bp->msix_table[0].vector, bp->dev);
6926 DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
6927 bp->msix_table[0].vector);
6929 #ifdef BCM_CNIC
6930 offset++;
6931 #endif
6932 for_each_queue(bp, i) {
6933 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq "
6934 "state %x\n", i, bp->msix_table[i + offset].vector,
6935 bnx2x_fp(bp, i, state));
6937 free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
6941 static void bnx2x_free_irq(struct bnx2x *bp, bool disable_only)
6943 if (bp->flags & USING_MSIX_FLAG) {
6944 if (!disable_only)
6945 bnx2x_free_msix_irqs(bp);
6946 pci_disable_msix(bp->pdev);
6947 bp->flags &= ~USING_MSIX_FLAG;
6949 } else if (bp->flags & USING_MSI_FLAG) {
6950 if (!disable_only)
6951 free_irq(bp->pdev->irq, bp->dev);
6952 pci_disable_msi(bp->pdev);
6953 bp->flags &= ~USING_MSI_FLAG;
6955 } else if (!disable_only)
6956 free_irq(bp->pdev->irq, bp->dev);
6959 static int bnx2x_enable_msix(struct bnx2x *bp)
6961 int i, rc, offset = 1;
6962 int igu_vec = 0;
6964 bp->msix_table[0].entry = igu_vec;
6965 DP(NETIF_MSG_IFUP, "msix_table[0].entry = %d (slowpath)\n", igu_vec);
6967 #ifdef BCM_CNIC
6968 igu_vec = BP_L_ID(bp) + offset;
6969 bp->msix_table[1].entry = igu_vec;
6970 DP(NETIF_MSG_IFUP, "msix_table[1].entry = %d (CNIC)\n", igu_vec);
6971 offset++;
6972 #endif
6973 for_each_queue(bp, i) {
6974 igu_vec = BP_L_ID(bp) + offset + i;
6975 bp->msix_table[i + offset].entry = igu_vec;
6976 DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
6977 "(fastpath #%u)\n", i + offset, igu_vec, i);
6980 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0],
6981 BNX2X_NUM_QUEUES(bp) + offset);
6982 if (rc) {
6983 DP(NETIF_MSG_IFUP, "MSI-X is not attainable rc %d\n", rc);
6984 return rc;
6987 bp->flags |= USING_MSIX_FLAG;
6989 return 0;
6992 static int bnx2x_req_msix_irqs(struct bnx2x *bp)
6994 int i, rc, offset = 1;
6996 rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
6997 bp->dev->name, bp->dev);
6998 if (rc) {
6999 BNX2X_ERR("request sp irq failed\n");
7000 return -EBUSY;
7003 #ifdef BCM_CNIC
7004 offset++;
7005 #endif
7006 for_each_queue(bp, i) {
7007 struct bnx2x_fastpath *fp = &bp->fp[i];
7008 snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
7009 bp->dev->name, i);
7011 rc = request_irq(bp->msix_table[i + offset].vector,
7012 bnx2x_msix_fp_int, 0, fp->name, fp);
7013 if (rc) {
7014 BNX2X_ERR("request fp #%d irq failed rc %d\n", i, rc);
7015 bnx2x_free_msix_irqs(bp);
7016 return -EBUSY;
7019 fp->state = BNX2X_FP_STATE_IRQ;
7022 i = BNX2X_NUM_QUEUES(bp);
7023 printk(KERN_INFO PFX "%s: using MSI-X IRQs: sp %d fp[%d] %d"
7024 " ... fp[%d] %d\n",
7025 bp->dev->name, bp->msix_table[0].vector,
7026 0, bp->msix_table[offset].vector,
7027 i - 1, bp->msix_table[offset + i - 1].vector);
7029 return 0;
7032 static int bnx2x_enable_msi(struct bnx2x *bp)
7034 int rc;
7036 rc = pci_enable_msi(bp->pdev);
7037 if (rc) {
7038 DP(NETIF_MSG_IFUP, "MSI is not attainable\n");
7039 return -1;
7041 bp->flags |= USING_MSI_FLAG;
7043 return 0;
7046 static int bnx2x_req_irq(struct bnx2x *bp)
7048 unsigned long flags;
7049 int rc;
7051 if (bp->flags & USING_MSI_FLAG)
7052 flags = 0;
7053 else
7054 flags = IRQF_SHARED;
7056 rc = request_irq(bp->pdev->irq, bnx2x_interrupt, flags,
7057 bp->dev->name, bp->dev);
7058 if (!rc)
7059 bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;
7061 return rc;
7064 static void bnx2x_napi_enable(struct bnx2x *bp)
7066 int i;
7068 for_each_queue(bp, i)
7069 napi_enable(&bnx2x_fp(bp, i, napi));
7072 static void bnx2x_napi_disable(struct bnx2x *bp)
7074 int i;
7076 for_each_queue(bp, i)
7077 napi_disable(&bnx2x_fp(bp, i, napi));
7080 static void bnx2x_netif_start(struct bnx2x *bp)
7082 int intr_sem;
7084 intr_sem = atomic_dec_and_test(&bp->intr_sem);
7085 smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
7087 if (intr_sem) {
7088 if (netif_running(bp->dev)) {
7089 bnx2x_napi_enable(bp);
7090 bnx2x_int_enable(bp);
7091 if (bp->state == BNX2X_STATE_OPEN)
7092 netif_tx_wake_all_queues(bp->dev);
7097 static void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
7099 bnx2x_int_disable_sync(bp, disable_hw);
7100 bnx2x_napi_disable(bp);
7101 netif_tx_disable(bp->dev);
7105 * Init service functions
7109 * Sets a MAC in a CAM for a few L2 Clients for E1 chip
7111 * @param bp driver descriptor
7112 * @param set set or clear an entry (1 or 0)
7113 * @param mac pointer to a buffer containing a MAC
7114 * @param cl_bit_vec bit vector of clients to register a MAC for
7115 * @param cam_offset offset in a CAM to use
7116 * @param with_bcast set broadcast MAC as well
7118 static void bnx2x_set_mac_addr_e1_gen(struct bnx2x *bp, int set, u8 *mac,
7119 u32 cl_bit_vec, u8 cam_offset,
7120 u8 with_bcast)
7122 struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
7123 int port = BP_PORT(bp);
7125 /* CAM allocation
7126 * unicasts 0-31:port0 32-63:port1
7127 * multicast 64-127:port0 128-191:port1
7129 config->hdr.length = 1 + (with_bcast ? 1 : 0);
7130 config->hdr.offset = cam_offset;
7131 config->hdr.client_id = 0xff;
7132 config->hdr.reserved1 = 0;
7134 /* primary MAC */
7135 config->config_table[0].cam_entry.msb_mac_addr =
7136 swab16(*(u16 *)&mac[0]);
7137 config->config_table[0].cam_entry.middle_mac_addr =
7138 swab16(*(u16 *)&mac[2]);
7139 config->config_table[0].cam_entry.lsb_mac_addr =
7140 swab16(*(u16 *)&mac[4]);
7141 config->config_table[0].cam_entry.flags = cpu_to_le16(port);
7142 if (set)
7143 config->config_table[0].target_table_entry.flags = 0;
7144 else
7145 CAM_INVALIDATE(config->config_table[0]);
7146 config->config_table[0].target_table_entry.clients_bit_vector =
7147 cpu_to_le32(cl_bit_vec);
7148 config->config_table[0].target_table_entry.vlan_id = 0;
7150 DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)\n",
7151 (set ? "setting" : "clearing"),
7152 config->config_table[0].cam_entry.msb_mac_addr,
7153 config->config_table[0].cam_entry.middle_mac_addr,
7154 config->config_table[0].cam_entry.lsb_mac_addr);
7156 /* broadcast */
7157 if (with_bcast) {
7158 config->config_table[1].cam_entry.msb_mac_addr =
7159 cpu_to_le16(0xffff);
7160 config->config_table[1].cam_entry.middle_mac_addr =
7161 cpu_to_le16(0xffff);
7162 config->config_table[1].cam_entry.lsb_mac_addr =
7163 cpu_to_le16(0xffff);
7164 config->config_table[1].cam_entry.flags = cpu_to_le16(port);
7165 if (set)
7166 config->config_table[1].target_table_entry.flags =
7167 TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
7168 else
7169 CAM_INVALIDATE(config->config_table[1]);
7170 config->config_table[1].target_table_entry.clients_bit_vector =
7171 cpu_to_le32(cl_bit_vec);
7172 config->config_table[1].target_table_entry.vlan_id = 0;
7175 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
7176 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
7177 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
7181 * Sets a MAC in a CAM for a few L2 Clients for E1H chip
7183 * @param bp driver descriptor
7184 * @param set set or clear an entry (1 or 0)
7185 * @param mac pointer to a buffer containing a MAC
7186 * @param cl_bit_vec bit vector of clients to register a MAC for
7187 * @param cam_offset offset in a CAM to use
7189 static void bnx2x_set_mac_addr_e1h_gen(struct bnx2x *bp, int set, u8 *mac,
7190 u32 cl_bit_vec, u8 cam_offset)
7192 struct mac_configuration_cmd_e1h *config =
7193 (struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config);
7195 config->hdr.length = 1;
7196 config->hdr.offset = cam_offset;
7197 config->hdr.client_id = 0xff;
7198 config->hdr.reserved1 = 0;
7200 /* primary MAC */
7201 config->config_table[0].msb_mac_addr =
7202 swab16(*(u16 *)&mac[0]);
7203 config->config_table[0].middle_mac_addr =
7204 swab16(*(u16 *)&mac[2]);
7205 config->config_table[0].lsb_mac_addr =
7206 swab16(*(u16 *)&mac[4]);
7207 config->config_table[0].clients_bit_vector =
7208 cpu_to_le32(cl_bit_vec);
7209 config->config_table[0].vlan_id = 0;
7210 config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov);
7211 if (set)
7212 config->config_table[0].flags = BP_PORT(bp);
7213 else
7214 config->config_table[0].flags =
7215 MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE;
7217 DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x) E1HOV %d CLID mask %d\n",
7218 (set ? "setting" : "clearing"),
7219 config->config_table[0].msb_mac_addr,
7220 config->config_table[0].middle_mac_addr,
7221 config->config_table[0].lsb_mac_addr, bp->e1hov, cl_bit_vec);
7223 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
7224 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
7225 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
7228 static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
7229 int *state_p, int poll)
7231 /* can take a while if any port is running */
7232 int cnt = 5000;
7234 DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
7235 poll ? "polling" : "waiting", state, idx);
7237 might_sleep();
7238 while (cnt--) {
7239 if (poll) {
7240 bnx2x_rx_int(bp->fp, 10);
7241 /* if index is different from 0
7242 * the reply for some commands will
7243 * be on the non default queue
7245 if (idx)
7246 bnx2x_rx_int(&bp->fp[idx], 10);
7249 mb(); /* state is changed by bnx2x_sp_event() */
7250 if (*state_p == state) {
7251 #ifdef BNX2X_STOP_ON_ERROR
7252 DP(NETIF_MSG_IFUP, "exit (cnt %d)\n", 5000 - cnt);
7253 #endif
7254 return 0;
7257 msleep(1);
7259 if (bp->panic)
7260 return -EIO;
7263 /* timeout! */
7264 BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
7265 poll ? "polling" : "waiting", state, idx);
7266 #ifdef BNX2X_STOP_ON_ERROR
7267 bnx2x_panic();
7268 #endif
7270 return -EBUSY;
7273 static void bnx2x_set_eth_mac_addr_e1h(struct bnx2x *bp, int set)
7275 bp->set_mac_pending++;
7276 smp_wmb();
7278 bnx2x_set_mac_addr_e1h_gen(bp, set, bp->dev->dev_addr,
7279 (1 << bp->fp->cl_id), BP_FUNC(bp));
7281 /* Wait for a completion */
7282 bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
7285 static void bnx2x_set_eth_mac_addr_e1(struct bnx2x *bp, int set)
7287 bp->set_mac_pending++;
7288 smp_wmb();
7290 bnx2x_set_mac_addr_e1_gen(bp, set, bp->dev->dev_addr,
7291 (1 << bp->fp->cl_id), (BP_PORT(bp) ? 32 : 0),
7294 /* Wait for a completion */
7295 bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
7298 #ifdef BCM_CNIC
7300 * Set iSCSI MAC(s) at the next enties in the CAM after the ETH
7301 * MAC(s). This function will wait until the ramdord completion
7302 * returns.
7304 * @param bp driver handle
7305 * @param set set or clear the CAM entry
7307 * @return 0 if cussess, -ENODEV if ramrod doesn't return.
7309 static int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp, int set)
7311 u32 cl_bit_vec = (1 << BCM_ISCSI_ETH_CL_ID);
7313 bp->set_mac_pending++;
7314 smp_wmb();
7316 /* Send a SET_MAC ramrod */
7317 if (CHIP_IS_E1(bp))
7318 bnx2x_set_mac_addr_e1_gen(bp, set, bp->iscsi_mac,
7319 cl_bit_vec, (BP_PORT(bp) ? 32 : 0) + 2,
7321 else
7322 /* CAM allocation for E1H
7323 * unicasts: by func number
7324 * multicast: 20+FUNC*20, 20 each
7326 bnx2x_set_mac_addr_e1h_gen(bp, set, bp->iscsi_mac,
7327 cl_bit_vec, E1H_FUNC_MAX + BP_FUNC(bp));
7329 /* Wait for a completion when setting */
7330 bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
7332 return 0;
7334 #endif
7336 static int bnx2x_setup_leading(struct bnx2x *bp)
7338 int rc;
7340 /* reset IGU state */
7341 bnx2x_ack_sb(bp, bp->fp[0].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
7343 /* SETUP ramrod */
7344 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);
7346 /* Wait for completion */
7347 rc = bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0);
7349 return rc;
7352 static int bnx2x_setup_multi(struct bnx2x *bp, int index)
7354 struct bnx2x_fastpath *fp = &bp->fp[index];
7356 /* reset IGU state */
7357 bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
7359 /* SETUP ramrod */
7360 fp->state = BNX2X_FP_STATE_OPENING;
7361 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0,
7362 fp->cl_id, 0);
7364 /* Wait for completion */
7365 return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
7366 &(fp->state), 0);
7369 static int bnx2x_poll(struct napi_struct *napi, int budget);
7371 static void bnx2x_set_num_queues_msix(struct bnx2x *bp)
7374 switch (bp->multi_mode) {
7375 case ETH_RSS_MODE_DISABLED:
7376 bp->num_queues = 1;
7377 break;
7379 case ETH_RSS_MODE_REGULAR:
7380 if (num_queues)
7381 bp->num_queues = min_t(u32, num_queues,
7382 BNX2X_MAX_QUEUES(bp));
7383 else
7384 bp->num_queues = min_t(u32, num_online_cpus(),
7385 BNX2X_MAX_QUEUES(bp));
7386 break;
7389 default:
7390 bp->num_queues = 1;
7391 break;
7395 static int bnx2x_set_num_queues(struct bnx2x *bp)
7397 int rc = 0;
7399 switch (int_mode) {
7400 case INT_MODE_INTx:
7401 case INT_MODE_MSI:
7402 bp->num_queues = 1;
7403 DP(NETIF_MSG_IFUP, "set number of queues to 1\n");
7404 break;
7406 case INT_MODE_MSIX:
7407 default:
7408 /* Set number of queues according to bp->multi_mode value */
7409 bnx2x_set_num_queues_msix(bp);
7411 DP(NETIF_MSG_IFUP, "set number of queues to %d\n",
7412 bp->num_queues);
7414 /* if we can't use MSI-X we only need one fp,
7415 * so try to enable MSI-X with the requested number of fp's
7416 * and fallback to MSI or legacy INTx with one fp
7418 rc = bnx2x_enable_msix(bp);
7419 if (rc)
7420 /* failed to enable MSI-X */
7421 bp->num_queues = 1;
7422 break;
7424 bp->dev->real_num_tx_queues = bp->num_queues;
7425 return rc;
7428 #ifdef BCM_CNIC
7429 static int bnx2x_cnic_notify(struct bnx2x *bp, int cmd);
7430 static void bnx2x_setup_cnic_irq_info(struct bnx2x *bp);
7431 #endif
7433 /* must be called with rtnl_lock */
7434 static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
7436 u32 load_code;
7437 int i, rc;
7439 #ifdef BNX2X_STOP_ON_ERROR
7440 if (unlikely(bp->panic))
7441 return -EPERM;
7442 #endif
7444 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
7446 rc = bnx2x_set_num_queues(bp);
7448 if (bnx2x_alloc_mem(bp)) {
7449 bnx2x_free_irq(bp, true);
7450 return -ENOMEM;
7453 for_each_queue(bp, i)
7454 bnx2x_fp(bp, i, disable_tpa) =
7455 ((bp->flags & TPA_ENABLE_FLAG) == 0);
7457 for_each_queue(bp, i)
7458 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
7459 bnx2x_poll, 128);
7461 bnx2x_napi_enable(bp);
7463 if (bp->flags & USING_MSIX_FLAG) {
7464 rc = bnx2x_req_msix_irqs(bp);
7465 if (rc) {
7466 bnx2x_free_irq(bp, true);
7467 goto load_error1;
7469 } else {
7470 /* Fall to INTx if failed to enable MSI-X due to lack of
7471 memory (in bnx2x_set_num_queues()) */
7472 if ((rc != -ENOMEM) && (int_mode != INT_MODE_INTx))
7473 bnx2x_enable_msi(bp);
7474 bnx2x_ack_int(bp);
7475 rc = bnx2x_req_irq(bp);
7476 if (rc) {
7477 BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc);
7478 bnx2x_free_irq(bp, true);
7479 goto load_error1;
7481 if (bp->flags & USING_MSI_FLAG) {
7482 bp->dev->irq = bp->pdev->irq;
7483 printk(KERN_INFO PFX "%s: using MSI IRQ %d\n",
7484 bp->dev->name, bp->pdev->irq);
7488 /* Send LOAD_REQUEST command to MCP
7489 Returns the type of LOAD command:
7490 if it is the first port to be initialized
7491 common blocks should be initialized, otherwise - not
7493 if (!BP_NOMCP(bp)) {
7494 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
7495 if (!load_code) {
7496 BNX2X_ERR("MCP response failure, aborting\n");
7497 rc = -EBUSY;
7498 goto load_error2;
7500 if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
7501 rc = -EBUSY; /* other port in diagnostic mode */
7502 goto load_error2;
7505 } else {
7506 int port = BP_PORT(bp);
7508 DP(NETIF_MSG_IFUP, "NO MCP - load counts %d, %d, %d\n",
7509 load_count[0], load_count[1], load_count[2]);
7510 load_count[0]++;
7511 load_count[1 + port]++;
7512 DP(NETIF_MSG_IFUP, "NO MCP - new load counts %d, %d, %d\n",
7513 load_count[0], load_count[1], load_count[2]);
7514 if (load_count[0] == 1)
7515 load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
7516 else if (load_count[1 + port] == 1)
7517 load_code = FW_MSG_CODE_DRV_LOAD_PORT;
7518 else
7519 load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
7522 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
7523 (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
7524 bp->port.pmf = 1;
7525 else
7526 bp->port.pmf = 0;
7527 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
7529 /* Initialize HW */
7530 rc = bnx2x_init_hw(bp, load_code);
7531 if (rc) {
7532 BNX2X_ERR("HW init failed, aborting\n");
7533 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
7534 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP);
7535 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
7536 goto load_error2;
7539 /* Setup NIC internals and enable interrupts */
7540 bnx2x_nic_init(bp, load_code);
7542 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) &&
7543 (bp->common.shmem2_base))
7544 SHMEM2_WR(bp, dcc_support,
7545 (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
7546 SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));
7548 /* Send LOAD_DONE command to MCP */
7549 if (!BP_NOMCP(bp)) {
7550 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
7551 if (!load_code) {
7552 BNX2X_ERR("MCP response failure, aborting\n");
7553 rc = -EBUSY;
7554 goto load_error3;
7558 bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
7560 rc = bnx2x_setup_leading(bp);
7561 if (rc) {
7562 BNX2X_ERR("Setup leading failed!\n");
7563 #ifndef BNX2X_STOP_ON_ERROR
7564 goto load_error3;
7565 #else
7566 bp->panic = 1;
7567 return -EBUSY;
7568 #endif
7571 if (CHIP_IS_E1H(bp))
7572 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
7573 DP(NETIF_MSG_IFUP, "mf_cfg function disabled\n");
7574 bp->flags |= MF_FUNC_DIS;
7577 if (bp->state == BNX2X_STATE_OPEN) {
7578 #ifdef BCM_CNIC
7579 /* Enable Timer scan */
7580 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 1);
7581 #endif
7582 for_each_nondefault_queue(bp, i) {
7583 rc = bnx2x_setup_multi(bp, i);
7584 if (rc)
7585 #ifdef BCM_CNIC
7586 goto load_error4;
7587 #else
7588 goto load_error3;
7589 #endif
7592 if (CHIP_IS_E1(bp))
7593 bnx2x_set_eth_mac_addr_e1(bp, 1);
7594 else
7595 bnx2x_set_eth_mac_addr_e1h(bp, 1);
7596 #ifdef BCM_CNIC
7597 /* Set iSCSI L2 MAC */
7598 mutex_lock(&bp->cnic_mutex);
7599 if (bp->cnic_eth_dev.drv_state & CNIC_DRV_STATE_REGD) {
7600 bnx2x_set_iscsi_eth_mac_addr(bp, 1);
7601 bp->cnic_flags |= BNX2X_CNIC_FLAG_MAC_SET;
7602 bnx2x_init_sb(bp, bp->cnic_sb, bp->cnic_sb_mapping,
7603 CNIC_SB_ID(bp));
7605 mutex_unlock(&bp->cnic_mutex);
7606 #endif
7609 if (bp->port.pmf)
7610 bnx2x_initial_phy_init(bp, load_mode);
7612 /* Start fast path */
7613 switch (load_mode) {
7614 case LOAD_NORMAL:
7615 if (bp->state == BNX2X_STATE_OPEN) {
7616 /* Tx queue should be only reenabled */
7617 netif_tx_wake_all_queues(bp->dev);
7619 /* Initialize the receive filter. */
7620 bnx2x_set_rx_mode(bp->dev);
7621 break;
7623 case LOAD_OPEN:
7624 netif_tx_start_all_queues(bp->dev);
7625 if (bp->state != BNX2X_STATE_OPEN)
7626 netif_tx_disable(bp->dev);
7627 /* Initialize the receive filter. */
7628 bnx2x_set_rx_mode(bp->dev);
7629 break;
7631 case LOAD_DIAG:
7632 /* Initialize the receive filter. */
7633 bnx2x_set_rx_mode(bp->dev);
7634 bp->state = BNX2X_STATE_DIAG;
7635 break;
7637 default:
7638 break;
7641 if (!bp->port.pmf)
7642 bnx2x__link_status_update(bp);
7644 /* start the timer */
7645 mod_timer(&bp->timer, jiffies + bp->current_interval);
7647 #ifdef BCM_CNIC
7648 bnx2x_setup_cnic_irq_info(bp);
7649 if (bp->state == BNX2X_STATE_OPEN)
7650 bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
7651 #endif
7653 return 0;
7655 #ifdef BCM_CNIC
7656 load_error4:
7657 /* Disable Timer scan */
7658 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 0);
7659 #endif
7660 load_error3:
7661 bnx2x_int_disable_sync(bp, 1);
7662 if (!BP_NOMCP(bp)) {
7663 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP);
7664 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
7666 bp->port.pmf = 0;
7667 /* Free SKBs, SGEs, TPA pool and driver internals */
7668 bnx2x_free_skbs(bp);
7669 for_each_queue(bp, i)
7670 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
7671 load_error2:
7672 /* Release IRQs */
7673 bnx2x_free_irq(bp, false);
7674 load_error1:
7675 bnx2x_napi_disable(bp);
7676 for_each_queue(bp, i)
7677 netif_napi_del(&bnx2x_fp(bp, i, napi));
7678 bnx2x_free_mem(bp);
7680 return rc;
7683 static int bnx2x_stop_multi(struct bnx2x *bp, int index)
7685 struct bnx2x_fastpath *fp = &bp->fp[index];
7686 int rc;
7688 /* halt the connection */
7689 fp->state = BNX2X_FP_STATE_HALTING;
7690 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, fp->cl_id, 0);
7692 /* Wait for completion */
7693 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
7694 &(fp->state), 1);
7695 if (rc) /* timeout */
7696 return rc;
7698 /* delete cfc entry */
7699 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);
7701 /* Wait for completion */
7702 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index,
7703 &(fp->state), 1);
7704 return rc;
7707 static int bnx2x_stop_leading(struct bnx2x *bp)
7709 __le16 dsb_sp_prod_idx;
7710 /* if the other port is handling traffic,
7711 this can take a lot of time */
7712 int cnt = 500;
7713 int rc;
7715 might_sleep();
7717 /* Send HALT ramrod */
7718 bp->fp[0].state = BNX2X_FP_STATE_HALTING;
7719 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, bp->fp->cl_id, 0);
7721 /* Wait for completion */
7722 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
7723 &(bp->fp[0].state), 1);
7724 if (rc) /* timeout */
7725 return rc;
7727 dsb_sp_prod_idx = *bp->dsb_sp_prod;
7729 /* Send PORT_DELETE ramrod */
7730 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);
7732 /* Wait for completion to arrive on default status block
7733 we are going to reset the chip anyway
7734 so there is not much to do if this times out
7736 while (dsb_sp_prod_idx == *bp->dsb_sp_prod) {
7737 if (!cnt) {
7738 DP(NETIF_MSG_IFDOWN, "timeout waiting for port del "
7739 "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
7740 *bp->dsb_sp_prod, dsb_sp_prod_idx);
7741 #ifdef BNX2X_STOP_ON_ERROR
7742 bnx2x_panic();
7743 #endif
7744 rc = -EBUSY;
7745 break;
7747 cnt--;
7748 msleep(1);
7749 rmb(); /* Refresh the dsb_sp_prod */
7751 bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
7752 bp->fp[0].state = BNX2X_FP_STATE_CLOSED;
7754 return rc;
7757 static void bnx2x_reset_func(struct bnx2x *bp)
7759 int port = BP_PORT(bp);
7760 int func = BP_FUNC(bp);
7761 int base, i;
7763 /* Configure IGU */
7764 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
7765 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
7767 #ifdef BCM_CNIC
7768 /* Disable Timer scan */
7769 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
7771 * Wait for at least 10ms and up to 2 second for the timers scan to
7772 * complete
7774 for (i = 0; i < 200; i++) {
7775 msleep(10);
7776 if (!REG_RD(bp, TM_REG_LIN0_SCAN_ON + port*4))
7777 break;
7779 #endif
7780 /* Clear ILT */
7781 base = FUNC_ILT_BASE(func);
7782 for (i = base; i < base + ILT_PER_FUNC; i++)
7783 bnx2x_ilt_wr(bp, i, 0);
7786 static void bnx2x_reset_port(struct bnx2x *bp)
7788 int port = BP_PORT(bp);
7789 u32 val;
7791 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
7793 /* Do not rcv packets to BRB */
7794 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
7795 /* Do not direct rcv packets that are not for MCP to the BRB */
7796 REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
7797 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
7799 /* Configure AEU */
7800 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
7802 msleep(100);
7803 /* Check for BRB port occupancy */
7804 val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
7805 if (val)
7806 DP(NETIF_MSG_IFDOWN,
7807 "BRB1 is not empty %d blocks are occupied\n", val);
7809 /* TODO: Close Doorbell port? */
7812 static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
7814 DP(BNX2X_MSG_MCP, "function %d reset_code %x\n",
7815 BP_FUNC(bp), reset_code);
7817 switch (reset_code) {
7818 case FW_MSG_CODE_DRV_UNLOAD_COMMON:
7819 bnx2x_reset_port(bp);
7820 bnx2x_reset_func(bp);
7821 bnx2x_reset_common(bp);
7822 break;
7824 case FW_MSG_CODE_DRV_UNLOAD_PORT:
7825 bnx2x_reset_port(bp);
7826 bnx2x_reset_func(bp);
7827 break;
7829 case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
7830 bnx2x_reset_func(bp);
7831 break;
7833 default:
7834 BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
7835 break;
7839 /* must be called with rtnl_lock */
7840 static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
7842 int port = BP_PORT(bp);
7843 u32 reset_code = 0;
7844 int i, cnt, rc;
7846 #ifdef BCM_CNIC
7847 bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
7848 #endif
7849 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
7851 /* Set "drop all" */
7852 bp->rx_mode = BNX2X_RX_MODE_NONE;
7853 bnx2x_set_storm_rx_mode(bp);
7855 /* Disable HW interrupts, NAPI and Tx */
7856 bnx2x_netif_stop(bp, 1);
7858 del_timer_sync(&bp->timer);
7859 SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
7860 (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
7861 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
7863 /* Release IRQs */
7864 bnx2x_free_irq(bp, false);
7866 /* Wait until tx fastpath tasks complete */
7867 for_each_queue(bp, i) {
7868 struct bnx2x_fastpath *fp = &bp->fp[i];
7870 cnt = 1000;
7871 while (bnx2x_has_tx_work_unload(fp)) {
7873 bnx2x_tx_int(fp);
7874 if (!cnt) {
7875 BNX2X_ERR("timeout waiting for queue[%d]\n",
7877 #ifdef BNX2X_STOP_ON_ERROR
7878 bnx2x_panic();
7879 return -EBUSY;
7880 #else
7881 break;
7882 #endif
7884 cnt--;
7885 msleep(1);
7888 /* Give HW time to discard old tx messages */
7889 msleep(1);
7891 if (CHIP_IS_E1(bp)) {
7892 struct mac_configuration_cmd *config =
7893 bnx2x_sp(bp, mcast_config);
7895 bnx2x_set_eth_mac_addr_e1(bp, 0);
7897 for (i = 0; i < config->hdr.length; i++)
7898 CAM_INVALIDATE(config->config_table[i]);
7900 config->hdr.length = i;
7901 if (CHIP_REV_IS_SLOW(bp))
7902 config->hdr.offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
7903 else
7904 config->hdr.offset = BNX2X_MAX_MULTICAST*(1 + port);
7905 config->hdr.client_id = bp->fp->cl_id;
7906 config->hdr.reserved1 = 0;
7908 bp->set_mac_pending++;
7909 smp_wmb();
7911 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
7912 U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
7913 U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);
7915 } else { /* E1H */
7916 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
7918 bnx2x_set_eth_mac_addr_e1h(bp, 0);
7920 for (i = 0; i < MC_HASH_SIZE; i++)
7921 REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
7923 REG_WR(bp, MISC_REG_E1HMF_MODE, 0);
7925 #ifdef BCM_CNIC
7926 /* Clear iSCSI L2 MAC */
7927 mutex_lock(&bp->cnic_mutex);
7928 if (bp->cnic_flags & BNX2X_CNIC_FLAG_MAC_SET) {
7929 bnx2x_set_iscsi_eth_mac_addr(bp, 0);
7930 bp->cnic_flags &= ~BNX2X_CNIC_FLAG_MAC_SET;
7932 mutex_unlock(&bp->cnic_mutex);
7933 #endif
7935 if (unload_mode == UNLOAD_NORMAL)
7936 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7938 else if (bp->flags & NO_WOL_FLAG)
7939 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
7941 else if (bp->wol) {
7942 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
7943 u8 *mac_addr = bp->dev->dev_addr;
7944 u32 val;
7945 /* The mac address is written to entries 1-4 to
7946 preserve entry 0 which is used by the PMF */
7947 u8 entry = (BP_E1HVN(bp) + 1)*8;
7949 val = (mac_addr[0] << 8) | mac_addr[1];
7950 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
7952 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
7953 (mac_addr[4] << 8) | mac_addr[5];
7954 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
7956 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
7958 } else
7959 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7961 /* Close multi and leading connections
7962 Completions for ramrods are collected in a synchronous way */
7963 for_each_nondefault_queue(bp, i)
7964 if (bnx2x_stop_multi(bp, i))
7965 goto unload_error;
7967 rc = bnx2x_stop_leading(bp);
7968 if (rc) {
7969 BNX2X_ERR("Stop leading failed!\n");
7970 #ifdef BNX2X_STOP_ON_ERROR
7971 return -EBUSY;
7972 #else
7973 goto unload_error;
7974 #endif
7977 unload_error:
7978 if (!BP_NOMCP(bp))
7979 reset_code = bnx2x_fw_command(bp, reset_code);
7980 else {
7981 DP(NETIF_MSG_IFDOWN, "NO MCP - load counts %d, %d, %d\n",
7982 load_count[0], load_count[1], load_count[2]);
7983 load_count[0]--;
7984 load_count[1 + port]--;
7985 DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts %d, %d, %d\n",
7986 load_count[0], load_count[1], load_count[2]);
7987 if (load_count[0] == 0)
7988 reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
7989 else if (load_count[1 + port] == 0)
7990 reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
7991 else
7992 reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
7995 if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
7996 (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
7997 bnx2x__link_reset(bp);
7999 /* Reset the chip */
8000 bnx2x_reset_chip(bp, reset_code);
8002 /* Report UNLOAD_DONE to MCP */
8003 if (!BP_NOMCP(bp))
8004 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
8006 bp->port.pmf = 0;
8008 /* Free SKBs, SGEs, TPA pool and driver internals */
8009 bnx2x_free_skbs(bp);
8010 for_each_queue(bp, i)
8011 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
8012 for_each_queue(bp, i)
8013 netif_napi_del(&bnx2x_fp(bp, i, napi));
8014 bnx2x_free_mem(bp);
8016 bp->state = BNX2X_STATE_CLOSED;
8018 netif_carrier_off(bp->dev);
8020 return 0;
8023 static void bnx2x_reset_task(struct work_struct *work)
8025 struct bnx2x *bp = container_of(work, struct bnx2x, reset_task);
8027 #ifdef BNX2X_STOP_ON_ERROR
8028 BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
8029 " so reset not done to allow debug dump,\n"
8030 " you will need to reboot when done\n");
8031 return;
8032 #endif
8034 rtnl_lock();
8036 if (!netif_running(bp->dev))
8037 goto reset_task_exit;
8039 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8040 bnx2x_nic_load(bp, LOAD_NORMAL);
8042 reset_task_exit:
8043 rtnl_unlock();
8046 /* end of nic load/unload */
8048 /* ethtool_ops */
8051 * Init service functions
8054 static inline u32 bnx2x_get_pretend_reg(struct bnx2x *bp, int func)
8056 switch (func) {
8057 case 0: return PXP2_REG_PGL_PRETEND_FUNC_F0;
8058 case 1: return PXP2_REG_PGL_PRETEND_FUNC_F1;
8059 case 2: return PXP2_REG_PGL_PRETEND_FUNC_F2;
8060 case 3: return PXP2_REG_PGL_PRETEND_FUNC_F3;
8061 case 4: return PXP2_REG_PGL_PRETEND_FUNC_F4;
8062 case 5: return PXP2_REG_PGL_PRETEND_FUNC_F5;
8063 case 6: return PXP2_REG_PGL_PRETEND_FUNC_F6;
8064 case 7: return PXP2_REG_PGL_PRETEND_FUNC_F7;
8065 default:
8066 BNX2X_ERR("Unsupported function index: %d\n", func);
8067 return (u32)(-1);
8071 static void bnx2x_undi_int_disable_e1h(struct bnx2x *bp, int orig_func)
8073 u32 reg = bnx2x_get_pretend_reg(bp, orig_func), new_val;
8075 /* Flush all outstanding writes */
8076 mmiowb();
8078 /* Pretend to be function 0 */
8079 REG_WR(bp, reg, 0);
8080 /* Flush the GRC transaction (in the chip) */
8081 new_val = REG_RD(bp, reg);
8082 if (new_val != 0) {
8083 BNX2X_ERR("Hmmm... Pretend register wasn't updated: (0,%d)!\n",
8084 new_val);
8085 BUG();
8088 /* From now we are in the "like-E1" mode */
8089 bnx2x_int_disable(bp);
8091 /* Flush all outstanding writes */
8092 mmiowb();
8094 /* Restore the original funtion settings */
8095 REG_WR(bp, reg, orig_func);
8096 new_val = REG_RD(bp, reg);
8097 if (new_val != orig_func) {
8098 BNX2X_ERR("Hmmm... Pretend register wasn't updated: (%d,%d)!\n",
8099 orig_func, new_val);
8100 BUG();
8104 static inline void bnx2x_undi_int_disable(struct bnx2x *bp, int func)
8106 if (CHIP_IS_E1H(bp))
8107 bnx2x_undi_int_disable_e1h(bp, func);
8108 else
8109 bnx2x_int_disable(bp);
8112 static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
8114 u32 val;
8116 /* Check if there is any driver already loaded */
8117 val = REG_RD(bp, MISC_REG_UNPREPARED);
8118 if (val == 0x1) {
8119 /* Check if it is the UNDI driver
8120 * UNDI driver initializes CID offset for normal bell to 0x7
8122 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
8123 val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
8124 if (val == 0x7) {
8125 u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
8126 /* save our func */
8127 int func = BP_FUNC(bp);
8128 u32 swap_en;
8129 u32 swap_val;
8131 /* clear the UNDI indication */
8132 REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
8134 BNX2X_DEV_INFO("UNDI is active! reset device\n");
8136 /* try unload UNDI on port 0 */
8137 bp->func = 0;
8138 bp->fw_seq =
8139 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
8140 DRV_MSG_SEQ_NUMBER_MASK);
8141 reset_code = bnx2x_fw_command(bp, reset_code);
8143 /* if UNDI is loaded on the other port */
8144 if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {
8146 /* send "DONE" for previous unload */
8147 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
8149 /* unload UNDI on port 1 */
8150 bp->func = 1;
8151 bp->fw_seq =
8152 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
8153 DRV_MSG_SEQ_NUMBER_MASK);
8154 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
8156 bnx2x_fw_command(bp, reset_code);
8159 /* now it's safe to release the lock */
8160 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
8162 bnx2x_undi_int_disable(bp, func);
8164 /* close input traffic and wait for it */
8165 /* Do not rcv packets to BRB */
8166 REG_WR(bp,
8167 (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
8168 NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
8169 /* Do not direct rcv packets that are not for MCP to
8170 * the BRB */
8171 REG_WR(bp,
8172 (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
8173 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
8174 /* clear AEU */
8175 REG_WR(bp,
8176 (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
8177 MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
8178 msleep(10);
8180 /* save NIG port swap info */
8181 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
8182 swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
8183 /* reset device */
8184 REG_WR(bp,
8185 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
8186 0xd3ffffff);
8187 REG_WR(bp,
8188 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
8189 0x1403);
8190 /* take the NIG out of reset and restore swap values */
8191 REG_WR(bp,
8192 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
8193 MISC_REGISTERS_RESET_REG_1_RST_NIG);
8194 REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
8195 REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);
8197 /* send unload done to the MCP */
8198 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
8200 /* restore our func and fw_seq */
8201 bp->func = func;
8202 bp->fw_seq =
8203 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
8204 DRV_MSG_SEQ_NUMBER_MASK);
8206 } else
8207 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
8211 static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
8213 u32 val, val2, val3, val4, id;
8214 u16 pmc;
8216 /* Get the chip revision id and number. */
8217 /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
8218 val = REG_RD(bp, MISC_REG_CHIP_NUM);
8219 id = ((val & 0xffff) << 16);
8220 val = REG_RD(bp, MISC_REG_CHIP_REV);
8221 id |= ((val & 0xf) << 12);
8222 val = REG_RD(bp, MISC_REG_CHIP_METAL);
8223 id |= ((val & 0xff) << 4);
8224 val = REG_RD(bp, MISC_REG_BOND_ID);
8225 id |= (val & 0xf);
8226 bp->common.chip_id = id;
8227 bp->link_params.chip_id = bp->common.chip_id;
8228 BNX2X_DEV_INFO("chip ID is 0x%x\n", id);
8230 val = (REG_RD(bp, 0x2874) & 0x55);
8231 if ((bp->common.chip_id & 0x1) ||
8232 (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) {
8233 bp->flags |= ONE_PORT_FLAG;
8234 BNX2X_DEV_INFO("single port device\n");
8237 val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
8238 bp->common.flash_size = (NVRAM_1MB_SIZE <<
8239 (val & MCPR_NVM_CFG4_FLASH_SIZE));
8240 BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
8241 bp->common.flash_size, bp->common.flash_size);
8243 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
8244 bp->common.shmem2_base = REG_RD(bp, MISC_REG_GENERIC_CR_0);
8245 bp->link_params.shmem_base = bp->common.shmem_base;
8246 BNX2X_DEV_INFO("shmem offset 0x%x shmem2 offset 0x%x\n",
8247 bp->common.shmem_base, bp->common.shmem2_base);
8249 if (!bp->common.shmem_base ||
8250 (bp->common.shmem_base < 0xA0000) ||
8251 (bp->common.shmem_base >= 0xC0000)) {
8252 BNX2X_DEV_INFO("MCP not active\n");
8253 bp->flags |= NO_MCP_FLAG;
8254 return;
8257 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
8258 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
8259 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
8260 BNX2X_ERR("BAD MCP validity signature\n");
8262 bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
8263 BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config);
8265 bp->link_params.hw_led_mode = ((bp->common.hw_config &
8266 SHARED_HW_CFG_LED_MODE_MASK) >>
8267 SHARED_HW_CFG_LED_MODE_SHIFT);
8269 bp->link_params.feature_config_flags = 0;
8270 val = SHMEM_RD(bp, dev_info.shared_feature_config.config);
8271 if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED)
8272 bp->link_params.feature_config_flags |=
8273 FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
8274 else
8275 bp->link_params.feature_config_flags &=
8276 ~FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
8278 val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
8279 bp->common.bc_ver = val;
8280 BNX2X_DEV_INFO("bc_ver %X\n", val);
8281 if (val < BNX2X_BC_VER) {
8282 /* for now only warn
8283 * later we might need to enforce this */
8284 BNX2X_ERR("This driver needs bc_ver %X but found %X,"
8285 " please upgrade BC\n", BNX2X_BC_VER, val);
8287 bp->link_params.feature_config_flags |=
8288 (val >= REQ_BC_VER_4_VRFY_OPT_MDL) ?
8289 FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY : 0;
8291 if (BP_E1HVN(bp) == 0) {
8292 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
8293 bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
8294 } else {
8295 /* no WOL capability for E1HVN != 0 */
8296 bp->flags |= NO_WOL_FLAG;
8298 BNX2X_DEV_INFO("%sWoL capable\n",
8299 (bp->flags & NO_WOL_FLAG) ? "not " : "");
8301 val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
8302 val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
8303 val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
8304 val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
8306 printk(KERN_INFO PFX "part number %X-%X-%X-%X\n",
8307 val, val2, val3, val4);
8310 static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
8311 u32 switch_cfg)
8313 int port = BP_PORT(bp);
8314 u32 ext_phy_type;
8316 switch (switch_cfg) {
8317 case SWITCH_CFG_1G:
8318 BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg);
8320 ext_phy_type =
8321 SERDES_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
8322 switch (ext_phy_type) {
8323 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
8324 BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
8325 ext_phy_type);
8327 bp->port.supported |= (SUPPORTED_10baseT_Half |
8328 SUPPORTED_10baseT_Full |
8329 SUPPORTED_100baseT_Half |
8330 SUPPORTED_100baseT_Full |
8331 SUPPORTED_1000baseT_Full |
8332 SUPPORTED_2500baseX_Full |
8333 SUPPORTED_TP |
8334 SUPPORTED_FIBRE |
8335 SUPPORTED_Autoneg |
8336 SUPPORTED_Pause |
8337 SUPPORTED_Asym_Pause);
8338 break;
8340 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
8341 BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n",
8342 ext_phy_type);
8344 bp->port.supported |= (SUPPORTED_10baseT_Half |
8345 SUPPORTED_10baseT_Full |
8346 SUPPORTED_100baseT_Half |
8347 SUPPORTED_100baseT_Full |
8348 SUPPORTED_1000baseT_Full |
8349 SUPPORTED_TP |
8350 SUPPORTED_FIBRE |
8351 SUPPORTED_Autoneg |
8352 SUPPORTED_Pause |
8353 SUPPORTED_Asym_Pause);
8354 break;
8356 default:
8357 BNX2X_ERR("NVRAM config error. "
8358 "BAD SerDes ext_phy_config 0x%x\n",
8359 bp->link_params.ext_phy_config);
8360 return;
8363 bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
8364 port*0x10);
8365 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
8366 break;
8368 case SWITCH_CFG_10G:
8369 BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg);
8371 ext_phy_type =
8372 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
8373 switch (ext_phy_type) {
8374 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
8375 BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
8376 ext_phy_type);
8378 bp->port.supported |= (SUPPORTED_10baseT_Half |
8379 SUPPORTED_10baseT_Full |
8380 SUPPORTED_100baseT_Half |
8381 SUPPORTED_100baseT_Full |
8382 SUPPORTED_1000baseT_Full |
8383 SUPPORTED_2500baseX_Full |
8384 SUPPORTED_10000baseT_Full |
8385 SUPPORTED_TP |
8386 SUPPORTED_FIBRE |
8387 SUPPORTED_Autoneg |
8388 SUPPORTED_Pause |
8389 SUPPORTED_Asym_Pause);
8390 break;
8392 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
8393 BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n",
8394 ext_phy_type);
8396 bp->port.supported |= (SUPPORTED_10000baseT_Full |
8397 SUPPORTED_1000baseT_Full |
8398 SUPPORTED_FIBRE |
8399 SUPPORTED_Autoneg |
8400 SUPPORTED_Pause |
8401 SUPPORTED_Asym_Pause);
8402 break;
8404 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
8405 BNX2X_DEV_INFO("ext_phy_type 0x%x (8073)\n",
8406 ext_phy_type);
8408 bp->port.supported |= (SUPPORTED_10000baseT_Full |
8409 SUPPORTED_2500baseX_Full |
8410 SUPPORTED_1000baseT_Full |
8411 SUPPORTED_FIBRE |
8412 SUPPORTED_Autoneg |
8413 SUPPORTED_Pause |
8414 SUPPORTED_Asym_Pause);
8415 break;
8417 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
8418 BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n",
8419 ext_phy_type);
8421 bp->port.supported |= (SUPPORTED_10000baseT_Full |
8422 SUPPORTED_FIBRE |
8423 SUPPORTED_Pause |
8424 SUPPORTED_Asym_Pause);
8425 break;
8427 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
8428 BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n",
8429 ext_phy_type);
8431 bp->port.supported |= (SUPPORTED_10000baseT_Full |
8432 SUPPORTED_1000baseT_Full |
8433 SUPPORTED_FIBRE |
8434 SUPPORTED_Pause |
8435 SUPPORTED_Asym_Pause);
8436 break;
8438 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
8439 BNX2X_DEV_INFO("ext_phy_type 0x%x (8726)\n",
8440 ext_phy_type);
8442 bp->port.supported |= (SUPPORTED_10000baseT_Full |
8443 SUPPORTED_1000baseT_Full |
8444 SUPPORTED_Autoneg |
8445 SUPPORTED_FIBRE |
8446 SUPPORTED_Pause |
8447 SUPPORTED_Asym_Pause);
8448 break;
8450 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
8451 BNX2X_DEV_INFO("ext_phy_type 0x%x (8727)\n",
8452 ext_phy_type);
8454 bp->port.supported |= (SUPPORTED_10000baseT_Full |
8455 SUPPORTED_1000baseT_Full |
8456 SUPPORTED_Autoneg |
8457 SUPPORTED_FIBRE |
8458 SUPPORTED_Pause |
8459 SUPPORTED_Asym_Pause);
8460 break;
8462 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
8463 BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n",
8464 ext_phy_type);
8466 bp->port.supported |= (SUPPORTED_10000baseT_Full |
8467 SUPPORTED_TP |
8468 SUPPORTED_Autoneg |
8469 SUPPORTED_Pause |
8470 SUPPORTED_Asym_Pause);
8471 break;
8473 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
8474 BNX2X_DEV_INFO("ext_phy_type 0x%x (BCM8481)\n",
8475 ext_phy_type);
8477 bp->port.supported |= (SUPPORTED_10baseT_Half |
8478 SUPPORTED_10baseT_Full |
8479 SUPPORTED_100baseT_Half |
8480 SUPPORTED_100baseT_Full |
8481 SUPPORTED_1000baseT_Full |
8482 SUPPORTED_10000baseT_Full |
8483 SUPPORTED_TP |
8484 SUPPORTED_Autoneg |
8485 SUPPORTED_Pause |
8486 SUPPORTED_Asym_Pause);
8487 break;
8489 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
8490 BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
8491 bp->link_params.ext_phy_config);
8492 break;
8494 default:
8495 BNX2X_ERR("NVRAM config error. "
8496 "BAD XGXS ext_phy_config 0x%x\n",
8497 bp->link_params.ext_phy_config);
8498 return;
8501 bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
8502 port*0x18);
8503 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
8505 break;
8507 default:
8508 BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
8509 bp->port.link_config);
8510 return;
8512 bp->link_params.phy_addr = bp->port.phy_addr;
8514 /* mask what we support according to speed_cap_mask */
8515 if (!(bp->link_params.speed_cap_mask &
8516 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
8517 bp->port.supported &= ~SUPPORTED_10baseT_Half;
8519 if (!(bp->link_params.speed_cap_mask &
8520 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
8521 bp->port.supported &= ~SUPPORTED_10baseT_Full;
8523 if (!(bp->link_params.speed_cap_mask &
8524 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
8525 bp->port.supported &= ~SUPPORTED_100baseT_Half;
8527 if (!(bp->link_params.speed_cap_mask &
8528 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
8529 bp->port.supported &= ~SUPPORTED_100baseT_Full;
8531 if (!(bp->link_params.speed_cap_mask &
8532 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
8533 bp->port.supported &= ~(SUPPORTED_1000baseT_Half |
8534 SUPPORTED_1000baseT_Full);
8536 if (!(bp->link_params.speed_cap_mask &
8537 PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
8538 bp->port.supported &= ~SUPPORTED_2500baseX_Full;
8540 if (!(bp->link_params.speed_cap_mask &
8541 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
8542 bp->port.supported &= ~SUPPORTED_10000baseT_Full;
8544 BNX2X_DEV_INFO("supported 0x%x\n", bp->port.supported);
8547 static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
8549 bp->link_params.req_duplex = DUPLEX_FULL;
8551 switch (bp->port.link_config & PORT_FEATURE_LINK_SPEED_MASK) {
8552 case PORT_FEATURE_LINK_SPEED_AUTO:
8553 if (bp->port.supported & SUPPORTED_Autoneg) {
8554 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
8555 bp->port.advertising = bp->port.supported;
8556 } else {
8557 u32 ext_phy_type =
8558 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
8560 if ((ext_phy_type ==
8561 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
8562 (ext_phy_type ==
8563 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706)) {
8564 /* force 10G, no AN */
8565 bp->link_params.req_line_speed = SPEED_10000;
8566 bp->port.advertising =
8567 (ADVERTISED_10000baseT_Full |
8568 ADVERTISED_FIBRE);
8569 break;
8571 BNX2X_ERR("NVRAM config error. "
8572 "Invalid link_config 0x%x"
8573 " Autoneg not supported\n",
8574 bp->port.link_config);
8575 return;
8577 break;
8579 case PORT_FEATURE_LINK_SPEED_10M_FULL:
8580 if (bp->port.supported & SUPPORTED_10baseT_Full) {
8581 bp->link_params.req_line_speed = SPEED_10;
8582 bp->port.advertising = (ADVERTISED_10baseT_Full |
8583 ADVERTISED_TP);
8584 } else {
8585 BNX2X_ERR("NVRAM config error. "
8586 "Invalid link_config 0x%x"
8587 " speed_cap_mask 0x%x\n",
8588 bp->port.link_config,
8589 bp->link_params.speed_cap_mask);
8590 return;
8592 break;
8594 case PORT_FEATURE_LINK_SPEED_10M_HALF:
8595 if (bp->port.supported & SUPPORTED_10baseT_Half) {
8596 bp->link_params.req_line_speed = SPEED_10;
8597 bp->link_params.req_duplex = DUPLEX_HALF;
8598 bp->port.advertising = (ADVERTISED_10baseT_Half |
8599 ADVERTISED_TP);
8600 } else {
8601 BNX2X_ERR("NVRAM config error. "
8602 "Invalid link_config 0x%x"
8603 " speed_cap_mask 0x%x\n",
8604 bp->port.link_config,
8605 bp->link_params.speed_cap_mask);
8606 return;
8608 break;
8610 case PORT_FEATURE_LINK_SPEED_100M_FULL:
8611 if (bp->port.supported & SUPPORTED_100baseT_Full) {
8612 bp->link_params.req_line_speed = SPEED_100;
8613 bp->port.advertising = (ADVERTISED_100baseT_Full |
8614 ADVERTISED_TP);
8615 } else {
8616 BNX2X_ERR("NVRAM config error. "
8617 "Invalid link_config 0x%x"
8618 " speed_cap_mask 0x%x\n",
8619 bp->port.link_config,
8620 bp->link_params.speed_cap_mask);
8621 return;
8623 break;
8625 case PORT_FEATURE_LINK_SPEED_100M_HALF:
8626 if (bp->port.supported & SUPPORTED_100baseT_Half) {
8627 bp->link_params.req_line_speed = SPEED_100;
8628 bp->link_params.req_duplex = DUPLEX_HALF;
8629 bp->port.advertising = (ADVERTISED_100baseT_Half |
8630 ADVERTISED_TP);
8631 } else {
8632 BNX2X_ERR("NVRAM config error. "
8633 "Invalid link_config 0x%x"
8634 " speed_cap_mask 0x%x\n",
8635 bp->port.link_config,
8636 bp->link_params.speed_cap_mask);
8637 return;
8639 break;
8641 case PORT_FEATURE_LINK_SPEED_1G:
8642 if (bp->port.supported & SUPPORTED_1000baseT_Full) {
8643 bp->link_params.req_line_speed = SPEED_1000;
8644 bp->port.advertising = (ADVERTISED_1000baseT_Full |
8645 ADVERTISED_TP);
8646 } else {
8647 BNX2X_ERR("NVRAM config error. "
8648 "Invalid link_config 0x%x"
8649 " speed_cap_mask 0x%x\n",
8650 bp->port.link_config,
8651 bp->link_params.speed_cap_mask);
8652 return;
8654 break;
8656 case PORT_FEATURE_LINK_SPEED_2_5G:
8657 if (bp->port.supported & SUPPORTED_2500baseX_Full) {
8658 bp->link_params.req_line_speed = SPEED_2500;
8659 bp->port.advertising = (ADVERTISED_2500baseX_Full |
8660 ADVERTISED_TP);
8661 } else {
8662 BNX2X_ERR("NVRAM config error. "
8663 "Invalid link_config 0x%x"
8664 " speed_cap_mask 0x%x\n",
8665 bp->port.link_config,
8666 bp->link_params.speed_cap_mask);
8667 return;
8669 break;
8671 case PORT_FEATURE_LINK_SPEED_10G_CX4:
8672 case PORT_FEATURE_LINK_SPEED_10G_KX4:
8673 case PORT_FEATURE_LINK_SPEED_10G_KR:
8674 if (bp->port.supported & SUPPORTED_10000baseT_Full) {
8675 bp->link_params.req_line_speed = SPEED_10000;
8676 bp->port.advertising = (ADVERTISED_10000baseT_Full |
8677 ADVERTISED_FIBRE);
8678 } else {
8679 BNX2X_ERR("NVRAM config error. "
8680 "Invalid link_config 0x%x"
8681 " speed_cap_mask 0x%x\n",
8682 bp->port.link_config,
8683 bp->link_params.speed_cap_mask);
8684 return;
8686 break;
8688 default:
8689 BNX2X_ERR("NVRAM config error. "
8690 "BAD link speed link_config 0x%x\n",
8691 bp->port.link_config);
8692 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
8693 bp->port.advertising = bp->port.supported;
8694 break;
8697 bp->link_params.req_flow_ctrl = (bp->port.link_config &
8698 PORT_FEATURE_FLOW_CONTROL_MASK);
8699 if ((bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
8700 !(bp->port.supported & SUPPORTED_Autoneg))
8701 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
8703 BNX2X_DEV_INFO("req_line_speed %d req_duplex %d req_flow_ctrl 0x%x"
8704 " advertising 0x%x\n",
8705 bp->link_params.req_line_speed,
8706 bp->link_params.req_duplex,
8707 bp->link_params.req_flow_ctrl, bp->port.advertising);
8710 static void __devinit bnx2x_set_mac_buf(u8 *mac_buf, u32 mac_lo, u16 mac_hi)
8712 mac_hi = cpu_to_be16(mac_hi);
8713 mac_lo = cpu_to_be32(mac_lo);
8714 memcpy(mac_buf, &mac_hi, sizeof(mac_hi));
8715 memcpy(mac_buf + sizeof(mac_hi), &mac_lo, sizeof(mac_lo));
8718 static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
8720 int port = BP_PORT(bp);
8721 u32 val, val2;
8722 u32 config;
8723 u16 i;
8724 u32 ext_phy_type;
8726 bp->link_params.bp = bp;
8727 bp->link_params.port = port;
8729 bp->link_params.lane_config =
8730 SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
8731 bp->link_params.ext_phy_config =
8732 SHMEM_RD(bp,
8733 dev_info.port_hw_config[port].external_phy_config);
8734 /* BCM8727_NOC => BCM8727 no over current */
8735 if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
8736 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727_NOC) {
8737 bp->link_params.ext_phy_config &=
8738 ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
8739 bp->link_params.ext_phy_config |=
8740 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727;
8741 bp->link_params.feature_config_flags |=
8742 FEATURE_CONFIG_BCM8727_NOC;
8745 bp->link_params.speed_cap_mask =
8746 SHMEM_RD(bp,
8747 dev_info.port_hw_config[port].speed_capability_mask);
8749 bp->port.link_config =
8750 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
8752 /* Get the 4 lanes xgxs config rx and tx */
8753 for (i = 0; i < 2; i++) {
8754 val = SHMEM_RD(bp,
8755 dev_info.port_hw_config[port].xgxs_config_rx[i<<1]);
8756 bp->link_params.xgxs_config_rx[i << 1] = ((val>>16) & 0xffff);
8757 bp->link_params.xgxs_config_rx[(i << 1) + 1] = (val & 0xffff);
8759 val = SHMEM_RD(bp,
8760 dev_info.port_hw_config[port].xgxs_config_tx[i<<1]);
8761 bp->link_params.xgxs_config_tx[i << 1] = ((val>>16) & 0xffff);
8762 bp->link_params.xgxs_config_tx[(i << 1) + 1] = (val & 0xffff);
8765 /* If the device is capable of WoL, set the default state according
8766 * to the HW
8768 config = SHMEM_RD(bp, dev_info.port_feature_config[port].config);
8769 bp->wol = (!(bp->flags & NO_WOL_FLAG) &&
8770 (config & PORT_FEATURE_WOL_ENABLED));
8772 BNX2X_DEV_INFO("lane_config 0x%08x ext_phy_config 0x%08x"
8773 " speed_cap_mask 0x%08x link_config 0x%08x\n",
8774 bp->link_params.lane_config,
8775 bp->link_params.ext_phy_config,
8776 bp->link_params.speed_cap_mask, bp->port.link_config);
8778 bp->link_params.switch_cfg |= (bp->port.link_config &
8779 PORT_FEATURE_CONNECTED_SWITCH_MASK);
8780 bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);
8782 bnx2x_link_settings_requested(bp);
8785 * If connected directly, work with the internal PHY, otherwise, work
8786 * with the external PHY
8788 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
8789 if (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)
8790 bp->mdio.prtad = bp->link_params.phy_addr;
8792 else if ((ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) &&
8793 (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN))
8794 bp->mdio.prtad =
8795 XGXS_EXT_PHY_ADDR(bp->link_params.ext_phy_config);
8797 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
8798 val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
8799 bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
8800 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
8801 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
8803 #ifdef BCM_CNIC
8804 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].iscsi_mac_upper);
8805 val = SHMEM_RD(bp, dev_info.port_hw_config[port].iscsi_mac_lower);
8806 bnx2x_set_mac_buf(bp->iscsi_mac, val, val2);
8807 #endif
8810 static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
8812 int func = BP_FUNC(bp);
8813 u32 val, val2;
8814 int rc = 0;
8816 bnx2x_get_common_hwinfo(bp);
8818 bp->e1hov = 0;
8819 bp->e1hmf = 0;
8820 if (CHIP_IS_E1H(bp)) {
8821 bp->mf_config =
8822 SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
8824 val = (SHMEM_RD(bp, mf_cfg.func_mf_config[FUNC_0].e1hov_tag) &
8825 FUNC_MF_CFG_E1HOV_TAG_MASK);
8826 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT)
8827 bp->e1hmf = 1;
8828 BNX2X_DEV_INFO("%s function mode\n",
8829 IS_E1HMF(bp) ? "multi" : "single");
8831 if (IS_E1HMF(bp)) {
8832 val = (SHMEM_RD(bp, mf_cfg.func_mf_config[func].
8833 e1hov_tag) &
8834 FUNC_MF_CFG_E1HOV_TAG_MASK);
8835 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
8836 bp->e1hov = val;
8837 BNX2X_DEV_INFO("E1HOV for func %d is %d "
8838 "(0x%04x)\n",
8839 func, bp->e1hov, bp->e1hov);
8840 } else {
8841 BNX2X_ERR("!!! No valid E1HOV for func %d,"
8842 " aborting\n", func);
8843 rc = -EPERM;
8845 } else {
8846 if (BP_E1HVN(bp)) {
8847 BNX2X_ERR("!!! VN %d in single function mode,"
8848 " aborting\n", BP_E1HVN(bp));
8849 rc = -EPERM;
8854 if (!BP_NOMCP(bp)) {
8855 bnx2x_get_port_hwinfo(bp);
8857 bp->fw_seq = (SHMEM_RD(bp, func_mb[func].drv_mb_header) &
8858 DRV_MSG_SEQ_NUMBER_MASK);
8859 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
8862 if (IS_E1HMF(bp)) {
8863 val2 = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_upper);
8864 val = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_lower);
8865 if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
8866 (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
8867 bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
8868 bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
8869 bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
8870 bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
8871 bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
8872 bp->dev->dev_addr[5] = (u8)(val & 0xff);
8873 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
8874 ETH_ALEN);
8875 memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
8876 ETH_ALEN);
8879 return rc;
8882 if (BP_NOMCP(bp)) {
8883 /* only supposed to happen on emulation/FPGA */
8884 BNX2X_ERR("warning random MAC workaround active\n");
8885 random_ether_addr(bp->dev->dev_addr);
8886 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
8889 return rc;
8892 static int __devinit bnx2x_init_bp(struct bnx2x *bp)
8894 int func = BP_FUNC(bp);
8895 int timer_interval;
8896 int rc;
8898 /* Disable interrupt handling until HW is initialized */
8899 atomic_set(&bp->intr_sem, 1);
8900 smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
8902 mutex_init(&bp->port.phy_mutex);
8903 mutex_init(&bp->fw_mb_mutex);
8904 #ifdef BCM_CNIC
8905 mutex_init(&bp->cnic_mutex);
8906 #endif
8908 INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
8909 INIT_WORK(&bp->reset_task, bnx2x_reset_task);
8911 rc = bnx2x_get_hwinfo(bp);
8913 /* need to reset chip if undi was active */
8914 if (!BP_NOMCP(bp))
8915 bnx2x_undi_unload(bp);
8917 if (CHIP_REV_IS_FPGA(bp))
8918 printk(KERN_ERR PFX "FPGA detected\n");
8920 if (BP_NOMCP(bp) && (func == 0))
8921 printk(KERN_ERR PFX
8922 "MCP disabled, must load devices in order!\n");
8924 /* Set multi queue mode */
8925 if ((multi_mode != ETH_RSS_MODE_DISABLED) &&
8926 ((int_mode == INT_MODE_INTx) || (int_mode == INT_MODE_MSI))) {
8927 printk(KERN_ERR PFX
8928 "Multi disabled since int_mode requested is not MSI-X\n");
8929 multi_mode = ETH_RSS_MODE_DISABLED;
8931 bp->multi_mode = multi_mode;
8934 /* Set TPA flags */
8935 if (disable_tpa) {
8936 bp->flags &= ~TPA_ENABLE_FLAG;
8937 bp->dev->features &= ~NETIF_F_LRO;
8938 } else {
8939 bp->flags |= TPA_ENABLE_FLAG;
8940 bp->dev->features |= NETIF_F_LRO;
8943 if (CHIP_IS_E1(bp))
8944 bp->dropless_fc = 0;
8945 else
8946 bp->dropless_fc = dropless_fc;
8948 bp->mrrs = mrrs;
8950 bp->tx_ring_size = MAX_TX_AVAIL;
8951 bp->rx_ring_size = MAX_RX_AVAIL;
8953 bp->rx_csum = 1;
8955 /* make sure that the numbers are in the right granularity */
8956 bp->tx_ticks = (50 / (4 * BNX2X_BTR)) * (4 * BNX2X_BTR);
8957 bp->rx_ticks = (25 / (4 * BNX2X_BTR)) * (4 * BNX2X_BTR);
8959 timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
8960 bp->current_interval = (poll ? poll : timer_interval);
8962 init_timer(&bp->timer);
8963 bp->timer.expires = jiffies + bp->current_interval;
8964 bp->timer.data = (unsigned long) bp;
8965 bp->timer.function = bnx2x_timer;
8967 return rc;
8971 * ethtool service functions
8974 /* All ethtool functions called with rtnl_lock */
8976 static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
8978 struct bnx2x *bp = netdev_priv(dev);
8980 cmd->supported = bp->port.supported;
8981 cmd->advertising = bp->port.advertising;
8983 if ((bp->state == BNX2X_STATE_OPEN) &&
8984 !(bp->flags & MF_FUNC_DIS) &&
8985 (bp->link_vars.link_up)) {
8986 cmd->speed = bp->link_vars.line_speed;
8987 cmd->duplex = bp->link_vars.duplex;
8988 if (IS_E1HMF(bp)) {
8989 u16 vn_max_rate;
8991 vn_max_rate =
8992 ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
8993 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
8994 if (vn_max_rate < cmd->speed)
8995 cmd->speed = vn_max_rate;
8997 } else {
8998 cmd->speed = -1;
8999 cmd->duplex = -1;
9002 if (bp->link_params.switch_cfg == SWITCH_CFG_10G) {
9003 u32 ext_phy_type =
9004 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
9006 switch (ext_phy_type) {
9007 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
9008 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
9009 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
9010 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
9011 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
9012 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
9013 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
9014 cmd->port = PORT_FIBRE;
9015 break;
9017 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
9018 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
9019 cmd->port = PORT_TP;
9020 break;
9022 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
9023 BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
9024 bp->link_params.ext_phy_config);
9025 break;
9027 default:
9028 DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
9029 bp->link_params.ext_phy_config);
9030 break;
9032 } else
9033 cmd->port = PORT_TP;
9035 cmd->phy_address = bp->mdio.prtad;
9036 cmd->transceiver = XCVR_INTERNAL;
9038 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
9039 cmd->autoneg = AUTONEG_ENABLE;
9040 else
9041 cmd->autoneg = AUTONEG_DISABLE;
9043 cmd->maxtxpkt = 0;
9044 cmd->maxrxpkt = 0;
9046 DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
9047 DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
9048 DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
9049 DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
9050 cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
9051 cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
9052 cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
9054 return 0;
9057 static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
9059 struct bnx2x *bp = netdev_priv(dev);
9060 u32 advertising;
9062 if (IS_E1HMF(bp))
9063 return 0;
9065 DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
9066 DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
9067 DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
9068 DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
9069 cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
9070 cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
9071 cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
9073 if (cmd->autoneg == AUTONEG_ENABLE) {
9074 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
9075 DP(NETIF_MSG_LINK, "Autoneg not supported\n");
9076 return -EINVAL;
9079 /* advertise the requested speed and duplex if supported */
9080 cmd->advertising &= bp->port.supported;
9082 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
9083 bp->link_params.req_duplex = DUPLEX_FULL;
9084 bp->port.advertising |= (ADVERTISED_Autoneg |
9085 cmd->advertising);
9087 } else { /* forced speed */
9088 /* advertise the requested speed and duplex if supported */
9089 switch (cmd->speed) {
9090 case SPEED_10:
9091 if (cmd->duplex == DUPLEX_FULL) {
9092 if (!(bp->port.supported &
9093 SUPPORTED_10baseT_Full)) {
9094 DP(NETIF_MSG_LINK,
9095 "10M full not supported\n");
9096 return -EINVAL;
9099 advertising = (ADVERTISED_10baseT_Full |
9100 ADVERTISED_TP);
9101 } else {
9102 if (!(bp->port.supported &
9103 SUPPORTED_10baseT_Half)) {
9104 DP(NETIF_MSG_LINK,
9105 "10M half not supported\n");
9106 return -EINVAL;
9109 advertising = (ADVERTISED_10baseT_Half |
9110 ADVERTISED_TP);
9112 break;
9114 case SPEED_100:
9115 if (cmd->duplex == DUPLEX_FULL) {
9116 if (!(bp->port.supported &
9117 SUPPORTED_100baseT_Full)) {
9118 DP(NETIF_MSG_LINK,
9119 "100M full not supported\n");
9120 return -EINVAL;
9123 advertising = (ADVERTISED_100baseT_Full |
9124 ADVERTISED_TP);
9125 } else {
9126 if (!(bp->port.supported &
9127 SUPPORTED_100baseT_Half)) {
9128 DP(NETIF_MSG_LINK,
9129 "100M half not supported\n");
9130 return -EINVAL;
9133 advertising = (ADVERTISED_100baseT_Half |
9134 ADVERTISED_TP);
9136 break;
9138 case SPEED_1000:
9139 if (cmd->duplex != DUPLEX_FULL) {
9140 DP(NETIF_MSG_LINK, "1G half not supported\n");
9141 return -EINVAL;
9144 if (!(bp->port.supported & SUPPORTED_1000baseT_Full)) {
9145 DP(NETIF_MSG_LINK, "1G full not supported\n");
9146 return -EINVAL;
9149 advertising = (ADVERTISED_1000baseT_Full |
9150 ADVERTISED_TP);
9151 break;
9153 case SPEED_2500:
9154 if (cmd->duplex != DUPLEX_FULL) {
9155 DP(NETIF_MSG_LINK,
9156 "2.5G half not supported\n");
9157 return -EINVAL;
9160 if (!(bp->port.supported & SUPPORTED_2500baseX_Full)) {
9161 DP(NETIF_MSG_LINK,
9162 "2.5G full not supported\n");
9163 return -EINVAL;
9166 advertising = (ADVERTISED_2500baseX_Full |
9167 ADVERTISED_TP);
9168 break;
9170 case SPEED_10000:
9171 if (cmd->duplex != DUPLEX_FULL) {
9172 DP(NETIF_MSG_LINK, "10G half not supported\n");
9173 return -EINVAL;
9176 if (!(bp->port.supported & SUPPORTED_10000baseT_Full)) {
9177 DP(NETIF_MSG_LINK, "10G full not supported\n");
9178 return -EINVAL;
9181 advertising = (ADVERTISED_10000baseT_Full |
9182 ADVERTISED_FIBRE);
9183 break;
9185 default:
9186 DP(NETIF_MSG_LINK, "Unsupported speed\n");
9187 return -EINVAL;
9190 bp->link_params.req_line_speed = cmd->speed;
9191 bp->link_params.req_duplex = cmd->duplex;
9192 bp->port.advertising = advertising;
9195 DP(NETIF_MSG_LINK, "req_line_speed %d\n"
9196 DP_LEVEL " req_duplex %d advertising 0x%x\n",
9197 bp->link_params.req_line_speed, bp->link_params.req_duplex,
9198 bp->port.advertising);
9200 if (netif_running(dev)) {
9201 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
9202 bnx2x_link_set(bp);
9205 return 0;
9208 #define IS_E1_ONLINE(info) (((info) & RI_E1_ONLINE) == RI_E1_ONLINE)
9209 #define IS_E1H_ONLINE(info) (((info) & RI_E1H_ONLINE) == RI_E1H_ONLINE)
9211 static int bnx2x_get_regs_len(struct net_device *dev)
9213 struct bnx2x *bp = netdev_priv(dev);
9214 int regdump_len = 0;
9215 int i;
9217 if (CHIP_IS_E1(bp)) {
9218 for (i = 0; i < REGS_COUNT; i++)
9219 if (IS_E1_ONLINE(reg_addrs[i].info))
9220 regdump_len += reg_addrs[i].size;
9222 for (i = 0; i < WREGS_COUNT_E1; i++)
9223 if (IS_E1_ONLINE(wreg_addrs_e1[i].info))
9224 regdump_len += wreg_addrs_e1[i].size *
9225 (1 + wreg_addrs_e1[i].read_regs_count);
9227 } else { /* E1H */
9228 for (i = 0; i < REGS_COUNT; i++)
9229 if (IS_E1H_ONLINE(reg_addrs[i].info))
9230 regdump_len += reg_addrs[i].size;
9232 for (i = 0; i < WREGS_COUNT_E1H; i++)
9233 if (IS_E1H_ONLINE(wreg_addrs_e1h[i].info))
9234 regdump_len += wreg_addrs_e1h[i].size *
9235 (1 + wreg_addrs_e1h[i].read_regs_count);
9237 regdump_len *= 4;
9238 regdump_len += sizeof(struct dump_hdr);
9240 return regdump_len;
9243 static void bnx2x_get_regs(struct net_device *dev,
9244 struct ethtool_regs *regs, void *_p)
9246 u32 *p = _p, i, j;
9247 struct bnx2x *bp = netdev_priv(dev);
9248 struct dump_hdr dump_hdr = {0};
9250 regs->version = 0;
9251 memset(p, 0, regs->len);
9253 if (!netif_running(bp->dev))
9254 return;
9256 dump_hdr.hdr_size = (sizeof(struct dump_hdr) / 4) - 1;
9257 dump_hdr.dump_sign = dump_sign_all;
9258 dump_hdr.xstorm_waitp = REG_RD(bp, XSTORM_WAITP_ADDR);
9259 dump_hdr.tstorm_waitp = REG_RD(bp, TSTORM_WAITP_ADDR);
9260 dump_hdr.ustorm_waitp = REG_RD(bp, USTORM_WAITP_ADDR);
9261 dump_hdr.cstorm_waitp = REG_RD(bp, CSTORM_WAITP_ADDR);
9262 dump_hdr.info = CHIP_IS_E1(bp) ? RI_E1_ONLINE : RI_E1H_ONLINE;
9264 memcpy(p, &dump_hdr, sizeof(struct dump_hdr));
9265 p += dump_hdr.hdr_size + 1;
9267 if (CHIP_IS_E1(bp)) {
9268 for (i = 0; i < REGS_COUNT; i++)
9269 if (IS_E1_ONLINE(reg_addrs[i].info))
9270 for (j = 0; j < reg_addrs[i].size; j++)
9271 *p++ = REG_RD(bp,
9272 reg_addrs[i].addr + j*4);
9274 } else { /* E1H */
9275 for (i = 0; i < REGS_COUNT; i++)
9276 if (IS_E1H_ONLINE(reg_addrs[i].info))
9277 for (j = 0; j < reg_addrs[i].size; j++)
9278 *p++ = REG_RD(bp,
9279 reg_addrs[i].addr + j*4);
9283 #define PHY_FW_VER_LEN 10
9285 static void bnx2x_get_drvinfo(struct net_device *dev,
9286 struct ethtool_drvinfo *info)
9288 struct bnx2x *bp = netdev_priv(dev);
9289 u8 phy_fw_ver[PHY_FW_VER_LEN];
9291 strcpy(info->driver, DRV_MODULE_NAME);
9292 strcpy(info->version, DRV_MODULE_VERSION);
9294 phy_fw_ver[0] = '\0';
9295 if (bp->port.pmf) {
9296 bnx2x_acquire_phy_lock(bp);
9297 bnx2x_get_ext_phy_fw_version(&bp->link_params,
9298 (bp->state != BNX2X_STATE_CLOSED),
9299 phy_fw_ver, PHY_FW_VER_LEN);
9300 bnx2x_release_phy_lock(bp);
9303 snprintf(info->fw_version, 32, "BC:%d.%d.%d%s%s",
9304 (bp->common.bc_ver & 0xff0000) >> 16,
9305 (bp->common.bc_ver & 0xff00) >> 8,
9306 (bp->common.bc_ver & 0xff),
9307 ((phy_fw_ver[0] != '\0') ? " PHY:" : ""), phy_fw_ver);
9308 strcpy(info->bus_info, pci_name(bp->pdev));
9309 info->n_stats = BNX2X_NUM_STATS;
9310 info->testinfo_len = BNX2X_NUM_TESTS;
9311 info->eedump_len = bp->common.flash_size;
9312 info->regdump_len = bnx2x_get_regs_len(dev);
9315 static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
9317 struct bnx2x *bp = netdev_priv(dev);
9319 if (bp->flags & NO_WOL_FLAG) {
9320 wol->supported = 0;
9321 wol->wolopts = 0;
9322 } else {
9323 wol->supported = WAKE_MAGIC;
9324 if (bp->wol)
9325 wol->wolopts = WAKE_MAGIC;
9326 else
9327 wol->wolopts = 0;
9329 memset(&wol->sopass, 0, sizeof(wol->sopass));
9332 static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
9334 struct bnx2x *bp = netdev_priv(dev);
9336 if (wol->wolopts & ~WAKE_MAGIC)
9337 return -EINVAL;
9339 if (wol->wolopts & WAKE_MAGIC) {
9340 if (bp->flags & NO_WOL_FLAG)
9341 return -EINVAL;
9343 bp->wol = 1;
9344 } else
9345 bp->wol = 0;
9347 return 0;
9350 static u32 bnx2x_get_msglevel(struct net_device *dev)
9352 struct bnx2x *bp = netdev_priv(dev);
9354 return bp->msglevel;
9357 static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
9359 struct bnx2x *bp = netdev_priv(dev);
9361 if (capable(CAP_NET_ADMIN))
9362 bp->msglevel = level;
9365 static int bnx2x_nway_reset(struct net_device *dev)
9367 struct bnx2x *bp = netdev_priv(dev);
9369 if (!bp->port.pmf)
9370 return 0;
9372 if (netif_running(dev)) {
9373 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
9374 bnx2x_link_set(bp);
9377 return 0;
9380 static u32 bnx2x_get_link(struct net_device *dev)
9382 struct bnx2x *bp = netdev_priv(dev);
9384 if (bp->flags & MF_FUNC_DIS)
9385 return 0;
9387 return bp->link_vars.link_up;
9390 static int bnx2x_get_eeprom_len(struct net_device *dev)
9392 struct bnx2x *bp = netdev_priv(dev);
9394 return bp->common.flash_size;
9397 static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
9399 int port = BP_PORT(bp);
9400 int count, i;
9401 u32 val = 0;
9403 /* adjust timeout for emulation/FPGA */
9404 count = NVRAM_TIMEOUT_COUNT;
9405 if (CHIP_REV_IS_SLOW(bp))
9406 count *= 100;
9408 /* request access to nvram interface */
9409 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
9410 (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));
9412 for (i = 0; i < count*10; i++) {
9413 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
9414 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))
9415 break;
9417 udelay(5);
9420 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
9421 DP(BNX2X_MSG_NVM, "cannot get access to nvram interface\n");
9422 return -EBUSY;
9425 return 0;
9428 static int bnx2x_release_nvram_lock(struct bnx2x *bp)
9430 int port = BP_PORT(bp);
9431 int count, i;
9432 u32 val = 0;
9434 /* adjust timeout for emulation/FPGA */
9435 count = NVRAM_TIMEOUT_COUNT;
9436 if (CHIP_REV_IS_SLOW(bp))
9437 count *= 100;
9439 /* relinquish nvram interface */
9440 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
9441 (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));
9443 for (i = 0; i < count*10; i++) {
9444 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
9445 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)))
9446 break;
9448 udelay(5);
9451 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
9452 DP(BNX2X_MSG_NVM, "cannot free access to nvram interface\n");
9453 return -EBUSY;
9456 return 0;
9459 static void bnx2x_enable_nvram_access(struct bnx2x *bp)
9461 u32 val;
9463 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
9465 /* enable both bits, even on read */
9466 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
9467 (val | MCPR_NVM_ACCESS_ENABLE_EN |
9468 MCPR_NVM_ACCESS_ENABLE_WR_EN));
9471 static void bnx2x_disable_nvram_access(struct bnx2x *bp)
9473 u32 val;
9475 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
9477 /* disable both bits, even after read */
9478 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
9479 (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
9480 MCPR_NVM_ACCESS_ENABLE_WR_EN)));
9483 static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, __be32 *ret_val,
9484 u32 cmd_flags)
9486 int count, i, rc;
9487 u32 val;
9489 /* build the command word */
9490 cmd_flags |= MCPR_NVM_COMMAND_DOIT;
9492 /* need to clear DONE bit separately */
9493 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
9495 /* address of the NVRAM to read from */
9496 REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
9497 (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
9499 /* issue a read command */
9500 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
9502 /* adjust timeout for emulation/FPGA */
9503 count = NVRAM_TIMEOUT_COUNT;
9504 if (CHIP_REV_IS_SLOW(bp))
9505 count *= 100;
9507 /* wait for completion */
9508 *ret_val = 0;
9509 rc = -EBUSY;
9510 for (i = 0; i < count; i++) {
9511 udelay(5);
9512 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
9514 if (val & MCPR_NVM_COMMAND_DONE) {
9515 val = REG_RD(bp, MCP_REG_MCPR_NVM_READ);
9516 /* we read nvram data in cpu order
9517 * but ethtool sees it as an array of bytes
9518 * converting to big-endian will do the work */
9519 *ret_val = cpu_to_be32(val);
9520 rc = 0;
9521 break;
9525 return rc;
9528 static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
9529 int buf_size)
9531 int rc;
9532 u32 cmd_flags;
9533 __be32 val;
9535 if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
9536 DP(BNX2X_MSG_NVM,
9537 "Invalid parameter: offset 0x%x buf_size 0x%x\n",
9538 offset, buf_size);
9539 return -EINVAL;
9542 if (offset + buf_size > bp->common.flash_size) {
9543 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
9544 " buf_size (0x%x) > flash_size (0x%x)\n",
9545 offset, buf_size, bp->common.flash_size);
9546 return -EINVAL;
9549 /* request access to nvram interface */
9550 rc = bnx2x_acquire_nvram_lock(bp);
9551 if (rc)
9552 return rc;
9554 /* enable access to nvram interface */
9555 bnx2x_enable_nvram_access(bp);
9557 /* read the first word(s) */
9558 cmd_flags = MCPR_NVM_COMMAND_FIRST;
9559 while ((buf_size > sizeof(u32)) && (rc == 0)) {
9560 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
9561 memcpy(ret_buf, &val, 4);
9563 /* advance to the next dword */
9564 offset += sizeof(u32);
9565 ret_buf += sizeof(u32);
9566 buf_size -= sizeof(u32);
9567 cmd_flags = 0;
9570 if (rc == 0) {
9571 cmd_flags |= MCPR_NVM_COMMAND_LAST;
9572 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
9573 memcpy(ret_buf, &val, 4);
9576 /* disable access to nvram interface */
9577 bnx2x_disable_nvram_access(bp);
9578 bnx2x_release_nvram_lock(bp);
9580 return rc;
9583 static int bnx2x_get_eeprom(struct net_device *dev,
9584 struct ethtool_eeprom *eeprom, u8 *eebuf)
9586 struct bnx2x *bp = netdev_priv(dev);
9587 int rc;
9589 if (!netif_running(dev))
9590 return -EAGAIN;
9592 DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
9593 DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
9594 eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
9595 eeprom->len, eeprom->len);
9597 /* parameters already validated in ethtool_get_eeprom */
9599 rc = bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
9601 return rc;
9604 static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
9605 u32 cmd_flags)
9607 int count, i, rc;
9609 /* build the command word */
9610 cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR;
9612 /* need to clear DONE bit separately */
9613 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
9615 /* write the data */
9616 REG_WR(bp, MCP_REG_MCPR_NVM_WRITE, val);
9618 /* address of the NVRAM to write to */
9619 REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
9620 (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
9622 /* issue the write command */
9623 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
9625 /* adjust timeout for emulation/FPGA */
9626 count = NVRAM_TIMEOUT_COUNT;
9627 if (CHIP_REV_IS_SLOW(bp))
9628 count *= 100;
9630 /* wait for completion */
9631 rc = -EBUSY;
9632 for (i = 0; i < count; i++) {
9633 udelay(5);
9634 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
9635 if (val & MCPR_NVM_COMMAND_DONE) {
9636 rc = 0;
9637 break;
9641 return rc;
9644 #define BYTE_OFFSET(offset) (8 * (offset & 0x03))
9646 static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
9647 int buf_size)
9649 int rc;
9650 u32 cmd_flags;
9651 u32 align_offset;
9652 __be32 val;
9654 if (offset + buf_size > bp->common.flash_size) {
9655 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
9656 " buf_size (0x%x) > flash_size (0x%x)\n",
9657 offset, buf_size, bp->common.flash_size);
9658 return -EINVAL;
9661 /* request access to nvram interface */
9662 rc = bnx2x_acquire_nvram_lock(bp);
9663 if (rc)
9664 return rc;
9666 /* enable access to nvram interface */
9667 bnx2x_enable_nvram_access(bp);
9669 cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
9670 align_offset = (offset & ~0x03);
9671 rc = bnx2x_nvram_read_dword(bp, align_offset, &val, cmd_flags);
9673 if (rc == 0) {
9674 val &= ~(0xff << BYTE_OFFSET(offset));
9675 val |= (*data_buf << BYTE_OFFSET(offset));
9677 /* nvram data is returned as an array of bytes
9678 * convert it back to cpu order */
9679 val = be32_to_cpu(val);
9681 rc = bnx2x_nvram_write_dword(bp, align_offset, val,
9682 cmd_flags);
9685 /* disable access to nvram interface */
9686 bnx2x_disable_nvram_access(bp);
9687 bnx2x_release_nvram_lock(bp);
9689 return rc;
9692 static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
9693 int buf_size)
9695 int rc;
9696 u32 cmd_flags;
9697 u32 val;
9698 u32 written_so_far;
9700 if (buf_size == 1) /* ethtool */
9701 return bnx2x_nvram_write1(bp, offset, data_buf, buf_size);
9703 if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
9704 DP(BNX2X_MSG_NVM,
9705 "Invalid parameter: offset 0x%x buf_size 0x%x\n",
9706 offset, buf_size);
9707 return -EINVAL;
9710 if (offset + buf_size > bp->common.flash_size) {
9711 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
9712 " buf_size (0x%x) > flash_size (0x%x)\n",
9713 offset, buf_size, bp->common.flash_size);
9714 return -EINVAL;
9717 /* request access to nvram interface */
9718 rc = bnx2x_acquire_nvram_lock(bp);
9719 if (rc)
9720 return rc;
9722 /* enable access to nvram interface */
9723 bnx2x_enable_nvram_access(bp);
9725 written_so_far = 0;
9726 cmd_flags = MCPR_NVM_COMMAND_FIRST;
9727 while ((written_so_far < buf_size) && (rc == 0)) {
9728 if (written_so_far == (buf_size - sizeof(u32)))
9729 cmd_flags |= MCPR_NVM_COMMAND_LAST;
9730 else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0)
9731 cmd_flags |= MCPR_NVM_COMMAND_LAST;
9732 else if ((offset % NVRAM_PAGE_SIZE) == 0)
9733 cmd_flags |= MCPR_NVM_COMMAND_FIRST;
9735 memcpy(&val, data_buf, 4);
9737 rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);
9739 /* advance to the next dword */
9740 offset += sizeof(u32);
9741 data_buf += sizeof(u32);
9742 written_so_far += sizeof(u32);
9743 cmd_flags = 0;
9746 /* disable access to nvram interface */
9747 bnx2x_disable_nvram_access(bp);
9748 bnx2x_release_nvram_lock(bp);
9750 return rc;
9753 static int bnx2x_set_eeprom(struct net_device *dev,
9754 struct ethtool_eeprom *eeprom, u8 *eebuf)
9756 struct bnx2x *bp = netdev_priv(dev);
9757 int port = BP_PORT(bp);
9758 int rc = 0;
9760 if (!netif_running(dev))
9761 return -EAGAIN;
9763 DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
9764 DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
9765 eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
9766 eeprom->len, eeprom->len);
9768 /* parameters already validated in ethtool_set_eeprom */
9770 /* PHY eeprom can be accessed only by the PMF */
9771 if ((eeprom->magic >= 0x50485900) && (eeprom->magic <= 0x504859FF) &&
9772 !bp->port.pmf)
9773 return -EINVAL;
9775 if (eeprom->magic == 0x50485950) {
9776 /* 'PHYP' (0x50485950): prepare phy for FW upgrade */
9777 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
9779 bnx2x_acquire_phy_lock(bp);
9780 rc |= bnx2x_link_reset(&bp->link_params,
9781 &bp->link_vars, 0);
9782 if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
9783 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101)
9784 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
9785 MISC_REGISTERS_GPIO_HIGH, port);
9786 bnx2x_release_phy_lock(bp);
9787 bnx2x_link_report(bp);
9789 } else if (eeprom->magic == 0x50485952) {
9790 /* 'PHYR' (0x50485952): re-init link after FW upgrade */
9791 if (bp->state == BNX2X_STATE_OPEN) {
9792 bnx2x_acquire_phy_lock(bp);
9793 rc |= bnx2x_link_reset(&bp->link_params,
9794 &bp->link_vars, 1);
9796 rc |= bnx2x_phy_init(&bp->link_params,
9797 &bp->link_vars);
9798 bnx2x_release_phy_lock(bp);
9799 bnx2x_calc_fc_adv(bp);
9801 } else if (eeprom->magic == 0x53985943) {
9802 /* 'PHYC' (0x53985943): PHY FW upgrade completed */
9803 if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
9804 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) {
9805 u8 ext_phy_addr =
9806 XGXS_EXT_PHY_ADDR(bp->link_params.ext_phy_config);
9808 /* DSP Remove Download Mode */
9809 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
9810 MISC_REGISTERS_GPIO_LOW, port);
9812 bnx2x_acquire_phy_lock(bp);
9814 bnx2x_sfx7101_sp_sw_reset(bp, port, ext_phy_addr);
9816 /* wait 0.5 sec to allow it to run */
9817 msleep(500);
9818 bnx2x_ext_phy_hw_reset(bp, port);
9819 msleep(500);
9820 bnx2x_release_phy_lock(bp);
9822 } else
9823 rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
9825 return rc;
9828 static int bnx2x_get_coalesce(struct net_device *dev,
9829 struct ethtool_coalesce *coal)
9831 struct bnx2x *bp = netdev_priv(dev);
9833 memset(coal, 0, sizeof(struct ethtool_coalesce));
9835 coal->rx_coalesce_usecs = bp->rx_ticks;
9836 coal->tx_coalesce_usecs = bp->tx_ticks;
9838 return 0;
9841 #define BNX2X_MAX_COALES_TOUT (0xf0*12) /* Maximal coalescing timeout in us */
9842 static int bnx2x_set_coalesce(struct net_device *dev,
9843 struct ethtool_coalesce *coal)
9845 struct bnx2x *bp = netdev_priv(dev);
9847 bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
9848 if (bp->rx_ticks > BNX2X_MAX_COALES_TOUT)
9849 bp->rx_ticks = BNX2X_MAX_COALES_TOUT;
9851 bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
9852 if (bp->tx_ticks > BNX2X_MAX_COALES_TOUT)
9853 bp->tx_ticks = BNX2X_MAX_COALES_TOUT;
9855 if (netif_running(dev))
9856 bnx2x_update_coalesce(bp);
9858 return 0;
9861 static void bnx2x_get_ringparam(struct net_device *dev,
9862 struct ethtool_ringparam *ering)
9864 struct bnx2x *bp = netdev_priv(dev);
9866 ering->rx_max_pending = MAX_RX_AVAIL;
9867 ering->rx_mini_max_pending = 0;
9868 ering->rx_jumbo_max_pending = 0;
9870 ering->rx_pending = bp->rx_ring_size;
9871 ering->rx_mini_pending = 0;
9872 ering->rx_jumbo_pending = 0;
9874 ering->tx_max_pending = MAX_TX_AVAIL;
9875 ering->tx_pending = bp->tx_ring_size;
9878 static int bnx2x_set_ringparam(struct net_device *dev,
9879 struct ethtool_ringparam *ering)
9881 struct bnx2x *bp = netdev_priv(dev);
9882 int rc = 0;
9884 if ((ering->rx_pending > MAX_RX_AVAIL) ||
9885 (ering->tx_pending > MAX_TX_AVAIL) ||
9886 (ering->tx_pending <= MAX_SKB_FRAGS + 4))
9887 return -EINVAL;
9889 bp->rx_ring_size = ering->rx_pending;
9890 bp->tx_ring_size = ering->tx_pending;
9892 if (netif_running(dev)) {
9893 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9894 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
9897 return rc;
9900 static void bnx2x_get_pauseparam(struct net_device *dev,
9901 struct ethtool_pauseparam *epause)
9903 struct bnx2x *bp = netdev_priv(dev);
9905 epause->autoneg = (bp->link_params.req_flow_ctrl ==
9906 BNX2X_FLOW_CTRL_AUTO) &&
9907 (bp->link_params.req_line_speed == SPEED_AUTO_NEG);
9909 epause->rx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) ==
9910 BNX2X_FLOW_CTRL_RX);
9911 epause->tx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX) ==
9912 BNX2X_FLOW_CTRL_TX);
9914 DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
9915 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
9916 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
9919 static int bnx2x_set_pauseparam(struct net_device *dev,
9920 struct ethtool_pauseparam *epause)
9922 struct bnx2x *bp = netdev_priv(dev);
9924 if (IS_E1HMF(bp))
9925 return 0;
9927 DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
9928 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
9929 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
9931 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
9933 if (epause->rx_pause)
9934 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_RX;
9936 if (epause->tx_pause)
9937 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_TX;
9939 if (bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO)
9940 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
9942 if (epause->autoneg) {
9943 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
9944 DP(NETIF_MSG_LINK, "autoneg not supported\n");
9945 return -EINVAL;
9948 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
9949 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
9952 DP(NETIF_MSG_LINK,
9953 "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl);
9955 if (netif_running(dev)) {
9956 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
9957 bnx2x_link_set(bp);
9960 return 0;
9963 static int bnx2x_set_flags(struct net_device *dev, u32 data)
9965 struct bnx2x *bp = netdev_priv(dev);
9966 int changed = 0;
9967 int rc = 0;
9969 /* TPA requires Rx CSUM offloading */
9970 if ((data & ETH_FLAG_LRO) && bp->rx_csum) {
9971 if (!(dev->features & NETIF_F_LRO)) {
9972 dev->features |= NETIF_F_LRO;
9973 bp->flags |= TPA_ENABLE_FLAG;
9974 changed = 1;
9977 } else if (dev->features & NETIF_F_LRO) {
9978 dev->features &= ~NETIF_F_LRO;
9979 bp->flags &= ~TPA_ENABLE_FLAG;
9980 changed = 1;
9983 if (changed && netif_running(dev)) {
9984 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9985 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
9988 return rc;
9991 static u32 bnx2x_get_rx_csum(struct net_device *dev)
9993 struct bnx2x *bp = netdev_priv(dev);
9995 return bp->rx_csum;
9998 static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
10000 struct bnx2x *bp = netdev_priv(dev);
10001 int rc = 0;
10003 bp->rx_csum = data;
10005 /* Disable TPA, when Rx CSUM is disabled. Otherwise all
10006 TPA'ed packets will be discarded due to wrong TCP CSUM */
10007 if (!data) {
10008 u32 flags = ethtool_op_get_flags(dev);
10010 rc = bnx2x_set_flags(dev, (flags & ~ETH_FLAG_LRO));
10013 return rc;
10016 static int bnx2x_set_tso(struct net_device *dev, u32 data)
10018 if (data) {
10019 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
10020 dev->features |= NETIF_F_TSO6;
10021 } else {
10022 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
10023 dev->features &= ~NETIF_F_TSO6;
10026 return 0;
10029 static const struct {
10030 char string[ETH_GSTRING_LEN];
10031 } bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = {
10032 { "register_test (offline)" },
10033 { "memory_test (offline)" },
10034 { "loopback_test (offline)" },
10035 { "nvram_test (online)" },
10036 { "interrupt_test (online)" },
10037 { "link_test (online)" },
10038 { "idle check (online)" }
10041 static int bnx2x_test_registers(struct bnx2x *bp)
10043 int idx, i, rc = -ENODEV;
10044 u32 wr_val = 0;
10045 int port = BP_PORT(bp);
10046 static const struct {
10047 u32 offset0;
10048 u32 offset1;
10049 u32 mask;
10050 } reg_tbl[] = {
10051 /* 0 */ { BRB1_REG_PAUSE_LOW_THRESHOLD_0, 4, 0x000003ff },
10052 { DORQ_REG_DB_ADDR0, 4, 0xffffffff },
10053 { HC_REG_AGG_INT_0, 4, 0x000003ff },
10054 { PBF_REG_MAC_IF0_ENABLE, 4, 0x00000001 },
10055 { PBF_REG_P0_INIT_CRD, 4, 0x000007ff },
10056 { PRS_REG_CID_PORT_0, 4, 0x00ffffff },
10057 { PXP2_REG_PSWRQ_CDU0_L2P, 4, 0x000fffff },
10058 { PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
10059 { PXP2_REG_PSWRQ_TM0_L2P, 4, 0x000fffff },
10060 { PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
10061 /* 10 */ { PXP2_REG_PSWRQ_TSDM0_L2P, 4, 0x000fffff },
10062 { QM_REG_CONNNUM_0, 4, 0x000fffff },
10063 { TM_REG_LIN0_MAX_ACTIVE_CID, 4, 0x0003ffff },
10064 { SRC_REG_KEYRSS0_0, 40, 0xffffffff },
10065 { SRC_REG_KEYRSS0_7, 40, 0xffffffff },
10066 { XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 4, 0x00000001 },
10067 { XCM_REG_WU_DA_CNT_CMD00, 4, 0x00000003 },
10068 { XCM_REG_GLB_DEL_ACK_MAX_CNT_0, 4, 0x000000ff },
10069 { NIG_REG_LLH0_T_BIT, 4, 0x00000001 },
10070 { NIG_REG_EMAC0_IN_EN, 4, 0x00000001 },
10071 /* 20 */ { NIG_REG_BMAC0_IN_EN, 4, 0x00000001 },
10072 { NIG_REG_XCM0_OUT_EN, 4, 0x00000001 },
10073 { NIG_REG_BRB0_OUT_EN, 4, 0x00000001 },
10074 { NIG_REG_LLH0_XCM_MASK, 4, 0x00000007 },
10075 { NIG_REG_LLH0_ACPI_PAT_6_LEN, 68, 0x000000ff },
10076 { NIG_REG_LLH0_ACPI_PAT_0_CRC, 68, 0xffffffff },
10077 { NIG_REG_LLH0_DEST_MAC_0_0, 160, 0xffffffff },
10078 { NIG_REG_LLH0_DEST_IP_0_1, 160, 0xffffffff },
10079 { NIG_REG_LLH0_IPV4_IPV6_0, 160, 0x00000001 },
10080 { NIG_REG_LLH0_DEST_UDP_0, 160, 0x0000ffff },
10081 /* 30 */ { NIG_REG_LLH0_DEST_TCP_0, 160, 0x0000ffff },
10082 { NIG_REG_LLH0_VLAN_ID_0, 160, 0x00000fff },
10083 { NIG_REG_XGXS_SERDES0_MODE_SEL, 4, 0x00000001 },
10084 { NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0, 4, 0x00000001 },
10085 { NIG_REG_STATUS_INTERRUPT_PORT0, 4, 0x07ffffff },
10086 { NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST, 24, 0x00000001 },
10087 { NIG_REG_SERDES0_CTRL_PHY_ADDR, 16, 0x0000001f },
10089 { 0xffffffff, 0, 0x00000000 }
10092 if (!netif_running(bp->dev))
10093 return rc;
10095 /* Repeat the test twice:
10096 First by writing 0x00000000, second by writing 0xffffffff */
10097 for (idx = 0; idx < 2; idx++) {
10099 switch (idx) {
10100 case 0:
10101 wr_val = 0;
10102 break;
10103 case 1:
10104 wr_val = 0xffffffff;
10105 break;
10108 for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) {
10109 u32 offset, mask, save_val, val;
10111 offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1;
10112 mask = reg_tbl[i].mask;
10114 save_val = REG_RD(bp, offset);
10116 REG_WR(bp, offset, wr_val);
10117 val = REG_RD(bp, offset);
10119 /* Restore the original register's value */
10120 REG_WR(bp, offset, save_val);
10122 /* verify that value is as expected value */
10123 if ((val & mask) != (wr_val & mask))
10124 goto test_reg_exit;
10128 rc = 0;
10130 test_reg_exit:
10131 return rc;
10134 static int bnx2x_test_memory(struct bnx2x *bp)
10136 int i, j, rc = -ENODEV;
10137 u32 val;
10138 static const struct {
10139 u32 offset;
10140 int size;
10141 } mem_tbl[] = {
10142 { CCM_REG_XX_DESCR_TABLE, CCM_REG_XX_DESCR_TABLE_SIZE },
10143 { CFC_REG_ACTIVITY_COUNTER, CFC_REG_ACTIVITY_COUNTER_SIZE },
10144 { CFC_REG_LINK_LIST, CFC_REG_LINK_LIST_SIZE },
10145 { DMAE_REG_CMD_MEM, DMAE_REG_CMD_MEM_SIZE },
10146 { TCM_REG_XX_DESCR_TABLE, TCM_REG_XX_DESCR_TABLE_SIZE },
10147 { UCM_REG_XX_DESCR_TABLE, UCM_REG_XX_DESCR_TABLE_SIZE },
10148 { XCM_REG_XX_DESCR_TABLE, XCM_REG_XX_DESCR_TABLE_SIZE },
10150 { 0xffffffff, 0 }
10152 static const struct {
10153 char *name;
10154 u32 offset;
10155 u32 e1_mask;
10156 u32 e1h_mask;
10157 } prty_tbl[] = {
10158 { "CCM_PRTY_STS", CCM_REG_CCM_PRTY_STS, 0x3ffc0, 0 },
10159 { "CFC_PRTY_STS", CFC_REG_CFC_PRTY_STS, 0x2, 0x2 },
10160 { "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0, 0 },
10161 { "TCM_PRTY_STS", TCM_REG_TCM_PRTY_STS, 0x3ffc0, 0 },
10162 { "UCM_PRTY_STS", UCM_REG_UCM_PRTY_STS, 0x3ffc0, 0 },
10163 { "XCM_PRTY_STS", XCM_REG_XCM_PRTY_STS, 0x3ffc1, 0 },
10165 { NULL, 0xffffffff, 0, 0 }
10168 if (!netif_running(bp->dev))
10169 return rc;
10171 /* Go through all the memories */
10172 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++)
10173 for (j = 0; j < mem_tbl[i].size; j++)
10174 REG_RD(bp, mem_tbl[i].offset + j*4);
10176 /* Check the parity status */
10177 for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
10178 val = REG_RD(bp, prty_tbl[i].offset);
10179 if ((CHIP_IS_E1(bp) && (val & ~(prty_tbl[i].e1_mask))) ||
10180 (CHIP_IS_E1H(bp) && (val & ~(prty_tbl[i].e1h_mask)))) {
10181 DP(NETIF_MSG_HW,
10182 "%s is 0x%x\n", prty_tbl[i].name, val);
10183 goto test_mem_exit;
10187 rc = 0;
10189 test_mem_exit:
10190 return rc;
10193 static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up)
10195 int cnt = 1000;
10197 if (link_up)
10198 while (bnx2x_link_test(bp) && cnt--)
10199 msleep(10);
10202 static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
10204 unsigned int pkt_size, num_pkts, i;
10205 struct sk_buff *skb;
10206 unsigned char *packet;
10207 struct bnx2x_fastpath *fp_rx = &bp->fp[0];
10208 struct bnx2x_fastpath *fp_tx = &bp->fp[0];
10209 u16 tx_start_idx, tx_idx;
10210 u16 rx_start_idx, rx_idx;
10211 u16 pkt_prod, bd_prod;
10212 struct sw_tx_bd *tx_buf;
10213 struct eth_tx_start_bd *tx_start_bd;
10214 struct eth_tx_parse_bd *pbd = NULL;
10215 dma_addr_t mapping;
10216 union eth_rx_cqe *cqe;
10217 u8 cqe_fp_flags;
10218 struct sw_rx_bd *rx_buf;
10219 u16 len;
10220 int rc = -ENODEV;
10222 /* check the loopback mode */
10223 switch (loopback_mode) {
10224 case BNX2X_PHY_LOOPBACK:
10225 if (bp->link_params.loopback_mode != LOOPBACK_XGXS_10)
10226 return -EINVAL;
10227 break;
10228 case BNX2X_MAC_LOOPBACK:
10229 bp->link_params.loopback_mode = LOOPBACK_BMAC;
10230 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
10231 break;
10232 default:
10233 return -EINVAL;
10236 /* prepare the loopback packet */
10237 pkt_size = (((bp->dev->mtu < ETH_MAX_PACKET_SIZE) ?
10238 bp->dev->mtu : ETH_MAX_PACKET_SIZE) + ETH_HLEN);
10239 skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
10240 if (!skb) {
10241 rc = -ENOMEM;
10242 goto test_loopback_exit;
10244 packet = skb_put(skb, pkt_size);
10245 memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
10246 memset(packet + ETH_ALEN, 0, ETH_ALEN);
10247 memset(packet + 2*ETH_ALEN, 0x77, (ETH_HLEN - 2*ETH_ALEN));
10248 for (i = ETH_HLEN; i < pkt_size; i++)
10249 packet[i] = (unsigned char) (i & 0xff);
10251 /* send the loopback packet */
10252 num_pkts = 0;
10253 tx_start_idx = le16_to_cpu(*fp_tx->tx_cons_sb);
10254 rx_start_idx = le16_to_cpu(*fp_rx->rx_cons_sb);
10256 pkt_prod = fp_tx->tx_pkt_prod++;
10257 tx_buf = &fp_tx->tx_buf_ring[TX_BD(pkt_prod)];
10258 tx_buf->first_bd = fp_tx->tx_bd_prod;
10259 tx_buf->skb = skb;
10260 tx_buf->flags = 0;
10262 bd_prod = TX_BD(fp_tx->tx_bd_prod);
10263 tx_start_bd = &fp_tx->tx_desc_ring[bd_prod].start_bd;
10264 mapping = pci_map_single(bp->pdev, skb->data,
10265 skb_headlen(skb), PCI_DMA_TODEVICE);
10266 tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
10267 tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
10268 tx_start_bd->nbd = cpu_to_le16(2); /* start + pbd */
10269 tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
10270 tx_start_bd->vlan = cpu_to_le16(pkt_prod);
10271 tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
10272 tx_start_bd->general_data = ((UNICAST_ADDRESS <<
10273 ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT) | 1);
10275 /* turn on parsing and get a BD */
10276 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
10277 pbd = &fp_tx->tx_desc_ring[bd_prod].parse_bd;
10279 memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
10281 wmb();
10283 fp_tx->tx_db.data.prod += 2;
10284 barrier();
10285 DOORBELL(bp, fp_tx->index, fp_tx->tx_db.raw);
10287 mmiowb();
10289 num_pkts++;
10290 fp_tx->tx_bd_prod += 2; /* start + pbd */
10292 udelay(100);
10294 tx_idx = le16_to_cpu(*fp_tx->tx_cons_sb);
10295 if (tx_idx != tx_start_idx + num_pkts)
10296 goto test_loopback_exit;
10298 rx_idx = le16_to_cpu(*fp_rx->rx_cons_sb);
10299 if (rx_idx != rx_start_idx + num_pkts)
10300 goto test_loopback_exit;
10302 cqe = &fp_rx->rx_comp_ring[RCQ_BD(fp_rx->rx_comp_cons)];
10303 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
10304 if (CQE_TYPE(cqe_fp_flags) || (cqe_fp_flags & ETH_RX_ERROR_FALGS))
10305 goto test_loopback_rx_exit;
10307 len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
10308 if (len != pkt_size)
10309 goto test_loopback_rx_exit;
10311 rx_buf = &fp_rx->rx_buf_ring[RX_BD(fp_rx->rx_bd_cons)];
10312 skb = rx_buf->skb;
10313 skb_reserve(skb, cqe->fast_path_cqe.placement_offset);
10314 for (i = ETH_HLEN; i < pkt_size; i++)
10315 if (*(skb->data + i) != (unsigned char) (i & 0xff))
10316 goto test_loopback_rx_exit;
10318 rc = 0;
10320 test_loopback_rx_exit:
10322 fp_rx->rx_bd_cons = NEXT_RX_IDX(fp_rx->rx_bd_cons);
10323 fp_rx->rx_bd_prod = NEXT_RX_IDX(fp_rx->rx_bd_prod);
10324 fp_rx->rx_comp_cons = NEXT_RCQ_IDX(fp_rx->rx_comp_cons);
10325 fp_rx->rx_comp_prod = NEXT_RCQ_IDX(fp_rx->rx_comp_prod);
10327 /* Update producers */
10328 bnx2x_update_rx_prod(bp, fp_rx, fp_rx->rx_bd_prod, fp_rx->rx_comp_prod,
10329 fp_rx->rx_sge_prod);
10331 test_loopback_exit:
10332 bp->link_params.loopback_mode = LOOPBACK_NONE;
10334 return rc;
10337 static int bnx2x_test_loopback(struct bnx2x *bp, u8 link_up)
10339 int rc = 0, res;
10341 if (!netif_running(bp->dev))
10342 return BNX2X_LOOPBACK_FAILED;
10344 bnx2x_netif_stop(bp, 1);
10345 bnx2x_acquire_phy_lock(bp);
10347 res = bnx2x_run_loopback(bp, BNX2X_PHY_LOOPBACK, link_up);
10348 if (res) {
10349 DP(NETIF_MSG_PROBE, " PHY loopback failed (res %d)\n", res);
10350 rc |= BNX2X_PHY_LOOPBACK_FAILED;
10353 res = bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK, link_up);
10354 if (res) {
10355 DP(NETIF_MSG_PROBE, " MAC loopback failed (res %d)\n", res);
10356 rc |= BNX2X_MAC_LOOPBACK_FAILED;
10359 bnx2x_release_phy_lock(bp);
10360 bnx2x_netif_start(bp);
10362 return rc;
10365 #define CRC32_RESIDUAL 0xdebb20e3
10367 static int bnx2x_test_nvram(struct bnx2x *bp)
10369 static const struct {
10370 int offset;
10371 int size;
10372 } nvram_tbl[] = {
10373 { 0, 0x14 }, /* bootstrap */
10374 { 0x14, 0xec }, /* dir */
10375 { 0x100, 0x350 }, /* manuf_info */
10376 { 0x450, 0xf0 }, /* feature_info */
10377 { 0x640, 0x64 }, /* upgrade_key_info */
10378 { 0x6a4, 0x64 },
10379 { 0x708, 0x70 }, /* manuf_key_info */
10380 { 0x778, 0x70 },
10381 { 0, 0 }
10383 __be32 buf[0x350 / 4];
10384 u8 *data = (u8 *)buf;
10385 int i, rc;
10386 u32 magic, crc;
10388 rc = bnx2x_nvram_read(bp, 0, data, 4);
10389 if (rc) {
10390 DP(NETIF_MSG_PROBE, "magic value read (rc %d)\n", rc);
10391 goto test_nvram_exit;
10394 magic = be32_to_cpu(buf[0]);
10395 if (magic != 0x669955aa) {
10396 DP(NETIF_MSG_PROBE, "magic value (0x%08x)\n", magic);
10397 rc = -ENODEV;
10398 goto test_nvram_exit;
10401 for (i = 0; nvram_tbl[i].size; i++) {
10403 rc = bnx2x_nvram_read(bp, nvram_tbl[i].offset, data,
10404 nvram_tbl[i].size);
10405 if (rc) {
10406 DP(NETIF_MSG_PROBE,
10407 "nvram_tbl[%d] read data (rc %d)\n", i, rc);
10408 goto test_nvram_exit;
10411 crc = ether_crc_le(nvram_tbl[i].size, data);
10412 if (crc != CRC32_RESIDUAL) {
10413 DP(NETIF_MSG_PROBE,
10414 "nvram_tbl[%d] crc value (0x%08x)\n", i, crc);
10415 rc = -ENODEV;
10416 goto test_nvram_exit;
10420 test_nvram_exit:
10421 return rc;
10424 static int bnx2x_test_intr(struct bnx2x *bp)
10426 struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
10427 int i, rc;
10429 if (!netif_running(bp->dev))
10430 return -ENODEV;
10432 config->hdr.length = 0;
10433 if (CHIP_IS_E1(bp))
10434 config->hdr.offset = (BP_PORT(bp) ? 32 : 0);
10435 else
10436 config->hdr.offset = BP_FUNC(bp);
10437 config->hdr.client_id = bp->fp->cl_id;
10438 config->hdr.reserved1 = 0;
10440 bp->set_mac_pending++;
10441 smp_wmb();
10442 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
10443 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
10444 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
10445 if (rc == 0) {
10446 for (i = 0; i < 10; i++) {
10447 if (!bp->set_mac_pending)
10448 break;
10449 smp_rmb();
10450 msleep_interruptible(10);
10452 if (i == 10)
10453 rc = -ENODEV;
10456 return rc;
10459 static void bnx2x_self_test(struct net_device *dev,
10460 struct ethtool_test *etest, u64 *buf)
10462 struct bnx2x *bp = netdev_priv(dev);
10464 memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS);
10466 if (!netif_running(dev))
10467 return;
10469 /* offline tests are not supported in MF mode */
10470 if (IS_E1HMF(bp))
10471 etest->flags &= ~ETH_TEST_FL_OFFLINE;
10473 if (etest->flags & ETH_TEST_FL_OFFLINE) {
10474 int port = BP_PORT(bp);
10475 u32 val;
10476 u8 link_up;
10478 /* save current value of input enable for TX port IF */
10479 val = REG_RD(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4);
10480 /* disable input for TX port IF */
10481 REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, 0);
10483 link_up = (bnx2x_link_test(bp) == 0);
10484 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
10485 bnx2x_nic_load(bp, LOAD_DIAG);
10486 /* wait until link state is restored */
10487 bnx2x_wait_for_link(bp, link_up);
10489 if (bnx2x_test_registers(bp) != 0) {
10490 buf[0] = 1;
10491 etest->flags |= ETH_TEST_FL_FAILED;
10493 if (bnx2x_test_memory(bp) != 0) {
10494 buf[1] = 1;
10495 etest->flags |= ETH_TEST_FL_FAILED;
10497 buf[2] = bnx2x_test_loopback(bp, link_up);
10498 if (buf[2] != 0)
10499 etest->flags |= ETH_TEST_FL_FAILED;
10501 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
10503 /* restore input for TX port IF */
10504 REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, val);
10506 bnx2x_nic_load(bp, LOAD_NORMAL);
10507 /* wait until link state is restored */
10508 bnx2x_wait_for_link(bp, link_up);
10510 if (bnx2x_test_nvram(bp) != 0) {
10511 buf[3] = 1;
10512 etest->flags |= ETH_TEST_FL_FAILED;
10514 if (bnx2x_test_intr(bp) != 0) {
10515 buf[4] = 1;
10516 etest->flags |= ETH_TEST_FL_FAILED;
10518 if (bp->port.pmf)
10519 if (bnx2x_link_test(bp) != 0) {
10520 buf[5] = 1;
10521 etest->flags |= ETH_TEST_FL_FAILED;
10524 #ifdef BNX2X_EXTRA_DEBUG
10525 bnx2x_panic_dump(bp);
10526 #endif
10529 static const struct {
10530 long offset;
10531 int size;
10532 u8 string[ETH_GSTRING_LEN];
10533 } bnx2x_q_stats_arr[BNX2X_NUM_Q_STATS] = {
10534 /* 1 */ { Q_STATS_OFFSET32(total_bytes_received_hi), 8, "[%d]: rx_bytes" },
10535 { Q_STATS_OFFSET32(error_bytes_received_hi),
10536 8, "[%d]: rx_error_bytes" },
10537 { Q_STATS_OFFSET32(total_unicast_packets_received_hi),
10538 8, "[%d]: rx_ucast_packets" },
10539 { Q_STATS_OFFSET32(total_multicast_packets_received_hi),
10540 8, "[%d]: rx_mcast_packets" },
10541 { Q_STATS_OFFSET32(total_broadcast_packets_received_hi),
10542 8, "[%d]: rx_bcast_packets" },
10543 { Q_STATS_OFFSET32(no_buff_discard_hi), 8, "[%d]: rx_discards" },
10544 { Q_STATS_OFFSET32(rx_err_discard_pkt),
10545 4, "[%d]: rx_phy_ip_err_discards"},
10546 { Q_STATS_OFFSET32(rx_skb_alloc_failed),
10547 4, "[%d]: rx_skb_alloc_discard" },
10548 { Q_STATS_OFFSET32(hw_csum_err), 4, "[%d]: rx_csum_offload_errors" },
10550 /* 10 */{ Q_STATS_OFFSET32(total_bytes_transmitted_hi), 8, "[%d]: tx_bytes" },
10551 { Q_STATS_OFFSET32(total_unicast_packets_transmitted_hi),
10552 8, "[%d]: tx_packets" }
10555 static const struct {
10556 long offset;
10557 int size;
10558 u32 flags;
10559 #define STATS_FLAGS_PORT 1
10560 #define STATS_FLAGS_FUNC 2
10561 #define STATS_FLAGS_BOTH (STATS_FLAGS_FUNC | STATS_FLAGS_PORT)
10562 u8 string[ETH_GSTRING_LEN];
10563 } bnx2x_stats_arr[BNX2X_NUM_STATS] = {
10564 /* 1 */ { STATS_OFFSET32(total_bytes_received_hi),
10565 8, STATS_FLAGS_BOTH, "rx_bytes" },
10566 { STATS_OFFSET32(error_bytes_received_hi),
10567 8, STATS_FLAGS_BOTH, "rx_error_bytes" },
10568 { STATS_OFFSET32(total_unicast_packets_received_hi),
10569 8, STATS_FLAGS_BOTH, "rx_ucast_packets" },
10570 { STATS_OFFSET32(total_multicast_packets_received_hi),
10571 8, STATS_FLAGS_BOTH, "rx_mcast_packets" },
10572 { STATS_OFFSET32(total_broadcast_packets_received_hi),
10573 8, STATS_FLAGS_BOTH, "rx_bcast_packets" },
10574 { STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
10575 8, STATS_FLAGS_PORT, "rx_crc_errors" },
10576 { STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
10577 8, STATS_FLAGS_PORT, "rx_align_errors" },
10578 { STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
10579 8, STATS_FLAGS_PORT, "rx_undersize_packets" },
10580 { STATS_OFFSET32(etherstatsoverrsizepkts_hi),
10581 8, STATS_FLAGS_PORT, "rx_oversize_packets" },
10582 /* 10 */{ STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
10583 8, STATS_FLAGS_PORT, "rx_fragments" },
10584 { STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
10585 8, STATS_FLAGS_PORT, "rx_jabbers" },
10586 { STATS_OFFSET32(no_buff_discard_hi),
10587 8, STATS_FLAGS_BOTH, "rx_discards" },
10588 { STATS_OFFSET32(mac_filter_discard),
10589 4, STATS_FLAGS_PORT, "rx_filtered_packets" },
10590 { STATS_OFFSET32(xxoverflow_discard),
10591 4, STATS_FLAGS_PORT, "rx_fw_discards" },
10592 { STATS_OFFSET32(brb_drop_hi),
10593 8, STATS_FLAGS_PORT, "rx_brb_discard" },
10594 { STATS_OFFSET32(brb_truncate_hi),
10595 8, STATS_FLAGS_PORT, "rx_brb_truncate" },
10596 { STATS_OFFSET32(pause_frames_received_hi),
10597 8, STATS_FLAGS_PORT, "rx_pause_frames" },
10598 { STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
10599 8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
10600 { STATS_OFFSET32(nig_timer_max),
10601 4, STATS_FLAGS_PORT, "rx_constant_pause_events" },
10602 /* 20 */{ STATS_OFFSET32(rx_err_discard_pkt),
10603 4, STATS_FLAGS_BOTH, "rx_phy_ip_err_discards"},
10604 { STATS_OFFSET32(rx_skb_alloc_failed),
10605 4, STATS_FLAGS_BOTH, "rx_skb_alloc_discard" },
10606 { STATS_OFFSET32(hw_csum_err),
10607 4, STATS_FLAGS_BOTH, "rx_csum_offload_errors" },
10609 { STATS_OFFSET32(total_bytes_transmitted_hi),
10610 8, STATS_FLAGS_BOTH, "tx_bytes" },
10611 { STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
10612 8, STATS_FLAGS_PORT, "tx_error_bytes" },
10613 { STATS_OFFSET32(total_unicast_packets_transmitted_hi),
10614 8, STATS_FLAGS_BOTH, "tx_packets" },
10615 { STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
10616 8, STATS_FLAGS_PORT, "tx_mac_errors" },
10617 { STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
10618 8, STATS_FLAGS_PORT, "tx_carrier_errors" },
10619 { STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
10620 8, STATS_FLAGS_PORT, "tx_single_collisions" },
10621 { STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
10622 8, STATS_FLAGS_PORT, "tx_multi_collisions" },
10623 /* 30 */{ STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
10624 8, STATS_FLAGS_PORT, "tx_deferred" },
10625 { STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
10626 8, STATS_FLAGS_PORT, "tx_excess_collisions" },
10627 { STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
10628 8, STATS_FLAGS_PORT, "tx_late_collisions" },
10629 { STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
10630 8, STATS_FLAGS_PORT, "tx_total_collisions" },
10631 { STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
10632 8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
10633 { STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
10634 8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
10635 { STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
10636 8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
10637 { STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
10638 8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
10639 { STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
10640 8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
10641 { STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
10642 8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
10643 /* 40 */{ STATS_OFFSET32(etherstatspktsover1522octets_hi),
10644 8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
10645 { STATS_OFFSET32(pause_frames_sent_hi),
10646 8, STATS_FLAGS_PORT, "tx_pause_frames" }
10649 #define IS_PORT_STAT(i) \
10650 ((bnx2x_stats_arr[i].flags & STATS_FLAGS_BOTH) == STATS_FLAGS_PORT)
10651 #define IS_FUNC_STAT(i) (bnx2x_stats_arr[i].flags & STATS_FLAGS_FUNC)
10652 #define IS_E1HMF_MODE_STAT(bp) \
10653 (IS_E1HMF(bp) && !(bp->msglevel & BNX2X_MSG_STATS))
10655 static int bnx2x_get_sset_count(struct net_device *dev, int stringset)
10657 struct bnx2x *bp = netdev_priv(dev);
10658 int i, num_stats;
10660 switch(stringset) {
10661 case ETH_SS_STATS:
10662 if (is_multi(bp)) {
10663 num_stats = BNX2X_NUM_Q_STATS * bp->num_queues;
10664 if (!IS_E1HMF_MODE_STAT(bp))
10665 num_stats += BNX2X_NUM_STATS;
10666 } else {
10667 if (IS_E1HMF_MODE_STAT(bp)) {
10668 num_stats = 0;
10669 for (i = 0; i < BNX2X_NUM_STATS; i++)
10670 if (IS_FUNC_STAT(i))
10671 num_stats++;
10672 } else
10673 num_stats = BNX2X_NUM_STATS;
10675 return num_stats;
10677 case ETH_SS_TEST:
10678 return BNX2X_NUM_TESTS;
10680 default:
10681 return -EINVAL;
10685 static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
10687 struct bnx2x *bp = netdev_priv(dev);
10688 int i, j, k;
10690 switch (stringset) {
10691 case ETH_SS_STATS:
10692 if (is_multi(bp)) {
10693 k = 0;
10694 for_each_queue(bp, i) {
10695 for (j = 0; j < BNX2X_NUM_Q_STATS; j++)
10696 sprintf(buf + (k + j)*ETH_GSTRING_LEN,
10697 bnx2x_q_stats_arr[j].string, i);
10698 k += BNX2X_NUM_Q_STATS;
10700 if (IS_E1HMF_MODE_STAT(bp))
10701 break;
10702 for (j = 0; j < BNX2X_NUM_STATS; j++)
10703 strcpy(buf + (k + j)*ETH_GSTRING_LEN,
10704 bnx2x_stats_arr[j].string);
10705 } else {
10706 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
10707 if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
10708 continue;
10709 strcpy(buf + j*ETH_GSTRING_LEN,
10710 bnx2x_stats_arr[i].string);
10711 j++;
10714 break;
10716 case ETH_SS_TEST:
10717 memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr));
10718 break;
10722 static void bnx2x_get_ethtool_stats(struct net_device *dev,
10723 struct ethtool_stats *stats, u64 *buf)
10725 struct bnx2x *bp = netdev_priv(dev);
10726 u32 *hw_stats, *offset;
10727 int i, j, k;
10729 if (is_multi(bp)) {
10730 k = 0;
10731 for_each_queue(bp, i) {
10732 hw_stats = (u32 *)&bp->fp[i].eth_q_stats;
10733 for (j = 0; j < BNX2X_NUM_Q_STATS; j++) {
10734 if (bnx2x_q_stats_arr[j].size == 0) {
10735 /* skip this counter */
10736 buf[k + j] = 0;
10737 continue;
10739 offset = (hw_stats +
10740 bnx2x_q_stats_arr[j].offset);
10741 if (bnx2x_q_stats_arr[j].size == 4) {
10742 /* 4-byte counter */
10743 buf[k + j] = (u64) *offset;
10744 continue;
10746 /* 8-byte counter */
10747 buf[k + j] = HILO_U64(*offset, *(offset + 1));
10749 k += BNX2X_NUM_Q_STATS;
10751 if (IS_E1HMF_MODE_STAT(bp))
10752 return;
10753 hw_stats = (u32 *)&bp->eth_stats;
10754 for (j = 0; j < BNX2X_NUM_STATS; j++) {
10755 if (bnx2x_stats_arr[j].size == 0) {
10756 /* skip this counter */
10757 buf[k + j] = 0;
10758 continue;
10760 offset = (hw_stats + bnx2x_stats_arr[j].offset);
10761 if (bnx2x_stats_arr[j].size == 4) {
10762 /* 4-byte counter */
10763 buf[k + j] = (u64) *offset;
10764 continue;
10766 /* 8-byte counter */
10767 buf[k + j] = HILO_U64(*offset, *(offset + 1));
10769 } else {
10770 hw_stats = (u32 *)&bp->eth_stats;
10771 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
10772 if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
10773 continue;
10774 if (bnx2x_stats_arr[i].size == 0) {
10775 /* skip this counter */
10776 buf[j] = 0;
10777 j++;
10778 continue;
10780 offset = (hw_stats + bnx2x_stats_arr[i].offset);
10781 if (bnx2x_stats_arr[i].size == 4) {
10782 /* 4-byte counter */
10783 buf[j] = (u64) *offset;
10784 j++;
10785 continue;
10787 /* 8-byte counter */
10788 buf[j] = HILO_U64(*offset, *(offset + 1));
10789 j++;
10794 static int bnx2x_phys_id(struct net_device *dev, u32 data)
10796 struct bnx2x *bp = netdev_priv(dev);
10797 int i;
10799 if (!netif_running(dev))
10800 return 0;
10802 if (!bp->port.pmf)
10803 return 0;
10805 if (data == 0)
10806 data = 2;
10808 for (i = 0; i < (data * 2); i++) {
10809 if ((i % 2) == 0)
10810 bnx2x_set_led(&bp->link_params, LED_MODE_OPER,
10811 SPEED_1000);
10812 else
10813 bnx2x_set_led(&bp->link_params, LED_MODE_OFF, 0);
10815 msleep_interruptible(500);
10816 if (signal_pending(current))
10817 break;
10820 if (bp->link_vars.link_up)
10821 bnx2x_set_led(&bp->link_params, LED_MODE_OPER,
10822 bp->link_vars.line_speed);
10824 return 0;
10827 static const struct ethtool_ops bnx2x_ethtool_ops = {
10828 .get_settings = bnx2x_get_settings,
10829 .set_settings = bnx2x_set_settings,
10830 .get_drvinfo = bnx2x_get_drvinfo,
10831 .get_regs_len = bnx2x_get_regs_len,
10832 .get_regs = bnx2x_get_regs,
10833 .get_wol = bnx2x_get_wol,
10834 .set_wol = bnx2x_set_wol,
10835 .get_msglevel = bnx2x_get_msglevel,
10836 .set_msglevel = bnx2x_set_msglevel,
10837 .nway_reset = bnx2x_nway_reset,
10838 .get_link = bnx2x_get_link,
10839 .get_eeprom_len = bnx2x_get_eeprom_len,
10840 .get_eeprom = bnx2x_get_eeprom,
10841 .set_eeprom = bnx2x_set_eeprom,
10842 .get_coalesce = bnx2x_get_coalesce,
10843 .set_coalesce = bnx2x_set_coalesce,
10844 .get_ringparam = bnx2x_get_ringparam,
10845 .set_ringparam = bnx2x_set_ringparam,
10846 .get_pauseparam = bnx2x_get_pauseparam,
10847 .set_pauseparam = bnx2x_set_pauseparam,
10848 .get_rx_csum = bnx2x_get_rx_csum,
10849 .set_rx_csum = bnx2x_set_rx_csum,
10850 .get_tx_csum = ethtool_op_get_tx_csum,
10851 .set_tx_csum = ethtool_op_set_tx_hw_csum,
10852 .set_flags = bnx2x_set_flags,
10853 .get_flags = ethtool_op_get_flags,
10854 .get_sg = ethtool_op_get_sg,
10855 .set_sg = ethtool_op_set_sg,
10856 .get_tso = ethtool_op_get_tso,
10857 .set_tso = bnx2x_set_tso,
10858 .self_test = bnx2x_self_test,
10859 .get_sset_count = bnx2x_get_sset_count,
10860 .get_strings = bnx2x_get_strings,
10861 .phys_id = bnx2x_phys_id,
10862 .get_ethtool_stats = bnx2x_get_ethtool_stats,
10865 /* end of ethtool_ops */
10867 /****************************************************************************
10868 * General service functions
10869 ****************************************************************************/
10871 static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
10873 u16 pmcsr;
10875 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
10877 switch (state) {
10878 case PCI_D0:
10879 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
10880 ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
10881 PCI_PM_CTRL_PME_STATUS));
10883 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
10884 /* delay required during transition out of D3hot */
10885 msleep(20);
10886 break;
10888 case PCI_D3hot:
10889 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
10890 pmcsr |= 3;
10892 if (bp->wol)
10893 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
10895 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
10896 pmcsr);
10898 /* No more memory access after this point until
10899 * device is brought back to D0.
10901 break;
10903 default:
10904 return -EINVAL;
10906 return 0;
10909 static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp)
10911 u16 rx_cons_sb;
10913 /* Tell compiler that status block fields can change */
10914 barrier();
10915 rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
10916 if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
10917 rx_cons_sb++;
10918 return (fp->rx_comp_cons != rx_cons_sb);
10922 * net_device service functions
10925 static int bnx2x_poll(struct napi_struct *napi, int budget)
10927 int work_done = 0;
10928 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
10929 napi);
10930 struct bnx2x *bp = fp->bp;
10932 while (1) {
10933 #ifdef BNX2X_STOP_ON_ERROR
10934 if (unlikely(bp->panic)) {
10935 napi_complete(napi);
10936 return 0;
10938 #endif
10940 if (bnx2x_has_tx_work(fp))
10941 bnx2x_tx_int(fp);
10943 if (bnx2x_has_rx_work(fp)) {
10944 work_done += bnx2x_rx_int(fp, budget - work_done);
10946 /* must not complete if we consumed full budget */
10947 if (work_done >= budget)
10948 break;
10951 /* Fall out from the NAPI loop if needed */
10952 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
10953 bnx2x_update_fpsb_idx(fp);
10954 /* bnx2x_has_rx_work() reads the status block, thus we need
10955 * to ensure that status block indices have been actually read
10956 * (bnx2x_update_fpsb_idx) prior to this check
10957 * (bnx2x_has_rx_work) so that we won't write the "newer"
10958 * value of the status block to IGU (if there was a DMA right
10959 * after bnx2x_has_rx_work and if there is no rmb, the memory
10960 * reading (bnx2x_update_fpsb_idx) may be postponed to right
10961 * before bnx2x_ack_sb). In this case there will never be
10962 * another interrupt until there is another update of the
10963 * status block, while there is still unhandled work.
10965 rmb();
10967 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
10968 napi_complete(napi);
10969 /* Re-enable interrupts */
10970 bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
10971 le16_to_cpu(fp->fp_c_idx),
10972 IGU_INT_NOP, 1);
10973 bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
10974 le16_to_cpu(fp->fp_u_idx),
10975 IGU_INT_ENABLE, 1);
10976 break;
10981 return work_done;
10985 /* we split the first BD into headers and data BDs
10986 * to ease the pain of our fellow microcode engineers
10987 * we use one mapping for both BDs
10988 * So far this has only been observed to happen
10989 * in Other Operating Systems(TM)
10991 static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
10992 struct bnx2x_fastpath *fp,
10993 struct sw_tx_bd *tx_buf,
10994 struct eth_tx_start_bd **tx_bd, u16 hlen,
10995 u16 bd_prod, int nbd)
10997 struct eth_tx_start_bd *h_tx_bd = *tx_bd;
10998 struct eth_tx_bd *d_tx_bd;
10999 dma_addr_t mapping;
11000 int old_len = le16_to_cpu(h_tx_bd->nbytes);
11002 /* first fix first BD */
11003 h_tx_bd->nbd = cpu_to_le16(nbd);
11004 h_tx_bd->nbytes = cpu_to_le16(hlen);
11006 DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
11007 "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
11008 h_tx_bd->addr_lo, h_tx_bd->nbd);
11010 /* now get a new data BD
11011 * (after the pbd) and fill it */
11012 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
11013 d_tx_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
11015 mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
11016 le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
11018 d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
11019 d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
11020 d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
11022 /* this marks the BD as one that has no individual mapping */
11023 tx_buf->flags |= BNX2X_TSO_SPLIT_BD;
11025 DP(NETIF_MSG_TX_QUEUED,
11026 "TSO split data size is %d (%x:%x)\n",
11027 d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
11029 /* update tx_bd */
11030 *tx_bd = (struct eth_tx_start_bd *)d_tx_bd;
11032 return bd_prod;
11035 static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
11037 if (fix > 0)
11038 csum = (u16) ~csum_fold(csum_sub(csum,
11039 csum_partial(t_header - fix, fix, 0)));
11041 else if (fix < 0)
11042 csum = (u16) ~csum_fold(csum_add(csum,
11043 csum_partial(t_header, -fix, 0)));
11045 return swab16(csum);
11048 static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
11050 u32 rc;
11052 if (skb->ip_summed != CHECKSUM_PARTIAL)
11053 rc = XMIT_PLAIN;
11055 else {
11056 if (skb->protocol == htons(ETH_P_IPV6)) {
11057 rc = XMIT_CSUM_V6;
11058 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
11059 rc |= XMIT_CSUM_TCP;
11061 } else {
11062 rc = XMIT_CSUM_V4;
11063 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
11064 rc |= XMIT_CSUM_TCP;
11068 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
11069 rc |= (XMIT_GSO_V4 | XMIT_CSUM_V4 | XMIT_CSUM_TCP);
11071 else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
11072 rc |= (XMIT_GSO_V6 | XMIT_CSUM_TCP | XMIT_CSUM_V6);
11074 return rc;
11077 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
11078 /* check if packet requires linearization (packet is too fragmented)
11079 no need to check fragmentation if page size > 8K (there will be no
11080 violation to FW restrictions) */
11081 static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
11082 u32 xmit_type)
11084 int to_copy = 0;
11085 int hlen = 0;
11086 int first_bd_sz = 0;
11088 /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
11089 if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
11091 if (xmit_type & XMIT_GSO) {
11092 unsigned short lso_mss = skb_shinfo(skb)->gso_size;
11093 /* Check if LSO packet needs to be copied:
11094 3 = 1 (for headers BD) + 2 (for PBD and last BD) */
11095 int wnd_size = MAX_FETCH_BD - 3;
11096 /* Number of windows to check */
11097 int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
11098 int wnd_idx = 0;
11099 int frag_idx = 0;
11100 u32 wnd_sum = 0;
11102 /* Headers length */
11103 hlen = (int)(skb_transport_header(skb) - skb->data) +
11104 tcp_hdrlen(skb);
11106 /* Amount of data (w/o headers) on linear part of SKB*/
11107 first_bd_sz = skb_headlen(skb) - hlen;
11109 wnd_sum = first_bd_sz;
11111 /* Calculate the first sum - it's special */
11112 for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
11113 wnd_sum +=
11114 skb_shinfo(skb)->frags[frag_idx].size;
11116 /* If there was data on linear skb data - check it */
11117 if (first_bd_sz > 0) {
11118 if (unlikely(wnd_sum < lso_mss)) {
11119 to_copy = 1;
11120 goto exit_lbl;
11123 wnd_sum -= first_bd_sz;
11126 /* Others are easier: run through the frag list and
11127 check all windows */
11128 for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
11129 wnd_sum +=
11130 skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;
11132 if (unlikely(wnd_sum < lso_mss)) {
11133 to_copy = 1;
11134 break;
11136 wnd_sum -=
11137 skb_shinfo(skb)->frags[wnd_idx].size;
11139 } else {
11140 /* in non-LSO too fragmented packet should always
11141 be linearized */
11142 to_copy = 1;
11146 exit_lbl:
11147 if (unlikely(to_copy))
11148 DP(NETIF_MSG_TX_QUEUED,
11149 "Linearization IS REQUIRED for %s packet. "
11150 "num_frags %d hlen %d first_bd_sz %d\n",
11151 (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
11152 skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
11154 return to_copy;
11156 #endif
11158 /* called with netif_tx_lock
11159 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
11160 * netif_wake_queue()
11162 static netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
11164 struct bnx2x *bp = netdev_priv(dev);
11165 struct bnx2x_fastpath *fp;
11166 struct netdev_queue *txq;
11167 struct sw_tx_bd *tx_buf;
11168 struct eth_tx_start_bd *tx_start_bd;
11169 struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
11170 struct eth_tx_parse_bd *pbd = NULL;
11171 u16 pkt_prod, bd_prod;
11172 int nbd, fp_index;
11173 dma_addr_t mapping;
11174 u32 xmit_type = bnx2x_xmit_type(bp, skb);
11175 int i;
11176 u8 hlen = 0;
11177 __le16 pkt_size = 0;
11179 #ifdef BNX2X_STOP_ON_ERROR
11180 if (unlikely(bp->panic))
11181 return NETDEV_TX_BUSY;
11182 #endif
11184 fp_index = skb_get_queue_mapping(skb);
11185 txq = netdev_get_tx_queue(dev, fp_index);
11187 fp = &bp->fp[fp_index];
11189 if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) {
11190 fp->eth_q_stats.driver_xoff++;
11191 netif_tx_stop_queue(txq);
11192 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
11193 return NETDEV_TX_BUSY;
11196 DP(NETIF_MSG_TX_QUEUED, "SKB: summed %x protocol %x protocol(%x,%x)"
11197 " gso type %x xmit_type %x\n",
11198 skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
11199 ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
11201 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
11202 /* First, check if we need to linearize the skb (due to FW
11203 restrictions). No need to check fragmentation if page size > 8K
11204 (there will be no violation to FW restrictions) */
11205 if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
11206 /* Statistics of linearization */
11207 bp->lin_cnt++;
11208 if (skb_linearize(skb) != 0) {
11209 DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
11210 "silently dropping this SKB\n");
11211 dev_kfree_skb_any(skb);
11212 return NETDEV_TX_OK;
11215 #endif
11218 Please read carefully. First we use one BD which we mark as start,
11219 then we have a parsing info BD (used for TSO or xsum),
11220 and only then we have the rest of the TSO BDs.
11221 (don't forget to mark the last one as last,
11222 and to unmap only AFTER you write to the BD ...)
11223 And above all, all pdb sizes are in words - NOT DWORDS!
11226 pkt_prod = fp->tx_pkt_prod++;
11227 bd_prod = TX_BD(fp->tx_bd_prod);
11229 /* get a tx_buf and first BD */
11230 tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
11231 tx_start_bd = &fp->tx_desc_ring[bd_prod].start_bd;
11233 tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
11234 tx_start_bd->general_data = (UNICAST_ADDRESS <<
11235 ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT);
11236 /* header nbd */
11237 tx_start_bd->general_data |= (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT);
11239 /* remember the first BD of the packet */
11240 tx_buf->first_bd = fp->tx_bd_prod;
11241 tx_buf->skb = skb;
11242 tx_buf->flags = 0;
11244 DP(NETIF_MSG_TX_QUEUED,
11245 "sending pkt %u @%p next_idx %u bd %u @%p\n",
11246 pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_start_bd);
11248 #ifdef BCM_VLAN
11249 if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb) &&
11250 (bp->flags & HW_VLAN_TX_FLAG)) {
11251 tx_start_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
11252 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG;
11253 } else
11254 #endif
11255 tx_start_bd->vlan = cpu_to_le16(pkt_prod);
11257 /* turn on parsing and get a BD */
11258 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
11259 pbd = &fp->tx_desc_ring[bd_prod].parse_bd;
11261 memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
11263 if (xmit_type & XMIT_CSUM) {
11264 hlen = (skb_network_header(skb) - skb->data) / 2;
11266 /* for now NS flag is not used in Linux */
11267 pbd->global_data =
11268 (hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
11269 ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT));
11271 pbd->ip_hlen = (skb_transport_header(skb) -
11272 skb_network_header(skb)) / 2;
11274 hlen += pbd->ip_hlen + tcp_hdrlen(skb) / 2;
11276 pbd->total_hlen = cpu_to_le16(hlen);
11277 hlen = hlen*2;
11279 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
11281 if (xmit_type & XMIT_CSUM_V4)
11282 tx_start_bd->bd_flags.as_bitfield |=
11283 ETH_TX_BD_FLAGS_IP_CSUM;
11284 else
11285 tx_start_bd->bd_flags.as_bitfield |=
11286 ETH_TX_BD_FLAGS_IPV6;
11288 if (xmit_type & XMIT_CSUM_TCP) {
11289 pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
11291 } else {
11292 s8 fix = SKB_CS_OFF(skb); /* signed! */
11294 pbd->global_data |= ETH_TX_PARSE_BD_UDP_CS_FLG;
11296 DP(NETIF_MSG_TX_QUEUED,
11297 "hlen %d fix %d csum before fix %x\n",
11298 le16_to_cpu(pbd->total_hlen), fix, SKB_CS(skb));
11300 /* HW bug: fixup the CSUM */
11301 pbd->tcp_pseudo_csum =
11302 bnx2x_csum_fix(skb_transport_header(skb),
11303 SKB_CS(skb), fix);
11305 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
11306 pbd->tcp_pseudo_csum);
11310 mapping = pci_map_single(bp->pdev, skb->data,
11311 skb_headlen(skb), PCI_DMA_TODEVICE);
11313 tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
11314 tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
11315 nbd = skb_shinfo(skb)->nr_frags + 2; /* start_bd + pbd + frags */
11316 tx_start_bd->nbd = cpu_to_le16(nbd);
11317 tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
11318 pkt_size = tx_start_bd->nbytes;
11320 DP(NETIF_MSG_TX_QUEUED, "first bd @%p addr (%x:%x) nbd %d"
11321 " nbytes %d flags %x vlan %x\n",
11322 tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
11323 le16_to_cpu(tx_start_bd->nbd), le16_to_cpu(tx_start_bd->nbytes),
11324 tx_start_bd->bd_flags.as_bitfield, le16_to_cpu(tx_start_bd->vlan));
11326 if (xmit_type & XMIT_GSO) {
11328 DP(NETIF_MSG_TX_QUEUED,
11329 "TSO packet len %d hlen %d total len %d tso size %d\n",
11330 skb->len, hlen, skb_headlen(skb),
11331 skb_shinfo(skb)->gso_size);
11333 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
11335 if (unlikely(skb_headlen(skb) > hlen))
11336 bd_prod = bnx2x_tx_split(bp, fp, tx_buf, &tx_start_bd,
11337 hlen, bd_prod, ++nbd);
11339 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
11340 pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
11341 pbd->tcp_flags = pbd_tcp_flags(skb);
11343 if (xmit_type & XMIT_GSO_V4) {
11344 pbd->ip_id = swab16(ip_hdr(skb)->id);
11345 pbd->tcp_pseudo_csum =
11346 swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
11347 ip_hdr(skb)->daddr,
11348 0, IPPROTO_TCP, 0));
11350 } else
11351 pbd->tcp_pseudo_csum =
11352 swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
11353 &ipv6_hdr(skb)->daddr,
11354 0, IPPROTO_TCP, 0));
11356 pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN;
11358 tx_data_bd = (struct eth_tx_bd *)tx_start_bd;
11360 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
11361 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
11363 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
11364 tx_data_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
11365 if (total_pkt_bd == NULL)
11366 total_pkt_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
11368 mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
11369 frag->size, PCI_DMA_TODEVICE);
11371 tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
11372 tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
11373 tx_data_bd->nbytes = cpu_to_le16(frag->size);
11374 le16_add_cpu(&pkt_size, frag->size);
11376 DP(NETIF_MSG_TX_QUEUED,
11377 "frag %d bd @%p addr (%x:%x) nbytes %d\n",
11378 i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
11379 le16_to_cpu(tx_data_bd->nbytes));
11382 DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);
11384 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
11386 /* now send a tx doorbell, counting the next BD
11387 * if the packet contains or ends with it
11389 if (TX_BD_POFF(bd_prod) < nbd)
11390 nbd++;
11392 if (total_pkt_bd != NULL)
11393 total_pkt_bd->total_pkt_bytes = pkt_size;
11395 if (pbd)
11396 DP(NETIF_MSG_TX_QUEUED,
11397 "PBD @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u"
11398 " tcp_flags %x xsum %x seq %u hlen %u\n",
11399 pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id,
11400 pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum,
11401 pbd->tcp_send_seq, le16_to_cpu(pbd->total_hlen));
11403 DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
11406 * Make sure that the BD data is updated before updating the producer
11407 * since FW might read the BD right after the producer is updated.
11408 * This is only applicable for weak-ordered memory model archs such
11409 * as IA-64. The following barrier is also mandatory since FW will
11410 * assumes packets must have BDs.
11412 wmb();
11414 fp->tx_db.data.prod += nbd;
11415 barrier();
11416 DOORBELL(bp, fp->index, fp->tx_db.raw);
11418 mmiowb();
11420 fp->tx_bd_prod += nbd;
11422 if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
11423 netif_tx_stop_queue(txq);
11424 /* We want bnx2x_tx_int to "see" the updated tx_bd_prod
11425 if we put Tx into XOFF state. */
11426 smp_mb();
11427 fp->eth_q_stats.driver_xoff++;
11428 if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
11429 netif_tx_wake_queue(txq);
11431 fp->tx_pkt++;
11433 return NETDEV_TX_OK;
11436 /* called with rtnl_lock */
11437 static int bnx2x_open(struct net_device *dev)
11439 struct bnx2x *bp = netdev_priv(dev);
11441 netif_carrier_off(dev);
11443 bnx2x_set_power_state(bp, PCI_D0);
11445 return bnx2x_nic_load(bp, LOAD_OPEN);
11448 /* called with rtnl_lock */
11449 static int bnx2x_close(struct net_device *dev)
11451 struct bnx2x *bp = netdev_priv(dev);
11453 /* Unload the driver, release IRQs */
11454 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
11455 if (atomic_read(&bp->pdev->enable_cnt) == 1)
11456 if (!CHIP_REV_IS_SLOW(bp))
11457 bnx2x_set_power_state(bp, PCI_D3hot);
11459 return 0;
11462 /* called with netif_tx_lock from dev_mcast.c */
11463 static void bnx2x_set_rx_mode(struct net_device *dev)
11465 struct bnx2x *bp = netdev_priv(dev);
11466 u32 rx_mode = BNX2X_RX_MODE_NORMAL;
11467 int port = BP_PORT(bp);
11469 if (bp->state != BNX2X_STATE_OPEN) {
11470 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
11471 return;
11474 DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);
11476 if (dev->flags & IFF_PROMISC)
11477 rx_mode = BNX2X_RX_MODE_PROMISC;
11479 else if ((dev->flags & IFF_ALLMULTI) ||
11480 ((netdev_mc_count(dev) > BNX2X_MAX_MULTICAST) &&
11481 CHIP_IS_E1(bp)))
11482 rx_mode = BNX2X_RX_MODE_ALLMULTI;
11484 else { /* some multicasts */
11485 if (CHIP_IS_E1(bp)) {
11486 int i, old, offset;
11487 struct dev_mc_list *mclist;
11488 struct mac_configuration_cmd *config =
11489 bnx2x_sp(bp, mcast_config);
11491 for (i = 0, mclist = dev->mc_list;
11492 mclist && (i < netdev_mc_count(dev));
11493 i++, mclist = mclist->next) {
11495 config->config_table[i].
11496 cam_entry.msb_mac_addr =
11497 swab16(*(u16 *)&mclist->dmi_addr[0]);
11498 config->config_table[i].
11499 cam_entry.middle_mac_addr =
11500 swab16(*(u16 *)&mclist->dmi_addr[2]);
11501 config->config_table[i].
11502 cam_entry.lsb_mac_addr =
11503 swab16(*(u16 *)&mclist->dmi_addr[4]);
11504 config->config_table[i].cam_entry.flags =
11505 cpu_to_le16(port);
11506 config->config_table[i].
11507 target_table_entry.flags = 0;
11508 config->config_table[i].target_table_entry.
11509 clients_bit_vector =
11510 cpu_to_le32(1 << BP_L_ID(bp));
11511 config->config_table[i].
11512 target_table_entry.vlan_id = 0;
11514 DP(NETIF_MSG_IFUP,
11515 "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
11516 config->config_table[i].
11517 cam_entry.msb_mac_addr,
11518 config->config_table[i].
11519 cam_entry.middle_mac_addr,
11520 config->config_table[i].
11521 cam_entry.lsb_mac_addr);
11523 old = config->hdr.length;
11524 if (old > i) {
11525 for (; i < old; i++) {
11526 if (CAM_IS_INVALID(config->
11527 config_table[i])) {
11528 /* already invalidated */
11529 break;
11531 /* invalidate */
11532 CAM_INVALIDATE(config->
11533 config_table[i]);
11537 if (CHIP_REV_IS_SLOW(bp))
11538 offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
11539 else
11540 offset = BNX2X_MAX_MULTICAST*(1 + port);
11542 config->hdr.length = i;
11543 config->hdr.offset = offset;
11544 config->hdr.client_id = bp->fp->cl_id;
11545 config->hdr.reserved1 = 0;
11547 bp->set_mac_pending++;
11548 smp_wmb();
11550 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
11551 U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
11552 U64_LO(bnx2x_sp_mapping(bp, mcast_config)),
11554 } else { /* E1H */
11555 /* Accept one or more multicasts */
11556 struct dev_mc_list *mclist;
11557 u32 mc_filter[MC_HASH_SIZE];
11558 u32 crc, bit, regidx;
11559 int i;
11561 memset(mc_filter, 0, 4 * MC_HASH_SIZE);
11563 for (i = 0, mclist = dev->mc_list;
11564 mclist && (i < netdev_mc_count(dev));
11565 i++, mclist = mclist->next) {
11567 DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
11568 mclist->dmi_addr);
11570 crc = crc32c_le(0, mclist->dmi_addr, ETH_ALEN);
11571 bit = (crc >> 24) & 0xff;
11572 regidx = bit >> 5;
11573 bit &= 0x1f;
11574 mc_filter[regidx] |= (1 << bit);
11577 for (i = 0; i < MC_HASH_SIZE; i++)
11578 REG_WR(bp, MC_HASH_OFFSET(bp, i),
11579 mc_filter[i]);
11583 bp->rx_mode = rx_mode;
11584 bnx2x_set_storm_rx_mode(bp);
11587 /* called with rtnl_lock */
11588 static int bnx2x_change_mac_addr(struct net_device *dev, void *p)
11590 struct sockaddr *addr = p;
11591 struct bnx2x *bp = netdev_priv(dev);
11593 if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
11594 return -EINVAL;
11596 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
11597 if (netif_running(dev)) {
11598 if (CHIP_IS_E1(bp))
11599 bnx2x_set_eth_mac_addr_e1(bp, 1);
11600 else
11601 bnx2x_set_eth_mac_addr_e1h(bp, 1);
11604 return 0;
11607 /* called with rtnl_lock */
11608 static int bnx2x_mdio_read(struct net_device *netdev, int prtad,
11609 int devad, u16 addr)
11611 struct bnx2x *bp = netdev_priv(netdev);
11612 u16 value;
11613 int rc;
11614 u32 phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
11616 DP(NETIF_MSG_LINK, "mdio_read: prtad 0x%x, devad 0x%x, addr 0x%x\n",
11617 prtad, devad, addr);
11619 if (prtad != bp->mdio.prtad) {
11620 DP(NETIF_MSG_LINK, "prtad missmatch (cmd:0x%x != bp:0x%x)\n",
11621 prtad, bp->mdio.prtad);
11622 return -EINVAL;
11625 /* The HW expects different devad if CL22 is used */
11626 devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
11628 bnx2x_acquire_phy_lock(bp);
11629 rc = bnx2x_cl45_read(bp, BP_PORT(bp), phy_type, prtad,
11630 devad, addr, &value);
11631 bnx2x_release_phy_lock(bp);
11632 DP(NETIF_MSG_LINK, "mdio_read_val 0x%x rc = 0x%x\n", value, rc);
11634 if (!rc)
11635 rc = value;
11636 return rc;
11639 /* called with rtnl_lock */
11640 static int bnx2x_mdio_write(struct net_device *netdev, int prtad, int devad,
11641 u16 addr, u16 value)
11643 struct bnx2x *bp = netdev_priv(netdev);
11644 u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
11645 int rc;
11647 DP(NETIF_MSG_LINK, "mdio_write: prtad 0x%x, devad 0x%x, addr 0x%x,"
11648 " value 0x%x\n", prtad, devad, addr, value);
11650 if (prtad != bp->mdio.prtad) {
11651 DP(NETIF_MSG_LINK, "prtad missmatch (cmd:0x%x != bp:0x%x)\n",
11652 prtad, bp->mdio.prtad);
11653 return -EINVAL;
11656 /* The HW expects different devad if CL22 is used */
11657 devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
11659 bnx2x_acquire_phy_lock(bp);
11660 rc = bnx2x_cl45_write(bp, BP_PORT(bp), ext_phy_type, prtad,
11661 devad, addr, value);
11662 bnx2x_release_phy_lock(bp);
11663 return rc;
11666 /* called with rtnl_lock */
11667 static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
11669 struct bnx2x *bp = netdev_priv(dev);
11670 struct mii_ioctl_data *mdio = if_mii(ifr);
11672 DP(NETIF_MSG_LINK, "ioctl: phy id 0x%x, reg 0x%x, val_in 0x%x\n",
11673 mdio->phy_id, mdio->reg_num, mdio->val_in);
11675 if (!netif_running(dev))
11676 return -EAGAIN;
11678 return mdio_mii_ioctl(&bp->mdio, mdio, cmd);
11681 /* called with rtnl_lock */
11682 static int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
11684 struct bnx2x *bp = netdev_priv(dev);
11685 int rc = 0;
11687 if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
11688 ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
11689 return -EINVAL;
11691 /* This does not race with packet allocation
11692 * because the actual alloc size is
11693 * only updated as part of load
11695 dev->mtu = new_mtu;
11697 if (netif_running(dev)) {
11698 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
11699 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
11702 return rc;
11705 static void bnx2x_tx_timeout(struct net_device *dev)
11707 struct bnx2x *bp = netdev_priv(dev);
11709 #ifdef BNX2X_STOP_ON_ERROR
11710 if (!bp->panic)
11711 bnx2x_panic();
11712 #endif
11713 /* This allows the netif to be shutdown gracefully before resetting */
11714 schedule_work(&bp->reset_task);
11717 #ifdef BCM_VLAN
11718 /* called with rtnl_lock */
11719 static void bnx2x_vlan_rx_register(struct net_device *dev,
11720 struct vlan_group *vlgrp)
11722 struct bnx2x *bp = netdev_priv(dev);
11724 bp->vlgrp = vlgrp;
11726 /* Set flags according to the required capabilities */
11727 bp->flags &= ~(HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
11729 if (dev->features & NETIF_F_HW_VLAN_TX)
11730 bp->flags |= HW_VLAN_TX_FLAG;
11732 if (dev->features & NETIF_F_HW_VLAN_RX)
11733 bp->flags |= HW_VLAN_RX_FLAG;
11735 if (netif_running(dev))
11736 bnx2x_set_client_config(bp);
11739 #endif
11741 #ifdef CONFIG_NET_POLL_CONTROLLER
11742 static void poll_bnx2x(struct net_device *dev)
11744 struct bnx2x *bp = netdev_priv(dev);
11746 disable_irq(bp->pdev->irq);
11747 bnx2x_interrupt(bp->pdev->irq, dev);
11748 enable_irq(bp->pdev->irq);
11750 #endif
11752 static const struct net_device_ops bnx2x_netdev_ops = {
11753 .ndo_open = bnx2x_open,
11754 .ndo_stop = bnx2x_close,
11755 .ndo_start_xmit = bnx2x_start_xmit,
11756 .ndo_set_multicast_list = bnx2x_set_rx_mode,
11757 .ndo_set_mac_address = bnx2x_change_mac_addr,
11758 .ndo_validate_addr = eth_validate_addr,
11759 .ndo_do_ioctl = bnx2x_ioctl,
11760 .ndo_change_mtu = bnx2x_change_mtu,
11761 .ndo_tx_timeout = bnx2x_tx_timeout,
11762 #ifdef BCM_VLAN
11763 .ndo_vlan_rx_register = bnx2x_vlan_rx_register,
11764 #endif
11765 #ifdef CONFIG_NET_POLL_CONTROLLER
11766 .ndo_poll_controller = poll_bnx2x,
11767 #endif
11770 static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
11771 struct net_device *dev)
11773 struct bnx2x *bp;
11774 int rc;
11776 SET_NETDEV_DEV(dev, &pdev->dev);
11777 bp = netdev_priv(dev);
11779 bp->dev = dev;
11780 bp->pdev = pdev;
11781 bp->flags = 0;
11782 bp->func = PCI_FUNC(pdev->devfn);
11784 rc = pci_enable_device(pdev);
11785 if (rc) {
11786 printk(KERN_ERR PFX "Cannot enable PCI device, aborting\n");
11787 goto err_out;
11790 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
11791 printk(KERN_ERR PFX "Cannot find PCI device base address,"
11792 " aborting\n");
11793 rc = -ENODEV;
11794 goto err_out_disable;
11797 if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
11798 printk(KERN_ERR PFX "Cannot find second PCI device"
11799 " base address, aborting\n");
11800 rc = -ENODEV;
11801 goto err_out_disable;
11804 if (atomic_read(&pdev->enable_cnt) == 1) {
11805 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
11806 if (rc) {
11807 printk(KERN_ERR PFX "Cannot obtain PCI resources,"
11808 " aborting\n");
11809 goto err_out_disable;
11812 pci_set_master(pdev);
11813 pci_save_state(pdev);
11816 bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
11817 if (bp->pm_cap == 0) {
11818 printk(KERN_ERR PFX "Cannot find power management"
11819 " capability, aborting\n");
11820 rc = -EIO;
11821 goto err_out_release;
11824 bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
11825 if (bp->pcie_cap == 0) {
11826 printk(KERN_ERR PFX "Cannot find PCI Express capability,"
11827 " aborting\n");
11828 rc = -EIO;
11829 goto err_out_release;
11832 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0) {
11833 bp->flags |= USING_DAC_FLAG;
11834 if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) {
11835 printk(KERN_ERR PFX "pci_set_consistent_dma_mask"
11836 " failed, aborting\n");
11837 rc = -EIO;
11838 goto err_out_release;
11841 } else if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
11842 printk(KERN_ERR PFX "System does not support DMA,"
11843 " aborting\n");
11844 rc = -EIO;
11845 goto err_out_release;
11848 dev->mem_start = pci_resource_start(pdev, 0);
11849 dev->base_addr = dev->mem_start;
11850 dev->mem_end = pci_resource_end(pdev, 0);
11852 dev->irq = pdev->irq;
11854 bp->regview = pci_ioremap_bar(pdev, 0);
11855 if (!bp->regview) {
11856 printk(KERN_ERR PFX "Cannot map register space, aborting\n");
11857 rc = -ENOMEM;
11858 goto err_out_release;
11861 bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
11862 min_t(u64, BNX2X_DB_SIZE,
11863 pci_resource_len(pdev, 2)));
11864 if (!bp->doorbells) {
11865 printk(KERN_ERR PFX "Cannot map doorbell space, aborting\n");
11866 rc = -ENOMEM;
11867 goto err_out_unmap;
11870 bnx2x_set_power_state(bp, PCI_D0);
11872 /* clean indirect addresses */
11873 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
11874 PCICFG_VENDOR_ID_OFFSET);
11875 REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
11876 REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
11877 REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
11878 REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);
11880 dev->watchdog_timeo = TX_TIMEOUT;
11882 dev->netdev_ops = &bnx2x_netdev_ops;
11883 dev->ethtool_ops = &bnx2x_ethtool_ops;
11884 dev->features |= NETIF_F_SG;
11885 dev->features |= NETIF_F_HW_CSUM;
11886 if (bp->flags & USING_DAC_FLAG)
11887 dev->features |= NETIF_F_HIGHDMA;
11888 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
11889 dev->features |= NETIF_F_TSO6;
11890 #ifdef BCM_VLAN
11891 dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
11892 bp->flags |= (HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
11894 dev->vlan_features |= NETIF_F_SG;
11895 dev->vlan_features |= NETIF_F_HW_CSUM;
11896 if (bp->flags & USING_DAC_FLAG)
11897 dev->vlan_features |= NETIF_F_HIGHDMA;
11898 dev->vlan_features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
11899 dev->vlan_features |= NETIF_F_TSO6;
11900 #endif
11902 /* get_port_hwinfo() will set prtad and mmds properly */
11903 bp->mdio.prtad = MDIO_PRTAD_NONE;
11904 bp->mdio.mmds = 0;
11905 bp->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
11906 bp->mdio.dev = dev;
11907 bp->mdio.mdio_read = bnx2x_mdio_read;
11908 bp->mdio.mdio_write = bnx2x_mdio_write;
11910 return 0;
11912 err_out_unmap:
11913 if (bp->regview) {
11914 iounmap(bp->regview);
11915 bp->regview = NULL;
11917 if (bp->doorbells) {
11918 iounmap(bp->doorbells);
11919 bp->doorbells = NULL;
11922 err_out_release:
11923 if (atomic_read(&pdev->enable_cnt) == 1)
11924 pci_release_regions(pdev);
11926 err_out_disable:
11927 pci_disable_device(pdev);
11928 pci_set_drvdata(pdev, NULL);
11930 err_out:
11931 return rc;
11934 static void __devinit bnx2x_get_pcie_width_speed(struct bnx2x *bp,
11935 int *width, int *speed)
11937 u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
11939 *width = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
11941 /* return value of 1=2.5GHz 2=5GHz */
11942 *speed = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
11945 static int __devinit bnx2x_check_firmware(struct bnx2x *bp)
11947 const struct firmware *firmware = bp->firmware;
11948 struct bnx2x_fw_file_hdr *fw_hdr;
11949 struct bnx2x_fw_file_section *sections;
11950 u32 offset, len, num_ops;
11951 u16 *ops_offsets;
11952 int i;
11953 const u8 *fw_ver;
11955 if (firmware->size < sizeof(struct bnx2x_fw_file_hdr))
11956 return -EINVAL;
11958 fw_hdr = (struct bnx2x_fw_file_hdr *)firmware->data;
11959 sections = (struct bnx2x_fw_file_section *)fw_hdr;
11961 /* Make sure none of the offsets and sizes make us read beyond
11962 * the end of the firmware data */
11963 for (i = 0; i < sizeof(*fw_hdr) / sizeof(*sections); i++) {
11964 offset = be32_to_cpu(sections[i].offset);
11965 len = be32_to_cpu(sections[i].len);
11966 if (offset + len > firmware->size) {
11967 printk(KERN_ERR PFX "Section %d length is out of "
11968 "bounds\n", i);
11969 return -EINVAL;
11973 /* Likewise for the init_ops offsets */
11974 offset = be32_to_cpu(fw_hdr->init_ops_offsets.offset);
11975 ops_offsets = (u16 *)(firmware->data + offset);
11976 num_ops = be32_to_cpu(fw_hdr->init_ops.len) / sizeof(struct raw_op);
11978 for (i = 0; i < be32_to_cpu(fw_hdr->init_ops_offsets.len) / 2; i++) {
11979 if (be16_to_cpu(ops_offsets[i]) > num_ops) {
11980 printk(KERN_ERR PFX "Section offset %d is out of "
11981 "bounds\n", i);
11982 return -EINVAL;
11986 /* Check FW version */
11987 offset = be32_to_cpu(fw_hdr->fw_version.offset);
11988 fw_ver = firmware->data + offset;
11989 if ((fw_ver[0] != BCM_5710_FW_MAJOR_VERSION) ||
11990 (fw_ver[1] != BCM_5710_FW_MINOR_VERSION) ||
11991 (fw_ver[2] != BCM_5710_FW_REVISION_VERSION) ||
11992 (fw_ver[3] != BCM_5710_FW_ENGINEERING_VERSION)) {
11993 printk(KERN_ERR PFX "Bad FW version:%d.%d.%d.%d."
11994 " Should be %d.%d.%d.%d\n",
11995 fw_ver[0], fw_ver[1], fw_ver[2],
11996 fw_ver[3], BCM_5710_FW_MAJOR_VERSION,
11997 BCM_5710_FW_MINOR_VERSION,
11998 BCM_5710_FW_REVISION_VERSION,
11999 BCM_5710_FW_ENGINEERING_VERSION);
12000 return -EINVAL;
12003 return 0;
12006 static inline void be32_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
12008 const __be32 *source = (const __be32 *)_source;
12009 u32 *target = (u32 *)_target;
12010 u32 i;
12012 for (i = 0; i < n/4; i++)
12013 target[i] = be32_to_cpu(source[i]);
12017 Ops array is stored in the following format:
12018 {op(8bit), offset(24bit, big endian), data(32bit, big endian)}
12020 static inline void bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n)
12022 const __be32 *source = (const __be32 *)_source;
12023 struct raw_op *target = (struct raw_op *)_target;
12024 u32 i, j, tmp;
12026 for (i = 0, j = 0; i < n/8; i++, j += 2) {
12027 tmp = be32_to_cpu(source[j]);
12028 target[i].op = (tmp >> 24) & 0xff;
12029 target[i].offset = tmp & 0xffffff;
12030 target[i].raw_data = be32_to_cpu(source[j+1]);
12034 static inline void be16_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
12036 const __be16 *source = (const __be16 *)_source;
12037 u16 *target = (u16 *)_target;
12038 u32 i;
12040 for (i = 0; i < n/2; i++)
12041 target[i] = be16_to_cpu(source[i]);
12044 #define BNX2X_ALLOC_AND_SET(arr, lbl, func) \
12045 do { \
12046 u32 len = be32_to_cpu(fw_hdr->arr.len); \
12047 bp->arr = kmalloc(len, GFP_KERNEL); \
12048 if (!bp->arr) { \
12049 printk(KERN_ERR PFX "Failed to allocate %d bytes " \
12050 "for "#arr"\n", len); \
12051 goto lbl; \
12053 func(bp->firmware->data + be32_to_cpu(fw_hdr->arr.offset), \
12054 (u8 *)bp->arr, len); \
12055 } while (0)
12057 static int __devinit bnx2x_init_firmware(struct bnx2x *bp, struct device *dev)
12059 const char *fw_file_name;
12060 struct bnx2x_fw_file_hdr *fw_hdr;
12061 int rc;
12063 if (CHIP_IS_E1(bp))
12064 fw_file_name = FW_FILE_NAME_E1;
12065 else
12066 fw_file_name = FW_FILE_NAME_E1H;
12068 printk(KERN_INFO PFX "Loading %s\n", fw_file_name);
12070 rc = request_firmware(&bp->firmware, fw_file_name, dev);
12071 if (rc) {
12072 printk(KERN_ERR PFX "Can't load firmware file %s\n",
12073 fw_file_name);
12074 goto request_firmware_exit;
12077 rc = bnx2x_check_firmware(bp);
12078 if (rc) {
12079 printk(KERN_ERR PFX "Corrupt firmware file %s\n", fw_file_name);
12080 goto request_firmware_exit;
12083 fw_hdr = (struct bnx2x_fw_file_hdr *)bp->firmware->data;
12085 /* Initialize the pointers to the init arrays */
12086 /* Blob */
12087 BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit, be32_to_cpu_n);
12089 /* Opcodes */
12090 BNX2X_ALLOC_AND_SET(init_ops, init_ops_alloc_err, bnx2x_prep_ops);
12092 /* Offsets */
12093 BNX2X_ALLOC_AND_SET(init_ops_offsets, init_offsets_alloc_err,
12094 be16_to_cpu_n);
12096 /* STORMs firmware */
12097 INIT_TSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
12098 be32_to_cpu(fw_hdr->tsem_int_table_data.offset);
12099 INIT_TSEM_PRAM_DATA(bp) = bp->firmware->data +
12100 be32_to_cpu(fw_hdr->tsem_pram_data.offset);
12101 INIT_USEM_INT_TABLE_DATA(bp) = bp->firmware->data +
12102 be32_to_cpu(fw_hdr->usem_int_table_data.offset);
12103 INIT_USEM_PRAM_DATA(bp) = bp->firmware->data +
12104 be32_to_cpu(fw_hdr->usem_pram_data.offset);
12105 INIT_XSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
12106 be32_to_cpu(fw_hdr->xsem_int_table_data.offset);
12107 INIT_XSEM_PRAM_DATA(bp) = bp->firmware->data +
12108 be32_to_cpu(fw_hdr->xsem_pram_data.offset);
12109 INIT_CSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
12110 be32_to_cpu(fw_hdr->csem_int_table_data.offset);
12111 INIT_CSEM_PRAM_DATA(bp) = bp->firmware->data +
12112 be32_to_cpu(fw_hdr->csem_pram_data.offset);
12114 return 0;
12116 init_offsets_alloc_err:
12117 kfree(bp->init_ops);
12118 init_ops_alloc_err:
12119 kfree(bp->init_data);
12120 request_firmware_exit:
12121 release_firmware(bp->firmware);
12123 return rc;
12127 static int __devinit bnx2x_init_one(struct pci_dev *pdev,
12128 const struct pci_device_id *ent)
12130 struct net_device *dev = NULL;
12131 struct bnx2x *bp;
12132 int pcie_width, pcie_speed;
12133 int rc;
12135 /* dev zeroed in init_etherdev */
12136 dev = alloc_etherdev_mq(sizeof(*bp), MAX_CONTEXT);
12137 if (!dev) {
12138 printk(KERN_ERR PFX "Cannot allocate net device\n");
12139 return -ENOMEM;
12142 bp = netdev_priv(dev);
12143 bp->msglevel = debug;
12145 pci_set_drvdata(pdev, dev);
12147 rc = bnx2x_init_dev(pdev, dev);
12148 if (rc < 0) {
12149 free_netdev(dev);
12150 return rc;
12153 rc = bnx2x_init_bp(bp);
12154 if (rc)
12155 goto init_one_exit;
12157 /* Set init arrays */
12158 rc = bnx2x_init_firmware(bp, &pdev->dev);
12159 if (rc) {
12160 printk(KERN_ERR PFX "Error loading firmware\n");
12161 goto init_one_exit;
12164 rc = register_netdev(dev);
12165 if (rc) {
12166 dev_err(&pdev->dev, "Cannot register net device\n");
12167 goto init_one_exit;
12170 bnx2x_get_pcie_width_speed(bp, &pcie_width, &pcie_speed);
12171 printk(KERN_INFO "%s: %s (%c%d) PCI-E x%d %s found at mem %lx,"
12172 " IRQ %d, ", dev->name, board_info[ent->driver_data].name,
12173 (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
12174 pcie_width, (pcie_speed == 2) ? "5GHz (Gen2)" : "2.5GHz",
12175 dev->base_addr, bp->pdev->irq);
12176 printk(KERN_CONT "node addr %pM\n", dev->dev_addr);
12178 return 0;
12180 init_one_exit:
12181 if (bp->regview)
12182 iounmap(bp->regview);
12184 if (bp->doorbells)
12185 iounmap(bp->doorbells);
12187 free_netdev(dev);
12189 if (atomic_read(&pdev->enable_cnt) == 1)
12190 pci_release_regions(pdev);
12192 pci_disable_device(pdev);
12193 pci_set_drvdata(pdev, NULL);
12195 return rc;
12198 static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
12200 struct net_device *dev = pci_get_drvdata(pdev);
12201 struct bnx2x *bp;
12203 if (!dev) {
12204 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
12205 return;
12207 bp = netdev_priv(dev);
12209 unregister_netdev(dev);
12211 kfree(bp->init_ops_offsets);
12212 kfree(bp->init_ops);
12213 kfree(bp->init_data);
12214 release_firmware(bp->firmware);
12216 if (bp->regview)
12217 iounmap(bp->regview);
12219 if (bp->doorbells)
12220 iounmap(bp->doorbells);
12222 free_netdev(dev);
12224 if (atomic_read(&pdev->enable_cnt) == 1)
12225 pci_release_regions(pdev);
12227 pci_disable_device(pdev);
12228 pci_set_drvdata(pdev, NULL);
12231 static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
12233 struct net_device *dev = pci_get_drvdata(pdev);
12234 struct bnx2x *bp;
12236 if (!dev) {
12237 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
12238 return -ENODEV;
12240 bp = netdev_priv(dev);
12242 rtnl_lock();
12244 pci_save_state(pdev);
12246 if (!netif_running(dev)) {
12247 rtnl_unlock();
12248 return 0;
12251 netif_device_detach(dev);
12253 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
12255 bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
12257 rtnl_unlock();
12259 return 0;
12262 static int bnx2x_resume(struct pci_dev *pdev)
12264 struct net_device *dev = pci_get_drvdata(pdev);
12265 struct bnx2x *bp;
12266 int rc;
12268 if (!dev) {
12269 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
12270 return -ENODEV;
12272 bp = netdev_priv(dev);
12274 rtnl_lock();
12276 pci_restore_state(pdev);
12278 if (!netif_running(dev)) {
12279 rtnl_unlock();
12280 return 0;
12283 bnx2x_set_power_state(bp, PCI_D0);
12284 netif_device_attach(dev);
12286 rc = bnx2x_nic_load(bp, LOAD_OPEN);
12288 rtnl_unlock();
12290 return rc;
12293 static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
12295 int i;
12297 bp->state = BNX2X_STATE_ERROR;
12299 bp->rx_mode = BNX2X_RX_MODE_NONE;
12301 bnx2x_netif_stop(bp, 0);
12303 del_timer_sync(&bp->timer);
12304 bp->stats_state = STATS_STATE_DISABLED;
12305 DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
12307 /* Release IRQs */
12308 bnx2x_free_irq(bp, false);
12310 if (CHIP_IS_E1(bp)) {
12311 struct mac_configuration_cmd *config =
12312 bnx2x_sp(bp, mcast_config);
12314 for (i = 0; i < config->hdr.length; i++)
12315 CAM_INVALIDATE(config->config_table[i]);
12318 /* Free SKBs, SGEs, TPA pool and driver internals */
12319 bnx2x_free_skbs(bp);
12320 for_each_queue(bp, i)
12321 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
12322 for_each_queue(bp, i)
12323 netif_napi_del(&bnx2x_fp(bp, i, napi));
12324 bnx2x_free_mem(bp);
12326 bp->state = BNX2X_STATE_CLOSED;
12328 netif_carrier_off(bp->dev);
12330 return 0;
12333 static void bnx2x_eeh_recover(struct bnx2x *bp)
12335 u32 val;
12337 mutex_init(&bp->port.phy_mutex);
12339 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
12340 bp->link_params.shmem_base = bp->common.shmem_base;
12341 BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);
12343 if (!bp->common.shmem_base ||
12344 (bp->common.shmem_base < 0xA0000) ||
12345 (bp->common.shmem_base >= 0xC0000)) {
12346 BNX2X_DEV_INFO("MCP not active\n");
12347 bp->flags |= NO_MCP_FLAG;
12348 return;
12351 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
12352 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
12353 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
12354 BNX2X_ERR("BAD MCP validity signature\n");
12356 if (!BP_NOMCP(bp)) {
12357 bp->fw_seq = (SHMEM_RD(bp, func_mb[BP_FUNC(bp)].drv_mb_header)
12358 & DRV_MSG_SEQ_NUMBER_MASK);
12359 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
12364 * bnx2x_io_error_detected - called when PCI error is detected
12365 * @pdev: Pointer to PCI device
12366 * @state: The current pci connection state
12368 * This function is called after a PCI bus error affecting
12369 * this device has been detected.
12371 static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
12372 pci_channel_state_t state)
12374 struct net_device *dev = pci_get_drvdata(pdev);
12375 struct bnx2x *bp = netdev_priv(dev);
12377 rtnl_lock();
12379 netif_device_detach(dev);
12381 if (state == pci_channel_io_perm_failure) {
12382 rtnl_unlock();
12383 return PCI_ERS_RESULT_DISCONNECT;
12386 if (netif_running(dev))
12387 bnx2x_eeh_nic_unload(bp);
12389 pci_disable_device(pdev);
12391 rtnl_unlock();
12393 /* Request a slot reset */
12394 return PCI_ERS_RESULT_NEED_RESET;
12398 * bnx2x_io_slot_reset - called after the PCI bus has been reset
12399 * @pdev: Pointer to PCI device
12401 * Restart the card from scratch, as if from a cold-boot.
12403 static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
12405 struct net_device *dev = pci_get_drvdata(pdev);
12406 struct bnx2x *bp = netdev_priv(dev);
12408 rtnl_lock();
12410 if (pci_enable_device(pdev)) {
12411 dev_err(&pdev->dev,
12412 "Cannot re-enable PCI device after reset\n");
12413 rtnl_unlock();
12414 return PCI_ERS_RESULT_DISCONNECT;
12417 pci_set_master(pdev);
12418 pci_restore_state(pdev);
12420 if (netif_running(dev))
12421 bnx2x_set_power_state(bp, PCI_D0);
12423 rtnl_unlock();
12425 return PCI_ERS_RESULT_RECOVERED;
12429 * bnx2x_io_resume - called when traffic can start flowing again
12430 * @pdev: Pointer to PCI device
12432 * This callback is called when the error recovery driver tells us that
12433 * its OK to resume normal operation.
12435 static void bnx2x_io_resume(struct pci_dev *pdev)
12437 struct net_device *dev = pci_get_drvdata(pdev);
12438 struct bnx2x *bp = netdev_priv(dev);
12440 rtnl_lock();
12442 bnx2x_eeh_recover(bp);
12444 if (netif_running(dev))
12445 bnx2x_nic_load(bp, LOAD_NORMAL);
12447 netif_device_attach(dev);
12449 rtnl_unlock();
12452 static struct pci_error_handlers bnx2x_err_handler = {
12453 .error_detected = bnx2x_io_error_detected,
12454 .slot_reset = bnx2x_io_slot_reset,
12455 .resume = bnx2x_io_resume,
12458 static struct pci_driver bnx2x_pci_driver = {
12459 .name = DRV_MODULE_NAME,
12460 .id_table = bnx2x_pci_tbl,
12461 .probe = bnx2x_init_one,
12462 .remove = __devexit_p(bnx2x_remove_one),
12463 .suspend = bnx2x_suspend,
12464 .resume = bnx2x_resume,
12465 .err_handler = &bnx2x_err_handler,
12468 static int __init bnx2x_init(void)
12470 int ret;
12472 printk(KERN_INFO "%s", version);
12474 bnx2x_wq = create_singlethread_workqueue("bnx2x");
12475 if (bnx2x_wq == NULL) {
12476 printk(KERN_ERR PFX "Cannot create workqueue\n");
12477 return -ENOMEM;
12480 ret = pci_register_driver(&bnx2x_pci_driver);
12481 if (ret) {
12482 printk(KERN_ERR PFX "Cannot register driver\n");
12483 destroy_workqueue(bnx2x_wq);
12485 return ret;
12488 static void __exit bnx2x_cleanup(void)
12490 pci_unregister_driver(&bnx2x_pci_driver);
12492 destroy_workqueue(bnx2x_wq);
12495 module_init(bnx2x_init);
12496 module_exit(bnx2x_cleanup);
12498 #ifdef BCM_CNIC
12500 /* count denotes the number of new completions we have seen */
12501 static void bnx2x_cnic_sp_post(struct bnx2x *bp, int count)
12503 struct eth_spe *spe;
12505 #ifdef BNX2X_STOP_ON_ERROR
12506 if (unlikely(bp->panic))
12507 return;
12508 #endif
12510 spin_lock_bh(&bp->spq_lock);
12511 bp->cnic_spq_pending -= count;
12513 for (; bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending;
12514 bp->cnic_spq_pending++) {
12516 if (!bp->cnic_kwq_pending)
12517 break;
12519 spe = bnx2x_sp_get_next(bp);
12520 *spe = *bp->cnic_kwq_cons;
12522 bp->cnic_kwq_pending--;
12524 DP(NETIF_MSG_TIMER, "pending on SPQ %d, on KWQ %d count %d\n",
12525 bp->cnic_spq_pending, bp->cnic_kwq_pending, count);
12527 if (bp->cnic_kwq_cons == bp->cnic_kwq_last)
12528 bp->cnic_kwq_cons = bp->cnic_kwq;
12529 else
12530 bp->cnic_kwq_cons++;
12532 bnx2x_sp_prod_update(bp);
12533 spin_unlock_bh(&bp->spq_lock);
12536 static int bnx2x_cnic_sp_queue(struct net_device *dev,
12537 struct kwqe_16 *kwqes[], u32 count)
12539 struct bnx2x *bp = netdev_priv(dev);
12540 int i;
12542 #ifdef BNX2X_STOP_ON_ERROR
12543 if (unlikely(bp->panic))
12544 return -EIO;
12545 #endif
12547 spin_lock_bh(&bp->spq_lock);
12549 for (i = 0; i < count; i++) {
12550 struct eth_spe *spe = (struct eth_spe *)kwqes[i];
12552 if (bp->cnic_kwq_pending == MAX_SP_DESC_CNT)
12553 break;
12555 *bp->cnic_kwq_prod = *spe;
12557 bp->cnic_kwq_pending++;
12559 DP(NETIF_MSG_TIMER, "L5 SPQE %x %x %x:%x pos %d\n",
12560 spe->hdr.conn_and_cmd_data, spe->hdr.type,
12561 spe->data.mac_config_addr.hi,
12562 spe->data.mac_config_addr.lo,
12563 bp->cnic_kwq_pending);
12565 if (bp->cnic_kwq_prod == bp->cnic_kwq_last)
12566 bp->cnic_kwq_prod = bp->cnic_kwq;
12567 else
12568 bp->cnic_kwq_prod++;
12571 spin_unlock_bh(&bp->spq_lock);
12573 if (bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending)
12574 bnx2x_cnic_sp_post(bp, 0);
12576 return i;
12579 static int bnx2x_cnic_ctl_send(struct bnx2x *bp, struct cnic_ctl_info *ctl)
12581 struct cnic_ops *c_ops;
12582 int rc = 0;
12584 mutex_lock(&bp->cnic_mutex);
12585 c_ops = bp->cnic_ops;
12586 if (c_ops)
12587 rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
12588 mutex_unlock(&bp->cnic_mutex);
12590 return rc;
12593 static int bnx2x_cnic_ctl_send_bh(struct bnx2x *bp, struct cnic_ctl_info *ctl)
12595 struct cnic_ops *c_ops;
12596 int rc = 0;
12598 rcu_read_lock();
12599 c_ops = rcu_dereference(bp->cnic_ops);
12600 if (c_ops)
12601 rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
12602 rcu_read_unlock();
12604 return rc;
12608 * for commands that have no data
12610 static int bnx2x_cnic_notify(struct bnx2x *bp, int cmd)
12612 struct cnic_ctl_info ctl = {0};
12614 ctl.cmd = cmd;
12616 return bnx2x_cnic_ctl_send(bp, &ctl);
12619 static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid)
12621 struct cnic_ctl_info ctl;
12623 /* first we tell CNIC and only then we count this as a completion */
12624 ctl.cmd = CNIC_CTL_COMPLETION_CMD;
12625 ctl.data.comp.cid = cid;
12627 bnx2x_cnic_ctl_send_bh(bp, &ctl);
12628 bnx2x_cnic_sp_post(bp, 1);
12631 static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)
12633 struct bnx2x *bp = netdev_priv(dev);
12634 int rc = 0;
12636 switch (ctl->cmd) {
12637 case DRV_CTL_CTXTBL_WR_CMD: {
12638 u32 index = ctl->data.io.offset;
12639 dma_addr_t addr = ctl->data.io.dma_addr;
12641 bnx2x_ilt_wr(bp, index, addr);
12642 break;
12645 case DRV_CTL_COMPLETION_CMD: {
12646 int count = ctl->data.comp.comp_count;
12648 bnx2x_cnic_sp_post(bp, count);
12649 break;
12652 /* rtnl_lock is held. */
12653 case DRV_CTL_START_L2_CMD: {
12654 u32 cli = ctl->data.ring.client_id;
12656 bp->rx_mode_cl_mask |= (1 << cli);
12657 bnx2x_set_storm_rx_mode(bp);
12658 break;
12661 /* rtnl_lock is held. */
12662 case DRV_CTL_STOP_L2_CMD: {
12663 u32 cli = ctl->data.ring.client_id;
12665 bp->rx_mode_cl_mask &= ~(1 << cli);
12666 bnx2x_set_storm_rx_mode(bp);
12667 break;
12670 default:
12671 BNX2X_ERR("unknown command %x\n", ctl->cmd);
12672 rc = -EINVAL;
12675 return rc;
12678 static void bnx2x_setup_cnic_irq_info(struct bnx2x *bp)
12680 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
12682 if (bp->flags & USING_MSIX_FLAG) {
12683 cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
12684 cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
12685 cp->irq_arr[0].vector = bp->msix_table[1].vector;
12686 } else {
12687 cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
12688 cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
12690 cp->irq_arr[0].status_blk = bp->cnic_sb;
12691 cp->irq_arr[0].status_blk_num = CNIC_SB_ID(bp);
12692 cp->irq_arr[1].status_blk = bp->def_status_blk;
12693 cp->irq_arr[1].status_blk_num = DEF_SB_ID;
12695 cp->num_irq = 2;
12698 static int bnx2x_register_cnic(struct net_device *dev, struct cnic_ops *ops,
12699 void *data)
12701 struct bnx2x *bp = netdev_priv(dev);
12702 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
12704 if (ops == NULL)
12705 return -EINVAL;
12707 if (atomic_read(&bp->intr_sem) != 0)
12708 return -EBUSY;
12710 bp->cnic_kwq = kzalloc(PAGE_SIZE, GFP_KERNEL);
12711 if (!bp->cnic_kwq)
12712 return -ENOMEM;
12714 bp->cnic_kwq_cons = bp->cnic_kwq;
12715 bp->cnic_kwq_prod = bp->cnic_kwq;
12716 bp->cnic_kwq_last = bp->cnic_kwq + MAX_SP_DESC_CNT;
12718 bp->cnic_spq_pending = 0;
12719 bp->cnic_kwq_pending = 0;
12721 bp->cnic_data = data;
12723 cp->num_irq = 0;
12724 cp->drv_state = CNIC_DRV_STATE_REGD;
12726 bnx2x_init_sb(bp, bp->cnic_sb, bp->cnic_sb_mapping, CNIC_SB_ID(bp));
12728 bnx2x_setup_cnic_irq_info(bp);
12729 bnx2x_set_iscsi_eth_mac_addr(bp, 1);
12730 bp->cnic_flags |= BNX2X_CNIC_FLAG_MAC_SET;
12731 rcu_assign_pointer(bp->cnic_ops, ops);
12733 return 0;
12736 static int bnx2x_unregister_cnic(struct net_device *dev)
12738 struct bnx2x *bp = netdev_priv(dev);
12739 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
12741 mutex_lock(&bp->cnic_mutex);
12742 if (bp->cnic_flags & BNX2X_CNIC_FLAG_MAC_SET) {
12743 bp->cnic_flags &= ~BNX2X_CNIC_FLAG_MAC_SET;
12744 bnx2x_set_iscsi_eth_mac_addr(bp, 0);
12746 cp->drv_state = 0;
12747 rcu_assign_pointer(bp->cnic_ops, NULL);
12748 mutex_unlock(&bp->cnic_mutex);
12749 synchronize_rcu();
12750 kfree(bp->cnic_kwq);
12751 bp->cnic_kwq = NULL;
12753 return 0;
12756 struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev)
12758 struct bnx2x *bp = netdev_priv(dev);
12759 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
12761 cp->drv_owner = THIS_MODULE;
12762 cp->chip_id = CHIP_ID(bp);
12763 cp->pdev = bp->pdev;
12764 cp->io_base = bp->regview;
12765 cp->io_base2 = bp->doorbells;
12766 cp->max_kwqe_pending = 8;
12767 cp->ctx_blk_size = CNIC_CTX_PER_ILT * sizeof(union cdu_context);
12768 cp->ctx_tbl_offset = FUNC_ILT_BASE(BP_FUNC(bp)) + 1;
12769 cp->ctx_tbl_len = CNIC_ILT_LINES;
12770 cp->starting_cid = BCM_CNIC_CID_START;
12771 cp->drv_submit_kwqes_16 = bnx2x_cnic_sp_queue;
12772 cp->drv_ctl = bnx2x_drv_ctl;
12773 cp->drv_register_cnic = bnx2x_register_cnic;
12774 cp->drv_unregister_cnic = bnx2x_unregister_cnic;
12776 return cp;
12778 EXPORT_SYMBOL(bnx2x_cnic_probe);
12780 #endif /* BCM_CNIC */