drivers/net/bnx2x_main.c
/* bnx2x_main.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2009 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/device.h>  /* for dev_info() */
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/crc32c.h>
#include <linux/prefetch.h>
#include <linux/zlib.h>
#include <linux/io.h>
#include "bnx2x.h"
#include "bnx2x_init.h"
#include "bnx2x_init_ops.h"
#include "bnx2x_dump.h"

#define DRV_MODULE_VERSION	"1.48.105-1"
#define DRV_MODULE_RELDATE	"2009/04/22"
#define BNX2X_BC_VER		0x040200

#include <linux/firmware.h>
#include "bnx2x_fw_file_hdr.h"
/* FW files */
#define FW_FILE_PREFIX_E1	"bnx2x-e1-"
#define FW_FILE_PREFIX_E1H	"bnx2x-e1h-"

/* Time in jiffies before concluding the transmitter is hung */
#define TX_TIMEOUT		(5*HZ)
static char version[] __devinitdata =
	"Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
	DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Eliezer Tamir");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710/57711/57711E Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
static int multi_mode = 1;
module_param(multi_mode, int, 0);
MODULE_PARM_DESC(multi_mode, " Use per-CPU queues");

static int disable_tpa;
module_param(disable_tpa, int, 0);
MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");

static int int_mode;
module_param(int_mode, int, 0);
MODULE_PARM_DESC(int_mode, " Force interrupt mode (1 INT#x; 2 MSI)");

static int poll;
module_param(poll, int, 0);
MODULE_PARM_DESC(poll, " Use polling (for debug)");

static int mrrs = -1;
module_param(mrrs, int, 0);
MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");

static int debug;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, " Default debug msglevel");

static int load_count[3]; /* 0-common, 1-port0, 2-port1 */

static struct workqueue_struct *bnx2x_wq;
enum bnx2x_board_type {
	BCM57710 = 0,
	BCM57711 = 1,
	BCM57711E = 2,
};

/* indexed by board_type, above */
static struct {
	char *name;
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM57710 XGb" },
	{ "Broadcom NetXtreme II BCM57711 XGb" },
	{ "Broadcom NetXtreme II BCM57711E XGb" }
};

static const struct pci_device_id bnx2x_pci_tbl[] = {
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57710,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57710 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711E,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711E },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);
/****************************************************************************
* General service functions
****************************************************************************/

/* used only at init
 * locking is done by mcp
 */
static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
{
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
}

static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
{
	u32 val;

	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);

	return val;
}
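
/* The two helpers above implement the usual GRC window idiom: the
 * PCICFG_GRC_ADDRESS/PCICFG_GRC_DATA config-space pair acts as a sliding
 * window into internal register space, and the window is parked back at
 * PCICFG_VENDOR_ID_OFFSET so a stray config access hits a harmless offset.
 * Illustrative use (SOME_REG is a placeholder, not a real register name):
 *
 *	bnx2x_reg_wr_ind(bp, SOME_REG, 0x1);
 *	val = bnx2x_reg_rd_ind(bp, SOME_REG);
 */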
static const u32 dmae_reg_go_c[] = {
	DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
	DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
	DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
	DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
};

/* copy command into DMAE command memory and set DMAE command go */
static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
			    int idx)
{
	u32 cmd_offset;
	int i;

	cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
	for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
		REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));

		DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
		   idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
	}
	REG_WR(bp, dmae_reg_go_c[idx], 1);
}
void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
		      u32 len32)
{
	struct dmae_command *dmae = &bp->init_dmae;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = 200;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);

		DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x len32 %d)"
		   " using indirect\n", dst_addr, len32);
		bnx2x_init_ind_wr(bp, dst_addr, data, len32);
		return;
	}

	mutex_lock(&bp->dmae_mutex);

	memset(dmae, 0, sizeof(struct dmae_command));

	dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = U64_LO(dma_addr);
	dmae->src_addr_hi = U64_HI(dma_addr);
	dmae->dst_addr_lo = dst_addr >> 2;
	dmae->dst_addr_hi = 0;
	dmae->len = len32;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
	   DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
		    "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
	   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
	   dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, dst_addr,
	   dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);
	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	*wb_comp = 0;

	bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

	udelay(5);

	while (*wb_comp != DMAE_COMP_VAL) {
		DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);

		if (!cnt) {
			BNX2X_ERR("DMAE timeout!\n");
			break;
		}
		cnt--;
		/* adjust delay for emulation/FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			msleep(100);
		else
			udelay(5);
	}

	mutex_unlock(&bp->dmae_mutex);
}
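
/* A minimal usage sketch (not a call site from this file): to DMA two
 * dwords from the slowpath scratch buffer into GRC space one would do
 * roughly
 *
 *	bp->slowpath->wb_data[0] = val_hi;
 *	bp->slowpath->wb_data[1] = val_lo;
 *	bnx2x_write_dmae(bp, bnx2x_sp_mapping(bp, wb_data), dst_reg, 2);
 *
 * which is what the REG_WR_DMAE() wrapper and bnx2x_wb_wr() below boil
 * down to.
 */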
void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
{
	struct dmae_command *dmae = &bp->init_dmae;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = 200;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);
		int i;

		DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x len32 %d)"
		   " using indirect\n", src_addr, len32);
		for (i = 0; i < len32; i++)
			data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
		return;
	}

	mutex_lock(&bp->dmae_mutex);

	memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
	memset(dmae, 0, sizeof(struct dmae_command));

	dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = src_addr >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
	dmae->len = len32;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
	   DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
		    "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
	   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
	   dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, src_addr,
	   dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);

	*wb_comp = 0;

	bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

	udelay(5);

	while (*wb_comp != DMAE_COMP_VAL) {

		if (!cnt) {
			BNX2X_ERR("DMAE timeout!\n");
			break;
		}
		cnt--;
		/* adjust delay for emulation/FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			msleep(100);
		else
			udelay(5);
	}
	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	mutex_unlock(&bp->dmae_mutex);
}
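
/* Note: the read path mirrors the write path but always lands the result
 * in bp->slowpath->wb_data[]; callers (e.g. via the REG_RD_DMAE() wrapper)
 * are expected to copy the dwords out before the next DMAE operation
 * reuses the scratch buffer under bp->dmae_mutex.
 */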
/* used only for slowpath so not inlined */
static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
{
	u32 wb_write[2];

	wb_write[0] = val_hi;
	wb_write[1] = val_lo;
	REG_WR_DMAE(bp, reg, wb_write, 2);
}

#ifdef USE_WB_RD
static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
{
	u32 wb_data[2];

	REG_RD_DMAE(bp, reg, wb_data, 2);

	return HILO_U64(wb_data[0], wb_data[1]);
}
#endif
static int bnx2x_mc_assert(struct bnx2x *bp)
{
	char last_idx;
	int i, rc = 0;
	u32 row0, row1, row2, row3;

	/* XSTORM */
	last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
			   XSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* TSTORM */
	last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
			   TSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* CSTORM */
	last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
			   CSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* USTORM */
	last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
			   USTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	return rc;
}
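
/* Each STORM processor keeps a small array of assert entries, four dwords
 * apiece (row0..row3 above); a row0 of COMMON_ASM_INVALID_ASSERT_OPCODE
 * marks the first unused entry, so the scan stops there and rc counts how
 * many real asserts were found across all four STORMs.
 */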
static void bnx2x_fw_dump(struct bnx2x *bp)
{
	u32 mark, offset;
	__be32 data[9];
	int word;

	mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104);
	mark = ((mark + 0x3) & ~0x3);
	printk(KERN_ERR PFX "begin fw dump (mark 0x%x)\n" KERN_ERR, mark);

	for (offset = mark - 0x08000000; offset <= 0xF900; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
						  offset + 4*word));
		data[8] = 0x0;
		printk(KERN_CONT "%s", (char *)data);
	}
	for (offset = 0xF108; offset <= mark - 0x08000000; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
						  offset + 4*word));
		data[8] = 0x0;
		printk(KERN_CONT "%s", (char *)data);
	}
	printk("\n" KERN_ERR PFX "end of fw dump\n");
}
static void bnx2x_panic_dump(struct bnx2x *bp)
{
	int i;
	u16 j, start, end;

	bp->stats_state = STATS_STATE_DISABLED;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	BNX2X_ERR("begin crash dump -----------------\n");

	/* Indices */
	/* Common */
	BNX2X_ERR("def_c_idx(%u) def_u_idx(%u) def_x_idx(%u)"
		  " def_t_idx(%u) def_att_idx(%u) attn_state(%u)"
		  " spq_prod_idx(%u)\n",
		  bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
		  bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);

	/* Rx */
	for_each_rx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		BNX2X_ERR("fp%d: rx_bd_prod(%x) rx_bd_cons(%x)"
			  " *rx_bd_cons_sb(%x) rx_comp_prod(%x)"
			  " rx_comp_cons(%x) *rx_cons_sb(%x)\n",
			  i, fp->rx_bd_prod, fp->rx_bd_cons,
			  le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
			  fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
		BNX2X_ERR("      rx_sge_prod(%x) last_max_sge(%x)"
			  " fp_u_idx(%x) *sb_u_idx(%x)\n",
			  fp->rx_sge_prod, fp->last_max_sge,
			  le16_to_cpu(fp->fp_u_idx),
			  fp->status_blk->u_status_block.status_block_index);
	}

	/* Tx */
	for_each_tx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		struct eth_tx_db_data *hw_prods = fp->hw_tx_prods;

		BNX2X_ERR("fp%d: tx_pkt_prod(%x) tx_pkt_cons(%x)"
			  " tx_bd_prod(%x) tx_bd_cons(%x) *tx_cons_sb(%x)\n",
			  i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
			  fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
		BNX2X_ERR("      fp_c_idx(%x) *sb_c_idx(%x)"
			  " bd data(%x,%x)\n", le16_to_cpu(fp->fp_c_idx),
			  fp->status_blk->c_status_block.status_block_index,
			  hw_prods->packets_prod, hw_prods->bds_prod);
	}

	/* Rings */
	/* Rx */
	for_each_rx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
		end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
		for (j = start; j != end; j = RX_BD(j + 1)) {
			u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
			struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];

			BNX2X_ERR("fp%d: rx_bd[%x]=[%x:%x] sw_bd=[%p]\n",
				  i, j, rx_bd[1], rx_bd[0], sw_bd->skb);
		}

		start = RX_SGE(fp->rx_sge_prod);
		end = RX_SGE(fp->last_max_sge);
		for (j = start; j != end; j = RX_SGE(j + 1)) {
			u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
			struct sw_rx_page *sw_page = &fp->rx_page_ring[j];

			BNX2X_ERR("fp%d: rx_sge[%x]=[%x:%x] sw_page=[%p]\n",
				  i, j, rx_sge[1], rx_sge[0], sw_page->page);
		}

		start = RCQ_BD(fp->rx_comp_cons - 10);
		end = RCQ_BD(fp->rx_comp_cons + 503);
		for (j = start; j != end; j = RCQ_BD(j + 1)) {
			u32 *cqe = (u32 *)&fp->rx_comp_ring[j];

			BNX2X_ERR("fp%d: cqe[%x]=[%x:%x:%x:%x]\n",
				  i, j, cqe[0], cqe[1], cqe[2], cqe[3]);
		}
	}

	/* Tx */
	for_each_tx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
		end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
		for (j = start; j != end; j = TX_BD(j + 1)) {
			struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];

			BNX2X_ERR("fp%d: packet[%x]=[%p,%x]\n",
				  i, j, sw_bd->skb, sw_bd->first_bd);
		}

		start = TX_BD(fp->tx_bd_cons - 10);
		end = TX_BD(fp->tx_bd_cons + 254);
		for (j = start; j != end; j = TX_BD(j + 1)) {
			u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];

			BNX2X_ERR("fp%d: tx_bd[%x]=[%x:%x:%x:%x]\n",
				  i, j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
		}
	}

	bnx2x_fw_dump(bp);
	bnx2x_mc_assert(bp);
	BNX2X_ERR("end crash dump -----------------\n");
}
static void bnx2x_int_enable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;

	if (msix) {
		val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			 HC_CONFIG_0_REG_INT_LINE_EN_0);
		val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else if (msi) {
		val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else {
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_INT_LINE_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);

		DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
		   val, port, addr);

		REG_WR(bp, addr, val);

		val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
	}

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x) mode %s\n",
	   val, port, addr, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));

	REG_WR(bp, addr, val);

	if (CHIP_IS_E1H(bp)) {
		/* init leading/trailing edge */
		if (IS_E1HMF(bp)) {
			val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
			if (bp->port.pmf)
				/* enable nig and gpio3 attention */
				val |= 0x1100;
		} else
			val = 0xffff;

		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
	}
}
static void bnx2x_int_disable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);

	val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
		 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
		 HC_CONFIG_0_REG_INT_LINE_EN_0 |
		 HC_CONFIG_0_REG_ATTN_BIT_EN_0);

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
	   val, port, addr);

	/* flush all outstanding writes */
	mmiowb();

	REG_WR(bp, addr, val);
	if (REG_RD(bp, addr) != val)
		BNX2X_ERR("BUG! proper val not read from IGU!\n");
}
static void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
{
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int i, offset;

	/* disable interrupt handling */
	atomic_inc(&bp->intr_sem);
	if (disable_hw)
		/* prevent the HW from sending interrupts */
		bnx2x_int_disable(bp);

	/* make sure all ISRs are done */
	if (msix) {
		synchronize_irq(bp->msix_table[0].vector);
		offset = 1;
		for_each_queue(bp, i)
			synchronize_irq(bp->msix_table[i + offset].vector);
	} else
		synchronize_irq(bp->pdev->irq);

	/* make sure sp_task is not running */
	cancel_delayed_work(&bp->sp_task);
	flush_workqueue(bnx2x_wq);
}
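
/* The quiescing order above matters: bump intr_sem first so any ISR that
 * still fires exits early, optionally mask interrupts at the HC, then
 * synchronize_irq() to let in-flight handlers drain, and only then cancel
 * and flush the slowpath work so nothing can re-arm behind us.
 */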
/* fast path */

/*
 * General service functions
 */

static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
				u8 storm, u16 index, u8 op, u8 update)
{
	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
		       COMMAND_REG_INT_ACK);
	struct igu_ack_register igu_ack;

	igu_ack.status_block_index = index;
	igu_ack.sb_id_and_flags =
			((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
			 (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
			 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
			 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));

	DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n",
	   (*(u32 *)&igu_ack), hc_addr);
	REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));
}
static inline u16 bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
{
	struct host_status_block *fpsb = fp->status_blk;
	u16 rc = 0;

	barrier(); /* status block is written to by the chip */
	if (fp->fp_c_idx != fpsb->c_status_block.status_block_index) {
		fp->fp_c_idx = fpsb->c_status_block.status_block_index;
		rc |= 1;
	}
	if (fp->fp_u_idx != fpsb->u_status_block.status_block_index) {
		fp->fp_u_idx = fpsb->u_status_block.status_block_index;
		rc |= 2;
	}
	return rc;
}
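
/* The return value is a bitmask: bit 0 is set when the CSTORM (Tx) index
 * advanced, bit 1 when the USTORM (Rx) index advanced, so a non-zero
 * result tells the poller which half of the status block needs service.
 */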
static u16 bnx2x_ack_int(struct bnx2x *bp)
{
	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
		       COMMAND_REG_SIMD_MASK);
	u32 result = REG_RD(bp, hc_addr);

	DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
	   result, hc_addr);

	return result;
}
/*
 * fast path service functions
 */

static inline int bnx2x_has_tx_work(struct bnx2x_fastpath *fp)
{
	u16 tx_cons_sb;

	/* Tell compiler that status block fields can change */
	barrier();
	tx_cons_sb = le16_to_cpu(*fp->tx_cons_sb);
	return (fp->tx_pkt_cons != tx_cons_sb);
}

static inline int bnx2x_has_tx_work_unload(struct bnx2x_fastpath *fp)
{
	/* Tell compiler that consumer and producer can change */
	barrier();
	return (fp->tx_pkt_prod != fp->tx_pkt_cons);
}
/* free skb in the packet ring at pos idx
 * return idx of last bd freed
 */
static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			     u16 idx)
{
	struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
	struct eth_tx_bd *tx_bd;
	struct sk_buff *skb = tx_buf->skb;
	u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
	int nbd;

	DP(BNX2X_MSG_OFF, "pkt_idx %d buff @(%p)->skb %p\n",
	   idx, tx_buf, skb);

	/* unmap first bd */
	DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
	tx_bd = &fp->tx_desc_ring[bd_idx];
	pci_unmap_single(bp->pdev, BD_UNMAP_ADDR(tx_bd),
			 BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);

	nbd = le16_to_cpu(tx_bd->nbd) - 1;
	new_cons = nbd + tx_buf->first_bd;
#ifdef BNX2X_STOP_ON_ERROR
	if (nbd > (MAX_SKB_FRAGS + 2)) {
		BNX2X_ERR("BAD nbd!\n");
		bnx2x_panic();
	}
#endif

	/* Skip a parse bd and the TSO split header bd
	   since they have no mapping */
	if (nbd)
		bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	if (tx_bd->bd_flags.as_bitfield & (ETH_TX_BD_FLAGS_IP_CSUM |
					   ETH_TX_BD_FLAGS_TCP_CSUM |
					   ETH_TX_BD_FLAGS_SW_LSO)) {
		if (--nbd)
			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
		tx_bd = &fp->tx_desc_ring[bd_idx];
		/* is this a TSO split header bd? */
		if (tx_bd->bd_flags.as_bitfield & ETH_TX_BD_FLAGS_SW_LSO) {
			if (--nbd)
				bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
		}
	}

	/* now free frags */
	while (nbd > 0) {

		DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
		tx_bd = &fp->tx_desc_ring[bd_idx];
		pci_unmap_page(bp->pdev, BD_UNMAP_ADDR(tx_bd),
			       BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);
		if (--nbd)
			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* release skb */
	WARN_ON(!skb);
	dev_kfree_skb(skb);
	tx_buf->first_bd = 0;
	tx_buf->skb = NULL;

	return new_cons;
}
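
/* For reference, the BD chain for one packet as walked above: the first
 * BD maps the linear data, an optional parse BD follows when checksum or
 * LSO offload was requested, an optional TSO split-header BD after that,
 * and finally one BD per page fragment; tx_bd->nbd counts the whole
 * chain, which is why nbd is decremented as each class is skipped.
 */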
static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
{
	s16 used;
	u16 prod;
	u16 cons;

	barrier(); /* Tell compiler that prod and cons can change */
	prod = fp->tx_bd_prod;
	cons = fp->tx_bd_cons;

	/* NUM_TX_RINGS = number of "next-page" entries
	   It will be used as a threshold */
	used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;

#ifdef BNX2X_STOP_ON_ERROR
	WARN_ON(used < 0);
	WARN_ON(used > fp->bp->tx_ring_size);
	WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL);
#endif

	return (s16)(fp->bp->tx_ring_size) - used;
}
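
/* Worked example (invented numbers): with tx_bd_prod = 100,
 * tx_bd_cons = 90 and NUM_TX_RINGS = 2, used = 10 + 2 = 12 and the
 * function returns tx_ring_size - 12.  The NUM_TX_RINGS slack accounts
 * for the "next page" BDs, which occupy ring slots but can never carry
 * packet data.
 */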
static void bnx2x_tx_int(struct bnx2x_fastpath *fp)
{
	struct bnx2x *bp = fp->bp;
	struct netdev_queue *txq;
	u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
	int done = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return;
#endif

	txq = netdev_get_tx_queue(bp->dev, fp->index);
	hw_cons = le16_to_cpu(*fp->tx_cons_sb);
	sw_cons = fp->tx_pkt_cons;

	while (sw_cons != hw_cons) {
		u16 pkt_cons;

		pkt_cons = TX_BD(sw_cons);

		/* prefetch(bp->tx_buf_ring[pkt_cons].skb); */

		DP(NETIF_MSG_TX_DONE, "hw_cons %u sw_cons %u pkt_cons %u\n",
		   hw_cons, sw_cons, pkt_cons);

/*		if (NEXT_TX_IDX(sw_cons) != hw_cons) {
			rmb();
			prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
		}
*/
		bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
		sw_cons++;
		done++;
	}

	fp->tx_pkt_cons = sw_cons;
	fp->tx_bd_cons = bd_cons;

	/* TBD need a thresh? */
	if (unlikely(netif_tx_queue_stopped(txq))) {

		__netif_tx_lock(txq, smp_processor_id());

		/* Need to make the tx_bd_cons update visible to start_xmit()
		 * before checking for netif_tx_queue_stopped().  Without the
		 * memory barrier, there is a small possibility that
		 * start_xmit() will miss it and cause the queue to be stopped
		 * forever.
		 */
		smp_mb();

		if ((netif_tx_queue_stopped(txq)) &&
		    (bp->state == BNX2X_STATE_OPEN) &&
		    (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
			netif_tx_wake_queue(txq);

		__netif_tx_unlock(txq);
	}
}
static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
			   union eth_rx_cqe *rr_cqe)
{
	struct bnx2x *bp = fp->bp;
	int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
	int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);

	DP(BNX2X_MSG_SP,
	   "fp %d cid %d got ramrod #%d state is %x type is %d\n",
	   fp->index, cid, command, bp->state,
	   rr_cqe->ramrod_cqe.ramrod_type);

	bp->spq_left++;

	if (fp->index) {
		switch (command | fp->state) {
		case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
						BNX2X_FP_STATE_OPENING):
			DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
			   cid);
			fp->state = BNX2X_FP_STATE_OPEN;
			break;

		case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
			DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
			   cid);
			fp->state = BNX2X_FP_STATE_HALTED;
			break;

		default:
			BNX2X_ERR("unexpected MC reply (%d) "
				  "fp->state is %x\n", command, fp->state);
			break;
		}
		mb(); /* force bnx2x_wait_ramrod() to see the change */
		return;
	}

	switch (command | bp->state) {
	case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
		DP(NETIF_MSG_IFUP, "got setup ramrod\n");
		bp->state = BNX2X_STATE_OPEN;
		break;

	case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
		bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
		fp->state = BNX2X_FP_STATE_HALTED;
		break;

	case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
		bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
		break;

	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
		DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
		bp->set_mac_pending = 0;
		break;

	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
		break;

	default:
		BNX2X_ERR("unexpected MC reply (%d) bp->state is %x\n",
			  command, bp->state);
		break;
	}
	mb(); /* force bnx2x_wait_ramrod() to see the change */
}
static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct page *page = sw_buf->page;
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];

	/* Skip "next page" elements */
	if (!page)
		return;

	pci_unmap_page(bp->pdev, pci_unmap_addr(sw_buf, mapping),
		       SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
	__free_pages(page, PAGES_PER_SGE_SHIFT);

	sw_buf->page = NULL;
	sge->addr_hi = 0;
	sge->addr_lo = 0;
}

static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
					   struct bnx2x_fastpath *fp, int last)
{
	int i;

	for (i = 0; i < last; i++)
		bnx2x_free_rx_sge(bp, fp, i);
}
static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
	dma_addr_t mapping;

	if (unlikely(page == NULL))
		return -ENOMEM;

	mapping = pci_map_page(bp->pdev, page, 0, SGE_PAGE_SIZE*PAGES_PER_SGE,
			       PCI_DMA_FROMDEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		__free_pages(page, PAGES_PER_SGE_SHIFT);
		return -ENOMEM;
	}

	sw_buf->page = page;
	pci_unmap_addr_set(sw_buf, mapping, mapping);

	sge->addr_hi = cpu_to_le32(U64_HI(mapping));
	sge->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}
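
/* The allocate-map-publish order here is deliberate: the DMA address is
 * written into the hardware-visible SGE entry only after both the page
 * allocation and the mapping have succeeded, so an error path never
 * leaves a half-initialized descriptor for the chip to consume.
 * bnx2x_alloc_rx_skb() below follows the same pattern for regular BDs.
 */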
static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct sk_buff *skb;
	struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
	struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
	dma_addr_t mapping;

	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
	if (unlikely(skb == NULL))
		return -ENOMEM;

	mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_size,
				 PCI_DMA_FROMDEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		dev_kfree_skb(skb);
		return -ENOMEM;
	}

	rx_buf->skb = skb;
	pci_unmap_addr_set(rx_buf, mapping, mapping);

	rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}
/* note that we are not allocating a new skb,
 * we are just moving one from cons to prod
 * we are not creating a new mapping,
 * so there is no need to check for dma_mapping_error().
 */
static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
			       struct sk_buff *skb, u16 cons, u16 prod)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];

	pci_dma_sync_single_for_device(bp->pdev,
				       pci_unmap_addr(cons_rx_buf, mapping),
				       RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

	prod_rx_buf->skb = cons_rx_buf->skb;
	pci_unmap_addr_set(prod_rx_buf, mapping,
			   pci_unmap_addr(cons_rx_buf, mapping));
	*prod_bd = *cons_bd;
}
static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
					     u16 idx)
{
	u16 last_max = fp->last_max_sge;

	if (SUB_S16(idx, last_max) > 0)
		fp->last_max_sge = idx;
}

static void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
{
	int i, j;

	for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
		int idx = RX_SGE_CNT * i - 1;

		for (j = 0; j < 2; j++) {
			SGE_MASK_CLEAR_BIT(fp, idx);
			idx--;
		}
	}
}
static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
				  struct eth_fast_path_rx_cqe *fp_cqe)
{
	struct bnx2x *bp = fp->bp;
	u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
				     le16_to_cpu(fp_cqe->len_on_bd)) >>
		      SGE_PAGE_SHIFT;
	u16 last_max, last_elem, first_elem;
	u16 delta = 0;
	u16 i;

	if (!sge_len)
		return;

	/* First mark all used pages */
	for (i = 0; i < sge_len; i++)
		SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i])));

	DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
	   sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

	/* Here we assume that the last SGE index is the biggest */
	prefetch((void *)(fp->sge_mask));
	bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

	last_max = RX_SGE(fp->last_max_sge);
	last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
	first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;

	/* If ring is not full */
	if (last_elem + 1 != first_elem)
		last_elem++;

	/* Now update the prod */
	for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
		if (likely(fp->sge_mask[i]))
			break;

		fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
		delta += RX_SGE_MASK_ELEM_SZ;
	}

	if (delta > 0) {
		fp->rx_sge_prod += delta;
		/* clear page-end entries */
		bnx2x_clear_sge_mask_next_elems(fp);
	}

	DP(NETIF_MSG_RX_STATUS,
	   "fp->last_max_sge = %d fp->rx_sge_prod = %d\n",
	   fp->last_max_sge, fp->rx_sge_prod);
}
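
/* Rough walk-through (indices invented): if a CQE consumed SGEs 5..7,
 * those bits are cleared in sge_mask; once every bit of a 64-entry mask
 * element is clear the element is refilled to all-ones and rx_sge_prod
 * jumps forward by RX_SGE_MASK_ELEM_SZ, i.e. the producer only ever
 * advances in whole mask-element strides.
 */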
static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
{
	/* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
	memset(fp->sge_mask, 0xff,
	       (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));

	/* Clear the two last indices in the page to 1:
	   these are the indices that correspond to the "next" element,
	   hence will never be indicated and should be removed from
	   the calculations. */
	bnx2x_clear_sge_mask_next_elems(fp);
}
static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
			    struct sk_buff *skb, u16 cons, u16 prod)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
	dma_addr_t mapping;

	/* move empty skb from pool to prod and map it */
	prod_rx_buf->skb = fp->tpa_pool[queue].skb;
	mapping = pci_map_single(bp->pdev, fp->tpa_pool[queue].skb->data,
				 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
	pci_unmap_addr_set(prod_rx_buf, mapping, mapping);

	/* move partial skb from cons to pool (don't unmap yet) */
	fp->tpa_pool[queue] = *cons_rx_buf;

	/* mark bin state as start - print error if current state != stop */
	if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
		BNX2X_ERR("start of bin not in stop [%d]\n", queue);

	fp->tpa_state[queue] = BNX2X_TPA_START;

	/* point prod_bd to new skb */
	prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

#ifdef BNX2X_STOP_ON_ERROR
	fp->tpa_queue_used |= (1 << queue);
#ifdef __powerpc64__
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
#else
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
#endif
	   fp->tpa_queue_used);
#endif
}
static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			       struct sk_buff *skb,
			       struct eth_fast_path_rx_cqe *fp_cqe,
			       u16 cqe_idx)
{
	struct sw_rx_page *rx_pg, old_rx_pg;
	u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
	u32 i, frag_len, frag_size, pages;
	int err;
	int j;

	frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
	pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;

	/* This is needed in order to enable forwarding support */
	if (frag_size)
		skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE,
					       max(frag_size, (u32)len_on_bd));

#ifdef BNX2X_STOP_ON_ERROR
	if (pages >
	    min((u32)8, (u32)MAX_SKB_FRAGS) * SGE_PAGE_SIZE * PAGES_PER_SGE) {
		BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
			  pages, cqe_idx);
		BNX2X_ERR("fp_cqe->pkt_len = %d fp_cqe->len_on_bd = %d\n",
			  fp_cqe->pkt_len, len_on_bd);
		bnx2x_panic();
		return -EINVAL;
	}
#endif

	/* Run through the SGL and compose the fragmented skb */
	for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
		u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j]));

		/* FW gives the indices of the SGE as if the ring is an array
		   (meaning that the "next" element will consume 2 indices) */
		frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
		rx_pg = &fp->rx_page_ring[sge_idx];
		old_rx_pg = *rx_pg;

		/* If we fail to allocate a substitute page, we simply stop
		   where we are and drop the whole packet */
		err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
		if (unlikely(err)) {
			fp->eth_q_stats.rx_skb_alloc_failed++;
			return err;
		}

		/* Unmap the page as we are going to pass it to the stack */
		pci_unmap_page(bp->pdev, pci_unmap_addr(&old_rx_pg, mapping),
			       SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);

		/* Add one frag and update the appropriate fields in the skb */
		skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);

		skb->data_len += frag_len;
		skb->truesize += frag_len;
		skb->len += frag_len;

		frag_size -= frag_len;
	}

	return 0;
}
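
/* Sizing sketch (invented numbers): a 9000-byte aggregated packet with
 * len_on_bd = 1500 leaves frag_size = 7500; with 4K SGE pages that gives
 * pages = 2, filled as frags of 4096 and 3404 bytes, and gso_size is
 * capped at SGE_PAGE_SIZE so the stack can resegment the aggregate when
 * forwarding.
 */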
static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			   u16 queue, int pad, int len, union eth_rx_cqe *cqe,
			   u16 cqe_idx)
{
	struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
	struct sk_buff *skb = rx_buf->skb;
	/* alloc new skb */
	struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);

	/* Unmap skb in the pool anyway, as we are going to change
	   pool entry status to BNX2X_TPA_STOP even if new skb allocation
	   fails. */
	pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
			 bp->rx_buf_size, PCI_DMA_FROMDEVICE);

	if (likely(new_skb)) {
		/* fix ip xsum and give it to the stack */
		/* (no need to map the new skb) */
#ifdef BCM_VLAN
		int is_vlan_cqe =
			(le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
			 PARSING_FLAGS_VLAN);
		int is_not_hwaccel_vlan_cqe =
			(is_vlan_cqe && (!(bp->flags & HW_VLAN_RX_FLAG)));
#endif

		prefetch(skb);
		prefetch(((char *)(skb)) + 128);

#ifdef BNX2X_STOP_ON_ERROR
		if (pad + len > bp->rx_buf_size) {
			BNX2X_ERR("skb_put is about to fail... "
				  "pad %d len %d rx_buf_size %d\n",
				  pad, len, bp->rx_buf_size);
			bnx2x_panic();
			return;
		}
#endif

		skb_reserve(skb, pad);
		skb_put(skb, len);

		skb->protocol = eth_type_trans(skb, bp->dev);
		skb->ip_summed = CHECKSUM_UNNECESSARY;

		{
			struct iphdr *iph;

			iph = (struct iphdr *)skb->data;
#ifdef BCM_VLAN
			/* If there is no Rx VLAN offloading -
			   take VLAN tag into an account */
			if (unlikely(is_not_hwaccel_vlan_cqe))
				iph = (struct iphdr *)((u8 *)iph + VLAN_HLEN);
#endif
			iph->check = 0;
			iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
		}

		if (!bnx2x_fill_frag_skb(bp, fp, skb,
					 &cqe->fast_path_cqe, cqe_idx)) {
#ifdef BCM_VLAN
			if ((bp->vlgrp != NULL) && is_vlan_cqe &&
			    (!is_not_hwaccel_vlan_cqe))
				vlan_hwaccel_receive_skb(skb, bp->vlgrp,
						le16_to_cpu(cqe->fast_path_cqe.
							    vlan_tag));
			else
#endif
				netif_receive_skb(skb);
		} else {
			DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
			   " - dropping packet!\n");
			dev_kfree_skb(skb);
		}

		/* put new skb in bin */
		fp->tpa_pool[queue].skb = new_skb;

	} else {
		/* else drop the packet and keep the buffer in the bin */
		DP(NETIF_MSG_RX_STATUS,
		   "Failed to allocate new skb - dropping packet!\n");
		fp->eth_q_stats.rx_skb_alloc_failed++;
	}

	fp->tpa_state[queue] = BNX2X_TPA_STOP;
}
static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
					struct bnx2x_fastpath *fp,
					u16 bd_prod, u16 rx_comp_prod,
					u16 rx_sge_prod)
{
	struct ustorm_eth_rx_producers rx_prods = {0};
	int i;

	/* Update producers */
	rx_prods.bd_prod = bd_prod;
	rx_prods.cqe_prod = rx_comp_prod;
	rx_prods.sge_prod = rx_sge_prod;

	/*
	 * Make sure that the BD and SGE data is updated before updating the
	 * producers since FW might read the BD/SGE right after the producer
	 * is updated.
	 * This is only applicable for weak-ordered memory model archs such
	 * as IA-64.  The following barrier is also mandatory since FW assumes
	 * BDs must have buffers.
	 */
	wmb();

	for (i = 0; i < sizeof(struct ustorm_eth_rx_producers)/4; i++)
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_RX_PRODS_OFFSET(BP_PORT(bp), fp->cl_id) + i*4,
		       ((u32 *)&rx_prods)[i]);

	mmiowb(); /* keep prod updates ordered */

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]: wrote bd_prod %u cqe_prod %u sge_prod %u\n",
	   fp->index, bd_prod, rx_comp_prod, rx_sge_prod);
}
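
/* The producer triplet is written dword by dword into USTORM internal
 * memory rather than through a doorbell; the wmb() above makes the BD
 * and SGE contents globally visible first, and mmiowb() keeps the
 * producer writes ordered on architectures that need it.
 */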
static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
{
	struct bnx2x *bp = fp->bp;
	u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
	u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
	int rx_pkt = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return 0;
#endif

	/* CQ "next element" is of the size of the regular element,
	   that's why it's ok here */
	hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
	if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
		hw_comp_cons++;

	bd_cons = fp->rx_bd_cons;
	bd_prod = fp->rx_bd_prod;
	bd_prod_fw = bd_prod;
	sw_comp_cons = fp->rx_comp_cons;
	sw_comp_prod = fp->rx_comp_prod;

	/* Memory barrier necessary as speculative reads of the rx
	 * buffer can be ahead of the index in the status block
	 */
	rmb();

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]: hw_comp_cons %u sw_comp_cons %u\n",
	   fp->index, hw_comp_cons, sw_comp_cons);

	while (sw_comp_cons != hw_comp_cons) {
		struct sw_rx_bd *rx_buf = NULL;
		struct sk_buff *skb;
		union eth_rx_cqe *cqe;
		u8 cqe_fp_flags;
		u16 len, pad;

		comp_ring_cons = RCQ_BD(sw_comp_cons);
		bd_prod = RX_BD(bd_prod);
		bd_cons = RX_BD(bd_cons);

		cqe = &fp->rx_comp_ring[comp_ring_cons];
		cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;

		DP(NETIF_MSG_RX_STATUS, "CQE type %x err %x status %x"
		   " queue %x vlan %x len %u\n", CQE_TYPE(cqe_fp_flags),
		   cqe_fp_flags, cqe->fast_path_cqe.status_flags,
		   le32_to_cpu(cqe->fast_path_cqe.rss_hash_result),
		   le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
		   le16_to_cpu(cqe->fast_path_cqe.pkt_len));

		/* is this a slowpath msg? */
		if (unlikely(CQE_TYPE(cqe_fp_flags))) {
			bnx2x_sp_event(fp, cqe);
			goto next_cqe;

		/* this is an rx packet */
		} else {
			rx_buf = &fp->rx_buf_ring[bd_cons];
			skb = rx_buf->skb;
			len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
			pad = cqe->fast_path_cqe.placement_offset;

			/* If CQE is marked both TPA_START and TPA_END
			   it is a non-TPA CQE */
			if ((!fp->disable_tpa) &&
			    (TPA_TYPE(cqe_fp_flags) !=
					(TPA_TYPE_START | TPA_TYPE_END))) {
				u16 queue = cqe->fast_path_cqe.queue_index;

				if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
					DP(NETIF_MSG_RX_STATUS,
					   "calling tpa_start on queue %d\n",
					   queue);

					bnx2x_tpa_start(fp, queue, skb,
							bd_cons, bd_prod);
					goto next_rx;
				}

				if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
					DP(NETIF_MSG_RX_STATUS,
					   "calling tpa_stop on queue %d\n",
					   queue);

					if (!BNX2X_RX_SUM_FIX(cqe))
						BNX2X_ERR("STOP on non-TCP "
							  "data\n");

					/* This is the size of the linear data
					   on this skb */
					len = le16_to_cpu(cqe->fast_path_cqe.
								len_on_bd);
					bnx2x_tpa_stop(bp, fp, queue, pad,
						    len, cqe, comp_ring_cons);
#ifdef BNX2X_STOP_ON_ERROR
					if (bp->panic)
						return 0;
#endif

					bnx2x_update_sge_prod(fp,
							&cqe->fast_path_cqe);
					goto next_cqe;
				}
			}

			pci_dma_sync_single_for_device(bp->pdev,
					pci_unmap_addr(rx_buf, mapping),
						       pad + RX_COPY_THRESH,
						       PCI_DMA_FROMDEVICE);
			prefetch(skb);
			prefetch(((char *)(skb)) + 128);

			/* is this an error packet? */
			if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
				DP(NETIF_MSG_RX_ERR,
				   "ERROR flags %x rx packet %u\n",
				   cqe_fp_flags, sw_comp_cons);
				fp->eth_q_stats.rx_err_discard_pkt++;
				goto reuse_rx;
			}

			/* Since we don't have a jumbo ring
			 * copy small packets if mtu > 1500
			 */
			if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
			    (len <= RX_COPY_THRESH)) {
				struct sk_buff *new_skb;

				new_skb = netdev_alloc_skb(bp->dev,
							   len + pad);
				if (new_skb == NULL) {
					DP(NETIF_MSG_RX_ERR,
					   "ERROR packet dropped "
					   "because of alloc failure\n");
					fp->eth_q_stats.rx_skb_alloc_failed++;
					goto reuse_rx;
				}

				/* aligned copy */
				skb_copy_from_linear_data_offset(skb, pad,
						    new_skb->data + pad, len);
				skb_reserve(new_skb, pad);
				skb_put(new_skb, len);

				bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);

				skb = new_skb;

			} else if (bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0) {
				pci_unmap_single(bp->pdev,
					pci_unmap_addr(rx_buf, mapping),
						 bp->rx_buf_size,
						 PCI_DMA_FROMDEVICE);
				skb_reserve(skb, pad);
				skb_put(skb, len);

			} else {
				DP(NETIF_MSG_RX_ERR,
				   "ERROR packet dropped because "
				   "of alloc failure\n");
				fp->eth_q_stats.rx_skb_alloc_failed++;
reuse_rx:
				bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
				goto next_rx;
			}

			skb->protocol = eth_type_trans(skb, bp->dev);

			skb->ip_summed = CHECKSUM_NONE;
			if (bp->rx_csum) {
				if (likely(BNX2X_RX_CSUM_OK(cqe)))
					skb->ip_summed = CHECKSUM_UNNECESSARY;
				else
					fp->eth_q_stats.hw_csum_err++;
			}
		}

		skb_record_rx_queue(skb, fp->index);
#ifdef BCM_VLAN
		if ((bp->vlgrp != NULL) && (bp->flags & HW_VLAN_RX_FLAG) &&
		    (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
		     PARSING_FLAGS_VLAN))
			vlan_hwaccel_receive_skb(skb, bp->vlgrp,
				le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
		else
#endif
			netif_receive_skb(skb);

next_rx:
		rx_buf->skb = NULL;

		bd_cons = NEXT_RX_IDX(bd_cons);
		bd_prod = NEXT_RX_IDX(bd_prod);
		bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
		rx_pkt++;
next_cqe:
		sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
		sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);

		if (rx_pkt == budget)
			break;
	} /* while */

	fp->rx_bd_cons = bd_cons;
	fp->rx_bd_prod = bd_prod_fw;
	fp->rx_comp_cons = sw_comp_cons;
	fp->rx_comp_prod = sw_comp_prod;

	/* Update producers */
	bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
			     fp->rx_sge_prod);

	fp->rx_pkt += rx_pkt;
	fp->rx_calls++;

	return rx_pkt;
}
static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
{
	struct bnx2x_fastpath *fp = fp_cookie;
	struct bnx2x *bp = fp->bp;
	int index = fp->index;

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

	DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
	   index, fp->sb_id);
	bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	prefetch(fp->rx_cons_sb);
	prefetch(fp->tx_cons_sb);
	prefetch(&fp->status_blk->c_status_block.status_block_index);
	prefetch(&fp->status_blk->u_status_block.status_block_index);

	napi_schedule(&bnx2x_fp(bp, index, napi));

	return IRQ_HANDLED;
}
static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
{
	struct bnx2x *bp = netdev_priv(dev_instance);
	u16 status = bnx2x_ack_int(bp);
	u16 mask;

	/* Return here if interrupt is shared and it's not for us */
	if (unlikely(status == 0)) {
		DP(NETIF_MSG_INTR, "not our interrupt!\n");
		return IRQ_NONE;
	}
	DP(NETIF_MSG_INTR, "got an interrupt status 0x%x\n", status);

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	mask = 0x2 << bp->fp[0].sb_id;
	if (status & mask) {
		struct bnx2x_fastpath *fp = &bp->fp[0];

		prefetch(fp->rx_cons_sb);
		prefetch(fp->tx_cons_sb);
		prefetch(&fp->status_blk->c_status_block.status_block_index);
		prefetch(&fp->status_blk->u_status_block.status_block_index);

		napi_schedule(&bnx2x_fp(bp, 0, napi));

		status &= ~mask;
	}

	if (unlikely(status & 0x1)) {
		queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);

		status &= ~0x1;
		if (!status)
			return IRQ_HANDLED;
	}

	if (status)
		DP(NETIF_MSG_INTR, "got an unknown interrupt! (status %u)\n",
		   status);

	return IRQ_HANDLED;
}
/* end of fast path */

static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);

/* Link */

/*
 * General service functions
 */
static int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	int func = BP_FUNC(bp);
	u32 hw_lock_control_reg;
	int cnt;

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		DP(NETIF_MSG_HW,
		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return -EINVAL;
	}

	if (func <= 5) {
		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
	} else {
		hw_lock_control_reg =
				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
	}

	/* Validating that the resource is not already taken */
	lock_status = REG_RD(bp, hw_lock_control_reg);
	if (lock_status & resource_bit) {
		DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
		   lock_status, resource_bit);
		return -EEXIST;
	}

	/* Try for 5 seconds, every 5ms */
	for (cnt = 0; cnt < 1000; cnt++) {
		/* Try to acquire the lock */
		REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
		lock_status = REG_RD(bp, hw_lock_control_reg);
		if (lock_status & resource_bit)
			return 0;

		msleep(5);
	}
	DP(NETIF_MSG_HW, "Timeout\n");
	return -EAGAIN;
}
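
/* Typical pairing (a sketch of the pattern, as used by the GPIO/SPIO
 * helpers below):
 *
 *	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
 *	... touch the register shared with other functions/firmware ...
 *	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
 */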
static int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	int func = BP_FUNC(bp);
	u32 hw_lock_control_reg;

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		DP(NETIF_MSG_HW,
		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return -EINVAL;
	}

	if (func <= 5) {
		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
	} else {
		hw_lock_control_reg =
				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
	}

	/* Validating that the resource is currently taken */
	lock_status = REG_RD(bp, hw_lock_control_reg);
	if (!(lock_status & resource_bit)) {
		DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
		   lock_status, resource_bit);
		return -EFAULT;
	}

	REG_WR(bp, hw_lock_control_reg, resource_bit);
	return 0;
}
/* HW Lock for shared dual port PHYs */
static void bnx2x_acquire_phy_lock(struct bnx2x *bp)
{
	mutex_lock(&bp->port.phy_mutex);

	if (bp->port.need_hw_lock)
		bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
}

static void bnx2x_release_phy_lock(struct bnx2x *bp)
{
	if (bp->port.need_hw_lock)
		bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);

	mutex_unlock(&bp->port.phy_mutex);
}
int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;
	int value;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	/* read GPIO value */
	gpio_reg = REG_RD(bp, MISC_REG_GPIO);

	/* get the requested pin value */
	if ((gpio_reg & gpio_mask) == gpio_mask)
		value = 1;
	else
		value = 0;

	DP(NETIF_MSG_LINK, "pin %d value 0x%x\n", gpio_num, value);

	return value;
}
int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
	/* read GPIO and mask except the float bits */
	gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);

	switch (mode) {
	case MISC_REGISTERS_GPIO_OUTPUT_LOW:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
		   gpio_num, gpio_shift);
		/* clear FLOAT and set CLR */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
		break;

	case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
		   gpio_num, gpio_shift);
		/* clear FLOAT and set SET */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
		break;

	case MISC_REGISTERS_GPIO_INPUT_HI_Z:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
		   gpio_num, gpio_shift);
		/* set FLOAT */
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_GPIO, gpio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);

	return 0;
}
int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
	/* read GPIO int */
	gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);

	switch (mode) {
	case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
		DP(NETIF_MSG_LINK, "Clear GPIO INT %d (shift %d) -> "
				   "output low\n", gpio_num, gpio_shift);
		/* clear SET and set CLR */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
		break;

	case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
		DP(NETIF_MSG_LINK, "Set GPIO INT %d (shift %d) -> "
				   "output high\n", gpio_num, gpio_shift);
		/* clear CLR and set SET */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);

	return 0;
}
static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
{
	u32 spio_mask = (1 << spio_num);
	u32 spio_reg;

	if ((spio_num < MISC_REGISTERS_SPIO_4) ||
	    (spio_num > MISC_REGISTERS_SPIO_7)) {
		BNX2X_ERR("Invalid SPIO %d\n", spio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
	/* read SPIO and mask except the float bits */
	spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);

	switch (mode) {
	case MISC_REGISTERS_SPIO_OUTPUT_LOW:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
		/* clear FLOAT and set CLR */
		spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
		break;

	case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
		/* clear FLOAT and set SET */
		spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
		break;

	case MISC_REGISTERS_SPIO_INPUT_HI_Z:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
		/* set FLOAT */
		spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_SPIO, spio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);

	return 0;
}
2038 static void bnx2x_calc_fc_adv(struct bnx2x *bp)
2040 switch (bp->link_vars.ieee_fc &
2041 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
2042 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
2043 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
2044 ADVERTISED_Pause);
2045 break;
2047 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
2048 bp->port.advertising |= (ADVERTISED_Asym_Pause |
2049 ADVERTISED_Pause);
2050 break;
2052 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
2053 bp->port.advertising |= ADVERTISED_Asym_Pause;
2054 break;
2056 default:
2057 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
2058 ADVERTISED_Pause);
2059 break;
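/* Summary of the switch above: the pause mode taken from the IEEE
 * autoneg advertisement translates to the ethtool flags as
 *
 *	PAUSE_NONE       -> clear ADVERTISED_Pause and ADVERTISED_Asym_Pause
 *	PAUSE_BOTH       -> set ADVERTISED_Pause | ADVERTISED_Asym_Pause
 *	PAUSE_ASYMMETRIC -> set ADVERTISED_Asym_Pause only
 */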
2063 static void bnx2x_link_report(struct bnx2x *bp)
2065 if (bp->link_vars.link_up) {
2066 if (bp->state == BNX2X_STATE_OPEN)
2067 netif_carrier_on(bp->dev);
2068 printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);
2070 printk("%d Mbps ", bp->link_vars.line_speed);
2072 if (bp->link_vars.duplex == DUPLEX_FULL)
2073 printk("full duplex");
2074 else
2075 printk("half duplex");
2077 if (bp->link_vars.flow_ctrl != BNX2X_FLOW_CTRL_NONE) {
2078 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) {
2079 printk(", receive ");
2080 if (bp->link_vars.flow_ctrl &
2081 BNX2X_FLOW_CTRL_TX)
2082 printk("& transmit ");
2083 } else {
2084 printk(", transmit ");
2086 printk("flow control ON");
2088 printk("\n");
2090 } else { /* link_down */
2091 netif_carrier_off(bp->dev);
2092 printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
2096 static u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
2098 if (!BP_NOMCP(bp)) {
2099 u8 rc;
2101 /* Initialize link parameters structure variables */
2102 /* It is recommended to turn off RX FC for jumbo frames
2103 for better performance */
2104 if (IS_E1HMF(bp))
2105 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
2106 else if (bp->dev->mtu > 5000)
2107 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
2108 else
2109 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
2111 bnx2x_acquire_phy_lock(bp);
2113 if (load_mode == LOAD_DIAG)
2114 bp->link_params.loopback_mode = LOOPBACK_XGXS_10;
2116 rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
2118 bnx2x_release_phy_lock(bp);
2120 bnx2x_calc_fc_adv(bp);
2122 if (CHIP_REV_IS_SLOW(bp) && bp->link_vars.link_up) {
2123 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2124 bnx2x_link_report(bp);
2127 return rc;
2129 BNX2X_ERR("Bootcode is missing - cannot initialize link\n");
2130 return -EINVAL;
2133 static void bnx2x_link_set(struct bnx2x *bp)
2135 if (!BP_NOMCP(bp)) {
2136 bnx2x_acquire_phy_lock(bp);
2137 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
2138 bnx2x_release_phy_lock(bp);
2140 bnx2x_calc_fc_adv(bp);
2141 } else
2142 BNX2X_ERR("Bootcode is missing - cannot set link\n");
2145 static void bnx2x__link_reset(struct bnx2x *bp)
2147 if (!BP_NOMCP(bp)) {
2148 bnx2x_acquire_phy_lock(bp);
2149 bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
2150 bnx2x_release_phy_lock(bp);
2151 } else
2152 BNX2X_ERR("Bootcode is missing - cannot reset link\n");
2155 static u8 bnx2x_link_test(struct bnx2x *bp)
2157 u8 rc;
2159 bnx2x_acquire_phy_lock(bp);
2160 rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
2161 bnx2x_release_phy_lock(bp);
2163 return rc;
2166 static void bnx2x_init_port_minmax(struct bnx2x *bp)
2168 u32 r_param = bp->link_vars.line_speed / 8;
2169 u32 fair_periodic_timeout_usec;
2170 u32 t_fair;
2172 memset(&(bp->cmng.rs_vars), 0,
2173 sizeof(struct rate_shaping_vars_per_port));
2174 memset(&(bp->cmng.fair_vars), 0, sizeof(struct fairness_vars_per_port));
2176 /* 100 usec in SDM ticks = 25 since each tick is 4 usec */
2177 bp->cmng.rs_vars.rs_periodic_timeout = RS_PERIODIC_TIMEOUT_USEC / 4;
2179 /* this is the threshold below which no timer arming will occur;
2180 the 1.25 coefficient makes the threshold a little bigger than
2181 the real time, to compensate for timer inaccuracy */
2182 bp->cmng.rs_vars.rs_threshold =
2183 (RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;
2185 /* resolution of fairness timer */
2186 fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
2187 /* for 10G it is 1000 usec, for 1G it is 10000 usec */
2188 t_fair = T_FAIR_COEF / bp->link_vars.line_speed;
2190 /* this is the threshold below which we won't arm the timer anymore */
2191 bp->cmng.fair_vars.fair_threshold = QM_ARB_BYTES;
2193 /* we multiply by 1e3/8 to get bytes/msec.
2194 We don't want the credits to exceed
2195 t_fair*FAIR_MEM (the algorithm resolution) */
2196 bp->cmng.fair_vars.upper_bound = r_param * t_fair * FAIR_MEM;
2197 /* since each tick is 4 usec */
2198 bp->cmng.fair_vars.fairness_timeout = fair_periodic_timeout_usec / 4;
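/* Worked example, assuming a 10G link (line_speed == 10000 Mbps):
 *
 *	r_param = 10000 / 8 = 1250 bytes per usec
 *	fair_periodic_timeout_usec = QM_ARB_BYTES / 1250
 *	t_fair = T_FAIR_COEF / 10000, i.e. the 1000 usec noted above
 *	upper_bound = 1250 * t_fair * FAIR_MEM
 */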
2201 static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func)
2203 struct rate_shaping_vars_per_vn m_rs_vn;
2204 struct fairness_vars_per_vn m_fair_vn;
2205 u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2206 u16 vn_min_rate, vn_max_rate;
2207 int i;
2209 /* If function is hidden - set min and max to zeroes */
2210 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
2211 vn_min_rate = 0;
2212 vn_max_rate = 0;
2214 } else {
2215 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2216 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2217 /* If fairness is enabled (i.e. not all min rates are zero) and
2218 the current min rate is zero, set it to 1.
2219 This is a requirement of the algorithm. */
2220 if (bp->vn_weight_sum && (vn_min_rate == 0))
2221 vn_min_rate = DEF_MIN_RATE;
2222 vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
2223 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
2226 DP(NETIF_MSG_IFUP,
2227 "func %d: vn_min_rate=%d vn_max_rate=%d vn_weight_sum=%d\n",
2228 func, vn_min_rate, vn_max_rate, bp->vn_weight_sum);
2230 memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
2231 memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));
2233 /* global vn counter - maximal Mbps for this vn */
2234 m_rs_vn.vn_counter.rate = vn_max_rate;
2236 /* quota - number of bytes transmitted in this period */
2237 m_rs_vn.vn_counter.quota =
2238 (vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;
2240 if (bp->vn_weight_sum) {
2241 /* credit for each period of the fairness algorithm:
2242 number of bytes in T_FAIR (the VNs share the port rate).
2243 vn_weight_sum should not be larger than 10000, thus
2244 T_FAIR_COEF / (8 * vn_weight_sum) will always be greater
2245 than zero */
2246 m_fair_vn.vn_credit_delta =
2247 max((u32)(vn_min_rate * (T_FAIR_COEF /
2248 (8 * bp->vn_weight_sum))),
2249 (u32)(bp->cmng.fair_vars.fair_threshold * 2));
2250 DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta=%d\n",
2251 m_fair_vn.vn_credit_delta);
2254 /* Store it to internal memory */
2255 for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
2256 REG_WR(bp, BAR_XSTRORM_INTMEM +
2257 XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
2258 ((u32 *)(&m_rs_vn))[i]);
2260 for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
2261 REG_WR(bp, BAR_XSTRORM_INTMEM +
2262 XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
2263 ((u32 *)(&m_fair_vn))[i]);
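/* Numeric sketch of the credit computation above (values are only
 * illustrative): with vn_min_rate == 1000 and vn_weight_sum == 4000,
 *
 *	vn_credit_delta = max(1000 * (T_FAIR_COEF / (8 * 4000)),
 *			      2 * fair_threshold)
 *
 * and since fair_threshold was set to QM_ARB_BYTES in
 * bnx2x_init_port_minmax(), a vn is never granted less than twice the
 * arbitration quantum per fairness period.
 */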
2267 /* This function is called upon link interrupt */
2268 static void bnx2x_link_attn(struct bnx2x *bp)
2270 /* Make sure that we are synced with the current statistics */
2271 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2273 bnx2x_link_update(&bp->link_params, &bp->link_vars);
2275 if (bp->link_vars.link_up) {
2277 /* dropless flow control */
2278 if (CHIP_IS_E1H(bp)) {
2279 int port = BP_PORT(bp);
2280 u32 pause_enabled = 0;
2282 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
2283 pause_enabled = 1;
2285 REG_WR(bp, BAR_USTRORM_INTMEM +
2286 USTORM_PAUSE_ENABLED_OFFSET(port),
2287 pause_enabled);
2290 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
2291 struct host_port_stats *pstats;
2293 pstats = bnx2x_sp(bp, port_stats);
2294 /* reset old bmac stats */
2295 memset(&(pstats->mac_stx[0]), 0,
2296 sizeof(struct mac_stx));
2298 if ((bp->state == BNX2X_STATE_OPEN) ||
2299 (bp->state == BNX2X_STATE_DISABLED))
2300 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2303 /* indicate link status */
2304 bnx2x_link_report(bp);
2306 if (IS_E1HMF(bp)) {
2307 int port = BP_PORT(bp);
2308 int func;
2309 int vn;
2311 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2312 if (vn == BP_E1HVN(bp))
2313 continue;
2315 func = ((vn << 1) | port);
2317 /* Set the attention towards other drivers
2318 on the same port */
2319 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2320 (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2323 if (bp->link_vars.link_up) {
2324 int i;
2326 /* Init rate shaping and fairness contexts */
2327 bnx2x_init_port_minmax(bp);
2329 for (vn = VN_0; vn < E1HVN_MAX; vn++)
2330 bnx2x_init_vn_minmax(bp, 2*vn + port);
2332 /* Store it to internal memory */
2333 for (i = 0;
2334 i < sizeof(struct cmng_struct_per_port) / 4; i++)
2335 REG_WR(bp, BAR_XSTRORM_INTMEM +
2336 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
2337 ((u32 *)(&bp->cmng))[i]);
2342 static void bnx2x__link_status_update(struct bnx2x *bp)
2344 if (bp->state != BNX2X_STATE_OPEN)
2345 return;
2347 bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
2349 if (bp->link_vars.link_up)
2350 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2351 else
2352 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2354 /* indicate link status */
2355 bnx2x_link_report(bp);
2358 static void bnx2x_pmf_update(struct bnx2x *bp)
2360 int port = BP_PORT(bp);
2361 u32 val;
2363 bp->port.pmf = 1;
2364 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2366 /* enable nig attention */
2367 val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
2368 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
2369 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
2371 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
2374 /* end of Link */
2376 /* slow path */
2378 /*
2379  * General service functions
2380  */
2382 /* the slow path queue is odd since completions arrive on the fastpath ring */
2383 static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
2384 u32 data_hi, u32 data_lo, int common)
2386 int func = BP_FUNC(bp);
2388 DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
2389 "SPQE (%x:%x) command %d hw_cid %x data (%x:%x) left %x\n",
2390 (u32)U64_HI(bp->spq_mapping), (u32)(U64_LO(bp->spq_mapping) +
2391 (void *)bp->spq_prod_bd - (void *)bp->spq), command,
2392 HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);
2394 #ifdef BNX2X_STOP_ON_ERROR
2395 if (unlikely(bp->panic))
2396 return -EIO;
2397 #endif
2399 spin_lock_bh(&bp->spq_lock);
2401 if (!bp->spq_left) {
2402 BNX2X_ERR("BUG! SPQ ring full!\n");
2403 spin_unlock_bh(&bp->spq_lock);
2404 bnx2x_panic();
2405 return -EBUSY;
2408 /* CID needs port number to be encoded in it */
2409 bp->spq_prod_bd->hdr.conn_and_cmd_data =
2410 cpu_to_le32(((command << SPE_HDR_CMD_ID_SHIFT) |
2411 HW_CID(bp, cid)));
2412 bp->spq_prod_bd->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
2413 if (common)
2414 bp->spq_prod_bd->hdr.type |=
2415 cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));
2417 bp->spq_prod_bd->data.mac_config_addr.hi = cpu_to_le32(data_hi);
2418 bp->spq_prod_bd->data.mac_config_addr.lo = cpu_to_le32(data_lo);
2420 bp->spq_left--;
2422 if (bp->spq_prod_bd == bp->spq_last_bd) {
2423 bp->spq_prod_bd = bp->spq;
2424 bp->spq_prod_idx = 0;
2425 DP(NETIF_MSG_TIMER, "end of spq\n");
2427 } else {
2428 bp->spq_prod_bd++;
2429 bp->spq_prod_idx++;
2432 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
2433 bp->spq_prod_idx);
2435 spin_unlock_bh(&bp->spq_lock);
2436 return 0;
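/* Typical call (see bnx2x_storm_stats_post() below): post a statistics
 * query ramrod on CID 0, with the ramrod data passed as hi/lo words:
 *
 *	rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
 *			   ((u32 *)&ramrod_data)[1],
 *			   ((u32 *)&ramrod_data)[0], 0);
 */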
2439 /* acquire split MCP access lock register */
2440 static int bnx2x_acquire_alr(struct bnx2x *bp)
2442 u32 i, j, val;
2443 int rc = 0;
2445 might_sleep();
2446 i = 100;
2447 for (j = 0; j < i*10; j++) {
2448 val = (1UL << 31);
2449 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2450 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
2451 if (val & (1L << 31))
2452 break;
2454 msleep(5);
2456 if (!(val & (1L << 31))) {
2457 BNX2X_ERR("Cannot acquire MCP access lock register\n");
2458 rc = -EBUSY;
2461 return rc;
2464 /* release split MCP access lock register */
2465 static void bnx2x_release_alr(struct bnx2x *bp)
2467 u32 val = 0;
2469 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
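/* Pairing sketch: bnx2x_attn_int_deasserted() below brackets its AEU
 * reads with these helpers so that neither the MCP nor the other port
 * races on the shared after-invert registers:
 *
 *	bnx2x_acquire_alr(bp);
 *	attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
 *	...
 *	bnx2x_release_alr(bp);
 */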
2472 static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
2474 struct host_def_status_block *def_sb = bp->def_status_blk;
2475 u16 rc = 0;
2477 barrier(); /* status block is written to by the chip */
2478 if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
2479 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
2480 rc |= 1;
2482 if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
2483 bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
2484 rc |= 2;
2486 if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
2487 bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
2488 rc |= 4;
2490 if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
2491 bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
2492 rc |= 8;
2494 if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
2495 bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
2496 rc |= 16;
2498 return rc;
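/* The return value is a bitmask of which indices moved: bit 0 for the
 * attention index, bits 1-4 for the C/U/X/T storm indices.  A caller
 * that only cares about HW attentions can test bit 0, as
 * bnx2x_sp_task() does below:
 *
 *	if (bnx2x_update_dsb_idx(bp) & 0x1)
 *		bnx2x_attn_int(bp);
 */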
2501 /*
2502  * slow path service functions
2503  */
2505 static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
2507 int port = BP_PORT(bp);
2508 u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
2509 COMMAND_REG_ATTN_BITS_SET);
2510 u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2511 MISC_REG_AEU_MASK_ATTN_FUNC_0;
2512 u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
2513 NIG_REG_MASK_INTERRUPT_PORT0;
2514 u32 aeu_mask;
2515 u32 nig_mask = 0;
2517 if (bp->attn_state & asserted)
2518 BNX2X_ERR("IGU ERROR\n");
2520 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2521 aeu_mask = REG_RD(bp, aeu_addr);
2523 DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n",
2524 aeu_mask, asserted);
2525 aeu_mask &= ~(asserted & 0xff);
2526 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
2528 REG_WR(bp, aeu_addr, aeu_mask);
2529 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2531 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2532 bp->attn_state |= asserted;
2533 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2535 if (asserted & ATTN_HARD_WIRED_MASK) {
2536 if (asserted & ATTN_NIG_FOR_FUNC) {
2538 bnx2x_acquire_phy_lock(bp);
2540 /* save nig interrupt mask */
2541 nig_mask = REG_RD(bp, nig_int_mask_addr);
2542 REG_WR(bp, nig_int_mask_addr, 0);
2544 bnx2x_link_attn(bp);
2546 /* handle unicore attn? */
2548 if (asserted & ATTN_SW_TIMER_4_FUNC)
2549 DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
2551 if (asserted & GPIO_2_FUNC)
2552 DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
2554 if (asserted & GPIO_3_FUNC)
2555 DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
2557 if (asserted & GPIO_4_FUNC)
2558 DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
2560 if (port == 0) {
2561 if (asserted & ATTN_GENERAL_ATTN_1) {
2562 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
2563 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
2565 if (asserted & ATTN_GENERAL_ATTN_2) {
2566 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
2567 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
2569 if (asserted & ATTN_GENERAL_ATTN_3) {
2570 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
2571 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
2573 } else {
2574 if (asserted & ATTN_GENERAL_ATTN_4) {
2575 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
2576 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
2578 if (asserted & ATTN_GENERAL_ATTN_5) {
2579 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
2580 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
2582 if (asserted & ATTN_GENERAL_ATTN_6) {
2583 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
2584 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
2588 } /* if hardwired */
2590 DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2591 asserted, hc_addr);
2592 REG_WR(bp, hc_addr, asserted);
2594 /* now set back the mask */
2595 if (asserted & ATTN_NIG_FOR_FUNC) {
2596 REG_WR(bp, nig_int_mask_addr, nig_mask);
2597 bnx2x_release_phy_lock(bp);
2601 static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
2603 int port = BP_PORT(bp);
2604 int reg_offset;
2605 u32 val;
2607 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
2608 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
2610 if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
2612 val = REG_RD(bp, reg_offset);
2613 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
2614 REG_WR(bp, reg_offset, val);
2616 BNX2X_ERR("SPIO5 hw attention\n");
2618 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
2619 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
2620 /* Fan failure attention */
2622 /* The PHY reset is controlled by GPIO 1 */
2623 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2624 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2625 /* Low power mode is controlled by GPIO 2 */
2626 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
2627 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2628 /* mark the failure */
2629 bp->link_params.ext_phy_config &=
2630 ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
2631 bp->link_params.ext_phy_config |=
2632 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
2633 SHMEM_WR(bp,
2634 dev_info.port_hw_config[port].
2635 external_phy_config,
2636 bp->link_params.ext_phy_config);
2637 /* log the failure */
2638 printk(KERN_ERR PFX "Fan Failure on Network"
2639 " Controller %s has caused the driver to"
2640 " shutdown the card to prevent permanent"
2641 " damage. Please contact Dell Support for"
2642 " assistance\n", bp->dev->name);
2643 break;
2645 default:
2646 break;
2650 if (attn & (AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 |
2651 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1)) {
2652 bnx2x_acquire_phy_lock(bp);
2653 bnx2x_handle_module_detect_int(&bp->link_params);
2654 bnx2x_release_phy_lock(bp);
2657 if (attn & HW_INTERRUT_ASSERT_SET_0) {
2659 val = REG_RD(bp, reg_offset);
2660 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
2661 REG_WR(bp, reg_offset, val);
2663 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
2664 (attn & HW_INTERRUT_ASSERT_SET_0));
2665 bnx2x_panic();
2669 static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
2671 u32 val;
2673 if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {
2675 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
2676 BNX2X_ERR("DB hw attention 0x%x\n", val);
2677 /* DORQ discard attention */
2678 if (val & 0x2)
2679 BNX2X_ERR("FATAL error from DORQ\n");
2682 if (attn & HW_INTERRUT_ASSERT_SET_1) {
2684 int port = BP_PORT(bp);
2685 int reg_offset;
2687 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
2688 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
2690 val = REG_RD(bp, reg_offset);
2691 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
2692 REG_WR(bp, reg_offset, val);
2694 BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
2695 (attn & HW_INTERRUT_ASSERT_SET_1));
2696 bnx2x_panic();
2700 static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
2702 u32 val;
2704 if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
2706 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
2707 BNX2X_ERR("CFC hw attention 0x%x\n", val);
2708 /* CFC error attention */
2709 if (val & 0x2)
2710 BNX2X_ERR("FATAL error from CFC\n");
2713 if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
2715 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
2716 BNX2X_ERR("PXP hw attention 0x%x\n", val);
2717 /* RQ_USDMDP_FIFO_OVERFLOW */
2718 if (val & 0x18000)
2719 BNX2X_ERR("FATAL error from PXP\n");
2722 if (attn & HW_INTERRUT_ASSERT_SET_2) {
2724 int port = BP_PORT(bp);
2725 int reg_offset;
2727 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
2728 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
2730 val = REG_RD(bp, reg_offset);
2731 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
2732 REG_WR(bp, reg_offset, val);
2734 BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
2735 (attn & HW_INTERRUT_ASSERT_SET_2));
2736 bnx2x_panic();
2740 static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
2742 u32 val;
2744 if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
2746 if (attn & BNX2X_PMF_LINK_ASSERT) {
2747 int func = BP_FUNC(bp);
2749 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
2750 bnx2x__link_status_update(bp);
2751 if (SHMEM_RD(bp, func_mb[func].drv_status) &
2752 DRV_STATUS_PMF)
2753 bnx2x_pmf_update(bp);
2755 } else if (attn & BNX2X_MC_ASSERT_BITS) {
2757 BNX2X_ERR("MC assert!\n");
2758 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
2759 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
2760 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
2761 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
2762 bnx2x_panic();
2764 } else if (attn & BNX2X_MCP_ASSERT) {
2766 BNX2X_ERR("MCP assert!\n");
2767 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
2768 bnx2x_fw_dump(bp);
2770 } else
2771 BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
2774 if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
2775 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
2776 if (attn & BNX2X_GRC_TIMEOUT) {
2777 val = CHIP_IS_E1H(bp) ?
2778 REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
2779 BNX2X_ERR("GRC time-out 0x%08x\n", val);
2781 if (attn & BNX2X_GRC_RSV) {
2782 val = CHIP_IS_E1H(bp) ?
2783 REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
2784 BNX2X_ERR("GRC reserved 0x%08x\n", val);
2786 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
2790 static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
2792 struct attn_route attn;
2793 struct attn_route group_mask;
2794 int port = BP_PORT(bp);
2795 int index;
2796 u32 reg_addr;
2797 u32 val;
2798 u32 aeu_mask;
2800 /* need to take HW lock because MCP or other port might also
2801 try to handle this event */
2802 bnx2x_acquire_alr(bp);
2804 attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
2805 attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
2806 attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
2807 attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
2808 DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
2809 attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);
2811 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
2812 if (deasserted & (1 << index)) {
2813 group_mask = bp->attn_group[index];
2815 DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
2816 index, group_mask.sig[0], group_mask.sig[1],
2817 group_mask.sig[2], group_mask.sig[3]);
2819 bnx2x_attn_int_deasserted3(bp,
2820 attn.sig[3] & group_mask.sig[3]);
2821 bnx2x_attn_int_deasserted1(bp,
2822 attn.sig[1] & group_mask.sig[1]);
2823 bnx2x_attn_int_deasserted2(bp,
2824 attn.sig[2] & group_mask.sig[2]);
2825 bnx2x_attn_int_deasserted0(bp,
2826 attn.sig[0] & group_mask.sig[0]);
2828 if ((attn.sig[0] & group_mask.sig[0] &
2829 HW_PRTY_ASSERT_SET_0) ||
2830 (attn.sig[1] & group_mask.sig[1] &
2831 HW_PRTY_ASSERT_SET_1) ||
2832 (attn.sig[2] & group_mask.sig[2] &
2833 HW_PRTY_ASSERT_SET_2))
2834 BNX2X_ERR("FATAL HW block parity attention\n");
2838 bnx2x_release_alr(bp);
2840 reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);
2842 val = ~deasserted;
2843 DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2844 val, reg_addr);
2845 REG_WR(bp, reg_addr, val);
2847 if (~bp->attn_state & deasserted)
2848 BNX2X_ERR("IGU ERROR\n");
2850 reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2851 MISC_REG_AEU_MASK_ATTN_FUNC_0;
2853 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2854 aeu_mask = REG_RD(bp, reg_addr);
2856 DP(NETIF_MSG_HW, "aeu_mask %x newly deasserted %x\n",
2857 aeu_mask, deasserted);
2858 aeu_mask |= (deasserted & 0xff);
2859 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
2861 REG_WR(bp, reg_addr, aeu_mask);
2862 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2864 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2865 bp->attn_state &= ~deasserted;
2866 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2869 static void bnx2x_attn_int(struct bnx2x *bp)
2871 /* read local copy of bits */
2872 u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
2873 attn_bits);
2874 u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
2875 attn_bits_ack);
2876 u32 attn_state = bp->attn_state;
2878 /* look for changed bits */
2879 u32 asserted = attn_bits & ~attn_ack & ~attn_state;
2880 u32 deasserted = ~attn_bits & attn_ack & attn_state;
2882 DP(NETIF_MSG_HW,
2883 "attn_bits %x attn_ack %x asserted %x deasserted %x\n",
2884 attn_bits, attn_ack, asserted, deasserted);
2886 if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
2887 BNX2X_ERR("BAD attention state\n");
2889 /* handle bits that were raised */
2890 if (asserted)
2891 bnx2x_attn_int_asserted(bp, asserted);
2893 if (deasserted)
2894 bnx2x_attn_int_deasserted(bp, deasserted);
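/* Worked example of the edge detection above (illustrative values):
 * with attn_bits == 0x5, attn_ack == 0x4 and attn_state == 0x4,
 *
 *	asserted   = 0x5 & ~0x4 & ~0x4 == 0x1	(bit 0 newly raised)
 *	deasserted = ~0x5 & 0x4 & 0x4  == 0x0	(nothing lowered)
 *
 * so only bnx2x_attn_int_asserted() is called, for bit 0 alone.
 */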
2897 static void bnx2x_sp_task(struct work_struct *work)
2899 struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
2900 u16 status;
2903 /* Return here if interrupt is disabled */
2904 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
2905 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
2906 return;
2909 status = bnx2x_update_dsb_idx(bp);
2910 /* if (status == 0) */
2911 /* BNX2X_ERR("spurious slowpath interrupt!\n"); */
2913 DP(NETIF_MSG_INTR, "got a slowpath interrupt (updated %x)\n", status);
2915 /* HW attentions */
2916 if (status & 0x1)
2917 bnx2x_attn_int(bp);
2919 bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, le16_to_cpu(bp->def_att_idx),
2920 IGU_INT_NOP, 1);
2921 bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
2922 IGU_INT_NOP, 1);
2923 bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
2924 IGU_INT_NOP, 1);
2925 bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
2926 IGU_INT_NOP, 1);
2927 bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
2928 IGU_INT_ENABLE, 1);
2932 static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
2934 struct net_device *dev = dev_instance;
2935 struct bnx2x *bp = netdev_priv(dev);
2937 /* Return here if interrupt is disabled */
2938 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
2939 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
2940 return IRQ_HANDLED;
2943 bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, 0, IGU_INT_DISABLE, 0);
2945 #ifdef BNX2X_STOP_ON_ERROR
2946 if (unlikely(bp->panic))
2947 return IRQ_HANDLED;
2948 #endif
2950 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
2952 return IRQ_HANDLED;
2955 /* end of slow path */
2957 /* Statistics */
2959 /****************************************************************************
2960 * Macros
2961 ****************************************************************************/
2963 /* sum[hi:lo] += add[hi:lo] */
2964 #define ADD_64(s_hi, a_hi, s_lo, a_lo) \
2965 do { \
2966 s_lo += a_lo; \
2967 s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
2968 } while (0)
2970 /* difference = minuend - subtrahend */
2971 #define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
2972 do { \
2973 if (m_lo < s_lo) { \
2974 /* underflow */ \
2975 d_hi = m_hi - s_hi; \
2976 if (d_hi > 0) { \
2977 /* we can 'borrow' 1 */ \
2978 d_hi--; \
2979 d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
2980 } else { \
2981 /* m_hi <= s_hi */ \
2982 d_hi = 0; \
2983 d_lo = 0; \
2985 } else { \
2986 /* m_lo >= s_lo */ \
2987 if (m_hi < s_hi) { \
2988 d_hi = 0; \
2989 d_lo = 0; \
2990 } else { \
2991 /* m_hi >= s_hi */ \
2992 d_hi = m_hi - s_hi; \
2993 d_lo = m_lo - s_lo; \
2996 } while (0)
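/* Worked example of the 64-bit helpers above, operating on hi:lo pairs
 * of u32 (values chosen only for illustration):
 *
 *	u32 s_hi = 0, s_lo = 0xffffffff, d_hi, d_lo;
 *
 *	ADD_64(s_hi, 0, s_lo, 1);
 *		s_lo wraps to 0 and the carry bumps s_hi to 1
 *
 *	DIFF_64(d_hi, 1, 0, d_lo, 0, 1);
 *		0x1_00000000 - 1: d_hi == 0, d_lo == 0xffffffff
 */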
2998 #define UPDATE_STAT64(s, t) \
2999 do { \
3000 DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
3001 diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
3002 pstats->mac_stx[0].t##_hi = new->s##_hi; \
3003 pstats->mac_stx[0].t##_lo = new->s##_lo; \
3004 ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
3005 pstats->mac_stx[1].t##_lo, diff.lo); \
3006 } while (0)
3008 #define UPDATE_STAT64_NIG(s, t) \
3009 do { \
3010 DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
3011 diff.lo, new->s##_lo, old->s##_lo); \
3012 ADD_64(estats->t##_hi, diff.hi, \
3013 estats->t##_lo, diff.lo); \
3014 } while (0)
3016 /* sum[hi:lo] += add */
3017 #define ADD_EXTEND_64(s_hi, s_lo, a) \
3018 do { \
3019 s_lo += a; \
3020 s_hi += (s_lo < a) ? 1 : 0; \
3021 } while (0)
3023 #define UPDATE_EXTEND_STAT(s) \
3024 do { \
3025 ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
3026 pstats->mac_stx[1].s##_lo, \
3027 new->s); \
3028 } while (0)
3030 #define UPDATE_EXTEND_TSTAT(s, t) \
3031 do { \
3032 diff = le32_to_cpu(tclient->s) - le32_to_cpu(old_tclient->s); \
3033 old_tclient->s = tclient->s; \
3034 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3035 } while (0)
3037 #define UPDATE_EXTEND_USTAT(s, t) \
3038 do { \
3039 diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
3040 old_uclient->s = uclient->s; \
3041 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3042 } while (0)
3044 #define UPDATE_EXTEND_XSTAT(s, t) \
3045 do { \
3046 diff = le32_to_cpu(xclient->s) - le32_to_cpu(old_xclient->s); \
3047 old_xclient->s = xclient->s; \
3048 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3049 } while (0)
3051 /* minuend -= subtrahend */
3052 #define SUB_64(m_hi, s_hi, m_lo, s_lo) \
3053 do { \
3054 DIFF_64(m_hi, m_hi, s_hi, m_lo, m_lo, s_lo); \
3055 } while (0)
3057 /* minuend[hi:lo] -= subtrahend */
3058 #define SUB_EXTEND_64(m_hi, m_lo, s) \
3059 do { \
3060 SUB_64(m_hi, 0, m_lo, s); \
3061 } while (0)
3063 #define SUB_EXTEND_USTAT(s, t) \
3064 do { \
3065 diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
3066 SUB_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3067 } while (0)
3069 /*
3070  * General service functions
3071  */
3073 static inline long bnx2x_hilo(u32 *hiref)
3075 u32 lo = *(hiref + 1);
3076 #if (BITS_PER_LONG == 64)
3077 u32 hi = *hiref;
3079 return HILO_U64(hi, lo);
3080 #else
3081 return lo;
3082 #endif
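/* i.e. a {hi, lo} pair of u32s stored hi-first collapses into a single
 * counter on 64-bit kernels (HILO_U64(hi, lo) is presumably
 * ((u64)hi << 32) + lo), while 32-bit kernels settle for the low
 * 32 bits.
 */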
3085 /*
3086  * Init service functions
3087  */
3089 static void bnx2x_storm_stats_post(struct bnx2x *bp)
3091 if (!bp->stats_pending) {
3092 struct eth_query_ramrod_data ramrod_data = {0};
3093 int i, rc;
3095 ramrod_data.drv_counter = bp->stats_counter++;
3096 ramrod_data.collect_port = bp->port.pmf ? 1 : 0;
3097 for_each_queue(bp, i)
3098 ramrod_data.ctr_id_vector |= (1 << bp->fp[i].cl_id);
3100 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
3101 ((u32 *)&ramrod_data)[1],
3102 ((u32 *)&ramrod_data)[0], 0);
3103 if (rc == 0) {
3104 /* stats ramrod has its own slot on the spq */
3105 bp->spq_left++;
3106 bp->stats_pending = 1;
3111 static void bnx2x_stats_init(struct bnx2x *bp)
3113 int port = BP_PORT(bp);
3114 int i;
3116 bp->stats_pending = 0;
3117 bp->executer_idx = 0;
3118 bp->stats_counter = 0;
3120 /* port stats */
3121 if (!BP_NOMCP(bp))
3122 bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
3123 else
3124 bp->port.port_stx = 0;
3125 DP(BNX2X_MSG_STATS, "port_stx 0x%x\n", bp->port.port_stx);
3127 memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
3128 bp->port.old_nig_stats.brb_discard =
3129 REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
3130 bp->port.old_nig_stats.brb_truncate =
3131 REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
3132 REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
3133 &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
3134 REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
3135 &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);
3137 /* function stats */
3138 for_each_queue(bp, i) {
3139 struct bnx2x_fastpath *fp = &bp->fp[i];
3141 memset(&fp->old_tclient, 0,
3142 sizeof(struct tstorm_per_client_stats));
3143 memset(&fp->old_uclient, 0,
3144 sizeof(struct ustorm_per_client_stats));
3145 memset(&fp->old_xclient, 0,
3146 sizeof(struct xstorm_per_client_stats));
3147 memset(&fp->eth_q_stats, 0, sizeof(struct bnx2x_eth_q_stats));
3150 memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
3151 memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats));
3153 bp->stats_state = STATS_STATE_DISABLED;
3154 if (IS_E1HMF(bp) && bp->port.pmf && bp->port.port_stx)
3155 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
3158 static void bnx2x_hw_stats_post(struct bnx2x *bp)
3160 struct dmae_command *dmae = &bp->stats_dmae;
3161 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3163 *stats_comp = DMAE_COMP_VAL;
3164 if (CHIP_REV_IS_SLOW(bp))
3165 return;
3167 /* loader */
3168 if (bp->executer_idx) {
3169 int loader_idx = PMF_DMAE_C(bp);
3171 memset(dmae, 0, sizeof(struct dmae_command));
3173 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3174 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3175 DMAE_CMD_DST_RESET |
3176 #ifdef __BIG_ENDIAN
3177 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3178 #else
3179 DMAE_CMD_ENDIANITY_DW_SWAP |
3180 #endif
3181 (BP_PORT(bp) ? DMAE_CMD_PORT_1 :
3182 DMAE_CMD_PORT_0) |
3183 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3184 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
3185 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
3186 dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
3187 sizeof(struct dmae_command) *
3188 (loader_idx + 1)) >> 2;
3189 dmae->dst_addr_hi = 0;
3190 dmae->len = sizeof(struct dmae_command) >> 2;
3191 if (CHIP_IS_E1(bp))
3192 dmae->len--;
3193 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
3194 dmae->comp_addr_hi = 0;
3195 dmae->comp_val = 1;
3197 *stats_comp = 0;
3198 bnx2x_post_dmae(bp, dmae, loader_idx);
3200 } else if (bp->func_stx) {
3201 *stats_comp = 0;
3202 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
3206 static int bnx2x_stats_comp(struct bnx2x *bp)
3208 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3209 int cnt = 10;
3211 might_sleep();
3212 while (*stats_comp != DMAE_COMP_VAL) {
3213 if (!cnt) {
3214 BNX2X_ERR("timeout waiting for stats finished\n");
3215 break;
3217 cnt--;
3218 msleep(1);
3220 return 1;
3223 /*
3224  * Statistics service functions
3225  */
3227 static void bnx2x_stats_pmf_update(struct bnx2x *bp)
3229 struct dmae_command *dmae;
3230 u32 opcode;
3231 int loader_idx = PMF_DMAE_C(bp);
3232 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3234 /* sanity */
3235 if (!IS_E1HMF(bp) || !bp->port.pmf || !bp->port.port_stx) {
3236 BNX2X_ERR("BUG!\n");
3237 return;
3240 bp->executer_idx = 0;
3242 opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3243 DMAE_CMD_C_ENABLE |
3244 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3245 #ifdef __BIG_ENDIAN
3246 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3247 #else
3248 DMAE_CMD_ENDIANITY_DW_SWAP |
3249 #endif
3250 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3251 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3253 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3254 dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
3255 dmae->src_addr_lo = bp->port.port_stx >> 2;
3256 dmae->src_addr_hi = 0;
3257 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3258 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3259 dmae->len = DMAE_LEN32_RD_MAX;
3260 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3261 dmae->comp_addr_hi = 0;
3262 dmae->comp_val = 1;
3264 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3265 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3266 dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
3267 dmae->src_addr_hi = 0;
3268 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
3269 DMAE_LEN32_RD_MAX * 4);
3270 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
3271 DMAE_LEN32_RD_MAX * 4);
3272 dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX;
3273 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3274 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3275 dmae->comp_val = DMAE_COMP_VAL;
3277 *stats_comp = 0;
3278 bnx2x_hw_stats_post(bp);
3279 bnx2x_stats_comp(bp);
3282 static void bnx2x_port_stats_init(struct bnx2x *bp)
3284 struct dmae_command *dmae;
3285 int port = BP_PORT(bp);
3286 int vn = BP_E1HVN(bp);
3287 u32 opcode;
3288 int loader_idx = PMF_DMAE_C(bp);
3289 u32 mac_addr;
3290 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3292 /* sanity */
3293 if (!bp->link_vars.link_up || !bp->port.pmf) {
3294 BNX2X_ERR("BUG!\n");
3295 return;
3298 bp->executer_idx = 0;
3300 /* MCP */
3301 opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3302 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3303 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3304 #ifdef __BIG_ENDIAN
3305 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3306 #else
3307 DMAE_CMD_ENDIANITY_DW_SWAP |
3308 #endif
3309 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3310 (vn << DMAE_CMD_E1HVN_SHIFT));
3312 if (bp->port.port_stx) {
3314 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3315 dmae->opcode = opcode;
3316 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3317 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3318 dmae->dst_addr_lo = bp->port.port_stx >> 2;
3319 dmae->dst_addr_hi = 0;
3320 dmae->len = sizeof(struct host_port_stats) >> 2;
3321 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3322 dmae->comp_addr_hi = 0;
3323 dmae->comp_val = 1;
3326 if (bp->func_stx) {
3328 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3329 dmae->opcode = opcode;
3330 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3331 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3332 dmae->dst_addr_lo = bp->func_stx >> 2;
3333 dmae->dst_addr_hi = 0;
3334 dmae->len = sizeof(struct host_func_stats) >> 2;
3335 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3336 dmae->comp_addr_hi = 0;
3337 dmae->comp_val = 1;
3340 /* MAC */
3341 opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3342 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3343 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3344 #ifdef __BIG_ENDIAN
3345 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3346 #else
3347 DMAE_CMD_ENDIANITY_DW_SWAP |
3348 #endif
3349 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3350 (vn << DMAE_CMD_E1HVN_SHIFT));
3352 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
3354 mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
3355 NIG_REG_INGRESS_BMAC0_MEM);
3357 /* BIGMAC_REGISTER_TX_STAT_GTPKT ..
3358 BIGMAC_REGISTER_TX_STAT_GTBYT */
3359 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3360 dmae->opcode = opcode;
3361 dmae->src_addr_lo = (mac_addr +
3362 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3363 dmae->src_addr_hi = 0;
3364 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3365 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3366 dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
3367 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3368 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3369 dmae->comp_addr_hi = 0;
3370 dmae->comp_val = 1;
3372 /* BIGMAC_REGISTER_RX_STAT_GR64 ..
3373 BIGMAC_REGISTER_RX_STAT_GRIPJ */
3374 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3375 dmae->opcode = opcode;
3376 dmae->src_addr_lo = (mac_addr +
3377 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3378 dmae->src_addr_hi = 0;
3379 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3380 offsetof(struct bmac_stats, rx_stat_gr64_lo));
3381 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3382 offsetof(struct bmac_stats, rx_stat_gr64_lo));
3383 dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
3384 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3385 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3386 dmae->comp_addr_hi = 0;
3387 dmae->comp_val = 1;
3389 } else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {
3391 mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);
3393 /* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
3394 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3395 dmae->opcode = opcode;
3396 dmae->src_addr_lo = (mac_addr +
3397 EMAC_REG_EMAC_RX_STAT_AC) >> 2;
3398 dmae->src_addr_hi = 0;
3399 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3400 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3401 dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
3402 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3403 dmae->comp_addr_hi = 0;
3404 dmae->comp_val = 1;
3406 /* EMAC_REG_EMAC_RX_STAT_AC_28 */
3407 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3408 dmae->opcode = opcode;
3409 dmae->src_addr_lo = (mac_addr +
3410 EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
3411 dmae->src_addr_hi = 0;
3412 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3413 offsetof(struct emac_stats, rx_stat_falsecarriererrors));
3414 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3415 offsetof(struct emac_stats, rx_stat_falsecarriererrors));
3416 dmae->len = 1;
3417 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3418 dmae->comp_addr_hi = 0;
3419 dmae->comp_val = 1;
3421 /* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
3422 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3423 dmae->opcode = opcode;
3424 dmae->src_addr_lo = (mac_addr +
3425 EMAC_REG_EMAC_TX_STAT_AC) >> 2;
3426 dmae->src_addr_hi = 0;
3427 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3428 offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
3429 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3430 offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
3431 dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
3432 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3433 dmae->comp_addr_hi = 0;
3434 dmae->comp_val = 1;
3437 /* NIG */
3438 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3439 dmae->opcode = opcode;
3440 dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
3441 NIG_REG_STAT0_BRB_DISCARD) >> 2;
3442 dmae->src_addr_hi = 0;
3443 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
3444 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
3445 dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;
3446 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3447 dmae->comp_addr_hi = 0;
3448 dmae->comp_val = 1;
3450 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3451 dmae->opcode = opcode;
3452 dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
3453 NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
3454 dmae->src_addr_hi = 0;
3455 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3456 offsetof(struct nig_stats, egress_mac_pkt0_lo));
3457 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3458 offsetof(struct nig_stats, egress_mac_pkt0_lo));
3459 dmae->len = (2*sizeof(u32)) >> 2;
3460 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3461 dmae->comp_addr_hi = 0;
3462 dmae->comp_val = 1;
3464 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3465 dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3466 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3467 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3468 #ifdef __BIG_ENDIAN
3469 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3470 #else
3471 DMAE_CMD_ENDIANITY_DW_SWAP |
3472 #endif
3473 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3474 (vn << DMAE_CMD_E1HVN_SHIFT));
3475 dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
3476 NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
3477 dmae->src_addr_hi = 0;
3478 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3479 offsetof(struct nig_stats, egress_mac_pkt1_lo));
3480 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3481 offsetof(struct nig_stats, egress_mac_pkt1_lo));
3482 dmae->len = (2*sizeof(u32)) >> 2;
3483 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3484 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3485 dmae->comp_val = DMAE_COMP_VAL;
3487 *stats_comp = 0;
3490 static void bnx2x_func_stats_init(struct bnx2x *bp)
3492 struct dmae_command *dmae = &bp->stats_dmae;
3493 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3495 /* sanity */
3496 if (!bp->func_stx) {
3497 BNX2X_ERR("BUG!\n");
3498 return;
3501 bp->executer_idx = 0;
3502 memset(dmae, 0, sizeof(struct dmae_command));
3504 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3505 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3506 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3507 #ifdef __BIG_ENDIAN
3508 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3509 #else
3510 DMAE_CMD_ENDIANITY_DW_SWAP |
3511 #endif
3512 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3513 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3514 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3515 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3516 dmae->dst_addr_lo = bp->func_stx >> 2;
3517 dmae->dst_addr_hi = 0;
3518 dmae->len = sizeof(struct host_func_stats) >> 2;
3519 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3520 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3521 dmae->comp_val = DMAE_COMP_VAL;
3523 *stats_comp = 0;
3526 static void bnx2x_stats_start(struct bnx2x *bp)
3528 if (bp->port.pmf)
3529 bnx2x_port_stats_init(bp);
3531 else if (bp->func_stx)
3532 bnx2x_func_stats_init(bp);
3534 bnx2x_hw_stats_post(bp);
3535 bnx2x_storm_stats_post(bp);
3538 static void bnx2x_stats_pmf_start(struct bnx2x *bp)
3540 bnx2x_stats_comp(bp);
3541 bnx2x_stats_pmf_update(bp);
3542 bnx2x_stats_start(bp);
3545 static void bnx2x_stats_restart(struct bnx2x *bp)
3547 bnx2x_stats_comp(bp);
3548 bnx2x_stats_start(bp);
3551 static void bnx2x_bmac_stats_update(struct bnx2x *bp)
3553 struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac_stats);
3554 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3555 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3556 struct {
3557 u32 lo;
3558 u32 hi;
3559 } diff;
3561 UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
3562 UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
3563 UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
3564 UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
3565 UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
3566 UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
3567 UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
3568 UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
3569 UPDATE_STAT64(rx_stat_grxpf, rx_stat_bmac_xpf);
3570 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
3571 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
3572 UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
3573 UPDATE_STAT64(tx_stat_gt127,
3574 tx_stat_etherstatspkts65octetsto127octets);
3575 UPDATE_STAT64(tx_stat_gt255,
3576 tx_stat_etherstatspkts128octetsto255octets);
3577 UPDATE_STAT64(tx_stat_gt511,
3578 tx_stat_etherstatspkts256octetsto511octets);
3579 UPDATE_STAT64(tx_stat_gt1023,
3580 tx_stat_etherstatspkts512octetsto1023octets);
3581 UPDATE_STAT64(tx_stat_gt1518,
3582 tx_stat_etherstatspkts1024octetsto1522octets);
3583 UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047);
3584 UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095);
3585 UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216);
3586 UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383);
3587 UPDATE_STAT64(tx_stat_gterr,
3588 tx_stat_dot3statsinternalmactransmiterrors);
3589 UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl);
3591 estats->pause_frames_received_hi =
3592 pstats->mac_stx[1].rx_stat_bmac_xpf_hi;
3593 estats->pause_frames_received_lo =
3594 pstats->mac_stx[1].rx_stat_bmac_xpf_lo;
3596 estats->pause_frames_sent_hi =
3597 pstats->mac_stx[1].tx_stat_outxoffsent_hi;
3598 estats->pause_frames_sent_lo =
3599 pstats->mac_stx[1].tx_stat_outxoffsent_lo;
3602 static void bnx2x_emac_stats_update(struct bnx2x *bp)
3604 struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
3605 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3606 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3608 UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
3609 UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
3610 UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
3611 UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
3612 UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
3613 UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
3614 UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
3615 UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
3616 UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
3617 UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
3618 UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
3619 UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
3620 UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
3621 UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
3622 UPDATE_EXTEND_STAT(tx_stat_outxonsent);
3623 UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
3624 UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
3625 UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
3626 UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
3627 UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
3628 UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
3629 UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
3630 UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
3631 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
3632 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
3633 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
3634 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
3635 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
3636 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
3637 UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
3638 UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);
3640 estats->pause_frames_received_hi =
3641 pstats->mac_stx[1].rx_stat_xonpauseframesreceived_hi;
3642 estats->pause_frames_received_lo =
3643 pstats->mac_stx[1].rx_stat_xonpauseframesreceived_lo;
3644 ADD_64(estats->pause_frames_received_hi,
3645 pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_hi,
3646 estats->pause_frames_received_lo,
3647 pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_lo);
3649 estats->pause_frames_sent_hi =
3650 pstats->mac_stx[1].tx_stat_outxonsent_hi;
3651 estats->pause_frames_sent_lo =
3652 pstats->mac_stx[1].tx_stat_outxonsent_lo;
3653 ADD_64(estats->pause_frames_sent_hi,
3654 pstats->mac_stx[1].tx_stat_outxoffsent_hi,
3655 estats->pause_frames_sent_lo,
3656 pstats->mac_stx[1].tx_stat_outxoffsent_lo);
3659 static int bnx2x_hw_stats_update(struct bnx2x *bp)
3661 struct nig_stats *new = bnx2x_sp(bp, nig_stats);
3662 struct nig_stats *old = &(bp->port.old_nig_stats);
3663 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3664 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3665 struct {
3666 u32 lo;
3667 u32 hi;
3668 } diff;
3669 u32 nig_timer_max;
3671 if (bp->link_vars.mac_type == MAC_TYPE_BMAC)
3672 bnx2x_bmac_stats_update(bp);
3674 else if (bp->link_vars.mac_type == MAC_TYPE_EMAC)
3675 bnx2x_emac_stats_update(bp);
3677 else { /* unreached */
3678 BNX2X_ERR("stats updated by DMAE but no MAC active\n");
3679 return -1;
3682 ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
3683 new->brb_discard - old->brb_discard);
3684 ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
3685 new->brb_truncate - old->brb_truncate);
3687 UPDATE_STAT64_NIG(egress_mac_pkt0,
3688 etherstatspkts1024octetsto1522octets);
3689 UPDATE_STAT64_NIG(egress_mac_pkt1, etherstatspktsover1522octets);
3691 memcpy(old, new, sizeof(struct nig_stats));
3693 memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
3694 sizeof(struct mac_stx));
3695 estats->brb_drop_hi = pstats->brb_drop_hi;
3696 estats->brb_drop_lo = pstats->brb_drop_lo;
3698 pstats->host_port_stats_start = ++pstats->host_port_stats_end;
3700 nig_timer_max = SHMEM_RD(bp, port_mb[BP_PORT(bp)].stat_nig_timer);
3701 if (nig_timer_max != estats->nig_timer_max) {
3702 estats->nig_timer_max = nig_timer_max;
3703 BNX2X_ERR("NIG timer max (%u)\n", estats->nig_timer_max);
3706 return 0;
3709 static int bnx2x_storm_stats_update(struct bnx2x *bp)
3711 struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
3712 struct tstorm_per_port_stats *tport =
3713 &stats->tstorm_common.port_statistics;
3714 struct host_func_stats *fstats = bnx2x_sp(bp, func_stats);
3715 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3716 int i;
3718 memset(&(fstats->total_bytes_received_hi), 0,
3719 sizeof(struct host_func_stats) - 2*sizeof(u32));
3720 estats->error_bytes_received_hi = 0;
3721 estats->error_bytes_received_lo = 0;
3722 estats->etherstatsoverrsizepkts_hi = 0;
3723 estats->etherstatsoverrsizepkts_lo = 0;
3724 estats->no_buff_discard_hi = 0;
3725 estats->no_buff_discard_lo = 0;
3727 for_each_queue(bp, i) {
3728 struct bnx2x_fastpath *fp = &bp->fp[i];
3729 int cl_id = fp->cl_id;
3730 struct tstorm_per_client_stats *tclient =
3731 &stats->tstorm_common.client_statistics[cl_id];
3732 struct tstorm_per_client_stats *old_tclient = &fp->old_tclient;
3733 struct ustorm_per_client_stats *uclient =
3734 &stats->ustorm_common.client_statistics[cl_id];
3735 struct ustorm_per_client_stats *old_uclient = &fp->old_uclient;
3736 struct xstorm_per_client_stats *xclient =
3737 &stats->xstorm_common.client_statistics[cl_id];
3738 struct xstorm_per_client_stats *old_xclient = &fp->old_xclient;
3739 struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
3740 u32 diff;
3742 /* are storm stats valid? */
3743 if ((u16)(le16_to_cpu(xclient->stats_counter) + 1) !=
3744 bp->stats_counter) {
3745 DP(BNX2X_MSG_STATS, "[%d] stats not updated by xstorm"
3746 " xstorm counter (%d) != stats_counter (%d)\n",
3747 i, xclient->stats_counter, bp->stats_counter);
3748 return -1;
3750 if ((u16)(le16_to_cpu(tclient->stats_counter) + 1) !=
3751 bp->stats_counter) {
3752 DP(BNX2X_MSG_STATS, "[%d] stats not updated by tstorm"
3753 " tstorm counter (%d) != stats_counter (%d)\n",
3754 i, tclient->stats_counter, bp->stats_counter);
3755 return -2;
3757 if ((u16)(le16_to_cpu(uclient->stats_counter) + 1) !=
3758 bp->stats_counter) {
3759 DP(BNX2X_MSG_STATS, "[%d] stats not updated by ustorm"
3760 " ustorm counter (%d) != stats_counter (%d)\n",
3761 i, uclient->stats_counter, bp->stats_counter);
3762 return -4;
3765 qstats->total_bytes_received_hi =
3766 qstats->valid_bytes_received_hi =
3767 le32_to_cpu(tclient->total_rcv_bytes.hi);
3768 qstats->total_bytes_received_lo =
3769 qstats->valid_bytes_received_lo =
3770 le32_to_cpu(tclient->total_rcv_bytes.lo);
3772 qstats->error_bytes_received_hi =
3773 le32_to_cpu(tclient->rcv_error_bytes.hi);
3774 qstats->error_bytes_received_lo =
3775 le32_to_cpu(tclient->rcv_error_bytes.lo);
3777 ADD_64(qstats->total_bytes_received_hi,
3778 qstats->error_bytes_received_hi,
3779 qstats->total_bytes_received_lo,
3780 qstats->error_bytes_received_lo);
		UPDATE_EXTEND_TSTAT(rcv_unicast_pkts,
					total_unicast_packets_received);
		UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
					total_multicast_packets_received);
		UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
					total_broadcast_packets_received);
		UPDATE_EXTEND_TSTAT(packets_too_big_discard,
					etherstatsoverrsizepkts);
		UPDATE_EXTEND_TSTAT(no_buff_discard, no_buff_discard);

		SUB_EXTEND_USTAT(ucast_no_buff_pkts,
					total_unicast_packets_received);
		SUB_EXTEND_USTAT(mcast_no_buff_pkts,
					total_multicast_packets_received);
		SUB_EXTEND_USTAT(bcast_no_buff_pkts,
					total_broadcast_packets_received);
		UPDATE_EXTEND_USTAT(ucast_no_buff_pkts, no_buff_discard);
		UPDATE_EXTEND_USTAT(mcast_no_buff_pkts, no_buff_discard);
		UPDATE_EXTEND_USTAT(bcast_no_buff_pkts, no_buff_discard);

		qstats->total_bytes_transmitted_hi =
				le32_to_cpu(xclient->total_sent_bytes.hi);
		qstats->total_bytes_transmitted_lo =
				le32_to_cpu(xclient->total_sent_bytes.lo);

		UPDATE_EXTEND_XSTAT(unicast_pkts_sent,
					total_unicast_packets_transmitted);
		UPDATE_EXTEND_XSTAT(multicast_pkts_sent,
					total_multicast_packets_transmitted);
		UPDATE_EXTEND_XSTAT(broadcast_pkts_sent,
					total_broadcast_packets_transmitted);

		old_tclient->checksum_discard = tclient->checksum_discard;
		old_tclient->ttl0_discard = tclient->ttl0_discard;

		ADD_64(fstats->total_bytes_received_hi,
		       qstats->total_bytes_received_hi,
		       fstats->total_bytes_received_lo,
		       qstats->total_bytes_received_lo);
		ADD_64(fstats->total_bytes_transmitted_hi,
		       qstats->total_bytes_transmitted_hi,
		       fstats->total_bytes_transmitted_lo,
		       qstats->total_bytes_transmitted_lo);
		ADD_64(fstats->total_unicast_packets_received_hi,
		       qstats->total_unicast_packets_received_hi,
		       fstats->total_unicast_packets_received_lo,
		       qstats->total_unicast_packets_received_lo);
		ADD_64(fstats->total_multicast_packets_received_hi,
		       qstats->total_multicast_packets_received_hi,
		       fstats->total_multicast_packets_received_lo,
		       qstats->total_multicast_packets_received_lo);
		ADD_64(fstats->total_broadcast_packets_received_hi,
		       qstats->total_broadcast_packets_received_hi,
		       fstats->total_broadcast_packets_received_lo,
		       qstats->total_broadcast_packets_received_lo);
		ADD_64(fstats->total_unicast_packets_transmitted_hi,
		       qstats->total_unicast_packets_transmitted_hi,
		       fstats->total_unicast_packets_transmitted_lo,
		       qstats->total_unicast_packets_transmitted_lo);
		ADD_64(fstats->total_multicast_packets_transmitted_hi,
		       qstats->total_multicast_packets_transmitted_hi,
		       fstats->total_multicast_packets_transmitted_lo,
		       qstats->total_multicast_packets_transmitted_lo);
		ADD_64(fstats->total_broadcast_packets_transmitted_hi,
		       qstats->total_broadcast_packets_transmitted_hi,
		       fstats->total_broadcast_packets_transmitted_lo,
		       qstats->total_broadcast_packets_transmitted_lo);
		ADD_64(fstats->valid_bytes_received_hi,
		       qstats->valid_bytes_received_hi,
		       fstats->valid_bytes_received_lo,
		       qstats->valid_bytes_received_lo);

		ADD_64(estats->error_bytes_received_hi,
		       qstats->error_bytes_received_hi,
		       estats->error_bytes_received_lo,
		       qstats->error_bytes_received_lo);
		ADD_64(estats->etherstatsoverrsizepkts_hi,
		       qstats->etherstatsoverrsizepkts_hi,
		       estats->etherstatsoverrsizepkts_lo,
		       qstats->etherstatsoverrsizepkts_lo);
		ADD_64(estats->no_buff_discard_hi, qstats->no_buff_discard_hi,
		       estats->no_buff_discard_lo, qstats->no_buff_discard_lo);
	}

	ADD_64(fstats->total_bytes_received_hi,
	       estats->rx_stat_ifhcinbadoctets_hi,
	       fstats->total_bytes_received_lo,
	       estats->rx_stat_ifhcinbadoctets_lo);

	memcpy(estats, &(fstats->total_bytes_received_hi),
	       sizeof(struct host_func_stats) - 2*sizeof(u32));

	ADD_64(estats->etherstatsoverrsizepkts_hi,
	       estats->rx_stat_dot3statsframestoolong_hi,
	       estats->etherstatsoverrsizepkts_lo,
	       estats->rx_stat_dot3statsframestoolong_lo);
	ADD_64(estats->error_bytes_received_hi,
	       estats->rx_stat_ifhcinbadoctets_hi,
	       estats->error_bytes_received_lo,
	       estats->rx_stat_ifhcinbadoctets_lo);

	if (bp->port.pmf) {
		estats->mac_filter_discard =
				le32_to_cpu(tport->mac_filter_discard);
		estats->xxoverflow_discard =
				le32_to_cpu(tport->xxoverflow_discard);
		estats->brb_truncate_discard =
				le32_to_cpu(tport->brb_truncate_discard);
		estats->mac_discard = le32_to_cpu(tport->mac_discard);
	}

	fstats->host_func_stats_start = ++fstats->host_func_stats_end;

	bp->stats_pending = 0;

	return 0;
}
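
/*
 * bnx2x_hilo() (defined earlier in this file) collapses one of the
 * {_hi, _lo} u32 pairs maintained above into a single scalar, on a
 * 64-bit kernel effectively ((u64)hi << 32) | lo, which is what the
 * plain net_device_stats fields below expect.
 */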
static void bnx2x_net_stats_update(struct bnx2x *bp)
{
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	struct net_device_stats *nstats = &bp->dev->stats;
	int i;

	nstats->rx_packets =
		bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
		bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
		bnx2x_hilo(&estats->total_broadcast_packets_received_hi);

	nstats->tx_packets =
		bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
		bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
		bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);

	nstats->rx_bytes = bnx2x_hilo(&estats->total_bytes_received_hi);

	nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);

	nstats->rx_dropped = estats->mac_discard;
	for_each_queue(bp, i)
		nstats->rx_dropped +=
			le32_to_cpu(bp->fp[i].old_tclient.checksum_discard);

	nstats->tx_dropped = 0;

	nstats->multicast =
		bnx2x_hilo(&estats->total_multicast_packets_received_hi);

	nstats->collisions =
		bnx2x_hilo(&estats->tx_stat_etherstatscollisions_hi);

	nstats->rx_length_errors =
		bnx2x_hilo(&estats->rx_stat_etherstatsundersizepkts_hi) +
		bnx2x_hilo(&estats->etherstatsoverrsizepkts_hi);
	nstats->rx_over_errors = bnx2x_hilo(&estats->brb_drop_hi) +
				 bnx2x_hilo(&estats->brb_truncate_hi);
	nstats->rx_crc_errors =
		bnx2x_hilo(&estats->rx_stat_dot3statsfcserrors_hi);
	nstats->rx_frame_errors =
		bnx2x_hilo(&estats->rx_stat_dot3statsalignmenterrors_hi);
	nstats->rx_fifo_errors = bnx2x_hilo(&estats->no_buff_discard_hi);
	nstats->rx_missed_errors = estats->xxoverflow_discard;

	nstats->rx_errors = nstats->rx_length_errors +
			    nstats->rx_over_errors +
			    nstats->rx_crc_errors +
			    nstats->rx_frame_errors +
			    nstats->rx_fifo_errors +
			    nstats->rx_missed_errors;

	nstats->tx_aborted_errors =
		bnx2x_hilo(&estats->tx_stat_dot3statslatecollisions_hi) +
		bnx2x_hilo(&estats->tx_stat_dot3statsexcessivecollisions_hi);
	nstats->tx_carrier_errors =
		bnx2x_hilo(&estats->rx_stat_dot3statscarriersenseerrors_hi);
	nstats->tx_fifo_errors = 0;
	nstats->tx_heartbeat_errors = 0;
	nstats->tx_window_errors = 0;

	nstats->tx_errors = nstats->tx_aborted_errors +
			    nstats->tx_carrier_errors +
	    bnx2x_hilo(&estats->tx_stat_dot3statsinternalmactransmiterrors_hi);
}
static void bnx2x_drv_stats_update(struct bnx2x *bp)
{
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	int i;

	estats->driver_xoff = 0;
	estats->rx_err_discard_pkt = 0;
	estats->rx_skb_alloc_failed = 0;
	estats->hw_csum_err = 0;
	for_each_queue(bp, i) {
		struct bnx2x_eth_q_stats *qstats = &bp->fp[i].eth_q_stats;

		estats->driver_xoff += qstats->driver_xoff;
		estats->rx_err_discard_pkt += qstats->rx_err_discard_pkt;
		estats->rx_skb_alloc_failed += qstats->rx_skb_alloc_failed;
		estats->hw_csum_err += qstats->hw_csum_err;
	}
}
static void bnx2x_stats_update(struct bnx2x *bp)
{
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	if (*stats_comp != DMAE_COMP_VAL)
		return;

	if (bp->port.pmf)
		bnx2x_hw_stats_update(bp);

	if (bnx2x_storm_stats_update(bp) && (bp->stats_pending++ == 3)) {
		BNX2X_ERR("storm stats not updated 3 times in a row\n");
		bnx2x_panic();
		return;
	}

	bnx2x_net_stats_update(bp);
	bnx2x_drv_stats_update(bp);

	if (bp->msglevel & NETIF_MSG_TIMER) {
		struct tstorm_per_client_stats *old_tclient =
							&bp->fp->old_tclient;
		struct bnx2x_eth_q_stats *qstats = &bp->fp->eth_q_stats;
		struct bnx2x_eth_stats *estats = &bp->eth_stats;
		struct net_device_stats *nstats = &bp->dev->stats;
		int i;

		printk(KERN_DEBUG "%s:\n", bp->dev->name);
		printk(KERN_DEBUG "  tx avail (%4x)  tx hc idx (%x)"
				  "  tx pkt (%lx)\n",
		       bnx2x_tx_avail(bp->fp),
		       le16_to_cpu(*bp->fp->tx_cons_sb), nstats->tx_packets);
		printk(KERN_DEBUG "  rx usage (%4x)  rx hc idx (%x)"
				  "  rx pkt (%lx)\n",
		       (u16)(le16_to_cpu(*bp->fp->rx_cons_sb) -
			     bp->fp->rx_comp_cons),
		       le16_to_cpu(*bp->fp->rx_cons_sb), nstats->rx_packets);
		printk(KERN_DEBUG "  %s (Xoff events %u)  brb drops %u  "
				  "brb truncate %u\n",
		       (netif_queue_stopped(bp->dev) ? "Xoff" : "Xon"),
		       qstats->driver_xoff,
		       estats->brb_drop_lo, estats->brb_truncate_lo);
		printk(KERN_DEBUG "tstats: checksum_discard %u  "
			"packets_too_big_discard %lu  no_buff_discard %lu  "
			"mac_discard %u  mac_filter_discard %u  "
			"xxoverflow_discard %u  brb_truncate_discard %u  "
			"ttl0_discard %u\n",
		       le32_to_cpu(old_tclient->checksum_discard),
		       bnx2x_hilo(&qstats->etherstatsoverrsizepkts_hi),
		       bnx2x_hilo(&qstats->no_buff_discard_hi),
		       estats->mac_discard, estats->mac_filter_discard,
		       estats->xxoverflow_discard,
		       estats->brb_truncate_discard,
		       le32_to_cpu(old_tclient->ttl0_discard));

		for_each_queue(bp, i) {
			printk(KERN_DEBUG "[%d]: %lu\t%lu\t%lu\n", i,
			       bnx2x_fp(bp, i, tx_pkt),
			       bnx2x_fp(bp, i, rx_pkt),
			       bnx2x_fp(bp, i, rx_calls));
		}
	}

	bnx2x_hw_stats_post(bp);
	bnx2x_storm_stats_post(bp);
}
static void bnx2x_port_stats_stop(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	u32 opcode;
	int loader_idx = PMF_DMAE_C(bp);
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	bp->executer_idx = 0;

	opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
		  DMAE_CMD_C_ENABLE |
		  DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		  DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		  DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		  (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		  (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));

	if (bp->port.port_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		if (bp->func_stx)
			dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
		else
			dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
		dmae->dst_addr_lo = bp->port.port_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct host_port_stats) >> 2;
		if (bp->func_stx) {
			dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
			dmae->comp_addr_hi = 0;
			dmae->comp_val = 1;
		} else {
			dmae->comp_addr_lo =
				U64_LO(bnx2x_sp_mapping(bp, stats_comp));
			dmae->comp_addr_hi =
				U64_HI(bnx2x_sp_mapping(bp, stats_comp));
			dmae->comp_val = DMAE_COMP_VAL;

			*stats_comp = 0;
		}
	}

	if (bp->func_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
		dmae->dst_addr_lo = bp->func_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct host_func_stats) >> 2;
		dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
		dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
		dmae->comp_val = DMAE_COMP_VAL;

		*stats_comp = 0;
	}
}
static void bnx2x_stats_stop(struct bnx2x *bp)
{
	int update = 0;

	bnx2x_stats_comp(bp);

	if (bp->port.pmf)
		update = (bnx2x_hw_stats_update(bp) == 0);

	update |= (bnx2x_storm_stats_update(bp) == 0);

	if (update) {
		bnx2x_net_stats_update(bp);

		if (bp->port.pmf)
			bnx2x_port_stats_stop(bp);

		bnx2x_hw_stats_post(bp);
		bnx2x_stats_comp(bp);
	}
}
static void bnx2x_stats_do_nothing(struct bnx2x *bp)
{
}

static const struct {
	void (*action)(struct bnx2x *bp);
	enum bnx2x_stats_state next_state;
} bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
/* state	event	*/
{
/* DISABLED	PMF	*/ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
/*		LINK_UP	*/ {bnx2x_stats_start,      STATS_STATE_ENABLED},
/*		UPDATE	*/ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
/*		STOP	*/ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
},
{
/* ENABLED	PMF	*/ {bnx2x_stats_pmf_start,  STATS_STATE_ENABLED},
/*		LINK_UP	*/ {bnx2x_stats_restart,    STATS_STATE_ENABLED},
/*		UPDATE	*/ {bnx2x_stats_update,     STATS_STATE_ENABLED},
/*		STOP	*/ {bnx2x_stats_stop,       STATS_STATE_DISABLED}
}
};
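
/*
 * Statistics handling is a small finite state machine: the table above
 * is indexed by the current state (DISABLED/ENABLED) and the incoming
 * event (PMF change, link up, periodic update, stop), and yields both
 * the handler to run and the state to move to.  bnx2x_stats_handle()
 * below is the single dispatch point.
 */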
static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
{
	enum bnx2x_stats_state state = bp->stats_state;

	bnx2x_stats_stm[state][event].action(bp);
	bp->stats_state = bnx2x_stats_stm[state][event].next_state;

	if ((event != STATS_EVENT_UPDATE) || (bp->msglevel & NETIF_MSG_TIMER))
		DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
		   state, event, bp->stats_state);
}
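
/*
 * bnx2x_timer() below also carries the driver/MCP heartbeat: every tick
 * the driver writes an incremented drv_pulse sequence into shared
 * memory and reads back the management firmware's mcp_pulse; as the
 * in-code comment notes, the two sequences may differ by at most one,
 * so any larger delta is reported as a lost heartbeat.
 */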
static void bnx2x_timer(unsigned long data)
{
	struct bnx2x *bp = (struct bnx2x *) data;

	if (!netif_running(bp->dev))
		return;

	if (atomic_read(&bp->intr_sem) != 0)
		goto timer_restart;

	if (poll) {
		struct bnx2x_fastpath *fp = &bp->fp[0];
		int rc;

		bnx2x_tx_int(fp);
		rc = bnx2x_rx_int(fp, 1000);
	}

	if (!BP_NOMCP(bp)) {
		int func = BP_FUNC(bp);
		u32 drv_pulse;
		u32 mcp_pulse;

		++bp->fw_drv_pulse_wr_seq;
		bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
		/* TBD - add SYSTEM_TIME */
		drv_pulse = bp->fw_drv_pulse_wr_seq;
		SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);

		mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
			     MCP_PULSE_SEQ_MASK);
		/* The delta between driver pulse and mcp response
		 * should be 1 (before mcp response) or 0 (after mcp response)
		 */
		if ((drv_pulse != mcp_pulse) &&
		    (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
			/* someone lost a heartbeat... */
			BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
				  drv_pulse, mcp_pulse);
		}
	}

	if ((bp->state == BNX2X_STATE_OPEN) ||
	    (bp->state == BNX2X_STATE_DISABLED))
		bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);

timer_restart:
	mod_timer(&bp->timer, jiffies + bp->current_interval);
}
/* end of Statistics */

/* nic init */

/*
 * nic init service functions
 */

static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
{
	int port = BP_PORT(bp);

	bnx2x_init_fill(bp, USTORM_INTMEM_ADDR +
			USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
			sizeof(struct ustorm_status_block)/4);
	bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR +
			CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
			sizeof(struct cstorm_status_block)/4);
}
static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
			  dma_addr_t mapping, int sb_id)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int index;
	u64 section;

	/* USTORM */
	section = ((u64)mapping) + offsetof(struct host_status_block,
					    u_status_block);
	sb->u_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_USTRORM_INTMEM +
	       USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
	REG_WR(bp, BAR_USTRORM_INTMEM +
	       ((USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_USTRORM_INTMEM + FP_USB_FUNC_OFF +
		USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);

	for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_USTRORM_INTMEM +
			 USTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);

	/* CSTORM */
	section = ((u64)mapping) + offsetof(struct host_status_block,
					    c_status_block);
	sb->c_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       ((CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
		CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);

	for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);

	bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
}
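
/*
 * Each fastpath status block has a USTORM half (Rx indices) and a
 * CSTORM half (Tx indices).  bnx2x_init_sb() points both halves at the
 * DMA-able host copy and then writes 1 to every SB_HC_DISABLE slot,
 * which appears to leave interrupt generation off for every index
 * until bnx2x_update_coalesce() programs real timeouts further down.
 */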
static void bnx2x_zero_def_sb(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);

	bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR +
			TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
			sizeof(struct tstorm_def_status_block)/4);
	bnx2x_init_fill(bp, USTORM_INTMEM_ADDR +
			USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
			sizeof(struct ustorm_def_status_block)/4);
	bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR +
			CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
			sizeof(struct cstorm_def_status_block)/4);
	bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR +
			XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
			sizeof(struct xstorm_def_status_block)/4);
}
static void bnx2x_init_def_sb(struct bnx2x *bp,
			      struct host_def_status_block *def_sb,
			      dma_addr_t mapping, int sb_id)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int index, val, reg_offset;
	u64 section;

	/* ATTN */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    atten_status_block);
	def_sb->atten_status_block.status_block_id = sb_id;

	bp->attn_state = 0;

	reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
			     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);

	for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
		bp->attn_group[index].sig[0] = REG_RD(bp,
						      reg_offset + 0x10*index);
		bp->attn_group[index].sig[1] = REG_RD(bp,
					       reg_offset + 0x4 + 0x10*index);
		bp->attn_group[index].sig[2] = REG_RD(bp,
					       reg_offset + 0x8 + 0x10*index);
		bp->attn_group[index].sig[3] = REG_RD(bp,
					       reg_offset + 0xc + 0x10*index);
	}

	reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
			     HC_REG_ATTN_MSG0_ADDR_L);

	REG_WR(bp, reg_offset, U64_LO(section));
	REG_WR(bp, reg_offset + 4, U64_HI(section));

	reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);

	val = REG_RD(bp, reg_offset);
	val |= sb_id;
	REG_WR(bp, reg_offset, val);

	/* USTORM */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    u_def_status_block);
	def_sb->u_def_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_USTRORM_INTMEM +
	       USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
	REG_WR(bp, BAR_USTRORM_INTMEM +
	       ((USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_USTRORM_INTMEM + DEF_USB_FUNC_OFF +
		USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);

	for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_USTRORM_INTMEM +
			 USTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);

	/* CSTORM */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    c_def_status_block);
	def_sb->c_def_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       ((CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
		CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);

	for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);

	/* TSTORM */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    t_def_status_block);
	def_sb->t_def_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
		TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);

	for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_TSTRORM_INTMEM +
			 TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);

	/* XSTORM */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    x_def_status_block);
	def_sb->x_def_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_XSTRORM_INTMEM +
	       XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
	REG_WR(bp, BAR_XSTRORM_INTMEM +
	       ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
		XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);

	for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_XSTRORM_INTMEM +
			 XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);

	bp->stats_pending = 0;
	bp->set_mac_pending = 0;

	bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
}
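
/*
 * bnx2x_update_coalesce() translates the user-visible rx_ticks/tx_ticks
 * (microseconds, as set via ethtool) into the host-coalescing timeout
 * registers; the /12 suggests the HC timer counts in 12 usec units.  A
 * value of 0 keeps the corresponding SB_HC_DISABLE flag set, i.e.
 * coalescing stays off for that index.
 */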
static void bnx2x_update_coalesce(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int i;

	for_each_queue(bp, i) {
		int sb_id = bp->fp[i].sb_id;

		/* HC_INDEX_U_ETH_RX_CQ_CONS */
		REG_WR8(bp, BAR_USTRORM_INTMEM +
			USTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
						    U_SB_ETH_RX_CQ_INDEX),
			bp->rx_ticks/12);
		REG_WR16(bp, BAR_USTRORM_INTMEM +
			 USTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
						     U_SB_ETH_RX_CQ_INDEX),
			 bp->rx_ticks ? 0 : 1);

		/* HC_INDEX_C_ETH_TX_CQ_CONS */
		REG_WR8(bp, BAR_CSTRORM_INTMEM +
			CSTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
						    C_SB_ETH_TX_CQ_INDEX),
			bp->tx_ticks/12);
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
						     C_SB_ETH_TX_CQ_INDEX),
			 bp->tx_ticks ? 0 : 1);
	}
}
static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
				       struct bnx2x_fastpath *fp, int last)
{
	int i;

	for (i = 0; i < last; i++) {
		struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]);
		struct sk_buff *skb = rx_buf->skb;

		if (skb == NULL) {
			DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
			continue;
		}

		if (fp->tpa_state[i] == BNX2X_TPA_START)
			pci_unmap_single(bp->pdev,
					 pci_unmap_addr(rx_buf, mapping),
					 bp->rx_buf_size, PCI_DMA_FROMDEVICE);

		dev_kfree_skb(skb);
		rx_buf->skb = NULL;
	}
}
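
/*
 * bnx2x_init_rx_rings() below builds three rings per Rx queue: the SGE
 * ring (scatter pages used by TPA aggregation), the Rx BD ring (one
 * buffer descriptor per skb) and the RCQ completion ring.  Each ring
 * is a chain of pages whose last element is a "next page" pointer back
 * into the chain, which is what the *_CNT * i - 1/- 2 index arithmetic
 * below is selecting.
 */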
static void bnx2x_init_rx_rings(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
					      ETH_MAX_AGGREGATION_QUEUES_E1H;
	u16 ring_prod, cqe_ring_prod;
	int i, j;

	bp->rx_buf_size = bp->dev->mtu + ETH_OVREHEAD + BNX2X_RX_ALIGN;
	DP(NETIF_MSG_IFUP,
	   "mtu %d  rx_buf_size %d\n", bp->dev->mtu, bp->rx_buf_size);

	if (bp->flags & TPA_ENABLE_FLAG) {

		for_each_rx_queue(bp, j) {
			struct bnx2x_fastpath *fp = &bp->fp[j];

			for (i = 0; i < max_agg_queues; i++) {
				fp->tpa_pool[i].skb =
				   netdev_alloc_skb(bp->dev, bp->rx_buf_size);
				if (!fp->tpa_pool[i].skb) {
					BNX2X_ERR("Failed to allocate TPA "
						  "skb pool for queue[%d] - "
						  "disabling TPA on this "
						  "queue!\n", j);
					bnx2x_free_tpa_pool(bp, fp, i);
					fp->disable_tpa = 1;
					break;
				}
				pci_unmap_addr_set((struct sw_rx_bd *)
							&fp->tpa_pool[i],
						   mapping, 0);
				fp->tpa_state[i] = BNX2X_TPA_STOP;
			}
		}
	}

	for_each_rx_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		fp->rx_bd_cons = 0;
		fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
		fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX;

		/* "next page" elements initialization */
		/* SGE ring */
		for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
			struct eth_rx_sge *sge;

			sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
			sge->addr_hi =
				cpu_to_le32(U64_HI(fp->rx_sge_mapping +
					BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
			sge->addr_lo =
				cpu_to_le32(U64_LO(fp->rx_sge_mapping +
					BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
		}

		bnx2x_init_sge_ring_bit_mask(fp);

		/* RX BD ring */
		for (i = 1; i <= NUM_RX_RINGS; i++) {
			struct eth_rx_bd *rx_bd;

			rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
			rx_bd->addr_hi =
				cpu_to_le32(U64_HI(fp->rx_desc_mapping +
					    BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
			rx_bd->addr_lo =
				cpu_to_le32(U64_LO(fp->rx_desc_mapping +
					    BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
		}

		/* CQ ring */
		for (i = 1; i <= NUM_RCQ_RINGS; i++) {
			struct eth_rx_cqe_next_page *nextpg;

			nextpg = (struct eth_rx_cqe_next_page *)
				&fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
			nextpg->addr_hi =
				cpu_to_le32(U64_HI(fp->rx_comp_mapping +
					   BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
			nextpg->addr_lo =
				cpu_to_le32(U64_LO(fp->rx_comp_mapping +
					   BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
		}

		/* Allocate SGEs and initialize the ring elements */
		for (i = 0, ring_prod = 0;
		     i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {

			if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
				BNX2X_ERR("was only able to allocate "
					  "%d rx sges\n", i);
				BNX2X_ERR("disabling TPA for queue[%d]\n", j);
				/* Cleanup already allocated elements */
				bnx2x_free_rx_sge_range(bp, fp, ring_prod);
				bnx2x_free_tpa_pool(bp, fp, max_agg_queues);
				fp->disable_tpa = 1;
				ring_prod = 0;
				break;
			}
			ring_prod = NEXT_SGE_IDX(ring_prod);
		}
		fp->rx_sge_prod = ring_prod;

		/* Allocate BDs and initialize BD ring */
		fp->rx_comp_cons = 0;
		cqe_ring_prod = ring_prod = 0;
		for (i = 0; i < bp->rx_ring_size; i++) {
			if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
				BNX2X_ERR("was only able to allocate "
					  "%d rx skbs on queue[%d]\n", i, j);
				fp->eth_q_stats.rx_skb_alloc_failed++;
				break;
			}
			ring_prod = NEXT_RX_IDX(ring_prod);
			cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
			WARN_ON(ring_prod <= i);
		}

		fp->rx_bd_prod = ring_prod;
		/* must not have more available CQEs than BDs */
		fp->rx_comp_prod = min((u16)(NUM_RCQ_RINGS*RCQ_DESC_CNT),
				       cqe_ring_prod);
		fp->rx_pkt = fp->rx_calls = 0;

		/* Warning!
		 * this will generate an interrupt (to the TSTORM)
		 * must only be done after chip is initialized
		 */
		bnx2x_update_rx_prod(bp, fp, ring_prod, fp->rx_comp_prod,
				     fp->rx_sge_prod);
		if (j != 0)
			continue;

		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
		       U64_LO(fp->rx_comp_mapping));
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
		       U64_HI(fp->rx_comp_mapping));
	}
}
static void bnx2x_init_tx_ring(struct bnx2x *bp)
{
	int i, j;

	for_each_tx_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		for (i = 1; i <= NUM_TX_RINGS; i++) {
			struct eth_tx_bd *tx_bd =
				&fp->tx_desc_ring[TX_DESC_CNT * i - 1];

			tx_bd->addr_hi =
				cpu_to_le32(U64_HI(fp->tx_desc_mapping +
					    BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
			tx_bd->addr_lo =
				cpu_to_le32(U64_LO(fp->tx_desc_mapping +
					    BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
		}

		fp->tx_pkt_prod = 0;
		fp->tx_pkt_cons = 0;
		fp->tx_bd_prod = 0;
		fp->tx_bd_cons = 0;
		fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
		fp->tx_pkt = 0;
	}
}
static void bnx2x_init_sp_ring(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);

	spin_lock_init(&bp->spq_lock);

	bp->spq_left = MAX_SPQ_PENDING;
	bp->spq_prod_idx = 0;
	bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
	bp->spq_prod_bd = bp->spq;
	bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;

	REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
	       U64_LO(bp->spq_mapping));
	REG_WR(bp,
	       XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
	       U64_HI(bp->spq_mapping));

	REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
	       bp->spq_prod_idx);
}
static void bnx2x_init_context(struct bnx2x *bp)
{
	int i;

	for_each_queue(bp, i) {
		struct eth_context *context = bnx2x_sp(bp, context[i].eth);
		struct bnx2x_fastpath *fp = &bp->fp[i];
		u8 cl_id = fp->cl_id;
		u8 sb_id = fp->sb_id;

		context->ustorm_st_context.common.sb_index_numbers =
						BNX2X_RX_SB_INDEX_NUM;
		context->ustorm_st_context.common.clientId = cl_id;
		context->ustorm_st_context.common.status_block_id = sb_id;
		context->ustorm_st_context.common.flags =
			(USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT |
			 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_STATISTICS);
		context->ustorm_st_context.common.statistics_counter_id =
						cl_id;
		context->ustorm_st_context.common.mc_alignment_log_size =
						BNX2X_RX_ALIGN_SHIFT;
		context->ustorm_st_context.common.bd_buff_size =
						bp->rx_buf_size;
		context->ustorm_st_context.common.bd_page_base_hi =
						U64_HI(fp->rx_desc_mapping);
		context->ustorm_st_context.common.bd_page_base_lo =
						U64_LO(fp->rx_desc_mapping);
		if (!fp->disable_tpa) {
			context->ustorm_st_context.common.flags |=
				(USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA |
				 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_SGE_RING);
			context->ustorm_st_context.common.sge_buff_size =
				(u16)min((u32)SGE_PAGE_SIZE*PAGES_PER_SGE,
					 (u32)0xffff);
			context->ustorm_st_context.common.sge_page_base_hi =
						U64_HI(fp->rx_sge_mapping);
			context->ustorm_st_context.common.sge_page_base_lo =
						U64_LO(fp->rx_sge_mapping);
		}

		context->ustorm_ag_context.cdu_usage =
			CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
					       CDU_REGION_NUMBER_UCM_AG,
					       ETH_CONNECTION_TYPE);

		context->xstorm_st_context.tx_bd_page_base_hi =
						U64_HI(fp->tx_desc_mapping);
		context->xstorm_st_context.tx_bd_page_base_lo =
						U64_LO(fp->tx_desc_mapping);
		context->xstorm_st_context.db_data_addr_hi =
						U64_HI(fp->tx_prods_mapping);
		context->xstorm_st_context.db_data_addr_lo =
						U64_LO(fp->tx_prods_mapping);
		context->xstorm_st_context.statistics_data = (cl_id |
				XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);
		context->cstorm_st_context.sb_index_number =
						C_SB_ETH_TX_CQ_INDEX;
		context->cstorm_st_context.status_block_id = sb_id;

		context->xstorm_ag_context.cdu_reserved =
			CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
					       CDU_REGION_NUMBER_XCM_AG,
					       ETH_CONNECTION_TYPE);
	}
}
static void bnx2x_init_ind_table(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	int i;

	if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
		return;

	DP(NETIF_MSG_IFUP,
	   "Initializing indirection table  multi_mode %d\n", bp->multi_mode);
	for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
		REG_WR8(bp, BAR_TSTRORM_INTMEM +
			TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
			bp->fp->cl_id + (i % bp->num_rx_queues));
}
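
/*
 * The RSS indirection table is filled round-robin: slot i maps to
 * client id (base cl_id + i % num_rx_queues), so the TSTORM spreads
 * hash buckets evenly over the active Rx queues.  With 4 Rx queues the
 * table simply repeats the pattern cl_id+0, cl_id+1, cl_id+2, cl_id+3.
 */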
static void bnx2x_set_client_config(struct bnx2x *bp)
{
	struct tstorm_eth_client_config tstorm_client = {0};
	int port = BP_PORT(bp);
	int i;

	tstorm_client.mtu = bp->dev->mtu;
	tstorm_client.config_flags =
				(TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE |
				 TSTORM_ETH_CLIENT_CONFIG_E1HOV_REM_ENABLE);
#ifdef BCM_VLAN
	if (bp->rx_mode && bp->vlgrp && (bp->flags & HW_VLAN_RX_FLAG)) {
		tstorm_client.config_flags |=
				TSTORM_ETH_CLIENT_CONFIG_VLAN_REM_ENABLE;
		DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
	}
#endif

	if (bp->flags & TPA_ENABLE_FLAG) {
		tstorm_client.max_sges_for_packet =
			SGE_PAGE_ALIGN(tstorm_client.mtu) >> SGE_PAGE_SHIFT;
		tstorm_client.max_sges_for_packet =
			((tstorm_client.max_sges_for_packet +
			  PAGES_PER_SGE - 1) & (~(PAGES_PER_SGE - 1))) >>
			PAGES_PER_SGE_SHIFT;

		tstorm_client.config_flags |=
				TSTORM_ETH_CLIENT_CONFIG_ENABLE_SGE_RING;
	}

	for_each_queue(bp, i) {
		tstorm_client.statistics_counter_id = bp->fp[i].cl_id;

		REG_WR(bp, BAR_TSTRORM_INTMEM +
		       TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
		       ((u32 *)&tstorm_client)[0]);
		REG_WR(bp, BAR_TSTRORM_INTMEM +
		       TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
		       ((u32 *)&tstorm_client)[1]);
	}

	DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
	   ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
}
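
/*
 * max_sges_for_packet above is the page count of an MTU-sized frame,
 * rounded up to a whole number of SGE entries.  A worked example,
 * assuming 4 KiB SGE pages and PAGES_PER_SGE = 2: an mtu of 9000
 * aligns to 12 KiB = 3 pages, which rounds up to 4 pages = 2 two-page
 * SGEs.
 */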
static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
{
	struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
	int mode = bp->rx_mode;
	int mask = (1 << BP_L_ID(bp));
	int func = BP_FUNC(bp);
	int i;

	DP(NETIF_MSG_IFUP, "rx mode %d  mask 0x%x\n", mode, mask);

	switch (mode) {
	case BNX2X_RX_MODE_NONE: /* no Rx */
		tstorm_mac_filter.ucast_drop_all = mask;
		tstorm_mac_filter.mcast_drop_all = mask;
		tstorm_mac_filter.bcast_drop_all = mask;
		break;

	case BNX2X_RX_MODE_NORMAL:
		tstorm_mac_filter.bcast_accept_all = mask;
		break;

	case BNX2X_RX_MODE_ALLMULTI:
		tstorm_mac_filter.mcast_accept_all = mask;
		tstorm_mac_filter.bcast_accept_all = mask;
		break;

	case BNX2X_RX_MODE_PROMISC:
		tstorm_mac_filter.ucast_accept_all = mask;
		tstorm_mac_filter.mcast_accept_all = mask;
		tstorm_mac_filter.bcast_accept_all = mask;
		break;

	default:
		BNX2X_ERR("BAD rx mode (%d)\n", mode);
		break;
	}

	for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
		REG_WR(bp, BAR_TSTRORM_INTMEM +
		       TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
		       ((u32 *)&tstorm_mac_filter)[i]);

/*		DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
		   ((u32 *)&tstorm_mac_filter)[i]); */
	}

	if (mode != BNX2X_RX_MODE_NONE)
		bnx2x_set_client_config(bp);
}
static void bnx2x_init_internal_common(struct bnx2x *bp)
{
	int i;

	if (bp->flags & TPA_ENABLE_FLAG) {
		struct tstorm_eth_tpa_exist tpa = {0};

		tpa.tpa_exist = 1;

		REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET,
		       ((u32 *)&tpa)[0]);
		REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET + 4,
		       ((u32 *)&tpa)[1]);
	}

	/* Zero this manually as its initialization is
	   currently missing in the initTool */
	for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_AGG_DATA_OFFSET + i * 4, 0);
}
static void bnx2x_init_internal_port(struct bnx2x *bp)
{
	int port = BP_PORT(bp);

	REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
	REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
	REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
}
/* Calculates the sum of vn_min_rates.
   It's needed for further normalizing of the min_rates.
   Returns:
     sum of vn_min_rates.
       or
     0 - if all the min_rates are 0.
   In the latter case the fairness algorithm should be deactivated.
   If not all min_rates are zero then those that are zeroes will be set to 1.
 */
static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
{
	int all_zero = 1;
	int port = BP_PORT(bp);
	int vn;

	bp->vn_weight_sum = 0;
	for (vn = VN_0; vn < E1HVN_MAX; vn++) {
		int func = 2*vn + port;
		u32 vn_cfg =
			SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
		u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
				   FUNC_MF_CFG_MIN_BW_SHIFT) * 100;

		/* Skip hidden vns */
		if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
			continue;

		/* If min rate is zero - set it to 1 */
		if (!vn_min_rate)
			vn_min_rate = DEF_MIN_RATE;
		else
			all_zero = 0;

		bp->vn_weight_sum += vn_min_rate;
	}

	/* ... only if all min rates are zeros - disable fairness */
	if (all_zero)
		bp->vn_weight_sum = 0;
}
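
/*
 * The configured MIN_BW field is scaled by 100 above, so e.g. two
 * visible VNs configured for 40 and 60 yield weights 4000 and 6000 and
 * a vn_weight_sum of 10000; bnx2x_init_vn_minmax() can then normalize
 * each VN's share against that sum.  A VN left at 0 is bumped to
 * DEF_MIN_RATE so it is never starved outright, unless every VN is 0,
 * in which case fairness is switched off entirely.
 */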
static void bnx2x_init_internal_func(struct bnx2x *bp)
{
	struct tstorm_eth_function_common_config tstorm_config = {0};
	struct stats_indication_flags stats_flags = {0};
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int i, j;
	u32 offset;
	u16 max_agg_size;

	if (is_multi(bp)) {
		tstorm_config.config_flags = MULTI_FLAGS(bp);
		tstorm_config.rss_result_mask = MULTI_MASK;
	}
	if (IS_E1HMF(bp))
		tstorm_config.config_flags |=
				TSTORM_ETH_FUNCTION_COMMON_CONFIG_E1HOV_IN_CAM;

	tstorm_config.leading_client_id = BP_L_ID(bp);

	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
	       (*(u32 *)&tstorm_config));

	bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
	bnx2x_set_storm_rx_mode(bp);

	for_each_queue(bp, i) {
		u8 cl_id = bp->fp[i].cl_id;

		/* reset xstorm per client statistics */
		offset = BAR_XSTRORM_INTMEM +
			 XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
		for (j = 0;
		     j < sizeof(struct xstorm_per_client_stats) / 4; j++)
			REG_WR(bp, offset + j*4, 0);

		/* reset tstorm per client statistics */
		offset = BAR_TSTRORM_INTMEM +
			 TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
		for (j = 0;
		     j < sizeof(struct tstorm_per_client_stats) / 4; j++)
			REG_WR(bp, offset + j*4, 0);

		/* reset ustorm per client statistics */
		offset = BAR_USTRORM_INTMEM +
			 USTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
		for (j = 0;
		     j < sizeof(struct ustorm_per_client_stats) / 4; j++)
			REG_WR(bp, offset + j*4, 0);
	}

	/* Init statistics related context */
	stats_flags.collect_eth = 1;

	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
	       ((u32 *)&stats_flags)[0]);
	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
	       ((u32 *)&stats_flags)[1]);

	REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
	       ((u32 *)&stats_flags)[0]);
	REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
	       ((u32 *)&stats_flags)[1]);

	REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func),
	       ((u32 *)&stats_flags)[0]);
	REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func) + 4,
	       ((u32 *)&stats_flags)[1]);

	REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
	       ((u32 *)&stats_flags)[0]);
	REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
	       ((u32 *)&stats_flags)[1]);

	REG_WR(bp, BAR_XSTRORM_INTMEM +
	       XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
	       U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
	REG_WR(bp, BAR_XSTRORM_INTMEM +
	       XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
	       U64_HI(bnx2x_sp_mapping(bp, fw_stats)));

	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
	       U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
	       U64_HI(bnx2x_sp_mapping(bp, fw_stats)));

	REG_WR(bp, BAR_USTRORM_INTMEM +
	       USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
	       U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
	REG_WR(bp, BAR_USTRORM_INTMEM +
	       USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
	       U64_HI(bnx2x_sp_mapping(bp, fw_stats)));

	if (CHIP_IS_E1H(bp)) {
		REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
			IS_E1HMF(bp));
		REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
			IS_E1HMF(bp));
		REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
			IS_E1HMF(bp));
		REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
			IS_E1HMF(bp));

		REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
			 bp->e1hov);
	}

	/* Init CQ ring mapping and aggregation size, the FW limit is 8 frags */
	max_agg_size =
		min((u32)(min((u32)8, (u32)MAX_SKB_FRAGS) *
			  SGE_PAGE_SIZE * PAGES_PER_SGE),
		    (u32)0xffff);
	for_each_rx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id),
		       U64_LO(fp->rx_comp_mapping));
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id) + 4,
		       U64_HI(fp->rx_comp_mapping));

		REG_WR16(bp, BAR_USTRORM_INTMEM +
			 USTORM_MAX_AGG_SIZE_OFFSET(port, fp->cl_id),
			 max_agg_size);
	}

	/* dropless flow control */
	if (CHIP_IS_E1H(bp)) {
		struct ustorm_eth_rx_pause_data_e1h rx_pause = {0};

		rx_pause.bd_thr_low = 250;
		rx_pause.cqe_thr_low = 250;
		rx_pause.cos = 1;
		rx_pause.sge_thr_low = 0;
		rx_pause.bd_thr_high = 350;
		rx_pause.cqe_thr_high = 350;
		rx_pause.sge_thr_high = 0;

		for_each_rx_queue(bp, i) {
			struct bnx2x_fastpath *fp = &bp->fp[i];

			if (!fp->disable_tpa) {
				rx_pause.sge_thr_low = 150;
				rx_pause.sge_thr_high = 250;
			}

			offset = BAR_USTRORM_INTMEM +
				 USTORM_ETH_RING_PAUSE_DATA_OFFSET(port,
								   fp->cl_id);
			for (j = 0;
			     j < sizeof(struct ustorm_eth_rx_pause_data_e1h)/4;
			     j++)
				REG_WR(bp, offset + j*4,
				       ((u32 *)&rx_pause)[j]);
		}
	}

	memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));

	/* Init rate shaping and fairness contexts */
	if (IS_E1HMF(bp)) {
		int vn;

		/* During init there is no active link
		   Until link is up, set link rate to 10Gbps */
		bp->link_vars.line_speed = SPEED_10000;
		bnx2x_init_port_minmax(bp);

		bnx2x_calc_vn_weight_sum(bp);

		for (vn = VN_0; vn < E1HVN_MAX; vn++)
			bnx2x_init_vn_minmax(bp, 2*vn + port);

		/* Enable rate shaping and fairness */
		bp->cmng.flags.cmng_enables =
					CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
		if (bp->vn_weight_sum)
			bp->cmng.flags.cmng_enables |=
					CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
		else
			DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
			   "  fairness will be disabled\n");
	} else {
		/* rate shaping and fairness are disabled */
		DP(NETIF_MSG_IFUP,
		   "single function mode  minmax will be disabled\n");
	}

	/* Store it to internal memory */
	if (bp->port.pmf)
		for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
			REG_WR(bp, BAR_XSTRORM_INTMEM +
			       XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
			       ((u32 *)(&bp->cmng))[i]);
}
static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
{
	switch (load_code) {
	case FW_MSG_CODE_DRV_LOAD_COMMON:
		bnx2x_init_internal_common(bp);
		/* no break */

	case FW_MSG_CODE_DRV_LOAD_PORT:
		bnx2x_init_internal_port(bp);
		/* no break */

	case FW_MSG_CODE_DRV_LOAD_FUNCTION:
		bnx2x_init_internal_func(bp);
		break;

	default:
		BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
		break;
	}
}
static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
{
	int i;

	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		fp->bp = bp;
		fp->state = BNX2X_FP_STATE_CLOSED;
		fp->index = i;
		fp->cl_id = BP_L_ID(bp) + i;
		fp->sb_id = fp->cl_id;
		DP(NETIF_MSG_IFUP,
		   "queue[%d]:  bnx2x_init_sb(%p,%p)  cl_id %d  sb %d\n",
		   i, bp, fp->status_blk, fp->cl_id, fp->sb_id);
		bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping,
			      fp->sb_id);
		bnx2x_update_fpsb_idx(fp);
	}

	/* ensure status block indices were read */
	rmb();

	bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping,
			  DEF_SB_ID);
	bnx2x_update_dsb_idx(bp);
	bnx2x_update_coalesce(bp);
	bnx2x_init_rx_rings(bp);
	bnx2x_init_tx_ring(bp);
	bnx2x_init_sp_ring(bp);
	bnx2x_init_context(bp);
	bnx2x_init_internal(bp, load_code);
	bnx2x_init_ind_table(bp);
	bnx2x_stats_init(bp);

	/* At this point, we are ready for interrupts */
	atomic_set(&bp->intr_sem, 0);

	/* flush all before enabling interrupts */
	mb();
	mmiowb();

	bnx2x_int_enable(bp);
}
/* end of nic init */

/*
 * gzip service functions
 */

static int bnx2x_gunzip_init(struct bnx2x *bp)
{
	bp->gunzip_buf = pci_alloc_consistent(bp->pdev, FW_BUF_SIZE,
					      &bp->gunzip_mapping);
	if (bp->gunzip_buf == NULL)
		goto gunzip_nomem1;

	bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
	if (bp->strm == NULL)
		goto gunzip_nomem2;

	bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
				      GFP_KERNEL);
	if (bp->strm->workspace == NULL)
		goto gunzip_nomem3;

	return 0;

gunzip_nomem3:
	kfree(bp->strm);
	bp->strm = NULL;

gunzip_nomem2:
	pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
			    bp->gunzip_mapping);
	bp->gunzip_buf = NULL;

gunzip_nomem1:
	printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for"
	       " decompression\n", bp->dev->name);
	return -ENOMEM;
}
static void bnx2x_gunzip_end(struct bnx2x *bp)
{
	kfree(bp->strm->workspace);

	kfree(bp->strm);
	bp->strm = NULL;

	if (bp->gunzip_buf) {
		pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
				    bp->gunzip_mapping);
		bp->gunzip_buf = NULL;
	}
}
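
/*
 * bnx2x_gunzip() below walks a gzip member by hand, per RFC 1952: it
 * checks the two magic bytes (0x1f 0x8b) and CM == Z_DEFLATED (8),
 * skips the fixed 10-byte header plus the NUL-terminated file name if
 * the FNAME flag (bit 3 of FLG) is set, and then inflates the raw
 * deflate payload; the negative windowBits passed to
 * zlib_inflateInit2() is zlib's convention for "no zlib/gzip framing".
 */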
static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len)
{
	int n, rc;

	/* check gzip header */
	if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED)) {
		BNX2X_ERR("Bad gzip header\n");
		return -EINVAL;
	}

	n = 10;

#define FNAME				0x8

	if (zbuf[3] & FNAME)
		while ((zbuf[n++] != 0) && (n < len));

	bp->strm->next_in = (typeof(bp->strm->next_in))zbuf + n;
	bp->strm->avail_in = len - n;
	bp->strm->next_out = bp->gunzip_buf;
	bp->strm->avail_out = FW_BUF_SIZE;

	rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
	if (rc != Z_OK)
		return rc;

	rc = zlib_inflate(bp->strm, Z_FINISH);
	if ((rc != Z_OK) && (rc != Z_STREAM_END))
		printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
		       bp->dev->name, bp->strm->msg);

	bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
	if (bp->gunzip_outlen & 0x3)
		printk(KERN_ERR PFX "%s: Firmware decompression error:"
				    " gunzip_outlen (%d) not aligned\n",
		       bp->dev->name, bp->gunzip_outlen);
	bp->gunzip_outlen >>= 2;

	zlib_inflateEnd(bp->strm);

	if (rc == Z_STREAM_END)
		return 0;

	return rc;
}
/* nic load/unload */

/*
 * General service functions
 */

/* send a NIG loopback debug packet */
static void bnx2x_lb_pckt(struct bnx2x *bp)
{
	u32 wb_write[3];

	/* Ethernet source and destination addresses */
	wb_write[0] = 0x55555555;
	wb_write[1] = 0x55555555;
	wb_write[2] = 0x20;		/* SOP */
	REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);

	/* NON-IP protocol */
	wb_write[0] = 0x09000000;
	wb_write[1] = 0x55555555;
	wb_write[2] = 0x10;		/* EOP, eop_bvalid = 0 */
	REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
}
/* some of the internal memories
 * are not directly readable from the driver
 * to test them we send debug packets
 */
static int bnx2x_int_mem_test(struct bnx2x *bp)
{
	int factor;
	int count, i;
	u32 val = 0;

	if (CHIP_REV_IS_FPGA(bp))
		factor = 120;
	else if (CHIP_REV_IS_EMUL(bp))
		factor = 200;
	else
		factor = 1;

	DP(NETIF_MSG_HW, "start part1\n");

	/* Disable inputs of parser neighbor blocks */
	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
	REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
	REG_WR(bp, CFC_REG_DEBUG0, 0x1);
	REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);

	/* Write 0 to parser credits for CFC search request */
	REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);

	/* send Ethernet packet */
	bnx2x_lb_pckt(bp);

	/* TODO do i reset NIG statistic? */
	/* Wait until NIG register shows 1 packet of size 0x10 */
	count = 1000 * factor;
	while (count) {

		bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
		val = *bnx2x_sp(bp, wb_data[0]);
		if (val == 0x10)
			break;

		msleep(10);
		count--;
	}
	if (val != 0x10) {
		BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
		return -1;
	}

	/* Wait until PRS register shows 1 packet */
	count = 1000 * factor;
	while (count) {
		val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
		if (val == 1)
			break;

		msleep(10);
		count--;
	}
	if (val != 0x1) {
		BNX2X_ERR("PRS timeout  val = 0x%x\n", val);
		return -2;
	}

	/* Reset and init BRB, PRS */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
	msleep(50);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
	msleep(50);
	bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);

	DP(NETIF_MSG_HW, "part2\n");

	/* Disable inputs of parser neighbor blocks */
	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
	REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
	REG_WR(bp, CFC_REG_DEBUG0, 0x1);
	REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);

	/* Write 0 to parser credits for CFC search request */
	REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);

	/* send 10 Ethernet packets */
	for (i = 0; i < 10; i++)
		bnx2x_lb_pckt(bp);

	/* Wait until NIG register shows 10 + 1
	   packets of size 11*0x10 = 0xb0 */
	count = 1000 * factor;
	while (count) {

		bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
		val = *bnx2x_sp(bp, wb_data[0]);
		if (val == 0xb0)
			break;

		msleep(10);
		count--;
	}
	if (val != 0xb0) {
		BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
		return -3;
	}

	/* Wait until PRS register shows 2 packets */
	val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
	if (val != 2)
		BNX2X_ERR("PRS timeout  val = 0x%x\n", val);

	/* Write 1 to parser credits for CFC search request */
	REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);

	/* Wait until PRS register shows 3 packets */
	msleep(10 * factor);
	/* Wait until NIG register shows 1 packet of size 0x10 */
	val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
	if (val != 3)
		BNX2X_ERR("PRS timeout  val = 0x%x\n", val);

	/* clear NIG EOP FIFO */
	for (i = 0; i < 11; i++)
		REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
	val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
	if (val != 1) {
		BNX2X_ERR("clear of NIG failed\n");
		return -4;
	}

	/* Reset and init BRB, PRS, NIG */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
	msleep(50);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
	msleep(50);
	bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
#ifndef BCM_ISCSI
	/* set NIC mode */
	REG_WR(bp, PRS_REG_NIC_MODE, 1);
#endif

	/* Enable inputs of parser neighbor blocks */
	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
	REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
	REG_WR(bp, CFC_REG_DEBUG0, 0x0);
	REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);

	DP(NETIF_MSG_HW, "done\n");

	return 0; /* OK */
}
static void enable_blocks_attention(struct bnx2x *bp)
{
	REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
	REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
	REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
	REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
	REG_WR(bp, QM_REG_QM_INT_MASK, 0);
	REG_WR(bp, TM_REG_TM_INT_MASK, 0);
	REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
	REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
	REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
/*	REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
/*	REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
	REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
	REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
	REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
/*	REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
/*	REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
	REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
	REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
	REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
	REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
/*	REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
/*	REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
	if (CHIP_REV_IS_FPGA(bp))
		REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
	else
		REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
	REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
	REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
	REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
/*	REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
/*	REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
	REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
	REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
/*	REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
	REG_WR(bp, PBF_REG_PBF_INT_MASK, 0x18);		/* bit 3,4 masked */
}
static void bnx2x_reset_common(struct bnx2x *bp)
{
	/* reset_common */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
	       0xd3ffff7f);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
}
5512 static int bnx2x_init_common(struct bnx2x *bp)
5514 u32 val, i;
5516 DP(BNX2X_MSG_MCP, "starting common init func %d\n", BP_FUNC(bp));
5518 bnx2x_reset_common(bp);
5519 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
5520 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);
5522 bnx2x_init_block(bp, MISC_BLOCK, COMMON_STAGE);
5523 if (CHIP_IS_E1H(bp))
5524 REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp));
5526 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
5527 msleep(30);
5528 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);
5530 bnx2x_init_block(bp, PXP_BLOCK, COMMON_STAGE);
5531 if (CHIP_IS_E1(bp)) {
5532 /* enable HW interrupt from PXP on USDM overflow
5533 bit 16 on INT_MASK_0 */
5534 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5537 bnx2x_init_block(bp, PXP2_BLOCK, COMMON_STAGE);
5538 bnx2x_init_pxp(bp);
5540 #ifdef __BIG_ENDIAN
5541 REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
5542 REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
5543 REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
5544 REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
5545 REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
5546 /* make sure this value is 0 */
5547 REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);
5549 /* REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
5550 REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
5551 REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
5552 REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
5553 REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
5554 #endif
5556 REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
5557 #ifdef BCM_ISCSI
5558 REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
5559 REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
5560 REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
5561 #endif
5563 if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
5564 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
5566 /* let the HW do it's magic ... */
5567 msleep(100);
5568 /* finish PXP init */
5569 val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
5570 if (val != 1) {
5571 BNX2X_ERR("PXP2 CFG failed\n");
5572 return -EBUSY;
5574 val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
5575 if (val != 1) {
5576 BNX2X_ERR("PXP2 RD_INIT failed\n");
5577 return -EBUSY;
5580 REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
5581 REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
5583 bnx2x_init_block(bp, DMAE_BLOCK, COMMON_STAGE);
5585 /* clean the DMAE memory */
5586 bp->dmae_ready = 1;
5587 bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);
5589 bnx2x_init_block(bp, TCM_BLOCK, COMMON_STAGE);
5590 bnx2x_init_block(bp, UCM_BLOCK, COMMON_STAGE);
5591 bnx2x_init_block(bp, CCM_BLOCK, COMMON_STAGE);
5592 bnx2x_init_block(bp, XCM_BLOCK, COMMON_STAGE);
5594 bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
5595 bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
5596 bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
5597 bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
5599 bnx2x_init_block(bp, QM_BLOCK, COMMON_STAGE);
5600 /* soft reset pulse */
5601 REG_WR(bp, QM_REG_SOFT_RESET, 1);
5602 REG_WR(bp, QM_REG_SOFT_RESET, 0);
5604 #ifdef BCM_ISCSI
5605 bnx2x_init_block(bp, TIMERS_BLOCK, COMMON_STAGE);
5606 #endif
5608 bnx2x_init_block(bp, DQ_BLOCK, COMMON_STAGE);
5609 REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT);
5610 if (!CHIP_REV_IS_SLOW(bp)) {
5611 /* enable hw interrupt from doorbell Q */
5612 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5615 bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
5616 bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
5617 REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
5618 /* set NIC mode */
5619 REG_WR(bp, PRS_REG_NIC_MODE, 1);
5620 if (CHIP_IS_E1H(bp))
5621 REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));
5623 bnx2x_init_block(bp, TSDM_BLOCK, COMMON_STAGE);
5624 bnx2x_init_block(bp, CSDM_BLOCK, COMMON_STAGE);
5625 bnx2x_init_block(bp, USDM_BLOCK, COMMON_STAGE);
5626 bnx2x_init_block(bp, XSDM_BLOCK, COMMON_STAGE);
5628 bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0, STORM_INTMEM_SIZE(bp));
5629 bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0, STORM_INTMEM_SIZE(bp));
5630 bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0, STORM_INTMEM_SIZE(bp));
5631 bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0, STORM_INTMEM_SIZE(bp));
5633 bnx2x_init_block(bp, TSEM_BLOCK, COMMON_STAGE);
5634 bnx2x_init_block(bp, USEM_BLOCK, COMMON_STAGE);
5635 bnx2x_init_block(bp, CSEM_BLOCK, COMMON_STAGE);
5636 bnx2x_init_block(bp, XSEM_BLOCK, COMMON_STAGE);
5638 /* sync semi rtc */
5639 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5640 0x80000000);
5641 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
5642 0x80000000);
5644 bnx2x_init_block(bp, UPB_BLOCK, COMMON_STAGE);
5645 bnx2x_init_block(bp, XPB_BLOCK, COMMON_STAGE);
5646 bnx2x_init_block(bp, PBF_BLOCK, COMMON_STAGE);
5648 REG_WR(bp, SRC_REG_SOFT_RST, 1);
5649 for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4) {
5650 REG_WR(bp, i, 0xc0cac01a);
5651 /* TODO: replace with something meaningful */
5653 bnx2x_init_block(bp, SRCH_BLOCK, COMMON_STAGE);
5654 REG_WR(bp, SRC_REG_SOFT_RST, 0);
5656 if (sizeof(union cdu_context) != 1024)
5657 /* we currently assume that a context is 1024 bytes */
5658 printk(KERN_ALERT PFX "please adjust the size of"
5659 " cdu_context(%ld)\n", (long)sizeof(union cdu_context));
5661 bnx2x_init_block(bp, CDU_BLOCK, COMMON_STAGE);
5662 val = (4 << 24) + (0 << 12) + 1024;
5663 REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
5664 if (CHIP_IS_E1(bp)) {
5665 /* !!! fix pxp client crdit until excel update */
5666 REG_WR(bp, CDU_REG_CDU_DEBUG, 0x264);
5667 REG_WR(bp, CDU_REG_CDU_DEBUG, 0);
5670 bnx2x_init_block(bp, CFC_BLOCK, COMMON_STAGE);
5671 REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
5672 /* enable context validation interrupt from CFC */
5673 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
5675 /* set the thresholds to prevent CFC/CDU race */
5676 REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);
5678 bnx2x_init_block(bp, HC_BLOCK, COMMON_STAGE);
5679 bnx2x_init_block(bp, MISC_AEU_BLOCK, COMMON_STAGE);
5681 /* PXPCS COMMON comes here */
5682 bnx2x_init_block(bp, PXPCS_BLOCK, COMMON_STAGE);
5683 /* Reset PCIE errors for debug */
5684 REG_WR(bp, 0x2814, 0xffffffff);
5685 REG_WR(bp, 0x3820, 0xffffffff);
5687 /* EMAC0 COMMON comes here */
5688 bnx2x_init_block(bp, EMAC0_BLOCK, COMMON_STAGE);
5689 /* EMAC1 COMMON comes here */
5690 bnx2x_init_block(bp, EMAC1_BLOCK, COMMON_STAGE);
5691 /* DBU COMMON comes here */
5692 bnx2x_init_block(bp, DBU_BLOCK, COMMON_STAGE);
5693 /* DBG COMMON comes here */
5694 bnx2x_init_block(bp, DBG_BLOCK, COMMON_STAGE);
5696 bnx2x_init_block(bp, NIG_BLOCK, COMMON_STAGE);
5697 if (CHIP_IS_E1H(bp)) {
5698 REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp));
5699 REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp));
5702 if (CHIP_REV_IS_SLOW(bp))
5703 msleep(200);
5705 /* finish CFC init */
5706 val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
5707 if (val != 1) {
5708 BNX2X_ERR("CFC LL_INIT failed\n");
5709 return -EBUSY;
5710 }
5711 val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
5712 if (val != 1) {
5713 BNX2X_ERR("CFC AC_INIT failed\n");
5714 return -EBUSY;
5715 }
5716 val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
5717 if (val != 1) {
5718 BNX2X_ERR("CFC CAM_INIT failed\n");
5719 return -EBUSY;
5720 }
5721 REG_WR(bp, CFC_REG_DEBUG0, 0);
5723 /* read NIG statistic
5724 to see if this is our first up since powerup */
5725 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5726 val = *bnx2x_sp(bp, wb_data[0]);
5728 /* do internal memory self test */
5729 if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) {
5730 BNX2X_ERR("internal mem self test failed\n");
5731 return -EBUSY;
5732 }
5734 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
5735 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
5736 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
5737 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
5738 bp->port.need_hw_lock = 1;
5739 break;
5741 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
5742 /* Fan failure is indicated by SPIO 5 */
5743 bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
5744 MISC_REGISTERS_SPIO_INPUT_HI_Z);
5746 /* set to active low mode */
5747 val = REG_RD(bp, MISC_REG_SPIO_INT);
5748 val |= ((1 << MISC_REGISTERS_SPIO_5) <<
5749 MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
5750 REG_WR(bp, MISC_REG_SPIO_INT, val);
5752 /* enable interrupt to signal the IGU */
5753 val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
5754 val |= (1 << MISC_REGISTERS_SPIO_5);
5755 REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
5756 break;
5758 default:
5759 break;
5760 }
5762 /* clear PXP2 attentions */
5763 REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
5765 enable_blocks_attention(bp);
5767 if (!BP_NOMCP(bp)) {
5768 bnx2x_acquire_phy_lock(bp);
5769 bnx2x_common_init_phy(bp, bp->common.shmem_base);
5770 bnx2x_release_phy_lock(bp);
5771 } else
5772 BNX2X_ERR("Bootcode is missing - cannot initialize link\n");
5774 return 0;
5775 }
5777 static int bnx2x_init_port(struct bnx2x *bp)
5778 {
5779 int port = BP_PORT(bp);
5780 int init_stage = port ? PORT1_STAGE : PORT0_STAGE;
5781 u32 low, high;
5782 u32 val;
5784 DP(BNX2X_MSG_MCP, "starting port init port %x\n", port);
5786 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
5788 /* Port PXP comes here */
5789 bnx2x_init_block(bp, PXP_BLOCK, init_stage);
5790 /* Port PXP2 comes here */
5791 bnx2x_init_block(bp, PXP2_BLOCK, init_stage);
5792 #ifdef BCM_ISCSI
5793 /* Port0 1
5794 * Port1 385 */
5795 i++;
5796 wb_write[0] = ONCHIP_ADDR1(bp->timers_mapping);
5797 wb_write[1] = ONCHIP_ADDR2(bp->timers_mapping);
5798 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5799 REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));
5801 /* Port0 2
5802 * Port1 386 */
5803 i++;
5804 wb_write[0] = ONCHIP_ADDR1(bp->qm_mapping);
5805 wb_write[1] = ONCHIP_ADDR2(bp->qm_mapping);
5806 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5807 REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));
5809 /* Port0 3
5810 * Port1 387 */
5811 i++;
5812 wb_write[0] = ONCHIP_ADDR1(bp->t1_mapping);
5813 wb_write[1] = ONCHIP_ADDR2(bp->t1_mapping);
5814 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5815 REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
5816 #endif
5817 /* Port CMs come here */
5818 bnx2x_init_block(bp, XCM_BLOCK, init_stage);
5820 /* Port QM comes here */
5821 #ifdef BCM_ISCSI
5822 REG_WR(bp, TM_REG_LIN0_SCAN_TIME + func*4, 1024/64*20);
5823 REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + func*4, 31);
5825 bnx2x_init_block(bp, TIMERS_BLOCK, init_stage);
5826 #endif
5827 /* Port DQ comes here */
5828 bnx2x_init_block(bp, DQ_BLOCK, init_stage);
5830 bnx2x_init_block(bp, BRB1_BLOCK, init_stage);
5831 if (CHIP_REV_IS_SLOW(bp) && !CHIP_IS_E1H(bp)) {
5832 /* no pause for emulation and FPGA */
5833 low = 0;
5834 high = 513;
5835 } else {
5836 if (IS_E1HMF(bp))
5837 low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
5838 else if (bp->dev->mtu > 4096) {
5839 if (bp->flags & ONE_PORT_FLAG)
5840 low = 160;
5841 else {
5842 val = bp->dev->mtu;
5843 /* (24*1024 + val*4)/256 */
5844 low = 96 + (val/64) + ((val % 64) ? 1 : 0);
5845 }
5846 } else
5847 low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160);
5848 high = low + 56; /* 14*1024/256 */
5849 }
5850 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
5851 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);
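/* Worked example (illustrative): "low" is the pause threshold in
 * 256-byte BRB blocks, and the expression above is a ceiling division:
 * (24*1024 + val*4)/256 = 96 + ceil(val/64).  For an assumed mtu of
 * 9000 this gives 96 + 141 = 237 blocks, and high = low + 56.
 */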
5854 /* Port PRS comes here */
5855 bnx2x_init_block(bp, PRS_BLOCK, init_stage);
5856 /* Port TSDM comes here */
5857 bnx2x_init_block(bp, TSDM_BLOCK, init_stage);
5858 /* Port CSDM comes here */
5859 bnx2x_init_block(bp, CSDM_BLOCK, init_stage);
5860 /* Port USDM comes here */
5861 bnx2x_init_block(bp, USDM_BLOCK, init_stage);
5862 /* Port XSDM comes here */
5863 bnx2x_init_block(bp, XSDM_BLOCK, init_stage);
5865 bnx2x_init_block(bp, TSEM_BLOCK, init_stage);
5866 bnx2x_init_block(bp, USEM_BLOCK, init_stage);
5867 bnx2x_init_block(bp, CSEM_BLOCK, init_stage);
5868 bnx2x_init_block(bp, XSEM_BLOCK, init_stage);
5870 /* Port UPB comes here */
5871 bnx2x_init_block(bp, UPB_BLOCK, init_stage);
5872 /* Port XPB comes here */
5873 bnx2x_init_block(bp, XPB_BLOCK, init_stage);
5875 bnx2x_init_block(bp, PBF_BLOCK, init_stage);
5877 /* configure PBF to work without PAUSE for MTU 9000 */
5878 REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
5880 /* update threshold */
5881 REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
5882 /* update init credit */
5883 REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
5885 /* probe changes */
5886 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
5887 msleep(5);
5888 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
5890 #ifdef BCM_ISCSI
5891 /* tell the searcher where the T2 table is */
5892 REG_WR(bp, SRC_REG_COUNTFREE0 + func*4, 16*1024/64);
5894 wb_write[0] = U64_LO(bp->t2_mapping);
5895 wb_write[1] = U64_HI(bp->t2_mapping);
5896 REG_WR_DMAE(bp, SRC_REG_FIRSTFREE0 + func*4, wb_write, 2);
5897 wb_write[0] = U64_LO((u64)bp->t2_mapping + 16*1024 - 64);
5898 wb_write[1] = U64_HI((u64)bp->t2_mapping + 16*1024 - 64);
5899 REG_WR_DMAE(bp, SRC_REG_LASTFREE0 + func*4, wb_write, 2);
5901 REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + func*4, 10);
5902 /* Port SRCH comes here */
5903 #endif
5904 /* Port CDU comes here */
5905 bnx2x_init_block(bp, CDU_BLOCK, init_stage);
5906 /* Port CFC comes here */
5907 bnx2x_init_block(bp, CFC_BLOCK, init_stage);
5909 if (CHIP_IS_E1(bp)) {
5910 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
5911 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
5913 bnx2x_init_block(bp, HC_BLOCK, init_stage);
5915 bnx2x_init_block(bp, MISC_AEU_BLOCK, init_stage);
5916 /* init aeu_mask_attn_func_0/1:
5917 * - SF mode: bits 3-7 are masked. only bits 0-2 are in use
5918 * - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
5919 * bits 4-7 are used for "per vn group attention" */
5920 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
5921 (IS_E1HMF(bp) ? 0xF7 : 0x7));
5923 /* Port PXPCS comes here */
5924 bnx2x_init_block(bp, PXPCS_BLOCK, init_stage);
5925 /* Port EMAC0 comes here */
5926 bnx2x_init_block(bp, EMAC0_BLOCK, init_stage);
5927 /* Port EMAC1 comes here */
5928 bnx2x_init_block(bp, EMAC1_BLOCK, init_stage);
5929 /* Port DBU comes here */
5930 bnx2x_init_block(bp, DBU_BLOCK, init_stage);
5931 /* Port DBG comes here */
5932 bnx2x_init_block(bp, DBG_BLOCK, init_stage);
5934 bnx2x_init_block(bp, NIG_BLOCK, init_stage);
5936 REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
5938 if (CHIP_IS_E1H(bp)) {
5939 /* 0x2 disable e1hov, 0x1 enable */
5940 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
5941 (IS_E1HMF(bp) ? 0x1 : 0x2));
5943 /* support pause requests from USDM, TSDM and BRB */
5944 REG_WR(bp, NIG_REG_LLFC_EGRESS_SRC_ENABLE_0 + port*4, 0x7);
5947 REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
5948 REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
5949 REG_WR(bp, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
5953 /* Port MCP comes here */
5954 bnx2x_init_block(bp, MCP_BLOCK, init_stage);
5955 /* Port DMAE comes here */
5956 bnx2x_init_block(bp, DMAE_BLOCK, init_stage);
5958 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
5959 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
5961 u32 swap_val, swap_override, aeu_gpio_mask, offset;
5963 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
5964 MISC_REGISTERS_GPIO_INPUT_HI_Z, port);
5966 /* The GPIO should be swapped if the swap register is
5967 set and active */
5968 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
5969 swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
5971 /* Select function upon port-swap configuration */
5972 if (port == 0) {
5973 offset = MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0;
5974 aeu_gpio_mask = (swap_val && swap_override) ?
5975 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1 :
5976 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0;
5977 } else {
5978 offset = MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0;
5979 aeu_gpio_mask = (swap_val && swap_override) ?
5980 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 :
5981 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1;
5983 val = REG_RD(bp, offset);
5984 /* add GPIO3 to group */
5985 val |= aeu_gpio_mask;
5986 REG_WR(bp, offset, val);
5988 break;
5990 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
5991 /* add SPIO 5 to group 0 */
5992 val = REG_RD(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
5993 val |= AEU_INPUTS_ATTN_BITS_SPIO5;
5994 REG_WR(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0, val);
5995 break;
5997 default:
5998 break;
6001 bnx2x__link_reset(bp);
6003 return 0;
6004 }
6006 #define ILT_PER_FUNC (768/2)
6007 #define FUNC_ILT_BASE(func) (func * ILT_PER_FUNC)
6008 /* the phys address is shifted right 12 bits and has a
6009 1=valid bit added at the 53rd bit
6010 then since this is a wide register(TM)
6011 we split it into two 32 bit writes
6012 */
6013 #define ONCHIP_ADDR1(x) ((u32)(((u64)x >> 12) & 0xFFFFFFFF))
6014 #define ONCHIP_ADDR2(x) ((u32)((1 << 20) | ((u64)x >> 44)))
6015 #define PXP_ONE_ILT(x) (((x) << 10) | x)
6016 #define PXP_ILT_RANGE(f, l) (((l) << 10) | f)
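/* Illustrative example of the address split (the value is made up):
 * for addr = 0x123456789000,
 * addr >> 12 = 0x123456789, so
 * ONCHIP_ADDR1(addr) = 0x23456789 (low 32 bits)
 * ONCHIP_ADDR2(addr) = (1 << 20) | 0x1 = 0x100001 (valid bit | high bits)
 * PXP_ONE_ILT(x) packs x as both the first and last line of a
 * one-line ILT range: (x << 10) | x.
 */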
6018 #define CNIC_ILT_LINES 0
6020 static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
6021 {
6022 int reg;
6024 if (CHIP_IS_E1H(bp))
6025 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
6026 else /* E1 */
6027 reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
6029 bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
6030 }
6032 static int bnx2x_init_func(struct bnx2x *bp)
6033 {
6034 int port = BP_PORT(bp);
6035 int func = BP_FUNC(bp);
6036 u32 addr, val;
6037 int i;
6039 DP(BNX2X_MSG_MCP, "starting func init func %x\n", func);
6041 /* set MSI reconfigure capability */
6042 addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
6043 val = REG_RD(bp, addr);
6044 val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
6045 REG_WR(bp, addr, val);
6047 i = FUNC_ILT_BASE(func);
6049 bnx2x_ilt_wr(bp, i, bnx2x_sp_mapping(bp, context));
6050 if (CHIP_IS_E1H(bp)) {
6051 REG_WR(bp, PXP2_REG_RQ_CDU_FIRST_ILT, i);
6052 REG_WR(bp, PXP2_REG_RQ_CDU_LAST_ILT, i + CNIC_ILT_LINES);
6053 } else /* E1 */
6054 REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4,
6055 PXP_ILT_RANGE(i, i + CNIC_ILT_LINES));
6058 if (CHIP_IS_E1H(bp)) {
6059 for (i = 0; i < 9; i++)
6060 bnx2x_init_block(bp,
6061 cm_blocks[i], FUNC0_STAGE + func);
6063 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
6064 REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov);
6067 /* HC init per function */
6068 if (CHIP_IS_E1H(bp)) {
6069 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
6071 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
6072 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
6074 bnx2x_init_block(bp, HC_BLOCK, FUNC0_STAGE + func);
6076 /* Reset PCIE errors for debug */
6077 REG_WR(bp, 0x2114, 0xffffffff);
6078 REG_WR(bp, 0x2120, 0xffffffff);
6080 return 0;
6081 }
6083 static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
6084 {
6085 int i, rc = 0;
6087 DP(BNX2X_MSG_MCP, "function %d load_code %x\n",
6088 BP_FUNC(bp), load_code);
6090 bp->dmae_ready = 0;
6091 mutex_init(&bp->dmae_mutex);
6092 bnx2x_gunzip_init(bp);
6094 switch (load_code) {
6095 case FW_MSG_CODE_DRV_LOAD_COMMON:
6096 rc = bnx2x_init_common(bp);
6097 if (rc)
6098 goto init_hw_err;
6099 /* no break */
6101 case FW_MSG_CODE_DRV_LOAD_PORT:
6102 bp->dmae_ready = 1;
6103 rc = bnx2x_init_port(bp);
6104 if (rc)
6105 goto init_hw_err;
6106 /* no break */
6108 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
6109 bp->dmae_ready = 1;
6110 rc = bnx2x_init_func(bp);
6111 if (rc)
6112 goto init_hw_err;
6113 break;
6115 default:
6116 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
6117 break;
6120 if (!BP_NOMCP(bp)) {
6121 int func = BP_FUNC(bp);
6123 bp->fw_drv_pulse_wr_seq =
6124 (SHMEM_RD(bp, func_mb[func].drv_pulse_mb) &
6125 DRV_PULSE_SEQ_MASK);
6126 bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
6127 DP(BNX2X_MSG_MCP, "drv_pulse 0x%x func_stx 0x%x\n",
6128 bp->fw_drv_pulse_wr_seq, bp->func_stx);
6129 } else
6130 bp->func_stx = 0;
6132 /* this needs to be done before gunzip end */
6133 bnx2x_zero_def_sb(bp);
6134 for_each_queue(bp, i)
6135 bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
6137 init_hw_err:
6138 bnx2x_gunzip_end(bp);
6140 return rc;
6141 }
6143 /* send the MCP a request, block until there is a reply */
6144 static u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
6145 {
6146 int func = BP_FUNC(bp);
6147 u32 seq = ++bp->fw_seq;
6148 u32 rc = 0;
6149 u32 cnt = 1;
6150 u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
6152 SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
6153 DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
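/* Poll the mailbox until the FW echoes our sequence number back:
 * the low bits of the header hold the sequence, the high bits the
 * response code, so a stale reply to an older command is ignored.
 */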
6155 do {
6156 /* let the FW do its magic ... */
6157 msleep(delay);
6159 rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);
6161 /* Give the FW up to 2 seconds (200*10ms) */
6162 } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 200));
6164 DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
6165 cnt*delay, rc, seq);
6167 /* is this a reply to our command? */
6168 if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK)) {
6169 rc &= FW_MSG_CODE_MASK;
6171 } else {
6172 /* FW BUG! */
6173 BNX2X_ERR("FW failed to respond!\n");
6174 bnx2x_fw_dump(bp);
6175 rc = 0;
6178 return rc;
6179 }
6181 static void bnx2x_free_mem(struct bnx2x *bp)
6182 {
6184 #define BNX2X_PCI_FREE(x, y, size) \
6185 do { \
6186 if (x) { \
6187 pci_free_consistent(bp->pdev, size, x, y); \
6188 x = NULL; \
6189 y = 0; \
6190 } \
6191 } while (0)
6193 #define BNX2X_FREE(x) \
6194 do { \
6195 if (x) { \
6196 vfree(x); \
6197 x = NULL; \
6198 } \
6199 } while (0)
6201 int i;
6203 /* fastpath */
6204 /* Common */
6205 for_each_queue(bp, i) {
6207 /* status blocks */
6208 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
6209 bnx2x_fp(bp, i, status_blk_mapping),
6210 sizeof(struct host_status_block) +
6211 sizeof(struct eth_tx_db_data));
6213 /* Rx */
6214 for_each_rx_queue(bp, i) {
6216 /* fastpath rx rings: rx_buf rx_desc rx_comp */
6217 BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
6218 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
6219 bnx2x_fp(bp, i, rx_desc_mapping),
6220 sizeof(struct eth_rx_bd) * NUM_RX_BD);
6222 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
6223 bnx2x_fp(bp, i, rx_comp_mapping),
6224 sizeof(struct eth_fast_path_rx_cqe) *
6225 NUM_RCQ_BD);
6227 /* SGE ring */
6228 BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
6229 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
6230 bnx2x_fp(bp, i, rx_sge_mapping),
6231 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
6233 /* Tx */
6234 for_each_tx_queue(bp, i) {
6236 /* fastpath tx rings: tx_buf tx_desc */
6237 BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
6238 BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
6239 bnx2x_fp(bp, i, tx_desc_mapping),
6240 sizeof(struct eth_tx_bd) * NUM_TX_BD);
6242 /* end of fastpath */
6244 BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
6245 sizeof(struct host_def_status_block));
6247 BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
6248 sizeof(struct bnx2x_slowpath));
6250 #ifdef BCM_ISCSI
6251 BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
6252 BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
6253 BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
6254 BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
6255 #endif
6256 BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
6258 #undef BNX2X_PCI_FREE
6259 #undef BNX2X_FREE
6260 }
6262 static int bnx2x_alloc_mem(struct bnx2x *bp)
6263 {
6265 #define BNX2X_PCI_ALLOC(x, y, size) \
6266 do { \
6267 x = pci_alloc_consistent(bp->pdev, size, y); \
6268 if (x == NULL) \
6269 goto alloc_mem_err; \
6270 memset(x, 0, size); \
6271 } while (0)
6273 #define BNX2X_ALLOC(x, size) \
6274 do { \
6275 x = vmalloc(size); \
6276 if (x == NULL) \
6277 goto alloc_mem_err; \
6278 memset(x, 0, size); \
6279 } while (0)
6281 int i;
6283 /* fastpath */
6284 /* Common */
6285 for_each_queue(bp, i) {
6286 bnx2x_fp(bp, i, bp) = bp;
6288 /* status blocks */
6289 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
6290 &bnx2x_fp(bp, i, status_blk_mapping),
6291 sizeof(struct host_status_block) +
6292 sizeof(struct eth_tx_db_data));
6294 /* Rx */
6295 for_each_rx_queue(bp, i) {
6297 /* fastpath rx rings: rx_buf rx_desc rx_comp */
6298 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
6299 sizeof(struct sw_rx_bd) * NUM_RX_BD);
6300 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
6301 &bnx2x_fp(bp, i, rx_desc_mapping),
6302 sizeof(struct eth_rx_bd) * NUM_RX_BD);
6304 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
6305 &bnx2x_fp(bp, i, rx_comp_mapping),
6306 sizeof(struct eth_fast_path_rx_cqe) *
6307 NUM_RCQ_BD);
6309 /* SGE ring */
6310 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
6311 sizeof(struct sw_rx_page) * NUM_RX_SGE);
6312 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
6313 &bnx2x_fp(bp, i, rx_sge_mapping),
6314 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
6316 /* Tx */
6317 for_each_tx_queue(bp, i) {
6319 bnx2x_fp(bp, i, hw_tx_prods) =
6320 (void *)(bnx2x_fp(bp, i, status_blk) + 1);
6322 bnx2x_fp(bp, i, tx_prods_mapping) =
6323 bnx2x_fp(bp, i, status_blk_mapping) +
6324 sizeof(struct host_status_block);
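/* Note: the Tx producers area is not a separate allocation - it lives
 * in the same DMA buffer as the status block (hence the extra
 * sizeof(struct eth_tx_db_data) in the status block allocation above),
 * so its mapping is simply the status block mapping plus an offset.
 */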
6326 /* fastpath tx rings: tx_buf tx_desc */
6327 BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
6328 sizeof(struct sw_tx_bd) * NUM_TX_BD);
6329 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
6330 &bnx2x_fp(bp, i, tx_desc_mapping),
6331 sizeof(struct eth_tx_bd) * NUM_TX_BD);
6333 /* end of fastpath */
6335 BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
6336 sizeof(struct host_def_status_block));
6338 BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
6339 sizeof(struct bnx2x_slowpath));
6341 #ifdef BCM_ISCSI
6342 BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);
6344 /* Initialize T1 */
6345 for (i = 0; i < 64*1024; i += 64) {
6346 *(u64 *)((char *)bp->t1 + i + 56) = 0x0UL;
6347 *(u64 *)((char *)bp->t1 + i + 3) = 0x0UL;
6350 /* allocate searcher T2 table
6351 we allocate 1/4 of alloc num for T2
6352 (which is not entered into the ILT) */
6353 BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);
6355 /* Initialize T2 */
6356 for (i = 0; i < 16*1024; i += 64)
6357 * (u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;
6359 /* now fixup the last line in the block to point to the next block */
6360 *(u64 *)((char *)bp->t2 + 1024*16-8) = bp->t2_mapping;
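/* The loop and the fixup above chain the 64-byte T2 lines into a
 * circular singly linked free list for the searcher: the last 8 bytes
 * of each line hold the physical address of the next line, and the
 * last line points back to the start of the table.
 */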
6362 /* Timer block array (MAX_CONN*8) phys uncached for now 1024 conns */
6363 BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);
6365 /* QM queues (128*MAX_CONN) */
6366 BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);
6367 #endif
6369 /* Slow path ring */
6370 BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);
6372 return 0;
6374 alloc_mem_err:
6375 bnx2x_free_mem(bp);
6376 return -ENOMEM;
6378 #undef BNX2X_PCI_ALLOC
6379 #undef BNX2X_ALLOC
6380 }
6382 static void bnx2x_free_tx_skbs(struct bnx2x *bp)
6383 {
6384 int i;
6386 for_each_tx_queue(bp, i) {
6387 struct bnx2x_fastpath *fp = &bp->fp[i];
6389 u16 bd_cons = fp->tx_bd_cons;
6390 u16 sw_prod = fp->tx_pkt_prod;
6391 u16 sw_cons = fp->tx_pkt_cons;
6393 while (sw_cons != sw_prod) {
6394 bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
6395 sw_cons++;
6396 }
6397 }
6398 }
6400 static void bnx2x_free_rx_skbs(struct bnx2x *bp)
6401 {
6402 int i, j;
6404 for_each_rx_queue(bp, j) {
6405 struct bnx2x_fastpath *fp = &bp->fp[j];
6407 for (i = 0; i < NUM_RX_BD; i++) {
6408 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
6409 struct sk_buff *skb = rx_buf->skb;
6411 if (skb == NULL)
6412 continue;
6414 pci_unmap_single(bp->pdev,
6415 pci_unmap_addr(rx_buf, mapping),
6416 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
6418 rx_buf->skb = NULL;
6419 dev_kfree_skb(skb);
6421 if (!fp->disable_tpa)
6422 bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
6423 ETH_MAX_AGGREGATION_QUEUES_E1 :
6424 ETH_MAX_AGGREGATION_QUEUES_E1H);
6428 static void bnx2x_free_skbs(struct bnx2x *bp)
6430 bnx2x_free_tx_skbs(bp);
6431 bnx2x_free_rx_skbs(bp);
6434 static void bnx2x_free_msix_irqs(struct bnx2x *bp)
6436 int i, offset = 1;
6438 free_irq(bp->msix_table[0].vector, bp->dev);
6439 DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
6440 bp->msix_table[0].vector);
6442 for_each_queue(bp, i) {
6443 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq "
6444 "state %x\n", i, bp->msix_table[i + offset].vector,
6445 bnx2x_fp(bp, i, state));
6447 free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
6451 static void bnx2x_free_irq(struct bnx2x *bp)
6453 if (bp->flags & USING_MSIX_FLAG) {
6454 bnx2x_free_msix_irqs(bp);
6455 pci_disable_msix(bp->pdev);
6456 bp->flags &= ~USING_MSIX_FLAG;
6458 } else if (bp->flags & USING_MSI_FLAG) {
6459 free_irq(bp->pdev->irq, bp->dev);
6460 pci_disable_msi(bp->pdev);
6461 bp->flags &= ~USING_MSI_FLAG;
6463 } else
6464 free_irq(bp->pdev->irq, bp->dev);
6465 }
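/* MSI-X vector layout used below: entry 0 is reserved for the slowpath
 * (default) status block and entries 1..n serve one fastpath queue
 * each, which is why the fastpath loops use offset = 1.
 */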
6467 static int bnx2x_enable_msix(struct bnx2x *bp)
6469 int i, rc, offset = 1;
6470 int igu_vec = 0;
6472 bp->msix_table[0].entry = igu_vec;
6473 DP(NETIF_MSG_IFUP, "msix_table[0].entry = %d (slowpath)\n", igu_vec);
6475 for_each_queue(bp, i) {
6476 igu_vec = BP_L_ID(bp) + offset + i;
6477 bp->msix_table[i + offset].entry = igu_vec;
6478 DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
6479 "(fastpath #%u)\n", i + offset, igu_vec, i);
6482 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0],
6483 BNX2X_NUM_QUEUES(bp) + offset);
6484 if (rc) {
6485 DP(NETIF_MSG_IFUP, "MSI-X is not attainable rc %d\n", rc);
6486 return rc;
6489 bp->flags |= USING_MSIX_FLAG;
6491 return 0;
6494 static int bnx2x_req_msix_irqs(struct bnx2x *bp)
6496 int i, rc, offset = 1;
6498 rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
6499 bp->dev->name, bp->dev);
6500 if (rc) {
6501 BNX2X_ERR("request sp irq failed\n");
6502 return -EBUSY;
6505 for_each_queue(bp, i) {
6506 struct bnx2x_fastpath *fp = &bp->fp[i];
6508 sprintf(fp->name, "%s.fp%d", bp->dev->name, i);
6509 rc = request_irq(bp->msix_table[i + offset].vector,
6510 bnx2x_msix_fp_int, 0, fp->name, fp);
6511 if (rc) {
6512 BNX2X_ERR("request fp #%d irq failed rc %d\n", i, rc);
6513 bnx2x_free_msix_irqs(bp);
6514 return -EBUSY;
6517 fp->state = BNX2X_FP_STATE_IRQ;
6520 i = BNX2X_NUM_QUEUES(bp);
6521 if (is_multi(bp))
6522 printk(KERN_INFO PFX
6523 "%s: using MSI-X IRQs: sp %d fp %d - %d\n",
6524 bp->dev->name, bp->msix_table[0].vector,
6525 bp->msix_table[offset].vector,
6526 bp->msix_table[offset + i - 1].vector);
6527 else
6528 printk(KERN_INFO PFX "%s: using MSI-X IRQs: sp %d fp %d\n",
6529 bp->dev->name, bp->msix_table[0].vector,
6530 bp->msix_table[offset + i - 1].vector);
6532 return 0;
6535 static int bnx2x_enable_msi(struct bnx2x *bp)
6537 int rc;
6539 rc = pci_enable_msi(bp->pdev);
6540 if (rc) {
6541 DP(NETIF_MSG_IFUP, "MSI is not attainable\n");
6542 return -1;
6544 bp->flags |= USING_MSI_FLAG;
6546 return 0;
6549 static int bnx2x_req_irq(struct bnx2x *bp)
6551 unsigned long flags;
6552 int rc;
6554 if (bp->flags & USING_MSI_FLAG)
6555 flags = 0;
6556 else
6557 flags = IRQF_SHARED;
6559 rc = request_irq(bp->pdev->irq, bnx2x_interrupt, flags,
6560 bp->dev->name, bp->dev);
6561 if (!rc)
6562 bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;
6564 return rc;
6567 static void bnx2x_napi_enable(struct bnx2x *bp)
6569 int i;
6571 for_each_rx_queue(bp, i)
6572 napi_enable(&bnx2x_fp(bp, i, napi));
6575 static void bnx2x_napi_disable(struct bnx2x *bp)
6577 int i;
6579 for_each_rx_queue(bp, i)
6580 napi_disable(&bnx2x_fp(bp, i, napi));
6581 }
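/* intr_sem acts as an interrupt-disable nesting count: stopping the
 * netif raises it, and bnx2x_netif_start() below only re-enables NAPI,
 * interrupts and the Tx queues once the count drops back to zero.
 */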
6583 static void bnx2x_netif_start(struct bnx2x *bp)
6585 if (atomic_dec_and_test(&bp->intr_sem)) {
6586 if (netif_running(bp->dev)) {
6587 bnx2x_napi_enable(bp);
6588 bnx2x_int_enable(bp);
6589 if (bp->state == BNX2X_STATE_OPEN)
6590 netif_tx_wake_all_queues(bp->dev);
6595 static void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
6597 bnx2x_int_disable_sync(bp, disable_hw);
6598 bnx2x_napi_disable(bp);
6599 netif_tx_disable(bp->dev);
6600 bp->dev->trans_start = jiffies; /* prevent tx timeout */
6604 * Init service functions
6607 static void bnx2x_set_mac_addr_e1(struct bnx2x *bp, int set)
6609 struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
6610 int port = BP_PORT(bp);
6612 /* CAM allocation
6613 * unicasts 0-31:port0 32-63:port1
6614 * multicast 64-127:port0 128-191:port1
6615 */
6616 config->hdr.length = 2;
6617 config->hdr.offset = port ? 32 : 0;
6618 config->hdr.client_id = bp->fp->cl_id;
6619 config->hdr.reserved1 = 0;
6621 /* primary MAC */
6622 config->config_table[0].cam_entry.msb_mac_addr =
6623 swab16(*(u16 *)&bp->dev->dev_addr[0]);
6624 config->config_table[0].cam_entry.middle_mac_addr =
6625 swab16(*(u16 *)&bp->dev->dev_addr[2]);
6626 config->config_table[0].cam_entry.lsb_mac_addr =
6627 swab16(*(u16 *)&bp->dev->dev_addr[4]);
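/* Each 16-bit CAM field holds two consecutive MAC-address bytes in
 * big-endian order; on a little-endian host swab16() yields, e.g. for
 * dev_addr 00:11:22:33:44:55, msb 0x0011, middle 0x2233, lsb 0x4455.
 */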
6628 config->config_table[0].cam_entry.flags = cpu_to_le16(port);
6629 if (set)
6630 config->config_table[0].target_table_entry.flags = 0;
6631 else
6632 CAM_INVALIDATE(config->config_table[0]);
6633 config->config_table[0].target_table_entry.client_id = 0;
6634 config->config_table[0].target_table_entry.vlan_id = 0;
6636 DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)\n",
6637 (set ? "setting" : "clearing"),
6638 config->config_table[0].cam_entry.msb_mac_addr,
6639 config->config_table[0].cam_entry.middle_mac_addr,
6640 config->config_table[0].cam_entry.lsb_mac_addr);
6642 /* broadcast */
6643 config->config_table[1].cam_entry.msb_mac_addr = cpu_to_le16(0xffff);
6644 config->config_table[1].cam_entry.middle_mac_addr = cpu_to_le16(0xffff);
6645 config->config_table[1].cam_entry.lsb_mac_addr = cpu_to_le16(0xffff);
6646 config->config_table[1].cam_entry.flags = cpu_to_le16(port);
6647 if (set)
6648 config->config_table[1].target_table_entry.flags =
6649 TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
6650 else
6651 CAM_INVALIDATE(config->config_table[1]);
6652 config->config_table[1].target_table_entry.client_id = 0;
6653 config->config_table[1].target_table_entry.vlan_id = 0;
6655 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6656 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
6657 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
6660 static void bnx2x_set_mac_addr_e1h(struct bnx2x *bp, int set)
6662 struct mac_configuration_cmd_e1h *config =
6663 (struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config);
6665 if (set && (bp->state != BNX2X_STATE_OPEN)) {
6666 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
6667 return;
6670 /* CAM allocation for E1H
6671 * unicasts: by func number
6672 * multicast: 20+FUNC*20, 20 each
6673 */
6674 config->hdr.length = 1;
6675 config->hdr.offset = BP_FUNC(bp);
6676 config->hdr.client_id = bp->fp->cl_id;
6677 config->hdr.reserved1 = 0;
6679 /* primary MAC */
6680 config->config_table[0].msb_mac_addr =
6681 swab16(*(u16 *)&bp->dev->dev_addr[0]);
6682 config->config_table[0].middle_mac_addr =
6683 swab16(*(u16 *)&bp->dev->dev_addr[2]);
6684 config->config_table[0].lsb_mac_addr =
6685 swab16(*(u16 *)&bp->dev->dev_addr[4]);
6686 config->config_table[0].client_id = BP_L_ID(bp);
6687 config->config_table[0].vlan_id = 0;
6688 config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov);
6689 if (set)
6690 config->config_table[0].flags = BP_PORT(bp);
6691 else
6692 config->config_table[0].flags =
6693 MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE;
6695 DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x) E1HOV %d CLID %d\n",
6696 (set ? "setting" : "clearing"),
6697 config->config_table[0].msb_mac_addr,
6698 config->config_table[0].middle_mac_addr,
6699 config->config_table[0].lsb_mac_addr, bp->e1hov, BP_L_ID(bp));
6701 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6702 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
6703 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
6706 static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
6707 int *state_p, int poll)
6709 /* can take a while if any port is running */
6710 int cnt = 5000;
6712 DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
6713 poll ? "polling" : "waiting", state, idx);
6715 might_sleep();
6716 while (cnt--) {
6717 if (poll) {
6718 bnx2x_rx_int(bp->fp, 10);
6719 /* if index is different from 0
6720 * the reply for some commands will
6721 * be on the non default queue
6722 */
6723 if (idx)
6724 bnx2x_rx_int(&bp->fp[idx], 10);
6727 mb(); /* state is changed by bnx2x_sp_event() */
6728 if (*state_p == state) {
6729 #ifdef BNX2X_STOP_ON_ERROR
6730 DP(NETIF_MSG_IFUP, "exit (cnt %d)\n", 5000 - cnt);
6731 #endif
6732 return 0;
6735 msleep(1);
6738 /* timeout! */
6739 BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
6740 poll ? "polling" : "waiting", state, idx);
6741 #ifdef BNX2X_STOP_ON_ERROR
6742 bnx2x_panic();
6743 #endif
6745 return -EBUSY;
6748 static int bnx2x_setup_leading(struct bnx2x *bp)
6750 int rc;
6752 /* reset IGU state */
6753 bnx2x_ack_sb(bp, bp->fp[0].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
6755 /* SETUP ramrod */
6756 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);
6758 /* Wait for completion */
6759 rc = bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0);
6761 return rc;
6764 static int bnx2x_setup_multi(struct bnx2x *bp, int index)
6766 struct bnx2x_fastpath *fp = &bp->fp[index];
6768 /* reset IGU state */
6769 bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
6771 /* SETUP ramrod */
6772 fp->state = BNX2X_FP_STATE_OPENING;
6773 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0,
6774 fp->cl_id, 0);
6776 /* Wait for completion */
6777 return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
6778 &(fp->state), 0);
6781 static int bnx2x_poll(struct napi_struct *napi, int budget);
6783 static void bnx2x_set_int_mode(struct bnx2x *bp)
6785 int num_queues;
6787 switch (int_mode) {
6788 case INT_MODE_INTx:
6789 case INT_MODE_MSI:
6790 num_queues = 1;
6791 bp->num_rx_queues = num_queues;
6792 bp->num_tx_queues = num_queues;
6793 DP(NETIF_MSG_IFUP,
6794 "set number of queues to %d\n", num_queues);
6795 break;
6797 case INT_MODE_MSIX:
6798 default:
6799 if (bp->multi_mode == ETH_RSS_MODE_REGULAR)
6800 num_queues = min_t(u32, num_online_cpus(),
6801 BNX2X_MAX_QUEUES(bp));
6802 else
6803 num_queues = 1;
6804 bp->num_rx_queues = num_queues;
6805 bp->num_tx_queues = num_queues;
6806 DP(NETIF_MSG_IFUP, "set number of rx queues to %d"
6807 " number of tx queues to %d\n",
6808 bp->num_rx_queues, bp->num_tx_queues);
6809 /* if we can't use MSI-X we only need one fp,
6810 * so try to enable MSI-X with the requested number of fp's
6811 * and fallback to MSI or legacy INTx with one fp
6812 */
6813 if (bnx2x_enable_msix(bp)) {
6814 /* failed to enable MSI-X */
6815 num_queues = 1;
6816 bp->num_rx_queues = num_queues;
6817 bp->num_tx_queues = num_queues;
6818 if (bp->multi_mode)
6819 BNX2X_ERR("Multi requested but failed to "
6820 "enable MSI-X set number of "
6821 "queues to %d\n", num_queues);
6823 break;
6825 bp->dev->real_num_tx_queues = bp->num_tx_queues;
6828 static void bnx2x_set_rx_mode(struct net_device *dev);
6830 /* must be called with rtnl_lock */
6831 static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
6833 u32 load_code;
6834 int i, rc = 0;
6835 #ifdef BNX2X_STOP_ON_ERROR
6836 DP(NETIF_MSG_IFUP, "enter load_mode %d\n", load_mode);
6837 if (unlikely(bp->panic))
6838 return -EPERM;
6839 #endif
6841 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
6843 bnx2x_set_int_mode(bp);
6845 if (bnx2x_alloc_mem(bp))
6846 return -ENOMEM;
6848 for_each_rx_queue(bp, i)
6849 bnx2x_fp(bp, i, disable_tpa) =
6850 ((bp->flags & TPA_ENABLE_FLAG) == 0);
6852 for_each_rx_queue(bp, i)
6853 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
6854 bnx2x_poll, 128);
6856 #ifdef BNX2X_STOP_ON_ERROR
6857 for_each_rx_queue(bp, i) {
6858 struct bnx2x_fastpath *fp = &bp->fp[i];
6860 fp->poll_no_work = 0;
6861 fp->poll_calls = 0;
6862 fp->poll_max_calls = 0;
6863 fp->poll_complete = 0;
6864 fp->poll_exit = 0;
6866 #endif
6867 bnx2x_napi_enable(bp);
6869 if (bp->flags & USING_MSIX_FLAG) {
6870 rc = bnx2x_req_msix_irqs(bp);
6871 if (rc) {
6872 pci_disable_msix(bp->pdev);
6873 goto load_error1;
6875 } else {
6876 if ((rc != -ENOMEM) && (int_mode != INT_MODE_INTx))
6877 bnx2x_enable_msi(bp);
6878 bnx2x_ack_int(bp);
6879 rc = bnx2x_req_irq(bp);
6880 if (rc) {
6881 BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc);
6882 if (bp->flags & USING_MSI_FLAG)
6883 pci_disable_msi(bp->pdev);
6884 goto load_error1;
6886 if (bp->flags & USING_MSI_FLAG) {
6887 bp->dev->irq = bp->pdev->irq;
6888 printk(KERN_INFO PFX "%s: using MSI IRQ %d\n",
6889 bp->dev->name, bp->pdev->irq);
6893 /* Send LOAD_REQUEST command to MCP
6894 Returns the type of LOAD command:
6895 if it is the first port to be initialized
6896 common blocks should be initialized, otherwise - not
6897 */
6898 if (!BP_NOMCP(bp)) {
6899 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
6900 if (!load_code) {
6901 BNX2X_ERR("MCP response failure, aborting\n");
6902 rc = -EBUSY;
6903 goto load_error2;
6905 if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
6906 rc = -EBUSY; /* other port in diagnostic mode */
6907 goto load_error2;
6910 } else {
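/* Without an MCP the driver keeps its own load accounting:
 * load_count[0] counts all loaded functions and load_count[1 + port]
 * counts them per port, so the first function overall performs the
 * COMMON init, the first on a port the PORT init, and everyone else
 * only a FUNCTION init.
 */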
6911 int port = BP_PORT(bp);
6913 DP(NETIF_MSG_IFUP, "NO MCP - load counts %d, %d, %d\n",
6914 load_count[0], load_count[1], load_count[2]);
6915 load_count[0]++;
6916 load_count[1 + port]++;
6917 DP(NETIF_MSG_IFUP, "NO MCP - new load counts %d, %d, %d\n",
6918 load_count[0], load_count[1], load_count[2]);
6919 if (load_count[0] == 1)
6920 load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
6921 else if (load_count[1 + port] == 1)
6922 load_code = FW_MSG_CODE_DRV_LOAD_PORT;
6923 else
6924 load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
6927 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
6928 (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
6929 bp->port.pmf = 1;
6930 else
6931 bp->port.pmf = 0;
6932 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
6934 /* Initialize HW */
6935 rc = bnx2x_init_hw(bp, load_code);
6936 if (rc) {
6937 BNX2X_ERR("HW init failed, aborting\n");
6938 goto load_error2;
6941 /* Setup NIC internals and enable interrupts */
6942 bnx2x_nic_init(bp, load_code);
6944 /* Send LOAD_DONE command to MCP */
6945 if (!BP_NOMCP(bp)) {
6946 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
6947 if (!load_code) {
6948 BNX2X_ERR("MCP response failure, aborting\n");
6949 rc = -EBUSY;
6950 goto load_error3;
6954 bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
6956 rc = bnx2x_setup_leading(bp);
6957 if (rc) {
6958 BNX2X_ERR("Setup leading failed!\n");
6959 goto load_error3;
6962 if (CHIP_IS_E1H(bp))
6963 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
6964 DP(NETIF_MSG_IFUP, "mf_cfg function disabled\n");
6965 bp->state = BNX2X_STATE_DISABLED;
6968 if (bp->state == BNX2X_STATE_OPEN)
6969 for_each_nondefault_queue(bp, i) {
6970 rc = bnx2x_setup_multi(bp, i);
6971 if (rc)
6972 goto load_error3;
6975 if (CHIP_IS_E1(bp))
6976 bnx2x_set_mac_addr_e1(bp, 1);
6977 else
6978 bnx2x_set_mac_addr_e1h(bp, 1);
6980 if (bp->port.pmf)
6981 bnx2x_initial_phy_init(bp, load_mode);
6983 /* Start fast path */
6984 switch (load_mode) {
6985 case LOAD_NORMAL:
6986 /* Tx queues should only be re-enabled */
6987 netif_tx_wake_all_queues(bp->dev);
6988 /* Initialize the receive filter. */
6989 bnx2x_set_rx_mode(bp->dev);
6990 break;
6992 case LOAD_OPEN:
6993 netif_tx_start_all_queues(bp->dev);
6994 /* Initialize the receive filter. */
6995 bnx2x_set_rx_mode(bp->dev);
6996 break;
6998 case LOAD_DIAG:
6999 /* Initialize the receive filter. */
7000 bnx2x_set_rx_mode(bp->dev);
7001 bp->state = BNX2X_STATE_DIAG;
7002 break;
7004 default:
7005 break;
7008 if (!bp->port.pmf)
7009 bnx2x__link_status_update(bp);
7011 /* start the timer */
7012 mod_timer(&bp->timer, jiffies + bp->current_interval);
7015 return 0;
7017 load_error3:
7018 bnx2x_int_disable_sync(bp, 1);
7019 if (!BP_NOMCP(bp)) {
7020 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP);
7021 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
7023 bp->port.pmf = 0;
7024 /* Free SKBs, SGEs, TPA pool and driver internals */
7025 bnx2x_free_skbs(bp);
7026 for_each_rx_queue(bp, i)
7027 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
7028 load_error2:
7029 /* Release IRQs */
7030 bnx2x_free_irq(bp);
7031 load_error1:
7032 bnx2x_napi_disable(bp);
7033 for_each_rx_queue(bp, i)
7034 netif_napi_del(&bnx2x_fp(bp, i, napi));
7035 bnx2x_free_mem(bp);
7037 return rc;
7040 static int bnx2x_stop_multi(struct bnx2x *bp, int index)
7042 struct bnx2x_fastpath *fp = &bp->fp[index];
7043 int rc;
7045 /* halt the connection */
7046 fp->state = BNX2X_FP_STATE_HALTING;
7047 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, fp->cl_id, 0);
7049 /* Wait for completion */
7050 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
7051 &(fp->state), 1);
7052 if (rc) /* timeout */
7053 return rc;
7055 /* delete cfc entry */
7056 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);
7058 /* Wait for completion */
7059 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index,
7060 &(fp->state), 1);
7061 return rc;
7064 static int bnx2x_stop_leading(struct bnx2x *bp)
7066 __le16 dsb_sp_prod_idx;
7067 /* if the other port is handling traffic,
7068 this can take a lot of time */
7069 int cnt = 500;
7070 int rc;
7072 might_sleep();
7074 /* Send HALT ramrod */
7075 bp->fp[0].state = BNX2X_FP_STATE_HALTING;
7076 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, bp->fp->cl_id, 0);
7078 /* Wait for completion */
7079 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
7080 &(bp->fp[0].state), 1);
7081 if (rc) /* timeout */
7082 return rc;
7084 dsb_sp_prod_idx = *bp->dsb_sp_prod;
7086 /* Send PORT_DELETE ramrod */
7087 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);
7089 /* Wait for completion to arrive on default status block
7090 we are going to reset the chip anyway
7091 so there is not much to do if this times out
7092 */
7093 while (dsb_sp_prod_idx == *bp->dsb_sp_prod) {
7094 if (!cnt) {
7095 DP(NETIF_MSG_IFDOWN, "timeout waiting for port del "
7096 "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
7097 *bp->dsb_sp_prod, dsb_sp_prod_idx);
7098 #ifdef BNX2X_STOP_ON_ERROR
7099 bnx2x_panic();
7100 #endif
7101 rc = -EBUSY;
7102 break;
7104 cnt--;
7105 msleep(1);
7106 rmb(); /* Refresh the dsb_sp_prod */
7108 bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
7109 bp->fp[0].state = BNX2X_FP_STATE_CLOSED;
7111 return rc;
7114 static void bnx2x_reset_func(struct bnx2x *bp)
7116 int port = BP_PORT(bp);
7117 int func = BP_FUNC(bp);
7118 int base, i;
7120 /* Configure IGU */
7121 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
7122 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
7124 /* Clear ILT */
7125 base = FUNC_ILT_BASE(func);
7126 for (i = base; i < base + ILT_PER_FUNC; i++)
7127 bnx2x_ilt_wr(bp, i, 0);
7130 static void bnx2x_reset_port(struct bnx2x *bp)
7132 int port = BP_PORT(bp);
7133 u32 val;
7135 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
7137 /* Do not rcv packets to BRB */
7138 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
7139 /* Do not direct rcv packets that are not for MCP to the BRB */
7140 REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
7141 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
7143 /* Configure AEU */
7144 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
7146 msleep(100);
7147 /* Check for BRB port occupancy */
7148 val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
7149 if (val)
7150 DP(NETIF_MSG_IFDOWN,
7151 "BRB1 is not empty %d blocks are occupied\n", val);
7153 /* TODO: Close Doorbell port? */
7156 static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
7158 DP(BNX2X_MSG_MCP, "function %d reset_code %x\n",
7159 BP_FUNC(bp), reset_code);
7161 switch (reset_code) {
7162 case FW_MSG_CODE_DRV_UNLOAD_COMMON:
7163 bnx2x_reset_port(bp);
7164 bnx2x_reset_func(bp);
7165 bnx2x_reset_common(bp);
7166 break;
7168 case FW_MSG_CODE_DRV_UNLOAD_PORT:
7169 bnx2x_reset_port(bp);
7170 bnx2x_reset_func(bp);
7171 break;
7173 case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
7174 bnx2x_reset_func(bp);
7175 break;
7177 default:
7178 BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
7179 break;
7183 /* must be called with rtnl_lock */
7184 static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
7186 int port = BP_PORT(bp);
7187 u32 reset_code = 0;
7188 int i, cnt, rc;
7190 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
7192 bp->rx_mode = BNX2X_RX_MODE_NONE;
7193 bnx2x_set_storm_rx_mode(bp);
7195 bnx2x_netif_stop(bp, 1);
7197 del_timer_sync(&bp->timer);
7198 SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
7199 (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
7200 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
7202 /* Release IRQs */
7203 bnx2x_free_irq(bp);
7205 /* Wait until tx fastpath tasks complete */
7206 for_each_tx_queue(bp, i) {
7207 struct bnx2x_fastpath *fp = &bp->fp[i];
7209 cnt = 1000;
7210 while (bnx2x_has_tx_work_unload(fp)) {
7212 bnx2x_tx_int(fp);
7213 if (!cnt) {
7214 BNX2X_ERR("timeout waiting for queue[%d]\n",
7216 #ifdef BNX2X_STOP_ON_ERROR
7217 bnx2x_panic();
7218 return -EBUSY;
7219 #else
7220 break;
7221 #endif
7223 cnt--;
7224 msleep(1);
7227 /* Give HW time to discard old tx messages */
7228 msleep(1);
7230 if (CHIP_IS_E1(bp)) {
7231 struct mac_configuration_cmd *config =
7232 bnx2x_sp(bp, mcast_config);
7234 bnx2x_set_mac_addr_e1(bp, 0);
7236 for (i = 0; i < config->hdr.length; i++)
7237 CAM_INVALIDATE(config->config_table[i]);
7239 config->hdr.length = i;
7240 if (CHIP_REV_IS_SLOW(bp))
7241 config->hdr.offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
7242 else
7243 config->hdr.offset = BNX2X_MAX_MULTICAST*(1 + port);
7244 config->hdr.client_id = bp->fp->cl_id;
7245 config->hdr.reserved1 = 0;
7247 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
7248 U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
7249 U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);
7251 } else { /* E1H */
7252 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
7254 bnx2x_set_mac_addr_e1h(bp, 0);
7256 for (i = 0; i < MC_HASH_SIZE; i++)
7257 REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
7260 if (unload_mode == UNLOAD_NORMAL)
7261 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7263 else if (bp->flags & NO_WOL_FLAG) {
7264 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
7265 if (CHIP_IS_E1H(bp))
7266 REG_WR(bp, MISC_REG_E1HMF_MODE, 0);
7268 } else if (bp->wol) {
7269 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
7270 u8 *mac_addr = bp->dev->dev_addr;
7271 u32 val;
7272 /* The mac address is written to entries 1-4 to
7273 preserve entry 0 which is used by the PMF */
7274 u8 entry = (BP_E1HVN(bp) + 1)*8;
7276 val = (mac_addr[0] << 8) | mac_addr[1];
7277 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
7279 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
7280 (mac_addr[4] << 8) | mac_addr[5];
7281 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
7283 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
7285 } else
7286 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7288 /* Close multi and leading connections
7289 Completions for ramrods are collected in a synchronous way */
7290 for_each_nondefault_queue(bp, i)
7291 if (bnx2x_stop_multi(bp, i))
7292 goto unload_error;
7294 rc = bnx2x_stop_leading(bp);
7295 if (rc) {
7296 BNX2X_ERR("Stop leading failed!\n");
7297 #ifdef BNX2X_STOP_ON_ERROR
7298 return -EBUSY;
7299 #else
7300 goto unload_error;
7301 #endif
7304 unload_error:
7305 if (!BP_NOMCP(bp))
7306 reset_code = bnx2x_fw_command(bp, reset_code);
7307 else {
7308 DP(NETIF_MSG_IFDOWN, "NO MCP - load counts %d, %d, %d\n",
7309 load_count[0], load_count[1], load_count[2]);
7310 load_count[0]--;
7311 load_count[1 + port]--;
7312 DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts %d, %d, %d\n",
7313 load_count[0], load_count[1], load_count[2]);
7314 if (load_count[0] == 0)
7315 reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
7316 else if (load_count[1 + port] == 0)
7317 reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
7318 else
7319 reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
7322 if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
7323 (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
7324 bnx2x__link_reset(bp);
7326 /* Reset the chip */
7327 bnx2x_reset_chip(bp, reset_code);
7329 /* Report UNLOAD_DONE to MCP */
7330 if (!BP_NOMCP(bp))
7331 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
7333 bp->port.pmf = 0;
7335 /* Free SKBs, SGEs, TPA pool and driver internals */
7336 bnx2x_free_skbs(bp);
7337 for_each_rx_queue(bp, i)
7338 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
7339 for_each_rx_queue(bp, i)
7340 netif_napi_del(&bnx2x_fp(bp, i, napi));
7341 bnx2x_free_mem(bp);
7343 bp->state = BNX2X_STATE_CLOSED;
7345 netif_carrier_off(bp->dev);
7347 return 0;
7350 static void bnx2x_reset_task(struct work_struct *work)
7352 struct bnx2x *bp = container_of(work, struct bnx2x, reset_task);
7354 #ifdef BNX2X_STOP_ON_ERROR
7355 BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
7356 " so reset not done to allow debug dump,\n"
7357 KERN_ERR " you will need to reboot when done\n");
7358 return;
7359 #endif
7361 rtnl_lock();
7363 if (!netif_running(bp->dev))
7364 goto reset_task_exit;
7366 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
7367 bnx2x_nic_load(bp, LOAD_NORMAL);
7369 reset_task_exit:
7370 rtnl_unlock();
7373 /* end of nic load/unload */
7375 /* ethtool_ops */
7378 * Init service functions
7381 static inline u32 bnx2x_get_pretend_reg(struct bnx2x *bp, int func)
7383 switch (func) {
7384 case 0: return PXP2_REG_PGL_PRETEND_FUNC_F0;
7385 case 1: return PXP2_REG_PGL_PRETEND_FUNC_F1;
7386 case 2: return PXP2_REG_PGL_PRETEND_FUNC_F2;
7387 case 3: return PXP2_REG_PGL_PRETEND_FUNC_F3;
7388 case 4: return PXP2_REG_PGL_PRETEND_FUNC_F4;
7389 case 5: return PXP2_REG_PGL_PRETEND_FUNC_F5;
7390 case 6: return PXP2_REG_PGL_PRETEND_FUNC_F6;
7391 case 7: return PXP2_REG_PGL_PRETEND_FUNC_F7;
7392 default:
7393 BNX2X_ERR("Unsupported function index: %d\n", func);
7394 return (u32)(-1);
7395 }
7396 }
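/* Note on the mechanism used below: writing a function number into the
 * per-function PGL pretend register makes subsequent GRC accesses from
 * this PCI function be attributed to the "pretended" function; this is
 * how the driver disables the interrupt context left by UNDI on
 * function 0 before restoring its own identity.
 */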
7398 static void bnx2x_undi_int_disable_e1h(struct bnx2x *bp, int orig_func)
7400 u32 reg = bnx2x_get_pretend_reg(bp, orig_func), new_val;
7402 /* Flush all outstanding writes */
7403 mmiowb();
7405 /* Pretend to be function 0 */
7406 REG_WR(bp, reg, 0);
7407 /* Flush the GRC transaction (in the chip) */
7408 new_val = REG_RD(bp, reg);
7409 if (new_val != 0) {
7410 BNX2X_ERR("Hmmm... Pretend register wasn't updated: (0,%d)!\n",
7411 new_val);
7412 BUG();
7415 /* From now we are in the "like-E1" mode */
7416 bnx2x_int_disable(bp);
7418 /* Flush all outstanding writes */
7419 mmiowb();
7421 /* Restore the original function settings */
7422 REG_WR(bp, reg, orig_func);
7423 new_val = REG_RD(bp, reg);
7424 if (new_val != orig_func) {
7425 BNX2X_ERR("Hmmm... Pretend register wasn't updated: (%d,%d)!\n",
7426 orig_func, new_val);
7427 BUG();
7431 static inline void bnx2x_undi_int_disable(struct bnx2x *bp, int func)
7433 if (CHIP_IS_E1H(bp))
7434 bnx2x_undi_int_disable_e1h(bp, func);
7435 else
7436 bnx2x_int_disable(bp);
7439 static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
7441 u32 val;
7443 /* Check if there is any driver already loaded */
7444 val = REG_RD(bp, MISC_REG_UNPREPARED);
7445 if (val == 0x1) {
7446 /* Check if it is the UNDI driver
7447 * UNDI driver initializes CID offset for normal bell to 0x7
7448 */
7449 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
7450 val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
7451 if (val == 0x7) {
7452 u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7453 /* save our func */
7454 int func = BP_FUNC(bp);
7455 u32 swap_en;
7456 u32 swap_val;
7458 /* clear the UNDI indication */
7459 REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
7461 BNX2X_DEV_INFO("UNDI is active! reset device\n");
7463 /* try unload UNDI on port 0 */
7464 bp->func = 0;
7465 bp->fw_seq =
7466 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
7467 DRV_MSG_SEQ_NUMBER_MASK);
7468 reset_code = bnx2x_fw_command(bp, reset_code);
7470 /* if UNDI is loaded on the other port */
7471 if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {
7473 /* send "DONE" for previous unload */
7474 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
7476 /* unload UNDI on port 1 */
7477 bp->func = 1;
7478 bp->fw_seq =
7479 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
7480 DRV_MSG_SEQ_NUMBER_MASK);
7481 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7483 bnx2x_fw_command(bp, reset_code);
7486 /* now it's safe to release the lock */
7487 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
7489 bnx2x_undi_int_disable(bp, func);
7491 /* close input traffic and wait for it */
7492 /* Do not rcv packets to BRB */
7493 REG_WR(bp,
7494 (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
7495 NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
7496 /* Do not direct rcv packets that are not for MCP to
7497 * the BRB */
7498 REG_WR(bp,
7499 (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
7500 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
7501 /* clear AEU */
7502 REG_WR(bp,
7503 (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
7504 MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
7505 msleep(10);
7507 /* save NIG port swap info */
7508 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
7509 swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
7510 /* reset device */
7511 REG_WR(bp,
7512 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
7513 0xd3ffffff);
7514 REG_WR(bp,
7515 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
7516 0x1403);
7517 /* take the NIG out of reset and restore swap values */
7518 REG_WR(bp,
7519 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
7520 MISC_REGISTERS_RESET_REG_1_RST_NIG);
7521 REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
7522 REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);
7524 /* send unload done to the MCP */
7525 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
7527 /* restore our func and fw_seq */
7528 bp->func = func;
7529 bp->fw_seq =
7530 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
7531 DRV_MSG_SEQ_NUMBER_MASK);
7533 } else
7534 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
7538 static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
7540 u32 val, val2, val3, val4, id;
7541 u16 pmc;
7543 /* Get the chip revision id and number. */
7544 /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
7545 val = REG_RD(bp, MISC_REG_CHIP_NUM);
7546 id = ((val & 0xffff) << 16);
7547 val = REG_RD(bp, MISC_REG_CHIP_REV);
7548 id |= ((val & 0xf) << 12);
7549 val = REG_RD(bp, MISC_REG_CHIP_METAL);
7550 id |= ((val & 0xff) << 4);
7551 val = REG_RD(bp, MISC_REG_BOND_ID);
7552 id |= (val & 0xf);
7553 bp->common.chip_id = id;
7554 bp->link_params.chip_id = bp->common.chip_id;
7555 BNX2X_DEV_INFO("chip ID is 0x%x\n", id);
7557 val = (REG_RD(bp, 0x2874) & 0x55);
7558 if ((bp->common.chip_id & 0x1) ||
7559 (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) {
7560 bp->flags |= ONE_PORT_FLAG;
7561 BNX2X_DEV_INFO("single port device\n");
7564 val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
7565 bp->common.flash_size = (NVRAM_1MB_SIZE <<
7566 (val & MCPR_NVM_CFG4_FLASH_SIZE));
7567 BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
7568 bp->common.flash_size, bp->common.flash_size);
7570 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
7571 bp->link_params.shmem_base = bp->common.shmem_base;
7572 BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);
7574 if (!bp->common.shmem_base ||
7575 (bp->common.shmem_base < 0xA0000) ||
7576 (bp->common.shmem_base >= 0xC0000)) {
7577 BNX2X_DEV_INFO("MCP not active\n");
7578 bp->flags |= NO_MCP_FLAG;
7579 return;
7582 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
7583 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
7584 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
7585 BNX2X_ERR("BAD MCP validity signature\n");
7587 bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
7588 BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config);
7590 bp->link_params.hw_led_mode = ((bp->common.hw_config &
7591 SHARED_HW_CFG_LED_MODE_MASK) >>
7592 SHARED_HW_CFG_LED_MODE_SHIFT);
7594 bp->link_params.feature_config_flags = 0;
7595 val = SHMEM_RD(bp, dev_info.shared_feature_config.config);
7596 if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED)
7597 bp->link_params.feature_config_flags |=
7598 FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
7599 else
7600 bp->link_params.feature_config_flags &=
7601 ~FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
7603 val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
7604 bp->common.bc_ver = val;
7605 BNX2X_DEV_INFO("bc_ver %X\n", val);
7606 if (val < BNX2X_BC_VER) {
7607 /* for now only warn
7608 * later we might need to enforce this */
7609 BNX2X_ERR("This driver needs bc_ver %X but found %X,"
7610 " please upgrade BC\n", BNX2X_BC_VER, val);
7613 if (BP_E1HVN(bp) == 0) {
7614 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
7615 bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
7616 } else {
7617 /* no WOL capability for E1HVN != 0 */
7618 bp->flags |= NO_WOL_FLAG;
7620 BNX2X_DEV_INFO("%sWoL capable\n",
7621 (bp->flags & NO_WOL_FLAG) ? "not " : "");
7623 val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
7624 val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
7625 val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
7626 val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
7628 printk(KERN_INFO PFX "part number %X-%X-%X-%X\n",
7629 val, val2, val3, val4);
static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
						    u32 switch_cfg)
{
	int port = BP_PORT(bp);
	u32 ext_phy_type;

	switch (switch_cfg) {
	case SWITCH_CFG_1G:
		BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg);

		ext_phy_type =
			SERDES_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
		switch (ext_phy_type) {
		case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10baseT_Half |
					       SUPPORTED_10baseT_Full |
					       SUPPORTED_100baseT_Half |
					       SUPPORTED_100baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_2500baseX_Full |
					       SUPPORTED_TP |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10baseT_Half |
					       SUPPORTED_10baseT_Full |
					       SUPPORTED_100baseT_Half |
					       SUPPORTED_100baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_TP |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		default:
			BNX2X_ERR("NVRAM config error. "
				  "BAD SerDes ext_phy_config 0x%x\n",
				  bp->link_params.ext_phy_config);
			return;
		}

		bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
					   port*0x10);
		BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
		break;

	case SWITCH_CFG_10G:
		BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg);

		ext_phy_type =
			XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
		switch (ext_phy_type) {
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10baseT_Half |
					       SUPPORTED_10baseT_Full |
					       SUPPORTED_100baseT_Half |
					       SUPPORTED_100baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_2500baseX_Full |
					       SUPPORTED_10000baseT_Full |
					       SUPPORTED_TP |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8073)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_2500baseX_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8726)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_Autoneg |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_TP |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (BCM8481)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10baseT_Half |
					       SUPPORTED_10baseT_Full |
					       SUPPORTED_100baseT_Half |
					       SUPPORTED_100baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_10000baseT_Full |
					       SUPPORTED_TP |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
			BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
				  bp->link_params.ext_phy_config);
			break;

		default:
			BNX2X_ERR("NVRAM config error. "
				  "BAD XGXS ext_phy_config 0x%x\n",
				  bp->link_params.ext_phy_config);
			return;
		}

		bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
					   port*0x18);
		BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);

		break;

	default:
		BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
			  bp->port.link_config);
		return;
	}
	bp->link_params.phy_addr = bp->port.phy_addr;

	/* mask what we support according to speed_cap_mask */
	if (!(bp->link_params.speed_cap_mask &
	      PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
		bp->port.supported &= ~SUPPORTED_10baseT_Half;

	if (!(bp->link_params.speed_cap_mask &
	      PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
		bp->port.supported &= ~SUPPORTED_10baseT_Full;

	if (!(bp->link_params.speed_cap_mask &
	      PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
		bp->port.supported &= ~SUPPORTED_100baseT_Half;

	if (!(bp->link_params.speed_cap_mask &
	      PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
		bp->port.supported &= ~SUPPORTED_100baseT_Full;

	if (!(bp->link_params.speed_cap_mask &
	      PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
		bp->port.supported &= ~(SUPPORTED_1000baseT_Half |
					SUPPORTED_1000baseT_Full);

	if (!(bp->link_params.speed_cap_mask &
	      PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
		bp->port.supported &= ~SUPPORTED_2500baseX_Full;

	if (!(bp->link_params.speed_cap_mask &
	      PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
		bp->port.supported &= ~SUPPORTED_10000baseT_Full;

	BNX2X_DEV_INFO("supported 0x%x\n", bp->port.supported);
}

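/* Worked example (illustrative, not from the original source): if the
 * PHY switch above reports everything from SUPPORTED_10baseT_Half up to
 * SUPPORTED_10000baseT_Full but the NVRAM speed_cap_mask has only the
 * D0_1G and D0_10G bits set, the masking above clears every 10M/100M and
 * 2.5G bit, leaving bp->port.supported with just the 1G and 10G link
 * modes plus the non-speed bits (SUPPORTED_Autoneg, SUPPORTED_Pause,
 * SUPPORTED_TP/FIBRE), which are never masked here.
 */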
static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
{
	bp->link_params.req_duplex = DUPLEX_FULL;

	switch (bp->port.link_config & PORT_FEATURE_LINK_SPEED_MASK) {
	case PORT_FEATURE_LINK_SPEED_AUTO:
		if (bp->port.supported & SUPPORTED_Autoneg) {
			bp->link_params.req_line_speed = SPEED_AUTO_NEG;
			bp->port.advertising = bp->port.supported;
		} else {
			u32 ext_phy_type =
			    XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);

			if ((ext_phy_type ==
			     PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
			    (ext_phy_type ==
			     PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706)) {
				/* force 10G, no AN */
				bp->link_params.req_line_speed = SPEED_10000;
				bp->port.advertising =
						(ADVERTISED_10000baseT_Full |
						 ADVERTISED_FIBRE);
				break;
			}
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  " Autoneg not supported\n",
				  bp->port.link_config);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_10M_FULL:
		if (bp->port.supported & SUPPORTED_10baseT_Full) {
			bp->link_params.req_line_speed = SPEED_10;
			bp->port.advertising = (ADVERTISED_10baseT_Full |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  " speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_10M_HALF:
		if (bp->port.supported & SUPPORTED_10baseT_Half) {
			bp->link_params.req_line_speed = SPEED_10;
			bp->link_params.req_duplex = DUPLEX_HALF;
			bp->port.advertising = (ADVERTISED_10baseT_Half |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  " speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_100M_FULL:
		if (bp->port.supported & SUPPORTED_100baseT_Full) {
			bp->link_params.req_line_speed = SPEED_100;
			bp->port.advertising = (ADVERTISED_100baseT_Full |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  " speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_100M_HALF:
		if (bp->port.supported & SUPPORTED_100baseT_Half) {
			bp->link_params.req_line_speed = SPEED_100;
			bp->link_params.req_duplex = DUPLEX_HALF;
			bp->port.advertising = (ADVERTISED_100baseT_Half |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  " speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_1G:
		if (bp->port.supported & SUPPORTED_1000baseT_Full) {
			bp->link_params.req_line_speed = SPEED_1000;
			bp->port.advertising = (ADVERTISED_1000baseT_Full |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  " speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_2_5G:
		if (bp->port.supported & SUPPORTED_2500baseX_Full) {
			bp->link_params.req_line_speed = SPEED_2500;
			bp->port.advertising = (ADVERTISED_2500baseX_Full |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  " speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_10G_CX4:
	case PORT_FEATURE_LINK_SPEED_10G_KX4:
	case PORT_FEATURE_LINK_SPEED_10G_KR:
		if (bp->port.supported & SUPPORTED_10000baseT_Full) {
			bp->link_params.req_line_speed = SPEED_10000;
			bp->port.advertising = (ADVERTISED_10000baseT_Full |
						ADVERTISED_FIBRE);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  " speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	default:
		BNX2X_ERR("NVRAM config error. "
			  "BAD link speed link_config 0x%x\n",
			  bp->port.link_config);
		bp->link_params.req_line_speed = SPEED_AUTO_NEG;
		bp->port.advertising = bp->port.supported;
		break;
	}

	bp->link_params.req_flow_ctrl = (bp->port.link_config &
					 PORT_FEATURE_FLOW_CONTROL_MASK);
	if ((bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
	    !(bp->port.supported & SUPPORTED_Autoneg))
		bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;

	BNX2X_DEV_INFO("req_line_speed %d req_duplex %d req_flow_ctrl 0x%x"
		       " advertising 0x%x\n",
		       bp->link_params.req_line_speed,
		       bp->link_params.req_duplex,
		       bp->link_params.req_flow_ctrl, bp->port.advertising);
}

static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 val, val2;
	u32 config;
	u16 i;

	bp->link_params.bp = bp;
	bp->link_params.port = port;

	bp->link_params.lane_config =
		SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
	bp->link_params.ext_phy_config =
		SHMEM_RD(bp,
			 dev_info.port_hw_config[port].external_phy_config);
	bp->link_params.speed_cap_mask =
		SHMEM_RD(bp,
			 dev_info.port_hw_config[port].speed_capability_mask);

	bp->port.link_config =
		SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);

	/* Get the 4 lanes xgxs config rx and tx */
	for (i = 0; i < 2; i++) {
		val = SHMEM_RD(bp,
			  dev_info.port_hw_config[port].xgxs_config_rx[i<<1]);
		bp->link_params.xgxs_config_rx[i << 1] = ((val>>16) & 0xffff);
		bp->link_params.xgxs_config_rx[(i << 1) + 1] = (val & 0xffff);

		val = SHMEM_RD(bp,
			  dev_info.port_hw_config[port].xgxs_config_tx[i<<1]);
		bp->link_params.xgxs_config_tx[i << 1] = ((val>>16) & 0xffff);
		bp->link_params.xgxs_config_tx[(i << 1) + 1] = (val & 0xffff);
	}

	config = SHMEM_RD(bp, dev_info.port_feature_config[port].config);
	if (config & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_ENABLED)
		bp->link_params.feature_config_flags |=
				FEATURE_CONFIG_MODULE_ENFORCMENT_ENABLED;
	else
		bp->link_params.feature_config_flags &=
				~FEATURE_CONFIG_MODULE_ENFORCMENT_ENABLED;

	/* If the device is capable of WoL, set the default state according
	 * to the HW
	 */
	bp->wol = (!(bp->flags & NO_WOL_FLAG) &&
		   (config & PORT_FEATURE_WOL_ENABLED));

	BNX2X_DEV_INFO("lane_config 0x%08x ext_phy_config 0x%08x"
		       " speed_cap_mask 0x%08x link_config 0x%08x\n",
		       bp->link_params.lane_config,
		       bp->link_params.ext_phy_config,
		       bp->link_params.speed_cap_mask, bp->port.link_config);

	bp->link_params.switch_cfg = (bp->port.link_config &
				      PORT_FEATURE_CONNECTED_SWITCH_MASK);
	bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);

	bnx2x_link_settings_requested(bp);

	val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
	val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
	bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
	bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
	bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
	bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
	bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
	bp->dev->dev_addr[5] = (u8)(val & 0xff);
	memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
	memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
}

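/* Worked example (illustrative values only): with mac_upper = 0x0010 and
 * mac_lower = 0x18a1b2c3 read from shared memory, the shifts above unpack
 * dev_addr = 00:10:18:a1:b2:c3 - the upper word supplies octets 0-1 and
 * the lower dword octets 2-5, most significant byte first.
 */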
static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	u32 val, val2;
	int rc = 0;

	bnx2x_get_common_hwinfo(bp);

	bp->e1hov = 0;
	bp->e1hmf = 0;
	if (CHIP_IS_E1H(bp)) {
		bp->mf_config =
			SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);

		val = (SHMEM_RD(bp, mf_cfg.func_mf_config[func].e1hov_tag) &
		       FUNC_MF_CFG_E1HOV_TAG_MASK);
		if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {

			bp->e1hov = val;
			bp->e1hmf = 1;
			BNX2X_DEV_INFO("MF mode E1HOV for func %d is %d "
				       "(0x%04x)\n",
				       func, bp->e1hov, bp->e1hov);
		} else {
			BNX2X_DEV_INFO("single function mode\n");
			if (BP_E1HVN(bp)) {
				BNX2X_ERR("!!! No valid E1HOV for func %d,"
					  " aborting\n", func);
				rc = -EPERM;
			}
		}
	}

	if (!BP_NOMCP(bp)) {
		bnx2x_get_port_hwinfo(bp);

		bp->fw_seq = (SHMEM_RD(bp, func_mb[func].drv_mb_header) &
			      DRV_MSG_SEQ_NUMBER_MASK);
		BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
	}

	if (IS_E1HMF(bp)) {
		val2 = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_upper);
		val = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_lower);
		if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
		    (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
			bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
			bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
			bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
			bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
			bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
			bp->dev->dev_addr[5] = (u8)(val & 0xff);
			memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
			       ETH_ALEN);
			memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
			       ETH_ALEN);
		}

		return rc;
	}

	if (BP_NOMCP(bp)) {
		/* only supposed to happen on emulation/FPGA */
		BNX2X_ERR("warning random MAC workaround active\n");
		random_ether_addr(bp->dev->dev_addr);
		memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
	}

	return rc;
}

static int __devinit bnx2x_init_bp(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	int timer_interval;
	int rc;

	/* Disable interrupt handling until HW is initialized */
	atomic_set(&bp->intr_sem, 1);

	mutex_init(&bp->port.phy_mutex);

	INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
	INIT_WORK(&bp->reset_task, bnx2x_reset_task);

	rc = bnx2x_get_hwinfo(bp);

	/* need to reset chip if undi was active */
	if (!BP_NOMCP(bp))
		bnx2x_undi_unload(bp);

	if (CHIP_REV_IS_FPGA(bp))
		printk(KERN_ERR PFX "FPGA detected\n");

	if (BP_NOMCP(bp) && (func == 0))
		printk(KERN_ERR PFX
		       "MCP disabled, must load devices in order!\n");

	/* Set multi queue mode */
	if ((multi_mode != ETH_RSS_MODE_DISABLED) &&
	    ((int_mode == INT_MODE_INTx) || (int_mode == INT_MODE_MSI))) {
		printk(KERN_ERR PFX
		      "Multi disabled since int_mode requested is not MSI-X\n");
		multi_mode = ETH_RSS_MODE_DISABLED;
	}
	bp->multi_mode = multi_mode;

	/* Set TPA flags */
	if (disable_tpa) {
		bp->flags &= ~TPA_ENABLE_FLAG;
		bp->dev->features &= ~NETIF_F_LRO;
	} else {
		bp->flags |= TPA_ENABLE_FLAG;
		bp->dev->features |= NETIF_F_LRO;
	}

	bp->mrrs = mrrs;

	bp->tx_ring_size = MAX_TX_AVAIL;
	bp->rx_ring_size = MAX_RX_AVAIL;

	bp->rx_csum = 1;

	bp->tx_ticks = 50;
	bp->rx_ticks = 25;

	timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
	bp->current_interval = (poll ? poll : timer_interval);

	init_timer(&bp->timer);
	bp->timer.expires = jiffies + bp->current_interval;
	bp->timer.data = (unsigned long) bp;
	bp->timer.function = bnx2x_timer;

	return rc;
}

/*
 * ethtool service functions
 */

/* All ethtool functions called with rtnl_lock */

static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct bnx2x *bp = netdev_priv(dev);

	cmd->supported = bp->port.supported;
	cmd->advertising = bp->port.advertising;

	if (netif_carrier_ok(dev)) {
		cmd->speed = bp->link_vars.line_speed;
		cmd->duplex = bp->link_vars.duplex;
	} else {
		cmd->speed = bp->link_params.req_line_speed;
		cmd->duplex = bp->link_params.req_duplex;
	}
	if (IS_E1HMF(bp)) {
		u16 vn_max_rate;

		vn_max_rate = ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
			       FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
		if (vn_max_rate < cmd->speed)
			cmd->speed = vn_max_rate;
	}

	if (bp->link_params.switch_cfg == SWITCH_CFG_10G) {
		u32 ext_phy_type =
			XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);

		switch (ext_phy_type) {
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
			cmd->port = PORT_FIBRE;
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
			cmd->port = PORT_TP;
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
			BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
				  bp->link_params.ext_phy_config);
			break;

		default:
			DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
			   bp->link_params.ext_phy_config);
			break;
		}
	} else
		cmd->port = PORT_TP;

	cmd->phy_address = bp->port.phy_addr;
	cmd->transceiver = XCVR_INTERNAL;

	if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
		cmd->autoneg = AUTONEG_ENABLE;
	else
		cmd->autoneg = AUTONEG_DISABLE;

	cmd->maxtxpkt = 0;
	cmd->maxrxpkt = 0;

	DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
	   DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
	   DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
	   DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
	   cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
	   cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
	   cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);

	return 0;
}

static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct bnx2x *bp = netdev_priv(dev);
	u32 advertising;

	if (IS_E1HMF(bp))
		return 0;

	DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
	   DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
	   DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
	   DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
	   cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
	   cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
	   cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);

	if (cmd->autoneg == AUTONEG_ENABLE) {
		if (!(bp->port.supported & SUPPORTED_Autoneg)) {
			DP(NETIF_MSG_LINK, "Autoneg not supported\n");
			return -EINVAL;
		}

		/* advertise the requested speed and duplex if supported */
		cmd->advertising &= bp->port.supported;

		bp->link_params.req_line_speed = SPEED_AUTO_NEG;
		bp->link_params.req_duplex = DUPLEX_FULL;
		bp->port.advertising |= (ADVERTISED_Autoneg |
					 cmd->advertising);

	} else { /* forced speed */
		/* advertise the requested speed and duplex if supported */
		switch (cmd->speed) {
		case SPEED_10:
			if (cmd->duplex == DUPLEX_FULL) {
				if (!(bp->port.supported &
				      SUPPORTED_10baseT_Full)) {
					DP(NETIF_MSG_LINK,
					   "10M full not supported\n");
					return -EINVAL;
				}

				advertising = (ADVERTISED_10baseT_Full |
					       ADVERTISED_TP);
			} else {
				if (!(bp->port.supported &
				      SUPPORTED_10baseT_Half)) {
					DP(NETIF_MSG_LINK,
					   "10M half not supported\n");
					return -EINVAL;
				}

				advertising = (ADVERTISED_10baseT_Half |
					       ADVERTISED_TP);
			}
			break;

		case SPEED_100:
			if (cmd->duplex == DUPLEX_FULL) {
				if (!(bp->port.supported &
				      SUPPORTED_100baseT_Full)) {
					DP(NETIF_MSG_LINK,
					   "100M full not supported\n");
					return -EINVAL;
				}

				advertising = (ADVERTISED_100baseT_Full |
					       ADVERTISED_TP);
			} else {
				if (!(bp->port.supported &
				      SUPPORTED_100baseT_Half)) {
					DP(NETIF_MSG_LINK,
					   "100M half not supported\n");
					return -EINVAL;
				}

				advertising = (ADVERTISED_100baseT_Half |
					       ADVERTISED_TP);
			}
			break;

		case SPEED_1000:
			if (cmd->duplex != DUPLEX_FULL) {
				DP(NETIF_MSG_LINK, "1G half not supported\n");
				return -EINVAL;
			}

			if (!(bp->port.supported & SUPPORTED_1000baseT_Full)) {
				DP(NETIF_MSG_LINK, "1G full not supported\n");
				return -EINVAL;
			}

			advertising = (ADVERTISED_1000baseT_Full |
				       ADVERTISED_TP);
			break;

		case SPEED_2500:
			if (cmd->duplex != DUPLEX_FULL) {
				DP(NETIF_MSG_LINK,
				   "2.5G half not supported\n");
				return -EINVAL;
			}

			if (!(bp->port.supported & SUPPORTED_2500baseX_Full)) {
				DP(NETIF_MSG_LINK,
				   "2.5G full not supported\n");
				return -EINVAL;
			}

			advertising = (ADVERTISED_2500baseX_Full |
				       ADVERTISED_TP);
			break;

		case SPEED_10000:
			if (cmd->duplex != DUPLEX_FULL) {
				DP(NETIF_MSG_LINK, "10G half not supported\n");
				return -EINVAL;
			}

			if (!(bp->port.supported & SUPPORTED_10000baseT_Full)) {
				DP(NETIF_MSG_LINK, "10G full not supported\n");
				return -EINVAL;
			}

			advertising = (ADVERTISED_10000baseT_Full |
				       ADVERTISED_FIBRE);
			break;

		default:
			DP(NETIF_MSG_LINK, "Unsupported speed\n");
			return -EINVAL;
		}

		bp->link_params.req_line_speed = cmd->speed;
		bp->link_params.req_duplex = cmd->duplex;
		bp->port.advertising = advertising;
	}

	DP(NETIF_MSG_LINK, "req_line_speed %d\n"
	   DP_LEVEL " req_duplex %d advertising 0x%x\n",
	   bp->link_params.req_line_speed, bp->link_params.req_duplex,
	   bp->port.advertising);

	if (netif_running(dev)) {
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);
		bnx2x_link_set(bp);
	}

	return 0;
}

#define PHY_FW_VER_LEN			10

static void bnx2x_get_drvinfo(struct net_device *dev,
			      struct ethtool_drvinfo *info)
{
	struct bnx2x *bp = netdev_priv(dev);
	u8 phy_fw_ver[PHY_FW_VER_LEN];

	strcpy(info->driver, DRV_MODULE_NAME);
	strcpy(info->version, DRV_MODULE_VERSION);

	phy_fw_ver[0] = '\0';
	if (bp->port.pmf) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_get_ext_phy_fw_version(&bp->link_params,
					     (bp->state != BNX2X_STATE_CLOSED),
					     phy_fw_ver, PHY_FW_VER_LEN);
		bnx2x_release_phy_lock(bp);
	}

	snprintf(info->fw_version, 32, "BC:%d.%d.%d%s%s",
		 (bp->common.bc_ver & 0xff0000) >> 16,
		 (bp->common.bc_ver & 0xff00) >> 8,
		 (bp->common.bc_ver & 0xff),
		 ((phy_fw_ver[0] != '\0') ? " PHY:" : ""), phy_fw_ver);
	strcpy(info->bus_info, pci_name(bp->pdev));
	info->n_stats = BNX2X_NUM_STATS;
	info->testinfo_len = BNX2X_NUM_TESTS;
	info->eedump_len = bp->common.flash_size;
	info->regdump_len = 0;
}

#define IS_E1_ONLINE(info)	(((info) & RI_E1_ONLINE) == RI_E1_ONLINE)
#define IS_E1H_ONLINE(info)	(((info) & RI_E1H_ONLINE) == RI_E1H_ONLINE)

static int bnx2x_get_regs_len(struct net_device *dev)
{
	static u32 regdump_len;
	struct bnx2x *bp = netdev_priv(dev);
	int i;

	if (regdump_len)
		return regdump_len;

	if (CHIP_IS_E1(bp)) {
		for (i = 0; i < REGS_COUNT; i++)
			if (IS_E1_ONLINE(reg_addrs[i].info))
				regdump_len += reg_addrs[i].size;

		for (i = 0; i < WREGS_COUNT_E1; i++)
			if (IS_E1_ONLINE(wreg_addrs_e1[i].info))
				regdump_len += wreg_addrs_e1[i].size *
					(1 + wreg_addrs_e1[i].read_regs_count);

	} else { /* E1H */
		for (i = 0; i < REGS_COUNT; i++)
			if (IS_E1H_ONLINE(reg_addrs[i].info))
				regdump_len += reg_addrs[i].size;

		for (i = 0; i < WREGS_COUNT_E1H; i++)
			if (IS_E1H_ONLINE(wreg_addrs_e1h[i].info))
				regdump_len += wreg_addrs_e1h[i].size *
					(1 + wreg_addrs_e1h[i].read_regs_count);
	}
	regdump_len *= 4;
	regdump_len += sizeof(struct dump_hdr);

	return regdump_len;
}

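/* Sizing sketch (hypothetical numbers, for illustration only): the loops
 * above count 32-bit register words, so a chip with, say, 50000 online
 * words and no wide-bus entries would yield 50000 * 4 = 200000 bytes,
 * plus sizeof(struct dump_hdr) for the header that bnx2x_get_regs()
 * prepends. The result is cached in the static regdump_len across calls.
 */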
static void bnx2x_get_regs(struct net_device *dev,
			   struct ethtool_regs *regs, void *_p)
{
	u32 *p = _p, i, j;
	struct bnx2x *bp = netdev_priv(dev);
	struct dump_hdr dump_hdr = {0};

	regs->version = 0;
	memset(p, 0, regs->len);

	if (!netif_running(bp->dev))
		return;

	dump_hdr.hdr_size = (sizeof(struct dump_hdr) / 4) - 1;
	dump_hdr.dump_sign = dump_sign_all;
	dump_hdr.xstorm_waitp = REG_RD(bp, XSTORM_WAITP_ADDR);
	dump_hdr.tstorm_waitp = REG_RD(bp, TSTORM_WAITP_ADDR);
	dump_hdr.ustorm_waitp = REG_RD(bp, USTORM_WAITP_ADDR);
	dump_hdr.cstorm_waitp = REG_RD(bp, CSTORM_WAITP_ADDR);
	dump_hdr.info = CHIP_IS_E1(bp) ? RI_E1_ONLINE : RI_E1H_ONLINE;

	memcpy(p, &dump_hdr, sizeof(struct dump_hdr));
	p += dump_hdr.hdr_size + 1;

	if (CHIP_IS_E1(bp)) {
		for (i = 0; i < REGS_COUNT; i++)
			if (IS_E1_ONLINE(reg_addrs[i].info))
				for (j = 0; j < reg_addrs[i].size; j++)
					*p++ = REG_RD(bp,
						      reg_addrs[i].addr + j*4);

	} else { /* E1H */
		for (i = 0; i < REGS_COUNT; i++)
			if (IS_E1H_ONLINE(reg_addrs[i].info))
				for (j = 0; j < reg_addrs[i].size; j++)
					*p++ = REG_RD(bp,
						      reg_addrs[i].addr + j*4);
	}
}

static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (bp->flags & NO_WOL_FLAG) {
		wol->supported = 0;
		wol->wolopts = 0;
	} else {
		wol->supported = WAKE_MAGIC;
		if (bp->wol)
			wol->wolopts = WAKE_MAGIC;
		else
			wol->wolopts = 0;
	}
	memset(&wol->sopass, 0, sizeof(wol->sopass));
}

static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (wol->wolopts & ~WAKE_MAGIC)
		return -EINVAL;

	if (wol->wolopts & WAKE_MAGIC) {
		if (bp->flags & NO_WOL_FLAG)
			return -EINVAL;

		bp->wol = 1;
	} else
		bp->wol = 0;

	return 0;
}

static u32 bnx2x_get_msglevel(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	return bp->msglevel;
}

static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (capable(CAP_NET_ADMIN))
		bp->msglevel = level;
}

static int bnx2x_nway_reset(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (!bp->port.pmf)
		return 0;

	if (netif_running(dev)) {
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);
		bnx2x_link_set(bp);
	}

	return 0;
}

static int bnx2x_get_eeprom_len(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	return bp->common.flash_size;
}

static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int count, i;
	u32 val = 0;

	/* adjust timeout for emulation/FPGA */
	count = NVRAM_TIMEOUT_COUNT;
	if (CHIP_REV_IS_SLOW(bp))
		count *= 100;

	/* request access to nvram interface */
	REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
	       (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));

	for (i = 0; i < count*10; i++) {
		val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
		if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))
			break;

		udelay(5);
	}

	if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
		DP(BNX2X_MSG_NVM, "cannot get access to nvram interface\n");
		return -EBUSY;
	}

	return 0;
}

static int bnx2x_release_nvram_lock(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int count, i;
	u32 val = 0;

	/* adjust timeout for emulation/FPGA */
	count = NVRAM_TIMEOUT_COUNT;
	if (CHIP_REV_IS_SLOW(bp))
		count *= 100;

	/* relinquish nvram interface */
	REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
	       (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));

	for (i = 0; i < count*10; i++) {
		val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
		if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)))
			break;

		udelay(5);
	}

	if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
		DP(BNX2X_MSG_NVM, "cannot free access to nvram interface\n");
		return -EBUSY;
	}

	return 0;
}

static void bnx2x_enable_nvram_access(struct bnx2x *bp)
{
	u32 val;

	val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);

	/* enable both bits, even on read */
	REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
	       (val | MCPR_NVM_ACCESS_ENABLE_EN |
		      MCPR_NVM_ACCESS_ENABLE_WR_EN));
}

static void bnx2x_disable_nvram_access(struct bnx2x *bp)
{
	u32 val;

	val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);

	/* disable both bits, even after read */
	REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
	       (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
			MCPR_NVM_ACCESS_ENABLE_WR_EN)));
}

static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, __be32 *ret_val,
				  u32 cmd_flags)
{
	int count, i, rc;
	u32 val;

	/* build the command word */
	cmd_flags |= MCPR_NVM_COMMAND_DOIT;

	/* need to clear DONE bit separately */
	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);

	/* address of the NVRAM to read from */
	REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
	       (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));

	/* issue a read command */
	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);

	/* adjust timeout for emulation/FPGA */
	count = NVRAM_TIMEOUT_COUNT;
	if (CHIP_REV_IS_SLOW(bp))
		count *= 100;

	/* wait for completion */
	*ret_val = 0;
	rc = -EBUSY;
	for (i = 0; i < count; i++) {
		udelay(5);
		val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);

		if (val & MCPR_NVM_COMMAND_DONE) {
			val = REG_RD(bp, MCP_REG_MCPR_NVM_READ);
			/* we read nvram data in cpu order
			 * but ethtool sees it as an array of bytes
			 * converting to big-endian will do the work */
			*ret_val = cpu_to_be32(val);
			rc = 0;
			break;
		}
	}

	return rc;
}

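/* Byte-order example (illustrative): on a little-endian host a word read
 * as val = 0x11223344 sits in memory as 44 33 22 11; cpu_to_be32() above
 * re-stores it as 11 22 33 44, which is the byte sequence ethtool user
 * space expects when it treats the flash as a flat byte array. On a
 * big-endian host the conversion is a no-op.
 */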
static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
			    int buf_size)
{
	int rc;
	u32 cmd_flags;
	__be32 val;

	if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
		DP(BNX2X_MSG_NVM,
		   "Invalid parameter: offset 0x%x buf_size 0x%x\n",
		   offset, buf_size);
		return -EINVAL;
	}

	if (offset + buf_size > bp->common.flash_size) {
		DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
		   " buf_size (0x%x) > flash_size (0x%x)\n",
		   offset, buf_size, bp->common.flash_size);
		return -EINVAL;
	}

	/* request access to nvram interface */
	rc = bnx2x_acquire_nvram_lock(bp);
	if (rc)
		return rc;

	/* enable access to nvram interface */
	bnx2x_enable_nvram_access(bp);

	/* read the first word(s) */
	cmd_flags = MCPR_NVM_COMMAND_FIRST;
	while ((buf_size > sizeof(u32)) && (rc == 0)) {
		rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
		memcpy(ret_buf, &val, 4);

		/* advance to the next dword */
		offset += sizeof(u32);
		ret_buf += sizeof(u32);
		buf_size -= sizeof(u32);
		cmd_flags = 0;
	}

	if (rc == 0) {
		cmd_flags |= MCPR_NVM_COMMAND_LAST;
		rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
		memcpy(ret_buf, &val, 4);
	}

	/* disable access to nvram interface */
	bnx2x_disable_nvram_access(bp);
	bnx2x_release_nvram_lock(bp);

	return rc;
}

static int bnx2x_get_eeprom(struct net_device *dev,
			    struct ethtool_eeprom *eeprom, u8 *eebuf)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc;

	if (!netif_running(dev))
		return -EAGAIN;

	DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
	   DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
	   eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
	   eeprom->len, eeprom->len);

	/* parameters already validated in ethtool_get_eeprom */

	rc = bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);

	return rc;
}

static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
				   u32 cmd_flags)
{
	int count, i, rc;

	/* build the command word */
	cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR;

	/* need to clear DONE bit separately */
	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);

	/* write the data */
	REG_WR(bp, MCP_REG_MCPR_NVM_WRITE, val);

	/* address of the NVRAM to write to */
	REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
	       (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));

	/* issue the write command */
	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);

	/* adjust timeout for emulation/FPGA */
	count = NVRAM_TIMEOUT_COUNT;
	if (CHIP_REV_IS_SLOW(bp))
		count *= 100;

	/* wait for completion */
	rc = -EBUSY;
	for (i = 0; i < count; i++) {
		udelay(5);
		val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
		if (val & MCPR_NVM_COMMAND_DONE) {
			rc = 0;
			break;
		}
	}

	return rc;
}

#define BYTE_OFFSET(offset)		(8 * (offset & 0x03))

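/* Example (illustrative): for offset 0x102, BYTE_OFFSET(0x102) = 8 * 2 =
 * 16, so bnx2x_nvram_write1() below clears and rewrites bits 23:16 of the
 * dword at the aligned offset 0x100 while leaving the other three bytes
 * of that word untouched.
 */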
static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
			      int buf_size)
{
	int rc;
	u32 cmd_flags;
	u32 align_offset;
	__be32 val;

	if (offset + buf_size > bp->common.flash_size) {
		DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
		   " buf_size (0x%x) > flash_size (0x%x)\n",
		   offset, buf_size, bp->common.flash_size);
		return -EINVAL;
	}

	/* request access to nvram interface */
	rc = bnx2x_acquire_nvram_lock(bp);
	if (rc)
		return rc;

	/* enable access to nvram interface */
	bnx2x_enable_nvram_access(bp);

	cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
	align_offset = (offset & ~0x03);
	rc = bnx2x_nvram_read_dword(bp, align_offset, &val, cmd_flags);

	if (rc == 0) {
		val &= ~(0xff << BYTE_OFFSET(offset));
		val |= (*data_buf << BYTE_OFFSET(offset));

		/* nvram data is returned as an array of bytes
		 * convert it back to cpu order */
		val = be32_to_cpu(val);

		rc = bnx2x_nvram_write_dword(bp, align_offset, val,
					     cmd_flags);
	}

	/* disable access to nvram interface */
	bnx2x_disable_nvram_access(bp);
	bnx2x_release_nvram_lock(bp);

	return rc;
}

static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
			     int buf_size)
{
	int rc;
	u32 cmd_flags;
	u32 val;
	u32 written_so_far;

	if (buf_size == 1)	/* ethtool */
		return bnx2x_nvram_write1(bp, offset, data_buf, buf_size);

	if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
		DP(BNX2X_MSG_NVM,
		   "Invalid parameter: offset 0x%x buf_size 0x%x\n",
		   offset, buf_size);
		return -EINVAL;
	}

	if (offset + buf_size > bp->common.flash_size) {
		DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
		   " buf_size (0x%x) > flash_size (0x%x)\n",
		   offset, buf_size, bp->common.flash_size);
		return -EINVAL;
	}

	/* request access to nvram interface */
	rc = bnx2x_acquire_nvram_lock(bp);
	if (rc)
		return rc;

	/* enable access to nvram interface */
	bnx2x_enable_nvram_access(bp);

	written_so_far = 0;
	cmd_flags = MCPR_NVM_COMMAND_FIRST;
	while ((written_so_far < buf_size) && (rc == 0)) {
		if (written_so_far == (buf_size - sizeof(u32)))
			cmd_flags |= MCPR_NVM_COMMAND_LAST;
		else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0)
			cmd_flags |= MCPR_NVM_COMMAND_LAST;
		else if ((offset % NVRAM_PAGE_SIZE) == 0)
			cmd_flags |= MCPR_NVM_COMMAND_FIRST;

		memcpy(&val, data_buf, 4);

		rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);

		/* advance to the next dword */
		offset += sizeof(u32);
		data_buf += sizeof(u32);
		written_so_far += sizeof(u32);
		cmd_flags = 0;
	}

	/* disable access to nvram interface */
	bnx2x_disable_nvram_access(bp);
	bnx2x_release_nvram_lock(bp);

	return rc;
}

static int bnx2x_set_eeprom(struct net_device *dev,
			    struct ethtool_eeprom *eeprom, u8 *eebuf)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc;

	if (!netif_running(dev))
		return -EAGAIN;

	DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
	   DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
	   eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
	   eeprom->len, eeprom->len);

	/* parameters already validated in ethtool_set_eeprom */

	/* If the magic number is PHY (0x00504859) upgrade the PHY FW */
	if (eeprom->magic == 0x00504859)
		if (bp->port.pmf) {

			bnx2x_acquire_phy_lock(bp);
			rc = bnx2x_flash_download(bp, BP_PORT(bp),
					     bp->link_params.ext_phy_config,
					     (bp->state != BNX2X_STATE_CLOSED),
					     eebuf, eeprom->len);
			if ((bp->state == BNX2X_STATE_OPEN) ||
			    (bp->state == BNX2X_STATE_DISABLED)) {
				rc |= bnx2x_link_reset(&bp->link_params,
						       &bp->link_vars, 1);
				rc |= bnx2x_phy_init(&bp->link_params,
						     &bp->link_vars);
			}
			bnx2x_release_phy_lock(bp);

		} else /* Only the PMF can access the PHY */
			return -EINVAL;
	else
		rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);

	return rc;
}

static int bnx2x_get_coalesce(struct net_device *dev,
			      struct ethtool_coalesce *coal)
{
	struct bnx2x *bp = netdev_priv(dev);

	memset(coal, 0, sizeof(struct ethtool_coalesce));

	coal->rx_coalesce_usecs = bp->rx_ticks;
	coal->tx_coalesce_usecs = bp->tx_ticks;

	return 0;
}

static int bnx2x_set_coalesce(struct net_device *dev,
			      struct ethtool_coalesce *coal)
{
	struct bnx2x *bp = netdev_priv(dev);

	bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
	if (bp->rx_ticks > 3000)
		bp->rx_ticks = 3000;

	bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
	if (bp->tx_ticks > 0x3000)
		bp->tx_ticks = 0x3000;

	if (netif_running(dev))
		bnx2x_update_coalesce(bp);

	return 0;
}

static void bnx2x_get_ringparam(struct net_device *dev,
				struct ethtool_ringparam *ering)
{
	struct bnx2x *bp = netdev_priv(dev);

	ering->rx_max_pending = MAX_RX_AVAIL;
	ering->rx_mini_max_pending = 0;
	ering->rx_jumbo_max_pending = 0;

	ering->rx_pending = bp->rx_ring_size;
	ering->rx_mini_pending = 0;
	ering->rx_jumbo_pending = 0;

	ering->tx_max_pending = MAX_TX_AVAIL;
	ering->tx_pending = bp->tx_ring_size;
}

static int bnx2x_set_ringparam(struct net_device *dev,
			       struct ethtool_ringparam *ering)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc = 0;

	if ((ering->rx_pending > MAX_RX_AVAIL) ||
	    (ering->tx_pending > MAX_TX_AVAIL) ||
	    (ering->tx_pending <= MAX_SKB_FRAGS + 4))
		return -EINVAL;

	bp->rx_ring_size = ering->rx_pending;
	bp->tx_ring_size = ering->tx_pending;

	if (netif_running(dev)) {
		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
		rc = bnx2x_nic_load(bp, LOAD_NORMAL);
	}

	return rc;
}

static void bnx2x_get_pauseparam(struct net_device *dev,
				 struct ethtool_pauseparam *epause)
{
	struct bnx2x *bp = netdev_priv(dev);

	epause->autoneg = (bp->link_params.req_flow_ctrl ==
			   BNX2X_FLOW_CTRL_AUTO) &&
			  (bp->link_params.req_line_speed == SPEED_AUTO_NEG);

	epause->rx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) ==
			    BNX2X_FLOW_CTRL_RX);
	epause->tx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX) ==
			    BNX2X_FLOW_CTRL_TX);

	DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
	   DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
	   epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
}

static int bnx2x_set_pauseparam(struct net_device *dev,
				struct ethtool_pauseparam *epause)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (IS_E1HMF(bp))
		return 0;

	DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
	   DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
	   epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);

	bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;

	if (epause->rx_pause)
		bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_RX;

	if (epause->tx_pause)
		bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_TX;

	if (bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO)
		bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;

	if (epause->autoneg) {
		if (!(bp->port.supported & SUPPORTED_Autoneg)) {
			DP(NETIF_MSG_LINK, "autoneg not supported\n");
			return -EINVAL;
		}

		if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
			bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
	}

	DP(NETIF_MSG_LINK,
	   "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl);

	if (netif_running(dev)) {
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);
		bnx2x_link_set(bp);
	}

	return 0;
}

static int bnx2x_set_flags(struct net_device *dev, u32 data)
{
	struct bnx2x *bp = netdev_priv(dev);
	int changed = 0;
	int rc = 0;

	/* TPA requires Rx CSUM offloading */
	if ((data & ETH_FLAG_LRO) && bp->rx_csum) {
		if (!(dev->features & NETIF_F_LRO)) {
			dev->features |= NETIF_F_LRO;
			bp->flags |= TPA_ENABLE_FLAG;
			changed = 1;
		}

	} else if (dev->features & NETIF_F_LRO) {
		dev->features &= ~NETIF_F_LRO;
		bp->flags &= ~TPA_ENABLE_FLAG;
		changed = 1;
	}

	if (changed && netif_running(dev)) {
		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
		rc = bnx2x_nic_load(bp, LOAD_NORMAL);
	}

	return rc;
}

static u32 bnx2x_get_rx_csum(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	return bp->rx_csum;
}

static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc = 0;

	bp->rx_csum = data;

	/* Disable TPA, when Rx CSUM is disabled. Otherwise all
	   TPA'ed packets will be discarded due to wrong TCP CSUM */
	if (!data) {
		u32 flags = ethtool_op_get_flags(dev);

		rc = bnx2x_set_flags(dev, (flags & ~ETH_FLAG_LRO));
	}

	return rc;
}

static int bnx2x_set_tso(struct net_device *dev, u32 data)
{
	if (data) {
		dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
		dev->features |= NETIF_F_TSO6;
	} else {
		dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
		dev->features &= ~NETIF_F_TSO6;
	}

	return 0;
}

static const struct {
	char string[ETH_GSTRING_LEN];
} bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = {
	{ "register_test (offline)" },
	{ "memory_test (offline)" },
	{ "loopback_test (offline)" },
	{ "nvram_test (online)" },
	{ "interrupt_test (online)" },
	{ "link_test (online)" },
	{ "idle check (online)" }
};

static int bnx2x_self_test_count(struct net_device *dev)
{
	return BNX2X_NUM_TESTS;
}

static int bnx2x_test_registers(struct bnx2x *bp)
{
	int idx, i, rc = -ENODEV;
	u32 wr_val = 0;
	int port = BP_PORT(bp);
	static const struct {
		u32 offset0;
		u32 offset1;
		u32 mask;
	} reg_tbl[] = {
/* 0 */		{ BRB1_REG_PAUSE_LOW_THRESHOLD_0,      4, 0x000003ff },
		{ DORQ_REG_DB_ADDR0,                   4, 0xffffffff },
		{ HC_REG_AGG_INT_0,                    4, 0x000003ff },
		{ PBF_REG_MAC_IF0_ENABLE,              4, 0x00000001 },
		{ PBF_REG_P0_INIT_CRD,                 4, 0x000007ff },
		{ PRS_REG_CID_PORT_0,                  4, 0x00ffffff },
		{ PXP2_REG_PSWRQ_CDU0_L2P,             4, 0x000fffff },
		{ PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR,    8, 0x0003ffff },
		{ PXP2_REG_PSWRQ_TM0_L2P,              4, 0x000fffff },
		{ PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR,   8, 0x0003ffff },
/* 10 */	{ PXP2_REG_PSWRQ_TSDM0_L2P,            4, 0x000fffff },
		{ QM_REG_CONNNUM_0,                    4, 0x000fffff },
		{ TM_REG_LIN0_MAX_ACTIVE_CID,          4, 0x0003ffff },
		{ SRC_REG_KEYRSS0_0,                  40, 0xffffffff },
		{ SRC_REG_KEYRSS0_7,                  40, 0xffffffff },
		{ XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 4, 0x00000001 },
		{ XCM_REG_WU_DA_CNT_CMD00,             4, 0x00000003 },
		{ XCM_REG_GLB_DEL_ACK_MAX_CNT_0,       4, 0x000000ff },
		{ NIG_REG_EGRESS_MNG0_FIFO,           20, 0xffffffff },
		{ NIG_REG_LLH0_T_BIT,                  4, 0x00000001 },
/* 20 */	{ NIG_REG_EMAC0_IN_EN,                 4, 0x00000001 },
		{ NIG_REG_BMAC0_IN_EN,                 4, 0x00000001 },
		{ NIG_REG_XCM0_OUT_EN,                 4, 0x00000001 },
		{ NIG_REG_BRB0_OUT_EN,                 4, 0x00000001 },
		{ NIG_REG_LLH0_XCM_MASK,               4, 0x00000007 },
		{ NIG_REG_LLH0_ACPI_PAT_6_LEN,        68, 0x000000ff },
		{ NIG_REG_LLH0_ACPI_PAT_0_CRC,        68, 0xffffffff },
		{ NIG_REG_LLH0_DEST_MAC_0_0,         160, 0xffffffff },
		{ NIG_REG_LLH0_DEST_IP_0_1,          160, 0xffffffff },
		{ NIG_REG_LLH0_IPV4_IPV6_0,          160, 0x00000001 },
/* 30 */	{ NIG_REG_LLH0_DEST_UDP_0,           160, 0x0000ffff },
		{ NIG_REG_LLH0_DEST_TCP_0,           160, 0x0000ffff },
		{ NIG_REG_LLH0_VLAN_ID_0,            160, 0x00000fff },
		{ NIG_REG_XGXS_SERDES0_MODE_SEL,       4, 0x00000001 },
		{ NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0, 4, 0x00000001 },
		{ NIG_REG_STATUS_INTERRUPT_PORT0,      4, 0x07ffffff },
		{ NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST, 24, 0x00000001 },
		{ NIG_REG_SERDES0_CTRL_PHY_ADDR,      16, 0x0000001f },

		{ 0xffffffff, 0, 0x00000000 }
	};

	if (!netif_running(bp->dev))
		return rc;

	/* Repeat the test twice:
	   First by writing 0x00000000, second by writing 0xffffffff */
	for (idx = 0; idx < 2; idx++) {

		switch (idx) {
		case 0:
			wr_val = 0;
			break;
		case 1:
			wr_val = 0xffffffff;
			break;
		}

		for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) {
			u32 offset, mask, save_val, val;

			offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1;
			mask = reg_tbl[i].mask;

			save_val = REG_RD(bp, offset);

			REG_WR(bp, offset, wr_val);
			val = REG_RD(bp, offset);

			/* Restore the original register's value */
			REG_WR(bp, offset, save_val);

			/* verify the value read back is as expected */
			if ((val & mask) != (wr_val & mask))
				goto test_reg_exit;
		}
	}

	rc = 0;

test_reg_exit:
	return rc;
}

static int bnx2x_test_memory(struct bnx2x *bp)
{
	int i, j, rc = -ENODEV;
	u32 val;
	static const struct {
		u32 offset;
		int size;
	} mem_tbl[] = {
		{ CCM_REG_XX_DESCR_TABLE,   CCM_REG_XX_DESCR_TABLE_SIZE },
		{ CFC_REG_ACTIVITY_COUNTER, CFC_REG_ACTIVITY_COUNTER_SIZE },
		{ CFC_REG_LINK_LIST,        CFC_REG_LINK_LIST_SIZE },
		{ DMAE_REG_CMD_MEM,         DMAE_REG_CMD_MEM_SIZE },
		{ TCM_REG_XX_DESCR_TABLE,   TCM_REG_XX_DESCR_TABLE_SIZE },
		{ UCM_REG_XX_DESCR_TABLE,   UCM_REG_XX_DESCR_TABLE_SIZE },
		{ XCM_REG_XX_DESCR_TABLE,   XCM_REG_XX_DESCR_TABLE_SIZE },

		{ 0xffffffff, 0 }
	};
	static const struct {
		char *name;
		u32 offset;
		u32 e1_mask;
		u32 e1h_mask;
	} prty_tbl[] = {
		{ "CCM_PRTY_STS",  CCM_REG_CCM_PRTY_STS,   0x3ffc0, 0 },
		{ "CFC_PRTY_STS",  CFC_REG_CFC_PRTY_STS,   0x2,     0x2 },
		{ "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0,       0 },
		{ "TCM_PRTY_STS",  TCM_REG_TCM_PRTY_STS,   0x3ffc0, 0 },
		{ "UCM_PRTY_STS",  UCM_REG_UCM_PRTY_STS,   0x3ffc0, 0 },
		{ "XCM_PRTY_STS",  XCM_REG_XCM_PRTY_STS,   0x3ffc1, 0 },

		{ NULL, 0xffffffff, 0, 0 }
	};

	if (!netif_running(bp->dev))
		return rc;

	/* Go through all the memories */
	for (i = 0; mem_tbl[i].offset != 0xffffffff; i++)
		for (j = 0; j < mem_tbl[i].size; j++)
			REG_RD(bp, mem_tbl[i].offset + j*4);

	/* Check the parity status */
	for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
		val = REG_RD(bp, prty_tbl[i].offset);
		if ((CHIP_IS_E1(bp) && (val & ~(prty_tbl[i].e1_mask))) ||
		    (CHIP_IS_E1H(bp) && (val & ~(prty_tbl[i].e1h_mask)))) {
			DP(NETIF_MSG_HW,
			   "%s is 0x%x\n", prty_tbl[i].name, val);
			goto test_mem_exit;
		}
	}

	rc = 0;

test_mem_exit:
	return rc;
}

static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up)
{
	int cnt = 1000;

	if (link_up)
		while (bnx2x_link_test(bp) && cnt--)
			msleep(10);
}

static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
{
	unsigned int pkt_size, num_pkts, i;
	struct sk_buff *skb;
	unsigned char *packet;
	struct bnx2x_fastpath *fp = &bp->fp[0];
	u16 tx_start_idx, tx_idx;
	u16 rx_start_idx, rx_idx;
	u16 pkt_prod;
	struct sw_tx_bd *tx_buf;
	struct eth_tx_bd *tx_bd;
	dma_addr_t mapping;
	union eth_rx_cqe *cqe;
	u8 cqe_fp_flags;
	struct sw_rx_bd *rx_buf;
	u16 len;
	int rc = -ENODEV;

	/* check the loopback mode */
	switch (loopback_mode) {
	case BNX2X_PHY_LOOPBACK:
		if (bp->link_params.loopback_mode != LOOPBACK_XGXS_10)
			return -EINVAL;
		break;
	case BNX2X_MAC_LOOPBACK:
		bp->link_params.loopback_mode = LOOPBACK_BMAC;
		bnx2x_phy_init(&bp->link_params, &bp->link_vars);
		break;
	default:
		return -EINVAL;
	}

	/* prepare the loopback packet */
	pkt_size = (((bp->dev->mtu < ETH_MAX_PACKET_SIZE) ?
		     bp->dev->mtu : ETH_MAX_PACKET_SIZE) + ETH_HLEN);
	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
	if (!skb) {
		rc = -ENOMEM;
		goto test_loopback_exit;
	}
	packet = skb_put(skb, pkt_size);
	memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
	memset(packet + ETH_ALEN, 0, (ETH_HLEN - ETH_ALEN));
	for (i = ETH_HLEN; i < pkt_size; i++)
		packet[i] = (unsigned char) (i & 0xff);

	/* send the loopback packet */
	num_pkts = 0;
	tx_start_idx = le16_to_cpu(*fp->tx_cons_sb);
	rx_start_idx = le16_to_cpu(*fp->rx_cons_sb);

	pkt_prod = fp->tx_pkt_prod++;
	tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
	tx_buf->first_bd = fp->tx_bd_prod;
	tx_buf->skb = skb;

	tx_bd = &fp->tx_desc_ring[TX_BD(fp->tx_bd_prod)];
	mapping = pci_map_single(bp->pdev, skb->data,
				 skb_headlen(skb), PCI_DMA_TODEVICE);
	tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
	tx_bd->nbd = cpu_to_le16(1);
	tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));
	tx_bd->vlan = cpu_to_le16(pkt_prod);
	tx_bd->bd_flags.as_bitfield = (ETH_TX_BD_FLAGS_START_BD |
				       ETH_TX_BD_FLAGS_END_BD);
	tx_bd->general_data = ((UNICAST_ADDRESS <<
				ETH_TX_BD_ETH_ADDR_TYPE_SHIFT) | 1);

	wmb();

	le16_add_cpu(&fp->hw_tx_prods->bds_prod, 1);
	mb(); /* FW restriction: must not reorder writing nbd and packets */
	le32_add_cpu(&fp->hw_tx_prods->packets_prod, 1);
	DOORBELL(bp, fp->index, 0);

	mmiowb();

	num_pkts++;
	fp->tx_bd_prod++;
	bp->dev->trans_start = jiffies;

	udelay(100);

	tx_idx = le16_to_cpu(*fp->tx_cons_sb);
	if (tx_idx != tx_start_idx + num_pkts)
		goto test_loopback_exit;

	rx_idx = le16_to_cpu(*fp->rx_cons_sb);
	if (rx_idx != rx_start_idx + num_pkts)
		goto test_loopback_exit;

	cqe = &fp->rx_comp_ring[RCQ_BD(fp->rx_comp_cons)];
	cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
	if (CQE_TYPE(cqe_fp_flags) || (cqe_fp_flags & ETH_RX_ERROR_FALGS))
		goto test_loopback_rx_exit;

	len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
	if (len != pkt_size)
		goto test_loopback_rx_exit;

	rx_buf = &fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)];
	skb = rx_buf->skb;
	skb_reserve(skb, cqe->fast_path_cqe.placement_offset);
	for (i = ETH_HLEN; i < pkt_size; i++)
		if (*(skb->data + i) != (unsigned char) (i & 0xff))
			goto test_loopback_rx_exit;

	rc = 0;

test_loopback_rx_exit:

	fp->rx_bd_cons = NEXT_RX_IDX(fp->rx_bd_cons);
	fp->rx_bd_prod = NEXT_RX_IDX(fp->rx_bd_prod);
	fp->rx_comp_cons = NEXT_RCQ_IDX(fp->rx_comp_cons);
	fp->rx_comp_prod = NEXT_RCQ_IDX(fp->rx_comp_prod);

	/* Update producers */
	bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
			     fp->rx_sge_prod);

test_loopback_exit:
	bp->link_params.loopback_mode = LOOPBACK_NONE;

	return rc;
}

static int bnx2x_test_loopback(struct bnx2x *bp, u8 link_up)
{
	int rc = 0, res;

	if (!netif_running(bp->dev))
		return BNX2X_LOOPBACK_FAILED;

	bnx2x_netif_stop(bp, 1);
	bnx2x_acquire_phy_lock(bp);

	res = bnx2x_run_loopback(bp, BNX2X_PHY_LOOPBACK, link_up);
	if (res) {
		DP(NETIF_MSG_PROBE, " PHY loopback failed (res %d)\n", res);
		rc |= BNX2X_PHY_LOOPBACK_FAILED;
	}

	res = bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK, link_up);
	if (res) {
		DP(NETIF_MSG_PROBE, " MAC loopback failed (res %d)\n", res);
		rc |= BNX2X_MAC_LOOPBACK_FAILED;
	}

	bnx2x_release_phy_lock(bp);
	bnx2x_netif_start(bp);

	return rc;
}

#define CRC32_RESIDUAL			0xdebb20e3

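/* 0xdebb20e3 is the standard CRC-32 residue: when a block's last four
 * bytes hold the (inverted) CRC of the preceding data, running the CRC
 * over the whole block - data plus the stored checksum - yields this
 * constant regardless of the data. bnx2x_test_nvram() below relies on
 * that property, so each nvram_tbl entry's size covers the region
 * including its trailing CRC word.
 */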
static int bnx2x_test_nvram(struct bnx2x *bp)
{
	static const struct {
		int offset;
		int size;
	} nvram_tbl[] = {
		{     0,  0x14 }, /* bootstrap */
		{  0x14,  0xec }, /* dir */
		{ 0x100, 0x350 }, /* manuf_info */
		{ 0x450,  0xf0 }, /* feature_info */
		{ 0x640,  0x64 }, /* upgrade_key_info */
		{ 0x6a4,  0x64 },
		{ 0x708,  0x70 }, /* manuf_key_info */
		{ 0x778,  0x70 },
		{     0,     0 }
	};
	__be32 buf[0x350 / 4];
	u8 *data = (u8 *)buf;
	int i, rc;
	u32 magic, csum;

	rc = bnx2x_nvram_read(bp, 0, data, 4);
	if (rc) {
		DP(NETIF_MSG_PROBE, "magic value read (rc %d)\n", rc);
		goto test_nvram_exit;
	}

	magic = be32_to_cpu(buf[0]);
	if (magic != 0x669955aa) {
		DP(NETIF_MSG_PROBE, "magic value (0x%08x)\n", magic);
		rc = -ENODEV;
		goto test_nvram_exit;
	}

	for (i = 0; nvram_tbl[i].size; i++) {

		rc = bnx2x_nvram_read(bp, nvram_tbl[i].offset, data,
				      nvram_tbl[i].size);
		if (rc) {
			DP(NETIF_MSG_PROBE,
			   "nvram_tbl[%d] read data (rc %d)\n", i, rc);
			goto test_nvram_exit;
		}

		csum = ether_crc_le(nvram_tbl[i].size, data);
		if (csum != CRC32_RESIDUAL) {
			DP(NETIF_MSG_PROBE,
			   "nvram_tbl[%d] csum value (0x%08x)\n", i, csum);
			rc = -ENODEV;
			goto test_nvram_exit;
		}
	}

test_nvram_exit:
	return rc;
}

static int bnx2x_test_intr(struct bnx2x *bp)
{
	struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
	int i, rc;

	if (!netif_running(bp->dev))
		return -ENODEV;

	config->hdr.length = 0;
	if (CHIP_IS_E1(bp))
		config->hdr.offset = (BP_PORT(bp) ? 32 : 0);
	else
		config->hdr.offset = BP_FUNC(bp);
	config->hdr.client_id = bp->fp->cl_id;
	config->hdr.reserved1 = 0;

	rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
			   U64_HI(bnx2x_sp_mapping(bp, mac_config)),
			   U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
	if (rc == 0) {
		bp->set_mac_pending++;
		for (i = 0; i < 10; i++) {
			if (!bp->set_mac_pending)
				break;
			msleep_interruptible(10);
		}
		if (i == 10)
			rc = -ENODEV;
	}

	return rc;
}

static void bnx2x_self_test(struct net_device *dev,
			    struct ethtool_test *etest, u64 *buf)
{
	struct bnx2x *bp = netdev_priv(dev);

	memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS);

	if (!netif_running(dev))
		return;

	/* offline tests are not supported in MF mode */
	if (IS_E1HMF(bp))
		etest->flags &= ~ETH_TEST_FL_OFFLINE;

	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		u8 link_up;

		link_up = bp->link_vars.link_up;
		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
		bnx2x_nic_load(bp, LOAD_DIAG);
		/* wait until link state is restored */
		bnx2x_wait_for_link(bp, link_up);

		if (bnx2x_test_registers(bp) != 0) {
			buf[0] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		if (bnx2x_test_memory(bp) != 0) {
			buf[1] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		buf[2] = bnx2x_test_loopback(bp, link_up);
		if (buf[2] != 0)
			etest->flags |= ETH_TEST_FL_FAILED;

		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
		bnx2x_nic_load(bp, LOAD_NORMAL);
		/* wait until link state is restored */
		bnx2x_wait_for_link(bp, link_up);
	}
	if (bnx2x_test_nvram(bp) != 0) {
		buf[3] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	if (bnx2x_test_intr(bp) != 0) {
		buf[4] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	if (bp->port.pmf)
		if (bnx2x_link_test(bp) != 0) {
			buf[5] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}

#ifdef BNX2X_EXTRA_DEBUG
	bnx2x_panic_dump(bp);
#endif
}

9730 static const struct {
9731 long offset;
9732 int size;
9733 u8 string[ETH_GSTRING_LEN];
9734 } bnx2x_q_stats_arr[BNX2X_NUM_Q_STATS] = {
9735 /* 1 */ { Q_STATS_OFFSET32(total_bytes_received_hi), 8, "[%d]: rx_bytes" },
9736 { Q_STATS_OFFSET32(error_bytes_received_hi),
9737 8, "[%d]: rx_error_bytes" },
9738 { Q_STATS_OFFSET32(total_unicast_packets_received_hi),
9739 8, "[%d]: rx_ucast_packets" },
9740 { Q_STATS_OFFSET32(total_multicast_packets_received_hi),
9741 8, "[%d]: rx_mcast_packets" },
9742 { Q_STATS_OFFSET32(total_broadcast_packets_received_hi),
9743 8, "[%d]: rx_bcast_packets" },
9744 { Q_STATS_OFFSET32(no_buff_discard_hi), 8, "[%d]: rx_discards" },
9745 { Q_STATS_OFFSET32(rx_err_discard_pkt),
9746 4, "[%d]: rx_phy_ip_err_discards"},
9747 { Q_STATS_OFFSET32(rx_skb_alloc_failed),
9748 4, "[%d]: rx_skb_alloc_discard" },
9749 { Q_STATS_OFFSET32(hw_csum_err), 4, "[%d]: rx_csum_offload_errors" },
9751 /* 10 */{ Q_STATS_OFFSET32(total_bytes_transmitted_hi), 8, "[%d]: tx_bytes" },
9752 { Q_STATS_OFFSET32(total_unicast_packets_transmitted_hi),
9753 8, "[%d]: tx_packets" }
9756 static const struct {
9757 long offset;
9758 int size;
9759 u32 flags;
9760 #define STATS_FLAGS_PORT 1
9761 #define STATS_FLAGS_FUNC 2
9762 #define STATS_FLAGS_BOTH (STATS_FLAGS_FUNC | STATS_FLAGS_PORT)
9763 u8 string[ETH_GSTRING_LEN];
9764 } bnx2x_stats_arr[BNX2X_NUM_STATS] = {
9765 /* 1 */ { STATS_OFFSET32(total_bytes_received_hi),
9766 8, STATS_FLAGS_BOTH, "rx_bytes" },
9767 { STATS_OFFSET32(error_bytes_received_hi),
9768 8, STATS_FLAGS_BOTH, "rx_error_bytes" },
9769 { STATS_OFFSET32(total_unicast_packets_received_hi),
9770 8, STATS_FLAGS_BOTH, "rx_ucast_packets" },
9771 { STATS_OFFSET32(total_multicast_packets_received_hi),
9772 8, STATS_FLAGS_BOTH, "rx_mcast_packets" },
9773 { STATS_OFFSET32(total_broadcast_packets_received_hi),
9774 8, STATS_FLAGS_BOTH, "rx_bcast_packets" },
9775 { STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
9776 8, STATS_FLAGS_PORT, "rx_crc_errors" },
9777 { STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
9778 8, STATS_FLAGS_PORT, "rx_align_errors" },
9779 { STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
9780 8, STATS_FLAGS_PORT, "rx_undersize_packets" },
9781 { STATS_OFFSET32(etherstatsoverrsizepkts_hi),
9782 8, STATS_FLAGS_PORT, "rx_oversize_packets" },
9783 /* 10 */{ STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
9784 8, STATS_FLAGS_PORT, "rx_fragments" },
9785 { STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
9786 8, STATS_FLAGS_PORT, "rx_jabbers" },
9787 { STATS_OFFSET32(no_buff_discard_hi),
9788 8, STATS_FLAGS_BOTH, "rx_discards" },
9789 { STATS_OFFSET32(mac_filter_discard),
9790 4, STATS_FLAGS_PORT, "rx_filtered_packets" },
9791 { STATS_OFFSET32(xxoverflow_discard),
9792 4, STATS_FLAGS_PORT, "rx_fw_discards" },
9793 { STATS_OFFSET32(brb_drop_hi),
9794 8, STATS_FLAGS_PORT, "rx_brb_discard" },
9795 { STATS_OFFSET32(brb_truncate_hi),
9796 8, STATS_FLAGS_PORT, "rx_brb_truncate" },
9797 { STATS_OFFSET32(pause_frames_received_hi),
9798 8, STATS_FLAGS_PORT, "rx_pause_frames" },
9799 { STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
9800 8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
9801 { STATS_OFFSET32(nig_timer_max),
9802 4, STATS_FLAGS_PORT, "rx_constant_pause_events" },
9803 /* 20 */{ STATS_OFFSET32(rx_err_discard_pkt),
9804 4, STATS_FLAGS_BOTH, "rx_phy_ip_err_discards"},
9805 { STATS_OFFSET32(rx_skb_alloc_failed),
9806 4, STATS_FLAGS_BOTH, "rx_skb_alloc_discard" },
9807 { STATS_OFFSET32(hw_csum_err),
9808 4, STATS_FLAGS_BOTH, "rx_csum_offload_errors" },
9810 { STATS_OFFSET32(total_bytes_transmitted_hi),
9811 8, STATS_FLAGS_BOTH, "tx_bytes" },
9812 { STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
9813 8, STATS_FLAGS_PORT, "tx_error_bytes" },
9814 { STATS_OFFSET32(total_unicast_packets_transmitted_hi),
9815 8, STATS_FLAGS_BOTH, "tx_packets" },
9816 { STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
9817 8, STATS_FLAGS_PORT, "tx_mac_errors" },
9818 { STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
9819 8, STATS_FLAGS_PORT, "tx_carrier_errors" },
9820 { STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
9821 8, STATS_FLAGS_PORT, "tx_single_collisions" },
9822 { STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
9823 8, STATS_FLAGS_PORT, "tx_multi_collisions" },
9824 /* 30 */{ STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
9825 8, STATS_FLAGS_PORT, "tx_deferred" },
9826 { STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
9827 8, STATS_FLAGS_PORT, "tx_excess_collisions" },
9828 { STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
9829 8, STATS_FLAGS_PORT, "tx_late_collisions" },
9830 { STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
9831 8, STATS_FLAGS_PORT, "tx_total_collisions" },
9832 { STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
9833 8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
9834 { STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
9835 8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
9836 { STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
9837 8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
9838 { STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
9839 8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
9840 { STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
9841 8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
9842 { STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
9843 8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
9844 /* 40 */{ STATS_OFFSET32(etherstatspktsover1522octets_hi),
9845 8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
9846 { STATS_OFFSET32(pause_frames_sent_hi),
9847 8, STATS_FLAGS_PORT, "tx_pause_frames" }
9848 };
9850 #define IS_PORT_STAT(i) \
9851 ((bnx2x_stats_arr[i].flags & STATS_FLAGS_BOTH) == STATS_FLAGS_PORT)
9852 #define IS_FUNC_STAT(i) (bnx2x_stats_arr[i].flags & STATS_FLAGS_FUNC)
9853 #define IS_E1HMF_MODE_STAT(bp) \
9854 (IS_E1HMF(bp) && !(bp->msglevel & BNX2X_MSG_STATS))
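/* Editorial note (inferred from the macros above): in E1H
 * multi-function mode the per-port MAC counters are shared between
 * functions, so port statistics are hidden from ethtool unless the
 * BNX2X_MSG_STATS debug level is set.
 */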
9856 static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
9857 {
9858 struct bnx2x *bp = netdev_priv(dev);
9859 int i, j, k;
9861 switch (stringset) {
9862 case ETH_SS_STATS:
9863 if (is_multi(bp)) {
9864 k = 0;
9865 for_each_queue(bp, i) {
9866 for (j = 0; j < BNX2X_NUM_Q_STATS; j++)
9867 sprintf(buf + (k + j)*ETH_GSTRING_LEN,
9868 bnx2x_q_stats_arr[j].string, i);
9869 k += BNX2X_NUM_Q_STATS;
9870 }
9871 if (IS_E1HMF_MODE_STAT(bp))
9872 break;
9873 for (j = 0; j < BNX2X_NUM_STATS; j++)
9874 strcpy(buf + (k + j)*ETH_GSTRING_LEN,
9875 bnx2x_stats_arr[j].string);
9876 } else {
9877 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
9878 if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
9879 continue;
9880 strcpy(buf + j*ETH_GSTRING_LEN,
9881 bnx2x_stats_arr[i].string);
9882 j++;
9883 }
9884 }
9885 break;
9887 case ETH_SS_TEST:
9888 memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr));
9889 break;
9890 }
9891 }
9893 static int bnx2x_get_stats_count(struct net_device *dev)
9894 {
9895 struct bnx2x *bp = netdev_priv(dev);
9896 int i, num_stats;
9898 if (is_multi(bp)) {
9899 num_stats = BNX2X_NUM_Q_STATS * BNX2X_NUM_QUEUES(bp);
9900 if (!IS_E1HMF_MODE_STAT(bp))
9901 num_stats += BNX2X_NUM_STATS;
9902 } else {
9903 if (IS_E1HMF_MODE_STAT(bp)) {
9904 num_stats = 0;
9905 for (i = 0; i < BNX2X_NUM_STATS; i++)
9906 if (IS_FUNC_STAT(i))
9907 num_stats++;
9908 } else
9909 num_stats = BNX2X_NUM_STATS;
9910 }
9912 return num_stats;
9913 }
9915 static void bnx2x_get_ethtool_stats(struct net_device *dev,
9916 struct ethtool_stats *stats, u64 *buf)
9917 {
9918 struct bnx2x *bp = netdev_priv(dev);
9919 u32 *hw_stats, *offset;
9920 int i, j, k;
9922 if (is_multi(bp)) {
9923 k = 0;
9924 for_each_queue(bp, i) {
9925 hw_stats = (u32 *)&bp->fp[i].eth_q_stats;
9926 for (j = 0; j < BNX2X_NUM_Q_STATS; j++) {
9927 if (bnx2x_q_stats_arr[j].size == 0) {
9928 /* skip this counter */
9929 buf[k + j] = 0;
9930 continue;
9931 }
9932 offset = (hw_stats +
9933 bnx2x_q_stats_arr[j].offset);
9934 if (bnx2x_q_stats_arr[j].size == 4) {
9935 /* 4-byte counter */
9936 buf[k + j] = (u64) *offset;
9937 continue;
9938 }
9939 /* 8-byte counter */
9940 buf[k + j] = HILO_U64(*offset, *(offset + 1));
9941 }
9942 k += BNX2X_NUM_Q_STATS;
9943 }
9944 if (IS_E1HMF_MODE_STAT(bp))
9945 return;
9946 hw_stats = (u32 *)&bp->eth_stats;
9947 for (j = 0; j < BNX2X_NUM_STATS; j++) {
9948 if (bnx2x_stats_arr[j].size == 0) {
9949 /* skip this counter */
9950 buf[k + j] = 0;
9951 continue;
9952 }
9953 offset = (hw_stats + bnx2x_stats_arr[j].offset);
9954 if (bnx2x_stats_arr[j].size == 4) {
9955 /* 4-byte counter */
9956 buf[k + j] = (u64) *offset;
9957 continue;
9959 /* 8-byte counter */
9960 buf[k + j] = HILO_U64(*offset, *(offset + 1));
9961 }
9962 } else {
9963 hw_stats = (u32 *)&bp->eth_stats;
9964 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
9965 if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
9966 continue;
9967 if (bnx2x_stats_arr[i].size == 0) {
9968 /* skip this counter */
9969 buf[j] = 0;
9970 j++;
9971 continue;
9972 }
9973 offset = (hw_stats + bnx2x_stats_arr[i].offset);
9974 if (bnx2x_stats_arr[i].size == 4) {
9975 /* 4-byte counter */
9976 buf[j] = (u64) *offset;
9977 j++;
9978 continue;
9979 }
9980 /* 8-byte counter */
9981 buf[j] = HILO_U64(*offset, *(offset + 1));
9982 j++;
9983 }
9984 }
9985 }
9987 static int bnx2x_phys_id(struct net_device *dev, u32 data)
9988 {
9989 struct bnx2x *bp = netdev_priv(dev);
9990 int port = BP_PORT(bp);
9991 int i;
9993 if (!netif_running(dev))
9994 return 0;
9996 if (!bp->port.pmf)
9997 return 0;
9999 if (data == 0)
10000 data = 2;
10002 for (i = 0; i < (data * 2); i++) {
10003 if ((i % 2) == 0)
10004 bnx2x_set_led(bp, port, LED_MODE_OPER, SPEED_1000,
10005 bp->link_params.hw_led_mode,
10006 bp->link_params.chip_id);
10007 else
10008 bnx2x_set_led(bp, port, LED_MODE_OFF, 0,
10009 bp->link_params.hw_led_mode,
10010 bp->link_params.chip_id);
10012 msleep_interruptible(500);
10013 if (signal_pending(current))
10014 break;
10015 }
10017 if (bp->link_vars.link_up)
10018 bnx2x_set_led(bp, port, LED_MODE_OPER,
10019 bp->link_vars.line_speed,
10020 bp->link_params.hw_led_mode,
10021 bp->link_params.chip_id);
10023 return 0;
10024 }
10026 static const struct ethtool_ops bnx2x_ethtool_ops = {
10027 .get_settings = bnx2x_get_settings,
10028 .set_settings = bnx2x_set_settings,
10029 .get_drvinfo = bnx2x_get_drvinfo,
10030 .get_regs_len = bnx2x_get_regs_len,
10031 .get_regs = bnx2x_get_regs,
10032 .get_wol = bnx2x_get_wol,
10033 .set_wol = bnx2x_set_wol,
10034 .get_msglevel = bnx2x_get_msglevel,
10035 .set_msglevel = bnx2x_set_msglevel,
10036 .nway_reset = bnx2x_nway_reset,
10037 .get_link = ethtool_op_get_link,
10038 .get_eeprom_len = bnx2x_get_eeprom_len,
10039 .get_eeprom = bnx2x_get_eeprom,
10040 .set_eeprom = bnx2x_set_eeprom,
10041 .get_coalesce = bnx2x_get_coalesce,
10042 .set_coalesce = bnx2x_set_coalesce,
10043 .get_ringparam = bnx2x_get_ringparam,
10044 .set_ringparam = bnx2x_set_ringparam,
10045 .get_pauseparam = bnx2x_get_pauseparam,
10046 .set_pauseparam = bnx2x_set_pauseparam,
10047 .get_rx_csum = bnx2x_get_rx_csum,
10048 .set_rx_csum = bnx2x_set_rx_csum,
10049 .get_tx_csum = ethtool_op_get_tx_csum,
10050 .set_tx_csum = ethtool_op_set_tx_hw_csum,
10051 .set_flags = bnx2x_set_flags,
10052 .get_flags = ethtool_op_get_flags,
10053 .get_sg = ethtool_op_get_sg,
10054 .set_sg = ethtool_op_set_sg,
10055 .get_tso = ethtool_op_get_tso,
10056 .set_tso = bnx2x_set_tso,
10057 .self_test_count = bnx2x_self_test_count,
10058 .self_test = bnx2x_self_test,
10059 .get_strings = bnx2x_get_strings,
10060 .phys_id = bnx2x_phys_id,
10061 .get_stats_count = bnx2x_get_stats_count,
10062 .get_ethtool_stats = bnx2x_get_ethtool_stats,
10063 };
10065 /* end of ethtool_ops */
10067 /****************************************************************************
10068 * General service functions
10069 ****************************************************************************/
10071 static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
10072 {
10073 u16 pmcsr;
10075 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
10077 switch (state) {
10078 case PCI_D0:
10079 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
10080 ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
10081 PCI_PM_CTRL_PME_STATUS));
10083 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
10084 /* delay required during transition out of D3hot */
10085 msleep(20);
10086 break;
10088 case PCI_D3hot:
10089 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
10090 pmcsr |= 3;
10092 if (bp->wol)
10093 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
10095 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
10096 pmcsr);
10098 /* No more memory access after this point until
10099 * device is brought back to D0.
10100 */
10101 break;
10103 default:
10104 return -EINVAL;
10105 }
10106 return 0;
10107 }
10109 static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp)
10110 {
10111 u16 rx_cons_sb;
10113 /* Tell compiler that status block fields can change */
10114 barrier();
10115 rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
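/* Editorial note, inferred from the ring layout: the last descriptor of
 * each RCQ page points to the next page rather than holding a real
 * completion, so a consumer index that lands on MAX_RCQ_DESC_CNT is
 * bumped past it below.
 */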
10116 if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
10117 rx_cons_sb++;
10118 return (fp->rx_comp_cons != rx_cons_sb);
10119 }
10121 /*
10122 * net_device service functions
10123 */
10125 static int bnx2x_poll(struct napi_struct *napi, int budget)
10126 {
10127 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
10128 napi);
10129 struct bnx2x *bp = fp->bp;
10130 int work_done = 0;
10132 #ifdef BNX2X_STOP_ON_ERROR
10133 if (unlikely(bp->panic))
10134 goto poll_panic;
10135 #endif
10137 prefetch(fp->tx_buf_ring[TX_BD(fp->tx_pkt_cons)].skb);
10138 prefetch(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb);
10139 prefetch((char *)(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb) + 256);
10141 bnx2x_update_fpsb_idx(fp);
10143 if (bnx2x_has_tx_work(fp))
10144 bnx2x_tx_int(fp);
10146 if (bnx2x_has_rx_work(fp)) {
10147 work_done = bnx2x_rx_int(fp, budget);
10149 /* must not complete if we consumed full budget */
10150 if (work_done >= budget)
10151 goto poll_again;
10152 }
10154 /* BNX2X_HAS_WORK() reads the status block, thus we need to
10155 * ensure that status block indices have been actually read
10156 * (bnx2x_update_fpsb_idx) prior to this check (BNX2X_HAS_WORK)
10157 * so that we won't write the "newer" value of the status block to IGU
10158 * (if there was a DMA right after BNX2X_HAS_WORK and
10159 * if there is no rmb, the memory reading (bnx2x_update_fpsb_idx)
10160 * may be postponed to right before bnx2x_ack_sb). In this case
10161 * there will never be another interrupt until there is another update
10162 * of the status block, while there is still unhandled work.
10163 */
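/* Editorial sketch (not driver code) of the race the rmb() below
 * closes, assuming the status block is DMA-updated between the reads:
 *
 *	bnx2x_update_fpsb_idx(fp);	read #1: status block indices
 *					<-- DMA delivers a newer index
 *	if (!BNX2X_HAS_WORK(fp))	read #2: compare against consumers
 *		bnx2x_ack_sb(...);	acks the index from read #1
 *
 * Without the barrier, read #1 may be postponed until just before
 * bnx2x_ack_sb(), so the stale index is written to the IGU and no
 * further interrupt arrives even though work is pending.
 */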
10164 rmb();
10166 if (!BNX2X_HAS_WORK(fp)) {
10167 #ifdef BNX2X_STOP_ON_ERROR
10168 poll_panic:
10169 #endif
10170 napi_complete(napi);
10172 bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
10173 le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1);
10174 bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
10175 le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1);
10176 }
10178 poll_again:
10179 return work_done;
10180 }
10183 /* we split the first BD into headers and data BDs
10184 * to ease the pain of our fellow microcode engineers
10185 * we use one mapping for both BDs
10186 * So far this has only been observed to happen
10187 in Other Operating Systems(TM)
10188 */
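/* Editorial sketch of the split performed below (values illustrative):
 *
 *	before:	[ BD0: hlen header bytes + data, one DMA mapping ]
 *	after:	[ BD0: hlen bytes ][ BD1: old_len - hlen bytes ]
 *
 * BD1 reuses BD0's mapping at offset hlen, so nothing is remapped.
 */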
10189 static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
10190 struct bnx2x_fastpath *fp,
10191 struct eth_tx_bd **tx_bd, u16 hlen,
10192 u16 bd_prod, int nbd)
10193 {
10194 struct eth_tx_bd *h_tx_bd = *tx_bd;
10195 struct eth_tx_bd *d_tx_bd;
10196 dma_addr_t mapping;
10197 int old_len = le16_to_cpu(h_tx_bd->nbytes);
10199 /* first fix first BD */
10200 h_tx_bd->nbd = cpu_to_le16(nbd);
10201 h_tx_bd->nbytes = cpu_to_le16(hlen);
10203 DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
10204 "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
10205 h_tx_bd->addr_lo, h_tx_bd->nbd);
10207 /* now get a new data BD
10208 * (after the pbd) and fill it */
10209 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
10210 d_tx_bd = &fp->tx_desc_ring[bd_prod];
10212 mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
10213 le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
10215 d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
10216 d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
10217 d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
10218 d_tx_bd->vlan = 0;
10219 /* this marks the BD as one that has no individual mapping
10220 the FW ignores this flag in a BD not marked start
10221 */
10222 d_tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_SW_LSO;
10223 DP(NETIF_MSG_TX_QUEUED,
10224 "TSO split data size is %d (%x:%x)\n",
10225 d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
10227 /* update tx_bd for marking the last BD flag */
10228 *tx_bd = d_tx_bd;
10230 return bd_prod;
10231 }
10233 static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
10234 {
10235 if (fix > 0)
10236 csum = (u16) ~csum_fold(csum_sub(csum,
10237 csum_partial(t_header - fix, fix, 0)));
10239 else if (fix < 0)
10240 csum = (u16) ~csum_fold(csum_add(csum,
10241 csum_partial(t_header, -fix, 0)));
10243 return swab16(csum);
10244 }
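/* Editorial example, assuming fix == 2 (the hardware checksum started
 * two bytes before the transport header): the partial sum of those two
 * bytes is subtracted from csum, the result folded to 16 bits and
 * inverted, and finally byte-swapped into the order the parsing BD
 * expects.
 */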
10246 static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
10247 {
10248 u32 rc;
10250 if (skb->ip_summed != CHECKSUM_PARTIAL)
10251 rc = XMIT_PLAIN;
10253 else {
10254 if (skb->protocol == htons(ETH_P_IPV6)) {
10255 rc = XMIT_CSUM_V6;
10256 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
10257 rc |= XMIT_CSUM_TCP;
10259 } else {
10260 rc = XMIT_CSUM_V4;
10261 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
10262 rc |= XMIT_CSUM_TCP;
10263 }
10264 }
10266 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
10267 rc |= XMIT_GSO_V4;
10269 else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
10270 rc |= XMIT_GSO_V6;
10272 return rc;
10273 }
10275 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
10276 /* check if packet requires linearization (packet is too fragmented)
10277 no need to check fragmentation if page size > 8K (there will be no
10278 violation of FW restrictions) */
10279 static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
10280 u32 xmit_type)
10281 {
10282 int to_copy = 0;
10283 int hlen = 0;
10284 int first_bd_sz = 0;
10286 /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
10287 if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
10289 if (xmit_type & XMIT_GSO) {
10290 unsigned short lso_mss = skb_shinfo(skb)->gso_size;
10291 /* Check if LSO packet needs to be copied:
10292 3 = 1 (for headers BD) + 2 (for PBD and last BD) */
10293 int wnd_size = MAX_FETCH_BD - 3;
10294 /* Number of windows to check */
10295 int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
10296 int wnd_idx = 0;
10297 int frag_idx = 0;
10298 u32 wnd_sum = 0;
10300 /* Headers length */
10301 hlen = (int)(skb_transport_header(skb) - skb->data) +
10302 tcp_hdrlen(skb);
10304 /* Amount of data (w/o headers) on linear part of SKB*/
10305 first_bd_sz = skb_headlen(skb) - hlen;
10307 wnd_sum = first_bd_sz;
10309 /* Calculate the first sum - it's special */
10310 for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
10311 wnd_sum +=
10312 skb_shinfo(skb)->frags[frag_idx].size;
10314 /* If there was data on linear skb data - check it */
10315 if (first_bd_sz > 0) {
10316 if (unlikely(wnd_sum < lso_mss)) {
10317 to_copy = 1;
10318 goto exit_lbl;
10319 }
10321 wnd_sum -= first_bd_sz;
10322 }
10324 /* Others are easier: run through the frag list and
10325 check all windows */
10326 for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
10327 wnd_sum +=
10328 skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;
10330 if (unlikely(wnd_sum < lso_mss)) {
10331 to_copy = 1;
10332 break;
10333 }
10334 wnd_sum -=
10335 skb_shinfo(skb)->frags[wnd_idx].size;
10336 }
10337 } else {
10338 /* a non-LSO packet that is too fragmented must
10339 always be linearized */
10340 to_copy = 1;
10341 }
10342 }
10344 exit_lbl:
10345 if (unlikely(to_copy))
10346 DP(NETIF_MSG_TX_QUEUED,
10347 "Linearization IS REQUIRED for %s packet. "
10348 "num_frags %d hlen %d first_bd_sz %d\n",
10349 (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
10350 skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
10352 return to_copy;
10353 }
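/* Editorial worked example (assuming MAX_FETCH_BD is 13, i.e. wnd_size
 * is 10): an LSO skb with gso_size 1460 built from 12 frags of 100
 * bytes sums at most 1000 bytes over any 10-BD window, so the FW could
 * not fetch a full segment from one window and the skb is linearized.
 */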
10354 #endif
10356 /* called with netif_tx_lock
10357 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
10358 * netif_wake_queue()
10359 */
10360 static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
10361 {
10362 struct bnx2x *bp = netdev_priv(dev);
10363 struct bnx2x_fastpath *fp;
10364 struct netdev_queue *txq;
10365 struct sw_tx_bd *tx_buf;
10366 struct eth_tx_bd *tx_bd;
10367 struct eth_tx_parse_bd *pbd = NULL;
10368 u16 pkt_prod, bd_prod;
10369 int nbd, fp_index;
10370 dma_addr_t mapping;
10371 u32 xmit_type = bnx2x_xmit_type(bp, skb);
10372 int vlan_off = (bp->e1hov ? 4 : 0);
10373 int i;
10374 u8 hlen = 0;
10376 #ifdef BNX2X_STOP_ON_ERROR
10377 if (unlikely(bp->panic))
10378 return NETDEV_TX_BUSY;
10379 #endif
10381 fp_index = skb_get_queue_mapping(skb);
10382 txq = netdev_get_tx_queue(dev, fp_index);
10384 fp = &bp->fp[fp_index];
10386 if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) {
10387 fp->eth_q_stats.driver_xoff++;
10388 netif_tx_stop_queue(txq);
10389 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
10390 return NETDEV_TX_BUSY;
10391 }
10393 DP(NETIF_MSG_TX_QUEUED, "SKB: summed %x protocol %x protocol(%x,%x)"
10394 " gso type %x xmit_type %x\n",
10395 skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
10396 ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
10398 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
10399 /* First, check if we need to linearize the skb (due to FW
10400 restrictions). No need to check fragmentation if page size > 8K
10401 (there will be no violation of FW restrictions) */
10402 if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
10403 /* Statistics of linearization */
10404 bp->lin_cnt++;
10405 if (skb_linearize(skb) != 0) {
10406 DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
10407 "silently dropping this SKB\n");
10408 dev_kfree_skb_any(skb);
10409 return NETDEV_TX_OK;
10410 }
10411 }
10412 #endif
10414 /*
10415 Please read carefully. First we use one BD which we mark as start,
10416 then for TSO or xsum we have a parsing info BD,
10417 and only then we have the rest of the TSO BDs.
10418 (don't forget to mark the last one as last,
10419 and to unmap only AFTER you write to the BD ...)
10420 And above all, all pbd sizes are in words - NOT DWORDS!
10421 */
10423 pkt_prod = fp->tx_pkt_prod++;
10424 bd_prod = TX_BD(fp->tx_bd_prod);
10426 /* get a tx_buf and first BD */
10427 tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
10428 tx_bd = &fp->tx_desc_ring[bd_prod];
10430 tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
10431 tx_bd->general_data = (UNICAST_ADDRESS <<
10432 ETH_TX_BD_ETH_ADDR_TYPE_SHIFT);
10433 /* header nbd */
10434 tx_bd->general_data |= (1 << ETH_TX_BD_HDR_NBDS_SHIFT);
10436 /* remember the first BD of the packet */
10437 tx_buf->first_bd = fp->tx_bd_prod;
10438 tx_buf->skb = skb;
10440 DP(NETIF_MSG_TX_QUEUED,
10441 "sending pkt %u @%p next_idx %u bd %u @%p\n",
10442 pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_bd);
10444 #ifdef BCM_VLAN
10445 if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb) &&
10446 (bp->flags & HW_VLAN_TX_FLAG)) {
10447 tx_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
10448 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG;
10449 vlan_off += 4;
10450 } else
10451 #endif
10452 tx_bd->vlan = cpu_to_le16(pkt_prod);
10454 if (xmit_type) {
10455 /* turn on parsing and get a BD */
10456 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
10457 pbd = (void *)&fp->tx_desc_ring[bd_prod];
10459 memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
10460 }
10462 if (xmit_type & XMIT_CSUM) {
10463 hlen = (skb_network_header(skb) - skb->data + vlan_off) / 2;
10465 /* for now NS flag is not used in Linux */
10466 pbd->global_data =
10467 (hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
10468 ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT));
10470 pbd->ip_hlen = (skb_transport_header(skb) -
10471 skb_network_header(skb)) / 2;
10473 hlen += pbd->ip_hlen + tcp_hdrlen(skb) / 2;
10475 pbd->total_hlen = cpu_to_le16(hlen);
10476 hlen = hlen*2 - vlan_off;
10478 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_TCP_CSUM;
10480 if (xmit_type & XMIT_CSUM_V4)
10481 tx_bd->bd_flags.as_bitfield |=
10482 ETH_TX_BD_FLAGS_IP_CSUM;
10483 else
10484 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IPV6;
10486 if (xmit_type & XMIT_CSUM_TCP) {
10487 pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
10489 } else {
10490 s8 fix = SKB_CS_OFF(skb); /* signed! */
10492 pbd->global_data |= ETH_TX_PARSE_BD_CS_ANY_FLG;
10493 pbd->cs_offset = fix / 2;
10495 DP(NETIF_MSG_TX_QUEUED,
10496 "hlen %d offset %d fix %d csum before fix %x\n",
10497 le16_to_cpu(pbd->total_hlen), pbd->cs_offset, fix,
10498 SKB_CS(skb));
10500 /* HW bug: fixup the CSUM */
10501 pbd->tcp_pseudo_csum =
10502 bnx2x_csum_fix(skb_transport_header(skb),
10503 SKB_CS(skb), fix);
10505 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
10506 pbd->tcp_pseudo_csum);
10507 }
10508 }
10510 mapping = pci_map_single(bp->pdev, skb->data,
10511 skb_headlen(skb), PCI_DMA_TODEVICE);
10513 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
10514 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
10515 nbd = skb_shinfo(skb)->nr_frags + ((pbd == NULL) ? 1 : 2);
10516 tx_bd->nbd = cpu_to_le16(nbd);
10517 tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));
10519 DP(NETIF_MSG_TX_QUEUED, "first bd @%p addr (%x:%x) nbd %d"
10520 " nbytes %d flags %x vlan %x\n",
10521 tx_bd, tx_bd->addr_hi, tx_bd->addr_lo, le16_to_cpu(tx_bd->nbd),
10522 le16_to_cpu(tx_bd->nbytes), tx_bd->bd_flags.as_bitfield,
10523 le16_to_cpu(tx_bd->vlan));
10525 if (xmit_type & XMIT_GSO) {
10527 DP(NETIF_MSG_TX_QUEUED,
10528 "TSO packet len %d hlen %d total len %d tso size %d\n",
10529 skb->len, hlen, skb_headlen(skb),
10530 skb_shinfo(skb)->gso_size);
10532 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
10534 if (unlikely(skb_headlen(skb) > hlen))
10535 bd_prod = bnx2x_tx_split(bp, fp, &tx_bd, hlen,
10536 bd_prod, ++nbd);
10538 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
10539 pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
10540 pbd->tcp_flags = pbd_tcp_flags(skb);
10542 if (xmit_type & XMIT_GSO_V4) {
10543 pbd->ip_id = swab16(ip_hdr(skb)->id);
10544 pbd->tcp_pseudo_csum =
10545 swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
10546 ip_hdr(skb)->daddr,
10547 0, IPPROTO_TCP, 0));
10549 } else
10550 pbd->tcp_pseudo_csum =
10551 swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
10552 &ipv6_hdr(skb)->daddr,
10553 0, IPPROTO_TCP, 0));
10555 pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN;
10556 }
10558 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
10559 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
10561 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
10562 tx_bd = &fp->tx_desc_ring[bd_prod];
10564 mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
10565 frag->size, PCI_DMA_TODEVICE);
10567 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
10568 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
10569 tx_bd->nbytes = cpu_to_le16(frag->size);
10570 tx_bd->vlan = cpu_to_le16(pkt_prod);
10571 tx_bd->bd_flags.as_bitfield = 0;
10573 DP(NETIF_MSG_TX_QUEUED,
10574 "frag %d bd @%p addr (%x:%x) nbytes %d flags %x\n",
10575 i, tx_bd, tx_bd->addr_hi, tx_bd->addr_lo,
10576 le16_to_cpu(tx_bd->nbytes), tx_bd->bd_flags.as_bitfield);
10577 }
10579 /* now at last mark the BD as the last BD */
10580 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_END_BD;
10582 DP(NETIF_MSG_TX_QUEUED, "last bd @%p flags %x\n",
10583 tx_bd, tx_bd->bd_flags.as_bitfield);
10585 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
10587 /* now send a tx doorbell, counting the next BD
10588 * if the packet contains or ends with it
10589 */
10590 if (TX_BD_POFF(bd_prod) < nbd)
10591 nbd++;
10593 if (pbd)
10594 DP(NETIF_MSG_TX_QUEUED,
10595 "PBD @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u"
10596 " tcp_flags %x xsum %x seq %u hlen %u\n",
10597 pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id,
10598 pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum,
10599 pbd->tcp_send_seq, le16_to_cpu(pbd->total_hlen));
10601 DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
10604 * Make sure that the BD data is updated before updating the producer
10605 * since FW might read the BD right after the producer is updated.
10606 * This is only applicable for weak-ordered memory model archs such
10607 * as IA-64. The following barrier is also mandatory since the FW
10608 * assumes packets must have BDs.
10609 */
10610 wmb();
10612 le16_add_cpu(&fp->hw_tx_prods->bds_prod, nbd);
10613 mb(); /* FW restriction: must not reorder writing nbd and packets */
10614 le32_add_cpu(&fp->hw_tx_prods->packets_prod, 1);
10615 DOORBELL(bp, fp->index, 0);
10617 mmiowb();
10619 fp->tx_bd_prod += nbd;
10621 if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
10622 /* We want bnx2x_tx_int to "see" the updated tx_bd_prod
10623 if we put Tx into XOFF state. */
10624 smp_mb();
10625 netif_tx_stop_queue(txq);
10626 fp->eth_q_stats.driver_xoff++;
10627 if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
10628 netif_tx_wake_queue(txq);
10629 }
10630 fp->tx_pkt++;
10632 return NETDEV_TX_OK;
10633 }
10635 /* called with rtnl_lock */
10636 static int bnx2x_open(struct net_device *dev)
10637 {
10638 struct bnx2x *bp = netdev_priv(dev);
10640 netif_carrier_off(dev);
10642 bnx2x_set_power_state(bp, PCI_D0);
10644 return bnx2x_nic_load(bp, LOAD_OPEN);
10645 }
10647 /* called with rtnl_lock */
10648 static int bnx2x_close(struct net_device *dev)
10649 {
10650 struct bnx2x *bp = netdev_priv(dev);
10652 /* Unload the driver, release IRQs */
10653 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
10654 if (atomic_read(&bp->pdev->enable_cnt) == 1)
10655 if (!CHIP_REV_IS_SLOW(bp))
10656 bnx2x_set_power_state(bp, PCI_D3hot);
10658 return 0;
10659 }
10661 /* called with netif_tx_lock from dev_mcast.c */
10662 static void bnx2x_set_rx_mode(struct net_device *dev)
10663 {
10664 struct bnx2x *bp = netdev_priv(dev);
10665 u32 rx_mode = BNX2X_RX_MODE_NORMAL;
10666 int port = BP_PORT(bp);
10668 if (bp->state != BNX2X_STATE_OPEN) {
10669 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
10670 return;
10671 }
10673 DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);
10675 if (dev->flags & IFF_PROMISC)
10676 rx_mode = BNX2X_RX_MODE_PROMISC;
10678 else if ((dev->flags & IFF_ALLMULTI) ||
10679 ((dev->mc_count > BNX2X_MAX_MULTICAST) && CHIP_IS_E1(bp)))
10680 rx_mode = BNX2X_RX_MODE_ALLMULTI;
10682 else { /* some multicasts */
10683 if (CHIP_IS_E1(bp)) {
10684 int i, old, offset;
10685 struct dev_mc_list *mclist;
10686 struct mac_configuration_cmd *config =
10687 bnx2x_sp(bp, mcast_config);
10689 for (i = 0, mclist = dev->mc_list;
10690 mclist && (i < dev->mc_count);
10691 i++, mclist = mclist->next) {
10693 config->config_table[i].
10694 cam_entry.msb_mac_addr =
10695 swab16(*(u16 *)&mclist->dmi_addr[0]);
10696 config->config_table[i].
10697 cam_entry.middle_mac_addr =
10698 swab16(*(u16 *)&mclist->dmi_addr[2]);
10699 config->config_table[i].
10700 cam_entry.lsb_mac_addr =
10701 swab16(*(u16 *)&mclist->dmi_addr[4]);
10702 config->config_table[i].cam_entry.flags =
10703 cpu_to_le16(port);
10704 config->config_table[i].
10705 target_table_entry.flags = 0;
10706 config->config_table[i].
10707 target_table_entry.client_id = 0;
10708 config->config_table[i].
10709 target_table_entry.vlan_id = 0;
10711 DP(NETIF_MSG_IFUP,
10712 "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
10713 config->config_table[i].
10714 cam_entry.msb_mac_addr,
10715 config->config_table[i].
10716 cam_entry.middle_mac_addr,
10717 config->config_table[i].
10718 cam_entry.lsb_mac_addr);
10719 }
10720 old = config->hdr.length;
10721 if (old > i) {
10722 for (; i < old; i++) {
10723 if (CAM_IS_INVALID(config->
10724 config_table[i])) {
10725 /* already invalidated */
10726 break;
10727 }
10728 /* invalidate */
10729 CAM_INVALIDATE(config->
10730 config_table[i]);
10731 }
10732 }
10734 if (CHIP_REV_IS_SLOW(bp))
10735 offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
10736 else
10737 offset = BNX2X_MAX_MULTICAST*(1 + port);
10739 config->hdr.length = i;
10740 config->hdr.offset = offset;
10741 config->hdr.client_id = bp->fp->cl_id;
10742 config->hdr.reserved1 = 0;
10744 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
10745 U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
10746 U64_LO(bnx2x_sp_mapping(bp, mcast_config)),
10747 0);
10748 } else { /* E1H */
10749 /* Accept one or more multicasts */
10750 struct dev_mc_list *mclist;
10751 u32 mc_filter[MC_HASH_SIZE];
10752 u32 crc, bit, regidx;
10753 int i;
10755 memset(mc_filter, 0, 4 * MC_HASH_SIZE);
10757 for (i = 0, mclist = dev->mc_list;
10758 mclist && (i < dev->mc_count);
10759 i++, mclist = mclist->next) {
10761 DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
10762 mclist->dmi_addr);
10764 crc = crc32c_le(0, mclist->dmi_addr, ETH_ALEN);
10765 bit = (crc >> 24) & 0xff;
10766 regidx = bit >> 5;
10767 bit &= 0x1f;
10768 mc_filter[regidx] |= (1 << bit);
10769 }
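/* Editorial example of the hash in the loop above: crc32c_le() is
 * taken over the 6 MAC address bytes; bits 31:24 of the CRC select one
 * of 256 filter bits, of which the top 3 pick one of the 8 32-bit
 * MC_HASH registers and the low 5 the bit within that register.
 */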
10771 for (i = 0; i < MC_HASH_SIZE; i++)
10772 REG_WR(bp, MC_HASH_OFFSET(bp, i),
10773 mc_filter[i]);
10774 }
10775 }
10777 bp->rx_mode = rx_mode;
10778 bnx2x_set_storm_rx_mode(bp);
10779 }
10781 /* called with rtnl_lock */
10782 static int bnx2x_change_mac_addr(struct net_device *dev, void *p)
10783 {
10784 struct sockaddr *addr = p;
10785 struct bnx2x *bp = netdev_priv(dev);
10787 if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
10788 return -EINVAL;
10790 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
10791 if (netif_running(dev)) {
10792 if (CHIP_IS_E1(bp))
10793 bnx2x_set_mac_addr_e1(bp, 1);
10794 else
10795 bnx2x_set_mac_addr_e1h(bp, 1);
10796 }
10798 return 0;
10799 }
10801 /* called with rtnl_lock */
10802 static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
10803 {
10804 struct mii_ioctl_data *data = if_mii(ifr);
10805 struct bnx2x *bp = netdev_priv(dev);
10806 int port = BP_PORT(bp);
10807 int err;
10809 switch (cmd) {
10810 case SIOCGMIIPHY:
10811 data->phy_id = bp->port.phy_addr;
10813 /* fallthrough */
10815 case SIOCGMIIREG: {
10816 u16 mii_regval;
10818 if (!netif_running(dev))
10819 return -EAGAIN;
10821 mutex_lock(&bp->port.phy_mutex);
10822 err = bnx2x_cl45_read(bp, port, 0, bp->port.phy_addr,
10823 DEFAULT_PHY_DEV_ADDR,
10824 (data->reg_num & 0x1f), &mii_regval);
10825 data->val_out = mii_regval;
10826 mutex_unlock(&bp->port.phy_mutex);
10827 return err;
10828 }
10830 case SIOCSMIIREG:
10831 if (!capable(CAP_NET_ADMIN))
10832 return -EPERM;
10834 if (!netif_running(dev))
10835 return -EAGAIN;
10837 mutex_lock(&bp->port.phy_mutex);
10838 err = bnx2x_cl45_write(bp, port, 0, bp->port.phy_addr,
10839 DEFAULT_PHY_DEV_ADDR,
10840 (data->reg_num & 0x1f), data->val_in);
10841 mutex_unlock(&bp->port.phy_mutex);
10842 return err;
10844 default:
10845 /* do nothing */
10846 break;
10847 }
10849 return -EOPNOTSUPP;
10850 }
10852 /* called with rtnl_lock */
10853 static int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
10854 {
10855 struct bnx2x *bp = netdev_priv(dev);
10856 int rc = 0;
10858 if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
10859 ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
10860 return -EINVAL;
10862 /* This does not race with packet allocation
10863 * because the actual alloc size is
10864 * only updated as part of load
10865 */
10866 dev->mtu = new_mtu;
10868 if (netif_running(dev)) {
10869 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
10870 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
10871 }
10873 return rc;
10874 }
10876 static void bnx2x_tx_timeout(struct net_device *dev)
10877 {
10878 struct bnx2x *bp = netdev_priv(dev);
10880 #ifdef BNX2X_STOP_ON_ERROR
10881 if (!bp->panic)
10882 bnx2x_panic();
10883 #endif
10884 /* This allows the netif to be shutdown gracefully before resetting */
10885 schedule_work(&bp->reset_task);
10886 }
10888 #ifdef BCM_VLAN
10889 /* called with rtnl_lock */
10890 static void bnx2x_vlan_rx_register(struct net_device *dev,
10891 struct vlan_group *vlgrp)
10892 {
10893 struct bnx2x *bp = netdev_priv(dev);
10895 bp->vlgrp = vlgrp;
10897 /* Set flags according to the required capabilities */
10898 bp->flags &= ~(HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
10900 if (dev->features & NETIF_F_HW_VLAN_TX)
10901 bp->flags |= HW_VLAN_TX_FLAG;
10903 if (dev->features & NETIF_F_HW_VLAN_RX)
10904 bp->flags |= HW_VLAN_RX_FLAG;
10906 if (netif_running(dev))
10907 bnx2x_set_client_config(bp);
10908 }
10910 #endif
10912 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
10913 static void poll_bnx2x(struct net_device *dev)
10914 {
10915 struct bnx2x *bp = netdev_priv(dev);
10917 disable_irq(bp->pdev->irq);
10918 bnx2x_interrupt(bp->pdev->irq, dev);
10919 enable_irq(bp->pdev->irq);
10920 }
10921 #endif
10923 static const struct net_device_ops bnx2x_netdev_ops = {
10924 .ndo_open = bnx2x_open,
10925 .ndo_stop = bnx2x_close,
10926 .ndo_start_xmit = bnx2x_start_xmit,
10927 .ndo_set_multicast_list = bnx2x_set_rx_mode,
10928 .ndo_set_mac_address = bnx2x_change_mac_addr,
10929 .ndo_validate_addr = eth_validate_addr,
10930 .ndo_do_ioctl = bnx2x_ioctl,
10931 .ndo_change_mtu = bnx2x_change_mtu,
10932 .ndo_tx_timeout = bnx2x_tx_timeout,
10933 #ifdef BCM_VLAN
10934 .ndo_vlan_rx_register = bnx2x_vlan_rx_register,
10935 #endif
10936 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
10937 .ndo_poll_controller = poll_bnx2x,
10938 #endif
10939 };
10941 static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
10942 struct net_device *dev)
10943 {
10944 struct bnx2x *bp;
10945 int rc;
10947 SET_NETDEV_DEV(dev, &pdev->dev);
10948 bp = netdev_priv(dev);
10950 bp->dev = dev;
10951 bp->pdev = pdev;
10952 bp->flags = 0;
10953 bp->func = PCI_FUNC(pdev->devfn);
10955 rc = pci_enable_device(pdev);
10956 if (rc) {
10957 printk(KERN_ERR PFX "Cannot enable PCI device, aborting\n");
10958 goto err_out;
10959 }
10961 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
10962 printk(KERN_ERR PFX "Cannot find PCI device base address,"
10963 " aborting\n");
10964 rc = -ENODEV;
10965 goto err_out_disable;
10966 }
10968 if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
10969 printk(KERN_ERR PFX "Cannot find second PCI device"
10970 " base address, aborting\n");
10971 rc = -ENODEV;
10972 goto err_out_disable;
10973 }
10975 if (atomic_read(&pdev->enable_cnt) == 1) {
10976 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
10977 if (rc) {
10978 printk(KERN_ERR PFX "Cannot obtain PCI resources,"
10979 " aborting\n");
10980 goto err_out_disable;
10981 }
10983 pci_set_master(pdev);
10984 pci_save_state(pdev);
10985 }
10987 bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
10988 if (bp->pm_cap == 0) {
10989 printk(KERN_ERR PFX "Cannot find power management"
10990 " capability, aborting\n");
10991 rc = -EIO;
10992 goto err_out_release;
10993 }
10995 bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
10996 if (bp->pcie_cap == 0) {
10997 printk(KERN_ERR PFX "Cannot find PCI Express capability,"
10998 " aborting\n");
10999 rc = -EIO;
11000 goto err_out_release;
11001 }
11003 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0) {
11004 bp->flags |= USING_DAC_FLAG;
11005 if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) {
11006 printk(KERN_ERR PFX "pci_set_consistent_dma_mask"
11007 " failed, aborting\n");
11008 rc = -EIO;
11009 goto err_out_release;
11010 }
11012 } else if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
11013 printk(KERN_ERR PFX "System does not support DMA,"
11014 " aborting\n");
11015 rc = -EIO;
11016 goto err_out_release;
11017 }
11019 dev->mem_start = pci_resource_start(pdev, 0);
11020 dev->base_addr = dev->mem_start;
11021 dev->mem_end = pci_resource_end(pdev, 0);
11023 dev->irq = pdev->irq;
11025 bp->regview = pci_ioremap_bar(pdev, 0);
11026 if (!bp->regview) {
11027 printk(KERN_ERR PFX "Cannot map register space, aborting\n");
11028 rc = -ENOMEM;
11029 goto err_out_release;
11030 }
11032 bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
11033 min_t(u64, BNX2X_DB_SIZE,
11034 pci_resource_len(pdev, 2)));
11035 if (!bp->doorbells) {
11036 printk(KERN_ERR PFX "Cannot map doorbell space, aborting\n");
11037 rc = -ENOMEM;
11038 goto err_out_unmap;
11039 }
11041 bnx2x_set_power_state(bp, PCI_D0);
11043 /* clean indirect addresses */
11044 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
11045 PCICFG_VENDOR_ID_OFFSET);
11046 REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
11047 REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
11048 REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
11049 REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);
11051 dev->watchdog_timeo = TX_TIMEOUT;
11053 dev->netdev_ops = &bnx2x_netdev_ops;
11054 dev->ethtool_ops = &bnx2x_ethtool_ops;
11055 dev->features |= NETIF_F_SG;
11056 dev->features |= NETIF_F_HW_CSUM;
11057 if (bp->flags & USING_DAC_FLAG)
11058 dev->features |= NETIF_F_HIGHDMA;
11059 #ifdef BCM_VLAN
11060 dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
11061 bp->flags |= (HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
11062 #endif
11063 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
11064 dev->features |= NETIF_F_TSO6;
11066 return 0;
11068 err_out_unmap:
11069 if (bp->regview) {
11070 iounmap(bp->regview);
11071 bp->regview = NULL;
11072 }
11073 if (bp->doorbells) {
11074 iounmap(bp->doorbells);
11075 bp->doorbells = NULL;
11076 }
11078 err_out_release:
11079 if (atomic_read(&pdev->enable_cnt) == 1)
11080 pci_release_regions(pdev);
11082 err_out_disable:
11083 pci_disable_device(pdev);
11084 pci_set_drvdata(pdev, NULL);
11086 err_out:
11087 return rc;
11088 }
11090 static int __devinit bnx2x_get_pcie_width(struct bnx2x *bp)
11091 {
11092 u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
11094 val = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
11095 return val;
11096 }
11098 /* return value of 1=2.5GHz 2=5GHz */
11099 static int __devinit bnx2x_get_pcie_speed(struct bnx2x *bp)
11100 {
11101 u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
11103 val = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
11104 return val;
11105 }
11106 static int __devinit bnx2x_check_firmware(struct bnx2x *bp)
11107 {
11108 struct bnx2x_fw_file_hdr *fw_hdr;
11109 struct bnx2x_fw_file_section *sections;
11110 u16 *ops_offsets;
11111 u32 offset, len, num_ops;
11112 int i;
11113 const struct firmware *firmware = bp->firmware;
11114 const u8 *fw_ver;
11116 if (firmware->size < sizeof(struct bnx2x_fw_file_hdr))
11117 return -EINVAL;
11119 fw_hdr = (struct bnx2x_fw_file_hdr *)firmware->data;
11120 sections = (struct bnx2x_fw_file_section *)fw_hdr;
11122 /* Make sure none of the offsets and sizes make us read beyond
11123 * the end of the firmware data */
11124 for (i = 0; i < sizeof(*fw_hdr) / sizeof(*sections); i++) {
11125 offset = be32_to_cpu(sections[i].offset);
11126 len = be32_to_cpu(sections[i].len);
11127 if (offset + len > firmware->size) {
11128 printk(KERN_ERR PFX "Section %d length is out of bounds\n", i);
11129 return -EINVAL;
11130 }
11131 }
11133 /* Likewise for the init_ops offsets */
11134 offset = be32_to_cpu(fw_hdr->init_ops_offsets.offset);
11135 ops_offsets = (u16 *)(firmware->data + offset);
11136 num_ops = be32_to_cpu(fw_hdr->init_ops.len) / sizeof(struct raw_op);
11138 for (i = 0; i < be32_to_cpu(fw_hdr->init_ops_offsets.len) / 2; i++) {
11139 if (be16_to_cpu(ops_offsets[i]) > num_ops) {
11140 printk(KERN_ERR PFX "Section offset %d is out of bounds\n", i);
11141 return -EINVAL;
11142 }
11143 }
11145 /* Check FW version */
11146 offset = be32_to_cpu(fw_hdr->fw_version.offset);
11147 fw_ver = firmware->data + offset;
11148 if ((fw_ver[0] != BCM_5710_FW_MAJOR_VERSION) ||
11149 (fw_ver[1] != BCM_5710_FW_MINOR_VERSION) ||
11150 (fw_ver[2] != BCM_5710_FW_REVISION_VERSION) ||
11151 (fw_ver[3] != BCM_5710_FW_ENGINEERING_VERSION)) {
11152 printk(KERN_ERR PFX "Bad FW version:%d.%d.%d.%d."
11153 " Should be %d.%d.%d.%d\n",
11154 fw_ver[0], fw_ver[1], fw_ver[2],
11155 fw_ver[3], BCM_5710_FW_MAJOR_VERSION,
11156 BCM_5710_FW_MINOR_VERSION,
11157 BCM_5710_FW_REVISION_VERSION,
11158 BCM_5710_FW_ENGINEERING_VERSION);
11159 return -EINVAL;
11160 }
11162 return 0;
11163 }
11165 static inline void be32_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
11166 {
11167 u32 i;
11168 const __be32 *source = (const __be32*)_source;
11169 u32 *target = (u32*)_target;
11171 for (i = 0; i < n/4; i++)
11172 target[i] = be32_to_cpu(source[i]);
11173 }
11175 /*
11176 Ops array is stored in the following format:
11177 {op(8bit), offset(24bit, big endian), data(32bit, big endian)}
11178 */
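/* Editorial example (made-up values): the big-endian pair
 * 0x05000100 0x0000ffff unpacks below to op 0x05, offset 0x000100,
 * raw_data 0x0000ffff.
 */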
11179 static inline void bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n)
11180 {
11181 u32 i, j, tmp;
11182 const __be32 *source = (const __be32*)_source;
11183 struct raw_op *target = (struct raw_op*)_target;
11185 for (i = 0, j = 0; i < n/8; i++, j+=2) {
11186 tmp = be32_to_cpu(source[j]);
11187 target[i].op = (tmp >> 24) & 0xff;
11188 target[i].offset = tmp & 0xffffff;
11189 target[i].raw_data = be32_to_cpu(source[j+1]);
11190 }
11191 }
11192 static inline void be16_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
11193 {
11194 u32 i;
11195 u16 *target = (u16*)_target;
11196 const __be16 *source = (const __be16*)_source;
11198 for (i = 0; i < n/2; i++)
11199 target[i] = be16_to_cpu(source[i]);
11200 }
11202 #define BNX2X_ALLOC_AND_SET(arr, lbl, func) \
11203 do { \
11204 u32 len = be32_to_cpu(fw_hdr->arr.len); \
11205 bp->arr = kmalloc(len, GFP_KERNEL); \
11206 if (!bp->arr) { \
11207 printk(KERN_ERR PFX "Failed to allocate %d bytes for "#arr"\n", len); \
11208 goto lbl; \
11209 } \
11210 func(bp->firmware->data + \
11211 be32_to_cpu(fw_hdr->arr.offset), \
11212 (u8*)bp->arr, len); \
11213 } while (0)
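/* Editorial usage note: each BNX2X_ALLOC_AND_SET(arr, lbl, func)
 * invocation below allocates bp->arr with the length recorded in the
 * file header, converts the big-endian blob with func, and on
 * allocation failure jumps to the unwind label lbl, e.g.
 *
 *	BNX2X_ALLOC_AND_SET(init_ops, init_ops_alloc_err, bnx2x_prep_ops);
 */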
11216 static int __devinit bnx2x_init_firmware(struct bnx2x *bp, struct device *dev)
11217 {
11218 char fw_file_name[40] = {0};
11219 int rc, offset;
11220 struct bnx2x_fw_file_hdr *fw_hdr;
11222 /* Create a FW file name */
11223 if (CHIP_IS_E1(bp))
11224 offset = sprintf(fw_file_name, FW_FILE_PREFIX_E1);
11225 else
11226 offset = sprintf(fw_file_name, FW_FILE_PREFIX_E1H);
11228 sprintf(fw_file_name + offset, "%d.%d.%d.%d.fw",
11229 BCM_5710_FW_MAJOR_VERSION,
11230 BCM_5710_FW_MINOR_VERSION,
11231 BCM_5710_FW_REVISION_VERSION,
11232 BCM_5710_FW_ENGINEERING_VERSION);
11234 printk(KERN_INFO PFX "Loading %s\n", fw_file_name);
11236 rc = request_firmware(&bp->firmware, fw_file_name, dev);
11237 if (rc) {
11238 printk(KERN_ERR PFX "Can't load firmware file %s\n", fw_file_name);
11239 goto request_firmware_exit;
11240 }
11242 rc = bnx2x_check_firmware(bp);
11243 if (rc) {
11244 printk(KERN_ERR PFX "Corrupt firmware file %s\n", fw_file_name);
11245 goto request_firmware_exit;
11246 }
11248 fw_hdr = (struct bnx2x_fw_file_hdr *)bp->firmware->data;
11250 /* Initialize the pointers to the init arrays */
11251 /* Blob */
11252 BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit, be32_to_cpu_n);
11254 /* Opcodes */
11255 BNX2X_ALLOC_AND_SET(init_ops, init_ops_alloc_err, bnx2x_prep_ops);
11257 /* Offsets */
11258 BNX2X_ALLOC_AND_SET(init_ops_offsets, init_offsets_alloc_err, be16_to_cpu_n);
11260 /* STORMs firmware */
11261 bp->tsem_int_table_data = bp->firmware->data +
11262 be32_to_cpu(fw_hdr->tsem_int_table_data.offset);
11263 bp->tsem_pram_data = bp->firmware->data +
11264 be32_to_cpu(fw_hdr->tsem_pram_data.offset);
11265 bp->usem_int_table_data = bp->firmware->data +
11266 be32_to_cpu(fw_hdr->usem_int_table_data.offset);
11267 bp->usem_pram_data = bp->firmware->data +
11268 be32_to_cpu(fw_hdr->usem_pram_data.offset);
11269 bp->xsem_int_table_data = bp->firmware->data +
11270 be32_to_cpu(fw_hdr->xsem_int_table_data.offset);
11271 bp->xsem_pram_data = bp->firmware->data +
11272 be32_to_cpu(fw_hdr->xsem_pram_data.offset);
11273 bp->csem_int_table_data = bp->firmware->data +
11274 be32_to_cpu(fw_hdr->csem_int_table_data.offset);
11275 bp->csem_pram_data = bp->firmware->data +
11276 be32_to_cpu(fw_hdr->csem_pram_data.offset);
11278 return 0;
11279 init_offsets_alloc_err:
11280 kfree(bp->init_ops);
11281 init_ops_alloc_err:
11282 kfree(bp->init_data);
11283 request_firmware_exit:
11284 release_firmware(bp->firmware);
11286 return rc;
11287 }
11291 static int __devinit bnx2x_init_one(struct pci_dev *pdev,
11292 const struct pci_device_id *ent)
11293 {
11294 static int version_printed;
11295 struct net_device *dev = NULL;
11296 struct bnx2x *bp;
11297 int rc;
11299 if (version_printed++ == 0)
11300 printk(KERN_INFO "%s", version);
11302 /* dev zeroed in init_etherdev */
11303 dev = alloc_etherdev_mq(sizeof(*bp), MAX_CONTEXT);
11304 if (!dev) {
11305 printk(KERN_ERR PFX "Cannot allocate net device\n");
11306 return -ENOMEM;
11307 }
11309 bp = netdev_priv(dev);
11310 bp->msglevel = debug;
11312 rc = bnx2x_init_dev(pdev, dev);
11313 if (rc < 0) {
11314 free_netdev(dev);
11315 return rc;
11316 }
11318 pci_set_drvdata(pdev, dev);
11320 rc = bnx2x_init_bp(bp);
11321 if (rc)
11322 goto init_one_exit;
11324 /* Set init arrays */
11325 rc = bnx2x_init_firmware(bp, &pdev->dev);
11326 if (rc) {
11327 printk(KERN_ERR PFX "Error loading firmware\n");
11328 goto init_one_exit;
11329 }
11331 rc = register_netdev(dev);
11332 if (rc) {
11333 dev_err(&pdev->dev, "Cannot register net device\n");
11334 goto init_one_exit;
11335 }
11337 printk(KERN_INFO "%s: %s (%c%d) PCI-E x%d %s found at mem %lx,"
11338 " IRQ %d, ", dev->name, board_info[ent->driver_data].name,
11339 (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
11340 bnx2x_get_pcie_width(bp),
11341 (bnx2x_get_pcie_speed(bp) == 2) ? "5GHz (Gen2)" : "2.5GHz",
11342 dev->base_addr, bp->pdev->irq);
11343 printk(KERN_CONT "node addr %pM\n", dev->dev_addr);
11345 return 0;
11347 init_one_exit:
11348 if (bp->regview)
11349 iounmap(bp->regview);
11351 if (bp->doorbells)
11352 iounmap(bp->doorbells);
11354 free_netdev(dev);
11356 if (atomic_read(&pdev->enable_cnt) == 1)
11357 pci_release_regions(pdev);
11359 pci_disable_device(pdev);
11360 pci_set_drvdata(pdev, NULL);
11362 return rc;
11363 }
11365 static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
11366 {
11367 struct net_device *dev = pci_get_drvdata(pdev);
11368 struct bnx2x *bp;
11370 if (!dev) {
11371 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
11372 return;
11373 }
11374 bp = netdev_priv(dev);
11376 unregister_netdev(dev);
11378 kfree(bp->init_ops_offsets);
11379 kfree(bp->init_ops);
11380 kfree(bp->init_data);
11381 release_firmware(bp->firmware);
11383 if (bp->regview)
11384 iounmap(bp->regview);
11386 if (bp->doorbells)
11387 iounmap(bp->doorbells);
11389 free_netdev(dev);
11391 if (atomic_read(&pdev->enable_cnt) == 1)
11392 pci_release_regions(pdev);
11394 pci_disable_device(pdev);
11395 pci_set_drvdata(pdev, NULL);
11396 }
11398 static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
11399 {
11400 struct net_device *dev = pci_get_drvdata(pdev);
11401 struct bnx2x *bp;
11403 if (!dev) {
11404 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
11405 return -ENODEV;
11406 }
11407 bp = netdev_priv(dev);
11409 rtnl_lock();
11411 pci_save_state(pdev);
11413 if (!netif_running(dev)) {
11414 rtnl_unlock();
11415 return 0;
11416 }
11418 netif_device_detach(dev);
11420 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
11422 bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
11424 rtnl_unlock();
11426 return 0;
11427 }
11429 static int bnx2x_resume(struct pci_dev *pdev)
11430 {
11431 struct net_device *dev = pci_get_drvdata(pdev);
11432 struct bnx2x *bp;
11433 int rc;
11435 if (!dev) {
11436 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
11437 return -ENODEV;
11438 }
11439 bp = netdev_priv(dev);
11441 rtnl_lock();
11443 pci_restore_state(pdev);
11445 if (!netif_running(dev)) {
11446 rtnl_unlock();
11447 return 0;
11448 }
11450 bnx2x_set_power_state(bp, PCI_D0);
11451 netif_device_attach(dev);
11453 rc = bnx2x_nic_load(bp, LOAD_OPEN);
11455 rtnl_unlock();
11457 return rc;
11458 }
11460 static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
11461 {
11462 int i;
11464 bp->state = BNX2X_STATE_ERROR;
11466 bp->rx_mode = BNX2X_RX_MODE_NONE;
11468 bnx2x_netif_stop(bp, 0);
11470 del_timer_sync(&bp->timer);
11471 bp->stats_state = STATS_STATE_DISABLED;
11472 DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
11474 /* Release IRQs */
11475 bnx2x_free_irq(bp);
11477 if (CHIP_IS_E1(bp)) {
11478 struct mac_configuration_cmd *config =
11479 bnx2x_sp(bp, mcast_config);
11481 for (i = 0; i < config->hdr.length; i++)
11482 CAM_INVALIDATE(config->config_table[i]);
11483 }
11485 /* Free SKBs, SGEs, TPA pool and driver internals */
11486 bnx2x_free_skbs(bp);
11487 for_each_rx_queue(bp, i)
11488 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
11489 for_each_rx_queue(bp, i)
11490 netif_napi_del(&bnx2x_fp(bp, i, napi));
11491 bnx2x_free_mem(bp);
11493 bp->state = BNX2X_STATE_CLOSED;
11495 netif_carrier_off(bp->dev);
11497 return 0;
11498 }
11500 static void bnx2x_eeh_recover(struct bnx2x *bp)
11501 {
11502 u32 val;
11504 mutex_init(&bp->port.phy_mutex);
11506 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
11507 bp->link_params.shmem_base = bp->common.shmem_base;
11508 BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);
11510 if (!bp->common.shmem_base ||
11511 (bp->common.shmem_base < 0xA0000) ||
11512 (bp->common.shmem_base >= 0xC0000)) {
11513 BNX2X_DEV_INFO("MCP not active\n");
11514 bp->flags |= NO_MCP_FLAG;
11515 return;
11516 }
11518 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
11519 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
11520 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
11521 BNX2X_ERR("BAD MCP validity signature\n");
11523 if (!BP_NOMCP(bp)) {
11524 bp->fw_seq = (SHMEM_RD(bp, func_mb[BP_FUNC(bp)].drv_mb_header)
11525 & DRV_MSG_SEQ_NUMBER_MASK);
11526 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
11531 * bnx2x_io_error_detected - called when PCI error is detected
11532 * @pdev: Pointer to PCI device
11533 * @state: The current pci connection state
11534 *
11535 * This function is called after a PCI bus error affecting
11536 * this device has been detected.
11537 */
11538 static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
11539 pci_channel_state_t state)
11540 {
11541 struct net_device *dev = pci_get_drvdata(pdev);
11542 struct bnx2x *bp = netdev_priv(dev);
11544 rtnl_lock();
11546 netif_device_detach(dev);
11548 if (netif_running(dev))
11549 bnx2x_eeh_nic_unload(bp);
11551 pci_disable_device(pdev);
11553 rtnl_unlock();
11555 /* Request a slot reset */
11556 return PCI_ERS_RESULT_NEED_RESET;
11557 }
11559 /**
11560 * bnx2x_io_slot_reset - called after the PCI bus has been reset
11561 * @pdev: Pointer to PCI device
11562 *
11563 * Restart the card from scratch, as if from a cold-boot.
11564 */
11565 static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
11566 {
11567 struct net_device *dev = pci_get_drvdata(pdev);
11568 struct bnx2x *bp = netdev_priv(dev);
11570 rtnl_lock();
11572 if (pci_enable_device(pdev)) {
11573 dev_err(&pdev->dev,
11574 "Cannot re-enable PCI device after reset\n");
11575 rtnl_unlock();
11576 return PCI_ERS_RESULT_DISCONNECT;
11577 }
11579 pci_set_master(pdev);
11580 pci_restore_state(pdev);
11582 if (netif_running(dev))
11583 bnx2x_set_power_state(bp, PCI_D0);
11585 rtnl_unlock();
11587 return PCI_ERS_RESULT_RECOVERED;
11588 }
11590 /**
11591 * bnx2x_io_resume - called when traffic can start flowing again
11592 * @pdev: Pointer to PCI device
11593 *
11594 * This callback is called when the error recovery driver tells us that
11595 * it's OK to resume normal operation.
11596 */
11597 static void bnx2x_io_resume(struct pci_dev *pdev)
11598 {
11599 struct net_device *dev = pci_get_drvdata(pdev);
11600 struct bnx2x *bp = netdev_priv(dev);
11602 rtnl_lock();
11604 bnx2x_eeh_recover(bp);
11606 if (netif_running(dev))
11607 bnx2x_nic_load(bp, LOAD_NORMAL);
11609 netif_device_attach(dev);
11611 rtnl_unlock();
11612 }
11614 static struct pci_error_handlers bnx2x_err_handler = {
11615 .error_detected = bnx2x_io_error_detected,
11616 .slot_reset = bnx2x_io_slot_reset,
11617 .resume = bnx2x_io_resume,
11618 };
11620 static struct pci_driver bnx2x_pci_driver = {
11621 .name = DRV_MODULE_NAME,
11622 .id_table = bnx2x_pci_tbl,
11623 .probe = bnx2x_init_one,
11624 .remove = __devexit_p(bnx2x_remove_one),
11625 .suspend = bnx2x_suspend,
11626 .resume = bnx2x_resume,
11627 .err_handler = &bnx2x_err_handler,
11628 };
11630 static int __init bnx2x_init(void)
11631 {
11632 int ret;
11634 bnx2x_wq = create_singlethread_workqueue("bnx2x");
11635 if (bnx2x_wq == NULL) {
11636 printk(KERN_ERR PFX "Cannot create workqueue\n");
11637 return -ENOMEM;
11638 }
11640 ret = pci_register_driver(&bnx2x_pci_driver);
11641 if (ret) {
11642 printk(KERN_ERR PFX "Cannot register driver\n");
11643 destroy_workqueue(bnx2x_wq);
11644 }
11645 return ret;
11646 }
11648 static void __exit bnx2x_cleanup(void)
11649 {
11650 pci_unregister_driver(&bnx2x_pci_driver);
11652 destroy_workqueue(bnx2x_wq);
11653 }
11655 module_init(bnx2x_init);
11656 module_exit(bnx2x_cleanup);