bnx2x: Block nvram access when the device is inactive
drivers/net/bnx2x_main.c (blob 8d44404ce081ef4a33dc9ab25e0dd120615af29e)
1 /* bnx2x_main.c: Broadcom Everest network driver.
3 * Copyright (c) 2007-2008 Broadcom Corporation
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
9 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
10 * Written by: Eliezer Tamir
11 * Based on code from Michael Chan's bnx2 driver
12 * UDP CSUM errata workaround by Arik Gendelman
13 * Slowpath rework by Vladislav Zolotarov
14 * Statistics and Link management by Yitchak Gertner
18 #include <linux/module.h>
19 #include <linux/moduleparam.h>
20 #include <linux/kernel.h>
21 #include <linux/device.h> /* for dev_info() */
22 #include <linux/timer.h>
23 #include <linux/errno.h>
24 #include <linux/ioport.h>
25 #include <linux/slab.h>
26 #include <linux/vmalloc.h>
27 #include <linux/interrupt.h>
28 #include <linux/pci.h>
29 #include <linux/init.h>
30 #include <linux/netdevice.h>
31 #include <linux/etherdevice.h>
32 #include <linux/skbuff.h>
33 #include <linux/dma-mapping.h>
34 #include <linux/bitops.h>
35 #include <linux/irq.h>
36 #include <linux/delay.h>
37 #include <asm/byteorder.h>
38 #include <linux/time.h>
39 #include <linux/ethtool.h>
40 #include <linux/mii.h>
41 #ifdef NETIF_F_HW_VLAN_TX
42 #include <linux/if_vlan.h>
43 #endif
44 #include <net/ip.h>
45 #include <net/tcp.h>
46 #include <net/checksum.h>
47 #include <net/ip6_checksum.h>
48 #include <linux/workqueue.h>
49 #include <linux/crc32.h>
50 #include <linux/crc32c.h>
51 #include <linux/prefetch.h>
52 #include <linux/zlib.h>
53 #include <linux/io.h>
55 #include "bnx2x_reg.h"
56 #include "bnx2x_fw_defs.h"
57 #include "bnx2x_hsi.h"
58 #include "bnx2x_link.h"
59 #include "bnx2x.h"
60 #include "bnx2x_init.h"
62 #define DRV_MODULE_VERSION "1.45.21"
63 #define DRV_MODULE_RELDATE "2008/09/03"
64 #define BNX2X_BC_VER 0x040200
66 /* Time in jiffies before concluding the transmitter is hung */
67 #define TX_TIMEOUT (5*HZ)
69 static char version[] __devinitdata =
70 "Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
71 DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
73 MODULE_AUTHOR("Eliezer Tamir");
74 MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710 Driver");
75 MODULE_LICENSE("GPL");
76 MODULE_VERSION(DRV_MODULE_VERSION);
78 static int disable_tpa;
79 static int use_inta;
80 static int poll;
81 static int debug;
82 static int load_count[3]; /* 0-common, 1-port0, 2-port1 */
83 static int use_multi;
85 module_param(disable_tpa, int, 0);
86 module_param(use_inta, int, 0);
87 module_param(poll, int, 0);
88 module_param(debug, int, 0);
89 MODULE_PARM_DESC(disable_tpa, "disable the TPA (LRO) feature");
90 MODULE_PARM_DESC(use_inta, "use INT#A instead of MSI-X");
91 MODULE_PARM_DESC(poll, "use polling (for debug)");
92 MODULE_PARM_DESC(debug, "default debug msglevel");
94 #ifdef BNX2X_MULTI
95 module_param(use_multi, int, 0);
96 MODULE_PARM_DESC(use_multi, "use per-CPU queues");
97 #endif
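/* Editor's note (illustrative, not part of the driver): all of the above are
 * plain integer module parameters, so a debug session might load the module
 * with something like "modprobe bnx2x disable_tpa=1 use_inta=1 debug=0x7",
 * where debug is an msglevel bitmask; the values shown are arbitrary
 * examples, not recommended settings. */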
99 enum bnx2x_board_type {
100 BCM57710 = 0,
101 BCM57711 = 1,
102 BCM57711E = 2,
105 /* indexed by board_type, above */
106 static struct {
107 char *name;
108 } board_info[] __devinitdata = {
109 { "Broadcom NetXtreme II BCM57710 XGb" },
110 { "Broadcom NetXtreme II BCM57711 XGb" },
111 { "Broadcom NetXtreme II BCM57711E XGb" }
115 static const struct pci_device_id bnx2x_pci_tbl[] = {
116 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57710,
117 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57710 },
118 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711,
119 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711 },
120 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711E,
121 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711E },
122 { 0 }
125 MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);
127 /****************************************************************************
128 * General service functions
129 ****************************************************************************/
131 /* used only at init
132 * locking is done by mcp
134 static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
136 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
137 pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
138 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
139 PCICFG_VENDOR_ID_OFFSET);
142 static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
144 u32 val;
146 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
147 pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
148 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
149 PCICFG_VENDOR_ID_OFFSET);
151 return val;
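/* Editor's sketch (illustrative only, guarded out of the build): the indirect
 * pair above tunnels GRC accesses through PCI config space and is meant for
 * early init, before the BAR-mapped window is usable.  The helper name and
 * the register offset are placeholders, not driver code. */
#if 0
static void bnx2x_example_indirect_rmw(struct bnx2x *bp)
{
	u32 val;

	val = bnx2x_reg_rd_ind(bp, 0x1000 /* placeholder GRC offset */);
	bnx2x_reg_wr_ind(bp, 0x1000 /* placeholder GRC offset */, val | 0x1);
}
#endif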
154 static const u32 dmae_reg_go_c[] = {
155 DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
156 DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
157 DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
158 DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
161 /* copy command into DMAE command memory and set DMAE command go */
162 static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
163 int idx)
165 u32 cmd_offset;
166 int i;
168 cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
169 for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
170 REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));
172 DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
173 idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
175 REG_WR(bp, dmae_reg_go_c[idx], 1);
178 void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
179 u32 len32)
181 struct dmae_command *dmae = &bp->init_dmae;
182 u32 *wb_comp = bnx2x_sp(bp, wb_comp);
183 int cnt = 200;
185 if (!bp->dmae_ready) {
186 u32 *data = bnx2x_sp(bp, wb_data[0]);
188 DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x len32 %d)"
189 " using indirect\n", dst_addr, len32);
190 bnx2x_init_ind_wr(bp, dst_addr, data, len32);
191 return;
194 mutex_lock(&bp->dmae_mutex);
196 memset(dmae, 0, sizeof(struct dmae_command));
198 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
199 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
200 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
201 #ifdef __BIG_ENDIAN
202 DMAE_CMD_ENDIANITY_B_DW_SWAP |
203 #else
204 DMAE_CMD_ENDIANITY_DW_SWAP |
205 #endif
206 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
207 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
208 dmae->src_addr_lo = U64_LO(dma_addr);
209 dmae->src_addr_hi = U64_HI(dma_addr);
210 dmae->dst_addr_lo = dst_addr >> 2;
211 dmae->dst_addr_hi = 0;
212 dmae->len = len32;
213 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
214 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
215 dmae->comp_val = DMAE_COMP_VAL;
217 DP(BNX2X_MSG_OFF, "dmae: opcode 0x%08x\n"
218 DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
219 "dst_addr [%x:%08x (%08x)]\n"
220 DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
221 dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
222 dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, dst_addr,
223 dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);
224 DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
225 bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
226 bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
228 *wb_comp = 0;
230 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
232 udelay(5);
234 while (*wb_comp != DMAE_COMP_VAL) {
235 DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);
237 if (!cnt) {
238 BNX2X_ERR("dmae timeout!\n");
239 break;
241 cnt--;
242 /* adjust delay for emulation/FPGA */
243 if (CHIP_REV_IS_SLOW(bp))
244 msleep(100);
245 else
246 udelay(5);
249 mutex_unlock(&bp->dmae_mutex);
252 void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
254 struct dmae_command *dmae = &bp->init_dmae;
255 u32 *wb_comp = bnx2x_sp(bp, wb_comp);
256 int cnt = 200;
258 if (!bp->dmae_ready) {
259 u32 *data = bnx2x_sp(bp, wb_data[0]);
260 int i;
262 DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x len32 %d)"
263 " using indirect\n", src_addr, len32);
264 for (i = 0; i < len32; i++)
265 data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
266 return;
269 mutex_lock(&bp->dmae_mutex);
271 memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
272 memset(dmae, 0, sizeof(struct dmae_command));
274 dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
275 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
276 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
277 #ifdef __BIG_ENDIAN
278 DMAE_CMD_ENDIANITY_B_DW_SWAP |
279 #else
280 DMAE_CMD_ENDIANITY_DW_SWAP |
281 #endif
282 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
283 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
284 dmae->src_addr_lo = src_addr >> 2;
285 dmae->src_addr_hi = 0;
286 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
287 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
288 dmae->len = len32;
289 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
290 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
291 dmae->comp_val = DMAE_COMP_VAL;
293 DP(BNX2X_MSG_OFF, "dmae: opcode 0x%08x\n"
294 DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
295 "dst_addr [%x:%08x (%08x)]\n"
296 DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
297 dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
298 dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, src_addr,
299 dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);
301 *wb_comp = 0;
303 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
305 udelay(5);
307 while (*wb_comp != DMAE_COMP_VAL) {
309 if (!cnt) {
310 BNX2X_ERR("dmae timeout!\n");
311 break;
313 cnt--;
314 /* adjust delay for emulation/FPGA */
315 if (CHIP_REV_IS_SLOW(bp))
316 msleep(100);
317 else
318 udelay(5);
320 DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
321 bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
322 bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
324 mutex_unlock(&bp->dmae_mutex);
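/* Editor's sketch (illustrative only, guarded out of the build): a DMAE read
 * lands in the slowpath write-back buffer, so a caller wanting a 64-bit value
 * reads two dwords and recombines them from bp->slowpath->wb_data[].  The
 * helper name is a placeholder, not driver code. */
#if 0
static u64 bnx2x_example_dmae_read64(struct bnx2x *bp, u32 grc_off)
{
	bnx2x_read_dmae(bp, grc_off, 2);	/* 2 dwords */
	return HILO_U64(bp->slowpath->wb_data[0], bp->slowpath->wb_data[1]);
}
#endif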
327 /* used only for slowpath so not inlined */
328 static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
330 u32 wb_write[2];
332 wb_write[0] = val_hi;
333 wb_write[1] = val_lo;
334 REG_WR_DMAE(bp, reg, wb_write, 2);
337 #ifdef USE_WB_RD
338 static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
340 u32 wb_data[2];
342 REG_RD_DMAE(bp, reg, wb_data, 2);
344 return HILO_U64(wb_data[0], wb_data[1]);
346 #endif
348 static int bnx2x_mc_assert(struct bnx2x *bp)
350 char last_idx;
351 int i, rc = 0;
352 u32 row0, row1, row2, row3;
354 /* XSTORM */
355 last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
356 XSTORM_ASSERT_LIST_INDEX_OFFSET);
357 if (last_idx)
358 BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
360 /* print the asserts */
361 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
363 row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
364 XSTORM_ASSERT_LIST_OFFSET(i));
365 row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
366 XSTORM_ASSERT_LIST_OFFSET(i) + 4);
367 row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
368 XSTORM_ASSERT_LIST_OFFSET(i) + 8);
369 row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
370 XSTORM_ASSERT_LIST_OFFSET(i) + 12);
372 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
373 BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
374 " 0x%08x 0x%08x 0x%08x\n",
375 i, row3, row2, row1, row0);
376 rc++;
377 } else {
378 break;
382 /* TSTORM */
383 last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
384 TSTORM_ASSERT_LIST_INDEX_OFFSET);
385 if (last_idx)
386 BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
388 /* print the asserts */
389 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
391 row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
392 TSTORM_ASSERT_LIST_OFFSET(i));
393 row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
394 TSTORM_ASSERT_LIST_OFFSET(i) + 4);
395 row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
396 TSTORM_ASSERT_LIST_OFFSET(i) + 8);
397 row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
398 TSTORM_ASSERT_LIST_OFFSET(i) + 12);
400 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
401 BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
402 " 0x%08x 0x%08x 0x%08x\n",
403 i, row3, row2, row1, row0);
404 rc++;
405 } else {
406 break;
410 /* CSTORM */
411 last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
412 CSTORM_ASSERT_LIST_INDEX_OFFSET);
413 if (last_idx)
414 BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
416 /* print the asserts */
417 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
419 row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
420 CSTORM_ASSERT_LIST_OFFSET(i));
421 row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
422 CSTORM_ASSERT_LIST_OFFSET(i) + 4);
423 row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
424 CSTORM_ASSERT_LIST_OFFSET(i) + 8);
425 row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
426 CSTORM_ASSERT_LIST_OFFSET(i) + 12);
428 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
429 BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
430 " 0x%08x 0x%08x 0x%08x\n",
431 i, row3, row2, row1, row0);
432 rc++;
433 } else {
434 break;
438 /* USTORM */
439 last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
440 USTORM_ASSERT_LIST_INDEX_OFFSET);
441 if (last_idx)
442 BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
444 /* print the asserts */
445 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
447 row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
448 USTORM_ASSERT_LIST_OFFSET(i));
449 row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
450 USTORM_ASSERT_LIST_OFFSET(i) + 4);
451 row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
452 USTORM_ASSERT_LIST_OFFSET(i) + 8);
453 row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
454 USTORM_ASSERT_LIST_OFFSET(i) + 12);
456 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
457 BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
458 " 0x%08x 0x%08x 0x%08x\n",
459 i, row3, row2, row1, row0);
460 rc++;
461 } else {
462 break;
466 return rc;
469 static void bnx2x_fw_dump(struct bnx2x *bp)
471 u32 mark, offset;
472 u32 data[9];
473 int word;
475 mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104);
476 mark = ((mark + 0x3) & ~0x3);
477 printk(KERN_ERR PFX "begin fw dump (mark 0x%x)\n" KERN_ERR, mark);
479 for (offset = mark - 0x08000000; offset <= 0xF900; offset += 0x8*4) {
480 for (word = 0; word < 8; word++)
481 data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
482 offset + 4*word));
483 data[8] = 0x0;
484 printk(KERN_CONT "%s", (char *)data);
486 for (offset = 0xF108; offset <= mark - 0x08000000; offset += 0x8*4) {
487 for (word = 0; word < 8; word++)
488 data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
489 offset + 4*word));
490 data[8] = 0x0;
491 printk(KERN_CONT "%s", (char *)data);
493 printk("\n" KERN_ERR PFX "end of fw dump\n");
496 static void bnx2x_panic_dump(struct bnx2x *bp)
498 int i;
499 u16 j, start, end;
501 bp->stats_state = STATS_STATE_DISABLED;
502 DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
504 BNX2X_ERR("begin crash dump -----------------\n");
506 for_each_queue(bp, i) {
507 struct bnx2x_fastpath *fp = &bp->fp[i];
508 struct eth_tx_db_data *hw_prods = fp->hw_tx_prods;
510 BNX2X_ERR("queue[%d]: tx_pkt_prod(%x) tx_pkt_cons(%x)"
511 " tx_bd_prod(%x) tx_bd_cons(%x) *tx_cons_sb(%x)\n",
512 i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
513 fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
514 BNX2X_ERR(" rx_bd_prod(%x) rx_bd_cons(%x)"
515 " *rx_bd_cons_sb(%x) rx_comp_prod(%x)"
516 " rx_comp_cons(%x) *rx_cons_sb(%x)\n",
517 fp->rx_bd_prod, fp->rx_bd_cons,
518 le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
519 fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
520 BNX2X_ERR(" rx_sge_prod(%x) last_max_sge(%x)"
521 " fp_c_idx(%x) *sb_c_idx(%x) fp_u_idx(%x)"
522 " *sb_u_idx(%x) bd data(%x,%x)\n",
523 fp->rx_sge_prod, fp->last_max_sge, fp->fp_c_idx,
524 fp->status_blk->c_status_block.status_block_index,
525 fp->fp_u_idx,
526 fp->status_blk->u_status_block.status_block_index,
527 hw_prods->packets_prod, hw_prods->bds_prod);
529 start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
530 end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
531 for (j = start; j < end; j++) {
532 struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];
534 BNX2X_ERR("packet[%x]=[%p,%x]\n", j,
535 sw_bd->skb, sw_bd->first_bd);
538 start = TX_BD(fp->tx_bd_cons - 10);
539 end = TX_BD(fp->tx_bd_cons + 254);
540 for (j = start; j < end; j++) {
541 u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];
543 BNX2X_ERR("tx_bd[%x]=[%x:%x:%x:%x]\n",
544 j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
547 start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
548 end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
549 for (j = start; j < end; j++) {
550 u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
551 struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];
553 BNX2X_ERR("rx_bd[%x]=[%x:%x] sw_bd=[%p]\n",
554 j, rx_bd[1], rx_bd[0], sw_bd->skb);
557 start = RX_SGE(fp->rx_sge_prod);
558 end = RX_SGE(fp->last_max_sge);
559 for (j = start; j < end; j++) {
560 u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
561 struct sw_rx_page *sw_page = &fp->rx_page_ring[j];
563 BNX2X_ERR("rx_sge[%x]=[%x:%x] sw_page=[%p]\n",
564 j, rx_sge[1], rx_sge[0], sw_page->page);
567 start = RCQ_BD(fp->rx_comp_cons - 10);
568 end = RCQ_BD(fp->rx_comp_cons + 503);
569 for (j = start; j < end; j++) {
570 u32 *cqe = (u32 *)&fp->rx_comp_ring[j];
572 BNX2X_ERR("cqe[%x]=[%x:%x:%x:%x]\n",
573 j, cqe[0], cqe[1], cqe[2], cqe[3]);
577 BNX2X_ERR("def_c_idx(%u) def_u_idx(%u) def_x_idx(%u)"
578 " def_t_idx(%u) def_att_idx(%u) attn_state(%u)"
579 " spq_prod_idx(%u)\n",
580 bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
581 bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);
583 bnx2x_fw_dump(bp);
584 bnx2x_mc_assert(bp);
585 BNX2X_ERR("end crash dump -----------------\n");
588 static void bnx2x_int_enable(struct bnx2x *bp)
590 int port = BP_PORT(bp);
591 u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
592 u32 val = REG_RD(bp, addr);
593 int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
595 if (msix) {
596 val &= ~HC_CONFIG_0_REG_SINGLE_ISR_EN_0;
597 val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
598 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
599 } else {
600 val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
601 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
602 HC_CONFIG_0_REG_INT_LINE_EN_0 |
603 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
605 DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x) MSI-X %d\n",
606 val, port, addr, msix);
608 REG_WR(bp, addr, val);
610 val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
613 DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x) MSI-X %d\n",
614 val, port, addr, msix);
616 REG_WR(bp, addr, val);
618 if (CHIP_IS_E1H(bp)) {
619 /* init leading/trailing edge */
620 if (IS_E1HMF(bp)) {
621 val = (0xfe0f | (1 << (BP_E1HVN(bp) + 4)));
622 if (bp->port.pmf)
623 /* enable nig attention */
624 val |= 0x0100;
625 } else
626 val = 0xffff;
628 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
629 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
633 static void bnx2x_int_disable(struct bnx2x *bp)
635 int port = BP_PORT(bp);
636 u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
637 u32 val = REG_RD(bp, addr);
639 val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
640 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
641 HC_CONFIG_0_REG_INT_LINE_EN_0 |
642 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
644 DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
645 val, port, addr);
647 REG_WR(bp, addr, val);
648 if (REG_RD(bp, addr) != val)
649 BNX2X_ERR("BUG! proper val not read from IGU!\n");
652 static void bnx2x_int_disable_sync(struct bnx2x *bp)
654 int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
655 int i;
657 /* disable interrupt handling */
658 atomic_inc(&bp->intr_sem);
659 /* prevent the HW from sending interrupts */
660 bnx2x_int_disable(bp);
662 /* make sure all ISRs are done */
663 if (msix) {
664 for_each_queue(bp, i)
665 synchronize_irq(bp->msix_table[i].vector);
667 /* one more for the Slow Path IRQ */
668 synchronize_irq(bp->msix_table[i].vector);
669 } else
670 synchronize_irq(bp->pdev->irq);
672 /* make sure sp_task is not running */
673 cancel_work_sync(&bp->sp_task);
676 /* fast path */
679 * General service functions
682 static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
683 u8 storm, u16 index, u8 op, u8 update)
685 u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
686 COMMAND_REG_INT_ACK);
687 struct igu_ack_register igu_ack;
689 igu_ack.status_block_index = index;
690 igu_ack.sb_id_and_flags =
691 ((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
692 (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
693 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
694 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));
696 DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n",
697 (*(u32 *)&igu_ack), hc_addr);
698 REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));
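/* Editor's sketch (illustrative only, guarded out of the build): the MSI-X
 * fast-path ISR later in this file acks with IGU_INT_DISABLE before
 * scheduling NAPI; a poll routine would normally finish by acking the latest
 * USTORM/CSTORM indices with IGU_INT_ENABLE, roughly as below.  The helper
 * name is a placeholder and the IGU/storm constants are assumed to come from
 * bnx2x.h. */
#if 0
static void bnx2x_example_reenable_sb(struct bnx2x *bp,
				      struct bnx2x_fastpath *fp)
{
	bnx2x_ack_sb(bp, FP_SB_ID(fp), USTORM_ID,
		     le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1);
	bnx2x_ack_sb(bp, FP_SB_ID(fp), CSTORM_ID,
		     le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1);
}
#endif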
701 static inline u16 bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
703 struct host_status_block *fpsb = fp->status_blk;
704 u16 rc = 0;
706 barrier(); /* status block is written to by the chip */
707 if (fp->fp_c_idx != fpsb->c_status_block.status_block_index) {
708 fp->fp_c_idx = fpsb->c_status_block.status_block_index;
709 rc |= 1;
711 if (fp->fp_u_idx != fpsb->u_status_block.status_block_index) {
712 fp->fp_u_idx = fpsb->u_status_block.status_block_index;
713 rc |= 2;
715 return rc;
718 static u16 bnx2x_ack_int(struct bnx2x *bp)
720 u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
721 COMMAND_REG_SIMD_MASK);
722 u32 result = REG_RD(bp, hc_addr);
724 DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
725 result, hc_addr);
727 return result;
732 * fast path service functions
735 /* free skb in the packet ring at pos idx
736 * return idx of last bd freed
738 static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
739 u16 idx)
741 struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
742 struct eth_tx_bd *tx_bd;
743 struct sk_buff *skb = tx_buf->skb;
744 u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
745 int nbd;
747 DP(BNX2X_MSG_OFF, "pkt_idx %d buff @(%p)->skb %p\n",
748 idx, tx_buf, skb);
750 /* unmap first bd */
751 DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
752 tx_bd = &fp->tx_desc_ring[bd_idx];
753 pci_unmap_single(bp->pdev, BD_UNMAP_ADDR(tx_bd),
754 BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);
756 nbd = le16_to_cpu(tx_bd->nbd) - 1;
757 new_cons = nbd + tx_buf->first_bd;
758 #ifdef BNX2X_STOP_ON_ERROR
759 if (nbd > (MAX_SKB_FRAGS + 2)) {
760 BNX2X_ERR("BAD nbd!\n");
761 bnx2x_panic();
763 #endif
765 /* Skip a parse bd and the TSO split header bd
766 since they have no mapping */
767 if (nbd)
768 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
770 if (tx_bd->bd_flags.as_bitfield & (ETH_TX_BD_FLAGS_IP_CSUM |
771 ETH_TX_BD_FLAGS_TCP_CSUM |
772 ETH_TX_BD_FLAGS_SW_LSO)) {
773 if (--nbd)
774 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
775 tx_bd = &fp->tx_desc_ring[bd_idx];
776 /* is this a TSO split header bd? */
777 if (tx_bd->bd_flags.as_bitfield & ETH_TX_BD_FLAGS_SW_LSO) {
778 if (--nbd)
779 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
783 /* now free frags */
784 while (nbd > 0) {
786 DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
787 tx_bd = &fp->tx_desc_ring[bd_idx];
788 pci_unmap_page(bp->pdev, BD_UNMAP_ADDR(tx_bd),
789 BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);
790 if (--nbd)
791 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
794 /* release skb */
795 WARN_ON(!skb);
796 dev_kfree_skb(skb);
797 tx_buf->first_bd = 0;
798 tx_buf->skb = NULL;
800 return new_cons;
803 static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
805 s16 used;
806 u16 prod;
807 u16 cons;
809 barrier(); /* Tell compiler that prod and cons can change */
810 prod = fp->tx_bd_prod;
811 cons = fp->tx_bd_cons;
813 /* NUM_TX_RINGS = number of "next-page" entries
814 It will be used as a threshold */
815 used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;
817 #ifdef BNX2X_STOP_ON_ERROR
818 WARN_ON(used < 0);
819 WARN_ON(used > fp->bp->tx_ring_size);
820 WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL);
821 #endif
823 return (s16)(fp->bp->tx_ring_size) - used;
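/* Editor's note (illustrative): the NUM_TX_RINGS "next-page" entries are
 * counted as used so they can never be handed out; e.g. with
 * tx_ring_size = 4096, prod = 110, cons = 100 and NUM_TX_RINGS = 16,
 * used = 10 + 16 and the function reports 4070 free BDs.  The numbers are
 * made up for illustration. */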
826 static void bnx2x_tx_int(struct bnx2x_fastpath *fp, int work)
828 struct bnx2x *bp = fp->bp;
829 u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
830 int done = 0;
832 #ifdef BNX2X_STOP_ON_ERROR
833 if (unlikely(bp->panic))
834 return;
835 #endif
837 hw_cons = le16_to_cpu(*fp->tx_cons_sb);
838 sw_cons = fp->tx_pkt_cons;
840 while (sw_cons != hw_cons) {
841 u16 pkt_cons;
843 pkt_cons = TX_BD(sw_cons);
845 /* prefetch(bp->tx_buf_ring[pkt_cons].skb); */
847 DP(NETIF_MSG_TX_DONE, "hw_cons %u sw_cons %u pkt_cons %u\n",
848 hw_cons, sw_cons, pkt_cons);
850 /* if (NEXT_TX_IDX(sw_cons) != hw_cons) {
851 rmb();
852 prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
855 bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
856 sw_cons++;
857 done++;
859 if (done == work)
860 break;
863 fp->tx_pkt_cons = sw_cons;
864 fp->tx_bd_cons = bd_cons;
866 /* Need to make the tx_cons update visible to start_xmit()
867 * before checking for netif_queue_stopped(). Without the
868 * memory barrier, there is a small possibility that start_xmit()
869 * will miss it and cause the queue to be stopped forever.
871 smp_mb();
873 /* TBD need a thresh? */
874 if (unlikely(netif_queue_stopped(bp->dev))) {
876 netif_tx_lock(bp->dev);
878 if (netif_queue_stopped(bp->dev) &&
879 (bp->state == BNX2X_STATE_OPEN) &&
880 (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
881 netif_wake_queue(bp->dev);
883 netif_tx_unlock(bp->dev);
888 static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
889 union eth_rx_cqe *rr_cqe)
891 struct bnx2x *bp = fp->bp;
892 int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
893 int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);
895 DP(BNX2X_MSG_SP,
896 "fp %d cid %d got ramrod #%d state is %x type is %d\n",
897 FP_IDX(fp), cid, command, bp->state,
898 rr_cqe->ramrod_cqe.ramrod_type);
900 bp->spq_left++;
902 if (FP_IDX(fp)) {
903 switch (command | fp->state) {
904 case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
905 BNX2X_FP_STATE_OPENING):
906 DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
907 cid);
908 fp->state = BNX2X_FP_STATE_OPEN;
909 break;
911 case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
912 DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
913 cid);
914 fp->state = BNX2X_FP_STATE_HALTED;
915 break;
917 default:
918 BNX2X_ERR("unexpected MC reply (%d) "
919 "fp->state is %x\n", command, fp->state);
920 break;
922 mb(); /* force bnx2x_wait_ramrod() to see the change */
923 return;
926 switch (command | bp->state) {
927 case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
928 DP(NETIF_MSG_IFUP, "got setup ramrod\n");
929 bp->state = BNX2X_STATE_OPEN;
930 break;
932 case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
933 DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
934 bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
935 fp->state = BNX2X_FP_STATE_HALTED;
936 break;
938 case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
939 DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
940 bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
941 break;
944 case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
945 case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
946 DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
947 bp->set_mac_pending = 0;
948 break;
950 case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
951 DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
952 break;
954 default:
955 BNX2X_ERR("unexpected MC reply (%d) bp->state is %x\n",
956 command, bp->state);
957 break;
959 mb(); /* force bnx2x_wait_ramrod() to see the change */
962 static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
963 struct bnx2x_fastpath *fp, u16 index)
965 struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
966 struct page *page = sw_buf->page;
967 struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
969 /* Skip "next page" elements */
970 if (!page)
971 return;
973 pci_unmap_page(bp->pdev, pci_unmap_addr(sw_buf, mapping),
974 BCM_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
975 __free_pages(page, PAGES_PER_SGE_SHIFT);
977 sw_buf->page = NULL;
978 sge->addr_hi = 0;
979 sge->addr_lo = 0;
982 static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
983 struct bnx2x_fastpath *fp, int last)
985 int i;
987 for (i = 0; i < last; i++)
988 bnx2x_free_rx_sge(bp, fp, i);
991 static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
992 struct bnx2x_fastpath *fp, u16 index)
994 struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
995 struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
996 struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
997 dma_addr_t mapping;
999 if (unlikely(page == NULL))
1000 return -ENOMEM;
1002 mapping = pci_map_page(bp->pdev, page, 0, BCM_PAGE_SIZE*PAGES_PER_SGE,
1003 PCI_DMA_FROMDEVICE);
1004 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
1005 __free_pages(page, PAGES_PER_SGE_SHIFT);
1006 return -ENOMEM;
1009 sw_buf->page = page;
1010 pci_unmap_addr_set(sw_buf, mapping, mapping);
1012 sge->addr_hi = cpu_to_le32(U64_HI(mapping));
1013 sge->addr_lo = cpu_to_le32(U64_LO(mapping));
1015 return 0;
1018 static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
1019 struct bnx2x_fastpath *fp, u16 index)
1021 struct sk_buff *skb;
1022 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
1023 struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
1024 dma_addr_t mapping;
1026 skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
1027 if (unlikely(skb == NULL))
1028 return -ENOMEM;
1030 mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_size,
1031 PCI_DMA_FROMDEVICE);
1032 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
1033 dev_kfree_skb(skb);
1034 return -ENOMEM;
1037 rx_buf->skb = skb;
1038 pci_unmap_addr_set(rx_buf, mapping, mapping);
1040 rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
1041 rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
1043 return 0;
1046 /* note that we are not allocating a new skb,
1047 * we are just moving one from cons to prod
1048 * we are not creating a new mapping,
1049 * so there is no need to check for dma_mapping_error().
1051 static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
1052 struct sk_buff *skb, u16 cons, u16 prod)
1054 struct bnx2x *bp = fp->bp;
1055 struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
1056 struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
1057 struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
1058 struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
1060 pci_dma_sync_single_for_device(bp->pdev,
1061 pci_unmap_addr(cons_rx_buf, mapping),
1062 bp->rx_offset + RX_COPY_THRESH,
1063 PCI_DMA_FROMDEVICE);
1065 prod_rx_buf->skb = cons_rx_buf->skb;
1066 pci_unmap_addr_set(prod_rx_buf, mapping,
1067 pci_unmap_addr(cons_rx_buf, mapping));
1068 *prod_bd = *cons_bd;
1071 static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
1072 u16 idx)
1074 u16 last_max = fp->last_max_sge;
1076 if (SUB_S16(idx, last_max) > 0)
1077 fp->last_max_sge = idx;
1080 static void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
1082 int i, j;
1084 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
1085 int idx = RX_SGE_CNT * i - 1;
1087 for (j = 0; j < 2; j++) {
1088 SGE_MASK_CLEAR_BIT(fp, idx);
1089 idx--;
1094 static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
1095 struct eth_fast_path_rx_cqe *fp_cqe)
1097 struct bnx2x *bp = fp->bp;
1098 u16 sge_len = BCM_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
1099 le16_to_cpu(fp_cqe->len_on_bd)) >>
1100 BCM_PAGE_SHIFT;
1101 u16 last_max, last_elem, first_elem;
1102 u16 delta = 0;
1103 u16 i;
1105 if (!sge_len)
1106 return;
1108 /* First mark all used pages */
1109 for (i = 0; i < sge_len; i++)
1110 SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i])));
1112 DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
1113 sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));
1115 /* Here we assume that the last SGE index is the biggest */
1116 prefetch((void *)(fp->sge_mask));
1117 bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));
1119 last_max = RX_SGE(fp->last_max_sge);
1120 last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
1121 first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;
1123 /* If ring is not full */
1124 if (last_elem + 1 != first_elem)
1125 last_elem++;
1127 /* Now update the prod */
1128 for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
1129 if (likely(fp->sge_mask[i]))
1130 break;
1132 fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
1133 delta += RX_SGE_MASK_ELEM_SZ;
1136 if (delta > 0) {
1137 fp->rx_sge_prod += delta;
1138 /* clear page-end entries */
1139 bnx2x_clear_sge_mask_next_elems(fp);
1142 DP(NETIF_MSG_RX_STATUS,
1143 "fp->last_max_sge = %d fp->rx_sge_prod = %d\n",
1144 fp->last_max_sge, fp->rx_sge_prod);
1147 static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
1149 /* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
1150 memset(fp->sge_mask, 0xff,
1151 (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));
1153 /* Clear the two last indices in the page to 1:
1154 these are the indices that correspond to the "next" element,
1155 hence will never be indicated and should be removed from
1156 the calculations. */
1157 bnx2x_clear_sge_mask_next_elems(fp);
1160 static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
1161 struct sk_buff *skb, u16 cons, u16 prod)
1163 struct bnx2x *bp = fp->bp;
1164 struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
1165 struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
1166 struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
1167 dma_addr_t mapping;
1169 /* move empty skb from pool to prod and map it */
1170 prod_rx_buf->skb = fp->tpa_pool[queue].skb;
1171 mapping = pci_map_single(bp->pdev, fp->tpa_pool[queue].skb->data,
1172 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
1173 pci_unmap_addr_set(prod_rx_buf, mapping, mapping);
1175 /* move partial skb from cons to pool (don't unmap yet) */
1176 fp->tpa_pool[queue] = *cons_rx_buf;
1178 /* mark bin state as start - print error if current state != stop */
1179 if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
1180 BNX2X_ERR("start of bin not in stop [%d]\n", queue);
1182 fp->tpa_state[queue] = BNX2X_TPA_START;
1184 /* point prod_bd to new skb */
1185 prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
1186 prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
1188 #ifdef BNX2X_STOP_ON_ERROR
1189 fp->tpa_queue_used |= (1 << queue);
1190 #ifdef __powerpc64__
1191 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
1192 #else
1193 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
1194 #endif
1195 fp->tpa_queue_used);
1196 #endif
1199 static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
1200 struct sk_buff *skb,
1201 struct eth_fast_path_rx_cqe *fp_cqe,
1202 u16 cqe_idx)
1204 struct sw_rx_page *rx_pg, old_rx_pg;
1205 struct page *sge;
1206 u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
1207 u32 i, frag_len, frag_size, pages;
1208 int err;
1209 int j;
1211 frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
1212 pages = BCM_PAGE_ALIGN(frag_size) >> BCM_PAGE_SHIFT;
1214 /* This is needed in order to enable forwarding support */
1215 if (frag_size)
1216 skb_shinfo(skb)->gso_size = min((u32)BCM_PAGE_SIZE,
1217 max(frag_size, (u32)len_on_bd));
1219 #ifdef BNX2X_STOP_ON_ERROR
1220 if (pages > 8*PAGES_PER_SGE) {
1221 BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
1222 pages, cqe_idx);
1223 BNX2X_ERR("fp_cqe->pkt_len = %d fp_cqe->len_on_bd = %d\n",
1224 fp_cqe->pkt_len, len_on_bd);
1225 bnx2x_panic();
1226 return -EINVAL;
1228 #endif
1230 /* Run through the SGL and compose the fragmented skb */
1231 for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
1232 u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j]));
1234 /* FW gives the indices of the SGE as if the ring is an array
1235 (meaning that "next" element will consume 2 indices) */
1236 frag_len = min(frag_size, (u32)(BCM_PAGE_SIZE*PAGES_PER_SGE));
1237 rx_pg = &fp->rx_page_ring[sge_idx];
1238 sge = rx_pg->page;
1239 old_rx_pg = *rx_pg;
1241 /* If we fail to allocate a substitute page, we simply stop
1242 where we are and drop the whole packet */
1243 err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
1244 if (unlikely(err)) {
1245 bp->eth_stats.rx_skb_alloc_failed++;
1246 return err;
1249 /* Unmap the page as we are going to pass it to the stack */
1250 pci_unmap_page(bp->pdev, pci_unmap_addr(&old_rx_pg, mapping),
1251 BCM_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
1253 /* Add one frag and update the appropriate fields in the skb */
1254 skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
1256 skb->data_len += frag_len;
1257 skb->truesize += frag_len;
1258 skb->len += frag_len;
1260 frag_size -= frag_len;
1263 return 0;
1266 static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
1267 u16 queue, int pad, int len, union eth_rx_cqe *cqe,
1268 u16 cqe_idx)
1270 struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
1271 struct sk_buff *skb = rx_buf->skb;
1272 /* alloc new skb */
1273 struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
1275 /* Unmap skb in the pool anyway, as we are going to change
1276 pool entry status to BNX2X_TPA_STOP even if new skb allocation
1277 fails. */
1278 pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
1279 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
1281 if (likely(new_skb)) {
1282 /* fix ip xsum and give it to the stack */
1283 /* (no need to map the new skb) */
1285 prefetch(skb);
1286 prefetch(((char *)(skb)) + 128);
1288 #ifdef BNX2X_STOP_ON_ERROR
1289 if (pad + len > bp->rx_buf_size) {
1290 BNX2X_ERR("skb_put is about to fail... "
1291 "pad %d len %d rx_buf_size %d\n",
1292 pad, len, bp->rx_buf_size);
1293 bnx2x_panic();
1294 return;
1296 #endif
1298 skb_reserve(skb, pad);
1299 skb_put(skb, len);
1301 skb->protocol = eth_type_trans(skb, bp->dev);
1302 skb->ip_summed = CHECKSUM_UNNECESSARY;
1305 struct iphdr *iph;
1307 iph = (struct iphdr *)skb->data;
1308 iph->check = 0;
1309 iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
1312 if (!bnx2x_fill_frag_skb(bp, fp, skb,
1313 &cqe->fast_path_cqe, cqe_idx)) {
1314 #ifdef BCM_VLAN
1315 if ((bp->vlgrp != NULL) &&
1316 (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
1317 PARSING_FLAGS_VLAN))
1318 vlan_hwaccel_receive_skb(skb, bp->vlgrp,
1319 le16_to_cpu(cqe->fast_path_cqe.
1320 vlan_tag));
1321 else
1322 #endif
1323 netif_receive_skb(skb);
1324 } else {
1325 DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
1326 " - dropping packet!\n");
1327 dev_kfree_skb(skb);
1330 bp->dev->last_rx = jiffies;
1332 /* put new skb in bin */
1333 fp->tpa_pool[queue].skb = new_skb;
1335 } else {
1336 /* else drop the packet and keep the buffer in the bin */
1337 DP(NETIF_MSG_RX_STATUS,
1338 "Failed to allocate new skb - dropping packet!\n");
1339 bp->eth_stats.rx_skb_alloc_failed++;
1342 fp->tpa_state[queue] = BNX2X_TPA_STOP;
1345 static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
1346 struct bnx2x_fastpath *fp,
1347 u16 bd_prod, u16 rx_comp_prod,
1348 u16 rx_sge_prod)
1350 struct tstorm_eth_rx_producers rx_prods = {0};
1351 int i;
1353 /* Update producers */
1354 rx_prods.bd_prod = bd_prod;
1355 rx_prods.cqe_prod = rx_comp_prod;
1356 rx_prods.sge_prod = rx_sge_prod;
1358 for (i = 0; i < sizeof(struct tstorm_eth_rx_producers)/4; i++)
1359 REG_WR(bp, BAR_TSTRORM_INTMEM +
1360 TSTORM_RX_PRODS_OFFSET(BP_PORT(bp), FP_CL_ID(fp)) + i*4,
1361 ((u32 *)&rx_prods)[i]);
1363 DP(NETIF_MSG_RX_STATUS,
1364 "Wrote: bd_prod %u cqe_prod %u sge_prod %u\n",
1365 bd_prod, rx_comp_prod, rx_sge_prod);
1368 static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
1370 struct bnx2x *bp = fp->bp;
1371 u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
1372 u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
1373 int rx_pkt = 0;
1375 #ifdef BNX2X_STOP_ON_ERROR
1376 if (unlikely(bp->panic))
1377 return 0;
1378 #endif
1380 /* CQ "next element" is of the size of the regular element,
1381 that's why it's ok here */
1382 hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
1383 if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
1384 hw_comp_cons++;
1386 bd_cons = fp->rx_bd_cons;
1387 bd_prod = fp->rx_bd_prod;
1388 bd_prod_fw = bd_prod;
1389 sw_comp_cons = fp->rx_comp_cons;
1390 sw_comp_prod = fp->rx_comp_prod;
1392 /* Memory barrier necessary as speculative reads of the rx
1393 * buffer can be ahead of the index in the status block
1395 rmb();
1397 DP(NETIF_MSG_RX_STATUS,
1398 "queue[%d]: hw_comp_cons %u sw_comp_cons %u\n",
1399 FP_IDX(fp), hw_comp_cons, sw_comp_cons);
1401 while (sw_comp_cons != hw_comp_cons) {
1402 struct sw_rx_bd *rx_buf = NULL;
1403 struct sk_buff *skb;
1404 union eth_rx_cqe *cqe;
1405 u8 cqe_fp_flags;
1406 u16 len, pad;
1408 comp_ring_cons = RCQ_BD(sw_comp_cons);
1409 bd_prod = RX_BD(bd_prod);
1410 bd_cons = RX_BD(bd_cons);
1412 cqe = &fp->rx_comp_ring[comp_ring_cons];
1413 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
1415 DP(NETIF_MSG_RX_STATUS, "CQE type %x err %x status %x"
1416 " queue %x vlan %x len %u\n", CQE_TYPE(cqe_fp_flags),
1417 cqe_fp_flags, cqe->fast_path_cqe.status_flags,
1418 cqe->fast_path_cqe.rss_hash_result,
1419 le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
1420 le16_to_cpu(cqe->fast_path_cqe.pkt_len));
1422 /* is this a slowpath msg? */
1423 if (unlikely(CQE_TYPE(cqe_fp_flags))) {
1424 bnx2x_sp_event(fp, cqe);
1425 goto next_cqe;
1427 /* this is an rx packet */
1428 } else {
1429 rx_buf = &fp->rx_buf_ring[bd_cons];
1430 skb = rx_buf->skb;
1431 len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
1432 pad = cqe->fast_path_cqe.placement_offset;
1434 /* If CQE is marked both TPA_START and TPA_END
1435 it is a non-TPA CQE */
1436 if ((!fp->disable_tpa) &&
1437 (TPA_TYPE(cqe_fp_flags) !=
1438 (TPA_TYPE_START | TPA_TYPE_END))) {
1439 u16 queue = cqe->fast_path_cqe.queue_index;
1441 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
1442 DP(NETIF_MSG_RX_STATUS,
1443 "calling tpa_start on queue %d\n",
1444 queue);
1446 bnx2x_tpa_start(fp, queue, skb,
1447 bd_cons, bd_prod);
1448 goto next_rx;
1451 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
1452 DP(NETIF_MSG_RX_STATUS,
1453 "calling tpa_stop on queue %d\n",
1454 queue);
1456 if (!BNX2X_RX_SUM_FIX(cqe))
1457 BNX2X_ERR("STOP on none TCP "
1458 "data\n");
1460 /* This is a size of the linear data
1461 on this skb */
1462 len = le16_to_cpu(cqe->fast_path_cqe.
1463 len_on_bd);
1464 bnx2x_tpa_stop(bp, fp, queue, pad,
1465 len, cqe, comp_ring_cons);
1466 #ifdef BNX2X_STOP_ON_ERROR
1467 if (bp->panic)
1468 return -EINVAL;
1469 #endif
1471 bnx2x_update_sge_prod(fp,
1472 &cqe->fast_path_cqe);
1473 goto next_cqe;
1477 pci_dma_sync_single_for_device(bp->pdev,
1478 pci_unmap_addr(rx_buf, mapping),
1479 pad + RX_COPY_THRESH,
1480 PCI_DMA_FROMDEVICE);
1481 prefetch(skb);
1482 prefetch(((char *)(skb)) + 128);
1484 /* is this an error packet? */
1485 if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
1486 DP(NETIF_MSG_RX_ERR,
1487 "ERROR flags %x rx packet %u\n",
1488 cqe_fp_flags, sw_comp_cons);
1489 bp->eth_stats.rx_err_discard_pkt++;
1490 goto reuse_rx;
1493 /* Since we don't have a jumbo ring
1494 * copy small packets if mtu > 1500
1496 if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
1497 (len <= RX_COPY_THRESH)) {
1498 struct sk_buff *new_skb;
1500 new_skb = netdev_alloc_skb(bp->dev,
1501 len + pad);
1502 if (new_skb == NULL) {
1503 DP(NETIF_MSG_RX_ERR,
1504 "ERROR packet dropped "
1505 "because of alloc failure\n");
1506 bp->eth_stats.rx_skb_alloc_failed++;
1507 goto reuse_rx;
1510 /* aligned copy */
1511 skb_copy_from_linear_data_offset(skb, pad,
1512 new_skb->data + pad, len);
1513 skb_reserve(new_skb, pad);
1514 skb_put(new_skb, len);
1516 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1518 skb = new_skb;
1520 } else if (bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0) {
1521 pci_unmap_single(bp->pdev,
1522 pci_unmap_addr(rx_buf, mapping),
1523 bp->rx_buf_size,
1524 PCI_DMA_FROMDEVICE);
1525 skb_reserve(skb, pad);
1526 skb_put(skb, len);
1528 } else {
1529 DP(NETIF_MSG_RX_ERR,
1530 "ERROR packet dropped because "
1531 "of alloc failure\n");
1532 bp->eth_stats.rx_skb_alloc_failed++;
1533 reuse_rx:
1534 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1535 goto next_rx;
1538 skb->protocol = eth_type_trans(skb, bp->dev);
1540 skb->ip_summed = CHECKSUM_NONE;
1541 if (bp->rx_csum) {
1542 if (likely(BNX2X_RX_CSUM_OK(cqe)))
1543 skb->ip_summed = CHECKSUM_UNNECESSARY;
1544 else
1545 bp->eth_stats.hw_csum_err++;
1549 #ifdef BCM_VLAN
1550 if ((bp->vlgrp != NULL) &&
1551 (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
1552 PARSING_FLAGS_VLAN))
1553 vlan_hwaccel_receive_skb(skb, bp->vlgrp,
1554 le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
1555 else
1556 #endif
1557 netif_receive_skb(skb);
1559 bp->dev->last_rx = jiffies;
1561 next_rx:
1562 rx_buf->skb = NULL;
1564 bd_cons = NEXT_RX_IDX(bd_cons);
1565 bd_prod = NEXT_RX_IDX(bd_prod);
1566 bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
1567 rx_pkt++;
1568 next_cqe:
1569 sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
1570 sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
1572 if (rx_pkt == budget)
1573 break;
1574 } /* while */
1576 fp->rx_bd_cons = bd_cons;
1577 fp->rx_bd_prod = bd_prod_fw;
1578 fp->rx_comp_cons = sw_comp_cons;
1579 fp->rx_comp_prod = sw_comp_prod;
1581 /* Update producers */
1582 bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
1583 fp->rx_sge_prod);
1584 mmiowb(); /* keep prod updates ordered */
1586 fp->rx_pkt += rx_pkt;
1587 fp->rx_calls++;
1589 return rx_pkt;
1592 static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
1594 struct bnx2x_fastpath *fp = fp_cookie;
1595 struct bnx2x *bp = fp->bp;
1596 struct net_device *dev = bp->dev;
1597 int index = FP_IDX(fp);
1599 /* Return here if interrupt is disabled */
1600 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1601 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1602 return IRQ_HANDLED;
1605 DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
1606 index, FP_SB_ID(fp));
1607 bnx2x_ack_sb(bp, FP_SB_ID(fp), USTORM_ID, 0, IGU_INT_DISABLE, 0);
1609 #ifdef BNX2X_STOP_ON_ERROR
1610 if (unlikely(bp->panic))
1611 return IRQ_HANDLED;
1612 #endif
1614 prefetch(fp->rx_cons_sb);
1615 prefetch(fp->tx_cons_sb);
1616 prefetch(&fp->status_blk->c_status_block.status_block_index);
1617 prefetch(&fp->status_blk->u_status_block.status_block_index);
1619 netif_rx_schedule(dev, &bnx2x_fp(bp, index, napi));
1621 return IRQ_HANDLED;
1624 static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
1626 struct net_device *dev = dev_instance;
1627 struct bnx2x *bp = netdev_priv(dev);
1628 u16 status = bnx2x_ack_int(bp);
1629 u16 mask;
1631 /* Return here if interrupt is shared and it's not for us */
1632 if (unlikely(status == 0)) {
1633 DP(NETIF_MSG_INTR, "not our interrupt!\n");
1634 return IRQ_NONE;
1636 DP(NETIF_MSG_INTR, "got an interrupt status %u\n", status);
1638 /* Return here if interrupt is disabled */
1639 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1640 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1641 return IRQ_HANDLED;
1644 #ifdef BNX2X_STOP_ON_ERROR
1645 if (unlikely(bp->panic))
1646 return IRQ_HANDLED;
1647 #endif
1649 mask = 0x2 << bp->fp[0].sb_id;
1650 if (status & mask) {
1651 struct bnx2x_fastpath *fp = &bp->fp[0];
1653 prefetch(fp->rx_cons_sb);
1654 prefetch(fp->tx_cons_sb);
1655 prefetch(&fp->status_blk->c_status_block.status_block_index);
1656 prefetch(&fp->status_blk->u_status_block.status_block_index);
1658 netif_rx_schedule(dev, &bnx2x_fp(bp, 0, napi));
1660 status &= ~mask;
1664 if (unlikely(status & 0x1)) {
1665 schedule_work(&bp->sp_task);
1667 status &= ~0x1;
1668 if (!status)
1669 return IRQ_HANDLED;
1672 if (status)
1673 DP(NETIF_MSG_INTR, "got an unknown interrupt! (status %u)\n",
1674 status);
1676 return IRQ_HANDLED;
1679 /* end of fast path */
1681 static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);
1683 /* Link */
1686 * General service functions
1689 static int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
1691 u32 lock_status;
1692 u32 resource_bit = (1 << resource);
1693 int func = BP_FUNC(bp);
1694 u32 hw_lock_control_reg;
1695 int cnt;
1697 /* Validating that the resource is within range */
1698 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1699 DP(NETIF_MSG_HW,
1700 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1701 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1702 return -EINVAL;
1705 if (func <= 5) {
1706 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1707 } else {
1708 hw_lock_control_reg =
1709 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1712 /* Validating that the resource is not already taken */
1713 lock_status = REG_RD(bp, hw_lock_control_reg);
1714 if (lock_status & resource_bit) {
1715 DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
1716 lock_status, resource_bit);
1717 return -EEXIST;
1720 /* Try for 5 seconds every 5ms */
1721 for (cnt = 0; cnt < 1000; cnt++) {
1722 /* Try to acquire the lock */
1723 REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
1724 lock_status = REG_RD(bp, hw_lock_control_reg);
1725 if (lock_status & resource_bit)
1726 return 0;
1728 msleep(5);
1730 DP(NETIF_MSG_HW, "Timeout\n");
1731 return -EAGAIN;
1734 static int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
1736 u32 lock_status;
1737 u32 resource_bit = (1 << resource);
1738 int func = BP_FUNC(bp);
1739 u32 hw_lock_control_reg;
1741 /* Validating that the resource is within range */
1742 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1743 DP(NETIF_MSG_HW,
1744 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1745 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1746 return -EINVAL;
1749 if (func <= 5) {
1750 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1751 } else {
1752 hw_lock_control_reg =
1753 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1756 /* Validating that the resource is currently taken */
1757 lock_status = REG_RD(bp, hw_lock_control_reg);
1758 if (!(lock_status & resource_bit)) {
1759 DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
1760 lock_status, resource_bit);
1761 return -EFAULT;
1764 REG_WR(bp, hw_lock_control_reg, resource_bit);
1765 return 0;
1768 /* HW Lock for shared dual port PHYs */
1769 static void bnx2x_acquire_phy_lock(struct bnx2x *bp)
1771 u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
1773 mutex_lock(&bp->port.phy_mutex);
1775 if ((ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072) ||
1776 (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073))
1777 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_8072_MDIO);
1780 static void bnx2x_release_phy_lock(struct bnx2x *bp)
1782 u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
1784 if ((ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072) ||
1785 (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073))
1786 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_8072_MDIO);
1788 mutex_unlock(&bp->port.phy_mutex);
1791 int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
1793 /* The GPIO should be swapped if swap register is set and active */
1794 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1795 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1796 int gpio_shift = gpio_num +
1797 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1798 u32 gpio_mask = (1 << gpio_shift);
1799 u32 gpio_reg;
1801 if (gpio_num > MISC_REGISTERS_GPIO_3) {
1802 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1803 return -EINVAL;
1806 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1807 /* read GPIO and mask except the float bits */
1808 gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
1810 switch (mode) {
1811 case MISC_REGISTERS_GPIO_OUTPUT_LOW:
1812 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
1813 gpio_num, gpio_shift);
1814 /* clear FLOAT and set CLR */
1815 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1816 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
1817 break;
1819 case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
1820 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
1821 gpio_num, gpio_shift);
1822 /* clear FLOAT and set SET */
1823 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1824 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
1825 break;
1827 case MISC_REGISTERS_GPIO_INPUT_HI_Z:
1828 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
1829 gpio_num, gpio_shift);
1830 /* set FLOAT */
1831 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1832 break;
1834 default:
1835 break;
1838 REG_WR(bp, MISC_REG_GPIO, gpio_reg);
1839 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1841 return 0;
1844 static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
1846 u32 spio_mask = (1 << spio_num);
1847 u32 spio_reg;
1849 if ((spio_num < MISC_REGISTERS_SPIO_4) ||
1850 (spio_num > MISC_REGISTERS_SPIO_7)) {
1851 BNX2X_ERR("Invalid SPIO %d\n", spio_num);
1852 return -EINVAL;
1855 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
1856 /* read SPIO and mask except the float bits */
1857 spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);
1859 switch (mode) {
1860 case MISC_REGISTERS_SPIO_OUTPUT_LOW:
1861 DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
1862 /* clear FLOAT and set CLR */
1863 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1864 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
1865 break;
1867 case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
1868 DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
1869 /* clear FLOAT and set SET */
1870 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1871 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
1872 break;
1874 case MISC_REGISTERS_SPIO_INPUT_HI_Z:
1875 DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
1876 /* set FLOAT */
1877 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1878 break;
1880 default:
1881 break;
1884 REG_WR(bp, MISC_REG_SPIO, spio_reg);
1885 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
1887 return 0;
1890 static void bnx2x_calc_fc_adv(struct bnx2x *bp)
1892 switch (bp->link_vars.ieee_fc) {
1893 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
1894 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
1895 ADVERTISED_Pause);
1896 break;
1897 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
1898 bp->port.advertising |= (ADVERTISED_Asym_Pause |
1899 ADVERTISED_Pause);
1900 break;
1901 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
1902 bp->port.advertising |= ADVERTISED_Asym_Pause;
1903 break;
1904 default:
1905 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
1906 ADVERTISED_Pause);
1907 break;
1911 static void bnx2x_link_report(struct bnx2x *bp)
1913 if (bp->link_vars.link_up) {
1914 if (bp->state == BNX2X_STATE_OPEN)
1915 netif_carrier_on(bp->dev);
1916 printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);
1918 printk("%d Mbps ", bp->link_vars.line_speed);
1920 if (bp->link_vars.duplex == DUPLEX_FULL)
1921 printk("full duplex");
1922 else
1923 printk("half duplex");
1925 if (bp->link_vars.flow_ctrl != FLOW_CTRL_NONE) {
1926 if (bp->link_vars.flow_ctrl & FLOW_CTRL_RX) {
1927 printk(", receive ");
1928 if (bp->link_vars.flow_ctrl & FLOW_CTRL_TX)
1929 printk("& transmit ");
1930 } else {
1931 printk(", transmit ");
1933 printk("flow control ON");
1935 printk("\n");
1937 } else { /* link_down */
1938 netif_carrier_off(bp->dev);
1939 printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
1943 static u8 bnx2x_initial_phy_init(struct bnx2x *bp)
1945 if (!BP_NOMCP(bp)) {
1946 u8 rc;
1948 /* Initialize link parameters structure variables */
1949 /* It is recommended to turn off RX FC for jumbo frames
1950 for better performance */
1951 if (IS_E1HMF(bp))
1952 bp->link_params.req_fc_auto_adv = FLOW_CTRL_BOTH;
1953 else if (bp->dev->mtu > 5000)
1954 bp->link_params.req_fc_auto_adv = FLOW_CTRL_TX;
1955 else
1956 bp->link_params.req_fc_auto_adv = FLOW_CTRL_BOTH;
1958 bnx2x_acquire_phy_lock(bp);
1959 rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
1960 bnx2x_release_phy_lock(bp);
1962 if (bp->link_vars.link_up)
1963 bnx2x_link_report(bp);
1965 bnx2x_calc_fc_adv(bp);
1967 return rc;
1969 BNX2X_ERR("Bootcode is missing -not initializing link\n");
1970 return -EINVAL;
1973 static void bnx2x_link_set(struct bnx2x *bp)
1975 if (!BP_NOMCP(bp)) {
1976 bnx2x_acquire_phy_lock(bp);
1977 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
1978 bnx2x_release_phy_lock(bp);
1980 bnx2x_calc_fc_adv(bp);
1981 } else
1982 BNX2X_ERR("Bootcode is missing -not setting link\n");
1985 static void bnx2x__link_reset(struct bnx2x *bp)
1987 if (!BP_NOMCP(bp)) {
1988 bnx2x_acquire_phy_lock(bp);
1989 bnx2x_link_reset(&bp->link_params, &bp->link_vars);
1990 bnx2x_release_phy_lock(bp);
1991 } else
1992 BNX2X_ERR("Bootcode is missing -not resetting link\n");
1995 static u8 bnx2x_link_test(struct bnx2x *bp)
1997 u8 rc;
1999 bnx2x_acquire_phy_lock(bp);
2000 rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
2001 bnx2x_release_phy_lock(bp);
2003 return rc;
2006 /* Calculates the sum of vn_min_rates.
2007 It's needed for further normalizing of the min_rates.
2009 Returns:
2010 sum of vn_min_rates
2012 0 - if all the min_rates are 0.
2013 In the latter case the fairness algorithm should be deactivated.
2014 If not all min_rates are zero then those that are zeroes will
2015 be set to 1.
2017 static u32 bnx2x_calc_vn_wsum(struct bnx2x *bp)
2019 int i, port = BP_PORT(bp);
2020 u32 wsum = 0;
2021 int all_zero = 1;
2023 for (i = 0; i < E1HVN_MAX; i++) {
2024 u32 vn_cfg =
2025 SHMEM_RD(bp, mf_cfg.func_mf_config[2*i + port].config);
2026 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2027 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2028 if (!(vn_cfg & FUNC_MF_CFG_FUNC_HIDE)) {
2029 /* If min rate is zero - set it to 1 */
2030 if (!vn_min_rate)
2031 vn_min_rate = DEF_MIN_RATE;
2032 else
2033 all_zero = 0;
2035 wsum += vn_min_rate;
2039 /* ... only if all min rates are zeros - disable FAIRNESS */
2040 if (all_zero)
2041 return 0;
2043 return wsum;
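/* Build the per-port congestion management context (rate shaping and
 * fairness timers/thresholds) and write it into the XSTORM internal
 * memory.  Fairness is only armed in E1H multi-function mode and only
 * when at least one VN has a non-zero min rate (en_fness).
 */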
2046 static void bnx2x_init_port_minmax(struct bnx2x *bp,
2047 int en_fness,
2048 u16 port_rate,
2049 struct cmng_struct_per_port *m_cmng_port)
2051 u32 r_param = port_rate / 8;
2052 int port = BP_PORT(bp);
2053 int i;
2055 memset(m_cmng_port, 0, sizeof(struct cmng_struct_per_port));
2057 /* Enable minmax only if we are in e1hmf mode */
2058 if (IS_E1HMF(bp)) {
2059 u32 fair_periodic_timeout_usec;
2060 u32 t_fair;
2062 /* Enable rate shaping and fairness */
2063 m_cmng_port->flags.cmng_vn_enable = 1;
2064 m_cmng_port->flags.fairness_enable = en_fness ? 1 : 0;
2065 m_cmng_port->flags.rate_shaping_enable = 1;
2067 if (!en_fness)
2068 DP(NETIF_MSG_IFUP, "All MIN values are zeroes,"
2069 " fairness will be disabled\n");
2071 /* 100 usec in SDM ticks = 25 since each tick is 4 usec */
2072 m_cmng_port->rs_vars.rs_periodic_timeout =
2073 RS_PERIODIC_TIMEOUT_USEC / 4;
2075 /* this is the threshold below which no timer arming will occur
2076 the 1.25 coefficient makes the threshold a little bigger
2077 than the real time, to compensate for timer inaccuracy */
2078 m_cmng_port->rs_vars.rs_threshold =
2079 (RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;
2081 /* resolution of fairness timer */
2082 fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
2083 /* for 10G it is 1000usec. for 1G it is 10000usec. */
2084 t_fair = T_FAIR_COEF / port_rate;
2086 /* this is the threshold below which we won't arm
2087 the timer anymore */
2088 m_cmng_port->fair_vars.fair_threshold = QM_ARB_BYTES;
2090 /* we multiply by 1e3/8 to get bytes/msec.
2091 We don't want the credits to pass a credit
2092 of the T_FAIR*FAIR_MEM (algorithm resolution) */
2093 m_cmng_port->fair_vars.upper_bound =
2094 r_param * t_fair * FAIR_MEM;
2095 /* since each tick is 4 usec */
2096 m_cmng_port->fair_vars.fairness_timeout =
2097 fair_periodic_timeout_usec / 4;
2099 } else {
2100 /* Disable rate shaping and fairness */
2101 m_cmng_port->flags.cmng_vn_enable = 0;
2102 m_cmng_port->flags.fairness_enable = 0;
2103 m_cmng_port->flags.rate_shaping_enable = 0;
2105 DP(NETIF_MSG_IFUP,
2106 "Single function mode minmax will be disabled\n");
2109 /* Store it to internal memory */
2110 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
2111 REG_WR(bp, BAR_XSTRORM_INTMEM +
2112 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
2113 ((u32 *)(m_cmng_port))[i]);
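/* Build the per-VN rate shaping and fairness contexts from the min/max
 * bandwidth configured in the shared-memory function config, and store
 * them in the XSTORM internal memory for this function.
 */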
2116 static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func,
2117 u32 wsum, u16 port_rate,
2118 struct cmng_struct_per_port *m_cmng_port)
2120 struct rate_shaping_vars_per_vn m_rs_vn;
2121 struct fairness_vars_per_vn m_fair_vn;
2122 u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2123 u16 vn_min_rate, vn_max_rate;
2124 int i;
2126 /* If function is hidden - set min and max to zeroes */
2127 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
2128 vn_min_rate = 0;
2129 vn_max_rate = 0;
2131 } else {
2132 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2133 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2134 /* If FAIRNESS is enabled (not all min rates are zeroes) and
2135 if current min rate is zero - set it to 1.
2136 This is a requirement of the algorithm. */
2137 if ((vn_min_rate == 0) && wsum)
2138 vn_min_rate = DEF_MIN_RATE;
2139 vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
2140 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
2143 DP(NETIF_MSG_IFUP, "func %d: vn_min_rate=%d vn_max_rate=%d "
2144 "wsum=%d\n", func, vn_min_rate, vn_max_rate, wsum);
2146 memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
2147 memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));
2149 /* global vn counter - maximal Mbps for this vn */
2150 m_rs_vn.vn_counter.rate = vn_max_rate;
2152 /* quota - number of bytes transmitted in this period */
2153 m_rs_vn.vn_counter.quota =
2154 (vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;
2156 #ifdef BNX2X_PER_PROT_QOS
2157 /* per protocol counter */
2158 for (protocol = 0; protocol < NUM_OF_PROTOCOLS; protocol++) {
2159 /* maximal Mbps for this protocol */
2160 m_rs_vn.protocol_counters[protocol].rate =
2161 protocol_max_rate[protocol];
2162 /* the quota in each timer period -
2163 number of bytes transmitted in this period */
2164 m_rs_vn.protocol_counters[protocol].quota =
2165 (u32)(rs_periodic_timeout_usec *
2166 ((double)m_rs_vn.
2167 protocol_counters[protocol].rate/8));
2169 #endif
2171 if (wsum) {
2172 /* credit for each period of the fairness algorithm:
2173 number of bytes in T_FAIR (the VN's share of the port rate).
2174 wsum should not be larger than 10000, thus
2175 T_FAIR_COEF / (8 * wsum) will always be greater than zero */
2176 m_fair_vn.vn_credit_delta =
2177 max((u64)(vn_min_rate * (T_FAIR_COEF / (8 * wsum))),
2178 (u64)(m_cmng_port->fair_vars.fair_threshold * 2));
2179 DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta=%d\n",
2180 m_fair_vn.vn_credit_delta);
2183 #ifdef BNX2X_PER_PROT_QOS
2184 do {
2185 u32 protocolWeightSum = 0;
2187 for (protocol = 0; protocol < NUM_OF_PROTOCOLS; protocol++)
2188 protocolWeightSum +=
2189 drvInit.protocol_min_rate[protocol];
2190 /* per protocol counter -
2191 NOT NEEDED IF NO PER-PROTOCOL CONGESTION MANAGEMENT */
2192 if (protocolWeightSum > 0) {
2193 for (protocol = 0;
2194 protocol < NUM_OF_PROTOCOLS; protocol++)
2195 /* credit for each period of the
2196 fairness algorithm - number of bytes in
2197 T_FAIR (the protocol share the vn rate) */
2198 m_fair_vn.protocol_credit_delta[protocol] =
2199 (u32)((vn_min_rate / 8) * t_fair *
2200 protocol_min_rate / protocolWeightSum);
2202 } while (0);
2203 #endif
2205 /* Store it to internal memory */
2206 for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
2207 REG_WR(bp, BAR_XSTRORM_INTMEM +
2208 XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
2209 ((u32 *)(&m_rs_vn))[i]);
2211 for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
2212 REG_WR(bp, BAR_XSTRORM_INTMEM +
2213 XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
2214 ((u32 *)(&m_fair_vn))[i]);
2217 /* This function is called upon link interrupt */
2218 static void bnx2x_link_attn(struct bnx2x *bp)
2220 int vn;
2222 /* Make sure that we are synced with the current statistics */
2223 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2225 bnx2x_acquire_phy_lock(bp);
2226 bnx2x_link_update(&bp->link_params, &bp->link_vars);
2227 bnx2x_release_phy_lock(bp);
2229 if (bp->link_vars.link_up) {
2231 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
2232 struct host_port_stats *pstats;
2234 pstats = bnx2x_sp(bp, port_stats);
2235 /* reset old bmac stats */
2236 memset(&(pstats->mac_stx[0]), 0,
2237 sizeof(struct mac_stx));
2239 if ((bp->state == BNX2X_STATE_OPEN) ||
2240 (bp->state == BNX2X_STATE_DISABLED))
2241 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2244 /* indicate link status */
2245 bnx2x_link_report(bp);
2247 if (IS_E1HMF(bp)) {
2248 int func;
2250 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2251 if (vn == BP_E1HVN(bp))
2252 continue;
2254 func = ((vn << 1) | BP_PORT(bp));
2256 /* Set the attention towards other drivers
2257 on the same port */
2258 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2259 (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2263 if (CHIP_IS_E1H(bp) && (bp->link_vars.line_speed > 0)) {
2264 struct cmng_struct_per_port m_cmng_port;
2265 u32 wsum;
2266 int port = BP_PORT(bp);
2268 /* Init RATE SHAPING and FAIRNESS contexts */
2269 wsum = bnx2x_calc_vn_wsum(bp);
2270 bnx2x_init_port_minmax(bp, (int)wsum,
2271 bp->link_vars.line_speed,
2272 &m_cmng_port);
2273 if (IS_E1HMF(bp))
2274 for (vn = VN_0; vn < E1HVN_MAX; vn++)
2275 bnx2x_init_vn_minmax(bp, 2*vn + port,
2276 wsum, bp->link_vars.line_speed,
2277 &m_cmng_port);
2281 static void bnx2x__link_status_update(struct bnx2x *bp)
2283 if (bp->state != BNX2X_STATE_OPEN)
2284 return;
2286 bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
2288 if (bp->link_vars.link_up)
2289 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2290 else
2291 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2293 /* indicate link status */
2294 bnx2x_link_report(bp);
2297 static void bnx2x_pmf_update(struct bnx2x *bp)
2299 int port = BP_PORT(bp);
2300 u32 val;
2302 bp->port.pmf = 1;
2303 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2305 /* enable nig attention */
2306 val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
2307 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
2308 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
2310 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
2313 /* end of Link */
2315 /* slow path */
2318 * General service functions
2321 /* the slow path queue is odd since completions arrive on the fastpath ring */
2322 static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
2323 u32 data_hi, u32 data_lo, int common)
2325 int func = BP_FUNC(bp);
2327 DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
2328 "SPQE (%x:%x) command %d hw_cid %x data (%x:%x) left %x\n",
2329 (u32)U64_HI(bp->spq_mapping), (u32)(U64_LO(bp->spq_mapping) +
2330 (void *)bp->spq_prod_bd - (void *)bp->spq), command,
2331 HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);
2333 #ifdef BNX2X_STOP_ON_ERROR
2334 if (unlikely(bp->panic))
2335 return -EIO;
2336 #endif
2338 spin_lock_bh(&bp->spq_lock);
2340 if (!bp->spq_left) {
2341 BNX2X_ERR("BUG! SPQ ring full!\n");
2342 spin_unlock_bh(&bp->spq_lock);
2343 bnx2x_panic();
2344 return -EBUSY;
2347 /* CID needs the port number to be encoded in it */
2348 bp->spq_prod_bd->hdr.conn_and_cmd_data =
2349 cpu_to_le32(((command << SPE_HDR_CMD_ID_SHIFT) |
2350 HW_CID(bp, cid)));
2351 bp->spq_prod_bd->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
2352 if (common)
2353 bp->spq_prod_bd->hdr.type |=
2354 cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));
2356 bp->spq_prod_bd->data.mac_config_addr.hi = cpu_to_le32(data_hi);
2357 bp->spq_prod_bd->data.mac_config_addr.lo = cpu_to_le32(data_lo);
2359 bp->spq_left--;
2361 if (bp->spq_prod_bd == bp->spq_last_bd) {
2362 bp->spq_prod_bd = bp->spq;
2363 bp->spq_prod_idx = 0;
2364 DP(NETIF_MSG_TIMER, "end of spq\n");
2366 } else {
2367 bp->spq_prod_bd++;
2368 bp->spq_prod_idx++;
2371 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
2372 bp->spq_prod_idx);
2374 spin_unlock_bh(&bp->spq_lock);
2375 return 0;
2378 /* acquire split MCP access lock register */
2379 static int bnx2x_acquire_alr(struct bnx2x *bp)
2381 u32 i, j, val;
2382 int rc = 0;
2384 might_sleep();
2385 i = 100;
2386 for (j = 0; j < i*10; j++) {
2387 val = (1UL << 31);
2388 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2389 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
2390 if (val & (1L << 31))
2391 break;
2393 msleep(5);
2395 if (!(val & (1L << 31))) {
2396 BNX2X_ERR("Cannot acquire MCP access lock register\n");
2397 rc = -EBUSY;
2400 return rc;
2403 /* release split MCP access lock register */
2404 static void bnx2x_release_alr(struct bnx2x *bp)
2406 u32 val = 0;
2408 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
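/* Compare the default status block indices written by the chip against
 * the driver's cached copies and return a bitmask of the ones that
 * changed (attn, u, c, x, t).
 */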
2411 static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
2413 struct host_def_status_block *def_sb = bp->def_status_blk;
2414 u16 rc = 0;
2416 barrier(); /* status block is written to by the chip */
2417 if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
2418 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
2419 rc |= 1;
2421 if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
2422 bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
2423 rc |= 2;
2425 if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
2426 bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
2427 rc |= 4;
2429 if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
2430 bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
2431 rc |= 8;
2433 if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
2434 bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
2435 rc |= 16;
2437 return rc;
2441 * slow path service functions
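/* Handle newly asserted attention bits: mask them in the AEU while they
 * are being serviced, handle the hard-wired sources (NIG/link, SPIO and
 * general attentions), then write them back to the HC to acknowledge.
 */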
2444 static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
2446 int port = BP_PORT(bp);
2447 u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
2448 COMMAND_REG_ATTN_BITS_SET);
2449 u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2450 MISC_REG_AEU_MASK_ATTN_FUNC_0;
2451 u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
2452 NIG_REG_MASK_INTERRUPT_PORT0;
2453 u32 aeu_mask;
2455 if (bp->attn_state & asserted)
2456 BNX2X_ERR("IGU ERROR\n");
2458 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2459 aeu_mask = REG_RD(bp, aeu_addr);
2461 DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n",
2462 aeu_mask, asserted);
2463 aeu_mask &= ~(asserted & 0xff);
2464 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
2466 REG_WR(bp, aeu_addr, aeu_mask);
2467 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2469 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2470 bp->attn_state |= asserted;
2471 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2473 if (asserted & ATTN_HARD_WIRED_MASK) {
2474 if (asserted & ATTN_NIG_FOR_FUNC) {
2476 /* save nig interrupt mask */
2477 bp->nig_mask = REG_RD(bp, nig_int_mask_addr);
2478 REG_WR(bp, nig_int_mask_addr, 0);
2480 bnx2x_link_attn(bp);
2482 /* handle unicore attn? */
2484 if (asserted & ATTN_SW_TIMER_4_FUNC)
2485 DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
2487 if (asserted & GPIO_2_FUNC)
2488 DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
2490 if (asserted & GPIO_3_FUNC)
2491 DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
2493 if (asserted & GPIO_4_FUNC)
2494 DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
2496 if (port == 0) {
2497 if (asserted & ATTN_GENERAL_ATTN_1) {
2498 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
2499 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
2501 if (asserted & ATTN_GENERAL_ATTN_2) {
2502 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
2503 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
2505 if (asserted & ATTN_GENERAL_ATTN_3) {
2506 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
2507 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
2509 } else {
2510 if (asserted & ATTN_GENERAL_ATTN_4) {
2511 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
2512 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
2514 if (asserted & ATTN_GENERAL_ATTN_5) {
2515 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
2516 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
2518 if (asserted & ATTN_GENERAL_ATTN_6) {
2519 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
2520 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
2524 } /* if hardwired */
2526 DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2527 asserted, hc_addr);
2528 REG_WR(bp, hc_addr, asserted);
2530 /* now set back the mask */
2531 if (asserted & ATTN_NIG_FOR_FUNC)
2532 REG_WR(bp, nig_int_mask_addr, bp->nig_mask);
2535 static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
2537 int port = BP_PORT(bp);
2538 int reg_offset;
2539 u32 val;
2541 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
2542 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
2544 if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
2546 val = REG_RD(bp, reg_offset);
2547 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
2548 REG_WR(bp, reg_offset, val);
2550 BNX2X_ERR("SPIO5 hw attention\n");
2552 switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
2553 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1021G:
2554 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
2555 /* Fan failure attention */
2557 /* The PHY reset is controlled by GPIO 1 */
2558 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2559 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2560 /* Low power mode is controlled by GPIO 2 */
2561 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
2562 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2563 /* mark the failure */
2564 bp->link_params.ext_phy_config &=
2565 ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
2566 bp->link_params.ext_phy_config |=
2567 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
2568 SHMEM_WR(bp,
2569 dev_info.port_hw_config[port].
2570 external_phy_config,
2571 bp->link_params.ext_phy_config);
2572 /* log the failure */
2573 printk(KERN_ERR PFX "Fan Failure on Network"
2574 " Controller %s has caused the driver to"
2575 " shutdown the card to prevent permanent"
2576 " damage. Please contact Dell Support for"
2577 " assistance\n", bp->dev->name);
2578 break;
2580 default:
2581 break;
2585 if (attn & HW_INTERRUT_ASSERT_SET_0) {
2587 val = REG_RD(bp, reg_offset);
2588 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
2589 REG_WR(bp, reg_offset, val);
2591 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
2592 (attn & HW_INTERRUT_ASSERT_SET_0));
2593 bnx2x_panic();
2597 static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
2599 u32 val;
2601 if (attn & BNX2X_DOORQ_ASSERT) {
2603 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
2604 BNX2X_ERR("DB hw attention 0x%x\n", val);
2605 /* DORQ discard attention */
2606 if (val & 0x2)
2607 BNX2X_ERR("FATAL error from DORQ\n");
2610 if (attn & HW_INTERRUT_ASSERT_SET_1) {
2612 int port = BP_PORT(bp);
2613 int reg_offset;
2615 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
2616 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
2618 val = REG_RD(bp, reg_offset);
2619 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
2620 REG_WR(bp, reg_offset, val);
2622 BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
2623 (attn & HW_INTERRUT_ASSERT_SET_1));
2624 bnx2x_panic();
2628 static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
2630 u32 val;
2632 if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
2634 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
2635 BNX2X_ERR("CFC hw attention 0x%x\n", val);
2636 /* CFC error attention */
2637 if (val & 0x2)
2638 BNX2X_ERR("FATAL error from CFC\n");
2641 if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
2643 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
2644 BNX2X_ERR("PXP hw attention 0x%x\n", val);
2645 /* RQ_USDMDP_FIFO_OVERFLOW */
2646 if (val & 0x18000)
2647 BNX2X_ERR("FATAL error from PXP\n");
2650 if (attn & HW_INTERRUT_ASSERT_SET_2) {
2652 int port = BP_PORT(bp);
2653 int reg_offset;
2655 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
2656 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
2658 val = REG_RD(bp, reg_offset);
2659 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
2660 REG_WR(bp, reg_offset, val);
2662 BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
2663 (attn & HW_INTERRUT_ASSERT_SET_2));
2664 bnx2x_panic();
2668 static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
2670 u32 val;
2672 if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
2674 if (attn & BNX2X_PMF_LINK_ASSERT) {
2675 int func = BP_FUNC(bp);
2677 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
2678 bnx2x__link_status_update(bp);
2679 if (SHMEM_RD(bp, func_mb[func].drv_status) &
2680 DRV_STATUS_PMF)
2681 bnx2x_pmf_update(bp);
2683 } else if (attn & BNX2X_MC_ASSERT_BITS) {
2685 BNX2X_ERR("MC assert!\n");
2686 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
2687 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
2688 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
2689 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
2690 bnx2x_panic();
2692 } else if (attn & BNX2X_MCP_ASSERT) {
2694 BNX2X_ERR("MCP assert!\n");
2695 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
2696 bnx2x_fw_dump(bp);
2698 } else
2699 BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
2702 if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
2703 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
2704 if (attn & BNX2X_GRC_TIMEOUT) {
2705 val = CHIP_IS_E1H(bp) ?
2706 REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
2707 BNX2X_ERR("GRC time-out 0x%08x\n", val);
2709 if (attn & BNX2X_GRC_RSV) {
2710 val = CHIP_IS_E1H(bp) ?
2711 REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
2712 BNX2X_ERR("GRC reserved 0x%08x\n", val);
2714 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
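/* Handle deasserted attention bits: under the ALR lock, read the
 * after-invert AEU signals, dispatch each deasserted group to the
 * per-register handlers above, then unmask the bits in the AEU and
 * acknowledge them in the HC.
 */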
2718 static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
2720 struct attn_route attn;
2721 struct attn_route group_mask;
2722 int port = BP_PORT(bp);
2723 int index;
2724 u32 reg_addr;
2725 u32 val;
2726 u32 aeu_mask;
2728 /* need to take HW lock because MCP or other port might also
2729 try to handle this event */
2730 bnx2x_acquire_alr(bp);
2732 attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
2733 attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
2734 attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
2735 attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
2736 DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
2737 attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);
2739 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
2740 if (deasserted & (1 << index)) {
2741 group_mask = bp->attn_group[index];
2743 DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
2744 index, group_mask.sig[0], group_mask.sig[1],
2745 group_mask.sig[2], group_mask.sig[3]);
2747 bnx2x_attn_int_deasserted3(bp,
2748 attn.sig[3] & group_mask.sig[3]);
2749 bnx2x_attn_int_deasserted1(bp,
2750 attn.sig[1] & group_mask.sig[1]);
2751 bnx2x_attn_int_deasserted2(bp,
2752 attn.sig[2] & group_mask.sig[2]);
2753 bnx2x_attn_int_deasserted0(bp,
2754 attn.sig[0] & group_mask.sig[0]);
2756 if ((attn.sig[0] & group_mask.sig[0] &
2757 HW_PRTY_ASSERT_SET_0) ||
2758 (attn.sig[1] & group_mask.sig[1] &
2759 HW_PRTY_ASSERT_SET_1) ||
2760 (attn.sig[2] & group_mask.sig[2] &
2761 HW_PRTY_ASSERT_SET_2))
2762 BNX2X_ERR("FATAL HW block parity attention\n");
2766 bnx2x_release_alr(bp);
2768 reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);
2770 val = ~deasserted;
2771 DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2772 val, reg_addr);
2773 REG_WR(bp, reg_addr, val);
2775 if (~bp->attn_state & deasserted)
2776 BNX2X_ERR("IGU ERROR\n");
2778 reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2779 MISC_REG_AEU_MASK_ATTN_FUNC_0;
2781 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2782 aeu_mask = REG_RD(bp, reg_addr);
2784 DP(NETIF_MSG_HW, "aeu_mask %x newly deasserted %x\n",
2785 aeu_mask, deasserted);
2786 aeu_mask |= (deasserted & 0xff);
2787 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
2789 REG_WR(bp, reg_addr, aeu_mask);
2790 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2792 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2793 bp->attn_state &= ~deasserted;
2794 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
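/* Derive which attention bits were newly asserted or deasserted by
 * comparing the status block attn bits, their ack bits and the cached
 * attn_state, and dispatch to the handlers above.
 */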
2797 static void bnx2x_attn_int(struct bnx2x *bp)
2799 /* read local copy of bits */
2800 u32 attn_bits = bp->def_status_blk->atten_status_block.attn_bits;
2801 u32 attn_ack = bp->def_status_blk->atten_status_block.attn_bits_ack;
2802 u32 attn_state = bp->attn_state;
2804 /* look for changed bits */
2805 u32 asserted = attn_bits & ~attn_ack & ~attn_state;
2806 u32 deasserted = ~attn_bits & attn_ack & attn_state;
2808 DP(NETIF_MSG_HW,
2809 "attn_bits %x attn_ack %x asserted %x deasserted %x\n",
2810 attn_bits, attn_ack, asserted, deasserted);
2812 if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
2813 BNX2X_ERR("BAD attention state\n");
2815 /* handle bits that were raised */
2816 if (asserted)
2817 bnx2x_attn_int_asserted(bp, asserted);
2819 if (deasserted)
2820 bnx2x_attn_int_deasserted(bp, deasserted);
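/* Slow path work handler: find which default status block indices
 * advanced, process HW attentions and statistics completions, and ack
 * each storm index back to the IGU, re-enabling interrupts on the last
 * ack.
 */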
2823 static void bnx2x_sp_task(struct work_struct *work)
2825 struct bnx2x *bp = container_of(work, struct bnx2x, sp_task);
2826 u16 status;
2829 /* Return here if interrupt is disabled */
2830 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
2831 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
2832 return;
2835 status = bnx2x_update_dsb_idx(bp);
2836 /* if (status == 0) */
2837 /* BNX2X_ERR("spurious slowpath interrupt!\n"); */
2839 DP(NETIF_MSG_INTR, "got a slowpath interrupt (updated %x)\n", status);
2841 /* HW attentions */
2842 if (status & 0x1)
2843 bnx2x_attn_int(bp);
2845 /* CStorm events: query_stats, port delete ramrod */
2846 if (status & 0x2)
2847 bp->stats_pending = 0;
2849 bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, bp->def_att_idx,
2850 IGU_INT_NOP, 1);
2851 bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
2852 IGU_INT_NOP, 1);
2853 bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
2854 IGU_INT_NOP, 1);
2855 bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
2856 IGU_INT_NOP, 1);
2857 bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
2858 IGU_INT_ENABLE, 1);
2862 static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
2864 struct net_device *dev = dev_instance;
2865 struct bnx2x *bp = netdev_priv(dev);
2867 /* Return here if interrupt is disabled */
2868 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
2869 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
2870 return IRQ_HANDLED;
2873 bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, 0, IGU_INT_DISABLE, 0);
2875 #ifdef BNX2X_STOP_ON_ERROR
2876 if (unlikely(bp->panic))
2877 return IRQ_HANDLED;
2878 #endif
2880 schedule_work(&bp->sp_task);
2882 return IRQ_HANDLED;
2885 /* end of slow path */
2887 /* Statistics */
2889 /****************************************************************************
2890 * Macros
2891 ****************************************************************************/
2893 /* sum[hi:lo] += add[hi:lo] */
2894 #define ADD_64(s_hi, a_hi, s_lo, a_lo) \
2895 do { \
2896 s_lo += a_lo; \
2897 s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
2898 } while (0)
2900 /* difference = minuend - subtrahend */
2901 #define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
2902 do { \
2903 if (m_lo < s_lo) { \
2904 /* underflow */ \
2905 d_hi = m_hi - s_hi; \
2906 if (d_hi > 0) { \
2907 /* we can 'loan' 1 */ \
2908 d_hi--; \
2909 d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
2910 } else { \
2911 /* m_hi <= s_hi */ \
2912 d_hi = 0; \
2913 d_lo = 0; \
2915 } else { \
2916 /* m_lo >= s_lo */ \
2917 if (m_hi < s_hi) { \
2918 d_hi = 0; \
2919 d_lo = 0; \
2920 } else { \
2921 /* m_hi >= s_hi */ \
2922 d_hi = m_hi - s_hi; \
2923 d_lo = m_lo - s_lo; \
2926 } while (0)
2928 #define UPDATE_STAT64(s, t) \
2929 do { \
2930 DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
2931 diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
2932 pstats->mac_stx[0].t##_hi = new->s##_hi; \
2933 pstats->mac_stx[0].t##_lo = new->s##_lo; \
2934 ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
2935 pstats->mac_stx[1].t##_lo, diff.lo); \
2936 } while (0)
2938 #define UPDATE_STAT64_NIG(s, t) \
2939 do { \
2940 DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
2941 diff.lo, new->s##_lo, old->s##_lo); \
2942 ADD_64(estats->t##_hi, diff.hi, \
2943 estats->t##_lo, diff.lo); \
2944 } while (0)
2946 /* sum[hi:lo] += add */
2947 #define ADD_EXTEND_64(s_hi, s_lo, a) \
2948 do { \
2949 s_lo += a; \
2950 s_hi += (s_lo < a) ? 1 : 0; \
2951 } while (0)
2953 #define UPDATE_EXTEND_STAT(s) \
2954 do { \
2955 ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
2956 pstats->mac_stx[1].s##_lo, \
2957 new->s); \
2958 } while (0)
2960 #define UPDATE_EXTEND_TSTAT(s, t) \
2961 do { \
2962 diff = le32_to_cpu(tclient->s) - old_tclient->s; \
2963 old_tclient->s = le32_to_cpu(tclient->s); \
2964 ADD_EXTEND_64(fstats->t##_hi, fstats->t##_lo, diff); \
2965 } while (0)
2967 #define UPDATE_EXTEND_XSTAT(s, t) \
2968 do { \
2969 diff = le32_to_cpu(xclient->s) - old_xclient->s; \
2970 old_xclient->s = le32_to_cpu(xclient->s); \
2971 ADD_EXTEND_64(fstats->t##_hi, fstats->t##_lo, diff); \
2972 } while (0)
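/* These macros maintain 64-bit accumulators split into _hi/_lo words:
 * deltas are taken against the previously read values and added with
 * explicit carry propagation (ADD_64 bumps the high word when the low
 * word wraps), so 32-bit hardware and storm counters can roll over
 * without losing counts.
 */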
2975 * General service functions
2978 static inline long bnx2x_hilo(u32 *hiref)
2980 u32 lo = *(hiref + 1);
2981 #if (BITS_PER_LONG == 64)
2982 u32 hi = *hiref;
2984 return HILO_U64(hi, lo);
2985 #else
2986 return lo;
2987 #endif
2991 * Init service functions
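/* Ask the storms for a statistics update by posting a STAT_QUERY ramrod;
 * only one query may be in flight at a time (stats_pending), and since
 * the ramrod uses a dedicated slot the SPQ credit is returned at once.
 */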
2994 static void bnx2x_storm_stats_post(struct bnx2x *bp)
2996 if (!bp->stats_pending) {
2997 struct eth_query_ramrod_data ramrod_data = {0};
2998 int rc;
3000 ramrod_data.drv_counter = bp->stats_counter++;
3001 ramrod_data.collect_port_1b = bp->port.pmf ? 1 : 0;
3002 ramrod_data.ctr_id_vector = (1 << BP_CL_ID(bp));
3004 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
3005 ((u32 *)&ramrod_data)[1],
3006 ((u32 *)&ramrod_data)[0], 0);
3007 if (rc == 0) {
3008 /* stats ramrod has its own slot on the spq */
3009 bp->spq_left++;
3010 bp->stats_pending = 1;
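/* Reset all statistics state for this function: cache the port stats
 * address from shared memory, snapshot the current NIG counters as the
 * "old" baseline and zero the software copies.
 */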
3015 static void bnx2x_stats_init(struct bnx2x *bp)
3017 int port = BP_PORT(bp);
3019 bp->executer_idx = 0;
3020 bp->stats_counter = 0;
3022 /* port stats */
3023 if (!BP_NOMCP(bp))
3024 bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
3025 else
3026 bp->port.port_stx = 0;
3027 DP(BNX2X_MSG_STATS, "port_stx 0x%x\n", bp->port.port_stx);
3029 memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
3030 bp->port.old_nig_stats.brb_discard =
3031 REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
3032 bp->port.old_nig_stats.brb_truncate =
3033 REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
3034 REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
3035 &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
3036 REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
3037 &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);
3039 /* function stats */
3040 memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
3041 memset(&bp->old_tclient, 0, sizeof(struct tstorm_per_client_stats));
3042 memset(&bp->old_xclient, 0, sizeof(struct xstorm_per_client_stats));
3043 memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats));
3045 bp->stats_state = STATS_STATE_DISABLED;
3046 if (IS_E1HMF(bp) && bp->port.pmf && bp->port.port_stx)
3047 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
3050 static void bnx2x_hw_stats_post(struct bnx2x *bp)
3052 struct dmae_command *dmae = &bp->stats_dmae;
3053 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3055 *stats_comp = DMAE_COMP_VAL;
3057 /* loader */
3058 if (bp->executer_idx) {
3059 int loader_idx = PMF_DMAE_C(bp);
3061 memset(dmae, 0, sizeof(struct dmae_command));
3063 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3064 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3065 DMAE_CMD_DST_RESET |
3066 #ifdef __BIG_ENDIAN
3067 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3068 #else
3069 DMAE_CMD_ENDIANITY_DW_SWAP |
3070 #endif
3071 (BP_PORT(bp) ? DMAE_CMD_PORT_1 :
3072 DMAE_CMD_PORT_0) |
3073 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3074 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
3075 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
3076 dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
3077 sizeof(struct dmae_command) *
3078 (loader_idx + 1)) >> 2;
3079 dmae->dst_addr_hi = 0;
3080 dmae->len = sizeof(struct dmae_command) >> 2;
3081 if (CHIP_IS_E1(bp))
3082 dmae->len--;
3083 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
3084 dmae->comp_addr_hi = 0;
3085 dmae->comp_val = 1;
3087 *stats_comp = 0;
3088 bnx2x_post_dmae(bp, dmae, loader_idx);
3090 } else if (bp->func_stx) {
3091 *stats_comp = 0;
3092 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
3096 static int bnx2x_stats_comp(struct bnx2x *bp)
3098 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3099 int cnt = 10;
3101 might_sleep();
3102 while (*stats_comp != DMAE_COMP_VAL) {
3103 if (!cnt) {
3104 BNX2X_ERR("timeout waiting for stats to finish\n");
3105 break;
3107 cnt--;
3108 msleep(1);
3110 return 1;
3114 * Statistics service functions
3117 static void bnx2x_stats_pmf_update(struct bnx2x *bp)
3119 struct dmae_command *dmae;
3120 u32 opcode;
3121 int loader_idx = PMF_DMAE_C(bp);
3122 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3124 /* sanity */
3125 if (!IS_E1HMF(bp) || !bp->port.pmf || !bp->port.port_stx) {
3126 BNX2X_ERR("BUG!\n");
3127 return;
3130 bp->executer_idx = 0;
3132 opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3133 DMAE_CMD_C_ENABLE |
3134 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3135 #ifdef __BIG_ENDIAN
3136 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3137 #else
3138 DMAE_CMD_ENDIANITY_DW_SWAP |
3139 #endif
3140 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3141 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3143 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3144 dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
3145 dmae->src_addr_lo = bp->port.port_stx >> 2;
3146 dmae->src_addr_hi = 0;
3147 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3148 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3149 dmae->len = DMAE_LEN32_RD_MAX;
3150 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3151 dmae->comp_addr_hi = 0;
3152 dmae->comp_val = 1;
3154 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3155 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3156 dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
3157 dmae->src_addr_hi = 0;
3158 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
3159 DMAE_LEN32_RD_MAX * 4);
3160 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
3161 DMAE_LEN32_RD_MAX * 4);
3162 dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX;
3163 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3164 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3165 dmae->comp_val = DMAE_COMP_VAL;
3167 *stats_comp = 0;
3168 bnx2x_hw_stats_post(bp);
3169 bnx2x_stats_comp(bp);
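/* Build the chain of DMAE commands that the PMF uses to collect port
 * statistics: copy the host port/function stats out to shared memory for
 * the MCP, read the active MAC (BMAC or EMAC) counters and the NIG
 * counters into host memory, and finish with a completion write.
 */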
3172 static void bnx2x_port_stats_init(struct bnx2x *bp)
3174 struct dmae_command *dmae;
3175 int port = BP_PORT(bp);
3176 int vn = BP_E1HVN(bp);
3177 u32 opcode;
3178 int loader_idx = PMF_DMAE_C(bp);
3179 u32 mac_addr;
3180 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3182 /* sanity */
3183 if (!bp->link_vars.link_up || !bp->port.pmf) {
3184 BNX2X_ERR("BUG!\n");
3185 return;
3188 bp->executer_idx = 0;
3190 /* MCP */
3191 opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3192 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3193 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3194 #ifdef __BIG_ENDIAN
3195 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3196 #else
3197 DMAE_CMD_ENDIANITY_DW_SWAP |
3198 #endif
3199 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3200 (vn << DMAE_CMD_E1HVN_SHIFT));
3202 if (bp->port.port_stx) {
3204 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3205 dmae->opcode = opcode;
3206 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3207 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3208 dmae->dst_addr_lo = bp->port.port_stx >> 2;
3209 dmae->dst_addr_hi = 0;
3210 dmae->len = sizeof(struct host_port_stats) >> 2;
3211 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3212 dmae->comp_addr_hi = 0;
3213 dmae->comp_val = 1;
3216 if (bp->func_stx) {
3218 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3219 dmae->opcode = opcode;
3220 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3221 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3222 dmae->dst_addr_lo = bp->func_stx >> 2;
3223 dmae->dst_addr_hi = 0;
3224 dmae->len = sizeof(struct host_func_stats) >> 2;
3225 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3226 dmae->comp_addr_hi = 0;
3227 dmae->comp_val = 1;
3230 /* MAC */
3231 opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3232 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3233 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3234 #ifdef __BIG_ENDIAN
3235 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3236 #else
3237 DMAE_CMD_ENDIANITY_DW_SWAP |
3238 #endif
3239 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3240 (vn << DMAE_CMD_E1HVN_SHIFT));
3242 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
3244 mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
3245 NIG_REG_INGRESS_BMAC0_MEM);
3247 /* BIGMAC_REGISTER_TX_STAT_GTPKT ..
3248 BIGMAC_REGISTER_TX_STAT_GTBYT */
3249 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3250 dmae->opcode = opcode;
3251 dmae->src_addr_lo = (mac_addr +
3252 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3253 dmae->src_addr_hi = 0;
3254 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3255 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3256 dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
3257 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3258 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3259 dmae->comp_addr_hi = 0;
3260 dmae->comp_val = 1;
3262 /* BIGMAC_REGISTER_RX_STAT_GR64 ..
3263 BIGMAC_REGISTER_RX_STAT_GRIPJ */
3264 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3265 dmae->opcode = opcode;
3266 dmae->src_addr_lo = (mac_addr +
3267 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3268 dmae->src_addr_hi = 0;
3269 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3270 offsetof(struct bmac_stats, rx_stat_gr64_lo));
3271 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3272 offsetof(struct bmac_stats, rx_stat_gr64_lo));
3273 dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
3274 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3275 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3276 dmae->comp_addr_hi = 0;
3277 dmae->comp_val = 1;
3279 } else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {
3281 mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);
3283 /* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
3284 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3285 dmae->opcode = opcode;
3286 dmae->src_addr_lo = (mac_addr +
3287 EMAC_REG_EMAC_RX_STAT_AC) >> 2;
3288 dmae->src_addr_hi = 0;
3289 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3290 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3291 dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
3292 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3293 dmae->comp_addr_hi = 0;
3294 dmae->comp_val = 1;
3296 /* EMAC_REG_EMAC_RX_STAT_AC_28 */
3297 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3298 dmae->opcode = opcode;
3299 dmae->src_addr_lo = (mac_addr +
3300 EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
3301 dmae->src_addr_hi = 0;
3302 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3303 offsetof(struct emac_stats, rx_stat_falsecarriererrors));
3304 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3305 offsetof(struct emac_stats, rx_stat_falsecarriererrors));
3306 dmae->len = 1;
3307 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3308 dmae->comp_addr_hi = 0;
3309 dmae->comp_val = 1;
3311 /* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
3312 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3313 dmae->opcode = opcode;
3314 dmae->src_addr_lo = (mac_addr +
3315 EMAC_REG_EMAC_TX_STAT_AC) >> 2;
3316 dmae->src_addr_hi = 0;
3317 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3318 offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
3319 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3320 offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
3321 dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
3322 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3323 dmae->comp_addr_hi = 0;
3324 dmae->comp_val = 1;
3327 /* NIG */
3328 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3329 dmae->opcode = opcode;
3330 dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
3331 NIG_REG_STAT0_BRB_DISCARD) >> 2;
3332 dmae->src_addr_hi = 0;
3333 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
3334 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
3335 dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;
3336 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3337 dmae->comp_addr_hi = 0;
3338 dmae->comp_val = 1;
3340 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3341 dmae->opcode = opcode;
3342 dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
3343 NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
3344 dmae->src_addr_hi = 0;
3345 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3346 offsetof(struct nig_stats, egress_mac_pkt0_lo));
3347 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3348 offsetof(struct nig_stats, egress_mac_pkt0_lo));
3349 dmae->len = (2*sizeof(u32)) >> 2;
3350 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3351 dmae->comp_addr_hi = 0;
3352 dmae->comp_val = 1;
3354 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3355 dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3356 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3357 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3358 #ifdef __BIG_ENDIAN
3359 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3360 #else
3361 DMAE_CMD_ENDIANITY_DW_SWAP |
3362 #endif
3363 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3364 (vn << DMAE_CMD_E1HVN_SHIFT));
3365 dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
3366 NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
3367 dmae->src_addr_hi = 0;
3368 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3369 offsetof(struct nig_stats, egress_mac_pkt1_lo));
3370 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3371 offsetof(struct nig_stats, egress_mac_pkt1_lo));
3372 dmae->len = (2*sizeof(u32)) >> 2;
3373 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3374 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3375 dmae->comp_val = DMAE_COMP_VAL;
3377 *stats_comp = 0;
3380 static void bnx2x_func_stats_init(struct bnx2x *bp)
3382 struct dmae_command *dmae = &bp->stats_dmae;
3383 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3385 /* sanity */
3386 if (!bp->func_stx) {
3387 BNX2X_ERR("BUG!\n");
3388 return;
3391 bp->executer_idx = 0;
3392 memset(dmae, 0, sizeof(struct dmae_command));
3394 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3395 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3396 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3397 #ifdef __BIG_ENDIAN
3398 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3399 #else
3400 DMAE_CMD_ENDIANITY_DW_SWAP |
3401 #endif
3402 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3403 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3404 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3405 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3406 dmae->dst_addr_lo = bp->func_stx >> 2;
3407 dmae->dst_addr_hi = 0;
3408 dmae->len = sizeof(struct host_func_stats) >> 2;
3409 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3410 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3411 dmae->comp_val = DMAE_COMP_VAL;
3413 *stats_comp = 0;
3416 static void bnx2x_stats_start(struct bnx2x *bp)
3418 if (bp->port.pmf)
3419 bnx2x_port_stats_init(bp);
3421 else if (bp->func_stx)
3422 bnx2x_func_stats_init(bp);
3424 bnx2x_hw_stats_post(bp);
3425 bnx2x_storm_stats_post(bp);
3428 static void bnx2x_stats_pmf_start(struct bnx2x *bp)
3430 bnx2x_stats_comp(bp);
3431 bnx2x_stats_pmf_update(bp);
3432 bnx2x_stats_start(bp);
3435 static void bnx2x_stats_restart(struct bnx2x *bp)
3437 bnx2x_stats_comp(bp);
3438 bnx2x_stats_start(bp);
3441 static void bnx2x_bmac_stats_update(struct bnx2x *bp)
3443 struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac_stats);
3444 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3445 struct regpair diff;
3447 UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
3448 UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
3449 UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
3450 UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
3451 UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
3452 UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
3453 UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
3454 UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
3455 UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffpauseframesreceived);
3456 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
3457 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
3458 UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
3459 UPDATE_STAT64(tx_stat_gt127,
3460 tx_stat_etherstatspkts65octetsto127octets);
3461 UPDATE_STAT64(tx_stat_gt255,
3462 tx_stat_etherstatspkts128octetsto255octets);
3463 UPDATE_STAT64(tx_stat_gt511,
3464 tx_stat_etherstatspkts256octetsto511octets);
3465 UPDATE_STAT64(tx_stat_gt1023,
3466 tx_stat_etherstatspkts512octetsto1023octets);
3467 UPDATE_STAT64(tx_stat_gt1518,
3468 tx_stat_etherstatspkts1024octetsto1522octets);
3469 UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047);
3470 UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095);
3471 UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216);
3472 UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383);
3473 UPDATE_STAT64(tx_stat_gterr,
3474 tx_stat_dot3statsinternalmactransmiterrors);
3475 UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl);
3478 static void bnx2x_emac_stats_update(struct bnx2x *bp)
3480 struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
3481 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3483 UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
3484 UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
3485 UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
3486 UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
3487 UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
3488 UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
3489 UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
3490 UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
3491 UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
3492 UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
3493 UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
3494 UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
3495 UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
3496 UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
3497 UPDATE_EXTEND_STAT(tx_stat_outxonsent);
3498 UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
3499 UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
3500 UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
3501 UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
3502 UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
3503 UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
3504 UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
3505 UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
3506 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
3507 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
3508 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
3509 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
3510 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
3511 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
3512 UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
3513 UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);
3516 static int bnx2x_hw_stats_update(struct bnx2x *bp)
3518 struct nig_stats *new = bnx2x_sp(bp, nig_stats);
3519 struct nig_stats *old = &(bp->port.old_nig_stats);
3520 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3521 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3522 struct regpair diff;
3524 if (bp->link_vars.mac_type == MAC_TYPE_BMAC)
3525 bnx2x_bmac_stats_update(bp);
3527 else if (bp->link_vars.mac_type == MAC_TYPE_EMAC)
3528 bnx2x_emac_stats_update(bp);
3530 else { /* unreached */
3531 BNX2X_ERR("stats updated by dmae but no MAC active\n");
3532 return -1;
3535 ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
3536 new->brb_discard - old->brb_discard);
3537 ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
3538 new->brb_truncate - old->brb_truncate);
3540 UPDATE_STAT64_NIG(egress_mac_pkt0,
3541 etherstatspkts1024octetsto1522octets);
3542 UPDATE_STAT64_NIG(egress_mac_pkt1, etherstatspktsover1522octets);
3544 memcpy(old, new, sizeof(struct nig_stats));
3546 memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
3547 sizeof(struct mac_stx));
3548 estats->brb_drop_hi = pstats->brb_drop_hi;
3549 estats->brb_drop_lo = pstats->brb_drop_lo;
3551 pstats->host_port_stats_start = ++pstats->host_port_stats_end;
3553 return 0;
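/* Fold the per-client statistics reported by the TSTORM and XSTORM into
 * the host function and ethernet stats.  Returns non-zero when the
 * storms have not yet caught up with the driver's stats_counter.
 */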
3556 static int bnx2x_storm_stats_update(struct bnx2x *bp)
3558 struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
3559 int cl_id = BP_CL_ID(bp);
3560 struct tstorm_per_port_stats *tport =
3561 &stats->tstorm_common.port_statistics;
3562 struct tstorm_per_client_stats *tclient =
3563 &stats->tstorm_common.client_statistics[cl_id];
3564 struct tstorm_per_client_stats *old_tclient = &bp->old_tclient;
3565 struct xstorm_per_client_stats *xclient =
3566 &stats->xstorm_common.client_statistics[cl_id];
3567 struct xstorm_per_client_stats *old_xclient = &bp->old_xclient;
3568 struct host_func_stats *fstats = bnx2x_sp(bp, func_stats);
3569 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3570 u32 diff;
3572 /* are storm stats valid? */
3573 if ((u16)(le16_to_cpu(tclient->stats_counter) + 1) !=
3574 bp->stats_counter) {
3575 DP(BNX2X_MSG_STATS, "stats not updated by tstorm"
3576 " tstorm counter (%d) != stats_counter (%d)\n",
3577 tclient->stats_counter, bp->stats_counter);
3578 return -1;
3580 if ((u16)(le16_to_cpu(xclient->stats_counter) + 1) !=
3581 bp->stats_counter) {
3582 DP(BNX2X_MSG_STATS, "stats not updated by xstorm"
3583 " xstorm counter (%d) != stats_counter (%d)\n",
3584 xclient->stats_counter, bp->stats_counter);
3585 return -2;
3588 fstats->total_bytes_received_hi =
3589 fstats->valid_bytes_received_hi =
3590 le32_to_cpu(tclient->total_rcv_bytes.hi);
3591 fstats->total_bytes_received_lo =
3592 fstats->valid_bytes_received_lo =
3593 le32_to_cpu(tclient->total_rcv_bytes.lo);
3595 estats->error_bytes_received_hi =
3596 le32_to_cpu(tclient->rcv_error_bytes.hi);
3597 estats->error_bytes_received_lo =
3598 le32_to_cpu(tclient->rcv_error_bytes.lo);
3599 ADD_64(estats->error_bytes_received_hi,
3600 estats->rx_stat_ifhcinbadoctets_hi,
3601 estats->error_bytes_received_lo,
3602 estats->rx_stat_ifhcinbadoctets_lo);
3604 ADD_64(fstats->total_bytes_received_hi,
3605 estats->error_bytes_received_hi,
3606 fstats->total_bytes_received_lo,
3607 estats->error_bytes_received_lo);
3609 UPDATE_EXTEND_TSTAT(rcv_unicast_pkts, total_unicast_packets_received);
3610 UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
3611 total_multicast_packets_received);
3612 UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
3613 total_broadcast_packets_received);
3615 fstats->total_bytes_transmitted_hi =
3616 le32_to_cpu(xclient->total_sent_bytes.hi);
3617 fstats->total_bytes_transmitted_lo =
3618 le32_to_cpu(xclient->total_sent_bytes.lo);
3620 UPDATE_EXTEND_XSTAT(unicast_pkts_sent,
3621 total_unicast_packets_transmitted);
3622 UPDATE_EXTEND_XSTAT(multicast_pkts_sent,
3623 total_multicast_packets_transmitted);
3624 UPDATE_EXTEND_XSTAT(broadcast_pkts_sent,
3625 total_broadcast_packets_transmitted);
3627 memcpy(estats, &(fstats->total_bytes_received_hi),
3628 sizeof(struct host_func_stats) - 2*sizeof(u32));
3630 estats->mac_filter_discard = le32_to_cpu(tport->mac_filter_discard);
3631 estats->xxoverflow_discard = le32_to_cpu(tport->xxoverflow_discard);
3632 estats->brb_truncate_discard =
3633 le32_to_cpu(tport->brb_truncate_discard);
3634 estats->mac_discard = le32_to_cpu(tport->mac_discard);
3636 old_tclient->rcv_unicast_bytes.hi =
3637 le32_to_cpu(tclient->rcv_unicast_bytes.hi);
3638 old_tclient->rcv_unicast_bytes.lo =
3639 le32_to_cpu(tclient->rcv_unicast_bytes.lo);
3640 old_tclient->rcv_broadcast_bytes.hi =
3641 le32_to_cpu(tclient->rcv_broadcast_bytes.hi);
3642 old_tclient->rcv_broadcast_bytes.lo =
3643 le32_to_cpu(tclient->rcv_broadcast_bytes.lo);
3644 old_tclient->rcv_multicast_bytes.hi =
3645 le32_to_cpu(tclient->rcv_multicast_bytes.hi);
3646 old_tclient->rcv_multicast_bytes.lo =
3647 le32_to_cpu(tclient->rcv_multicast_bytes.lo);
3648 old_tclient->total_rcv_pkts = le32_to_cpu(tclient->total_rcv_pkts);
3650 old_tclient->checksum_discard = le32_to_cpu(tclient->checksum_discard);
3651 old_tclient->packets_too_big_discard =
3652 le32_to_cpu(tclient->packets_too_big_discard);
3653 estats->no_buff_discard =
3654 old_tclient->no_buff_discard = le32_to_cpu(tclient->no_buff_discard);
3655 old_tclient->ttl0_discard = le32_to_cpu(tclient->ttl0_discard);
3657 old_xclient->total_sent_pkts = le32_to_cpu(xclient->total_sent_pkts);
3658 old_xclient->unicast_bytes_sent.hi =
3659 le32_to_cpu(xclient->unicast_bytes_sent.hi);
3660 old_xclient->unicast_bytes_sent.lo =
3661 le32_to_cpu(xclient->unicast_bytes_sent.lo);
3662 old_xclient->multicast_bytes_sent.hi =
3663 le32_to_cpu(xclient->multicast_bytes_sent.hi);
3664 old_xclient->multicast_bytes_sent.lo =
3665 le32_to_cpu(xclient->multicast_bytes_sent.lo);
3666 old_xclient->broadcast_bytes_sent.hi =
3667 le32_to_cpu(xclient->broadcast_bytes_sent.hi);
3668 old_xclient->broadcast_bytes_sent.lo =
3669 le32_to_cpu(xclient->broadcast_bytes_sent.lo);
3671 fstats->host_func_stats_start = ++fstats->host_func_stats_end;
3673 return 0;
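/* Convert the accumulated driver statistics into the standard
 * net_device_stats counters exposed to the network stack.
 */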
3676 static void bnx2x_net_stats_update(struct bnx2x *bp)
3678 struct tstorm_per_client_stats *old_tclient = &bp->old_tclient;
3679 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3680 struct net_device_stats *nstats = &bp->dev->stats;
3682 nstats->rx_packets =
3683 bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
3684 bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
3685 bnx2x_hilo(&estats->total_broadcast_packets_received_hi);
3687 nstats->tx_packets =
3688 bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
3689 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
3690 bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);
3692 nstats->rx_bytes = bnx2x_hilo(&estats->valid_bytes_received_hi);
3694 nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);
3696 nstats->rx_dropped = old_tclient->checksum_discard +
3697 estats->mac_discard;
3698 nstats->tx_dropped = 0;
3700 nstats->multicast =
3701 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi);
3703 nstats->collisions =
3704 estats->tx_stat_dot3statssinglecollisionframes_lo +
3705 estats->tx_stat_dot3statsmultiplecollisionframes_lo +
3706 estats->tx_stat_dot3statslatecollisions_lo +
3707 estats->tx_stat_dot3statsexcessivecollisions_lo;
3709 estats->jabber_packets_received =
3710 old_tclient->packets_too_big_discard +
3711 estats->rx_stat_dot3statsframestoolong_lo;
3713 nstats->rx_length_errors =
3714 estats->rx_stat_etherstatsundersizepkts_lo +
3715 estats->jabber_packets_received;
3716 nstats->rx_over_errors = estats->brb_drop_lo + estats->brb_truncate_lo;
3717 nstats->rx_crc_errors = estats->rx_stat_dot3statsfcserrors_lo;
3718 nstats->rx_frame_errors = estats->rx_stat_dot3statsalignmenterrors_lo;
3719 nstats->rx_fifo_errors = old_tclient->no_buff_discard;
3720 nstats->rx_missed_errors = estats->xxoverflow_discard;
3722 nstats->rx_errors = nstats->rx_length_errors +
3723 nstats->rx_over_errors +
3724 nstats->rx_crc_errors +
3725 nstats->rx_frame_errors +
3726 nstats->rx_fifo_errors +
3727 nstats->rx_missed_errors;
3729 nstats->tx_aborted_errors =
3730 estats->tx_stat_dot3statslatecollisions_lo +
3731 estats->tx_stat_dot3statsexcessivecollisions_lo;
3732 nstats->tx_carrier_errors = estats->rx_stat_falsecarriererrors_lo;
3733 nstats->tx_fifo_errors = 0;
3734 nstats->tx_heartbeat_errors = 0;
3735 nstats->tx_window_errors = 0;
3737 nstats->tx_errors = nstats->tx_aborted_errors +
3738 nstats->tx_carrier_errors;
3741 static void bnx2x_stats_update(struct bnx2x *bp)
3743 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3744 int update = 0;
3746 if (*stats_comp != DMAE_COMP_VAL)
3747 return;
3749 if (bp->port.pmf)
3750 update = (bnx2x_hw_stats_update(bp) == 0);
3752 update |= (bnx2x_storm_stats_update(bp) == 0);
3754 if (update)
3755 bnx2x_net_stats_update(bp);
3757 else {
3758 if (bp->stats_pending) {
3759 bp->stats_pending++;
3760 if (bp->stats_pending == 3) {
3761 BNX2X_ERR("stats not updated 3 times in a row\n");
3762 bnx2x_panic();
3763 return;
3768 if (bp->msglevel & NETIF_MSG_TIMER) {
3769 struct tstorm_per_client_stats *old_tclient = &bp->old_tclient;
3770 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3771 struct net_device_stats *nstats = &bp->dev->stats;
3772 int i;
3774 printk(KERN_DEBUG "%s:\n", bp->dev->name);
3775 printk(KERN_DEBUG " tx avail (%4x) tx hc idx (%x)"
3776 " tx pkt (%lx)\n",
3777 bnx2x_tx_avail(bp->fp),
3778 le16_to_cpu(*bp->fp->tx_cons_sb), nstats->tx_packets);
3779 printk(KERN_DEBUG " rx usage (%4x) rx hc idx (%x)"
3780 " rx pkt (%lx)\n",
3781 (u16)(le16_to_cpu(*bp->fp->rx_cons_sb) -
3782 bp->fp->rx_comp_cons),
3783 le16_to_cpu(*bp->fp->rx_cons_sb), nstats->rx_packets);
3784 printk(KERN_DEBUG " %s (Xoff events %u) brb drops %u\n",
3785 netif_queue_stopped(bp->dev) ? "Xoff" : "Xon",
3786 estats->driver_xoff, estats->brb_drop_lo);
3787 printk(KERN_DEBUG "tstats: checksum_discard %u "
3788 "packets_too_big_discard %u no_buff_discard %u "
3789 "mac_discard %u mac_filter_discard %u "
3790 "xxovrflow_discard %u brb_truncate_discard %u "
3791 "ttl0_discard %u\n",
3792 old_tclient->checksum_discard,
3793 old_tclient->packets_too_big_discard,
3794 old_tclient->no_buff_discard, estats->mac_discard,
3795 estats->mac_filter_discard, estats->xxoverflow_discard,
3796 estats->brb_truncate_discard,
3797 old_tclient->ttl0_discard);
3799 for_each_queue(bp, i) {
3800 printk(KERN_DEBUG "[%d]: %lu\t%lu\t%lu\n", i,
3801 bnx2x_fp(bp, i, tx_pkt),
3802 bnx2x_fp(bp, i, rx_pkt),
3803 bnx2x_fp(bp, i, rx_calls));
3807 bnx2x_hw_stats_post(bp);
3808 bnx2x_storm_stats_post(bp);
3811 static void bnx2x_port_stats_stop(struct bnx2x *bp)
3813 struct dmae_command *dmae;
3814 u32 opcode;
3815 int loader_idx = PMF_DMAE_C(bp);
3816 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3818 bp->executer_idx = 0;
3820 opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3821 DMAE_CMD_C_ENABLE |
3822 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3823 #ifdef __BIG_ENDIAN
3824 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3825 #else
3826 DMAE_CMD_ENDIANITY_DW_SWAP |
3827 #endif
3828 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3829 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3831 if (bp->port.port_stx) {
3833 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3834 if (bp->func_stx)
3835 dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
3836 else
3837 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3838 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3839 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3840 dmae->dst_addr_lo = bp->port.port_stx >> 2;
3841 dmae->dst_addr_hi = 0;
3842 dmae->len = sizeof(struct host_port_stats) >> 2;
3843 if (bp->func_stx) {
3844 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3845 dmae->comp_addr_hi = 0;
3846 dmae->comp_val = 1;
3847 } else {
3848 dmae->comp_addr_lo =
3849 U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3850 dmae->comp_addr_hi =
3851 U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3852 dmae->comp_val = DMAE_COMP_VAL;
3854 *stats_comp = 0;
3858 if (bp->func_stx) {
3860 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3861 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3862 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3863 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3864 dmae->dst_addr_lo = bp->func_stx >> 2;
3865 dmae->dst_addr_hi = 0;
3866 dmae->len = sizeof(struct host_func_stats) >> 2;
3867 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3868 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3869 dmae->comp_val = DMAE_COMP_VAL;
3871 *stats_comp = 0;
3875 static void bnx2x_stats_stop(struct bnx2x *bp)
3877 int update = 0;
3879 bnx2x_stats_comp(bp);
3881 if (bp->port.pmf)
3882 update = (bnx2x_hw_stats_update(bp) == 0);
3884 update |= (bnx2x_storm_stats_update(bp) == 0);
3886 if (update) {
3887 bnx2x_net_stats_update(bp);
3889 if (bp->port.pmf)
3890 bnx2x_port_stats_stop(bp);
3892 bnx2x_hw_stats_post(bp);
3893 bnx2x_stats_comp(bp);
3897 static void bnx2x_stats_do_nothing(struct bnx2x *bp)
3901 static const struct {
3902 void (*action)(struct bnx2x *bp);
3903 enum bnx2x_stats_state next_state;
3904 } bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
3905 /* state event */
3907 /* DISABLED PMF */ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
3908 /* LINK_UP */ {bnx2x_stats_start, STATS_STATE_ENABLED},
3909 /* UPDATE */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
3910 /* STOP */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
3913 /* ENABLED PMF */ {bnx2x_stats_pmf_start, STATS_STATE_ENABLED},
3914 /* LINK_UP */ {bnx2x_stats_restart, STATS_STATE_ENABLED},
3915 /* UPDATE */ {bnx2x_stats_update, STATS_STATE_ENABLED},
3916 /* STOP */ {bnx2x_stats_stop, STATS_STATE_DISABLED}
3920 static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
3922 enum bnx2x_stats_state state = bp->stats_state;
3924 bnx2x_stats_stm[state][event].action(bp);
3925 bp->stats_state = bnx2x_stats_stm[state][event].next_state;
3927 if ((event != STATS_EVENT_UPDATE) || (bp->msglevel & NETIF_MSG_TIMER))
3928 DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
3929 state, event, bp->stats_state);
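/*
 * Note: bnx2x_stats_handle() simply indexes the [state][event] table above,
 * so e.g. an UPDATE event while ENABLED runs bnx2x_stats_update() and stays
 * in STATS_STATE_ENABLED, while a STOP event runs bnx2x_stats_stop() and
 * moves to STATS_STATE_DISABLED.
 */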
3932 static void bnx2x_timer(unsigned long data)
3934 struct bnx2x *bp = (struct bnx2x *) data;
3936 if (!netif_running(bp->dev))
3937 return;
3939 if (atomic_read(&bp->intr_sem) != 0)
3940 goto timer_restart;
3942 if (poll) {
3943 struct bnx2x_fastpath *fp = &bp->fp[0];
3944 int rc;
3946 bnx2x_tx_int(fp, 1000);
3947 rc = bnx2x_rx_int(fp, 1000);
3950 if (!BP_NOMCP(bp)) {
3951 int func = BP_FUNC(bp);
3952 u32 drv_pulse;
3953 u32 mcp_pulse;
3955 ++bp->fw_drv_pulse_wr_seq;
3956 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
3957 /* TBD - add SYSTEM_TIME */
3958 drv_pulse = bp->fw_drv_pulse_wr_seq;
3959 SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);
3961 mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
3962 MCP_PULSE_SEQ_MASK);
3963 /* The delta between driver pulse and mcp response
3964 * should be 1 (before mcp response) or 0 (after mcp response)
3965 */
3966 if ((drv_pulse != mcp_pulse) &&
3967 (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
3968 /* someone lost a heartbeat... */
3969 BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
3970 drv_pulse, mcp_pulse);
3974 if ((bp->state == BNX2X_STATE_OPEN) ||
3975 (bp->state == BNX2X_STATE_DISABLED))
3976 bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
3978 timer_restart:
3979 mod_timer(&bp->timer, jiffies + bp->current_interval);
3982 /* end of Statistics */
3984 /* nic init */
3987 /* nic init service functions */
3990 static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
3992 int port = BP_PORT(bp);
3994 bnx2x_init_fill(bp, BAR_USTRORM_INTMEM +
3995 USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
3996 sizeof(struct ustorm_status_block)/4);
3997 bnx2x_init_fill(bp, BAR_CSTRORM_INTMEM +
3998 CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
3999 sizeof(struct cstorm_status_block)/4);
4002 static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
4003 dma_addr_t mapping, int sb_id)
4005 int port = BP_PORT(bp);
4006 int func = BP_FUNC(bp);
4007 int index;
4008 u64 section;
4010 /* USTORM */
4011 section = ((u64)mapping) + offsetof(struct host_status_block,
4012 u_status_block);
4013 sb->u_status_block.status_block_id = sb_id;
4015 REG_WR(bp, BAR_USTRORM_INTMEM +
4016 USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
4017 REG_WR(bp, BAR_USTRORM_INTMEM +
4018 ((USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
4019 U64_HI(section));
4020 REG_WR8(bp, BAR_USTRORM_INTMEM + FP_USB_FUNC_OFF +
4021 USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);
4023 for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
4024 REG_WR16(bp, BAR_USTRORM_INTMEM +
4025 USTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);
4027 /* CSTORM */
4028 section = ((u64)mapping) + offsetof(struct host_status_block,
4029 c_status_block);
4030 sb->c_status_block.status_block_id = sb_id;
4032 REG_WR(bp, BAR_CSTRORM_INTMEM +
4033 CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
4034 REG_WR(bp, BAR_CSTRORM_INTMEM +
4035 ((CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
4036 U64_HI(section));
4037 REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
4038 CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);
4040 for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
4041 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4042 CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);
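/* all status block indices start out with host coalescing disabled here;
 * bnx2x_update_coalesce() later re-enables the Rx/Tx CQ indices and
 * programs their timeouts */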
4044 bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4047 static void bnx2x_zero_def_sb(struct bnx2x *bp)
4049 int func = BP_FUNC(bp);
4051 bnx2x_init_fill(bp, BAR_USTRORM_INTMEM +
4052 USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4053 sizeof(struct ustorm_def_status_block)/4);
4054 bnx2x_init_fill(bp, BAR_CSTRORM_INTMEM +
4055 CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4056 sizeof(struct cstorm_def_status_block)/4);
4057 bnx2x_init_fill(bp, BAR_XSTRORM_INTMEM +
4058 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4059 sizeof(struct xstorm_def_status_block)/4);
4060 bnx2x_init_fill(bp, BAR_TSTRORM_INTMEM +
4061 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4062 sizeof(struct tstorm_def_status_block)/4);
4065 static void bnx2x_init_def_sb(struct bnx2x *bp,
4066 struct host_def_status_block *def_sb,
4067 dma_addr_t mapping, int sb_id)
4069 int port = BP_PORT(bp);
4070 int func = BP_FUNC(bp);
4071 int index, val, reg_offset;
4072 u64 section;
4074 /* ATTN */
4075 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4076 atten_status_block);
4077 def_sb->atten_status_block.status_block_id = sb_id;
4079 bp->attn_state = 0;
4081 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
4082 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
4084 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
4085 bp->attn_group[index].sig[0] = REG_RD(bp,
4086 reg_offset + 0x10*index);
4087 bp->attn_group[index].sig[1] = REG_RD(bp,
4088 reg_offset + 0x4 + 0x10*index);
4089 bp->attn_group[index].sig[2] = REG_RD(bp,
4090 reg_offset + 0x8 + 0x10*index);
4091 bp->attn_group[index].sig[3] = REG_RD(bp,
4092 reg_offset + 0xc + 0x10*index);
4095 reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
4096 HC_REG_ATTN_MSG0_ADDR_L);
4098 REG_WR(bp, reg_offset, U64_LO(section));
4099 REG_WR(bp, reg_offset + 4, U64_HI(section));
4101 reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);
4103 val = REG_RD(bp, reg_offset);
4104 val |= sb_id;
4105 REG_WR(bp, reg_offset, val);
4107 /* USTORM */
4108 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4109 u_def_status_block);
4110 def_sb->u_def_status_block.status_block_id = sb_id;
4112 REG_WR(bp, BAR_USTRORM_INTMEM +
4113 USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4114 REG_WR(bp, BAR_USTRORM_INTMEM +
4115 ((USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4116 U64_HI(section));
4117 REG_WR8(bp, BAR_USTRORM_INTMEM + DEF_USB_FUNC_OFF +
4118 USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4120 for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
4121 REG_WR16(bp, BAR_USTRORM_INTMEM +
4122 USTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4124 /* CSTORM */
4125 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4126 c_def_status_block);
4127 def_sb->c_def_status_block.status_block_id = sb_id;
4129 REG_WR(bp, BAR_CSTRORM_INTMEM +
4130 CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4131 REG_WR(bp, BAR_CSTRORM_INTMEM +
4132 ((CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4133 U64_HI(section));
4134 REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
4135 CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4137 for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
4138 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4139 CSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4141 /* TSTORM */
4142 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4143 t_def_status_block);
4144 def_sb->t_def_status_block.status_block_id = sb_id;
4146 REG_WR(bp, BAR_TSTRORM_INTMEM +
4147 TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4148 REG_WR(bp, BAR_TSTRORM_INTMEM +
4149 ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4150 U64_HI(section));
4151 REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
4152 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4154 for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
4155 REG_WR16(bp, BAR_TSTRORM_INTMEM +
4156 TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4158 /* XSTORM */
4159 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4160 x_def_status_block);
4161 def_sb->x_def_status_block.status_block_id = sb_id;
4163 REG_WR(bp, BAR_XSTRORM_INTMEM +
4164 XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4165 REG_WR(bp, BAR_XSTRORM_INTMEM +
4166 ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4167 U64_HI(section));
4168 REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
4169 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4171 for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
4172 REG_WR16(bp, BAR_XSTRORM_INTMEM +
4173 XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4175 bp->stats_pending = 0;
4176 bp->set_mac_pending = 0;
4178 bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4181 static void bnx2x_update_coalesce(struct bnx2x *bp)
4183 int port = BP_PORT(bp);
4184 int i;
4186 for_each_queue(bp, i) {
4187 int sb_id = bp->fp[i].sb_id;
4189 /* HC_INDEX_U_ETH_RX_CQ_CONS */
4190 REG_WR8(bp, BAR_USTRORM_INTMEM +
4191 USTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
4192 U_SB_ETH_RX_CQ_INDEX),
4193 bp->rx_ticks/12);
4194 REG_WR16(bp, BAR_USTRORM_INTMEM +
4195 USTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
4196 U_SB_ETH_RX_CQ_INDEX),
4197 bp->rx_ticks ? 0 : 1);
4198 REG_WR16(bp, BAR_USTRORM_INTMEM +
4199 USTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
4200 U_SB_ETH_RX_BD_INDEX),
4201 bp->rx_ticks ? 0 : 1);
4203 /* HC_INDEX_C_ETH_TX_CQ_CONS */
4204 REG_WR8(bp, BAR_CSTRORM_INTMEM +
4205 CSTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
4206 C_SB_ETH_TX_CQ_INDEX),
4207 bp->tx_ticks/12);
4208 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4209 CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
4210 C_SB_ETH_TX_CQ_INDEX),
4211 bp->tx_ticks ? 0 : 1);
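/*
 * Note: bp->rx_ticks/bp->tx_ticks above come from the ethtool coalescing
 * settings (microseconds); the /12 scaling presumably matches the HC timeout
 * register granularity, and a value of 0 disables coalescing for that index
 * (the corresponding *_HC_DISABLE offset is written with 1 in that case).
 */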
4215 static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
4216 struct bnx2x_fastpath *fp, int last)
4218 int i;
4220 for (i = 0; i < last; i++) {
4221 struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]);
4222 struct sk_buff *skb = rx_buf->skb;
4224 if (skb == NULL) {
4225 DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
4226 continue;
4229 if (fp->tpa_state[i] == BNX2X_TPA_START)
4230 pci_unmap_single(bp->pdev,
4231 pci_unmap_addr(rx_buf, mapping),
4232 bp->rx_buf_size,
4233 PCI_DMA_FROMDEVICE);
4235 dev_kfree_skb(skb);
4236 rx_buf->skb = NULL;
4240 static void bnx2x_init_rx_rings(struct bnx2x *bp)
4242 int func = BP_FUNC(bp);
4243 int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
4244 ETH_MAX_AGGREGATION_QUEUES_E1H;
4245 u16 ring_prod, cqe_ring_prod;
4246 int i, j;
4248 bp->rx_buf_size = bp->dev->mtu;
4249 bp->rx_buf_size += bp->rx_offset + ETH_OVREHEAD +
4250 BCM_RX_ETH_PAYLOAD_ALIGN;
4252 if (bp->flags & TPA_ENABLE_FLAG) {
4253 DP(NETIF_MSG_IFUP,
4254 "rx_buf_size %d effective_mtu %d\n",
4255 bp->rx_buf_size, bp->dev->mtu + ETH_OVREHEAD);
4257 for_each_queue(bp, j) {
4258 struct bnx2x_fastpath *fp = &bp->fp[j];
4260 for (i = 0; i < max_agg_queues; i++) {
4261 fp->tpa_pool[i].skb =
4262 netdev_alloc_skb(bp->dev, bp->rx_buf_size);
4263 if (!fp->tpa_pool[i].skb) {
4264 BNX2X_ERR("Failed to allocate TPA "
4265 "skb pool for queue[%d] - "
4266 "disabling TPA on this "
4267 "queue!\n", j);
4268 bnx2x_free_tpa_pool(bp, fp, i);
4269 fp->disable_tpa = 1;
4270 break;
4272 pci_unmap_addr_set((struct sw_rx_bd *)
4273 &fp->tpa_pool[i],
4274 mapping, 0);
4275 fp->tpa_state[i] = BNX2X_TPA_STOP;
4280 for_each_queue(bp, j) {
4281 struct bnx2x_fastpath *fp = &bp->fp[j];
4283 fp->rx_bd_cons = 0;
4284 fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
4285 fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX;
4287 /* "next page" elements initialization */
4288 /* SGE ring */
4289 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
4290 struct eth_rx_sge *sge;
4292 sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
4293 sge->addr_hi =
4294 cpu_to_le32(U64_HI(fp->rx_sge_mapping +
4295 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4296 sge->addr_lo =
4297 cpu_to_le32(U64_LO(fp->rx_sge_mapping +
4298 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4301 bnx2x_init_sge_ring_bit_mask(fp);
4303 /* RX BD ring */
4304 for (i = 1; i <= NUM_RX_RINGS; i++) {
4305 struct eth_rx_bd *rx_bd;
4307 rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
4308 rx_bd->addr_hi =
4309 cpu_to_le32(U64_HI(fp->rx_desc_mapping +
4310 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
4311 rx_bd->addr_lo =
4312 cpu_to_le32(U64_LO(fp->rx_desc_mapping +
4313 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
4316 /* CQ ring */
4317 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
4318 struct eth_rx_cqe_next_page *nextpg;
4320 nextpg = (struct eth_rx_cqe_next_page *)
4321 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
4322 nextpg->addr_hi =
4323 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
4324 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4325 nextpg->addr_lo =
4326 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
4327 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4330 /* Allocate SGEs and initialize the ring elements */
4331 for (i = 0, ring_prod = 0;
4332 i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
4334 if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
4335 BNX2X_ERR("was only able to allocate "
4336 "%d rx sges\n", i);
4337 BNX2X_ERR("disabling TPA for queue[%d]\n", j);
4338 /* Cleanup already allocated elements */
4339 bnx2x_free_rx_sge_range(bp, fp, ring_prod);
4340 bnx2x_free_tpa_pool(bp, fp, max_agg_queues);
4341 fp->disable_tpa = 1;
4342 ring_prod = 0;
4343 break;
4345 ring_prod = NEXT_SGE_IDX(ring_prod);
4347 fp->rx_sge_prod = ring_prod;
4349 /* Allocate BDs and initialize BD ring */
4350 fp->rx_comp_cons = 0;
4351 cqe_ring_prod = ring_prod = 0;
4352 for (i = 0; i < bp->rx_ring_size; i++) {
4353 if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
4354 BNX2X_ERR("was only able to allocate "
4355 "%d rx skbs\n", i);
4356 bp->eth_stats.rx_skb_alloc_failed++;
4357 break;
4359 ring_prod = NEXT_RX_IDX(ring_prod);
4360 cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
4361 WARN_ON(ring_prod <= i);
4364 fp->rx_bd_prod = ring_prod;
4365 /* must not have more available CQEs than BDs */
4366 fp->rx_comp_prod = min((u16)(NUM_RCQ_RINGS*RCQ_DESC_CNT),
4367 cqe_ring_prod);
4368 fp->rx_pkt = fp->rx_calls = 0;
4370 /* Warning!
4371 * this will generate an interrupt (to the TSTORM)
4372 * must only be done after chip is initialized
4374 bnx2x_update_rx_prod(bp, fp, ring_prod, fp->rx_comp_prod,
4375 fp->rx_sge_prod);
4376 if (j != 0)
4377 continue;
4379 REG_WR(bp, BAR_USTRORM_INTMEM +
4380 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
4381 U64_LO(fp->rx_comp_mapping));
4382 REG_WR(bp, BAR_USTRORM_INTMEM +
4383 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
4384 U64_HI(fp->rx_comp_mapping));
4388 static void bnx2x_init_tx_ring(struct bnx2x *bp)
4390 int i, j;
4392 for_each_queue(bp, j) {
4393 struct bnx2x_fastpath *fp = &bp->fp[j];
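/* chain the Tx BD pages: the last BD of each page points at the start of
 * the next page (wrapping back to the first page after NUM_TX_RINGS) */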
4395 for (i = 1; i <= NUM_TX_RINGS; i++) {
4396 struct eth_tx_bd *tx_bd =
4397 &fp->tx_desc_ring[TX_DESC_CNT * i - 1];
4399 tx_bd->addr_hi =
4400 cpu_to_le32(U64_HI(fp->tx_desc_mapping +
4401 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
4402 tx_bd->addr_lo =
4403 cpu_to_le32(U64_LO(fp->tx_desc_mapping +
4404 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
4407 fp->tx_pkt_prod = 0;
4408 fp->tx_pkt_cons = 0;
4409 fp->tx_bd_prod = 0;
4410 fp->tx_bd_cons = 0;
4411 fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
4412 fp->tx_pkt = 0;
4416 static void bnx2x_init_sp_ring(struct bnx2x *bp)
4418 int func = BP_FUNC(bp);
4420 spin_lock_init(&bp->spq_lock);
4422 bp->spq_left = MAX_SPQ_PENDING;
4423 bp->spq_prod_idx = 0;
4424 bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
4425 bp->spq_prod_bd = bp->spq;
4426 bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
4428 REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
4429 U64_LO(bp->spq_mapping));
4430 REG_WR(bp,
4431 XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
4432 U64_HI(bp->spq_mapping));
4434 REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
4435 bp->spq_prod_idx);
4438 static void bnx2x_init_context(struct bnx2x *bp)
4440 int i;
4442 for_each_queue(bp, i) {
4443 struct eth_context *context = bnx2x_sp(bp, context[i].eth);
4444 struct bnx2x_fastpath *fp = &bp->fp[i];
4445 u8 sb_id = FP_SB_ID(fp);
4447 context->xstorm_st_context.tx_bd_page_base_hi =
4448 U64_HI(fp->tx_desc_mapping);
4449 context->xstorm_st_context.tx_bd_page_base_lo =
4450 U64_LO(fp->tx_desc_mapping);
4451 context->xstorm_st_context.db_data_addr_hi =
4452 U64_HI(fp->tx_prods_mapping);
4453 context->xstorm_st_context.db_data_addr_lo =
4454 U64_LO(fp->tx_prods_mapping);
4455 context->xstorm_st_context.statistics_data = (BP_CL_ID(bp) |
4456 XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);
4458 context->ustorm_st_context.common.sb_index_numbers =
4459 BNX2X_RX_SB_INDEX_NUM;
4460 context->ustorm_st_context.common.clientId = FP_CL_ID(fp);
4461 context->ustorm_st_context.common.status_block_id = sb_id;
4462 context->ustorm_st_context.common.flags =
4463 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT;
4464 context->ustorm_st_context.common.mc_alignment_size =
4465 BCM_RX_ETH_PAYLOAD_ALIGN;
4466 context->ustorm_st_context.common.bd_buff_size =
4467 bp->rx_buf_size;
4468 context->ustorm_st_context.common.bd_page_base_hi =
4469 U64_HI(fp->rx_desc_mapping);
4470 context->ustorm_st_context.common.bd_page_base_lo =
4471 U64_LO(fp->rx_desc_mapping);
4472 if (!fp->disable_tpa) {
4473 context->ustorm_st_context.common.flags |=
4474 (USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA |
4475 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_SGE_RING);
4476 context->ustorm_st_context.common.sge_buff_size =
4477 (u16)(BCM_PAGE_SIZE*PAGES_PER_SGE);
4478 context->ustorm_st_context.common.sge_page_base_hi =
4479 U64_HI(fp->rx_sge_mapping);
4480 context->ustorm_st_context.common.sge_page_base_lo =
4481 U64_LO(fp->rx_sge_mapping);
4484 context->cstorm_st_context.sb_index_number =
4485 C_SB_ETH_TX_CQ_INDEX;
4486 context->cstorm_st_context.status_block_id = sb_id;
4488 context->xstorm_ag_context.cdu_reserved =
4489 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
4490 CDU_REGION_NUMBER_XCM_AG,
4491 ETH_CONNECTION_TYPE);
4492 context->ustorm_ag_context.cdu_usage =
4493 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
4494 CDU_REGION_NUMBER_UCM_AG,
4495 ETH_CONNECTION_TYPE);
4499 static void bnx2x_init_ind_table(struct bnx2x *bp)
4501 int port = BP_PORT(bp);
4502 int i;
4504 if (!is_multi(bp))
4505 return;
4507 DP(NETIF_MSG_IFUP, "Initializing indirection table\n");
4508 for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
4509 REG_WR8(bp, BAR_TSTRORM_INTMEM +
4510 TSTORM_INDIRECTION_TABLE_OFFSET(port) + i,
4511 i % bp->num_queues);
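/* i.e. the RSS hash buckets are spread round-robin over the active queues */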
4513 REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
4516 static void bnx2x_set_client_config(struct bnx2x *bp)
4518 struct tstorm_eth_client_config tstorm_client = {0};
4519 int port = BP_PORT(bp);
4520 int i;
4522 tstorm_client.mtu = bp->dev->mtu + ETH_OVREHEAD;
4523 tstorm_client.statistics_counter_id = BP_CL_ID(bp);
4524 tstorm_client.config_flags =
4525 TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE;
4526 #ifdef BCM_VLAN
4527 if (bp->rx_mode && bp->vlgrp) {
4528 tstorm_client.config_flags |=
4529 TSTORM_ETH_CLIENT_CONFIG_VLAN_REMOVAL_ENABLE;
4530 DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
4532 #endif
4534 if (bp->flags & TPA_ENABLE_FLAG) {
4535 tstorm_client.max_sges_for_packet =
4536 BCM_PAGE_ALIGN(tstorm_client.mtu) >> BCM_PAGE_SHIFT;
4537 tstorm_client.max_sges_for_packet =
4538 ((tstorm_client.max_sges_for_packet +
4539 PAGES_PER_SGE - 1) & (~(PAGES_PER_SGE - 1))) >>
4540 PAGES_PER_SGE_SHIFT;
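/* i.e. the number of pages needed for an MTU sized frame, rounded up to a
 * whole number of SGE elements (each SGE element spans PAGES_PER_SGE pages) */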
4542 tstorm_client.config_flags |=
4543 TSTORM_ETH_CLIENT_CONFIG_ENABLE_SGE_RING;
4546 for_each_queue(bp, i) {
4547 REG_WR(bp, BAR_TSTRORM_INTMEM +
4548 TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
4549 ((u32 *)&tstorm_client)[0]);
4550 REG_WR(bp, BAR_TSTRORM_INTMEM +
4551 TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
4552 ((u32 *)&tstorm_client)[1]);
4555 DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
4556 ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
4559 static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
4561 struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
4562 int mode = bp->rx_mode;
4563 int mask = (1 << BP_L_ID(bp));
4564 int func = BP_FUNC(bp);
4565 int i;
4567 DP(NETIF_MSG_IFUP, "rx mode %d mask 0x%x\n", mode, mask);
4569 switch (mode) {
4570 case BNX2X_RX_MODE_NONE: /* no Rx */
4571 tstorm_mac_filter.ucast_drop_all = mask;
4572 tstorm_mac_filter.mcast_drop_all = mask;
4573 tstorm_mac_filter.bcast_drop_all = mask;
4574 break;
4575 case BNX2X_RX_MODE_NORMAL:
4576 tstorm_mac_filter.bcast_accept_all = mask;
4577 break;
4578 case BNX2X_RX_MODE_ALLMULTI:
4579 tstorm_mac_filter.mcast_accept_all = mask;
4580 tstorm_mac_filter.bcast_accept_all = mask;
4581 break;
4582 case BNX2X_RX_MODE_PROMISC:
4583 tstorm_mac_filter.ucast_accept_all = mask;
4584 tstorm_mac_filter.mcast_accept_all = mask;
4585 tstorm_mac_filter.bcast_accept_all = mask;
4586 break;
4587 default:
4588 BNX2X_ERR("BAD rx mode (%d)\n", mode);
4589 break;
4592 for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
4593 REG_WR(bp, BAR_TSTRORM_INTMEM +
4594 TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
4595 ((u32 *)&tstorm_mac_filter)[i]);
4597 /* DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
4598 ((u32 *)&tstorm_mac_filter)[i]); */
4601 if (mode != BNX2X_RX_MODE_NONE)
4602 bnx2x_set_client_config(bp);
4605 static void bnx2x_init_internal_common(struct bnx2x *bp)
4607 int i;
4609 if (bp->flags & TPA_ENABLE_FLAG) {
4610 struct tstorm_eth_tpa_exist tpa = {0};
4612 tpa.tpa_exist = 1;
4614 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET,
4615 ((u32 *)&tpa)[0]);
4616 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET + 4,
4617 ((u32 *)&tpa)[1]);
4620 /* Zero this manually as its initialization is
4621 currently missing in the initTool */
4622 for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
4623 REG_WR(bp, BAR_USTRORM_INTMEM +
4624 USTORM_AGG_DATA_OFFSET + i * 4, 0);
4627 static void bnx2x_init_internal_port(struct bnx2x *bp)
4629 int port = BP_PORT(bp);
4631 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4632 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4633 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4634 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4637 static void bnx2x_init_internal_func(struct bnx2x *bp)
4639 struct tstorm_eth_function_common_config tstorm_config = {0};
4640 struct stats_indication_flags stats_flags = {0};
4641 int port = BP_PORT(bp);
4642 int func = BP_FUNC(bp);
4643 int i;
4644 u16 max_agg_size;
4646 if (is_multi(bp)) {
4647 tstorm_config.config_flags = MULTI_FLAGS;
4648 tstorm_config.rss_result_mask = MULTI_MASK;
4651 tstorm_config.leading_client_id = BP_L_ID(bp);
4653 REG_WR(bp, BAR_TSTRORM_INTMEM +
4654 TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
4655 (*(u32 *)&tstorm_config));
4657 bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
4658 bnx2x_set_storm_rx_mode(bp);
4660 /* reset xstorm per client statistics */
4661 for (i = 0; i < sizeof(struct xstorm_per_client_stats) / 4; i++) {
4662 REG_WR(bp, BAR_XSTRORM_INTMEM +
4663 XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, BP_CL_ID(bp)) +
4664 i*4, 0);
4666 /* reset tstorm per client statistics */
4667 for (i = 0; i < sizeof(struct tstorm_per_client_stats) / 4; i++) {
4668 REG_WR(bp, BAR_TSTRORM_INTMEM +
4669 TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, BP_CL_ID(bp)) +
4670 i*4, 0);
4673 /* Init statistics related context */
4674 stats_flags.collect_eth = 1;
4676 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
4677 ((u32 *)&stats_flags)[0]);
4678 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
4679 ((u32 *)&stats_flags)[1]);
4681 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
4682 ((u32 *)&stats_flags)[0]);
4683 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
4684 ((u32 *)&stats_flags)[1]);
4686 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
4687 ((u32 *)&stats_flags)[0]);
4688 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
4689 ((u32 *)&stats_flags)[1]);
4691 REG_WR(bp, BAR_XSTRORM_INTMEM +
4692 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
4693 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
4694 REG_WR(bp, BAR_XSTRORM_INTMEM +
4695 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
4696 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
4698 REG_WR(bp, BAR_TSTRORM_INTMEM +
4699 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
4700 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
4701 REG_WR(bp, BAR_TSTRORM_INTMEM +
4702 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
4703 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
4705 if (CHIP_IS_E1H(bp)) {
4706 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
4707 IS_E1HMF(bp));
4708 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
4709 IS_E1HMF(bp));
4710 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
4711 IS_E1HMF(bp));
4712 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
4713 IS_E1HMF(bp));
4715 REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
4716 bp->e1hov);
4719 /* Init CQ ring mapping and aggregation size */
4720 max_agg_size = min((u32)(bp->rx_buf_size +
4721 8*BCM_PAGE_SIZE*PAGES_PER_SGE),
4722 (u32)0xffff);
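/* the clamp to 0xffff is needed because USTORM_MAX_AGG_SIZE is written as a
 * 16-bit value (REG_WR16) below */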
4723 for_each_queue(bp, i) {
4724 struct bnx2x_fastpath *fp = &bp->fp[i];
4726 REG_WR(bp, BAR_USTRORM_INTMEM +
4727 USTORM_CQE_PAGE_BASE_OFFSET(port, FP_CL_ID(fp)),
4728 U64_LO(fp->rx_comp_mapping));
4729 REG_WR(bp, BAR_USTRORM_INTMEM +
4730 USTORM_CQE_PAGE_BASE_OFFSET(port, FP_CL_ID(fp)) + 4,
4731 U64_HI(fp->rx_comp_mapping));
4733 REG_WR16(bp, BAR_USTRORM_INTMEM +
4734 USTORM_MAX_AGG_SIZE_OFFSET(port, FP_CL_ID(fp)),
4735 max_agg_size);
4739 static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
4741 switch (load_code) {
4742 case FW_MSG_CODE_DRV_LOAD_COMMON:
4743 bnx2x_init_internal_common(bp);
4744 /* no break */
4746 case FW_MSG_CODE_DRV_LOAD_PORT:
4747 bnx2x_init_internal_port(bp);
4748 /* no break */
4750 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
4751 bnx2x_init_internal_func(bp);
4752 break;
4754 default:
4755 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
4756 break;
4760 static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
4762 int i;
4764 for_each_queue(bp, i) {
4765 struct bnx2x_fastpath *fp = &bp->fp[i];
4767 fp->bp = bp;
4768 fp->state = BNX2X_FP_STATE_CLOSED;
4769 fp->index = i;
4770 fp->cl_id = BP_L_ID(bp) + i;
4771 fp->sb_id = fp->cl_id;
4772 DP(NETIF_MSG_IFUP,
4773 "bnx2x_init_sb(%p,%p) index %d cl_id %d sb %d\n",
4774 bp, fp->status_blk, i, FP_CL_ID(fp), FP_SB_ID(fp));
4775 bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping,
4776 FP_SB_ID(fp));
4777 bnx2x_update_fpsb_idx(fp);
4780 bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping,
4781 DEF_SB_ID);
4782 bnx2x_update_dsb_idx(bp);
4783 bnx2x_update_coalesce(bp);
4784 bnx2x_init_rx_rings(bp);
4785 bnx2x_init_tx_ring(bp);
4786 bnx2x_init_sp_ring(bp);
4787 bnx2x_init_context(bp);
4788 bnx2x_init_internal(bp, load_code);
4789 bnx2x_init_ind_table(bp);
4790 bnx2x_int_enable(bp);
4793 /* end of nic init */
4796 /* gzip service functions */
4799 static int bnx2x_gunzip_init(struct bnx2x *bp)
4801 bp->gunzip_buf = pci_alloc_consistent(bp->pdev, FW_BUF_SIZE,
4802 &bp->gunzip_mapping);
4803 if (bp->gunzip_buf == NULL)
4804 goto gunzip_nomem1;
4806 bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
4807 if (bp->strm == NULL)
4808 goto gunzip_nomem2;
4810 bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
4811 GFP_KERNEL);
4812 if (bp->strm->workspace == NULL)
4813 goto gunzip_nomem3;
4815 return 0;
4817 gunzip_nomem3:
4818 kfree(bp->strm);
4819 bp->strm = NULL;
4821 gunzip_nomem2:
4822 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
4823 bp->gunzip_mapping);
4824 bp->gunzip_buf = NULL;
4826 gunzip_nomem1:
4827 printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for"
4828 " decompression\n", bp->dev->name);
4829 return -ENOMEM;
4832 static void bnx2x_gunzip_end(struct bnx2x *bp)
4834 kfree(bp->strm->workspace);
4836 kfree(bp->strm);
4837 bp->strm = NULL;
4839 if (bp->gunzip_buf) {
4840 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
4841 bp->gunzip_mapping);
4842 bp->gunzip_buf = NULL;
4846 static int bnx2x_gunzip(struct bnx2x *bp, u8 *zbuf, int len)
4848 int n, rc;
4850 /* check gzip header */
4851 if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED))
4852 return -EINVAL;
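/* gzip header (RFC 1952): bytes 0-1 are the magic 0x1f 0x8b, byte 2 is the
 * compression method (8 = deflate), byte 3 holds the flags and bytes 4-9
 * carry mtime/XFL/OS - hence the fixed 10 byte skip, plus the NUL terminated
 * original file name if the FNAME flag is set */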
4854 n = 10;
4856 #define FNAME 0x8
4858 if (zbuf[3] & FNAME)
4859 while ((zbuf[n++] != 0) && (n < len));
4861 bp->strm->next_in = zbuf + n;
4862 bp->strm->avail_in = len - n;
4863 bp->strm->next_out = bp->gunzip_buf;
4864 bp->strm->avail_out = FW_BUF_SIZE;
4866 rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
4867 if (rc != Z_OK)
4868 return rc;
4870 rc = zlib_inflate(bp->strm, Z_FINISH);
4871 if ((rc != Z_OK) && (rc != Z_STREAM_END))
4872 printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
4873 bp->dev->name, bp->strm->msg);
4875 bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
4876 if (bp->gunzip_outlen & 0x3)
4877 printk(KERN_ERR PFX "%s: Firmware decompression error:"
4878 " gunzip_outlen (%d) not aligned\n",
4879 bp->dev->name, bp->gunzip_outlen);
4880 bp->gunzip_outlen >>= 2;
4882 zlib_inflateEnd(bp->strm);
4884 if (rc == Z_STREAM_END)
4885 return 0;
4887 return rc;
4890 /* nic load/unload */
4893 /* General service functions */
4896 /* send a NIG loopback debug packet */
4897 static void bnx2x_lb_pckt(struct bnx2x *bp)
4899 u32 wb_write[3];
4901 /* Ethernet source and destination addresses */
4902 wb_write[0] = 0x55555555;
4903 wb_write[1] = 0x55555555;
4904 wb_write[2] = 0x20; /* SOP */
4905 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
4907 /* NON-IP protocol */
4908 wb_write[0] = 0x09000000;
4909 wb_write[1] = 0x55555555;
4910 wb_write[2] = 0x10; /* EOP, eop_bvalid = 0 */
4911 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
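/*
 * Each REG_WR_DMAE() above pushes 8 bytes of packet data plus a control word
 * into the NIG debug packet interface: the first write carries the dummy
 * addresses with the SOP marker, the second a non-IP protocol word with the
 * EOP marker, so one 16 byte (0x10) frame is injected - which is what
 * bnx2x_int_mem_test() below waits for in the BRB/PRS counters.
 */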
4914 /* Some of the internal memories are not directly readable
4915 * from the driver; to test them we send debug packets.
4916 */
4918 static int bnx2x_int_mem_test(struct bnx2x *bp)
4920 int factor;
4921 int count, i;
4922 u32 val = 0;
4924 if (CHIP_REV_IS_FPGA(bp))
4925 factor = 120;
4926 else if (CHIP_REV_IS_EMUL(bp))
4927 factor = 200;
4928 else
4929 factor = 1;
4931 DP(NETIF_MSG_HW, "start part1\n");
4933 /* Disable inputs of parser neighbor blocks */
4934 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
4935 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
4936 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
4937 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
4939 /* Write 0 to parser credits for CFC search request */
4940 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
4942 /* send Ethernet packet */
4943 bnx2x_lb_pckt(bp);
4945 /* TODO do i reset NIG statistic? */
4946 /* Wait until NIG register shows 1 packet of size 0x10 */
4947 count = 1000 * factor;
4948 while (count) {
4950 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
4951 val = *bnx2x_sp(bp, wb_data[0]);
4952 if (val == 0x10)
4953 break;
4955 msleep(10);
4956 count--;
4958 if (val != 0x10) {
4959 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
4960 return -1;
4963 /* Wait until PRS register shows 1 packet */
4964 count = 1000 * factor;
4965 while (count) {
4966 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
4967 if (val == 1)
4968 break;
4970 msleep(10);
4971 count--;
4973 if (val != 0x1) {
4974 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
4975 return -2;
4978 /* Reset and init BRB, PRS */
4979 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
4980 msleep(50);
4981 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
4982 msleep(50);
4983 bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
4984 bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
4986 DP(NETIF_MSG_HW, "part2\n");
4988 /* Disable inputs of parser neighbor blocks */
4989 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
4990 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
4991 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
4992 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
4994 /* Write 0 to parser credits for CFC search request */
4995 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
4997 /* send 10 Ethernet packets */
4998 for (i = 0; i < 10; i++)
4999 bnx2x_lb_pckt(bp);
5001 /* Wait until NIG register shows 10 + 1
5002 packets of size 11*0x10 = 0xb0 */
5003 count = 1000 * factor;
5004 while (count) {
5006 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5007 val = *bnx2x_sp(bp, wb_data[0]);
5008 if (val == 0xb0)
5009 break;
5011 msleep(10);
5012 count--;
5014 if (val != 0xb0) {
5015 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
5016 return -3;
5019 /* Wait until PRS register shows 2 packets */
5020 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5021 if (val != 2)
5022 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5024 /* Write 1 to parser credits for CFC search request */
5025 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
5027 /* Wait until PRS register shows 3 packets */
5028 msleep(10 * factor);
5029 /* Wait until NIG register shows 1 packet of size 0x10 */
5030 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5031 if (val != 3)
5032 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5034 /* clear NIG EOP FIFO */
5035 for (i = 0; i < 11; i++)
5036 REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
5037 val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
5038 if (val != 1) {
5039 BNX2X_ERR("clear of NIG failed\n");
5040 return -4;
5043 /* Reset and init BRB, PRS, NIG */
5044 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
5045 msleep(50);
5046 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
5047 msleep(50);
5048 bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
5049 bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
5050 #ifndef BCM_ISCSI
5051 /* set NIC mode */
5052 REG_WR(bp, PRS_REG_NIC_MODE, 1);
5053 #endif
5055 /* Enable inputs of parser neighbor blocks */
5056 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
5057 REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
5058 REG_WR(bp, CFC_REG_DEBUG0, 0x0);
5059 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);
5061 DP(NETIF_MSG_HW, "done\n");
5063 return 0; /* OK */
5066 static void enable_blocks_attention(struct bnx2x *bp)
5068 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5069 REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
5070 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5071 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
5072 REG_WR(bp, QM_REG_QM_INT_MASK, 0);
5073 REG_WR(bp, TM_REG_TM_INT_MASK, 0);
5074 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
5075 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
5076 REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
5077 /* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
5078 /* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
5079 REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
5080 REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
5081 REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
5082 /* REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
5083 /* REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
5084 REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
5085 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
5086 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
5087 REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
5088 /* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
5089 /* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
5090 if (CHIP_REV_IS_FPGA(bp))
5091 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
5092 else
5093 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
5094 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
5095 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
5096 REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
5097 /* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
5098 /* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
5099 REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
5100 REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
5101 /* REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
5102 REG_WR(bp, PBF_REG_PBF_INT_MASK, 0X18); /* bit 3,4 masked */
5106 static int bnx2x_init_common(struct bnx2x *bp)
5108 u32 val, i;
5110 DP(BNX2X_MSG_MCP, "starting common init func %d\n", BP_FUNC(bp));
5112 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
5113 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);
5115 bnx2x_init_block(bp, MISC_COMMON_START, MISC_COMMON_END);
5116 if (CHIP_IS_E1H(bp))
5117 REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp));
5119 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
5120 msleep(30);
5121 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);
5123 bnx2x_init_block(bp, PXP_COMMON_START, PXP_COMMON_END);
5124 if (CHIP_IS_E1(bp)) {
5125 /* enable HW interrupt from PXP on USDM overflow
5126 bit 16 on INT_MASK_0 */
5127 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5130 bnx2x_init_block(bp, PXP2_COMMON_START, PXP2_COMMON_END);
5131 bnx2x_init_pxp(bp);
5133 #ifdef __BIG_ENDIAN
5134 REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
5135 REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
5136 REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
5137 REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
5138 REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
5139 REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 1);
5141 /* REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
5142 REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
5143 REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
5144 REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
5145 REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
5146 #endif
5148 REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
5149 #ifdef BCM_ISCSI
5150 REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
5151 REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
5152 REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
5153 #endif
5155 if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
5156 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
5158 /* let the HW do its magic ... */
5159 msleep(100);
5160 /* finish PXP init */
5161 val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
5162 if (val != 1) {
5163 BNX2X_ERR("PXP2 CFG failed\n");
5164 return -EBUSY;
5166 val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
5167 if (val != 1) {
5168 BNX2X_ERR("PXP2 RD_INIT failed\n");
5169 return -EBUSY;
5172 REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
5173 REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
5175 bnx2x_init_block(bp, DMAE_COMMON_START, DMAE_COMMON_END);
5177 /* clean the DMAE memory */
5178 bp->dmae_ready = 1;
5179 bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);
5181 bnx2x_init_block(bp, TCM_COMMON_START, TCM_COMMON_END);
5182 bnx2x_init_block(bp, UCM_COMMON_START, UCM_COMMON_END);
5183 bnx2x_init_block(bp, CCM_COMMON_START, CCM_COMMON_END);
5184 bnx2x_init_block(bp, XCM_COMMON_START, XCM_COMMON_END);
5186 bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
5187 bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
5188 bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
5189 bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
5191 bnx2x_init_block(bp, QM_COMMON_START, QM_COMMON_END);
5192 /* soft reset pulse */
5193 REG_WR(bp, QM_REG_SOFT_RESET, 1);
5194 REG_WR(bp, QM_REG_SOFT_RESET, 0);
5196 #ifdef BCM_ISCSI
5197 bnx2x_init_block(bp, TIMERS_COMMON_START, TIMERS_COMMON_END);
5198 #endif
5200 bnx2x_init_block(bp, DQ_COMMON_START, DQ_COMMON_END);
5201 REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT);
5202 if (!CHIP_REV_IS_SLOW(bp)) {
5203 /* enable hw interrupt from doorbell Q */
5204 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5207 bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
5208 if (CHIP_REV_IS_SLOW(bp)) {
5209 /* fix for emulation and FPGA for no pause */
5210 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0, 513);
5211 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_1, 513);
5212 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0, 0);
5213 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_1, 0);
5216 bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
5217 /* set NIC mode */
5218 REG_WR(bp, PRS_REG_NIC_MODE, 1);
5219 if (CHIP_IS_E1H(bp))
5220 REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));
5222 bnx2x_init_block(bp, TSDM_COMMON_START, TSDM_COMMON_END);
5223 bnx2x_init_block(bp, CSDM_COMMON_START, CSDM_COMMON_END);
5224 bnx2x_init_block(bp, USDM_COMMON_START, USDM_COMMON_END);
5225 bnx2x_init_block(bp, XSDM_COMMON_START, XSDM_COMMON_END);
5227 if (CHIP_IS_E1H(bp)) {
5228 bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0,
5229 STORM_INTMEM_SIZE_E1H/2);
5230 bnx2x_init_fill(bp,
5231 TSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5232 0, STORM_INTMEM_SIZE_E1H/2);
5233 bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0,
5234 STORM_INTMEM_SIZE_E1H/2);
5235 bnx2x_init_fill(bp,
5236 CSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5237 0, STORM_INTMEM_SIZE_E1H/2);
5238 bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0,
5239 STORM_INTMEM_SIZE_E1H/2);
5240 bnx2x_init_fill(bp,
5241 XSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5242 0, STORM_INTMEM_SIZE_E1H/2);
5243 bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0,
5244 STORM_INTMEM_SIZE_E1H/2);
5245 bnx2x_init_fill(bp,
5246 USTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5247 0, STORM_INTMEM_SIZE_E1H/2);
5248 } else { /* E1 */
5249 bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0,
5250 STORM_INTMEM_SIZE_E1);
5251 bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0,
5252 STORM_INTMEM_SIZE_E1);
5253 bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0,
5254 STORM_INTMEM_SIZE_E1);
5255 bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0,
5256 STORM_INTMEM_SIZE_E1);
5259 bnx2x_init_block(bp, TSEM_COMMON_START, TSEM_COMMON_END);
5260 bnx2x_init_block(bp, USEM_COMMON_START, USEM_COMMON_END);
5261 bnx2x_init_block(bp, CSEM_COMMON_START, CSEM_COMMON_END);
5262 bnx2x_init_block(bp, XSEM_COMMON_START, XSEM_COMMON_END);
5264 /* sync semi rtc */
5265 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5266 0x80000000);
5267 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
5268 0x80000000);
5270 bnx2x_init_block(bp, UPB_COMMON_START, UPB_COMMON_END);
5271 bnx2x_init_block(bp, XPB_COMMON_START, XPB_COMMON_END);
5272 bnx2x_init_block(bp, PBF_COMMON_START, PBF_COMMON_END);
5274 REG_WR(bp, SRC_REG_SOFT_RST, 1);
5275 for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4) {
5276 REG_WR(bp, i, 0xc0cac01a);
5277 /* TODO: replace with something meaningful */
5279 if (CHIP_IS_E1H(bp))
5280 bnx2x_init_block(bp, SRCH_COMMON_START, SRCH_COMMON_END);
5281 REG_WR(bp, SRC_REG_SOFT_RST, 0);
5283 if (sizeof(union cdu_context) != 1024)
5284 /* we currently assume that a context is 1024 bytes */
5285 printk(KERN_ALERT PFX "please adjust the size of"
5286 " cdu_context(%ld)\n", (long)sizeof(union cdu_context));
5288 bnx2x_init_block(bp, CDU_COMMON_START, CDU_COMMON_END);
5289 val = (4 << 24) + (0 << 12) + 1024;
5290 REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
5291 if (CHIP_IS_E1(bp)) {
5292 /* !!! fix pxp client credit until excel update */
5293 REG_WR(bp, CDU_REG_CDU_DEBUG, 0x264);
5294 REG_WR(bp, CDU_REG_CDU_DEBUG, 0);
5297 bnx2x_init_block(bp, CFC_COMMON_START, CFC_COMMON_END);
5298 REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
5300 bnx2x_init_block(bp, HC_COMMON_START, HC_COMMON_END);
5301 bnx2x_init_block(bp, MISC_AEU_COMMON_START, MISC_AEU_COMMON_END);
5303 /* PXPCS COMMON comes here */
5304 /* Reset PCIE errors for debug */
5305 REG_WR(bp, 0x2814, 0xffffffff);
5306 REG_WR(bp, 0x3820, 0xffffffff);
5308 /* EMAC0 COMMON comes here */
5309 /* EMAC1 COMMON comes here */
5310 /* DBU COMMON comes here */
5311 /* DBG COMMON comes here */
5313 bnx2x_init_block(bp, NIG_COMMON_START, NIG_COMMON_END);
5314 if (CHIP_IS_E1H(bp)) {
5315 REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp));
5316 REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp));
5319 if (CHIP_REV_IS_SLOW(bp))
5320 msleep(200);
5322 /* finish CFC init */
5323 val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
5324 if (val != 1) {
5325 BNX2X_ERR("CFC LL_INIT failed\n");
5326 return -EBUSY;
5328 val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
5329 if (val != 1) {
5330 BNX2X_ERR("CFC AC_INIT failed\n");
5331 return -EBUSY;
5333 val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
5334 if (val != 1) {
5335 BNX2X_ERR("CFC CAM_INIT failed\n");
5336 return -EBUSY;
5338 REG_WR(bp, CFC_REG_DEBUG0, 0);
5340 /* read NIG statistic
5341 to see if this is our first up since powerup */
5342 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5343 val = *bnx2x_sp(bp, wb_data[0]);
5345 /* do internal memory self test */
5346 if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) {
5347 BNX2X_ERR("internal mem self test failed\n");
5348 return -EBUSY;
5351 switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
5352 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1021G:
5353 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
5354 /* Fan failure is indicated by SPIO 5 */
5355 bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
5356 MISC_REGISTERS_SPIO_INPUT_HI_Z);
5358 /* set to active low mode */
5359 val = REG_RD(bp, MISC_REG_SPIO_INT);
5360 val |= ((1 << MISC_REGISTERS_SPIO_5) <<
5361 MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
5362 REG_WR(bp, MISC_REG_SPIO_INT, val);
5364 /* enable interrupt to signal the IGU */
5365 val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
5366 val |= (1 << MISC_REGISTERS_SPIO_5);
5367 REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
5368 break;
5370 default:
5371 break;
5374 /* clear PXP2 attentions */
5375 REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
5377 enable_blocks_attention(bp);
5379 if (!BP_NOMCP(bp)) {
5380 bnx2x_acquire_phy_lock(bp);
5381 bnx2x_common_init_phy(bp, bp->common.shmem_base);
5382 bnx2x_release_phy_lock(bp);
5383 } else
5384 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
5386 return 0;
5389 static int bnx2x_init_port(struct bnx2x *bp)
5391 int port = BP_PORT(bp);
5392 u32 val;
5394 DP(BNX2X_MSG_MCP, "starting port init port %x\n", port);
5396 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
5398 /* Port PXP comes here */
5399 /* Port PXP2 comes here */
5400 #ifdef BCM_ISCSI
5401 /* Port0 1
5402 * Port1 385 */
5403 i++;
5404 wb_write[0] = ONCHIP_ADDR1(bp->timers_mapping);
5405 wb_write[1] = ONCHIP_ADDR2(bp->timers_mapping);
5406 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5407 REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));
5409 /* Port0 2
5410 * Port1 386 */
5411 i++;
5412 wb_write[0] = ONCHIP_ADDR1(bp->qm_mapping);
5413 wb_write[1] = ONCHIP_ADDR2(bp->qm_mapping);
5414 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5415 REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));
5417 /* Port0 3
5418 * Port1 387 */
5419 i++;
5420 wb_write[0] = ONCHIP_ADDR1(bp->t1_mapping);
5421 wb_write[1] = ONCHIP_ADDR2(bp->t1_mapping);
5422 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5423 REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
5424 #endif
5425 /* Port CMs come here */
5427 /* Port QM comes here */
5428 #ifdef BCM_ISCSI
5429 REG_WR(bp, TM_REG_LIN0_SCAN_TIME + func*4, 1024/64*20);
5430 REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + func*4, 31);
5432 bnx2x_init_block(bp, func ? TIMERS_PORT1_START : TIMERS_PORT0_START,
5433 func ? TIMERS_PORT1_END : TIMERS_PORT0_END);
5434 #endif
5435 /* Port DQ comes here */
5436 /* Port BRB1 comes here */
5437 /* Port PRS comes here */
5438 /* Port TSDM comes here */
5439 /* Port CSDM comes here */
5440 /* Port USDM comes here */
5441 /* Port XSDM comes here */
5442 bnx2x_init_block(bp, port ? TSEM_PORT1_START : TSEM_PORT0_START,
5443 port ? TSEM_PORT1_END : TSEM_PORT0_END);
5444 bnx2x_init_block(bp, port ? USEM_PORT1_START : USEM_PORT0_START,
5445 port ? USEM_PORT1_END : USEM_PORT0_END);
5446 bnx2x_init_block(bp, port ? CSEM_PORT1_START : CSEM_PORT0_START,
5447 port ? CSEM_PORT1_END : CSEM_PORT0_END);
5448 bnx2x_init_block(bp, port ? XSEM_PORT1_START : XSEM_PORT0_START,
5449 port ? XSEM_PORT1_END : XSEM_PORT0_END);
5450 /* Port UPB comes here */
5451 /* Port XPB comes here */
5453 bnx2x_init_block(bp, port ? PBF_PORT1_START : PBF_PORT0_START,
5454 port ? PBF_PORT1_END : PBF_PORT0_END);
5456 /* configure PBF to work without PAUSE mtu 9000 */
5457 REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
5459 /* update threshold */
5460 REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
5461 /* update init credit */
5462 REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
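/* the PBF threshold/credit registers appear to count in 16 byte units; 9040
 * covers a 9000 byte MTU frame plus L2 overhead */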
5464 /* probe changes */
5465 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
5466 msleep(5);
5467 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
5469 #ifdef BCM_ISCSI
5470 /* tell the searcher where the T2 table is */
5471 REG_WR(bp, SRC_REG_COUNTFREE0 + func*4, 16*1024/64);
5473 wb_write[0] = U64_LO(bp->t2_mapping);
5474 wb_write[1] = U64_HI(bp->t2_mapping);
5475 REG_WR_DMAE(bp, SRC_REG_FIRSTFREE0 + func*4, wb_write, 2);
5476 wb_write[0] = U64_LO((u64)bp->t2_mapping + 16*1024 - 64);
5477 wb_write[1] = U64_HI((u64)bp->t2_mapping + 16*1024 - 64);
5478 REG_WR_DMAE(bp, SRC_REG_LASTFREE0 + func*4, wb_write, 2);
5480 REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + func*4, 10);
5481 /* Port SRCH comes here */
5482 #endif
5483 /* Port CDU comes here */
5484 /* Port CFC comes here */
5486 if (CHIP_IS_E1(bp)) {
5487 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
5488 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
5490 bnx2x_init_block(bp, port ? HC_PORT1_START : HC_PORT0_START,
5491 port ? HC_PORT1_END : HC_PORT0_END);
5493 bnx2x_init_block(bp, port ? MISC_AEU_PORT1_START :
5494 MISC_AEU_PORT0_START,
5495 port ? MISC_AEU_PORT1_END : MISC_AEU_PORT0_END);
5496 /* init aeu_mask_attn_func_0/1:
5497 * - SF mode: bits 3-7 are masked. only bits 0-2 are in use
5498 * - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
5499 * bits 4-7 are used for "per vn group attention" */
5500 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
5501 (IS_E1HMF(bp) ? 0xF7 : 0x7));
5503 /* Port PXPCS comes here */
5504 /* Port EMAC0 comes here */
5505 /* Port EMAC1 comes here */
5506 /* Port DBU comes here */
5507 /* Port DBG comes here */
5508 bnx2x_init_block(bp, port ? NIG_PORT1_START : NIG_PORT0_START,
5509 port ? NIG_PORT1_END : NIG_PORT0_END);
5511 REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
5513 if (CHIP_IS_E1H(bp)) {
5514 u32 wsum;
5515 struct cmng_struct_per_port m_cmng_port;
5516 int vn;
5518 /* 0x2 disable e1hov, 0x1 enable */
5519 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
5520 (IS_E1HMF(bp) ? 0x1 : 0x2));
5522 /* Init RATE SHAPING and FAIRNESS contexts.
5523 Initialize as if there is 10G link. */
5524 wsum = bnx2x_calc_vn_wsum(bp);
5525 bnx2x_init_port_minmax(bp, (int)wsum, 10000, &m_cmng_port);
5526 if (IS_E1HMF(bp))
5527 for (vn = VN_0; vn < E1HVN_MAX; vn++)
5528 bnx2x_init_vn_minmax(bp, 2*vn + port,
5529 wsum, 10000, &m_cmng_port);
5532 /* Port MCP comes here */
5533 /* Port DMAE comes here */
5535 switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
5536 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1021G:
5537 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
5538 /* add SPIO 5 to group 0 */
5539 val = REG_RD(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
5540 val |= AEU_INPUTS_ATTN_BITS_SPIO5;
5541 REG_WR(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0, val);
5542 break;
5544 default:
5545 break;
5548 bnx2x__link_reset(bp);
5550 return 0;
5553 #define ILT_PER_FUNC (768/2)
5554 #define FUNC_ILT_BASE(func) (func * ILT_PER_FUNC)
5555 /* the phys address is shifted right 12 bits and has a valid bit (1)
5556 added as the 53rd bit;
5557 since this is a wide register(TM)
5558 we split it into two 32 bit writes
5559 */
5560 #define ONCHIP_ADDR1(x) ((u32)(((u64)x >> 12) & 0xFFFFFFFF))
5561 #define ONCHIP_ADDR2(x) ((u32)((1 << 20) | ((u64)x >> 44)))
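/*
 * Illustrative example: for a DMA address of 0x0000001234567000,
 * ONCHIP_ADDR1() yields 0x01234567 (address bits 12..43) and ONCHIP_ADDR2()
 * yields 0x00100000 (address bits 44..63 plus the valid bit at bit 20 of the
 * upper word, i.e. the 53rd bit of the combined 64-bit value).
 */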
5562 #define PXP_ONE_ILT(x) (((x) << 10) | x)
5563 #define PXP_ILT_RANGE(f, l) (((l) << 10) | f)
5565 #define CNIC_ILT_LINES 0
5567 static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
5569 int reg;
5571 if (CHIP_IS_E1H(bp))
5572 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
5573 else /* E1 */
5574 reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
5576 bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
5579 static int bnx2x_init_func(struct bnx2x *bp)
5581 int port = BP_PORT(bp);
5582 int func = BP_FUNC(bp);
5583 int i;
5585 DP(BNX2X_MSG_MCP, "starting func init func %x\n", func);
5587 i = FUNC_ILT_BASE(func);
5589 bnx2x_ilt_wr(bp, i, bnx2x_sp_mapping(bp, context));
5590 if (CHIP_IS_E1H(bp)) {
5591 REG_WR(bp, PXP2_REG_RQ_CDU_FIRST_ILT, i);
5592 REG_WR(bp, PXP2_REG_RQ_CDU_LAST_ILT, i + CNIC_ILT_LINES);
5593 } else /* E1 */
5594 REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4,
5595 PXP_ILT_RANGE(i, i + CNIC_ILT_LINES));
5598 if (CHIP_IS_E1H(bp)) {
5599 for (i = 0; i < 9; i++)
5600 bnx2x_init_block(bp,
5601 cm_start[func][i], cm_end[func][i]);
5603 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
5604 REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov);
5607 /* HC init per function */
5608 if (CHIP_IS_E1H(bp)) {
5609 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
5611 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
5612 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
5614 bnx2x_init_block(bp, hc_limits[func][0], hc_limits[func][1]);
5616 if (CHIP_IS_E1H(bp))
5617 REG_WR(bp, HC_REG_FUNC_NUM_P0 + port*4, func);
5619 /* Reset PCIE errors for debug */
5620 REG_WR(bp, 0x2114, 0xffffffff);
5621 REG_WR(bp, 0x2120, 0xffffffff);
5623 return 0;
5626 static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
5628 int i, rc = 0;
5630 DP(BNX2X_MSG_MCP, "function %d load_code %x\n",
5631 BP_FUNC(bp), load_code);
5633 bp->dmae_ready = 0;
5634 mutex_init(&bp->dmae_mutex);
5635 bnx2x_gunzip_init(bp);
5637 switch (load_code) {
5638 case FW_MSG_CODE_DRV_LOAD_COMMON:
5639 rc = bnx2x_init_common(bp);
5640 if (rc)
5641 goto init_hw_err;
5642 /* no break */
5644 case FW_MSG_CODE_DRV_LOAD_PORT:
5645 bp->dmae_ready = 1;
5646 rc = bnx2x_init_port(bp);
5647 if (rc)
5648 goto init_hw_err;
5649 /* no break */
5651 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
5652 bp->dmae_ready = 1;
5653 rc = bnx2x_init_func(bp);
5654 if (rc)
5655 goto init_hw_err;
5656 break;
5658 default:
5659 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
5660 break;
5663 if (!BP_NOMCP(bp)) {
5664 int func = BP_FUNC(bp);
5666 bp->fw_drv_pulse_wr_seq =
5667 (SHMEM_RD(bp, func_mb[func].drv_pulse_mb) &
5668 DRV_PULSE_SEQ_MASK);
5669 bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
5670 DP(BNX2X_MSG_MCP, "drv_pulse 0x%x func_stx 0x%x\n",
5671 bp->fw_drv_pulse_wr_seq, bp->func_stx);
5672 } else
5673 bp->func_stx = 0;
5675 /* this needs to be done before gunzip end */
5676 bnx2x_zero_def_sb(bp);
5677 for_each_queue(bp, i)
5678 bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
5680 init_hw_err:
5681 bnx2x_gunzip_end(bp);
5683 return rc;
5686 /* send the MCP a request, block until there is a reply */
5687 static u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
5689 int func = BP_FUNC(bp);
5690 u32 seq = ++bp->fw_seq;
5691 u32 rc = 0;
5692 u32 cnt = 1;
5693 u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
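/* Poll interval per iteration: 10ms on real silicon, 100ms on
 * emulation/FPGA (CHIP_REV_IS_SLOW), so the 200-iteration loop below
 * waits up to roughly 2s or 20s respectively for the FW reply.
 */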
5695 SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
5696 DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
5698 do {
5699 /* let the FW do its magic ... */
5700 msleep(delay);
5702 rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);
5704 /* Give the FW up to 2 seconds (200*10ms) */
5705 } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 200));
5707 DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
5708 cnt*delay, rc, seq);
5710 /* is this a reply to our command? */
5711 if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK)) {
5712 rc &= FW_MSG_CODE_MASK;
5714 } else {
5715 /* FW BUG! */
5716 BNX2X_ERR("FW failed to respond!\n");
5717 bnx2x_fw_dump(bp);
5718 rc = 0;
5721 return rc;
5724 static void bnx2x_free_mem(struct bnx2x *bp)
5727 #define BNX2X_PCI_FREE(x, y, size) \
5728 do { \
5729 if (x) { \
5730 pci_free_consistent(bp->pdev, size, x, y); \
5731 x = NULL; \
5732 y = 0; \
5733 } \
5734 } while (0)
5736 #define BNX2X_FREE(x) \
5737 do { \
5738 if (x) { \
5739 vfree(x); \
5740 x = NULL; \
5741 } \
5742 } while (0)
5744 int i;
5746 /* fastpath */
5747 for_each_queue(bp, i) {
5749 /* Status blocks */
5750 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
5751 bnx2x_fp(bp, i, status_blk_mapping),
5752 sizeof(struct host_status_block) +
5753 sizeof(struct eth_tx_db_data));
5755 /* fast path rings: tx_buf tx_desc rx_buf rx_desc rx_comp */
5756 BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
5757 BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
5758 bnx2x_fp(bp, i, tx_desc_mapping),
5759 sizeof(struct eth_tx_bd) * NUM_TX_BD);
5761 BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
5762 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
5763 bnx2x_fp(bp, i, rx_desc_mapping),
5764 sizeof(struct eth_rx_bd) * NUM_RX_BD);
5766 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
5767 bnx2x_fp(bp, i, rx_comp_mapping),
5768 sizeof(struct eth_fast_path_rx_cqe) *
5769 NUM_RCQ_BD);
5771 /* SGE ring */
5772 BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
5773 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
5774 bnx2x_fp(bp, i, rx_sge_mapping),
5775 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
5777 /* end of fastpath */
5779 BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
5780 sizeof(struct host_def_status_block));
5782 BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
5783 sizeof(struct bnx2x_slowpath));
5785 #ifdef BCM_ISCSI
5786 BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
5787 BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
5788 BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
5789 BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
5790 #endif
5791 BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
5793 #undef BNX2X_PCI_FREE
5794 #undef BNX2X_FREE
5797 static int bnx2x_alloc_mem(struct bnx2x *bp)
5800 #define BNX2X_PCI_ALLOC(x, y, size) \
5801 do { \
5802 x = pci_alloc_consistent(bp->pdev, size, y); \
5803 if (x == NULL) \
5804 goto alloc_mem_err; \
5805 memset(x, 0, size); \
5806 } while (0)
5808 #define BNX2X_ALLOC(x, size) \
5809 do { \
5810 x = vmalloc(size); \
5811 if (x == NULL) \
5812 goto alloc_mem_err; \
5813 memset(x, 0, size); \
5814 } while (0)
5816 int i;
5818 /* fastpath */
5819 for_each_queue(bp, i) {
5820 bnx2x_fp(bp, i, bp) = bp;
5822 /* Status blocks */
5823 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
5824 &bnx2x_fp(bp, i, status_blk_mapping),
5825 sizeof(struct host_status_block) +
5826 sizeof(struct eth_tx_db_data));
5828 bnx2x_fp(bp, i, hw_tx_prods) =
5829 (void *)(bnx2x_fp(bp, i, status_blk) + 1);
5831 bnx2x_fp(bp, i, tx_prods_mapping) =
5832 bnx2x_fp(bp, i, status_blk_mapping) +
5833 sizeof(struct host_status_block);
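/* The Tx doorbell producers (eth_tx_db_data) live in the same DMA
 * allocation, immediately after the host status block - which is why
 * the status block allocation above adds sizeof(struct eth_tx_db_data).
 */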
5835 /* fast path rings: tx_buf tx_desc rx_buf rx_desc rx_comp */
5836 BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
5837 sizeof(struct sw_tx_bd) * NUM_TX_BD);
5838 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
5839 &bnx2x_fp(bp, i, tx_desc_mapping),
5840 sizeof(struct eth_tx_bd) * NUM_TX_BD);
5842 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
5843 sizeof(struct sw_rx_bd) * NUM_RX_BD);
5844 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
5845 &bnx2x_fp(bp, i, rx_desc_mapping),
5846 sizeof(struct eth_rx_bd) * NUM_RX_BD);
5848 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
5849 &bnx2x_fp(bp, i, rx_comp_mapping),
5850 sizeof(struct eth_fast_path_rx_cqe) *
5851 NUM_RCQ_BD);
5853 /* SGE ring */
5854 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
5855 sizeof(struct sw_rx_page) * NUM_RX_SGE);
5856 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
5857 &bnx2x_fp(bp, i, rx_sge_mapping),
5858 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
5860 /* end of fastpath */
5862 BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
5863 sizeof(struct host_def_status_block));
5865 BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
5866 sizeof(struct bnx2x_slowpath));
5868 #ifdef BCM_ISCSI
5869 BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);
5871 /* Initialize T1 */
5872 for (i = 0; i < 64*1024; i += 64) {
5873 *(u64 *)((char *)bp->t1 + i + 56) = 0x0UL;
5874 *(u64 *)((char *)bp->t1 + i + 3) = 0x0UL;
5877 /* allocate searcher T2 table
5878 we allocate 1/4 of alloc num for T2
5879 (which is not entered into the ILT) */
5880 BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);
5882 /* Initialize T2 */
5883 for (i = 0; i < 16*1024; i += 64)
5884 * (u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;
5886 /* now fixup the last line in the block to point to the next block */
5887 *(u64 *)((char *)bp->t2 + 1024*16-8) = bp->t2_mapping;
5889 /* Timer block array (MAX_CONN*8) phys uncached for now 1024 conns */
5890 BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);
5892 /* QM queues (128*MAX_CONN) */
5893 BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);
5894 #endif
5896 /* Slow path ring */
5897 BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);
5899 return 0;
5901 alloc_mem_err:
5902 bnx2x_free_mem(bp);
5903 return -ENOMEM;
5905 #undef BNX2X_PCI_ALLOC
5906 #undef BNX2X_ALLOC
5909 static void bnx2x_free_tx_skbs(struct bnx2x *bp)
5911 int i;
5913 for_each_queue(bp, i) {
5914 struct bnx2x_fastpath *fp = &bp->fp[i];
5916 u16 bd_cons = fp->tx_bd_cons;
5917 u16 sw_prod = fp->tx_pkt_prod;
5918 u16 sw_cons = fp->tx_pkt_cons;
5920 while (sw_cons != sw_prod) {
5921 bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
5922 sw_cons++;
5927 static void bnx2x_free_rx_skbs(struct bnx2x *bp)
5929 int i, j;
5931 for_each_queue(bp, j) {
5932 struct bnx2x_fastpath *fp = &bp->fp[j];
5934 for (i = 0; i < NUM_RX_BD; i++) {
5935 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
5936 struct sk_buff *skb = rx_buf->skb;
5938 if (skb == NULL)
5939 continue;
5941 pci_unmap_single(bp->pdev,
5942 pci_unmap_addr(rx_buf, mapping),
5943 bp->rx_buf_size,
5944 PCI_DMA_FROMDEVICE);
5946 rx_buf->skb = NULL;
5947 dev_kfree_skb(skb);
5949 if (!fp->disable_tpa)
5950 bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
5951 ETH_MAX_AGGREGATION_QUEUES_E1 :
5952 ETH_MAX_AGGREGATION_QUEUES_E1H);
5956 static void bnx2x_free_skbs(struct bnx2x *bp)
5958 bnx2x_free_tx_skbs(bp);
5959 bnx2x_free_rx_skbs(bp);
5962 static void bnx2x_free_msix_irqs(struct bnx2x *bp)
5964 int i, offset = 1;
5966 free_irq(bp->msix_table[0].vector, bp->dev);
5967 DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
5968 bp->msix_table[0].vector);
5970 for_each_queue(bp, i) {
5971 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq "
5972 "state %x\n", i, bp->msix_table[i + offset].vector,
5973 bnx2x_fp(bp, i, state));
5975 if (bnx2x_fp(bp, i, state) != BNX2X_FP_STATE_CLOSED)
5976 BNX2X_ERR("IRQ of fp #%d being freed while "
5977 "state != closed\n", i);
5979 free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
5983 static void bnx2x_free_irq(struct bnx2x *bp)
5985 if (bp->flags & USING_MSIX_FLAG) {
5986 bnx2x_free_msix_irqs(bp);
5987 pci_disable_msix(bp->pdev);
5988 bp->flags &= ~USING_MSIX_FLAG;
5990 } else
5991 free_irq(bp->pdev->irq, bp->dev);
5994 static int bnx2x_enable_msix(struct bnx2x *bp)
5996 int i, rc, offset;
5998 bp->msix_table[0].entry = 0;
5999 offset = 1;
6000 DP(NETIF_MSG_IFUP, "msix_table[0].entry = 0 (slowpath)\n");
6002 for_each_queue(bp, i) {
6003 int igu_vec = offset + i + BP_L_ID(bp);
6005 bp->msix_table[i + offset].entry = igu_vec;
6006 DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
6007 "(fastpath #%u)\n", i + offset, igu_vec, i);
6010 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0],
6011 bp->num_queues + offset);
6012 if (rc) {
6013 DP(NETIF_MSG_IFUP, "MSI-X is not attainable\n");
6014 return -1;
6016 bp->flags |= USING_MSIX_FLAG;
6018 return 0;
6021 static int bnx2x_req_msix_irqs(struct bnx2x *bp)
6023 int i, rc, offset = 1;
6025 rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
6026 bp->dev->name, bp->dev);
6027 if (rc) {
6028 BNX2X_ERR("request sp irq failed\n");
6029 return -EBUSY;
6032 for_each_queue(bp, i) {
6033 rc = request_irq(bp->msix_table[i + offset].vector,
6034 bnx2x_msix_fp_int, 0,
6035 bp->dev->name, &bp->fp[i]);
6036 if (rc) {
6037 BNX2X_ERR("request fp #%d irq failed rc -%d\n",
6038 i + offset, -rc);
6039 bnx2x_free_msix_irqs(bp);
6040 return -EBUSY;
6043 bnx2x_fp(bp, i, state) = BNX2X_FP_STATE_IRQ;
6046 return 0;
6049 static int bnx2x_req_irq(struct bnx2x *bp)
6051 int rc;
6053 rc = request_irq(bp->pdev->irq, bnx2x_interrupt, IRQF_SHARED,
6054 bp->dev->name, bp->dev);
6055 if (!rc)
6056 bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;
6058 return rc;
6061 static void bnx2x_napi_enable(struct bnx2x *bp)
6063 int i;
6065 for_each_queue(bp, i)
6066 napi_enable(&bnx2x_fp(bp, i, napi));
6069 static void bnx2x_napi_disable(struct bnx2x *bp)
6071 int i;
6073 for_each_queue(bp, i)
6074 napi_disable(&bnx2x_fp(bp, i, napi));
6077 static void bnx2x_netif_start(struct bnx2x *bp)
6079 if (atomic_dec_and_test(&bp->intr_sem)) {
6080 if (netif_running(bp->dev)) {
6081 if (bp->state == BNX2X_STATE_OPEN)
6082 netif_wake_queue(bp->dev);
6083 bnx2x_napi_enable(bp);
6084 bnx2x_int_enable(bp);
6089 static void bnx2x_netif_stop(struct bnx2x *bp)
6091 bnx2x_int_disable_sync(bp);
6092 if (netif_running(bp->dev)) {
6093 bnx2x_napi_disable(bp);
6094 netif_tx_disable(bp->dev);
6095 bp->dev->trans_start = jiffies; /* prevent tx timeout */
6100 * Init service functions
6103 static void bnx2x_set_mac_addr_e1(struct bnx2x *bp, int set)
6105 struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
6106 int port = BP_PORT(bp);
6108 /* CAM allocation
6109 * unicasts 0-31:port0 32-63:port1
6110 * multicast 64-127:port0 128-191:port1
6111 */
6112 config->hdr.length_6b = 2;
6113 config->hdr.offset = port ? 31 : 0;
6114 config->hdr.client_id = BP_CL_ID(bp);
6115 config->hdr.reserved1 = 0;
6117 /* primary MAC */
6118 config->config_table[0].cam_entry.msb_mac_addr =
6119 swab16(*(u16 *)&bp->dev->dev_addr[0]);
6120 config->config_table[0].cam_entry.middle_mac_addr =
6121 swab16(*(u16 *)&bp->dev->dev_addr[2]);
6122 config->config_table[0].cam_entry.lsb_mac_addr =
6123 swab16(*(u16 *)&bp->dev->dev_addr[4]);
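/* The CAM entry appears to expect the MAC address as big-endian 16-bit
 * words, hence the swab16() on each 16-bit chunk of dev_addr.
 */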
6124 config->config_table[0].cam_entry.flags = cpu_to_le16(port);
6125 if (set)
6126 config->config_table[0].target_table_entry.flags = 0;
6127 else
6128 CAM_INVALIDATE(config->config_table[0]);
6129 config->config_table[0].target_table_entry.client_id = 0;
6130 config->config_table[0].target_table_entry.vlan_id = 0;
6132 DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)\n",
6133 (set ? "setting" : "clearing"),
6134 config->config_table[0].cam_entry.msb_mac_addr,
6135 config->config_table[0].cam_entry.middle_mac_addr,
6136 config->config_table[0].cam_entry.lsb_mac_addr);
6138 /* broadcast */
6139 config->config_table[1].cam_entry.msb_mac_addr = 0xffff;
6140 config->config_table[1].cam_entry.middle_mac_addr = 0xffff;
6141 config->config_table[1].cam_entry.lsb_mac_addr = 0xffff;
6142 config->config_table[1].cam_entry.flags = cpu_to_le16(port);
6143 if (set)
6144 config->config_table[1].target_table_entry.flags =
6145 TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
6146 else
6147 CAM_INVALIDATE(config->config_table[1]);
6148 config->config_table[1].target_table_entry.client_id = 0;
6149 config->config_table[1].target_table_entry.vlan_id = 0;
6151 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6152 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
6153 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
6156 static void bnx2x_set_mac_addr_e1h(struct bnx2x *bp, int set)
6158 struct mac_configuration_cmd_e1h *config =
6159 (struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config);
6161 if (set && (bp->state != BNX2X_STATE_OPEN)) {
6162 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
6163 return;
6166 /* CAM allocation for E1H
6167 * unicasts: by func number
6168 * multicast: 20+FUNC*20, 20 each
6169 */
6170 config->hdr.length_6b = 1;
6171 config->hdr.offset = BP_FUNC(bp);
6172 config->hdr.client_id = BP_CL_ID(bp);
6173 config->hdr.reserved1 = 0;
6175 /* primary MAC */
6176 config->config_table[0].msb_mac_addr =
6177 swab16(*(u16 *)&bp->dev->dev_addr[0]);
6178 config->config_table[0].middle_mac_addr =
6179 swab16(*(u16 *)&bp->dev->dev_addr[2]);
6180 config->config_table[0].lsb_mac_addr =
6181 swab16(*(u16 *)&bp->dev->dev_addr[4]);
6182 config->config_table[0].client_id = BP_L_ID(bp);
6183 config->config_table[0].vlan_id = 0;
6184 config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov);
6185 if (set)
6186 config->config_table[0].flags = BP_PORT(bp);
6187 else
6188 config->config_table[0].flags =
6189 MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE;
6191 DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x) E1HOV %d CLID %d\n",
6192 (set ? "setting" : "clearing"),
6193 config->config_table[0].msb_mac_addr,
6194 config->config_table[0].middle_mac_addr,
6195 config->config_table[0].lsb_mac_addr, bp->e1hov, BP_L_ID(bp));
6197 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6198 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
6199 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
6202 static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
6203 int *state_p, int poll)
6205 /* can take a while if any port is running */
6206 int cnt = 500;
6208 DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
6209 poll ? "polling" : "waiting", state, idx);
6211 might_sleep();
6212 while (cnt--) {
6213 if (poll) {
6214 bnx2x_rx_int(bp->fp, 10);
6215 /* if index is different from 0
6216 * the reply for some commands will
6217 * be on the non default queue
6218 */
6219 if (idx)
6220 bnx2x_rx_int(&bp->fp[idx], 10);
6223 mb(); /* state is changed by bnx2x_sp_event() */
6224 if (*state_p == state)
6225 return 0;
6227 msleep(1);
6230 /* timeout! */
6231 BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
6232 poll ? "polling" : "waiting", state, idx);
6233 #ifdef BNX2X_STOP_ON_ERROR
6234 bnx2x_panic();
6235 #endif
6237 return -EBUSY;
6240 static int bnx2x_setup_leading(struct bnx2x *bp)
6242 int rc;
6244 /* reset IGU state */
6245 bnx2x_ack_sb(bp, bp->fp[0].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
6247 /* SETUP ramrod */
6248 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);
6250 /* Wait for completion */
6251 rc = bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0);
6253 return rc;
6256 static int bnx2x_setup_multi(struct bnx2x *bp, int index)
6258 /* reset IGU state */
6259 bnx2x_ack_sb(bp, bp->fp[index].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
6261 /* SETUP ramrod */
6262 bp->fp[index].state = BNX2X_FP_STATE_OPENING;
6263 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0, index, 0);
6265 /* Wait for completion */
6266 return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
6267 &(bp->fp[index].state), 0);
6270 static int bnx2x_poll(struct napi_struct *napi, int budget);
6271 static void bnx2x_set_rx_mode(struct net_device *dev);
6273 /* must be called with rtnl_lock */
6274 static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
6276 u32 load_code;
6277 int i, rc;
6278 #ifdef BNX2X_STOP_ON_ERROR
6279 if (unlikely(bp->panic))
6280 return -EPERM;
6281 #endif
6283 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
6285 /* Send LOAD_REQUEST command to MCP
6286 Returns the type of LOAD command:
6287 if it is the first port to be initialized
6288 common blocks should be initialized, otherwise - not
6289 */
6290 if (!BP_NOMCP(bp)) {
6291 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
6292 if (!load_code) {
6293 BNX2X_ERR("MCP response failure, aborting\n");
6294 return -EBUSY;
6296 if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED)
6297 return -EBUSY; /* other port in diagnostic mode */
6299 } else {
6300 int port = BP_PORT(bp);
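/* Without an MCP, emulate its bookkeeping with the static load_count
 * array: [0] counts functions loaded on the device, [1 + port] counts
 * functions loaded on this port. The first function overall does the
 * COMMON init, the first on a port does the PORT init, any other does
 * only the FUNCTION init.
 */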
6302 DP(NETIF_MSG_IFUP, "NO MCP load counts before us %d, %d, %d\n",
6303 load_count[0], load_count[1], load_count[2]);
6304 load_count[0]++;
6305 load_count[1 + port]++;
6306 DP(NETIF_MSG_IFUP, "NO MCP new load counts %d, %d, %d\n",
6307 load_count[0], load_count[1], load_count[2]);
6308 if (load_count[0] == 1)
6309 load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
6310 else if (load_count[1 + port] == 1)
6311 load_code = FW_MSG_CODE_DRV_LOAD_PORT;
6312 else
6313 load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
6316 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
6317 (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
6318 bp->port.pmf = 1;
6319 else
6320 bp->port.pmf = 0;
6321 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
6323 /* if we can't use MSI-X we only need one fp,
6324 * so try to enable MSI-X with the requested number of fp's
6325 * and fall back to INT#A with one fp
6326 */
6327 if (use_inta) {
6328 bp->num_queues = 1;
6330 } else {
6331 if ((use_multi > 1) && (use_multi <= BP_MAX_QUEUES(bp)))
6332 /* user requested number */
6333 bp->num_queues = use_multi;
6335 else if (use_multi)
6336 bp->num_queues = min_t(u32, num_online_cpus(),
6337 BP_MAX_QUEUES(bp));
6338 else
6339 bp->num_queues = 1;
6341 if (bnx2x_enable_msix(bp)) {
6342 /* failed to enable MSI-X */
6343 bp->num_queues = 1;
6344 if (use_multi)
6345 BNX2X_ERR("Multi requested but failed"
6346 " to enable MSI-X\n");
6349 DP(NETIF_MSG_IFUP,
6350 "set number of queues to %d\n", bp->num_queues);
6352 if (bnx2x_alloc_mem(bp))
6353 return -ENOMEM;
6355 for_each_queue(bp, i)
6356 bnx2x_fp(bp, i, disable_tpa) =
6357 ((bp->flags & TPA_ENABLE_FLAG) == 0);
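/* TPA (LRO) is enabled per fastpath queue only when the global
 * TPA_ENABLE_FLAG is set (the disable_tpa module parameter clears it
 * in bnx2x_init_bp()).
 */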
6359 if (bp->flags & USING_MSIX_FLAG) {
6360 rc = bnx2x_req_msix_irqs(bp);
6361 if (rc) {
6362 pci_disable_msix(bp->pdev);
6363 goto load_error;
6365 } else {
6366 bnx2x_ack_int(bp);
6367 rc = bnx2x_req_irq(bp);
6368 if (rc) {
6369 BNX2X_ERR("IRQ request failed, aborting\n");
6370 goto load_error;
6374 for_each_queue(bp, i)
6375 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
6376 bnx2x_poll, 128);
6378 /* Initialize HW */
6379 rc = bnx2x_init_hw(bp, load_code);
6380 if (rc) {
6381 BNX2X_ERR("HW init failed, aborting\n");
6382 goto load_int_disable;
6385 /* Setup NIC internals and enable interrupts */
6386 bnx2x_nic_init(bp, load_code);
6388 /* Send LOAD_DONE command to MCP */
6389 if (!BP_NOMCP(bp)) {
6390 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
6391 if (!load_code) {
6392 BNX2X_ERR("MCP response failure, aborting\n");
6393 rc = -EBUSY;
6394 goto load_rings_free;
6398 bnx2x_stats_init(bp);
6400 bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
6402 /* Enable Rx interrupt handling before sending the ramrod
6403 as it's completed on Rx FP queue */
6404 bnx2x_napi_enable(bp);
6406 /* Enable interrupt handling */
6407 atomic_set(&bp->intr_sem, 0);
6409 rc = bnx2x_setup_leading(bp);
6410 if (rc) {
6411 BNX2X_ERR("Setup leading failed!\n");
6412 goto load_netif_stop;
6415 if (CHIP_IS_E1H(bp))
6416 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
6417 BNX2X_ERR("!!! mf_cfg function disabled\n");
6418 bp->state = BNX2X_STATE_DISABLED;
6421 if (bp->state == BNX2X_STATE_OPEN)
6422 for_each_nondefault_queue(bp, i) {
6423 rc = bnx2x_setup_multi(bp, i);
6424 if (rc)
6425 goto load_netif_stop;
6428 if (CHIP_IS_E1(bp))
6429 bnx2x_set_mac_addr_e1(bp, 1);
6430 else
6431 bnx2x_set_mac_addr_e1h(bp, 1);
6433 if (bp->port.pmf)
6434 bnx2x_initial_phy_init(bp);
6436 /* Start fast path */
6437 switch (load_mode) {
6438 case LOAD_NORMAL:
6439 /* Tx queue should only be re-enabled */
6440 netif_wake_queue(bp->dev);
6441 bnx2x_set_rx_mode(bp->dev);
6442 break;
6444 case LOAD_OPEN:
6445 netif_start_queue(bp->dev);
6446 bnx2x_set_rx_mode(bp->dev);
6447 if (bp->flags & USING_MSIX_FLAG)
6448 printk(KERN_INFO PFX "%s: using MSI-X\n",
6449 bp->dev->name);
6450 break;
6452 case LOAD_DIAG:
6453 bnx2x_set_rx_mode(bp->dev);
6454 bp->state = BNX2X_STATE_DIAG;
6455 break;
6457 default:
6458 break;
6461 if (!bp->port.pmf)
6462 bnx2x__link_status_update(bp);
6464 /* start the timer */
6465 mod_timer(&bp->timer, jiffies + bp->current_interval);
6468 return 0;
6470 load_netif_stop:
6471 bnx2x_napi_disable(bp);
6472 load_rings_free:
6473 /* Free SKBs, SGEs, TPA pool and driver internals */
6474 bnx2x_free_skbs(bp);
6475 for_each_queue(bp, i)
6476 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
6477 load_int_disable:
6478 bnx2x_int_disable_sync(bp);
6479 /* Release IRQs */
6480 bnx2x_free_irq(bp);
6481 load_error:
6482 bnx2x_free_mem(bp);
6483 bp->port.pmf = 0;
6485 /* TBD we really need to reset the chip
6486 if we want to recover from this */
6487 return rc;
6490 static int bnx2x_stop_multi(struct bnx2x *bp, int index)
6492 int rc;
6494 /* halt the connection */
6495 bp->fp[index].state = BNX2X_FP_STATE_HALTING;
6496 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, index, 0);
6498 /* Wait for completion */
6499 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
6500 &(bp->fp[index].state), 1);
6501 if (rc) /* timeout */
6502 return rc;
6504 /* delete cfc entry */
6505 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);
6507 /* Wait for completion */
6508 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index,
6509 &(bp->fp[index].state), 1);
6510 return rc;
6513 static int bnx2x_stop_leading(struct bnx2x *bp)
6515 u16 dsb_sp_prod_idx;
6516 /* if the other port is handling traffic,
6517 this can take a lot of time */
6518 int cnt = 500;
6519 int rc;
6521 might_sleep();
6523 /* Send HALT ramrod */
6524 bp->fp[0].state = BNX2X_FP_STATE_HALTING;
6525 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, BP_CL_ID(bp), 0);
6527 /* Wait for completion */
6528 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
6529 &(bp->fp[0].state), 1);
6530 if (rc) /* timeout */
6531 return rc;
6533 dsb_sp_prod_idx = *bp->dsb_sp_prod;
6535 /* Send PORT_DELETE ramrod */
6536 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);
6538 /* Wait for completion to arrive on default status block
6539 we are going to reset the chip anyway
6540 so there is not much to do if this times out
6541 */
6542 while (dsb_sp_prod_idx == *bp->dsb_sp_prod) {
6543 if (!cnt) {
6544 DP(NETIF_MSG_IFDOWN, "timeout waiting for port del "
6545 "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
6546 *bp->dsb_sp_prod, dsb_sp_prod_idx);
6547 #ifdef BNX2X_STOP_ON_ERROR
6548 bnx2x_panic();
6549 #else
6550 rc = -EBUSY;
6551 #endif
6552 break;
6554 cnt--;
6555 msleep(1);
6557 bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
6558 bp->fp[0].state = BNX2X_FP_STATE_CLOSED;
6560 return rc;
6563 static void bnx2x_reset_func(struct bnx2x *bp)
6565 int port = BP_PORT(bp);
6566 int func = BP_FUNC(bp);
6567 int base, i;
6569 /* Configure IGU */
6570 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
6571 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
6573 REG_WR(bp, HC_REG_CONFIG_0 + port*4, 0x1000);
6575 /* Clear ILT */
6576 base = FUNC_ILT_BASE(func);
6577 for (i = base; i < base + ILT_PER_FUNC; i++)
6578 bnx2x_ilt_wr(bp, i, 0);
6581 static void bnx2x_reset_port(struct bnx2x *bp)
6583 int port = BP_PORT(bp);
6584 u32 val;
6586 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
6588 /* Do not rcv packets to BRB */
6589 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
6590 /* Do not direct rcv packets that are not for MCP to the BRB */
6591 REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
6592 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
6594 /* Configure AEU */
6595 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
6597 msleep(100);
6598 /* Check for BRB port occupancy */
6599 val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
6600 if (val)
6601 DP(NETIF_MSG_IFDOWN,
6602 "BRB1 is not empty %d blocks are occupied\n", val);
6604 /* TODO: Close Doorbell port? */
6607 static void bnx2x_reset_common(struct bnx2x *bp)
6609 /* reset_common */
6610 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
6611 0xd3ffff7f);
6612 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
6615 static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
6617 DP(BNX2X_MSG_MCP, "function %d reset_code %x\n",
6618 BP_FUNC(bp), reset_code);
6620 switch (reset_code) {
6621 case FW_MSG_CODE_DRV_UNLOAD_COMMON:
6622 bnx2x_reset_port(bp);
6623 bnx2x_reset_func(bp);
6624 bnx2x_reset_common(bp);
6625 break;
6627 case FW_MSG_CODE_DRV_UNLOAD_PORT:
6628 bnx2x_reset_port(bp);
6629 bnx2x_reset_func(bp);
6630 break;
6632 case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
6633 bnx2x_reset_func(bp);
6634 break;
6636 default:
6637 BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
6638 break;
6642 /* must be called with rtnl_lock */
6643 static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
6645 int port = BP_PORT(bp);
6646 u32 reset_code = 0;
6647 int i, cnt, rc;
6649 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
6651 bp->rx_mode = BNX2X_RX_MODE_NONE;
6652 bnx2x_set_storm_rx_mode(bp);
6654 bnx2x_netif_stop(bp);
6655 if (!netif_running(bp->dev))
6656 bnx2x_napi_disable(bp);
6657 del_timer_sync(&bp->timer);
6658 SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
6659 (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
6660 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
6662 /* Wait until tx fast path tasks complete */
6663 for_each_queue(bp, i) {
6664 struct bnx2x_fastpath *fp = &bp->fp[i];
6666 cnt = 1000;
6667 smp_rmb();
6668 while (BNX2X_HAS_TX_WORK(fp)) {
6670 bnx2x_tx_int(fp, 1000);
6671 if (!cnt) {
6672 BNX2X_ERR("timeout waiting for queue[%d]\n",
6674 #ifdef BNX2X_STOP_ON_ERROR
6675 bnx2x_panic();
6676 return -EBUSY;
6677 #else
6678 break;
6679 #endif
6681 cnt--;
6682 msleep(1);
6683 smp_rmb();
6686 /* Give HW time to discard old tx messages */
6687 msleep(1);
6689 /* Release IRQs */
6690 bnx2x_free_irq(bp);
6692 if (CHIP_IS_E1(bp)) {
6693 struct mac_configuration_cmd *config =
6694 bnx2x_sp(bp, mcast_config);
6696 bnx2x_set_mac_addr_e1(bp, 0);
6698 for (i = 0; i < config->hdr.length_6b; i++)
6699 CAM_INVALIDATE(config->config_table[i]);
6701 config->hdr.length_6b = i;
6702 if (CHIP_REV_IS_SLOW(bp))
6703 config->hdr.offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
6704 else
6705 config->hdr.offset = BNX2X_MAX_MULTICAST*(1 + port);
6706 config->hdr.client_id = BP_CL_ID(bp);
6707 config->hdr.reserved1 = 0;
6709 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6710 U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
6711 U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);
6713 } else { /* E1H */
6714 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
6716 bnx2x_set_mac_addr_e1h(bp, 0);
6718 for (i = 0; i < MC_HASH_SIZE; i++)
6719 REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
6722 if (unload_mode == UNLOAD_NORMAL)
6723 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
6725 else if (bp->flags & NO_WOL_FLAG) {
6726 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
6727 if (CHIP_IS_E1H(bp))
6728 REG_WR(bp, MISC_REG_E1HMF_MODE, 0);
6730 } else if (bp->wol) {
6731 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
6732 u8 *mac_addr = bp->dev->dev_addr;
6733 u32 val;
6734 /* The mac address is written to entries 1-4 to
6735 preserve entry 0 which is used by the PMF */
6736 u8 entry = (BP_E1HVN(bp) + 1)*8;
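/* Each MAC_MATCH entry is two 32-bit registers (8 bytes), so this byte
 * offset selects entry BP_E1HVN() + 1, leaving entry 0 for the PMF as
 * noted above.
 */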
6738 val = (mac_addr[0] << 8) | mac_addr[1];
6739 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
6741 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
6742 (mac_addr[4] << 8) | mac_addr[5];
6743 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
6745 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
6747 } else
6748 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
6750 /* Close multi and leading connections
6751 Completions for ramrods are collected in a synchronous way */
6752 for_each_nondefault_queue(bp, i)
6753 if (bnx2x_stop_multi(bp, i))
6754 goto unload_error;
6756 rc = bnx2x_stop_leading(bp);
6757 if (rc) {
6758 BNX2X_ERR("Stop leading failed!\n");
6759 #ifdef BNX2X_STOP_ON_ERROR
6760 return -EBUSY;
6761 #else
6762 goto unload_error;
6763 #endif
6766 unload_error:
6767 if (!BP_NOMCP(bp))
6768 reset_code = bnx2x_fw_command(bp, reset_code);
6769 else {
6770 DP(NETIF_MSG_IFDOWN, "NO MCP load counts %d, %d, %d\n",
6771 load_count[0], load_count[1], load_count[2]);
6772 load_count[0]--;
6773 load_count[1 + port]--;
6774 DP(NETIF_MSG_IFDOWN, "NO MCP new load counts %d, %d, %d\n",
6775 load_count[0], load_count[1], load_count[2]);
6776 if (load_count[0] == 0)
6777 reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
6778 else if (load_count[1 + port] == 0)
6779 reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
6780 else
6781 reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
6784 if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
6785 (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
6786 bnx2x__link_reset(bp);
6788 /* Reset the chip */
6789 bnx2x_reset_chip(bp, reset_code);
6791 /* Report UNLOAD_DONE to MCP */
6792 if (!BP_NOMCP(bp))
6793 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
6794 bp->port.pmf = 0;
6796 /* Free SKBs, SGEs, TPA pool and driver internals */
6797 bnx2x_free_skbs(bp);
6798 for_each_queue(bp, i)
6799 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
6800 bnx2x_free_mem(bp);
6802 bp->state = BNX2X_STATE_CLOSED;
6804 netif_carrier_off(bp->dev);
6806 return 0;
6809 static void bnx2x_reset_task(struct work_struct *work)
6811 struct bnx2x *bp = container_of(work, struct bnx2x, reset_task);
6813 #ifdef BNX2X_STOP_ON_ERROR
6814 BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
6815 " so reset not done to allow debug dump,\n"
6816 KERN_ERR " you will need to reboot when done\n");
6817 return;
6818 #endif
6820 rtnl_lock();
6822 if (!netif_running(bp->dev))
6823 goto reset_task_exit;
6825 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
6826 bnx2x_nic_load(bp, LOAD_NORMAL);
6828 reset_task_exit:
6829 rtnl_unlock();
6832 /* end of nic load/unload */
6834 /* ethtool_ops */
6837 * Init service functions
6840 static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
6842 u32 val;
6844 /* Check if there is any driver already loaded */
6845 val = REG_RD(bp, MISC_REG_UNPREPARED);
6846 if (val == 0x1) {
6847 /* Check if it is the UNDI driver
6848 * UNDI driver initializes CID offset for normal bell to 0x7
6849 */
6850 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
6851 val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
6852 if (val == 0x7)
6853 REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
6854 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
6856 if (val == 0x7) {
6857 u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
6858 /* save our func */
6859 int func = BP_FUNC(bp);
6860 u32 swap_en;
6861 u32 swap_val;
6863 BNX2X_DEV_INFO("UNDI is active! reset device\n");
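/* Recovery sequence (below): request UNLOAD from the MCP for both ports
 * if needed, block new traffic towards the BRB via the HC/NIG/AEU masks,
 * pulse the GRC reset registers while preserving the NIG port-swap
 * straps, then report UNLOAD_DONE and restore our function and fw_seq.
 */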
6865 /* try to unload UNDI on port 0 */
6866 bp->func = 0;
6867 bp->fw_seq =
6868 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
6869 DRV_MSG_SEQ_NUMBER_MASK);
6870 reset_code = bnx2x_fw_command(bp, reset_code);
6872 /* if UNDI is loaded on the other port */
6873 if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {
6875 /* send "DONE" for previous unload */
6876 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
6878 /* unload UNDI on port 1 */
6879 bp->func = 1;
6880 bp->fw_seq =
6881 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
6882 DRV_MSG_SEQ_NUMBER_MASK);
6883 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
6885 bnx2x_fw_command(bp, reset_code);
6888 REG_WR(bp, (BP_PORT(bp) ? HC_REG_CONFIG_1 :
6889 HC_REG_CONFIG_0), 0x1000);
6891 /* close input traffic and wait for it */
6892 /* Do not rcv packets to BRB */
6893 REG_WR(bp,
6894 (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
6895 NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
6896 /* Do not direct rcv packets that are not for MCP to
6897 * the BRB */
6898 REG_WR(bp,
6899 (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
6900 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
6901 /* clear AEU */
6902 REG_WR(bp,
6903 (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
6904 MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
6905 msleep(10);
6907 /* save NIG port swap info */
6908 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
6909 swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
6910 /* reset device */
6911 REG_WR(bp,
6912 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
6913 0xd3ffffff);
6914 REG_WR(bp,
6915 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
6916 0x1403);
6917 /* take the NIG out of reset and restore swap values */
6918 REG_WR(bp,
6919 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
6920 MISC_REGISTERS_RESET_REG_1_RST_NIG);
6921 REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
6922 REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);
6924 /* send unload done to the MCP */
6925 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
6927 /* restore our func and fw_seq */
6928 bp->func = func;
6929 bp->fw_seq =
6930 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
6931 DRV_MSG_SEQ_NUMBER_MASK);
6936 static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
6938 u32 val, val2, val3, val4, id;
6939 u16 pmc;
6941 /* Get the chip revision id and number. */
6942 /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
6943 val = REG_RD(bp, MISC_REG_CHIP_NUM);
6944 id = ((val & 0xffff) << 16);
6945 val = REG_RD(bp, MISC_REG_CHIP_REV);
6946 id |= ((val & 0xf) << 12);
6947 val = REG_RD(bp, MISC_REG_CHIP_METAL);
6948 id |= ((val & 0xff) << 4);
6949 val = REG_RD(bp, MISC_REG_BOND_ID);
6950 id |= (val & 0xf);
6951 bp->common.chip_id = id;
6952 bp->link_params.chip_id = bp->common.chip_id;
6953 BNX2X_DEV_INFO("chip ID is 0x%x\n", id);
6955 val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
6956 bp->common.flash_size = (NVRAM_1MB_SIZE <<
6957 (val & MCPR_NVM_CFG4_FLASH_SIZE));
6958 BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
6959 bp->common.flash_size, bp->common.flash_size);
6961 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
6962 bp->link_params.shmem_base = bp->common.shmem_base;
6963 BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);
6965 if (!bp->common.shmem_base ||
6966 (bp->common.shmem_base < 0xA0000) ||
6967 (bp->common.shmem_base >= 0xC0000)) {
6968 BNX2X_DEV_INFO("MCP not active\n");
6969 bp->flags |= NO_MCP_FLAG;
6970 return;
6973 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
6974 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
6975 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
6976 BNX2X_ERR("BAD MCP validity signature\n");
6978 bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
6979 bp->common.board = SHMEM_RD(bp, dev_info.shared_hw_config.board);
6981 BNX2X_DEV_INFO("hw_config 0x%08x board 0x%08x\n",
6982 bp->common.hw_config, bp->common.board);
6984 bp->link_params.hw_led_mode = ((bp->common.hw_config &
6985 SHARED_HW_CFG_LED_MODE_MASK) >>
6986 SHARED_HW_CFG_LED_MODE_SHIFT);
6988 val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
6989 bp->common.bc_ver = val;
6990 BNX2X_DEV_INFO("bc_ver %X\n", val);
6991 if (val < BNX2X_BC_VER) {
6992 /* for now only warn
6993 * later we might need to enforce this */
6994 BNX2X_ERR("This driver needs bc_ver %X but found %X,"
6995 " please upgrade BC\n", BNX2X_BC_VER, val);
6998 if (BP_E1HVN(bp) == 0) {
6999 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
7000 bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
7001 } else {
7002 /* no WOL capability for E1HVN != 0 */
7003 bp->flags |= NO_WOL_FLAG;
7005 BNX2X_DEV_INFO("%sWoL capable\n",
7006 (bp->flags & NO_WOL_FLAG) ? "Not " : "");
7008 val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
7009 val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
7010 val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
7011 val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
7013 printk(KERN_INFO PFX "part number %X-%X-%X-%X\n",
7014 val, val2, val3, val4);
7017 static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
7018 u32 switch_cfg)
7020 int port = BP_PORT(bp);
7021 u32 ext_phy_type;
7023 switch (switch_cfg) {
7024 case SWITCH_CFG_1G:
7025 BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg);
7027 ext_phy_type =
7028 SERDES_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7029 switch (ext_phy_type) {
7030 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
7031 BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
7032 ext_phy_type);
7034 bp->port.supported |= (SUPPORTED_10baseT_Half |
7035 SUPPORTED_10baseT_Full |
7036 SUPPORTED_100baseT_Half |
7037 SUPPORTED_100baseT_Full |
7038 SUPPORTED_1000baseT_Full |
7039 SUPPORTED_2500baseX_Full |
7040 SUPPORTED_TP |
7041 SUPPORTED_FIBRE |
7042 SUPPORTED_Autoneg |
7043 SUPPORTED_Pause |
7044 SUPPORTED_Asym_Pause);
7045 break;
7047 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
7048 BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n",
7049 ext_phy_type);
7051 bp->port.supported |= (SUPPORTED_10baseT_Half |
7052 SUPPORTED_10baseT_Full |
7053 SUPPORTED_100baseT_Half |
7054 SUPPORTED_100baseT_Full |
7055 SUPPORTED_1000baseT_Full |
7056 SUPPORTED_TP |
7057 SUPPORTED_FIBRE |
7058 SUPPORTED_Autoneg |
7059 SUPPORTED_Pause |
7060 SUPPORTED_Asym_Pause);
7061 break;
7063 default:
7064 BNX2X_ERR("NVRAM config error. "
7065 "BAD SerDes ext_phy_config 0x%x\n",
7066 bp->link_params.ext_phy_config);
7067 return;
7070 bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
7071 port*0x10);
7072 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
7073 break;
7075 case SWITCH_CFG_10G:
7076 BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg);
7078 ext_phy_type =
7079 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7080 switch (ext_phy_type) {
7081 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
7082 BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
7083 ext_phy_type);
7085 bp->port.supported |= (SUPPORTED_10baseT_Half |
7086 SUPPORTED_10baseT_Full |
7087 SUPPORTED_100baseT_Half |
7088 SUPPORTED_100baseT_Full |
7089 SUPPORTED_1000baseT_Full |
7090 SUPPORTED_2500baseX_Full |
7091 SUPPORTED_10000baseT_Full |
7092 SUPPORTED_TP |
7093 SUPPORTED_FIBRE |
7094 SUPPORTED_Autoneg |
7095 SUPPORTED_Pause |
7096 SUPPORTED_Asym_Pause);
7097 break;
7099 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
7100 BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n",
7101 ext_phy_type);
7103 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7104 SUPPORTED_FIBRE |
7105 SUPPORTED_Pause |
7106 SUPPORTED_Asym_Pause);
7107 break;
7109 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
7110 BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n",
7111 ext_phy_type);
7113 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7114 SUPPORTED_1000baseT_Full |
7115 SUPPORTED_FIBRE |
7116 SUPPORTED_Pause |
7117 SUPPORTED_Asym_Pause);
7118 break;
7120 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
7121 BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n",
7122 ext_phy_type);
7124 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7125 SUPPORTED_1000baseT_Full |
7126 SUPPORTED_FIBRE |
7127 SUPPORTED_Autoneg |
7128 SUPPORTED_Pause |
7129 SUPPORTED_Asym_Pause);
7130 break;
7132 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
7133 BNX2X_DEV_INFO("ext_phy_type 0x%x (8073)\n",
7134 ext_phy_type);
7136 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7137 SUPPORTED_2500baseX_Full |
7138 SUPPORTED_1000baseT_Full |
7139 SUPPORTED_FIBRE |
7140 SUPPORTED_Autoneg |
7141 SUPPORTED_Pause |
7142 SUPPORTED_Asym_Pause);
7143 break;
7145 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
7146 BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n",
7147 ext_phy_type);
7149 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7150 SUPPORTED_TP |
7151 SUPPORTED_Autoneg |
7152 SUPPORTED_Pause |
7153 SUPPORTED_Asym_Pause);
7154 break;
7156 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
7157 BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
7158 bp->link_params.ext_phy_config);
7159 break;
7161 default:
7162 BNX2X_ERR("NVRAM config error. "
7163 "BAD XGXS ext_phy_config 0x%x\n",
7164 bp->link_params.ext_phy_config);
7165 return;
7168 bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
7169 port*0x18);
7170 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
7172 break;
7174 default:
7175 BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
7176 bp->port.link_config);
7177 return;
7179 bp->link_params.phy_addr = bp->port.phy_addr;
7181 /* mask what we support according to speed_cap_mask */
7182 if (!(bp->link_params.speed_cap_mask &
7183 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
7184 bp->port.supported &= ~SUPPORTED_10baseT_Half;
7186 if (!(bp->link_params.speed_cap_mask &
7187 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
7188 bp->port.supported &= ~SUPPORTED_10baseT_Full;
7190 if (!(bp->link_params.speed_cap_mask &
7191 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
7192 bp->port.supported &= ~SUPPORTED_100baseT_Half;
7194 if (!(bp->link_params.speed_cap_mask &
7195 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
7196 bp->port.supported &= ~SUPPORTED_100baseT_Full;
7198 if (!(bp->link_params.speed_cap_mask &
7199 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
7200 bp->port.supported &= ~(SUPPORTED_1000baseT_Half |
7201 SUPPORTED_1000baseT_Full);
7203 if (!(bp->link_params.speed_cap_mask &
7204 PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
7205 bp->port.supported &= ~SUPPORTED_2500baseX_Full;
7207 if (!(bp->link_params.speed_cap_mask &
7208 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
7209 bp->port.supported &= ~SUPPORTED_10000baseT_Full;
7211 BNX2X_DEV_INFO("supported 0x%x\n", bp->port.supported);
7214 static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
7216 bp->link_params.req_duplex = DUPLEX_FULL;
7218 switch (bp->port.link_config & PORT_FEATURE_LINK_SPEED_MASK) {
7219 case PORT_FEATURE_LINK_SPEED_AUTO:
7220 if (bp->port.supported & SUPPORTED_Autoneg) {
7221 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
7222 bp->port.advertising = bp->port.supported;
7223 } else {
7224 u32 ext_phy_type =
7225 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7227 if ((ext_phy_type ==
7228 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
7229 (ext_phy_type ==
7230 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706)) {
7231 /* force 10G, no AN */
7232 bp->link_params.req_line_speed = SPEED_10000;
7233 bp->port.advertising =
7234 (ADVERTISED_10000baseT_Full |
7235 ADVERTISED_FIBRE);
7236 break;
7238 BNX2X_ERR("NVRAM config error. "
7239 "Invalid link_config 0x%x"
7240 " Autoneg not supported\n",
7241 bp->port.link_config);
7242 return;
7244 break;
7246 case PORT_FEATURE_LINK_SPEED_10M_FULL:
7247 if (bp->port.supported & SUPPORTED_10baseT_Full) {
7248 bp->link_params.req_line_speed = SPEED_10;
7249 bp->port.advertising = (ADVERTISED_10baseT_Full |
7250 ADVERTISED_TP);
7251 } else {
7252 BNX2X_ERR("NVRAM config error. "
7253 "Invalid link_config 0x%x"
7254 " speed_cap_mask 0x%x\n",
7255 bp->port.link_config,
7256 bp->link_params.speed_cap_mask);
7257 return;
7259 break;
7261 case PORT_FEATURE_LINK_SPEED_10M_HALF:
7262 if (bp->port.supported & SUPPORTED_10baseT_Half) {
7263 bp->link_params.req_line_speed = SPEED_10;
7264 bp->link_params.req_duplex = DUPLEX_HALF;
7265 bp->port.advertising = (ADVERTISED_10baseT_Half |
7266 ADVERTISED_TP);
7267 } else {
7268 BNX2X_ERR("NVRAM config error. "
7269 "Invalid link_config 0x%x"
7270 " speed_cap_mask 0x%x\n",
7271 bp->port.link_config,
7272 bp->link_params.speed_cap_mask);
7273 return;
7275 break;
7277 case PORT_FEATURE_LINK_SPEED_100M_FULL:
7278 if (bp->port.supported & SUPPORTED_100baseT_Full) {
7279 bp->link_params.req_line_speed = SPEED_100;
7280 bp->port.advertising = (ADVERTISED_100baseT_Full |
7281 ADVERTISED_TP);
7282 } else {
7283 BNX2X_ERR("NVRAM config error. "
7284 "Invalid link_config 0x%x"
7285 " speed_cap_mask 0x%x\n",
7286 bp->port.link_config,
7287 bp->link_params.speed_cap_mask);
7288 return;
7290 break;
7292 case PORT_FEATURE_LINK_SPEED_100M_HALF:
7293 if (bp->port.supported & SUPPORTED_100baseT_Half) {
7294 bp->link_params.req_line_speed = SPEED_100;
7295 bp->link_params.req_duplex = DUPLEX_HALF;
7296 bp->port.advertising = (ADVERTISED_100baseT_Half |
7297 ADVERTISED_TP);
7298 } else {
7299 BNX2X_ERR("NVRAM config error. "
7300 "Invalid link_config 0x%x"
7301 " speed_cap_mask 0x%x\n",
7302 bp->port.link_config,
7303 bp->link_params.speed_cap_mask);
7304 return;
7306 break;
7308 case PORT_FEATURE_LINK_SPEED_1G:
7309 if (bp->port.supported & SUPPORTED_1000baseT_Full) {
7310 bp->link_params.req_line_speed = SPEED_1000;
7311 bp->port.advertising = (ADVERTISED_1000baseT_Full |
7312 ADVERTISED_TP);
7313 } else {
7314 BNX2X_ERR("NVRAM config error. "
7315 "Invalid link_config 0x%x"
7316 " speed_cap_mask 0x%x\n",
7317 bp->port.link_config,
7318 bp->link_params.speed_cap_mask);
7319 return;
7321 break;
7323 case PORT_FEATURE_LINK_SPEED_2_5G:
7324 if (bp->port.supported & SUPPORTED_2500baseX_Full) {
7325 bp->link_params.req_line_speed = SPEED_2500;
7326 bp->port.advertising = (ADVERTISED_2500baseX_Full |
7327 ADVERTISED_TP);
7328 } else {
7329 BNX2X_ERR("NVRAM config error. "
7330 "Invalid link_config 0x%x"
7331 " speed_cap_mask 0x%x\n",
7332 bp->port.link_config,
7333 bp->link_params.speed_cap_mask);
7334 return;
7336 break;
7338 case PORT_FEATURE_LINK_SPEED_10G_CX4:
7339 case PORT_FEATURE_LINK_SPEED_10G_KX4:
7340 case PORT_FEATURE_LINK_SPEED_10G_KR:
7341 if (bp->port.supported & SUPPORTED_10000baseT_Full) {
7342 bp->link_params.req_line_speed = SPEED_10000;
7343 bp->port.advertising = (ADVERTISED_10000baseT_Full |
7344 ADVERTISED_FIBRE);
7345 } else {
7346 BNX2X_ERR("NVRAM config error. "
7347 "Invalid link_config 0x%x"
7348 " speed_cap_mask 0x%x\n",
7349 bp->port.link_config,
7350 bp->link_params.speed_cap_mask);
7351 return;
7353 break;
7355 default:
7356 BNX2X_ERR("NVRAM config error. "
7357 "BAD link speed link_config 0x%x\n",
7358 bp->port.link_config);
7359 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
7360 bp->port.advertising = bp->port.supported;
7361 break;
7364 bp->link_params.req_flow_ctrl = (bp->port.link_config &
7365 PORT_FEATURE_FLOW_CONTROL_MASK);
7366 if ((bp->link_params.req_flow_ctrl == FLOW_CTRL_AUTO) &&
7367 !(bp->port.supported & SUPPORTED_Autoneg))
7368 bp->link_params.req_flow_ctrl = FLOW_CTRL_NONE;
7370 BNX2X_DEV_INFO("req_line_speed %d req_duplex %d req_flow_ctrl 0x%x"
7371 " advertising 0x%x\n",
7372 bp->link_params.req_line_speed,
7373 bp->link_params.req_duplex,
7374 bp->link_params.req_flow_ctrl, bp->port.advertising);
7377 static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
7379 int port = BP_PORT(bp);
7380 u32 val, val2;
7382 bp->link_params.bp = bp;
7383 bp->link_params.port = port;
7385 bp->link_params.serdes_config =
7386 SHMEM_RD(bp, dev_info.port_hw_config[port].serdes_config);
7387 bp->link_params.lane_config =
7388 SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
7389 bp->link_params.ext_phy_config =
7390 SHMEM_RD(bp,
7391 dev_info.port_hw_config[port].external_phy_config);
7392 bp->link_params.speed_cap_mask =
7393 SHMEM_RD(bp,
7394 dev_info.port_hw_config[port].speed_capability_mask);
7396 bp->port.link_config =
7397 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
7399 BNX2X_DEV_INFO("serdes_config 0x%08x lane_config 0x%08x\n"
7400 KERN_INFO " ext_phy_config 0x%08x speed_cap_mask 0x%08x"
7401 " link_config 0x%08x\n",
7402 bp->link_params.serdes_config,
7403 bp->link_params.lane_config,
7404 bp->link_params.ext_phy_config,
7405 bp->link_params.speed_cap_mask, bp->port.link_config);
7407 bp->link_params.switch_cfg = (bp->port.link_config &
7408 PORT_FEATURE_CONNECTED_SWITCH_MASK);
7409 bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);
7411 bnx2x_link_settings_requested(bp);
7413 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
7414 val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
7415 bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
7416 bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
7417 bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
7418 bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
7419 bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
7420 bp->dev->dev_addr[5] = (u8)(val & 0xff);
7421 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
7422 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
7425 static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
7427 int func = BP_FUNC(bp);
7428 u32 val, val2;
7429 int rc = 0;
7431 bnx2x_get_common_hwinfo(bp);
7433 bp->e1hov = 0;
7434 bp->e1hmf = 0;
7435 if (CHIP_IS_E1H(bp)) {
7436 bp->mf_config =
7437 SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
7439 val = (SHMEM_RD(bp, mf_cfg.func_mf_config[func].e1hov_tag) &
7440 FUNC_MF_CFG_E1HOV_TAG_MASK);
7441 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
7443 bp->e1hov = val;
7444 bp->e1hmf = 1;
7445 BNX2X_DEV_INFO("MF mode E1HOV for func %d is %d "
7446 "(0x%04x)\n",
7447 func, bp->e1hov, bp->e1hov);
7448 } else {
7449 BNX2X_DEV_INFO("Single function mode\n");
7450 if (BP_E1HVN(bp)) {
7451 BNX2X_ERR("!!! No valid E1HOV for func %d,"
7452 " aborting\n", func);
7453 rc = -EPERM;
7458 if (!BP_NOMCP(bp)) {
7459 bnx2x_get_port_hwinfo(bp);
7461 bp->fw_seq = (SHMEM_RD(bp, func_mb[func].drv_mb_header) &
7462 DRV_MSG_SEQ_NUMBER_MASK);
7463 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
7466 if (IS_E1HMF(bp)) {
7467 val2 = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_upper);
7468 val = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_lower);
7469 if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
7470 (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
7471 bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
7472 bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
7473 bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
7474 bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
7475 bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
7476 bp->dev->dev_addr[5] = (u8)(val & 0xff);
7477 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
7478 ETH_ALEN);
7479 memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
7480 ETH_ALEN);
7483 return rc;
7486 if (BP_NOMCP(bp)) {
7487 /* only supposed to happen on emulation/FPGA */
7488 BNX2X_ERR("warning random MAC workaround active\n");
7489 random_ether_addr(bp->dev->dev_addr);
7490 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
7493 return rc;
7496 static int __devinit bnx2x_init_bp(struct bnx2x *bp)
7498 int func = BP_FUNC(bp);
7499 int rc;
7501 /* Disable interrupt handling until HW is initialized */
7502 atomic_set(&bp->intr_sem, 1);
7504 mutex_init(&bp->port.phy_mutex);
7506 INIT_WORK(&bp->sp_task, bnx2x_sp_task);
7507 INIT_WORK(&bp->reset_task, bnx2x_reset_task);
7509 rc = bnx2x_get_hwinfo(bp);
7511 /* need to reset chip if undi was active */
7512 if (!BP_NOMCP(bp))
7513 bnx2x_undi_unload(bp);
7515 if (CHIP_REV_IS_FPGA(bp))
7516 printk(KERN_ERR PFX "FPGA detected\n");
7518 if (BP_NOMCP(bp) && (func == 0))
7519 printk(KERN_ERR PFX
7520 "MCP disabled, must load devices in order!\n");
7522 /* Set TPA flags */
7523 if (disable_tpa) {
7524 bp->flags &= ~TPA_ENABLE_FLAG;
7525 bp->dev->features &= ~NETIF_F_LRO;
7526 } else {
7527 bp->flags |= TPA_ENABLE_FLAG;
7528 bp->dev->features |= NETIF_F_LRO;
7532 bp->tx_ring_size = MAX_TX_AVAIL;
7533 bp->rx_ring_size = MAX_RX_AVAIL;
7535 bp->rx_csum = 1;
7536 bp->rx_offset = 0;
7538 bp->tx_ticks = 50;
7539 bp->rx_ticks = 25;
7541 bp->timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
7542 bp->current_interval = (poll ? poll : bp->timer_interval);
7544 init_timer(&bp->timer);
7545 bp->timer.expires = jiffies + bp->current_interval;
7546 bp->timer.data = (unsigned long) bp;
7547 bp->timer.function = bnx2x_timer;
7549 return rc;
7553 * ethtool service functions
7556 /* All ethtool functions called with rtnl_lock */
7558 static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7560 struct bnx2x *bp = netdev_priv(dev);
7562 cmd->supported = bp->port.supported;
7563 cmd->advertising = bp->port.advertising;
7565 if (netif_carrier_ok(dev)) {
7566 cmd->speed = bp->link_vars.line_speed;
7567 cmd->duplex = bp->link_vars.duplex;
7568 } else {
7569 cmd->speed = bp->link_params.req_line_speed;
7570 cmd->duplex = bp->link_params.req_duplex;
7572 if (IS_E1HMF(bp)) {
7573 u16 vn_max_rate;
7575 vn_max_rate = ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
7576 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
7577 if (vn_max_rate < cmd->speed)
7578 cmd->speed = vn_max_rate;
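/* In multi-function mode the per-function max bandwidth field is in
 * units of 100 Mbps (hence the *100 above); the reported link speed is
 * clamped to that limit.
 */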
7581 if (bp->link_params.switch_cfg == SWITCH_CFG_10G) {
7582 u32 ext_phy_type =
7583 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7585 switch (ext_phy_type) {
7586 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
7587 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
7588 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
7589 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
7590 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
7591 cmd->port = PORT_FIBRE;
7592 break;
7594 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
7595 cmd->port = PORT_TP;
7596 break;
7598 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
7599 BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
7600 bp->link_params.ext_phy_config);
7601 break;
7603 default:
7604 DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
7605 bp->link_params.ext_phy_config);
7606 break;
7608 } else
7609 cmd->port = PORT_TP;
7611 cmd->phy_address = bp->port.phy_addr;
7612 cmd->transceiver = XCVR_INTERNAL;
7614 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
7615 cmd->autoneg = AUTONEG_ENABLE;
7616 else
7617 cmd->autoneg = AUTONEG_DISABLE;
7619 cmd->maxtxpkt = 0;
7620 cmd->maxrxpkt = 0;
7622 DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
7623 DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
7624 DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
7625 DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
7626 cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
7627 cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
7628 cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
7630 return 0;
7633 static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7635 struct bnx2x *bp = netdev_priv(dev);
7636 u32 advertising;
7638 if (IS_E1HMF(bp))
7639 return 0;
7641 DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
7642 DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
7643 DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
7644 DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
7645 cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
7646 cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
7647 cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
7649 if (cmd->autoneg == AUTONEG_ENABLE) {
7650 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
7651 DP(NETIF_MSG_LINK, "Autoneg not supported\n");
7652 return -EINVAL;
7655 /* advertise the requested speed and duplex if supported */
7656 cmd->advertising &= bp->port.supported;
7658 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
7659 bp->link_params.req_duplex = DUPLEX_FULL;
7660 bp->port.advertising |= (ADVERTISED_Autoneg |
7661 cmd->advertising);
7663 } else { /* forced speed */
7664 /* advertise the requested speed and duplex if supported */
7665 switch (cmd->speed) {
7666 case SPEED_10:
7667 if (cmd->duplex == DUPLEX_FULL) {
7668 if (!(bp->port.supported &
7669 SUPPORTED_10baseT_Full)) {
7670 DP(NETIF_MSG_LINK,
7671 "10M full not supported\n");
7672 return -EINVAL;
7675 advertising = (ADVERTISED_10baseT_Full |
7676 ADVERTISED_TP);
7677 } else {
7678 if (!(bp->port.supported &
7679 SUPPORTED_10baseT_Half)) {
7680 DP(NETIF_MSG_LINK,
7681 "10M half not supported\n");
7682 return -EINVAL;
7685 advertising = (ADVERTISED_10baseT_Half |
7686 ADVERTISED_TP);
7688 break;
7690 case SPEED_100:
7691 if (cmd->duplex == DUPLEX_FULL) {
7692 if (!(bp->port.supported &
7693 SUPPORTED_100baseT_Full)) {
7694 DP(NETIF_MSG_LINK,
7695 "100M full not supported\n");
7696 return -EINVAL;
7699 advertising = (ADVERTISED_100baseT_Full |
7700 ADVERTISED_TP);
7701 } else {
7702 if (!(bp->port.supported &
7703 SUPPORTED_100baseT_Half)) {
7704 DP(NETIF_MSG_LINK,
7705 "100M half not supported\n");
7706 return -EINVAL;
7709 advertising = (ADVERTISED_100baseT_Half |
7710 ADVERTISED_TP);
7712 break;
7714 case SPEED_1000:
7715 if (cmd->duplex != DUPLEX_FULL) {
7716 DP(NETIF_MSG_LINK, "1G half not supported\n");
7717 return -EINVAL;
7720 if (!(bp->port.supported & SUPPORTED_1000baseT_Full)) {
7721 DP(NETIF_MSG_LINK, "1G full not supported\n");
7722 return -EINVAL;
7725 advertising = (ADVERTISED_1000baseT_Full |
7726 ADVERTISED_TP);
7727 break;
7729 case SPEED_2500:
7730 if (cmd->duplex != DUPLEX_FULL) {
7731 DP(NETIF_MSG_LINK,
7732 "2.5G half not supported\n");
7733 return -EINVAL;
7736 if (!(bp->port.supported & SUPPORTED_2500baseX_Full)) {
7737 DP(NETIF_MSG_LINK,
7738 "2.5G full not supported\n");
7739 return -EINVAL;
7742 advertising = (ADVERTISED_2500baseX_Full |
7743 ADVERTISED_TP);
7744 break;
7746 case SPEED_10000:
7747 if (cmd->duplex != DUPLEX_FULL) {
7748 DP(NETIF_MSG_LINK, "10G half not supported\n");
7749 return -EINVAL;
7752 if (!(bp->port.supported & SUPPORTED_10000baseT_Full)) {
7753 DP(NETIF_MSG_LINK, "10G full not supported\n");
7754 return -EINVAL;
7757 advertising = (ADVERTISED_10000baseT_Full |
7758 ADVERTISED_FIBRE);
7759 break;
7761 default:
7762 DP(NETIF_MSG_LINK, "Unsupported speed\n");
7763 return -EINVAL;
7766 bp->link_params.req_line_speed = cmd->speed;
7767 bp->link_params.req_duplex = cmd->duplex;
7768 bp->port.advertising = advertising;
7771 DP(NETIF_MSG_LINK, "req_line_speed %d\n"
7772 DP_LEVEL " req_duplex %d advertising 0x%x\n",
7773 bp->link_params.req_line_speed, bp->link_params.req_duplex,
7774 bp->port.advertising);
7776 if (netif_running(dev)) {
7777 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
7778 bnx2x_link_set(bp);
7781 return 0;
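/* bnx2x_get_drvinfo() below (ethtool -i) reports the driver name/version,
 * the bootcode version decoded from bp->common.bc_ver, an optional external
 * PHY firmware version (queried only when this port is the PMF), the PCI
 * bus address and the flash size used as the EEPROM dump length.
 */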
7784 #define PHY_FW_VER_LEN 10
7786 static void bnx2x_get_drvinfo(struct net_device *dev,
7787 struct ethtool_drvinfo *info)
7789 struct bnx2x *bp = netdev_priv(dev);
7790 u8 phy_fw_ver[PHY_FW_VER_LEN];
7792 strcpy(info->driver, DRV_MODULE_NAME);
7793 strcpy(info->version, DRV_MODULE_VERSION);
7795 phy_fw_ver[0] = '\0';
7796 if (bp->port.pmf) {
7797 bnx2x_acquire_phy_lock(bp);
7798 bnx2x_get_ext_phy_fw_version(&bp->link_params,
7799 (bp->state != BNX2X_STATE_CLOSED),
7800 phy_fw_ver, PHY_FW_VER_LEN);
7801 bnx2x_release_phy_lock(bp);
7804 snprintf(info->fw_version, 32, "BC:%d.%d.%d%s%s",
7805 (bp->common.bc_ver & 0xff0000) >> 16,
7806 (bp->common.bc_ver & 0xff00) >> 8,
7807 (bp->common.bc_ver & 0xff),
7808 ((phy_fw_ver[0] != '\0') ? " PHY:" : ""), phy_fw_ver);
7809 strcpy(info->bus_info, pci_name(bp->pdev));
7810 info->n_stats = BNX2X_NUM_STATS;
7811 info->testinfo_len = BNX2X_NUM_TESTS;
7812 info->eedump_len = bp->common.flash_size;
7813 info->regdump_len = 0;
7816 static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7818 struct bnx2x *bp = netdev_priv(dev);
7820 if (bp->flags & NO_WOL_FLAG) {
7821 wol->supported = 0;
7822 wol->wolopts = 0;
7823 } else {
7824 wol->supported = WAKE_MAGIC;
7825 if (bp->wol)
7826 wol->wolopts = WAKE_MAGIC;
7827 else
7828 wol->wolopts = 0;
7830 memset(&wol->sopass, 0, sizeof(wol->sopass));
7833 static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7835 struct bnx2x *bp = netdev_priv(dev);
7837 if (wol->wolopts & ~WAKE_MAGIC)
7838 return -EINVAL;
7840 if (wol->wolopts & WAKE_MAGIC) {
7841 if (bp->flags & NO_WOL_FLAG)
7842 return -EINVAL;
7844 bp->wol = 1;
7845 } else
7846 bp->wol = 0;
7848 return 0;
7851 static u32 bnx2x_get_msglevel(struct net_device *dev)
7853 struct bnx2x *bp = netdev_priv(dev);
7855 return bp->msglevel;
7858 static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
7860 struct bnx2x *bp = netdev_priv(dev);
7862 if (capable(CAP_NET_ADMIN))
7863 bp->msglevel = level;
7866 static int bnx2x_nway_reset(struct net_device *dev)
7868 struct bnx2x *bp = netdev_priv(dev);
7870 if (!bp->port.pmf)
7871 return 0;
7873 if (netif_running(dev)) {
7874 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
7875 bnx2x_link_set(bp);
7878 return 0;
7881 static int bnx2x_get_eeprom_len(struct net_device *dev)
7883 struct bnx2x *bp = netdev_priv(dev);
7885 return bp->common.flash_size;
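/* NVRAM access helpers. The flash is arbitrated between the ports through
 * MCP_REG_MCPR_NVM_SW_ARB: a port sets its request bit and polls for its
 * grant bit (with the timeout scaled by 100 on emulation/FPGA), and
 * read/write access is then gated by MCP_REG_MCPR_NVM_ACCESS_ENABLE.
 */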
7888 static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
7890 int port = BP_PORT(bp);
7891 int count, i;
7892 u32 val = 0;
7894 /* adjust timeout for emulation/FPGA */
7895 count = NVRAM_TIMEOUT_COUNT;
7896 if (CHIP_REV_IS_SLOW(bp))
7897 count *= 100;
7899 /* request access to nvram interface */
7900 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
7901 (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));
7903 for (i = 0; i < count*10; i++) {
7904 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
7905 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))
7906 break;
7908 udelay(5);
7911 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
7912 DP(BNX2X_MSG_NVM, "cannot get access to nvram interface\n");
7913 return -EBUSY;
7916 return 0;
7919 static int bnx2x_release_nvram_lock(struct bnx2x *bp)
7921 int port = BP_PORT(bp);
7922 int count, i;
7923 u32 val = 0;
7925 /* adjust timeout for emulation/FPGA */
7926 count = NVRAM_TIMEOUT_COUNT;
7927 if (CHIP_REV_IS_SLOW(bp))
7928 count *= 100;
7930 /* relinquish nvram interface */
7931 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
7932 (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));
7934 for (i = 0; i < count*10; i++) {
7935 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
7936 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)))
7937 break;
7939 udelay(5);
7942 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
7943 DP(BNX2X_MSG_NVM, "cannot free access to nvram interface\n");
7944 return -EBUSY;
7947 return 0;
7950 static void bnx2x_enable_nvram_access(struct bnx2x *bp)
7952 u32 val;
7954 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
7956 /* enable both bits, even on read */
7957 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
7958 (val | MCPR_NVM_ACCESS_ENABLE_EN |
7959 MCPR_NVM_ACCESS_ENABLE_WR_EN));
7962 static void bnx2x_disable_nvram_access(struct bnx2x *bp)
7964 u32 val;
7966 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
7968 /* disable both bits, even after read */
7969 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
7970 (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
7971 MCPR_NVM_ACCESS_ENABLE_WR_EN)));
7974 static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, u32 *ret_val,
7975 u32 cmd_flags)
7977 int count, i, rc;
7978 u32 val;
7980 /* build the command word */
7981 cmd_flags |= MCPR_NVM_COMMAND_DOIT;
7983 /* need to clear DONE bit separately */
7984 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
7986 /* address of the NVRAM to read from */
7987 REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
7988 (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
7990 /* issue a read command */
7991 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
7993 /* adjust timeout for emulation/FPGA */
7994 count = NVRAM_TIMEOUT_COUNT;
7995 if (CHIP_REV_IS_SLOW(bp))
7996 count *= 100;
7998 /* wait for completion */
7999 *ret_val = 0;
8000 rc = -EBUSY;
8001 for (i = 0; i < count; i++) {
8002 udelay(5);
8003 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
8005 if (val & MCPR_NVM_COMMAND_DONE) {
8006 val = REG_RD(bp, MCP_REG_MCPR_NVM_READ);
8007 /* we read nvram data in cpu order,
8008 * but ethtool sees it as an array of bytes;
8009 * converting to big-endian does the job */
8010 val = cpu_to_be32(val);
8011 *ret_val = val;
8012 rc = 0;
8013 break;
8017 return rc;
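/* bnx2x_nvram_read() validates dword alignment and bounds, then runs the
 * full sequence: acquire the NVRAM lock, enable access, read dwords with
 * MCPR_NVM_COMMAND_FIRST on the first and MCPR_NVM_COMMAND_LAST on the final
 * one, disable access and release the lock. Each dword read above polls
 * MCPR_NVM_COMMAND for the DONE bit and returns the data big-endian so that
 * ethtool sees a plain byte array.
 */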
8020 static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
8021 int buf_size)
8023 int rc;
8024 u32 cmd_flags;
8025 u32 val;
8027 if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
8028 DP(BNX2X_MSG_NVM,
8029 "Invalid parameter: offset 0x%x buf_size 0x%x\n",
8030 offset, buf_size);
8031 return -EINVAL;
8034 if (offset + buf_size > bp->common.flash_size) {
8035 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
8036 " buf_size (0x%x) > flash_size (0x%x)\n",
8037 offset, buf_size, bp->common.flash_size);
8038 return -EINVAL;
8041 /* request access to nvram interface */
8042 rc = bnx2x_acquire_nvram_lock(bp);
8043 if (rc)
8044 return rc;
8046 /* enable access to nvram interface */
8047 bnx2x_enable_nvram_access(bp);
8049 /* read the first word(s) */
8050 cmd_flags = MCPR_NVM_COMMAND_FIRST;
8051 while ((buf_size > sizeof(u32)) && (rc == 0)) {
8052 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
8053 memcpy(ret_buf, &val, 4);
8055 /* advance to the next dword */
8056 offset += sizeof(u32);
8057 ret_buf += sizeof(u32);
8058 buf_size -= sizeof(u32);
8059 cmd_flags = 0;
8062 if (rc == 0) {
8063 cmd_flags |= MCPR_NVM_COMMAND_LAST;
8064 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
8065 memcpy(ret_buf, &val, 4);
8068 /* disable access to nvram interface */
8069 bnx2x_disable_nvram_access(bp);
8070 bnx2x_release_nvram_lock(bp);
8072 return rc;
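/* ethtool EEPROM (NVRAM) reads are only honoured while the interface is up;
 * bnx2x_get_eeprom() returns -EAGAIN otherwise, so userspace (e.g.
 * ethtool -e) cannot touch the flash while the device is inactive.
 */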
8075 static int bnx2x_get_eeprom(struct net_device *dev,
8076 struct ethtool_eeprom *eeprom, u8 *eebuf)
8078 struct bnx2x *bp = netdev_priv(dev);
8079 int rc;
8081 if (!netif_running(dev))
8082 return -EAGAIN;
8084 DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
8085 DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
8086 eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
8087 eeprom->len, eeprom->len);
8089 /* parameters already validated in ethtool_get_eeprom */
8091 rc = bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
8093 return rc;
8096 static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
8097 u32 cmd_flags)
8099 int count, i, rc;
8101 /* build the command word */
8102 cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR;
8104 /* need to clear DONE bit separately */
8105 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
8107 /* write the data */
8108 REG_WR(bp, MCP_REG_MCPR_NVM_WRITE, val);
8110 /* address of the NVRAM to write to */
8111 REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
8112 (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
8114 /* issue the write command */
8115 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
8117 /* adjust timeout for emulation/FPGA */
8118 count = NVRAM_TIMEOUT_COUNT;
8119 if (CHIP_REV_IS_SLOW(bp))
8120 count *= 100;
8122 /* wait for completion */
8123 rc = -EBUSY;
8124 for (i = 0; i < count; i++) {
8125 udelay(5);
8126 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
8127 if (val & MCPR_NVM_COMMAND_DONE) {
8128 rc = 0;
8129 break;
8133 return rc;
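/* BYTE_OFFSET() gives the bit shift of a byte inside its dword.
 * bnx2x_nvram_write1() handles the single-byte writes issued by ethtool:
 * it reads the containing dword, patches one byte at BYTE_OFFSET(offset),
 * converts back to CPU order and writes the dword back.
 */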
8136 #define BYTE_OFFSET(offset) (8 * (offset & 0x03))
8138 static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
8139 int buf_size)
8141 int rc;
8142 u32 cmd_flags;
8143 u32 align_offset;
8144 u32 val;
8146 if (offset + buf_size > bp->common.flash_size) {
8147 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
8148 " buf_size (0x%x) > flash_size (0x%x)\n",
8149 offset, buf_size, bp->common.flash_size);
8150 return -EINVAL;
8153 /* request access to nvram interface */
8154 rc = bnx2x_acquire_nvram_lock(bp);
8155 if (rc)
8156 return rc;
8158 /* enable access to nvram interface */
8159 bnx2x_enable_nvram_access(bp);
8161 cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
8162 align_offset = (offset & ~0x03);
8163 rc = bnx2x_nvram_read_dword(bp, align_offset, &val, cmd_flags);
8165 if (rc == 0) {
8166 val &= ~(0xff << BYTE_OFFSET(offset));
8167 val |= (*data_buf << BYTE_OFFSET(offset));
8169 /* nvram data is returned as an array of bytes
8170 * convert it back to cpu order */
8171 val = be32_to_cpu(val);
8173 rc = bnx2x_nvram_write_dword(bp, align_offset, val,
8174 cmd_flags);
8177 /* disable access to nvram interface */
8178 bnx2x_disable_nvram_access(bp);
8179 bnx2x_release_nvram_lock(bp);
8181 return rc;
8184 static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
8185 int buf_size)
8187 int rc;
8188 u32 cmd_flags;
8189 u32 val;
8190 u32 written_so_far;
8192 if (buf_size == 1) /* ethtool */
8193 return bnx2x_nvram_write1(bp, offset, data_buf, buf_size);
8195 if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
8196 DP(BNX2X_MSG_NVM,
8197 "Invalid parameter: offset 0x%x buf_size 0x%x\n",
8198 offset, buf_size);
8199 return -EINVAL;
8202 if (offset + buf_size > bp->common.flash_size) {
8203 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
8204 " buf_size (0x%x) > flash_size (0x%x)\n",
8205 offset, buf_size, bp->common.flash_size);
8206 return -EINVAL;
8209 /* request access to nvram interface */
8210 rc = bnx2x_acquire_nvram_lock(bp);
8211 if (rc)
8212 return rc;
8214 /* enable access to nvram interface */
8215 bnx2x_enable_nvram_access(bp);
8217 written_so_far = 0;
8218 cmd_flags = MCPR_NVM_COMMAND_FIRST;
8219 while ((written_so_far < buf_size) && (rc == 0)) {
8220 if (written_so_far == (buf_size - sizeof(u32)))
8221 cmd_flags |= MCPR_NVM_COMMAND_LAST;
8222 else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0)
8223 cmd_flags |= MCPR_NVM_COMMAND_LAST;
8224 else if ((offset % NVRAM_PAGE_SIZE) == 0)
8225 cmd_flags |= MCPR_NVM_COMMAND_FIRST;
8227 memcpy(&val, data_buf, 4);
8229 rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);
8231 /* advance to the next dword */
8232 offset += sizeof(u32);
8233 data_buf += sizeof(u32);
8234 written_so_far += sizeof(u32);
8235 cmd_flags = 0;
8238 /* disable access to nvram interface */
8239 bnx2x_disable_nvram_access(bp);
8240 bnx2x_release_nvram_lock(bp);
8242 return rc;
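/* bnx2x_set_eeprom() (ethtool -E): a magic of 0x00504859 (ASCII "PHY")
 * selects an external PHY firmware upgrade via bnx2x_flash_download(),
 * which only the PMF may perform and which, when the device is in the
 * OPEN/DISABLED state, is followed by a link reset and re-init; any other
 * magic is a plain NVRAM write.
 */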
8245 static int bnx2x_set_eeprom(struct net_device *dev,
8246 struct ethtool_eeprom *eeprom, u8 *eebuf)
8248 struct bnx2x *bp = netdev_priv(dev);
8249 int rc;
8251 DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
8252 DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
8253 eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
8254 eeprom->len, eeprom->len);
8256 /* parameters already validated in ethtool_set_eeprom */
8258 /* If the magic number is PHY (0x00504859) upgrade the PHY FW */
8259 if (eeprom->magic == 0x00504859)
8260 if (bp->port.pmf) {
8262 bnx2x_acquire_phy_lock(bp);
8263 rc = bnx2x_flash_download(bp, BP_PORT(bp),
8264 bp->link_params.ext_phy_config,
8265 (bp->state != BNX2X_STATE_CLOSED),
8266 eebuf, eeprom->len);
8267 if ((bp->state == BNX2X_STATE_OPEN) ||
8268 (bp->state == BNX2X_STATE_DISABLED)) {
8269 rc |= bnx2x_link_reset(&bp->link_params,
8270 &bp->link_vars);
8271 rc |= bnx2x_phy_init(&bp->link_params,
8272 &bp->link_vars);
8274 bnx2x_release_phy_lock(bp);
8276 } else /* Only the PMF can access the PHY */
8277 return -EINVAL;
8278 else
8279 rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
8281 return rc;
8284 static int bnx2x_get_coalesce(struct net_device *dev,
8285 struct ethtool_coalesce *coal)
8287 struct bnx2x *bp = netdev_priv(dev);
8289 memset(coal, 0, sizeof(struct ethtool_coalesce));
8291 coal->rx_coalesce_usecs = bp->rx_ticks;
8292 coal->tx_coalesce_usecs = bp->tx_ticks;
8294 return 0;
8297 static int bnx2x_set_coalesce(struct net_device *dev,
8298 struct ethtool_coalesce *coal)
8300 struct bnx2x *bp = netdev_priv(dev);
8302 bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
8303 if (bp->rx_ticks > 3000)
8304 bp->rx_ticks = 3000;
8306 bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
8307 if (bp->tx_ticks > 0x3000)
8308 bp->tx_ticks = 0x3000;
8310 if (netif_running(dev))
8311 bnx2x_update_coalesce(bp);
8313 return 0;
8316 static void bnx2x_get_ringparam(struct net_device *dev,
8317 struct ethtool_ringparam *ering)
8319 struct bnx2x *bp = netdev_priv(dev);
8321 ering->rx_max_pending = MAX_RX_AVAIL;
8322 ering->rx_mini_max_pending = 0;
8323 ering->rx_jumbo_max_pending = 0;
8325 ering->rx_pending = bp->rx_ring_size;
8326 ering->rx_mini_pending = 0;
8327 ering->rx_jumbo_pending = 0;
8329 ering->tx_max_pending = MAX_TX_AVAIL;
8330 ering->tx_pending = bp->tx_ring_size;
8333 static int bnx2x_set_ringparam(struct net_device *dev,
8334 struct ethtool_ringparam *ering)
8336 struct bnx2x *bp = netdev_priv(dev);
8337 int rc = 0;
8339 if ((ering->rx_pending > MAX_RX_AVAIL) ||
8340 (ering->tx_pending > MAX_TX_AVAIL) ||
8341 (ering->tx_pending <= MAX_SKB_FRAGS + 4))
8342 return -EINVAL;
8344 bp->rx_ring_size = ering->rx_pending;
8345 bp->tx_ring_size = ering->tx_pending;
8347 if (netif_running(dev)) {
8348 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8349 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
8352 return rc;
8355 static void bnx2x_get_pauseparam(struct net_device *dev,
8356 struct ethtool_pauseparam *epause)
8358 struct bnx2x *bp = netdev_priv(dev);
8360 epause->autoneg = (bp->link_params.req_flow_ctrl == FLOW_CTRL_AUTO) &&
8361 (bp->link_params.req_line_speed == SPEED_AUTO_NEG);
8363 epause->rx_pause = ((bp->link_vars.flow_ctrl & FLOW_CTRL_RX) ==
8364 FLOW_CTRL_RX);
8365 epause->tx_pause = ((bp->link_vars.flow_ctrl & FLOW_CTRL_TX) ==
8366 FLOW_CTRL_TX);
8368 DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
8369 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
8370 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
8373 static int bnx2x_set_pauseparam(struct net_device *dev,
8374 struct ethtool_pauseparam *epause)
8376 struct bnx2x *bp = netdev_priv(dev);
8378 if (IS_E1HMF(bp))
8379 return 0;
8381 DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
8382 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
8383 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
8385 bp->link_params.req_flow_ctrl = FLOW_CTRL_AUTO;
8387 if (epause->rx_pause)
8388 bp->link_params.req_flow_ctrl |= FLOW_CTRL_RX;
8390 if (epause->tx_pause)
8391 bp->link_params.req_flow_ctrl |= FLOW_CTRL_TX;
8393 if (bp->link_params.req_flow_ctrl == FLOW_CTRL_AUTO)
8394 bp->link_params.req_flow_ctrl = FLOW_CTRL_NONE;
8396 if (epause->autoneg) {
8397 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
8398 DP(NETIF_MSG_LINK, "autoneg not supported\n");
8399 return -EINVAL;
8402 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
8403 bp->link_params.req_flow_ctrl = FLOW_CTRL_AUTO;
8406 DP(NETIF_MSG_LINK,
8407 "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl);
8409 if (netif_running(dev)) {
8410 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
8411 bnx2x_link_set(bp);
8414 return 0;
8417 static int bnx2x_set_flags(struct net_device *dev, u32 data)
8419 struct bnx2x *bp = netdev_priv(dev);
8420 int changed = 0;
8421 int rc = 0;
8423 /* TPA requires Rx CSUM offloading */
8424 if ((data & ETH_FLAG_LRO) && bp->rx_csum) {
8425 if (!(dev->features & NETIF_F_LRO)) {
8426 dev->features |= NETIF_F_LRO;
8427 bp->flags |= TPA_ENABLE_FLAG;
8428 changed = 1;
8431 } else if (dev->features & NETIF_F_LRO) {
8432 dev->features &= ~NETIF_F_LRO;
8433 bp->flags &= ~TPA_ENABLE_FLAG;
8434 changed = 1;
8437 if (changed && netif_running(dev)) {
8438 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8439 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
8442 return rc;
8445 static u32 bnx2x_get_rx_csum(struct net_device *dev)
8447 struct bnx2x *bp = netdev_priv(dev);
8449 return bp->rx_csum;
8452 static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
8454 struct bnx2x *bp = netdev_priv(dev);
8455 int rc = 0;
8457 bp->rx_csum = data;
8459 /* Disable TPA, when Rx CSUM is disabled. Otherwise all
8460 TPA'ed packets will be discarded due to wrong TCP CSUM */
8461 if (!data) {
8462 u32 flags = ethtool_op_get_flags(dev);
8464 rc = bnx2x_set_flags(dev, (flags & ~ETH_FLAG_LRO));
8467 return rc;
8470 static int bnx2x_set_tso(struct net_device *dev, u32 data)
8472 if (data) {
8473 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
8474 dev->features |= NETIF_F_TSO6;
8475 } else {
8476 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
8477 dev->features &= ~NETIF_F_TSO6;
8480 return 0;
8483 static const struct {
8484 char string[ETH_GSTRING_LEN];
8485 } bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = {
8486 { "register_test (offline)" },
8487 { "memory_test (offline)" },
8488 { "loopback_test (offline)" },
8489 { "nvram_test (online)" },
8490 { "interrupt_test (online)" },
8491 { "link_test (online)" },
8492 { "idle check (online)" },
8493 { "MC errors (online)" }
8496 static int bnx2x_self_test_count(struct net_device *dev)
8498 return BNX2X_NUM_TESTS;
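/* Register test: for each reg_tbl[] entry (base offset, per-port stride and
 * mask of testable bits) write 0x00000000 and then 0xffffffff, read the
 * value back, restore the original contents, and fail if the masked
 * read-back does not match the masked value written.
 */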
8501 static int bnx2x_test_registers(struct bnx2x *bp)
8503 int idx, i, rc = -ENODEV;
8504 u32 wr_val = 0;
8505 int port = BP_PORT(bp);
8506 static const struct {
8507 u32 offset0;
8508 u32 offset1;
8509 u32 mask;
8510 } reg_tbl[] = {
8511 /* 0 */ { BRB1_REG_PAUSE_LOW_THRESHOLD_0, 4, 0x000003ff },
8512 { DORQ_REG_DB_ADDR0, 4, 0xffffffff },
8513 { HC_REG_AGG_INT_0, 4, 0x000003ff },
8514 { PBF_REG_MAC_IF0_ENABLE, 4, 0x00000001 },
8515 { PBF_REG_P0_INIT_CRD, 4, 0x000007ff },
8516 { PRS_REG_CID_PORT_0, 4, 0x00ffffff },
8517 { PXP2_REG_PSWRQ_CDU0_L2P, 4, 0x000fffff },
8518 { PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
8519 { PXP2_REG_PSWRQ_TM0_L2P, 4, 0x000fffff },
8520 { PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
8521 /* 10 */ { PXP2_REG_PSWRQ_TSDM0_L2P, 4, 0x000fffff },
8522 { QM_REG_CONNNUM_0, 4, 0x000fffff },
8523 { TM_REG_LIN0_MAX_ACTIVE_CID, 4, 0x0003ffff },
8524 { SRC_REG_KEYRSS0_0, 40, 0xffffffff },
8525 { SRC_REG_KEYRSS0_7, 40, 0xffffffff },
8526 { XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 4, 0x00000001 },
8527 { XCM_REG_WU_DA_CNT_CMD00, 4, 0x00000003 },
8528 { XCM_REG_GLB_DEL_ACK_MAX_CNT_0, 4, 0x000000ff },
8529 { NIG_REG_EGRESS_MNG0_FIFO, 20, 0xffffffff },
8530 { NIG_REG_LLH0_T_BIT, 4, 0x00000001 },
8531 /* 20 */ { NIG_REG_EMAC0_IN_EN, 4, 0x00000001 },
8532 { NIG_REG_BMAC0_IN_EN, 4, 0x00000001 },
8533 { NIG_REG_XCM0_OUT_EN, 4, 0x00000001 },
8534 { NIG_REG_BRB0_OUT_EN, 4, 0x00000001 },
8535 { NIG_REG_LLH0_XCM_MASK, 4, 0x00000007 },
8536 { NIG_REG_LLH0_ACPI_PAT_6_LEN, 68, 0x000000ff },
8537 { NIG_REG_LLH0_ACPI_PAT_0_CRC, 68, 0xffffffff },
8538 { NIG_REG_LLH0_DEST_MAC_0_0, 160, 0xffffffff },
8539 { NIG_REG_LLH0_DEST_IP_0_1, 160, 0xffffffff },
8540 { NIG_REG_LLH0_IPV4_IPV6_0, 160, 0x00000001 },
8541 /* 30 */ { NIG_REG_LLH0_DEST_UDP_0, 160, 0x0000ffff },
8542 { NIG_REG_LLH0_DEST_TCP_0, 160, 0x0000ffff },
8543 { NIG_REG_LLH0_VLAN_ID_0, 160, 0x00000fff },
8544 { NIG_REG_XGXS_SERDES0_MODE_SEL, 4, 0x00000001 },
8545 { NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0, 4, 0x00000001 },
8546 { NIG_REG_STATUS_INTERRUPT_PORT0, 4, 0x07ffffff },
8547 { NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST, 24, 0x00000001 },
8548 { NIG_REG_SERDES0_CTRL_PHY_ADDR, 16, 0x0000001f },
8550 { 0xffffffff, 0, 0x00000000 }
8553 if (!netif_running(bp->dev))
8554 return rc;
8556 /* Run the test twice:
8557 first writing 0x00000000, then writing 0xffffffff */
8558 for (idx = 0; idx < 2; idx++) {
8560 switch (idx) {
8561 case 0:
8562 wr_val = 0;
8563 break;
8564 case 1:
8565 wr_val = 0xffffffff;
8566 break;
8569 for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) {
8570 u32 offset, mask, save_val, val;
8572 offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1;
8573 mask = reg_tbl[i].mask;
8575 save_val = REG_RD(bp, offset);
8577 REG_WR(bp, offset, wr_val);
8578 val = REG_RD(bp, offset);
8580 /* Restore the original register's value */
8581 REG_WR(bp, offset, save_val);
8583 /* verify that the value read back matches the expected value */
8584 if ((val & mask) != (wr_val & mask))
8585 goto test_reg_exit;
8589 rc = 0;
8591 test_reg_exit:
8592 return rc;
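/* Memory test: read back every word of the internal memories listed in
 * mem_tbl[], then check the parity status registers in prty_tbl[]; any bit
 * set outside the per-chip (E1/E1H) ignore mask fails the test.
 */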
8595 static int bnx2x_test_memory(struct bnx2x *bp)
8597 int i, j, rc = -ENODEV;
8598 u32 val;
8599 static const struct {
8600 u32 offset;
8601 int size;
8602 } mem_tbl[] = {
8603 { CCM_REG_XX_DESCR_TABLE, CCM_REG_XX_DESCR_TABLE_SIZE },
8604 { CFC_REG_ACTIVITY_COUNTER, CFC_REG_ACTIVITY_COUNTER_SIZE },
8605 { CFC_REG_LINK_LIST, CFC_REG_LINK_LIST_SIZE },
8606 { DMAE_REG_CMD_MEM, DMAE_REG_CMD_MEM_SIZE },
8607 { TCM_REG_XX_DESCR_TABLE, TCM_REG_XX_DESCR_TABLE_SIZE },
8608 { UCM_REG_XX_DESCR_TABLE, UCM_REG_XX_DESCR_TABLE_SIZE },
8609 { XCM_REG_XX_DESCR_TABLE, XCM_REG_XX_DESCR_TABLE_SIZE },
8611 { 0xffffffff, 0 }
8613 static const struct {
8614 char *name;
8615 u32 offset;
8616 u32 e1_mask;
8617 u32 e1h_mask;
8618 } prty_tbl[] = {
8619 { "CCM_PRTY_STS", CCM_REG_CCM_PRTY_STS, 0x3ffc0, 0 },
8620 { "CFC_PRTY_STS", CFC_REG_CFC_PRTY_STS, 0x2, 0x2 },
8621 { "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0, 0 },
8622 { "TCM_PRTY_STS", TCM_REG_TCM_PRTY_STS, 0x3ffc0, 0 },
8623 { "UCM_PRTY_STS", UCM_REG_UCM_PRTY_STS, 0x3ffc0, 0 },
8624 { "XCM_PRTY_STS", XCM_REG_XCM_PRTY_STS, 0x3ffc1, 0 },
8626 { NULL, 0xffffffff, 0, 0 }
8629 if (!netif_running(bp->dev))
8630 return rc;
8632 /* Go through all the memories */
8633 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++)
8634 for (j = 0; j < mem_tbl[i].size; j++)
8635 REG_RD(bp, mem_tbl[i].offset + j*4);
8637 /* Check the parity status */
8638 for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
8639 val = REG_RD(bp, prty_tbl[i].offset);
8640 if ((CHIP_IS_E1(bp) && (val & ~(prty_tbl[i].e1_mask))) ||
8641 (CHIP_IS_E1H(bp) && (val & ~(prty_tbl[i].e1h_mask)))) {
8642 DP(NETIF_MSG_HW,
8643 "%s is 0x%x\n", prty_tbl[i].name, val);
8644 goto test_mem_exit;
8648 rc = 0;
8650 test_mem_exit:
8651 return rc;
8654 static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up)
8656 int cnt = 1000;
8658 if (link_up)
8659 while (bnx2x_link_test(bp) && cnt--)
8660 msleep(10);
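/* Loopback test: configure BMAC or XGXS (PHY) loopback, build a 1514-byte
 * frame addressed to our own MAC with an incrementing-byte payload, post it
 * as a single TX BD and ring the doorbell, then verify that the TX and RX
 * consumer indices advanced and that the received length and payload match.
 */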
8663 static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
8665 unsigned int pkt_size, num_pkts, i;
8666 struct sk_buff *skb;
8667 unsigned char *packet;
8668 struct bnx2x_fastpath *fp = &bp->fp[0];
8669 u16 tx_start_idx, tx_idx;
8670 u16 rx_start_idx, rx_idx;
8671 u16 pkt_prod;
8672 struct sw_tx_bd *tx_buf;
8673 struct eth_tx_bd *tx_bd;
8674 dma_addr_t mapping;
8675 union eth_rx_cqe *cqe;
8676 u8 cqe_fp_flags;
8677 struct sw_rx_bd *rx_buf;
8678 u16 len;
8679 int rc = -ENODEV;
8681 if (loopback_mode == BNX2X_MAC_LOOPBACK) {
8682 bp->link_params.loopback_mode = LOOPBACK_BMAC;
8683 bnx2x_acquire_phy_lock(bp);
8684 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
8685 bnx2x_release_phy_lock(bp);
8687 } else if (loopback_mode == BNX2X_PHY_LOOPBACK) {
8688 bp->link_params.loopback_mode = LOOPBACK_XGXS_10;
8689 bnx2x_acquire_phy_lock(bp);
8690 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
8691 bnx2x_release_phy_lock(bp);
8692 /* wait until link state is restored */
8693 bnx2x_wait_for_link(bp, link_up);
8695 } else
8696 return -EINVAL;
8698 pkt_size = 1514;
8699 skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
8700 if (!skb) {
8701 rc = -ENOMEM;
8702 goto test_loopback_exit;
8704 packet = skb_put(skb, pkt_size);
8705 memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
8706 memset(packet + ETH_ALEN, 0, (ETH_HLEN - ETH_ALEN));
8707 for (i = ETH_HLEN; i < pkt_size; i++)
8708 packet[i] = (unsigned char) (i & 0xff);
8710 num_pkts = 0;
8711 tx_start_idx = le16_to_cpu(*fp->tx_cons_sb);
8712 rx_start_idx = le16_to_cpu(*fp->rx_cons_sb);
8714 pkt_prod = fp->tx_pkt_prod++;
8715 tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
8716 tx_buf->first_bd = fp->tx_bd_prod;
8717 tx_buf->skb = skb;
8719 tx_bd = &fp->tx_desc_ring[TX_BD(fp->tx_bd_prod)];
8720 mapping = pci_map_single(bp->pdev, skb->data,
8721 skb_headlen(skb), PCI_DMA_TODEVICE);
8722 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
8723 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
8724 tx_bd->nbd = cpu_to_le16(1);
8725 tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));
8726 tx_bd->vlan = cpu_to_le16(pkt_prod);
8727 tx_bd->bd_flags.as_bitfield = (ETH_TX_BD_FLAGS_START_BD |
8728 ETH_TX_BD_FLAGS_END_BD);
8729 tx_bd->general_data = ((UNICAST_ADDRESS <<
8730 ETH_TX_BD_ETH_ADDR_TYPE_SHIFT) | 1);
8732 fp->hw_tx_prods->bds_prod =
8733 cpu_to_le16(le16_to_cpu(fp->hw_tx_prods->bds_prod) + 1);
8734 mb(); /* FW restriction: must not reorder writing nbd and packets */
8735 fp->hw_tx_prods->packets_prod =
8736 cpu_to_le32(le32_to_cpu(fp->hw_tx_prods->packets_prod) + 1);
8737 DOORBELL(bp, FP_IDX(fp), 0);
8739 mmiowb();
8741 num_pkts++;
8742 fp->tx_bd_prod++;
8743 bp->dev->trans_start = jiffies;
8745 udelay(100);
8747 tx_idx = le16_to_cpu(*fp->tx_cons_sb);
8748 if (tx_idx != tx_start_idx + num_pkts)
8749 goto test_loopback_exit;
8751 rx_idx = le16_to_cpu(*fp->rx_cons_sb);
8752 if (rx_idx != rx_start_idx + num_pkts)
8753 goto test_loopback_exit;
8755 cqe = &fp->rx_comp_ring[RCQ_BD(fp->rx_comp_cons)];
8756 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
8757 if (CQE_TYPE(cqe_fp_flags) || (cqe_fp_flags & ETH_RX_ERROR_FALGS))
8758 goto test_loopback_rx_exit;
8760 len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
8761 if (len != pkt_size)
8762 goto test_loopback_rx_exit;
8764 rx_buf = &fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)];
8765 skb = rx_buf->skb;
8766 skb_reserve(skb, cqe->fast_path_cqe.placement_offset);
8767 for (i = ETH_HLEN; i < pkt_size; i++)
8768 if (*(skb->data + i) != (unsigned char) (i & 0xff))
8769 goto test_loopback_rx_exit;
8771 rc = 0;
8773 test_loopback_rx_exit:
8774 bp->dev->last_rx = jiffies;
8776 fp->rx_bd_cons = NEXT_RX_IDX(fp->rx_bd_cons);
8777 fp->rx_bd_prod = NEXT_RX_IDX(fp->rx_bd_prod);
8778 fp->rx_comp_cons = NEXT_RCQ_IDX(fp->rx_comp_cons);
8779 fp->rx_comp_prod = NEXT_RCQ_IDX(fp->rx_comp_prod);
8781 /* Update producers */
8782 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
8783 fp->rx_sge_prod);
8784 mmiowb(); /* keep prod updates ordered */
8786 test_loopback_exit:
8787 bp->link_params.loopback_mode = LOOPBACK_NONE;
8789 return rc;
8792 static int bnx2x_test_loopback(struct bnx2x *bp, u8 link_up)
8794 int rc = 0;
8796 if (!netif_running(bp->dev))
8797 return BNX2X_LOOPBACK_FAILED;
8799 bnx2x_netif_stop(bp);
8801 if (bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK, link_up)) {
8802 DP(NETIF_MSG_PROBE, "MAC loopback failed\n");
8803 rc |= BNX2X_MAC_LOOPBACK_FAILED;
8806 if (bnx2x_run_loopback(bp, BNX2X_PHY_LOOPBACK, link_up)) {
8807 DP(NETIF_MSG_PROBE, "PHY loopback failed\n");
8808 rc |= BNX2X_PHY_LOOPBACK_FAILED;
8811 bnx2x_netif_start(bp);
8813 return rc;
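/* NVRAM test: the first dword must carry the 0x669955aa magic, and each
 * region in nvram_tbl[] is CRC-checked - ether_crc_le() over the region
 * must equal the standard CRC32 residual, i.e. every region is expected to
 * carry its own CRC.
 */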
8816 #define CRC32_RESIDUAL 0xdebb20e3
8818 static int bnx2x_test_nvram(struct bnx2x *bp)
8820 static const struct {
8821 int offset;
8822 int size;
8823 } nvram_tbl[] = {
8824 { 0, 0x14 }, /* bootstrap */
8825 { 0x14, 0xec }, /* dir */
8826 { 0x100, 0x350 }, /* manuf_info */
8827 { 0x450, 0xf0 }, /* feature_info */
8828 { 0x640, 0x64 }, /* upgrade_key_info */
8829 { 0x6a4, 0x64 },
8830 { 0x708, 0x70 }, /* manuf_key_info */
8831 { 0x778, 0x70 },
8832 { 0, 0 }
8834 u32 buf[0x350 / 4];
8835 u8 *data = (u8 *)buf;
8836 int i, rc;
8837 u32 magic, csum;
8839 rc = bnx2x_nvram_read(bp, 0, data, 4);
8840 if (rc) {
8841 DP(NETIF_MSG_PROBE, "magic value read (rc -%d)\n", -rc);
8842 goto test_nvram_exit;
8845 magic = be32_to_cpu(buf[0]);
8846 if (magic != 0x669955aa) {
8847 DP(NETIF_MSG_PROBE, "magic value (0x%08x)\n", magic);
8848 rc = -ENODEV;
8849 goto test_nvram_exit;
8852 for (i = 0; nvram_tbl[i].size; i++) {
8854 rc = bnx2x_nvram_read(bp, nvram_tbl[i].offset, data,
8855 nvram_tbl[i].size);
8856 if (rc) {
8857 DP(NETIF_MSG_PROBE,
8858 "nvram_tbl[%d] read data (rc -%d)\n", i, -rc);
8859 goto test_nvram_exit;
8862 csum = ether_crc_le(nvram_tbl[i].size, data);
8863 if (csum != CRC32_RESIDUAL) {
8864 DP(NETIF_MSG_PROBE,
8865 "nvram_tbl[%d] csum value (0x%08x)\n", i, csum);
8866 rc = -ENODEV;
8867 goto test_nvram_exit;
8871 test_nvram_exit:
8872 return rc;
8875 static int bnx2x_test_intr(struct bnx2x *bp)
8877 struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
8878 int i, rc;
8880 if (!netif_running(bp->dev))
8881 return -ENODEV;
8883 config->hdr.length_6b = 0;
8884 config->hdr.offset = 0;
8885 config->hdr.client_id = BP_CL_ID(bp);
8886 config->hdr.reserved1 = 0;
8888 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
8889 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
8890 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
8891 if (rc == 0) {
8892 bp->set_mac_pending++;
8893 for (i = 0; i < 10; i++) {
8894 if (!bp->set_mac_pending)
8895 break;
8896 msleep_interruptible(10);
8898 if (i == 10)
8899 rc = -ENODEV;
8902 return rc;
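/* ethtool self-test entry point (e.g. ethtool -t <iface> offline|online).
 * The offline tests (registers, memory, loopback) require unloading the NIC
 * and reloading it in DIAG mode and are not available in E1H multi-function
 * mode; the online tests (NVRAM, interrupt, link, MC assert) run against
 * the live device.
 */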
8905 static void bnx2x_self_test(struct net_device *dev,
8906 struct ethtool_test *etest, u64 *buf)
8908 struct bnx2x *bp = netdev_priv(dev);
8910 memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS);
8912 if (!netif_running(dev))
8913 return;
8915 /* offline tests are not supported in MF mode */
8916 if (IS_E1HMF(bp))
8917 etest->flags &= ~ETH_TEST_FL_OFFLINE;
8919 if (etest->flags & ETH_TEST_FL_OFFLINE) {
8920 u8 link_up;
8922 link_up = bp->link_vars.link_up;
8923 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8924 bnx2x_nic_load(bp, LOAD_DIAG);
8925 /* wait until link state is restored */
8926 bnx2x_wait_for_link(bp, link_up);
8928 if (bnx2x_test_registers(bp) != 0) {
8929 buf[0] = 1;
8930 etest->flags |= ETH_TEST_FL_FAILED;
8932 if (bnx2x_test_memory(bp) != 0) {
8933 buf[1] = 1;
8934 etest->flags |= ETH_TEST_FL_FAILED;
8936 buf[2] = bnx2x_test_loopback(bp, link_up);
8937 if (buf[2] != 0)
8938 etest->flags |= ETH_TEST_FL_FAILED;
8940 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8941 bnx2x_nic_load(bp, LOAD_NORMAL);
8942 /* wait until link state is restored */
8943 bnx2x_wait_for_link(bp, link_up);
8945 if (bnx2x_test_nvram(bp) != 0) {
8946 buf[3] = 1;
8947 etest->flags |= ETH_TEST_FL_FAILED;
8949 if (bnx2x_test_intr(bp) != 0) {
8950 buf[4] = 1;
8951 etest->flags |= ETH_TEST_FL_FAILED;
8953 if (bp->port.pmf)
8954 if (bnx2x_link_test(bp) != 0) {
8955 buf[5] = 1;
8956 etest->flags |= ETH_TEST_FL_FAILED;
8958 buf[7] = bnx2x_mc_assert(bp);
8959 if (buf[7] != 0)
8960 etest->flags |= ETH_TEST_FL_FAILED;
8962 #ifdef BNX2X_EXTRA_DEBUG
8963 bnx2x_panic_dump(bp);
8964 #endif
8967 static const struct {
8968 long offset;
8969 int size;
8970 u32 flags;
8971 #define STATS_FLAGS_PORT 1
8972 #define STATS_FLAGS_FUNC 2
8973 u8 string[ETH_GSTRING_LEN];
8974 } bnx2x_stats_arr[BNX2X_NUM_STATS] = {
8975 /* 1 */ { STATS_OFFSET32(valid_bytes_received_hi),
8976 8, STATS_FLAGS_FUNC, "rx_bytes" },
8977 { STATS_OFFSET32(error_bytes_received_hi),
8978 8, STATS_FLAGS_FUNC, "rx_error_bytes" },
8979 { STATS_OFFSET32(total_bytes_transmitted_hi),
8980 8, STATS_FLAGS_FUNC, "tx_bytes" },
8981 { STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
8982 8, STATS_FLAGS_PORT, "tx_error_bytes" },
8983 { STATS_OFFSET32(total_unicast_packets_received_hi),
8984 8, STATS_FLAGS_FUNC, "rx_ucast_packets" },
8985 { STATS_OFFSET32(total_multicast_packets_received_hi),
8986 8, STATS_FLAGS_FUNC, "rx_mcast_packets" },
8987 { STATS_OFFSET32(total_broadcast_packets_received_hi),
8988 8, STATS_FLAGS_FUNC, "rx_bcast_packets" },
8989 { STATS_OFFSET32(total_unicast_packets_transmitted_hi),
8990 8, STATS_FLAGS_FUNC, "tx_packets" },
8991 { STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
8992 8, STATS_FLAGS_PORT, "tx_mac_errors" },
8993 /* 10 */{ STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
8994 8, STATS_FLAGS_PORT, "tx_carrier_errors" },
8995 { STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
8996 8, STATS_FLAGS_PORT, "rx_crc_errors" },
8997 { STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
8998 8, STATS_FLAGS_PORT, "rx_align_errors" },
8999 { STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
9000 8, STATS_FLAGS_PORT, "tx_single_collisions" },
9001 { STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
9002 8, STATS_FLAGS_PORT, "tx_multi_collisions" },
9003 { STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
9004 8, STATS_FLAGS_PORT, "tx_deferred" },
9005 { STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
9006 8, STATS_FLAGS_PORT, "tx_excess_collisions" },
9007 { STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
9008 8, STATS_FLAGS_PORT, "tx_late_collisions" },
9009 { STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
9010 8, STATS_FLAGS_PORT, "tx_total_collisions" },
9011 { STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
9012 8, STATS_FLAGS_PORT, "rx_fragments" },
9013 /* 20 */{ STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
9014 8, STATS_FLAGS_PORT, "rx_jabbers" },
9015 { STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
9016 8, STATS_FLAGS_PORT, "rx_undersize_packets" },
9017 { STATS_OFFSET32(jabber_packets_received),
9018 4, STATS_FLAGS_FUNC, "rx_oversize_packets" },
9019 { STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
9020 8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
9021 { STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
9022 8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
9023 { STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
9024 8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
9025 { STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
9026 8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
9027 { STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
9028 8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
9029 { STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
9030 8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
9031 { STATS_OFFSET32(etherstatspktsover1522octets_hi),
9032 8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
9033 /* 30 */{ STATS_OFFSET32(rx_stat_xonpauseframesreceived_hi),
9034 8, STATS_FLAGS_PORT, "rx_xon_frames" },
9035 { STATS_OFFSET32(rx_stat_xoffpauseframesreceived_hi),
9036 8, STATS_FLAGS_PORT, "rx_xoff_frames" },
9037 { STATS_OFFSET32(tx_stat_outxonsent_hi),
9038 8, STATS_FLAGS_PORT, "tx_xon_frames" },
9039 { STATS_OFFSET32(tx_stat_outxoffsent_hi),
9040 8, STATS_FLAGS_PORT, "tx_xoff_frames" },
9041 { STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
9042 8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
9043 { STATS_OFFSET32(mac_filter_discard),
9044 4, STATS_FLAGS_PORT, "rx_filtered_packets" },
9045 { STATS_OFFSET32(no_buff_discard),
9046 4, STATS_FLAGS_FUNC, "rx_discards" },
9047 { STATS_OFFSET32(xxoverflow_discard),
9048 4, STATS_FLAGS_PORT, "rx_fw_discards" },
9049 { STATS_OFFSET32(brb_drop_hi),
9050 8, STATS_FLAGS_PORT, "brb_discard" },
9051 { STATS_OFFSET32(brb_truncate_hi),
9052 8, STATS_FLAGS_PORT, "brb_truncate" },
9053 /* 40 */{ STATS_OFFSET32(rx_err_discard_pkt),
9054 4, STATS_FLAGS_FUNC, "rx_phy_ip_err_discards"},
9055 { STATS_OFFSET32(rx_skb_alloc_failed),
9056 4, STATS_FLAGS_FUNC, "rx_skb_alloc_discard" },
9057 /* 42 */{ STATS_OFFSET32(hw_csum_err),
9058 4, STATS_FLAGS_FUNC, "rx_csum_offload_errors" }
9061 #define IS_NOT_E1HMF_STAT(bp, i) \
9062 (IS_E1HMF(bp) && (bnx2x_stats_arr[i].flags & STATS_FLAGS_PORT))
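/* In E1H multi-function mode only per-function counters are meaningful, so
 * statistics flagged STATS_FLAGS_PORT are filtered out of the strings, the
 * count and the values reported to ethtool -S.
 */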
9064 static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
9066 struct bnx2x *bp = netdev_priv(dev);
9067 int i, j;
9069 switch (stringset) {
9070 case ETH_SS_STATS:
9071 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
9072 if (IS_NOT_E1HMF_STAT(bp, i))
9073 continue;
9074 strcpy(buf + j*ETH_GSTRING_LEN,
9075 bnx2x_stats_arr[i].string);
9076 j++;
9078 break;
9080 case ETH_SS_TEST:
9081 memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr));
9082 break;
9086 static int bnx2x_get_stats_count(struct net_device *dev)
9088 struct bnx2x *bp = netdev_priv(dev);
9089 int i, num_stats = 0;
9091 for (i = 0; i < BNX2X_NUM_STATS; i++) {
9092 if (IS_NOT_E1HMF_STAT(bp, i))
9093 continue;
9094 num_stats++;
9096 return num_stats;
9099 static void bnx2x_get_ethtool_stats(struct net_device *dev,
9100 struct ethtool_stats *stats, u64 *buf)
9102 struct bnx2x *bp = netdev_priv(dev);
9103 u32 *hw_stats = (u32 *)&bp->eth_stats;
9104 int i, j;
9106 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
9107 if (IS_NOT_E1HMF_STAT(bp, i))
9108 continue;
9110 if (bnx2x_stats_arr[i].size == 0) {
9111 /* skip this counter */
9112 buf[j] = 0;
9113 j++;
9114 continue;
9116 if (bnx2x_stats_arr[i].size == 4) {
9117 /* 4-byte counter */
9118 buf[j] = (u64) *(hw_stats + bnx2x_stats_arr[i].offset);
9119 j++;
9120 continue;
9122 /* 8-byte counter */
9123 buf[j] = HILO_U64(*(hw_stats + bnx2x_stats_arr[i].offset),
9124 *(hw_stats + bnx2x_stats_arr[i].offset + 1));
9125 j++;
9129 static int bnx2x_phys_id(struct net_device *dev, u32 data)
9131 struct bnx2x *bp = netdev_priv(dev);
9132 int port = BP_PORT(bp);
9133 int i;
9135 if (!netif_running(dev))
9136 return 0;
9138 if (!bp->port.pmf)
9139 return 0;
9141 if (data == 0)
9142 data = 2;
9144 for (i = 0; i < (data * 2); i++) {
9145 if ((i % 2) == 0)
9146 bnx2x_set_led(bp, port, LED_MODE_OPER, SPEED_1000,
9147 bp->link_params.hw_led_mode,
9148 bp->link_params.chip_id);
9149 else
9150 bnx2x_set_led(bp, port, LED_MODE_OFF, 0,
9151 bp->link_params.hw_led_mode,
9152 bp->link_params.chip_id);
9154 msleep_interruptible(500);
9155 if (signal_pending(current))
9156 break;
9159 if (bp->link_vars.link_up)
9160 bnx2x_set_led(bp, port, LED_MODE_OPER,
9161 bp->link_vars.line_speed,
9162 bp->link_params.hw_led_mode,
9163 bp->link_params.chip_id);
9165 return 0;
9168 static struct ethtool_ops bnx2x_ethtool_ops = {
9169 .get_settings = bnx2x_get_settings,
9170 .set_settings = bnx2x_set_settings,
9171 .get_drvinfo = bnx2x_get_drvinfo,
9172 .get_wol = bnx2x_get_wol,
9173 .set_wol = bnx2x_set_wol,
9174 .get_msglevel = bnx2x_get_msglevel,
9175 .set_msglevel = bnx2x_set_msglevel,
9176 .nway_reset = bnx2x_nway_reset,
9177 .get_link = ethtool_op_get_link,
9178 .get_eeprom_len = bnx2x_get_eeprom_len,
9179 .get_eeprom = bnx2x_get_eeprom,
9180 .set_eeprom = bnx2x_set_eeprom,
9181 .get_coalesce = bnx2x_get_coalesce,
9182 .set_coalesce = bnx2x_set_coalesce,
9183 .get_ringparam = bnx2x_get_ringparam,
9184 .set_ringparam = bnx2x_set_ringparam,
9185 .get_pauseparam = bnx2x_get_pauseparam,
9186 .set_pauseparam = bnx2x_set_pauseparam,
9187 .get_rx_csum = bnx2x_get_rx_csum,
9188 .set_rx_csum = bnx2x_set_rx_csum,
9189 .get_tx_csum = ethtool_op_get_tx_csum,
9190 .set_tx_csum = ethtool_op_set_tx_hw_csum,
9191 .set_flags = bnx2x_set_flags,
9192 .get_flags = ethtool_op_get_flags,
9193 .get_sg = ethtool_op_get_sg,
9194 .set_sg = ethtool_op_set_sg,
9195 .get_tso = ethtool_op_get_tso,
9196 .set_tso = bnx2x_set_tso,
9197 .self_test_count = bnx2x_self_test_count,
9198 .self_test = bnx2x_self_test,
9199 .get_strings = bnx2x_get_strings,
9200 .phys_id = bnx2x_phys_id,
9201 .get_stats_count = bnx2x_get_stats_count,
9202 .get_ethtool_stats = bnx2x_get_ethtool_stats,
9205 /* end of ethtool_ops */
9207 /****************************************************************************
9208 * General service functions
9209 ****************************************************************************/
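/* bnx2x_set_power_state() programs the PCI PM control register directly:
 * D0 clears the state bits (and the PME status) and waits 20ms when coming
 * out of D3hot; D3hot sets state 3 and, when WoL is armed, PME_ENABLE,
 * after which no register access is allowed until the device is back in D0.
 */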
9211 static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
9213 u16 pmcsr;
9215 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
9217 switch (state) {
9218 case PCI_D0:
9219 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
9220 ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
9221 PCI_PM_CTRL_PME_STATUS));
9223 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
9224 /* delay required during transition out of D3hot */
9225 msleep(20);
9226 break;
9228 case PCI_D3hot:
9229 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
9230 pmcsr |= 3;
9232 if (bp->wol)
9233 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
9235 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
9236 pmcsr);
9238 /* No more memory access after this point until
9239 * device is brought back to D0.
9241 break;
9243 default:
9244 return -EINVAL;
9246 return 0;
9250 * net_device service functions
9253 static int bnx2x_poll(struct napi_struct *napi, int budget)
9255 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
9256 napi);
9257 struct bnx2x *bp = fp->bp;
9258 int work_done = 0;
9259 u16 rx_cons_sb;
9261 #ifdef BNX2X_STOP_ON_ERROR
9262 if (unlikely(bp->panic))
9263 goto poll_panic;
9264 #endif
9266 prefetch(fp->tx_buf_ring[TX_BD(fp->tx_pkt_cons)].skb);
9267 prefetch(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb);
9268 prefetch((char *)(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb) + 256);
9270 bnx2x_update_fpsb_idx(fp);
9272 if (BNX2X_HAS_TX_WORK(fp))
9273 bnx2x_tx_int(fp, budget);
9275 rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
9276 if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
9277 rx_cons_sb++;
9278 if (BNX2X_HAS_RX_WORK(fp))
9279 work_done = bnx2x_rx_int(fp, budget);
9281 rmb(); /* BNX2X_HAS_WORK() reads the status block */
9282 rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
9283 if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
9284 rx_cons_sb++;
9286 /* must not complete if we consumed full budget */
9287 if ((work_done < budget) && !BNX2X_HAS_WORK(fp)) {
9289 #ifdef BNX2X_STOP_ON_ERROR
9290 poll_panic:
9291 #endif
9292 netif_rx_complete(bp->dev, napi);
9294 bnx2x_ack_sb(bp, FP_SB_ID(fp), USTORM_ID,
9295 le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1);
9296 bnx2x_ack_sb(bp, FP_SB_ID(fp), CSTORM_ID,
9297 le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1);
9299 return work_done;
9303 /* we split the first BD into a headers BD and a data BD
9304 * to ease the pain of our fellow microcode engineers;
9305 * we use one mapping for both BDs.
9306 * So far this has only been observed to happen
9307 * in Other Operating Systems(TM)
9309 static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
9310 struct bnx2x_fastpath *fp,
9311 struct eth_tx_bd **tx_bd, u16 hlen,
9312 u16 bd_prod, int nbd)
9314 struct eth_tx_bd *h_tx_bd = *tx_bd;
9315 struct eth_tx_bd *d_tx_bd;
9316 dma_addr_t mapping;
9317 int old_len = le16_to_cpu(h_tx_bd->nbytes);
9319 /* first fix first BD */
9320 h_tx_bd->nbd = cpu_to_le16(nbd);
9321 h_tx_bd->nbytes = cpu_to_le16(hlen);
9323 DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
9324 "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
9325 h_tx_bd->addr_lo, h_tx_bd->nbd);
9327 /* now get a new data BD
9328 * (after the pbd) and fill it */
9329 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9330 d_tx_bd = &fp->tx_desc_ring[bd_prod];
9332 mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
9333 le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
9335 d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
9336 d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
9337 d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
9338 d_tx_bd->vlan = 0;
9339 /* this marks the BD as one that has no individual mapping
9340 * the FW ignores this flag in a BD not marked start
9342 d_tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_SW_LSO;
9343 DP(NETIF_MSG_TX_QUEUED,
9344 "TSO split data size is %d (%x:%x)\n",
9345 d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
9347 /* update tx_bd for marking the last BD flag */
9348 *tx_bd = d_tx_bd;
9350 return bd_prod;
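/* Adjust the TCP pseudo checksum taken from the skb: if fix > 0, the
 * partial checksum of the fix bytes preceding the transport header is
 * subtracted; if fix < 0, the partial checksum of the first -fix bytes at
 * the transport header is added; the result is folded and byte-swapped for
 * the parsing BD. Used by the "HW bug" fixup in bnx2x_start_xmit().
 */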
9353 static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
9355 if (fix > 0)
9356 csum = (u16) ~csum_fold(csum_sub(csum,
9357 csum_partial(t_header - fix, fix, 0)));
9359 else if (fix < 0)
9360 csum = (u16) ~csum_fold(csum_add(csum,
9361 csum_partial(t_header, -fix, 0)));
9363 return swab16(csum);
9366 static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
9368 u32 rc;
9370 if (skb->ip_summed != CHECKSUM_PARTIAL)
9371 rc = XMIT_PLAIN;
9373 else {
9374 if (skb->protocol == ntohs(ETH_P_IPV6)) {
9375 rc = XMIT_CSUM_V6;
9376 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
9377 rc |= XMIT_CSUM_TCP;
9379 } else {
9380 rc = XMIT_CSUM_V4;
9381 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
9382 rc |= XMIT_CSUM_TCP;
9386 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
9387 rc |= XMIT_GSO_V4;
9389 else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
9390 rc |= XMIT_GSO_V6;
9392 return rc;
9395 /* check if packet requires linearization (packet is too fragmented) */
9396 static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
9397 u32 xmit_type)
9399 int to_copy = 0;
9400 int hlen = 0;
9401 int first_bd_sz = 0;
9403 /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
9404 if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
9406 if (xmit_type & XMIT_GSO) {
9407 unsigned short lso_mss = skb_shinfo(skb)->gso_size;
9408 /* Check if LSO packet needs to be copied:
9409 3 = 1 (for headers BD) + 2 (for PBD and last BD) */
9410 int wnd_size = MAX_FETCH_BD - 3;
9411 /* Number of windows to check */
9412 int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
9413 int wnd_idx = 0;
9414 int frag_idx = 0;
9415 u32 wnd_sum = 0;
9417 /* Headers length */
9418 hlen = (int)(skb_transport_header(skb) - skb->data) +
9419 tcp_hdrlen(skb);
9421 /* Amount of data (w/o headers) on linear part of SKB*/
9422 first_bd_sz = skb_headlen(skb) - hlen;
9424 wnd_sum = first_bd_sz;
9426 /* Calculate the first sum - it's special */
9427 for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
9428 wnd_sum +=
9429 skb_shinfo(skb)->frags[frag_idx].size;
9431 /* If there was data in the linear part of the skb - check it */
9432 if (first_bd_sz > 0) {
9433 if (unlikely(wnd_sum < lso_mss)) {
9434 to_copy = 1;
9435 goto exit_lbl;
9438 wnd_sum -= first_bd_sz;
9441 /* Others are easier: run through the frag list and
9442 check all windows */
9443 for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
9444 wnd_sum +=
9445 skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;
9447 if (unlikely(wnd_sum < lso_mss)) {
9448 to_copy = 1;
9449 break;
9451 wnd_sum -=
9452 skb_shinfo(skb)->frags[wnd_idx].size;
9455 } else {
9456 /* a non-LSO packet that is too fragmented must always
9457 be linearized */
9458 to_copy = 1;
9462 exit_lbl:
9463 if (unlikely(to_copy))
9464 DP(NETIF_MSG_TX_QUEUED,
9465 "Linearization IS REQUIRED for %s packet. "
9466 "num_frags %d hlen %d first_bd_sz %d\n",
9467 (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
9468 skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
9470 return to_copy;
9473 /* called with netif_tx_lock
9474 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
9475 * netif_wake_queue()
9477 static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
9479 struct bnx2x *bp = netdev_priv(dev);
9480 struct bnx2x_fastpath *fp;
9481 struct sw_tx_bd *tx_buf;
9482 struct eth_tx_bd *tx_bd;
9483 struct eth_tx_parse_bd *pbd = NULL;
9484 u16 pkt_prod, bd_prod;
9485 int nbd, fp_index;
9486 dma_addr_t mapping;
9487 u32 xmit_type = bnx2x_xmit_type(bp, skb);
9488 int vlan_off = (bp->e1hov ? 4 : 0);
9489 int i;
9490 u8 hlen = 0;
9492 #ifdef BNX2X_STOP_ON_ERROR
9493 if (unlikely(bp->panic))
9494 return NETDEV_TX_BUSY;
9495 #endif
9497 fp_index = (smp_processor_id() % bp->num_queues);
9498 fp = &bp->fp[fp_index];
9500 if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) {
9501 bp->eth_stats.driver_xoff++;
9502 netif_stop_queue(dev);
9503 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
9504 return NETDEV_TX_BUSY;
9507 DP(NETIF_MSG_TX_QUEUED, "SKB: summed %x protocol %x protocol(%x,%x)"
9508 " gso type %x xmit_type %x\n",
9509 skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
9510 ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
9512 /* First, check if we need to linearize the skb
9513 (due to FW restrictions) */
9514 if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
9515 /* Statistics of linearization */
9516 bp->lin_cnt++;
9517 if (skb_linearize(skb) != 0) {
9518 DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
9519 "silently dropping this SKB\n");
9520 dev_kfree_skb_any(skb);
9521 return NETDEV_TX_OK;
9526 Please read carefully. First we use one BD which we mark as start,
9527 then for TSO or xsum we have a parsing info BD,
9528 and only then we have the rest of the TSO BDs.
9529 (don't forget to mark the last one as last,
9530 and to unmap only AFTER you write to the BD ...)
9531 And above all, all pbd sizes are in words - NOT DWORDS!
9534 pkt_prod = fp->tx_pkt_prod++;
9535 bd_prod = TX_BD(fp->tx_bd_prod);
9537 /* get a tx_buf and first BD */
9538 tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
9539 tx_bd = &fp->tx_desc_ring[bd_prod];
9541 tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
9542 tx_bd->general_data = (UNICAST_ADDRESS <<
9543 ETH_TX_BD_ETH_ADDR_TYPE_SHIFT);
9544 /* header nbd */
9545 tx_bd->general_data |= (1 << ETH_TX_BD_HDR_NBDS_SHIFT);
9547 /* remember the first BD of the packet */
9548 tx_buf->first_bd = fp->tx_bd_prod;
9549 tx_buf->skb = skb;
9551 DP(NETIF_MSG_TX_QUEUED,
9552 "sending pkt %u @%p next_idx %u bd %u @%p\n",
9553 pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_bd);
9555 if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb)) {
9556 tx_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
9557 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG;
9558 vlan_off += 4;
9559 } else
9560 tx_bd->vlan = cpu_to_le16(pkt_prod);
9562 if (xmit_type) {
9563 /* turn on parsing and get a BD */
9564 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9565 pbd = (void *)&fp->tx_desc_ring[bd_prod];
9567 memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
9570 if (xmit_type & XMIT_CSUM) {
9571 hlen = (skb_network_header(skb) - skb->data + vlan_off) / 2;
9573 /* for now NS flag is not used in Linux */
9574 pbd->global_data = (hlen |
9575 ((skb->protocol == ntohs(ETH_P_8021Q)) <<
9576 ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT));
9578 pbd->ip_hlen = (skb_transport_header(skb) -
9579 skb_network_header(skb)) / 2;
9581 hlen += pbd->ip_hlen + tcp_hdrlen(skb) / 2;
9583 pbd->total_hlen = cpu_to_le16(hlen);
9584 hlen = hlen*2 - vlan_off;
9586 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_TCP_CSUM;
9588 if (xmit_type & XMIT_CSUM_V4)
9589 tx_bd->bd_flags.as_bitfield |=
9590 ETH_TX_BD_FLAGS_IP_CSUM;
9591 else
9592 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IPV6;
9594 if (xmit_type & XMIT_CSUM_TCP) {
9595 pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
9597 } else {
9598 s8 fix = SKB_CS_OFF(skb); /* signed! */
9600 pbd->global_data |= ETH_TX_PARSE_BD_CS_ANY_FLG;
9601 pbd->cs_offset = fix / 2;
9603 DP(NETIF_MSG_TX_QUEUED,
9604 "hlen %d offset %d fix %d csum before fix %x\n",
9605 le16_to_cpu(pbd->total_hlen), pbd->cs_offset, fix,
9606 SKB_CS(skb));
9608 /* HW bug: fixup the CSUM */
9609 pbd->tcp_pseudo_csum =
9610 bnx2x_csum_fix(skb_transport_header(skb),
9611 SKB_CS(skb), fix);
9613 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
9614 pbd->tcp_pseudo_csum);
9618 mapping = pci_map_single(bp->pdev, skb->data,
9619 skb_headlen(skb), PCI_DMA_TODEVICE);
9621 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
9622 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
9623 nbd = skb_shinfo(skb)->nr_frags + ((pbd == NULL) ? 1 : 2);
9624 tx_bd->nbd = cpu_to_le16(nbd);
9625 tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));
9627 DP(NETIF_MSG_TX_QUEUED, "first bd @%p addr (%x:%x) nbd %d"
9628 " nbytes %d flags %x vlan %x\n",
9629 tx_bd, tx_bd->addr_hi, tx_bd->addr_lo, le16_to_cpu(tx_bd->nbd),
9630 le16_to_cpu(tx_bd->nbytes), tx_bd->bd_flags.as_bitfield,
9631 le16_to_cpu(tx_bd->vlan));
9633 if (xmit_type & XMIT_GSO) {
9635 DP(NETIF_MSG_TX_QUEUED,
9636 "TSO packet len %d hlen %d total len %d tso size %d\n",
9637 skb->len, hlen, skb_headlen(skb),
9638 skb_shinfo(skb)->gso_size);
9640 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
9642 if (unlikely(skb_headlen(skb) > hlen))
9643 bd_prod = bnx2x_tx_split(bp, fp, &tx_bd, hlen,
9644 bd_prod, ++nbd);
9646 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
9647 pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
9648 pbd->tcp_flags = pbd_tcp_flags(skb);
9650 if (xmit_type & XMIT_GSO_V4) {
9651 pbd->ip_id = swab16(ip_hdr(skb)->id);
9652 pbd->tcp_pseudo_csum =
9653 swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
9654 ip_hdr(skb)->daddr,
9655 0, IPPROTO_TCP, 0));
9657 } else
9658 pbd->tcp_pseudo_csum =
9659 swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
9660 &ipv6_hdr(skb)->daddr,
9661 0, IPPROTO_TCP, 0));
9663 pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN;
9666 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
9667 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
9669 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9670 tx_bd = &fp->tx_desc_ring[bd_prod];
9672 mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
9673 frag->size, PCI_DMA_TODEVICE);
9675 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
9676 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
9677 tx_bd->nbytes = cpu_to_le16(frag->size);
9678 tx_bd->vlan = cpu_to_le16(pkt_prod);
9679 tx_bd->bd_flags.as_bitfield = 0;
9681 DP(NETIF_MSG_TX_QUEUED,
9682 "frag %d bd @%p addr (%x:%x) nbytes %d flags %x\n",
9683 i, tx_bd, tx_bd->addr_hi, tx_bd->addr_lo,
9684 le16_to_cpu(tx_bd->nbytes), tx_bd->bd_flags.as_bitfield);
9687 /* now at last mark the BD as the last BD */
9688 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_END_BD;
9690 DP(NETIF_MSG_TX_QUEUED, "last bd @%p flags %x\n",
9691 tx_bd, tx_bd->bd_flags.as_bitfield);
9693 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9695 /* now send a tx doorbell, counting the next BD
9696 * if the packet contains or ends with it
9698 if (TX_BD_POFF(bd_prod) < nbd)
9699 nbd++;
9701 if (pbd)
9702 DP(NETIF_MSG_TX_QUEUED,
9703 "PBD @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u"
9704 " tcp_flags %x xsum %x seq %u hlen %u\n",
9705 pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id,
9706 pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum,
9707 pbd->tcp_send_seq, le16_to_cpu(pbd->total_hlen));
9709 DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
9711 fp->hw_tx_prods->bds_prod =
9712 cpu_to_le16(le16_to_cpu(fp->hw_tx_prods->bds_prod) + nbd);
9713 mb(); /* FW restriction: must not reorder writing nbd and packets */
9714 fp->hw_tx_prods->packets_prod =
9715 cpu_to_le32(le32_to_cpu(fp->hw_tx_prods->packets_prod) + 1);
9716 DOORBELL(bp, FP_IDX(fp), 0);
9718 mmiowb();
9720 fp->tx_bd_prod += nbd;
9721 dev->trans_start = jiffies;
9723 if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
9724 netif_stop_queue(dev);
9725 bp->eth_stats.driver_xoff++;
9726 if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
9727 netif_wake_queue(dev);
9729 fp->tx_pkt++;
9731 return NETDEV_TX_OK;
9734 /* called with rtnl_lock */
9735 static int bnx2x_open(struct net_device *dev)
9737 struct bnx2x *bp = netdev_priv(dev);
9739 bnx2x_set_power_state(bp, PCI_D0);
9741 return bnx2x_nic_load(bp, LOAD_OPEN);
9744 /* called with rtnl_lock */
9745 static int bnx2x_close(struct net_device *dev)
9747 struct bnx2x *bp = netdev_priv(dev);
9749 /* Unload the driver, release IRQs */
9750 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
9751 if (atomic_read(&bp->pdev->enable_cnt) == 1)
9752 if (!CHIP_REV_IS_SLOW(bp))
9753 bnx2x_set_power_state(bp, PCI_D3hot);
9755 return 0;
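/* Rx mode / multicast filtering (set_rx_mode below): promiscuous and
 * all-multi map straight to the storm rx_mode; for a real multicast list,
 * E1 writes the addresses into the MAC CAM configuration table and posts a
 * SET_MAC ramrod, while E1H hashes each address with CRC32c and sets the
 * matching bit in the MC_HASH filter registers.
 */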
9758 /* called with netif_tx_lock from set_multicast */
9759 static void bnx2x_set_rx_mode(struct net_device *dev)
9761 struct bnx2x *bp = netdev_priv(dev);
9762 u32 rx_mode = BNX2X_RX_MODE_NORMAL;
9763 int port = BP_PORT(bp);
9765 if (bp->state != BNX2X_STATE_OPEN) {
9766 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
9767 return;
9770 DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);
9772 if (dev->flags & IFF_PROMISC)
9773 rx_mode = BNX2X_RX_MODE_PROMISC;
9775 else if ((dev->flags & IFF_ALLMULTI) ||
9776 ((dev->mc_count > BNX2X_MAX_MULTICAST) && CHIP_IS_E1(bp)))
9777 rx_mode = BNX2X_RX_MODE_ALLMULTI;
9779 else { /* some multicasts */
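/* E1 loads explicit multicast CAM entries via a SET_MAC ramrod;
 * E1H uses the 256-bit multicast hash filter instead.
 */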
9780 if (CHIP_IS_E1(bp)) {
9781 int i, old, offset;
9782 struct dev_mc_list *mclist;
9783 struct mac_configuration_cmd *config =
9784 bnx2x_sp(bp, mcast_config);
9786 for (i = 0, mclist = dev->mc_list;
9787 mclist && (i < dev->mc_count);
9788 i++, mclist = mclist->next) {
9790 config->config_table[i].
9791 cam_entry.msb_mac_addr =
9792 swab16(*(u16 *)&mclist->dmi_addr[0]);
9793 config->config_table[i].
9794 cam_entry.middle_mac_addr =
9795 swab16(*(u16 *)&mclist->dmi_addr[2]);
9796 config->config_table[i].
9797 cam_entry.lsb_mac_addr =
9798 swab16(*(u16 *)&mclist->dmi_addr[4]);
9799 config->config_table[i].cam_entry.flags =
9800 cpu_to_le16(port);
9801 config->config_table[i].
9802 target_table_entry.flags = 0;
9803 config->config_table[i].
9804 target_table_entry.client_id = 0;
9805 config->config_table[i].
9806 target_table_entry.vlan_id = 0;
9808 DP(NETIF_MSG_IFUP,
9809 "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
9810 config->config_table[i].
9811 cam_entry.msb_mac_addr,
9812 config->config_table[i].
9813 cam_entry.middle_mac_addr,
9814 config->config_table[i].
9815 cam_entry.lsb_mac_addr);
9817 old = config->hdr.length_6b;
9818 if (old > i) {
9819 for (; i < old; i++) {
9820 if (CAM_IS_INVALID(config->
9821 config_table[i])) {
9822 i--; /* already invalidated */
9823 break;
9824 }
9825 /* invalidate */
9826 CAM_INVALIDATE(config->
9827 config_table[i]);
9828 }
9829 }
9831 if (CHIP_REV_IS_SLOW(bp))
9832 offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
9833 else
9834 offset = BNX2X_MAX_MULTICAST*(1 + port);
9836 config->hdr.length_6b = i;
9837 config->hdr.offset = offset;
9838 config->hdr.client_id = BP_CL_ID(bp);
9839 config->hdr.reserved1 = 0;
9841 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
9842 U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
9843 U64_LO(bnx2x_sp_mapping(bp, mcast_config)),
9844 0);
9845 } else { /* E1H */
9846 /* Accept one or more multicasts */
9847 struct dev_mc_list *mclist;
9848 u32 mc_filter[MC_HASH_SIZE];
9849 u32 crc, bit, regidx;
9850 int i;
9852 memset(mc_filter, 0, 4 * MC_HASH_SIZE);
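/* Hash each multicast MAC with crc32c; the top 8 CRC bits select one of
 * 256 filter bits spread over the 32-bit MC_HASH registers.
 */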
9854 for (i = 0, mclist = dev->mc_list;
9855 mclist && (i < dev->mc_count);
9856 i++, mclist = mclist->next) {
9858 DP(NETIF_MSG_IFUP, "Adding mcast MAC: "
9859 "%02x:%02x:%02x:%02x:%02x:%02x\n",
9860 mclist->dmi_addr[0], mclist->dmi_addr[1],
9861 mclist->dmi_addr[2], mclist->dmi_addr[3],
9862 mclist->dmi_addr[4], mclist->dmi_addr[5]);
9864 crc = crc32c_le(0, mclist->dmi_addr, ETH_ALEN);
9865 bit = (crc >> 24) & 0xff;
9866 regidx = bit >> 5;
9867 bit &= 0x1f;
9868 mc_filter[regidx] |= (1 << bit);
9871 for (i = 0; i < MC_HASH_SIZE; i++)
9872 REG_WR(bp, MC_HASH_OFFSET(bp, i),
9873 mc_filter[i]);
9877 bp->rx_mode = rx_mode;
9878 bnx2x_set_storm_rx_mode(bp);
9881 /* called with rtnl_lock */
9882 static int bnx2x_change_mac_addr(struct net_device *dev, void *p)
9884 struct sockaddr *addr = p;
9885 struct bnx2x *bp = netdev_priv(dev);
9887 if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
9888 return -EINVAL;
9890 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
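/* The new MAC reaches the chip's CAM only while the NIC is up; otherwise
 * it is programmed on the next load.
 */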
9891 if (netif_running(dev)) {
9892 if (CHIP_IS_E1(bp))
9893 bnx2x_set_mac_addr_e1(bp, 1);
9894 else
9895 bnx2x_set_mac_addr_e1h(bp, 1);
9898 return 0;
9901 /* called with rtnl_lock */
9902 static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
9904 struct mii_ioctl_data *data = if_mii(ifr);
9905 struct bnx2x *bp = netdev_priv(dev);
9906 int port = BP_PORT(bp);
9907 int err;
9909 switch (cmd) {
9910 case SIOCGMIIPHY:
9911 data->phy_id = bp->port.phy_addr;
9913 /* fallthrough */
9915 case SIOCGMIIREG: {
9916 u16 mii_regval;
9918 if (!netif_running(dev))
9919 return -EAGAIN;
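/* phy_mutex serializes MDIO accesses to the external PHY. */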
9921 mutex_lock(&bp->port.phy_mutex);
9922 err = bnx2x_cl45_read(bp, port, 0, bp->port.phy_addr,
9923 DEFAULT_PHY_DEV_ADDR,
9924 (data->reg_num & 0x1f), &mii_regval);
9925 data->val_out = mii_regval;
9926 mutex_unlock(&bp->port.phy_mutex);
9927 return err;
9930 case SIOCSMIIREG:
9931 if (!capable(CAP_NET_ADMIN))
9932 return -EPERM;
9934 if (!netif_running(dev))
9935 return -EAGAIN;
9937 mutex_lock(&bp->port.phy_mutex);
9938 err = bnx2x_cl45_write(bp, port, 0, bp->port.phy_addr,
9939 DEFAULT_PHY_DEV_ADDR,
9940 (data->reg_num & 0x1f), data->val_in);
9941 mutex_unlock(&bp->port.phy_mutex);
9942 return err;
9944 default:
9945 /* do nothing */
9946 break;
9949 return -EOPNOTSUPP;
9952 /* called with rtnl_lock */
9953 static int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
9955 struct bnx2x *bp = netdev_priv(dev);
9956 int rc = 0;
9958 if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
9959 ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
9960 return -EINVAL;
9962 /* This does not race with packet allocation
9963 * because the actual alloc size is
9964 * only updated as part of load
9965 */
9966 dev->mtu = new_mtu;
9968 if (netif_running(dev)) {
9969 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9970 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
9973 return rc;
9976 static void bnx2x_tx_timeout(struct net_device *dev)
9978 struct bnx2x *bp = netdev_priv(dev);
9980 #ifdef BNX2X_STOP_ON_ERROR
9981 if (!bp->panic)
9982 bnx2x_panic();
9983 #endif
9984 /* This allows the netif to be shut down gracefully before resetting */
9985 schedule_work(&bp->reset_task);
9988 #ifdef BCM_VLAN
9989 /* called with rtnl_lock */
9990 static void bnx2x_vlan_rx_register(struct net_device *dev,
9991 struct vlan_group *vlgrp)
9993 struct bnx2x *bp = netdev_priv(dev);
9995 bp->vlgrp = vlgrp;
9996 if (netif_running(dev))
9997 bnx2x_set_client_config(bp);
10000 #endif
10002 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
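/* netpoll entry point: fake one interrupt with the IRQ line disabled. */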
10003 static void poll_bnx2x(struct net_device *dev)
10005 struct bnx2x *bp = netdev_priv(dev);
10007 disable_irq(bp->pdev->irq);
10008 bnx2x_interrupt(bp->pdev->irq, dev);
10009 enable_irq(bp->pdev->irq);
10011 #endif
10013 static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
10014 struct net_device *dev)
10016 struct bnx2x *bp;
10017 int rc;
10019 SET_NETDEV_DEV(dev, &pdev->dev);
10020 bp = netdev_priv(dev);
10022 bp->dev = dev;
10023 bp->pdev = pdev;
10024 bp->flags = 0;
10025 bp->func = PCI_FUNC(pdev->devfn);
10027 rc = pci_enable_device(pdev);
10028 if (rc) {
10029 printk(KERN_ERR PFX "Cannot enable PCI device, aborting\n");
10030 goto err_out;
10033 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
10034 printk(KERN_ERR PFX "Cannot find PCI device base address,"
10035 " aborting\n");
10036 rc = -ENODEV;
10037 goto err_out_disable;
10040 if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
10041 printk(KERN_ERR PFX "Cannot find second PCI device"
10042 " base address, aborting\n");
10043 rc = -ENODEV;
10044 goto err_out_disable;
10047 if (atomic_read(&pdev->enable_cnt) == 1) {
10048 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
10049 if (rc) {
10050 printk(KERN_ERR PFX "Cannot obtain PCI resources,"
10051 " aborting\n");
10052 goto err_out_disable;
10055 pci_set_master(pdev);
10056 pci_save_state(pdev);
10059 bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
10060 if (bp->pm_cap == 0) {
10061 printk(KERN_ERR PFX "Cannot find power management"
10062 " capability, aborting\n");
10063 rc = -EIO;
10064 goto err_out_release;
10067 bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
10068 if (bp->pcie_cap == 0) {
10069 printk(KERN_ERR PFX "Cannot find PCI Express capability,"
10070 " aborting\n");
10071 rc = -EIO;
10072 goto err_out_release;
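/* Prefer 64-bit DMA (DAC); fall back to a 32-bit mask if unsupported. */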
10075 if (pci_set_dma_mask(pdev, DMA_64BIT_MASK) == 0) {
10076 bp->flags |= USING_DAC_FLAG;
10077 if (pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK) != 0) {
10078 printk(KERN_ERR PFX "pci_set_consistent_dma_mask"
10079 " failed, aborting\n");
10080 rc = -EIO;
10081 goto err_out_release;
10084 } else if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) != 0) {
10085 printk(KERN_ERR PFX "System does not support DMA,"
10086 " aborting\n");
10087 rc = -EIO;
10088 goto err_out_release;
10091 dev->mem_start = pci_resource_start(pdev, 0);
10092 dev->base_addr = dev->mem_start;
10093 dev->mem_end = pci_resource_end(pdev, 0);
10095 dev->irq = pdev->irq;
10097 bp->regview = ioremap_nocache(dev->base_addr,
10098 pci_resource_len(pdev, 0));
10099 if (!bp->regview) {
10100 printk(KERN_ERR PFX "Cannot map register space, aborting\n");
10101 rc = -ENOMEM;
10102 goto err_out_release;
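/* BAR0 holds the register space mapped above; BAR2 holds the doorbells,
 * mapped up to BNX2X_DB_SIZE.
 */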
10105 bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
10106 min_t(u64, BNX2X_DB_SIZE,
10107 pci_resource_len(pdev, 2)));
10108 if (!bp->doorbells) {
10109 printk(KERN_ERR PFX "Cannot map doorbell space, aborting\n");
10110 rc = -ENOMEM;
10111 goto err_out_unmap;
10114 bnx2x_set_power_state(bp, PCI_D0);
10116 /* clean indirect addresses */
10117 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
10118 PCICFG_VENDOR_ID_OFFSET);
10119 REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
10120 REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
10121 REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
10122 REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);
10124 dev->hard_start_xmit = bnx2x_start_xmit;
10125 dev->watchdog_timeo = TX_TIMEOUT;
10127 dev->ethtool_ops = &bnx2x_ethtool_ops;
10128 dev->open = bnx2x_open;
10129 dev->stop = bnx2x_close;
10130 dev->set_multicast_list = bnx2x_set_rx_mode;
10131 dev->set_mac_address = bnx2x_change_mac_addr;
10132 dev->do_ioctl = bnx2x_ioctl;
10133 dev->change_mtu = bnx2x_change_mtu;
10134 dev->tx_timeout = bnx2x_tx_timeout;
10135 #ifdef BCM_VLAN
10136 dev->vlan_rx_register = bnx2x_vlan_rx_register;
10137 #endif
10138 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
10139 dev->poll_controller = poll_bnx2x;
10140 #endif
10141 dev->features |= NETIF_F_SG;
10142 dev->features |= NETIF_F_HW_CSUM;
10143 if (bp->flags & USING_DAC_FLAG)
10144 dev->features |= NETIF_F_HIGHDMA;
10145 #ifdef BCM_VLAN
10146 dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
10147 #endif
10148 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
10149 dev->features |= NETIF_F_TSO6;
10151 return 0;
10153 err_out_unmap:
10154 if (bp->regview) {
10155 iounmap(bp->regview);
10156 bp->regview = NULL;
10158 if (bp->doorbells) {
10159 iounmap(bp->doorbells);
10160 bp->doorbells = NULL;
10163 err_out_release:
10164 if (atomic_read(&pdev->enable_cnt) == 1)
10165 pci_release_regions(pdev);
10167 err_out_disable:
10168 pci_disable_device(pdev);
10169 pci_set_drvdata(pdev, NULL);
10171 err_out:
10172 return rc;
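/* The two helpers below read the current PCIe link width and speed from
 * the PCICFG link control register.
 */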
10175 static int __devinit bnx2x_get_pcie_width(struct bnx2x *bp)
10177 u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
10179 val = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
10180 return val;
10183 /* return value of 1=2.5GHz 2=5GHz */
10184 static int __devinit bnx2x_get_pcie_speed(struct bnx2x *bp)
10186 u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
10188 val = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
10189 return val;
10192 static int __devinit bnx2x_init_one(struct pci_dev *pdev,
10193 const struct pci_device_id *ent)
10195 static int version_printed;
10196 struct net_device *dev = NULL;
10197 struct bnx2x *bp;
10198 int rc;
10199 DECLARE_MAC_BUF(mac);
10201 if (version_printed++ == 0)
10202 printk(KERN_INFO "%s", version);
10204 /* dev is zeroed by alloc_etherdev */
10205 dev = alloc_etherdev(sizeof(*bp));
10206 if (!dev) {
10207 printk(KERN_ERR PFX "Cannot allocate net device\n");
10208 return -ENOMEM;
10211 bp = netdev_priv(dev);
10212 bp->msglevel = debug;
10214 rc = bnx2x_init_dev(pdev, dev);
10215 if (rc < 0) {
10216 free_netdev(dev);
10217 return rc;
10220 rc = register_netdev(dev);
10221 if (rc) {
10222 dev_err(&pdev->dev, "Cannot register net device\n");
10223 goto init_one_exit;
10226 pci_set_drvdata(pdev, dev);
10228 rc = bnx2x_init_bp(bp);
10229 if (rc) {
10230 unregister_netdev(dev);
10231 goto init_one_exit;
10234 netif_carrier_off(dev);
10236 bp->common.name = board_info[ent->driver_data].name;
10237 printk(KERN_INFO "%s: %s (%c%d) PCI-E x%d %s found at mem %lx,"
10238 " IRQ %d, ", dev->name, bp->common.name,
10239 (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
10240 bnx2x_get_pcie_width(bp),
10241 (bnx2x_get_pcie_speed(bp) == 2) ? "5GHz (Gen2)" : "2.5GHz",
10242 dev->base_addr, bp->pdev->irq);
10243 printk(KERN_CONT "node addr %s\n", print_mac(mac, dev->dev_addr));
10244 return 0;
10246 init_one_exit:
10247 if (bp->regview)
10248 iounmap(bp->regview);
10250 if (bp->doorbells)
10251 iounmap(bp->doorbells);
10253 free_netdev(dev);
10255 if (atomic_read(&pdev->enable_cnt) == 1)
10256 pci_release_regions(pdev);
10258 pci_disable_device(pdev);
10259 pci_set_drvdata(pdev, NULL);
10261 return rc;
10264 static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
10266 struct net_device *dev = pci_get_drvdata(pdev);
10267 struct bnx2x *bp;
10269 if (!dev) {
10270 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
10271 return;
10273 bp = netdev_priv(dev);
10275 unregister_netdev(dev);
10277 if (bp->regview)
10278 iounmap(bp->regview);
10280 if (bp->doorbells)
10281 iounmap(bp->doorbells);
10283 free_netdev(dev);
10285 if (atomic_read(&pdev->enable_cnt) == 1)
10286 pci_release_regions(pdev);
10288 pci_disable_device(pdev);
10289 pci_set_drvdata(pdev, NULL);
10292 static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
10294 struct net_device *dev = pci_get_drvdata(pdev);
10295 struct bnx2x *bp;
10297 if (!dev) {
10298 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
10299 return -ENODEV;
10301 bp = netdev_priv(dev);
10303 rtnl_lock();
10305 pci_save_state(pdev);
10307 if (!netif_running(dev)) {
10308 rtnl_unlock();
10309 return 0;
10312 netif_device_detach(dev);
10314 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
10316 bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
10318 rtnl_unlock();
10320 return 0;
10323 static int bnx2x_resume(struct pci_dev *pdev)
10325 struct net_device *dev = pci_get_drvdata(pdev);
10326 struct bnx2x *bp;
10327 int rc;
10329 if (!dev) {
10330 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
10331 return -ENODEV;
10333 bp = netdev_priv(dev);
10335 rtnl_lock();
10337 pci_restore_state(pdev);
10339 if (!netif_running(dev)) {
10340 rtnl_unlock();
10341 return 0;
10344 bnx2x_set_power_state(bp, PCI_D0);
10345 netif_device_attach(dev);
10347 rc = bnx2x_nic_load(bp, LOAD_OPEN);
10349 rtnl_unlock();
10351 return rc;
10354 /**
10355 * bnx2x_io_error_detected - called when PCI error is detected
10356 * @pdev: Pointer to PCI device
10357 * @state: The current pci connection state
10358 *
10359 * This function is called after a PCI bus error affecting
10360 * this device has been detected.
10361 */
10362 static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
10363 pci_channel_state_t state)
10365 struct net_device *dev = pci_get_drvdata(pdev);
10366 struct bnx2x *bp = netdev_priv(dev);
10368 rtnl_lock();
10370 netif_device_detach(dev);
10372 if (netif_running(dev))
10373 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
10375 pci_disable_device(pdev);
10377 rtnl_unlock();
10379 /* Request a slot reset */
10380 return PCI_ERS_RESULT_NEED_RESET;
10383 /**
10384 * bnx2x_io_slot_reset - called after the PCI bus has been reset
10385 * @pdev: Pointer to PCI device
10386 *
10387 * Restart the card from scratch, as if from a cold-boot.
10388 */
10389 static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
10391 struct net_device *dev = pci_get_drvdata(pdev);
10392 struct bnx2x *bp = netdev_priv(dev);
10394 rtnl_lock();
10396 if (pci_enable_device(pdev)) {
10397 dev_err(&pdev->dev,
10398 "Cannot re-enable PCI device after reset\n");
10399 rtnl_unlock();
10400 return PCI_ERS_RESULT_DISCONNECT;
10403 pci_set_master(pdev);
10404 pci_restore_state(pdev);
10406 if (netif_running(dev))
10407 bnx2x_set_power_state(bp, PCI_D0);
10409 rtnl_unlock();
10411 return PCI_ERS_RESULT_RECOVERED;
10414 /**
10415 * bnx2x_io_resume - called when traffic can start flowing again
10416 * @pdev: Pointer to PCI device
10417 *
10418 * This callback is called when the error recovery driver tells us that
10419 * it's OK to resume normal operation.
10420 */
10421 static void bnx2x_io_resume(struct pci_dev *pdev)
10423 struct net_device *dev = pci_get_drvdata(pdev);
10424 struct bnx2x *bp = netdev_priv(dev);
10426 rtnl_lock();
10428 if (netif_running(dev))
10429 bnx2x_nic_load(bp, LOAD_OPEN);
10431 netif_device_attach(dev);
10433 rtnl_unlock();
10436 static struct pci_error_handlers bnx2x_err_handler = {
10437 .error_detected = bnx2x_io_error_detected,
10438 .slot_reset = bnx2x_io_slot_reset,
10439 .resume = bnx2x_io_resume,
10442 static struct pci_driver bnx2x_pci_driver = {
10443 .name = DRV_MODULE_NAME,
10444 .id_table = bnx2x_pci_tbl,
10445 .probe = bnx2x_init_one,
10446 .remove = __devexit_p(bnx2x_remove_one),
10447 .suspend = bnx2x_suspend,
10448 .resume = bnx2x_resume,
10449 .err_handler = &bnx2x_err_handler,
10452 static int __init bnx2x_init(void)
10454 return pci_register_driver(&bnx2x_pci_driver);
10457 static void __exit bnx2x_cleanup(void)
10459 pci_unregister_driver(&bnx2x_pci_driver);
10462 module_init(bnx2x_init);
10463 module_exit(bnx2x_cleanup);