bnx2x: MTU Filter
drivers/net/bnx2x_main.c
1 /* bnx2x_main.c: Broadcom Everest network driver.
3 * Copyright (c) 2007-2008 Broadcom Corporation
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
9 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
10 * Written by: Eliezer Tamir
11 * Based on code from Michael Chan's bnx2 driver
12 * UDP CSUM errata workaround by Arik Gendelman
13 * Slowpath rework by Vladislav Zolotarov
14 * Statistics and Link management by Yitchak Gertner
18 #include <linux/module.h>
19 #include <linux/moduleparam.h>
20 #include <linux/kernel.h>
21 #include <linux/device.h> /* for dev_info() */
22 #include <linux/timer.h>
23 #include <linux/errno.h>
24 #include <linux/ioport.h>
25 #include <linux/slab.h>
26 #include <linux/vmalloc.h>
27 #include <linux/interrupt.h>
28 #include <linux/pci.h>
29 #include <linux/init.h>
30 #include <linux/netdevice.h>
31 #include <linux/etherdevice.h>
32 #include <linux/skbuff.h>
33 #include <linux/dma-mapping.h>
34 #include <linux/bitops.h>
35 #include <linux/irq.h>
36 #include <linux/delay.h>
37 #include <asm/byteorder.h>
38 #include <linux/time.h>
39 #include <linux/ethtool.h>
40 #include <linux/mii.h>
41 #include <linux/if_vlan.h>
42 #include <net/ip.h>
43 #include <net/tcp.h>
44 #include <net/checksum.h>
45 #include <net/ip6_checksum.h>
46 #include <linux/workqueue.h>
47 #include <linux/crc32.h>
48 #include <linux/crc32c.h>
49 #include <linux/prefetch.h>
50 #include <linux/zlib.h>
51 #include <linux/io.h>
53 #include "bnx2x_reg.h"
54 #include "bnx2x_fw_defs.h"
55 #include "bnx2x_hsi.h"
56 #include "bnx2x_link.h"
57 #include "bnx2x.h"
58 #include "bnx2x_init.h"
60 #define DRV_MODULE_VERSION "1.45.23"
61 #define DRV_MODULE_RELDATE "2008/11/03"
62 #define BNX2X_BC_VER 0x040200
64 /* Time in jiffies before concluding the transmitter is hung */
65 #define TX_TIMEOUT (5*HZ)
67 static char version[] __devinitdata =
68 "Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
69 DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
71 MODULE_AUTHOR("Eliezer Tamir");
72 MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710 Driver");
73 MODULE_LICENSE("GPL");
74 MODULE_VERSION(DRV_MODULE_VERSION);
76 static int disable_tpa;
77 static int use_inta;
78 static int poll;
79 static int debug;
80 static int load_count[3]; /* 0-common, 1-port0, 2-port1 */
81 static int use_multi;
83 module_param(disable_tpa, int, 0);
84 module_param(use_inta, int, 0);
85 module_param(poll, int, 0);
86 module_param(debug, int, 0);
87 MODULE_PARM_DESC(disable_tpa, "disable the TPA (LRO) feature");
88 MODULE_PARM_DESC(use_inta, "use INT#A instead of MSI-X");
89 MODULE_PARM_DESC(poll, "use polling (for debug)");
90 MODULE_PARM_DESC(debug, "default debug msglevel");
92 #ifdef BNX2X_MULTI
93 module_param(use_multi, int, 0);
94 MODULE_PARM_DESC(use_multi, "use per-CPU queues");
95 #endif
96 static struct workqueue_struct *bnx2x_wq;
98 enum bnx2x_board_type {
99 BCM57710 = 0,
100 BCM57711 = 1,
101 BCM57711E = 2,
104 /* indexed by board_type, above */
105 static struct {
106 char *name;
107 } board_info[] __devinitdata = {
108 { "Broadcom NetXtreme II BCM57710 XGb" },
109 { "Broadcom NetXtreme II BCM57711 XGb" },
110 { "Broadcom NetXtreme II BCM57711E XGb" }
114 static const struct pci_device_id bnx2x_pci_tbl[] = {
115 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57710,
116 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57710 },
117 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711,
118 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711 },
119 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711E,
120 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711E },
121 { 0 }
124 MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);
126 /****************************************************************************
127 * General service functions
128 ****************************************************************************/
130 /* used only at init
131 * locking is done by mcp
133 static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
135 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
136 pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
137 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
138 PCICFG_VENDOR_ID_OFFSET);
141 static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
143 u32 val;
145 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
146 pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
147 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
148 PCICFG_VENDOR_ID_OFFSET);
150 return val;
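/* Both helpers above drive the indirect-access window visible in this code:
 * the target GRC address is programmed through PCICFG_GRC_ADDRESS in PCI
 * config space, the data moves through PCICFG_GRC_DATA, and the address
 * register is then parked back at PCICFG_VENDOR_ID_OFFSET, presumably so a
 * later config access does not land on an arbitrary device register.  They
 * are only used early, before DMAE transfers are available.
 */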
153 static const u32 dmae_reg_go_c[] = {
154 DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
155 DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
156 DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
157 DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
160 /* copy command into DMAE command memory and set DMAE command go */
161 static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
162 int idx)
164 u32 cmd_offset;
165 int i;
167 cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
168 for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
169 REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));
171 DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
172 idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
174 REG_WR(bp, dmae_reg_go_c[idx], 1);
177 void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
178 u32 len32)
180 struct dmae_command *dmae = &bp->init_dmae;
181 u32 *wb_comp = bnx2x_sp(bp, wb_comp);
182 int cnt = 200;
184 if (!bp->dmae_ready) {
185 u32 *data = bnx2x_sp(bp, wb_data[0]);
187 DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x len32 %d)"
188 " using indirect\n", dst_addr, len32);
189 bnx2x_init_ind_wr(bp, dst_addr, data, len32);
190 return;
193 mutex_lock(&bp->dmae_mutex);
195 memset(dmae, 0, sizeof(struct dmae_command));
197 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
198 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
199 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
200 #ifdef __BIG_ENDIAN
201 DMAE_CMD_ENDIANITY_B_DW_SWAP |
202 #else
203 DMAE_CMD_ENDIANITY_DW_SWAP |
204 #endif
205 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
206 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
207 dmae->src_addr_lo = U64_LO(dma_addr);
208 dmae->src_addr_hi = U64_HI(dma_addr);
209 dmae->dst_addr_lo = dst_addr >> 2;
210 dmae->dst_addr_hi = 0;
211 dmae->len = len32;
212 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
213 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
214 dmae->comp_val = DMAE_COMP_VAL;
216 DP(BNX2X_MSG_OFF, "dmae: opcode 0x%08x\n"
217 DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
218 "dst_addr [%x:%08x (%08x)]\n"
219 DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
220 dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
221 dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, dst_addr,
222 dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);
223 DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
224 bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
225 bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
227 *wb_comp = 0;
229 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
231 udelay(5);
233 while (*wb_comp != DMAE_COMP_VAL) {
234 DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);
236 if (!cnt) {
237 BNX2X_ERR("dmae timeout!\n");
238 break;
240 cnt--;
241 /* adjust delay for emulation/FPGA */
242 if (CHIP_REV_IS_SLOW(bp))
243 msleep(100);
244 else
245 udelay(5);
248 mutex_unlock(&bp->dmae_mutex);
251 void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
253 struct dmae_command *dmae = &bp->init_dmae;
254 u32 *wb_comp = bnx2x_sp(bp, wb_comp);
255 int cnt = 200;
257 if (!bp->dmae_ready) {
258 u32 *data = bnx2x_sp(bp, wb_data[0]);
259 int i;
261 DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x len32 %d)"
262 " using indirect\n", src_addr, len32);
263 for (i = 0; i < len32; i++)
264 data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
265 return;
268 mutex_lock(&bp->dmae_mutex);
270 memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
271 memset(dmae, 0, sizeof(struct dmae_command));
273 dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
274 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
275 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
276 #ifdef __BIG_ENDIAN
277 DMAE_CMD_ENDIANITY_B_DW_SWAP |
278 #else
279 DMAE_CMD_ENDIANITY_DW_SWAP |
280 #endif
281 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
282 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
283 dmae->src_addr_lo = src_addr >> 2;
284 dmae->src_addr_hi = 0;
285 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
286 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
287 dmae->len = len32;
288 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
289 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
290 dmae->comp_val = DMAE_COMP_VAL;
292 DP(BNX2X_MSG_OFF, "dmae: opcode 0x%08x\n"
293 DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
294 "dst_addr [%x:%08x (%08x)]\n"
295 DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
296 dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
297 dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, src_addr,
298 dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);
300 *wb_comp = 0;
302 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
304 udelay(5);
306 while (*wb_comp != DMAE_COMP_VAL) {
308 if (!cnt) {
309 BNX2X_ERR("dmae timeout!\n");
310 break;
312 cnt--;
313 /* adjust delay for emulation/FPGA */
314 if (CHIP_REV_IS_SLOW(bp))
315 msleep(100);
316 else
317 udelay(5);
319 DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
320 bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
321 bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
323 mutex_unlock(&bp->dmae_mutex);
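/* Summary of the DMAE flow shared by bnx2x_write_dmae()/bnx2x_read_dmae():
 * the command block is built in bp->init_dmae, copied into the DMAE command
 * memory by bnx2x_post_dmae(), and kicked via the per-channel DMAE_REG_GO_C*
 * register.  Completion is detected by polling the write-back word (wb_comp)
 * for DMAE_COMP_VAL, waiting 5us per iteration (100ms on emulation/FPGA) for
 * up to about 200 iterations before declaring a timeout.  When DMAE is not
 * yet ready, both paths fall back to indirect register access.
 */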
326 /* used only for slowpath so not inlined */
327 static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
329 u32 wb_write[2];
331 wb_write[0] = val_hi;
332 wb_write[1] = val_lo;
333 REG_WR_DMAE(bp, reg, wb_write, 2);
336 #ifdef USE_WB_RD
337 static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
339 u32 wb_data[2];
341 REG_RD_DMAE(bp, reg, wb_data, 2);
343 return HILO_U64(wb_data[0], wb_data[1]);
345 #endif
347 static int bnx2x_mc_assert(struct bnx2x *bp)
349 char last_idx;
350 int i, rc = 0;
351 u32 row0, row1, row2, row3;
353 /* XSTORM */
354 last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
355 XSTORM_ASSERT_LIST_INDEX_OFFSET);
356 if (last_idx)
357 BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
359 /* print the asserts */
360 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
362 row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
363 XSTORM_ASSERT_LIST_OFFSET(i));
364 row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
365 XSTORM_ASSERT_LIST_OFFSET(i) + 4);
366 row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
367 XSTORM_ASSERT_LIST_OFFSET(i) + 8);
368 row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
369 XSTORM_ASSERT_LIST_OFFSET(i) + 12);
371 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
372 BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
373 " 0x%08x 0x%08x 0x%08x\n",
374 i, row3, row2, row1, row0);
375 rc++;
376 } else {
377 break;
381 /* TSTORM */
382 last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
383 TSTORM_ASSERT_LIST_INDEX_OFFSET);
384 if (last_idx)
385 BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
387 /* print the asserts */
388 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
390 row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
391 TSTORM_ASSERT_LIST_OFFSET(i));
392 row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
393 TSTORM_ASSERT_LIST_OFFSET(i) + 4);
394 row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
395 TSTORM_ASSERT_LIST_OFFSET(i) + 8);
396 row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
397 TSTORM_ASSERT_LIST_OFFSET(i) + 12);
399 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
400 BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
401 " 0x%08x 0x%08x 0x%08x\n",
402 i, row3, row2, row1, row0);
403 rc++;
404 } else {
405 break;
409 /* CSTORM */
410 last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
411 CSTORM_ASSERT_LIST_INDEX_OFFSET);
412 if (last_idx)
413 BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
415 /* print the asserts */
416 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
418 row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
419 CSTORM_ASSERT_LIST_OFFSET(i));
420 row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
421 CSTORM_ASSERT_LIST_OFFSET(i) + 4);
422 row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
423 CSTORM_ASSERT_LIST_OFFSET(i) + 8);
424 row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
425 CSTORM_ASSERT_LIST_OFFSET(i) + 12);
427 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
428 BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
429 " 0x%08x 0x%08x 0x%08x\n",
430 i, row3, row2, row1, row0);
431 rc++;
432 } else {
433 break;
437 /* USTORM */
438 last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
439 USTORM_ASSERT_LIST_INDEX_OFFSET);
440 if (last_idx)
441 BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
443 /* print the asserts */
444 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
446 row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
447 USTORM_ASSERT_LIST_OFFSET(i));
448 row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
449 USTORM_ASSERT_LIST_OFFSET(i) + 4);
450 row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
451 USTORM_ASSERT_LIST_OFFSET(i) + 8);
452 row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
453 USTORM_ASSERT_LIST_OFFSET(i) + 12);
455 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
456 BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
457 " 0x%08x 0x%08x 0x%08x\n",
458 i, row3, row2, row1, row0);
459 rc++;
460 } else {
461 break;
465 return rc;
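/* bnx2x_mc_assert() above walks the XSTORM/TSTORM/CSTORM/USTORM assert lists
 * in internal memory, printing each 4-dword entry until it reaches the first
 * entry whose opcode equals COMMON_ASM_INVALID_ASSERT_OPCODE, and returns the
 * total number of asserts found.
 */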
468 static void bnx2x_fw_dump(struct bnx2x *bp)
470 u32 mark, offset;
471 u32 data[9];
472 int word;
474 mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104);
475 mark = ((mark + 0x3) & ~0x3);
476 printk(KERN_ERR PFX "begin fw dump (mark 0x%x)\n" KERN_ERR, mark);
478 for (offset = mark - 0x08000000; offset <= 0xF900; offset += 0x8*4) {
479 for (word = 0; word < 8; word++)
480 data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
481 offset + 4*word));
482 data[8] = 0x0;
483 printk(KERN_CONT "%s", (char *)data);
485 for (offset = 0xF108; offset <= mark - 0x08000000; offset += 0x8*4) {
486 for (word = 0; word < 8; word++)
487 data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
488 offset + 4*word));
489 data[8] = 0x0;
490 printk(KERN_CONT "%s", (char *)data);
492 printk("\n" KERN_ERR PFX "end of fw dump\n");
495 static void bnx2x_panic_dump(struct bnx2x *bp)
497 int i;
498 u16 j, start, end;
500 bp->stats_state = STATS_STATE_DISABLED;
501 DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
503 BNX2X_ERR("begin crash dump -----------------\n");
505 for_each_queue(bp, i) {
506 struct bnx2x_fastpath *fp = &bp->fp[i];
507 struct eth_tx_db_data *hw_prods = fp->hw_tx_prods;
509 BNX2X_ERR("queue[%d]: tx_pkt_prod(%x) tx_pkt_cons(%x)"
510 " tx_bd_prod(%x) tx_bd_cons(%x) *tx_cons_sb(%x)\n",
511 i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
512 fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
513 BNX2X_ERR(" rx_bd_prod(%x) rx_bd_cons(%x)"
514 " *rx_bd_cons_sb(%x) rx_comp_prod(%x)"
515 " rx_comp_cons(%x) *rx_cons_sb(%x)\n",
516 fp->rx_bd_prod, fp->rx_bd_cons,
517 le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
518 fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
519 BNX2X_ERR(" rx_sge_prod(%x) last_max_sge(%x)"
520 " fp_c_idx(%x) *sb_c_idx(%x) fp_u_idx(%x)"
521 " *sb_u_idx(%x) bd data(%x,%x)\n",
522 fp->rx_sge_prod, fp->last_max_sge, fp->fp_c_idx,
523 fp->status_blk->c_status_block.status_block_index,
524 fp->fp_u_idx,
525 fp->status_blk->u_status_block.status_block_index,
526 hw_prods->packets_prod, hw_prods->bds_prod);
528 start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
529 end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
530 for (j = start; j < end; j++) {
531 struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];
533 BNX2X_ERR("packet[%x]=[%p,%x]\n", j,
534 sw_bd->skb, sw_bd->first_bd);
537 start = TX_BD(fp->tx_bd_cons - 10);
538 end = TX_BD(fp->tx_bd_cons + 254);
539 for (j = start; j < end; j++) {
540 u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];
542 BNX2X_ERR("tx_bd[%x]=[%x:%x:%x:%x]\n",
543 j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
546 start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
547 end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
548 for (j = start; j < end; j++) {
549 u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
550 struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];
552 BNX2X_ERR("rx_bd[%x]=[%x:%x] sw_bd=[%p]\n",
553 j, rx_bd[1], rx_bd[0], sw_bd->skb);
556 start = RX_SGE(fp->rx_sge_prod);
557 end = RX_SGE(fp->last_max_sge);
558 for (j = start; j < end; j++) {
559 u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
560 struct sw_rx_page *sw_page = &fp->rx_page_ring[j];
562 BNX2X_ERR("rx_sge[%x]=[%x:%x] sw_page=[%p]\n",
563 j, rx_sge[1], rx_sge[0], sw_page->page);
566 start = RCQ_BD(fp->rx_comp_cons - 10);
567 end = RCQ_BD(fp->rx_comp_cons + 503);
568 for (j = start; j < end; j++) {
569 u32 *cqe = (u32 *)&fp->rx_comp_ring[j];
571 BNX2X_ERR("cqe[%x]=[%x:%x:%x:%x]\n",
572 j, cqe[0], cqe[1], cqe[2], cqe[3]);
576 BNX2X_ERR("def_c_idx(%u) def_u_idx(%u) def_x_idx(%u)"
577 " def_t_idx(%u) def_att_idx(%u) attn_state(%u)"
578 " spq_prod_idx(%u)\n",
579 bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
580 bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);
582 bnx2x_fw_dump(bp);
583 bnx2x_mc_assert(bp);
584 BNX2X_ERR("end crash dump -----------------\n");
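/* The crash dump above prints, per fastpath queue, the software and status
 * block producer/consumer indices plus a window of tx_bd/rx_bd/SGE/CQE
 * entries around the current consumers, then the default status block
 * indices, and finally chains into the firmware trace (bnx2x_fw_dump) and
 * the storm assert lists (bnx2x_mc_assert).
 */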
587 static void bnx2x_int_enable(struct bnx2x *bp)
589 int port = BP_PORT(bp);
590 u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
591 u32 val = REG_RD(bp, addr);
592 int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
594 if (msix) {
595 val &= ~HC_CONFIG_0_REG_SINGLE_ISR_EN_0;
596 val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
597 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
598 } else {
599 val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
600 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
601 HC_CONFIG_0_REG_INT_LINE_EN_0 |
602 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
604 DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x) MSI-X %d\n",
605 val, port, addr, msix);
607 REG_WR(bp, addr, val);
609 val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
612 DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x) MSI-X %d\n",
613 val, port, addr, msix);
615 REG_WR(bp, addr, val);
617 if (CHIP_IS_E1H(bp)) {
618 /* init leading/trailing edge */
619 if (IS_E1HMF(bp)) {
620 val = (0xfe0f | (1 << (BP_E1HVN(bp) + 4)));
621 if (bp->port.pmf)
622 /* enable nig attention */
623 val |= 0x0100;
624 } else
625 val = 0xffff;
627 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
628 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
632 static void bnx2x_int_disable(struct bnx2x *bp)
634 int port = BP_PORT(bp);
635 u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
636 u32 val = REG_RD(bp, addr);
638 val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
639 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
640 HC_CONFIG_0_REG_INT_LINE_EN_0 |
641 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
643 DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
644 val, port, addr);
646 REG_WR(bp, addr, val);
647 if (REG_RD(bp, addr) != val)
648 BNX2X_ERR("BUG! proper val not read from IGU!\n");
651 static void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
653 int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
654 int i;
656 /* disable interrupt handling */
657 atomic_inc(&bp->intr_sem);
658 if (disable_hw)
659 /* prevent the HW from sending interrupts */
660 bnx2x_int_disable(bp);
662 /* make sure all ISRs are done */
663 if (msix) {
664 for_each_queue(bp, i)
665 synchronize_irq(bp->msix_table[i].vector);
667 /* one more for the Slow Path IRQ */
668 synchronize_irq(bp->msix_table[i].vector);
669 } else
670 synchronize_irq(bp->pdev->irq);
672 /* make sure sp_task is not running */
673 cancel_delayed_work(&bp->sp_task);
674 flush_workqueue(bnx2x_wq);
677 /* fast path */
680 * General service functions
683 static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
684 u8 storm, u16 index, u8 op, u8 update)
686 u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
687 COMMAND_REG_INT_ACK);
688 struct igu_ack_register igu_ack;
690 igu_ack.status_block_index = index;
691 igu_ack.sb_id_and_flags =
692 ((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
693 (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
694 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
695 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));
697 DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n",
698 (*(u32 *)&igu_ack), hc_addr);
699 REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));
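/* bnx2x_update_fpsb_idx() below returns a small bitmask describing which
 * indices of the per-queue status block moved since they were last read:
 * bit 0 is set when the c_status_block index changed and bit 1 when the
 * u_status_block index changed, so a zero return means no new work was
 * reported for this queue.
 */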
702 static inline u16 bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
704 struct host_status_block *fpsb = fp->status_blk;
705 u16 rc = 0;
707 barrier(); /* status block is written to by the chip */
708 if (fp->fp_c_idx != fpsb->c_status_block.status_block_index) {
709 fp->fp_c_idx = fpsb->c_status_block.status_block_index;
710 rc |= 1;
712 if (fp->fp_u_idx != fpsb->u_status_block.status_block_index) {
713 fp->fp_u_idx = fpsb->u_status_block.status_block_index;
714 rc |= 2;
716 return rc;
719 static u16 bnx2x_ack_int(struct bnx2x *bp)
721 u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
722 COMMAND_REG_SIMD_MASK);
723 u32 result = REG_RD(bp, hc_addr);
725 DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
726 result, hc_addr);
728 return result;
733 * fast path service functions
736 /* free skb in the packet ring at pos idx
737 * return idx of last bd freed
739 static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
740 u16 idx)
742 struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
743 struct eth_tx_bd *tx_bd;
744 struct sk_buff *skb = tx_buf->skb;
745 u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
746 int nbd;
748 DP(BNX2X_MSG_OFF, "pkt_idx %d buff @(%p)->skb %p\n",
749 idx, tx_buf, skb);
751 /* unmap first bd */
752 DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
753 tx_bd = &fp->tx_desc_ring[bd_idx];
754 pci_unmap_single(bp->pdev, BD_UNMAP_ADDR(tx_bd),
755 BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);
757 nbd = le16_to_cpu(tx_bd->nbd) - 1;
758 new_cons = nbd + tx_buf->first_bd;
759 #ifdef BNX2X_STOP_ON_ERROR
760 if (nbd > (MAX_SKB_FRAGS + 2)) {
761 BNX2X_ERR("BAD nbd!\n");
762 bnx2x_panic();
764 #endif
766 /* Skip a parse bd and the TSO split header bd
767 since they have no mapping */
768 if (nbd)
769 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
771 if (tx_bd->bd_flags.as_bitfield & (ETH_TX_BD_FLAGS_IP_CSUM |
772 ETH_TX_BD_FLAGS_TCP_CSUM |
773 ETH_TX_BD_FLAGS_SW_LSO)) {
774 if (--nbd)
775 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
776 tx_bd = &fp->tx_desc_ring[bd_idx];
777 /* is this a TSO split header bd? */
778 if (tx_bd->bd_flags.as_bitfield & ETH_TX_BD_FLAGS_SW_LSO) {
779 if (--nbd)
780 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
784 /* now free frags */
785 while (nbd > 0) {
787 DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
788 tx_bd = &fp->tx_desc_ring[bd_idx];
789 pci_unmap_page(bp->pdev, BD_UNMAP_ADDR(tx_bd),
790 BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);
791 if (--nbd)
792 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
795 /* release skb */
796 WARN_ON(!skb);
797 dev_kfree_skb(skb);
798 tx_buf->first_bd = 0;
799 tx_buf->skb = NULL;
801 return new_cons;
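/* BD accounting in the function above: the first BD of a packet carries the
 * total BD count (nbd) and is unmapped with pci_unmap_single; the optional
 * parse BD and TSO split-header BD are only skipped over since they carry no
 * DMA mapping; every remaining fragment BD is unmapped with pci_unmap_page.
 * The return value becomes the new tx_bd consumer in bnx2x_tx_int().
 */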
804 static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
806 s16 used;
807 u16 prod;
808 u16 cons;
810 barrier(); /* Tell compiler that prod and cons can change */
811 prod = fp->tx_bd_prod;
812 cons = fp->tx_bd_cons;
814 /* NUM_TX_RINGS = number of "next-page" entries
815 It will be used as a threshold */
816 used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;
818 #ifdef BNX2X_STOP_ON_ERROR
819 WARN_ON(used < 0);
820 WARN_ON(used > fp->bp->tx_ring_size);
821 WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL);
822 #endif
824 return (s16)(fp->bp->tx_ring_size) - used;
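/* Worked example for the math above: with prod == cons (an empty ring),
 * "used" evaluates to NUM_TX_RINGS, so the value returned is
 * tx_ring_size - NUM_TX_RINGS rather than the full ring size; the
 * "next-page" descriptors are counted as permanently occupied since they
 * never carry packet data.
 */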
827 static void bnx2x_tx_int(struct bnx2x_fastpath *fp, int work)
829 struct bnx2x *bp = fp->bp;
830 u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
831 int done = 0;
833 #ifdef BNX2X_STOP_ON_ERROR
834 if (unlikely(bp->panic))
835 return;
836 #endif
838 hw_cons = le16_to_cpu(*fp->tx_cons_sb);
839 sw_cons = fp->tx_pkt_cons;
841 while (sw_cons != hw_cons) {
842 u16 pkt_cons;
844 pkt_cons = TX_BD(sw_cons);
846 /* prefetch(bp->tx_buf_ring[pkt_cons].skb); */
848 DP(NETIF_MSG_TX_DONE, "hw_cons %u sw_cons %u pkt_cons %u\n",
849 hw_cons, sw_cons, pkt_cons);
851 /* if (NEXT_TX_IDX(sw_cons) != hw_cons) {
852 rmb();
853 prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
856 bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
857 sw_cons++;
858 done++;
860 if (done == work)
861 break;
864 fp->tx_pkt_cons = sw_cons;
865 fp->tx_bd_cons = bd_cons;
867 /* Need to make the tx_cons update visible to start_xmit()
868 * before checking for netif_queue_stopped(). Without the
869 * memory barrier, there is a small possibility that start_xmit()
870 * will miss it and cause the queue to be stopped forever.
872 smp_mb();
874 /* TBD need a thresh? */
875 if (unlikely(netif_queue_stopped(bp->dev))) {
877 netif_tx_lock(bp->dev);
879 if (netif_queue_stopped(bp->dev) &&
880 (bp->state == BNX2X_STATE_OPEN) &&
881 (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
882 netif_wake_queue(bp->dev);
884 netif_tx_unlock(bp->dev);
889 static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
890 union eth_rx_cqe *rr_cqe)
892 struct bnx2x *bp = fp->bp;
893 int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
894 int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);
896 DP(BNX2X_MSG_SP,
897 "fp %d cid %d got ramrod #%d state is %x type is %d\n",
898 FP_IDX(fp), cid, command, bp->state,
899 rr_cqe->ramrod_cqe.ramrod_type);
901 bp->spq_left++;
903 if (FP_IDX(fp)) {
904 switch (command | fp->state) {
905 case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
906 BNX2X_FP_STATE_OPENING):
907 DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
908 cid);
909 fp->state = BNX2X_FP_STATE_OPEN;
910 break;
912 case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
913 DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
914 cid);
915 fp->state = BNX2X_FP_STATE_HALTED;
916 break;
918 default:
919 BNX2X_ERR("unexpected MC reply (%d) "
920 "fp->state is %x\n", command, fp->state);
921 break;
923 mb(); /* force bnx2x_wait_ramrod() to see the change */
924 return;
927 switch (command | bp->state) {
928 case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
929 DP(NETIF_MSG_IFUP, "got setup ramrod\n");
930 bp->state = BNX2X_STATE_OPEN;
931 break;
933 case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
934 DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
935 bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
936 fp->state = BNX2X_FP_STATE_HALTED;
937 break;
939 case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
940 DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
941 bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
942 break;
945 case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
946 case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
947 DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
948 bp->set_mac_pending = 0;
949 break;
951 case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
952 DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
953 break;
955 default:
956 BNX2X_ERR("unexpected MC reply (%d) bp->state is %x\n",
957 command, bp->state);
958 break;
960 mb(); /* force bnx2x_wait_ramrod() to see the change */
963 static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
964 struct bnx2x_fastpath *fp, u16 index)
966 struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
967 struct page *page = sw_buf->page;
968 struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
970 /* Skip "next page" elements */
971 if (!page)
972 return;
974 pci_unmap_page(bp->pdev, pci_unmap_addr(sw_buf, mapping),
975 SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
976 __free_pages(page, PAGES_PER_SGE_SHIFT);
978 sw_buf->page = NULL;
979 sge->addr_hi = 0;
980 sge->addr_lo = 0;
983 static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
984 struct bnx2x_fastpath *fp, int last)
986 int i;
988 for (i = 0; i < last; i++)
989 bnx2x_free_rx_sge(bp, fp, i);
992 static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
993 struct bnx2x_fastpath *fp, u16 index)
995 struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
996 struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
997 struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
998 dma_addr_t mapping;
1000 if (unlikely(page == NULL))
1001 return -ENOMEM;
1003 mapping = pci_map_page(bp->pdev, page, 0, SGE_PAGE_SIZE*PAGES_PER_SGE,
1004 PCI_DMA_FROMDEVICE);
1005 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
1006 __free_pages(page, PAGES_PER_SGE_SHIFT);
1007 return -ENOMEM;
1010 sw_buf->page = page;
1011 pci_unmap_addr_set(sw_buf, mapping, mapping);
1013 sge->addr_hi = cpu_to_le32(U64_HI(mapping));
1014 sge->addr_lo = cpu_to_le32(U64_LO(mapping));
1016 return 0;
1019 static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
1020 struct bnx2x_fastpath *fp, u16 index)
1022 struct sk_buff *skb;
1023 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
1024 struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
1025 dma_addr_t mapping;
1027 skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
1028 if (unlikely(skb == NULL))
1029 return -ENOMEM;
1031 mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_size,
1032 PCI_DMA_FROMDEVICE);
1033 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
1034 dev_kfree_skb(skb);
1035 return -ENOMEM;
1038 rx_buf->skb = skb;
1039 pci_unmap_addr_set(rx_buf, mapping, mapping);
1041 rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
1042 rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
1044 return 0;
1047 /* note that we are not allocating a new skb,
1048 * we are just moving one from cons to prod
1049 * we are not creating a new mapping,
1050 * so there is no need to check for dma_mapping_error().
1052 static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
1053 struct sk_buff *skb, u16 cons, u16 prod)
1055 struct bnx2x *bp = fp->bp;
1056 struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
1057 struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
1058 struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
1059 struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
1061 pci_dma_sync_single_for_device(bp->pdev,
1062 pci_unmap_addr(cons_rx_buf, mapping),
1063 bp->rx_offset + RX_COPY_THRESH,
1064 PCI_DMA_FROMDEVICE);
1066 prod_rx_buf->skb = cons_rx_buf->skb;
1067 pci_unmap_addr_set(prod_rx_buf, mapping,
1068 pci_unmap_addr(cons_rx_buf, mapping));
1069 *prod_bd = *cons_bd;
1072 static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
1073 u16 idx)
1075 u16 last_max = fp->last_max_sge;
1077 if (SUB_S16(idx, last_max) > 0)
1078 fp->last_max_sge = idx;
1081 static void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
1083 int i, j;
1085 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
1086 int idx = RX_SGE_CNT * i - 1;
1088 for (j = 0; j < 2; j++) {
1089 SGE_MASK_CLEAR_BIT(fp, idx);
1090 idx--;
1095 static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
1096 struct eth_fast_path_rx_cqe *fp_cqe)
1098 struct bnx2x *bp = fp->bp;
1099 u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
1100 le16_to_cpu(fp_cqe->len_on_bd)) >>
1101 SGE_PAGE_SHIFT;
1102 u16 last_max, last_elem, first_elem;
1103 u16 delta = 0;
1104 u16 i;
1106 if (!sge_len)
1107 return;
1109 /* First mark all used pages */
1110 for (i = 0; i < sge_len; i++)
1111 SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i])));
1113 DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
1114 sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));
1116 /* Here we assume that the last SGE index is the biggest */
1117 prefetch((void *)(fp->sge_mask));
1118 bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));
1120 last_max = RX_SGE(fp->last_max_sge);
1121 last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
1122 first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;
1124 /* If ring is not full */
1125 if (last_elem + 1 != first_elem)
1126 last_elem++;
1128 /* Now update the prod */
1129 for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
1130 if (likely(fp->sge_mask[i]))
1131 break;
1133 fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
1134 delta += RX_SGE_MASK_ELEM_SZ;
1137 if (delta > 0) {
1138 fp->rx_sge_prod += delta;
1139 /* clear page-end entries */
1140 bnx2x_clear_sge_mask_next_elems(fp);
1143 DP(NETIF_MSG_RX_STATUS,
1144 "fp->last_max_sge = %d fp->rx_sge_prod = %d\n",
1145 fp->last_max_sge, fp->rx_sge_prod);
1148 static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
1150 /* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
1151 memset(fp->sge_mask, 0xff,
1152 (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));
1154 /* Clear the two last indices in the page to 1:
1155 these are the indices that correspond to the "next" element,
1156 hence will never be indicated and should be removed from
1157 the calculations. */
1158 bnx2x_clear_sge_mask_next_elems(fp);
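/* The sge_mask scheme tied to the helpers above: every SGE entry owns one
 * bit, all bits start as 1, and a bit is cleared once firmware reports the
 * corresponding page as used by an aggregated (TPA) packet.  The two bits
 * that map to each ring page's "next-page" element are kept cleared so they
 * never block progress.  bnx2x_update_sge_prod() advances rx_sge_prod only
 * across fully-cleared 64-bit mask words, restoring each word to all 1-s as
 * it passes.
 */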
1161 static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
1162 struct sk_buff *skb, u16 cons, u16 prod)
1164 struct bnx2x *bp = fp->bp;
1165 struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
1166 struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
1167 struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
1168 dma_addr_t mapping;
1170 /* move empty skb from pool to prod and map it */
1171 prod_rx_buf->skb = fp->tpa_pool[queue].skb;
1172 mapping = pci_map_single(bp->pdev, fp->tpa_pool[queue].skb->data,
1173 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
1174 pci_unmap_addr_set(prod_rx_buf, mapping, mapping);
1176 /* move partial skb from cons to pool (don't unmap yet) */
1177 fp->tpa_pool[queue] = *cons_rx_buf;
1179 /* mark bin state as start - print error if current state != stop */
1180 if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
1181 BNX2X_ERR("start of bin not in stop [%d]\n", queue);
1183 fp->tpa_state[queue] = BNX2X_TPA_START;
1185 /* point prod_bd to new skb */
1186 prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
1187 prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
1189 #ifdef BNX2X_STOP_ON_ERROR
1190 fp->tpa_queue_used |= (1 << queue);
1191 #ifdef __powerpc64__
1192 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
1193 #else
1194 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
1195 #endif
1196 fp->tpa_queue_used);
1197 #endif
1200 static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
1201 struct sk_buff *skb,
1202 struct eth_fast_path_rx_cqe *fp_cqe,
1203 u16 cqe_idx)
1205 struct sw_rx_page *rx_pg, old_rx_pg;
1206 u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
1207 u32 i, frag_len, frag_size, pages;
1208 int err;
1209 int j;
1211 frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
1212 pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;
1214 /* This is needed in order to enable forwarding support */
1215 if (frag_size)
1216 skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE,
1217 max(frag_size, (u32)len_on_bd));
1219 #ifdef BNX2X_STOP_ON_ERROR
1220 if (pages >
1221 min((u32)8, (u32)MAX_SKB_FRAGS) * SGE_PAGE_SIZE * PAGES_PER_SGE) {
1222 BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
1223 pages, cqe_idx);
1224 BNX2X_ERR("fp_cqe->pkt_len = %d fp_cqe->len_on_bd = %d\n",
1225 fp_cqe->pkt_len, len_on_bd);
1226 bnx2x_panic();
1227 return -EINVAL;
1229 #endif
1231 /* Run through the SGL and compose the fragmented skb */
1232 for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
1233 u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j]));
1235 /* FW gives the indices of the SGE as if the ring is an array
1236 (meaning that "next" element will consume 2 indices) */
1237 frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
1238 rx_pg = &fp->rx_page_ring[sge_idx];
1239 old_rx_pg = *rx_pg;
1241 /* If we fail to allocate a substitute page, we simply stop
1242 where we are and drop the whole packet */
1243 err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
1244 if (unlikely(err)) {
1245 bp->eth_stats.rx_skb_alloc_failed++;
1246 return err;
1249 /* Unmap the page as we are going to pass it to the stack */
1250 pci_unmap_page(bp->pdev, pci_unmap_addr(&old_rx_pg, mapping),
1251 SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
1253 /* Add one frag and update the appropriate fields in the skb */
1254 skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
1256 skb->data_len += frag_len;
1257 skb->truesize += frag_len;
1258 skb->len += frag_len;
1260 frag_size -= frag_len;
1263 return 0;
1266 static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
1267 u16 queue, int pad, int len, union eth_rx_cqe *cqe,
1268 u16 cqe_idx)
1270 struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
1271 struct sk_buff *skb = rx_buf->skb;
1272 /* alloc new skb */
1273 struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
1275 /* Unmap skb in the pool anyway, as we are going to change
1276 pool entry status to BNX2X_TPA_STOP even if new skb allocation
1277 fails. */
1278 pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
1279 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
1281 if (likely(new_skb)) {
1282 /* fix ip xsum and give it to the stack */
1283 /* (no need to map the new skb) */
1284 #ifdef BCM_VLAN
1285 int is_vlan_cqe =
1286 (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
1287 PARSING_FLAGS_VLAN);
1288 int is_not_hwaccel_vlan_cqe =
1289 (is_vlan_cqe && (!(bp->flags & HW_VLAN_RX_FLAG)));
1290 #endif
1292 prefetch(skb);
1293 prefetch(((char *)(skb)) + 128);
1295 #ifdef BNX2X_STOP_ON_ERROR
1296 if (pad + len > bp->rx_buf_size) {
1297 BNX2X_ERR("skb_put is about to fail... "
1298 "pad %d len %d rx_buf_size %d\n",
1299 pad, len, bp->rx_buf_size);
1300 bnx2x_panic();
1301 return;
1303 #endif
1305 skb_reserve(skb, pad);
1306 skb_put(skb, len);
1308 skb->protocol = eth_type_trans(skb, bp->dev);
1309 skb->ip_summed = CHECKSUM_UNNECESSARY;
1312 struct iphdr *iph;
1314 iph = (struct iphdr *)skb->data;
1315 #ifdef BCM_VLAN
1316 /* If there is no Rx VLAN offloading -
1317 take VLAN tag into an account */
1318 if (unlikely(is_not_hwaccel_vlan_cqe))
1319 iph = (struct iphdr *)((u8 *)iph + VLAN_HLEN);
1320 #endif
1321 iph->check = 0;
1322 iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
1325 if (!bnx2x_fill_frag_skb(bp, fp, skb,
1326 &cqe->fast_path_cqe, cqe_idx)) {
1327 #ifdef BCM_VLAN
1328 if ((bp->vlgrp != NULL) && is_vlan_cqe &&
1329 (!is_not_hwaccel_vlan_cqe))
1330 vlan_hwaccel_receive_skb(skb, bp->vlgrp,
1331 le16_to_cpu(cqe->fast_path_cqe.
1332 vlan_tag));
1333 else
1334 #endif
1335 netif_receive_skb(skb);
1336 } else {
1337 DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
1338 " - dropping packet!\n");
1339 dev_kfree_skb(skb);
1343 /* put new skb in bin */
1344 fp->tpa_pool[queue].skb = new_skb;
1346 } else {
1347 /* else drop the packet and keep the buffer in the bin */
1348 DP(NETIF_MSG_RX_STATUS,
1349 "Failed to allocate new skb - dropping packet!\n");
1350 bp->eth_stats.rx_skb_alloc_failed++;
1353 fp->tpa_state[queue] = BNX2X_TPA_STOP;
1356 static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
1357 struct bnx2x_fastpath *fp,
1358 u16 bd_prod, u16 rx_comp_prod,
1359 u16 rx_sge_prod)
1361 struct tstorm_eth_rx_producers rx_prods = {0};
1362 int i;
1364 /* Update producers */
1365 rx_prods.bd_prod = bd_prod;
1366 rx_prods.cqe_prod = rx_comp_prod;
1367 rx_prods.sge_prod = rx_sge_prod;
1370 * Make sure that the BD and SGE data is updated before updating the
1371 * producers since FW might read the BD/SGE right after the producer
1372 * is updated.
1373 * This is only applicable for weak-ordered memory model archs such
1374 * as IA-64. The following barrier is also mandatory since FW will
1375 * assume BDs must have buffers.
1377 wmb();
1379 for (i = 0; i < sizeof(struct tstorm_eth_rx_producers)/4; i++)
1380 REG_WR(bp, BAR_TSTRORM_INTMEM +
1381 TSTORM_RX_PRODS_OFFSET(BP_PORT(bp), FP_CL_ID(fp)) + i*4,
1382 ((u32 *)&rx_prods)[i]);
1384 mmiowb(); /* keep prod updates ordered */
1386 DP(NETIF_MSG_RX_STATUS,
1387 "Wrote: bd_prod %u cqe_prod %u sge_prod %u\n",
1388 bd_prod, rx_comp_prod, rx_sge_prod);
1391 static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
1393 struct bnx2x *bp = fp->bp;
1394 u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
1395 u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
1396 int rx_pkt = 0;
1398 #ifdef BNX2X_STOP_ON_ERROR
1399 if (unlikely(bp->panic))
1400 return 0;
1401 #endif
1403 /* CQ "next element" is of the size of the regular element,
1404 that's why it's ok here */
1405 hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
1406 if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
1407 hw_comp_cons++;
1409 bd_cons = fp->rx_bd_cons;
1410 bd_prod = fp->rx_bd_prod;
1411 bd_prod_fw = bd_prod;
1412 sw_comp_cons = fp->rx_comp_cons;
1413 sw_comp_prod = fp->rx_comp_prod;
1415 /* Memory barrier necessary as speculative reads of the rx
1416 * buffer can be ahead of the index in the status block
1418 rmb();
1420 DP(NETIF_MSG_RX_STATUS,
1421 "queue[%d]: hw_comp_cons %u sw_comp_cons %u\n",
1422 FP_IDX(fp), hw_comp_cons, sw_comp_cons);
1424 while (sw_comp_cons != hw_comp_cons) {
1425 struct sw_rx_bd *rx_buf = NULL;
1426 struct sk_buff *skb;
1427 union eth_rx_cqe *cqe;
1428 u8 cqe_fp_flags;
1429 u16 len, pad;
1431 comp_ring_cons = RCQ_BD(sw_comp_cons);
1432 bd_prod = RX_BD(bd_prod);
1433 bd_cons = RX_BD(bd_cons);
1435 cqe = &fp->rx_comp_ring[comp_ring_cons];
1436 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
1438 DP(NETIF_MSG_RX_STATUS, "CQE type %x err %x status %x"
1439 " queue %x vlan %x len %u\n", CQE_TYPE(cqe_fp_flags),
1440 cqe_fp_flags, cqe->fast_path_cqe.status_flags,
1441 le32_to_cpu(cqe->fast_path_cqe.rss_hash_result),
1442 le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
1443 le16_to_cpu(cqe->fast_path_cqe.pkt_len));
1445 /* is this a slowpath msg? */
1446 if (unlikely(CQE_TYPE(cqe_fp_flags))) {
1447 bnx2x_sp_event(fp, cqe);
1448 goto next_cqe;
1450 /* this is an rx packet */
1451 } else {
1452 rx_buf = &fp->rx_buf_ring[bd_cons];
1453 skb = rx_buf->skb;
1454 len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
1455 pad = cqe->fast_path_cqe.placement_offset;
1457 /* If CQE is marked both TPA_START and TPA_END
1458 it is a non-TPA CQE */
1459 if ((!fp->disable_tpa) &&
1460 (TPA_TYPE(cqe_fp_flags) !=
1461 (TPA_TYPE_START | TPA_TYPE_END))) {
1462 u16 queue = cqe->fast_path_cqe.queue_index;
1464 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
1465 DP(NETIF_MSG_RX_STATUS,
1466 "calling tpa_start on queue %d\n",
1467 queue);
1469 bnx2x_tpa_start(fp, queue, skb,
1470 bd_cons, bd_prod);
1471 goto next_rx;
1474 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
1475 DP(NETIF_MSG_RX_STATUS,
1476 "calling tpa_stop on queue %d\n",
1477 queue);
1479 if (!BNX2X_RX_SUM_FIX(cqe))
1480 BNX2X_ERR("STOP on non-TCP "
1481 "data\n");
1483 /* This is a size of the linear data
1484 on this skb */
1485 len = le16_to_cpu(cqe->fast_path_cqe.
1486 len_on_bd);
1487 bnx2x_tpa_stop(bp, fp, queue, pad,
1488 len, cqe, comp_ring_cons);
1489 #ifdef BNX2X_STOP_ON_ERROR
1490 if (bp->panic)
1491 return -EINVAL;
1492 #endif
1494 bnx2x_update_sge_prod(fp,
1495 &cqe->fast_path_cqe);
1496 goto next_cqe;
1500 pci_dma_sync_single_for_device(bp->pdev,
1501 pci_unmap_addr(rx_buf, mapping),
1502 pad + RX_COPY_THRESH,
1503 PCI_DMA_FROMDEVICE);
1504 prefetch(skb);
1505 prefetch(((char *)(skb)) + 128);
1507 /* is this an error packet? */
1508 if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
1509 DP(NETIF_MSG_RX_ERR,
1510 "ERROR flags %x rx packet %u\n",
1511 cqe_fp_flags, sw_comp_cons);
1512 bp->eth_stats.rx_err_discard_pkt++;
1513 goto reuse_rx;
1516 /* Since we don't have a jumbo ring
1517 * copy small packets if mtu > 1500
1519 if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
1520 (len <= RX_COPY_THRESH)) {
1521 struct sk_buff *new_skb;
1523 new_skb = netdev_alloc_skb(bp->dev,
1524 len + pad);
1525 if (new_skb == NULL) {
1526 DP(NETIF_MSG_RX_ERR,
1527 "ERROR packet dropped "
1528 "because of alloc failure\n");
1529 bp->eth_stats.rx_skb_alloc_failed++;
1530 goto reuse_rx;
1533 /* aligned copy */
1534 skb_copy_from_linear_data_offset(skb, pad,
1535 new_skb->data + pad, len);
1536 skb_reserve(new_skb, pad);
1537 skb_put(new_skb, len);
1539 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1541 skb = new_skb;
1543 } else if (bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0) {
1544 pci_unmap_single(bp->pdev,
1545 pci_unmap_addr(rx_buf, mapping),
1546 bp->rx_buf_size,
1547 PCI_DMA_FROMDEVICE);
1548 skb_reserve(skb, pad);
1549 skb_put(skb, len);
1551 } else {
1552 DP(NETIF_MSG_RX_ERR,
1553 "ERROR packet dropped because "
1554 "of alloc failure\n");
1555 bp->eth_stats.rx_skb_alloc_failed++;
1556 reuse_rx:
1557 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1558 goto next_rx;
1561 skb->protocol = eth_type_trans(skb, bp->dev);
1563 skb->ip_summed = CHECKSUM_NONE;
1564 if (bp->rx_csum) {
1565 if (likely(BNX2X_RX_CSUM_OK(cqe)))
1566 skb->ip_summed = CHECKSUM_UNNECESSARY;
1567 else
1568 bp->eth_stats.hw_csum_err++;
1572 #ifdef BCM_VLAN
1573 if ((bp->vlgrp != NULL) && (bp->flags & HW_VLAN_RX_FLAG) &&
1574 (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
1575 PARSING_FLAGS_VLAN))
1576 vlan_hwaccel_receive_skb(skb, bp->vlgrp,
1577 le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
1578 else
1579 #endif
1580 netif_receive_skb(skb);
1583 next_rx:
1584 rx_buf->skb = NULL;
1586 bd_cons = NEXT_RX_IDX(bd_cons);
1587 bd_prod = NEXT_RX_IDX(bd_prod);
1588 bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
1589 rx_pkt++;
1590 next_cqe:
1591 sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
1592 sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
1594 if (rx_pkt == budget)
1595 break;
1596 } /* while */
1598 fp->rx_bd_cons = bd_cons;
1599 fp->rx_bd_prod = bd_prod_fw;
1600 fp->rx_comp_cons = sw_comp_cons;
1601 fp->rx_comp_prod = sw_comp_prod;
1603 /* Update producers */
1604 bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
1605 fp->rx_sge_prod);
1607 fp->rx_pkt += rx_pkt;
1608 fp->rx_calls++;
1610 return rx_pkt;
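/* Shape of the RX loop above: each CQE is either a slow path event (handed
 * to bnx2x_sp_event), a TPA start/stop indication for an aggregation queue,
 * or a regular packet.  For regular packets, when the MTU is above
 * ETH_MAX_PACKET_SIZE and the frame fits in RX_COPY_THRESH it is copied into
 * a fresh small skb (there is no jumbo ring); otherwise the ring skb is
 * handed up and replaced, or reused in place and the packet dropped and
 * counted if the replacement allocation fails.
 */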
1613 static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
1615 struct bnx2x_fastpath *fp = fp_cookie;
1616 struct bnx2x *bp = fp->bp;
1617 int index = FP_IDX(fp);
1619 /* Return here if interrupt is disabled */
1620 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1621 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1622 return IRQ_HANDLED;
1625 DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
1626 index, FP_SB_ID(fp));
1627 bnx2x_ack_sb(bp, FP_SB_ID(fp), USTORM_ID, 0, IGU_INT_DISABLE, 0);
1629 #ifdef BNX2X_STOP_ON_ERROR
1630 if (unlikely(bp->panic))
1631 return IRQ_HANDLED;
1632 #endif
1634 prefetch(fp->rx_cons_sb);
1635 prefetch(fp->tx_cons_sb);
1636 prefetch(&fp->status_blk->c_status_block.status_block_index);
1637 prefetch(&fp->status_blk->u_status_block.status_block_index);
1639 netif_rx_schedule(&bnx2x_fp(bp, index, napi));
1641 return IRQ_HANDLED;
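/* In the INT#A handler below, the acked status word is decoded as follows:
 * bit 0 kicks the slow path task on the bnx2x workqueue, the bit at position
 * (fp[0].sb_id + 1) schedules fastpath queue 0's NAPI poll, and any bit
 * still set after that is logged as an unknown interrupt.
 */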
1644 static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
1646 struct net_device *dev = dev_instance;
1647 struct bnx2x *bp = netdev_priv(dev);
1648 u16 status = bnx2x_ack_int(bp);
1649 u16 mask;
1651 /* Return here if interrupt is shared and it's not for us */
1652 if (unlikely(status == 0)) {
1653 DP(NETIF_MSG_INTR, "not our interrupt!\n");
1654 return IRQ_NONE;
1656 DP(NETIF_MSG_INTR, "got an interrupt status %u\n", status);
1658 /* Return here if interrupt is disabled */
1659 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1660 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1661 return IRQ_HANDLED;
1664 #ifdef BNX2X_STOP_ON_ERROR
1665 if (unlikely(bp->panic))
1666 return IRQ_HANDLED;
1667 #endif
1669 mask = 0x2 << bp->fp[0].sb_id;
1670 if (status & mask) {
1671 struct bnx2x_fastpath *fp = &bp->fp[0];
1673 prefetch(fp->rx_cons_sb);
1674 prefetch(fp->tx_cons_sb);
1675 prefetch(&fp->status_blk->c_status_block.status_block_index);
1676 prefetch(&fp->status_blk->u_status_block.status_block_index);
1678 netif_rx_schedule(&bnx2x_fp(bp, 0, napi));
1680 status &= ~mask;
1684 if (unlikely(status & 0x1)) {
1685 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
1687 status &= ~0x1;
1688 if (!status)
1689 return IRQ_HANDLED;
1692 if (status)
1693 DP(NETIF_MSG_INTR, "got an unknown interrupt! (status %u)\n",
1694 status);
1696 return IRQ_HANDLED;
1699 /* end of fast path */
1701 static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);
1703 /* Link */
1706 * General service functions
1709 static int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
1711 u32 lock_status;
1712 u32 resource_bit = (1 << resource);
1713 int func = BP_FUNC(bp);
1714 u32 hw_lock_control_reg;
1715 int cnt;
1717 /* Validating that the resource is within range */
1718 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1719 DP(NETIF_MSG_HW,
1720 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1721 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1722 return -EINVAL;
1725 if (func <= 5) {
1726 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1727 } else {
1728 hw_lock_control_reg =
1729 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1732 /* Validating that the resource is not already taken */
1733 lock_status = REG_RD(bp, hw_lock_control_reg);
1734 if (lock_status & resource_bit) {
1735 DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
1736 lock_status, resource_bit);
1737 return -EEXIST;
1740 /* Poll every 5ms for up to 5 seconds */
1741 for (cnt = 0; cnt < 1000; cnt++) {
1742 /* Try to acquire the lock */
1743 REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
1744 lock_status = REG_RD(bp, hw_lock_control_reg);
1745 if (lock_status & resource_bit)
1746 return 0;
1748 msleep(5);
1750 DP(NETIF_MSG_HW, "Timeout\n");
1751 return -EAGAIN;
1754 static int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
1756 u32 lock_status;
1757 u32 resource_bit = (1 << resource);
1758 int func = BP_FUNC(bp);
1759 u32 hw_lock_control_reg;
1761 /* Validating that the resource is within range */
1762 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1763 DP(NETIF_MSG_HW,
1764 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1765 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1766 return -EINVAL;
1769 if (func <= 5) {
1770 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1771 } else {
1772 hw_lock_control_reg =
1773 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1776 /* Validating that the resource is currently taken */
1777 lock_status = REG_RD(bp, hw_lock_control_reg);
1778 if (!(lock_status & resource_bit)) {
1779 DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
1780 lock_status, resource_bit);
1781 return -EFAULT;
1784 REG_WR(bp, hw_lock_control_reg, resource_bit);
1785 return 0;
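/* The HW resource lock protocol, as far as it can be read from the two
 * functions above: each PCI function has its own DRIVER_CONTROL register
 * (selected from MISC_REG_DRIVER_CONTROL_1/_7 by function number), a lock is
 * requested by writing the resource bit to the "+ 4" (set) offset and
 * confirmed by reading it back, polled every 5ms for up to 1000 attempts
 * (about 5 seconds), and released by writing the same bit to the base
 * (clear) offset.
 */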
1788 /* HW Lock for shared dual port PHYs */
1789 static void bnx2x_acquire_phy_lock(struct bnx2x *bp)
1791 u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
1793 mutex_lock(&bp->port.phy_mutex);
1795 if ((ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072) ||
1796 (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073))
1797 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_8072_MDIO);
1800 static void bnx2x_release_phy_lock(struct bnx2x *bp)
1802 u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
1804 if ((ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072) ||
1805 (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073))
1806 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_8072_MDIO);
1808 mutex_unlock(&bp->port.phy_mutex);
1811 int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
1813 /* The GPIO should be swapped if swap register is set and active */
1814 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1815 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1816 int gpio_shift = gpio_num +
1817 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1818 u32 gpio_mask = (1 << gpio_shift);
1819 u32 gpio_reg;
1821 if (gpio_num > MISC_REGISTERS_GPIO_3) {
1822 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1823 return -EINVAL;
1826 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1827 /* read GPIO and mask except the float bits */
1828 gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
1830 switch (mode) {
1831 case MISC_REGISTERS_GPIO_OUTPUT_LOW:
1832 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
1833 gpio_num, gpio_shift);
1834 /* clear FLOAT and set CLR */
1835 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1836 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
1837 break;
1839 case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
1840 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
1841 gpio_num, gpio_shift);
1842 /* clear FLOAT and set SET */
1843 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1844 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
1845 break;
1847 case MISC_REGISTERS_GPIO_INPUT_HI_Z:
1848 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
1849 gpio_num, gpio_shift);
1850 /* set FLOAT */
1851 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1852 break;
1854 default:
1855 break;
1858 REG_WR(bp, MISC_REG_GPIO, gpio_reg);
1859 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1861 return 0;
1864 static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
1866 u32 spio_mask = (1 << spio_num);
1867 u32 spio_reg;
1869 if ((spio_num < MISC_REGISTERS_SPIO_4) ||
1870 (spio_num > MISC_REGISTERS_SPIO_7)) {
1871 BNX2X_ERR("Invalid SPIO %d\n", spio_num);
1872 return -EINVAL;
1875 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
1876 /* read SPIO and mask except the float bits */
1877 spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);
1879 switch (mode) {
1880 case MISC_REGISTERS_SPIO_OUTPUT_LOW:
1881 DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
1882 /* clear FLOAT and set CLR */
1883 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1884 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
1885 break;
1887 case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
1888 DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
1889 /* clear FLOAT and set SET */
1890 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1891 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
1892 break;
1894 case MISC_REGISTERS_SPIO_INPUT_HI_Z:
1895 DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
1896 /* set FLOAT */
1897 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1898 break;
1900 default:
1901 break;
1904 REG_WR(bp, MISC_REG_SPIO, spio_reg);
1905 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
1907 return 0;
1910 static void bnx2x_calc_fc_adv(struct bnx2x *bp)
1912 switch (bp->link_vars.ieee_fc &
1913 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
1914 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
1915 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
1916 ADVERTISED_Pause);
1917 break;
1918 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
1919 bp->port.advertising |= (ADVERTISED_Asym_Pause |
1920 ADVERTISED_Pause);
1921 break;
1922 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
1923 bp->port.advertising |= ADVERTISED_Asym_Pause;
1924 break;
1925 default:
1926 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
1927 ADVERTISED_Pause);
1928 break;
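/* The switch above maps the negotiated IEEE pause bits to the ethtool
 * advertising flags: BOTH sets Pause and Asym_Pause, ASYMMETRIC sets only
 * Asym_Pause, and NONE (or any unrecognized value) clears both.
 */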
1932 static void bnx2x_link_report(struct bnx2x *bp)
1934 if (bp->link_vars.link_up) {
1935 if (bp->state == BNX2X_STATE_OPEN)
1936 netif_carrier_on(bp->dev);
1937 printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);
1939 printk("%d Mbps ", bp->link_vars.line_speed);
1941 if (bp->link_vars.duplex == DUPLEX_FULL)
1942 printk("full duplex");
1943 else
1944 printk("half duplex");
1946 if (bp->link_vars.flow_ctrl != BNX2X_FLOW_CTRL_NONE) {
1947 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) {
1948 printk(", receive ");
1949 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
1950 printk("& transmit ");
1951 } else {
1952 printk(", transmit ");
1954 printk("flow control ON");
1956 printk("\n");
1958 } else { /* link_down */
1959 netif_carrier_off(bp->dev);
1960 printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
1964 static u8 bnx2x_initial_phy_init(struct bnx2x *bp)
1966 if (!BP_NOMCP(bp)) {
1967 u8 rc;
1969 /* Initialize link parameters structure variables */
1970 /* It is recommended to turn off RX FC for jumbo frames
1971 for better performance */
1972 if (IS_E1HMF(bp))
1973 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
1974 else if (bp->dev->mtu > 5000)
1975 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
1976 else
1977 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
1979 bnx2x_acquire_phy_lock(bp);
1980 rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
1981 bnx2x_release_phy_lock(bp);
1983 bnx2x_calc_fc_adv(bp);
1985 if (bp->link_vars.link_up)
1986 bnx2x_link_report(bp);
1989 return rc;
1991 BNX2X_ERR("Bootcode is missing - not initializing link\n");
1992 return -EINVAL;
1995 static void bnx2x_link_set(struct bnx2x *bp)
1997 if (!BP_NOMCP(bp)) {
1998 bnx2x_acquire_phy_lock(bp);
1999 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
2000 bnx2x_release_phy_lock(bp);
2002 bnx2x_calc_fc_adv(bp);
2003 } else
2004 BNX2X_ERR("Bootcode is missing - not setting link\n");
2007 static void bnx2x__link_reset(struct bnx2x *bp)
2009 if (!BP_NOMCP(bp)) {
2010 bnx2x_acquire_phy_lock(bp);
2011 bnx2x_link_reset(&bp->link_params, &bp->link_vars);
2012 bnx2x_release_phy_lock(bp);
2013 } else
2014 BNX2X_ERR("Bootcode is missing - not resetting link\n");
2017 static u8 bnx2x_link_test(struct bnx2x *bp)
2019 u8 rc;
2021 bnx2x_acquire_phy_lock(bp);
2022 rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
2023 bnx2x_release_phy_lock(bp);
2025 return rc;
2028 /* Calculates the sum of vn_min_rates.
2029 It's needed for further normalizing of the min_rates.
2031 Returns:
2032 sum of vn_min_rates
2034 0 - if all the min_rates are 0.
2035 In the latter case the fairness algorithm should be deactivated.
2036 If not all min_rates are zero, then those that are zero will
2037 be set to 1.
2039 static u32 bnx2x_calc_vn_wsum(struct bnx2x *bp)
2041 int i, port = BP_PORT(bp);
2042 u32 wsum = 0;
2043 int all_zero = 1;
2045 for (i = 0; i < E1HVN_MAX; i++) {
2046 u32 vn_cfg =
2047 SHMEM_RD(bp, mf_cfg.func_mf_config[2*i + port].config);
2048 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2049 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2050 if (!(vn_cfg & FUNC_MF_CFG_FUNC_HIDE)) {
2051 /* If min rate is zero - set it to 1 */
2052 if (!vn_min_rate)
2053 vn_min_rate = DEF_MIN_RATE;
2054 else
2055 all_zero = 0;
2057 wsum += vn_min_rate;
2061 /* ... only if all min rates are zeros - disable FAIRNESS */
2062 if (all_zero)
2063 return 0;
2065 return wsum;
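/*
 * Worked example (illustrative, hypothetical configuration): with the four
 * E1H VNs of a port configured to min rates of 0, 2500, 0 and 5000, the two
 * zero entries are counted as DEF_MIN_RATE each, so the function returns
 *
 *	wsum = DEF_MIN_RATE + 2500 + DEF_MIN_RATE + 5000
 *
 * and fairness stays enabled.  Only when every non-hidden VN reports a zero
 * min rate does it return 0, deactivating the fairness algorithm as
 * described above.
 */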
2068 static void bnx2x_init_port_minmax(struct bnx2x *bp,
2069 int en_fness,
2070 u16 port_rate,
2071 struct cmng_struct_per_port *m_cmng_port)
2073 u32 r_param = port_rate / 8;
2074 int port = BP_PORT(bp);
2075 int i;
2077 memset(m_cmng_port, 0, sizeof(struct cmng_struct_per_port));
2079 /* Enable minmax only if we are in e1hmf mode */
2080 if (IS_E1HMF(bp)) {
2081 u32 fair_periodic_timeout_usec;
2082 u32 t_fair;
2084 /* Enable rate shaping and fairness */
2085 m_cmng_port->flags.cmng_vn_enable = 1;
2086 m_cmng_port->flags.fairness_enable = en_fness ? 1 : 0;
2087 m_cmng_port->flags.rate_shaping_enable = 1;
2089 if (!en_fness)
2090 DP(NETIF_MSG_IFUP, "All MIN values are zeroes,"
2091 " fairness will be disabled\n");
2093 /* 100 usec in SDM ticks = 25 since each tick is 4 usec */
2094 m_cmng_port->rs_vars.rs_periodic_timeout =
2095 RS_PERIODIC_TIMEOUT_USEC / 4;
2097 /* this is the threshold below which no timer arming will occur;
2098 the 1.25 coefficient makes the threshold a little bigger
2099 than the real time, to compensate for timer inaccuracy */
2100 m_cmng_port->rs_vars.rs_threshold =
2101 (RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;
2103 /* resolution of fairness timer */
2104 fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
2105 /* for 10G it is 1000 usec, for 1G it is 10000 usec */
2106 t_fair = T_FAIR_COEF / port_rate;
2108 /* this is the threshold below which we won't arm
2109 the timer anymore */
2110 m_cmng_port->fair_vars.fair_threshold = QM_ARB_BYTES;
2112 /* we multiply by 1e3/8 to get bytes/msec.
2113 We don't want the credits to exceed
2114 T_FAIR*FAIR_MEM (the algorithm resolution) */
2115 m_cmng_port->fair_vars.upper_bound =
2116 r_param * t_fair * FAIR_MEM;
2117 /* since each tick is 4 usec */
2118 m_cmng_port->fair_vars.fairness_timeout =
2119 fair_periodic_timeout_usec / 4;
2121 } else {
2122 /* Disable rate shaping and fairness */
2123 m_cmng_port->flags.cmng_vn_enable = 0;
2124 m_cmng_port->flags.fairness_enable = 0;
2125 m_cmng_port->flags.rate_shaping_enable = 0;
2127 DP(NETIF_MSG_IFUP,
2128 "Single function mode, minmax will be disabled\n");
2131 /* Store it to internal memory */
2132 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
2133 REG_WR(bp, BAR_XSTRORM_INTMEM +
2134 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
2135 ((u32 *)(m_cmng_port))[i]);
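/*
 * Worked example (illustrative, values taken from the comments above): for
 * a 10G port, port_rate = 10000 Mbps = 10000 bits/usec, so
 *
 *	r_param             = 10000 / 8          = 1250 bytes/usec
 *	rs_periodic_timeout = 100 usec / 4        = 25 SDM ticks
 *	rs_threshold        = 100 * 1250 * 5 / 4  = 156250 bytes
 *	t_fair              = 1000 usec (the 10G case noted above)
 *
 * QM_ARB_BYTES, T_FAIR_COEF and FAIR_MEM are chip constants and are left
 * symbolic here; fairness_timeout is simply (QM_ARB_BYTES / r_param) / 4
 * SDM ticks.
 */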
2138 static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func,
2139 u32 wsum, u16 port_rate,
2140 struct cmng_struct_per_port *m_cmng_port)
2142 struct rate_shaping_vars_per_vn m_rs_vn;
2143 struct fairness_vars_per_vn m_fair_vn;
2144 u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2145 u16 vn_min_rate, vn_max_rate;
2146 int i;
2148 /* If function is hidden - set min and max to zeroes */
2149 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
2150 vn_min_rate = 0;
2151 vn_max_rate = 0;
2153 } else {
2154 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2155 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2156 /* If FAIRNESS is enabled (not all min rates are zeroes) and
2157 if current min rate is zero - set it to 1.
2158 This is a requirement of the algorithm. */
2159 if ((vn_min_rate == 0) && wsum)
2160 vn_min_rate = DEF_MIN_RATE;
2161 vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
2162 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
2165 DP(NETIF_MSG_IFUP, "func %d: vn_min_rate=%d vn_max_rate=%d "
2166 "wsum=%d\n", func, vn_min_rate, vn_max_rate, wsum);
2168 memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
2169 memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));
2171 /* global vn counter - maximal Mbps for this vn */
2172 m_rs_vn.vn_counter.rate = vn_max_rate;
2174 /* quota - number of bytes transmitted in this period */
2175 m_rs_vn.vn_counter.quota =
2176 (vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;
2178 #ifdef BNX2X_PER_PROT_QOS
2179 /* per protocol counter */
2180 for (protocol = 0; protocol < NUM_OF_PROTOCOLS; protocol++) {
2181 /* maximal Mbps for this protocol */
2182 m_rs_vn.protocol_counters[protocol].rate =
2183 protocol_max_rate[protocol];
2184 /* the quota in each timer period -
2185 number of bytes transmitted in this period */
2186 m_rs_vn.protocol_counters[protocol].quota =
2187 (u32)(rs_periodic_timeout_usec *
2188 ((double)m_rs_vn.
2189 protocol_counters[protocol].rate/8));
2191 #endif
2193 if (wsum) {
2194 /* credit for each period of the fairness algorithm:
2195 number of bytes in T_FAIR (the vn shares the port rate).
2196 wsum should not be larger than 10000, thus
2197 T_FAIR_COEF / (8 * wsum) will always be greater than zero */
2198 m_fair_vn.vn_credit_delta =
2199 max((u64)(vn_min_rate * (T_FAIR_COEF / (8 * wsum))),
2200 (u64)(m_cmng_port->fair_vars.fair_threshold * 2));
2201 DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta=%d\n",
2202 m_fair_vn.vn_credit_delta);
2205 #ifdef BNX2X_PER_PROT_QOS
2206 do {
2207 u32 protocolWeightSum = 0;
2209 for (protocol = 0; protocol < NUM_OF_PROTOCOLS; protocol++)
2210 protocolWeightSum +=
2211 drvInit.protocol_min_rate[protocol];
2212 /* per protocol counter -
2213 NOT NEEDED IF NO PER-PROTOCOL CONGESTION MANAGEMENT */
2214 if (protocolWeightSum > 0) {
2215 for (protocol = 0;
2216 protocol < NUM_OF_PROTOCOLS; protocol++)
2217 /* credit for each period of the
2218 fairness algorithm - number of bytes in
2219 T_FAIR (the protocol shares the vn rate) */
2220 m_fair_vn.protocol_credit_delta[protocol] =
2221 (u32)((vn_min_rate / 8) * t_fair *
2222 protocol_min_rate / protocolWeightSum);
2224 } while (0);
2225 #endif
2227 /* Store it to internal memory */
2228 for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
2229 REG_WR(bp, BAR_XSTRORM_INTMEM +
2230 XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
2231 ((u32 *)(&m_rs_vn))[i]);
2233 for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
2234 REG_WR(bp, BAR_XSTRORM_INTMEM +
2235 XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
2236 ((u32 *)(&m_fair_vn))[i]);
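/*
 * Worked example (illustrative): with vn_max_rate = 10000 Mbps and the
 * 100 usec rate-shaping period noted earlier, the per-period quota is
 *
 *	quota = 10000 bits/usec * 100 usec / 8 = 125000 bytes
 *
 * The fairness credit is kept symbolic, since T_FAIR_COEF is a chip
 * constant:
 *
 *	vn_credit_delta = max(vn_min_rate * T_FAIR_COEF / (8 * wsum),
 *			      2 * fair_threshold)
 */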
2239 /* This function is called upon link interrupt */
2240 static void bnx2x_link_attn(struct bnx2x *bp)
2242 int vn;
2244 /* Make sure that we are synced with the current statistics */
2245 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2247 bnx2x_link_update(&bp->link_params, &bp->link_vars);
2249 if (bp->link_vars.link_up) {
2251 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
2252 struct host_port_stats *pstats;
2254 pstats = bnx2x_sp(bp, port_stats);
2255 /* reset old bmac stats */
2256 memset(&(pstats->mac_stx[0]), 0,
2257 sizeof(struct mac_stx));
2259 if ((bp->state == BNX2X_STATE_OPEN) ||
2260 (bp->state == BNX2X_STATE_DISABLED))
2261 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2264 /* indicate link status */
2265 bnx2x_link_report(bp);
2267 if (IS_E1HMF(bp)) {
2268 int func;
2270 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2271 if (vn == BP_E1HVN(bp))
2272 continue;
2274 func = ((vn << 1) | BP_PORT(bp));
2276 /* Set the attention towards other drivers
2277 on the same port */
2278 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2279 (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2283 if (CHIP_IS_E1H(bp) && (bp->link_vars.line_speed > 0)) {
2284 struct cmng_struct_per_port m_cmng_port;
2285 u32 wsum;
2286 int port = BP_PORT(bp);
2288 /* Init RATE SHAPING and FAIRNESS contexts */
2289 wsum = bnx2x_calc_vn_wsum(bp);
2290 bnx2x_init_port_minmax(bp, (int)wsum,
2291 bp->link_vars.line_speed,
2292 &m_cmng_port);
2293 if (IS_E1HMF(bp))
2294 for (vn = VN_0; vn < E1HVN_MAX; vn++)
2295 bnx2x_init_vn_minmax(bp, 2*vn + port,
2296 wsum, bp->link_vars.line_speed,
2297 &m_cmng_port);
2301 static void bnx2x__link_status_update(struct bnx2x *bp)
2303 if (bp->state != BNX2X_STATE_OPEN)
2304 return;
2306 bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
2308 if (bp->link_vars.link_up)
2309 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2310 else
2311 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2313 /* indicate link status */
2314 bnx2x_link_report(bp);
2317 static void bnx2x_pmf_update(struct bnx2x *bp)
2319 int port = BP_PORT(bp);
2320 u32 val;
2322 bp->port.pmf = 1;
2323 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2325 /* enable nig attention */
2326 val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
2327 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
2328 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
2330 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
2333 /* end of Link */
2335 /* slow path */
2338 * General service functions
2341 /* the slow path queue is odd since completions arrive on the fastpath ring */
2342 static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
2343 u32 data_hi, u32 data_lo, int common)
2345 int func = BP_FUNC(bp);
2347 DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
2348 "SPQE (%x:%x) command %d hw_cid %x data (%x:%x) left %x\n",
2349 (u32)U64_HI(bp->spq_mapping), (u32)(U64_LO(bp->spq_mapping) +
2350 (void *)bp->spq_prod_bd - (void *)bp->spq), command,
2351 HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);
2353 #ifdef BNX2X_STOP_ON_ERROR
2354 if (unlikely(bp->panic))
2355 return -EIO;
2356 #endif
2358 spin_lock_bh(&bp->spq_lock);
2360 if (!bp->spq_left) {
2361 BNX2X_ERR("BUG! SPQ ring full!\n");
2362 spin_unlock_bh(&bp->spq_lock);
2363 bnx2x_panic();
2364 return -EBUSY;
2367 /* CID needs the port number to be encoded in it */
2368 bp->spq_prod_bd->hdr.conn_and_cmd_data =
2369 cpu_to_le32(((command << SPE_HDR_CMD_ID_SHIFT) |
2370 HW_CID(bp, cid)));
2371 bp->spq_prod_bd->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
2372 if (common)
2373 bp->spq_prod_bd->hdr.type |=
2374 cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));
2376 bp->spq_prod_bd->data.mac_config_addr.hi = cpu_to_le32(data_hi);
2377 bp->spq_prod_bd->data.mac_config_addr.lo = cpu_to_le32(data_lo);
2379 bp->spq_left--;
2381 if (bp->spq_prod_bd == bp->spq_last_bd) {
2382 bp->spq_prod_bd = bp->spq;
2383 bp->spq_prod_idx = 0;
2384 DP(NETIF_MSG_TIMER, "end of spq\n");
2386 } else {
2387 bp->spq_prod_bd++;
2388 bp->spq_prod_idx++;
2391 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
2392 bp->spq_prod_idx);
2394 spin_unlock_bh(&bp->spq_lock);
2395 return 0;
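/*
 * Usage sketch (for reference only): callers hand bnx2x_sp_post() the
 * ramrod command id, the connection id and two data words, with the last
 * argument selecting a "common" ramrod.  bnx2x_storm_stats_post() below
 * is an in-file example:
 *
 *	rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
 *			   data_hi, data_lo, 0);
 */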
2398 /* acquire split MCP access lock register */
2399 static int bnx2x_acquire_alr(struct bnx2x *bp)
2401 u32 i, j, val;
2402 int rc = 0;
2404 might_sleep();
2405 i = 100;
2406 for (j = 0; j < i*10; j++) {
2407 val = (1UL << 31);
2408 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2409 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
2410 if (val & (1L << 31))
2411 break;
2413 msleep(5);
2415 if (!(val & (1L << 31))) {
2416 BNX2X_ERR("Cannot acquire MCP access lock register\n");
2417 rc = -EBUSY;
2420 return rc;
2423 /* release split MCP access lock register */
2424 static void bnx2x_release_alr(struct bnx2x *bp)
2426 u32 val = 0;
2428 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2431 static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
2433 struct host_def_status_block *def_sb = bp->def_status_blk;
2434 u16 rc = 0;
2436 barrier(); /* status block is written to by the chip */
2437 if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
2438 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
2439 rc |= 1;
2441 if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
2442 bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
2443 rc |= 2;
2445 if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
2446 bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
2447 rc |= 4;
2449 if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
2450 bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
2451 rc |= 8;
2453 if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
2454 bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
2455 rc |= 16;
2457 return rc;
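/*
 * For reference, the bitmask built above maps as follows: bit 0 - attention
 * bits index changed, bit 1 - CSTORM, bit 2 - USTORM, bit 3 - XSTORM,
 * bit 4 - TSTORM default status block index changed.  bnx2x_sp_task()
 * below keys off bits 0 (HW attentions) and 1 (CStorm events).
 */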
2461 * slow path service functions
2464 static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
2466 int port = BP_PORT(bp);
2467 u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
2468 COMMAND_REG_ATTN_BITS_SET);
2469 u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2470 MISC_REG_AEU_MASK_ATTN_FUNC_0;
2471 u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
2472 NIG_REG_MASK_INTERRUPT_PORT0;
2473 u32 aeu_mask;
2475 if (bp->attn_state & asserted)
2476 BNX2X_ERR("IGU ERROR\n");
2478 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2479 aeu_mask = REG_RD(bp, aeu_addr);
2481 DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n",
2482 aeu_mask, asserted);
2483 aeu_mask &= ~(asserted & 0xff);
2484 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
2486 REG_WR(bp, aeu_addr, aeu_mask);
2487 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2489 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2490 bp->attn_state |= asserted;
2491 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2493 if (asserted & ATTN_HARD_WIRED_MASK) {
2494 if (asserted & ATTN_NIG_FOR_FUNC) {
2496 bnx2x_acquire_phy_lock(bp);
2498 /* save nig interrupt mask */
2499 bp->nig_mask = REG_RD(bp, nig_int_mask_addr);
2500 REG_WR(bp, nig_int_mask_addr, 0);
2502 bnx2x_link_attn(bp);
2504 /* handle unicore attn? */
2506 if (asserted & ATTN_SW_TIMER_4_FUNC)
2507 DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
2509 if (asserted & GPIO_2_FUNC)
2510 DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
2512 if (asserted & GPIO_3_FUNC)
2513 DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
2515 if (asserted & GPIO_4_FUNC)
2516 DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
2518 if (port == 0) {
2519 if (asserted & ATTN_GENERAL_ATTN_1) {
2520 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
2521 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
2523 if (asserted & ATTN_GENERAL_ATTN_2) {
2524 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
2525 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
2527 if (asserted & ATTN_GENERAL_ATTN_3) {
2528 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
2529 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
2531 } else {
2532 if (asserted & ATTN_GENERAL_ATTN_4) {
2533 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
2534 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
2536 if (asserted & ATTN_GENERAL_ATTN_5) {
2537 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
2538 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
2540 if (asserted & ATTN_GENERAL_ATTN_6) {
2541 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
2542 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
2546 } /* if hardwired */
2548 DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2549 asserted, hc_addr);
2550 REG_WR(bp, hc_addr, asserted);
2552 /* now set back the mask */
2553 if (asserted & ATTN_NIG_FOR_FUNC) {
2554 REG_WR(bp, nig_int_mask_addr, bp->nig_mask);
2555 bnx2x_release_phy_lock(bp);
2559 static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
2561 int port = BP_PORT(bp);
2562 int reg_offset;
2563 u32 val;
2565 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
2566 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
2568 if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
2570 val = REG_RD(bp, reg_offset);
2571 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
2572 REG_WR(bp, reg_offset, val);
2574 BNX2X_ERR("SPIO5 hw attention\n");
2576 switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
2577 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1021G:
2578 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
2579 /* Fan failure attention */
2581 /* The PHY reset is controlled by GPIO 1 */
2582 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2583 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2584 /* Low power mode is controlled by GPIO 2 */
2585 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
2586 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2587 /* mark the failure */
2588 bp->link_params.ext_phy_config &=
2589 ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
2590 bp->link_params.ext_phy_config |=
2591 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
2592 SHMEM_WR(bp,
2593 dev_info.port_hw_config[port].
2594 external_phy_config,
2595 bp->link_params.ext_phy_config);
2596 /* log the failure */
2597 printk(KERN_ERR PFX "Fan Failure on Network"
2598 " Controller %s has caused the driver to"
2599 " shut down the card to prevent permanent"
2600 " damage. Please contact Dell Support for"
2601 " assistance\n", bp->dev->name);
2602 break;
2604 default:
2605 break;
2609 if (attn & HW_INTERRUT_ASSERT_SET_0) {
2611 val = REG_RD(bp, reg_offset);
2612 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
2613 REG_WR(bp, reg_offset, val);
2615 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
2616 (attn & HW_INTERRUT_ASSERT_SET_0));
2617 bnx2x_panic();
2621 static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
2623 u32 val;
2625 if (attn & BNX2X_DOORQ_ASSERT) {
2627 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
2628 BNX2X_ERR("DB hw attention 0x%x\n", val);
2629 /* DORQ discard attention */
2630 if (val & 0x2)
2631 BNX2X_ERR("FATAL error from DORQ\n");
2634 if (attn & HW_INTERRUT_ASSERT_SET_1) {
2636 int port = BP_PORT(bp);
2637 int reg_offset;
2639 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
2640 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
2642 val = REG_RD(bp, reg_offset);
2643 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
2644 REG_WR(bp, reg_offset, val);
2646 BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
2647 (attn & HW_INTERRUT_ASSERT_SET_1));
2648 bnx2x_panic();
2652 static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
2654 u32 val;
2656 if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
2658 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
2659 BNX2X_ERR("CFC hw attention 0x%x\n", val);
2660 /* CFC error attention */
2661 if (val & 0x2)
2662 BNX2X_ERR("FATAL error from CFC\n");
2665 if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
2667 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
2668 BNX2X_ERR("PXP hw attention 0x%x\n", val);
2669 /* RQ_USDMDP_FIFO_OVERFLOW */
2670 if (val & 0x18000)
2671 BNX2X_ERR("FATAL error from PXP\n");
2674 if (attn & HW_INTERRUT_ASSERT_SET_2) {
2676 int port = BP_PORT(bp);
2677 int reg_offset;
2679 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
2680 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
2682 val = REG_RD(bp, reg_offset);
2683 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
2684 REG_WR(bp, reg_offset, val);
2686 BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
2687 (attn & HW_INTERRUT_ASSERT_SET_2));
2688 bnx2x_panic();
2692 static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
2694 u32 val;
2696 if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
2698 if (attn & BNX2X_PMF_LINK_ASSERT) {
2699 int func = BP_FUNC(bp);
2701 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
2702 bnx2x__link_status_update(bp);
2703 if (SHMEM_RD(bp, func_mb[func].drv_status) &
2704 DRV_STATUS_PMF)
2705 bnx2x_pmf_update(bp);
2707 } else if (attn & BNX2X_MC_ASSERT_BITS) {
2709 BNX2X_ERR("MC assert!\n");
2710 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
2711 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
2712 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
2713 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
2714 bnx2x_panic();
2716 } else if (attn & BNX2X_MCP_ASSERT) {
2718 BNX2X_ERR("MCP assert!\n");
2719 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
2720 bnx2x_fw_dump(bp);
2722 } else
2723 BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
2726 if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
2727 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
2728 if (attn & BNX2X_GRC_TIMEOUT) {
2729 val = CHIP_IS_E1H(bp) ?
2730 REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
2731 BNX2X_ERR("GRC time-out 0x%08x\n", val);
2733 if (attn & BNX2X_GRC_RSV) {
2734 val = CHIP_IS_E1H(bp) ?
2735 REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
2736 BNX2X_ERR("GRC reserved 0x%08x\n", val);
2738 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
2742 static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
2744 struct attn_route attn;
2745 struct attn_route group_mask;
2746 int port = BP_PORT(bp);
2747 int index;
2748 u32 reg_addr;
2749 u32 val;
2750 u32 aeu_mask;
2752 /* need to take HW lock because MCP or other port might also
2753 try to handle this event */
2754 bnx2x_acquire_alr(bp);
2756 attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
2757 attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
2758 attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
2759 attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
2760 DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
2761 attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);
2763 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
2764 if (deasserted & (1 << index)) {
2765 group_mask = bp->attn_group[index];
2767 DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
2768 index, group_mask.sig[0], group_mask.sig[1],
2769 group_mask.sig[2], group_mask.sig[3]);
2771 bnx2x_attn_int_deasserted3(bp,
2772 attn.sig[3] & group_mask.sig[3]);
2773 bnx2x_attn_int_deasserted1(bp,
2774 attn.sig[1] & group_mask.sig[1]);
2775 bnx2x_attn_int_deasserted2(bp,
2776 attn.sig[2] & group_mask.sig[2]);
2777 bnx2x_attn_int_deasserted0(bp,
2778 attn.sig[0] & group_mask.sig[0]);
2780 if ((attn.sig[0] & group_mask.sig[0] &
2781 HW_PRTY_ASSERT_SET_0) ||
2782 (attn.sig[1] & group_mask.sig[1] &
2783 HW_PRTY_ASSERT_SET_1) ||
2784 (attn.sig[2] & group_mask.sig[2] &
2785 HW_PRTY_ASSERT_SET_2))
2786 BNX2X_ERR("FATAL HW block parity attention\n");
2790 bnx2x_release_alr(bp);
2792 reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);
2794 val = ~deasserted;
2795 DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2796 val, reg_addr);
2797 REG_WR(bp, reg_addr, val);
2799 if (~bp->attn_state & deasserted)
2800 BNX2X_ERR("IGU ERROR\n");
2802 reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2803 MISC_REG_AEU_MASK_ATTN_FUNC_0;
2805 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2806 aeu_mask = REG_RD(bp, reg_addr);
2808 DP(NETIF_MSG_HW, "aeu_mask %x newly deasserted %x\n",
2809 aeu_mask, deasserted);
2810 aeu_mask |= (deasserted & 0xff);
2811 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
2813 REG_WR(bp, reg_addr, aeu_mask);
2814 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2816 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2817 bp->attn_state &= ~deasserted;
2818 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2821 static void bnx2x_attn_int(struct bnx2x *bp)
2823 /* read local copy of bits */
2824 u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
2825 attn_bits);
2826 u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
2827 attn_bits_ack);
2828 u32 attn_state = bp->attn_state;
2830 /* look for changed bits */
2831 u32 asserted = attn_bits & ~attn_ack & ~attn_state;
2832 u32 deasserted = ~attn_bits & attn_ack & attn_state;
2834 DP(NETIF_MSG_HW,
2835 "attn_bits %x attn_ack %x asserted %x deasserted %x\n",
2836 attn_bits, attn_ack, asserted, deasserted);
2838 if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
2839 BNX2X_ERR("BAD attention state\n");
2841 /* handle bits that were raised */
2842 if (asserted)
2843 bnx2x_attn_int_asserted(bp, asserted);
2845 if (deasserted)
2846 bnx2x_attn_int_deasserted(bp, deasserted);
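/*
 * Worked example (illustrative bit values): with attn_bits = 0101b,
 * attn_ack = 0011b and attn_state = 0011b,
 *
 *	asserted   =  0101b & ~0011b & ~0011b = 0100b
 *	deasserted = ~0101b &  0011b &  0011b = 0010b
 *
 * i.e. bit 2 was newly raised and bit 1 was just acknowledged and dropped,
 * so both handlers above run.
 */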
2849 static void bnx2x_sp_task(struct work_struct *work)
2851 struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
2852 u16 status;
2855 /* Return here if interrupt is disabled */
2856 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
2857 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
2858 return;
2861 status = bnx2x_update_dsb_idx(bp);
2862 /* if (status == 0) */
2863 /* BNX2X_ERR("spurious slowpath interrupt!\n"); */
2865 DP(NETIF_MSG_INTR, "got a slowpath interrupt (updated %x)\n", status);
2867 /* HW attentions */
2868 if (status & 0x1)
2869 bnx2x_attn_int(bp);
2871 /* CStorm events: query_stats, port delete ramrod */
2872 if (status & 0x2)
2873 bp->stats_pending = 0;
2875 bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, le16_to_cpu(bp->def_att_idx),
2876 IGU_INT_NOP, 1);
2877 bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
2878 IGU_INT_NOP, 1);
2879 bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
2880 IGU_INT_NOP, 1);
2881 bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
2882 IGU_INT_NOP, 1);
2883 bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
2884 IGU_INT_ENABLE, 1);
2888 static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
2890 struct net_device *dev = dev_instance;
2891 struct bnx2x *bp = netdev_priv(dev);
2893 /* Return here if interrupt is disabled */
2894 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
2895 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
2896 return IRQ_HANDLED;
2899 bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, 0, IGU_INT_DISABLE, 0);
2901 #ifdef BNX2X_STOP_ON_ERROR
2902 if (unlikely(bp->panic))
2903 return IRQ_HANDLED;
2904 #endif
2906 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
2908 return IRQ_HANDLED;
2911 /* end of slow path */
2913 /* Statistics */
2915 /****************************************************************************
2916 * Macros
2917 ****************************************************************************/
2919 /* sum[hi:lo] += add[hi:lo] */
2920 #define ADD_64(s_hi, a_hi, s_lo, a_lo) \
2921 do { \
2922 s_lo += a_lo; \
2923 s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
2924 } while (0)
2926 /* difference = minuend - subtrahend */
2927 #define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
2928 do { \
2929 if (m_lo < s_lo) { \
2930 /* underflow */ \
2931 d_hi = m_hi - s_hi; \
2932 if (d_hi > 0) { \
2933 /* we can borrow 1 from the high word */ \
2934 d_hi--; \
2935 d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
2936 } else { \
2937 /* m_hi <= s_hi */ \
2938 d_hi = 0; \
2939 d_lo = 0; \
2941 } else { \
2942 /* m_lo >= s_lo */ \
2943 if (m_hi < s_hi) { \
2944 d_hi = 0; \
2945 d_lo = 0; \
2946 } else { \
2947 /* m_hi >= s_hi */ \
2948 d_hi = m_hi - s_hi; \
2949 d_lo = m_lo - s_lo; \
2952 } while (0)
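/*
 * Illustrative sketch, not used by the driver: exercising ADD_64/DIFF_64 on
 * hypothetical hi:lo counter pairs to show the carry and borrow paths.
 */
static inline void bnx2x_hilo_arith_example(void)
{
	u32 s_hi = 0, s_lo = 0xfffffffe;	/* running 64-bit sum as hi:lo */
	u32 d_hi, d_lo;				/* 64-bit difference as hi:lo */

	/* 0:fffffffe + 0:00000003 = 1:00000001 (low word wraps, carry set) */
	ADD_64(s_hi, 0, s_lo, 3);

	/* 1:00000001 - 0:00000002 = 0:ffffffff (borrow from the high word) */
	DIFF_64(d_hi, s_hi, 0, d_lo, s_lo, 2);
}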
2954 #define UPDATE_STAT64(s, t) \
2955 do { \
2956 DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
2957 diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
2958 pstats->mac_stx[0].t##_hi = new->s##_hi; \
2959 pstats->mac_stx[0].t##_lo = new->s##_lo; \
2960 ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
2961 pstats->mac_stx[1].t##_lo, diff.lo); \
2962 } while (0)
2964 #define UPDATE_STAT64_NIG(s, t) \
2965 do { \
2966 DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
2967 diff.lo, new->s##_lo, old->s##_lo); \
2968 ADD_64(estats->t##_hi, diff.hi, \
2969 estats->t##_lo, diff.lo); \
2970 } while (0)
2972 /* sum[hi:lo] += add */
2973 #define ADD_EXTEND_64(s_hi, s_lo, a) \
2974 do { \
2975 s_lo += a; \
2976 s_hi += (s_lo < a) ? 1 : 0; \
2977 } while (0)
2979 #define UPDATE_EXTEND_STAT(s) \
2980 do { \
2981 ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
2982 pstats->mac_stx[1].s##_lo, \
2983 new->s); \
2984 } while (0)
2986 #define UPDATE_EXTEND_TSTAT(s, t) \
2987 do { \
2988 diff = le32_to_cpu(tclient->s) - old_tclient->s; \
2989 old_tclient->s = le32_to_cpu(tclient->s); \
2990 ADD_EXTEND_64(fstats->t##_hi, fstats->t##_lo, diff); \
2991 } while (0)
2993 #define UPDATE_EXTEND_XSTAT(s, t) \
2994 do { \
2995 diff = le32_to_cpu(xclient->s) - old_xclient->s; \
2996 old_xclient->s = le32_to_cpu(xclient->s); \
2997 ADD_EXTEND_64(fstats->t##_hi, fstats->t##_lo, diff); \
2998 } while (0)
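/*
 * Note (for reference): UPDATE_EXTEND_TSTAT/UPDATE_EXTEND_XSTAT above extend
 * 32-bit storm counters to 64 bits - the unsigned difference "new - old"
 * stays correct across a single 32-bit wrap-around and is folded into the
 * hi:lo accumulator via ADD_EXTEND_64 before "new" is saved as the next
 * "old".
 */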
3001 * General service functions
3004 static inline long bnx2x_hilo(u32 *hiref)
3006 u32 lo = *(hiref + 1);
3007 #if (BITS_PER_LONG == 64)
3008 u32 hi = *hiref;
3010 return HILO_U64(hi, lo);
3011 #else
3012 return lo;
3013 #endif
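/*
 * Example (illustrative): on a 64-bit kernel, bnx2x_hilo() on the pair
 * hi = 0x00000001, lo = 0x23456789 yields 0x123456789; a 32-bit kernel
 * returns only the low word, 0x23456789, since a long cannot hold the
 * full 64-bit counter there.
 */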
3017 * Init service functions
3020 static void bnx2x_storm_stats_post(struct bnx2x *bp)
3022 if (!bp->stats_pending) {
3023 struct eth_query_ramrod_data ramrod_data = {0};
3024 int rc;
3026 ramrod_data.drv_counter = bp->stats_counter++;
3027 ramrod_data.collect_port_1b = bp->port.pmf ? 1 : 0;
3028 ramrod_data.ctr_id_vector = (1 << BP_CL_ID(bp));
3030 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
3031 ((u32 *)&ramrod_data)[1],
3032 ((u32 *)&ramrod_data)[0], 0);
3033 if (rc == 0) {
3034 /* stats ramrod has its own slot on the spq */
3035 bp->spq_left++;
3036 bp->stats_pending = 1;
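/*
 * For reference: the counter handshake works as follows - the ramrod above
 * carries drv_counter = N while bp->stats_counter advances to N + 1;
 * bnx2x_storm_stats_update() later accepts the storm data only when the
 * counter echoed back by the storms, plus one, equals bp->stats_counter,
 * i.e. the storms have processed this particular query.
 */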
3041 static void bnx2x_stats_init(struct bnx2x *bp)
3043 int port = BP_PORT(bp);
3045 bp->executer_idx = 0;
3046 bp->stats_counter = 0;
3048 /* port stats */
3049 if (!BP_NOMCP(bp))
3050 bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
3051 else
3052 bp->port.port_stx = 0;
3053 DP(BNX2X_MSG_STATS, "port_stx 0x%x\n", bp->port.port_stx);
3055 memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
3056 bp->port.old_nig_stats.brb_discard =
3057 REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
3058 bp->port.old_nig_stats.brb_truncate =
3059 REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
3060 REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
3061 &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
3062 REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
3063 &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);
3065 /* function stats */
3066 memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
3067 memset(&bp->old_tclient, 0, sizeof(struct tstorm_per_client_stats));
3068 memset(&bp->old_xclient, 0, sizeof(struct xstorm_per_client_stats));
3069 memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats));
3071 bp->stats_state = STATS_STATE_DISABLED;
3072 if (IS_E1HMF(bp) && bp->port.pmf && bp->port.port_stx)
3073 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
3076 static void bnx2x_hw_stats_post(struct bnx2x *bp)
3078 struct dmae_command *dmae = &bp->stats_dmae;
3079 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3081 *stats_comp = DMAE_COMP_VAL;
3083 /* loader */
3084 if (bp->executer_idx) {
3085 int loader_idx = PMF_DMAE_C(bp);
3087 memset(dmae, 0, sizeof(struct dmae_command));
3089 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3090 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3091 DMAE_CMD_DST_RESET |
3092 #ifdef __BIG_ENDIAN
3093 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3094 #else
3095 DMAE_CMD_ENDIANITY_DW_SWAP |
3096 #endif
3097 (BP_PORT(bp) ? DMAE_CMD_PORT_1 :
3098 DMAE_CMD_PORT_0) |
3099 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3100 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
3101 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
3102 dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
3103 sizeof(struct dmae_command) *
3104 (loader_idx + 1)) >> 2;
3105 dmae->dst_addr_hi = 0;
3106 dmae->len = sizeof(struct dmae_command) >> 2;
3107 if (CHIP_IS_E1(bp))
3108 dmae->len--;
3109 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
3110 dmae->comp_addr_hi = 0;
3111 dmae->comp_val = 1;
3113 *stats_comp = 0;
3114 bnx2x_post_dmae(bp, dmae, loader_idx);
3116 } else if (bp->func_stx) {
3117 *stats_comp = 0;
3118 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
3122 static int bnx2x_stats_comp(struct bnx2x *bp)
3124 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3125 int cnt = 10;
3127 might_sleep();
3128 while (*stats_comp != DMAE_COMP_VAL) {
3129 if (!cnt) {
3130 BNX2X_ERR("timeout waiting for stats to finish\n");
3131 break;
3133 cnt--;
3134 msleep(1);
3136 return 1;
3140 * Statistics service functions
3143 static void bnx2x_stats_pmf_update(struct bnx2x *bp)
3145 struct dmae_command *dmae;
3146 u32 opcode;
3147 int loader_idx = PMF_DMAE_C(bp);
3148 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3150 /* sanity */
3151 if (!IS_E1HMF(bp) || !bp->port.pmf || !bp->port.port_stx) {
3152 BNX2X_ERR("BUG!\n");
3153 return;
3156 bp->executer_idx = 0;
3158 opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3159 DMAE_CMD_C_ENABLE |
3160 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3161 #ifdef __BIG_ENDIAN
3162 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3163 #else
3164 DMAE_CMD_ENDIANITY_DW_SWAP |
3165 #endif
3166 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3167 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3169 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3170 dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
3171 dmae->src_addr_lo = bp->port.port_stx >> 2;
3172 dmae->src_addr_hi = 0;
3173 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3174 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3175 dmae->len = DMAE_LEN32_RD_MAX;
3176 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3177 dmae->comp_addr_hi = 0;
3178 dmae->comp_val = 1;
3180 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3181 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3182 dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
3183 dmae->src_addr_hi = 0;
3184 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
3185 DMAE_LEN32_RD_MAX * 4);
3186 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
3187 DMAE_LEN32_RD_MAX * 4);
3188 dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX;
3189 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3190 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3191 dmae->comp_val = DMAE_COMP_VAL;
3193 *stats_comp = 0;
3194 bnx2x_hw_stats_post(bp);
3195 bnx2x_stats_comp(bp);
3198 static void bnx2x_port_stats_init(struct bnx2x *bp)
3200 struct dmae_command *dmae;
3201 int port = BP_PORT(bp);
3202 int vn = BP_E1HVN(bp);
3203 u32 opcode;
3204 int loader_idx = PMF_DMAE_C(bp);
3205 u32 mac_addr;
3206 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3208 /* sanity */
3209 if (!bp->link_vars.link_up || !bp->port.pmf) {
3210 BNX2X_ERR("BUG!\n");
3211 return;
3214 bp->executer_idx = 0;
3216 /* MCP */
3217 opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3218 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3219 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3220 #ifdef __BIG_ENDIAN
3221 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3222 #else
3223 DMAE_CMD_ENDIANITY_DW_SWAP |
3224 #endif
3225 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3226 (vn << DMAE_CMD_E1HVN_SHIFT));
3228 if (bp->port.port_stx) {
3230 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3231 dmae->opcode = opcode;
3232 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3233 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3234 dmae->dst_addr_lo = bp->port.port_stx >> 2;
3235 dmae->dst_addr_hi = 0;
3236 dmae->len = sizeof(struct host_port_stats) >> 2;
3237 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3238 dmae->comp_addr_hi = 0;
3239 dmae->comp_val = 1;
3242 if (bp->func_stx) {
3244 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3245 dmae->opcode = opcode;
3246 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3247 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3248 dmae->dst_addr_lo = bp->func_stx >> 2;
3249 dmae->dst_addr_hi = 0;
3250 dmae->len = sizeof(struct host_func_stats) >> 2;
3251 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3252 dmae->comp_addr_hi = 0;
3253 dmae->comp_val = 1;
3256 /* MAC */
3257 opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3258 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3259 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3260 #ifdef __BIG_ENDIAN
3261 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3262 #else
3263 DMAE_CMD_ENDIANITY_DW_SWAP |
3264 #endif
3265 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3266 (vn << DMAE_CMD_E1HVN_SHIFT));
3268 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
3270 mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
3271 NIG_REG_INGRESS_BMAC0_MEM);
3273 /* BIGMAC_REGISTER_TX_STAT_GTPKT ..
3274 BIGMAC_REGISTER_TX_STAT_GTBYT */
3275 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3276 dmae->opcode = opcode;
3277 dmae->src_addr_lo = (mac_addr +
3278 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3279 dmae->src_addr_hi = 0;
3280 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3281 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3282 dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
3283 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3284 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3285 dmae->comp_addr_hi = 0;
3286 dmae->comp_val = 1;
3288 /* BIGMAC_REGISTER_RX_STAT_GR64 ..
3289 BIGMAC_REGISTER_RX_STAT_GRIPJ */
3290 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3291 dmae->opcode = opcode;
3292 dmae->src_addr_lo = (mac_addr +
3293 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3294 dmae->src_addr_hi = 0;
3295 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3296 offsetof(struct bmac_stats, rx_stat_gr64_lo));
3297 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3298 offsetof(struct bmac_stats, rx_stat_gr64_lo));
3299 dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
3300 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3301 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3302 dmae->comp_addr_hi = 0;
3303 dmae->comp_val = 1;
3305 } else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {
3307 mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);
3309 /* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
3310 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3311 dmae->opcode = opcode;
3312 dmae->src_addr_lo = (mac_addr +
3313 EMAC_REG_EMAC_RX_STAT_AC) >> 2;
3314 dmae->src_addr_hi = 0;
3315 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3316 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3317 dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
3318 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3319 dmae->comp_addr_hi = 0;
3320 dmae->comp_val = 1;
3322 /* EMAC_REG_EMAC_RX_STAT_AC_28 */
3323 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3324 dmae->opcode = opcode;
3325 dmae->src_addr_lo = (mac_addr +
3326 EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
3327 dmae->src_addr_hi = 0;
3328 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3329 offsetof(struct emac_stats, rx_stat_falsecarriererrors));
3330 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3331 offsetof(struct emac_stats, rx_stat_falsecarriererrors));
3332 dmae->len = 1;
3333 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3334 dmae->comp_addr_hi = 0;
3335 dmae->comp_val = 1;
3337 /* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
3338 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3339 dmae->opcode = opcode;
3340 dmae->src_addr_lo = (mac_addr +
3341 EMAC_REG_EMAC_TX_STAT_AC) >> 2;
3342 dmae->src_addr_hi = 0;
3343 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3344 offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
3345 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3346 offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
3347 dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
3348 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3349 dmae->comp_addr_hi = 0;
3350 dmae->comp_val = 1;
3353 /* NIG */
3354 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3355 dmae->opcode = opcode;
3356 dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
3357 NIG_REG_STAT0_BRB_DISCARD) >> 2;
3358 dmae->src_addr_hi = 0;
3359 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
3360 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
3361 dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;
3362 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3363 dmae->comp_addr_hi = 0;
3364 dmae->comp_val = 1;
3366 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3367 dmae->opcode = opcode;
3368 dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
3369 NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
3370 dmae->src_addr_hi = 0;
3371 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3372 offsetof(struct nig_stats, egress_mac_pkt0_lo));
3373 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3374 offsetof(struct nig_stats, egress_mac_pkt0_lo));
3375 dmae->len = (2*sizeof(u32)) >> 2;
3376 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3377 dmae->comp_addr_hi = 0;
3378 dmae->comp_val = 1;
3380 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3381 dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3382 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3383 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3384 #ifdef __BIG_ENDIAN
3385 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3386 #else
3387 DMAE_CMD_ENDIANITY_DW_SWAP |
3388 #endif
3389 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3390 (vn << DMAE_CMD_E1HVN_SHIFT));
3391 dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
3392 NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
3393 dmae->src_addr_hi = 0;
3394 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3395 offsetof(struct nig_stats, egress_mac_pkt1_lo));
3396 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3397 offsetof(struct nig_stats, egress_mac_pkt1_lo));
3398 dmae->len = (2*sizeof(u32)) >> 2;
3399 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3400 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3401 dmae->comp_val = DMAE_COMP_VAL;
3403 *stats_comp = 0;
3406 static void bnx2x_func_stats_init(struct bnx2x *bp)
3408 struct dmae_command *dmae = &bp->stats_dmae;
3409 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3411 /* sanity */
3412 if (!bp->func_stx) {
3413 BNX2X_ERR("BUG!\n");
3414 return;
3417 bp->executer_idx = 0;
3418 memset(dmae, 0, sizeof(struct dmae_command));
3420 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3421 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3422 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3423 #ifdef __BIG_ENDIAN
3424 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3425 #else
3426 DMAE_CMD_ENDIANITY_DW_SWAP |
3427 #endif
3428 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3429 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3430 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3431 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3432 dmae->dst_addr_lo = bp->func_stx >> 2;
3433 dmae->dst_addr_hi = 0;
3434 dmae->len = sizeof(struct host_func_stats) >> 2;
3435 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3436 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3437 dmae->comp_val = DMAE_COMP_VAL;
3439 *stats_comp = 0;
3442 static void bnx2x_stats_start(struct bnx2x *bp)
3444 if (bp->port.pmf)
3445 bnx2x_port_stats_init(bp);
3447 else if (bp->func_stx)
3448 bnx2x_func_stats_init(bp);
3450 bnx2x_hw_stats_post(bp);
3451 bnx2x_storm_stats_post(bp);
3454 static void bnx2x_stats_pmf_start(struct bnx2x *bp)
3456 bnx2x_stats_comp(bp);
3457 bnx2x_stats_pmf_update(bp);
3458 bnx2x_stats_start(bp);
3461 static void bnx2x_stats_restart(struct bnx2x *bp)
3463 bnx2x_stats_comp(bp);
3464 bnx2x_stats_start(bp);
3467 static void bnx2x_bmac_stats_update(struct bnx2x *bp)
3469 struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac_stats);
3470 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3471 struct regpair diff;
3473 UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
3474 UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
3475 UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
3476 UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
3477 UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
3478 UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
3479 UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
3480 UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
3481 UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffpauseframesreceived);
3482 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
3483 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
3484 UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
3485 UPDATE_STAT64(tx_stat_gt127,
3486 tx_stat_etherstatspkts65octetsto127octets);
3487 UPDATE_STAT64(tx_stat_gt255,
3488 tx_stat_etherstatspkts128octetsto255octets);
3489 UPDATE_STAT64(tx_stat_gt511,
3490 tx_stat_etherstatspkts256octetsto511octets);
3491 UPDATE_STAT64(tx_stat_gt1023,
3492 tx_stat_etherstatspkts512octetsto1023octets);
3493 UPDATE_STAT64(tx_stat_gt1518,
3494 tx_stat_etherstatspkts1024octetsto1522octets);
3495 UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047);
3496 UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095);
3497 UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216);
3498 UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383);
3499 UPDATE_STAT64(tx_stat_gterr,
3500 tx_stat_dot3statsinternalmactransmiterrors);
3501 UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl);
3504 static void bnx2x_emac_stats_update(struct bnx2x *bp)
3506 struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
3507 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3509 UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
3510 UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
3511 UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
3512 UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
3513 UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
3514 UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
3515 UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
3516 UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
3517 UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
3518 UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
3519 UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
3520 UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
3521 UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
3522 UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
3523 UPDATE_EXTEND_STAT(tx_stat_outxonsent);
3524 UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
3525 UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
3526 UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
3527 UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
3528 UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
3529 UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
3530 UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
3531 UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
3532 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
3533 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
3534 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
3535 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
3536 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
3537 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
3538 UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
3539 UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);
3542 static int bnx2x_hw_stats_update(struct bnx2x *bp)
3544 struct nig_stats *new = bnx2x_sp(bp, nig_stats);
3545 struct nig_stats *old = &(bp->port.old_nig_stats);
3546 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3547 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3548 struct regpair diff;
3550 if (bp->link_vars.mac_type == MAC_TYPE_BMAC)
3551 bnx2x_bmac_stats_update(bp);
3553 else if (bp->link_vars.mac_type == MAC_TYPE_EMAC)
3554 bnx2x_emac_stats_update(bp);
3556 else { /* unreached */
3557 BNX2X_ERR("stats updated by dmae but no MAC active\n");
3558 return -1;
3561 ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
3562 new->brb_discard - old->brb_discard);
3563 ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
3564 new->brb_truncate - old->brb_truncate);
3566 UPDATE_STAT64_NIG(egress_mac_pkt0,
3567 etherstatspkts1024octetsto1522octets);
3568 UPDATE_STAT64_NIG(egress_mac_pkt1, etherstatspktsover1522octets);
3570 memcpy(old, new, sizeof(struct nig_stats));
3572 memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
3573 sizeof(struct mac_stx));
3574 estats->brb_drop_hi = pstats->brb_drop_hi;
3575 estats->brb_drop_lo = pstats->brb_drop_lo;
3577 pstats->host_port_stats_start = ++pstats->host_port_stats_end;
3579 return 0;
3582 static int bnx2x_storm_stats_update(struct bnx2x *bp)
3584 struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
3585 int cl_id = BP_CL_ID(bp);
3586 struct tstorm_per_port_stats *tport =
3587 &stats->tstorm_common.port_statistics;
3588 struct tstorm_per_client_stats *tclient =
3589 &stats->tstorm_common.client_statistics[cl_id];
3590 struct tstorm_per_client_stats *old_tclient = &bp->old_tclient;
3591 struct xstorm_per_client_stats *xclient =
3592 &stats->xstorm_common.client_statistics[cl_id];
3593 struct xstorm_per_client_stats *old_xclient = &bp->old_xclient;
3594 struct host_func_stats *fstats = bnx2x_sp(bp, func_stats);
3595 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3596 u32 diff;
3598 /* are storm stats valid? */
3599 if ((u16)(le16_to_cpu(tclient->stats_counter) + 1) !=
3600 bp->stats_counter) {
3601 DP(BNX2X_MSG_STATS, "stats not updated by tstorm"
3602 " tstorm counter (%d) != stats_counter (%d)\n",
3603 tclient->stats_counter, bp->stats_counter);
3604 return -1;
3606 if ((u16)(le16_to_cpu(xclient->stats_counter) + 1) !=
3607 bp->stats_counter) {
3608 DP(BNX2X_MSG_STATS, "stats not updated by xstorm"
3609 " xstorm counter (%d) != stats_counter (%d)\n",
3610 xclient->stats_counter, bp->stats_counter);
3611 return -2;
3614 fstats->total_bytes_received_hi =
3615 fstats->valid_bytes_received_hi =
3616 le32_to_cpu(tclient->total_rcv_bytes.hi);
3617 fstats->total_bytes_received_lo =
3618 fstats->valid_bytes_received_lo =
3619 le32_to_cpu(tclient->total_rcv_bytes.lo);
3621 estats->error_bytes_received_hi =
3622 le32_to_cpu(tclient->rcv_error_bytes.hi);
3623 estats->error_bytes_received_lo =
3624 le32_to_cpu(tclient->rcv_error_bytes.lo);
3625 ADD_64(estats->error_bytes_received_hi,
3626 estats->rx_stat_ifhcinbadoctets_hi,
3627 estats->error_bytes_received_lo,
3628 estats->rx_stat_ifhcinbadoctets_lo);
3630 ADD_64(fstats->total_bytes_received_hi,
3631 estats->error_bytes_received_hi,
3632 fstats->total_bytes_received_lo,
3633 estats->error_bytes_received_lo);
3635 UPDATE_EXTEND_TSTAT(rcv_unicast_pkts, total_unicast_packets_received);
3636 UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
3637 total_multicast_packets_received);
3638 UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
3639 total_broadcast_packets_received);
3641 fstats->total_bytes_transmitted_hi =
3642 le32_to_cpu(xclient->total_sent_bytes.hi);
3643 fstats->total_bytes_transmitted_lo =
3644 le32_to_cpu(xclient->total_sent_bytes.lo);
3646 UPDATE_EXTEND_XSTAT(unicast_pkts_sent,
3647 total_unicast_packets_transmitted);
3648 UPDATE_EXTEND_XSTAT(multicast_pkts_sent,
3649 total_multicast_packets_transmitted);
3650 UPDATE_EXTEND_XSTAT(broadcast_pkts_sent,
3651 total_broadcast_packets_transmitted);
3653 memcpy(estats, &(fstats->total_bytes_received_hi),
3654 sizeof(struct host_func_stats) - 2*sizeof(u32));
3656 estats->mac_filter_discard = le32_to_cpu(tport->mac_filter_discard);
3657 estats->xxoverflow_discard = le32_to_cpu(tport->xxoverflow_discard);
3658 estats->brb_truncate_discard =
3659 le32_to_cpu(tport->brb_truncate_discard);
3660 estats->mac_discard = le32_to_cpu(tport->mac_discard);
3662 old_tclient->rcv_unicast_bytes.hi =
3663 le32_to_cpu(tclient->rcv_unicast_bytes.hi);
3664 old_tclient->rcv_unicast_bytes.lo =
3665 le32_to_cpu(tclient->rcv_unicast_bytes.lo);
3666 old_tclient->rcv_broadcast_bytes.hi =
3667 le32_to_cpu(tclient->rcv_broadcast_bytes.hi);
3668 old_tclient->rcv_broadcast_bytes.lo =
3669 le32_to_cpu(tclient->rcv_broadcast_bytes.lo);
3670 old_tclient->rcv_multicast_bytes.hi =
3671 le32_to_cpu(tclient->rcv_multicast_bytes.hi);
3672 old_tclient->rcv_multicast_bytes.lo =
3673 le32_to_cpu(tclient->rcv_multicast_bytes.lo);
3674 old_tclient->total_rcv_pkts = le32_to_cpu(tclient->total_rcv_pkts);
3676 old_tclient->checksum_discard = le32_to_cpu(tclient->checksum_discard);
3677 old_tclient->packets_too_big_discard =
3678 le32_to_cpu(tclient->packets_too_big_discard);
3679 estats->no_buff_discard =
3680 old_tclient->no_buff_discard = le32_to_cpu(tclient->no_buff_discard);
3681 old_tclient->ttl0_discard = le32_to_cpu(tclient->ttl0_discard);
3683 old_xclient->total_sent_pkts = le32_to_cpu(xclient->total_sent_pkts);
3684 old_xclient->unicast_bytes_sent.hi =
3685 le32_to_cpu(xclient->unicast_bytes_sent.hi);
3686 old_xclient->unicast_bytes_sent.lo =
3687 le32_to_cpu(xclient->unicast_bytes_sent.lo);
3688 old_xclient->multicast_bytes_sent.hi =
3689 le32_to_cpu(xclient->multicast_bytes_sent.hi);
3690 old_xclient->multicast_bytes_sent.lo =
3691 le32_to_cpu(xclient->multicast_bytes_sent.lo);
3692 old_xclient->broadcast_bytes_sent.hi =
3693 le32_to_cpu(xclient->broadcast_bytes_sent.hi);
3694 old_xclient->broadcast_bytes_sent.lo =
3695 le32_to_cpu(xclient->broadcast_bytes_sent.lo);
3697 fstats->host_func_stats_start = ++fstats->host_func_stats_end;
3699 return 0;
3702 static void bnx2x_net_stats_update(struct bnx2x *bp)
3704 struct tstorm_per_client_stats *old_tclient = &bp->old_tclient;
3705 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3706 struct net_device_stats *nstats = &bp->dev->stats;
3708 nstats->rx_packets =
3709 bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
3710 bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
3711 bnx2x_hilo(&estats->total_broadcast_packets_received_hi);
3713 nstats->tx_packets =
3714 bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
3715 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
3716 bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);
3718 nstats->rx_bytes = bnx2x_hilo(&estats->valid_bytes_received_hi);
3720 nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);
3722 nstats->rx_dropped = old_tclient->checksum_discard +
3723 estats->mac_discard;
3724 nstats->tx_dropped = 0;
3726 nstats->multicast =
3727 bnx2x_hilo(&estats->total_multicast_packets_received_hi);
3729 nstats->collisions =
3730 estats->tx_stat_dot3statssinglecollisionframes_lo +
3731 estats->tx_stat_dot3statsmultiplecollisionframes_lo +
3732 estats->tx_stat_dot3statslatecollisions_lo +
3733 estats->tx_stat_dot3statsexcessivecollisions_lo;
3735 estats->jabber_packets_received =
3736 old_tclient->packets_too_big_discard +
3737 estats->rx_stat_dot3statsframestoolong_lo;
3739 nstats->rx_length_errors =
3740 estats->rx_stat_etherstatsundersizepkts_lo +
3741 estats->jabber_packets_received;
3742 nstats->rx_over_errors = estats->brb_drop_lo + estats->brb_truncate_lo;
3743 nstats->rx_crc_errors = estats->rx_stat_dot3statsfcserrors_lo;
3744 nstats->rx_frame_errors = estats->rx_stat_dot3statsalignmenterrors_lo;
3745 nstats->rx_fifo_errors = old_tclient->no_buff_discard;
3746 nstats->rx_missed_errors = estats->xxoverflow_discard;
3748 nstats->rx_errors = nstats->rx_length_errors +
3749 nstats->rx_over_errors +
3750 nstats->rx_crc_errors +
3751 nstats->rx_frame_errors +
3752 nstats->rx_fifo_errors +
3753 nstats->rx_missed_errors;
3755 nstats->tx_aborted_errors =
3756 estats->tx_stat_dot3statslatecollisions_lo +
3757 estats->tx_stat_dot3statsexcessivecollisions_lo;
3758 nstats->tx_carrier_errors = estats->rx_stat_falsecarriererrors_lo;
3759 nstats->tx_fifo_errors = 0;
3760 nstats->tx_heartbeat_errors = 0;
3761 nstats->tx_window_errors = 0;
3763 nstats->tx_errors = nstats->tx_aborted_errors +
3764 nstats->tx_carrier_errors;
3767 static void bnx2x_stats_update(struct bnx2x *bp)
3769 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3770 int update = 0;
3772 if (*stats_comp != DMAE_COMP_VAL)
3773 return;
3775 if (bp->port.pmf)
3776 update = (bnx2x_hw_stats_update(bp) == 0);
3778 update |= (bnx2x_storm_stats_update(bp) == 0);
3780 if (update)
3781 bnx2x_net_stats_update(bp);
3783 else {
3784 if (bp->stats_pending) {
3785 bp->stats_pending++;
3786 if (bp->stats_pending == 3) {
3787 BNX2X_ERR("stats not updated 3 times in a row\n");
3788 bnx2x_panic();
3789 return;
3794 if (bp->msglevel & NETIF_MSG_TIMER) {
3795 struct tstorm_per_client_stats *old_tclient = &bp->old_tclient;
3796 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3797 struct net_device_stats *nstats = &bp->dev->stats;
3798 int i;
3800 printk(KERN_DEBUG "%s:\n", bp->dev->name);
3801 printk(KERN_DEBUG " tx avail (%4x) tx hc idx (%x)"
3802 " tx pkt (%lx)\n",
3803 bnx2x_tx_avail(bp->fp),
3804 le16_to_cpu(*bp->fp->tx_cons_sb), nstats->tx_packets);
3805 printk(KERN_DEBUG " rx usage (%4x) rx hc idx (%x)"
3806 " rx pkt (%lx)\n",
3807 (u16)(le16_to_cpu(*bp->fp->rx_cons_sb) -
3808 bp->fp->rx_comp_cons),
3809 le16_to_cpu(*bp->fp->rx_cons_sb), nstats->rx_packets);
3810 printk(KERN_DEBUG " %s (Xoff events %u) brb drops %u\n",
3811 netif_queue_stopped(bp->dev) ? "Xoff" : "Xon",
3812 estats->driver_xoff, estats->brb_drop_lo);
3813 printk(KERN_DEBUG "tstats: checksum_discard %u "
3814 "packets_too_big_discard %u no_buff_discard %u "
3815 "mac_discard %u mac_filter_discard %u "
3816 "xxovrflow_discard %u brb_truncate_discard %u "
3817 "ttl0_discard %u\n",
3818 old_tclient->checksum_discard,
3819 old_tclient->packets_too_big_discard,
3820 old_tclient->no_buff_discard, estats->mac_discard,
3821 estats->mac_filter_discard, estats->xxoverflow_discard,
3822 estats->brb_truncate_discard,
3823 old_tclient->ttl0_discard);
3825 for_each_queue(bp, i) {
3826 printk(KERN_DEBUG "[%d]: %lu\t%lu\t%lu\n", i,
3827 bnx2x_fp(bp, i, tx_pkt),
3828 bnx2x_fp(bp, i, rx_pkt),
3829 bnx2x_fp(bp, i, rx_calls));
3833 bnx2x_hw_stats_post(bp);
3834 bnx2x_storm_stats_post(bp);
3837 static void bnx2x_port_stats_stop(struct bnx2x *bp)
3839 struct dmae_command *dmae;
3840 u32 opcode;
3841 int loader_idx = PMF_DMAE_C(bp);
3842 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3844 bp->executer_idx = 0;
3846 opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3847 DMAE_CMD_C_ENABLE |
3848 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3849 #ifdef __BIG_ENDIAN
3850 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3851 #else
3852 DMAE_CMD_ENDIANITY_DW_SWAP |
3853 #endif
3854 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3855 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3857 if (bp->port.port_stx) {
3859 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3860 if (bp->func_stx)
3861 dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
3862 else
3863 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3864 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3865 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3866 dmae->dst_addr_lo = bp->port.port_stx >> 2;
3867 dmae->dst_addr_hi = 0;
3868 dmae->len = sizeof(struct host_port_stats) >> 2;
3869 if (bp->func_stx) {
3870 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3871 dmae->comp_addr_hi = 0;
3872 dmae->comp_val = 1;
3873 } else {
3874 dmae->comp_addr_lo =
3875 U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3876 dmae->comp_addr_hi =
3877 U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3878 dmae->comp_val = DMAE_COMP_VAL;
3880 *stats_comp = 0;
3884 if (bp->func_stx) {
3886 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3887 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3888 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3889 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3890 dmae->dst_addr_lo = bp->func_stx >> 2;
3891 dmae->dst_addr_hi = 0;
3892 dmae->len = sizeof(struct host_func_stats) >> 2;
3893 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3894 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3895 dmae->comp_val = DMAE_COMP_VAL;
3897 *stats_comp = 0;
3901 static void bnx2x_stats_stop(struct bnx2x *bp)
3903 int update = 0;
3905 bnx2x_stats_comp(bp);
3907 if (bp->port.pmf)
3908 update = (bnx2x_hw_stats_update(bp) == 0);
3910 update |= (bnx2x_storm_stats_update(bp) == 0);
3912 if (update) {
3913 bnx2x_net_stats_update(bp);
3915 if (bp->port.pmf)
3916 bnx2x_port_stats_stop(bp);
3918 bnx2x_hw_stats_post(bp);
3919 bnx2x_stats_comp(bp);
3923 static void bnx2x_stats_do_nothing(struct bnx2x *bp)
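/* Statistics state machine: indexed by [current state][event].
 * Each entry names the handler to run and the next state to enter;
 * the events are PMF change, LINK_UP, UPDATE (timer tick) and STOP.
 */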
3927 static const struct {
3928 void (*action)(struct bnx2x *bp);
3929 enum bnx2x_stats_state next_state;
3930 } bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
3931 /* state event */
3933 /* DISABLED PMF */ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
3934 /* LINK_UP */ {bnx2x_stats_start, STATS_STATE_ENABLED},
3935 /* UPDATE */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
3936 /* STOP */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
3939 /* ENABLED PMF */ {bnx2x_stats_pmf_start, STATS_STATE_ENABLED},
3940 /* LINK_UP */ {bnx2x_stats_restart, STATS_STATE_ENABLED},
3941 /* UPDATE */ {bnx2x_stats_update, STATS_STATE_ENABLED},
3942 /* STOP */ {bnx2x_stats_stop, STATS_STATE_DISABLED}
3946 static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
3948 enum bnx2x_stats_state state = bp->stats_state;
3950 bnx2x_stats_stm[state][event].action(bp);
3951 bp->stats_state = bnx2x_stats_stm[state][event].next_state;
3953 if ((event != STATS_EVENT_UPDATE) || (bp->msglevel & NETIF_MSG_TIMER))
3954 DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
3955 state, event, bp->stats_state);
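/* Periodic timer: in poll mode it services queue 0, maintains the
 * driver/MCP heartbeat by advancing drv_pulse_mb and comparing it with
 * mcp_pulse_mb (the delta must be 0 or 1), generates a stats UPDATE
 * event while the device is up and then re-arms itself.
 */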
3958 static void bnx2x_timer(unsigned long data)
3960 struct bnx2x *bp = (struct bnx2x *) data;
3962 if (!netif_running(bp->dev))
3963 return;
3965 if (atomic_read(&bp->intr_sem) != 0)
3966 goto timer_restart;
3968 if (poll) {
3969 struct bnx2x_fastpath *fp = &bp->fp[0];
3970 int rc;
3972 bnx2x_tx_int(fp, 1000);
3973 rc = bnx2x_rx_int(fp, 1000);
3976 if (!BP_NOMCP(bp)) {
3977 int func = BP_FUNC(bp);
3978 u32 drv_pulse;
3979 u32 mcp_pulse;
3981 ++bp->fw_drv_pulse_wr_seq;
3982 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
3983 /* TBD - add SYSTEM_TIME */
3984 drv_pulse = bp->fw_drv_pulse_wr_seq;
3985 SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);
3987 mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
3988 MCP_PULSE_SEQ_MASK);
3989 /* The delta between driver pulse and mcp response
3990 * should be 1 (before mcp response) or 0 (after mcp response) */
3992 if ((drv_pulse != mcp_pulse) &&
3993 (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
3994 /* someone lost a heartbeat... */
3995 BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
3996 drv_pulse, mcp_pulse);
4000 if ((bp->state == BNX2X_STATE_OPEN) ||
4001 (bp->state == BNX2X_STATE_DISABLED))
4002 bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
4004 timer_restart:
4005 mod_timer(&bp->timer, jiffies + bp->current_interval);
4008 /* end of Statistics */
4010 /* nic init */
4013 /* nic init service functions */
4016 static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
4018 int port = BP_PORT(bp);
4020 bnx2x_init_fill(bp, BAR_USTRORM_INTMEM +
4021 USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
4022 sizeof(struct ustorm_status_block)/4);
4023 bnx2x_init_fill(bp, BAR_CSTRORM_INTMEM +
4024 CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
4025 sizeof(struct cstorm_status_block)/4);
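/* Non-default status block init: program the USTORM and CSTORM halves
 * with the host SB address and owning function, start with host
 * coalescing disabled on every index and enable the block in the IGU.
 */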
4028 static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
4029 dma_addr_t mapping, int sb_id)
4031 int port = BP_PORT(bp);
4032 int func = BP_FUNC(bp);
4033 int index;
4034 u64 section;
4036 /* USTORM */
4037 section = ((u64)mapping) + offsetof(struct host_status_block,
4038 u_status_block);
4039 sb->u_status_block.status_block_id = sb_id;
4041 REG_WR(bp, BAR_USTRORM_INTMEM +
4042 USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
4043 REG_WR(bp, BAR_USTRORM_INTMEM +
4044 ((USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
4045 U64_HI(section));
4046 REG_WR8(bp, BAR_USTRORM_INTMEM + FP_USB_FUNC_OFF +
4047 USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);
4049 for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
4050 REG_WR16(bp, BAR_USTRORM_INTMEM +
4051 USTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);
4053 /* CSTORM */
4054 section = ((u64)mapping) + offsetof(struct host_status_block,
4055 c_status_block);
4056 sb->c_status_block.status_block_id = sb_id;
4058 REG_WR(bp, BAR_CSTRORM_INTMEM +
4059 CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
4060 REG_WR(bp, BAR_CSTRORM_INTMEM +
4061 ((CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
4062 U64_HI(section));
4063 REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
4064 CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);
4066 for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
4067 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4068 CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);
4070 bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4073 static void bnx2x_zero_def_sb(struct bnx2x *bp)
4075 int func = BP_FUNC(bp);
4077 bnx2x_init_fill(bp, BAR_USTRORM_INTMEM +
4078 USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4079 sizeof(struct ustorm_def_status_block)/4);
4080 bnx2x_init_fill(bp, BAR_CSTRORM_INTMEM +
4081 CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4082 sizeof(struct cstorm_def_status_block)/4);
4083 bnx2x_init_fill(bp, BAR_XSTRORM_INTMEM +
4084 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4085 sizeof(struct xstorm_def_status_block)/4);
4086 bnx2x_init_fill(bp, BAR_TSTRORM_INTMEM +
4087 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4088 sizeof(struct tstorm_def_status_block)/4);
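/* Default status block init: latch the per-group attention signals from
 * the AEU enable registers, hand the attention section address and SB
 * number to the HC, then program the four storm sections like a regular
 * SB and enable the block in the IGU.
 */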
4091 static void bnx2x_init_def_sb(struct bnx2x *bp,
4092 struct host_def_status_block *def_sb,
4093 dma_addr_t mapping, int sb_id)
4095 int port = BP_PORT(bp);
4096 int func = BP_FUNC(bp);
4097 int index, val, reg_offset;
4098 u64 section;
4100 /* ATTN */
4101 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4102 atten_status_block);
4103 def_sb->atten_status_block.status_block_id = sb_id;
4105 bp->attn_state = 0;
4107 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
4108 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
4110 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
4111 bp->attn_group[index].sig[0] = REG_RD(bp,
4112 reg_offset + 0x10*index);
4113 bp->attn_group[index].sig[1] = REG_RD(bp,
4114 reg_offset + 0x4 + 0x10*index);
4115 bp->attn_group[index].sig[2] = REG_RD(bp,
4116 reg_offset + 0x8 + 0x10*index);
4117 bp->attn_group[index].sig[3] = REG_RD(bp,
4118 reg_offset + 0xc + 0x10*index);
4121 reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
4122 HC_REG_ATTN_MSG0_ADDR_L);
4124 REG_WR(bp, reg_offset, U64_LO(section));
4125 REG_WR(bp, reg_offset + 4, U64_HI(section));
4127 reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);
4129 val = REG_RD(bp, reg_offset);
4130 val |= sb_id;
4131 REG_WR(bp, reg_offset, val);
4133 /* USTORM */
4134 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4135 u_def_status_block);
4136 def_sb->u_def_status_block.status_block_id = sb_id;
4138 REG_WR(bp, BAR_USTRORM_INTMEM +
4139 USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4140 REG_WR(bp, BAR_USTRORM_INTMEM +
4141 ((USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4142 U64_HI(section));
4143 REG_WR8(bp, BAR_USTRORM_INTMEM + DEF_USB_FUNC_OFF +
4144 USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4146 for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
4147 REG_WR16(bp, BAR_USTRORM_INTMEM +
4148 USTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4150 /* CSTORM */
4151 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4152 c_def_status_block);
4153 def_sb->c_def_status_block.status_block_id = sb_id;
4155 REG_WR(bp, BAR_CSTRORM_INTMEM +
4156 CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4157 REG_WR(bp, BAR_CSTRORM_INTMEM +
4158 ((CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4159 U64_HI(section));
4160 REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
4161 CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4163 for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
4164 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4165 CSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4167 /* TSTORM */
4168 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4169 t_def_status_block);
4170 def_sb->t_def_status_block.status_block_id = sb_id;
4172 REG_WR(bp, BAR_TSTRORM_INTMEM +
4173 TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4174 REG_WR(bp, BAR_TSTRORM_INTMEM +
4175 ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4176 U64_HI(section));
4177 REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
4178 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4180 for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
4181 REG_WR16(bp, BAR_TSTRORM_INTMEM +
4182 TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4184 /* XSTORM */
4185 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4186 x_def_status_block);
4187 def_sb->x_def_status_block.status_block_id = sb_id;
4189 REG_WR(bp, BAR_XSTRORM_INTMEM +
4190 XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4191 REG_WR(bp, BAR_XSTRORM_INTMEM +
4192 ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4193 U64_HI(section));
4194 REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
4195 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4197 for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
4198 REG_WR16(bp, BAR_XSTRORM_INTMEM +
4199 XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4201 bp->stats_pending = 0;
4202 bp->set_mac_pending = 0;
4204 bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
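/* Host coalescing: for every queue the rx/tx tick values are scaled
 * (/12) into the storm HC timeout fields of the RX and TX CQ indices;
 * a tick value of 0 disables coalescing on that index instead.
 */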
4207 static void bnx2x_update_coalesce(struct bnx2x *bp)
4209 int port = BP_PORT(bp);
4210 int i;
4212 for_each_queue(bp, i) {
4213 int sb_id = bp->fp[i].sb_id;
4215 /* HC_INDEX_U_ETH_RX_CQ_CONS */
4216 REG_WR8(bp, BAR_USTRORM_INTMEM +
4217 USTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
4218 U_SB_ETH_RX_CQ_INDEX),
4219 bp->rx_ticks/12);
4220 REG_WR16(bp, BAR_USTRORM_INTMEM +
4221 USTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
4222 U_SB_ETH_RX_CQ_INDEX),
4223 bp->rx_ticks ? 0 : 1);
4224 REG_WR16(bp, BAR_USTRORM_INTMEM +
4225 USTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
4226 U_SB_ETH_RX_BD_INDEX),
4227 bp->rx_ticks ? 0 : 1);
4229 /* HC_INDEX_C_ETH_TX_CQ_CONS */
4230 REG_WR8(bp, BAR_CSTRORM_INTMEM +
4231 CSTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
4232 C_SB_ETH_TX_CQ_INDEX),
4233 bp->tx_ticks/12);
4234 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4235 CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
4236 C_SB_ETH_TX_CQ_INDEX),
4237 bp->tx_ticks ? 0 : 1);
4241 static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
4242 struct bnx2x_fastpath *fp, int last)
4244 int i;
4246 for (i = 0; i < last; i++) {
4247 struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]);
4248 struct sk_buff *skb = rx_buf->skb;
4250 if (skb == NULL) {
4251 DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
4252 continue;
4255 if (fp->tpa_state[i] == BNX2X_TPA_START)
4256 pci_unmap_single(bp->pdev,
4257 pci_unmap_addr(rx_buf, mapping),
4258 bp->rx_buf_size,
4259 PCI_DMA_FROMDEVICE);
4261 dev_kfree_skb(skb);
4262 rx_buf->skb = NULL;
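/* RX ring init: the buffer size is the MTU plus RX offset, Ethernet
 * overhead and payload alignment.  With TPA a pool of skbs is
 * pre-allocated per queue (an allocation failure disables TPA on that
 * queue only), the last entries of each BD/SGE/CQE page chain to the
 * next page, and queue 0 additionally publishes its CQ address for the
 * USTORM memory workaround.
 */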
4266 static void bnx2x_init_rx_rings(struct bnx2x *bp)
4268 int func = BP_FUNC(bp);
4269 int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
4270 ETH_MAX_AGGREGATION_QUEUES_E1H;
4271 u16 ring_prod, cqe_ring_prod;
4272 int i, j;
4274 bp->rx_buf_size = bp->dev->mtu;
4275 bp->rx_buf_size += bp->rx_offset + ETH_OVREHEAD +
4276 BCM_RX_ETH_PAYLOAD_ALIGN;
4278 if (bp->flags & TPA_ENABLE_FLAG) {
4279 DP(NETIF_MSG_IFUP,
4280 "rx_buf_size %d effective_mtu %d\n",
4281 bp->rx_buf_size, bp->dev->mtu + ETH_OVREHEAD);
4283 for_each_queue(bp, j) {
4284 struct bnx2x_fastpath *fp = &bp->fp[j];
4286 for (i = 0; i < max_agg_queues; i++) {
4287 fp->tpa_pool[i].skb =
4288 netdev_alloc_skb(bp->dev, bp->rx_buf_size);
4289 if (!fp->tpa_pool[i].skb) {
4290 BNX2X_ERR("Failed to allocate TPA "
4291 "skb pool for queue[%d] - "
4292 "disabling TPA on this "
4293 "queue!\n", j);
4294 bnx2x_free_tpa_pool(bp, fp, i);
4295 fp->disable_tpa = 1;
4296 break;
4298 pci_unmap_addr_set((struct sw_rx_bd *)
4299 &bp->fp->tpa_pool[i],
4300 mapping, 0);
4301 fp->tpa_state[i] = BNX2X_TPA_STOP;
4306 for_each_queue(bp, j) {
4307 struct bnx2x_fastpath *fp = &bp->fp[j];
4309 fp->rx_bd_cons = 0;
4310 fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
4311 fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX;
4313 /* "next page" elements initialization */
4314 /* SGE ring */
4315 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
4316 struct eth_rx_sge *sge;
4318 sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
4319 sge->addr_hi =
4320 cpu_to_le32(U64_HI(fp->rx_sge_mapping +
4321 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4322 sge->addr_lo =
4323 cpu_to_le32(U64_LO(fp->rx_sge_mapping +
4324 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4327 bnx2x_init_sge_ring_bit_mask(fp);
4329 /* RX BD ring */
4330 for (i = 1; i <= NUM_RX_RINGS; i++) {
4331 struct eth_rx_bd *rx_bd;
4333 rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
4334 rx_bd->addr_hi =
4335 cpu_to_le32(U64_HI(fp->rx_desc_mapping +
4336 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
4337 rx_bd->addr_lo =
4338 cpu_to_le32(U64_LO(fp->rx_desc_mapping +
4339 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
4342 /* CQ ring */
4343 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
4344 struct eth_rx_cqe_next_page *nextpg;
4346 nextpg = (struct eth_rx_cqe_next_page *)
4347 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
4348 nextpg->addr_hi =
4349 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
4350 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4351 nextpg->addr_lo =
4352 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
4353 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4356 /* Allocate SGEs and initialize the ring elements */
4357 for (i = 0, ring_prod = 0;
4358 i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
4360 if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
4361 BNX2X_ERR("was only able to allocate "
4362 "%d rx sges\n", i);
4363 BNX2X_ERR("disabling TPA for queue[%d]\n", j);
4364 /* Cleanup already allocated elements */
4365 bnx2x_free_rx_sge_range(bp, fp, ring_prod);
4366 bnx2x_free_tpa_pool(bp, fp, max_agg_queues);
4367 fp->disable_tpa = 1;
4368 ring_prod = 0;
4369 break;
4371 ring_prod = NEXT_SGE_IDX(ring_prod);
4373 fp->rx_sge_prod = ring_prod;
4375 /* Allocate BDs and initialize BD ring */
4376 fp->rx_comp_cons = 0;
4377 cqe_ring_prod = ring_prod = 0;
4378 for (i = 0; i < bp->rx_ring_size; i++) {
4379 if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
4380 BNX2X_ERR("was only able to allocate "
4381 "%d rx skbs\n", i);
4382 bp->eth_stats.rx_skb_alloc_failed++;
4383 break;
4385 ring_prod = NEXT_RX_IDX(ring_prod);
4386 cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
4387 WARN_ON(ring_prod <= i);
4390 fp->rx_bd_prod = ring_prod;
4391 /* must not have more available CQEs than BDs */
4392 fp->rx_comp_prod = min((u16)(NUM_RCQ_RINGS*RCQ_DESC_CNT),
4393 cqe_ring_prod);
4394 fp->rx_pkt = fp->rx_calls = 0;
4396 /* Warning!
4397 * this will generate an interrupt (to the TSTORM)
4398 * must only be done after chip is initialized */
4400 bnx2x_update_rx_prod(bp, fp, ring_prod, fp->rx_comp_prod,
4401 fp->rx_sge_prod);
4402 if (j != 0)
4403 continue;
4405 REG_WR(bp, BAR_USTRORM_INTMEM +
4406 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
4407 U64_LO(fp->rx_comp_mapping));
4408 REG_WR(bp, BAR_USTRORM_INTMEM +
4409 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
4410 U64_HI(fp->rx_comp_mapping));
4414 static void bnx2x_init_tx_ring(struct bnx2x *bp)
4416 int i, j;
4418 for_each_queue(bp, j) {
4419 struct bnx2x_fastpath *fp = &bp->fp[j];
4421 for (i = 1; i <= NUM_TX_RINGS; i++) {
4422 struct eth_tx_bd *tx_bd =
4423 &fp->tx_desc_ring[TX_DESC_CNT * i - 1];
4425 tx_bd->addr_hi =
4426 cpu_to_le32(U64_HI(fp->tx_desc_mapping +
4427 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
4428 tx_bd->addr_lo =
4429 cpu_to_le32(U64_LO(fp->tx_desc_mapping +
4430 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
4433 fp->tx_pkt_prod = 0;
4434 fp->tx_pkt_cons = 0;
4435 fp->tx_bd_prod = 0;
4436 fp->tx_bd_cons = 0;
4437 fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
4438 fp->tx_pkt = 0;
4442 static void bnx2x_init_sp_ring(struct bnx2x *bp)
4444 int func = BP_FUNC(bp);
4446 spin_lock_init(&bp->spq_lock);
4448 bp->spq_left = MAX_SPQ_PENDING;
4449 bp->spq_prod_idx = 0;
4450 bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
4451 bp->spq_prod_bd = bp->spq;
4452 bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
4454 REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
4455 U64_LO(bp->spq_mapping));
4456 REG_WR(bp,
4457 XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
4458 U64_HI(bp->spq_mapping));
4460 REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
4461 bp->spq_prod_idx);
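/* Per-connection ETH context: XSTORM gets the TX BD page base and
 * doorbell data address, USTORM gets the RX BD (and, with TPA, SGE)
 * page bases and buffer sizes, CSTORM gets the TX CQ index, and the
 * CDU reserved fields encode the HW CID of the connection.
 */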
4464 static void bnx2x_init_context(struct bnx2x *bp)
4466 int i;
4468 for_each_queue(bp, i) {
4469 struct eth_context *context = bnx2x_sp(bp, context[i].eth);
4470 struct bnx2x_fastpath *fp = &bp->fp[i];
4471 u8 sb_id = FP_SB_ID(fp);
4473 context->xstorm_st_context.tx_bd_page_base_hi =
4474 U64_HI(fp->tx_desc_mapping);
4475 context->xstorm_st_context.tx_bd_page_base_lo =
4476 U64_LO(fp->tx_desc_mapping);
4477 context->xstorm_st_context.db_data_addr_hi =
4478 U64_HI(fp->tx_prods_mapping);
4479 context->xstorm_st_context.db_data_addr_lo =
4480 U64_LO(fp->tx_prods_mapping);
4481 context->xstorm_st_context.statistics_data = (BP_CL_ID(bp) |
4482 XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);
4484 context->ustorm_st_context.common.sb_index_numbers =
4485 BNX2X_RX_SB_INDEX_NUM;
4486 context->ustorm_st_context.common.clientId = FP_CL_ID(fp);
4487 context->ustorm_st_context.common.status_block_id = sb_id;
4488 context->ustorm_st_context.common.flags =
4489 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT;
4490 context->ustorm_st_context.common.mc_alignment_size =
4491 BCM_RX_ETH_PAYLOAD_ALIGN;
4492 context->ustorm_st_context.common.bd_buff_size =
4493 bp->rx_buf_size;
4494 context->ustorm_st_context.common.bd_page_base_hi =
4495 U64_HI(fp->rx_desc_mapping);
4496 context->ustorm_st_context.common.bd_page_base_lo =
4497 U64_LO(fp->rx_desc_mapping);
4498 if (!fp->disable_tpa) {
4499 context->ustorm_st_context.common.flags |=
4500 (USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA |
4501 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_SGE_RING);
4502 context->ustorm_st_context.common.sge_buff_size =
4503 (u16)(BCM_PAGE_SIZE*PAGES_PER_SGE);
4504 context->ustorm_st_context.common.sge_page_base_hi =
4505 U64_HI(fp->rx_sge_mapping);
4506 context->ustorm_st_context.common.sge_page_base_lo =
4507 U64_LO(fp->rx_sge_mapping);
4510 context->cstorm_st_context.sb_index_number =
4511 C_SB_ETH_TX_CQ_INDEX;
4512 context->cstorm_st_context.status_block_id = sb_id;
4514 context->xstorm_ag_context.cdu_reserved =
4515 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
4516 CDU_REGION_NUMBER_XCM_AG,
4517 ETH_CONNECTION_TYPE);
4518 context->ustorm_ag_context.cdu_usage =
4519 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
4520 CDU_REGION_NUMBER_UCM_AG,
4521 ETH_CONNECTION_TYPE);
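/* RSS indirection table: written only in multi-queue mode; each TSTORM
 * entry maps to one of the client IDs, spreading flows round-robin
 * across num_queues.
 */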
4525 static void bnx2x_init_ind_table(struct bnx2x *bp)
4527 int func = BP_FUNC(bp);
4528 int i;
4530 if (!is_multi(bp))
4531 return;
4533 DP(NETIF_MSG_IFUP, "Initializing indirection table\n");
4534 for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
4535 REG_WR8(bp, BAR_TSTRORM_INTMEM +
4536 TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
4537 BP_CL_ID(bp) + (i % bp->num_queues));
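/* Per-client TSTORM config: carries the device MTU (presumably what the
 * firmware checks against when counting packets_too_big_discard), the
 * statistics counter ID, optional VLAN removal and, when TPA is active,
 * the per-packet SGE limit.
 */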
4540 static void bnx2x_set_client_config(struct bnx2x *bp)
4542 struct tstorm_eth_client_config tstorm_client = {0};
4543 int port = BP_PORT(bp);
4544 int i;
4546 tstorm_client.mtu = bp->dev->mtu;
4547 tstorm_client.statistics_counter_id = BP_CL_ID(bp);
4548 tstorm_client.config_flags =
4549 TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE;
4550 #ifdef BCM_VLAN
4551 if (bp->rx_mode && bp->vlgrp && (bp->flags & HW_VLAN_RX_FLAG)) {
4552 tstorm_client.config_flags |=
4553 TSTORM_ETH_CLIENT_CONFIG_VLAN_REMOVAL_ENABLE;
4554 DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
4556 #endif
4558 if (bp->flags & TPA_ENABLE_FLAG) {
4559 tstorm_client.max_sges_for_packet =
4560 SGE_PAGE_ALIGN(tstorm_client.mtu) >> SGE_PAGE_SHIFT;
4561 tstorm_client.max_sges_for_packet =
4562 ((tstorm_client.max_sges_for_packet +
4563 PAGES_PER_SGE - 1) & (~(PAGES_PER_SGE - 1))) >>
4564 PAGES_PER_SGE_SHIFT;
4566 tstorm_client.config_flags |=
4567 TSTORM_ETH_CLIENT_CONFIG_ENABLE_SGE_RING;
4570 for_each_queue(bp, i) {
4571 REG_WR(bp, BAR_TSTRORM_INTMEM +
4572 TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
4573 ((u32 *)&tstorm_client)[0]);
4574 REG_WR(bp, BAR_TSTRORM_INTMEM +
4575 TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
4576 ((u32 *)&tstorm_client)[1]);
4579 DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
4580 ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
4583 static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
4585 struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
4586 int mode = bp->rx_mode;
4587 int mask = (1 << BP_L_ID(bp));
4588 int func = BP_FUNC(bp);
4589 int i;
4591 DP(NETIF_MSG_IFUP, "rx mode %d mask 0x%x\n", mode, mask);
4593 switch (mode) {
4594 case BNX2X_RX_MODE_NONE: /* no Rx */
4595 tstorm_mac_filter.ucast_drop_all = mask;
4596 tstorm_mac_filter.mcast_drop_all = mask;
4597 tstorm_mac_filter.bcast_drop_all = mask;
4598 break;
4599 case BNX2X_RX_MODE_NORMAL:
4600 tstorm_mac_filter.bcast_accept_all = mask;
4601 break;
4602 case BNX2X_RX_MODE_ALLMULTI:
4603 tstorm_mac_filter.mcast_accept_all = mask;
4604 tstorm_mac_filter.bcast_accept_all = mask;
4605 break;
4606 case BNX2X_RX_MODE_PROMISC:
4607 tstorm_mac_filter.ucast_accept_all = mask;
4608 tstorm_mac_filter.mcast_accept_all = mask;
4609 tstorm_mac_filter.bcast_accept_all = mask;
4610 break;
4611 default:
4612 BNX2X_ERR("BAD rx mode (%d)\n", mode);
4613 break;
4616 for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
4617 REG_WR(bp, BAR_TSTRORM_INTMEM +
4618 TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
4619 ((u32 *)&tstorm_mac_filter)[i]);
4621 /* DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
4622 ((u32 *)&tstorm_mac_filter)[i]); */
4625 if (mode != BNX2X_RX_MODE_NONE)
4626 bnx2x_set_client_config(bp);
4629 static void bnx2x_init_internal_common(struct bnx2x *bp)
4631 int i;
4633 if (bp->flags & TPA_ENABLE_FLAG) {
4634 struct tstorm_eth_tpa_exist tpa = {0};
4636 tpa.tpa_exist = 1;
4638 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET,
4639 ((u32 *)&tpa)[0]);
4640 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET + 4,
4641 ((u32 *)&tpa)[1]);
4644 /* Zero this manually as its initialization is
4645 currently missing in the initTool */
4646 for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
4647 REG_WR(bp, BAR_USTRORM_INTMEM +
4648 USTORM_AGG_DATA_OFFSET + i * 4, 0);
4651 static void bnx2x_init_internal_port(struct bnx2x *bp)
4653 int port = BP_PORT(bp);
4655 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4656 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4657 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4658 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4661 static void bnx2x_init_internal_func(struct bnx2x *bp)
4663 struct tstorm_eth_function_common_config tstorm_config = {0};
4664 struct stats_indication_flags stats_flags = {0};
4665 int port = BP_PORT(bp);
4666 int func = BP_FUNC(bp);
4667 int i;
4668 u16 max_agg_size;
4670 if (is_multi(bp)) {
4671 tstorm_config.config_flags = MULTI_FLAGS;
4672 tstorm_config.rss_result_mask = MULTI_MASK;
4675 tstorm_config.leading_client_id = BP_L_ID(bp);
4677 REG_WR(bp, BAR_TSTRORM_INTMEM +
4678 TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
4679 (*(u32 *)&tstorm_config));
4681 bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
4682 bnx2x_set_storm_rx_mode(bp);
4684 /* reset xstorm per client statistics */
4685 for (i = 0; i < sizeof(struct xstorm_per_client_stats) / 4; i++) {
4686 REG_WR(bp, BAR_XSTRORM_INTMEM +
4687 XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, BP_CL_ID(bp)) +
4688 i*4, 0);
4690 /* reset tstorm per client statistics */
4691 for (i = 0; i < sizeof(struct tstorm_per_client_stats) / 4; i++) {
4692 REG_WR(bp, BAR_TSTRORM_INTMEM +
4693 TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, BP_CL_ID(bp)) +
4694 i*4, 0);
4697 /* Init statistics related context */
4698 stats_flags.collect_eth = 1;
4700 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
4701 ((u32 *)&stats_flags)[0]);
4702 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
4703 ((u32 *)&stats_flags)[1]);
4705 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
4706 ((u32 *)&stats_flags)[0]);
4707 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
4708 ((u32 *)&stats_flags)[1]);
4710 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
4711 ((u32 *)&stats_flags)[0]);
4712 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
4713 ((u32 *)&stats_flags)[1]);
4715 REG_WR(bp, BAR_XSTRORM_INTMEM +
4716 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
4717 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
4718 REG_WR(bp, BAR_XSTRORM_INTMEM +
4719 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
4720 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
4722 REG_WR(bp, BAR_TSTRORM_INTMEM +
4723 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
4724 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
4725 REG_WR(bp, BAR_TSTRORM_INTMEM +
4726 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
4727 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
4729 if (CHIP_IS_E1H(bp)) {
4730 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
4731 IS_E1HMF(bp));
4732 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
4733 IS_E1HMF(bp));
4734 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
4735 IS_E1HMF(bp));
4736 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
4737 IS_E1HMF(bp));
4739 REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
4740 bp->e1hov);
4743 /* Init CQ ring mapping and aggregation size, the FW limit is 8 frags */
4744 max_agg_size =
4745 min((u32)(min((u32)8, (u32)MAX_SKB_FRAGS) *
4746 SGE_PAGE_SIZE * PAGES_PER_SGE),
4747 (u32)0xffff);
4748 for_each_queue(bp, i) {
4749 struct bnx2x_fastpath *fp = &bp->fp[i];
4751 REG_WR(bp, BAR_USTRORM_INTMEM +
4752 USTORM_CQE_PAGE_BASE_OFFSET(port, FP_CL_ID(fp)),
4753 U64_LO(fp->rx_comp_mapping));
4754 REG_WR(bp, BAR_USTRORM_INTMEM +
4755 USTORM_CQE_PAGE_BASE_OFFSET(port, FP_CL_ID(fp)) + 4,
4756 U64_HI(fp->rx_comp_mapping));
4758 REG_WR16(bp, BAR_USTRORM_INTMEM +
4759 USTORM_MAX_AGG_SIZE_OFFSET(port, FP_CL_ID(fp)),
4760 max_agg_size);
4764 static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
4766 switch (load_code) {
4767 case FW_MSG_CODE_DRV_LOAD_COMMON:
4768 bnx2x_init_internal_common(bp);
4769 /* no break */
4771 case FW_MSG_CODE_DRV_LOAD_PORT:
4772 bnx2x_init_internal_port(bp);
4773 /* no break */
4775 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
4776 bnx2x_init_internal_func(bp);
4777 break;
4779 default:
4780 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
4781 break;
4785 static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
4787 int i;
4789 for_each_queue(bp, i) {
4790 struct bnx2x_fastpath *fp = &bp->fp[i];
4792 fp->bp = bp;
4793 fp->state = BNX2X_FP_STATE_CLOSED;
4794 fp->index = i;
4795 fp->cl_id = BP_L_ID(bp) + i;
4796 fp->sb_id = fp->cl_id;
4797 DP(NETIF_MSG_IFUP,
4798 "bnx2x_init_sb(%p,%p) index %d cl_id %d sb %d\n",
4799 bp, fp->status_blk, i, FP_CL_ID(fp), FP_SB_ID(fp));
4800 bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping,
4801 FP_SB_ID(fp));
4802 bnx2x_update_fpsb_idx(fp);
4805 bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping,
4806 DEF_SB_ID);
4807 bnx2x_update_dsb_idx(bp);
4808 bnx2x_update_coalesce(bp);
4809 bnx2x_init_rx_rings(bp);
4810 bnx2x_init_tx_ring(bp);
4811 bnx2x_init_sp_ring(bp);
4812 bnx2x_init_context(bp);
4813 bnx2x_init_internal(bp, load_code);
4814 bnx2x_init_ind_table(bp);
4815 bnx2x_int_enable(bp);
4818 /* end of nic init */
4821 /* gzip service functions */
4824 static int bnx2x_gunzip_init(struct bnx2x *bp)
4826 bp->gunzip_buf = pci_alloc_consistent(bp->pdev, FW_BUF_SIZE,
4827 &bp->gunzip_mapping);
4828 if (bp->gunzip_buf == NULL)
4829 goto gunzip_nomem1;
4831 bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
4832 if (bp->strm == NULL)
4833 goto gunzip_nomem2;
4835 bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
4836 GFP_KERNEL);
4837 if (bp->strm->workspace == NULL)
4838 goto gunzip_nomem3;
4840 return 0;
4842 gunzip_nomem3:
4843 kfree(bp->strm);
4844 bp->strm = NULL;
4846 gunzip_nomem2:
4847 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
4848 bp->gunzip_mapping);
4849 bp->gunzip_buf = NULL;
4851 gunzip_nomem1:
4852 printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for"
4853 " un-compression\n", bp->dev->name);
4854 return -ENOMEM;
4857 static void bnx2x_gunzip_end(struct bnx2x *bp)
4859 kfree(bp->strm->workspace);
4861 kfree(bp->strm);
4862 bp->strm = NULL;
4864 if (bp->gunzip_buf) {
4865 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
4866 bp->gunzip_mapping);
4867 bp->gunzip_buf = NULL;
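/* Firmware decompression: the gzip magic is verified and the 10-byte
 * header (plus an optional embedded file name) is skipped by hand, then
 * the payload is inflated as a raw deflate stream (negative window
 * bits) into the pre-allocated gunzip buffer; the output length is
 * finally converted to 32-bit words.
 */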
4871 static int bnx2x_gunzip(struct bnx2x *bp, u8 *zbuf, int len)
4873 int n, rc;
4875 /* check gzip header */
4876 if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED))
4877 return -EINVAL;
4879 n = 10;
4881 #define FNAME 0x8
4883 if (zbuf[3] & FNAME)
4884 while ((zbuf[n++] != 0) && (n < len));
4886 bp->strm->next_in = zbuf + n;
4887 bp->strm->avail_in = len - n;
4888 bp->strm->next_out = bp->gunzip_buf;
4889 bp->strm->avail_out = FW_BUF_SIZE;
4891 rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
4892 if (rc != Z_OK)
4893 return rc;
4895 rc = zlib_inflate(bp->strm, Z_FINISH);
4896 if ((rc != Z_OK) && (rc != Z_STREAM_END))
4897 printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
4898 bp->dev->name, bp->strm->msg);
4900 bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
4901 if (bp->gunzip_outlen & 0x3)
4902 printk(KERN_ERR PFX "%s: Firmware decompression error:"
4903 " gunzip_outlen (%d) not aligned\n",
4904 bp->dev->name, bp->gunzip_outlen);
4905 bp->gunzip_outlen >>= 2;
4907 zlib_inflateEnd(bp->strm);
4909 if (rc == Z_STREAM_END)
4910 return 0;
4912 return rc;
4915 /* nic load/unload */
4918 /* General service functions */
4921 /* send a NIG loopback debug packet */
4922 static void bnx2x_lb_pckt(struct bnx2x *bp)
4924 u32 wb_write[3];
4926 /* Ethernet source and destination addresses */
4927 wb_write[0] = 0x55555555;
4928 wb_write[1] = 0x55555555;
4929 wb_write[2] = 0x20; /* SOP */
4930 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
4932 /* NON-IP protocol */
4933 wb_write[0] = 0x09000000;
4934 wb_write[1] = 0x55555555;
4935 wb_write[2] = 0x10; /* EOP, eop_bvalid = 0 */
4936 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
4939 /* some of the internal memories
4940 * are not directly readable from the driver
4941 * to test them we send debug packets */
4943 static int bnx2x_int_mem_test(struct bnx2x *bp)
4945 int factor;
4946 int count, i;
4947 u32 val = 0;
4949 if (CHIP_REV_IS_FPGA(bp))
4950 factor = 120;
4951 else if (CHIP_REV_IS_EMUL(bp))
4952 factor = 200;
4953 else
4954 factor = 1;
4956 DP(NETIF_MSG_HW, "start part1\n");
4958 /* Disable inputs of parser neighbor blocks */
4959 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
4960 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
4961 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
4962 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
4964 /* Write 0 to parser credits for CFC search request */
4965 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
4967 /* send Ethernet packet */
4968 bnx2x_lb_pckt(bp);
4970 /* TODO do i reset NIG statistic? */
4971 /* Wait until NIG register shows 1 packet of size 0x10 */
4972 count = 1000 * factor;
4973 while (count) {
4975 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
4976 val = *bnx2x_sp(bp, wb_data[0]);
4977 if (val == 0x10)
4978 break;
4980 msleep(10);
4981 count--;
4983 if (val != 0x10) {
4984 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
4985 return -1;
4988 /* Wait until PRS register shows 1 packet */
4989 count = 1000 * factor;
4990 while (count) {
4991 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
4992 if (val == 1)
4993 break;
4995 msleep(10);
4996 count--;
4998 if (val != 0x1) {
4999 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5000 return -2;
5003 /* Reset and init BRB, PRS */
5004 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
5005 msleep(50);
5006 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
5007 msleep(50);
5008 bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
5009 bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
5011 DP(NETIF_MSG_HW, "part2\n");
5013 /* Disable inputs of parser neighbor blocks */
5014 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
5015 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
5016 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
5017 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
5019 /* Write 0 to parser credits for CFC search request */
5020 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
5022 /* send 10 Ethernet packets */
5023 for (i = 0; i < 10; i++)
5024 bnx2x_lb_pckt(bp);
5026 /* Wait until NIG register shows 10 + 1
5027 packets of size 11*0x10 = 0xb0 */
5028 count = 1000 * factor;
5029 while (count) {
5031 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5032 val = *bnx2x_sp(bp, wb_data[0]);
5033 if (val == 0xb0)
5034 break;
5036 msleep(10);
5037 count--;
5039 if (val != 0xb0) {
5040 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
5041 return -3;
5044 /* Wait until PRS register shows 2 packets */
5045 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5046 if (val != 2)
5047 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5049 /* Write 1 to parser credits for CFC search request */
5050 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
5052 /* Wait until PRS register shows 3 packets */
5053 msleep(10 * factor);
5054 /* PRS register should now show 3 packets */
5055 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5056 if (val != 3)
5057 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5059 /* clear NIG EOP FIFO */
5060 for (i = 0; i < 11; i++)
5061 REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
5062 val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
5063 if (val != 1) {
5064 BNX2X_ERR("clear of NIG failed\n");
5065 return -4;
5068 /* Reset and init BRB, PRS, NIG */
5069 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
5070 msleep(50);
5071 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
5072 msleep(50);
5073 bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
5074 bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
5075 #ifndef BCM_ISCSI
5076 /* set NIC mode */
5077 REG_WR(bp, PRS_REG_NIC_MODE, 1);
5078 #endif
5080 /* Enable inputs of parser neighbor blocks */
5081 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
5082 REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
5083 REG_WR(bp, CFC_REG_DEBUG0, 0x0);
5084 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);
5086 DP(NETIF_MSG_HW, "done\n");
5088 return 0; /* OK */
5091 static void enable_blocks_attention(struct bnx2x *bp)
5093 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5094 REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
5095 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5096 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
5097 REG_WR(bp, QM_REG_QM_INT_MASK, 0);
5098 REG_WR(bp, TM_REG_TM_INT_MASK, 0);
5099 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
5100 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
5101 REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
5102 /* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
5103 /* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
5104 REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
5105 REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
5106 REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
5107 /* REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
5108 /* REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
5109 REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
5110 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
5111 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
5112 REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
5113 /* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
5114 /* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
5115 if (CHIP_REV_IS_FPGA(bp))
5116 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
5117 else
5118 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
5119 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
5120 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
5121 REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
5122 /* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
5123 /* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
5124 REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
5125 REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
5126 /* REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
5127 REG_WR(bp, PBF_REG_PBF_INT_MASK, 0X18); /* bit 3,4 masked */
5131 static int bnx2x_init_common(struct bnx2x *bp)
5133 u32 val, i;
5135 DP(BNX2X_MSG_MCP, "starting common init func %d\n", BP_FUNC(bp));
5137 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
5138 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);
5140 bnx2x_init_block(bp, MISC_COMMON_START, MISC_COMMON_END);
5141 if (CHIP_IS_E1H(bp))
5142 REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp));
5144 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
5145 msleep(30);
5146 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);
5148 bnx2x_init_block(bp, PXP_COMMON_START, PXP_COMMON_END);
5149 if (CHIP_IS_E1(bp)) {
5150 /* enable HW interrupt from PXP on USDM overflow
5151 bit 16 on INT_MASK_0 */
5152 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5155 bnx2x_init_block(bp, PXP2_COMMON_START, PXP2_COMMON_END);
5156 bnx2x_init_pxp(bp);
5158 #ifdef __BIG_ENDIAN
5159 REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
5160 REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
5161 REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
5162 REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
5163 REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
5165 /* REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
5166 REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
5167 REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
5168 REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
5169 REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
5170 #endif
5172 REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
5173 #ifdef BCM_ISCSI
5174 REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
5175 REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
5176 REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
5177 #endif
5179 if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
5180 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
5182 /* let the HW do its magic ... */
5183 msleep(100);
5184 /* finish PXP init */
5185 val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
5186 if (val != 1) {
5187 BNX2X_ERR("PXP2 CFG failed\n");
5188 return -EBUSY;
5190 val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
5191 if (val != 1) {
5192 BNX2X_ERR("PXP2 RD_INIT failed\n");
5193 return -EBUSY;
5196 REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
5197 REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
5199 bnx2x_init_block(bp, DMAE_COMMON_START, DMAE_COMMON_END);
5201 /* clean the DMAE memory */
5202 bp->dmae_ready = 1;
5203 bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);
5205 bnx2x_init_block(bp, TCM_COMMON_START, TCM_COMMON_END);
5206 bnx2x_init_block(bp, UCM_COMMON_START, UCM_COMMON_END);
5207 bnx2x_init_block(bp, CCM_COMMON_START, CCM_COMMON_END);
5208 bnx2x_init_block(bp, XCM_COMMON_START, XCM_COMMON_END);
5210 bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
5211 bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
5212 bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
5213 bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
5215 bnx2x_init_block(bp, QM_COMMON_START, QM_COMMON_END);
5216 /* soft reset pulse */
5217 REG_WR(bp, QM_REG_SOFT_RESET, 1);
5218 REG_WR(bp, QM_REG_SOFT_RESET, 0);
5220 #ifdef BCM_ISCSI
5221 bnx2x_init_block(bp, TIMERS_COMMON_START, TIMERS_COMMON_END);
5222 #endif
5224 bnx2x_init_block(bp, DQ_COMMON_START, DQ_COMMON_END);
5225 REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT);
5226 if (!CHIP_REV_IS_SLOW(bp)) {
5227 /* enable hw interrupt from doorbell Q */
5228 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5231 bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
5232 if (CHIP_REV_IS_SLOW(bp)) {
5233 /* fix for emulation and FPGA for no pause */
5234 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0, 513);
5235 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_1, 513);
5236 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0, 0);
5237 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_1, 0);
5240 bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
5241 REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
5242 /* set NIC mode */
5243 REG_WR(bp, PRS_REG_NIC_MODE, 1);
5244 if (CHIP_IS_E1H(bp))
5245 REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));
5247 bnx2x_init_block(bp, TSDM_COMMON_START, TSDM_COMMON_END);
5248 bnx2x_init_block(bp, CSDM_COMMON_START, CSDM_COMMON_END);
5249 bnx2x_init_block(bp, USDM_COMMON_START, USDM_COMMON_END);
5250 bnx2x_init_block(bp, XSDM_COMMON_START, XSDM_COMMON_END);
5252 if (CHIP_IS_E1H(bp)) {
5253 bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0,
5254 STORM_INTMEM_SIZE_E1H/2);
5255 bnx2x_init_fill(bp,
5256 TSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5257 0, STORM_INTMEM_SIZE_E1H/2);
5258 bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0,
5259 STORM_INTMEM_SIZE_E1H/2);
5260 bnx2x_init_fill(bp,
5261 CSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5262 0, STORM_INTMEM_SIZE_E1H/2);
5263 bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0,
5264 STORM_INTMEM_SIZE_E1H/2);
5265 bnx2x_init_fill(bp,
5266 XSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5267 0, STORM_INTMEM_SIZE_E1H/2);
5268 bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0,
5269 STORM_INTMEM_SIZE_E1H/2);
5270 bnx2x_init_fill(bp,
5271 USTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5272 0, STORM_INTMEM_SIZE_E1H/2);
5273 } else { /* E1 */
5274 bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0,
5275 STORM_INTMEM_SIZE_E1);
5276 bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0,
5277 STORM_INTMEM_SIZE_E1);
5278 bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0,
5279 STORM_INTMEM_SIZE_E1);
5280 bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0,
5281 STORM_INTMEM_SIZE_E1);
5284 bnx2x_init_block(bp, TSEM_COMMON_START, TSEM_COMMON_END);
5285 bnx2x_init_block(bp, USEM_COMMON_START, USEM_COMMON_END);
5286 bnx2x_init_block(bp, CSEM_COMMON_START, CSEM_COMMON_END);
5287 bnx2x_init_block(bp, XSEM_COMMON_START, XSEM_COMMON_END);
5289 /* sync semi rtc */
5290 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5291 0x80000000);
5292 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
5293 0x80000000);
5295 bnx2x_init_block(bp, UPB_COMMON_START, UPB_COMMON_END);
5296 bnx2x_init_block(bp, XPB_COMMON_START, XPB_COMMON_END);
5297 bnx2x_init_block(bp, PBF_COMMON_START, PBF_COMMON_END);
5299 REG_WR(bp, SRC_REG_SOFT_RST, 1);
5300 for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4) {
5301 REG_WR(bp, i, 0xc0cac01a);
5302 /* TODO: replace with something meaningful */
5304 if (CHIP_IS_E1H(bp))
5305 bnx2x_init_block(bp, SRCH_COMMON_START, SRCH_COMMON_END);
5306 REG_WR(bp, SRC_REG_SOFT_RST, 0);
5308 if (sizeof(union cdu_context) != 1024)
5309 /* we currently assume that a context is 1024 bytes */
5310 printk(KERN_ALERT PFX "please adjust the size of"
5311 " cdu_context(%ld)\n", (long)sizeof(union cdu_context));
5313 bnx2x_init_block(bp, CDU_COMMON_START, CDU_COMMON_END);
5314 val = (4 << 24) + (0 << 12) + 1024;
5315 REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
5316 if (CHIP_IS_E1(bp)) {
5317 /* !!! fix pxp client credit until excel update */
5318 REG_WR(bp, CDU_REG_CDU_DEBUG, 0x264);
5319 REG_WR(bp, CDU_REG_CDU_DEBUG, 0);
5322 bnx2x_init_block(bp, CFC_COMMON_START, CFC_COMMON_END);
5323 REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
5325 bnx2x_init_block(bp, HC_COMMON_START, HC_COMMON_END);
5326 bnx2x_init_block(bp, MISC_AEU_COMMON_START, MISC_AEU_COMMON_END);
5328 /* PXPCS COMMON comes here */
5329 /* Reset PCIE errors for debug */
5330 REG_WR(bp, 0x2814, 0xffffffff);
5331 REG_WR(bp, 0x3820, 0xffffffff);
5333 /* EMAC0 COMMON comes here */
5334 /* EMAC1 COMMON comes here */
5335 /* DBU COMMON comes here */
5336 /* DBG COMMON comes here */
5338 bnx2x_init_block(bp, NIG_COMMON_START, NIG_COMMON_END);
5339 if (CHIP_IS_E1H(bp)) {
5340 REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp));
5341 REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp));
5344 if (CHIP_REV_IS_SLOW(bp))
5345 msleep(200);
5347 /* finish CFC init */
5348 val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
5349 if (val != 1) {
5350 BNX2X_ERR("CFC LL_INIT failed\n");
5351 return -EBUSY;
5353 val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
5354 if (val != 1) {
5355 BNX2X_ERR("CFC AC_INIT failed\n");
5356 return -EBUSY;
5358 val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
5359 if (val != 1) {
5360 BNX2X_ERR("CFC CAM_INIT failed\n");
5361 return -EBUSY;
5363 REG_WR(bp, CFC_REG_DEBUG0, 0);
5365 /* read NIG statistic
5366 to see if this is our first up since powerup */
5367 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5368 val = *bnx2x_sp(bp, wb_data[0]);
5370 /* do internal memory self test */
5371 if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) {
5372 BNX2X_ERR("internal mem self test failed\n");
5373 return -EBUSY;
5376 switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
5377 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1021G:
5378 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
5379 /* Fan failure is indicated by SPIO 5 */
5380 bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
5381 MISC_REGISTERS_SPIO_INPUT_HI_Z);
5383 /* set to active low mode */
5384 val = REG_RD(bp, MISC_REG_SPIO_INT);
5385 val |= ((1 << MISC_REGISTERS_SPIO_5) <<
5386 MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
5387 REG_WR(bp, MISC_REG_SPIO_INT, val);
5389 /* enable interrupt to signal the IGU */
5390 val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
5391 val |= (1 << MISC_REGISTERS_SPIO_5);
5392 REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
5393 break;
5395 default:
5396 break;
5399 /* clear PXP2 attentions */
5400 REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
5402 enable_blocks_attention(bp);
5404 if (!BP_NOMCP(bp)) {
5405 bnx2x_acquire_phy_lock(bp);
5406 bnx2x_common_init_phy(bp, bp->common.shmem_base);
5407 bnx2x_release_phy_lock(bp);
5408 } else
5409 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
5411 return 0;
5414 static int bnx2x_init_port(struct bnx2x *bp)
5416 int port = BP_PORT(bp);
5417 u32 val;
5419 DP(BNX2X_MSG_MCP, "starting port init port %x\n", port);
5421 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
5423 /* Port PXP comes here */
5424 /* Port PXP2 comes here */
5425 #ifdef BCM_ISCSI
5426 /* Port0 1
5427 * Port1 385 */
5428 i++;
5429 wb_write[0] = ONCHIP_ADDR1(bp->timers_mapping);
5430 wb_write[1] = ONCHIP_ADDR2(bp->timers_mapping);
5431 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5432 REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));
5434 /* Port0 2
5435 * Port1 386 */
5436 i++;
5437 wb_write[0] = ONCHIP_ADDR1(bp->qm_mapping);
5438 wb_write[1] = ONCHIP_ADDR2(bp->qm_mapping);
5439 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5440 REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));
5442 /* Port0 3
5443 * Port1 387 */
5444 i++;
5445 wb_write[0] = ONCHIP_ADDR1(bp->t1_mapping);
5446 wb_write[1] = ONCHIP_ADDR2(bp->t1_mapping);
5447 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5448 REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
5449 #endif
5450 /* Port CMs come here */
5452 /* Port QM comes here */
5453 #ifdef BCM_ISCSI
5454 REG_WR(bp, TM_REG_LIN0_SCAN_TIME + func*4, 1024/64*20);
5455 REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + func*4, 31);
5457 bnx2x_init_block(bp, func ? TIMERS_PORT1_START : TIMERS_PORT0_START,
5458 func ? TIMERS_PORT1_END : TIMERS_PORT0_END);
5459 #endif
5460 /* Port DQ comes here */
5461 /* Port BRB1 comes here */
5462 /* Port PRS comes here */
5463 /* Port TSDM comes here */
5464 /* Port CSDM comes here */
5465 /* Port USDM comes here */
5466 /* Port XSDM comes here */
5467 bnx2x_init_block(bp, port ? TSEM_PORT1_START : TSEM_PORT0_START,
5468 port ? TSEM_PORT1_END : TSEM_PORT0_END);
5469 bnx2x_init_block(bp, port ? USEM_PORT1_START : USEM_PORT0_START,
5470 port ? USEM_PORT1_END : USEM_PORT0_END);
5471 bnx2x_init_block(bp, port ? CSEM_PORT1_START : CSEM_PORT0_START,
5472 port ? CSEM_PORT1_END : CSEM_PORT0_END);
5473 bnx2x_init_block(bp, port ? XSEM_PORT1_START : XSEM_PORT0_START,
5474 port ? XSEM_PORT1_END : XSEM_PORT0_END);
5475 /* Port UPB comes here */
5476 /* Port XPB comes here */
5478 bnx2x_init_block(bp, port ? PBF_PORT1_START : PBF_PORT0_START,
5479 port ? PBF_PORT1_END : PBF_PORT0_END);
5481 /* configure PBF to work without PAUSE mtu 9000 */
5482 REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
5484 /* update threshold */
5485 REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
5486 /* update init credit */
5487 REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
5489 /* probe changes */
5490 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
5491 msleep(5);
5492 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
5494 #ifdef BCM_ISCSI
5495 /* tell the searcher where the T2 table is */
5496 REG_WR(bp, SRC_REG_COUNTFREE0 + func*4, 16*1024/64);
5498 wb_write[0] = U64_LO(bp->t2_mapping);
5499 wb_write[1] = U64_HI(bp->t2_mapping);
5500 REG_WR_DMAE(bp, SRC_REG_FIRSTFREE0 + func*4, wb_write, 2);
5501 wb_write[0] = U64_LO((u64)bp->t2_mapping + 16*1024 - 64);
5502 wb_write[1] = U64_HI((u64)bp->t2_mapping + 16*1024 - 64);
5503 REG_WR_DMAE(bp, SRC_REG_LASTFREE0 + func*4, wb_write, 2);
5505 REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + func*4, 10);
5506 /* Port SRCH comes here */
5507 #endif
5508 /* Port CDU comes here */
5509 /* Port CFC comes here */
5511 if (CHIP_IS_E1(bp)) {
5512 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
5513 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
5515 bnx2x_init_block(bp, port ? HC_PORT1_START : HC_PORT0_START,
5516 port ? HC_PORT1_END : HC_PORT0_END);
5518 bnx2x_init_block(bp, port ? MISC_AEU_PORT1_START :
5519 MISC_AEU_PORT0_START,
5520 port ? MISC_AEU_PORT1_END : MISC_AEU_PORT0_END);
5521 /* init aeu_mask_attn_func_0/1:
5522 * - SF mode: bits 3-7 are masked. only bits 0-2 are in use
5523 * - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
5524 * bits 4-7 are used for "per vn group attention" */
5525 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
5526 (IS_E1HMF(bp) ? 0xF7 : 0x7));
5528 /* Port PXPCS comes here */
5529 /* Port EMAC0 comes here */
5530 /* Port EMAC1 comes here */
5531 /* Port DBU comes here */
5532 /* Port DBG comes here */
5533 bnx2x_init_block(bp, port ? NIG_PORT1_START : NIG_PORT0_START,
5534 port ? NIG_PORT1_END : NIG_PORT0_END);
5536 REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
5538 if (CHIP_IS_E1H(bp)) {
5539 u32 wsum;
5540 struct cmng_struct_per_port m_cmng_port;
5541 int vn;
5543 /* 0x2 disable e1hov, 0x1 enable */
5544 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
5545 (IS_E1HMF(bp) ? 0x1 : 0x2));
5547 /* Init RATE SHAPING and FAIRNESS contexts.
5548 Initialize as if there is 10G link. */
5549 wsum = bnx2x_calc_vn_wsum(bp);
5550 bnx2x_init_port_minmax(bp, (int)wsum, 10000, &m_cmng_port);
5551 if (IS_E1HMF(bp))
5552 for (vn = VN_0; vn < E1HVN_MAX; vn++)
5553 bnx2x_init_vn_minmax(bp, 2*vn + port,
5554 wsum, 10000, &m_cmng_port);
5557 /* Port MCP comes here */
5558 /* Port DMAE comes here */
5560 switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
5561 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1021G:
5562 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
5563 /* add SPIO 5 to group 0 */
5564 val = REG_RD(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
5565 val |= AEU_INPUTS_ATTN_BITS_SPIO5;
5566 REG_WR(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0, val);
5567 break;
5569 default:
5570 break;
5573 bnx2x__link_reset(bp);
5575 return 0;
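/* ILT (internal lookup table) layout: each function owns ILT_PER_FUNC
 * (768/2) lines starting at FUNC_ILT_BASE(func); bnx2x_ilt_wr programs
 * one line with the ONCHIP_ADDR1/2 encoding, using the E1 or E1H
 * register block as appropriate.
 */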
5578 #define ILT_PER_FUNC (768/2)
5579 #define FUNC_ILT_BASE(func) (func * ILT_PER_FUNC)
5580 /* the phys address is shifted right 12 bits and has a valid (1) bit
5581 added as the 53rd bit;
5582 then, since this is a wide register(TM),
5583 we split it into two 32 bit writes */
5585 #define ONCHIP_ADDR1(x) ((u32)(((u64)x >> 12) & 0xFFFFFFFF))
5586 #define ONCHIP_ADDR2(x) ((u32)((1 << 20) | ((u64)x >> 44)))
5587 #define PXP_ONE_ILT(x) (((x) << 10) | x)
5588 #define PXP_ILT_RANGE(f, l) (((l) << 10) | f)
5590 #define CNIC_ILT_LINES 0
5592 static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
5594 int reg;
5596 if (CHIP_IS_E1H(bp))
5597 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
5598 else /* E1 */
5599 reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
5601 bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
5604 static int bnx2x_init_func(struct bnx2x *bp)
5606 int port = BP_PORT(bp);
5607 int func = BP_FUNC(bp);
5608 int i;
5610 DP(BNX2X_MSG_MCP, "starting func init func %x\n", func);
5612 i = FUNC_ILT_BASE(func);
5614 bnx2x_ilt_wr(bp, i, bnx2x_sp_mapping(bp, context));
5615 if (CHIP_IS_E1H(bp)) {
5616 REG_WR(bp, PXP2_REG_RQ_CDU_FIRST_ILT, i);
5617 REG_WR(bp, PXP2_REG_RQ_CDU_LAST_ILT, i + CNIC_ILT_LINES);
5618 } else /* E1 */
5619 REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4,
5620 PXP_ILT_RANGE(i, i + CNIC_ILT_LINES));
5623 if (CHIP_IS_E1H(bp)) {
5624 for (i = 0; i < 9; i++)
5625 bnx2x_init_block(bp,
5626 cm_start[func][i], cm_end[func][i]);
5628 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
5629 REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov);
5632 /* HC init per function */
5633 if (CHIP_IS_E1H(bp)) {
5634 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
5636 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
5637 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
5639 bnx2x_init_block(bp, hc_limits[func][0], hc_limits[func][1]);
5641 if (CHIP_IS_E1H(bp))
5642 REG_WR(bp, HC_REG_FUNC_NUM_P0 + port*4, func);
5644 /* Reset PCIE errors for debug */
5645 REG_WR(bp, 0x2114, 0xffffffff);
5646 REG_WR(bp, 0x2120, 0xffffffff);
5648 return 0;
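/* HW init dispatch: the MCP load code selects how much to initialize.
 * The switch below intentionally falls through, so COMMON also performs
 * the PORT and FUNCTION stages and PORT also performs the FUNCTION
 * stage.
 */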
5651 static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
5653 int i, rc = 0;
5655 DP(BNX2X_MSG_MCP, "function %d load_code %x\n",
5656 BP_FUNC(bp), load_code);
5658 bp->dmae_ready = 0;
5659 mutex_init(&bp->dmae_mutex);
5660 bnx2x_gunzip_init(bp);
5662 switch (load_code) {
5663 case FW_MSG_CODE_DRV_LOAD_COMMON:
5664 rc = bnx2x_init_common(bp);
5665 if (rc)
5666 goto init_hw_err;
5667 /* no break */
5669 case FW_MSG_CODE_DRV_LOAD_PORT:
5670 bp->dmae_ready = 1;
5671 rc = bnx2x_init_port(bp);
5672 if (rc)
5673 goto init_hw_err;
5674 /* no break */
5676 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
5677 bp->dmae_ready = 1;
5678 rc = bnx2x_init_func(bp);
5679 if (rc)
5680 goto init_hw_err;
5681 break;
5683 default:
5684 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
5685 break;
5688 if (!BP_NOMCP(bp)) {
5689 int func = BP_FUNC(bp);
5691 bp->fw_drv_pulse_wr_seq =
5692 (SHMEM_RD(bp, func_mb[func].drv_pulse_mb) &
5693 DRV_PULSE_SEQ_MASK);
5694 bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
5695 DP(BNX2X_MSG_MCP, "drv_pulse 0x%x func_stx 0x%x\n",
5696 bp->fw_drv_pulse_wr_seq, bp->func_stx);
5697 } else
5698 bp->func_stx = 0;
5700 /* this needs to be done before gunzip end */
5701 bnx2x_zero_def_sb(bp);
5702 for_each_queue(bp, i)
5703 bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
5705 init_hw_err:
5706 bnx2x_gunzip_end(bp);
5708 return rc;
5711 /* send the MCP a request, block until there is a reply */
5712 static u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
5714 int func = BP_FUNC(bp);
5715 u32 seq = ++bp->fw_seq;
5716 u32 rc = 0;
5717 u32 cnt = 1;
5718 u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
5720 SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
5721 DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
5723 do {
5724 /* let the FW do its magic ... */
5725 msleep(delay);
5727 rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);
5729 /* Give the FW up to 2 seconds (200*10ms) */
5730 } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 200));
5732 DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
5733 cnt*delay, rc, seq);
5735 /* is this a reply to our command? */
5736 if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK)) {
5737 rc &= FW_MSG_CODE_MASK;
5739 } else {
5740 /* FW BUG! */
5741 BNX2X_ERR("FW failed to respond!\n");
5742 bnx2x_fw_dump(bp);
5743 rc = 0;
5746 return rc;
5749 static void bnx2x_free_mem(struct bnx2x *bp)
5752 #define BNX2X_PCI_FREE(x, y, size) \
5753 do { \
5754 if (x) { \
5755 pci_free_consistent(bp->pdev, size, x, y); \
5756 x = NULL; \
5757 y = 0; \
5759 } while (0)
5761 #define BNX2X_FREE(x) \
5762 do { \
5763 if (x) { \
5764 vfree(x); \
5765 x = NULL; \
5767 } while (0)
5769 int i;
5771 /* fastpath */
5772 for_each_queue(bp, i) {
5774 /* Status blocks */
5775 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
5776 bnx2x_fp(bp, i, status_blk_mapping),
5777 sizeof(struct host_status_block) +
5778 sizeof(struct eth_tx_db_data));
5780 /* fast path rings: tx_buf tx_desc rx_buf rx_desc rx_comp */
5781 BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
5782 BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
5783 bnx2x_fp(bp, i, tx_desc_mapping),
5784 sizeof(struct eth_tx_bd) * NUM_TX_BD);
5786 BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
5787 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
5788 bnx2x_fp(bp, i, rx_desc_mapping),
5789 sizeof(struct eth_rx_bd) * NUM_RX_BD);
5791 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
5792 bnx2x_fp(bp, i, rx_comp_mapping),
5793 sizeof(struct eth_fast_path_rx_cqe) *
5794 NUM_RCQ_BD);
5796 /* SGE ring */
5797 BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
5798 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
5799 bnx2x_fp(bp, i, rx_sge_mapping),
5800 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
5802 /* end of fastpath */
5804 BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
5805 sizeof(struct host_def_status_block));
5807 BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
5808 sizeof(struct bnx2x_slowpath));
5810 #ifdef BCM_ISCSI
5811 BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
5812 BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
5813 BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
5814 BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
5815 #endif
5816 BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
5818 #undef BNX2X_PCI_FREE
5819 #undef BNX2X_FREE
5822 static int bnx2x_alloc_mem(struct bnx2x *bp)
5825 #define BNX2X_PCI_ALLOC(x, y, size) \
5826 do { \
5827 x = pci_alloc_consistent(bp->pdev, size, y); \
5828 if (x == NULL) \
5829 goto alloc_mem_err; \
5830 memset(x, 0, size); \
5831 } while (0)
5833 #define BNX2X_ALLOC(x, size) \
5834 do { \
5835 x = vmalloc(size); \
5836 if (x == NULL) \
5837 goto alloc_mem_err; \
5838 memset(x, 0, size); \
5839 } while (0)
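/* Note on the two helpers above: BNX2X_PCI_ALLOC hands out zeroed
 * DMA-coherent memory for blocks the chip itself reads or writes
 * (status blocks, descriptor/completion rings, the slowpath area),
 * while BNX2X_ALLOC uses vmalloc for host-only shadow rings such as
 * the sw_tx_bd/sw_rx_bd arrays.  Both bail out to alloc_mem_err, so a
 * partially completed allocation is unwound by the single
 * bnx2x_free_mem() path.
 */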
5841 int i;
5843 /* fastpath */
5844 for_each_queue(bp, i) {
5845 bnx2x_fp(bp, i, bp) = bp;
5847 /* Status blocks */
5848 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
5849 &bnx2x_fp(bp, i, status_blk_mapping),
5850 sizeof(struct host_status_block) +
5851 sizeof(struct eth_tx_db_data));
5853 bnx2x_fp(bp, i, hw_tx_prods) =
5854 (void *)(bnx2x_fp(bp, i, status_blk) + 1);
5856 bnx2x_fp(bp, i, tx_prods_mapping) =
5857 bnx2x_fp(bp, i, status_blk_mapping) +
5858 sizeof(struct host_status_block);
5860 /* fast path rings: tx_buf tx_desc rx_buf rx_desc rx_comp */
5861 BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
5862 sizeof(struct sw_tx_bd) * NUM_TX_BD);
5863 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
5864 &bnx2x_fp(bp, i, tx_desc_mapping),
5865 sizeof(struct eth_tx_bd) * NUM_TX_BD);
5867 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
5868 sizeof(struct sw_rx_bd) * NUM_RX_BD);
5869 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
5870 &bnx2x_fp(bp, i, rx_desc_mapping),
5871 sizeof(struct eth_rx_bd) * NUM_RX_BD);
5873 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
5874 &bnx2x_fp(bp, i, rx_comp_mapping),
5875 sizeof(struct eth_fast_path_rx_cqe) *
5876 NUM_RCQ_BD);
5878 /* SGE ring */
5879 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
5880 sizeof(struct sw_rx_page) * NUM_RX_SGE);
5881 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
5882 &bnx2x_fp(bp, i, rx_sge_mapping),
5883 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
5885 /* end of fastpath */
5887 BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
5888 sizeof(struct host_def_status_block));
5890 BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
5891 sizeof(struct bnx2x_slowpath));
5893 #ifdef BCM_ISCSI
5894 BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);
5896 /* Initialize T1 */
5897 for (i = 0; i < 64*1024; i += 64) {
5898 *(u64 *)((char *)bp->t1 + i + 56) = 0x0UL;
5899 *(u64 *)((char *)bp->t1 + i + 3) = 0x0UL;
5902 /* allocate searcher T2 table
5903 we allocate 1/4 of alloc num for T2
5904 (which is not entered into the ILT) */
5905 BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);
5907 /* Initialize T2 */
5908 for (i = 0; i < 16*1024; i += 64)
5909 *(u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;
5911 /* now fixup the last line in the block to point to the next block */
5912 *(u64 *)((char *)bp->t2 + 1024*16-8) = bp->t2_mapping;
5914 /* Timer block array (MAX_CONN*8) phys uncached for now 1024 conns */
5915 BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);
5917 /* QM queues (128*MAX_CONN) */
5918 BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);
5919 #endif
5921 /* Slow path ring */
5922 BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);
5924 return 0;
5926 alloc_mem_err:
5927 bnx2x_free_mem(bp);
5928 return -ENOMEM;
5930 #undef BNX2X_PCI_ALLOC
5931 #undef BNX2X_ALLOC
5934 static void bnx2x_free_tx_skbs(struct bnx2x *bp)
5936 int i;
5938 for_each_queue(bp, i) {
5939 struct bnx2x_fastpath *fp = &bp->fp[i];
5941 u16 bd_cons = fp->tx_bd_cons;
5942 u16 sw_prod = fp->tx_pkt_prod;
5943 u16 sw_cons = fp->tx_pkt_cons;
5945 while (sw_cons != sw_prod) {
5946 bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
5947 sw_cons++;
5952 static void bnx2x_free_rx_skbs(struct bnx2x *bp)
5954 int i, j;
5956 for_each_queue(bp, j) {
5957 struct bnx2x_fastpath *fp = &bp->fp[j];
5959 for (i = 0; i < NUM_RX_BD; i++) {
5960 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
5961 struct sk_buff *skb = rx_buf->skb;
5963 if (skb == NULL)
5964 continue;
5966 pci_unmap_single(bp->pdev,
5967 pci_unmap_addr(rx_buf, mapping),
5968 bp->rx_buf_size,
5969 PCI_DMA_FROMDEVICE);
5971 rx_buf->skb = NULL;
5972 dev_kfree_skb(skb);
5974 if (!fp->disable_tpa)
5975 bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
5976 ETH_MAX_AGGREGATION_QUEUES_E1 :
5977 ETH_MAX_AGGREGATION_QUEUES_E1H);
5981 static void bnx2x_free_skbs(struct bnx2x *bp)
5983 bnx2x_free_tx_skbs(bp);
5984 bnx2x_free_rx_skbs(bp);
5987 static void bnx2x_free_msix_irqs(struct bnx2x *bp)
5989 int i, offset = 1;
5991 free_irq(bp->msix_table[0].vector, bp->dev);
5992 DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
5993 bp->msix_table[0].vector);
5995 for_each_queue(bp, i) {
5996 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq "
5997 "state %x\n", i, bp->msix_table[i + offset].vector,
5998 bnx2x_fp(bp, i, state));
6000 if (bnx2x_fp(bp, i, state) != BNX2X_FP_STATE_CLOSED)
6001 BNX2X_ERR("IRQ of fp #%d being freed while "
6002 "state != closed\n", i);
6004 free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
6008 static void bnx2x_free_irq(struct bnx2x *bp)
6010 if (bp->flags & USING_MSIX_FLAG) {
6011 bnx2x_free_msix_irqs(bp);
6012 pci_disable_msix(bp->pdev);
6013 bp->flags &= ~USING_MSIX_FLAG;
6015 } else
6016 free_irq(bp->pdev->irq, bp->dev);
6019 static int bnx2x_enable_msix(struct bnx2x *bp)
6021 int i, rc, offset;
6023 bp->msix_table[0].entry = 0;
6024 offset = 1;
6025 DP(NETIF_MSG_IFUP, "msix_table[0].entry = 0 (slowpath)\n");
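/* Vector layout: entry 0 always carries the slowpath (default status
 * block) interrupt, and entries 1..num_queues map one fastpath queue
 * each; the IGU vector requested for fastpath i is offset + i +
 * BP_L_ID(bp), matching the per-queue status block ids used elsewhere
 * (see bnx2x_zero_sb() in bnx2x_init_hw()).
 */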
6027 for_each_queue(bp, i) {
6028 int igu_vec = offset + i + BP_L_ID(bp);
6030 bp->msix_table[i + offset].entry = igu_vec;
6031 DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
6032 "(fastpath #%u)\n", i + offset, igu_vec, i);
6035 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0],
6036 bp->num_queues + offset);
6037 if (rc) {
6038 DP(NETIF_MSG_IFUP, "MSI-X is not attainable\n");
6039 return -1;
6041 bp->flags |= USING_MSIX_FLAG;
6043 return 0;
6046 static int bnx2x_req_msix_irqs(struct bnx2x *bp)
6048 int i, rc, offset = 1;
6050 rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
6051 bp->dev->name, bp->dev);
6052 if (rc) {
6053 BNX2X_ERR("request sp irq failed\n");
6054 return -EBUSY;
6057 for_each_queue(bp, i) {
6058 rc = request_irq(bp->msix_table[i + offset].vector,
6059 bnx2x_msix_fp_int, 0,
6060 bp->dev->name, &bp->fp[i]);
6061 if (rc) {
6062 BNX2X_ERR("request fp #%d irq failed rc -%d\n",
6063 i + offset, -rc);
6064 bnx2x_free_msix_irqs(bp);
6065 return -EBUSY;
6068 bnx2x_fp(bp, i, state) = BNX2X_FP_STATE_IRQ;
6071 return 0;
6074 static int bnx2x_req_irq(struct bnx2x *bp)
6076 int rc;
6078 rc = request_irq(bp->pdev->irq, bnx2x_interrupt, IRQF_SHARED,
6079 bp->dev->name, bp->dev);
6080 if (!rc)
6081 bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;
6083 return rc;
6086 static void bnx2x_napi_enable(struct bnx2x *bp)
6088 int i;
6090 for_each_queue(bp, i)
6091 napi_enable(&bnx2x_fp(bp, i, napi));
6094 static void bnx2x_napi_disable(struct bnx2x *bp)
6096 int i;
6098 for_each_queue(bp, i)
6099 napi_disable(&bnx2x_fp(bp, i, napi));
6102 static void bnx2x_netif_start(struct bnx2x *bp)
6104 if (atomic_dec_and_test(&bp->intr_sem)) {
6105 if (netif_running(bp->dev)) {
6106 if (bp->state == BNX2X_STATE_OPEN)
6107 netif_wake_queue(bp->dev);
6108 bnx2x_napi_enable(bp);
6109 bnx2x_int_enable(bp);
6114 static void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
6116 bnx2x_int_disable_sync(bp, disable_hw);
6117 if (netif_running(bp->dev)) {
6118 bnx2x_napi_disable(bp);
6119 netif_tx_disable(bp->dev);
6120 bp->dev->trans_start = jiffies; /* prevent tx timeout */
6125 * Init service functions
6128 static void bnx2x_set_mac_addr_e1(struct bnx2x *bp, int set)
6130 struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
6131 int port = BP_PORT(bp);
6133 /* CAM allocation
6134 * unicasts 0-31:port0 32-63:port1
6135 * multicast 64-127:port0 128-191:port1
6137 config->hdr.length_6b = 2;
6138 config->hdr.offset = port ? 31 : 0;
6139 config->hdr.client_id = BP_CL_ID(bp);
6140 config->hdr.reserved1 = 0;
6142 /* primary MAC */
6143 config->config_table[0].cam_entry.msb_mac_addr =
6144 swab16(*(u16 *)&bp->dev->dev_addr[0]);
6145 config->config_table[0].cam_entry.middle_mac_addr =
6146 swab16(*(u16 *)&bp->dev->dev_addr[2]);
6147 config->config_table[0].cam_entry.lsb_mac_addr =
6148 swab16(*(u16 *)&bp->dev->dev_addr[4]);
6149 config->config_table[0].cam_entry.flags = cpu_to_le16(port);
6150 if (set)
6151 config->config_table[0].target_table_entry.flags = 0;
6152 else
6153 CAM_INVALIDATE(config->config_table[0]);
6154 config->config_table[0].target_table_entry.client_id = 0;
6155 config->config_table[0].target_table_entry.vlan_id = 0;
6157 DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)\n",
6158 (set ? "setting" : "clearing"),
6159 config->config_table[0].cam_entry.msb_mac_addr,
6160 config->config_table[0].cam_entry.middle_mac_addr,
6161 config->config_table[0].cam_entry.lsb_mac_addr);
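/* Example of the packing above (assuming a little-endian host and the
 * hypothetical address 00:10:18:aa:bb:cc): the three swab16() reads
 * produce msb 0x0010, middle 0x18aa and lsb 0xbbcc, i.e. the CAM
 * entry holds the MAC in big-endian 16-bit chunks.
 */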
6163 /* broadcast */
6164 config->config_table[1].cam_entry.msb_mac_addr = 0xffff;
6165 config->config_table[1].cam_entry.middle_mac_addr = 0xffff;
6166 config->config_table[1].cam_entry.lsb_mac_addr = 0xffff;
6167 config->config_table[1].cam_entry.flags = cpu_to_le16(port);
6168 if (set)
6169 config->config_table[1].target_table_entry.flags =
6170 TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
6171 else
6172 CAM_INVALIDATE(config->config_table[1]);
6173 config->config_table[1].target_table_entry.client_id = 0;
6174 config->config_table[1].target_table_entry.vlan_id = 0;
6176 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6177 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
6178 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
6181 static void bnx2x_set_mac_addr_e1h(struct bnx2x *bp, int set)
6183 struct mac_configuration_cmd_e1h *config =
6184 (struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config);
6186 if (set && (bp->state != BNX2X_STATE_OPEN)) {
6187 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
6188 return;
6191 /* CAM allocation for E1H
6192 * unicasts: by func number
6193 * multicast: 20+FUNC*20, 20 each
6195 config->hdr.length_6b = 1;
6196 config->hdr.offset = BP_FUNC(bp);
6197 config->hdr.client_id = BP_CL_ID(bp);
6198 config->hdr.reserved1 = 0;
6200 /* primary MAC */
6201 config->config_table[0].msb_mac_addr =
6202 swab16(*(u16 *)&bp->dev->dev_addr[0]);
6203 config->config_table[0].middle_mac_addr =
6204 swab16(*(u16 *)&bp->dev->dev_addr[2]);
6205 config->config_table[0].lsb_mac_addr =
6206 swab16(*(u16 *)&bp->dev->dev_addr[4]);
6207 config->config_table[0].client_id = BP_L_ID(bp);
6208 config->config_table[0].vlan_id = 0;
6209 config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov);
6210 if (set)
6211 config->config_table[0].flags = BP_PORT(bp);
6212 else
6213 config->config_table[0].flags =
6214 MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE;
6216 DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x) E1HOV %d CLID %d\n",
6217 (set ? "setting" : "clearing"),
6218 config->config_table[0].msb_mac_addr,
6219 config->config_table[0].middle_mac_addr,
6220 config->config_table[0].lsb_mac_addr, bp->e1hov, BP_L_ID(bp));
6222 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6223 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
6224 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
6227 static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
6228 int *state_p, int poll)
6230 /* can take a while if any port is running */
6231 int cnt = 500;
6233 DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
6234 poll ? "polling" : "waiting", state, idx);
6236 might_sleep();
6237 while (cnt--) {
6238 if (poll) {
6239 bnx2x_rx_int(bp->fp, 10);
6240 /* if index is different from 0
6241 * the reply for some commands will
6242 * be on the non default queue
6244 if (idx)
6245 bnx2x_rx_int(&bp->fp[idx], 10);
6248 mb(); /* state is changed by bnx2x_sp_event() */
6249 if (*state_p == state)
6250 return 0;
6252 msleep(1);
6255 /* timeout! */
6256 BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
6257 poll ? "polling" : "waiting", state, idx);
6258 #ifdef BNX2X_STOP_ON_ERROR
6259 bnx2x_panic();
6260 #endif
6262 return -EBUSY;
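/* Note on bnx2x_wait_ramrod(): ramrod completions are reported through
 * the slowpath event path, where bnx2x_sp_event() updates the state
 * word being watched (hence the mb() before each check).  In 'poll'
 * mode the loop also drives bnx2x_rx_int() by hand rather than relying
 * on an interrupt, both on the default queue and, for a non-zero
 * index, on the queue the reply is expected on.
 */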
6265 static int bnx2x_setup_leading(struct bnx2x *bp)
6267 int rc;
6269 /* reset IGU state */
6270 bnx2x_ack_sb(bp, bp->fp[0].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
6272 /* SETUP ramrod */
6273 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);
6275 /* Wait for completion */
6276 rc = bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0);
6278 return rc;
6281 static int bnx2x_setup_multi(struct bnx2x *bp, int index)
6283 /* reset IGU state */
6284 bnx2x_ack_sb(bp, bp->fp[index].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
6286 /* SETUP ramrod */
6287 bp->fp[index].state = BNX2X_FP_STATE_OPENING;
6288 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0, index, 0);
6290 /* Wait for completion */
6291 return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
6292 &(bp->fp[index].state), 0);
6295 static int bnx2x_poll(struct napi_struct *napi, int budget);
6296 static void bnx2x_set_rx_mode(struct net_device *dev);
6298 /* must be called with rtnl_lock */
6299 static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
6301 u32 load_code;
6302 int i, rc;
6303 #ifdef BNX2X_STOP_ON_ERROR
6304 if (unlikely(bp->panic))
6305 return -EPERM;
6306 #endif
6308 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
6310 /* Send LOAD_REQUEST command to MCP
6311 Returns the type of LOAD command:
6312 if it is the first port to be initialized
6313 common blocks should be initialized, otherwise - not
6315 if (!BP_NOMCP(bp)) {
6316 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
6317 if (!load_code) {
6318 BNX2X_ERR("MCP response failure, aborting\n");
6319 return -EBUSY;
6321 if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED)
6322 return -EBUSY; /* other port in diagnostic mode */
6324 } else {
6325 int port = BP_PORT(bp);
6327 DP(NETIF_MSG_IFUP, "NO MCP load counts before us %d, %d, %d\n",
6328 load_count[0], load_count[1], load_count[2]);
6329 load_count[0]++;
6330 load_count[1 + port]++;
6331 DP(NETIF_MSG_IFUP, "NO MCP new load counts %d, %d, %d\n",
6332 load_count[0], load_count[1], load_count[2]);
6333 if (load_count[0] == 1)
6334 load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
6335 else if (load_count[1 + port] == 1)
6336 load_code = FW_MSG_CODE_DRV_LOAD_PORT;
6337 else
6338 load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
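/* Without an MCP the driver mimics the bookkeeping the MCP would
 * normally do: load_count[0] counts every function loaded on the chip
 * and load_count[1 + port] counts the functions on this port, so the
 * very first function performs COMMON + PORT + FUNCTION init, the
 * first function on a not-yet-used port performs PORT + FUNCTION init,
 * and any further function performs FUNCTION-only init.
 */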
6341 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
6342 (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
6343 bp->port.pmf = 1;
6344 else
6345 bp->port.pmf = 0;
6346 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
6348 /* if we can't use MSI-X we only need one fp,
6349 * so try to enable MSI-X with the requested number of fp's
6350 * and fall back to INT#A with one fp
6352 if (use_inta) {
6353 bp->num_queues = 1;
6355 } else {
6356 if ((use_multi > 1) && (use_multi <= BP_MAX_QUEUES(bp)))
6357 /* user requested number */
6358 bp->num_queues = use_multi;
6360 else if (use_multi)
6361 bp->num_queues = min_t(u32, num_online_cpus(),
6362 BP_MAX_QUEUES(bp));
6363 else
6364 bp->num_queues = 1;
6366 if (bnx2x_enable_msix(bp)) {
6367 /* failed to enable MSI-X */
6368 bp->num_queues = 1;
6369 if (use_multi)
6370 BNX2X_ERR("Multi requested but failed"
6371 " to enable MSI-X\n");
6374 DP(NETIF_MSG_IFUP,
6375 "set number of queues to %d\n", bp->num_queues);
6377 if (bnx2x_alloc_mem(bp))
6378 return -ENOMEM;
6380 for_each_queue(bp, i)
6381 bnx2x_fp(bp, i, disable_tpa) =
6382 ((bp->flags & TPA_ENABLE_FLAG) == 0);
6384 if (bp->flags & USING_MSIX_FLAG) {
6385 rc = bnx2x_req_msix_irqs(bp);
6386 if (rc) {
6387 pci_disable_msix(bp->pdev);
6388 goto load_error;
6390 } else {
6391 bnx2x_ack_int(bp);
6392 rc = bnx2x_req_irq(bp);
6393 if (rc) {
6394 BNX2X_ERR("IRQ request failed, aborting\n");
6395 goto load_error;
6399 for_each_queue(bp, i)
6400 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
6401 bnx2x_poll, 128);
6403 /* Initialize HW */
6404 rc = bnx2x_init_hw(bp, load_code);
6405 if (rc) {
6406 BNX2X_ERR("HW init failed, aborting\n");
6407 goto load_int_disable;
6410 /* Setup NIC internals and enable interrupts */
6411 bnx2x_nic_init(bp, load_code);
6413 /* Send LOAD_DONE command to MCP */
6414 if (!BP_NOMCP(bp)) {
6415 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
6416 if (!load_code) {
6417 BNX2X_ERR("MCP response failure, aborting\n");
6418 rc = -EBUSY;
6419 goto load_rings_free;
6423 bnx2x_stats_init(bp);
6425 bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
6427 /* Enable Rx interrupt handling before sending the ramrod
6428 as it's completed on Rx FP queue */
6429 bnx2x_napi_enable(bp);
6431 /* Enable interrupt handling */
6432 atomic_set(&bp->intr_sem, 0);
6434 rc = bnx2x_setup_leading(bp);
6435 if (rc) {
6436 BNX2X_ERR("Setup leading failed!\n");
6437 goto load_netif_stop;
6440 if (CHIP_IS_E1H(bp))
6441 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
6442 BNX2X_ERR("!!! mf_cfg function disabled\n");
6443 bp->state = BNX2X_STATE_DISABLED;
6446 if (bp->state == BNX2X_STATE_OPEN)
6447 for_each_nondefault_queue(bp, i) {
6448 rc = bnx2x_setup_multi(bp, i);
6449 if (rc)
6450 goto load_netif_stop;
6453 if (CHIP_IS_E1(bp))
6454 bnx2x_set_mac_addr_e1(bp, 1);
6455 else
6456 bnx2x_set_mac_addr_e1h(bp, 1);
6458 if (bp->port.pmf)
6459 bnx2x_initial_phy_init(bp);
6461 /* Start fast path */
6462 switch (load_mode) {
6463 case LOAD_NORMAL:
6464 /* Tx queue should only be re-enabled */
6465 netif_wake_queue(bp->dev);
6466 bnx2x_set_rx_mode(bp->dev);
6467 break;
6469 case LOAD_OPEN:
6470 netif_start_queue(bp->dev);
6471 bnx2x_set_rx_mode(bp->dev);
6472 if (bp->flags & USING_MSIX_FLAG)
6473 printk(KERN_INFO PFX "%s: using MSI-X\n",
6474 bp->dev->name);
6475 break;
6477 case LOAD_DIAG:
6478 bnx2x_set_rx_mode(bp->dev);
6479 bp->state = BNX2X_STATE_DIAG;
6480 break;
6482 default:
6483 break;
6486 if (!bp->port.pmf)
6487 bnx2x__link_status_update(bp);
6489 /* start the timer */
6490 mod_timer(&bp->timer, jiffies + bp->current_interval);
6493 return 0;
6495 load_netif_stop:
6496 bnx2x_napi_disable(bp);
6497 load_rings_free:
6498 /* Free SKBs, SGEs, TPA pool and driver internals */
6499 bnx2x_free_skbs(bp);
6500 for_each_queue(bp, i)
6501 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
6502 load_int_disable:
6503 bnx2x_int_disable_sync(bp, 1);
6504 /* Release IRQs */
6505 bnx2x_free_irq(bp);
6506 load_error:
6507 bnx2x_free_mem(bp);
6508 bp->port.pmf = 0;
6510 /* TBD we really need to reset the chip
6511 if we want to recover from this */
6512 return rc;
6515 static int bnx2x_stop_multi(struct bnx2x *bp, int index)
6517 int rc;
6519 /* halt the connection */
6520 bp->fp[index].state = BNX2X_FP_STATE_HALTING;
6521 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, index, 0);
6523 /* Wait for completion */
6524 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
6525 &(bp->fp[index].state), 1);
6526 if (rc) /* timeout */
6527 return rc;
6529 /* delete cfc entry */
6530 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);
6532 /* Wait for completion */
6533 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index,
6534 &(bp->fp[index].state), 1);
6535 return rc;
6538 static int bnx2x_stop_leading(struct bnx2x *bp)
6540 u16 dsb_sp_prod_idx;
6541 /* if the other port is handling traffic,
6542 this can take a lot of time */
6543 int cnt = 500;
6544 int rc;
6546 might_sleep();
6548 /* Send HALT ramrod */
6549 bp->fp[0].state = BNX2X_FP_STATE_HALTING;
6550 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, BP_CL_ID(bp), 0);
6552 /* Wait for completion */
6553 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
6554 &(bp->fp[0].state), 1);
6555 if (rc) /* timeout */
6556 return rc;
6558 dsb_sp_prod_idx = *bp->dsb_sp_prod;
6560 /* Send PORT_DELETE ramrod */
6561 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);
6563 /* Wait for completion to arrive on the default status block;
6564 we are going to reset the chip anyway,
6565 so there is not much to do if this times out
6567 while (dsb_sp_prod_idx == *bp->dsb_sp_prod) {
6568 if (!cnt) {
6569 DP(NETIF_MSG_IFDOWN, "timeout waiting for port del "
6570 "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
6571 *bp->dsb_sp_prod, dsb_sp_prod_idx);
6572 #ifdef BNX2X_STOP_ON_ERROR
6573 bnx2x_panic();
6574 #else
6575 rc = -EBUSY;
6576 #endif
6577 break;
6579 cnt--;
6580 msleep(1);
6582 bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
6583 bp->fp[0].state = BNX2X_FP_STATE_CLOSED;
6585 return rc;
6588 static void bnx2x_reset_func(struct bnx2x *bp)
6590 int port = BP_PORT(bp);
6591 int func = BP_FUNC(bp);
6592 int base, i;
6594 /* Configure IGU */
6595 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
6596 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
6598 REG_WR(bp, HC_REG_CONFIG_0 + port*4, 0x1000);
6600 /* Clear ILT */
6601 base = FUNC_ILT_BASE(func);
6602 for (i = base; i < base + ILT_PER_FUNC; i++)
6603 bnx2x_ilt_wr(bp, i, 0);
6606 static void bnx2x_reset_port(struct bnx2x *bp)
6608 int port = BP_PORT(bp);
6609 u32 val;
6611 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
6613 /* Do not rcv packets to BRB */
6614 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
6615 /* Do not direct rcv packets that are not for MCP to the BRB */
6616 REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
6617 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
6619 /* Configure AEU */
6620 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
6622 msleep(100);
6623 /* Check for BRB port occupancy */
6624 val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
6625 if (val)
6626 DP(NETIF_MSG_IFDOWN,
6627 "BRB1 is not empty %d blocks are occupied\n", val);
6629 /* TODO: Close Doorbell port? */
6632 static void bnx2x_reset_common(struct bnx2x *bp)
6634 /* reset_common */
6635 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
6636 0xd3ffff7f);
6637 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
6640 static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
6642 DP(BNX2X_MSG_MCP, "function %d reset_code %x\n",
6643 BP_FUNC(bp), reset_code);
6645 switch (reset_code) {
6646 case FW_MSG_CODE_DRV_UNLOAD_COMMON:
6647 bnx2x_reset_port(bp);
6648 bnx2x_reset_func(bp);
6649 bnx2x_reset_common(bp);
6650 break;
6652 case FW_MSG_CODE_DRV_UNLOAD_PORT:
6653 bnx2x_reset_port(bp);
6654 bnx2x_reset_func(bp);
6655 break;
6657 case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
6658 bnx2x_reset_func(bp);
6659 break;
6661 default:
6662 BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
6663 break;
6667 /* must be called with rtnl_lock */
6668 static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
6670 int port = BP_PORT(bp);
6671 u32 reset_code = 0;
6672 int i, cnt, rc;
6674 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
6676 bp->rx_mode = BNX2X_RX_MODE_NONE;
6677 bnx2x_set_storm_rx_mode(bp);
6679 bnx2x_netif_stop(bp, 1);
6680 if (!netif_running(bp->dev))
6681 bnx2x_napi_disable(bp);
6682 del_timer_sync(&bp->timer);
6683 SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
6684 (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
6685 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
6687 /* Wait until tx fast path tasks complete */
6688 for_each_queue(bp, i) {
6689 struct bnx2x_fastpath *fp = &bp->fp[i];
6691 cnt = 1000;
6692 smp_rmb();
6693 while (BNX2X_HAS_TX_WORK(fp)) {
6695 bnx2x_tx_int(fp, 1000);
6696 if (!cnt) {
6697 BNX2X_ERR("timeout waiting for queue[%d]\n",
6699 #ifdef BNX2X_STOP_ON_ERROR
6700 bnx2x_panic();
6701 return -EBUSY;
6702 #else
6703 break;
6704 #endif
6706 cnt--;
6707 msleep(1);
6708 smp_rmb();
6711 /* Give HW time to discard old tx messages */
6712 msleep(1);
6714 /* Release IRQs */
6715 bnx2x_free_irq(bp);
6717 if (CHIP_IS_E1(bp)) {
6718 struct mac_configuration_cmd *config =
6719 bnx2x_sp(bp, mcast_config);
6721 bnx2x_set_mac_addr_e1(bp, 0);
6723 for (i = 0; i < config->hdr.length_6b; i++)
6724 CAM_INVALIDATE(config->config_table[i]);
6726 config->hdr.length_6b = i;
6727 if (CHIP_REV_IS_SLOW(bp))
6728 config->hdr.offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
6729 else
6730 config->hdr.offset = BNX2X_MAX_MULTICAST*(1 + port);
6731 config->hdr.client_id = BP_CL_ID(bp);
6732 config->hdr.reserved1 = 0;
6734 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6735 U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
6736 U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);
6738 } else { /* E1H */
6739 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
6741 bnx2x_set_mac_addr_e1h(bp, 0);
6743 for (i = 0; i < MC_HASH_SIZE; i++)
6744 REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
6747 if (unload_mode == UNLOAD_NORMAL)
6748 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
6750 else if (bp->flags & NO_WOL_FLAG) {
6751 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
6752 if (CHIP_IS_E1H(bp))
6753 REG_WR(bp, MISC_REG_E1HMF_MODE, 0);
6755 } else if (bp->wol) {
6756 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
6757 u8 *mac_addr = bp->dev->dev_addr;
6758 u32 val;
6759 /* The mac address is written to entries 1-4 to
6760 preserve entry 0 which is used by the PMF */
6761 u8 entry = (BP_E1HVN(bp) + 1)*8;
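/* Each MAC_MATCH entry is a pair of 32-bit registers (8 bytes), so
 * (vn + 1) * 8 selects entry 1, 2, 3 or 4 depending on the E1H VN,
 * leaving entry 0 (offset 0) untouched for the PMF as noted above.
 */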
6763 val = (mac_addr[0] << 8) | mac_addr[1];
6764 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
6766 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
6767 (mac_addr[4] << 8) | mac_addr[5];
6768 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
6770 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
6772 } else
6773 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
6775 /* Close multi and leading connections
6776 Completions for ramrods are collected in a synchronous way */
6777 for_each_nondefault_queue(bp, i)
6778 if (bnx2x_stop_multi(bp, i))
6779 goto unload_error;
6781 rc = bnx2x_stop_leading(bp);
6782 if (rc) {
6783 BNX2X_ERR("Stop leading failed!\n");
6784 #ifdef BNX2X_STOP_ON_ERROR
6785 return -EBUSY;
6786 #else
6787 goto unload_error;
6788 #endif
6791 unload_error:
6792 if (!BP_NOMCP(bp))
6793 reset_code = bnx2x_fw_command(bp, reset_code);
6794 else {
6795 DP(NETIF_MSG_IFDOWN, "NO MCP load counts %d, %d, %d\n",
6796 load_count[0], load_count[1], load_count[2]);
6797 load_count[0]--;
6798 load_count[1 + port]--;
6799 DP(NETIF_MSG_IFDOWN, "NO MCP new load counts %d, %d, %d\n",
6800 load_count[0], load_count[1], load_count[2]);
6801 if (load_count[0] == 0)
6802 reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
6803 else if (load_count[1 + port] == 0)
6804 reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
6805 else
6806 reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
6809 if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
6810 (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
6811 bnx2x__link_reset(bp);
6813 /* Reset the chip */
6814 bnx2x_reset_chip(bp, reset_code);
6816 /* Report UNLOAD_DONE to MCP */
6817 if (!BP_NOMCP(bp))
6818 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
6819 bp->port.pmf = 0;
6821 /* Free SKBs, SGEs, TPA pool and driver internals */
6822 bnx2x_free_skbs(bp);
6823 for_each_queue(bp, i)
6824 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
6825 bnx2x_free_mem(bp);
6827 bp->state = BNX2X_STATE_CLOSED;
6829 netif_carrier_off(bp->dev);
6831 return 0;
6834 static void bnx2x_reset_task(struct work_struct *work)
6836 struct bnx2x *bp = container_of(work, struct bnx2x, reset_task);
6838 #ifdef BNX2X_STOP_ON_ERROR
6839 BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
6840 " so reset not done to allow debug dump,\n"
6841 KERN_ERR " you will need to reboot when done\n");
6842 return;
6843 #endif
6845 rtnl_lock();
6847 if (!netif_running(bp->dev))
6848 goto reset_task_exit;
6850 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
6851 bnx2x_nic_load(bp, LOAD_NORMAL);
6853 reset_task_exit:
6854 rtnl_unlock();
6857 /* end of nic load/unload */
6859 /* ethtool_ops */
6862 * Init service functions
6865 static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
6867 u32 val;
6869 /* Check if there is any driver already loaded */
6870 val = REG_RD(bp, MISC_REG_UNPREPARED);
6871 if (val == 0x1) {
6872 /* Check if it is the UNDI driver
6873 * UNDI driver initializes CID offset for normal bell to 0x7
6875 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
6876 val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
6877 if (val == 0x7)
6878 REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
6879 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
6881 if (val == 0x7) {
6882 u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
6883 /* save our func */
6884 int func = BP_FUNC(bp);
6885 u32 swap_en;
6886 u32 swap_val;
6888 BNX2X_DEV_INFO("UNDI is active! reset device\n");
6890 /* try unload UNDI on port 0 */
6891 bp->func = 0;
6892 bp->fw_seq =
6893 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
6894 DRV_MSG_SEQ_NUMBER_MASK);
6895 reset_code = bnx2x_fw_command(bp, reset_code);
6897 /* if UNDI is loaded on the other port */
6898 if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {
6900 /* send "DONE" for previous unload */
6901 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
6903 /* unload UNDI on port 1 */
6904 bp->func = 1;
6905 bp->fw_seq =
6906 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
6907 DRV_MSG_SEQ_NUMBER_MASK);
6908 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
6910 bnx2x_fw_command(bp, reset_code);
6913 REG_WR(bp, (BP_PORT(bp) ? HC_REG_CONFIG_1 :
6914 HC_REG_CONFIG_0), 0x1000);
6916 /* close input traffic and wait for it */
6917 /* Do not rcv packets to BRB */
6918 REG_WR(bp,
6919 (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
6920 NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
6921 /* Do not direct rcv packets that are not for MCP to
6922 * the BRB */
6923 REG_WR(bp,
6924 (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
6925 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
6926 /* clear AEU */
6927 REG_WR(bp,
6928 (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
6929 MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
6930 msleep(10);
6932 /* save NIG port swap info */
6933 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
6934 swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
6935 /* reset device */
6936 REG_WR(bp,
6937 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
6938 0xd3ffffff);
6939 REG_WR(bp,
6940 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
6941 0x1403);
6942 /* take the NIG out of reset and restore swap values */
6943 REG_WR(bp,
6944 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
6945 MISC_REGISTERS_RESET_REG_1_RST_NIG);
6946 REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
6947 REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);
6949 /* send unload done to the MCP */
6950 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
6952 /* restore our func and fw_seq */
6953 bp->func = func;
6954 bp->fw_seq =
6955 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
6956 DRV_MSG_SEQ_NUMBER_MASK);
6961 static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
6963 u32 val, val2, val3, val4, id;
6964 u16 pmc;
6966 /* Get the chip revision id and number. */
6967 /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
6968 val = REG_RD(bp, MISC_REG_CHIP_NUM);
6969 id = ((val & 0xffff) << 16);
6970 val = REG_RD(bp, MISC_REG_CHIP_REV);
6971 id |= ((val & 0xf) << 12);
6972 val = REG_RD(bp, MISC_REG_CHIP_METAL);
6973 id |= ((val & 0xff) << 4);
6974 val = REG_RD(bp, MISC_REG_BOND_ID);
6975 id |= (val & 0xf);
6976 bp->common.chip_id = id;
6977 bp->link_params.chip_id = bp->common.chip_id;
6978 BNX2X_DEV_INFO("chip ID is 0x%x\n", id);
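/* The assembled id therefore reads (chip_num << 16) | (rev << 12) |
 * (metal << 4) | bond_id; e.g. a hypothetical part with chip num
 * 0x164e, rev 1, metal 0 and bond id 0 would report chip ID
 * 0x164e1000.
 */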
6980 val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
6981 bp->common.flash_size = (NVRAM_1MB_SIZE <<
6982 (val & MCPR_NVM_CFG4_FLASH_SIZE));
6983 BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
6984 bp->common.flash_size, bp->common.flash_size);
6986 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
6987 bp->link_params.shmem_base = bp->common.shmem_base;
6988 BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);
6990 if (!bp->common.shmem_base ||
6991 (bp->common.shmem_base < 0xA0000) ||
6992 (bp->common.shmem_base >= 0xC0000)) {
6993 BNX2X_DEV_INFO("MCP not active\n");
6994 bp->flags |= NO_MCP_FLAG;
6995 return;
6998 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
6999 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
7000 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
7001 BNX2X_ERR("BAD MCP validity signature\n");
7003 bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
7004 bp->common.board = SHMEM_RD(bp, dev_info.shared_hw_config.board);
7006 BNX2X_DEV_INFO("hw_config 0x%08x board 0x%08x\n",
7007 bp->common.hw_config, bp->common.board);
7009 bp->link_params.hw_led_mode = ((bp->common.hw_config &
7010 SHARED_HW_CFG_LED_MODE_MASK) >>
7011 SHARED_HW_CFG_LED_MODE_SHIFT);
7013 val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
7014 bp->common.bc_ver = val;
7015 BNX2X_DEV_INFO("bc_ver %X\n", val);
7016 if (val < BNX2X_BC_VER) {
7017 /* for now only warn
7018 * later we might need to enforce this */
7019 BNX2X_ERR("This driver needs bc_ver %X but found %X,"
7020 " please upgrade BC\n", BNX2X_BC_VER, val);
7023 if (BP_E1HVN(bp) == 0) {
7024 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
7025 bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
7026 } else {
7027 /* no WOL capability for E1HVN != 0 */
7028 bp->flags |= NO_WOL_FLAG;
7030 BNX2X_DEV_INFO("%sWoL capable\n",
7031 (bp->flags & NO_WOL_FLAG) ? "Not " : "");
7033 val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
7034 val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
7035 val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
7036 val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
7038 printk(KERN_INFO PFX "part number %X-%X-%X-%X\n",
7039 val, val2, val3, val4);
7042 static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
7043 u32 switch_cfg)
7045 int port = BP_PORT(bp);
7046 u32 ext_phy_type;
7048 switch (switch_cfg) {
7049 case SWITCH_CFG_1G:
7050 BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg);
7052 ext_phy_type =
7053 SERDES_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7054 switch (ext_phy_type) {
7055 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
7056 BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
7057 ext_phy_type);
7059 bp->port.supported |= (SUPPORTED_10baseT_Half |
7060 SUPPORTED_10baseT_Full |
7061 SUPPORTED_100baseT_Half |
7062 SUPPORTED_100baseT_Full |
7063 SUPPORTED_1000baseT_Full |
7064 SUPPORTED_2500baseX_Full |
7065 SUPPORTED_TP |
7066 SUPPORTED_FIBRE |
7067 SUPPORTED_Autoneg |
7068 SUPPORTED_Pause |
7069 SUPPORTED_Asym_Pause);
7070 break;
7072 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
7073 BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n",
7074 ext_phy_type);
7076 bp->port.supported |= (SUPPORTED_10baseT_Half |
7077 SUPPORTED_10baseT_Full |
7078 SUPPORTED_100baseT_Half |
7079 SUPPORTED_100baseT_Full |
7080 SUPPORTED_1000baseT_Full |
7081 SUPPORTED_TP |
7082 SUPPORTED_FIBRE |
7083 SUPPORTED_Autoneg |
7084 SUPPORTED_Pause |
7085 SUPPORTED_Asym_Pause);
7086 break;
7088 default:
7089 BNX2X_ERR("NVRAM config error. "
7090 "BAD SerDes ext_phy_config 0x%x\n",
7091 bp->link_params.ext_phy_config);
7092 return;
7095 bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
7096 port*0x10);
7097 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
7098 break;
7100 case SWITCH_CFG_10G:
7101 BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg);
7103 ext_phy_type =
7104 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7105 switch (ext_phy_type) {
7106 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
7107 BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
7108 ext_phy_type);
7110 bp->port.supported |= (SUPPORTED_10baseT_Half |
7111 SUPPORTED_10baseT_Full |
7112 SUPPORTED_100baseT_Half |
7113 SUPPORTED_100baseT_Full |
7114 SUPPORTED_1000baseT_Full |
7115 SUPPORTED_2500baseX_Full |
7116 SUPPORTED_10000baseT_Full |
7117 SUPPORTED_TP |
7118 SUPPORTED_FIBRE |
7119 SUPPORTED_Autoneg |
7120 SUPPORTED_Pause |
7121 SUPPORTED_Asym_Pause);
7122 break;
7124 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
7125 BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n",
7126 ext_phy_type);
7128 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7129 SUPPORTED_FIBRE |
7130 SUPPORTED_Pause |
7131 SUPPORTED_Asym_Pause);
7132 break;
7134 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
7135 BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n",
7136 ext_phy_type);
7138 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7139 SUPPORTED_1000baseT_Full |
7140 SUPPORTED_FIBRE |
7141 SUPPORTED_Pause |
7142 SUPPORTED_Asym_Pause);
7143 break;
7145 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
7146 BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n",
7147 ext_phy_type);
7149 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7150 SUPPORTED_1000baseT_Full |
7151 SUPPORTED_FIBRE |
7152 SUPPORTED_Autoneg |
7153 SUPPORTED_Pause |
7154 SUPPORTED_Asym_Pause);
7155 break;
7157 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
7158 BNX2X_DEV_INFO("ext_phy_type 0x%x (8073)\n",
7159 ext_phy_type);
7161 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7162 SUPPORTED_2500baseX_Full |
7163 SUPPORTED_1000baseT_Full |
7164 SUPPORTED_FIBRE |
7165 SUPPORTED_Autoneg |
7166 SUPPORTED_Pause |
7167 SUPPORTED_Asym_Pause);
7168 break;
7170 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
7171 BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n",
7172 ext_phy_type);
7174 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7175 SUPPORTED_TP |
7176 SUPPORTED_Autoneg |
7177 SUPPORTED_Pause |
7178 SUPPORTED_Asym_Pause);
7179 break;
7181 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
7182 BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
7183 bp->link_params.ext_phy_config);
7184 break;
7186 default:
7187 BNX2X_ERR("NVRAM config error. "
7188 "BAD XGXS ext_phy_config 0x%x\n",
7189 bp->link_params.ext_phy_config);
7190 return;
7193 bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
7194 port*0x18);
7195 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
7197 break;
7199 default:
7200 BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
7201 bp->port.link_config);
7202 return;
7204 bp->link_params.phy_addr = bp->port.phy_addr;
7206 /* mask what we support according to speed_cap_mask */
7207 if (!(bp->link_params.speed_cap_mask &
7208 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
7209 bp->port.supported &= ~SUPPORTED_10baseT_Half;
7211 if (!(bp->link_params.speed_cap_mask &
7212 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
7213 bp->port.supported &= ~SUPPORTED_10baseT_Full;
7215 if (!(bp->link_params.speed_cap_mask &
7216 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
7217 bp->port.supported &= ~SUPPORTED_100baseT_Half;
7219 if (!(bp->link_params.speed_cap_mask &
7220 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
7221 bp->port.supported &= ~SUPPORTED_100baseT_Full;
7223 if (!(bp->link_params.speed_cap_mask &
7224 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
7225 bp->port.supported &= ~(SUPPORTED_1000baseT_Half |
7226 SUPPORTED_1000baseT_Full);
7228 if (!(bp->link_params.speed_cap_mask &
7229 PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
7230 bp->port.supported &= ~SUPPORTED_2500baseX_Full;
7232 if (!(bp->link_params.speed_cap_mask &
7233 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
7234 bp->port.supported &= ~SUPPORTED_10000baseT_Full;
7236 BNX2X_DEV_INFO("supported 0x%x\n", bp->port.supported);
7239 static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
7241 bp->link_params.req_duplex = DUPLEX_FULL;
7243 switch (bp->port.link_config & PORT_FEATURE_LINK_SPEED_MASK) {
7244 case PORT_FEATURE_LINK_SPEED_AUTO:
7245 if (bp->port.supported & SUPPORTED_Autoneg) {
7246 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
7247 bp->port.advertising = bp->port.supported;
7248 } else {
7249 u32 ext_phy_type =
7250 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7252 if ((ext_phy_type ==
7253 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
7254 (ext_phy_type ==
7255 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706)) {
7256 /* force 10G, no AN */
7257 bp->link_params.req_line_speed = SPEED_10000;
7258 bp->port.advertising =
7259 (ADVERTISED_10000baseT_Full |
7260 ADVERTISED_FIBRE);
7261 break;
7263 BNX2X_ERR("NVRAM config error. "
7264 "Invalid link_config 0x%x"
7265 " Autoneg not supported\n",
7266 bp->port.link_config);
7267 return;
7269 break;
7271 case PORT_FEATURE_LINK_SPEED_10M_FULL:
7272 if (bp->port.supported & SUPPORTED_10baseT_Full) {
7273 bp->link_params.req_line_speed = SPEED_10;
7274 bp->port.advertising = (ADVERTISED_10baseT_Full |
7275 ADVERTISED_TP);
7276 } else {
7277 BNX2X_ERR("NVRAM config error. "
7278 "Invalid link_config 0x%x"
7279 " speed_cap_mask 0x%x\n",
7280 bp->port.link_config,
7281 bp->link_params.speed_cap_mask);
7282 return;
7284 break;
7286 case PORT_FEATURE_LINK_SPEED_10M_HALF:
7287 if (bp->port.supported & SUPPORTED_10baseT_Half) {
7288 bp->link_params.req_line_speed = SPEED_10;
7289 bp->link_params.req_duplex = DUPLEX_HALF;
7290 bp->port.advertising = (ADVERTISED_10baseT_Half |
7291 ADVERTISED_TP);
7292 } else {
7293 BNX2X_ERR("NVRAM config error. "
7294 "Invalid link_config 0x%x"
7295 " speed_cap_mask 0x%x\n",
7296 bp->port.link_config,
7297 bp->link_params.speed_cap_mask);
7298 return;
7300 break;
7302 case PORT_FEATURE_LINK_SPEED_100M_FULL:
7303 if (bp->port.supported & SUPPORTED_100baseT_Full) {
7304 bp->link_params.req_line_speed = SPEED_100;
7305 bp->port.advertising = (ADVERTISED_100baseT_Full |
7306 ADVERTISED_TP);
7307 } else {
7308 BNX2X_ERR("NVRAM config error. "
7309 "Invalid link_config 0x%x"
7310 " speed_cap_mask 0x%x\n",
7311 bp->port.link_config,
7312 bp->link_params.speed_cap_mask);
7313 return;
7315 break;
7317 case PORT_FEATURE_LINK_SPEED_100M_HALF:
7318 if (bp->port.supported & SUPPORTED_100baseT_Half) {
7319 bp->link_params.req_line_speed = SPEED_100;
7320 bp->link_params.req_duplex = DUPLEX_HALF;
7321 bp->port.advertising = (ADVERTISED_100baseT_Half |
7322 ADVERTISED_TP);
7323 } else {
7324 BNX2X_ERR("NVRAM config error. "
7325 "Invalid link_config 0x%x"
7326 " speed_cap_mask 0x%x\n",
7327 bp->port.link_config,
7328 bp->link_params.speed_cap_mask);
7329 return;
7331 break;
7333 case PORT_FEATURE_LINK_SPEED_1G:
7334 if (bp->port.supported & SUPPORTED_1000baseT_Full) {
7335 bp->link_params.req_line_speed = SPEED_1000;
7336 bp->port.advertising = (ADVERTISED_1000baseT_Full |
7337 ADVERTISED_TP);
7338 } else {
7339 BNX2X_ERR("NVRAM config error. "
7340 "Invalid link_config 0x%x"
7341 " speed_cap_mask 0x%x\n",
7342 bp->port.link_config,
7343 bp->link_params.speed_cap_mask);
7344 return;
7346 break;
7348 case PORT_FEATURE_LINK_SPEED_2_5G:
7349 if (bp->port.supported & SUPPORTED_2500baseX_Full) {
7350 bp->link_params.req_line_speed = SPEED_2500;
7351 bp->port.advertising = (ADVERTISED_2500baseX_Full |
7352 ADVERTISED_TP);
7353 } else {
7354 BNX2X_ERR("NVRAM config error. "
7355 "Invalid link_config 0x%x"
7356 " speed_cap_mask 0x%x\n",
7357 bp->port.link_config,
7358 bp->link_params.speed_cap_mask);
7359 return;
7361 break;
7363 case PORT_FEATURE_LINK_SPEED_10G_CX4:
7364 case PORT_FEATURE_LINK_SPEED_10G_KX4:
7365 case PORT_FEATURE_LINK_SPEED_10G_KR:
7366 if (bp->port.supported & SUPPORTED_10000baseT_Full) {
7367 bp->link_params.req_line_speed = SPEED_10000;
7368 bp->port.advertising = (ADVERTISED_10000baseT_Full |
7369 ADVERTISED_FIBRE);
7370 } else {
7371 BNX2X_ERR("NVRAM config error. "
7372 "Invalid link_config 0x%x"
7373 " speed_cap_mask 0x%x\n",
7374 bp->port.link_config,
7375 bp->link_params.speed_cap_mask);
7376 return;
7378 break;
7380 default:
7381 BNX2X_ERR("NVRAM config error. "
7382 "BAD link speed link_config 0x%x\n",
7383 bp->port.link_config);
7384 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
7385 bp->port.advertising = bp->port.supported;
7386 break;
7389 bp->link_params.req_flow_ctrl = (bp->port.link_config &
7390 PORT_FEATURE_FLOW_CONTROL_MASK);
7391 if ((bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
7392 !(bp->port.supported & SUPPORTED_Autoneg))
7393 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
7395 BNX2X_DEV_INFO("req_line_speed %d req_duplex %d req_flow_ctrl 0x%x"
7396 " advertising 0x%x\n",
7397 bp->link_params.req_line_speed,
7398 bp->link_params.req_duplex,
7399 bp->link_params.req_flow_ctrl, bp->port.advertising);
7402 static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
7404 int port = BP_PORT(bp);
7405 u32 val, val2;
7407 bp->link_params.bp = bp;
7408 bp->link_params.port = port;
7410 bp->link_params.serdes_config =
7411 SHMEM_RD(bp, dev_info.port_hw_config[port].serdes_config);
7412 bp->link_params.lane_config =
7413 SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
7414 bp->link_params.ext_phy_config =
7415 SHMEM_RD(bp,
7416 dev_info.port_hw_config[port].external_phy_config);
7417 bp->link_params.speed_cap_mask =
7418 SHMEM_RD(bp,
7419 dev_info.port_hw_config[port].speed_capability_mask);
7421 bp->port.link_config =
7422 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
7424 BNX2X_DEV_INFO("serdes_config 0x%08x lane_config 0x%08x\n"
7425 KERN_INFO " ext_phy_config 0x%08x speed_cap_mask 0x%08x"
7426 " link_config 0x%08x\n",
7427 bp->link_params.serdes_config,
7428 bp->link_params.lane_config,
7429 bp->link_params.ext_phy_config,
7430 bp->link_params.speed_cap_mask, bp->port.link_config);
7432 bp->link_params.switch_cfg = (bp->port.link_config &
7433 PORT_FEATURE_CONNECTED_SWITCH_MASK);
7434 bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);
7436 bnx2x_link_settings_requested(bp);
7438 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
7439 val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
7440 bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
7441 bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
7442 bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
7443 bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
7444 bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
7445 bp->dev->dev_addr[5] = (u8)(val & 0xff);
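/* Example of the unpacking above (hypothetical values): mac_upper
 * 0x00000010 and mac_lower 0x18aabbcc yield the station address
 * 00:10:18:aa:bb:cc; the upper word carries the two most significant
 * bytes and the lower word the remaining four.
 */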
7446 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
7447 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
7450 static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
7452 int func = BP_FUNC(bp);
7453 u32 val, val2;
7454 int rc = 0;
7456 bnx2x_get_common_hwinfo(bp);
7458 bp->e1hov = 0;
7459 bp->e1hmf = 0;
7460 if (CHIP_IS_E1H(bp)) {
7461 bp->mf_config =
7462 SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
7464 val = (SHMEM_RD(bp, mf_cfg.func_mf_config[func].e1hov_tag) &
7465 FUNC_MF_CFG_E1HOV_TAG_MASK);
7466 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
7468 bp->e1hov = val;
7469 bp->e1hmf = 1;
7470 BNX2X_DEV_INFO("MF mode E1HOV for func %d is %d "
7471 "(0x%04x)\n",
7472 func, bp->e1hov, bp->e1hov);
7473 } else {
7474 BNX2X_DEV_INFO("Single function mode\n");
7475 if (BP_E1HVN(bp)) {
7476 BNX2X_ERR("!!! No valid E1HOV for func %d,"
7477 " aborting\n", func);
7478 rc = -EPERM;
7483 if (!BP_NOMCP(bp)) {
7484 bnx2x_get_port_hwinfo(bp);
7486 bp->fw_seq = (SHMEM_RD(bp, func_mb[func].drv_mb_header) &
7487 DRV_MSG_SEQ_NUMBER_MASK);
7488 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
7491 if (IS_E1HMF(bp)) {
7492 val2 = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_upper);
7493 val = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_lower);
7494 if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
7495 (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
7496 bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
7497 bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
7498 bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
7499 bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
7500 bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
7501 bp->dev->dev_addr[5] = (u8)(val & 0xff);
7502 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
7503 ETH_ALEN);
7504 memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
7505 ETH_ALEN);
7508 return rc;
7511 if (BP_NOMCP(bp)) {
7512 /* only supposed to happen on emulation/FPGA */
7513 BNX2X_ERR("warning random MAC workaround active\n");
7514 random_ether_addr(bp->dev->dev_addr);
7515 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
7518 return rc;
7521 static int __devinit bnx2x_init_bp(struct bnx2x *bp)
7523 int func = BP_FUNC(bp);
7524 int rc;
7526 /* Disable interrupt handling until HW is initialized */
7527 atomic_set(&bp->intr_sem, 1);
7529 mutex_init(&bp->port.phy_mutex);
7531 INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
7532 INIT_WORK(&bp->reset_task, bnx2x_reset_task);
7534 rc = bnx2x_get_hwinfo(bp);
7536 /* need to reset chip if undi was active */
7537 if (!BP_NOMCP(bp))
7538 bnx2x_undi_unload(bp);
7540 if (CHIP_REV_IS_FPGA(bp))
7541 printk(KERN_ERR PFX "FPGA detected\n");
7543 if (BP_NOMCP(bp) && (func == 0))
7544 printk(KERN_ERR PFX
7545 "MCP disabled, must load devices in order!\n");
7547 /* Set TPA flags */
7548 if (disable_tpa) {
7549 bp->flags &= ~TPA_ENABLE_FLAG;
7550 bp->dev->features &= ~NETIF_F_LRO;
7551 } else {
7552 bp->flags |= TPA_ENABLE_FLAG;
7553 bp->dev->features |= NETIF_F_LRO;
7557 bp->tx_ring_size = MAX_TX_AVAIL;
7558 bp->rx_ring_size = MAX_RX_AVAIL;
7560 bp->rx_csum = 1;
7561 bp->rx_offset = 0;
7563 bp->tx_ticks = 50;
7564 bp->rx_ticks = 25;
7566 bp->timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
7567 bp->current_interval = (poll ? poll : bp->timer_interval);
7569 init_timer(&bp->timer);
7570 bp->timer.expires = jiffies + bp->current_interval;
7571 bp->timer.data = (unsigned long) bp;
7572 bp->timer.function = bnx2x_timer;
7574 return rc;
7578 * ethtool service functions
7581 /* All ethtool functions called with rtnl_lock */
7583 static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7585 struct bnx2x *bp = netdev_priv(dev);
7587 cmd->supported = bp->port.supported;
7588 cmd->advertising = bp->port.advertising;
7590 if (netif_carrier_ok(dev)) {
7591 cmd->speed = bp->link_vars.line_speed;
7592 cmd->duplex = bp->link_vars.duplex;
7593 } else {
7594 cmd->speed = bp->link_params.req_line_speed;
7595 cmd->duplex = bp->link_params.req_duplex;
7597 if (IS_E1HMF(bp)) {
7598 u16 vn_max_rate;
7600 vn_max_rate = ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
7601 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
7602 if (vn_max_rate < cmd->speed)
7603 cmd->speed = vn_max_rate;
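/* The MAX_BW field in mf_config is in units of 100 Mbps, so e.g. a
 * raw value of 50 caps the speed reported here at 5000 (5 Gbps) even
 * if the physical link is up at 10G.
 */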
7606 if (bp->link_params.switch_cfg == SWITCH_CFG_10G) {
7607 u32 ext_phy_type =
7608 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7610 switch (ext_phy_type) {
7611 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
7612 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
7613 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
7614 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
7615 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
7616 cmd->port = PORT_FIBRE;
7617 break;
7619 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
7620 cmd->port = PORT_TP;
7621 break;
7623 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
7624 BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
7625 bp->link_params.ext_phy_config);
7626 break;
7628 default:
7629 DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
7630 bp->link_params.ext_phy_config);
7631 break;
7633 } else
7634 cmd->port = PORT_TP;
7636 cmd->phy_address = bp->port.phy_addr;
7637 cmd->transceiver = XCVR_INTERNAL;
7639 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
7640 cmd->autoneg = AUTONEG_ENABLE;
7641 else
7642 cmd->autoneg = AUTONEG_DISABLE;
7644 cmd->maxtxpkt = 0;
7645 cmd->maxrxpkt = 0;
7647 DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
7648 DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
7649 DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
7650 DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
7651 cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
7652 cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
7653 cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
7655 return 0;
7658 static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7660 struct bnx2x *bp = netdev_priv(dev);
7661 u32 advertising;
7663 if (IS_E1HMF(bp))
7664 return 0;
7666 DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
7667 DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
7668 DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
7669 DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
7670 cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
7671 cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
7672 cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
7674 if (cmd->autoneg == AUTONEG_ENABLE) {
7675 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
7676 DP(NETIF_MSG_LINK, "Autoneg not supported\n");
7677 return -EINVAL;
7680 /* advertise the requested speed and duplex if supported */
7681 cmd->advertising &= bp->port.supported;
7683 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
7684 bp->link_params.req_duplex = DUPLEX_FULL;
7685 bp->port.advertising |= (ADVERTISED_Autoneg |
7686 cmd->advertising);
7688 } else { /* forced speed */
7689 /* advertise the requested speed and duplex if supported */
7690 switch (cmd->speed) {
7691 case SPEED_10:
7692 if (cmd->duplex == DUPLEX_FULL) {
7693 if (!(bp->port.supported &
7694 SUPPORTED_10baseT_Full)) {
7695 DP(NETIF_MSG_LINK,
7696 "10M full not supported\n");
7697 return -EINVAL;
7700 advertising = (ADVERTISED_10baseT_Full |
7701 ADVERTISED_TP);
7702 } else {
7703 if (!(bp->port.supported &
7704 SUPPORTED_10baseT_Half)) {
7705 DP(NETIF_MSG_LINK,
7706 "10M half not supported\n");
7707 return -EINVAL;
7710 advertising = (ADVERTISED_10baseT_Half |
7711 ADVERTISED_TP);
7713 break;
7715 case SPEED_100:
7716 if (cmd->duplex == DUPLEX_FULL) {
7717 if (!(bp->port.supported &
7718 SUPPORTED_100baseT_Full)) {
7719 DP(NETIF_MSG_LINK,
7720 "100M full not supported\n");
7721 return -EINVAL;
7724 advertising = (ADVERTISED_100baseT_Full |
7725 ADVERTISED_TP);
7726 } else {
7727 if (!(bp->port.supported &
7728 SUPPORTED_100baseT_Half)) {
7729 DP(NETIF_MSG_LINK,
7730 "100M half not supported\n");
7731 return -EINVAL;
7734 advertising = (ADVERTISED_100baseT_Half |
7735 ADVERTISED_TP);
7737 break;
7739 case SPEED_1000:
7740 if (cmd->duplex != DUPLEX_FULL) {
7741 DP(NETIF_MSG_LINK, "1G half not supported\n");
7742 return -EINVAL;
7745 if (!(bp->port.supported & SUPPORTED_1000baseT_Full)) {
7746 DP(NETIF_MSG_LINK, "1G full not supported\n");
7747 return -EINVAL;
7750 advertising = (ADVERTISED_1000baseT_Full |
7751 ADVERTISED_TP);
7752 break;
7754 case SPEED_2500:
7755 if (cmd->duplex != DUPLEX_FULL) {
7756 DP(NETIF_MSG_LINK,
7757 "2.5G half not supported\n");
7758 return -EINVAL;
7761 if (!(bp->port.supported & SUPPORTED_2500baseX_Full)) {
7762 DP(NETIF_MSG_LINK,
7763 "2.5G full not supported\n");
7764 return -EINVAL;
7767 advertising = (ADVERTISED_2500baseX_Full |
7768 ADVERTISED_TP);
7769 break;
7771 case SPEED_10000:
7772 if (cmd->duplex != DUPLEX_FULL) {
7773 DP(NETIF_MSG_LINK, "10G half not supported\n");
7774 return -EINVAL;
7777 if (!(bp->port.supported & SUPPORTED_10000baseT_Full)) {
7778 DP(NETIF_MSG_LINK, "10G full not supported\n");
7779 return -EINVAL;
7782 advertising = (ADVERTISED_10000baseT_Full |
7783 ADVERTISED_FIBRE);
7784 break;
7786 default:
7787 DP(NETIF_MSG_LINK, "Unsupported speed\n");
7788 return -EINVAL;
7791 bp->link_params.req_line_speed = cmd->speed;
7792 bp->link_params.req_duplex = cmd->duplex;
7793 bp->port.advertising = advertising;
7796 DP(NETIF_MSG_LINK, "req_line_speed %d\n"
7797 DP_LEVEL " req_duplex %d advertising 0x%x\n",
7798 bp->link_params.req_line_speed, bp->link_params.req_duplex,
7799 bp->port.advertising);
7801 if (netif_running(dev)) {
7802 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
7803 bnx2x_link_set(bp);
7806 return 0;
7809 #define PHY_FW_VER_LEN 10
7811 static void bnx2x_get_drvinfo(struct net_device *dev,
7812 struct ethtool_drvinfo *info)
7814 struct bnx2x *bp = netdev_priv(dev);
7815 u8 phy_fw_ver[PHY_FW_VER_LEN];
7817 strcpy(info->driver, DRV_MODULE_NAME);
7818 strcpy(info->version, DRV_MODULE_VERSION);
7820 phy_fw_ver[0] = '\0';
7821 if (bp->port.pmf) {
7822 bnx2x_acquire_phy_lock(bp);
7823 bnx2x_get_ext_phy_fw_version(&bp->link_params,
7824 (bp->state != BNX2X_STATE_CLOSED),
7825 phy_fw_ver, PHY_FW_VER_LEN);
7826 bnx2x_release_phy_lock(bp);
7829 snprintf(info->fw_version, 32, "BC:%d.%d.%d%s%s",
7830 (bp->common.bc_ver & 0xff0000) >> 16,
7831 (bp->common.bc_ver & 0xff00) >> 8,
7832 (bp->common.bc_ver & 0xff),
7833 ((phy_fw_ver[0] != '\0') ? " PHY:" : ""), phy_fw_ver);
7834 strcpy(info->bus_info, pci_name(bp->pdev));
7835 info->n_stats = BNX2X_NUM_STATS;
7836 info->testinfo_len = BNX2X_NUM_TESTS;
7837 info->eedump_len = bp->common.flash_size;
7838 info->regdump_len = 0;
7841 static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7843 struct bnx2x *bp = netdev_priv(dev);
7845 if (bp->flags & NO_WOL_FLAG) {
7846 wol->supported = 0;
7847 wol->wolopts = 0;
7848 } else {
7849 wol->supported = WAKE_MAGIC;
7850 if (bp->wol)
7851 wol->wolopts = WAKE_MAGIC;
7852 else
7853 wol->wolopts = 0;
7855 memset(&wol->sopass, 0, sizeof(wol->sopass));
7858 static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7860 struct bnx2x *bp = netdev_priv(dev);
7862 if (wol->wolopts & ~WAKE_MAGIC)
7863 return -EINVAL;
7865 if (wol->wolopts & WAKE_MAGIC) {
7866 if (bp->flags & NO_WOL_FLAG)
7867 return -EINVAL;
7869 bp->wol = 1;
7870 } else
7871 bp->wol = 0;
7873 return 0;
7876 static u32 bnx2x_get_msglevel(struct net_device *dev)
7878 struct bnx2x *bp = netdev_priv(dev);
7880 return bp->msglevel;
7883 static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
7885 struct bnx2x *bp = netdev_priv(dev);
7887 if (capable(CAP_NET_ADMIN))
7888 bp->msglevel = level;
7891 static int bnx2x_nway_reset(struct net_device *dev)
7893 struct bnx2x *bp = netdev_priv(dev);
7895 if (!bp->port.pmf)
7896 return 0;
7898 if (netif_running(dev)) {
7899 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
7900 bnx2x_link_set(bp);
7903 return 0;
7906 static int bnx2x_get_eeprom_len(struct net_device *dev)
7908 struct bnx2x *bp = netdev_priv(dev);
7910 return bp->common.flash_size;
7913 static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
7915 int port = BP_PORT(bp);
7916 int count, i;
7917 u32 val = 0;
7919 /* adjust timeout for emulation/FPGA */
7920 count = NVRAM_TIMEOUT_COUNT;
7921 if (CHIP_REV_IS_SLOW(bp))
7922 count *= 100;
7924 /* request access to nvram interface */
7925 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
7926 (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));
7928 for (i = 0; i < count*10; i++) {
7929 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
7930 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))
7931 break;
7933 udelay(5);
7936 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
7937 DP(BNX2X_MSG_NVM, "cannot get access to nvram interface\n");
7938 return -EBUSY;
7941 return 0;
7944 static int bnx2x_release_nvram_lock(struct bnx2x *bp)
7946 int port = BP_PORT(bp);
7947 int count, i;
7948 u32 val = 0;
7950 /* adjust timeout for emulation/FPGA */
7951 count = NVRAM_TIMEOUT_COUNT;
7952 if (CHIP_REV_IS_SLOW(bp))
7953 count *= 100;
7955 /* relinquish nvram interface */
7956 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
7957 (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));
7959 for (i = 0; i < count*10; i++) {
7960 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
7961 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)))
7962 break;
7964 udelay(5);
7967 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
7968 DP(BNX2X_MSG_NVM, "cannot free access to nvram interface\n");
7969 return -EBUSY;
7972 return 0;
7975 static void bnx2x_enable_nvram_access(struct bnx2x *bp)
7977 u32 val;
7979 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
7981 /* enable both bits, even on read */
7982 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
7983 (val | MCPR_NVM_ACCESS_ENABLE_EN |
7984 MCPR_NVM_ACCESS_ENABLE_WR_EN));
7987 static void bnx2x_disable_nvram_access(struct bnx2x *bp)
7989 u32 val;
7991 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
7993 /* disable both bits, even after read */
7994 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
7995 (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
7996 MCPR_NVM_ACCESS_ENABLE_WR_EN)));
7999 static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, u32 *ret_val,
8000 u32 cmd_flags)
8002 int count, i, rc;
8003 u32 val;
8005 /* build the command word */
8006 cmd_flags |= MCPR_NVM_COMMAND_DOIT;
8008 /* need to clear DONE bit separately */
8009 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
8011 /* address of the NVRAM to read from */
8012 REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
8013 (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
8015 /* issue a read command */
8016 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
8018 /* adjust timeout for emulation/FPGA */
8019 count = NVRAM_TIMEOUT_COUNT;
8020 if (CHIP_REV_IS_SLOW(bp))
8021 count *= 100;
8023 /* wait for completion */
8024 *ret_val = 0;
8025 rc = -EBUSY;
8026 for (i = 0; i < count; i++) {
8027 udelay(5);
8028 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
8030 if (val & MCPR_NVM_COMMAND_DONE) {
8031 val = REG_RD(bp, MCP_REG_MCPR_NVM_READ);
8032 /* we read nvram data in cpu order,
8033 * but ethtool sees it as an array of bytes;
8034 * converting to big-endian does the work */
8035 val = cpu_to_be32(val);
8036 *ret_val = val;
8037 rc = 0;
8038 break;
8042 return rc;
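/* Illustrative sketch (not driver code): the cpu_to_be32() above means each
 * dword is stored into the caller's buffer most-significant byte first,
 * regardless of host endianness. A minimal host-side model of that
 * conversion, assuming only <stdint.h>:
 */
#include <stdint.h>

/* store a 32-bit NVRAM word into a byte buffer most-significant byte first,
 * mimicking "val = cpu_to_be32(val); memcpy(ret_buf, &val, 4);" */
static void example_store_nvram_word(uint8_t *ret_buf, uint32_t val)
{
	ret_buf[0] = (uint8_t)(val >> 24);
	ret_buf[1] = (uint8_t)(val >> 16);
	ret_buf[2] = (uint8_t)(val >> 8);
	ret_buf[3] = (uint8_t)val;
}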
8045 static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
8046 int buf_size)
8048 int rc;
8049 u32 cmd_flags;
8050 u32 val;
8052 if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
8053 DP(BNX2X_MSG_NVM,
8054 "Invalid parameter: offset 0x%x buf_size 0x%x\n",
8055 offset, buf_size);
8056 return -EINVAL;
8059 if (offset + buf_size > bp->common.flash_size) {
8060 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
8061 " buf_size (0x%x) > flash_size (0x%x)\n",
8062 offset, buf_size, bp->common.flash_size);
8063 return -EINVAL;
8066 /* request access to nvram interface */
8067 rc = bnx2x_acquire_nvram_lock(bp);
8068 if (rc)
8069 return rc;
8071 /* enable access to nvram interface */
8072 bnx2x_enable_nvram_access(bp);
8074 /* read the first word(s) */
8075 cmd_flags = MCPR_NVM_COMMAND_FIRST;
8076 while ((buf_size > sizeof(u32)) && (rc == 0)) {
8077 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
8078 memcpy(ret_buf, &val, 4);
8080 /* advance to the next dword */
8081 offset += sizeof(u32);
8082 ret_buf += sizeof(u32);
8083 buf_size -= sizeof(u32);
8084 cmd_flags = 0;
8087 if (rc == 0) {
8088 cmd_flags |= MCPR_NVM_COMMAND_LAST;
8089 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
8090 memcpy(ret_buf, &val, 4);
8093 /* disable access to nvram interface */
8094 bnx2x_disable_nvram_access(bp);
8095 bnx2x_release_nvram_lock(bp);
8097 return rc;
8100 static int bnx2x_get_eeprom(struct net_device *dev,
8101 struct ethtool_eeprom *eeprom, u8 *eebuf)
8103 struct bnx2x *bp = netdev_priv(dev);
8104 int rc;
8106 DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
8107 DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
8108 eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
8109 eeprom->len, eeprom->len);
8111 /* parameters already validated in ethtool_get_eeprom */
8113 rc = bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
8115 return rc;
8118 static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
8119 u32 cmd_flags)
8121 int count, i, rc;
8123 /* build the command word */
8124 cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR;
8126 /* need to clear DONE bit separately */
8127 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
8129 /* write the data */
8130 REG_WR(bp, MCP_REG_MCPR_NVM_WRITE, val);
8132 /* address of the NVRAM to write to */
8133 REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
8134 (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
8136 /* issue the write command */
8137 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
8139 /* adjust timeout for emulation/FPGA */
8140 count = NVRAM_TIMEOUT_COUNT;
8141 if (CHIP_REV_IS_SLOW(bp))
8142 count *= 100;
8144 /* wait for completion */
8145 rc = -EBUSY;
8146 for (i = 0; i < count; i++) {
8147 udelay(5);
8148 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
8149 if (val & MCPR_NVM_COMMAND_DONE) {
8150 rc = 0;
8151 break;
8155 return rc;
8158 #define BYTE_OFFSET(offset) (8 * (offset & 0x03))
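/* Illustrative sketch (not driver code): BYTE_OFFSET() turns the low two
 * address bits into a bit shift, so a single-byte write becomes a
 * read-modify-write of the naturally aligned dword. For example, offset
 * 0x1c6 aligns down to 0x1c4 and the byte occupies bits 16..23. A plain C
 * model of the merge (byte-order details aside), assuming only <stdint.h>:
 */
#include <stdint.h>

static uint32_t example_merge_byte(uint32_t dword, uint32_t offset, uint8_t b)
{
	uint32_t shift = 8 * (offset & 0x03);		/* BYTE_OFFSET(offset) */

	dword &= ~(0xffu << shift);			/* clear the old byte */
	dword |= ((uint32_t)b << shift);		/* insert the new one */
	return dword;
}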
8160 static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
8161 int buf_size)
8163 int rc;
8164 u32 cmd_flags;
8165 u32 align_offset;
8166 u32 val;
8168 if (offset + buf_size > bp->common.flash_size) {
8169 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
8170 " buf_size (0x%x) > flash_size (0x%x)\n",
8171 offset, buf_size, bp->common.flash_size);
8172 return -EINVAL;
8175 /* request access to nvram interface */
8176 rc = bnx2x_acquire_nvram_lock(bp);
8177 if (rc)
8178 return rc;
8180 /* enable access to nvram interface */
8181 bnx2x_enable_nvram_access(bp);
8183 cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
8184 align_offset = (offset & ~0x03);
8185 rc = bnx2x_nvram_read_dword(bp, align_offset, &val, cmd_flags);
8187 if (rc == 0) {
8188 val &= ~(0xff << BYTE_OFFSET(offset));
8189 val |= (*data_buf << BYTE_OFFSET(offset));
8191 /* nvram data is returned as an array of bytes;
8192 * convert it back to cpu order */
8193 val = be32_to_cpu(val);
8195 rc = bnx2x_nvram_write_dword(bp, align_offset, val,
8196 cmd_flags);
8199 /* disable access to nvram interface */
8200 bnx2x_disable_nvram_access(bp);
8201 bnx2x_release_nvram_lock(bp);
8203 return rc;
8206 static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
8207 int buf_size)
8209 int rc;
8210 u32 cmd_flags;
8211 u32 val;
8212 u32 written_so_far;
8214 if (buf_size == 1) /* ethtool */
8215 return bnx2x_nvram_write1(bp, offset, data_buf, buf_size);
8217 if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
8218 DP(BNX2X_MSG_NVM,
8219 "Invalid parameter: offset 0x%x buf_size 0x%x\n",
8220 offset, buf_size);
8221 return -EINVAL;
8224 if (offset + buf_size > bp->common.flash_size) {
8225 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
8226 " buf_size (0x%x) > flash_size (0x%x)\n",
8227 offset, buf_size, bp->common.flash_size);
8228 return -EINVAL;
8231 /* request access to nvram interface */
8232 rc = bnx2x_acquire_nvram_lock(bp);
8233 if (rc)
8234 return rc;
8236 /* enable access to nvram interface */
8237 bnx2x_enable_nvram_access(bp);
8239 written_so_far = 0;
8240 cmd_flags = MCPR_NVM_COMMAND_FIRST;
8241 while ((written_so_far < buf_size) && (rc == 0)) {
8242 if (written_so_far == (buf_size - sizeof(u32)))
8243 cmd_flags |= MCPR_NVM_COMMAND_LAST;
8244 else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0)
8245 cmd_flags |= MCPR_NVM_COMMAND_LAST;
8246 else if ((offset % NVRAM_PAGE_SIZE) == 0)
8247 cmd_flags |= MCPR_NVM_COMMAND_FIRST;
8249 memcpy(&val, data_buf, 4);
8251 rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);
8253 /* advance to the next dword */
8254 offset += sizeof(u32);
8255 data_buf += sizeof(u32);
8256 written_so_far += sizeof(u32);
8257 cmd_flags = 0;
8260 /* disable access to nvram interface */
8261 bnx2x_disable_nvram_access(bp);
8262 bnx2x_release_nvram_lock(bp);
8264 return rc;
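/* Illustrative sketch (not driver code): the flag pattern the write loop
 * above produces for a multi-dword write. EX_PAGE_SIZE, EX_FIRST and EX_LAST
 * are stand-ins for NVRAM_PAGE_SIZE and the MCPR_NVM_COMMAND_* bits, chosen
 * here only for illustration.
 */
#include <stdint.h>
#include <stdio.h>

#define EX_PAGE_SIZE	256u
#define EX_FIRST	0x1u
#define EX_LAST		0x2u

static void example_write_flags(uint32_t offset, uint32_t buf_size)
{
	uint32_t written = 0, flags = EX_FIRST;

	while (written < buf_size) {
		if (written == buf_size - 4)
			flags |= EX_LAST;	/* last dword of the buffer */
		else if (((offset + 4) % EX_PAGE_SIZE) == 0)
			flags |= EX_LAST;	/* last dword of this page */
		else if ((offset % EX_PAGE_SIZE) == 0)
			flags |= EX_FIRST;	/* first dword of a new page */

		printf("offset 0x%x flags 0x%x\n", offset, flags);

		offset += 4;
		written += 4;
		flags = 0;
	}
}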
8267 static int bnx2x_set_eeprom(struct net_device *dev,
8268 struct ethtool_eeprom *eeprom, u8 *eebuf)
8270 struct bnx2x *bp = netdev_priv(dev);
8271 int rc;
8273 if (!netif_running(dev))
8274 return -EAGAIN;
8276 DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
8277 DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
8278 eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
8279 eeprom->len, eeprom->len);
8281 /* parameters already validated in ethtool_set_eeprom */
8283 /* If the magic number is PHY (0x00504859, ASCII "PHY") upgrade the PHY FW */
8284 if (eeprom->magic == 0x00504859)
8285 if (bp->port.pmf) {
8287 bnx2x_acquire_phy_lock(bp);
8288 rc = bnx2x_flash_download(bp, BP_PORT(bp),
8289 bp->link_params.ext_phy_config,
8290 (bp->state != BNX2X_STATE_CLOSED),
8291 eebuf, eeprom->len);
8292 if ((bp->state == BNX2X_STATE_OPEN) ||
8293 (bp->state == BNX2X_STATE_DISABLED)) {
8294 rc |= bnx2x_link_reset(&bp->link_params,
8295 &bp->link_vars);
8296 rc |= bnx2x_phy_init(&bp->link_params,
8297 &bp->link_vars);
8299 bnx2x_release_phy_lock(bp);
8301 } else /* Only the PMF can access the PHY */
8302 return -EINVAL;
8303 else
8304 rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
8306 return rc;
8309 static int bnx2x_get_coalesce(struct net_device *dev,
8310 struct ethtool_coalesce *coal)
8312 struct bnx2x *bp = netdev_priv(dev);
8314 memset(coal, 0, sizeof(struct ethtool_coalesce));
8316 coal->rx_coalesce_usecs = bp->rx_ticks;
8317 coal->tx_coalesce_usecs = bp->tx_ticks;
8319 return 0;
8322 static int bnx2x_set_coalesce(struct net_device *dev,
8323 struct ethtool_coalesce *coal)
8325 struct bnx2x *bp = netdev_priv(dev);
8327 bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
8328 if (bp->rx_ticks > 3000)
8329 bp->rx_ticks = 3000;
8331 bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
8332 if (bp->tx_ticks > 0x3000)
8333 bp->tx_ticks = 0x3000;
8335 if (netif_running(dev))
8336 bnx2x_update_coalesce(bp);
8338 return 0;
8341 static void bnx2x_get_ringparam(struct net_device *dev,
8342 struct ethtool_ringparam *ering)
8344 struct bnx2x *bp = netdev_priv(dev);
8346 ering->rx_max_pending = MAX_RX_AVAIL;
8347 ering->rx_mini_max_pending = 0;
8348 ering->rx_jumbo_max_pending = 0;
8350 ering->rx_pending = bp->rx_ring_size;
8351 ering->rx_mini_pending = 0;
8352 ering->rx_jumbo_pending = 0;
8354 ering->tx_max_pending = MAX_TX_AVAIL;
8355 ering->tx_pending = bp->tx_ring_size;
8358 static int bnx2x_set_ringparam(struct net_device *dev,
8359 struct ethtool_ringparam *ering)
8361 struct bnx2x *bp = netdev_priv(dev);
8362 int rc = 0;
8364 if ((ering->rx_pending > MAX_RX_AVAIL) ||
8365 (ering->tx_pending > MAX_TX_AVAIL) ||
8366 (ering->tx_pending <= MAX_SKB_FRAGS + 4))
8367 return -EINVAL;
8369 bp->rx_ring_size = ering->rx_pending;
8370 bp->tx_ring_size = ering->tx_pending;
8372 if (netif_running(dev)) {
8373 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8374 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
8377 return rc;
8380 static void bnx2x_get_pauseparam(struct net_device *dev,
8381 struct ethtool_pauseparam *epause)
8383 struct bnx2x *bp = netdev_priv(dev);
8385 epause->autoneg = (bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
8386 (bp->link_params.req_line_speed == SPEED_AUTO_NEG);
8388 epause->rx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) ==
8389 BNX2X_FLOW_CTRL_RX);
8390 epause->tx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX) ==
8391 BNX2X_FLOW_CTRL_TX);
8393 DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
8394 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
8395 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
8398 static int bnx2x_set_pauseparam(struct net_device *dev,
8399 struct ethtool_pauseparam *epause)
8401 struct bnx2x *bp = netdev_priv(dev);
8403 if (IS_E1HMF(bp))
8404 return 0;
8406 DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
8407 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
8408 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
8410 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
8412 if (epause->rx_pause)
8413 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_RX;
8415 if (epause->tx_pause)
8416 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_TX;
8418 if (bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO)
8419 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
8421 if (epause->autoneg) {
8422 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
8423 DP(NETIF_MSG_LINK, "autoneg not supported\n");
8424 return -EINVAL;
8427 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
8428 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
8431 DP(NETIF_MSG_LINK,
8432 "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl);
8434 if (netif_running(dev)) {
8435 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
8436 bnx2x_link_set(bp);
8439 return 0;
8442 static int bnx2x_set_flags(struct net_device *dev, u32 data)
8444 struct bnx2x *bp = netdev_priv(dev);
8445 int changed = 0;
8446 int rc = 0;
8448 /* TPA requires Rx CSUM offloading */
8449 if ((data & ETH_FLAG_LRO) && bp->rx_csum) {
8450 if (!(dev->features & NETIF_F_LRO)) {
8451 dev->features |= NETIF_F_LRO;
8452 bp->flags |= TPA_ENABLE_FLAG;
8453 changed = 1;
8456 } else if (dev->features & NETIF_F_LRO) {
8457 dev->features &= ~NETIF_F_LRO;
8458 bp->flags &= ~TPA_ENABLE_FLAG;
8459 changed = 1;
8462 if (changed && netif_running(dev)) {
8463 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8464 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
8467 return rc;
8470 static u32 bnx2x_get_rx_csum(struct net_device *dev)
8472 struct bnx2x *bp = netdev_priv(dev);
8474 return bp->rx_csum;
8477 static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
8479 struct bnx2x *bp = netdev_priv(dev);
8480 int rc = 0;
8482 bp->rx_csum = data;
8484 /* Disable TPA when Rx CSUM is disabled; otherwise all
8485 TPA'ed packets will be discarded due to a wrong TCP CSUM */
8486 if (!data) {
8487 u32 flags = ethtool_op_get_flags(dev);
8489 rc = bnx2x_set_flags(dev, (flags & ~ETH_FLAG_LRO));
8492 return rc;
8495 static int bnx2x_set_tso(struct net_device *dev, u32 data)
8497 if (data) {
8498 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
8499 dev->features |= NETIF_F_TSO6;
8500 } else {
8501 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
8502 dev->features &= ~NETIF_F_TSO6;
8505 return 0;
8508 static const struct {
8509 char string[ETH_GSTRING_LEN];
8510 } bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = {
8511 { "register_test (offline)" },
8512 { "memory_test (offline)" },
8513 { "loopback_test (offline)" },
8514 { "nvram_test (online)" },
8515 { "interrupt_test (online)" },
8516 { "link_test (online)" },
8517 { "idle check (online)" },
8518 { "MC errors (online)" }
8521 static int bnx2x_self_test_count(struct net_device *dev)
8523 return BNX2X_NUM_TESTS;
8526 static int bnx2x_test_registers(struct bnx2x *bp)
8528 int idx, i, rc = -ENODEV;
8529 u32 wr_val = 0;
8530 int port = BP_PORT(bp);
8531 static const struct {
8532 u32 offset0;
8533 u32 offset1;
8534 u32 mask;
8535 } reg_tbl[] = {
8536 /* 0 */ { BRB1_REG_PAUSE_LOW_THRESHOLD_0, 4, 0x000003ff },
8537 { DORQ_REG_DB_ADDR0, 4, 0xffffffff },
8538 { HC_REG_AGG_INT_0, 4, 0x000003ff },
8539 { PBF_REG_MAC_IF0_ENABLE, 4, 0x00000001 },
8540 { PBF_REG_P0_INIT_CRD, 4, 0x000007ff },
8541 { PRS_REG_CID_PORT_0, 4, 0x00ffffff },
8542 { PXP2_REG_PSWRQ_CDU0_L2P, 4, 0x000fffff },
8543 { PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
8544 { PXP2_REG_PSWRQ_TM0_L2P, 4, 0x000fffff },
8545 { PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
8546 /* 10 */ { PXP2_REG_PSWRQ_TSDM0_L2P, 4, 0x000fffff },
8547 { QM_REG_CONNNUM_0, 4, 0x000fffff },
8548 { TM_REG_LIN0_MAX_ACTIVE_CID, 4, 0x0003ffff },
8549 { SRC_REG_KEYRSS0_0, 40, 0xffffffff },
8550 { SRC_REG_KEYRSS0_7, 40, 0xffffffff },
8551 { XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 4, 0x00000001 },
8552 { XCM_REG_WU_DA_CNT_CMD00, 4, 0x00000003 },
8553 { XCM_REG_GLB_DEL_ACK_MAX_CNT_0, 4, 0x000000ff },
8554 { NIG_REG_EGRESS_MNG0_FIFO, 20, 0xffffffff },
8555 { NIG_REG_LLH0_T_BIT, 4, 0x00000001 },
8556 /* 20 */ { NIG_REG_EMAC0_IN_EN, 4, 0x00000001 },
8557 { NIG_REG_BMAC0_IN_EN, 4, 0x00000001 },
8558 { NIG_REG_XCM0_OUT_EN, 4, 0x00000001 },
8559 { NIG_REG_BRB0_OUT_EN, 4, 0x00000001 },
8560 { NIG_REG_LLH0_XCM_MASK, 4, 0x00000007 },
8561 { NIG_REG_LLH0_ACPI_PAT_6_LEN, 68, 0x000000ff },
8562 { NIG_REG_LLH0_ACPI_PAT_0_CRC, 68, 0xffffffff },
8563 { NIG_REG_LLH0_DEST_MAC_0_0, 160, 0xffffffff },
8564 { NIG_REG_LLH0_DEST_IP_0_1, 160, 0xffffffff },
8565 { NIG_REG_LLH0_IPV4_IPV6_0, 160, 0x00000001 },
8566 /* 30 */ { NIG_REG_LLH0_DEST_UDP_0, 160, 0x0000ffff },
8567 { NIG_REG_LLH0_DEST_TCP_0, 160, 0x0000ffff },
8568 { NIG_REG_LLH0_VLAN_ID_0, 160, 0x00000fff },
8569 { NIG_REG_XGXS_SERDES0_MODE_SEL, 4, 0x00000001 },
8570 { NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0, 4, 0x00000001 },
8571 { NIG_REG_STATUS_INTERRUPT_PORT0, 4, 0x07ffffff },
8572 { NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST, 24, 0x00000001 },
8573 { NIG_REG_SERDES0_CTRL_PHY_ADDR, 16, 0x0000001f },
8575 { 0xffffffff, 0, 0x00000000 }
8578 if (!netif_running(bp->dev))
8579 return rc;
8581 /* Repeat the test twice:
8582 First by writing 0x00000000, second by writing 0xffffffff */
8583 for (idx = 0; idx < 2; idx++) {
8585 switch (idx) {
8586 case 0:
8587 wr_val = 0;
8588 break;
8589 case 1:
8590 wr_val = 0xffffffff;
8591 break;
8594 for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) {
8595 u32 offset, mask, save_val, val;
8597 offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1;
8598 mask = reg_tbl[i].mask;
8600 save_val = REG_RD(bp, offset);
8602 REG_WR(bp, offset, wr_val);
8603 val = REG_RD(bp, offset);
8605 /* Restore the original register's value */
8606 REG_WR(bp, offset, save_val);
8608 /* verify that the value is as expected */
8609 if ((val & mask) != (wr_val & mask))
8610 goto test_reg_exit;
8614 rc = 0;
8616 test_reg_exit:
8617 return rc;
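/* Illustrative sketch (not driver code): the write/read-back/restore pattern
 * used by the register test above, applied to a plain array standing in for
 * the register file; only the bits covered by mask are expected to stick.
 */
#include <stdint.h>

static int example_reg_test_one(uint32_t *regs, unsigned int idx,
				uint32_t mask, uint32_t wr_val)
{
	uint32_t save_val = regs[idx];			/* REG_RD() */
	uint32_t val;

	regs[idx] = wr_val;				/* REG_WR() */
	val = regs[idx];				/* REG_RD() */
	regs[idx] = save_val;				/* restore  */

	return ((val & mask) == (wr_val & mask)) ? 0 : -1;
}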
8620 static int bnx2x_test_memory(struct bnx2x *bp)
8622 int i, j, rc = -ENODEV;
8623 u32 val;
8624 static const struct {
8625 u32 offset;
8626 int size;
8627 } mem_tbl[] = {
8628 { CCM_REG_XX_DESCR_TABLE, CCM_REG_XX_DESCR_TABLE_SIZE },
8629 { CFC_REG_ACTIVITY_COUNTER, CFC_REG_ACTIVITY_COUNTER_SIZE },
8630 { CFC_REG_LINK_LIST, CFC_REG_LINK_LIST_SIZE },
8631 { DMAE_REG_CMD_MEM, DMAE_REG_CMD_MEM_SIZE },
8632 { TCM_REG_XX_DESCR_TABLE, TCM_REG_XX_DESCR_TABLE_SIZE },
8633 { UCM_REG_XX_DESCR_TABLE, UCM_REG_XX_DESCR_TABLE_SIZE },
8634 { XCM_REG_XX_DESCR_TABLE, XCM_REG_XX_DESCR_TABLE_SIZE },
8636 { 0xffffffff, 0 }
8638 static const struct {
8639 char *name;
8640 u32 offset;
8641 u32 e1_mask;
8642 u32 e1h_mask;
8643 } prty_tbl[] = {
8644 { "CCM_PRTY_STS", CCM_REG_CCM_PRTY_STS, 0x3ffc0, 0 },
8645 { "CFC_PRTY_STS", CFC_REG_CFC_PRTY_STS, 0x2, 0x2 },
8646 { "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0, 0 },
8647 { "TCM_PRTY_STS", TCM_REG_TCM_PRTY_STS, 0x3ffc0, 0 },
8648 { "UCM_PRTY_STS", UCM_REG_UCM_PRTY_STS, 0x3ffc0, 0 },
8649 { "XCM_PRTY_STS", XCM_REG_XCM_PRTY_STS, 0x3ffc1, 0 },
8651 { NULL, 0xffffffff, 0, 0 }
8654 if (!netif_running(bp->dev))
8655 return rc;
8657 /* Go through all the memories */
8658 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++)
8659 for (j = 0; j < mem_tbl[i].size; j++)
8660 REG_RD(bp, mem_tbl[i].offset + j*4);
8662 /* Check the parity status */
8663 for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
8664 val = REG_RD(bp, prty_tbl[i].offset);
8665 if ((CHIP_IS_E1(bp) && (val & ~(prty_tbl[i].e1_mask))) ||
8666 (CHIP_IS_E1H(bp) && (val & ~(prty_tbl[i].e1h_mask)))) {
8667 DP(NETIF_MSG_HW,
8668 "%s is 0x%x\n", prty_tbl[i].name, val);
8669 goto test_mem_exit;
8673 rc = 0;
8675 test_mem_exit:
8676 return rc;
8679 static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up)
8681 int cnt = 1000;
8683 if (link_up)
8684 while (bnx2x_link_test(bp) && cnt--)
8685 msleep(10);
8688 static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
8690 unsigned int pkt_size, num_pkts, i;
8691 struct sk_buff *skb;
8692 unsigned char *packet;
8693 struct bnx2x_fastpath *fp = &bp->fp[0];
8694 u16 tx_start_idx, tx_idx;
8695 u16 rx_start_idx, rx_idx;
8696 u16 pkt_prod;
8697 struct sw_tx_bd *tx_buf;
8698 struct eth_tx_bd *tx_bd;
8699 dma_addr_t mapping;
8700 union eth_rx_cqe *cqe;
8701 u8 cqe_fp_flags;
8702 struct sw_rx_bd *rx_buf;
8703 u16 len;
8704 int rc = -ENODEV;
8706 if (loopback_mode == BNX2X_MAC_LOOPBACK) {
8707 bp->link_params.loopback_mode = LOOPBACK_BMAC;
8708 bnx2x_acquire_phy_lock(bp);
8709 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
8710 bnx2x_release_phy_lock(bp);
8712 } else if (loopback_mode == BNX2X_PHY_LOOPBACK) {
8713 bp->link_params.loopback_mode = LOOPBACK_XGXS_10;
8714 bnx2x_acquire_phy_lock(bp);
8715 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
8716 bnx2x_release_phy_lock(bp);
8717 /* wait until link state is restored */
8718 bnx2x_wait_for_link(bp, link_up);
8720 } else
8721 return -EINVAL;
8723 pkt_size = 1514;
8724 skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
8725 if (!skb) {
8726 rc = -ENOMEM;
8727 goto test_loopback_exit;
8729 packet = skb_put(skb, pkt_size);
8730 memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
8731 memset(packet + ETH_ALEN, 0, (ETH_HLEN - ETH_ALEN));
8732 for (i = ETH_HLEN; i < pkt_size; i++)
8733 packet[i] = (unsigned char) (i & 0xff);
8735 num_pkts = 0;
8736 tx_start_idx = le16_to_cpu(*fp->tx_cons_sb);
8737 rx_start_idx = le16_to_cpu(*fp->rx_cons_sb);
8739 pkt_prod = fp->tx_pkt_prod++;
8740 tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
8741 tx_buf->first_bd = fp->tx_bd_prod;
8742 tx_buf->skb = skb;
8744 tx_bd = &fp->tx_desc_ring[TX_BD(fp->tx_bd_prod)];
8745 mapping = pci_map_single(bp->pdev, skb->data,
8746 skb_headlen(skb), PCI_DMA_TODEVICE);
8747 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
8748 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
8749 tx_bd->nbd = cpu_to_le16(1);
8750 tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));
8751 tx_bd->vlan = cpu_to_le16(pkt_prod);
8752 tx_bd->bd_flags.as_bitfield = (ETH_TX_BD_FLAGS_START_BD |
8753 ETH_TX_BD_FLAGS_END_BD);
8754 tx_bd->general_data = ((UNICAST_ADDRESS <<
8755 ETH_TX_BD_ETH_ADDR_TYPE_SHIFT) | 1);
8757 wmb();
8759 fp->hw_tx_prods->bds_prod =
8760 cpu_to_le16(le16_to_cpu(fp->hw_tx_prods->bds_prod) + 1);
8761 mb(); /* FW restriction: must not reorder writing nbd and packets */
8762 fp->hw_tx_prods->packets_prod =
8763 cpu_to_le32(le32_to_cpu(fp->hw_tx_prods->packets_prod) + 1);
8764 DOORBELL(bp, FP_IDX(fp), 0);
8766 mmiowb();
8768 num_pkts++;
8769 fp->tx_bd_prod++;
8770 bp->dev->trans_start = jiffies;
8772 udelay(100);
8774 tx_idx = le16_to_cpu(*fp->tx_cons_sb);
8775 if (tx_idx != tx_start_idx + num_pkts)
8776 goto test_loopback_exit;
8778 rx_idx = le16_to_cpu(*fp->rx_cons_sb);
8779 if (rx_idx != rx_start_idx + num_pkts)
8780 goto test_loopback_exit;
8782 cqe = &fp->rx_comp_ring[RCQ_BD(fp->rx_comp_cons)];
8783 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
8784 if (CQE_TYPE(cqe_fp_flags) || (cqe_fp_flags & ETH_RX_ERROR_FALGS))
8785 goto test_loopback_rx_exit;
8787 len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
8788 if (len != pkt_size)
8789 goto test_loopback_rx_exit;
8791 rx_buf = &fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)];
8792 skb = rx_buf->skb;
8793 skb_reserve(skb, cqe->fast_path_cqe.placement_offset);
8794 for (i = ETH_HLEN; i < pkt_size; i++)
8795 if (*(skb->data + i) != (unsigned char) (i & 0xff))
8796 goto test_loopback_rx_exit;
8798 rc = 0;
8800 test_loopback_rx_exit:
8802 fp->rx_bd_cons = NEXT_RX_IDX(fp->rx_bd_cons);
8803 fp->rx_bd_prod = NEXT_RX_IDX(fp->rx_bd_prod);
8804 fp->rx_comp_cons = NEXT_RCQ_IDX(fp->rx_comp_cons);
8805 fp->rx_comp_prod = NEXT_RCQ_IDX(fp->rx_comp_prod);
8807 /* Update producers */
8808 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
8809 fp->rx_sge_prod);
8811 test_loopback_exit:
8812 bp->link_params.loopback_mode = LOOPBACK_NONE;
8814 return rc;
8817 static int bnx2x_test_loopback(struct bnx2x *bp, u8 link_up)
8819 int rc = 0;
8821 if (!netif_running(bp->dev))
8822 return BNX2X_LOOPBACK_FAILED;
8824 bnx2x_netif_stop(bp, 1);
8826 if (bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK, link_up)) {
8827 DP(NETIF_MSG_PROBE, "MAC loopback failed\n");
8828 rc |= BNX2X_MAC_LOOPBACK_FAILED;
8831 if (bnx2x_run_loopback(bp, BNX2X_PHY_LOOPBACK, link_up)) {
8832 DP(NETIF_MSG_PROBE, "PHY loopback failed\n");
8833 rc |= BNX2X_PHY_LOOPBACK_FAILED;
8836 bnx2x_netif_start(bp);
8838 return rc;
8841 #define CRC32_RESIDUAL 0xdebb20e3
8843 static int bnx2x_test_nvram(struct bnx2x *bp)
8845 static const struct {
8846 int offset;
8847 int size;
8848 } nvram_tbl[] = {
8849 { 0, 0x14 }, /* bootstrap */
8850 { 0x14, 0xec }, /* dir */
8851 { 0x100, 0x350 }, /* manuf_info */
8852 { 0x450, 0xf0 }, /* feature_info */
8853 { 0x640, 0x64 }, /* upgrade_key_info */
8854 { 0x6a4, 0x64 },
8855 { 0x708, 0x70 }, /* manuf_key_info */
8856 { 0x778, 0x70 },
8857 { 0, 0 }
8859 u32 buf[0x350 / 4];
8860 u8 *data = (u8 *)buf;
8861 int i, rc;
8862 u32 magic, csum;
8864 rc = bnx2x_nvram_read(bp, 0, data, 4);
8865 if (rc) {
8866 DP(NETIF_MSG_PROBE, "magic value read (rc -%d)\n", -rc);
8867 goto test_nvram_exit;
8870 magic = be32_to_cpu(buf[0]);
8871 if (magic != 0x669955aa) {
8872 DP(NETIF_MSG_PROBE, "magic value (0x%08x)\n", magic);
8873 rc = -ENODEV;
8874 goto test_nvram_exit;
8877 for (i = 0; nvram_tbl[i].size; i++) {
8879 rc = bnx2x_nvram_read(bp, nvram_tbl[i].offset, data,
8880 nvram_tbl[i].size);
8881 if (rc) {
8882 DP(NETIF_MSG_PROBE,
8883 "nvram_tbl[%d] read data (rc -%d)\n", i, -rc);
8884 goto test_nvram_exit;
8887 csum = ether_crc_le(nvram_tbl[i].size, data);
8888 if (csum != CRC32_RESIDUAL) {
8889 DP(NETIF_MSG_PROBE,
8890 "nvram_tbl[%d] csum value (0x%08x)\n", i, csum);
8891 rc = -ENODEV;
8892 goto test_nvram_exit;
8896 test_nvram_exit:
8897 return rc;
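/* Illustrative sketch (not driver code): why comparing against CRC32_RESIDUAL
 * works. With the reflected CRC-32 register used by ether_crc_le() (initial
 * value 0xffffffff, no final xor), a block that carries its own complemented
 * CRC appended least-significant byte first always leaves the register at
 * 0xdebb20e3. Plain C model, assuming only the standard headers below:
 */
#include <assert.h>
#include <stddef.h>
#include <stdint.h>

static uint32_t example_crc32_le(const uint8_t *buf, size_t len)
{
	uint32_t crc = 0xffffffff;
	size_t i;
	int bit;

	for (i = 0; i < len; i++) {
		crc ^= buf[i];
		for (bit = 0; bit < 8; bit++)
			crc = (crc >> 1) ^ ((crc & 1) ? 0xedb88320 : 0);
	}
	return crc;
}

static void example_residual_check(void)
{
	uint8_t block[16] = { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12 };
	uint32_t crc = ~example_crc32_le(block, 12);

	block[12] = (uint8_t)crc;		/* append CRC little-endian */
	block[13] = (uint8_t)(crc >> 8);
	block[14] = (uint8_t)(crc >> 16);
	block[15] = (uint8_t)(crc >> 24);

	assert(example_crc32_le(block, 16) == 0xdebb20e3);
}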
8900 static int bnx2x_test_intr(struct bnx2x *bp)
8902 struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
8903 int i, rc;
8905 if (!netif_running(bp->dev))
8906 return -ENODEV;
8908 config->hdr.length_6b = 0;
8909 config->hdr.offset = 0;
8910 config->hdr.client_id = BP_CL_ID(bp);
8911 config->hdr.reserved1 = 0;
8913 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
8914 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
8915 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
8916 if (rc == 0) {
8917 bp->set_mac_pending++;
8918 for (i = 0; i < 10; i++) {
8919 if (!bp->set_mac_pending)
8920 break;
8921 msleep_interruptible(10);
8923 if (i == 10)
8924 rc = -ENODEV;
8927 return rc;
8930 static void bnx2x_self_test(struct net_device *dev,
8931 struct ethtool_test *etest, u64 *buf)
8933 struct bnx2x *bp = netdev_priv(dev);
8935 memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS);
8937 if (!netif_running(dev))
8938 return;
8940 /* offline tests are not supported in MF mode */
8941 if (IS_E1HMF(bp))
8942 etest->flags &= ~ETH_TEST_FL_OFFLINE;
8944 if (etest->flags & ETH_TEST_FL_OFFLINE) {
8945 u8 link_up;
8947 link_up = bp->link_vars.link_up;
8948 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8949 bnx2x_nic_load(bp, LOAD_DIAG);
8950 /* wait until link state is restored */
8951 bnx2x_wait_for_link(bp, link_up);
8953 if (bnx2x_test_registers(bp) != 0) {
8954 buf[0] = 1;
8955 etest->flags |= ETH_TEST_FL_FAILED;
8957 if (bnx2x_test_memory(bp) != 0) {
8958 buf[1] = 1;
8959 etest->flags |= ETH_TEST_FL_FAILED;
8961 buf[2] = bnx2x_test_loopback(bp, link_up);
8962 if (buf[2] != 0)
8963 etest->flags |= ETH_TEST_FL_FAILED;
8965 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8966 bnx2x_nic_load(bp, LOAD_NORMAL);
8967 /* wait until link state is restored */
8968 bnx2x_wait_for_link(bp, link_up);
8970 if (bnx2x_test_nvram(bp) != 0) {
8971 buf[3] = 1;
8972 etest->flags |= ETH_TEST_FL_FAILED;
8974 if (bnx2x_test_intr(bp) != 0) {
8975 buf[4] = 1;
8976 etest->flags |= ETH_TEST_FL_FAILED;
8978 if (bp->port.pmf)
8979 if (bnx2x_link_test(bp) != 0) {
8980 buf[5] = 1;
8981 etest->flags |= ETH_TEST_FL_FAILED;
8983 buf[7] = bnx2x_mc_assert(bp);
8984 if (buf[7] != 0)
8985 etest->flags |= ETH_TEST_FL_FAILED;
8987 #ifdef BNX2X_EXTRA_DEBUG
8988 bnx2x_panic_dump(bp);
8989 #endif
8992 static const struct {
8993 long offset;
8994 int size;
8995 u32 flags;
8996 #define STATS_FLAGS_PORT 1
8997 #define STATS_FLAGS_FUNC 2
8998 u8 string[ETH_GSTRING_LEN];
8999 } bnx2x_stats_arr[BNX2X_NUM_STATS] = {
9000 /* 1 */ { STATS_OFFSET32(valid_bytes_received_hi),
9001 8, STATS_FLAGS_FUNC, "rx_bytes" },
9002 { STATS_OFFSET32(error_bytes_received_hi),
9003 8, STATS_FLAGS_FUNC, "rx_error_bytes" },
9004 { STATS_OFFSET32(total_bytes_transmitted_hi),
9005 8, STATS_FLAGS_FUNC, "tx_bytes" },
9006 { STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
9007 8, STATS_FLAGS_PORT, "tx_error_bytes" },
9008 { STATS_OFFSET32(total_unicast_packets_received_hi),
9009 8, STATS_FLAGS_FUNC, "rx_ucast_packets" },
9010 { STATS_OFFSET32(total_multicast_packets_received_hi),
9011 8, STATS_FLAGS_FUNC, "rx_mcast_packets" },
9012 { STATS_OFFSET32(total_broadcast_packets_received_hi),
9013 8, STATS_FLAGS_FUNC, "rx_bcast_packets" },
9014 { STATS_OFFSET32(total_unicast_packets_transmitted_hi),
9015 8, STATS_FLAGS_FUNC, "tx_packets" },
9016 { STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
9017 8, STATS_FLAGS_PORT, "tx_mac_errors" },
9018 /* 10 */{ STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
9019 8, STATS_FLAGS_PORT, "tx_carrier_errors" },
9020 { STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
9021 8, STATS_FLAGS_PORT, "rx_crc_errors" },
9022 { STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
9023 8, STATS_FLAGS_PORT, "rx_align_errors" },
9024 { STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
9025 8, STATS_FLAGS_PORT, "tx_single_collisions" },
9026 { STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
9027 8, STATS_FLAGS_PORT, "tx_multi_collisions" },
9028 { STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
9029 8, STATS_FLAGS_PORT, "tx_deferred" },
9030 { STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
9031 8, STATS_FLAGS_PORT, "tx_excess_collisions" },
9032 { STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
9033 8, STATS_FLAGS_PORT, "tx_late_collisions" },
9034 { STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
9035 8, STATS_FLAGS_PORT, "tx_total_collisions" },
9036 { STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
9037 8, STATS_FLAGS_PORT, "rx_fragments" },
9038 /* 20 */{ STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
9039 8, STATS_FLAGS_PORT, "rx_jabbers" },
9040 { STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
9041 8, STATS_FLAGS_PORT, "rx_undersize_packets" },
9042 { STATS_OFFSET32(jabber_packets_received),
9043 4, STATS_FLAGS_FUNC, "rx_oversize_packets" },
9044 { STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
9045 8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
9046 { STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
9047 8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
9048 { STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
9049 8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
9050 { STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
9051 8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
9052 { STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
9053 8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
9054 { STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
9055 8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
9056 { STATS_OFFSET32(etherstatspktsover1522octets_hi),
9057 8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
9058 /* 30 */{ STATS_OFFSET32(rx_stat_xonpauseframesreceived_hi),
9059 8, STATS_FLAGS_PORT, "rx_xon_frames" },
9060 { STATS_OFFSET32(rx_stat_xoffpauseframesreceived_hi),
9061 8, STATS_FLAGS_PORT, "rx_xoff_frames" },
9062 { STATS_OFFSET32(tx_stat_outxonsent_hi),
9063 8, STATS_FLAGS_PORT, "tx_xon_frames" },
9064 { STATS_OFFSET32(tx_stat_outxoffsent_hi),
9065 8, STATS_FLAGS_PORT, "tx_xoff_frames" },
9066 { STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
9067 8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
9068 { STATS_OFFSET32(mac_filter_discard),
9069 4, STATS_FLAGS_PORT, "rx_filtered_packets" },
9070 { STATS_OFFSET32(no_buff_discard),
9071 4, STATS_FLAGS_FUNC, "rx_discards" },
9072 { STATS_OFFSET32(xxoverflow_discard),
9073 4, STATS_FLAGS_PORT, "rx_fw_discards" },
9074 { STATS_OFFSET32(brb_drop_hi),
9075 8, STATS_FLAGS_PORT, "brb_discard" },
9076 { STATS_OFFSET32(brb_truncate_hi),
9077 8, STATS_FLAGS_PORT, "brb_truncate" },
9078 /* 40 */{ STATS_OFFSET32(rx_err_discard_pkt),
9079 4, STATS_FLAGS_FUNC, "rx_phy_ip_err_discards"},
9080 { STATS_OFFSET32(rx_skb_alloc_failed),
9081 4, STATS_FLAGS_FUNC, "rx_skb_alloc_discard" },
9082 /* 42 */{ STATS_OFFSET32(hw_csum_err),
9083 4, STATS_FLAGS_FUNC, "rx_csum_offload_errors" }
9086 #define IS_NOT_E1HMF_STAT(bp, i) \
9087 (IS_E1HMF(bp) && (bnx2x_stats_arr[i].flags & STATS_FLAGS_PORT))
9089 static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
9091 struct bnx2x *bp = netdev_priv(dev);
9092 int i, j;
9094 switch (stringset) {
9095 case ETH_SS_STATS:
9096 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
9097 if (IS_NOT_E1HMF_STAT(bp, i))
9098 continue;
9099 strcpy(buf + j*ETH_GSTRING_LEN,
9100 bnx2x_stats_arr[i].string);
9101 j++;
9103 break;
9105 case ETH_SS_TEST:
9106 memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr));
9107 break;
9111 static int bnx2x_get_stats_count(struct net_device *dev)
9113 struct bnx2x *bp = netdev_priv(dev);
9114 int i, num_stats = 0;
9116 for (i = 0; i < BNX2X_NUM_STATS; i++) {
9117 if (IS_NOT_E1HMF_STAT(bp, i))
9118 continue;
9119 num_stats++;
9121 return num_stats;
9124 static void bnx2x_get_ethtool_stats(struct net_device *dev,
9125 struct ethtool_stats *stats, u64 *buf)
9127 struct bnx2x *bp = netdev_priv(dev);
9128 u32 *hw_stats = (u32 *)&bp->eth_stats;
9129 int i, j;
9131 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
9132 if (IS_NOT_E1HMF_STAT(bp, i))
9133 continue;
9135 if (bnx2x_stats_arr[i].size == 0) {
9136 /* skip this counter */
9137 buf[j] = 0;
9138 j++;
9139 continue;
9141 if (bnx2x_stats_arr[i].size == 4) {
9142 /* 4-byte counter */
9143 buf[j] = (u64) *(hw_stats + bnx2x_stats_arr[i].offset);
9144 j++;
9145 continue;
9147 /* 8-byte counter */
9148 buf[j] = HILO_U64(*(hw_stats + bnx2x_stats_arr[i].offset),
9149 *(hw_stats + bnx2x_stats_arr[i].offset + 1));
9150 j++;
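/* Illustrative sketch (not driver code): the 8-byte counters above are kept
 * as two consecutive 32-bit words, high word first; HILO_U64() is
 * conceptually the combination below.
 */
#include <stdint.h>

static uint64_t example_hilo_u64(uint32_t hi, uint32_t lo)
{
	return ((uint64_t)hi << 32) | lo;
}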
9154 static int bnx2x_phys_id(struct net_device *dev, u32 data)
9156 struct bnx2x *bp = netdev_priv(dev);
9157 int port = BP_PORT(bp);
9158 int i;
9160 if (!netif_running(dev))
9161 return 0;
9163 if (!bp->port.pmf)
9164 return 0;
9166 if (data == 0)
9167 data = 2;
9169 for (i = 0; i < (data * 2); i++) {
9170 if ((i % 2) == 0)
9171 bnx2x_set_led(bp, port, LED_MODE_OPER, SPEED_1000,
9172 bp->link_params.hw_led_mode,
9173 bp->link_params.chip_id);
9174 else
9175 bnx2x_set_led(bp, port, LED_MODE_OFF, 0,
9176 bp->link_params.hw_led_mode,
9177 bp->link_params.chip_id);
9179 msleep_interruptible(500);
9180 if (signal_pending(current))
9181 break;
9184 if (bp->link_vars.link_up)
9185 bnx2x_set_led(bp, port, LED_MODE_OPER,
9186 bp->link_vars.line_speed,
9187 bp->link_params.hw_led_mode,
9188 bp->link_params.chip_id);
9190 return 0;
9193 static struct ethtool_ops bnx2x_ethtool_ops = {
9194 .get_settings = bnx2x_get_settings,
9195 .set_settings = bnx2x_set_settings,
9196 .get_drvinfo = bnx2x_get_drvinfo,
9197 .get_wol = bnx2x_get_wol,
9198 .set_wol = bnx2x_set_wol,
9199 .get_msglevel = bnx2x_get_msglevel,
9200 .set_msglevel = bnx2x_set_msglevel,
9201 .nway_reset = bnx2x_nway_reset,
9202 .get_link = ethtool_op_get_link,
9203 .get_eeprom_len = bnx2x_get_eeprom_len,
9204 .get_eeprom = bnx2x_get_eeprom,
9205 .set_eeprom = bnx2x_set_eeprom,
9206 .get_coalesce = bnx2x_get_coalesce,
9207 .set_coalesce = bnx2x_set_coalesce,
9208 .get_ringparam = bnx2x_get_ringparam,
9209 .set_ringparam = bnx2x_set_ringparam,
9210 .get_pauseparam = bnx2x_get_pauseparam,
9211 .set_pauseparam = bnx2x_set_pauseparam,
9212 .get_rx_csum = bnx2x_get_rx_csum,
9213 .set_rx_csum = bnx2x_set_rx_csum,
9214 .get_tx_csum = ethtool_op_get_tx_csum,
9215 .set_tx_csum = ethtool_op_set_tx_hw_csum,
9216 .set_flags = bnx2x_set_flags,
9217 .get_flags = ethtool_op_get_flags,
9218 .get_sg = ethtool_op_get_sg,
9219 .set_sg = ethtool_op_set_sg,
9220 .get_tso = ethtool_op_get_tso,
9221 .set_tso = bnx2x_set_tso,
9222 .self_test_count = bnx2x_self_test_count,
9223 .self_test = bnx2x_self_test,
9224 .get_strings = bnx2x_get_strings,
9225 .phys_id = bnx2x_phys_id,
9226 .get_stats_count = bnx2x_get_stats_count,
9227 .get_ethtool_stats = bnx2x_get_ethtool_stats,
9230 /* end of ethtool_ops */
9232 /****************************************************************************
9233 * General service functions
9234 ****************************************************************************/
9236 static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
9238 u16 pmcsr;
9240 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
9242 switch (state) {
9243 case PCI_D0:
9244 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
9245 ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
9246 PCI_PM_CTRL_PME_STATUS));
9248 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
9249 /* delay required during transition out of D3hot */
9250 msleep(20);
9251 break;
9253 case PCI_D3hot:
9254 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
9255 pmcsr |= 3;
9257 if (bp->wol)
9258 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
9260 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
9261 pmcsr);
9263 /* No more memory access after this point until
9264 * the device is brought back to D0. */
9266 break;
9268 default:
9269 return -EINVAL;
9271 return 0;
9275 * net_device service functions
9278 static int bnx2x_poll(struct napi_struct *napi, int budget)
9280 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
9281 napi);
9282 struct bnx2x *bp = fp->bp;
9283 int work_done = 0;
9284 u16 rx_cons_sb;
9286 #ifdef BNX2X_STOP_ON_ERROR
9287 if (unlikely(bp->panic))
9288 goto poll_panic;
9289 #endif
9291 prefetch(fp->tx_buf_ring[TX_BD(fp->tx_pkt_cons)].skb);
9292 prefetch(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb);
9293 prefetch((char *)(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb) + 256);
9295 bnx2x_update_fpsb_idx(fp);
9297 if (BNX2X_HAS_TX_WORK(fp))
9298 bnx2x_tx_int(fp, budget);
9300 rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
9301 if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
9302 rx_cons_sb++;
9303 if (BNX2X_HAS_RX_WORK(fp))
9304 work_done = bnx2x_rx_int(fp, budget);
9306 rmb(); /* BNX2X_HAS_WORK() reads the status block */
9307 rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
9308 if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
9309 rx_cons_sb++;
9311 /* must not complete if we consumed full budget */
9312 if ((work_done < budget) && !BNX2X_HAS_WORK(fp)) {
9314 #ifdef BNX2X_STOP_ON_ERROR
9315 poll_panic:
9316 #endif
9317 netif_rx_complete(napi);
9319 bnx2x_ack_sb(bp, FP_SB_ID(fp), USTORM_ID,
9320 le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1);
9321 bnx2x_ack_sb(bp, FP_SB_ID(fp), CSTORM_ID,
9322 le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1);
9324 return work_done;
9328 /* we split the first BD into headers and data BDs
9329 * to ease the pain of our fellow microcode engineers;
9330 * we use one mapping for both BDs.
9331 * So far this has only been observed to happen
9332 * in Other Operating Systems(TM) */
9334 static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
9335 struct bnx2x_fastpath *fp,
9336 struct eth_tx_bd **tx_bd, u16 hlen,
9337 u16 bd_prod, int nbd)
9339 struct eth_tx_bd *h_tx_bd = *tx_bd;
9340 struct eth_tx_bd *d_tx_bd;
9341 dma_addr_t mapping;
9342 int old_len = le16_to_cpu(h_tx_bd->nbytes);
9344 /* first fix first BD */
9345 h_tx_bd->nbd = cpu_to_le16(nbd);
9346 h_tx_bd->nbytes = cpu_to_le16(hlen);
9348 DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
9349 "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
9350 h_tx_bd->addr_lo, h_tx_bd->nbd);
9352 /* now get a new data BD
9353 * (after the pbd) and fill it */
9354 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9355 d_tx_bd = &fp->tx_desc_ring[bd_prod];
9357 mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
9358 le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
9360 d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
9361 d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
9362 d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
9363 d_tx_bd->vlan = 0;
9364 /* this marks the BD as one that has no individual mapping;
9365 * the FW ignores this flag in a BD not marked start */
9367 d_tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_SW_LSO;
9368 DP(NETIF_MSG_TX_QUEUED,
9369 "TSO split data size is %d (%x:%x)\n",
9370 d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
9372 /* update tx_bd for marking the last BD flag */
9373 *tx_bd = d_tx_bd;
9375 return bd_prod;
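/* Illustrative sketch (not driver code): the address/length arithmetic behind
 * bnx2x_tx_split() above. One DMA mapping is reused for both BDs; the data BD
 * simply starts hlen bytes into it. struct example_bd is a stand-in for the
 * addr_hi/addr_lo/nbytes fields of eth_tx_bd.
 */
#include <stdint.h>

struct example_bd {
	uint32_t addr_hi;
	uint32_t addr_lo;
	uint16_t nbytes;
};

static void example_split_bd(struct example_bd *hdr, struct example_bd *data,
			     uint16_t hlen)
{
	uint64_t mapping = ((uint64_t)hdr->addr_hi << 32) | hdr->addr_lo;
	uint16_t old_len = hdr->nbytes;

	hdr->nbytes = hlen;			/* header BD: first hlen bytes */

	mapping += hlen;			/* data BD starts right after  */
	data->addr_hi = (uint32_t)(mapping >> 32);
	data->addr_lo = (uint32_t)mapping;
	data->nbytes = old_len - hlen;		/* rest of the same buffer     */
}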
9378 static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
9380 if (fix > 0)
9381 csum = (u16) ~csum_fold(csum_sub(csum,
9382 csum_partial(t_header - fix, fix, 0)));
9384 else if (fix < 0)
9385 csum = (u16) ~csum_fold(csum_add(csum,
9386 csum_partial(t_header, -fix, 0)));
9388 return swab16(csum);
9391 static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
9393 u32 rc;
9395 if (skb->ip_summed != CHECKSUM_PARTIAL)
9396 rc = XMIT_PLAIN;
9398 else {
9399 if (skb->protocol == ntohs(ETH_P_IPV6)) {
9400 rc = XMIT_CSUM_V6;
9401 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
9402 rc |= XMIT_CSUM_TCP;
9404 } else {
9405 rc = XMIT_CSUM_V4;
9406 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
9407 rc |= XMIT_CSUM_TCP;
9411 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
9412 rc |= XMIT_GSO_V4;
9414 else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
9415 rc |= XMIT_GSO_V6;
9417 return rc;
9420 /* check if packet requires linearization (packet is too fragmented) */
9421 static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
9422 u32 xmit_type)
9424 int to_copy = 0;
9425 int hlen = 0;
9426 int first_bd_sz = 0;
9428 /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
9429 if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
9431 if (xmit_type & XMIT_GSO) {
9432 unsigned short lso_mss = skb_shinfo(skb)->gso_size;
9433 /* Check if LSO packet needs to be copied:
9434 3 = 1 (for headers BD) + 2 (for PBD and last BD) */
9435 int wnd_size = MAX_FETCH_BD - 3;
9436 /* Number of windows to check */
9437 int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
9438 int wnd_idx = 0;
9439 int frag_idx = 0;
9440 u32 wnd_sum = 0;
9442 /* Headers length */
9443 hlen = (int)(skb_transport_header(skb) - skb->data) +
9444 tcp_hdrlen(skb);
9446 /* Amount of data (w/o headers) in the linear part of the SKB */
9447 first_bd_sz = skb_headlen(skb) - hlen;
9449 wnd_sum = first_bd_sz;
9451 /* Calculate the first sum - it's special */
9452 for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
9453 wnd_sum +=
9454 skb_shinfo(skb)->frags[frag_idx].size;
9456 /* If there is data in the linear part of the skb - check it */
9457 if (first_bd_sz > 0) {
9458 if (unlikely(wnd_sum < lso_mss)) {
9459 to_copy = 1;
9460 goto exit_lbl;
9463 wnd_sum -= first_bd_sz;
9466 /* Others are easier: run through the frag list and
9467 check all windows */
9468 for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
9469 wnd_sum +=
9470 skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;
9472 if (unlikely(wnd_sum < lso_mss)) {
9473 to_copy = 1;
9474 break;
9476 wnd_sum -=
9477 skb_shinfo(skb)->frags[wnd_idx].size;
9480 } else {
9481 /* a non-LSO packet that is too fragmented must always
9482 be linearized */
9483 to_copy = 1;
9487 exit_lbl:
9488 if (unlikely(to_copy))
9489 DP(NETIF_MSG_TX_QUEUED,
9490 "Linearization IS REQUIRED for %s packet. "
9491 "num_frags %d hlen %d first_bd_sz %d\n",
9492 (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
9493 skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
9495 return to_copy;
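/* Illustrative sketch (not driver code): the core of the window check above,
 * with the linear-data special case stripped out. If any wnd_size consecutive
 * fragments sum to less than the LSO MSS, building one MSS-sized segment
 * would need more BDs than the firmware can fetch, so the skb has to be
 * linearized. Assumes only <stddef.h>.
 */
#include <stddef.h>

static int example_needs_linearize(const unsigned int *frag_size,
				   size_t nr_frags, size_t wnd_size,
				   unsigned int mss)
{
	unsigned int wnd_sum = 0;
	size_t i;

	if (nr_frags < wnd_size)
		return 0;			/* always fits in one fetch */

	for (i = 0; i < wnd_size; i++)		/* sum of the first window */
		wnd_sum += frag_size[i];

	for (i = 0; ; i++) {
		if (wnd_sum < mss)
			return 1;		/* too fragmented for this MSS */
		if (i + wnd_size >= nr_frags)
			return 0;		/* last window already checked */
		/* slide the window one fragment to the right */
		wnd_sum -= frag_size[i];
		wnd_sum += frag_size[i + wnd_size];
	}
}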
9498 /* called with netif_tx_lock
9499 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
9500 * netif_wake_queue()
9502 static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
9504 struct bnx2x *bp = netdev_priv(dev);
9505 struct bnx2x_fastpath *fp;
9506 struct sw_tx_bd *tx_buf;
9507 struct eth_tx_bd *tx_bd;
9508 struct eth_tx_parse_bd *pbd = NULL;
9509 u16 pkt_prod, bd_prod;
9510 int nbd, fp_index;
9511 dma_addr_t mapping;
9512 u32 xmit_type = bnx2x_xmit_type(bp, skb);
9513 int vlan_off = (bp->e1hov ? 4 : 0);
9514 int i;
9515 u8 hlen = 0;
9517 #ifdef BNX2X_STOP_ON_ERROR
9518 if (unlikely(bp->panic))
9519 return NETDEV_TX_BUSY;
9520 #endif
9522 fp_index = (smp_processor_id() % bp->num_queues);
9523 fp = &bp->fp[fp_index];
9525 if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) {
9526 bp->eth_stats.driver_xoff++;
9527 netif_stop_queue(dev);
9528 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
9529 return NETDEV_TX_BUSY;
9532 DP(NETIF_MSG_TX_QUEUED, "SKB: summed %x protocol %x protocol(%x,%x)"
9533 " gso type %x xmit_type %x\n",
9534 skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
9535 ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
9537 /* First, check if we need to linearize the skb
9538 (due to FW restrictions) */
9539 if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
9540 /* Statistics of linearization */
9541 bp->lin_cnt++;
9542 if (skb_linearize(skb) != 0) {
9543 DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
9544 "silently dropping this SKB\n");
9545 dev_kfree_skb_any(skb);
9546 return NETDEV_TX_OK;
9551 /* Please read carefully. First we use one BD which we mark as start,
9552 * then for TSO or xsum we have a parsing info BD,
9553 * and only then we have the rest of the TSO BDs.
9554 * (don't forget to mark the last one as last,
9555 * and to unmap only AFTER you write to the BD ...)
9556 * And above all, all pbd sizes are in words - NOT DWORDS! */
9559 pkt_prod = fp->tx_pkt_prod++;
9560 bd_prod = TX_BD(fp->tx_bd_prod);
9562 /* get a tx_buf and first BD */
9563 tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
9564 tx_bd = &fp->tx_desc_ring[bd_prod];
9566 tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
9567 tx_bd->general_data = (UNICAST_ADDRESS <<
9568 ETH_TX_BD_ETH_ADDR_TYPE_SHIFT);
9569 /* header nbd */
9570 tx_bd->general_data |= (1 << ETH_TX_BD_HDR_NBDS_SHIFT);
9572 /* remember the first BD of the packet */
9573 tx_buf->first_bd = fp->tx_bd_prod;
9574 tx_buf->skb = skb;
9576 DP(NETIF_MSG_TX_QUEUED,
9577 "sending pkt %u @%p next_idx %u bd %u @%p\n",
9578 pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_bd);
9580 #ifdef BCM_VLAN
9581 if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb) &&
9582 (bp->flags & HW_VLAN_TX_FLAG)) {
9583 tx_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
9584 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG;
9585 vlan_off += 4;
9586 } else
9587 #endif
9588 tx_bd->vlan = cpu_to_le16(pkt_prod);
9590 if (xmit_type) {
9591 /* turn on parsing and get a BD */
9592 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9593 pbd = (void *)&fp->tx_desc_ring[bd_prod];
9595 memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
9598 if (xmit_type & XMIT_CSUM) {
9599 hlen = (skb_network_header(skb) - skb->data + vlan_off) / 2;
9601 /* for now NS flag is not used in Linux */
9602 pbd->global_data = (hlen |
9603 ((skb->protocol == ntohs(ETH_P_8021Q)) <<
9604 ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT));
9606 pbd->ip_hlen = (skb_transport_header(skb) -
9607 skb_network_header(skb)) / 2;
9609 hlen += pbd->ip_hlen + tcp_hdrlen(skb) / 2;
9611 pbd->total_hlen = cpu_to_le16(hlen);
9612 hlen = hlen*2 - vlan_off;
9614 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_TCP_CSUM;
9616 if (xmit_type & XMIT_CSUM_V4)
9617 tx_bd->bd_flags.as_bitfield |=
9618 ETH_TX_BD_FLAGS_IP_CSUM;
9619 else
9620 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IPV6;
9622 if (xmit_type & XMIT_CSUM_TCP) {
9623 pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
9625 } else {
9626 s8 fix = SKB_CS_OFF(skb); /* signed! */
9628 pbd->global_data |= ETH_TX_PARSE_BD_CS_ANY_FLG;
9629 pbd->cs_offset = fix / 2;
9631 DP(NETIF_MSG_TX_QUEUED,
9632 "hlen %d offset %d fix %d csum before fix %x\n",
9633 le16_to_cpu(pbd->total_hlen), pbd->cs_offset, fix,
9634 SKB_CS(skb));
9636 /* HW bug: fixup the CSUM */
9637 pbd->tcp_pseudo_csum =
9638 bnx2x_csum_fix(skb_transport_header(skb),
9639 SKB_CS(skb), fix);
9641 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
9642 pbd->tcp_pseudo_csum);
9646 mapping = pci_map_single(bp->pdev, skb->data,
9647 skb_headlen(skb), PCI_DMA_TODEVICE);
9649 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
9650 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
9651 nbd = skb_shinfo(skb)->nr_frags + ((pbd == NULL) ? 1 : 2);
9652 tx_bd->nbd = cpu_to_le16(nbd);
9653 tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));
9655 DP(NETIF_MSG_TX_QUEUED, "first bd @%p addr (%x:%x) nbd %d"
9656 " nbytes %d flags %x vlan %x\n",
9657 tx_bd, tx_bd->addr_hi, tx_bd->addr_lo, le16_to_cpu(tx_bd->nbd),
9658 le16_to_cpu(tx_bd->nbytes), tx_bd->bd_flags.as_bitfield,
9659 le16_to_cpu(tx_bd->vlan));
9661 if (xmit_type & XMIT_GSO) {
9663 DP(NETIF_MSG_TX_QUEUED,
9664 "TSO packet len %d hlen %d total len %d tso size %d\n",
9665 skb->len, hlen, skb_headlen(skb),
9666 skb_shinfo(skb)->gso_size);
9668 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
9670 if (unlikely(skb_headlen(skb) > hlen))
9671 bd_prod = bnx2x_tx_split(bp, fp, &tx_bd, hlen,
9672 bd_prod, ++nbd);
9674 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
9675 pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
9676 pbd->tcp_flags = pbd_tcp_flags(skb);
9678 if (xmit_type & XMIT_GSO_V4) {
9679 pbd->ip_id = swab16(ip_hdr(skb)->id);
9680 pbd->tcp_pseudo_csum =
9681 swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
9682 ip_hdr(skb)->daddr,
9683 0, IPPROTO_TCP, 0));
9685 } else
9686 pbd->tcp_pseudo_csum =
9687 swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
9688 &ipv6_hdr(skb)->daddr,
9689 0, IPPROTO_TCP, 0));
9691 pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN;
9694 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
9695 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
9697 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9698 tx_bd = &fp->tx_desc_ring[bd_prod];
9700 mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
9701 frag->size, PCI_DMA_TODEVICE);
9703 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
9704 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
9705 tx_bd->nbytes = cpu_to_le16(frag->size);
9706 tx_bd->vlan = cpu_to_le16(pkt_prod);
9707 tx_bd->bd_flags.as_bitfield = 0;
9709 DP(NETIF_MSG_TX_QUEUED,
9710 "frag %d bd @%p addr (%x:%x) nbytes %d flags %x\n",
9711 i, tx_bd, tx_bd->addr_hi, tx_bd->addr_lo,
9712 le16_to_cpu(tx_bd->nbytes), tx_bd->bd_flags.as_bitfield);
9715 /* now at last mark the BD as the last BD */
9716 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_END_BD;
9718 DP(NETIF_MSG_TX_QUEUED, "last bd @%p flags %x\n",
9719 tx_bd, tx_bd->bd_flags.as_bitfield);
9721 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9723 /* now send a tx doorbell, counting the next BD
9724 * if the packet contains or ends with it */
9726 if (TX_BD_POFF(bd_prod) < nbd)
9727 nbd++;
9729 if (pbd)
9730 DP(NETIF_MSG_TX_QUEUED,
9731 "PBD @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u"
9732 " tcp_flags %x xsum %x seq %u hlen %u\n",
9733 pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id,
9734 pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum,
9735 pbd->tcp_send_seq, le16_to_cpu(pbd->total_hlen));
9737 DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
9740 /* Make sure that the BD data is updated before updating the producer
9741 * since the FW might read the BD right after the producer is updated.
9742 * This is only applicable for weak-ordered memory model archs such
9743 * as IA-64. The following barrier is also mandatory since the FW
9744 * assumes packets always have BDs. */
9746 wmb();
9748 fp->hw_tx_prods->bds_prod =
9749 cpu_to_le16(le16_to_cpu(fp->hw_tx_prods->bds_prod) + nbd);
9750 mb(); /* FW restriction: must not reorder writing nbd and packets */
9751 fp->hw_tx_prods->packets_prod =
9752 cpu_to_le32(le32_to_cpu(fp->hw_tx_prods->packets_prod) + 1);
9753 DOORBELL(bp, FP_IDX(fp), 0);
9755 mmiowb();
9757 fp->tx_bd_prod += nbd;
9758 dev->trans_start = jiffies;
9760 if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
9761 /* We want bnx2x_tx_int to "see" the updated tx_bd_prod
9762 if we put Tx into XOFF state. */
9763 smp_mb();
9764 netif_stop_queue(dev);
9765 bp->eth_stats.driver_xoff++;
9766 if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
9767 netif_wake_queue(dev);
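/* Editor's note: the stop-then-recheck sequence above is the usual
 * lost-wakeup guard: the queue is stopped first, smp_mb() makes the
 * stopped state and the updated tx_bd_prod visible to bnx2x_tx_int(),
 * and the availability test is repeated so the queue is woken again if
 * the completion path freed descriptors in the meantime.
 */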
9769 fp->tx_pkt++;
9771 return NETDEV_TX_OK;
9774 /* called with rtnl_lock */
9775 static int bnx2x_open(struct net_device *dev)
9777 struct bnx2x *bp = netdev_priv(dev);
9779 bnx2x_set_power_state(bp, PCI_D0);
9781 return bnx2x_nic_load(bp, LOAD_OPEN);
9784 /* called with rtnl_lock */
9785 static int bnx2x_close(struct net_device *dev)
9787 struct bnx2x *bp = netdev_priv(dev);
9789 /* Unload the driver, release IRQs */
9790 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
9791 if (atomic_read(&bp->pdev->enable_cnt) == 1)
9792 if (!CHIP_REV_IS_SLOW(bp))
9793 bnx2x_set_power_state(bp, PCI_D3hot);
9795 return 0;
9798 /* called with netif_tx_lock from set_multicast */
9799 static void bnx2x_set_rx_mode(struct net_device *dev)
9801 struct bnx2x *bp = netdev_priv(dev);
9802 u32 rx_mode = BNX2X_RX_MODE_NORMAL;
9803 int port = BP_PORT(bp);
9805 if (bp->state != BNX2X_STATE_OPEN) {
9806 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
9807 return;
9810 DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);
9812 if (dev->flags & IFF_PROMISC)
9813 rx_mode = BNX2X_RX_MODE_PROMISC;
9815 else if ((dev->flags & IFF_ALLMULTI) ||
9816 ((dev->mc_count > BNX2X_MAX_MULTICAST) && CHIP_IS_E1(bp)))
9817 rx_mode = BNX2X_RX_MODE_ALLMULTI;
9819 else { /* some multicasts */
9820 if (CHIP_IS_E1(bp)) {
9821 int i, old, offset;
9822 struct dev_mc_list *mclist;
9823 struct mac_configuration_cmd *config =
9824 bnx2x_sp(bp, mcast_config);
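/* Editor's note (E1 path): each multicast address is programmed as a
 * CAM entry in the shadow mcast_config table below and then pushed to
 * the chip with a SET_MAC ramrod; entries left over from a previously
 * longer list are explicitly invalidated so stale filters do not linger.
 */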
9826 for (i = 0, mclist = dev->mc_list;
9827 mclist && (i < dev->mc_count);
9828 i++, mclist = mclist->next) {
9830 config->config_table[i].
9831 cam_entry.msb_mac_addr =
9832 swab16(*(u16 *)&mclist->dmi_addr[0]);
9833 config->config_table[i].
9834 cam_entry.middle_mac_addr =
9835 swab16(*(u16 *)&mclist->dmi_addr[2]);
9836 config->config_table[i].
9837 cam_entry.lsb_mac_addr =
9838 swab16(*(u16 *)&mclist->dmi_addr[4]);
9839 config->config_table[i].cam_entry.flags =
9840 cpu_to_le16(port);
9841 config->config_table[i].
9842 target_table_entry.flags = 0;
9843 config->config_table[i].
9844 target_table_entry.client_id = 0;
9845 config->config_table[i].
9846 target_table_entry.vlan_id = 0;
9848 DP(NETIF_MSG_IFUP,
9849 "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
9850 config->config_table[i].
9851 cam_entry.msb_mac_addr,
9852 config->config_table[i].
9853 cam_entry.middle_mac_addr,
9854 config->config_table[i].
9855 cam_entry.lsb_mac_addr);
9857 old = config->hdr.length_6b;
9858 if (old > i) {
9859 for (; i < old; i++) {
9860 if (CAM_IS_INVALID(config->
9861 config_table[i])) {
9862 i--; /* already invalidated */
9863 break;
9865 /* invalidate */
9866 CAM_INVALIDATE(config->
9867 config_table[i]);
9871 if (CHIP_REV_IS_SLOW(bp))
9872 offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
9873 else
9874 offset = BNX2X_MAX_MULTICAST*(1 + port);
9876 config->hdr.length_6b = i;
9877 config->hdr.offset = offset;
9878 config->hdr.client_id = BP_CL_ID(bp);
9879 config->hdr.reserved1 = 0;
9881 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
9882 U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
9883 U64_LO(bnx2x_sp_mapping(bp, mcast_config)),
9884 0);
9885 } else { /* E1H */
9886 /* Accept one or more multicasts */
9887 struct dev_mc_list *mclist;
9888 u32 mc_filter[MC_HASH_SIZE];
9889 u32 crc, bit, regidx;
9890 int i;
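/* Editor's note (E1H path): instead of CAM entries a hash filter is
 * built: crc32c of each MAC address is taken, its top 8 bits select one
 * of 32 * MC_HASH_SIZE filter bits (256 bits when MC_HASH_SIZE is 8),
 * regidx picks the 32-bit MC_HASH register and the low 5 bits the bit
 * within it.  E.g. a CRC of 0x9Axxxxxx (bit index 154) sets bit 26 of
 * register 4.
 */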
9892 memset(mc_filter, 0, 4 * MC_HASH_SIZE);
9894 for (i = 0, mclist = dev->mc_list;
9895 mclist && (i < dev->mc_count);
9896 i++, mclist = mclist->next) {
9898 DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
9899 mclist->dmi_addr);
9901 crc = crc32c_le(0, mclist->dmi_addr, ETH_ALEN);
9902 bit = (crc >> 24) & 0xff;
9903 regidx = bit >> 5;
9904 bit &= 0x1f;
9905 mc_filter[regidx] |= (1 << bit);
9908 for (i = 0; i < MC_HASH_SIZE; i++)
9909 REG_WR(bp, MC_HASH_OFFSET(bp, i),
9910 mc_filter[i]);
9914 bp->rx_mode = rx_mode;
9915 bnx2x_set_storm_rx_mode(bp);
9918 /* called with rtnl_lock */
9919 static int bnx2x_change_mac_addr(struct net_device *dev, void *p)
9921 struct sockaddr *addr = p;
9922 struct bnx2x *bp = netdev_priv(dev);
9924 if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
9925 return -EINVAL;
9927 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
9928 if (netif_running(dev)) {
9929 if (CHIP_IS_E1(bp))
9930 bnx2x_set_mac_addr_e1(bp, 1);
9931 else
9932 bnx2x_set_mac_addr_e1h(bp, 1);
9935 return 0;
9938 /* called with rtnl_lock */
9939 static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
9941 struct mii_ioctl_data *data = if_mii(ifr);
9942 struct bnx2x *bp = netdev_priv(dev);
9943 int port = BP_PORT(bp);
9944 int err;
9946 switch (cmd) {
9947 case SIOCGMIIPHY:
9948 data->phy_id = bp->port.phy_addr;
9950 /* fallthrough */
9952 case SIOCGMIIREG: {
9953 u16 mii_regval;
9955 if (!netif_running(dev))
9956 return -EAGAIN;
9958 mutex_lock(&bp->port.phy_mutex);
9959 err = bnx2x_cl45_read(bp, port, 0, bp->port.phy_addr,
9960 DEFAULT_PHY_DEV_ADDR,
9961 (data->reg_num & 0x1f), &mii_regval);
9962 data->val_out = mii_regval;
9963 mutex_unlock(&bp->port.phy_mutex);
9964 return err;
9967 case SIOCSMIIREG:
9968 if (!capable(CAP_NET_ADMIN))
9969 return -EPERM;
9971 if (!netif_running(dev))
9972 return -EAGAIN;
9974 mutex_lock(&bp->port.phy_mutex);
9975 err = bnx2x_cl45_write(bp, port, 0, bp->port.phy_addr,
9976 DEFAULT_PHY_DEV_ADDR,
9977 (data->reg_num & 0x1f), data->val_in);
9978 mutex_unlock(&bp->port.phy_mutex);
9979 return err;
9981 default:
9982 /* do nothing */
9983 break;
9986 return -EOPNOTSUPP;
9989 /* called with rtnl_lock */
9990 static int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
9992 struct bnx2x *bp = netdev_priv(dev);
9993 int rc = 0;
9995 if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
9996 ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
9997 return -EINVAL;
9999 /* This does not race with packet allocation
10000  * because the actual alloc size is
10001  * only updated as part of load
10002  */
10003 dev->mtu = new_mtu;
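/* Editor's note: on a running interface the MTU change triggers a full
 * unload/load cycle below, which is when the RX buffer sizes derived
 * from dev->mtu are actually re-computed (see the comment above).
 */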
10005 if (netif_running(dev)) {
10006 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
10007 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
10010 return rc;
10013 static void bnx2x_tx_timeout(struct net_device *dev)
10015 struct bnx2x *bp = netdev_priv(dev);
10017 #ifdef BNX2X_STOP_ON_ERROR
10018 if (!bp->panic)
10019 bnx2x_panic();
10020 #endif
10021 /* This allows the netif to be shutdown gracefully before resetting */
10022 schedule_work(&bp->reset_task);
10025 #ifdef BCM_VLAN
10026 /* called with rtnl_lock */
10027 static void bnx2x_vlan_rx_register(struct net_device *dev,
10028 struct vlan_group *vlgrp)
10030 struct bnx2x *bp = netdev_priv(dev);
10032 bp->vlgrp = vlgrp;
10034 /* Set flags according to the required capabilities */
10035 bp->flags &= ~(HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
10037 if (dev->features & NETIF_F_HW_VLAN_TX)
10038 bp->flags |= HW_VLAN_TX_FLAG;
10040 if (dev->features & NETIF_F_HW_VLAN_RX)
10041 bp->flags |= HW_VLAN_RX_FLAG;
10043 if (netif_running(dev))
10044 bnx2x_set_client_config(bp);
10047 #endif
10049 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
10050 static void poll_bnx2x(struct net_device *dev)
10052 struct bnx2x *bp = netdev_priv(dev);
10054 disable_irq(bp->pdev->irq);
10055 bnx2x_interrupt(bp->pdev->irq, dev);
10056 enable_irq(bp->pdev->irq);
10058 #endif
10060 static const struct net_device_ops bnx2x_netdev_ops = {
10061 .ndo_open = bnx2x_open,
10062 .ndo_stop = bnx2x_close,
10063 .ndo_start_xmit = bnx2x_start_xmit,
10064 .ndo_set_multicast_list = bnx2x_set_rx_mode,
10065 .ndo_set_mac_address = bnx2x_change_mac_addr,
10066 .ndo_validate_addr = eth_validate_addr,
10067 .ndo_do_ioctl = bnx2x_ioctl,
10068 .ndo_change_mtu = bnx2x_change_mtu,
10069 .ndo_tx_timeout = bnx2x_tx_timeout,
10070 #ifdef BCM_VLAN
10071 .ndo_vlan_rx_register = bnx2x_vlan_rx_register,
10072 #endif
10073 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
10074 .ndo_poll_controller = poll_bnx2x,
10075 #endif
10079 static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
10080 struct net_device *dev)
10082 struct bnx2x *bp;
10083 int rc;
10085 SET_NETDEV_DEV(dev, &pdev->dev);
10086 bp = netdev_priv(dev);
10088 bp->dev = dev;
10089 bp->pdev = pdev;
10090 bp->flags = 0;
10091 bp->func = PCI_FUNC(pdev->devfn);
10093 rc = pci_enable_device(pdev);
10094 if (rc) {
10095 printk(KERN_ERR PFX "Cannot enable PCI device, aborting\n");
10096 goto err_out;
10099 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
10100 printk(KERN_ERR PFX "Cannot find PCI device base address,"
10101 " aborting\n");
10102 rc = -ENODEV;
10103 goto err_out_disable;
10106 if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
10107 printk(KERN_ERR PFX "Cannot find second PCI device"
10108 " base address, aborting\n");
10109 rc = -ENODEV;
10110 goto err_out_disable;
10113 if (atomic_read(&pdev->enable_cnt) == 1) {
10114 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
10115 if (rc) {
10116 printk(KERN_ERR PFX "Cannot obtain PCI resources,"
10117 " aborting\n");
10118 goto err_out_disable;
10121 pci_set_master(pdev);
10122 pci_save_state(pdev);
10125 bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
10126 if (bp->pm_cap == 0) {
10127 printk(KERN_ERR PFX "Cannot find power management"
10128 " capability, aborting\n");
10129 rc = -EIO;
10130 goto err_out_release;
10133 bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
10134 if (bp->pcie_cap == 0) {
10135 printk(KERN_ERR PFX "Cannot find PCI Express capability,"
10136 " aborting\n");
10137 rc = -EIO;
10138 goto err_out_release;
10141 if (pci_set_dma_mask(pdev, DMA_64BIT_MASK) == 0) {
10142 bp->flags |= USING_DAC_FLAG;
10143 if (pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK) != 0) {
10144 printk(KERN_ERR PFX "pci_set_consistent_dma_mask"
10145 " failed, aborting\n");
10146 rc = -EIO;
10147 goto err_out_release;
10150 } else if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) != 0) {
10151 printk(KERN_ERR PFX "System does not support DMA,"
10152 " aborting\n");
10153 rc = -EIO;
10154 goto err_out_release;
10157 dev->mem_start = pci_resource_start(pdev, 0);
10158 dev->base_addr = dev->mem_start;
10159 dev->mem_end = pci_resource_end(pdev, 0);
10161 dev->irq = pdev->irq;
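/* Editor's note: two memory BARs are mapped next -- BAR 0 holds the
 * register space (regview) and BAR 2 the doorbell window (doorbells),
 * with the doorbell mapping capped at BNX2X_DB_SIZE in case the BAR is
 * larger than the driver needs.
 */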
10163 bp->regview = pci_ioremap_bar(pdev, 0);
10164 if (!bp->regview) {
10165 printk(KERN_ERR PFX "Cannot map register space, aborting\n");
10166 rc = -ENOMEM;
10167 goto err_out_release;
10170 bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
10171 min_t(u64, BNX2X_DB_SIZE,
10172 pci_resource_len(pdev, 2)));
10173 if (!bp->doorbells) {
10174 printk(KERN_ERR PFX "Cannot map doorbell space, aborting\n");
10175 rc = -ENOMEM;
10176 goto err_out_unmap;
10179 bnx2x_set_power_state(bp, PCI_D0);
10181 /* clean indirect addresses */
10182 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
10183 PCICFG_VENDOR_ID_OFFSET);
10184 REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
10185 REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
10186 REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
10187 REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);
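/* Editor's note: the GRC address register and the PXP2 PGL address
 * windows are presumably cleared here so that indirect register accesses
 * start from a known state rather than whatever a previous driver or
 * firmware left behind.
 */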
10189 dev->watchdog_timeo = TX_TIMEOUT;
10191 dev->netdev_ops = &bnx2x_netdev_ops;
10192 dev->ethtool_ops = &bnx2x_ethtool_ops;
10193 dev->features |= NETIF_F_SG;
10194 dev->features |= NETIF_F_HW_CSUM;
10195 if (bp->flags & USING_DAC_FLAG)
10196 dev->features |= NETIF_F_HIGHDMA;
10197 #ifdef BCM_VLAN
10198 dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
10199 bp->flags |= (HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
10200 #endif
10201 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
10202 dev->features |= NETIF_F_TSO6;
10204 return 0;
10206 err_out_unmap:
10207 if (bp->regview) {
10208 iounmap(bp->regview);
10209 bp->regview = NULL;
10211 if (bp->doorbells) {
10212 iounmap(bp->doorbells);
10213 bp->doorbells = NULL;
10216 err_out_release:
10217 if (atomic_read(&pdev->enable_cnt) == 1)
10218 pci_release_regions(pdev);
10220 err_out_disable:
10221 pci_disable_device(pdev);
10222 pci_set_drvdata(pdev, NULL);
10224 err_out:
10225 return rc;
10228 static int __devinit bnx2x_get_pcie_width(struct bnx2x *bp)
10230 u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
10232 val = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
10233 return val;
10236 /* return link speed code: 1 = 2.5GHz (Gen1), 2 = 5GHz (Gen2) */
10237 static int __devinit bnx2x_get_pcie_speed(struct bnx2x *bp)
10239 u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
10241 val = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
10242 return val;
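/* Editor's note: both helpers above read the PCICFG_LINK_CONTROL word
 * through the register window and mask out the negotiated link fields;
 * the width feeds the "PCI-E x%d" part of the probe banner and the
 * speed code (1 or 2, per the comment above) selects the "2.5GHz" vs
 * "5GHz (Gen2)" string.
 */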
10245 static int __devinit bnx2x_init_one(struct pci_dev *pdev,
10246 const struct pci_device_id *ent)
10248 static int version_printed;
10249 struct net_device *dev = NULL;
10250 struct bnx2x *bp;
10251 int rc;
10253 if (version_printed++ == 0)
10254 printk(KERN_INFO "%s", version);
10256 /* dev zeroed in alloc_etherdev */
10257 dev = alloc_etherdev(sizeof(*bp));
10258 if (!dev) {
10259 printk(KERN_ERR PFX "Cannot allocate net device\n");
10260 return -ENOMEM;
10263 bp = netdev_priv(dev);
10264 bp->msglevel = debug;
10266 rc = bnx2x_init_dev(pdev, dev);
10267 if (rc < 0) {
10268 free_netdev(dev);
10269 return rc;
10272 rc = register_netdev(dev);
10273 if (rc) {
10274 dev_err(&pdev->dev, "Cannot register net device\n");
10275 goto init_one_exit;
10278 pci_set_drvdata(pdev, dev);
10280 rc = bnx2x_init_bp(bp);
10281 if (rc) {
10282 unregister_netdev(dev);
10283 goto init_one_exit;
10286 netif_carrier_off(dev);
10288 bp->common.name = board_info[ent->driver_data].name;
10289 printk(KERN_INFO "%s: %s (%c%d) PCI-E x%d %s found at mem %lx,"
10290 " IRQ %d, ", dev->name, bp->common.name,
10291 (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
10292 bnx2x_get_pcie_width(bp),
10293 (bnx2x_get_pcie_speed(bp) == 2) ? "5GHz (Gen2)" : "2.5GHz",
10294 dev->base_addr, bp->pdev->irq);
10295 printk(KERN_CONT "node addr %pM\n", dev->dev_addr);
10296 return 0;
10298 init_one_exit:
10299 if (bp->regview)
10300 iounmap(bp->regview);
10302 if (bp->doorbells)
10303 iounmap(bp->doorbells);
10305 free_netdev(dev);
10307 if (atomic_read(&pdev->enable_cnt) == 1)
10308 pci_release_regions(pdev);
10310 pci_disable_device(pdev);
10311 pci_set_drvdata(pdev, NULL);
10313 return rc;
10316 static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
10318 struct net_device *dev = pci_get_drvdata(pdev);
10319 struct bnx2x *bp;
10321 if (!dev) {
10322 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
10323 return;
10325 bp = netdev_priv(dev);
10327 unregister_netdev(dev);
10329 if (bp->regview)
10330 iounmap(bp->regview);
10332 if (bp->doorbells)
10333 iounmap(bp->doorbells);
10335 free_netdev(dev);
10337 if (atomic_read(&pdev->enable_cnt) == 1)
10338 pci_release_regions(pdev);
10340 pci_disable_device(pdev);
10341 pci_set_drvdata(pdev, NULL);
10344 static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
10346 struct net_device *dev = pci_get_drvdata(pdev);
10347 struct bnx2x *bp;
10349 if (!dev) {
10350 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
10351 return -ENODEV;
10353 bp = netdev_priv(dev);
10355 rtnl_lock();
10357 pci_save_state(pdev);
10359 if (!netif_running(dev)) {
10360 rtnl_unlock();
10361 return 0;
10364 netif_device_detach(dev);
10366 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
10368 bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
10370 rtnl_unlock();
10372 return 0;
10375 static int bnx2x_resume(struct pci_dev *pdev)
10377 struct net_device *dev = pci_get_drvdata(pdev);
10378 struct bnx2x *bp;
10379 int rc;
10381 if (!dev) {
10382 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
10383 return -ENODEV;
10385 bp = netdev_priv(dev);
10387 rtnl_lock();
10389 pci_restore_state(pdev);
10391 if (!netif_running(dev)) {
10392 rtnl_unlock();
10393 return 0;
10396 bnx2x_set_power_state(bp, PCI_D0);
10397 netif_device_attach(dev);
10399 rc = bnx2x_nic_load(bp, LOAD_OPEN);
10401 rtnl_unlock();
10403 return rc;
10406 static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
10408 int i;
10410 bp->state = BNX2X_STATE_ERROR;
10412 bp->rx_mode = BNX2X_RX_MODE_NONE;
10414 bnx2x_netif_stop(bp, 0);
10416 del_timer_sync(&bp->timer);
10417 bp->stats_state = STATS_STATE_DISABLED;
10418 DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
10420 /* Release IRQs */
10421 bnx2x_free_irq(bp);
10423 if (CHIP_IS_E1(bp)) {
10424 struct mac_configuration_cmd *config =
10425 bnx2x_sp(bp, mcast_config);
10427 for (i = 0; i < config->hdr.length_6b; i++)
10428 CAM_INVALIDATE(config->config_table[i]);
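/* Editor's note: during EEH recovery the device may be inaccessible, so
 * this unload path avoids ramrods and only marks the shadow CAM entries
 * invalid; a later bnx2x_nic_load() reprograms the MAC and multicast
 * filters from scratch.
 */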
10431 /* Free SKBs, SGEs, TPA pool and driver internals */
10432 bnx2x_free_skbs(bp);
10433 for_each_queue(bp, i)
10434 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
10435 bnx2x_free_mem(bp);
10437 bp->state = BNX2X_STATE_CLOSED;
10439 netif_carrier_off(bp->dev);
10441 return 0;
10444 static void bnx2x_eeh_recover(struct bnx2x *bp)
10446 u32 val;
10448 mutex_init(&bp->port.phy_mutex);
10450 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
10451 bp->link_params.shmem_base = bp->common.shmem_base;
10452 BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);
10454 if (!bp->common.shmem_base ||
10455 (bp->common.shmem_base < 0xA0000) ||
10456 (bp->common.shmem_base >= 0xC0000)) {
10457 BNX2X_DEV_INFO("MCP not active\n");
10458 bp->flags |= NO_MCP_FLAG;
10459 return;
10462 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
10463 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
10464 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
10465 BNX2X_ERR("BAD MCP validity signature\n");
10467 if (!BP_NOMCP(bp)) {
10468 bp->fw_seq = (SHMEM_RD(bp, func_mb[BP_FUNC(bp)].drv_mb_header)
10469 & DRV_MSG_SEQ_NUMBER_MASK);
10470 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
10474 /**
10475  * bnx2x_io_error_detected - called when PCI error is detected
10476  * @pdev: Pointer to PCI device
10477  * @state: The current pci connection state
10478  *
10479  * This function is called after a PCI bus error affecting
10480  * this device has been detected.
10481  */
10482 static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
10483 pci_channel_state_t state)
10485 struct net_device *dev = pci_get_drvdata(pdev);
10486 struct bnx2x *bp = netdev_priv(dev);
10488 rtnl_lock();
10490 netif_device_detach(dev);
10492 if (netif_running(dev))
10493 bnx2x_eeh_nic_unload(bp);
10495 pci_disable_device(pdev);
10497 rtnl_unlock();
10499 /* Request a slot reset */
10500 return PCI_ERS_RESULT_NEED_RESET;
10504 * bnx2x_io_slot_reset - called after the PCI bus has been reset
10505 * @pdev: Pointer to PCI device
10507 * Restart the card from scratch, as if from a cold-boot.
10509 static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
10511 struct net_device *dev = pci_get_drvdata(pdev);
10512 struct bnx2x *bp = netdev_priv(dev);
10514 rtnl_lock();
10516 if (pci_enable_device(pdev)) {
10517 dev_err(&pdev->dev,
10518 "Cannot re-enable PCI device after reset\n");
10519 rtnl_unlock();
10520 return PCI_ERS_RESULT_DISCONNECT;
10523 pci_set_master(pdev);
10524 pci_restore_state(pdev);
10526 if (netif_running(dev))
10527 bnx2x_set_power_state(bp, PCI_D0);
10529 rtnl_unlock();
10531 return PCI_ERS_RESULT_RECOVERED;
10534 /**
10535  * bnx2x_io_resume - called when traffic can start flowing again
10536  * @pdev: Pointer to PCI device
10537  *
10538  * This callback is called when the error recovery driver tells us that
10539  * it's OK to resume normal operation.
10540  */
10541 static void bnx2x_io_resume(struct pci_dev *pdev)
10543 struct net_device *dev = pci_get_drvdata(pdev);
10544 struct bnx2x *bp = netdev_priv(dev);
10546 rtnl_lock();
10548 bnx2x_eeh_recover(bp);
10550 if (netif_running(dev))
10551 bnx2x_nic_load(bp, LOAD_NORMAL);
10553 netif_device_attach(dev);
10555 rtnl_unlock();
10558 static struct pci_error_handlers bnx2x_err_handler = {
10559 .error_detected = bnx2x_io_error_detected,
10560 .slot_reset = bnx2x_io_slot_reset,
10561 .resume = bnx2x_io_resume,
10564 static struct pci_driver bnx2x_pci_driver = {
10565 .name = DRV_MODULE_NAME,
10566 .id_table = bnx2x_pci_tbl,
10567 .probe = bnx2x_init_one,
10568 .remove = __devexit_p(bnx2x_remove_one),
10569 .suspend = bnx2x_suspend,
10570 .resume = bnx2x_resume,
10571 .err_handler = &bnx2x_err_handler,
10574 static int __init bnx2x_init(void)
10576 bnx2x_wq = create_singlethread_workqueue("bnx2x");
10577 if (bnx2x_wq == NULL) {
10578 printk(KERN_ERR PFX "Cannot create workqueue\n");
10579 return -ENOMEM;
10582 return pci_register_driver(&bnx2x_pci_driver);
10585 static void __exit bnx2x_cleanup(void)
10587 pci_unregister_driver(&bnx2x_pci_driver);
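/* Editor's note: the PCI driver is unregistered before the workqueue is
 * destroyed, so no ->remove() or deferred reset/slowpath work can still
 * be queued on bnx2x_wq when destroy_workqueue() runs below.
 */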
10589 destroy_workqueue(bnx2x_wq);
10592 module_init(bnx2x_init);
10593 module_exit(bnx2x_cleanup);