/* bnx2x_main.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2009 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/device.h>  /* for dev_info() */
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/crc32c.h>
#include <linux/prefetch.h>
#include <linux/zlib.h>

#include "bnx2x_reg.h"
#include "bnx2x_fw_defs.h"
#include "bnx2x_hsi.h"
#include "bnx2x_link.h"
#include "bnx2x_init.h"

#define DRV_MODULE_VERSION	"1.45.24"
#define DRV_MODULE_RELDATE	"2009/01/14"
#define BNX2X_BC_VER		0x040200

/* Time in jiffies before concluding the transmitter is hung */
#define TX_TIMEOUT		(5*HZ)

static char version[] __devinitdata =
	"Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
	DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Eliezer Tamir");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710/57711/57711E Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

static int disable_tpa;
static int load_count[3]; /* 0-common, 1-port0, 2-port1 */

module_param(disable_tpa, int, 0);
module_param(use_inta, int, 0);
module_param(poll, int, 0);
module_param(debug, int, 0);
MODULE_PARM_DESC(disable_tpa, "disable the TPA (LRO) feature");
MODULE_PARM_DESC(use_inta, "use INT#A instead of MSI-X");
MODULE_PARM_DESC(poll, "use polling (for debug)");
MODULE_PARM_DESC(debug, "default debug msglevel");

module_param(use_multi, int, 0);
MODULE_PARM_DESC(use_multi, "use per-CPU queues");

static struct workqueue_struct *bnx2x_wq;
enum bnx2x_board_type {
	BCM57710 = 0,
	BCM57711,
	BCM57711E,
};

/* indexed by board_type, above */
static struct {
	char *name;
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM57710 XGb" },
	{ "Broadcom NetXtreme II BCM57711 XGb" },
	{ "Broadcom NetXtreme II BCM57711E XGb" }
};

static const struct pci_device_id bnx2x_pci_tbl[] = {
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57710,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57710 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711E,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711E },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);
/****************************************************************************
* General service functions
****************************************************************************/

/*
 * locking is done by mcp
 */
static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
{
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
}
static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
{
	u32 val;

	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);

	return val;
}
static const u32 dmae_reg_go_c[] = {
	DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
	DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
	DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
	DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
};
/* copy command into DMAE command memory and set DMAE command go */
static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
			    int idx)
{
	u32 cmd_offset;
	int i;

	cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
	for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
		REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));

		DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
		   idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
	}
	REG_WR(bp, dmae_reg_go_c[idx], 1);
}
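/*
 * Added illustration (not part of the original source): the DMAE command
 * memory is laid out as an array of struct dmae_command slots, so command
 * slot idx starts at GRC offset DMAE_REG_CMD_MEM + idx * sizeof(struct
 * dmae_command).  The loop above copies the command one 32-bit word at a
 * time, and the final write of 1 to dmae_reg_go_c[idx] kicks off that
 * DMAE channel.
 */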
void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
		      u32 len32)
{
	struct dmae_command *dmae = &bp->init_dmae;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = 200;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);

		DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x len32 %d)"
		   "  using indirect\n", dst_addr, len32);
		bnx2x_init_ind_wr(bp, dst_addr, data, len32);
		return;
	}

	mutex_lock(&bp->dmae_mutex);

	memset(dmae, 0, sizeof(struct dmae_command));

	dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = U64_LO(dma_addr);
	dmae->src_addr_hi = U64_HI(dma_addr);
	dmae->dst_addr_lo = dst_addr >> 2;
	dmae->dst_addr_hi = 0;
	dmae->len = len32;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	DP(BNX2X_MSG_OFF, "dmae: opcode 0x%08x\n"
	   DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
		    "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
	   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
	   dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, dst_addr,
	   dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);
	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	*wb_comp = 0;

	bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

	udelay(5);

	while (*wb_comp != DMAE_COMP_VAL) {
		DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);

		if (!cnt) {
			BNX2X_ERR("dmae timeout!\n");
			break;
		}
		cnt--;
		/* adjust delay for emulation/FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			msleep(100);
		else
			udelay(5);
	}

	mutex_unlock(&bp->dmae_mutex);
}
void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
{
	struct dmae_command *dmae = &bp->init_dmae;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = 200;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);
		int i;

		DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x len32 %d)"
		   "  using indirect\n", src_addr, len32);
		for (i = 0; i < len32; i++)
			data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
		return;
	}

	mutex_lock(&bp->dmae_mutex);

	memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
	memset(dmae, 0, sizeof(struct dmae_command));

	dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = src_addr >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
	dmae->len = len32;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	DP(BNX2X_MSG_OFF, "dmae: opcode 0x%08x\n"
	   DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
		    "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
	   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
	   dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, src_addr,
	   dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);

	*wb_comp = 0;

	bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

	udelay(5);

	while (*wb_comp != DMAE_COMP_VAL) {

		if (!cnt) {
			BNX2X_ERR("dmae timeout!\n");
			break;
		}
		cnt--;
		/* adjust delay for emulation/FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			msleep(100);
		else
			udelay(5);
	}

	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	mutex_unlock(&bp->dmae_mutex);
}
/* used only for slowpath so not inlined */
static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
{
	u32 wb_write[2];

	wb_write[0] = val_hi;
	wb_write[1] = val_lo;
	REG_WR_DMAE(bp, reg, wb_write, 2);
}

static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
{
	u32 wb_data[2];

	REG_RD_DMAE(bp, reg, wb_data, 2);

	return HILO_U64(wb_data[0], wb_data[1]);
}
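/*
 * Added note (not in the original source): wide-bus registers are accessed
 * as a pair of 32-bit words over DMAE; HILO_U64() is assumed to recompose
 * them as ((u64)hi << 32) | lo, the inverse of the hi/lo split performed
 * by bnx2x_wb_wr() above.
 */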
static int bnx2x_mc_assert(struct bnx2x *bp)
{
	char last_idx;
	int i, rc = 0;
	u32 row0, row1, row2, row3;

	/* XSTORM */
	last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
			   XSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* TSTORM */
	last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
			   TSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* CSTORM */
	last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
			   CSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* USTORM */
	last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
			   USTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	return rc;
}
static void bnx2x_fw_dump(struct bnx2x *bp)
{
	u32 mark, offset;
	u32 data[9];
	int word;

	mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104);
	mark = ((mark + 0x3) & ~0x3);
	printk(KERN_ERR PFX "begin fw dump (mark 0x%x)\n" KERN_ERR, mark);

	for (offset = mark - 0x08000000; offset <= 0xF900; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
						  offset + 4*word));
		data[8] = 0x0;
		printk(KERN_CONT "%s", (char *)data);
	}
	for (offset = 0xF108; offset <= mark - 0x08000000; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
						  offset + 4*word));
		data[8] = 0x0;
		printk(KERN_CONT "%s", (char *)data);
	}
	printk("\n" KERN_ERR PFX "end of fw dump\n");
}
static void bnx2x_panic_dump(struct bnx2x *bp)
{
	int i;
	u16 j, start, end;

	bp->stats_state = STATS_STATE_DISABLED;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	BNX2X_ERR("begin crash dump -----------------\n");

	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		struct eth_tx_db_data *hw_prods = fp->hw_tx_prods;

		BNX2X_ERR("queue[%d]: tx_pkt_prod(%x)  tx_pkt_cons(%x)"
			  "  tx_bd_prod(%x)  tx_bd_cons(%x)  *tx_cons_sb(%x)\n",
			  i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
			  fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
		BNX2X_ERR("          rx_bd_prod(%x)  rx_bd_cons(%x)"
			  "  *rx_bd_cons_sb(%x)  rx_comp_prod(%x)"
			  "  rx_comp_cons(%x)  *rx_cons_sb(%x)\n",
			  fp->rx_bd_prod, fp->rx_bd_cons,
			  le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
			  fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
		BNX2X_ERR("          rx_sge_prod(%x)  last_max_sge(%x)"
			  "  fp_c_idx(%x)  *sb_c_idx(%x)  fp_u_idx(%x)"
			  "  *sb_u_idx(%x)  bd data(%x,%x)\n",
			  fp->rx_sge_prod, fp->last_max_sge, fp->fp_c_idx,
			  fp->status_blk->c_status_block.status_block_index,
			  fp->fp_u_idx,
			  fp->status_blk->u_status_block.status_block_index,
			  hw_prods->packets_prod, hw_prods->bds_prod);

		start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
		end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
		for (j = start; j < end; j++) {
			struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];

			BNX2X_ERR("packet[%x]=[%p,%x]\n", j,
				  sw_bd->skb, sw_bd->first_bd);
		}

		start = TX_BD(fp->tx_bd_cons - 10);
		end = TX_BD(fp->tx_bd_cons + 254);
		for (j = start; j < end; j++) {
			u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];

			BNX2X_ERR("tx_bd[%x]=[%x:%x:%x:%x]\n",
				  j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
		}

		start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
		end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
		for (j = start; j < end; j++) {
			u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
			struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];

			BNX2X_ERR("rx_bd[%x]=[%x:%x]  sw_bd=[%p]\n",
				  j, rx_bd[1], rx_bd[0], sw_bd->skb);
		}

		start = RX_SGE(fp->rx_sge_prod);
		end = RX_SGE(fp->last_max_sge);
		for (j = start; j < end; j++) {
			u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
			struct sw_rx_page *sw_page = &fp->rx_page_ring[j];

			BNX2X_ERR("rx_sge[%x]=[%x:%x]  sw_page=[%p]\n",
				  j, rx_sge[1], rx_sge[0], sw_page->page);
		}

		start = RCQ_BD(fp->rx_comp_cons - 10);
		end = RCQ_BD(fp->rx_comp_cons + 503);
		for (j = start; j < end; j++) {
			u32 *cqe = (u32 *)&fp->rx_comp_ring[j];

			BNX2X_ERR("cqe[%x]=[%x:%x:%x:%x]\n",
				  j, cqe[0], cqe[1], cqe[2], cqe[3]);
		}
	}

	BNX2X_ERR("def_c_idx(%u)  def_u_idx(%u)  def_x_idx(%u)"
		  "  def_t_idx(%u)  def_att_idx(%u)  attn_state(%u)"
		  "  spq_prod_idx(%u)\n",
		  bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
		  bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);

	BNX2X_ERR("end crash dump -----------------\n");
}
static void bnx2x_int_enable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;

	if (msix) {
		val &= ~HC_CONFIG_0_REG_SINGLE_ISR_EN_0;
		val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else {
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_INT_LINE_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);

		DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)  MSI-X %d\n",
		   val, port, addr, msix);

		REG_WR(bp, addr, val);

		val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
	}

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)  MSI-X %d\n",
	   val, port, addr, msix);

	REG_WR(bp, addr, val);

	if (CHIP_IS_E1H(bp)) {
		/* init leading/trailing edge */
		if (IS_E1HMF(bp)) {
			val = (0xfe0f | (1 << (BP_E1HVN(bp) + 4)));
			if (bp->port.pmf)
				/* enable nig attention */
				val |= 0x1100;
		} else
			val = 0xffff;

		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
	}
}
static void bnx2x_int_disable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);

	val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
		 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
		 HC_CONFIG_0_REG_INT_LINE_EN_0 |
		 HC_CONFIG_0_REG_ATTN_BIT_EN_0);

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
	   val, port, addr);

	REG_WR(bp, addr, val);
	if (REG_RD(bp, addr) != val)
		BNX2X_ERR("BUG! proper val not read from IGU!\n");
}

static void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
{
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int i;

	/* disable interrupt handling */
	atomic_inc(&bp->intr_sem);
	if (disable_hw)
		/* prevent the HW from sending interrupts */
		bnx2x_int_disable(bp);

	/* make sure all ISRs are done */
	if (msix) {
		for_each_queue(bp, i)
			synchronize_irq(bp->msix_table[i].vector);

		/* one more for the Slow Path IRQ */
		synchronize_irq(bp->msix_table[i].vector);
	} else
		synchronize_irq(bp->pdev->irq);

	/* make sure sp_task is not running */
	cancel_delayed_work(&bp->sp_task);
	flush_workqueue(bnx2x_wq);
}
/*
 * General service functions
 */

static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
				u8 storm, u16 index, u8 op, u8 update)
{
	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
		       COMMAND_REG_INT_ACK);
	struct igu_ack_register igu_ack;

	igu_ack.status_block_index = index;
	igu_ack.sb_id_and_flags =
			((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
			 (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
			 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
			 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));

	DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n",
	   (*(u32 *)&igu_ack), hc_addr);
	REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));
}
static inline u16 bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
{
	struct host_status_block *fpsb = fp->status_blk;
	u16 rc = 0;

	barrier(); /* status block is written to by the chip */
	if (fp->fp_c_idx != fpsb->c_status_block.status_block_index) {
		fp->fp_c_idx = fpsb->c_status_block.status_block_index;
		rc |= 1;
	}
	if (fp->fp_u_idx != fpsb->u_status_block.status_block_index) {
		fp->fp_u_idx = fpsb->u_status_block.status_block_index;
		rc |= 2;
	}
	return rc;
}

static u16 bnx2x_ack_int(struct bnx2x *bp)
{
	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
		       COMMAND_REG_SIMD_MASK);
	u32 result = REG_RD(bp, hc_addr);

	DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
	   result, hc_addr);

	return result;
}
/*
 * fast path service functions
 */

static inline int bnx2x_has_tx_work(struct bnx2x_fastpath *fp)
{
	u16 tx_cons_sb;

	/* Tell compiler that status block fields can change */
	barrier();
	tx_cons_sb = le16_to_cpu(*fp->tx_cons_sb);
	return ((fp->tx_pkt_prod != tx_cons_sb) ||
		(fp->tx_pkt_prod != fp->tx_pkt_cons));
}
/* free skb in the packet ring at pos idx
 * return idx of last bd freed
 */
static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			     u16 idx)
{
	struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
	struct eth_tx_bd *tx_bd;
	struct sk_buff *skb = tx_buf->skb;
	u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
	int nbd;

	DP(BNX2X_MSG_OFF, "pkt_idx %d  buff @(%p)->skb %p\n",
	   idx, tx_buf, skb);

	/* unmap first bd */
	DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
	tx_bd = &fp->tx_desc_ring[bd_idx];
	pci_unmap_single(bp->pdev, BD_UNMAP_ADDR(tx_bd),
			 BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);

	nbd = le16_to_cpu(tx_bd->nbd) - 1;
	new_cons = nbd + tx_buf->first_bd;
#ifdef BNX2X_STOP_ON_ERROR
	if (nbd > (MAX_SKB_FRAGS + 2)) {
		BNX2X_ERR("BAD nbd!\n");
		bnx2x_panic();
	}
#endif

	/* Skip a parse bd and the TSO split header bd
	   since they have no mapping */
	if (nbd)
		bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	if (tx_bd->bd_flags.as_bitfield & (ETH_TX_BD_FLAGS_IP_CSUM |
					   ETH_TX_BD_FLAGS_TCP_CSUM |
					   ETH_TX_BD_FLAGS_SW_LSO)) {
		if (--nbd)
			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
		tx_bd = &fp->tx_desc_ring[bd_idx];
		/* is this a TSO split header bd? */
		if (tx_bd->bd_flags.as_bitfield & ETH_TX_BD_FLAGS_SW_LSO) {
			if (--nbd)
				bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
		}
	}

	/* now free frags */
	while (nbd > 0) {

		DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
		tx_bd = &fp->tx_desc_ring[bd_idx];
		pci_unmap_page(bp->pdev, BD_UNMAP_ADDR(tx_bd),
			       BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);
		if (--nbd)
			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* release skb */
	dev_kfree_skb(skb);
	tx_buf->first_bd = 0;
	tx_buf->skb = NULL;

	return new_cons;
}
static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
{
	s16 used;
	u16 prod;
	u16 cons;

	barrier(); /* Tell compiler that prod and cons can change */
	prod = fp->tx_bd_prod;
	cons = fp->tx_bd_cons;

	/* NUM_TX_RINGS = number of "next-page" entries
	   It will be used as a threshold */
	used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;

#ifdef BNX2X_STOP_ON_ERROR
	WARN_ON(used < 0);
	WARN_ON(used > fp->bp->tx_ring_size);
	WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL);
#endif

	return (s16)(fp->bp->tx_ring_size) - used;
}
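/*
 * Added illustration (hypothetical numbers, not from the original source):
 * with tx_ring_size = 4078, tx_bd_prod = 100, tx_bd_cons = 90 and
 * NUM_TX_RINGS = 16, used = (100 - 90) + 16 = 26, so bnx2x_tx_avail()
 * reports 4078 - 26 = 4052 free BDs; the NUM_TX_RINGS term reserves the
 * "next-page" BDs that can never carry data.
 */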
static void bnx2x_tx_int(struct bnx2x_fastpath *fp, int work)
{
	struct bnx2x *bp = fp->bp;
	u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
	int done = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return;
#endif

	hw_cons = le16_to_cpu(*fp->tx_cons_sb);
	sw_cons = fp->tx_pkt_cons;

	while (sw_cons != hw_cons) {
		u16 pkt_cons;

		pkt_cons = TX_BD(sw_cons);

		/* prefetch(bp->tx_buf_ring[pkt_cons].skb); */

		DP(NETIF_MSG_TX_DONE, "hw_cons %u  sw_cons %u  pkt_cons %u\n",
		   hw_cons, sw_cons, pkt_cons);

/*		if (NEXT_TX_IDX(sw_cons) != hw_cons) {
			prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
		}
*/
		bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
		sw_cons++;
		done++;

		if (done == work)
			break;
	}

	fp->tx_pkt_cons = sw_cons;
	fp->tx_bd_cons = bd_cons;

	/* Need to make the tx_cons update visible to start_xmit()
	 * before checking for netif_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	/* TBD need a thresh? */
	if (unlikely(netif_queue_stopped(bp->dev))) {

		netif_tx_lock(bp->dev);

		if (netif_queue_stopped(bp->dev) &&
		    (bp->state == BNX2X_STATE_OPEN) &&
		    (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
			netif_wake_queue(bp->dev);

		netif_tx_unlock(bp->dev);
	}
}
static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
			   union eth_rx_cqe *rr_cqe)
{
	struct bnx2x *bp = fp->bp;
	int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
	int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);

	DP(BNX2X_MSG_SP,
	   "fp %d  cid %d  got ramrod #%d  state is %x  type is %d\n",
	   FP_IDX(fp), cid, command, bp->state,
	   rr_cqe->ramrod_cqe.ramrod_type);

	if (FP_IDX(fp)) {
		switch (command | fp->state) {
		case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
						BNX2X_FP_STATE_OPENING):
			DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
			   cid);
			fp->state = BNX2X_FP_STATE_OPEN;
			break;

		case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
			DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
			   cid);
			fp->state = BNX2X_FP_STATE_HALTED;
			break;

		default:
			BNX2X_ERR("unexpected MC reply (%d)  "
				  "fp->state is %x\n", command, fp->state);
			break;
		}
		mb(); /* force bnx2x_wait_ramrod() to see the change */
		return;
	}

	switch (command | bp->state) {
	case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
		DP(NETIF_MSG_IFUP, "got setup ramrod\n");
		bp->state = BNX2X_STATE_OPEN;
		break;

	case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
		bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
		fp->state = BNX2X_FP_STATE_HALTED;
		break;

	case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
		bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
		break;

	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
		DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
		bp->set_mac_pending = 0;
		break;

	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
		break;

	default:
		BNX2X_ERR("unexpected MC reply (%d)  bp->state is %x\n",
			  command, bp->state);
		break;
	}
	mb(); /* force bnx2x_wait_ramrod() to see the change */
}
static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct page *page = sw_buf->page;
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];

	/* Skip "next page" elements */
	if (!page)
		return;

	pci_unmap_page(bp->pdev, pci_unmap_addr(sw_buf, mapping),
		       SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
	__free_pages(page, PAGES_PER_SGE_SHIFT);

	sw_buf->page = NULL;
	sge->addr_hi = 0;
	sge->addr_lo = 0;
}

static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
					   struct bnx2x_fastpath *fp, int last)
{
	int i;

	for (i = 0; i < last; i++)
		bnx2x_free_rx_sge(bp, fp, i);
}
static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
	dma_addr_t mapping;

	if (unlikely(page == NULL))
		return -ENOMEM;

	mapping = pci_map_page(bp->pdev, page, 0, SGE_PAGE_SIZE*PAGES_PER_SGE,
			       PCI_DMA_FROMDEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		__free_pages(page, PAGES_PER_SGE_SHIFT);
		return -ENOMEM;
	}

	sw_buf->page = page;
	pci_unmap_addr_set(sw_buf, mapping, mapping);

	sge->addr_hi = cpu_to_le32(U64_HI(mapping));
	sge->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}
static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct sk_buff *skb;
	struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
	struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
	dma_addr_t mapping;

	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
	if (unlikely(skb == NULL))
		return -ENOMEM;

	mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_size,
				 PCI_DMA_FROMDEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		dev_kfree_skb(skb);
		return -ENOMEM;
	}

	rx_buf->skb = skb;
	pci_unmap_addr_set(rx_buf, mapping, mapping);

	rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}
/* note that we are not allocating a new skb,
 * we are just moving one from cons to prod
 * we are not creating a new mapping,
 * so there is no need to check for dma_mapping_error().
 */
static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
			       struct sk_buff *skb, u16 cons, u16 prod)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];

	pci_dma_sync_single_for_device(bp->pdev,
				       pci_unmap_addr(cons_rx_buf, mapping),
				       bp->rx_offset + RX_COPY_THRESH,
				       PCI_DMA_FROMDEVICE);

	prod_rx_buf->skb = cons_rx_buf->skb;
	pci_unmap_addr_set(prod_rx_buf, mapping,
			   pci_unmap_addr(cons_rx_buf, mapping));
	*prod_bd = *cons_bd;
}
static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
					     u16 idx)
{
	u16 last_max = fp->last_max_sge;

	if (SUB_S16(idx, last_max) > 0)
		fp->last_max_sge = idx;
}

static void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
{
	int i, j;

	for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
		int idx = RX_SGE_CNT * i - 1;

		for (j = 0; j < 2; j++) {
			SGE_MASK_CLEAR_BIT(fp, idx);
			idx--;
		}
	}
}
*fp
,
1107 struct eth_fast_path_rx_cqe
*fp_cqe
)
1109 struct bnx2x
*bp
= fp
->bp
;
1110 u16 sge_len
= SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe
->pkt_len
) -
1111 le16_to_cpu(fp_cqe
->len_on_bd
)) >>
1113 u16 last_max
, last_elem
, first_elem
;
1120 /* First mark all used pages */
1121 for (i
= 0; i
< sge_len
; i
++)
1122 SGE_MASK_CLEAR_BIT(fp
, RX_SGE(le16_to_cpu(fp_cqe
->sgl
[i
])));
1124 DP(NETIF_MSG_RX_STATUS
, "fp_cqe->sgl[%d] = %d\n",
1125 sge_len
- 1, le16_to_cpu(fp_cqe
->sgl
[sge_len
- 1]));
1127 /* Here we assume that the last SGE index is the biggest */
1128 prefetch((void *)(fp
->sge_mask
));
1129 bnx2x_update_last_max_sge(fp
, le16_to_cpu(fp_cqe
->sgl
[sge_len
- 1]));
1131 last_max
= RX_SGE(fp
->last_max_sge
);
1132 last_elem
= last_max
>> RX_SGE_MASK_ELEM_SHIFT
;
1133 first_elem
= RX_SGE(fp
->rx_sge_prod
) >> RX_SGE_MASK_ELEM_SHIFT
;
1135 /* If ring is not full */
1136 if (last_elem
+ 1 != first_elem
)
1139 /* Now update the prod */
1140 for (i
= first_elem
; i
!= last_elem
; i
= NEXT_SGE_MASK_ELEM(i
)) {
1141 if (likely(fp
->sge_mask
[i
]))
1144 fp
->sge_mask
[i
] = RX_SGE_MASK_ELEM_ONE_MASK
;
1145 delta
+= RX_SGE_MASK_ELEM_SZ
;
1149 fp
->rx_sge_prod
+= delta
;
1150 /* clear page-end entries */
1151 bnx2x_clear_sge_mask_next_elems(fp
);
1154 DP(NETIF_MSG_RX_STATUS
,
1155 "fp->last_max_sge = %d fp->rx_sge_prod = %d\n",
1156 fp
->last_max_sge
, fp
->rx_sge_prod
);
static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
{
	/* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
	memset(fp->sge_mask, 0xff,
	       (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));

	/* Clear the two last indices in the page to 1:
	   these are the indices that correspond to the "next" element,
	   hence will never be indicated and should be removed from
	   the calculations. */
	bnx2x_clear_sge_mask_next_elems(fp);
}
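/*
 * Added note (not part of the original source): each element of sge_mask
 * carries one bit per SGE ring entry; bnx2x_update_sge_prod() clears a bit
 * when the FW indicates that SGE and only advances rx_sge_prod across mask
 * words that have gone fully to zero.  The two "next page" indices of every
 * SGE page are pre-cleared here because the FW never reports them, so they
 * would otherwise block the producer forever.
 */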
static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
			    struct sk_buff *skb, u16 cons, u16 prod)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
	dma_addr_t mapping;

	/* move empty skb from pool to prod and map it */
	prod_rx_buf->skb = fp->tpa_pool[queue].skb;
	mapping = pci_map_single(bp->pdev, fp->tpa_pool[queue].skb->data,
				 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
	pci_unmap_addr_set(prod_rx_buf, mapping, mapping);

	/* move partial skb from cons to pool (don't unmap yet) */
	fp->tpa_pool[queue] = *cons_rx_buf;

	/* mark bin state as start - print error if current state != stop */
	if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
		BNX2X_ERR("start of bin not in stop [%d]\n", queue);

	fp->tpa_state[queue] = BNX2X_TPA_START;

	/* point prod_bd to new skb */
	prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

#ifdef BNX2X_STOP_ON_ERROR
	fp->tpa_queue_used |= (1 << queue);
#ifdef __powerpc64__
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
#else
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
#endif
	   fp->tpa_queue_used);
#endif
}
static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			       struct sk_buff *skb,
			       struct eth_fast_path_rx_cqe *fp_cqe,
			       u16 cqe_idx)
{
	struct sw_rx_page *rx_pg, old_rx_pg;
	u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
	u32 i, frag_len, frag_size, pages;
	int err;
	int j;

	frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
	pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;

	/* This is needed in order to enable forwarding support */
	if (frag_size)
		skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE,
					       max(frag_size, (u32)len_on_bd));

#ifdef BNX2X_STOP_ON_ERROR
	if (pages >
	    min((u32)8, (u32)MAX_SKB_FRAGS) * SGE_PAGE_SIZE * PAGES_PER_SGE) {
		BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
			  pages, cqe_idx);
		BNX2X_ERR("fp_cqe->pkt_len = %d  fp_cqe->len_on_bd = %d\n",
			  fp_cqe->pkt_len, len_on_bd);
		bnx2x_panic();
		return -EINVAL;
	}
#endif

	/* Run through the SGL and compose the fragmented skb */
	for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
		u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j]));

		/* FW gives the indices of the SGE as if the ring is an array
		   (meaning that "next" element will consume 2 indices) */
		frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
		rx_pg = &fp->rx_page_ring[sge_idx];
		old_rx_pg = *rx_pg;

		/* If we fail to allocate a substitute page, we simply stop
		   where we are and drop the whole packet */
		err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
		if (unlikely(err)) {
			bp->eth_stats.rx_skb_alloc_failed++;
			return err;
		}

		/* Unmap the page as we r going to pass it to the stack */
		pci_unmap_page(bp->pdev, pci_unmap_addr(&old_rx_pg, mapping),
			       SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);

		/* Add one frag and update the appropriate fields in the skb */
		skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);

		skb->data_len += frag_len;
		skb->truesize += frag_len;
		skb->len += frag_len;

		frag_size -= frag_len;
	}

	return 0;
}
static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			   u16 queue, int pad, int len, union eth_rx_cqe *cqe,
			   u16 cqe_idx)
{
	struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
	struct sk_buff *skb = rx_buf->skb;
	struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);

	/* Unmap skb in the pool anyway, as we are going to change
	   pool entry status to BNX2X_TPA_STOP even if new skb allocation
	   fails. */
	pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
			 bp->rx_buf_size, PCI_DMA_FROMDEVICE);

	if (likely(new_skb)) {
		/* fix ip xsum and give it to the stack */
		/* (no need to map the new skb) */
		int is_vlan_cqe =
			(le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
			 PARSING_FLAGS_VLAN);
		int is_not_hwaccel_vlan_cqe =
			(is_vlan_cqe && (!(bp->flags & HW_VLAN_RX_FLAG)));
		struct iphdr *iph;

		prefetch(((char *)(skb)) + 128);

#ifdef BNX2X_STOP_ON_ERROR
		if (pad + len > bp->rx_buf_size) {
			BNX2X_ERR("skb_put is about to fail... "
				  "pad %d  len %d  rx_buf_size %d\n",
				  pad, len, bp->rx_buf_size);
			bnx2x_panic();
			return;
		}
#endif

		skb_reserve(skb, pad);
		skb_put(skb, len);

		skb->protocol = eth_type_trans(skb, bp->dev);
		skb->ip_summed = CHECKSUM_UNNECESSARY;

		iph = (struct iphdr *)skb->data;

		/* If there is no Rx VLAN offloading -
		   take VLAN tag into an account */
		if (unlikely(is_not_hwaccel_vlan_cqe))
			iph = (struct iphdr *)((u8 *)iph + VLAN_HLEN);

		iph->check = 0;
		iph->check = ip_fast_csum((u8 *)iph, iph->ihl);

		if (!bnx2x_fill_frag_skb(bp, fp, skb,
					 &cqe->fast_path_cqe, cqe_idx)) {
			if ((bp->vlgrp != NULL) && is_vlan_cqe &&
			    (!is_not_hwaccel_vlan_cqe))
				vlan_hwaccel_receive_skb(skb, bp->vlgrp,
						le16_to_cpu(cqe->fast_path_cqe.
							    vlan_tag));
			else
				netif_receive_skb(skb);
		} else {
			DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
			   " - dropping packet!\n");
			dev_kfree_skb(skb);
		}

		/* put new skb in bin */
		fp->tpa_pool[queue].skb = new_skb;

	} else {
		/* else drop the packet and keep the buffer in the bin */
		DP(NETIF_MSG_RX_STATUS,
		   "Failed to allocate new skb - dropping packet!\n");
		bp->eth_stats.rx_skb_alloc_failed++;
	}

	fp->tpa_state[queue] = BNX2X_TPA_STOP;
}
static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
					struct bnx2x_fastpath *fp,
					u16 bd_prod, u16 rx_comp_prod,
					u16 rx_sge_prod)
{
	struct tstorm_eth_rx_producers rx_prods = {0};
	int i;

	/* Update producers */
	rx_prods.bd_prod = bd_prod;
	rx_prods.cqe_prod = rx_comp_prod;
	rx_prods.sge_prod = rx_sge_prod;

	/*
	 * Make sure that the BD and SGE data is updated before updating the
	 * producers since FW might read the BD/SGE right after the producer
	 * is updated.
	 * This is only applicable for weak-ordered memory model archs such
	 * as IA-64. The following barrier is also mandatory since FW will
	 * assumes BDs must have buffers.
	 */
	wmb();

	for (i = 0; i < sizeof(struct tstorm_eth_rx_producers)/4; i++)
		REG_WR(bp, BAR_TSTRORM_INTMEM +
		       TSTORM_RX_PRODS_OFFSET(BP_PORT(bp), FP_CL_ID(fp)) + i*4,
		       ((u32 *)&rx_prods)[i]);

	mmiowb(); /* keep prod updates ordered */

	DP(NETIF_MSG_RX_STATUS,
	   "Wrote: bd_prod %u  cqe_prod %u  sge_prod %u\n",
	   bd_prod, rx_comp_prod, rx_sge_prod);
}
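/*
 * Added note (not part of the original source): this is the usual
 * producer-side publication pattern -- fill the BDs/SGEs, issue wmb(), then
 * write the producer indices -- so that on a weakly ordered CPU the chip can
 * never observe the new producer values before the descriptor contents they
 * cover; mmiowb() then keeps the MMIO producer writes themselves ordered.
 */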
static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
{
	struct bnx2x *bp = fp->bp;
	u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
	u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
	int rx_pkt = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return 0;
#endif

	/* CQ "next element" is of the size of the regular element,
	   that's why it's ok here */
	hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
	if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
		hw_comp_cons++;

	bd_cons = fp->rx_bd_cons;
	bd_prod = fp->rx_bd_prod;
	bd_prod_fw = bd_prod;
	sw_comp_cons = fp->rx_comp_cons;
	sw_comp_prod = fp->rx_comp_prod;

	/* Memory barrier necessary as speculative reads of the rx
	 * buffer can be ahead of the index in the status block
	 */
	rmb();

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]:  hw_comp_cons %u  sw_comp_cons %u\n",
	   FP_IDX(fp), hw_comp_cons, sw_comp_cons);

	while (sw_comp_cons != hw_comp_cons) {
		struct sw_rx_bd *rx_buf = NULL;
		struct sk_buff *skb;
		union eth_rx_cqe *cqe;
		u8 cqe_fp_flags;
		u16 len, pad;

		comp_ring_cons = RCQ_BD(sw_comp_cons);
		bd_prod = RX_BD(bd_prod);
		bd_cons = RX_BD(bd_cons);

		cqe = &fp->rx_comp_ring[comp_ring_cons];
		cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;

		DP(NETIF_MSG_RX_STATUS, "CQE type %x  err %x  status %x"
		   "  queue %x  vlan %x  len %u\n", CQE_TYPE(cqe_fp_flags),
		   cqe_fp_flags, cqe->fast_path_cqe.status_flags,
		   le32_to_cpu(cqe->fast_path_cqe.rss_hash_result),
		   le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
		   le16_to_cpu(cqe->fast_path_cqe.pkt_len));

		/* is this a slowpath msg? */
		if (unlikely(CQE_TYPE(cqe_fp_flags))) {
			bnx2x_sp_event(fp, cqe);
			goto next_cqe;

		/* this is an rx packet */
		} else {
			rx_buf = &fp->rx_buf_ring[bd_cons];
			skb = rx_buf->skb;
			len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
			pad = cqe->fast_path_cqe.placement_offset;

			/* If CQE is marked both TPA_START and TPA_END
			   it is a non-TPA CQE */
			if ((!fp->disable_tpa) &&
			    (TPA_TYPE(cqe_fp_flags) !=
					(TPA_TYPE_START | TPA_TYPE_END))) {
				u16 queue = cqe->fast_path_cqe.queue_index;

				if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
					DP(NETIF_MSG_RX_STATUS,
					   "calling tpa_start on queue %d\n",
					   queue);

					bnx2x_tpa_start(fp, queue, skb,
							bd_cons, bd_prod);
					goto next_rx;
				}

				if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
					DP(NETIF_MSG_RX_STATUS,
					   "calling tpa_stop on queue %d\n",
					   queue);

					if (!BNX2X_RX_SUM_FIX(cqe))
						BNX2X_ERR("STOP on none TCP "
							  "data\n");

					/* This is a size of the linear data
					   on this skb */
					len = le16_to_cpu(cqe->fast_path_cqe.
								len_on_bd);
					bnx2x_tpa_stop(bp, fp, queue, pad,
						    len, cqe, comp_ring_cons);
#ifdef BNX2X_STOP_ON_ERROR
					if (bp->panic)
						return 0;
#endif

					bnx2x_update_sge_prod(fp,
							&cqe->fast_path_cqe);
					goto next_cqe;
				}
			}

			pci_dma_sync_single_for_device(bp->pdev,
					pci_unmap_addr(rx_buf, mapping),
						       pad + RX_COPY_THRESH,
						       PCI_DMA_FROMDEVICE);
			prefetch(((char *)(skb)) + 128);

			/* is this an error packet? */
			if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
				DP(NETIF_MSG_RX_ERR,
				   "ERROR  flags %x  rx packet %u\n",
				   cqe_fp_flags, sw_comp_cons);
				bp->eth_stats.rx_err_discard_pkt++;
				goto reuse_rx;
			}

			/* Since we don't have a jumbo ring
			 * copy small packets if mtu > 1500
			 */
			if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
			    (len <= RX_COPY_THRESH)) {
				struct sk_buff *new_skb;

				new_skb = netdev_alloc_skb(bp->dev,
							   len + pad);
				if (new_skb == NULL) {
					DP(NETIF_MSG_RX_ERR,
					   "ERROR  packet dropped "
					   "because of alloc failure\n");
					bp->eth_stats.rx_skb_alloc_failed++;
					goto reuse_rx;
				}

				/* aligned copy */
				skb_copy_from_linear_data_offset(skb, pad,
						    new_skb->data + pad, len);
				skb_reserve(new_skb, pad);
				skb_put(new_skb, len);

				bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);

				skb = new_skb;

			} else if (bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0) {
				pci_unmap_single(bp->pdev,
					pci_unmap_addr(rx_buf, mapping),
						 bp->rx_buf_size,
						 PCI_DMA_FROMDEVICE);
				skb_reserve(skb, pad);
				skb_put(skb, len);

			} else {
				DP(NETIF_MSG_RX_ERR,
				   "ERROR  packet dropped because "
				   "of alloc failure\n");
				bp->eth_stats.rx_skb_alloc_failed++;
reuse_rx:
				bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
				goto next_rx;
			}

			skb->protocol = eth_type_trans(skb, bp->dev);

			skb->ip_summed = CHECKSUM_NONE;
			if (bp->rx_csum) {
				if (likely(BNX2X_RX_CSUM_OK(cqe)))
					skb->ip_summed = CHECKSUM_UNNECESSARY;
				else
					bp->eth_stats.hw_csum_err++;
			}
		}

		if ((bp->vlgrp != NULL) && (bp->flags & HW_VLAN_RX_FLAG) &&
		    (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
		     PARSING_FLAGS_VLAN))
			vlan_hwaccel_receive_skb(skb, bp->vlgrp,
				le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
		else
			netif_receive_skb(skb);

next_rx:
		rx_buf->skb = NULL;

		bd_cons = NEXT_RX_IDX(bd_cons);
		bd_prod = NEXT_RX_IDX(bd_prod);
		bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
		rx_pkt++;
next_cqe:
		sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
		sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);

		if (rx_pkt == budget)
			break;
	} /* while */

	fp->rx_bd_cons = bd_cons;
	fp->rx_bd_prod = bd_prod_fw;
	fp->rx_comp_cons = sw_comp_cons;
	fp->rx_comp_prod = sw_comp_prod;

	/* Update producers */
	bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
			     fp->rx_sge_prod);

	fp->rx_pkt += rx_pkt;

	return rx_pkt;
}
static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
{
	struct bnx2x_fastpath *fp = fp_cookie;
	struct bnx2x *bp = fp->bp;
	int index = FP_IDX(fp);

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

	DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
	   index, FP_SB_ID(fp));
	bnx2x_ack_sb(bp, FP_SB_ID(fp), USTORM_ID, 0, IGU_INT_DISABLE, 0);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	prefetch(fp->rx_cons_sb);
	prefetch(fp->tx_cons_sb);
	prefetch(&fp->status_blk->c_status_block.status_block_index);
	prefetch(&fp->status_blk->u_status_block.status_block_index);

	netif_rx_schedule(&bnx2x_fp(bp, index, napi));

	return IRQ_HANDLED;
}
static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct bnx2x *bp = netdev_priv(dev);
	u16 status = bnx2x_ack_int(bp);
	u16 mask;

	/* Return here if interrupt is shared and it's not for us */
	if (unlikely(status == 0)) {
		DP(NETIF_MSG_INTR, "not our interrupt!\n");
		return IRQ_NONE;
	}
	DP(NETIF_MSG_INTR, "got an interrupt  status %u\n", status);

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	mask = 0x2 << bp->fp[0].sb_id;
	if (status & mask) {
		struct bnx2x_fastpath *fp = &bp->fp[0];

		prefetch(fp->rx_cons_sb);
		prefetch(fp->tx_cons_sb);
		prefetch(&fp->status_blk->c_status_block.status_block_index);
		prefetch(&fp->status_blk->u_status_block.status_block_index);

		netif_rx_schedule(&bnx2x_fp(bp, 0, napi));

		status &= ~mask;
	}

	if (unlikely(status & 0x1)) {
		queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);

		status &= ~0x1;
		if (!status)
			return IRQ_HANDLED;
	}

	if (status)
		DP(NETIF_MSG_INTR, "got an unknown interrupt! (status %u)\n",
		   status);

	return IRQ_HANDLED;
}
/* end of fast path */

static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);

/*
 * General service functions
 */
static int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	int func = BP_FUNC(bp);
	u32 hw_lock_control_reg;
	int cnt;

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		DP(NETIF_MSG_HW,
		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return -EINVAL;
	}

	if (func <= 5)
		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
	else
		hw_lock_control_reg =
				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);

	/* Validating that the resource is not already taken */
	lock_status = REG_RD(bp, hw_lock_control_reg);
	if (lock_status & resource_bit) {
		DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
		   lock_status, resource_bit);
		return -EEXIST;
	}

	/* Try for 5 second every 5ms */
	for (cnt = 0; cnt < 1000; cnt++) {
		/* Try to acquire the lock */
		REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
		lock_status = REG_RD(bp, hw_lock_control_reg);
		if (lock_status & resource_bit)
			return 0;

		msleep(5);
	}
	DP(NETIF_MSG_HW, "Timeout\n");
	return -EAGAIN;
}

static int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	int func = BP_FUNC(bp);
	u32 hw_lock_control_reg;

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		DP(NETIF_MSG_HW,
		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return -EINVAL;
	}

	if (func <= 5)
		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
	else
		hw_lock_control_reg =
				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);

	/* Validating that the resource is currently taken */
	lock_status = REG_RD(bp, hw_lock_control_reg);
	if (!(lock_status & resource_bit)) {
		DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
		   lock_status, resource_bit);
		return -EFAULT;
	}

	REG_WR(bp, hw_lock_control_reg, resource_bit);
	return 0;
}
/* HW Lock for shared dual port PHYs */
static void bnx2x_acquire_phy_lock(struct bnx2x *bp)
{
	u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);

	mutex_lock(&bp->port.phy_mutex);

	if ((ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072) ||
	    (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073))
		bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_8072_MDIO);
}

static void bnx2x_release_phy_lock(struct bnx2x *bp)
{
	u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);

	if ((ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072) ||
	    (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073))
		bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_8072_MDIO);

	mutex_unlock(&bp->port.phy_mutex);
}
int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
	/* read GPIO and mask except the float bits */
	gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);

	switch (mode) {
	case MISC_REGISTERS_GPIO_OUTPUT_LOW:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
		   gpio_num, gpio_shift);
		/* clear FLOAT and set CLR */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
		break;

	case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
		   gpio_num, gpio_shift);
		/* clear FLOAT and set SET */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
		break;

	case MISC_REGISTERS_GPIO_INPUT_HI_Z:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
		   gpio_num, gpio_shift);
		/* set FLOAT */
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_GPIO, gpio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);

	return 0;
}
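/*
 * Added illustration (not part of the original source): the port swap logic
 * above XORs the caller's port with "swap register set AND swap override
 * active", so e.g. with both NIG swap bits set and port == 1 the GPIO is
 * actually driven on port 0's bank (gpio_shift = gpio_num without the
 * MISC_REGISTERS_GPIO_PORT_SHIFT offset).
 */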
static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
{
	u32 spio_mask = (1 << spio_num);
	u32 spio_reg;

	if ((spio_num < MISC_REGISTERS_SPIO_4) ||
	    (spio_num > MISC_REGISTERS_SPIO_7)) {
		BNX2X_ERR("Invalid SPIO %d\n", spio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
	/* read SPIO and mask except the float bits */
	spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);

	switch (mode) {
	case MISC_REGISTERS_SPIO_OUTPUT_LOW:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
		/* clear FLOAT and set CLR */
		spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
		break;

	case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
		/* clear FLOAT and set SET */
		spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
		break;

	case MISC_REGISTERS_SPIO_INPUT_HI_Z:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
		/* set FLOAT */
		spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_SPIO, spio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);

	return 0;
}
static void bnx2x_calc_fc_adv(struct bnx2x *bp)
{
	switch (bp->link_vars.ieee_fc &
		MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
		bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
					  ADVERTISED_Pause);
		break;

	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
		bp->port.advertising |= (ADVERTISED_Asym_Pause |
					 ADVERTISED_Pause);
		break;

	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
		bp->port.advertising |= ADVERTISED_Asym_Pause;
		break;

	default:
		bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
					  ADVERTISED_Pause);
		break;
	}
}
static void bnx2x_link_report(struct bnx2x *bp)
{
	if (bp->link_vars.link_up) {
		if (bp->state == BNX2X_STATE_OPEN)
			netif_carrier_on(bp->dev);
		printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);

		printk("%d Mbps ", bp->link_vars.line_speed);

		if (bp->link_vars.duplex == DUPLEX_FULL)
			printk("full duplex");
		else
			printk("half duplex");

		if (bp->link_vars.flow_ctrl != BNX2X_FLOW_CTRL_NONE) {
			if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) {
				printk(", receive ");
				if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
					printk("& transmit ");
			} else {
				printk(", transmit ");
			}
			printk("flow control ON");
		}
		printk("\n");

	} else { /* link_down */
		netif_carrier_off(bp->dev);
		printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
	}
}
static u8 bnx2x_initial_phy_init(struct bnx2x *bp)
{
	if (!BP_NOMCP(bp)) {
		u8 rc;

		/* Initialize link parameters structure variables */
		/* It is recommended to turn off RX FC for jumbo frames
		   for better performance */
		if (IS_E1HMF(bp))
			bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
		else if (bp->dev->mtu > 5000)
			bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
		else
			bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;

		bnx2x_acquire_phy_lock(bp);
		rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
		bnx2x_release_phy_lock(bp);

		bnx2x_calc_fc_adv(bp);

		if (bp->link_vars.link_up)
			bnx2x_link_report(bp);

		return rc;
	}
	BNX2X_ERR("Bootcode is missing -not initializing link\n");
	return -EINVAL;
}
static void bnx2x_link_set(struct bnx2x *bp)
{
	if (!BP_NOMCP(bp)) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_phy_init(&bp->link_params, &bp->link_vars);
		bnx2x_release_phy_lock(bp);

		bnx2x_calc_fc_adv(bp);
	} else
		BNX2X_ERR("Bootcode is missing -not setting link\n");
}

static void bnx2x__link_reset(struct bnx2x *bp)
{
	if (!BP_NOMCP(bp)) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_link_reset(&bp->link_params, &bp->link_vars);
		bnx2x_release_phy_lock(bp);
	} else
		BNX2X_ERR("Bootcode is missing -not resetting link\n");
}

static u8 bnx2x_link_test(struct bnx2x *bp)
{
	u8 rc;

	bnx2x_acquire_phy_lock(bp);
	rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
	bnx2x_release_phy_lock(bp);

	return rc;
}
/* Calculates the sum of vn_min_rates.
   It's needed for further normalizing of the min_rates.

   Returns:
     sum of vn_min_rates
       or
     0 - if all the min_rates are 0.
     In the later case fairness algorithm should be deactivated.
     If not all min_rates are zero then those that are zeroes will
     be set to 1.
 */
static u32 bnx2x_calc_vn_wsum(struct bnx2x *bp)
{
	int i, port = BP_PORT(bp);
	u32 wsum = 0;
	int all_zero = 1;

	for (i = 0; i < E1HVN_MAX; i++) {
		u32 vn_cfg =
			SHMEM_RD(bp, mf_cfg.func_mf_config[2*i + port].config);
		u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
				   FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
		if (!(vn_cfg & FUNC_MF_CFG_FUNC_HIDE)) {
			/* If min rate is zero - set it to 1 */
			if (!vn_min_rate)
				vn_min_rate = DEF_MIN_RATE;
			else
				all_zero = 0;

			wsum += vn_min_rate;
		}
	}

	/* ... only if all min rates are zeros - disable FAIRNESS */
	if (all_zero)
		return 0;

	return wsum;
}
static void bnx2x_init_port_minmax(struct bnx2x *bp,
				   int en_fness,
				   u16 port_rate,
				   struct cmng_struct_per_port *m_cmng_port)
{
	u32 r_param = port_rate / 8;
	int port = BP_PORT(bp);
	int i;

	memset(m_cmng_port, 0, sizeof(struct cmng_struct_per_port));

	/* Enable minmax only if we are in e1hmf mode */
	if (IS_E1HMF(bp)) {
		u32 fair_periodic_timeout_usec;
		u32 t_fair;

		/* Enable rate shaping and fairness */
		m_cmng_port->flags.cmng_vn_enable = 1;
		m_cmng_port->flags.fairness_enable = en_fness ? 1 : 0;
		m_cmng_port->flags.rate_shaping_enable = 1;

		if (!en_fness)
			DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
			   "  fairness will be disabled\n");

		/* 100 usec in SDM ticks = 25 since each tick is 4 usec */
		m_cmng_port->rs_vars.rs_periodic_timeout =
						RS_PERIODIC_TIMEOUT_USEC / 4;

		/* this is the threshold below which no timer arming will occur
		   1.25 coefficient is for the threshold to be a little bigger
		   than the real time, to compensate for timer in-accuracy */
		m_cmng_port->rs_vars.rs_threshold =
				(RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;
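		/*
		 * Added worked example (hypothetical port rate, not from the
		 * original source): at 10000 Mbps, r_param = 10000/8 = 1250
		 * bytes/usec, so with the 100 usec period mentioned above the
		 * threshold is (100 * 1250 * 5) / 4 = 156250 bytes, i.e. one
		 * period's worth of traffic scaled by the 1.25 safety
		 * coefficient.
		 */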
		/* resolution of fairness timer */
		fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
		/* for 10G it is 1000usec. for 1G it is 10000usec. */
		t_fair = T_FAIR_COEF / port_rate;
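		/*
		 * Added note (interpretation, not from the original source):
		 * the comment above implies T_FAIR_COEF / 10000 = 1000 usec
		 * and T_FAIR_COEF / 1000 = 10000 usec, i.e. T_FAIR_COEF is on
		 * the order of 10^7, so t_fair is simply the fairness window
		 * scaled inversely with the port rate.
		 */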
		/* this is the threshold below which we won't arm
		   the timer anymore */
		m_cmng_port->fair_vars.fair_threshold = QM_ARB_BYTES;

		/* we multiply by 1e3/8 to get bytes/msec.
		   We don't want the credits to pass a credit
		   of the T_FAIR*FAIR_MEM (algorithm resolution) */
		m_cmng_port->fair_vars.upper_bound =
						r_param * t_fair * FAIR_MEM;
		/* since each tick is 4 usec */
		m_cmng_port->fair_vars.fairness_timeout =
						fair_periodic_timeout_usec / 4;

	} else {
		/* Disable rate shaping and fairness */
		m_cmng_port->flags.cmng_vn_enable = 0;
		m_cmng_port->flags.fairness_enable = 0;
		m_cmng_port->flags.rate_shaping_enable = 0;

		DP(NETIF_MSG_IFUP,
		   "Single function mode  minmax will be disabled\n");
	}

	/* Store it to internal memory */
	for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
		REG_WR(bp, BAR_XSTRORM_INTMEM +
		       XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
		       ((u32 *)(m_cmng_port))[i]);
}
static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func,
				 u32 wsum, u16 port_rate,
				 struct cmng_struct_per_port *m_cmng_port)
{
	struct rate_shaping_vars_per_vn m_rs_vn;
	struct fairness_vars_per_vn m_fair_vn;
	u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
	u16 vn_min_rate, vn_max_rate;
	int i;

	/* If function is hidden - set min and max to zeroes */
	if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
		vn_min_rate = 0;
		vn_max_rate = 0;

	} else {
		vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
				FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
		/* If FAIRNESS is enabled (not all min rates are zeroes) and
		   if current min rate is zero - set it to 1.
		   This is a requirement of the algorithm. */
		if ((vn_min_rate == 0) && wsum)
			vn_min_rate = DEF_MIN_RATE;
		vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
				FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
	}

	DP(NETIF_MSG_IFUP, "func %d: vn_min_rate=%d  vn_max_rate=%d  "
	   "wsum=%d\n", func, vn_min_rate, vn_max_rate, wsum);

	memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
	memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));

	/* global vn counter - maximal Mbps for this vn */
	m_rs_vn.vn_counter.rate = vn_max_rate;

	/* quota - number of bytes transmitted in this period */
	m_rs_vn.vn_counter.quota =
				(vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;

#ifdef BNX2X_PER_PROT_QOS
	/* per protocol counter */
	for (protocol = 0; protocol < NUM_OF_PROTOCOLS; protocol++) {
		/* maximal Mbps for this protocol */
		m_rs_vn.protocol_counters[protocol].rate =
						protocol_max_rate[protocol];
		/* the quota in each timer period -
		   number of bytes transmitted in this period */
		m_rs_vn.protocol_counters[protocol].quota =
			(u32)(rs_periodic_timeout_usec *
				(m_rs_vn.
				 protocol_counters[protocol].rate/8));
	}
#endif

	if (wsum) {
		/* credit for each period of the fairness algorithm:
		   number of bytes in T_FAIR (the vn share the port rate).
		   wsum should not be larger than 10000, thus
		   T_FAIR_COEF / (8 * wsum) will always be greater than zero */
		m_fair_vn.vn_credit_delta =
			max((u64)(vn_min_rate * (T_FAIR_COEF / (8 * wsum))),
			    (u64)(m_cmng_port->fair_vars.fair_threshold * 2));
2212 DP(NETIF_MSG_IFUP
, "m_fair_vn.vn_credit_delta=%d\n",
2213 m_fair_vn
.vn_credit_delta
);
2216 #ifdef BNX2X_PER_PROT_QOS
2218 u32 protocolWeightSum
= 0;
2220 for (protocol
= 0; protocol
< NUM_OF_PROTOCOLS
; protocol
++)
2221 protocolWeightSum
+=
2222 drvInit
.protocol_min_rate
[protocol
];
2223 /* per protocol counter -
2224 NOT NEEDED IF NO PER-PROTOCOL CONGESTION MANAGEMENT */
2225 if (protocolWeightSum
> 0) {
2227 protocol
< NUM_OF_PROTOCOLS
; protocol
++)
2228 /* credit for each period of the
2229 fairness algorithm - number of bytes in
2230 T_FAIR (the protocol share the vn rate) */
2231 m_fair_vn
.protocol_credit_delta
[protocol
] =
2232 (u32
)((vn_min_rate
/ 8) * t_fair
*
2233 protocol_min_rate
/ protocolWeightSum
);
2238 /* Store it to internal memory */
2239 for (i
= 0; i
< sizeof(struct rate_shaping_vars_per_vn
)/4; i
++)
2240 REG_WR(bp
, BAR_XSTRORM_INTMEM
+
2241 XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func
) + i
* 4,
2242 ((u32
*)(&m_rs_vn
))[i
]);
2244 for (i
= 0; i
< sizeof(struct fairness_vars_per_vn
)/4; i
++)
2245 REG_WR(bp
, BAR_XSTRORM_INTMEM
+
2246 XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func
) + i
* 4,
2247 ((u32
*)(&m_fair_vn
))[i
]);
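/* The two REG_WR() loops above copy the per-VN rate shaping and fairness
 * structures into XSTORM internal memory one 32-bit word at a time, which
 * is why the loop bounds are sizeof(struct ...)/4 and the destination
 * offset advances by i * 4 bytes on each iteration.
 */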
/* This function is called upon link interrupt */
static void bnx2x_link_attn(struct bnx2x *bp)
{
	/* Make sure that we are synced with the current statistics */
	bnx2x_stats_handle(bp, STATS_EVENT_STOP);

	bnx2x_link_update(&bp->link_params, &bp->link_vars);

	if (bp->link_vars.link_up) {

		if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
			struct host_port_stats *pstats;

			pstats = bnx2x_sp(bp, port_stats);
			/* reset old bmac stats */
			memset(&(pstats->mac_stx[0]), 0,
			       sizeof(struct mac_stx));
		}
		if ((bp->state == BNX2X_STATE_OPEN) ||
		    (bp->state == BNX2X_STATE_DISABLED))
			bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
	}

	/* indicate link status */
	bnx2x_link_report(bp);

	if (IS_E1HMF(bp)) {
		int func, vn;

		for (vn = VN_0; vn < E1HVN_MAX; vn++) {
			if (vn == BP_E1HVN(bp))
				continue;

			func = ((vn << 1) | BP_PORT(bp));

			/* Set the attention towards other drivers
			   on the same port */
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
			       (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
		}
	}

	if (CHIP_IS_E1H(bp) && (bp->link_vars.line_speed > 0)) {
		struct cmng_struct_per_port m_cmng_port;
		u32 wsum;
		int vn, port = BP_PORT(bp);

		/* Init RATE SHAPING and FAIRNESS contexts */
		wsum = bnx2x_calc_vn_wsum(bp);
		bnx2x_init_port_minmax(bp, (int)wsum,
					bp->link_vars.line_speed,
					&m_cmng_port);

		for (vn = VN_0; vn < E1HVN_MAX; vn++)
			bnx2x_init_vn_minmax(bp, 2*vn + port,
				wsum, bp->link_vars.line_speed,
					     &m_cmng_port);
	}
}
static void bnx2x__link_status_update(struct bnx2x *bp)
{
	if (bp->state != BNX2X_STATE_OPEN)
		return;

	bnx2x_link_status_update(&bp->link_params, &bp->link_vars);

	if (bp->link_vars.link_up)
		bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
	else
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);

	/* indicate link status */
	bnx2x_link_report(bp);
}
static void bnx2x_pmf_update(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 val;

	bp->port.pmf = 1;
	DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);

	/* enable nig attention */
	val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
	REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
	REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);

	bnx2x_stats_handle(bp, STATS_EVENT_PMF);
}
/*
 * General service functions
 */

/* the slow path queue is odd since completions arrive on the fastpath ring */
static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
			 u32 data_hi, u32 data_lo, int common)
{
	int func = BP_FUNC(bp);

	DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
	   "SPQE (%x:%x) command %d hw_cid %x data (%x:%x) left %x\n",
	   (u32)U64_HI(bp->spq_mapping), (u32)(U64_LO(bp->spq_mapping) +
	   (void *)bp->spq_prod_bd - (void *)bp->spq), command,
	   HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return -EIO;
#endif

	spin_lock_bh(&bp->spq_lock);

	if (!bp->spq_left) {
		BNX2X_ERR("BUG! SPQ ring full!\n");
		spin_unlock_bh(&bp->spq_lock);
		bnx2x_panic();
		return -EBUSY;
	}

	/* CID needs port number to be encoded in it */
	bp->spq_prod_bd->hdr.conn_and_cmd_data =
			cpu_to_le32(((command << SPE_HDR_CMD_ID_SHIFT) |
				     HW_CID(bp, cid)));
	bp->spq_prod_bd->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
	if (common)
		bp->spq_prod_bd->hdr.type |=
			cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));

	bp->spq_prod_bd->data.mac_config_addr.hi = cpu_to_le32(data_hi);
	bp->spq_prod_bd->data.mac_config_addr.lo = cpu_to_le32(data_lo);

	bp->spq_left--;

	if (bp->spq_prod_bd == bp->spq_last_bd) {
		bp->spq_prod_bd = bp->spq;
		bp->spq_prod_idx = 0;
		DP(NETIF_MSG_TIMER, "end of spq\n");

	} else {
		bp->spq_prod_bd++;
		bp->spq_prod_idx++;
	}

	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
	       bp->spq_prod_idx);

	spin_unlock_bh(&bp->spq_lock);
	return 0;
}
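/* Usage note: callers pass the two halves of a 64-bit payload as data_hi
 * and data_lo; for example, bnx2x_storm_stats_post() further down issues
 * RAMROD_CMD_ID_ETH_STAT_QUERY with the two words of its ramrod_data
 * structure, and the ramrod completion arrives on the fastpath ring as the
 * comment above this function notes.
 */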
/* acquire split MCP access lock register */
static int bnx2x_acquire_alr(struct bnx2x *bp)
{
	u32 i, j, val;
	int rc = 0;

	might_sleep();
	i = 100;
	for (j = 0; j < i*10; j++) {
		val = (1UL << 31);
		REG_WR(bp, GRCBASE_MCP + 0x9c, val);
		val = REG_RD(bp, GRCBASE_MCP + 0x9c);
		if (val & (1L << 31))
			break;

		msleep(5);
	}
	if (!(val & (1L << 31))) {
		BNX2X_ERR("Cannot acquire MCP access lock register\n");
		rc = -EBUSY;
	}

	return rc;
}
/* release split MCP access lock register */
static void bnx2x_release_alr(struct bnx2x *bp)
{
	u32 val = 0;

	REG_WR(bp, GRCBASE_MCP + 0x9c, val);
}
static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
{
	struct host_def_status_block *def_sb = bp->def_status_blk;
	u16 rc = 0;

	barrier(); /* status block is written to by the chip */
	if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
		bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
		rc |= 1;
	}
	if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
		bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
		rc |= 2;
	}
	if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
		bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
		rc |= 4;
	}
	if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
		bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
		rc |= 8;
	}
	if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
		bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
		rc |= 16;
	}
	return rc;
}
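/* The return value is a small bitmask of which default status block
 * indices changed: bnx2x_sp_task() below keys off bit 0 (attention bits)
 * to run bnx2x_attn_int() and bit 1 (CStorm) to clear stats_pending, while
 * the remaining bits simply record that the corresponding storm index was
 * refreshed before the block is re-acknowledged.
 */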
/*
 * slow path service functions
 */

static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
{
	int port = BP_PORT(bp);
	u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
		       COMMAND_REG_ATTN_BITS_SET);
	u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
			      MISC_REG_AEU_MASK_ATTN_FUNC_0;
	u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
				       NIG_REG_MASK_INTERRUPT_PORT0;
	u32 aeu_mask;

	if (bp->attn_state & asserted)
		BNX2X_ERR("IGU ERROR\n");

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
	aeu_mask = REG_RD(bp, aeu_addr);

	DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n",
	   aeu_mask, asserted);
	aeu_mask &= ~(asserted & 0xff);
	DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);

	REG_WR(bp, aeu_addr, aeu_mask);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);

	DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
	bp->attn_state |= asserted;
	DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);

	if (asserted & ATTN_HARD_WIRED_MASK) {
		if (asserted & ATTN_NIG_FOR_FUNC) {

			bnx2x_acquire_phy_lock(bp);

			/* save nig interrupt mask */
			bp->nig_mask = REG_RD(bp, nig_int_mask_addr);
			REG_WR(bp, nig_int_mask_addr, 0);

			bnx2x_link_attn(bp);

			/* handle unicore attn? */
		}
		if (asserted & ATTN_SW_TIMER_4_FUNC)
			DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");

		if (asserted & GPIO_2_FUNC)
			DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");

		if (asserted & GPIO_3_FUNC)
			DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");

		if (asserted & GPIO_4_FUNC)
			DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");

		if (port == 0) {
			if (asserted & ATTN_GENERAL_ATTN_1) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_2) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_3) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
			}
		} else {
			if (asserted & ATTN_GENERAL_ATTN_4) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_5) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_6) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
			}
		}

	} /* if hardwired */

	DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
	   asserted, hc_addr);
	REG_WR(bp, hc_addr, asserted);

	/* now set back the mask */
	if (asserted & ATTN_NIG_FOR_FUNC) {
		REG_WR(bp, nig_int_mask_addr, bp->nig_mask);
		bnx2x_release_phy_lock(bp);
	}
}
static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
{
	int port = BP_PORT(bp);
	int reg_offset;
	u32 val;

	reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
			     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);

	if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {

		val = REG_RD(bp, reg_offset);
		val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("SPIO5 hw attention\n");

		switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
		case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1021G:
		case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
			/* Fan failure attention */

			/* The PHY reset is controlled by GPIO 1 */
			bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
				       MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
			/* Low power mode is controlled by GPIO 2 */
			bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
				       MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
			/* mark the failure */
			bp->link_params.ext_phy_config &=
					~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
			bp->link_params.ext_phy_config |=
					PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
			SHMEM_WR(bp,
				 dev_info.port_hw_config[port].
							external_phy_config,
				 bp->link_params.ext_phy_config);
			/* log the failure */
			printk(KERN_ERR PFX "Fan Failure on Network"
			       " Controller %s has caused the driver to"
			       " shutdown the card to prevent permanent"
			       " damage. Please contact Dell Support for"
			       " assistance\n", bp->dev->name);
			break;

		default:
			break;
		}
	}

	if (attn & HW_INTERRUT_ASSERT_SET_0) {

		val = REG_RD(bp, reg_offset);
		val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
			  (attn & HW_INTERRUT_ASSERT_SET_0));
		bnx2x_panic();
	}
}
static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
{
	u32 val;

	if (attn & BNX2X_DOORQ_ASSERT) {

		val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
		BNX2X_ERR("DB hw attention 0x%x\n", val);
		/* DORQ discard attention */
		if (val & 0x2)
			BNX2X_ERR("FATAL error from DORQ\n");
	}

	if (attn & HW_INTERRUT_ASSERT_SET_1) {

		int port = BP_PORT(bp);
		int reg_offset;

		reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
				     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);

		val = REG_RD(bp, reg_offset);
		val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
			  (attn & HW_INTERRUT_ASSERT_SET_1));
		bnx2x_panic();
	}
}
static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
{
	u32 val;

	if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {

		val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
		BNX2X_ERR("CFC hw attention 0x%x\n", val);
		/* CFC error attention */
		if (val & 0x2)
			BNX2X_ERR("FATAL error from CFC\n");
	}

	if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {

		val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
		BNX2X_ERR("PXP hw attention 0x%x\n", val);
		/* RQ_USDMDP_FIFO_OVERFLOW */
		if (val & 0x18000)
			BNX2X_ERR("FATAL error from PXP\n");
	}

	if (attn & HW_INTERRUT_ASSERT_SET_2) {

		int port = BP_PORT(bp);
		int reg_offset;

		reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
				     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);

		val = REG_RD(bp, reg_offset);
		val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
			  (attn & HW_INTERRUT_ASSERT_SET_2));
		bnx2x_panic();
	}
}
static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
{
	u32 val;

	if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {

		if (attn & BNX2X_PMF_LINK_ASSERT) {
			int func = BP_FUNC(bp);

			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
			bnx2x__link_status_update(bp);
			if (SHMEM_RD(bp, func_mb[func].drv_status) &
							DRV_STATUS_PMF)
				bnx2x_pmf_update(bp);

		} else if (attn & BNX2X_MC_ASSERT_BITS) {

			BNX2X_ERR("MC assert!\n");
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
			bnx2x_panic();

		} else if (attn & BNX2X_MCP_ASSERT) {

			BNX2X_ERR("MCP assert!\n");
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
			bnx2x_fw_dump(bp);

		} else
			BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
	}

	if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
		BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
		if (attn & BNX2X_GRC_TIMEOUT) {
			val = CHIP_IS_E1H(bp) ?
				REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
			BNX2X_ERR("GRC time-out 0x%08x\n", val);
		}
		if (attn & BNX2X_GRC_RSV) {
			val = CHIP_IS_E1H(bp) ?
				REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
			BNX2X_ERR("GRC reserved 0x%08x\n", val);
		}
		REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
	}
}
static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
{
	struct attn_route attn;
	struct attn_route group_mask;
	int port = BP_PORT(bp);
	int index;
	u32 reg_addr;
	u32 val;
	u32 aeu_mask;

	/* need to take HW lock because MCP or other port might also
	   try to handle this event */
	bnx2x_acquire_alr(bp);

	attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
	attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
	attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
	attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
	DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
	   attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);

	for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
		if (deasserted & (1 << index)) {
			group_mask = bp->attn_group[index];

			DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
			   index, group_mask.sig[0], group_mask.sig[1],
			   group_mask.sig[2], group_mask.sig[3]);

			bnx2x_attn_int_deasserted3(bp,
					attn.sig[3] & group_mask.sig[3]);
			bnx2x_attn_int_deasserted1(bp,
					attn.sig[1] & group_mask.sig[1]);
			bnx2x_attn_int_deasserted2(bp,
					attn.sig[2] & group_mask.sig[2]);
			bnx2x_attn_int_deasserted0(bp,
					attn.sig[0] & group_mask.sig[0]);

			if ((attn.sig[0] & group_mask.sig[0] &
						HW_PRTY_ASSERT_SET_0) ||
			    (attn.sig[1] & group_mask.sig[1] &
						HW_PRTY_ASSERT_SET_1) ||
			    (attn.sig[2] & group_mask.sig[2] &
						HW_PRTY_ASSERT_SET_2))
				BNX2X_ERR("FATAL HW block parity attention\n");
		}
	}

	bnx2x_release_alr(bp);

	reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);

	val = ~deasserted;
	DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
	   val, reg_addr);
	REG_WR(bp, reg_addr, val);

	if (~bp->attn_state & deasserted)
		BNX2X_ERR("IGU ERROR\n");

	reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
			  MISC_REG_AEU_MASK_ATTN_FUNC_0;

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
	aeu_mask = REG_RD(bp, reg_addr);

	DP(NETIF_MSG_HW, "aeu_mask %x newly deasserted %x\n",
	   aeu_mask, deasserted);
	aeu_mask |= (deasserted & 0xff);
	DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);

	REG_WR(bp, reg_addr, aeu_mask);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);

	DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
	bp->attn_state &= ~deasserted;
	DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
}
static void bnx2x_attn_int(struct bnx2x *bp)
{
	/* read local copy of bits */
	u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
								attn_bits);
	u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
								attn_bits_ack);
	u32 attn_state = bp->attn_state;

	/* look for changed bits */
	u32 asserted   =  attn_bits & ~attn_ack & ~attn_state;
	u32 deasserted = ~attn_bits &  attn_ack &  attn_state;

	DP(NETIF_MSG_HW,
	   "attn_bits %x attn_ack %x asserted %x deasserted %x\n",
	   attn_bits, attn_ack, asserted, deasserted);

	if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
		BNX2X_ERR("BAD attention state\n");

	/* handle bits that were raised */
	if (asserted)
		bnx2x_attn_int_asserted(bp, asserted);

	if (deasserted)
		bnx2x_attn_int_deasserted(bp, deasserted);
}
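/* Worked example of the assert/deassert derivation above (illustrative
 * values only): with attn_bits = 0x5, attn_ack = 0x4 and attn_state = 0x4,
 * asserted = 0x5 & ~0x4 & ~0x4 = 0x1 (a bit newly raised and not yet
 * acknowledged) and deasserted = ~0x5 & 0x4 & 0x4 = 0x0, so only the
 * assertion path runs on this pass.
 */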
static void bnx2x_sp_task(struct work_struct *work)
{
	struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
	u16 status;

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return;
	}

	status = bnx2x_update_dsb_idx(bp);
/*	if (status == 0)				     */
/*		BNX2X_ERR("spurious slowpath interrupt!\n"); */

	DP(NETIF_MSG_INTR, "got a slowpath interrupt (updated %x)\n", status);

	/* HW attentions */
	if (status & 0x1)
		bnx2x_attn_int(bp);

	/* CStorm events: query_stats, port delete ramrod */
	if (status & 0x2)
		bp->stats_pending = 0;

	bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, le16_to_cpu(bp->def_att_idx),
		     IGU_INT_NOP, 1);
	bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
		     IGU_INT_NOP, 1);
	bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
		     IGU_INT_NOP, 1);
	bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
		     IGU_INT_NOP, 1);
	bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
		     IGU_INT_ENABLE, 1);
}
static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct bnx2x *bp = netdev_priv(dev);

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

	bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, 0, IGU_INT_DISABLE, 0);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);

	return IRQ_HANDLED;
}
/* end of slow path */

/* Statistics */

/****************************************************************************
* Macros
****************************************************************************/

/* sum[hi:lo] += add[hi:lo] */
#define ADD_64(s_hi, a_hi, s_lo, a_lo) \
	do { \
		s_lo += a_lo; \
		s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
	} while (0)

/* difference = minuend - subtrahend */
#define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
	do { \
		if (m_lo < s_lo) { \
			/* underflow */ \
			d_hi = m_hi - s_hi; \
			if (d_hi > 0) { \
				/* we can 'loan' 1 */ \
				d_hi--; \
				d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
			} else { \
				/* m_hi <= s_hi */ \
				d_hi = 0; \
				d_lo = 0; \
			} \
		} else { \
			/* m_lo >= s_lo */ \
			if (m_hi < s_hi) { \
				d_hi = 0; \
				d_lo = 0; \
			} else { \
				/* m_hi >= s_hi */ \
				d_hi = m_hi - s_hi; \
				d_lo = m_lo - s_lo; \
			} \
		} \
	} while (0)

#define UPDATE_STAT64(s, t) \
	do { \
		DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
			diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
		pstats->mac_stx[0].t##_hi = new->s##_hi; \
		pstats->mac_stx[0].t##_lo = new->s##_lo; \
		ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
		       pstats->mac_stx[1].t##_lo, diff.lo); \
	} while (0)

#define UPDATE_STAT64_NIG(s, t) \
	do { \
		DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
			diff.lo, new->s##_lo, old->s##_lo); \
		ADD_64(estats->t##_hi, diff.hi, \
		       estats->t##_lo, diff.lo); \
	} while (0)

/* sum[hi:lo] += add */
#define ADD_EXTEND_64(s_hi, s_lo, a) \
	do { \
		s_lo += a; \
		s_hi += (s_lo < a) ? 1 : 0; \
	} while (0)

#define UPDATE_EXTEND_STAT(s) \
	do { \
		ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
			      pstats->mac_stx[1].s##_lo, \
			      new->s); \
	} while (0)

#define UPDATE_EXTEND_TSTAT(s, t) \
	do { \
		diff = le32_to_cpu(tclient->s) - old_tclient->s; \
		old_tclient->s = le32_to_cpu(tclient->s); \
		ADD_EXTEND_64(fstats->t##_hi, fstats->t##_lo, diff); \
	} while (0)

#define UPDATE_EXTEND_XSTAT(s, t) \
	do { \
		diff = le32_to_cpu(xclient->s) - old_xclient->s; \
		old_xclient->s = le32_to_cpu(xclient->s); \
		ADD_EXTEND_64(fstats->t##_hi, fstats->t##_lo, diff); \
	} while (0)
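/* Illustrative arithmetic for the split 64-bit helpers above: with
 * s_hi:s_lo = 0x00000001:0xfffffff0 and a_hi:a_lo = 0x00000000:0x00000020,
 * ADD_64 first forms s_lo = 0x00000010 (wrapping), then sees s_lo < a_lo
 * and adds the carry, giving 0x00000002:0x00000010.  DIFF_64 handles the
 * mirror-image borrow when the minuend's low word is smaller than the
 * subtrahend's low word.
 */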
/****************************************************************************
* General service functions
****************************************************************************/

static inline long bnx2x_hilo(u32 *hiref)
{
	u32 lo = *(hiref + 1);
#if (BITS_PER_LONG == 64)
	u32 hi = *hiref;

	return HILO_U64(hi, lo);
#else
	return lo;
#endif
}
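/* bnx2x_hilo() folds a {hi, lo} statistics pair into a long for the
 * net_device_stats fields: on 64-bit builds both words are combined via
 * HILO_U64(), while on 32-bit builds only the low word is returned, e.g.
 * nstats->rx_bytes = bnx2x_hilo(&estats->valid_bytes_received_hi) further
 * down in this file.
 */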
/****************************************************************************
* Init service functions
****************************************************************************/

static void bnx2x_storm_stats_post(struct bnx2x *bp)
{
	if (!bp->stats_pending) {
		struct eth_query_ramrod_data ramrod_data = {0};
		int rc;

		ramrod_data.drv_counter = bp->stats_counter++;
		ramrod_data.collect_port_1b = bp->port.pmf ? 1 : 0;
		ramrod_data.ctr_id_vector = (1 << BP_CL_ID(bp));

		rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
				   ((u32 *)&ramrod_data)[1],
				   ((u32 *)&ramrod_data)[0], 0);
		if (rc == 0) {
			/* stats ramrod has its own slot on the spq */
			bp->spq_left++;
			bp->stats_pending = 1;
		}
	}
}
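/* The drv_counter written above is echoed back by the storms; later,
 * bnx2x_storm_stats_update() accepts a snapshot only when the per-client
 * stats_counter plus one matches bp->stats_counter, which is how stale
 * DMA'd statistics are filtered out.
 */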
static void bnx2x_stats_init(struct bnx2x *bp)
{
	int port = BP_PORT(bp);

	bp->executer_idx = 0;
	bp->stats_counter = 0;

	if (!BP_NOMCP(bp))
		bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
	else
		bp->port.port_stx = 0;
	DP(BNX2X_MSG_STATS, "port_stx 0x%x\n", bp->port.port_stx);

	memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
	bp->port.old_nig_stats.brb_discard =
			REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
	bp->port.old_nig_stats.brb_truncate =
			REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
	REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
		    &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
	REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
		    &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);

	/* function stats */
	memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
	memset(&bp->old_tclient, 0, sizeof(struct tstorm_per_client_stats));
	memset(&bp->old_xclient, 0, sizeof(struct xstorm_per_client_stats));
	memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats));

	bp->stats_state = STATS_STATE_DISABLED;
	if (IS_E1HMF(bp) && bp->port.pmf && bp->port.port_stx)
		bnx2x_stats_handle(bp, STATS_EVENT_PMF);
}
3087 static void bnx2x_hw_stats_post(struct bnx2x
*bp
)
3089 struct dmae_command
*dmae
= &bp
->stats_dmae
;
3090 u32
*stats_comp
= bnx2x_sp(bp
, stats_comp
);
3092 *stats_comp
= DMAE_COMP_VAL
;
3095 if (bp
->executer_idx
) {
3096 int loader_idx
= PMF_DMAE_C(bp
);
3098 memset(dmae
, 0, sizeof(struct dmae_command
));
3100 dmae
->opcode
= (DMAE_CMD_SRC_PCI
| DMAE_CMD_DST_GRC
|
3101 DMAE_CMD_C_DST_GRC
| DMAE_CMD_C_ENABLE
|
3102 DMAE_CMD_DST_RESET
|
3104 DMAE_CMD_ENDIANITY_B_DW_SWAP
|
3106 DMAE_CMD_ENDIANITY_DW_SWAP
|
3108 (BP_PORT(bp
) ? DMAE_CMD_PORT_1
:
3110 (BP_E1HVN(bp
) << DMAE_CMD_E1HVN_SHIFT
));
3111 dmae
->src_addr_lo
= U64_LO(bnx2x_sp_mapping(bp
, dmae
[0]));
3112 dmae
->src_addr_hi
= U64_HI(bnx2x_sp_mapping(bp
, dmae
[0]));
3113 dmae
->dst_addr_lo
= (DMAE_REG_CMD_MEM
+
3114 sizeof(struct dmae_command
) *
3115 (loader_idx
+ 1)) >> 2;
3116 dmae
->dst_addr_hi
= 0;
3117 dmae
->len
= sizeof(struct dmae_command
) >> 2;
3120 dmae
->comp_addr_lo
= dmae_reg_go_c
[loader_idx
+ 1] >> 2;
3121 dmae
->comp_addr_hi
= 0;
3125 bnx2x_post_dmae(bp
, dmae
, loader_idx
);
3127 } else if (bp
->func_stx
) {
3129 bnx2x_post_dmae(bp
, dmae
, INIT_DMAE_C(bp
));
static int bnx2x_stats_comp(struct bnx2x *bp)
{
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);
	int cnt = 10;

	might_sleep();
	while (*stats_comp != DMAE_COMP_VAL) {
		if (!cnt) {
			BNX2X_ERR("timeout waiting for stats finished\n");
			break;
		}
		cnt--;
		msleep(1);
	}
	return 1;
}

/****************************************************************************
* Statistics service functions
****************************************************************************/
3154 static void bnx2x_stats_pmf_update(struct bnx2x
*bp
)
3156 struct dmae_command
*dmae
;
3158 int loader_idx
= PMF_DMAE_C(bp
);
3159 u32
*stats_comp
= bnx2x_sp(bp
, stats_comp
);
3162 if (!IS_E1HMF(bp
) || !bp
->port
.pmf
|| !bp
->port
.port_stx
) {
3163 BNX2X_ERR("BUG!\n");
3167 bp
->executer_idx
= 0;
3169 opcode
= (DMAE_CMD_SRC_GRC
| DMAE_CMD_DST_PCI
|
3171 DMAE_CMD_SRC_RESET
| DMAE_CMD_DST_RESET
|
3173 DMAE_CMD_ENDIANITY_B_DW_SWAP
|
3175 DMAE_CMD_ENDIANITY_DW_SWAP
|
3177 (BP_PORT(bp
) ? DMAE_CMD_PORT_1
: DMAE_CMD_PORT_0
) |
3178 (BP_E1HVN(bp
) << DMAE_CMD_E1HVN_SHIFT
));
3180 dmae
= bnx2x_sp(bp
, dmae
[bp
->executer_idx
++]);
3181 dmae
->opcode
= (opcode
| DMAE_CMD_C_DST_GRC
);
3182 dmae
->src_addr_lo
= bp
->port
.port_stx
>> 2;
3183 dmae
->src_addr_hi
= 0;
3184 dmae
->dst_addr_lo
= U64_LO(bnx2x_sp_mapping(bp
, port_stats
));
3185 dmae
->dst_addr_hi
= U64_HI(bnx2x_sp_mapping(bp
, port_stats
));
3186 dmae
->len
= DMAE_LEN32_RD_MAX
;
3187 dmae
->comp_addr_lo
= dmae_reg_go_c
[loader_idx
] >> 2;
3188 dmae
->comp_addr_hi
= 0;
3191 dmae
= bnx2x_sp(bp
, dmae
[bp
->executer_idx
++]);
3192 dmae
->opcode
= (opcode
| DMAE_CMD_C_DST_PCI
);
3193 dmae
->src_addr_lo
= (bp
->port
.port_stx
>> 2) + DMAE_LEN32_RD_MAX
;
3194 dmae
->src_addr_hi
= 0;
3195 dmae
->dst_addr_lo
= U64_LO(bnx2x_sp_mapping(bp
, port_stats
) +
3196 DMAE_LEN32_RD_MAX
* 4);
3197 dmae
->dst_addr_hi
= U64_HI(bnx2x_sp_mapping(bp
, port_stats
) +
3198 DMAE_LEN32_RD_MAX
* 4);
3199 dmae
->len
= (sizeof(struct host_port_stats
) >> 2) - DMAE_LEN32_RD_MAX
;
3200 dmae
->comp_addr_lo
= U64_LO(bnx2x_sp_mapping(bp
, stats_comp
));
3201 dmae
->comp_addr_hi
= U64_HI(bnx2x_sp_mapping(bp
, stats_comp
));
3202 dmae
->comp_val
= DMAE_COMP_VAL
;
3205 bnx2x_hw_stats_post(bp
);
3206 bnx2x_stats_comp(bp
);
3209 static void bnx2x_port_stats_init(struct bnx2x
*bp
)
3211 struct dmae_command
*dmae
;
3212 int port
= BP_PORT(bp
);
3213 int vn
= BP_E1HVN(bp
);
3215 int loader_idx
= PMF_DMAE_C(bp
);
3217 u32
*stats_comp
= bnx2x_sp(bp
, stats_comp
);
3220 if (!bp
->link_vars
.link_up
|| !bp
->port
.pmf
) {
3221 BNX2X_ERR("BUG!\n");
3225 bp
->executer_idx
= 0;
3228 opcode
= (DMAE_CMD_SRC_PCI
| DMAE_CMD_DST_GRC
|
3229 DMAE_CMD_C_DST_GRC
| DMAE_CMD_C_ENABLE
|
3230 DMAE_CMD_SRC_RESET
| DMAE_CMD_DST_RESET
|
3232 DMAE_CMD_ENDIANITY_B_DW_SWAP
|
3234 DMAE_CMD_ENDIANITY_DW_SWAP
|
3236 (port
? DMAE_CMD_PORT_1
: DMAE_CMD_PORT_0
) |
3237 (vn
<< DMAE_CMD_E1HVN_SHIFT
));
3239 if (bp
->port
.port_stx
) {
3241 dmae
= bnx2x_sp(bp
, dmae
[bp
->executer_idx
++]);
3242 dmae
->opcode
= opcode
;
3243 dmae
->src_addr_lo
= U64_LO(bnx2x_sp_mapping(bp
, port_stats
));
3244 dmae
->src_addr_hi
= U64_HI(bnx2x_sp_mapping(bp
, port_stats
));
3245 dmae
->dst_addr_lo
= bp
->port
.port_stx
>> 2;
3246 dmae
->dst_addr_hi
= 0;
3247 dmae
->len
= sizeof(struct host_port_stats
) >> 2;
3248 dmae
->comp_addr_lo
= dmae_reg_go_c
[loader_idx
] >> 2;
3249 dmae
->comp_addr_hi
= 0;
3255 dmae
= bnx2x_sp(bp
, dmae
[bp
->executer_idx
++]);
3256 dmae
->opcode
= opcode
;
3257 dmae
->src_addr_lo
= U64_LO(bnx2x_sp_mapping(bp
, func_stats
));
3258 dmae
->src_addr_hi
= U64_HI(bnx2x_sp_mapping(bp
, func_stats
));
3259 dmae
->dst_addr_lo
= bp
->func_stx
>> 2;
3260 dmae
->dst_addr_hi
= 0;
3261 dmae
->len
= sizeof(struct host_func_stats
) >> 2;
3262 dmae
->comp_addr_lo
= dmae_reg_go_c
[loader_idx
] >> 2;
3263 dmae
->comp_addr_hi
= 0;
3268 opcode
= (DMAE_CMD_SRC_GRC
| DMAE_CMD_DST_PCI
|
3269 DMAE_CMD_C_DST_GRC
| DMAE_CMD_C_ENABLE
|
3270 DMAE_CMD_SRC_RESET
| DMAE_CMD_DST_RESET
|
3272 DMAE_CMD_ENDIANITY_B_DW_SWAP
|
3274 DMAE_CMD_ENDIANITY_DW_SWAP
|
3276 (port
? DMAE_CMD_PORT_1
: DMAE_CMD_PORT_0
) |
3277 (vn
<< DMAE_CMD_E1HVN_SHIFT
));
3279 if (bp
->link_vars
.mac_type
== MAC_TYPE_BMAC
) {
3281 mac_addr
= (port
? NIG_REG_INGRESS_BMAC1_MEM
:
3282 NIG_REG_INGRESS_BMAC0_MEM
);
3284 /* BIGMAC_REGISTER_TX_STAT_GTPKT ..
3285 BIGMAC_REGISTER_TX_STAT_GTBYT */
3286 dmae
= bnx2x_sp(bp
, dmae
[bp
->executer_idx
++]);
3287 dmae
->opcode
= opcode
;
3288 dmae
->src_addr_lo
= (mac_addr
+
3289 BIGMAC_REGISTER_TX_STAT_GTPKT
) >> 2;
3290 dmae
->src_addr_hi
= 0;
3291 dmae
->dst_addr_lo
= U64_LO(bnx2x_sp_mapping(bp
, mac_stats
));
3292 dmae
->dst_addr_hi
= U64_HI(bnx2x_sp_mapping(bp
, mac_stats
));
3293 dmae
->len
= (8 + BIGMAC_REGISTER_TX_STAT_GTBYT
-
3294 BIGMAC_REGISTER_TX_STAT_GTPKT
) >> 2;
3295 dmae
->comp_addr_lo
= dmae_reg_go_c
[loader_idx
] >> 2;
3296 dmae
->comp_addr_hi
= 0;
3299 /* BIGMAC_REGISTER_RX_STAT_GR64 ..
3300 BIGMAC_REGISTER_RX_STAT_GRIPJ */
3301 dmae
= bnx2x_sp(bp
, dmae
[bp
->executer_idx
++]);
3302 dmae
->opcode
= opcode
;
3303 dmae
->src_addr_lo
= (mac_addr
+
3304 BIGMAC_REGISTER_RX_STAT_GR64
) >> 2;
3305 dmae
->src_addr_hi
= 0;
3306 dmae
->dst_addr_lo
= U64_LO(bnx2x_sp_mapping(bp
, mac_stats
) +
3307 offsetof(struct bmac_stats
, rx_stat_gr64_lo
));
3308 dmae
->dst_addr_hi
= U64_HI(bnx2x_sp_mapping(bp
, mac_stats
) +
3309 offsetof(struct bmac_stats
, rx_stat_gr64_lo
));
3310 dmae
->len
= (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ
-
3311 BIGMAC_REGISTER_RX_STAT_GR64
) >> 2;
3312 dmae
->comp_addr_lo
= dmae_reg_go_c
[loader_idx
] >> 2;
3313 dmae
->comp_addr_hi
= 0;
3316 } else if (bp
->link_vars
.mac_type
== MAC_TYPE_EMAC
) {
3318 mac_addr
= (port
? GRCBASE_EMAC1
: GRCBASE_EMAC0
);
3320 /* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
3321 dmae
= bnx2x_sp(bp
, dmae
[bp
->executer_idx
++]);
3322 dmae
->opcode
= opcode
;
3323 dmae
->src_addr_lo
= (mac_addr
+
3324 EMAC_REG_EMAC_RX_STAT_AC
) >> 2;
3325 dmae
->src_addr_hi
= 0;
3326 dmae
->dst_addr_lo
= U64_LO(bnx2x_sp_mapping(bp
, mac_stats
));
3327 dmae
->dst_addr_hi
= U64_HI(bnx2x_sp_mapping(bp
, mac_stats
));
3328 dmae
->len
= EMAC_REG_EMAC_RX_STAT_AC_COUNT
;
3329 dmae
->comp_addr_lo
= dmae_reg_go_c
[loader_idx
] >> 2;
3330 dmae
->comp_addr_hi
= 0;
3333 /* EMAC_REG_EMAC_RX_STAT_AC_28 */
3334 dmae
= bnx2x_sp(bp
, dmae
[bp
->executer_idx
++]);
3335 dmae
->opcode
= opcode
;
3336 dmae
->src_addr_lo
= (mac_addr
+
3337 EMAC_REG_EMAC_RX_STAT_AC_28
) >> 2;
3338 dmae
->src_addr_hi
= 0;
3339 dmae
->dst_addr_lo
= U64_LO(bnx2x_sp_mapping(bp
, mac_stats
) +
3340 offsetof(struct emac_stats
, rx_stat_falsecarriererrors
));
3341 dmae
->dst_addr_hi
= U64_HI(bnx2x_sp_mapping(bp
, mac_stats
) +
3342 offsetof(struct emac_stats
, rx_stat_falsecarriererrors
));
3344 dmae
->comp_addr_lo
= dmae_reg_go_c
[loader_idx
] >> 2;
3345 dmae
->comp_addr_hi
= 0;
3348 /* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
3349 dmae
= bnx2x_sp(bp
, dmae
[bp
->executer_idx
++]);
3350 dmae
->opcode
= opcode
;
3351 dmae
->src_addr_lo
= (mac_addr
+
3352 EMAC_REG_EMAC_TX_STAT_AC
) >> 2;
3353 dmae
->src_addr_hi
= 0;
3354 dmae
->dst_addr_lo
= U64_LO(bnx2x_sp_mapping(bp
, mac_stats
) +
3355 offsetof(struct emac_stats
, tx_stat_ifhcoutoctets
));
3356 dmae
->dst_addr_hi
= U64_HI(bnx2x_sp_mapping(bp
, mac_stats
) +
3357 offsetof(struct emac_stats
, tx_stat_ifhcoutoctets
));
3358 dmae
->len
= EMAC_REG_EMAC_TX_STAT_AC_COUNT
;
3359 dmae
->comp_addr_lo
= dmae_reg_go_c
[loader_idx
] >> 2;
3360 dmae
->comp_addr_hi
= 0;
3365 dmae
= bnx2x_sp(bp
, dmae
[bp
->executer_idx
++]);
3366 dmae
->opcode
= opcode
;
3367 dmae
->src_addr_lo
= (port
? NIG_REG_STAT1_BRB_DISCARD
:
3368 NIG_REG_STAT0_BRB_DISCARD
) >> 2;
3369 dmae
->src_addr_hi
= 0;
3370 dmae
->dst_addr_lo
= U64_LO(bnx2x_sp_mapping(bp
, nig_stats
));
3371 dmae
->dst_addr_hi
= U64_HI(bnx2x_sp_mapping(bp
, nig_stats
));
3372 dmae
->len
= (sizeof(struct nig_stats
) - 4*sizeof(u32
)) >> 2;
3373 dmae
->comp_addr_lo
= dmae_reg_go_c
[loader_idx
] >> 2;
3374 dmae
->comp_addr_hi
= 0;
3377 dmae
= bnx2x_sp(bp
, dmae
[bp
->executer_idx
++]);
3378 dmae
->opcode
= opcode
;
3379 dmae
->src_addr_lo
= (port
? NIG_REG_STAT1_EGRESS_MAC_PKT0
:
3380 NIG_REG_STAT0_EGRESS_MAC_PKT0
) >> 2;
3381 dmae
->src_addr_hi
= 0;
3382 dmae
->dst_addr_lo
= U64_LO(bnx2x_sp_mapping(bp
, nig_stats
) +
3383 offsetof(struct nig_stats
, egress_mac_pkt0_lo
));
3384 dmae
->dst_addr_hi
= U64_HI(bnx2x_sp_mapping(bp
, nig_stats
) +
3385 offsetof(struct nig_stats
, egress_mac_pkt0_lo
));
3386 dmae
->len
= (2*sizeof(u32
)) >> 2;
3387 dmae
->comp_addr_lo
= dmae_reg_go_c
[loader_idx
] >> 2;
3388 dmae
->comp_addr_hi
= 0;
3391 dmae
= bnx2x_sp(bp
, dmae
[bp
->executer_idx
++]);
3392 dmae
->opcode
= (DMAE_CMD_SRC_GRC
| DMAE_CMD_DST_PCI
|
3393 DMAE_CMD_C_DST_PCI
| DMAE_CMD_C_ENABLE
|
3394 DMAE_CMD_SRC_RESET
| DMAE_CMD_DST_RESET
|
3396 DMAE_CMD_ENDIANITY_B_DW_SWAP
|
3398 DMAE_CMD_ENDIANITY_DW_SWAP
|
3400 (port
? DMAE_CMD_PORT_1
: DMAE_CMD_PORT_0
) |
3401 (vn
<< DMAE_CMD_E1HVN_SHIFT
));
3402 dmae
->src_addr_lo
= (port
? NIG_REG_STAT1_EGRESS_MAC_PKT1
:
3403 NIG_REG_STAT0_EGRESS_MAC_PKT1
) >> 2;
3404 dmae
->src_addr_hi
= 0;
3405 dmae
->dst_addr_lo
= U64_LO(bnx2x_sp_mapping(bp
, nig_stats
) +
3406 offsetof(struct nig_stats
, egress_mac_pkt1_lo
));
3407 dmae
->dst_addr_hi
= U64_HI(bnx2x_sp_mapping(bp
, nig_stats
) +
3408 offsetof(struct nig_stats
, egress_mac_pkt1_lo
));
3409 dmae
->len
= (2*sizeof(u32
)) >> 2;
3410 dmae
->comp_addr_lo
= U64_LO(bnx2x_sp_mapping(bp
, stats_comp
));
3411 dmae
->comp_addr_hi
= U64_HI(bnx2x_sp_mapping(bp
, stats_comp
));
3412 dmae
->comp_val
= DMAE_COMP_VAL
;
3417 static void bnx2x_func_stats_init(struct bnx2x
*bp
)
3419 struct dmae_command
*dmae
= &bp
->stats_dmae
;
3420 u32
*stats_comp
= bnx2x_sp(bp
, stats_comp
);
3423 if (!bp
->func_stx
) {
3424 BNX2X_ERR("BUG!\n");
3428 bp
->executer_idx
= 0;
3429 memset(dmae
, 0, sizeof(struct dmae_command
));
3431 dmae
->opcode
= (DMAE_CMD_SRC_PCI
| DMAE_CMD_DST_GRC
|
3432 DMAE_CMD_C_DST_PCI
| DMAE_CMD_C_ENABLE
|
3433 DMAE_CMD_SRC_RESET
| DMAE_CMD_DST_RESET
|
3435 DMAE_CMD_ENDIANITY_B_DW_SWAP
|
3437 DMAE_CMD_ENDIANITY_DW_SWAP
|
3439 (BP_PORT(bp
) ? DMAE_CMD_PORT_1
: DMAE_CMD_PORT_0
) |
3440 (BP_E1HVN(bp
) << DMAE_CMD_E1HVN_SHIFT
));
3441 dmae
->src_addr_lo
= U64_LO(bnx2x_sp_mapping(bp
, func_stats
));
3442 dmae
->src_addr_hi
= U64_HI(bnx2x_sp_mapping(bp
, func_stats
));
3443 dmae
->dst_addr_lo
= bp
->func_stx
>> 2;
3444 dmae
->dst_addr_hi
= 0;
3445 dmae
->len
= sizeof(struct host_func_stats
) >> 2;
3446 dmae
->comp_addr_lo
= U64_LO(bnx2x_sp_mapping(bp
, stats_comp
));
3447 dmae
->comp_addr_hi
= U64_HI(bnx2x_sp_mapping(bp
, stats_comp
));
3448 dmae
->comp_val
= DMAE_COMP_VAL
;
static void bnx2x_stats_start(struct bnx2x *bp)
{
	if (bp->port.pmf)
		bnx2x_port_stats_init(bp);

	else if (bp->func_stx)
		bnx2x_func_stats_init(bp);

	bnx2x_hw_stats_post(bp);
	bnx2x_storm_stats_post(bp);
}

static void bnx2x_stats_pmf_start(struct bnx2x *bp)
{
	bnx2x_stats_comp(bp);
	bnx2x_stats_pmf_update(bp);
	bnx2x_stats_start(bp);
}

static void bnx2x_stats_restart(struct bnx2x *bp)
{
	bnx2x_stats_comp(bp);
	bnx2x_stats_start(bp);
}
3478 static void bnx2x_bmac_stats_update(struct bnx2x
*bp
)
3480 struct bmac_stats
*new = bnx2x_sp(bp
, mac_stats
.bmac_stats
);
3481 struct host_port_stats
*pstats
= bnx2x_sp(bp
, port_stats
);
3482 struct regpair diff
;
3484 UPDATE_STAT64(rx_stat_grerb
, rx_stat_ifhcinbadoctets
);
3485 UPDATE_STAT64(rx_stat_grfcs
, rx_stat_dot3statsfcserrors
);
3486 UPDATE_STAT64(rx_stat_grund
, rx_stat_etherstatsundersizepkts
);
3487 UPDATE_STAT64(rx_stat_grovr
, rx_stat_dot3statsframestoolong
);
3488 UPDATE_STAT64(rx_stat_grfrg
, rx_stat_etherstatsfragments
);
3489 UPDATE_STAT64(rx_stat_grjbr
, rx_stat_etherstatsjabbers
);
3490 UPDATE_STAT64(rx_stat_grxcf
, rx_stat_maccontrolframesreceived
);
3491 UPDATE_STAT64(rx_stat_grxpf
, rx_stat_xoffstateentered
);
3492 UPDATE_STAT64(rx_stat_grxpf
, rx_stat_xoffpauseframesreceived
);
3493 UPDATE_STAT64(tx_stat_gtxpf
, tx_stat_outxoffsent
);
3494 UPDATE_STAT64(tx_stat_gtxpf
, tx_stat_flowcontroldone
);
3495 UPDATE_STAT64(tx_stat_gt64
, tx_stat_etherstatspkts64octets
);
3496 UPDATE_STAT64(tx_stat_gt127
,
3497 tx_stat_etherstatspkts65octetsto127octets
);
3498 UPDATE_STAT64(tx_stat_gt255
,
3499 tx_stat_etherstatspkts128octetsto255octets
);
3500 UPDATE_STAT64(tx_stat_gt511
,
3501 tx_stat_etherstatspkts256octetsto511octets
);
3502 UPDATE_STAT64(tx_stat_gt1023
,
3503 tx_stat_etherstatspkts512octetsto1023octets
);
3504 UPDATE_STAT64(tx_stat_gt1518
,
3505 tx_stat_etherstatspkts1024octetsto1522octets
);
3506 UPDATE_STAT64(tx_stat_gt2047
, tx_stat_bmac_2047
);
3507 UPDATE_STAT64(tx_stat_gt4095
, tx_stat_bmac_4095
);
3508 UPDATE_STAT64(tx_stat_gt9216
, tx_stat_bmac_9216
);
3509 UPDATE_STAT64(tx_stat_gt16383
, tx_stat_bmac_16383
);
3510 UPDATE_STAT64(tx_stat_gterr
,
3511 tx_stat_dot3statsinternalmactransmiterrors
);
3512 UPDATE_STAT64(tx_stat_gtufl
, tx_stat_bmac_ufl
);
3515 static void bnx2x_emac_stats_update(struct bnx2x
*bp
)
3517 struct emac_stats
*new = bnx2x_sp(bp
, mac_stats
.emac_stats
);
3518 struct host_port_stats
*pstats
= bnx2x_sp(bp
, port_stats
);
3520 UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets
);
3521 UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets
);
3522 UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors
);
3523 UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors
);
3524 UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors
);
3525 UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors
);
3526 UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts
);
3527 UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong
);
3528 UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments
);
3529 UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers
);
3530 UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived
);
3531 UPDATE_EXTEND_STAT(rx_stat_xoffstateentered
);
3532 UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived
);
3533 UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived
);
3534 UPDATE_EXTEND_STAT(tx_stat_outxonsent
);
3535 UPDATE_EXTEND_STAT(tx_stat_outxoffsent
);
3536 UPDATE_EXTEND_STAT(tx_stat_flowcontroldone
);
3537 UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions
);
3538 UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes
);
3539 UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes
);
3540 UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions
);
3541 UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions
);
3542 UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions
);
3543 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets
);
3544 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets
);
3545 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets
);
3546 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets
);
3547 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets
);
3548 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets
);
3549 UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets
);
3550 UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors
);
3553 static int bnx2x_hw_stats_update(struct bnx2x
*bp
)
3555 struct nig_stats
*new = bnx2x_sp(bp
, nig_stats
);
3556 struct nig_stats
*old
= &(bp
->port
.old_nig_stats
);
3557 struct host_port_stats
*pstats
= bnx2x_sp(bp
, port_stats
);
3558 struct bnx2x_eth_stats
*estats
= &bp
->eth_stats
;
3559 struct regpair diff
;
3561 if (bp
->link_vars
.mac_type
== MAC_TYPE_BMAC
)
3562 bnx2x_bmac_stats_update(bp
);
3564 else if (bp
->link_vars
.mac_type
== MAC_TYPE_EMAC
)
3565 bnx2x_emac_stats_update(bp
);
3567 else { /* unreached */
3568 BNX2X_ERR("stats updated by dmae but no MAC active\n");
3572 ADD_EXTEND_64(pstats
->brb_drop_hi
, pstats
->brb_drop_lo
,
3573 new->brb_discard
- old
->brb_discard
);
3574 ADD_EXTEND_64(estats
->brb_truncate_hi
, estats
->brb_truncate_lo
,
3575 new->brb_truncate
- old
->brb_truncate
);
3577 UPDATE_STAT64_NIG(egress_mac_pkt0
,
3578 etherstatspkts1024octetsto1522octets
);
3579 UPDATE_STAT64_NIG(egress_mac_pkt1
, etherstatspktsover1522octets
);
3581 memcpy(old
, new, sizeof(struct nig_stats
));
3583 memcpy(&(estats
->rx_stat_ifhcinbadoctets_hi
), &(pstats
->mac_stx
[1]),
3584 sizeof(struct mac_stx
));
3585 estats
->brb_drop_hi
= pstats
->brb_drop_hi
;
3586 estats
->brb_drop_lo
= pstats
->brb_drop_lo
;
3588 pstats
->host_port_stats_start
= ++pstats
->host_port_stats_end
;
3593 static int bnx2x_storm_stats_update(struct bnx2x
*bp
)
3595 struct eth_stats_query
*stats
= bnx2x_sp(bp
, fw_stats
);
3596 int cl_id
= BP_CL_ID(bp
);
3597 struct tstorm_per_port_stats
*tport
=
3598 &stats
->tstorm_common
.port_statistics
;
3599 struct tstorm_per_client_stats
*tclient
=
3600 &stats
->tstorm_common
.client_statistics
[cl_id
];
3601 struct tstorm_per_client_stats
*old_tclient
= &bp
->old_tclient
;
3602 struct xstorm_per_client_stats
*xclient
=
3603 &stats
->xstorm_common
.client_statistics
[cl_id
];
3604 struct xstorm_per_client_stats
*old_xclient
= &bp
->old_xclient
;
3605 struct host_func_stats
*fstats
= bnx2x_sp(bp
, func_stats
);
3606 struct bnx2x_eth_stats
*estats
= &bp
->eth_stats
;
3609 /* are storm stats valid? */
3610 if ((u16
)(le16_to_cpu(tclient
->stats_counter
) + 1) !=
3611 bp
->stats_counter
) {
3612 DP(BNX2X_MSG_STATS
, "stats not updated by tstorm"
3613 " tstorm counter (%d) != stats_counter (%d)\n",
3614 tclient
->stats_counter
, bp
->stats_counter
);
3617 if ((u16
)(le16_to_cpu(xclient
->stats_counter
) + 1) !=
3618 bp
->stats_counter
) {
3619 DP(BNX2X_MSG_STATS
, "stats not updated by xstorm"
3620 " xstorm counter (%d) != stats_counter (%d)\n",
3621 xclient
->stats_counter
, bp
->stats_counter
);
3625 fstats
->total_bytes_received_hi
=
3626 fstats
->valid_bytes_received_hi
=
3627 le32_to_cpu(tclient
->total_rcv_bytes
.hi
);
3628 fstats
->total_bytes_received_lo
=
3629 fstats
->valid_bytes_received_lo
=
3630 le32_to_cpu(tclient
->total_rcv_bytes
.lo
);
3632 estats
->error_bytes_received_hi
=
3633 le32_to_cpu(tclient
->rcv_error_bytes
.hi
);
3634 estats
->error_bytes_received_lo
=
3635 le32_to_cpu(tclient
->rcv_error_bytes
.lo
);
3636 ADD_64(estats
->error_bytes_received_hi
,
3637 estats
->rx_stat_ifhcinbadoctets_hi
,
3638 estats
->error_bytes_received_lo
,
3639 estats
->rx_stat_ifhcinbadoctets_lo
);
3641 ADD_64(fstats
->total_bytes_received_hi
,
3642 estats
->error_bytes_received_hi
,
3643 fstats
->total_bytes_received_lo
,
3644 estats
->error_bytes_received_lo
);
3646 UPDATE_EXTEND_TSTAT(rcv_unicast_pkts
, total_unicast_packets_received
);
3647 UPDATE_EXTEND_TSTAT(rcv_multicast_pkts
,
3648 total_multicast_packets_received
);
3649 UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts
,
3650 total_broadcast_packets_received
);
3652 fstats
->total_bytes_transmitted_hi
=
3653 le32_to_cpu(xclient
->total_sent_bytes
.hi
);
3654 fstats
->total_bytes_transmitted_lo
=
3655 le32_to_cpu(xclient
->total_sent_bytes
.lo
);
3657 UPDATE_EXTEND_XSTAT(unicast_pkts_sent
,
3658 total_unicast_packets_transmitted
);
3659 UPDATE_EXTEND_XSTAT(multicast_pkts_sent
,
3660 total_multicast_packets_transmitted
);
3661 UPDATE_EXTEND_XSTAT(broadcast_pkts_sent
,
3662 total_broadcast_packets_transmitted
);
3664 memcpy(estats
, &(fstats
->total_bytes_received_hi
),
3665 sizeof(struct host_func_stats
) - 2*sizeof(u32
));
3667 estats
->mac_filter_discard
= le32_to_cpu(tport
->mac_filter_discard
);
3668 estats
->xxoverflow_discard
= le32_to_cpu(tport
->xxoverflow_discard
);
3669 estats
->brb_truncate_discard
=
3670 le32_to_cpu(tport
->brb_truncate_discard
);
3671 estats
->mac_discard
= le32_to_cpu(tport
->mac_discard
);
3673 old_tclient
->rcv_unicast_bytes
.hi
=
3674 le32_to_cpu(tclient
->rcv_unicast_bytes
.hi
);
3675 old_tclient
->rcv_unicast_bytes
.lo
=
3676 le32_to_cpu(tclient
->rcv_unicast_bytes
.lo
);
3677 old_tclient
->rcv_broadcast_bytes
.hi
=
3678 le32_to_cpu(tclient
->rcv_broadcast_bytes
.hi
);
3679 old_tclient
->rcv_broadcast_bytes
.lo
=
3680 le32_to_cpu(tclient
->rcv_broadcast_bytes
.lo
);
3681 old_tclient
->rcv_multicast_bytes
.hi
=
3682 le32_to_cpu(tclient
->rcv_multicast_bytes
.hi
);
3683 old_tclient
->rcv_multicast_bytes
.lo
=
3684 le32_to_cpu(tclient
->rcv_multicast_bytes
.lo
);
3685 old_tclient
->total_rcv_pkts
= le32_to_cpu(tclient
->total_rcv_pkts
);
3687 old_tclient
->checksum_discard
= le32_to_cpu(tclient
->checksum_discard
);
3688 old_tclient
->packets_too_big_discard
=
3689 le32_to_cpu(tclient
->packets_too_big_discard
);
3690 estats
->no_buff_discard
=
3691 old_tclient
->no_buff_discard
= le32_to_cpu(tclient
->no_buff_discard
);
3692 old_tclient
->ttl0_discard
= le32_to_cpu(tclient
->ttl0_discard
);
3694 old_xclient
->total_sent_pkts
= le32_to_cpu(xclient
->total_sent_pkts
);
3695 old_xclient
->unicast_bytes_sent
.hi
=
3696 le32_to_cpu(xclient
->unicast_bytes_sent
.hi
);
3697 old_xclient
->unicast_bytes_sent
.lo
=
3698 le32_to_cpu(xclient
->unicast_bytes_sent
.lo
);
3699 old_xclient
->multicast_bytes_sent
.hi
=
3700 le32_to_cpu(xclient
->multicast_bytes_sent
.hi
);
3701 old_xclient
->multicast_bytes_sent
.lo
=
3702 le32_to_cpu(xclient
->multicast_bytes_sent
.lo
);
3703 old_xclient
->broadcast_bytes_sent
.hi
=
3704 le32_to_cpu(xclient
->broadcast_bytes_sent
.hi
);
3705 old_xclient
->broadcast_bytes_sent
.lo
=
3706 le32_to_cpu(xclient
->broadcast_bytes_sent
.lo
);
3708 fstats
->host_func_stats_start
= ++fstats
->host_func_stats_end
;
3713 static void bnx2x_net_stats_update(struct bnx2x
*bp
)
3715 struct tstorm_per_client_stats
*old_tclient
= &bp
->old_tclient
;
3716 struct bnx2x_eth_stats
*estats
= &bp
->eth_stats
;
3717 struct net_device_stats
*nstats
= &bp
->dev
->stats
;
3719 nstats
->rx_packets
=
3720 bnx2x_hilo(&estats
->total_unicast_packets_received_hi
) +
3721 bnx2x_hilo(&estats
->total_multicast_packets_received_hi
) +
3722 bnx2x_hilo(&estats
->total_broadcast_packets_received_hi
);
3724 nstats
->tx_packets
=
3725 bnx2x_hilo(&estats
->total_unicast_packets_transmitted_hi
) +
3726 bnx2x_hilo(&estats
->total_multicast_packets_transmitted_hi
) +
3727 bnx2x_hilo(&estats
->total_broadcast_packets_transmitted_hi
);
3729 nstats
->rx_bytes
= bnx2x_hilo(&estats
->valid_bytes_received_hi
);
3731 nstats
->tx_bytes
= bnx2x_hilo(&estats
->total_bytes_transmitted_hi
);
3733 nstats
->rx_dropped
= old_tclient
->checksum_discard
+
3734 estats
->mac_discard
;
3735 nstats
->tx_dropped
= 0;
3738 bnx2x_hilo(&estats
->total_multicast_packets_transmitted_hi
);
3740 nstats
->collisions
=
3741 estats
->tx_stat_dot3statssinglecollisionframes_lo
+
3742 estats
->tx_stat_dot3statsmultiplecollisionframes_lo
+
3743 estats
->tx_stat_dot3statslatecollisions_lo
+
3744 estats
->tx_stat_dot3statsexcessivecollisions_lo
;
3746 estats
->jabber_packets_received
=
3747 old_tclient
->packets_too_big_discard
+
3748 estats
->rx_stat_dot3statsframestoolong_lo
;
3750 nstats
->rx_length_errors
=
3751 estats
->rx_stat_etherstatsundersizepkts_lo
+
3752 estats
->jabber_packets_received
;
3753 nstats
->rx_over_errors
= estats
->brb_drop_lo
+ estats
->brb_truncate_lo
;
3754 nstats
->rx_crc_errors
= estats
->rx_stat_dot3statsfcserrors_lo
;
3755 nstats
->rx_frame_errors
= estats
->rx_stat_dot3statsalignmenterrors_lo
;
3756 nstats
->rx_fifo_errors
= old_tclient
->no_buff_discard
;
3757 nstats
->rx_missed_errors
= estats
->xxoverflow_discard
;
3759 nstats
->rx_errors
= nstats
->rx_length_errors
+
3760 nstats
->rx_over_errors
+
3761 nstats
->rx_crc_errors
+
3762 nstats
->rx_frame_errors
+
3763 nstats
->rx_fifo_errors
+
3764 nstats
->rx_missed_errors
;
3766 nstats
->tx_aborted_errors
=
3767 estats
->tx_stat_dot3statslatecollisions_lo
+
3768 estats
->tx_stat_dot3statsexcessivecollisions_lo
;
3769 nstats
->tx_carrier_errors
= estats
->rx_stat_falsecarriererrors_lo
;
3770 nstats
->tx_fifo_errors
= 0;
3771 nstats
->tx_heartbeat_errors
= 0;
3772 nstats
->tx_window_errors
= 0;
3774 nstats
->tx_errors
= nstats
->tx_aborted_errors
+
3775 nstats
->tx_carrier_errors
;
3778 static void bnx2x_stats_update(struct bnx2x
*bp
)
3780 u32
*stats_comp
= bnx2x_sp(bp
, stats_comp
);
3783 if (*stats_comp
!= DMAE_COMP_VAL
)
3787 update
= (bnx2x_hw_stats_update(bp
) == 0);
3789 update
|= (bnx2x_storm_stats_update(bp
) == 0);
3792 bnx2x_net_stats_update(bp
);
3795 if (bp
->stats_pending
) {
3796 bp
->stats_pending
++;
3797 if (bp
->stats_pending
== 3) {
3798 BNX2X_ERR("stats not updated for 3 times\n");
3805 if (bp
->msglevel
& NETIF_MSG_TIMER
) {
3806 struct tstorm_per_client_stats
*old_tclient
= &bp
->old_tclient
;
3807 struct bnx2x_eth_stats
*estats
= &bp
->eth_stats
;
3808 struct net_device_stats
*nstats
= &bp
->dev
->stats
;
3811 printk(KERN_DEBUG
"%s:\n", bp
->dev
->name
);
3812 printk(KERN_DEBUG
" tx avail (%4x) tx hc idx (%x)"
3814 bnx2x_tx_avail(bp
->fp
),
3815 le16_to_cpu(*bp
->fp
->tx_cons_sb
), nstats
->tx_packets
);
3816 printk(KERN_DEBUG
" rx usage (%4x) rx hc idx (%x)"
3818 (u16
)(le16_to_cpu(*bp
->fp
->rx_cons_sb
) -
3819 bp
->fp
->rx_comp_cons
),
3820 le16_to_cpu(*bp
->fp
->rx_cons_sb
), nstats
->rx_packets
);
3821 printk(KERN_DEBUG
" %s (Xoff events %u) brb drops %u\n",
3822 netif_queue_stopped(bp
->dev
) ? "Xoff" : "Xon",
3823 estats
->driver_xoff
, estats
->brb_drop_lo
);
3824 printk(KERN_DEBUG
"tstats: checksum_discard %u "
3825 "packets_too_big_discard %u no_buff_discard %u "
3826 "mac_discard %u mac_filter_discard %u "
3827 "xxovrflow_discard %u brb_truncate_discard %u "
3828 "ttl0_discard %u\n",
3829 old_tclient
->checksum_discard
,
3830 old_tclient
->packets_too_big_discard
,
3831 old_tclient
->no_buff_discard
, estats
->mac_discard
,
3832 estats
->mac_filter_discard
, estats
->xxoverflow_discard
,
3833 estats
->brb_truncate_discard
,
3834 old_tclient
->ttl0_discard
);
3836 for_each_queue(bp
, i
) {
3837 printk(KERN_DEBUG
"[%d]: %lu\t%lu\t%lu\n", i
,
3838 bnx2x_fp(bp
, i
, tx_pkt
),
3839 bnx2x_fp(bp
, i
, rx_pkt
),
3840 bnx2x_fp(bp
, i
, rx_calls
));
3844 bnx2x_hw_stats_post(bp
);
3845 bnx2x_storm_stats_post(bp
);
3848 static void bnx2x_port_stats_stop(struct bnx2x
*bp
)
3850 struct dmae_command
*dmae
;
3852 int loader_idx
= PMF_DMAE_C(bp
);
3853 u32
*stats_comp
= bnx2x_sp(bp
, stats_comp
);
3855 bp
->executer_idx
= 0;
3857 opcode
= (DMAE_CMD_SRC_PCI
| DMAE_CMD_DST_GRC
|
3859 DMAE_CMD_SRC_RESET
| DMAE_CMD_DST_RESET
|
3861 DMAE_CMD_ENDIANITY_B_DW_SWAP
|
3863 DMAE_CMD_ENDIANITY_DW_SWAP
|
3865 (BP_PORT(bp
) ? DMAE_CMD_PORT_1
: DMAE_CMD_PORT_0
) |
3866 (BP_E1HVN(bp
) << DMAE_CMD_E1HVN_SHIFT
));
3868 if (bp
->port
.port_stx
) {
3870 dmae
= bnx2x_sp(bp
, dmae
[bp
->executer_idx
++]);
3872 dmae
->opcode
= (opcode
| DMAE_CMD_C_DST_GRC
);
3874 dmae
->opcode
= (opcode
| DMAE_CMD_C_DST_PCI
);
3875 dmae
->src_addr_lo
= U64_LO(bnx2x_sp_mapping(bp
, port_stats
));
3876 dmae
->src_addr_hi
= U64_HI(bnx2x_sp_mapping(bp
, port_stats
));
3877 dmae
->dst_addr_lo
= bp
->port
.port_stx
>> 2;
3878 dmae
->dst_addr_hi
= 0;
3879 dmae
->len
= sizeof(struct host_port_stats
) >> 2;
3881 dmae
->comp_addr_lo
= dmae_reg_go_c
[loader_idx
] >> 2;
3882 dmae
->comp_addr_hi
= 0;
3885 dmae
->comp_addr_lo
=
3886 U64_LO(bnx2x_sp_mapping(bp
, stats_comp
));
3887 dmae
->comp_addr_hi
=
3888 U64_HI(bnx2x_sp_mapping(bp
, stats_comp
));
3889 dmae
->comp_val
= DMAE_COMP_VAL
;
3897 dmae
= bnx2x_sp(bp
, dmae
[bp
->executer_idx
++]);
3898 dmae
->opcode
= (opcode
| DMAE_CMD_C_DST_PCI
);
3899 dmae
->src_addr_lo
= U64_LO(bnx2x_sp_mapping(bp
, func_stats
));
3900 dmae
->src_addr_hi
= U64_HI(bnx2x_sp_mapping(bp
, func_stats
));
3901 dmae
->dst_addr_lo
= bp
->func_stx
>> 2;
3902 dmae
->dst_addr_hi
= 0;
3903 dmae
->len
= sizeof(struct host_func_stats
) >> 2;
3904 dmae
->comp_addr_lo
= U64_LO(bnx2x_sp_mapping(bp
, stats_comp
));
3905 dmae
->comp_addr_hi
= U64_HI(bnx2x_sp_mapping(bp
, stats_comp
));
3906 dmae
->comp_val
= DMAE_COMP_VAL
;
3912 static void bnx2x_stats_stop(struct bnx2x
*bp
)
3916 bnx2x_stats_comp(bp
);
3919 update
= (bnx2x_hw_stats_update(bp
) == 0);
3921 update
|= (bnx2x_storm_stats_update(bp
) == 0);
3924 bnx2x_net_stats_update(bp
);
3927 bnx2x_port_stats_stop(bp
);
3929 bnx2x_hw_stats_post(bp
);
3930 bnx2x_stats_comp(bp
);
static void bnx2x_stats_do_nothing(struct bnx2x *bp)
{
}

static const struct {
	void (*action)(struct bnx2x *bp);
	enum bnx2x_stats_state next_state;
} bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
/* state	event	*/
{
/* DISABLED	PMF	*/ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
/*		LINK_UP	*/ {bnx2x_stats_start,      STATS_STATE_ENABLED},
/*		UPDATE	*/ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
/*		STOP	*/ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
},
{
/* ENABLED	PMF	*/ {bnx2x_stats_pmf_start,  STATS_STATE_ENABLED},
/*		LINK_UP	*/ {bnx2x_stats_restart,    STATS_STATE_ENABLED},
/*		UPDATE	*/ {bnx2x_stats_update,     STATS_STATE_ENABLED},
/*		STOP	*/ {bnx2x_stats_stop,       STATS_STATE_DISABLED}
}
};

static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
{
	enum bnx2x_stats_state state = bp->stats_state;

	bnx2x_stats_stm[state][event].action(bp);
	bp->stats_state = bnx2x_stats_stm[state][event].next_state;

	if ((event != STATS_EVENT_UPDATE) || (bp->msglevel & NETIF_MSG_TIMER))
		DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
		   state, event, bp->stats_state);
}
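/* Example walk through the state machine above: in STATS_STATE_DISABLED a
 * STATS_EVENT_LINK_UP runs bnx2x_stats_start() and moves the driver to
 * STATS_STATE_ENABLED; a subsequent STATS_EVENT_UPDATE (from bnx2x_timer()
 * below) runs bnx2x_stats_update() and stays in ENABLED, while
 * STATS_EVENT_STOP runs bnx2x_stats_stop() and returns to DISABLED.
 */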
static void bnx2x_timer(unsigned long data)
{
	struct bnx2x *bp = (struct bnx2x *) data;

	if (!netif_running(bp->dev))
		return;

	if (atomic_read(&bp->intr_sem) != 0)
		goto timer_restart;

	if (poll) {
		struct bnx2x_fastpath *fp = &bp->fp[0];
		int rc;

		bnx2x_tx_int(fp, 1000);
		rc = bnx2x_rx_int(fp, 1000);
	}

	if (!BP_NOMCP(bp)) {
		int func = BP_FUNC(bp);
		u32 drv_pulse;
		u32 mcp_pulse;

		++bp->fw_drv_pulse_wr_seq;
		bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
		/* TBD - add SYSTEM_TIME */
		drv_pulse = bp->fw_drv_pulse_wr_seq;
		SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);

		mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
			     MCP_PULSE_SEQ_MASK);
		/* The delta between driver pulse and mcp response
		 * should be 1 (before mcp response) or 0 (after mcp response)
		 */
		if ((drv_pulse != mcp_pulse) &&
		    (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
			/* someone lost a heartbeat... */
			BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
				  drv_pulse, mcp_pulse);
		}
	}

	if ((bp->state == BNX2X_STATE_OPEN) ||
	    (bp->state == BNX2X_STATE_DISABLED))
		bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);

timer_restart:
	mod_timer(&bp->timer, jiffies + bp->current_interval);
}

/* end of Statistics */
/*
 * nic init service functions
 */

static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
{
	int port = BP_PORT(bp);

	bnx2x_init_fill(bp, BAR_USTRORM_INTMEM +
			USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
			sizeof(struct ustorm_status_block)/4);
	bnx2x_init_fill(bp, BAR_CSTRORM_INTMEM +
			CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
			sizeof(struct cstorm_status_block)/4);
}
static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
			  dma_addr_t mapping, int sb_id)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int index;
	u64 section;

	/* USTORM */
	section = ((u64)mapping) + offsetof(struct host_status_block,
					    u_status_block);
	sb->u_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_USTRORM_INTMEM +
	       USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
	REG_WR(bp, BAR_USTRORM_INTMEM +
	       ((USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_USTRORM_INTMEM + FP_USB_FUNC_OFF +
		USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);

	for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_USTRORM_INTMEM +
			 USTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);

	/* CSTORM */
	section = ((u64)mapping) + offsetof(struct host_status_block,
					    c_status_block);
	sb->c_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       ((CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
		CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);

	for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);

	bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
}
static void bnx2x_zero_def_sb(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);

	bnx2x_init_fill(bp, BAR_USTRORM_INTMEM +
			USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
			sizeof(struct ustorm_def_status_block)/4);
	bnx2x_init_fill(bp, BAR_CSTRORM_INTMEM +
			CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
			sizeof(struct cstorm_def_status_block)/4);
	bnx2x_init_fill(bp, BAR_XSTRORM_INTMEM +
			XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
			sizeof(struct xstorm_def_status_block)/4);
	bnx2x_init_fill(bp, BAR_TSTRORM_INTMEM +
			TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
			sizeof(struct tstorm_def_status_block)/4);
}
4102 static void bnx2x_init_def_sb(struct bnx2x
*bp
,
4103 struct host_def_status_block
*def_sb
,
4104 dma_addr_t mapping
, int sb_id
)
4106 int port
= BP_PORT(bp
);
4107 int func
= BP_FUNC(bp
);
4108 int index
, val
, reg_offset
;
4112 section
= ((u64
)mapping
) + offsetof(struct host_def_status_block
,
4113 atten_status_block
);
4114 def_sb
->atten_status_block
.status_block_id
= sb_id
;
4118 reg_offset
= (port
? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0
:
4119 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0
);
4121 for (index
= 0; index
< MAX_DYNAMIC_ATTN_GRPS
; index
++) {
4122 bp
->attn_group
[index
].sig
[0] = REG_RD(bp
,
4123 reg_offset
+ 0x10*index
);
4124 bp
->attn_group
[index
].sig
[1] = REG_RD(bp
,
4125 reg_offset
+ 0x4 + 0x10*index
);
4126 bp
->attn_group
[index
].sig
[2] = REG_RD(bp
,
4127 reg_offset
+ 0x8 + 0x10*index
);
4128 bp
->attn_group
[index
].sig
[3] = REG_RD(bp
,
4129 reg_offset
+ 0xc + 0x10*index
);
4132 reg_offset
= (port
? HC_REG_ATTN_MSG1_ADDR_L
:
4133 HC_REG_ATTN_MSG0_ADDR_L
);
4135 REG_WR(bp
, reg_offset
, U64_LO(section
));
4136 REG_WR(bp
, reg_offset
+ 4, U64_HI(section
));
4138 reg_offset
= (port
? HC_REG_ATTN_NUM_P1
: HC_REG_ATTN_NUM_P0
);
4140 val
= REG_RD(bp
, reg_offset
);
4142 REG_WR(bp
, reg_offset
, val
);
4145 section
= ((u64
)mapping
) + offsetof(struct host_def_status_block
,
4146 u_def_status_block
);
4147 def_sb
->u_def_status_block
.status_block_id
= sb_id
;
4149 REG_WR(bp
, BAR_USTRORM_INTMEM
+
4150 USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func
), U64_LO(section
));
4151 REG_WR(bp
, BAR_USTRORM_INTMEM
+
4152 ((USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func
)) + 4),
4154 REG_WR8(bp
, BAR_USTRORM_INTMEM
+ DEF_USB_FUNC_OFF
+
4155 USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func
), func
);
4157 for (index
= 0; index
< HC_USTORM_DEF_SB_NUM_INDICES
; index
++)
4158 REG_WR16(bp
, BAR_USTRORM_INTMEM
+
4159 USTORM_DEF_SB_HC_DISABLE_OFFSET(func
, index
), 1);
4162 section
= ((u64
)mapping
) + offsetof(struct host_def_status_block
,
4163 c_def_status_block
);
4164 def_sb
->c_def_status_block
.status_block_id
= sb_id
;
4166 REG_WR(bp
, BAR_CSTRORM_INTMEM
+
4167 CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func
), U64_LO(section
));
4168 REG_WR(bp
, BAR_CSTRORM_INTMEM
+
4169 ((CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func
)) + 4),
4171 REG_WR8(bp
, BAR_CSTRORM_INTMEM
+ DEF_CSB_FUNC_OFF
+
4172 CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func
), func
);
4174 for (index
= 0; index
< HC_CSTORM_DEF_SB_NUM_INDICES
; index
++)
4175 REG_WR16(bp
, BAR_CSTRORM_INTMEM
+
4176 CSTORM_DEF_SB_HC_DISABLE_OFFSET(func
, index
), 1);
4179 section
= ((u64
)mapping
) + offsetof(struct host_def_status_block
,
4180 t_def_status_block
);
4181 def_sb
->t_def_status_block
.status_block_id
= sb_id
;
4183 REG_WR(bp
, BAR_TSTRORM_INTMEM
+
4184 TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func
), U64_LO(section
));
4185 REG_WR(bp
, BAR_TSTRORM_INTMEM
+
4186 ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func
)) + 4),
4188 REG_WR8(bp
, BAR_TSTRORM_INTMEM
+ DEF_TSB_FUNC_OFF
+
4189 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func
), func
);
4191 for (index
= 0; index
< HC_TSTORM_DEF_SB_NUM_INDICES
; index
++)
4192 REG_WR16(bp
, BAR_TSTRORM_INTMEM
+
4193 TSTORM_DEF_SB_HC_DISABLE_OFFSET(func
, index
), 1);
4196 section
= ((u64
)mapping
) + offsetof(struct host_def_status_block
,
4197 x_def_status_block
);
4198 def_sb
->x_def_status_block
.status_block_id
= sb_id
;
4200 REG_WR(bp
, BAR_XSTRORM_INTMEM
+
4201 XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func
), U64_LO(section
));
4202 REG_WR(bp
, BAR_XSTRORM_INTMEM
+
4203 ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func
)) + 4),
4205 REG_WR8(bp
, BAR_XSTRORM_INTMEM
+ DEF_XSB_FUNC_OFF
+
4206 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func
), func
);
4208 for (index
= 0; index
< HC_XSTORM_DEF_SB_NUM_INDICES
; index
++)
4209 REG_WR16(bp
, BAR_XSTRORM_INTMEM
+
4210 XSTORM_DEF_SB_HC_DISABLE_OFFSET(func
, index
), 1);
4212 bp
->stats_pending
= 0;
4213 bp
->set_mac_pending
= 0;
4215 bnx2x_ack_sb(bp
, sb_id
, CSTORM_ID
, 0, IGU_INT_ENABLE
, 0);
static void bnx2x_update_coalesce(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int i;

	for_each_queue(bp, i) {
		int sb_id = bp->fp[i].sb_id;

		/* HC_INDEX_U_ETH_RX_CQ_CONS */
		REG_WR8(bp, BAR_USTRORM_INTMEM +
			USTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
						    U_SB_ETH_RX_CQ_INDEX),
			bp->rx_ticks/12);
		REG_WR16(bp, BAR_USTRORM_INTMEM +
			 USTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
						     U_SB_ETH_RX_CQ_INDEX),
			 bp->rx_ticks ? 0 : 1);
		REG_WR16(bp, BAR_USTRORM_INTMEM +
			 USTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
						     U_SB_ETH_RX_BD_INDEX),
			 bp->rx_ticks ? 0 : 1);

		/* HC_INDEX_C_ETH_TX_CQ_CONS */
		REG_WR8(bp, BAR_CSTRORM_INTMEM +
			CSTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
						    C_SB_ETH_TX_CQ_INDEX),
			bp->tx_ticks/12);
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
						     C_SB_ETH_TX_CQ_INDEX),
			 bp->tx_ticks ? 0 : 1);
	}
}
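/*
 * Illustrative sketch (not part of the driver): rx_ticks/tx_ticks are the
 * interrupt-coalescing intervals in microseconds; in this version the
 * status-block timeout appears to be programmed in 12us units, and a value
 * of 0 simply disables the timeout for that index (hence the "ticks ? 0 : 1"
 * writes above).  A hypothetical helper that updates the intervals and
 * re-programs the chip could look like this:
 */
#if 0	/* example only, never compiled */
static void bnx2x_set_coalesce_usecs(struct bnx2x *bp,
				     u16 rx_usecs, u16 tx_usecs)
{
	bp->rx_ticks = rx_usecs;	/* 0 disables RX coalescing */
	bp->tx_ticks = tx_usecs;	/* 0 disables TX coalescing */

	if (netif_running(bp->dev))
		bnx2x_update_coalesce(bp);
}
#endif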
static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
				       struct bnx2x_fastpath *fp, int last)
{
	int i;

	for (i = 0; i < last; i++) {
		struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]);
		struct sk_buff *skb = rx_buf->skb;

		if (skb == NULL) {
			DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
			continue;
		}

		if (fp->tpa_state[i] == BNX2X_TPA_START)
			pci_unmap_single(bp->pdev,
					 pci_unmap_addr(rx_buf, mapping),
					 bp->rx_buf_size,
					 PCI_DMA_FROMDEVICE);

		dev_kfree_skb(skb);
		rx_buf->skb = NULL;
	}
}
4277 static void bnx2x_init_rx_rings(struct bnx2x
*bp
)
4279 int func
= BP_FUNC(bp
);
4280 int max_agg_queues
= CHIP_IS_E1(bp
) ? ETH_MAX_AGGREGATION_QUEUES_E1
:
4281 ETH_MAX_AGGREGATION_QUEUES_E1H
;
4282 u16 ring_prod
, cqe_ring_prod
;
4285 bp
->rx_buf_size
= bp
->dev
->mtu
;
4286 bp
->rx_buf_size
+= bp
->rx_offset
+ ETH_OVREHEAD
+
4287 BCM_RX_ETH_PAYLOAD_ALIGN
;
4289 if (bp
->flags
& TPA_ENABLE_FLAG
) {
4291 "rx_buf_size %d effective_mtu %d\n",
4292 bp
->rx_buf_size
, bp
->dev
->mtu
+ ETH_OVREHEAD
);
4294 for_each_queue(bp
, j
) {
4295 struct bnx2x_fastpath
*fp
= &bp
->fp
[j
];
4297 for (i
= 0; i
< max_agg_queues
; i
++) {
4298 fp
->tpa_pool
[i
].skb
=
4299 netdev_alloc_skb(bp
->dev
, bp
->rx_buf_size
);
4300 if (!fp
->tpa_pool
[i
].skb
) {
4301 BNX2X_ERR("Failed to allocate TPA "
4302 "skb pool for queue[%d] - "
4303 "disabling TPA on this "
4305 bnx2x_free_tpa_pool(bp
, fp
, i
);
4306 fp
->disable_tpa
= 1;
4309 pci_unmap_addr_set((struct sw_rx_bd
*)
4310 &bp
->fp
->tpa_pool
[i
],
4312 fp
->tpa_state
[i
] = BNX2X_TPA_STOP
;
4317 for_each_queue(bp
, j
) {
4318 struct bnx2x_fastpath
*fp
= &bp
->fp
[j
];
4321 fp
->rx_cons_sb
= BNX2X_RX_SB_INDEX
;
4322 fp
->rx_bd_cons_sb
= BNX2X_RX_SB_BD_INDEX
;
4324 /* "next page" elements initialization */
4326 for (i
= 1; i
<= NUM_RX_SGE_PAGES
; i
++) {
4327 struct eth_rx_sge
*sge
;
4329 sge
= &fp
->rx_sge_ring
[RX_SGE_CNT
* i
- 2];
4331 cpu_to_le32(U64_HI(fp
->rx_sge_mapping
+
4332 BCM_PAGE_SIZE
*(i
% NUM_RX_SGE_PAGES
)));
4334 cpu_to_le32(U64_LO(fp
->rx_sge_mapping
+
4335 BCM_PAGE_SIZE
*(i
% NUM_RX_SGE_PAGES
)));
4338 bnx2x_init_sge_ring_bit_mask(fp
);
4341 for (i
= 1; i
<= NUM_RX_RINGS
; i
++) {
4342 struct eth_rx_bd
*rx_bd
;
4344 rx_bd
= &fp
->rx_desc_ring
[RX_DESC_CNT
* i
- 2];
4346 cpu_to_le32(U64_HI(fp
->rx_desc_mapping
+
4347 BCM_PAGE_SIZE
*(i
% NUM_RX_RINGS
)));
4349 cpu_to_le32(U64_LO(fp
->rx_desc_mapping
+
4350 BCM_PAGE_SIZE
*(i
% NUM_RX_RINGS
)));
4354 for (i
= 1; i
<= NUM_RCQ_RINGS
; i
++) {
4355 struct eth_rx_cqe_next_page
*nextpg
;
4357 nextpg
= (struct eth_rx_cqe_next_page
*)
4358 &fp
->rx_comp_ring
[RCQ_DESC_CNT
* i
- 1];
4360 cpu_to_le32(U64_HI(fp
->rx_comp_mapping
+
4361 BCM_PAGE_SIZE
*(i
% NUM_RCQ_RINGS
)));
4363 cpu_to_le32(U64_LO(fp
->rx_comp_mapping
+
4364 BCM_PAGE_SIZE
*(i
% NUM_RCQ_RINGS
)));
4367 /* Allocate SGEs and initialize the ring elements */
4368 for (i
= 0, ring_prod
= 0;
4369 i
< MAX_RX_SGE_CNT
*NUM_RX_SGE_PAGES
; i
++) {
4371 if (bnx2x_alloc_rx_sge(bp
, fp
, ring_prod
) < 0) {
4372 BNX2X_ERR("was only able to allocate "
4374 BNX2X_ERR("disabling TPA for queue[%d]\n", j
);
4375 /* Cleanup already allocated elements */
4376 bnx2x_free_rx_sge_range(bp
, fp
, ring_prod
);
4377 bnx2x_free_tpa_pool(bp
, fp
, max_agg_queues
);
4378 fp
->disable_tpa
= 1;
4382 ring_prod
= NEXT_SGE_IDX(ring_prod
);
4384 fp
->rx_sge_prod
= ring_prod
;
4386 /* Allocate BDs and initialize BD ring */
4387 fp
->rx_comp_cons
= 0;
4388 cqe_ring_prod
= ring_prod
= 0;
4389 for (i
= 0; i
< bp
->rx_ring_size
; i
++) {
4390 if (bnx2x_alloc_rx_skb(bp
, fp
, ring_prod
) < 0) {
4391 BNX2X_ERR("was only able to allocate "
4393 bp
->eth_stats
.rx_skb_alloc_failed
++;
4396 ring_prod
= NEXT_RX_IDX(ring_prod
);
4397 cqe_ring_prod
= NEXT_RCQ_IDX(cqe_ring_prod
);
4398 WARN_ON(ring_prod
<= i
);
4401 fp
->rx_bd_prod
= ring_prod
;
4402 /* must not have more available CQEs than BDs */
4403 fp
->rx_comp_prod
= min((u16
)(NUM_RCQ_RINGS
*RCQ_DESC_CNT
),
4405 fp
->rx_pkt
= fp
->rx_calls
= 0;
4408 * this will generate an interrupt (to the TSTORM)
4409 * must only be done after chip is initialized
4411 bnx2x_update_rx_prod(bp
, fp
, ring_prod
, fp
->rx_comp_prod
,
4416 REG_WR(bp
, BAR_USTRORM_INTMEM
+
4417 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func
),
4418 U64_LO(fp
->rx_comp_mapping
));
4419 REG_WR(bp
, BAR_USTRORM_INTMEM
+
4420 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func
) + 4,
4421 U64_HI(fp
->rx_comp_mapping
));
static void bnx2x_init_tx_ring(struct bnx2x *bp)
{
	int i, j;

	for_each_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		for (i = 1; i <= NUM_TX_RINGS; i++) {
			struct eth_tx_bd *tx_bd =
				&fp->tx_desc_ring[TX_DESC_CNT * i - 1];

			tx_bd->addr_hi =
				cpu_to_le32(U64_HI(fp->tx_desc_mapping +
					    BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
			tx_bd->addr_lo =
				cpu_to_le32(U64_LO(fp->tx_desc_mapping +
					    BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
		}

		fp->tx_pkt_prod = 0;
		fp->tx_pkt_cons = 0;
		fp->tx_bd_prod = 0;
		fp->tx_bd_cons = 0;
		fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
		fp->tx_pkt = 0;
	}
}
static void bnx2x_init_sp_ring(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);

	spin_lock_init(&bp->spq_lock);

	bp->spq_left = MAX_SPQ_PENDING;
	bp->spq_prod_idx = 0;
	bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
	bp->spq_prod_bd = bp->spq;
	bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;

	REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
	       U64_LO(bp->spq_mapping));
	REG_WR(bp,
	       XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
	       U64_HI(bp->spq_mapping));

	REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
	       bp->spq_prod_idx);
}
4475 static void bnx2x_init_context(struct bnx2x
*bp
)
4479 for_each_queue(bp
, i
) {
4480 struct eth_context
*context
= bnx2x_sp(bp
, context
[i
].eth
);
4481 struct bnx2x_fastpath
*fp
= &bp
->fp
[i
];
4482 u8 sb_id
= FP_SB_ID(fp
);
4484 context
->xstorm_st_context
.tx_bd_page_base_hi
=
4485 U64_HI(fp
->tx_desc_mapping
);
4486 context
->xstorm_st_context
.tx_bd_page_base_lo
=
4487 U64_LO(fp
->tx_desc_mapping
);
4488 context
->xstorm_st_context
.db_data_addr_hi
=
4489 U64_HI(fp
->tx_prods_mapping
);
4490 context
->xstorm_st_context
.db_data_addr_lo
=
4491 U64_LO(fp
->tx_prods_mapping
);
4492 context
->xstorm_st_context
.statistics_data
= (BP_CL_ID(bp
) |
4493 XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE
);
4495 context
->ustorm_st_context
.common
.sb_index_numbers
=
4496 BNX2X_RX_SB_INDEX_NUM
;
4497 context
->ustorm_st_context
.common
.clientId
= FP_CL_ID(fp
);
4498 context
->ustorm_st_context
.common
.status_block_id
= sb_id
;
4499 context
->ustorm_st_context
.common
.flags
=
4500 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT
;
4501 context
->ustorm_st_context
.common
.mc_alignment_size
=
4502 BCM_RX_ETH_PAYLOAD_ALIGN
;
4503 context
->ustorm_st_context
.common
.bd_buff_size
=
4505 context
->ustorm_st_context
.common
.bd_page_base_hi
=
4506 U64_HI(fp
->rx_desc_mapping
);
4507 context
->ustorm_st_context
.common
.bd_page_base_lo
=
4508 U64_LO(fp
->rx_desc_mapping
);
4509 if (!fp
->disable_tpa
) {
4510 context
->ustorm_st_context
.common
.flags
|=
4511 (USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA
|
4512 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_SGE_RING
);
4513 context
->ustorm_st_context
.common
.sge_buff_size
=
4514 (u16
)(BCM_PAGE_SIZE
*PAGES_PER_SGE
);
4515 context
->ustorm_st_context
.common
.sge_page_base_hi
=
4516 U64_HI(fp
->rx_sge_mapping
);
4517 context
->ustorm_st_context
.common
.sge_page_base_lo
=
4518 U64_LO(fp
->rx_sge_mapping
);
4521 context
->cstorm_st_context
.sb_index_number
=
4522 C_SB_ETH_TX_CQ_INDEX
;
4523 context
->cstorm_st_context
.status_block_id
= sb_id
;
4525 context
->xstorm_ag_context
.cdu_reserved
=
4526 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp
, i
),
4527 CDU_REGION_NUMBER_XCM_AG
,
4528 ETH_CONNECTION_TYPE
);
4529 context
->ustorm_ag_context
.cdu_usage
=
4530 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp
, i
),
4531 CDU_REGION_NUMBER_UCM_AG
,
4532 ETH_CONNECTION_TYPE
);
static void bnx2x_init_ind_table(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	int i;

	if (!is_multi(bp))
		return;

	DP(NETIF_MSG_IFUP, "Initializing indirection table\n");
	for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
		REG_WR8(bp, BAR_TSTRORM_INTMEM +
			TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
			BP_CL_ID(bp) + (i % bp->num_queues));
}
4551 static void bnx2x_set_client_config(struct bnx2x
*bp
)
4553 struct tstorm_eth_client_config tstorm_client
= {0};
4554 int port
= BP_PORT(bp
);
4557 tstorm_client
.mtu
= bp
->dev
->mtu
;
4558 tstorm_client
.statistics_counter_id
= BP_CL_ID(bp
);
4559 tstorm_client
.config_flags
=
4560 TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE
;
4562 if (bp
->rx_mode
&& bp
->vlgrp
&& (bp
->flags
& HW_VLAN_RX_FLAG
)) {
4563 tstorm_client
.config_flags
|=
4564 TSTORM_ETH_CLIENT_CONFIG_VLAN_REMOVAL_ENABLE
;
4565 DP(NETIF_MSG_IFUP
, "vlan removal enabled\n");
4569 if (bp
->flags
& TPA_ENABLE_FLAG
) {
4570 tstorm_client
.max_sges_for_packet
=
4571 SGE_PAGE_ALIGN(tstorm_client
.mtu
) >> SGE_PAGE_SHIFT
;
4572 tstorm_client
.max_sges_for_packet
=
4573 ((tstorm_client
.max_sges_for_packet
+
4574 PAGES_PER_SGE
- 1) & (~(PAGES_PER_SGE
- 1))) >>
4575 PAGES_PER_SGE_SHIFT
;
4577 tstorm_client
.config_flags
|=
4578 TSTORM_ETH_CLIENT_CONFIG_ENABLE_SGE_RING
;
4581 for_each_queue(bp
, i
) {
4582 REG_WR(bp
, BAR_TSTRORM_INTMEM
+
4583 TSTORM_CLIENT_CONFIG_OFFSET(port
, bp
->fp
[i
].cl_id
),
4584 ((u32
*)&tstorm_client
)[0]);
4585 REG_WR(bp
, BAR_TSTRORM_INTMEM
+
4586 TSTORM_CLIENT_CONFIG_OFFSET(port
, bp
->fp
[i
].cl_id
) + 4,
4587 ((u32
*)&tstorm_client
)[1]);
4590 DP(BNX2X_MSG_OFF
, "tstorm_client: 0x%08x 0x%08x\n",
4591 ((u32
*)&tstorm_client
)[0], ((u32
*)&tstorm_client
)[1]);
static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
{
	struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
	int mode = bp->rx_mode;
	int mask = (1 << BP_L_ID(bp));
	int func = BP_FUNC(bp);
	int i;

	DP(NETIF_MSG_IFUP, "rx mode %d  mask 0x%x\n", mode, mask);

	switch (mode) {
	case BNX2X_RX_MODE_NONE: /* no Rx */
		tstorm_mac_filter.ucast_drop_all = mask;
		tstorm_mac_filter.mcast_drop_all = mask;
		tstorm_mac_filter.bcast_drop_all = mask;
		break;
	case BNX2X_RX_MODE_NORMAL:
		tstorm_mac_filter.bcast_accept_all = mask;
		break;
	case BNX2X_RX_MODE_ALLMULTI:
		tstorm_mac_filter.mcast_accept_all = mask;
		tstorm_mac_filter.bcast_accept_all = mask;
		break;
	case BNX2X_RX_MODE_PROMISC:
		tstorm_mac_filter.ucast_accept_all = mask;
		tstorm_mac_filter.mcast_accept_all = mask;
		tstorm_mac_filter.bcast_accept_all = mask;
		break;
	default:
		BNX2X_ERR("BAD rx mode (%d)\n", mode);
		break;
	}

	for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
		REG_WR(bp, BAR_TSTRORM_INTMEM +
		       TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
		       ((u32 *)&tstorm_mac_filter)[i]);

/*		DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
		   ((u32 *)&tstorm_mac_filter)[i]); */
	}

	if (mode != BNX2X_RX_MODE_NONE)
		bnx2x_set_client_config(bp);
}
4640 static void bnx2x_init_internal_common(struct bnx2x
*bp
)
4644 if (bp
->flags
& TPA_ENABLE_FLAG
) {
4645 struct tstorm_eth_tpa_exist tpa
= {0};
4649 REG_WR(bp
, BAR_TSTRORM_INTMEM
+ TSTORM_TPA_EXIST_OFFSET
,
4651 REG_WR(bp
, BAR_TSTRORM_INTMEM
+ TSTORM_TPA_EXIST_OFFSET
+ 4,
4655 /* Zero this manually as its initialization is
4656 currently missing in the initTool */
4657 for (i
= 0; i
< (USTORM_AGG_DATA_SIZE
>> 2); i
++)
4658 REG_WR(bp
, BAR_USTRORM_INTMEM
+
4659 USTORM_AGG_DATA_OFFSET
+ i
* 4, 0);
4662 static void bnx2x_init_internal_port(struct bnx2x
*bp
)
4664 int port
= BP_PORT(bp
);
4666 REG_WR(bp
, BAR_USTRORM_INTMEM
+ USTORM_HC_BTR_OFFSET(port
), BNX2X_BTR
);
4667 REG_WR(bp
, BAR_CSTRORM_INTMEM
+ CSTORM_HC_BTR_OFFSET(port
), BNX2X_BTR
);
4668 REG_WR(bp
, BAR_TSTRORM_INTMEM
+ TSTORM_HC_BTR_OFFSET(port
), BNX2X_BTR
);
4669 REG_WR(bp
, BAR_XSTRORM_INTMEM
+ XSTORM_HC_BTR_OFFSET(port
), BNX2X_BTR
);
4672 static void bnx2x_init_internal_func(struct bnx2x
*bp
)
4674 struct tstorm_eth_function_common_config tstorm_config
= {0};
4675 struct stats_indication_flags stats_flags
= {0};
4676 int port
= BP_PORT(bp
);
4677 int func
= BP_FUNC(bp
);
4682 tstorm_config
.config_flags
= MULTI_FLAGS
;
4683 tstorm_config
.rss_result_mask
= MULTI_MASK
;
4686 tstorm_config
.leading_client_id
= BP_L_ID(bp
);
4688 REG_WR(bp
, BAR_TSTRORM_INTMEM
+
4689 TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func
),
4690 (*(u32
*)&tstorm_config
));
4692 bp
->rx_mode
= BNX2X_RX_MODE_NONE
; /* no rx until link is up */
4693 bnx2x_set_storm_rx_mode(bp
);
4695 /* reset xstorm per client statistics */
4696 for (i
= 0; i
< sizeof(struct xstorm_per_client_stats
) / 4; i
++) {
4697 REG_WR(bp
, BAR_XSTRORM_INTMEM
+
4698 XSTORM_PER_COUNTER_ID_STATS_OFFSET(port
, BP_CL_ID(bp
)) +
4701 /* reset tstorm per client statistics */
4702 for (i
= 0; i
< sizeof(struct tstorm_per_client_stats
) / 4; i
++) {
4703 REG_WR(bp
, BAR_TSTRORM_INTMEM
+
4704 TSTORM_PER_COUNTER_ID_STATS_OFFSET(port
, BP_CL_ID(bp
)) +
4708 /* Init statistics related context */
4709 stats_flags
.collect_eth
= 1;
4711 REG_WR(bp
, BAR_XSTRORM_INTMEM
+ XSTORM_STATS_FLAGS_OFFSET(func
),
4712 ((u32
*)&stats_flags
)[0]);
4713 REG_WR(bp
, BAR_XSTRORM_INTMEM
+ XSTORM_STATS_FLAGS_OFFSET(func
) + 4,
4714 ((u32
*)&stats_flags
)[1]);
4716 REG_WR(bp
, BAR_TSTRORM_INTMEM
+ TSTORM_STATS_FLAGS_OFFSET(func
),
4717 ((u32
*)&stats_flags
)[0]);
4718 REG_WR(bp
, BAR_TSTRORM_INTMEM
+ TSTORM_STATS_FLAGS_OFFSET(func
) + 4,
4719 ((u32
*)&stats_flags
)[1]);
4721 REG_WR(bp
, BAR_CSTRORM_INTMEM
+ CSTORM_STATS_FLAGS_OFFSET(func
),
4722 ((u32
*)&stats_flags
)[0]);
4723 REG_WR(bp
, BAR_CSTRORM_INTMEM
+ CSTORM_STATS_FLAGS_OFFSET(func
) + 4,
4724 ((u32
*)&stats_flags
)[1]);
4726 REG_WR(bp
, BAR_XSTRORM_INTMEM
+
4727 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func
),
4728 U64_LO(bnx2x_sp_mapping(bp
, fw_stats
)));
4729 REG_WR(bp
, BAR_XSTRORM_INTMEM
+
4730 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func
) + 4,
4731 U64_HI(bnx2x_sp_mapping(bp
, fw_stats
)));
4733 REG_WR(bp
, BAR_TSTRORM_INTMEM
+
4734 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func
),
4735 U64_LO(bnx2x_sp_mapping(bp
, fw_stats
)));
4736 REG_WR(bp
, BAR_TSTRORM_INTMEM
+
4737 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func
) + 4,
4738 U64_HI(bnx2x_sp_mapping(bp
, fw_stats
)));
4740 if (CHIP_IS_E1H(bp
)) {
4741 REG_WR8(bp
, BAR_XSTRORM_INTMEM
+ XSTORM_FUNCTION_MODE_OFFSET
,
4743 REG_WR8(bp
, BAR_TSTRORM_INTMEM
+ TSTORM_FUNCTION_MODE_OFFSET
,
4745 REG_WR8(bp
, BAR_CSTRORM_INTMEM
+ CSTORM_FUNCTION_MODE_OFFSET
,
4747 REG_WR8(bp
, BAR_USTRORM_INTMEM
+ USTORM_FUNCTION_MODE_OFFSET
,
4750 REG_WR16(bp
, BAR_XSTRORM_INTMEM
+ XSTORM_E1HOV_OFFSET(func
),
4754 /* Init CQ ring mapping and aggregation size, the FW limit is 8 frags */
4756 min((u32
)(min((u32
)8, (u32
)MAX_SKB_FRAGS
) *
4757 SGE_PAGE_SIZE
* PAGES_PER_SGE
),
4759 for_each_queue(bp
, i
) {
4760 struct bnx2x_fastpath
*fp
= &bp
->fp
[i
];
4762 REG_WR(bp
, BAR_USTRORM_INTMEM
+
4763 USTORM_CQE_PAGE_BASE_OFFSET(port
, FP_CL_ID(fp
)),
4764 U64_LO(fp
->rx_comp_mapping
));
4765 REG_WR(bp
, BAR_USTRORM_INTMEM
+
4766 USTORM_CQE_PAGE_BASE_OFFSET(port
, FP_CL_ID(fp
)) + 4,
4767 U64_HI(fp
->rx_comp_mapping
));
4769 REG_WR16(bp
, BAR_USTRORM_INTMEM
+
4770 USTORM_MAX_AGG_SIZE_OFFSET(port
, FP_CL_ID(fp
)),
4775 static void bnx2x_init_internal(struct bnx2x
*bp
, u32 load_code
)
4777 switch (load_code
) {
4778 case FW_MSG_CODE_DRV_LOAD_COMMON
:
4779 bnx2x_init_internal_common(bp
);
4782 case FW_MSG_CODE_DRV_LOAD_PORT
:
4783 bnx2x_init_internal_port(bp
);
4786 case FW_MSG_CODE_DRV_LOAD_FUNCTION
:
4787 bnx2x_init_internal_func(bp
);
4791 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code
);
static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
{
	int i;

	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		fp->state = BNX2X_FP_STATE_CLOSED;

		fp->cl_id = BP_L_ID(bp) + i;
		fp->sb_id = fp->cl_id;
		DP(NETIF_MSG_IFUP,
		   "bnx2x_init_sb(%p,%p) index %d  cl_id %d  sb %d\n",
		   bp, fp->status_blk, i, FP_CL_ID(fp), FP_SB_ID(fp));
		bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping,
			      FP_SB_ID(fp));
		bnx2x_update_fpsb_idx(fp);
	}

	bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping,
			  DEF_SB_ID);
	bnx2x_update_dsb_idx(bp);
	bnx2x_update_coalesce(bp);
	bnx2x_init_rx_rings(bp);
	bnx2x_init_tx_ring(bp);
	bnx2x_init_sp_ring(bp);
	bnx2x_init_context(bp);
	bnx2x_init_internal(bp, load_code);
	bnx2x_init_ind_table(bp);
	bnx2x_stats_init(bp);

	/* At this point, we are ready for interrupts */
	atomic_set(&bp->intr_sem, 0);

	/* flush all before enabling interrupts */
	mb();
	mmiowb();

	bnx2x_int_enable(bp);
}
/* end of nic init */

/*
 * gzip service functions
 */

static int bnx2x_gunzip_init(struct bnx2x *bp)
{
	bp->gunzip_buf = pci_alloc_consistent(bp->pdev, FW_BUF_SIZE,
					      &bp->gunzip_mapping);
	if (bp->gunzip_buf == NULL)
		goto gunzip_nomem1;

	bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
	if (bp->strm == NULL)
		goto gunzip_nomem2;

	bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
				      GFP_KERNEL);
	if (bp->strm->workspace == NULL)
		goto gunzip_nomem3;

	return 0;

gunzip_nomem3:
	kfree(bp->strm);
	bp->strm = NULL;

gunzip_nomem2:
	pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
			    bp->gunzip_mapping);
	bp->gunzip_buf = NULL;

gunzip_nomem1:
	printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for"
	       " un-compression\n", bp->dev->name);
	return -ENOMEM;
}

static void bnx2x_gunzip_end(struct bnx2x *bp)
{
	kfree(bp->strm->workspace);

	kfree(bp->strm);
	bp->strm = NULL;

	if (bp->gunzip_buf) {
		pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
				    bp->gunzip_mapping);
		bp->gunzip_buf = NULL;
	}
}

static int bnx2x_gunzip(struct bnx2x *bp, u8 *zbuf, int len)
{
	int n, rc;

	/* check gzip header */
	if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED))
		return -EINVAL;

	n = 10;

#define FNAME				0x8

	if (zbuf[3] & FNAME)
		while ((zbuf[n++] != 0) && (n < len));

	bp->strm->next_in = zbuf + n;
	bp->strm->avail_in = len - n;
	bp->strm->next_out = bp->gunzip_buf;
	bp->strm->avail_out = FW_BUF_SIZE;

	rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
	if (rc != Z_OK)
		return rc;

	rc = zlib_inflate(bp->strm, Z_FINISH);
	if ((rc != Z_OK) && (rc != Z_STREAM_END))
		printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
		       bp->dev->name, bp->strm->msg);

	bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
	if (bp->gunzip_outlen & 0x3)
		printk(KERN_ERR PFX "%s: Firmware decompression error:"
				    " gunzip_outlen (%d) not aligned\n",
		       bp->dev->name, bp->gunzip_outlen);
	bp->gunzip_outlen >>= 2;

	zlib_inflateEnd(bp->strm);

	if (rc == Z_STREAM_END)
		return 0;

	return rc;
}
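/*
 * Illustrative sketch (not part of the driver): the three gzip helpers above
 * are used together around firmware download - allocate the DMA buffer and
 * zlib state once, inflate each compressed blob into bp->gunzip_buf
 * (bnx2x_gunzip() reports the output length in 32-bit words via
 * bp->gunzip_outlen), then free everything.  The blob name below is
 * hypothetical:
 */
#if 0	/* example only, never compiled */
static int bnx2x_load_one_blob(struct bnx2x *bp, u8 *zblob, int zlen)
{
	int rc;

	rc = bnx2x_gunzip_init(bp);
	if (rc)
		return rc;

	rc = bnx2x_gunzip(bp, zblob, zlen);
	if (rc == 0) {
		/* bp->gunzip_buf now holds bp->gunzip_outlen u32 words,
		 * ready to be written/DMAed to the chip */
	}

	bnx2x_gunzip_end(bp);
	return rc;
}
#endif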
/* nic load/unload */

/*
 * General service functions
 */

/* send a NIG loopback debug packet */
static void bnx2x_lb_pckt(struct bnx2x *bp)
{
	u32 wb_write[3];

	/* Ethernet source and destination addresses */
	wb_write[0] = 0x55555555;
	wb_write[1] = 0x55555555;
	wb_write[2] = 0x20;		/* SOP */
	REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);

	/* NON-IP protocol */
	wb_write[0] = 0x09000000;
	wb_write[1] = 0x55555555;
	wb_write[2] = 0x10;		/* EOP, eop_bvalid = 0 */
	REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
}
4959 /* some of the internal memories
4960 * are not directly readable from the driver
4961 * to test them we send debug packets
4963 static int bnx2x_int_mem_test(struct bnx2x
*bp
)
4969 if (CHIP_REV_IS_FPGA(bp
))
4971 else if (CHIP_REV_IS_EMUL(bp
))
4976 DP(NETIF_MSG_HW
, "start part1\n");
4978 /* Disable inputs of parser neighbor blocks */
4979 REG_WR(bp
, TSDM_REG_ENABLE_IN1
, 0x0);
4980 REG_WR(bp
, TCM_REG_PRS_IFEN
, 0x0);
4981 REG_WR(bp
, CFC_REG_DEBUG0
, 0x1);
4982 REG_WR(bp
, NIG_REG_PRS_REQ_IN_EN
, 0x0);
4984 /* Write 0 to parser credits for CFC search request */
4985 REG_WR(bp
, PRS_REG_CFC_SEARCH_INITIAL_CREDIT
, 0x0);
4987 /* send Ethernet packet */
4990 /* TODO do i reset NIG statistic? */
4991 /* Wait until NIG register shows 1 packet of size 0x10 */
4992 count
= 1000 * factor
;
4995 bnx2x_read_dmae(bp
, NIG_REG_STAT2_BRB_OCTET
, 2);
4996 val
= *bnx2x_sp(bp
, wb_data
[0]);
5004 BNX2X_ERR("NIG timeout val = 0x%x\n", val
);
5008 /* Wait until PRS register shows 1 packet */
5009 count
= 1000 * factor
;
5011 val
= REG_RD(bp
, PRS_REG_NUM_OF_PACKETS
);
5019 BNX2X_ERR("PRS timeout val = 0x%x\n", val
);
5023 /* Reset and init BRB, PRS */
5024 REG_WR(bp
, GRCBASE_MISC
+ MISC_REGISTERS_RESET_REG_1_CLEAR
, 0x03);
5026 REG_WR(bp
, GRCBASE_MISC
+ MISC_REGISTERS_RESET_REG_1_SET
, 0x03);
5028 bnx2x_init_block(bp
, BRB1_COMMON_START
, BRB1_COMMON_END
);
5029 bnx2x_init_block(bp
, PRS_COMMON_START
, PRS_COMMON_END
);
5031 DP(NETIF_MSG_HW
, "part2\n");
5033 /* Disable inputs of parser neighbor blocks */
5034 REG_WR(bp
, TSDM_REG_ENABLE_IN1
, 0x0);
5035 REG_WR(bp
, TCM_REG_PRS_IFEN
, 0x0);
5036 REG_WR(bp
, CFC_REG_DEBUG0
, 0x1);
5037 REG_WR(bp
, NIG_REG_PRS_REQ_IN_EN
, 0x0);
5039 /* Write 0 to parser credits for CFC search request */
5040 REG_WR(bp
, PRS_REG_CFC_SEARCH_INITIAL_CREDIT
, 0x0);
5042 /* send 10 Ethernet packets */
5043 for (i
= 0; i
< 10; i
++)
5046 /* Wait until NIG register shows 10 + 1
5047 packets of size 11*0x10 = 0xb0 */
5048 count
= 1000 * factor
;
5051 bnx2x_read_dmae(bp
, NIG_REG_STAT2_BRB_OCTET
, 2);
5052 val
= *bnx2x_sp(bp
, wb_data
[0]);
5060 BNX2X_ERR("NIG timeout val = 0x%x\n", val
);
5064 /* Wait until PRS register shows 2 packets */
5065 val
= REG_RD(bp
, PRS_REG_NUM_OF_PACKETS
);
5067 BNX2X_ERR("PRS timeout val = 0x%x\n", val
);
5069 /* Write 1 to parser credits for CFC search request */
5070 REG_WR(bp
, PRS_REG_CFC_SEARCH_INITIAL_CREDIT
, 0x1);
5072 /* Wait until PRS register shows 3 packets */
5073 msleep(10 * factor
);
5074 /* Wait until NIG register shows 1 packet of size 0x10 */
5075 val
= REG_RD(bp
, PRS_REG_NUM_OF_PACKETS
);
5077 BNX2X_ERR("PRS timeout val = 0x%x\n", val
);
5079 /* clear NIG EOP FIFO */
5080 for (i
= 0; i
< 11; i
++)
5081 REG_RD(bp
, NIG_REG_INGRESS_EOP_LB_FIFO
);
5082 val
= REG_RD(bp
, NIG_REG_INGRESS_EOP_LB_EMPTY
);
5084 BNX2X_ERR("clear of NIG failed\n");
5088 /* Reset and init BRB, PRS, NIG */
5089 REG_WR(bp
, GRCBASE_MISC
+ MISC_REGISTERS_RESET_REG_1_CLEAR
, 0x03);
5091 REG_WR(bp
, GRCBASE_MISC
+ MISC_REGISTERS_RESET_REG_1_SET
, 0x03);
5093 bnx2x_init_block(bp
, BRB1_COMMON_START
, BRB1_COMMON_END
);
5094 bnx2x_init_block(bp
, PRS_COMMON_START
, PRS_COMMON_END
);
5097 REG_WR(bp
, PRS_REG_NIC_MODE
, 1);
5100 /* Enable inputs of parser neighbor blocks */
5101 REG_WR(bp
, TSDM_REG_ENABLE_IN1
, 0x7fffffff);
5102 REG_WR(bp
, TCM_REG_PRS_IFEN
, 0x1);
5103 REG_WR(bp
, CFC_REG_DEBUG0
, 0x0);
5104 REG_WR(bp
, NIG_REG_PRS_REQ_IN_EN
, 0x1);
5106 DP(NETIF_MSG_HW
, "done\n");
5111 static void enable_blocks_attention(struct bnx2x
*bp
)
5113 REG_WR(bp
, PXP_REG_PXP_INT_MASK_0
, 0);
5114 REG_WR(bp
, PXP_REG_PXP_INT_MASK_1
, 0);
5115 REG_WR(bp
, DORQ_REG_DORQ_INT_MASK
, 0);
5116 REG_WR(bp
, CFC_REG_CFC_INT_MASK
, 0);
5117 REG_WR(bp
, QM_REG_QM_INT_MASK
, 0);
5118 REG_WR(bp
, TM_REG_TM_INT_MASK
, 0);
5119 REG_WR(bp
, XSDM_REG_XSDM_INT_MASK_0
, 0);
5120 REG_WR(bp
, XSDM_REG_XSDM_INT_MASK_1
, 0);
5121 REG_WR(bp
, XCM_REG_XCM_INT_MASK
, 0);
5122 /* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
5123 /* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
5124 REG_WR(bp
, USDM_REG_USDM_INT_MASK_0
, 0);
5125 REG_WR(bp
, USDM_REG_USDM_INT_MASK_1
, 0);
5126 REG_WR(bp
, UCM_REG_UCM_INT_MASK
, 0);
5127 /* REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
5128 /* REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
5129 REG_WR(bp
, GRCBASE_UPB
+ PB_REG_PB_INT_MASK
, 0);
5130 REG_WR(bp
, CSDM_REG_CSDM_INT_MASK_0
, 0);
5131 REG_WR(bp
, CSDM_REG_CSDM_INT_MASK_1
, 0);
5132 REG_WR(bp
, CCM_REG_CCM_INT_MASK
, 0);
5133 /* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
5134 /* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
5135 if (CHIP_REV_IS_FPGA(bp
))
5136 REG_WR(bp
, PXP2_REG_PXP2_INT_MASK_0
, 0x580000);
5138 REG_WR(bp
, PXP2_REG_PXP2_INT_MASK_0
, 0x480000);
5139 REG_WR(bp
, TSDM_REG_TSDM_INT_MASK_0
, 0);
5140 REG_WR(bp
, TSDM_REG_TSDM_INT_MASK_1
, 0);
5141 REG_WR(bp
, TCM_REG_TCM_INT_MASK
, 0);
5142 /* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
5143 /* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
5144 REG_WR(bp
, CDU_REG_CDU_INT_MASK
, 0);
5145 REG_WR(bp
, DMAE_REG_DMAE_INT_MASK
, 0);
5146 /* REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
5147 REG_WR(bp
, PBF_REG_PBF_INT_MASK
, 0X18); /* bit 3,4 masked */
static void bnx2x_reset_common(struct bnx2x *bp)
{
	/* reset_common */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
	       0xd3ffff7f);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
}
5159 static int bnx2x_init_common(struct bnx2x
*bp
)
5163 DP(BNX2X_MSG_MCP
, "starting common init func %d\n", BP_FUNC(bp
));
5165 bnx2x_reset_common(bp
);
5166 REG_WR(bp
, GRCBASE_MISC
+ MISC_REGISTERS_RESET_REG_1_SET
, 0xffffffff);
5167 REG_WR(bp
, GRCBASE_MISC
+ MISC_REGISTERS_RESET_REG_2_SET
, 0xfffc);
5169 bnx2x_init_block(bp
, MISC_COMMON_START
, MISC_COMMON_END
);
5170 if (CHIP_IS_E1H(bp
))
5171 REG_WR(bp
, MISC_REG_E1HMF_MODE
, IS_E1HMF(bp
));
5173 REG_WR(bp
, MISC_REG_LCPLL_CTRL_REG_2
, 0x100);
5175 REG_WR(bp
, MISC_REG_LCPLL_CTRL_REG_2
, 0x0);
5177 bnx2x_init_block(bp
, PXP_COMMON_START
, PXP_COMMON_END
);
5178 if (CHIP_IS_E1(bp
)) {
5179 /* enable HW interrupt from PXP on USDM overflow
5180 bit 16 on INT_MASK_0 */
5181 REG_WR(bp
, PXP_REG_PXP_INT_MASK_0
, 0);
5184 bnx2x_init_block(bp
, PXP2_COMMON_START
, PXP2_COMMON_END
);
5188 REG_WR(bp
, PXP2_REG_RQ_QM_ENDIAN_M
, 1);
5189 REG_WR(bp
, PXP2_REG_RQ_TM_ENDIAN_M
, 1);
5190 REG_WR(bp
, PXP2_REG_RQ_SRC_ENDIAN_M
, 1);
5191 REG_WR(bp
, PXP2_REG_RQ_CDU_ENDIAN_M
, 1);
5192 REG_WR(bp
, PXP2_REG_RQ_DBG_ENDIAN_M
, 1);
5194 /* REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
5195 REG_WR(bp
, PXP2_REG_RD_QM_SWAP_MODE
, 1);
5196 REG_WR(bp
, PXP2_REG_RD_TM_SWAP_MODE
, 1);
5197 REG_WR(bp
, PXP2_REG_RD_SRC_SWAP_MODE
, 1);
5198 REG_WR(bp
, PXP2_REG_RD_CDURD_SWAP_MODE
, 1);
5201 REG_WR(bp
, PXP2_REG_RQ_CDU_P_SIZE
, 2);
5203 REG_WR(bp
, PXP2_REG_RQ_TM_P_SIZE
, 5);
5204 REG_WR(bp
, PXP2_REG_RQ_QM_P_SIZE
, 5);
5205 REG_WR(bp
, PXP2_REG_RQ_SRC_P_SIZE
, 5);
5208 if (CHIP_REV_IS_FPGA(bp
) && CHIP_IS_E1H(bp
))
5209 REG_WR(bp
, PXP2_REG_PGL_TAGS_LIMIT
, 0x1);
5211 /* let the HW do it's magic ... */
5213 /* finish PXP init */
5214 val
= REG_RD(bp
, PXP2_REG_RQ_CFG_DONE
);
5216 BNX2X_ERR("PXP2 CFG failed\n");
5219 val
= REG_RD(bp
, PXP2_REG_RD_INIT_DONE
);
5221 BNX2X_ERR("PXP2 RD_INIT failed\n");
5225 REG_WR(bp
, PXP2_REG_RQ_DISABLE_INPUTS
, 0);
5226 REG_WR(bp
, PXP2_REG_RD_DISABLE_INPUTS
, 0);
5228 bnx2x_init_block(bp
, DMAE_COMMON_START
, DMAE_COMMON_END
);
5230 /* clean the DMAE memory */
5232 bnx2x_init_fill(bp
, TSEM_REG_PRAM
, 0, 8);
5234 bnx2x_init_block(bp
, TCM_COMMON_START
, TCM_COMMON_END
);
5235 bnx2x_init_block(bp
, UCM_COMMON_START
, UCM_COMMON_END
);
5236 bnx2x_init_block(bp
, CCM_COMMON_START
, CCM_COMMON_END
);
5237 bnx2x_init_block(bp
, XCM_COMMON_START
, XCM_COMMON_END
);
5239 bnx2x_read_dmae(bp
, XSEM_REG_PASSIVE_BUFFER
, 3);
5240 bnx2x_read_dmae(bp
, CSEM_REG_PASSIVE_BUFFER
, 3);
5241 bnx2x_read_dmae(bp
, TSEM_REG_PASSIVE_BUFFER
, 3);
5242 bnx2x_read_dmae(bp
, USEM_REG_PASSIVE_BUFFER
, 3);
5244 bnx2x_init_block(bp
, QM_COMMON_START
, QM_COMMON_END
);
5245 /* soft reset pulse */
5246 REG_WR(bp
, QM_REG_SOFT_RESET
, 1);
5247 REG_WR(bp
, QM_REG_SOFT_RESET
, 0);
5250 bnx2x_init_block(bp
, TIMERS_COMMON_START
, TIMERS_COMMON_END
);
5253 bnx2x_init_block(bp
, DQ_COMMON_START
, DQ_COMMON_END
);
5254 REG_WR(bp
, DORQ_REG_DPM_CID_OFST
, BCM_PAGE_SHIFT
);
5255 if (!CHIP_REV_IS_SLOW(bp
)) {
5256 /* enable hw interrupt from doorbell Q */
5257 REG_WR(bp
, DORQ_REG_DORQ_INT_MASK
, 0);
5260 bnx2x_init_block(bp
, BRB1_COMMON_START
, BRB1_COMMON_END
);
5261 if (CHIP_REV_IS_SLOW(bp
)) {
5262 /* fix for emulation and FPGA for no pause */
5263 REG_WR(bp
, BRB1_REG_PAUSE_HIGH_THRESHOLD_0
, 513);
5264 REG_WR(bp
, BRB1_REG_PAUSE_HIGH_THRESHOLD_1
, 513);
5265 REG_WR(bp
, BRB1_REG_PAUSE_LOW_THRESHOLD_0
, 0);
5266 REG_WR(bp
, BRB1_REG_PAUSE_LOW_THRESHOLD_1
, 0);
5269 bnx2x_init_block(bp
, PRS_COMMON_START
, PRS_COMMON_END
);
5270 REG_WR(bp
, PRS_REG_A_PRSU_20
, 0xf);
5272 REG_WR(bp
, PRS_REG_NIC_MODE
, 1);
5273 if (CHIP_IS_E1H(bp
))
5274 REG_WR(bp
, PRS_REG_E1HOV_MODE
, IS_E1HMF(bp
));
5276 bnx2x_init_block(bp
, TSDM_COMMON_START
, TSDM_COMMON_END
);
5277 bnx2x_init_block(bp
, CSDM_COMMON_START
, CSDM_COMMON_END
);
5278 bnx2x_init_block(bp
, USDM_COMMON_START
, USDM_COMMON_END
);
5279 bnx2x_init_block(bp
, XSDM_COMMON_START
, XSDM_COMMON_END
);
5281 if (CHIP_IS_E1H(bp
)) {
5282 bnx2x_init_fill(bp
, TSTORM_INTMEM_ADDR
, 0,
5283 STORM_INTMEM_SIZE_E1H
/2);
5285 TSTORM_INTMEM_ADDR
+ STORM_INTMEM_SIZE_E1H
/2,
5286 0, STORM_INTMEM_SIZE_E1H
/2);
5287 bnx2x_init_fill(bp
, CSTORM_INTMEM_ADDR
, 0,
5288 STORM_INTMEM_SIZE_E1H
/2);
5290 CSTORM_INTMEM_ADDR
+ STORM_INTMEM_SIZE_E1H
/2,
5291 0, STORM_INTMEM_SIZE_E1H
/2);
5292 bnx2x_init_fill(bp
, XSTORM_INTMEM_ADDR
, 0,
5293 STORM_INTMEM_SIZE_E1H
/2);
5295 XSTORM_INTMEM_ADDR
+ STORM_INTMEM_SIZE_E1H
/2,
5296 0, STORM_INTMEM_SIZE_E1H
/2);
5297 bnx2x_init_fill(bp
, USTORM_INTMEM_ADDR
, 0,
5298 STORM_INTMEM_SIZE_E1H
/2);
5300 USTORM_INTMEM_ADDR
+ STORM_INTMEM_SIZE_E1H
/2,
5301 0, STORM_INTMEM_SIZE_E1H
/2);
5303 bnx2x_init_fill(bp
, TSTORM_INTMEM_ADDR
, 0,
5304 STORM_INTMEM_SIZE_E1
);
5305 bnx2x_init_fill(bp
, CSTORM_INTMEM_ADDR
, 0,
5306 STORM_INTMEM_SIZE_E1
);
5307 bnx2x_init_fill(bp
, XSTORM_INTMEM_ADDR
, 0,
5308 STORM_INTMEM_SIZE_E1
);
5309 bnx2x_init_fill(bp
, USTORM_INTMEM_ADDR
, 0,
5310 STORM_INTMEM_SIZE_E1
);
5313 bnx2x_init_block(bp
, TSEM_COMMON_START
, TSEM_COMMON_END
);
5314 bnx2x_init_block(bp
, USEM_COMMON_START
, USEM_COMMON_END
);
5315 bnx2x_init_block(bp
, CSEM_COMMON_START
, CSEM_COMMON_END
);
5316 bnx2x_init_block(bp
, XSEM_COMMON_START
, XSEM_COMMON_END
);
5319 REG_WR(bp
, GRCBASE_MISC
+ MISC_REGISTERS_RESET_REG_1_CLEAR
,
5321 REG_WR(bp
, GRCBASE_MISC
+ MISC_REGISTERS_RESET_REG_1_SET
,
5324 bnx2x_init_block(bp
, UPB_COMMON_START
, UPB_COMMON_END
);
5325 bnx2x_init_block(bp
, XPB_COMMON_START
, XPB_COMMON_END
);
5326 bnx2x_init_block(bp
, PBF_COMMON_START
, PBF_COMMON_END
);
5328 REG_WR(bp
, SRC_REG_SOFT_RST
, 1);
5329 for (i
= SRC_REG_KEYRSS0_0
; i
<= SRC_REG_KEYRSS1_9
; i
+= 4) {
5330 REG_WR(bp
, i
, 0xc0cac01a);
5331 /* TODO: replace with something meaningful */
5333 if (CHIP_IS_E1H(bp
))
5334 bnx2x_init_block(bp
, SRCH_COMMON_START
, SRCH_COMMON_END
);
5335 REG_WR(bp
, SRC_REG_SOFT_RST
, 0);
5337 if (sizeof(union cdu_context
) != 1024)
5338 /* we currently assume that a context is 1024 bytes */
5339 printk(KERN_ALERT PFX
"please adjust the size of"
5340 " cdu_context(%ld)\n", (long)sizeof(union cdu_context
));
5342 bnx2x_init_block(bp
, CDU_COMMON_START
, CDU_COMMON_END
);
5343 val
= (4 << 24) + (0 << 12) + 1024;
5344 REG_WR(bp
, CDU_REG_CDU_GLOBAL_PARAMS
, val
);
5345 if (CHIP_IS_E1(bp
)) {
5346 /* !!! fix pxp client crdit until excel update */
5347 REG_WR(bp
, CDU_REG_CDU_DEBUG
, 0x264);
5348 REG_WR(bp
, CDU_REG_CDU_DEBUG
, 0);
5351 bnx2x_init_block(bp
, CFC_COMMON_START
, CFC_COMMON_END
);
5352 REG_WR(bp
, CFC_REG_INIT_REG
, 0x7FF);
5354 bnx2x_init_block(bp
, HC_COMMON_START
, HC_COMMON_END
);
5355 bnx2x_init_block(bp
, MISC_AEU_COMMON_START
, MISC_AEU_COMMON_END
);
5357 /* PXPCS COMMON comes here */
5358 /* Reset PCIE errors for debug */
5359 REG_WR(bp
, 0x2814, 0xffffffff);
5360 REG_WR(bp
, 0x3820, 0xffffffff);
5362 /* EMAC0 COMMON comes here */
5363 /* EMAC1 COMMON comes here */
5364 /* DBU COMMON comes here */
5365 /* DBG COMMON comes here */
5367 bnx2x_init_block(bp
, NIG_COMMON_START
, NIG_COMMON_END
);
5368 if (CHIP_IS_E1H(bp
)) {
5369 REG_WR(bp
, NIG_REG_LLH_MF_MODE
, IS_E1HMF(bp
));
5370 REG_WR(bp
, NIG_REG_LLH_E1HOV_MODE
, IS_E1HMF(bp
));
5373 if (CHIP_REV_IS_SLOW(bp
))
5376 /* finish CFC init */
5377 val
= reg_poll(bp
, CFC_REG_LL_INIT_DONE
, 1, 100, 10);
5379 BNX2X_ERR("CFC LL_INIT failed\n");
5382 val
= reg_poll(bp
, CFC_REG_AC_INIT_DONE
, 1, 100, 10);
5384 BNX2X_ERR("CFC AC_INIT failed\n");
5387 val
= reg_poll(bp
, CFC_REG_CAM_INIT_DONE
, 1, 100, 10);
5389 BNX2X_ERR("CFC CAM_INIT failed\n");
5392 REG_WR(bp
, CFC_REG_DEBUG0
, 0);
5394 /* read NIG statistic
5395 to see if this is our first up since powerup */
5396 bnx2x_read_dmae(bp
, NIG_REG_STAT2_BRB_OCTET
, 2);
5397 val
= *bnx2x_sp(bp
, wb_data
[0]);
5399 /* do internal memory self test */
5400 if ((CHIP_IS_E1(bp
)) && (val
== 0) && bnx2x_int_mem_test(bp
)) {
5401 BNX2X_ERR("internal mem self test failed\n");
5405 switch (bp
->common
.board
& SHARED_HW_CFG_BOARD_TYPE_MASK
) {
5406 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1021G
:
5407 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G
:
5408 /* Fan failure is indicated by SPIO 5 */
5409 bnx2x_set_spio(bp
, MISC_REGISTERS_SPIO_5
,
5410 MISC_REGISTERS_SPIO_INPUT_HI_Z
);
5412 /* set to active low mode */
5413 val
= REG_RD(bp
, MISC_REG_SPIO_INT
);
5414 val
|= ((1 << MISC_REGISTERS_SPIO_5
) <<
5415 MISC_REGISTERS_SPIO_INT_OLD_SET_POS
);
5416 REG_WR(bp
, MISC_REG_SPIO_INT
, val
);
5418 /* enable interrupt to signal the IGU */
5419 val
= REG_RD(bp
, MISC_REG_SPIO_EVENT_EN
);
5420 val
|= (1 << MISC_REGISTERS_SPIO_5
);
5421 REG_WR(bp
, MISC_REG_SPIO_EVENT_EN
, val
);
5428 /* clear PXP2 attentions */
5429 REG_RD(bp
, PXP2_REG_PXP2_INT_STS_CLR_0
);
5431 enable_blocks_attention(bp
);
5433 if (!BP_NOMCP(bp
)) {
5434 bnx2x_acquire_phy_lock(bp
);
5435 bnx2x_common_init_phy(bp
, bp
->common
.shmem_base
);
5436 bnx2x_release_phy_lock(bp
);
5438 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
5443 static int bnx2x_init_port(struct bnx2x
*bp
)
5445 int port
= BP_PORT(bp
);
5448 DP(BNX2X_MSG_MCP
, "starting port init port %x\n", port
);
5450 REG_WR(bp
, NIG_REG_MASK_INTERRUPT_PORT0
+ port
*4, 0);
5452 /* Port PXP comes here */
5453 /* Port PXP2 comes here */
5458 wb_write
[0] = ONCHIP_ADDR1(bp
->timers_mapping
);
5459 wb_write
[1] = ONCHIP_ADDR2(bp
->timers_mapping
);
5460 REG_WR_DMAE(bp
, PXP2_REG_RQ_ONCHIP_AT
+ i
*8, wb_write
, 2);
5461 REG_WR(bp
, PXP2_REG_PSWRQ_TM0_L2P
+ func
*4, PXP_ONE_ILT(i
));
5466 wb_write
[0] = ONCHIP_ADDR1(bp
->qm_mapping
);
5467 wb_write
[1] = ONCHIP_ADDR2(bp
->qm_mapping
);
5468 REG_WR_DMAE(bp
, PXP2_REG_RQ_ONCHIP_AT
+ i
*8, wb_write
, 2);
5469 REG_WR(bp
, PXP2_REG_PSWRQ_QM0_L2P
+ func
*4, PXP_ONE_ILT(i
));
5474 wb_write
[0] = ONCHIP_ADDR1(bp
->t1_mapping
);
5475 wb_write
[1] = ONCHIP_ADDR2(bp
->t1_mapping
);
5476 REG_WR_DMAE(bp
, PXP2_REG_RQ_ONCHIP_AT
+ i
*8, wb_write
, 2);
5477 REG_WR(bp
, PXP2_REG_PSWRQ_SRC0_L2P
+ func
*4, PXP_ONE_ILT(i
));
5479 /* Port CMs come here */
5481 /* Port QM comes here */
5483 REG_WR(bp
, TM_REG_LIN0_SCAN_TIME
+ func
*4, 1024/64*20);
5484 REG_WR(bp
, TM_REG_LIN0_MAX_ACTIVE_CID
+ func
*4, 31);
5486 bnx2x_init_block(bp
, func
? TIMERS_PORT1_START
: TIMERS_PORT0_START
,
5487 func
? TIMERS_PORT1_END
: TIMERS_PORT0_END
);
5489 /* Port DQ comes here */
5490 /* Port BRB1 comes here */
5491 /* Port PRS comes here */
5492 /* Port TSDM comes here */
5493 /* Port CSDM comes here */
5494 /* Port USDM comes here */
5495 /* Port XSDM comes here */
5496 bnx2x_init_block(bp
, port
? TSEM_PORT1_START
: TSEM_PORT0_START
,
5497 port
? TSEM_PORT1_END
: TSEM_PORT0_END
);
5498 bnx2x_init_block(bp
, port
? USEM_PORT1_START
: USEM_PORT0_START
,
5499 port
? USEM_PORT1_END
: USEM_PORT0_END
);
5500 bnx2x_init_block(bp
, port
? CSEM_PORT1_START
: CSEM_PORT0_START
,
5501 port
? CSEM_PORT1_END
: CSEM_PORT0_END
);
5502 bnx2x_init_block(bp
, port
? XSEM_PORT1_START
: XSEM_PORT0_START
,
5503 port
? XSEM_PORT1_END
: XSEM_PORT0_END
);
5504 /* Port UPB comes here */
5505 /* Port XPB comes here */
5507 bnx2x_init_block(bp
, port
? PBF_PORT1_START
: PBF_PORT0_START
,
5508 port
? PBF_PORT1_END
: PBF_PORT0_END
);
5510 /* configure PBF to work without PAUSE mtu 9000 */
5511 REG_WR(bp
, PBF_REG_P0_PAUSE_ENABLE
+ port
*4, 0);
5513 /* update threshold */
5514 REG_WR(bp
, PBF_REG_P0_ARB_THRSH
+ port
*4, (9040/16));
5515 /* update init credit */
5516 REG_WR(bp
, PBF_REG_P0_INIT_CRD
+ port
*4, (9040/16) + 553 - 22);
5519 REG_WR(bp
, PBF_REG_INIT_P0
+ port
*4, 1);
5521 REG_WR(bp
, PBF_REG_INIT_P0
+ port
*4, 0);
5524 /* tell the searcher where the T2 table is */
5525 REG_WR(bp
, SRC_REG_COUNTFREE0
+ func
*4, 16*1024/64);
5527 wb_write
[0] = U64_LO(bp
->t2_mapping
);
5528 wb_write
[1] = U64_HI(bp
->t2_mapping
);
5529 REG_WR_DMAE(bp
, SRC_REG_FIRSTFREE0
+ func
*4, wb_write
, 2);
5530 wb_write
[0] = U64_LO((u64
)bp
->t2_mapping
+ 16*1024 - 64);
5531 wb_write
[1] = U64_HI((u64
)bp
->t2_mapping
+ 16*1024 - 64);
5532 REG_WR_DMAE(bp
, SRC_REG_LASTFREE0
+ func
*4, wb_write
, 2);
5534 REG_WR(bp
, SRC_REG_NUMBER_HASH_BITS0
+ func
*4, 10);
5535 /* Port SRCH comes here */
5537 /* Port CDU comes here */
5538 /* Port CFC comes here */
5540 if (CHIP_IS_E1(bp
)) {
5541 REG_WR(bp
, HC_REG_LEADING_EDGE_0
+ port
*8, 0);
5542 REG_WR(bp
, HC_REG_TRAILING_EDGE_0
+ port
*8, 0);
5544 bnx2x_init_block(bp
, port
? HC_PORT1_START
: HC_PORT0_START
,
5545 port
? HC_PORT1_END
: HC_PORT0_END
);
5547 bnx2x_init_block(bp
, port
? MISC_AEU_PORT1_START
:
5548 MISC_AEU_PORT0_START
,
5549 port
? MISC_AEU_PORT1_END
: MISC_AEU_PORT0_END
);
5550 /* init aeu_mask_attn_func_0/1:
5551 * - SF mode: bits 3-7 are masked. only bits 0-2 are in use
5552 * - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
5553 * bits 4-7 are used for "per vn group attention" */
5554 REG_WR(bp
, MISC_REG_AEU_MASK_ATTN_FUNC_0
+ port
*4,
5555 (IS_E1HMF(bp
) ? 0xF7 : 0x7));
5557 /* Port PXPCS comes here */
5558 /* Port EMAC0 comes here */
5559 /* Port EMAC1 comes here */
5560 /* Port DBU comes here */
5561 /* Port DBG comes here */
5562 bnx2x_init_block(bp
, port
? NIG_PORT1_START
: NIG_PORT0_START
,
5563 port
? NIG_PORT1_END
: NIG_PORT0_END
);
5565 REG_WR(bp
, NIG_REG_XGXS_SERDES0_MODE_SEL
+ port
*4, 1);
5567 if (CHIP_IS_E1H(bp
)) {
5569 struct cmng_struct_per_port m_cmng_port
;
5572 /* 0x2 disable e1hov, 0x1 enable */
5573 REG_WR(bp
, NIG_REG_LLH0_BRB1_DRV_MASK_MF
+ port
*4,
5574 (IS_E1HMF(bp
) ? 0x1 : 0x2));
5576 /* Init RATE SHAPING and FAIRNESS contexts.
5577 Initialize as if there is 10G link. */
5578 wsum
= bnx2x_calc_vn_wsum(bp
);
5579 bnx2x_init_port_minmax(bp
, (int)wsum
, 10000, &m_cmng_port
);
5581 for (vn
= VN_0
; vn
< E1HVN_MAX
; vn
++)
5582 bnx2x_init_vn_minmax(bp
, 2*vn
+ port
,
5583 wsum
, 10000, &m_cmng_port
);
5586 /* Port MCP comes here */
5587 /* Port DMAE comes here */
5589 switch (bp
->common
.board
& SHARED_HW_CFG_BOARD_TYPE_MASK
) {
5590 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1021G
:
5591 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G
:
5592 /* add SPIO 5 to group 0 */
5593 val
= REG_RD(bp
, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0
);
5594 val
|= AEU_INPUTS_ATTN_BITS_SPIO5
;
5595 REG_WR(bp
, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0
, val
);
5602 bnx2x__link_reset(bp
);
#define ILT_PER_FUNC		(768/2)
#define FUNC_ILT_BASE(func)	(func * ILT_PER_FUNC)
/* the phys address is shifted right 12 bits and has an added
   1=valid bit added to the 53rd bit
   then since this is a wide register(TM)
   we split it into two 32 bit writes */
#define ONCHIP_ADDR1(x)		((u32)(((u64)x >> 12) & 0xFFFFFFFF))
#define ONCHIP_ADDR2(x)		((u32)((1 << 20) | ((u64)x >> 44)))
#define PXP_ONE_ILT(x)		(((x) << 10) | x)
#define PXP_ILT_RANGE(f, l)	(((l) << 10) | f)

#define CNIC_ILT_LINES		0

static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
{
	int reg;

	if (CHIP_IS_E1H(bp))
		reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
	else /* E1 */
		reg = PXP2_REG_RQ_ONCHIP_AT + index*8;

	bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
}
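/*
 * Worked example (not part of the driver): the ILT entry is a 64-bit wide
 * register holding a 4K-aligned physical address plus a valid bit, split
 * into two 32-bit writes.  For a hypothetical DMA address of
 * 0x0000_0012_3456_7000:
 *
 *   ONCHIP_ADDR1(addr) = (addr >> 12) & 0xFFFFFFFF = 0x01234567
 *				(low word: address in 4K pages)
 *   ONCHIP_ADDR2(addr) = (1 << 20) | (addr >> 44)  = 0x00100000
 *				(high word: valid bit plus upper address bits)
 *
 * so bnx2x_wb_wr() ends up writing 0x01234567 / 0x00100000 into the selected
 * PXP2_REG_RQ_ONCHIP_AT* slot.
 */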
5633 static int bnx2x_init_func(struct bnx2x
*bp
)
5635 int port
= BP_PORT(bp
);
5636 int func
= BP_FUNC(bp
);
5639 DP(BNX2X_MSG_MCP
, "starting func init func %x\n", func
);
5641 i
= FUNC_ILT_BASE(func
);
5643 bnx2x_ilt_wr(bp
, i
, bnx2x_sp_mapping(bp
, context
));
5644 if (CHIP_IS_E1H(bp
)) {
5645 REG_WR(bp
, PXP2_REG_RQ_CDU_FIRST_ILT
, i
);
5646 REG_WR(bp
, PXP2_REG_RQ_CDU_LAST_ILT
, i
+ CNIC_ILT_LINES
);
5648 REG_WR(bp
, PXP2_REG_PSWRQ_CDU0_L2P
+ func
*4,
5649 PXP_ILT_RANGE(i
, i
+ CNIC_ILT_LINES
));
5652 if (CHIP_IS_E1H(bp
)) {
5653 for (i
= 0; i
< 9; i
++)
5654 bnx2x_init_block(bp
,
5655 cm_start
[func
][i
], cm_end
[func
][i
]);
5657 REG_WR(bp
, NIG_REG_LLH0_FUNC_EN
+ port
*8, 1);
5658 REG_WR(bp
, NIG_REG_LLH0_FUNC_VLAN_ID
+ port
*8, bp
->e1hov
);
5661 /* HC init per function */
5662 if (CHIP_IS_E1H(bp
)) {
5663 REG_WR(bp
, MISC_REG_AEU_GENERAL_ATTN_12
+ func
*4, 0);
5665 REG_WR(bp
, HC_REG_LEADING_EDGE_0
+ port
*8, 0);
5666 REG_WR(bp
, HC_REG_TRAILING_EDGE_0
+ port
*8, 0);
5668 bnx2x_init_block(bp
, hc_limits
[func
][0], hc_limits
[func
][1]);
5670 if (CHIP_IS_E1H(bp
))
5671 REG_WR(bp
, HC_REG_FUNC_NUM_P0
+ port
*4, func
);
5673 /* Reset PCIE errors for debug */
5674 REG_WR(bp
, 0x2114, 0xffffffff);
5675 REG_WR(bp
, 0x2120, 0xffffffff);
5680 static int bnx2x_init_hw(struct bnx2x
*bp
, u32 load_code
)
5684 DP(BNX2X_MSG_MCP
, "function %d load_code %x\n",
5685 BP_FUNC(bp
), load_code
);
5688 mutex_init(&bp
->dmae_mutex
);
5689 bnx2x_gunzip_init(bp
);
5691 switch (load_code
) {
5692 case FW_MSG_CODE_DRV_LOAD_COMMON
:
5693 rc
= bnx2x_init_common(bp
);
5698 case FW_MSG_CODE_DRV_LOAD_PORT
:
5700 rc
= bnx2x_init_port(bp
);
5705 case FW_MSG_CODE_DRV_LOAD_FUNCTION
:
5707 rc
= bnx2x_init_func(bp
);
5713 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code
);
5717 if (!BP_NOMCP(bp
)) {
5718 int func
= BP_FUNC(bp
);
5720 bp
->fw_drv_pulse_wr_seq
=
5721 (SHMEM_RD(bp
, func_mb
[func
].drv_pulse_mb
) &
5722 DRV_PULSE_SEQ_MASK
);
5723 bp
->func_stx
= SHMEM_RD(bp
, func_mb
[func
].fw_mb_param
);
5724 DP(BNX2X_MSG_MCP
, "drv_pulse 0x%x func_stx 0x%x\n",
5725 bp
->fw_drv_pulse_wr_seq
, bp
->func_stx
);
5729 /* this needs to be done before gunzip end */
5730 bnx2x_zero_def_sb(bp
);
5731 for_each_queue(bp
, i
)
5732 bnx2x_zero_sb(bp
, BP_L_ID(bp
) + i
);
5735 bnx2x_gunzip_end(bp
);
/* send the MCP a request, block until there is a reply */
static u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
{
	int func = BP_FUNC(bp);
	u32 seq = ++bp->fw_seq;
	u32 rc = 0;
	u32 cnt = 1;
	u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;

	SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
	DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));

	do {
		/* let the FW do it's magic ... */
		msleep(delay);

		rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);

		/* Give the FW up to 2 second (200*10ms) */
	} while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 200));

	DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
	   cnt*delay, rc, seq);

	/* is this a reply to our command? */
	if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK)) {
		rc &= FW_MSG_CODE_MASK;

	} else {
		/* FW BUG! */
		BNX2X_ERR("FW failed to respond!\n");
		bnx2x_fw_dump(bp);
		rc = 0;
	}

	return rc;
}
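/*
 * Illustrative sketch (not part of the driver): bnx2x_fw_command() is how
 * the driver talks to the management CPU (MCP); the caller picks a
 * DRV_MSG_CODE_* request and branches on the FW_MSG_CODE_* reply, with 0
 * meaning the firmware never answered.  Roughly how a load request is
 * issued elsewhere in this file:
 */
#if 0	/* example only, never compiled */
static int bnx2x_request_load(struct bnx2x *bp)
{
	u32 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);

	if (!load_code) {
		BNX2X_ERR("MCP response failure, aborting\n");
		return -EBUSY;
	}
	if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED)
		return -EBUSY;

	/* otherwise load_code is one of FW_MSG_CODE_DRV_LOAD_COMMON /
	 * _PORT / _FUNCTION and selects how much of the chip to init */
	return 0;
}
#endif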
5778 static void bnx2x_free_mem(struct bnx2x
*bp
)
5781 #define BNX2X_PCI_FREE(x, y, size) \
5784 pci_free_consistent(bp->pdev, size, x, y); \
5790 #define BNX2X_FREE(x) \
5801 for_each_queue(bp
, i
) {
5804 BNX2X_PCI_FREE(bnx2x_fp(bp
, i
, status_blk
),
5805 bnx2x_fp(bp
, i
, status_blk_mapping
),
5806 sizeof(struct host_status_block
) +
5807 sizeof(struct eth_tx_db_data
));
5809 /* fast path rings: tx_buf tx_desc rx_buf rx_desc rx_comp */
5810 BNX2X_FREE(bnx2x_fp(bp
, i
, tx_buf_ring
));
5811 BNX2X_PCI_FREE(bnx2x_fp(bp
, i
, tx_desc_ring
),
5812 bnx2x_fp(bp
, i
, tx_desc_mapping
),
5813 sizeof(struct eth_tx_bd
) * NUM_TX_BD
);
5815 BNX2X_FREE(bnx2x_fp(bp
, i
, rx_buf_ring
));
5816 BNX2X_PCI_FREE(bnx2x_fp(bp
, i
, rx_desc_ring
),
5817 bnx2x_fp(bp
, i
, rx_desc_mapping
),
5818 sizeof(struct eth_rx_bd
) * NUM_RX_BD
);
5820 BNX2X_PCI_FREE(bnx2x_fp(bp
, i
, rx_comp_ring
),
5821 bnx2x_fp(bp
, i
, rx_comp_mapping
),
5822 sizeof(struct eth_fast_path_rx_cqe
) *
5826 BNX2X_FREE(bnx2x_fp(bp
, i
, rx_page_ring
));
5827 BNX2X_PCI_FREE(bnx2x_fp(bp
, i
, rx_sge_ring
),
5828 bnx2x_fp(bp
, i
, rx_sge_mapping
),
5829 BCM_PAGE_SIZE
* NUM_RX_SGE_PAGES
);
5831 /* end of fastpath */
5833 BNX2X_PCI_FREE(bp
->def_status_blk
, bp
->def_status_blk_mapping
,
5834 sizeof(struct host_def_status_block
));
5836 BNX2X_PCI_FREE(bp
->slowpath
, bp
->slowpath_mapping
,
5837 sizeof(struct bnx2x_slowpath
));
5840 BNX2X_PCI_FREE(bp
->t1
, bp
->t1_mapping
, 64*1024);
5841 BNX2X_PCI_FREE(bp
->t2
, bp
->t2_mapping
, 16*1024);
5842 BNX2X_PCI_FREE(bp
->timers
, bp
->timers_mapping
, 8*1024);
5843 BNX2X_PCI_FREE(bp
->qm
, bp
->qm_mapping
, 128*1024);
5845 BNX2X_PCI_FREE(bp
->spq
, bp
->spq_mapping
, BCM_PAGE_SIZE
);
5847 #undef BNX2X_PCI_FREE
5851 static int bnx2x_alloc_mem(struct bnx2x
*bp
)
5854 #define BNX2X_PCI_ALLOC(x, y, size) \
5856 x = pci_alloc_consistent(bp->pdev, size, y); \
5858 goto alloc_mem_err; \
5859 memset(x, 0, size); \
5862 #define BNX2X_ALLOC(x, size) \
5864 x = vmalloc(size); \
5866 goto alloc_mem_err; \
5867 memset(x, 0, size); \
5873 for_each_queue(bp
, i
) {
5874 bnx2x_fp(bp
, i
, bp
) = bp
;
5877 BNX2X_PCI_ALLOC(bnx2x_fp(bp
, i
, status_blk
),
5878 &bnx2x_fp(bp
, i
, status_blk_mapping
),
5879 sizeof(struct host_status_block
) +
5880 sizeof(struct eth_tx_db_data
));
5882 bnx2x_fp(bp
, i
, hw_tx_prods
) =
5883 (void *)(bnx2x_fp(bp
, i
, status_blk
) + 1);
5885 bnx2x_fp(bp
, i
, tx_prods_mapping
) =
5886 bnx2x_fp(bp
, i
, status_blk_mapping
) +
5887 sizeof(struct host_status_block
);
5889 /* fast path rings: tx_buf tx_desc rx_buf rx_desc rx_comp */
5890 BNX2X_ALLOC(bnx2x_fp(bp
, i
, tx_buf_ring
),
5891 sizeof(struct sw_tx_bd
) * NUM_TX_BD
);
5892 BNX2X_PCI_ALLOC(bnx2x_fp(bp
, i
, tx_desc_ring
),
5893 &bnx2x_fp(bp
, i
, tx_desc_mapping
),
5894 sizeof(struct eth_tx_bd
) * NUM_TX_BD
);
5896 BNX2X_ALLOC(bnx2x_fp(bp
, i
, rx_buf_ring
),
5897 sizeof(struct sw_rx_bd
) * NUM_RX_BD
);
5898 BNX2X_PCI_ALLOC(bnx2x_fp(bp
, i
, rx_desc_ring
),
5899 &bnx2x_fp(bp
, i
, rx_desc_mapping
),
5900 sizeof(struct eth_rx_bd
) * NUM_RX_BD
);
5902 BNX2X_PCI_ALLOC(bnx2x_fp(bp
, i
, rx_comp_ring
),
5903 &bnx2x_fp(bp
, i
, rx_comp_mapping
),
5904 sizeof(struct eth_fast_path_rx_cqe
) *
5908 BNX2X_ALLOC(bnx2x_fp(bp
, i
, rx_page_ring
),
5909 sizeof(struct sw_rx_page
) * NUM_RX_SGE
);
5910 BNX2X_PCI_ALLOC(bnx2x_fp(bp
, i
, rx_sge_ring
),
5911 &bnx2x_fp(bp
, i
, rx_sge_mapping
),
5912 BCM_PAGE_SIZE
* NUM_RX_SGE_PAGES
);
5914 /* end of fastpath */
5916 BNX2X_PCI_ALLOC(bp
->def_status_blk
, &bp
->def_status_blk_mapping
,
5917 sizeof(struct host_def_status_block
));
5919 BNX2X_PCI_ALLOC(bp
->slowpath
, &bp
->slowpath_mapping
,
5920 sizeof(struct bnx2x_slowpath
));
5923 BNX2X_PCI_ALLOC(bp
->t1
, &bp
->t1_mapping
, 64*1024);
5926 for (i
= 0; i
< 64*1024; i
+= 64) {
5927 *(u64
*)((char *)bp
->t1
+ i
+ 56) = 0x0UL
;
5928 *(u64
*)((char *)bp
->t1
+ i
+ 3) = 0x0UL
;
5931 /* allocate searcher T2 table
5932 we allocate 1/4 of alloc num for T2
5933 (which is not entered into the ILT) */
5934 BNX2X_PCI_ALLOC(bp
->t2
, &bp
->t2_mapping
, 16*1024);
5937 for (i
= 0; i
< 16*1024; i
+= 64)
5938 * (u64
*)((char *)bp
->t2
+ i
+ 56) = bp
->t2_mapping
+ i
+ 64;
5940 /* now fixup the last line in the block to point to the next block */
5941 *(u64
*)((char *)bp
->t2
+ 1024*16-8) = bp
->t2_mapping
;
5943 /* Timer block array (MAX_CONN*8) phys uncached for now 1024 conns */
5944 BNX2X_PCI_ALLOC(bp
->timers
, &bp
->timers_mapping
, 8*1024);
5946 /* QM queues (128*MAX_CONN) */
5947 BNX2X_PCI_ALLOC(bp
->qm
, &bp
->qm_mapping
, 128*1024);
5950 /* Slow path ring */
5951 BNX2X_PCI_ALLOC(bp
->spq
, &bp
->spq_mapping
, BCM_PAGE_SIZE
);
5959 #undef BNX2X_PCI_ALLOC
5963 static void bnx2x_free_tx_skbs(struct bnx2x
*bp
)
5967 for_each_queue(bp
, i
) {
5968 struct bnx2x_fastpath
*fp
= &bp
->fp
[i
];
5970 u16 bd_cons
= fp
->tx_bd_cons
;
5971 u16 sw_prod
= fp
->tx_pkt_prod
;
5972 u16 sw_cons
= fp
->tx_pkt_cons
;
5974 while (sw_cons
!= sw_prod
) {
5975 bd_cons
= bnx2x_free_tx_pkt(bp
, fp
, TX_BD(sw_cons
));
5981 static void bnx2x_free_rx_skbs(struct bnx2x
*bp
)
5985 for_each_queue(bp
, j
) {
5986 struct bnx2x_fastpath
*fp
= &bp
->fp
[j
];
5988 for (i
= 0; i
< NUM_RX_BD
; i
++) {
5989 struct sw_rx_bd
*rx_buf
= &fp
->rx_buf_ring
[i
];
5990 struct sk_buff
*skb
= rx_buf
->skb
;
5995 pci_unmap_single(bp
->pdev
,
5996 pci_unmap_addr(rx_buf
, mapping
),
5998 PCI_DMA_FROMDEVICE
);
6003 if (!fp
->disable_tpa
)
6004 bnx2x_free_tpa_pool(bp
, fp
, CHIP_IS_E1(bp
) ?
6005 ETH_MAX_AGGREGATION_QUEUES_E1
:
6006 ETH_MAX_AGGREGATION_QUEUES_E1H
);
6010 static void bnx2x_free_skbs(struct bnx2x
*bp
)
6012 bnx2x_free_tx_skbs(bp
);
6013 bnx2x_free_rx_skbs(bp
);
6016 static void bnx2x_free_msix_irqs(struct bnx2x
*bp
)
6020 free_irq(bp
->msix_table
[0].vector
, bp
->dev
);
6021 DP(NETIF_MSG_IFDOWN
, "released sp irq (%d)\n",
6022 bp
->msix_table
[0].vector
);
6024 for_each_queue(bp
, i
) {
6025 DP(NETIF_MSG_IFDOWN
, "about to release fp #%d->%d irq "
6026 "state %x\n", i
, bp
->msix_table
[i
+ offset
].vector
,
6027 bnx2x_fp(bp
, i
, state
));
6029 if (bnx2x_fp(bp
, i
, state
) != BNX2X_FP_STATE_CLOSED
)
6030 BNX2X_ERR("IRQ of fp #%d being freed while "
6031 "state != closed\n", i
);
6033 free_irq(bp
->msix_table
[i
+ offset
].vector
, &bp
->fp
[i
]);
6037 static void bnx2x_free_irq(struct bnx2x
*bp
)
6039 if (bp
->flags
& USING_MSIX_FLAG
) {
6040 bnx2x_free_msix_irqs(bp
);
6041 pci_disable_msix(bp
->pdev
);
6042 bp
->flags
&= ~USING_MSIX_FLAG
;
6045 free_irq(bp
->pdev
->irq
, bp
->dev
);
6048 static int bnx2x_enable_msix(struct bnx2x
*bp
)
6052 bp
->msix_table
[0].entry
= 0;
6054 DP(NETIF_MSG_IFUP
, "msix_table[0].entry = 0 (slowpath)\n");
6056 for_each_queue(bp
, i
) {
6057 int igu_vec
= offset
+ i
+ BP_L_ID(bp
);
6059 bp
->msix_table
[i
+ offset
].entry
= igu_vec
;
6060 DP(NETIF_MSG_IFUP
, "msix_table[%d].entry = %d "
6061 "(fastpath #%u)\n", i
+ offset
, igu_vec
, i
);
6064 rc
= pci_enable_msix(bp
->pdev
, &bp
->msix_table
[0],
6065 bp
->num_queues
+ offset
);
6067 DP(NETIF_MSG_IFUP
, "MSI-X is not attainable\n");
6070 bp
->flags
|= USING_MSIX_FLAG
;
6075 static int bnx2x_req_msix_irqs(struct bnx2x
*bp
)
6077 int i
, rc
, offset
= 1;
6079 rc
= request_irq(bp
->msix_table
[0].vector
, bnx2x_msix_sp_int
, 0,
6080 bp
->dev
->name
, bp
->dev
);
6082 BNX2X_ERR("request sp irq failed\n");
6086 for_each_queue(bp
, i
) {
6087 rc
= request_irq(bp
->msix_table
[i
+ offset
].vector
,
6088 bnx2x_msix_fp_int
, 0,
6089 bp
->dev
->name
, &bp
->fp
[i
]);
6091 BNX2X_ERR("request fp #%d irq failed rc -%d\n",
6093 bnx2x_free_msix_irqs(bp
);
6097 bnx2x_fp(bp
, i
, state
) = BNX2X_FP_STATE_IRQ
;
6103 static int bnx2x_req_irq(struct bnx2x
*bp
)
6107 rc
= request_irq(bp
->pdev
->irq
, bnx2x_interrupt
, IRQF_SHARED
,
6108 bp
->dev
->name
, bp
->dev
);
6110 bnx2x_fp(bp
, 0, state
) = BNX2X_FP_STATE_IRQ
;
static void bnx2x_napi_enable(struct bnx2x *bp)
{
	int i;

	for_each_queue(bp, i)
		napi_enable(&bnx2x_fp(bp, i, napi));
}

static void bnx2x_napi_disable(struct bnx2x *bp)
{
	int i;

	for_each_queue(bp, i)
		napi_disable(&bnx2x_fp(bp, i, napi));
}

static void bnx2x_netif_start(struct bnx2x *bp)
{
	if (atomic_dec_and_test(&bp->intr_sem)) {
		if (netif_running(bp->dev)) {
			if (bp->state == BNX2X_STATE_OPEN)
				netif_wake_queue(bp->dev);
			bnx2x_napi_enable(bp);
			bnx2x_int_enable(bp);
		}
	}
}

static void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
{
	bnx2x_int_disable_sync(bp, disable_hw);
	bnx2x_napi_disable(bp);
	if (netif_running(bp->dev)) {
		netif_tx_disable(bp->dev);
		bp->dev->trans_start = jiffies;	/* prevent tx timeout */
	}
}
/*
 * Init service functions
 */

static void bnx2x_set_mac_addr_e1(struct bnx2x *bp, int set)
    struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
    int port = BP_PORT(bp);

    /*
     * unicasts 0-31:port0 32-63:port1
     * multicast 64-127:port0 128-191:port1
     */
    config->hdr.length_6b = 2;
    config->hdr.offset = port ? 32 : 0;
    config->hdr.client_id = BP_CL_ID(bp);
    config->hdr.reserved1 = 0;

    config->config_table[0].cam_entry.msb_mac_addr =
                    swab16(*(u16 *)&bp->dev->dev_addr[0]);
    config->config_table[0].cam_entry.middle_mac_addr =
                    swab16(*(u16 *)&bp->dev->dev_addr[2]);
    config->config_table[0].cam_entry.lsb_mac_addr =
                    swab16(*(u16 *)&bp->dev->dev_addr[4]);
    config->config_table[0].cam_entry.flags = cpu_to_le16(port);
        config->config_table[0].target_table_entry.flags = 0;
        CAM_INVALIDATE(config->config_table[0]);
    config->config_table[0].target_table_entry.client_id = 0;
    config->config_table[0].target_table_entry.vlan_id = 0;

    DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)\n",
       (set ? "setting" : "clearing"),
       config->config_table[0].cam_entry.msb_mac_addr,
       config->config_table[0].cam_entry.middle_mac_addr,
       config->config_table[0].cam_entry.lsb_mac_addr);

    config->config_table[1].cam_entry.msb_mac_addr = 0xffff;
    config->config_table[1].cam_entry.middle_mac_addr = 0xffff;
    config->config_table[1].cam_entry.lsb_mac_addr = 0xffff;
    config->config_table[1].cam_entry.flags = cpu_to_le16(port);
        config->config_table[1].target_table_entry.flags =
                TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
        CAM_INVALIDATE(config->config_table[1]);
    config->config_table[1].target_table_entry.client_id = 0;
    config->config_table[1].target_table_entry.vlan_id = 0;

    bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
                  U64_HI(bnx2x_sp_mapping(bp, mac_config)),
                  U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
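/*
 * For clarity: the CAM entry stores the station address as three big-endian
 * 16-bit halves, which is why each pair of dev_addr bytes goes through
 * swab16() above.  A hedged example (values chosen purely for illustration):
 * for dev_addr = 00:10:18:aa:bb:cc the entry ends up as
 *	msb_mac_addr    = 0x0010
 *	middle_mac_addr = 0x18aa
 *	lsb_mac_addr    = 0xbbcc
 * on a little-endian host, since swab16(*(u16 *)&dev_addr[0]) swaps the two
 * bytes as they were read from memory.
 */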
static void bnx2x_set_mac_addr_e1h(struct bnx2x *bp, int set)
    struct mac_configuration_cmd_e1h *config =
        (struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config);

    if (set && (bp->state != BNX2X_STATE_OPEN)) {
        DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);

    /* CAM allocation for E1H
     * unicasts: by func number
     * multicast: 20+FUNC*20, 20 each
     */
    config->hdr.length_6b = 1;
    config->hdr.offset = BP_FUNC(bp);
    config->hdr.client_id = BP_CL_ID(bp);
    config->hdr.reserved1 = 0;

    config->config_table[0].msb_mac_addr =
                    swab16(*(u16 *)&bp->dev->dev_addr[0]);
    config->config_table[0].middle_mac_addr =
                    swab16(*(u16 *)&bp->dev->dev_addr[2]);
    config->config_table[0].lsb_mac_addr =
                    swab16(*(u16 *)&bp->dev->dev_addr[4]);
    config->config_table[0].client_id = BP_L_ID(bp);
    config->config_table[0].vlan_id = 0;
    config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov);
        config->config_table[0].flags = BP_PORT(bp);
        config->config_table[0].flags =
                MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE;

    DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x) E1HOV %d CLID %d\n",
       (set ? "setting" : "clearing"),
       config->config_table[0].msb_mac_addr,
       config->config_table[0].middle_mac_addr,
       config->config_table[0].lsb_mac_addr, bp->e1hov, BP_L_ID(bp));

    bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
                  U64_HI(bnx2x_sp_mapping(bp, mac_config)),
                  U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);

static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
                             int *state_p, int poll)
    /* can take a while if any port is running */

    DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
       poll ? "polling" : "waiting", state, idx);

            bnx2x_rx_int(bp->fp, 10);
            /* if index is different from 0
             * the reply for some commands will
             * be on the non default queue
             */
                bnx2x_rx_int(&bp->fp[idx], 10);

        mb(); /* state is changed by bnx2x_sp_event() */
        if (*state_p == state)

    BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
              poll ? "polling" : "waiting", state, idx);
#ifdef BNX2X_STOP_ON_ERROR

static int bnx2x_setup_leading(struct bnx2x *bp)
    /* reset IGU state */
    bnx2x_ack_sb(bp, bp->fp[0].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);

    bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);

    /* Wait for completion */
    rc = bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0);

static int bnx2x_setup_multi(struct bnx2x *bp, int index)
    /* reset IGU state */
    bnx2x_ack_sb(bp, bp->fp[index].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);

    bp->fp[index].state = BNX2X_FP_STATE_OPENING;
    bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0, index, 0);

    /* Wait for completion */
    return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
                             &(bp->fp[index].state), 0);
static int bnx2x_poll(struct napi_struct *napi, int budget);
static void bnx2x_set_rx_mode(struct net_device *dev);

/* must be called with rtnl_lock */
static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
#ifdef BNX2X_STOP_ON_ERROR
    if (unlikely(bp->panic))

    bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;

    if ((use_multi > 1) && (use_multi <= BP_MAX_QUEUES(bp)))
        /* user requested number */
        bp->num_queues = use_multi;
        bp->num_queues = min_t(u32, num_online_cpus(),
       "set number of queues to %d\n", bp->num_queues);

    /* if we can't use MSI-X we only need one fp,
     * so try to enable MSI-X with the requested number of fp's
     * and fallback to MSI or legacy INTx with one fp
     */
    rc = bnx2x_enable_msix(bp);
        /* failed to enable MSI-X */
            BNX2X_ERR("Multi requested but failed"
                      " to enable MSI-X\n");

    if (bnx2x_alloc_mem(bp))

    for_each_queue(bp, i)
        bnx2x_fp(bp, i, disable_tpa) =
                    ((bp->flags & TPA_ENABLE_FLAG) == 0);

    for_each_queue(bp, i)
        netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),

#ifdef BNX2X_STOP_ON_ERROR
    for_each_queue(bp, i) {
        struct bnx2x_fastpath *fp = &bp->fp[i];

        fp->poll_no_work = 0;
        fp->poll_max_calls = 0;
        fp->poll_complete = 0;

    bnx2x_napi_enable(bp);

    if (bp->flags & USING_MSIX_FLAG) {
        rc = bnx2x_req_msix_irqs(bp);
            pci_disable_msix(bp->pdev);
        printk(KERN_INFO PFX "%s: using MSI-X\n", bp->dev->name);
        rc = bnx2x_req_irq(bp);
            BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc);

    /* Send LOAD_REQUEST command to MCP
       Returns the type of LOAD command:
       if it is the first port to be initialized
       common blocks should be initialized, otherwise - not
     */
    if (!BP_NOMCP(bp)) {
        load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
            BNX2X_ERR("MCP response failure, aborting\n");
        if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
            rc = -EBUSY; /* other port in diagnostic mode */

        int port = BP_PORT(bp);

        DP(NETIF_MSG_IFUP, "NO MCP load counts before us %d, %d, %d\n",
           load_count[0], load_count[1], load_count[2]);
        load_count[1 + port]++;
        DP(NETIF_MSG_IFUP, "NO MCP new load counts %d, %d, %d\n",
           load_count[0], load_count[1], load_count[2]);
        if (load_count[0] == 1)
            load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
        else if (load_count[1 + port] == 1)
            load_code = FW_MSG_CODE_DRV_LOAD_PORT;
            load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;

    if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
        (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
    DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);

    rc = bnx2x_init_hw(bp, load_code);
        BNX2X_ERR("HW init failed, aborting\n");

    /* Setup NIC internals and enable interrupts */
    bnx2x_nic_init(bp, load_code);

    /* Send LOAD_DONE command to MCP */
    if (!BP_NOMCP(bp)) {
        load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
            BNX2X_ERR("MCP response failure, aborting\n");

    bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;

    rc = bnx2x_setup_leading(bp);
        BNX2X_ERR("Setup leading failed!\n");

    if (CHIP_IS_E1H(bp))
        if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
            BNX2X_ERR("!!! mf_cfg function disabled\n");
            bp->state = BNX2X_STATE_DISABLED;

    if (bp->state == BNX2X_STATE_OPEN)
        for_each_nondefault_queue(bp, i) {
            rc = bnx2x_setup_multi(bp, i);

        bnx2x_set_mac_addr_e1(bp, 1);
        bnx2x_set_mac_addr_e1h(bp, 1);

        bnx2x_initial_phy_init(bp);

    /* Start fast path */
    switch (load_mode) {
        /* Tx queue should be only reenabled */
        netif_wake_queue(bp->dev);
        /* Initialize the receive filter. */
        bnx2x_set_rx_mode(bp->dev);

        netif_start_queue(bp->dev);
        /* Initialize the receive filter. */
        bnx2x_set_rx_mode(bp->dev);

        /* Initialize the receive filter. */
        bnx2x_set_rx_mode(bp->dev);
        bp->state = BNX2X_STATE_DIAG;

        bnx2x__link_status_update(bp);

    /* start the timer */
    mod_timer(&bp->timer, jiffies + bp->current_interval);

    bnx2x_int_disable_sync(bp, 1);
    if (!BP_NOMCP(bp)) {
        bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP);
        bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);

    /* Free SKBs, SGEs, TPA pool and driver internals */
    bnx2x_free_skbs(bp);
    for_each_queue(bp, i)
        bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);

    bnx2x_napi_disable(bp);

    /* TBD we really need to reset the chip
       if we want to recover from this */
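/*
 * Illustrative sketch (an assumption for clarity, not verbatim driver code):
 * when the MCP firmware is absent, the per-chip load_count[] array decides
 * whether this bnx2x_nic_load() call must also bring up the COMMON and PORT
 * blocks.  The equivalent standalone logic looks roughly like this:
 *
 *	// load_count[0]        - functions loaded on the whole chip
 *	// load_count[1 + port] - functions loaded on this port
 *	load_count[0]++;
 *	load_count[1 + port]++;
 *	if (load_count[0] == 1)                 // first function on the chip
 *		load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
 *	else if (load_count[1 + port] == 1)     // first function on this port
 *		load_code = FW_MSG_CODE_DRV_LOAD_PORT;
 *	else
 *		load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
 */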
static int bnx2x_stop_multi(struct bnx2x *bp, int index)
    /* halt the connection */
    bp->fp[index].state = BNX2X_FP_STATE_HALTING;
    bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, index, 0);

    /* Wait for completion */
    rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
                           &(bp->fp[index].state), 1);
    if (rc) /* timeout */

    /* delete cfc entry */
    bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);

    /* Wait for completion */
    rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index,
                           &(bp->fp[index].state), 1);

static int bnx2x_stop_leading(struct bnx2x *bp)
    u16 dsb_sp_prod_idx;
    /* if the other port is handling traffic,
       this can take a lot of time */

    /* Send HALT ramrod */
    bp->fp[0].state = BNX2X_FP_STATE_HALTING;
    bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, BP_CL_ID(bp), 0);

    /* Wait for completion */
    rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
                           &(bp->fp[0].state), 1);
    if (rc) /* timeout */

    dsb_sp_prod_idx = *bp->dsb_sp_prod;

    /* Send PORT_DELETE ramrod */
    bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);

    /* Wait for completion to arrive on default status block
       we are going to reset the chip anyway
       so there is not much to do if this times out
     */
    while (dsb_sp_prod_idx == *bp->dsb_sp_prod) {
            DP(NETIF_MSG_IFDOWN, "timeout waiting for port del "
               "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
               *bp->dsb_sp_prod, dsb_sp_prod_idx);
#ifdef BNX2X_STOP_ON_ERROR

    bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
    bp->fp[0].state = BNX2X_FP_STATE_CLOSED;

static void bnx2x_reset_func(struct bnx2x *bp)
    int port = BP_PORT(bp);
    int func = BP_FUNC(bp);

    REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
    REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
    REG_WR(bp, HC_REG_CONFIG_0 + port*4, 0x1000);

    base = FUNC_ILT_BASE(func);
    for (i = base; i < base + ILT_PER_FUNC; i++)
        bnx2x_ilt_wr(bp, i, 0);

static void bnx2x_reset_port(struct bnx2x *bp)
    int port = BP_PORT(bp);

    REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);

    /* Do not rcv packets to BRB */
    REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
    /* Do not direct rcv packets that are not for MCP to the BRB */
    REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
                       NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);

    REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);

    /* Check for BRB port occupancy */
    val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
        DP(NETIF_MSG_IFDOWN,
           "BRB1 is not empty %d blocks are occupied\n", val);

    /* TODO: Close Doorbell port? */

static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
    DP(BNX2X_MSG_MCP, "function %d reset_code %x\n",
       BP_FUNC(bp), reset_code);

    switch (reset_code) {
    case FW_MSG_CODE_DRV_UNLOAD_COMMON:
        bnx2x_reset_port(bp);
        bnx2x_reset_func(bp);
        bnx2x_reset_common(bp);

    case FW_MSG_CODE_DRV_UNLOAD_PORT:
        bnx2x_reset_port(bp);
        bnx2x_reset_func(bp);

    case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
        bnx2x_reset_func(bp);

        BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
/* must be called with rtnl_lock */
static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
    int port = BP_PORT(bp);

    bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;

    bp->rx_mode = BNX2X_RX_MODE_NONE;
    bnx2x_set_storm_rx_mode(bp);

    bnx2x_netif_stop(bp, 1);

    del_timer_sync(&bp->timer);
    SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
             (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
    bnx2x_stats_handle(bp, STATS_EVENT_STOP);

    /* Wait until tx fast path tasks complete */
    for_each_queue(bp, i) {
        struct bnx2x_fastpath *fp = &bp->fp[i];

        while (bnx2x_has_tx_work(fp)) {
            bnx2x_tx_int(fp, 1000);
                BNX2X_ERR("timeout waiting for queue[%d]\n",
#ifdef BNX2X_STOP_ON_ERROR

    /* Give HW time to discard old tx messages */

    if (CHIP_IS_E1(bp)) {
        struct mac_configuration_cmd *config =
                        bnx2x_sp(bp, mcast_config);

        bnx2x_set_mac_addr_e1(bp, 0);

        for (i = 0; i < config->hdr.length_6b; i++)
            CAM_INVALIDATE(config->config_table[i]);

        config->hdr.length_6b = i;
        if (CHIP_REV_IS_SLOW(bp))
            config->hdr.offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
            config->hdr.offset = BNX2X_MAX_MULTICAST*(1 + port);
        config->hdr.client_id = BP_CL_ID(bp);
        config->hdr.reserved1 = 0;

        bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
                      U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
                      U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);

        REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
        bnx2x_set_mac_addr_e1h(bp, 0);

        for (i = 0; i < MC_HASH_SIZE; i++)
            REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);

    if (unload_mode == UNLOAD_NORMAL)
        reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;

    else if (bp->flags & NO_WOL_FLAG) {
        reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
        if (CHIP_IS_E1H(bp))
            REG_WR(bp, MISC_REG_E1HMF_MODE, 0);

    } else if (bp->wol) {
        u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
        u8 *mac_addr = bp->dev->dev_addr;

        /* The mac address is written to entries 1-4 to
           preserve entry 0 which is used by the PMF */
        u8 entry = (BP_E1HVN(bp) + 1)*8;

        val = (mac_addr[0] << 8) | mac_addr[1];
        EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);

        val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
              (mac_addr[4] << 8) | mac_addr[5];
        EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);

        reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;

        reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;

    /* Close multi and leading connections
       Completions for ramrods are collected in a synchronous way */
    for_each_nondefault_queue(bp, i)
        if (bnx2x_stop_multi(bp, i))

    rc = bnx2x_stop_leading(bp);
        BNX2X_ERR("Stop leading failed!\n");
#ifdef BNX2X_STOP_ON_ERROR

        reset_code = bnx2x_fw_command(bp, reset_code);

        DP(NETIF_MSG_IFDOWN, "NO MCP load counts %d, %d, %d\n",
           load_count[0], load_count[1], load_count[2]);
        load_count[1 + port]--;
        DP(NETIF_MSG_IFDOWN, "NO MCP new load counts %d, %d, %d\n",
           load_count[0], load_count[1], load_count[2]);
        if (load_count[0] == 0)
            reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
        else if (load_count[1 + port] == 0)
            reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
            reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;

    if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
        (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
        bnx2x__link_reset(bp);

    /* Reset the chip */
    bnx2x_reset_chip(bp, reset_code);

    /* Report UNLOAD_DONE to MCP */
        bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);

    /* Free SKBs, SGEs, TPA pool and driver internals */
    bnx2x_free_skbs(bp);
    for_each_queue(bp, i)
        bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);

    bp->state = BNX2X_STATE_CLOSED;

    netif_carrier_off(bp->dev);

static void bnx2x_reset_task(struct work_struct *work)
    struct bnx2x *bp = container_of(work, struct bnx2x, reset_task);

#ifdef BNX2X_STOP_ON_ERROR
    BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
              " so reset not done to allow debug dump,\n"
         KERN_ERR " you will need to reboot when done\n");

    if (!netif_running(bp->dev))
        goto reset_task_exit;

    bnx2x_nic_unload(bp, UNLOAD_NORMAL);
    bnx2x_nic_load(bp, LOAD_NORMAL);

/* end of nic load/unload */
/*
 * Init service functions
 */

static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
    /* Check if there is any driver already loaded */
    val = REG_RD(bp, MISC_REG_UNPREPARED);
        /* Check if it is the UNDI driver
         * UNDI driver initializes CID offset for normal bell to 0x7
         */
        bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
        val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
            u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
            int func = BP_FUNC(bp);

            /* clear the UNDI indication */
            REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);

            BNX2X_DEV_INFO("UNDI is active! reset device\n");

            /* try unload UNDI on port 0 */
                     (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
                      DRV_MSG_SEQ_NUMBER_MASK);
            reset_code = bnx2x_fw_command(bp, reset_code);

            /* if UNDI is loaded on the other port */
            if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {

                /* send "DONE" for previous unload */
                bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);

                /* unload UNDI on port 1 */
                         (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
                          DRV_MSG_SEQ_NUMBER_MASK);
                reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;

                bnx2x_fw_command(bp, reset_code);

            /* now it's safe to release the lock */
            bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);

            REG_WR(bp, (BP_PORT(bp) ? HC_REG_CONFIG_1 :
                        HC_REG_CONFIG_0), 0x1000);

            /* close input traffic and wait for it */
            /* Do not rcv packets to BRB */
                   (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
                    NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
            /* Do not direct rcv packets that are not for MCP to
                   (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
                    NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
                   (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
                    MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);

            /* save NIG port swap info */
            swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
            swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
                   GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
                   GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
            /* take the NIG out of reset and restore swap values */
                   GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
                   MISC_REGISTERS_RESET_REG_1_RST_NIG);
            REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
            REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);

            /* send unload done to the MCP */
            bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);

            /* restore our func and fw_seq */
                     (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
                      DRV_MSG_SEQ_NUMBER_MASK);

            bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
    u32 val, val2, val3, val4, id;

    /* Get the chip revision id and number. */
    /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
    val = REG_RD(bp, MISC_REG_CHIP_NUM);
    id = ((val & 0xffff) << 16);
    val = REG_RD(bp, MISC_REG_CHIP_REV);
    id |= ((val & 0xf) << 12);
    val = REG_RD(bp, MISC_REG_CHIP_METAL);
    id |= ((val & 0xff) << 4);
    val = REG_RD(bp, MISC_REG_BOND_ID);
    bp->common.chip_id = id;
    bp->link_params.chip_id = bp->common.chip_id;
    BNX2X_DEV_INFO("chip ID is 0x%x\n", id);

    val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
    bp->common.flash_size = (NVRAM_1MB_SIZE <<
                             (val & MCPR_NVM_CFG4_FLASH_SIZE));
    BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
                   bp->common.flash_size, bp->common.flash_size);

    bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
    bp->link_params.shmem_base = bp->common.shmem_base;
    BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);

    if (!bp->common.shmem_base ||
        (bp->common.shmem_base < 0xA0000) ||
        (bp->common.shmem_base >= 0xC0000)) {
        BNX2X_DEV_INFO("MCP not active\n");
        bp->flags |= NO_MCP_FLAG;

    val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
    if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
        != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
        BNX2X_ERR("BAD MCP validity signature\n");

    bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
    bp->common.board = SHMEM_RD(bp, dev_info.shared_hw_config.board);

    BNX2X_DEV_INFO("hw_config 0x%08x board 0x%08x\n",
                   bp->common.hw_config, bp->common.board);

    bp->link_params.hw_led_mode = ((bp->common.hw_config &
                                    SHARED_HW_CFG_LED_MODE_MASK) >>
                                   SHARED_HW_CFG_LED_MODE_SHIFT);

    val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
    bp->common.bc_ver = val;
    BNX2X_DEV_INFO("bc_ver %X\n", val);
    if (val < BNX2X_BC_VER) {
        /* for now only warn
         * later we might need to enforce this */
        BNX2X_ERR("This driver needs bc_ver %X but found %X,"
                  " please upgrade BC\n", BNX2X_BC_VER, val);

    if (BP_E1HVN(bp) == 0) {
        pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
        bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
        /* no WOL capability for E1HVN != 0 */
        bp->flags |= NO_WOL_FLAG;

    BNX2X_DEV_INFO("%sWoL capable\n",
                   (bp->flags & NO_WOL_FLAG) ? "Not " : "");

    val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
    val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
    val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
    val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);

    printk(KERN_INFO PFX "part number %X-%X-%X-%X\n",
           val, val2, val3, val4);
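/*
 * Worked example (illustrative values only, not read from real hardware):
 * with CHIP_NUM = 0x164e, CHIP_REV = 0x0, CHIP_METAL = 0x01 and BOND_ID = 0x0
 * the packing above yields
 *	id = (0x164e << 16) | (0x0 << 12) | (0x01 << 4) | 0x0 = 0x164e0010
 * i.e. chip number in bits 16-31, revision in 12-15, metal in 4-11 and
 * bond_id in 0-3, matching the comment at the top of the function.
 */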
static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
    int port = BP_PORT(bp);

    switch (switch_cfg) {
        BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg);
            SERDES_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
        switch (ext_phy_type) {
        case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
            BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",

            bp->port.supported |= (SUPPORTED_10baseT_Half |
                                   SUPPORTED_10baseT_Full |
                                   SUPPORTED_100baseT_Half |
                                   SUPPORTED_100baseT_Full |
                                   SUPPORTED_1000baseT_Full |
                                   SUPPORTED_2500baseX_Full |
                                   SUPPORTED_Asym_Pause);

        case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
            BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n",

            bp->port.supported |= (SUPPORTED_10baseT_Half |
                                   SUPPORTED_10baseT_Full |
                                   SUPPORTED_100baseT_Half |
                                   SUPPORTED_100baseT_Full |
                                   SUPPORTED_1000baseT_Full |
                                   SUPPORTED_Asym_Pause);

            BNX2X_ERR("NVRAM config error. "
                      "BAD SerDes ext_phy_config 0x%x\n",
                      bp->link_params.ext_phy_config);

        bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
        BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);

    case SWITCH_CFG_10G:
        BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg);
            XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
        switch (ext_phy_type) {
        case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
            BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",

            bp->port.supported |= (SUPPORTED_10baseT_Half |
                                   SUPPORTED_10baseT_Full |
                                   SUPPORTED_100baseT_Half |
                                   SUPPORTED_100baseT_Full |
                                   SUPPORTED_1000baseT_Full |
                                   SUPPORTED_2500baseX_Full |
                                   SUPPORTED_10000baseT_Full |
                                   SUPPORTED_Asym_Pause);

        case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
            BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n",

            bp->port.supported |= (SUPPORTED_10000baseT_Full |
                                   SUPPORTED_Asym_Pause);

        case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
            BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n",

            bp->port.supported |= (SUPPORTED_10000baseT_Full |
                                   SUPPORTED_1000baseT_Full |
                                   SUPPORTED_Asym_Pause);

        case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
            BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n",

            bp->port.supported |= (SUPPORTED_10000baseT_Full |
                                   SUPPORTED_1000baseT_Full |
                                   SUPPORTED_Asym_Pause);

        case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
            BNX2X_DEV_INFO("ext_phy_type 0x%x (8073)\n",

            bp->port.supported |= (SUPPORTED_10000baseT_Full |
                                   SUPPORTED_2500baseX_Full |
                                   SUPPORTED_1000baseT_Full |
                                   SUPPORTED_Asym_Pause);

        case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
            BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n",

            bp->port.supported |= (SUPPORTED_10000baseT_Full |
                                   SUPPORTED_Asym_Pause);

        case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
            BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
                      bp->link_params.ext_phy_config);

            BNX2X_ERR("NVRAM config error. "
                      "BAD XGXS ext_phy_config 0x%x\n",
                      bp->link_params.ext_phy_config);

        bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
        BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);

        BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
                  bp->port.link_config);

    bp->link_params.phy_addr = bp->port.phy_addr;

    /* mask what we support according to speed_cap_mask */
    if (!(bp->link_params.speed_cap_mask &
                PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
        bp->port.supported &= ~SUPPORTED_10baseT_Half;

    if (!(bp->link_params.speed_cap_mask &
                PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
        bp->port.supported &= ~SUPPORTED_10baseT_Full;

    if (!(bp->link_params.speed_cap_mask &
                PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
        bp->port.supported &= ~SUPPORTED_100baseT_Half;

    if (!(bp->link_params.speed_cap_mask &
                PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
        bp->port.supported &= ~SUPPORTED_100baseT_Full;

    if (!(bp->link_params.speed_cap_mask &
                PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
        bp->port.supported &= ~(SUPPORTED_1000baseT_Half |
                                SUPPORTED_1000baseT_Full);

    if (!(bp->link_params.speed_cap_mask &
                PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
        bp->port.supported &= ~SUPPORTED_2500baseX_Full;

    if (!(bp->link_params.speed_cap_mask &
                PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
        bp->port.supported &= ~SUPPORTED_10000baseT_Full;

    BNX2X_DEV_INFO("supported 0x%x\n", bp->port.supported);
static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
    bp->link_params.req_duplex = DUPLEX_FULL;

    switch (bp->port.link_config & PORT_FEATURE_LINK_SPEED_MASK) {
    case PORT_FEATURE_LINK_SPEED_AUTO:
        if (bp->port.supported & SUPPORTED_Autoneg) {
            bp->link_params.req_line_speed = SPEED_AUTO_NEG;
            bp->port.advertising = bp->port.supported;
                XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);

            if ((ext_phy_type ==
                 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
                 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706)) {
                /* force 10G, no AN */
                bp->link_params.req_line_speed = SPEED_10000;
                bp->port.advertising =
                        (ADVERTISED_10000baseT_Full |
            BNX2X_ERR("NVRAM config error. "
                      "Invalid link_config 0x%x"
                      " Autoneg not supported\n",
                      bp->port.link_config);

    case PORT_FEATURE_LINK_SPEED_10M_FULL:
        if (bp->port.supported & SUPPORTED_10baseT_Full) {
            bp->link_params.req_line_speed = SPEED_10;
            bp->port.advertising = (ADVERTISED_10baseT_Full |
            BNX2X_ERR("NVRAM config error. "
                      "Invalid link_config 0x%x"
                      " speed_cap_mask 0x%x\n",
                      bp->port.link_config,
                      bp->link_params.speed_cap_mask);

    case PORT_FEATURE_LINK_SPEED_10M_HALF:
        if (bp->port.supported & SUPPORTED_10baseT_Half) {
            bp->link_params.req_line_speed = SPEED_10;
            bp->link_params.req_duplex = DUPLEX_HALF;
            bp->port.advertising = (ADVERTISED_10baseT_Half |
            BNX2X_ERR("NVRAM config error. "
                      "Invalid link_config 0x%x"
                      " speed_cap_mask 0x%x\n",
                      bp->port.link_config,
                      bp->link_params.speed_cap_mask);

    case PORT_FEATURE_LINK_SPEED_100M_FULL:
        if (bp->port.supported & SUPPORTED_100baseT_Full) {
            bp->link_params.req_line_speed = SPEED_100;
            bp->port.advertising = (ADVERTISED_100baseT_Full |
            BNX2X_ERR("NVRAM config error. "
                      "Invalid link_config 0x%x"
                      " speed_cap_mask 0x%x\n",
                      bp->port.link_config,
                      bp->link_params.speed_cap_mask);

    case PORT_FEATURE_LINK_SPEED_100M_HALF:
        if (bp->port.supported & SUPPORTED_100baseT_Half) {
            bp->link_params.req_line_speed = SPEED_100;
            bp->link_params.req_duplex = DUPLEX_HALF;
            bp->port.advertising = (ADVERTISED_100baseT_Half |
            BNX2X_ERR("NVRAM config error. "
                      "Invalid link_config 0x%x"
                      " speed_cap_mask 0x%x\n",
                      bp->port.link_config,
                      bp->link_params.speed_cap_mask);

    case PORT_FEATURE_LINK_SPEED_1G:
        if (bp->port.supported & SUPPORTED_1000baseT_Full) {
            bp->link_params.req_line_speed = SPEED_1000;
            bp->port.advertising = (ADVERTISED_1000baseT_Full |
            BNX2X_ERR("NVRAM config error. "
                      "Invalid link_config 0x%x"
                      " speed_cap_mask 0x%x\n",
                      bp->port.link_config,
                      bp->link_params.speed_cap_mask);

    case PORT_FEATURE_LINK_SPEED_2_5G:
        if (bp->port.supported & SUPPORTED_2500baseX_Full) {
            bp->link_params.req_line_speed = SPEED_2500;
            bp->port.advertising = (ADVERTISED_2500baseX_Full |
            BNX2X_ERR("NVRAM config error. "
                      "Invalid link_config 0x%x"
                      " speed_cap_mask 0x%x\n",
                      bp->port.link_config,
                      bp->link_params.speed_cap_mask);

    case PORT_FEATURE_LINK_SPEED_10G_CX4:
    case PORT_FEATURE_LINK_SPEED_10G_KX4:
    case PORT_FEATURE_LINK_SPEED_10G_KR:
        if (bp->port.supported & SUPPORTED_10000baseT_Full) {
            bp->link_params.req_line_speed = SPEED_10000;
            bp->port.advertising = (ADVERTISED_10000baseT_Full |
            BNX2X_ERR("NVRAM config error. "
                      "Invalid link_config 0x%x"
                      " speed_cap_mask 0x%x\n",
                      bp->port.link_config,
                      bp->link_params.speed_cap_mask);

        BNX2X_ERR("NVRAM config error. "
                  "BAD link speed link_config 0x%x\n",
                  bp->port.link_config);
        bp->link_params.req_line_speed = SPEED_AUTO_NEG;
        bp->port.advertising = bp->port.supported;

    bp->link_params.req_flow_ctrl = (bp->port.link_config &
                                     PORT_FEATURE_FLOW_CONTROL_MASK);
    if ((bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
        !(bp->port.supported & SUPPORTED_Autoneg))
        bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;

    BNX2X_DEV_INFO("req_line_speed %d req_duplex %d req_flow_ctrl 0x%x"
                   " advertising 0x%x\n",
                   bp->link_params.req_line_speed,
                   bp->link_params.req_duplex,
                   bp->link_params.req_flow_ctrl, bp->port.advertising);
static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
    int port = BP_PORT(bp);

    bp->link_params.bp = bp;
    bp->link_params.port = port;

    bp->link_params.serdes_config =
        SHMEM_RD(bp, dev_info.port_hw_config[port].serdes_config);
    bp->link_params.lane_config =
        SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
    bp->link_params.ext_phy_config =
             dev_info.port_hw_config[port].external_phy_config);
    bp->link_params.speed_cap_mask =
             dev_info.port_hw_config[port].speed_capability_mask);

    bp->port.link_config =
        SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);

    BNX2X_DEV_INFO("serdes_config 0x%08x lane_config 0x%08x\n"
         KERN_INFO " ext_phy_config 0x%08x speed_cap_mask 0x%08x"
                   " link_config 0x%08x\n",
                   bp->link_params.serdes_config,
                   bp->link_params.lane_config,
                   bp->link_params.ext_phy_config,
                   bp->link_params.speed_cap_mask, bp->port.link_config);

    bp->link_params.switch_cfg = (bp->port.link_config &
                                  PORT_FEATURE_CONNECTED_SWITCH_MASK);
    bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);

    bnx2x_link_settings_requested(bp);

    val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
    val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
    bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
    bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
    bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
    bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
    bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
    bp->dev->dev_addr[5] = (u8)(val & 0xff);
    memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
    memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
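/*
 * Worked example (illustrative values, not taken from a real board): if
 * shmem holds mac_upper = 0x0010 and mac_lower = 0x18aabbcc, the unpacking
 * above produces dev_addr = 00:10:18:aa:bb:cc -- the upper 16 bits supply
 * bytes 0-1 and the lower 32 bits supply bytes 2-5, most significant byte
 * first.
 */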
static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
    int func = BP_FUNC(bp);

    bnx2x_get_common_hwinfo(bp);

    if (CHIP_IS_E1H(bp)) {
            SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);

        val = (SHMEM_RD(bp, mf_cfg.func_mf_config[func].e1hov_tag) &
               FUNC_MF_CFG_E1HOV_TAG_MASK);
        if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
                BNX2X_DEV_INFO("MF mode E1HOV for func %d is %d "
                               func, bp->e1hov, bp->e1hov);
                BNX2X_DEV_INFO("Single function mode\n");
                BNX2X_ERR("!!! No valid E1HOV for func %d,"
                          " aborting\n", func);

    if (!BP_NOMCP(bp)) {
        bnx2x_get_port_hwinfo(bp);

        bp->fw_seq = (SHMEM_RD(bp, func_mb[func].drv_mb_header) &
                      DRV_MSG_SEQ_NUMBER_MASK);
        BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);

        val2 = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_upper);
        val = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_lower);
        if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
            (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
            bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
            bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
            bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
            bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
            bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
            bp->dev->dev_addr[5] = (u8)(val & 0xff);
            memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
            memcpy(bp->dev->perm_addr, bp->dev->dev_addr,

        /* only supposed to happen on emulation/FPGA */
        BNX2X_ERR("warning random MAC workaround active\n");
        random_ether_addr(bp->dev->dev_addr);
        memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
static int __devinit bnx2x_init_bp(struct bnx2x *bp)
    int func = BP_FUNC(bp);

    /* Disable interrupt handling until HW is initialized */
    atomic_set(&bp->intr_sem, 1);

    mutex_init(&bp->port.phy_mutex);

    INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
    INIT_WORK(&bp->reset_task, bnx2x_reset_task);

    rc = bnx2x_get_hwinfo(bp);

    /* need to reset chip if undi was active */
        bnx2x_undi_unload(bp);

    if (CHIP_REV_IS_FPGA(bp))
        printk(KERN_ERR PFX "FPGA detected\n");

    if (BP_NOMCP(bp) && (func == 0))
               "MCP disabled, must load devices in order!\n");

        bp->flags &= ~TPA_ENABLE_FLAG;
        bp->dev->features &= ~NETIF_F_LRO;
        bp->flags |= TPA_ENABLE_FLAG;
        bp->dev->features |= NETIF_F_LRO;

    bp->tx_ring_size = MAX_TX_AVAIL;
    bp->rx_ring_size = MAX_RX_AVAIL;

    bp->timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
    bp->current_interval = (poll ? poll : bp->timer_interval);

    init_timer(&bp->timer);
    bp->timer.expires = jiffies + bp->current_interval;
    bp->timer.data = (unsigned long) bp;
    bp->timer.function = bnx2x_timer;
/*
 * ethtool service functions
 */

/* All ethtool functions called with rtnl_lock */

static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
    struct bnx2x *bp = netdev_priv(dev);

    cmd->supported = bp->port.supported;
    cmd->advertising = bp->port.advertising;

    if (netif_carrier_ok(dev)) {
        cmd->speed = bp->link_vars.line_speed;
        cmd->duplex = bp->link_vars.duplex;
        cmd->speed = bp->link_params.req_line_speed;
        cmd->duplex = bp->link_params.req_duplex;

        vn_max_rate = ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
                       FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
        if (vn_max_rate < cmd->speed)
            cmd->speed = vn_max_rate;

    if (bp->link_params.switch_cfg == SWITCH_CFG_10G) {
            XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);

        switch (ext_phy_type) {
        case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
        case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
        case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
        case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
        case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
            cmd->port = PORT_FIBRE;

        case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
            cmd->port = PORT_TP;

        case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
            BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
                      bp->link_params.ext_phy_config);

            DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
               bp->link_params.ext_phy_config);

        cmd->port = PORT_TP;

    cmd->phy_address = bp->port.phy_addr;
    cmd->transceiver = XCVR_INTERNAL;

    if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
        cmd->autoneg = AUTONEG_ENABLE;
        cmd->autoneg = AUTONEG_DISABLE;

    DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
       DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
       DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
       DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
       cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
       cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
       cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
    struct bnx2x *bp = netdev_priv(dev);

    DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
       DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
       DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
       DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
       cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
       cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
       cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);

    if (cmd->autoneg == AUTONEG_ENABLE) {
        if (!(bp->port.supported & SUPPORTED_Autoneg)) {
            DP(NETIF_MSG_LINK, "Autoneg not supported\n");

        /* advertise the requested speed and duplex if supported */
        cmd->advertising &= bp->port.supported;

        bp->link_params.req_line_speed = SPEED_AUTO_NEG;
        bp->link_params.req_duplex = DUPLEX_FULL;
        bp->port.advertising |= (ADVERTISED_Autoneg |

    } else { /* forced speed */
        /* advertise the requested speed and duplex if supported */
        switch (cmd->speed) {
            if (cmd->duplex == DUPLEX_FULL) {
                if (!(bp->port.supported &
                      SUPPORTED_10baseT_Full)) {
                       "10M full not supported\n");

                advertising = (ADVERTISED_10baseT_Full |
                if (!(bp->port.supported &
                      SUPPORTED_10baseT_Half)) {
                       "10M half not supported\n");

                advertising = (ADVERTISED_10baseT_Half |

            if (cmd->duplex == DUPLEX_FULL) {
                if (!(bp->port.supported &
                      SUPPORTED_100baseT_Full)) {
                       "100M full not supported\n");

                advertising = (ADVERTISED_100baseT_Full |
                if (!(bp->port.supported &
                      SUPPORTED_100baseT_Half)) {
                       "100M half not supported\n");

                advertising = (ADVERTISED_100baseT_Half |

            if (cmd->duplex != DUPLEX_FULL) {
                DP(NETIF_MSG_LINK, "1G half not supported\n");

            if (!(bp->port.supported & SUPPORTED_1000baseT_Full)) {
                DP(NETIF_MSG_LINK, "1G full not supported\n");

            advertising = (ADVERTISED_1000baseT_Full |

            if (cmd->duplex != DUPLEX_FULL) {
                   "2.5G half not supported\n");

            if (!(bp->port.supported & SUPPORTED_2500baseX_Full)) {
                   "2.5G full not supported\n");

            advertising = (ADVERTISED_2500baseX_Full |

            if (cmd->duplex != DUPLEX_FULL) {
                DP(NETIF_MSG_LINK, "10G half not supported\n");

            if (!(bp->port.supported & SUPPORTED_10000baseT_Full)) {
                DP(NETIF_MSG_LINK, "10G full not supported\n");

            advertising = (ADVERTISED_10000baseT_Full |

            DP(NETIF_MSG_LINK, "Unsupported speed\n");

        bp->link_params.req_line_speed = cmd->speed;
        bp->link_params.req_duplex = cmd->duplex;
        bp->port.advertising = advertising;

    DP(NETIF_MSG_LINK, "req_line_speed %d\n"
       DP_LEVEL " req_duplex %d advertising 0x%x\n",
       bp->link_params.req_line_speed, bp->link_params.req_duplex,
       bp->port.advertising);

    if (netif_running(dev)) {
        bnx2x_stats_handle(bp, STATS_EVENT_STOP);
#define PHY_FW_VER_LEN			10

static void bnx2x_get_drvinfo(struct net_device *dev,
                              struct ethtool_drvinfo *info)
    struct bnx2x *bp = netdev_priv(dev);
    u8 phy_fw_ver[PHY_FW_VER_LEN];

    strcpy(info->driver, DRV_MODULE_NAME);
    strcpy(info->version, DRV_MODULE_VERSION);

    phy_fw_ver[0] = '\0';
        bnx2x_acquire_phy_lock(bp);
        bnx2x_get_ext_phy_fw_version(&bp->link_params,
                                     (bp->state != BNX2X_STATE_CLOSED),
                                     phy_fw_ver, PHY_FW_VER_LEN);
        bnx2x_release_phy_lock(bp);

    snprintf(info->fw_version, 32, "BC:%d.%d.%d%s%s",
             (bp->common.bc_ver & 0xff0000) >> 16,
             (bp->common.bc_ver & 0xff00) >> 8,
             (bp->common.bc_ver & 0xff),
             ((phy_fw_ver[0] != '\0') ? " PHY:" : ""), phy_fw_ver);
    strcpy(info->bus_info, pci_name(bp->pdev));
    info->n_stats = BNX2X_NUM_STATS;
    info->testinfo_len = BNX2X_NUM_TESTS;
    info->eedump_len = bp->common.flash_size;
    info->regdump_len = 0;

static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
    struct bnx2x *bp = netdev_priv(dev);

    if (bp->flags & NO_WOL_FLAG) {

    wol->supported = WAKE_MAGIC;
        wol->wolopts = WAKE_MAGIC;

    memset(&wol->sopass, 0, sizeof(wol->sopass));

static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
    struct bnx2x *bp = netdev_priv(dev);

    if (wol->wolopts & ~WAKE_MAGIC)

    if (wol->wolopts & WAKE_MAGIC) {
        if (bp->flags & NO_WOL_FLAG)
static u32 bnx2x_get_msglevel(struct net_device *dev)
    struct bnx2x *bp = netdev_priv(dev);

    return bp->msglevel;

static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
    struct bnx2x *bp = netdev_priv(dev);

    if (capable(CAP_NET_ADMIN))
        bp->msglevel = level;

static int bnx2x_nway_reset(struct net_device *dev)
    struct bnx2x *bp = netdev_priv(dev);

    if (netif_running(dev)) {
        bnx2x_stats_handle(bp, STATS_EVENT_STOP);

static int bnx2x_get_eeprom_len(struct net_device *dev)
    struct bnx2x *bp = netdev_priv(dev);

    return bp->common.flash_size;

static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
    int port = BP_PORT(bp);

    /* adjust timeout for emulation/FPGA */
    count = NVRAM_TIMEOUT_COUNT;
    if (CHIP_REV_IS_SLOW(bp))

    /* request access to nvram interface */
    REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
           (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));

    for (i = 0; i < count*10; i++) {
        val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
        if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))

    if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
        DP(BNX2X_MSG_NVM, "cannot get access to nvram interface\n");

static int bnx2x_release_nvram_lock(struct bnx2x *bp)
    int port = BP_PORT(bp);

    /* adjust timeout for emulation/FPGA */
    count = NVRAM_TIMEOUT_COUNT;
    if (CHIP_REV_IS_SLOW(bp))

    /* relinquish nvram interface */
    REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
           (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));

    for (i = 0; i < count*10; i++) {
        val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
        if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)))

    if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
        DP(BNX2X_MSG_NVM, "cannot free access to nvram interface\n");
static void bnx2x_enable_nvram_access(struct bnx2x *bp)
    val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);

    /* enable both bits, even on read */
    REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
           (val | MCPR_NVM_ACCESS_ENABLE_EN |
                  MCPR_NVM_ACCESS_ENABLE_WR_EN));

static void bnx2x_disable_nvram_access(struct bnx2x *bp)
    val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);

    /* disable both bits, even after read */
    REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
           (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
                    MCPR_NVM_ACCESS_ENABLE_WR_EN)));

static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, u32 *ret_val,
    /* build the command word */
    cmd_flags |= MCPR_NVM_COMMAND_DOIT;

    /* need to clear DONE bit separately */
    REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);

    /* address of the NVRAM to read from */
    REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
           (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));

    /* issue a read command */
    REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);

    /* adjust timeout for emulation/FPGA */
    count = NVRAM_TIMEOUT_COUNT;
    if (CHIP_REV_IS_SLOW(bp))

    /* wait for completion */
    for (i = 0; i < count; i++) {
        val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
        if (val & MCPR_NVM_COMMAND_DONE) {
            val = REG_RD(bp, MCP_REG_MCPR_NVM_READ);
            /* we read nvram data in cpu order
             * but ethtool sees it as an array of bytes
             * converting to big-endian will do the work */
            val = cpu_to_be32(val);
static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
    if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
           "Invalid parameter: offset 0x%x buf_size 0x%x\n",

    if (offset + buf_size > bp->common.flash_size) {
        DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
                          " buf_size (0x%x) > flash_size (0x%x)\n",
           offset, buf_size, bp->common.flash_size);

    /* request access to nvram interface */
    rc = bnx2x_acquire_nvram_lock(bp);

    /* enable access to nvram interface */
    bnx2x_enable_nvram_access(bp);

    /* read the first word(s) */
    cmd_flags = MCPR_NVM_COMMAND_FIRST;
    while ((buf_size > sizeof(u32)) && (rc == 0)) {
        rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
        memcpy(ret_buf, &val, 4);

        /* advance to the next dword */
        offset += sizeof(u32);
        ret_buf += sizeof(u32);
        buf_size -= sizeof(u32);

        cmd_flags |= MCPR_NVM_COMMAND_LAST;
        rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
        memcpy(ret_buf, &val, 4);

    /* disable access to nvram interface */
    bnx2x_disable_nvram_access(bp);
    bnx2x_release_nvram_lock(bp);
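/*
 * Hypothetical usage sketch (buffer and offset are made up for illustration;
 * only the return code is checked):
 *
 *	u8 buf[8];
 *	int rc = bnx2x_nvram_read(bp, 0x100, buf, sizeof(buf));
 *	if (rc)
 *		BNX2X_ERR("nvram read failed rc %d\n", rc);
 *
 * Offset and length must both be dword aligned, as enforced by the
 * (offset & 0x03) || (buf_size & 0x03) check at the top of the function;
 * the first dword of a burst is flagged FIRST and the final one LAST.
 */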
static int bnx2x_get_eeprom(struct net_device *dev,
                            struct ethtool_eeprom *eeprom, u8 *eebuf)
    struct bnx2x *bp = netdev_priv(dev);

    if (!netif_running(dev))

    DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
       DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
       eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
       eeprom->len, eeprom->len);

    /* parameters already validated in ethtool_get_eeprom */

    rc = bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);

static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
    /* build the command word */
    cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR;

    /* need to clear DONE bit separately */
    REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);

    /* write the data */
    REG_WR(bp, MCP_REG_MCPR_NVM_WRITE, val);

    /* address of the NVRAM to write to */
    REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
           (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));

    /* issue the write command */
    REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);

    /* adjust timeout for emulation/FPGA */
    count = NVRAM_TIMEOUT_COUNT;
    if (CHIP_REV_IS_SLOW(bp))

    /* wait for completion */
    for (i = 0; i < count; i++) {
        val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
        if (val & MCPR_NVM_COMMAND_DONE) {
#define BYTE_OFFSET(offset)		(8 * (offset & 0x03))

static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
    if (offset + buf_size > bp->common.flash_size) {
        DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
                          " buf_size (0x%x) > flash_size (0x%x)\n",
           offset, buf_size, bp->common.flash_size);

    /* request access to nvram interface */
    rc = bnx2x_acquire_nvram_lock(bp);

    /* enable access to nvram interface */
    bnx2x_enable_nvram_access(bp);

    cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
    align_offset = (offset & ~0x03);
    rc = bnx2x_nvram_read_dword(bp, align_offset, &val, cmd_flags);

        val &= ~(0xff << BYTE_OFFSET(offset));
        val |= (*data_buf << BYTE_OFFSET(offset));

        /* nvram data is returned as an array of bytes
         * convert it back to cpu order */
        val = be32_to_cpu(val);

        rc = bnx2x_nvram_write_dword(bp, align_offset, val,

    /* disable access to nvram interface */
    bnx2x_disable_nvram_access(bp);
    bnx2x_release_nvram_lock(bp);
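/*
 * Worked example (illustration only): writing the single byte 0xAB at NVRAM
 * offset 0x1F6 gives align_offset = 0x1F4 and BYTE_OFFSET(0x1F6) = 16, so the
 * read-modify-write above clears bits 16-23 of the aligned dword and ORs in
 * (0xAB << 16) before writing the dword back -- only the addressed byte
 * changes, the three neighbouring bytes are preserved.
 */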
static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
    if (buf_size == 1)	/* ethtool */
        return bnx2x_nvram_write1(bp, offset, data_buf, buf_size);

    if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
           "Invalid parameter: offset 0x%x buf_size 0x%x\n",

    if (offset + buf_size > bp->common.flash_size) {
        DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
                          " buf_size (0x%x) > flash_size (0x%x)\n",
           offset, buf_size, bp->common.flash_size);

    /* request access to nvram interface */
    rc = bnx2x_acquire_nvram_lock(bp);

    /* enable access to nvram interface */
    bnx2x_enable_nvram_access(bp);

    cmd_flags = MCPR_NVM_COMMAND_FIRST;
    while ((written_so_far < buf_size) && (rc == 0)) {
        if (written_so_far == (buf_size - sizeof(u32)))
            cmd_flags |= MCPR_NVM_COMMAND_LAST;
        else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0)
            cmd_flags |= MCPR_NVM_COMMAND_LAST;
        else if ((offset % NVRAM_PAGE_SIZE) == 0)
            cmd_flags |= MCPR_NVM_COMMAND_FIRST;

        memcpy(&val, data_buf, 4);

        rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);

        /* advance to the next dword */
        offset += sizeof(u32);
        data_buf += sizeof(u32);
        written_so_far += sizeof(u32);

    /* disable access to nvram interface */
    bnx2x_disable_nvram_access(bp);
    bnx2x_release_nvram_lock(bp);
static int bnx2x_set_eeprom(struct net_device *dev,
                            struct ethtool_eeprom *eeprom, u8 *eebuf)
    struct bnx2x *bp = netdev_priv(dev);

    if (!netif_running(dev))

    DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
       DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
       eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
       eeprom->len, eeprom->len);

    /* parameters already validated in ethtool_set_eeprom */

    /* If the magic number is PHY (0x00504859) upgrade the PHY FW */
    if (eeprom->magic == 0x00504859)

            bnx2x_acquire_phy_lock(bp);
            rc = bnx2x_flash_download(bp, BP_PORT(bp),
                                      bp->link_params.ext_phy_config,
                                      (bp->state != BNX2X_STATE_CLOSED),
                                      eebuf, eeprom->len);
            if ((bp->state == BNX2X_STATE_OPEN) ||
                (bp->state == BNX2X_STATE_DISABLED)) {
                rc |= bnx2x_link_reset(&bp->link_params,
                rc |= bnx2x_phy_init(&bp->link_params,
            bnx2x_release_phy_lock(bp);

        } else /* Only the PMF can access the PHY */

        rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
static int bnx2x_get_coalesce(struct net_device *dev,
                              struct ethtool_coalesce *coal)
    struct bnx2x *bp = netdev_priv(dev);

    memset(coal, 0, sizeof(struct ethtool_coalesce));

    coal->rx_coalesce_usecs = bp->rx_ticks;
    coal->tx_coalesce_usecs = bp->tx_ticks;

static int bnx2x_set_coalesce(struct net_device *dev,
                              struct ethtool_coalesce *coal)
    struct bnx2x *bp = netdev_priv(dev);

    bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
    if (bp->rx_ticks > 3000)
        bp->rx_ticks = 3000;

    bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
    if (bp->tx_ticks > 0x3000)
        bp->tx_ticks = 0x3000;

    if (netif_running(dev))
        bnx2x_update_coalesce(bp);

static void bnx2x_get_ringparam(struct net_device *dev,
                                struct ethtool_ringparam *ering)
    struct bnx2x *bp = netdev_priv(dev);

    ering->rx_max_pending = MAX_RX_AVAIL;
    ering->rx_mini_max_pending = 0;
    ering->rx_jumbo_max_pending = 0;

    ering->rx_pending = bp->rx_ring_size;
    ering->rx_mini_pending = 0;
    ering->rx_jumbo_pending = 0;

    ering->tx_max_pending = MAX_TX_AVAIL;
    ering->tx_pending = bp->tx_ring_size;

static int bnx2x_set_ringparam(struct net_device *dev,
                               struct ethtool_ringparam *ering)
    struct bnx2x *bp = netdev_priv(dev);

    if ((ering->rx_pending > MAX_RX_AVAIL) ||
        (ering->tx_pending > MAX_TX_AVAIL) ||
        (ering->tx_pending <= MAX_SKB_FRAGS + 4))

    bp->rx_ring_size = ering->rx_pending;
    bp->tx_ring_size = ering->tx_pending;

    if (netif_running(dev)) {
        bnx2x_nic_unload(bp, UNLOAD_NORMAL);
        rc = bnx2x_nic_load(bp, LOAD_NORMAL);
static void bnx2x_get_pauseparam(struct net_device *dev,
                                 struct ethtool_pauseparam *epause)
    struct bnx2x *bp = netdev_priv(dev);

    epause->autoneg = (bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
                      (bp->link_params.req_line_speed == SPEED_AUTO_NEG);

    epause->rx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) ==
                        BNX2X_FLOW_CTRL_RX);
    epause->tx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX) ==
                        BNX2X_FLOW_CTRL_TX);

    DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
       DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
       epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);

static int bnx2x_set_pauseparam(struct net_device *dev,
                                struct ethtool_pauseparam *epause)
    struct bnx2x *bp = netdev_priv(dev);

    DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
       DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
       epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);

    bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;

    if (epause->rx_pause)
        bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_RX;

    if (epause->tx_pause)
        bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_TX;

    if (bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO)
        bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;

    if (epause->autoneg) {
        if (!(bp->port.supported & SUPPORTED_Autoneg)) {
            DP(NETIF_MSG_LINK, "autoneg not supported\n");

        if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
            bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;

       "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl);

    if (netif_running(dev)) {
        bnx2x_stats_handle(bp, STATS_EVENT_STOP);
static int bnx2x_set_flags(struct net_device *dev, u32 data)
{
	struct bnx2x *bp = netdev_priv(dev);
	int changed = 0;
	int rc = 0;

	/* TPA requires Rx CSUM offloading */
	if ((data & ETH_FLAG_LRO) && bp->rx_csum) {
		if (!(dev->features & NETIF_F_LRO)) {
			dev->features |= NETIF_F_LRO;
			bp->flags |= TPA_ENABLE_FLAG;
			changed = 1;
		}

	} else if (dev->features & NETIF_F_LRO) {
		dev->features &= ~NETIF_F_LRO;
		bp->flags &= ~TPA_ENABLE_FLAG;
		changed = 1;
	}

	if (changed && netif_running(dev)) {
		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
		rc = bnx2x_nic_load(bp, LOAD_NORMAL);
	}

	return rc;
}
static u32 bnx2x_get_rx_csum(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	return bp->rx_csum;
}

static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc = 0;

	bp->rx_csum = data;

	/* Disable TPA, when Rx CSUM is disabled. Otherwise all
	   TPA'ed packets will be discarded due to wrong TCP CSUM */
	if (!data) {
		u32 flags = ethtool_op_get_flags(dev);

		rc = bnx2x_set_flags(dev, (flags & ~ETH_FLAG_LRO));
	}

	return rc;
}
static int bnx2x_set_tso(struct net_device *dev, u32 data)
{
	if (data) {
		dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
		dev->features |= NETIF_F_TSO6;
	} else {
		dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
		dev->features &= ~NETIF_F_TSO6;
	}

	return 0;
}
static const struct {
	char string[ETH_GSTRING_LEN];
} bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = {
	{ "register_test (offline)" },
	{ "memory_test (offline)" },
	{ "loopback_test (offline)" },
	{ "nvram_test (online)" },
	{ "interrupt_test (online)" },
	{ "link_test (online)" },
	{ "idle check (online)" },
	{ "MC errors (online)" }
};

static int bnx2x_self_test_count(struct net_device *dev)
{
	return BNX2X_NUM_TESTS;
}
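
/* Register self-test: for each entry in reg_tbl below, save the register,
 * write a test pattern, read it back through the given mask and restore
 * the original value; offset1 is the per-port stride added for port 1.
 */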
static int bnx2x_test_registers(struct bnx2x *bp)
{
	int idx, i, rc = -ENODEV;
	u32 wr_val = 0;
	int port = BP_PORT(bp);
	static const struct {
		u32 offset0;
		u32 offset1;
		u32 mask;
	} reg_tbl[] = {
/* 0 */		{ BRB1_REG_PAUSE_LOW_THRESHOLD_0,      4, 0x000003ff },
		{ DORQ_REG_DB_ADDR0,                   4, 0xffffffff },
		{ HC_REG_AGG_INT_0,                    4, 0x000003ff },
		{ PBF_REG_MAC_IF0_ENABLE,              4, 0x00000001 },
		{ PBF_REG_P0_INIT_CRD,                 4, 0x000007ff },
		{ PRS_REG_CID_PORT_0,                  4, 0x00ffffff },
		{ PXP2_REG_PSWRQ_CDU0_L2P,             4, 0x000fffff },
		{ PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR,    8, 0x0003ffff },
		{ PXP2_REG_PSWRQ_TM0_L2P,              4, 0x000fffff },
		{ PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR,   8, 0x0003ffff },
/* 10 */	{ PXP2_REG_PSWRQ_TSDM0_L2P,            4, 0x000fffff },
		{ QM_REG_CONNNUM_0,                    4, 0x000fffff },
		{ TM_REG_LIN0_MAX_ACTIVE_CID,          4, 0x0003ffff },
		{ SRC_REG_KEYRSS0_0,                  40, 0xffffffff },
		{ SRC_REG_KEYRSS0_7,                  40, 0xffffffff },
		{ XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 4, 0x00000001 },
		{ XCM_REG_WU_DA_CNT_CMD00,             4, 0x00000003 },
		{ XCM_REG_GLB_DEL_ACK_MAX_CNT_0,       4, 0x000000ff },
		{ NIG_REG_EGRESS_MNG0_FIFO,           20, 0xffffffff },
		{ NIG_REG_LLH0_T_BIT,                  4, 0x00000001 },
/* 20 */	{ NIG_REG_EMAC0_IN_EN,                 4, 0x00000001 },
		{ NIG_REG_BMAC0_IN_EN,                 4, 0x00000001 },
		{ NIG_REG_XCM0_OUT_EN,                 4, 0x00000001 },
		{ NIG_REG_BRB0_OUT_EN,                 4, 0x00000001 },
		{ NIG_REG_LLH0_XCM_MASK,               4, 0x00000007 },
		{ NIG_REG_LLH0_ACPI_PAT_6_LEN,        68, 0x000000ff },
		{ NIG_REG_LLH0_ACPI_PAT_0_CRC,        68, 0xffffffff },
		{ NIG_REG_LLH0_DEST_MAC_0_0,         160, 0xffffffff },
		{ NIG_REG_LLH0_DEST_IP_0_1,          160, 0xffffffff },
		{ NIG_REG_LLH0_IPV4_IPV6_0,          160, 0x00000001 },
/* 30 */	{ NIG_REG_LLH0_DEST_UDP_0,           160, 0x0000ffff },
		{ NIG_REG_LLH0_DEST_TCP_0,           160, 0x0000ffff },
		{ NIG_REG_LLH0_VLAN_ID_0,            160, 0x00000fff },
		{ NIG_REG_XGXS_SERDES0_MODE_SEL,       4, 0x00000001 },
		{ NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0, 4, 0x00000001 },
		{ NIG_REG_STATUS_INTERRUPT_PORT0,      4, 0x07ffffff },
		{ NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST, 24, 0x00000001 },
		{ NIG_REG_SERDES0_CTRL_PHY_ADDR,      16, 0x0000001f },

		{ 0xffffffff, 0, 0x00000000 }
	};

	if (!netif_running(bp->dev))
		return rc;

	/* Repeat the test twice:
	   First by writing 0x00000000, second by writing 0xffffffff */
	for (idx = 0; idx < 2; idx++) {

		switch (idx) {
		case 0:
			wr_val = 0;
			break;
		case 1:
			wr_val = 0xffffffff;
			break;
		}

		for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) {
			u32 offset, mask, save_val, val;

			offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1;
			mask = reg_tbl[i].mask;

			save_val = REG_RD(bp, offset);

			REG_WR(bp, offset, wr_val);
			val = REG_RD(bp, offset);

			/* Restore the original register's value */
			REG_WR(bp, offset, save_val);

			/* verify that value is as expected */
			if ((val & mask) != (wr_val & mask))
				goto test_reg_exit;
		}
	}

	rc = 0;

test_reg_exit:
	return rc;
}
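
/* Memory self-test: read every word of the listed internal memories to
 * exercise them, then verify that the block parity status registers show
 * no unexpected bits for this chip type (separate E1 and E1H masks).
 */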
static int bnx2x_test_memory(struct bnx2x *bp)
{
	int i, j, rc = -ENODEV;
	u32 val;
	static const struct {
		u32 offset;
		int size;
	} mem_tbl[] = {
		{ CCM_REG_XX_DESCR_TABLE,   CCM_REG_XX_DESCR_TABLE_SIZE },
		{ CFC_REG_ACTIVITY_COUNTER, CFC_REG_ACTIVITY_COUNTER_SIZE },
		{ CFC_REG_LINK_LIST,        CFC_REG_LINK_LIST_SIZE },
		{ DMAE_REG_CMD_MEM,         DMAE_REG_CMD_MEM_SIZE },
		{ TCM_REG_XX_DESCR_TABLE,   TCM_REG_XX_DESCR_TABLE_SIZE },
		{ UCM_REG_XX_DESCR_TABLE,   UCM_REG_XX_DESCR_TABLE_SIZE },
		{ XCM_REG_XX_DESCR_TABLE,   XCM_REG_XX_DESCR_TABLE_SIZE },

		{ 0xffffffff, 0 }
	};
	static const struct {
		char *name;
		u32 offset;
		u32 e1_mask;
		u32 e1h_mask;
	} prty_tbl[] = {
		{ "CCM_PRTY_STS",  CCM_REG_CCM_PRTY_STS,   0x3ffc0, 0 },
		{ "CFC_PRTY_STS",  CFC_REG_CFC_PRTY_STS,   0x2,     0x2 },
		{ "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0,       0 },
		{ "TCM_PRTY_STS",  TCM_REG_TCM_PRTY_STS,   0x3ffc0, 0 },
		{ "UCM_PRTY_STS",  UCM_REG_UCM_PRTY_STS,   0x3ffc0, 0 },
		{ "XCM_PRTY_STS",  XCM_REG_XCM_PRTY_STS,   0x3ffc1, 0 },

		{ NULL, 0xffffffff, 0, 0 }
	};

	if (!netif_running(bp->dev))
		return rc;

	/* Go through all the memories */
	for (i = 0; mem_tbl[i].offset != 0xffffffff; i++)
		for (j = 0; j < mem_tbl[i].size; j++)
			REG_RD(bp, mem_tbl[i].offset + j*4);

	/* Check the parity status */
	for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
		val = REG_RD(bp, prty_tbl[i].offset);
		if ((CHIP_IS_E1(bp) && (val & ~(prty_tbl[i].e1_mask))) ||
		    (CHIP_IS_E1H(bp) && (val & ~(prty_tbl[i].e1h_mask)))) {
			DP(NETIF_MSG_HW,
			   "%s is 0x%x\n", prty_tbl[i].name, val);
			goto test_mem_exit;
		}
	}

	rc = 0;

test_mem_exit:
	return rc;
}
static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up)
{
	int cnt = 1000;

	if (link_up)
		while (bnx2x_link_test(bp) && cnt--)
			msleep(10);
}
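
/* Run one loopback iteration: bring the link up in BMAC or XGXS loopback,
 * build a single self-addressed test frame, post it on fastpath 0 and
 * verify that the same bytes come back on the Rx completion queue.
 */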
static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
{
	unsigned int pkt_size, num_pkts, i;
	struct sk_buff *skb;
	unsigned char *packet;
	struct bnx2x_fastpath *fp = &bp->fp[0];
	u16 tx_start_idx, tx_idx;
	u16 rx_start_idx, rx_idx;
	u16 pkt_prod;
	struct sw_tx_bd *tx_buf;
	struct eth_tx_bd *tx_bd;
	dma_addr_t mapping;
	union eth_rx_cqe *cqe;
	u8 cqe_fp_flags;
	struct sw_rx_bd *rx_buf;
	u16 len;
	int rc = -ENODEV;

	if (loopback_mode == BNX2X_MAC_LOOPBACK) {
		bp->link_params.loopback_mode = LOOPBACK_BMAC;
		bnx2x_acquire_phy_lock(bp);
		bnx2x_phy_init(&bp->link_params, &bp->link_vars);
		bnx2x_release_phy_lock(bp);

	} else if (loopback_mode == BNX2X_PHY_LOOPBACK) {
		bp->link_params.loopback_mode = LOOPBACK_XGXS_10;
		bnx2x_acquire_phy_lock(bp);
		bnx2x_phy_init(&bp->link_params, &bp->link_vars);
		bnx2x_release_phy_lock(bp);
		/* wait until link state is restored */
		bnx2x_wait_for_link(bp, link_up);

	} else
		return -EINVAL;

	/* prepare the loopback packet */
	pkt_size = (((bp->dev->mtu < ETH_MAX_PACKET_SIZE) ?
		     bp->dev->mtu : ETH_MAX_PACKET_SIZE) + ETH_HLEN);
	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
	if (!skb) {
		rc = -ENOMEM;
		goto test_loopback_exit;
	}
	packet = skb_put(skb, pkt_size);
	memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
	memset(packet + ETH_ALEN, 0, (ETH_HLEN - ETH_ALEN));
	for (i = ETH_HLEN; i < pkt_size; i++)
		packet[i] = (unsigned char) (i & 0xff);

	/* send the loopback packet */
	num_pkts = 0;
	tx_start_idx = le16_to_cpu(*fp->tx_cons_sb);
	rx_start_idx = le16_to_cpu(*fp->rx_cons_sb);

	pkt_prod = fp->tx_pkt_prod++;
	tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
	tx_buf->first_bd = fp->tx_bd_prod;
	tx_buf->skb = skb;

	tx_bd = &fp->tx_desc_ring[TX_BD(fp->tx_bd_prod)];
	mapping = pci_map_single(bp->pdev, skb->data,
				 skb_headlen(skb), PCI_DMA_TODEVICE);
	tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
	tx_bd->nbd = cpu_to_le16(1);
	tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));
	tx_bd->vlan = cpu_to_le16(pkt_prod);
	tx_bd->bd_flags.as_bitfield = (ETH_TX_BD_FLAGS_START_BD |
				       ETH_TX_BD_FLAGS_END_BD);
	tx_bd->general_data = ((UNICAST_ADDRESS <<
				ETH_TX_BD_ETH_ADDR_TYPE_SHIFT) | 1);

	fp->hw_tx_prods->bds_prod =
		cpu_to_le16(le16_to_cpu(fp->hw_tx_prods->bds_prod) + 1);
	mb(); /* FW restriction: must not reorder writing nbd and packets */
	fp->hw_tx_prods->packets_prod =
		cpu_to_le32(le32_to_cpu(fp->hw_tx_prods->packets_prod) + 1);
	DOORBELL(bp, FP_IDX(fp), 0);

	num_pkts++;
	fp->tx_bd_prod++;
	bp->dev->trans_start = jiffies;

	udelay(100);

	tx_idx = le16_to_cpu(*fp->tx_cons_sb);
	if (tx_idx != tx_start_idx + num_pkts)
		goto test_loopback_exit;

	rx_idx = le16_to_cpu(*fp->rx_cons_sb);
	if (rx_idx != rx_start_idx + num_pkts)
		goto test_loopback_exit;

	/* check the received packet */
	cqe = &fp->rx_comp_ring[RCQ_BD(fp->rx_comp_cons)];
	cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
	if (CQE_TYPE(cqe_fp_flags) || (cqe_fp_flags & ETH_RX_ERROR_FALGS))
		goto test_loopback_rx_exit;

	len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
	if (len != pkt_size)
		goto test_loopback_rx_exit;

	rx_buf = &fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)];
	skb = rx_buf->skb;
	skb_reserve(skb, cqe->fast_path_cqe.placement_offset);
	for (i = ETH_HLEN; i < pkt_size; i++)
		if (*(skb->data + i) != (unsigned char) (i & 0xff))
			goto test_loopback_rx_exit;

	rc = 0;

test_loopback_rx_exit:

	fp->rx_bd_cons = NEXT_RX_IDX(fp->rx_bd_cons);
	fp->rx_bd_prod = NEXT_RX_IDX(fp->rx_bd_prod);
	fp->rx_comp_cons = NEXT_RCQ_IDX(fp->rx_comp_cons);
	fp->rx_comp_prod = NEXT_RCQ_IDX(fp->rx_comp_prod);

	/* Update producers */
	bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
			     fp->rx_sge_prod);

test_loopback_exit:
	bp->link_params.loopback_mode = LOOPBACK_NONE;

	return rc;
}
static int bnx2x_test_loopback(struct bnx2x *bp, u8 link_up)
{
	int rc = 0;

	if (!netif_running(bp->dev))
		return BNX2X_LOOPBACK_FAILED;

	bnx2x_netif_stop(bp, 1);

	if (bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK, link_up)) {
		DP(NETIF_MSG_PROBE, "MAC loopback failed\n");
		rc |= BNX2X_MAC_LOOPBACK_FAILED;
	}

	if (bnx2x_run_loopback(bp, BNX2X_PHY_LOOPBACK, link_up)) {
		DP(NETIF_MSG_PROBE, "PHY loopback failed\n");
		rc |= BNX2X_PHY_LOOPBACK_FAILED;
	}

	bnx2x_netif_start(bp);

	return rc;
}
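
/* NVRAM self-test: each protected region listed below carries a trailing
 * CRC32, so the little-endian CRC computed over the whole region must
 * equal the fixed CRC32 residual.
 */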
#define CRC32_RESIDUAL			0xdebb20e3

static int bnx2x_test_nvram(struct bnx2x *bp)
{
	static const struct {
		int offset;
		int size;
	} nvram_tbl[] = {
		{     0,  0x14 }, /* bootstrap */
		{  0x14,  0xec }, /* dir */
		{ 0x100, 0x350 }, /* manuf_info */
		{ 0x450,  0xf0 }, /* feature_info */
		{ 0x640,  0x64 }, /* upgrade_key_info */
		{ 0x708,  0x70 }, /* manuf_key_info */
		{     0,     0 }
	};
	__be32 buf[0x350 / 4];
	u8 *data = (u8 *)buf;
	int i, rc;
	u32 magic, csum;

	rc = bnx2x_nvram_read(bp, 0, data, 4);
	if (rc) {
		DP(NETIF_MSG_PROBE, "magic value read (rc -%d)\n", -rc);
		goto test_nvram_exit;
	}

	magic = be32_to_cpu(buf[0]);
	if (magic != 0x669955aa) {
		DP(NETIF_MSG_PROBE, "magic value (0x%08x)\n", magic);
		rc = -ENODEV;
		goto test_nvram_exit;
	}

	for (i = 0; nvram_tbl[i].size; i++) {

		rc = bnx2x_nvram_read(bp, nvram_tbl[i].offset, data,
				      nvram_tbl[i].size);
		if (rc) {
			DP(NETIF_MSG_PROBE,
			   "nvram_tbl[%d] read data (rc -%d)\n", i, -rc);
			goto test_nvram_exit;
		}

		csum = ether_crc_le(nvram_tbl[i].size, data);
		if (csum != CRC32_RESIDUAL) {
			DP(NETIF_MSG_PROBE,
			   "nvram_tbl[%d] csum value (0x%08x)\n", i, csum);
			rc = -ENODEV;
			goto test_nvram_exit;
		}
	}

test_nvram_exit:
	return rc;
}
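
/* Interrupt self-test: post a harmless SET_MAC ramrod on the slowpath and
 * wait for its completion to prove that interrupts are being delivered.
 */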
static int bnx2x_test_intr(struct bnx2x *bp)
{
	struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
	int i, rc;

	if (!netif_running(bp->dev))
		return -ENODEV;

	config->hdr.length_6b = 0;
	if (CHIP_IS_E1(bp))
		config->hdr.offset = (BP_PORT(bp) ? 32 : 0);
	else
		config->hdr.offset = BP_FUNC(bp);
	config->hdr.client_id = BP_CL_ID(bp);
	config->hdr.reserved1 = 0;

	rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
			   U64_HI(bnx2x_sp_mapping(bp, mac_config)),
			   U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
	if (rc == 0) {
		bp->set_mac_pending++;
		for (i = 0; i < 10; i++) {
			if (!bp->set_mac_pending)
				break;
			msleep_interruptible(10);
		}
		if (i == 10)
			rc = -ENODEV;
	}

	return rc;
}
static void bnx2x_self_test(struct net_device *dev,
			    struct ethtool_test *etest, u64 *buf)
{
	struct bnx2x *bp = netdev_priv(dev);

	memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS);

	if (!netif_running(dev))
		return;

	/* offline tests are not supported in MF mode */
	if (IS_E1HMF(bp))
		etest->flags &= ~ETH_TEST_FL_OFFLINE;

	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		u8 link_up;

		link_up = bp->link_vars.link_up;
		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
		bnx2x_nic_load(bp, LOAD_DIAG);
		/* wait until link state is restored */
		bnx2x_wait_for_link(bp, link_up);

		if (bnx2x_test_registers(bp) != 0) {
			buf[0] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		if (bnx2x_test_memory(bp) != 0) {
			buf[1] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		buf[2] = bnx2x_test_loopback(bp, link_up);
		if (buf[2] != 0)
			etest->flags |= ETH_TEST_FL_FAILED;

		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
		bnx2x_nic_load(bp, LOAD_NORMAL);
		/* wait until link state is restored */
		bnx2x_wait_for_link(bp, link_up);
	}
	if (bnx2x_test_nvram(bp) != 0) {
		buf[3] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	if (bnx2x_test_intr(bp) != 0) {
		buf[4] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	if (bnx2x_link_test(bp) != 0) {
		buf[5] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	buf[7] = bnx2x_mc_assert(bp);
	if (buf[7] != 0)
		etest->flags |= ETH_TEST_FL_FAILED;

#ifdef BNX2X_EXTRA_DEBUG
	bnx2x_panic_dump(bp);
#endif
}
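
/* ethtool statistics table: each entry gives the offset of the counter in
 * bp->eth_stats (in u32 units), its size (4 or 8 bytes), whether it is a
 * per-port or per-function counter, and the string reported to ethtool.
 */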
static const struct {
	long offset;
	int size;
	u32 flags;
#define STATS_FLAGS_PORT		1
#define STATS_FLAGS_FUNC		2
	u8 string[ETH_GSTRING_LEN];
} bnx2x_stats_arr[BNX2X_NUM_STATS] = {
/* 1 */	{ STATS_OFFSET32(valid_bytes_received_hi),
				8, STATS_FLAGS_FUNC, "rx_bytes" },
	{ STATS_OFFSET32(error_bytes_received_hi),
				8, STATS_FLAGS_FUNC, "rx_error_bytes" },
	{ STATS_OFFSET32(total_bytes_transmitted_hi),
				8, STATS_FLAGS_FUNC, "tx_bytes" },
	{ STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
				8, STATS_FLAGS_PORT, "tx_error_bytes" },
	{ STATS_OFFSET32(total_unicast_packets_received_hi),
				8, STATS_FLAGS_FUNC, "rx_ucast_packets" },
	{ STATS_OFFSET32(total_multicast_packets_received_hi),
				8, STATS_FLAGS_FUNC, "rx_mcast_packets" },
	{ STATS_OFFSET32(total_broadcast_packets_received_hi),
				8, STATS_FLAGS_FUNC, "rx_bcast_packets" },
	{ STATS_OFFSET32(total_unicast_packets_transmitted_hi),
				8, STATS_FLAGS_FUNC, "tx_packets" },
	{ STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
				8, STATS_FLAGS_PORT, "tx_mac_errors" },
/* 10 */{ STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
				8, STATS_FLAGS_PORT, "tx_carrier_errors" },
	{ STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
				8, STATS_FLAGS_PORT, "rx_crc_errors" },
	{ STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
				8, STATS_FLAGS_PORT, "rx_align_errors" },
	{ STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
				8, STATS_FLAGS_PORT, "tx_single_collisions" },
	{ STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
				8, STATS_FLAGS_PORT, "tx_multi_collisions" },
	{ STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
				8, STATS_FLAGS_PORT, "tx_deferred" },
	{ STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
				8, STATS_FLAGS_PORT, "tx_excess_collisions" },
	{ STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
				8, STATS_FLAGS_PORT, "tx_late_collisions" },
	{ STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
				8, STATS_FLAGS_PORT, "tx_total_collisions" },
	{ STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
				8, STATS_FLAGS_PORT, "rx_fragments" },
/* 20 */{ STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
				8, STATS_FLAGS_PORT, "rx_jabbers" },
	{ STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
				8, STATS_FLAGS_PORT, "rx_undersize_packets" },
	{ STATS_OFFSET32(jabber_packets_received),
				4, STATS_FLAGS_FUNC, "rx_oversize_packets" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
				8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
			8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
			8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
			8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
			8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
	{ STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
			8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
	{ STATS_OFFSET32(etherstatspktsover1522octets_hi),
			8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
/* 30 */{ STATS_OFFSET32(rx_stat_xonpauseframesreceived_hi),
				8, STATS_FLAGS_PORT, "rx_xon_frames" },
	{ STATS_OFFSET32(rx_stat_xoffpauseframesreceived_hi),
				8, STATS_FLAGS_PORT, "rx_xoff_frames" },
	{ STATS_OFFSET32(tx_stat_outxonsent_hi),
				8, STATS_FLAGS_PORT, "tx_xon_frames" },
	{ STATS_OFFSET32(tx_stat_outxoffsent_hi),
				8, STATS_FLAGS_PORT, "tx_xoff_frames" },
	{ STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
				8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
	{ STATS_OFFSET32(mac_filter_discard),
				4, STATS_FLAGS_PORT, "rx_filtered_packets" },
	{ STATS_OFFSET32(no_buff_discard),
				4, STATS_FLAGS_FUNC, "rx_discards" },
	{ STATS_OFFSET32(xxoverflow_discard),
				4, STATS_FLAGS_PORT, "rx_fw_discards" },
	{ STATS_OFFSET32(brb_drop_hi),
				8, STATS_FLAGS_PORT, "brb_discard" },
	{ STATS_OFFSET32(brb_truncate_hi),
				8, STATS_FLAGS_PORT, "brb_truncate" },
/* 40 */{ STATS_OFFSET32(rx_err_discard_pkt),
				4, STATS_FLAGS_FUNC, "rx_phy_ip_err_discards"},
	{ STATS_OFFSET32(rx_skb_alloc_failed),
				4, STATS_FLAGS_FUNC, "rx_skb_alloc_discard" },
/* 42 */{ STATS_OFFSET32(hw_csum_err),
				4, STATS_FLAGS_FUNC, "rx_csum_offload_errors" }
};

#define IS_NOT_E1HMF_STAT(bp, i) \
		(IS_E1HMF(bp) && (bnx2x_stats_arr[i].flags & STATS_FLAGS_PORT))
static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
{
	struct bnx2x *bp = netdev_priv(dev);
	int i, j;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
			if (IS_NOT_E1HMF_STAT(bp, i))
				continue;
			strcpy(buf + j*ETH_GSTRING_LEN,
			       bnx2x_stats_arr[i].string);
			j++;
		}
		break;

	case ETH_SS_TEST:
		memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr));
		break;
	}
}

static int bnx2x_get_stats_count(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	int i, num_stats = 0;

	for (i = 0; i < BNX2X_NUM_STATS; i++) {
		if (IS_NOT_E1HMF_STAT(bp, i))
			continue;
		num_stats++;
	}
	return num_stats;
}
static void bnx2x_get_ethtool_stats(struct net_device *dev,
				    struct ethtool_stats *stats, u64 *buf)
{
	struct bnx2x *bp = netdev_priv(dev);
	u32 *hw_stats = (u32 *)&bp->eth_stats;
	int i, j;

	for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
		if (IS_NOT_E1HMF_STAT(bp, i))
			continue;

		if (bnx2x_stats_arr[i].size == 0) {
			/* skip this counter */
			buf[j] = 0;
			j++;
			continue;
		}
		if (bnx2x_stats_arr[i].size == 4) {
			/* 4-byte counter */
			buf[j] = (u64) *(hw_stats + bnx2x_stats_arr[i].offset);
			j++;
			continue;
		}
		/* 8-byte counter */
		buf[j] = HILO_U64(*(hw_stats + bnx2x_stats_arr[i].offset),
				  *(hw_stats + bnx2x_stats_arr[i].offset + 1));
		j++;
	}
}
static int bnx2x_phys_id(struct net_device *dev, u32 data)
{
	struct bnx2x *bp = netdev_priv(dev);
	int port = BP_PORT(bp);
	int i;

	if (!netif_running(dev))
		return 0;

	if (data == 0)
		data = 2;

	for (i = 0; i < (data * 2); i++) {
		if ((i % 2) == 0)
			bnx2x_set_led(bp, port, LED_MODE_OPER, SPEED_1000,
				      bp->link_params.hw_led_mode,
				      bp->link_params.chip_id);
		else
			bnx2x_set_led(bp, port, LED_MODE_OFF, 0,
				      bp->link_params.hw_led_mode,
				      bp->link_params.chip_id);

		msleep_interruptible(500);
		if (signal_pending(current))
			break;
	}

	if (bp->link_vars.link_up)
		bnx2x_set_led(bp, port, LED_MODE_OPER,
			      bp->link_vars.line_speed,
			      bp->link_params.hw_led_mode,
			      bp->link_params.chip_id);

	return 0;
}
static struct ethtool_ops bnx2x_ethtool_ops = {
	.get_settings		= bnx2x_get_settings,
	.set_settings		= bnx2x_set_settings,
	.get_drvinfo		= bnx2x_get_drvinfo,
	.get_wol		= bnx2x_get_wol,
	.set_wol		= bnx2x_set_wol,
	.get_msglevel		= bnx2x_get_msglevel,
	.set_msglevel		= bnx2x_set_msglevel,
	.nway_reset		= bnx2x_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_eeprom_len		= bnx2x_get_eeprom_len,
	.get_eeprom		= bnx2x_get_eeprom,
	.set_eeprom		= bnx2x_set_eeprom,
	.get_coalesce		= bnx2x_get_coalesce,
	.set_coalesce		= bnx2x_set_coalesce,
	.get_ringparam		= bnx2x_get_ringparam,
	.set_ringparam		= bnx2x_set_ringparam,
	.get_pauseparam		= bnx2x_get_pauseparam,
	.set_pauseparam		= bnx2x_set_pauseparam,
	.get_rx_csum		= bnx2x_get_rx_csum,
	.set_rx_csum		= bnx2x_set_rx_csum,
	.get_tx_csum		= ethtool_op_get_tx_csum,
	.set_tx_csum		= ethtool_op_set_tx_hw_csum,
	.set_flags		= bnx2x_set_flags,
	.get_flags		= ethtool_op_get_flags,
	.get_sg			= ethtool_op_get_sg,
	.set_sg			= ethtool_op_set_sg,
	.get_tso		= ethtool_op_get_tso,
	.set_tso		= bnx2x_set_tso,
	.self_test_count	= bnx2x_self_test_count,
	.self_test		= bnx2x_self_test,
	.get_strings		= bnx2x_get_strings,
	.phys_id		= bnx2x_phys_id,
	.get_stats_count	= bnx2x_get_stats_count,
	.get_ethtool_stats	= bnx2x_get_ethtool_stats,
};

/* end of ethtool_ops */

/****************************************************************************
* General service functions
****************************************************************************/
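
/* Set the device power state (D0/D3hot) through the function's PCI power
 * management control register.
 */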
static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
{
	u16 pmcsr;

	pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);

	switch (state) {
	case PCI_D0:
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
				      ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
				       PCI_PM_CTRL_PME_STATUS));

		if (pmcsr & PCI_PM_CTRL_STATE_MASK)
			/* delay required during transition out of D3hot */
			msleep(20);
		break;

	case PCI_D3hot:
		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
		pmcsr |= 3;

		if (bp->wol)
			pmcsr |= PCI_PM_CTRL_PME_ENABLE;

		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
				      pmcsr);

		/* No more memory access after this point until
		 * device is brought back to D0.
		 */
		break;

	default:
		return -EINVAL;
	}
	return 0;
}
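
/* Rx work is pending when the Rx completion consumer in the status block
 * differs from the driver's local copy; the on-chip index skips the last
 * entry of each page, hence the MAX_RCQ_DESC_CNT adjustment.
 */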
static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp)
{
	u16 rx_cons_sb;

	/* Tell compiler that status block fields can change */
	barrier();
	rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
	if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
		rx_cons_sb++;
	return (fp->rx_comp_cons != rx_cons_sb);
}
/*
 * net_device service functions
 */

static int bnx2x_poll(struct napi_struct *napi, int budget)
{
	struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
						 napi);
	struct bnx2x *bp = fp->bp;
	int work_done = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		goto poll_panic;
#endif

	prefetch(fp->tx_buf_ring[TX_BD(fp->tx_pkt_cons)].skb);
	prefetch(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb);
	prefetch((char *)(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb) + 256);

	bnx2x_update_fpsb_idx(fp);

	if (bnx2x_has_tx_work(fp))
		bnx2x_tx_int(fp, budget);

	if (bnx2x_has_rx_work(fp))
		work_done = bnx2x_rx_int(fp, budget);
	rmb(); /* BNX2X_HAS_WORK() reads the status block */

	/* must not complete if we consumed full budget */
	if ((work_done < budget) && !BNX2X_HAS_WORK(fp)) {

#ifdef BNX2X_STOP_ON_ERROR
poll_panic:
#endif
		netif_rx_complete(napi);

		bnx2x_ack_sb(bp, FP_SB_ID(fp), USTORM_ID,
			     le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1);
		bnx2x_ack_sb(bp, FP_SB_ID(fp), CSTORM_ID,
			     le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1);
	}
	return work_done;
}
/* we split the first BD into headers and data BDs
 * to ease the pain of our fellow microcode engineers
 * we use one mapping for both BDs
 * So far this has only been observed to happen
 * in Other Operating Systems(TM)
 */
static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
				   struct bnx2x_fastpath *fp,
				   struct eth_tx_bd **tx_bd, u16 hlen,
				   u16 bd_prod, int nbd)
{
	struct eth_tx_bd *h_tx_bd = *tx_bd;
	struct eth_tx_bd *d_tx_bd;
	dma_addr_t mapping;
	int old_len = le16_to_cpu(h_tx_bd->nbytes);

	/* first fix first BD */
	h_tx_bd->nbd = cpu_to_le16(nbd);
	h_tx_bd->nbytes = cpu_to_le16(hlen);

	DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
	   "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
	   h_tx_bd->addr_lo, h_tx_bd->nbd);

	/* now get a new data BD
	 * (after the pbd) and fill it */
	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
	d_tx_bd = &fp->tx_desc_ring[bd_prod];

	mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
			   le32_to_cpu(h_tx_bd->addr_lo)) + hlen;

	d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
	d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);

	/* this marks the BD as one that has no individual mapping
	 * the FW ignores this flag in a BD not marked start
	 */
	d_tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_SW_LSO;
	DP(NETIF_MSG_TX_QUEUED,
	   "TSO split data size is %d (%x:%x)\n",
	   d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);

	/* update tx_bd for marking the last BD flag */
	*tx_bd = d_tx_bd;

	return bd_prod;
}
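
/* Adjust a pseudo checksum that was computed starting "fix" bytes away
 * from the transport header so that it covers the transport header
 * exactly (workaround for the HW checksum offload start offset).
 */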
static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
{
	if (fix > 0)
		csum = (u16) ~csum_fold(csum_sub(csum,
				csum_partial(t_header - fix, fix, 0)));

	else if (fix < 0)
		csum = (u16) ~csum_fold(csum_add(csum,
				csum_partial(t_header, -fix, 0)));

	return swab16(csum);
}
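
/* Classify an outgoing skb into the XMIT_* flags used by the Tx path:
 * which IP version needs checksumming, whether the payload is TCP and
 * whether GSO (LSO) is requested.
 */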
static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
{
	u32 rc;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		rc = XMIT_PLAIN;

	else {
		if (skb->protocol == ntohs(ETH_P_IPV6)) {
			rc = XMIT_CSUM_V6;
			if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
				rc |= XMIT_CSUM_TCP;

		} else {
			rc = XMIT_CSUM_V4;
			if (ip_hdr(skb)->protocol == IPPROTO_TCP)
				rc |= XMIT_CSUM_TCP;
		}
	}

	if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
		rc |= XMIT_GSO_V4;

	else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
		rc |= XMIT_GSO_V6;

	return rc;
}
#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
/* check if packet requires linearization (packet is too fragmented) */
static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
			     u32 xmit_type)
{
	int to_copy = 0;
	int hlen = 0;
	int first_bd_sz = 0;

	/* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
	if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {

		if (xmit_type & XMIT_GSO) {
			unsigned short lso_mss = skb_shinfo(skb)->gso_size;
			/* Check if LSO packet needs to be copied:
			   3 = 1 (for headers BD) + 2 (for PBD and last BD) */
			int wnd_size = MAX_FETCH_BD - 3;
			/* Number of windows to check */
			int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
			int wnd_idx = 0;
			int frag_idx = 0;
			u32 wnd_sum = 0;

			/* Headers length */
			hlen = (int)(skb_transport_header(skb) - skb->data) +
				tcp_hdrlen(skb);

			/* Amount of data (w/o headers) on linear part of SKB*/
			first_bd_sz = skb_headlen(skb) - hlen;

			wnd_sum = first_bd_sz;

			/* Calculate the first sum - it's special */
			for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
				wnd_sum +=
					skb_shinfo(skb)->frags[frag_idx].size;

			/* If there was data on linear skb data - check it */
			if (first_bd_sz > 0) {
				if (unlikely(wnd_sum < lso_mss)) {
					to_copy = 1;
					goto exit_lbl;
				}

				wnd_sum -= first_bd_sz;
			}

			/* Others are easier: run through the frag list and
			   check all windows */
			for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
				wnd_sum +=
			  skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;

				if (unlikely(wnd_sum < lso_mss)) {
					to_copy = 1;
					break;
				}
				wnd_sum -=
					skb_shinfo(skb)->frags[wnd_idx].size;
			}

		} else {
			/* in non-LSO too fragmented packet should always
			   be linearized */
			to_copy = 1;
		}
	}

exit_lbl:
	if (unlikely(to_copy))
		DP(NETIF_MSG_TX_QUEUED,
		   "Linearization IS REQUIRED for %s packet. "
		   "num_frags %d  hlen %d  first_bd_sz %d\n",
		   (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
		   skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);

	return to_copy;
}
#endif
/* called with netif_tx_lock
 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
 * netif_wake_queue()
 */
static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct bnx2x_fastpath *fp;
	struct sw_tx_bd *tx_buf;
	struct eth_tx_bd *tx_bd;
	struct eth_tx_parse_bd *pbd = NULL;
	u16 pkt_prod, bd_prod;
	int nbd, fp_index;
	dma_addr_t mapping;
	u32 xmit_type = bnx2x_xmit_type(bp, skb);
	int vlan_off = (bp->e1hov ? 4 : 0);
	int i;
	u8 hlen = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return NETDEV_TX_BUSY;
#endif

	fp_index = (smp_processor_id() % bp->num_queues);
	fp = &bp->fp[fp_index];

	if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) {
		bp->eth_stats.driver_xoff++,
		netif_stop_queue(dev);
		BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
		return NETDEV_TX_BUSY;
	}

	DP(NETIF_MSG_TX_QUEUED, "SKB: summed %x  protocol %x  protocol(%x,%x)"
	   "  gso type %x  xmit_type %x\n",
	   skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
	   ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);

#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
	/* First, check if we need to linearize the skb
	   (due to FW restrictions) */
	if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
		/* Statistics of linearization */
		bp->lin_cnt++;
		if (skb_linearize(skb) != 0) {
			DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
			   "silently dropping this SKB\n");
			dev_kfree_skb_any(skb);
			return NETDEV_TX_OK;
		}
	}
#endif

	/*
	Please read carefully. First we use one BD which we mark as start,
	then for TSO or xsum we have a parsing info BD,
	and only then we have the rest of the TSO BDs.
	(don't forget to mark the last one as last,
	and to unmap only AFTER you write to the BD ...)
	And above all, all pdb sizes are in words - NOT DWORDS!
	*/

	pkt_prod = fp->tx_pkt_prod++;
	bd_prod = TX_BD(fp->tx_bd_prod);

	/* get a tx_buf and first BD */
	tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
	tx_bd = &fp->tx_desc_ring[bd_prod];

	tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
	tx_bd->general_data = (UNICAST_ADDRESS <<
			       ETH_TX_BD_ETH_ADDR_TYPE_SHIFT);
	/* header nbd */
	tx_bd->general_data |= (1 << ETH_TX_BD_HDR_NBDS_SHIFT);

	/* remember the first BD of the packet */
	tx_buf->first_bd = fp->tx_bd_prod;
	tx_buf->skb = skb;

	DP(NETIF_MSG_TX_QUEUED,
	   "sending pkt %u @%p  next_idx %u  bd %u @%p\n",
	   pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_bd);

	if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb) &&
	    (bp->flags & HW_VLAN_TX_FLAG)) {
		tx_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
		tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG;
	} else
		tx_bd->vlan = cpu_to_le16(pkt_prod);

	if (xmit_type) {
		/* turn on parsing and get a BD */
		bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
		pbd = (void *)&fp->tx_desc_ring[bd_prod];

		memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
	}

	if (xmit_type & XMIT_CSUM) {
		hlen = (skb_network_header(skb) - skb->data + vlan_off) / 2;

		/* for now NS flag is not used in Linux */
		pbd->global_data = (hlen |
				    ((skb->protocol == ntohs(ETH_P_8021Q)) <<
				     ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT));

		pbd->ip_hlen = (skb_transport_header(skb) -
				skb_network_header(skb)) / 2;

		hlen += pbd->ip_hlen + tcp_hdrlen(skb) / 2;

		pbd->total_hlen = cpu_to_le16(hlen);
		hlen = hlen*2 - vlan_off;

		tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_TCP_CSUM;

		if (xmit_type & XMIT_CSUM_V4)
			tx_bd->bd_flags.as_bitfield |=
						ETH_TX_BD_FLAGS_IP_CSUM;
		else
			tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IPV6;

		if (xmit_type & XMIT_CSUM_TCP) {
			pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);

		} else {
			s8 fix = SKB_CS_OFF(skb); /* signed! */

			pbd->global_data |= ETH_TX_PARSE_BD_CS_ANY_FLG;
			pbd->cs_offset = fix / 2;

			DP(NETIF_MSG_TX_QUEUED,
			   "hlen %d  offset %d  fix %d  csum before fix %x\n",
			   le16_to_cpu(pbd->total_hlen), pbd->cs_offset, fix,
			   SKB_CS(skb));

			/* HW bug: fixup the CSUM */
			pbd->tcp_pseudo_csum =
				bnx2x_csum_fix(skb_transport_header(skb),
					       SKB_CS(skb), fix);

			DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
			   pbd->tcp_pseudo_csum);
		}
	}

	mapping = pci_map_single(bp->pdev, skb->data,
				 skb_headlen(skb), PCI_DMA_TODEVICE);

	tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
	nbd = skb_shinfo(skb)->nr_frags + ((pbd == NULL) ? 1 : 2);
	tx_bd->nbd = cpu_to_le16(nbd);
	tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));

	DP(NETIF_MSG_TX_QUEUED, "first bd @%p  addr (%x:%x)  nbd %d"
	   "  nbytes %d  flags %x  vlan %x\n",
	   tx_bd, tx_bd->addr_hi, tx_bd->addr_lo, le16_to_cpu(tx_bd->nbd),
	   le16_to_cpu(tx_bd->nbytes), tx_bd->bd_flags.as_bitfield,
	   le16_to_cpu(tx_bd->vlan));

	if (xmit_type & XMIT_GSO) {

		DP(NETIF_MSG_TX_QUEUED,
		   "TSO packet len %d  hlen %d  total len %d  tso size %d\n",
		   skb->len, hlen, skb_headlen(skb),
		   skb_shinfo(skb)->gso_size);

		tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;

		if (unlikely(skb_headlen(skb) > hlen))
			bd_prod = bnx2x_tx_split(bp, fp, &tx_bd, hlen,
						 bd_prod, ++nbd);

		pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
		pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
		pbd->tcp_flags = pbd_tcp_flags(skb);

		if (xmit_type & XMIT_GSO_V4) {
			pbd->ip_id = swab16(ip_hdr(skb)->id);
			pbd->tcp_pseudo_csum =
				swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
							  ip_hdr(skb)->daddr,
							  0, IPPROTO_TCP, 0));

		} else
			pbd->tcp_pseudo_csum =
				swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
							&ipv6_hdr(skb)->daddr,
							0, IPPROTO_TCP, 0));

		pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
		tx_bd = &fp->tx_desc_ring[bd_prod];

		mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
				       frag->size, PCI_DMA_TODEVICE);

		tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
		tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
		tx_bd->nbytes = cpu_to_le16(frag->size);
		tx_bd->vlan = cpu_to_le16(pkt_prod);
		tx_bd->bd_flags.as_bitfield = 0;

		DP(NETIF_MSG_TX_QUEUED,
		   "frag %d  bd @%p  addr (%x:%x)  nbytes %d  flags %x\n",
		   i, tx_bd, tx_bd->addr_hi, tx_bd->addr_lo,
		   le16_to_cpu(tx_bd->nbytes), tx_bd->bd_flags.as_bitfield);
	}

	/* now at last mark the BD as the last BD */
	tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_END_BD;

	DP(NETIF_MSG_TX_QUEUED, "last bd @%p  flags %x\n",
	   tx_bd, tx_bd->bd_flags.as_bitfield);

	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));

	/* now send a tx doorbell, counting the next BD
	 * if the packet contains or ends with it
	 */
	if (TX_BD_POFF(bd_prod) < nbd)
		nbd++;

	if (pbd)
		DP(NETIF_MSG_TX_QUEUED,
		   "PBD @%p  ip_data %x  ip_hlen %u  ip_id %u  lso_mss %u"
		   "  tcp_flags %x  xsum %x  seq %u  hlen %u\n",
		   pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id,
		   pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum,
		   pbd->tcp_send_seq, le16_to_cpu(pbd->total_hlen));

	DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d  bd %u\n", nbd, bd_prod);

	/*
	 * Make sure that the BD data is updated before updating the producer
	 * since FW might read the BD right after the producer is updated.
	 * This is only applicable for weak-ordered memory model archs such
	 * as IA-64. The following barrier is also mandatory since FW will
	 * assumes packets must have BDs.
	 */
	wmb();

	fp->hw_tx_prods->bds_prod =
		cpu_to_le16(le16_to_cpu(fp->hw_tx_prods->bds_prod) + nbd);
	mb(); /* FW restriction: must not reorder writing nbd and packets */
	fp->hw_tx_prods->packets_prod =
		cpu_to_le32(le32_to_cpu(fp->hw_tx_prods->packets_prod) + 1);
	DOORBELL(bp, FP_IDX(fp), 0);

	fp->tx_bd_prod += nbd;
	dev->trans_start = jiffies;

	if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
		/* We want bnx2x_tx_int to "see" the updated tx_bd_prod
		   if we put Tx into XOFF state. */
		smp_mb();
		netif_stop_queue(dev);
		bp->eth_stats.driver_xoff++;
		if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
			netif_wake_queue(dev);
	}

	return NETDEV_TX_OK;
}
/* called with rtnl_lock */
static int bnx2x_open(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	bnx2x_set_power_state(bp, PCI_D0);

	return bnx2x_nic_load(bp, LOAD_OPEN);
}

/* called with rtnl_lock */
static int bnx2x_close(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	/* Unload the driver, release IRQs */
	bnx2x_nic_unload(bp, UNLOAD_CLOSE);
	if (atomic_read(&bp->pdev->enable_cnt) == 1)
		if (!CHIP_REV_IS_SLOW(bp))
			bnx2x_set_power_state(bp, PCI_D3hot);

	return 0;
}
/* called with netif_tx_lock from set_multicast */
static void bnx2x_set_rx_mode(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	u32 rx_mode = BNX2X_RX_MODE_NORMAL;
	int port = BP_PORT(bp);

	if (bp->state != BNX2X_STATE_OPEN) {
		DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
		return;
	}

	DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);

	if (dev->flags & IFF_PROMISC)
		rx_mode = BNX2X_RX_MODE_PROMISC;

	else if ((dev->flags & IFF_ALLMULTI) ||
		 ((dev->mc_count > BNX2X_MAX_MULTICAST) && CHIP_IS_E1(bp)))
		rx_mode = BNX2X_RX_MODE_ALLMULTI;

	else { /* some multicasts */
		if (CHIP_IS_E1(bp)) {
			int i, old, offset;
			struct dev_mc_list *mclist;
			struct mac_configuration_cmd *config =
						bnx2x_sp(bp, mcast_config);

			for (i = 0, mclist = dev->mc_list;
			     mclist && (i < dev->mc_count);
			     i++, mclist = mclist->next) {

				config->config_table[i].
					cam_entry.msb_mac_addr =
					swab16(*(u16 *)&mclist->dmi_addr[0]);
				config->config_table[i].
					cam_entry.middle_mac_addr =
					swab16(*(u16 *)&mclist->dmi_addr[2]);
				config->config_table[i].
					cam_entry.lsb_mac_addr =
					swab16(*(u16 *)&mclist->dmi_addr[4]);
				config->config_table[i].cam_entry.flags =
							cpu_to_le16(port);
				config->config_table[i].
					target_table_entry.flags = 0;
				config->config_table[i].
					target_table_entry.client_id = 0;
				config->config_table[i].
					target_table_entry.vlan_id = 0;

				DP(NETIF_MSG_IFUP,
				   "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
				   config->config_table[i].
						cam_entry.msb_mac_addr,
				   config->config_table[i].
						cam_entry.middle_mac_addr,
				   config->config_table[i].
						cam_entry.lsb_mac_addr);
			}
			old = config->hdr.length_6b;
			if (old > i) {
				for (; i < old; i++) {
					if (CAM_IS_INVALID(config->
							   config_table[i])) {
						/* already invalidated */
						break;
					}
					/* invalidate */
					CAM_INVALIDATE(config->
						       config_table[i]);
				}
			}

			if (CHIP_REV_IS_SLOW(bp))
				offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
			else
				offset = BNX2X_MAX_MULTICAST*(1 + port);

			config->hdr.length_6b = i;
			config->hdr.offset = offset;
			config->hdr.client_id = BP_CL_ID(bp);
			config->hdr.reserved1 = 0;

			bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
				   U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
				   U64_LO(bnx2x_sp_mapping(bp, mcast_config)),
				      0);
		} else { /* E1H */
			/* Accept one or more multicasts */
			struct dev_mc_list *mclist;
			u32 mc_filter[MC_HASH_SIZE];
			u32 crc, bit, regidx;
			int i;

			memset(mc_filter, 0, 4 * MC_HASH_SIZE);

			for (i = 0, mclist = dev->mc_list;
			     mclist && (i < dev->mc_count);
			     i++, mclist = mclist->next) {

				DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
				   mclist->dmi_addr);

				crc = crc32c_le(0, mclist->dmi_addr, ETH_ALEN);
				bit = (crc >> 24) & 0xff;
				regidx = bit >> 5;
				bit &= 0x1f;
				mc_filter[regidx] |= (1 << bit);
			}

			for (i = 0; i < MC_HASH_SIZE; i++)
				REG_WR(bp, MC_HASH_OFFSET(bp, i),
				       mc_filter[i]);
		}
	}

	bp->rx_mode = rx_mode;
	bnx2x_set_storm_rx_mode(bp);
}
/* called with rtnl_lock */
static int bnx2x_change_mac_addr(struct net_device *dev, void *p)
{
	struct sockaddr *addr = p;
	struct bnx2x *bp = netdev_priv(dev);

	if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
		return -EINVAL;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	if (netif_running(dev)) {
		if (CHIP_IS_E1(bp))
			bnx2x_set_mac_addr_e1(bp, 1);
		else
			bnx2x_set_mac_addr_e1h(bp, 1);
	}

	return 0;
}
/* called with rtnl_lock */
static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *data = if_mii(ifr);
	struct bnx2x *bp = netdev_priv(dev);
	int port = BP_PORT(bp);
	int err;

	switch (cmd) {
	case SIOCGMIIPHY:
		data->phy_id = bp->port.phy_addr;

		/* fallthrough */

	case SIOCGMIIREG: {
		u16 mii_regval;

		if (!netif_running(dev))
			return -EAGAIN;

		mutex_lock(&bp->port.phy_mutex);
		err = bnx2x_cl45_read(bp, port, 0, bp->port.phy_addr,
				      DEFAULT_PHY_DEV_ADDR,
				      (data->reg_num & 0x1f), &mii_regval);
		data->val_out = mii_regval;
		mutex_unlock(&bp->port.phy_mutex);
		return err;
	}

	case SIOCSMIIREG:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;

		if (!netif_running(dev))
			return -EAGAIN;

		mutex_lock(&bp->port.phy_mutex);
		err = bnx2x_cl45_write(bp, port, 0, bp->port.phy_addr,
				       DEFAULT_PHY_DEV_ADDR,
				       (data->reg_num & 0x1f), data->val_in);
		mutex_unlock(&bp->port.phy_mutex);
		return err;

	default:
		/* do nothing */
		break;
	}

	return -EOPNOTSUPP;
}
/* called with rtnl_lock */
static int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc = 0;

	if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
	    ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
		return -EINVAL;

	/* This does not race with packet allocation
	 * because the actual alloc size is
	 * only updated as part of load
	 */
	dev->mtu = new_mtu;

	if (netif_running(dev)) {
		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
		rc = bnx2x_nic_load(bp, LOAD_NORMAL);
	}

	return rc;
}

static void bnx2x_tx_timeout(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

#ifdef BNX2X_STOP_ON_ERROR
	if (!bp->panic)
		bnx2x_panic();
#endif
	/* This allows the netif to be shutdown gracefully before resetting */
	schedule_work(&bp->reset_task);
}
/* called with rtnl_lock */
static void bnx2x_vlan_rx_register(struct net_device *dev,
				   struct vlan_group *vlgrp)
{
	struct bnx2x *bp = netdev_priv(dev);

	bp->vlgrp = vlgrp;

	/* Set flags according to the required capabilities */
	bp->flags &= ~(HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);

	if (dev->features & NETIF_F_HW_VLAN_TX)
		bp->flags |= HW_VLAN_TX_FLAG;

	if (dev->features & NETIF_F_HW_VLAN_RX)
		bp->flags |= HW_VLAN_RX_FLAG;

	if (netif_running(dev))
		bnx2x_set_client_config(bp);
}
#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
static void poll_bnx2x(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	disable_irq(bp->pdev->irq);
	bnx2x_interrupt(bp->pdev->irq, dev);
	enable_irq(bp->pdev->irq);
}
#endif

static const struct net_device_ops bnx2x_netdev_ops = {
	.ndo_open		= bnx2x_open,
	.ndo_stop		= bnx2x_close,
	.ndo_start_xmit		= bnx2x_start_xmit,
	.ndo_set_multicast_list	= bnx2x_set_rx_mode,
	.ndo_set_mac_address	= bnx2x_change_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_do_ioctl		= bnx2x_ioctl,
	.ndo_change_mtu		= bnx2x_change_mtu,
	.ndo_tx_timeout		= bnx2x_tx_timeout,
	.ndo_vlan_rx_register	= bnx2x_vlan_rx_register,
#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
	.ndo_poll_controller	= poll_bnx2x,
#endif
};
static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
				    struct net_device *dev)
{
	struct bnx2x *bp;
	int rc;

	SET_NETDEV_DEV(dev, &pdev->dev);
	bp = netdev_priv(dev);

	bp->dev = dev;
	bp->pdev = pdev;
	bp->flags = 0;
	bp->func = PCI_FUNC(pdev->devfn);

	rc = pci_enable_device(pdev);
	if (rc) {
		printk(KERN_ERR PFX "Cannot enable PCI device, aborting\n");
		goto err_out;
	}

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		printk(KERN_ERR PFX "Cannot find PCI device base address,"
		       " aborting\n");
		rc = -ENODEV;
		goto err_out_disable;
	}

	if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
		printk(KERN_ERR PFX "Cannot find second PCI device"
		       " base address, aborting\n");
		rc = -ENODEV;
		goto err_out_disable;
	}

	if (atomic_read(&pdev->enable_cnt) == 1) {
		rc = pci_request_regions(pdev, DRV_MODULE_NAME);
		if (rc) {
			printk(KERN_ERR PFX "Cannot obtain PCI resources,"
			       " aborting\n");
			goto err_out_disable;
		}

		pci_set_master(pdev);
		pci_save_state(pdev);
	}

	bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (bp->pm_cap == 0) {
		printk(KERN_ERR PFX "Cannot find power management"
		       " capability, aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
	if (bp->pcie_cap == 0) {
		printk(KERN_ERR PFX "Cannot find PCI Express capability,"
		       " aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	if (pci_set_dma_mask(pdev, DMA_64BIT_MASK) == 0) {
		bp->flags |= USING_DAC_FLAG;
		if (pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK) != 0) {
			printk(KERN_ERR PFX "pci_set_consistent_dma_mask"
			       " failed, aborting\n");
			rc = -EIO;
			goto err_out_release;
		}

	} else if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) != 0) {
		printk(KERN_ERR PFX "System does not support DMA,"
		       " aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	dev->mem_start = pci_resource_start(pdev, 0);
	dev->base_addr = dev->mem_start;
	dev->mem_end = pci_resource_end(pdev, 0);

	dev->irq = pdev->irq;

	bp->regview = pci_ioremap_bar(pdev, 0);
	if (!bp->regview) {
		printk(KERN_ERR PFX "Cannot map register space, aborting\n");
		rc = -ENOMEM;
		goto err_out_release;
	}

	bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
					min_t(u64, BNX2X_DB_SIZE,
					      pci_resource_len(pdev, 2)));
	if (!bp->doorbells) {
		printk(KERN_ERR PFX "Cannot map doorbell space, aborting\n");
		rc = -ENOMEM;
		goto err_out_unmap;
	}

	bnx2x_set_power_state(bp, PCI_D0);

	/* clean indirect addresses */
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
	REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);

	dev->watchdog_timeo = TX_TIMEOUT;

	dev->netdev_ops = &bnx2x_netdev_ops;
	dev->ethtool_ops = &bnx2x_ethtool_ops;
	dev->features |= NETIF_F_SG;
	dev->features |= NETIF_F_HW_CSUM;
	if (bp->flags & USING_DAC_FLAG)
		dev->features |= NETIF_F_HIGHDMA;
	dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
	bp->flags |= (HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
	dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
	dev->features |= NETIF_F_TSO6;

	return 0;

err_out_unmap:
	if (bp->regview) {
		iounmap(bp->regview);
		bp->regview = NULL;
	}
	if (bp->doorbells) {
		iounmap(bp->doorbells);
		bp->doorbells = NULL;
	}

err_out_release:
	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

err_out_disable:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

err_out:
	return rc;
}
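
/* Decode the negotiated PCIe link width from the link control/status
 * register in the device's configuration space window.
 */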
static int __devinit bnx2x_get_pcie_width(struct bnx2x *bp)
{
	u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);

	val = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
	return val;
}

/* return value of 1=2.5GHz 2=5GHz */
static int __devinit bnx2x_get_pcie_speed(struct bnx2x *bp)
{
	u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);

	val = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
	return val;
}
static int __devinit bnx2x_init_one(struct pci_dev *pdev,
				    const struct pci_device_id *ent)
{
	static int version_printed;
	struct net_device *dev = NULL;
	struct bnx2x *bp;
	int rc;

	if (version_printed++ == 0)
		printk(KERN_INFO "%s", version);

	/* dev zeroed in init_etherdev */
	dev = alloc_etherdev(sizeof(*bp));
	if (!dev) {
		printk(KERN_ERR PFX "Cannot allocate net device\n");
		return -ENOMEM;
	}

	bp = netdev_priv(dev);
	bp->msglevel = debug;

	rc = bnx2x_init_dev(pdev, dev);
	if (rc < 0) {
		free_netdev(dev);
		return rc;
	}

	pci_set_drvdata(pdev, dev);

	rc = bnx2x_init_bp(bp);
	if (rc)
		goto init_one_exit;

	rc = register_netdev(dev);
	if (rc) {
		dev_err(&pdev->dev, "Cannot register net device\n");
		goto init_one_exit;
	}

	netif_carrier_off(dev);

	bp->common.name = board_info[ent->driver_data].name;
	printk(KERN_INFO "%s: %s (%c%d) PCI-E x%d %s found at mem %lx,"
	       " IRQ %d, ", dev->name, bp->common.name,
	       (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
	       bnx2x_get_pcie_width(bp),
	       (bnx2x_get_pcie_speed(bp) == 2) ? "5GHz (Gen2)" : "2.5GHz",
	       dev->base_addr, bp->pdev->irq);
	printk(KERN_CONT "node addr %pM\n", dev->dev_addr);

	return 0;

init_one_exit:
	if (bp->regview)
		iounmap(bp->regview);

	if (bp->doorbells)
		iounmap(bp->doorbells);

	free_netdev(dev);

	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

	return rc;
}
static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;

	if (!dev) {
		printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
		return;
	}
	bp = netdev_priv(dev);

	unregister_netdev(dev);

	if (bp->regview)
		iounmap(bp->regview);

	if (bp->doorbells)
		iounmap(bp->doorbells);

	free_netdev(dev);

	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}
static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;

	if (!dev) {
		printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
		return -ENODEV;
	}
	bp = netdev_priv(dev);

	rtnl_lock();

	pci_save_state(pdev);

	if (!netif_running(dev)) {
		rtnl_unlock();
		return 0;
	}

	netif_device_detach(dev);

	bnx2x_nic_unload(bp, UNLOAD_CLOSE);

	bnx2x_set_power_state(bp, pci_choose_state(pdev, state));

	rtnl_unlock();

	return 0;
}

static int bnx2x_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;
	int rc;

	if (!dev) {
		printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
		return -ENODEV;
	}
	bp = netdev_priv(dev);

	rtnl_lock();

	pci_restore_state(pdev);

	if (!netif_running(dev)) {
		rtnl_unlock();
		return 0;
	}

	bnx2x_set_power_state(bp, PCI_D0);
	netif_device_attach(dev);

	rc = bnx2x_nic_load(bp, LOAD_OPEN);

	rtnl_unlock();

	return rc;
}
static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
{
	int i;

	bp->state = BNX2X_STATE_ERROR;

	bp->rx_mode = BNX2X_RX_MODE_NONE;

	bnx2x_netif_stop(bp, 0);

	del_timer_sync(&bp->timer);
	bp->stats_state = STATS_STATE_DISABLED;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	/* Release IRQs */
	bnx2x_free_irq(bp);

	if (CHIP_IS_E1(bp)) {
		struct mac_configuration_cmd *config =
						bnx2x_sp(bp, mcast_config);

		for (i = 0; i < config->hdr.length_6b; i++)
			CAM_INVALIDATE(config->config_table[i]);
	}

	/* Free SKBs, SGEs, TPA pool and driver internals */
	bnx2x_free_skbs(bp);
	for_each_queue(bp, i)
		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
	bnx2x_free_mem(bp);

	bp->state = BNX2X_STATE_CLOSED;

	netif_carrier_off(bp->dev);

	return 0;
}
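
/* After an EEH/PCI reset, re-read the shared memory base and the MCP
 * validity signature so that firmware communication can resume; if the
 * MCP is not active the driver falls back to NO_MCP mode.
 */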
static void bnx2x_eeh_recover(struct bnx2x *bp)
{
	u32 val;

	mutex_init(&bp->port.phy_mutex);

	bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
	bp->link_params.shmem_base = bp->common.shmem_base;
	BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);

	if (!bp->common.shmem_base ||
	    (bp->common.shmem_base < 0xA0000) ||
	    (bp->common.shmem_base >= 0xC0000)) {
		BNX2X_DEV_INFO("MCP not active\n");
		bp->flags |= NO_MCP_FLAG;
		return;
	}

	val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
	if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		!= (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		BNX2X_ERR("BAD MCP validity signature\n");

	if (!BP_NOMCP(bp)) {
		bp->fw_seq = (SHMEM_RD(bp, func_mb[BP_FUNC(bp)].drv_mb_header)
			      & DRV_MSG_SEQ_NUMBER_MASK);
		BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
	}
}
/**
 * bnx2x_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
						pci_channel_state_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	netif_device_detach(dev);

	if (netif_running(dev))
		bnx2x_eeh_nic_unload(bp);

	pci_disable_device(pdev);

	rtnl_unlock();

	/* Request a slot reset */
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * bnx2x_io_slot_reset - called after the PCI bus has been reset
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 */
static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	if (pci_enable_device(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset\n");
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}

	pci_set_master(pdev);
	pci_restore_state(pdev);

	if (netif_running(dev))
		bnx2x_set_power_state(bp, PCI_D0);

	rtnl_unlock();

	return PCI_ERS_RESULT_RECOVERED;
}

/**
 * bnx2x_io_resume - called when traffic can start flowing again
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * its OK to resume normal operation.
 */
static void bnx2x_io_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	bnx2x_eeh_recover(bp);

	if (netif_running(dev))
		bnx2x_nic_load(bp, LOAD_NORMAL);

	netif_device_attach(dev);

	rtnl_unlock();
}
static struct pci_error_handlers bnx2x_err_handler = {
	.error_detected = bnx2x_io_error_detected,
	.slot_reset     = bnx2x_io_slot_reset,
	.resume         = bnx2x_io_resume,
};

static struct pci_driver bnx2x_pci_driver = {
	.name        = DRV_MODULE_NAME,
	.id_table    = bnx2x_pci_tbl,
	.probe       = bnx2x_init_one,
	.remove      = __devexit_p(bnx2x_remove_one),
	.suspend     = bnx2x_suspend,
	.resume      = bnx2x_resume,
	.err_handler = &bnx2x_err_handler,
};

static int __init bnx2x_init(void)
{
	bnx2x_wq = create_singlethread_workqueue("bnx2x");
	if (bnx2x_wq == NULL) {
		printk(KERN_ERR PFX "Cannot create workqueue\n");
		return -ENOMEM;
	}

	return pci_register_driver(&bnx2x_pci_driver);
}

static void __exit bnx2x_cleanup(void)
{
	pci_unregister_driver(&bnx2x_pci_driver);

	destroy_workqueue(bnx2x_wq);
}

module_init(bnx2x_init);
module_exit(bnx2x_cleanup);