[BNX2]: Enable S/G for jumbo RX.
/* bnx2.c: Broadcom NX2 network driver.
 *
 * Copyright (c) 2004-2007 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Written by: Michael Chan  (mchan@broadcom.com)
 */

#include <linux/module.h>
#include <linux/moduleparam.h>

#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <asm/page.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#ifdef NETIF_F_HW_VLAN_TX
#include <linux/if_vlan.h>
#define BCM_VLAN 1
#endif
#include <net/ip.h>
#include <net/tcp.h>
#include <net/checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/prefetch.h>
#include <linux/cache.h>
#include <linux/zlib.h>

#include "bnx2.h"
#include "bnx2_fw.h"
#include "bnx2_fw2.h"

#define FW_BUF_SIZE		0x10000

#define DRV_MODULE_NAME		"bnx2"
#define PFX DRV_MODULE_NAME	": "
#define DRV_MODULE_VERSION	"1.6.9"
#define DRV_MODULE_RELDATE	"December 8, 2007"

#define RUN_AT(x) (jiffies + (x))

/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT  (5*HZ)

static const char version[] __devinitdata =
	"Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708 Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

static int disable_msi = 0;

module_param(disable_msi, int, 0);
MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");

typedef enum {
	BCM5706 = 0,
	NC370T,
	NC370I,
	BCM5706S,
	NC370F,
	BCM5708,
	BCM5708S,
	BCM5709,
	BCM5709S,
} board_t;

/* indexed by board_t, above */
static const struct {
	char *name;
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM5706 1000Base-T" },
	{ "HP NC370T Multifunction Gigabit Server Adapter" },
	{ "HP NC370i Multifunction Gigabit Server Adapter" },
	{ "Broadcom NetXtreme II BCM5706 1000Base-SX" },
	{ "HP NC370F Multifunction Gigabit Server Adapter" },
	{ "Broadcom NetXtreme II BCM5708 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5708 1000Base-SX" },
	{ "Broadcom NetXtreme II BCM5709 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5709 1000Base-SX" },
	};

static struct pci_device_id bnx2_pci_tbl[] = {
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
	  PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709S },
	{ 0, }
};

static struct flash_spec flash_table[] =
{
#define BUFFERED_FLAGS		(BNX2_NV_BUFFERED | BNX2_NV_TRANSLATE)
#define NONBUFFERED_FLAGS	(BNX2_NV_WREN)
	/* Slow EEPROM */
	{0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
	 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - slow"},
	/* Expansion entry 0001 */
	{0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0001"},
	/* Saifun SA25F010 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
	 "Non-buffered flash (128kB)"},
	/* Saifun SA25F020 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
	 "Non-buffered flash (256kB)"},
	/* Expansion entry 0100 */
	{0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0100"},
	/* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
	{0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
	 "Entry 0101: ST M45PE10 (128kB non-buffered)"},
	/* Entry 0110: ST M45PE20 (non-buffered flash)*/
	{0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
	 "Entry 0110: ST M45PE20 (256kB non-buffered)"},
	/* Saifun SA25F005 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
	 "Non-buffered flash (64kB)"},
	/* Fast EEPROM */
	{0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
	 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - fast"},
	/* Expansion entry 1001 */
	{0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1001"},
	/* Expansion entry 1010 */
	{0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1010"},
	/* ATMEL AT45DB011B (buffered flash) */
	{0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
	 "Buffered flash (128kB)"},
	/* Expansion entry 1100 */
	{0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1100"},
	/* Expansion entry 1101 */
	{0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1101"},
	/* Atmel Expansion entry 1110 */
	{0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1110 (Atmel)"},
	/* ATMEL AT45DB021B (buffered flash) */
	{0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
	 "Buffered flash (256kB)"},
};

static struct flash_spec flash_5709 = {
	.flags		= BNX2_NV_BUFFERED,
	.page_bits	= BCM5709_FLASH_PAGE_BITS,
	.page_size	= BCM5709_FLASH_PAGE_SIZE,
	.addr_mask	= BCM5709_FLASH_BYTE_ADDR_MASK,
	.total_size	= BUFFERED_FLASH_TOTAL_SIZE*2,
	.name		= "5709 Buffered flash (256kB)",
};

MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);

static inline u32 bnx2_tx_avail(struct bnx2 *bp)
{
	u32 diff;

	smp_mb();

	/* The ring uses 256 indices for 255 entries, one of them
	 * needs to be skipped.
	 */
	diff = bp->tx_prod - bp->tx_cons;
	if (unlikely(diff >= TX_DESC_CNT)) {
		diff &= 0xffff;
		if (diff == TX_DESC_CNT)
			diff = MAX_TX_DESC_CNT;
	}
	return (bp->tx_ring_size - diff);
}

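/* A rough worked example of the accounting above, assuming the usual
 * TX_DESC_CNT == 256 and MAX_TX_DESC_CNT == 255: tx_prod and tx_cons are
 * free-running counters, so (prod - cons) & 0xffff is the number of BDs
 * in flight even across a 16-bit wrap, e.g. prod == 5, cons == 0xfffe
 * gives 7. A diff of exactly 256 is clamped to 255 because one index per
 * ring page is not a real entry (it is skipped, as the comment notes).
 */
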
static u32
bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
{
	u32 val;

	spin_lock_bh(&bp->indirect_lock);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
	val = REG_RD(bp, BNX2_PCICFG_REG_WINDOW);
	spin_unlock_bh(&bp->indirect_lock);
	return val;
}

static void
bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
{
	spin_lock_bh(&bp->indirect_lock);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
	spin_unlock_bh(&bp->indirect_lock);
}

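/* The two helpers above implement windowed (indirect) register access:
 * the target offset is written to BNX2_PCICFG_REG_WINDOW_ADDRESS and the
 * data then moves through BNX2_PCICFG_REG_WINDOW. Because the
 * address/data pair must not be interleaved with another indirect
 * access, both helpers hold indirect_lock for the whole sequence.
 */
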
static void
bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
{
	offset += cid_addr;
	spin_lock_bh(&bp->indirect_lock);
	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		int i;

		REG_WR(bp, BNX2_CTX_CTX_DATA, val);
		REG_WR(bp, BNX2_CTX_CTX_CTRL,
		       offset | BNX2_CTX_CTX_CTRL_WRITE_REQ);
		for (i = 0; i < 5; i++) {
			u32 val;
			val = REG_RD(bp, BNX2_CTX_CTX_CTRL);
			if ((val & BNX2_CTX_CTX_CTRL_WRITE_REQ) == 0)
				break;
			udelay(5);
		}
	} else {
		REG_WR(bp, BNX2_CTX_DATA_ADR, offset);
		REG_WR(bp, BNX2_CTX_DATA, val);
	}
	spin_unlock_bh(&bp->indirect_lock);
}

static int
bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
{
	u32 val1;
	int i, ret;

	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	val1 = (bp->phy_addr << 21) | (reg << 16) |
		BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
		BNX2_EMAC_MDIO_COMM_START_BUSY;
	REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

	for (i = 0; i < 50; i++) {
		udelay(10);

		val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
			udelay(5);

			val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
			val1 &= BNX2_EMAC_MDIO_COMM_DATA;

			break;
		}
	}

	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
		*val = 0x0;
		ret = -EBUSY;
	}
	else {
		*val = val1;
		ret = 0;
	}

	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	return ret;
}

static int
bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
{
	u32 val1;
	int i, ret;

	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	val1 = (bp->phy_addr << 21) | (reg << 16) | val |
		BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
		BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
	REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

	for (i = 0; i < 50; i++) {
		udelay(10);

		val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
			udelay(5);
			break;
		}
	}

	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
		ret = -EBUSY;
	else
		ret = 0;

	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	return ret;
}

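/* MDIO access pattern shared by bnx2_read_phy()/bnx2_write_phy(): when
 * the EMAC is auto-polling the PHY, polling is paused first (and the
 * mode register read back to post the write) so the manual transaction
 * cannot collide with hardware polling. The command is then written to
 * BNX2_EMAC_MDIO_COMM with START_BUSY set, and that bit is polled for up
 * to 50 x 10 usec before the access is declared -EBUSY.
 */
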
static void
bnx2_disable_int(struct bnx2 *bp)
{
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
	       BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
	REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
}

static void
bnx2_enable_int(struct bnx2 *bp)
{
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
	       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
	       BNX2_PCICFG_INT_ACK_CMD_MASK_INT | bp->last_status_idx);

	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
	       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | bp->last_status_idx);

	REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
}

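/* Ack/unmask protocol, as implemented above: writing INT_ACK_CMD with
 * INDEX_VALID plus the last status block index acknowledges events up to
 * that index. The first write keeps MASK_INT set while updating the
 * index; the second unmasks. COAL_NOW asks the host coalescing block to
 * run immediately, so an interrupt fires if events are already pending.
 */
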
static void
bnx2_disable_int_sync(struct bnx2 *bp)
{
	atomic_inc(&bp->intr_sem);
	bnx2_disable_int(bp);
	synchronize_irq(bp->pdev->irq);
}

static void
bnx2_netif_stop(struct bnx2 *bp)
{
	bnx2_disable_int_sync(bp);
	if (netif_running(bp->dev)) {
		napi_disable(&bp->napi);
		netif_tx_disable(bp->dev);
		bp->dev->trans_start = jiffies;	/* prevent tx timeout */
	}
}

static void
bnx2_netif_start(struct bnx2 *bp)
{
	if (atomic_dec_and_test(&bp->intr_sem)) {
		if (netif_running(bp->dev)) {
			netif_wake_queue(bp->dev);
			napi_enable(&bp->napi);
			bnx2_enable_int(bp);
		}
	}
}

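/* bnx2_netif_stop()/bnx2_netif_start() nest via bp->intr_sem: every stop
 * increments the semaphore, and a start re-enables NAPI, the tx queue
 * and interrupts only when the count drops back to zero, so overlapping
 * stop/start sections from different paths remain safe.
 */
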
static void
bnx2_free_mem(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->ctx_pages; i++) {
		if (bp->ctx_blk[i]) {
			pci_free_consistent(bp->pdev, BCM_PAGE_SIZE,
					    bp->ctx_blk[i],
					    bp->ctx_blk_mapping[i]);
			bp->ctx_blk[i] = NULL;
		}
	}
	if (bp->status_blk) {
		pci_free_consistent(bp->pdev, bp->status_stats_size,
				    bp->status_blk, bp->status_blk_mapping);
		bp->status_blk = NULL;
		bp->stats_blk = NULL;
	}
	if (bp->tx_desc_ring) {
		pci_free_consistent(bp->pdev, TXBD_RING_SIZE,
				    bp->tx_desc_ring, bp->tx_desc_mapping);
		bp->tx_desc_ring = NULL;
	}
	kfree(bp->tx_buf_ring);
	bp->tx_buf_ring = NULL;
	for (i = 0; i < bp->rx_max_ring; i++) {
		if (bp->rx_desc_ring[i])
			pci_free_consistent(bp->pdev, RXBD_RING_SIZE,
					    bp->rx_desc_ring[i],
					    bp->rx_desc_mapping[i]);
		bp->rx_desc_ring[i] = NULL;
	}
	vfree(bp->rx_buf_ring);
	bp->rx_buf_ring = NULL;
	for (i = 0; i < bp->rx_max_pg_ring; i++) {
		if (bp->rx_pg_desc_ring[i])
			pci_free_consistent(bp->pdev, RXBD_RING_SIZE,
					    bp->rx_pg_desc_ring[i],
					    bp->rx_pg_desc_mapping[i]);
		bp->rx_pg_desc_ring[i] = NULL;
	}
	if (bp->rx_pg_ring)
		vfree(bp->rx_pg_ring);
	bp->rx_pg_ring = NULL;
}

static int
bnx2_alloc_mem(struct bnx2 *bp)
{
	int i, status_blk_size;

	bp->tx_buf_ring = kzalloc(SW_TXBD_RING_SIZE, GFP_KERNEL);
	if (bp->tx_buf_ring == NULL)
		return -ENOMEM;

	bp->tx_desc_ring = pci_alloc_consistent(bp->pdev, TXBD_RING_SIZE,
						&bp->tx_desc_mapping);
	if (bp->tx_desc_ring == NULL)
		goto alloc_mem_err;

	bp->rx_buf_ring = vmalloc(SW_RXBD_RING_SIZE * bp->rx_max_ring);
	if (bp->rx_buf_ring == NULL)
		goto alloc_mem_err;

	memset(bp->rx_buf_ring, 0, SW_RXBD_RING_SIZE * bp->rx_max_ring);

	for (i = 0; i < bp->rx_max_ring; i++) {
		bp->rx_desc_ring[i] =
			pci_alloc_consistent(bp->pdev, RXBD_RING_SIZE,
					     &bp->rx_desc_mapping[i]);
		if (bp->rx_desc_ring[i] == NULL)
			goto alloc_mem_err;
	}

	if (bp->rx_pg_ring_size) {
		bp->rx_pg_ring = vmalloc(SW_RXPG_RING_SIZE *
					 bp->rx_max_pg_ring);
		if (bp->rx_pg_ring == NULL)
			goto alloc_mem_err;

		memset(bp->rx_pg_ring, 0, SW_RXPG_RING_SIZE *
		       bp->rx_max_pg_ring);
	}

	for (i = 0; i < bp->rx_max_pg_ring; i++) {
		bp->rx_pg_desc_ring[i] =
			pci_alloc_consistent(bp->pdev, RXBD_RING_SIZE,
					     &bp->rx_pg_desc_mapping[i]);
		if (bp->rx_pg_desc_ring[i] == NULL)
			goto alloc_mem_err;
	}

	/* Combine status and statistics blocks into one allocation. */
	status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
	bp->status_stats_size = status_blk_size +
				sizeof(struct statistics_block);

	bp->status_blk = pci_alloc_consistent(bp->pdev, bp->status_stats_size,
					      &bp->status_blk_mapping);
	if (bp->status_blk == NULL)
		goto alloc_mem_err;

	memset(bp->status_blk, 0, bp->status_stats_size);

	bp->stats_blk = (void *) ((unsigned long) bp->status_blk +
				  status_blk_size);

	bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		bp->ctx_pages = 0x2000 / BCM_PAGE_SIZE;
		if (bp->ctx_pages == 0)
			bp->ctx_pages = 1;
		for (i = 0; i < bp->ctx_pages; i++) {
			bp->ctx_blk[i] = pci_alloc_consistent(bp->pdev,
						BCM_PAGE_SIZE,
						&bp->ctx_blk_mapping[i]);
			if (bp->ctx_blk[i] == NULL)
				goto alloc_mem_err;
		}
	}
	return 0;

alloc_mem_err:
	bnx2_free_mem(bp);
	return -ENOMEM;
}

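/* Layout produced by bnx2_alloc_mem(): the status block and statistics
 * block share one DMA-coherent allocation, with the statistics block
 * starting at the L1-cache-aligned end of the status block. The rx
 * "page" descriptor rings (rx_pg_*) are allocated only when
 * rx_pg_ring_size is nonzero, i.e. when jumbo receive needs page-sized
 * scatter-gather buffers in addition to the normal skb ring.
 */
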
static void
bnx2_report_fw_link(struct bnx2 *bp)
{
	u32 fw_link_status = 0;

	if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
		return;

	if (bp->link_up) {
		u32 bmsr;

		switch (bp->line_speed) {
		case SPEED_10:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_10HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_10FULL;
			break;
		case SPEED_100:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_100HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_100FULL;
			break;
		case SPEED_1000:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_1000HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_1000FULL;
			break;
		case SPEED_2500:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_2500HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_2500FULL;
			break;
		}

		fw_link_status |= BNX2_LINK_STATUS_LINK_UP;

		if (bp->autoneg) {
			fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;

			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);

			if (!(bmsr & BMSR_ANEGCOMPLETE) ||
			    bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)
				fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
			else
				fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
		}
	}
	else
		fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;

	REG_WR_IND(bp, bp->shmem_base + BNX2_LINK_STATUS, fw_link_status);
}

static char *
bnx2_xceiver_str(struct bnx2 *bp)
{
	return ((bp->phy_port == PORT_FIBRE) ? "SerDes" :
		((bp->phy_flags & PHY_SERDES_FLAG) ? "Remote Copper" :
		 "Copper"));
}

static void
bnx2_report_link(struct bnx2 *bp)
{
	if (bp->link_up) {
		netif_carrier_on(bp->dev);
		printk(KERN_INFO PFX "%s NIC %s Link is Up, ", bp->dev->name,
		       bnx2_xceiver_str(bp));

		printk("%d Mbps ", bp->line_speed);

		if (bp->duplex == DUPLEX_FULL)
			printk("full duplex");
		else
			printk("half duplex");

		if (bp->flow_ctrl) {
			if (bp->flow_ctrl & FLOW_CTRL_RX) {
				printk(", receive ");
				if (bp->flow_ctrl & FLOW_CTRL_TX)
					printk("& transmit ");
			}
			else {
				printk(", transmit ");
			}
			printk("flow control ON");
		}
		printk("\n");
	}
	else {
		netif_carrier_off(bp->dev);
		printk(KERN_ERR PFX "%s NIC %s Link is Down\n", bp->dev->name,
		       bnx2_xceiver_str(bp));
	}

	bnx2_report_fw_link(bp);
}

static void
bnx2_resolve_flow_ctrl(struct bnx2 *bp)
{
	u32 local_adv, remote_adv;

	bp->flow_ctrl = 0;
	if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
	    (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {

		if (bp->duplex == DUPLEX_FULL) {
			bp->flow_ctrl = bp->req_flow_ctrl;
		}
		return;
	}

	if (bp->duplex != DUPLEX_FULL) {
		return;
	}

	if ((bp->phy_flags & PHY_SERDES_FLAG) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5708)) {
		u32 val;

		bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
		if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_TX;
		if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_RX;
		return;
	}

	bnx2_read_phy(bp, bp->mii_adv, &local_adv);
	bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

	if (bp->phy_flags & PHY_SERDES_FLAG) {
		u32 new_local_adv = 0;
		u32 new_remote_adv = 0;

		if (local_adv & ADVERTISE_1000XPAUSE)
			new_local_adv |= ADVERTISE_PAUSE_CAP;
		if (local_adv & ADVERTISE_1000XPSE_ASYM)
			new_local_adv |= ADVERTISE_PAUSE_ASYM;
		if (remote_adv & ADVERTISE_1000XPAUSE)
			new_remote_adv |= ADVERTISE_PAUSE_CAP;
		if (remote_adv & ADVERTISE_1000XPSE_ASYM)
			new_remote_adv |= ADVERTISE_PAUSE_ASYM;

		local_adv = new_local_adv;
		remote_adv = new_remote_adv;
	}

	/* See Table 28B-3 of 802.3ab-1999 spec. */
	if (local_adv & ADVERTISE_PAUSE_CAP) {
		if (local_adv & ADVERTISE_PAUSE_ASYM) {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
			else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
				bp->flow_ctrl = FLOW_CTRL_RX;
			}
		}
		else {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
		}
	}
	else if (local_adv & ADVERTISE_PAUSE_ASYM) {
		if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
		    (remote_adv & ADVERTISE_PAUSE_ASYM)) {

			bp->flow_ctrl = FLOW_CTRL_TX;
		}
	}
}

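/* The 802.3 Table 28B-3 resolution implemented above, in truth-table
 * form (Cap = ADVERTISE_PAUSE_CAP, Asym = ADVERTISE_PAUSE_ASYM):
 *
 *	local		remote		resolved flow_ctrl
 *	Cap (+/- Asym)	Cap		TX | RX
 *	Cap + Asym	Asym only	RX
 *	Asym only	Cap + Asym	TX
 *	anything else			none
 */
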
static int
bnx2_5709s_linkup(struct bnx2 *bp)
{
	u32 val, speed;

	bp->link_up = 1;

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_GP_STATUS);
	bnx2_read_phy(bp, MII_BNX2_GP_TOP_AN_STATUS1, &val);
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	if ((bp->autoneg & AUTONEG_SPEED) == 0) {
		bp->line_speed = bp->req_line_speed;
		bp->duplex = bp->req_duplex;
		return 0;
	}

	speed = val & MII_BNX2_GP_TOP_AN_SPEED_MSK;
	switch (speed) {
	case MII_BNX2_GP_TOP_AN_SPEED_10:
		bp->line_speed = SPEED_10;
		break;
	case MII_BNX2_GP_TOP_AN_SPEED_100:
		bp->line_speed = SPEED_100;
		break;
	case MII_BNX2_GP_TOP_AN_SPEED_1G:
	case MII_BNX2_GP_TOP_AN_SPEED_1GKV:
		bp->line_speed = SPEED_1000;
		break;
	case MII_BNX2_GP_TOP_AN_SPEED_2_5G:
		bp->line_speed = SPEED_2500;
		break;
	}
	if (val & MII_BNX2_GP_TOP_AN_FD)
		bp->duplex = DUPLEX_FULL;
	else
		bp->duplex = DUPLEX_HALF;
	return 0;
}

static int
bnx2_5708s_linkup(struct bnx2 *bp)
{
	u32 val;

	bp->link_up = 1;
	bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
	switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
	case BCM5708S_1000X_STAT1_SPEED_10:
		bp->line_speed = SPEED_10;
		break;
	case BCM5708S_1000X_STAT1_SPEED_100:
		bp->line_speed = SPEED_100;
		break;
	case BCM5708S_1000X_STAT1_SPEED_1G:
		bp->line_speed = SPEED_1000;
		break;
	case BCM5708S_1000X_STAT1_SPEED_2G5:
		bp->line_speed = SPEED_2500;
		break;
	}
	if (val & BCM5708S_1000X_STAT1_FD)
		bp->duplex = DUPLEX_FULL;
	else
		bp->duplex = DUPLEX_HALF;

	return 0;
}

static int
bnx2_5706s_linkup(struct bnx2 *bp)
{
	u32 bmcr, local_adv, remote_adv, common;

	bp->link_up = 1;
	bp->line_speed = SPEED_1000;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
	if (bmcr & BMCR_FULLDPLX) {
		bp->duplex = DUPLEX_FULL;
	}
	else {
		bp->duplex = DUPLEX_HALF;
	}

	if (!(bmcr & BMCR_ANENABLE)) {
		return 0;
	}

	bnx2_read_phy(bp, bp->mii_adv, &local_adv);
	bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

	common = local_adv & remote_adv;
	if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {

		if (common & ADVERTISE_1000XFULL) {
			bp->duplex = DUPLEX_FULL;
		}
		else {
			bp->duplex = DUPLEX_HALF;
		}
	}

	return 0;
}

static int
bnx2_copper_linkup(struct bnx2 *bp)
{
	u32 bmcr;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
	if (bmcr & BMCR_ANENABLE) {
		u32 local_adv, remote_adv, common;

		bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
		bnx2_read_phy(bp, MII_STAT1000, &remote_adv);

		common = local_adv & (remote_adv >> 2);
		if (common & ADVERTISE_1000FULL) {
			bp->line_speed = SPEED_1000;
			bp->duplex = DUPLEX_FULL;
		}
		else if (common & ADVERTISE_1000HALF) {
			bp->line_speed = SPEED_1000;
			bp->duplex = DUPLEX_HALF;
		}
		else {
			bnx2_read_phy(bp, bp->mii_adv, &local_adv);
			bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

			common = local_adv & remote_adv;
			if (common & ADVERTISE_100FULL) {
				bp->line_speed = SPEED_100;
				bp->duplex = DUPLEX_FULL;
			}
			else if (common & ADVERTISE_100HALF) {
				bp->line_speed = SPEED_100;
				bp->duplex = DUPLEX_HALF;
			}
			else if (common & ADVERTISE_10FULL) {
				bp->line_speed = SPEED_10;
				bp->duplex = DUPLEX_FULL;
			}
			else if (common & ADVERTISE_10HALF) {
				bp->line_speed = SPEED_10;
				bp->duplex = DUPLEX_HALF;
			}
			else {
				bp->line_speed = 0;
				bp->link_up = 0;
			}
		}
	}
	else {
		if (bmcr & BMCR_SPEED100) {
			bp->line_speed = SPEED_100;
		}
		else {
			bp->line_speed = SPEED_10;
		}
		if (bmcr & BMCR_FULLDPLX) {
			bp->duplex = DUPLEX_FULL;
		}
		else {
			bp->duplex = DUPLEX_HALF;
		}
	}

	return 0;
}

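/* Why the ">> 2" when matching 1000BASE-T abilities above: in clause-22
 * registers the link partner's 1000BASE-T bits in MII_STAT1000 (bits
 * 11:10) sit two positions above our advertisement bits in MII_CTRL1000
 * (bits 9:8), so shifting the status word right by 2 lines the fields up
 * for a direct AND against ADVERTISE_1000FULL/ADVERTISE_1000HALF.
 */
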
static int
bnx2_set_mac_link(struct bnx2 *bp)
{
	u32 val;

	REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
	if (bp->link_up && (bp->line_speed == SPEED_1000) &&
	    (bp->duplex == DUPLEX_HALF)) {
		REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
	}

	/* Configure the EMAC mode register. */
	val = REG_RD(bp, BNX2_EMAC_MODE);

	val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
		 BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
		 BNX2_EMAC_MODE_25G_MODE);

	if (bp->link_up) {
		switch (bp->line_speed) {
		case SPEED_10:
			if (CHIP_NUM(bp) != CHIP_NUM_5706) {
				val |= BNX2_EMAC_MODE_PORT_MII_10M;
				break;
			}
			/* fall through */
		case SPEED_100:
			val |= BNX2_EMAC_MODE_PORT_MII;
			break;
		case SPEED_2500:
			val |= BNX2_EMAC_MODE_25G_MODE;
			/* fall through */
		case SPEED_1000:
			val |= BNX2_EMAC_MODE_PORT_GMII;
			break;
		}
	}
	else {
		val |= BNX2_EMAC_MODE_PORT_GMII;
	}

	/* Set the MAC to operate in the appropriate duplex mode. */
	if (bp->duplex == DUPLEX_HALF)
		val |= BNX2_EMAC_MODE_HALF_DUPLEX;
	REG_WR(bp, BNX2_EMAC_MODE, val);

	/* Enable/disable rx PAUSE. */
	bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_RX)
		bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);

	/* Enable/disable tx PAUSE. */
	val = REG_RD(bp, BNX2_EMAC_TX_MODE);
	val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_TX)
		val |= BNX2_EMAC_TX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_TX_MODE, val);

	/* Acknowledge the interrupt. */
	REG_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);

	return 0;
}

static void
bnx2_enable_bmsr1(struct bnx2 *bp)
{
	if ((bp->phy_flags & PHY_SERDES_FLAG) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5709))
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_GP_STATUS);
}

static void
bnx2_disable_bmsr1(struct bnx2 *bp)
{
	if ((bp->phy_flags & PHY_SERDES_FLAG) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5709))
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
}

static int
bnx2_test_and_enable_2g5(struct bnx2 *bp)
{
	u32 up1;
	int ret = 1;

	if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
		return 0;

	if (bp->autoneg & AUTONEG_SPEED)
		bp->advertising |= ADVERTISED_2500baseX_Full;

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);

	bnx2_read_phy(bp, bp->mii_up1, &up1);
	if (!(up1 & BCM5708S_UP1_2G5)) {
		up1 |= BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, bp->mii_up1, up1);
		ret = 0;
	}

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	return ret;
}

static int
bnx2_test_and_disable_2g5(struct bnx2 *bp)
{
	u32 up1;
	int ret = 0;

	if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
		return 0;

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);

	bnx2_read_phy(bp, bp->mii_up1, &up1);
	if (up1 & BCM5708S_UP1_2G5) {
		up1 &= ~BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, bp->mii_up1, up1);
		ret = 1;
	}

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	return ret;
}

static void
bnx2_enable_forced_2g5(struct bnx2 *bp)
{
	u32 bmcr;

	if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
		return;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		u32 val;

		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_SERDES_DIG);
		bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
		val &= ~MII_BNX2_SD_MISC1_FORCE_MSK;
		val |= MII_BNX2_SD_MISC1_FORCE | MII_BNX2_SD_MISC1_FORCE_2_5G;
		bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);

		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		bmcr |= BCM5708S_BMCR_FORCE_2500;
	}

	if (bp->autoneg & AUTONEG_SPEED) {
		bmcr &= ~BMCR_ANENABLE;
		if (bp->req_duplex == DUPLEX_FULL)
			bmcr |= BMCR_FULLDPLX;
	}
	bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
}

static void
bnx2_disable_forced_2g5(struct bnx2 *bp)
{
	u32 bmcr;

	if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
		return;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		u32 val;

		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_SERDES_DIG);
		bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
		val &= ~MII_BNX2_SD_MISC1_FORCE;
		bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);

		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		bmcr &= ~BCM5708S_BMCR_FORCE_2500;
	}

	if (bp->autoneg & AUTONEG_SPEED)
		bmcr |= BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_ANRESTART;
	bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
}

static int
bnx2_set_link(struct bnx2 *bp)
{
	u32 bmsr;
	u8 link_up;

	if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) {
		bp->link_up = 1;
		return 0;
	}

	if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
		return 0;

	link_up = bp->link_up;

	bnx2_enable_bmsr1(bp);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_disable_bmsr1(bp);

	if ((bp->phy_flags & PHY_SERDES_FLAG) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5706)) {
		u32 val;

		val = REG_RD(bp, BNX2_EMAC_STATUS);
		if (val & BNX2_EMAC_STATUS_LINK)
			bmsr |= BMSR_LSTATUS;
		else
			bmsr &= ~BMSR_LSTATUS;
	}

	if (bmsr & BMSR_LSTATUS) {
		bp->link_up = 1;

		if (bp->phy_flags & PHY_SERDES_FLAG) {
			if (CHIP_NUM(bp) == CHIP_NUM_5706)
				bnx2_5706s_linkup(bp);
			else if (CHIP_NUM(bp) == CHIP_NUM_5708)
				bnx2_5708s_linkup(bp);
			else if (CHIP_NUM(bp) == CHIP_NUM_5709)
				bnx2_5709s_linkup(bp);
		}
		else {
			bnx2_copper_linkup(bp);
		}
		bnx2_resolve_flow_ctrl(bp);
	}
	else {
		if ((bp->phy_flags & PHY_SERDES_FLAG) &&
		    (bp->autoneg & AUTONEG_SPEED))
			bnx2_disable_forced_2g5(bp);

		bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
		bp->link_up = 0;
	}

	if (bp->link_up != link_up) {
		bnx2_report_link(bp);
	}

	bnx2_set_mac_link(bp);

	return 0;
}

static int
bnx2_reset_phy(struct bnx2 *bp)
{
	int i;
	u32 reg;

	bnx2_write_phy(bp, bp->mii_bmcr, BMCR_RESET);

#define PHY_RESET_MAX_WAIT 100
	for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
		udelay(10);

		bnx2_read_phy(bp, bp->mii_bmcr, &reg);
		if (!(reg & BMCR_RESET)) {
			udelay(20);
			break;
		}
	}
	if (i == PHY_RESET_MAX_WAIT) {
		return -EBUSY;
	}
	return 0;
}

static u32
bnx2_phy_get_pause_adv(struct bnx2 *bp)
{
	u32 adv = 0;

	if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
	    (FLOW_CTRL_RX | FLOW_CTRL_TX)) {

		if (bp->phy_flags & PHY_SERDES_FLAG) {
			adv = ADVERTISE_1000XPAUSE;
		}
		else {
			adv = ADVERTISE_PAUSE_CAP;
		}
	}
	else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
		if (bp->phy_flags & PHY_SERDES_FLAG) {
			adv = ADVERTISE_1000XPSE_ASYM;
		}
		else {
			adv = ADVERTISE_PAUSE_ASYM;
		}
	}
	else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
		if (bp->phy_flags & PHY_SERDES_FLAG) {
			adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
		}
		else {
			adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
		}
	}
	return adv;
}

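/* Mapping used above (1000BASE-X pause bits for SerDes, clause-28 bits
 * for copper):
 *
 *	requested flow ctrl	advertised
 *	RX | TX			symmetric PAUSE
 *	TX only			ASYM only
 *	RX only			PAUSE | ASYM
 *
 * Advertising both bits in the rx-only case appears intended to let the
 * resolution in bnx2_resolve_flow_ctrl() fall back to symmetric pause
 * when the partner advertises PAUSE itself.
 */
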
static int bnx2_fw_sync(struct bnx2 *, u32, int);

static int
bnx2_setup_remote_phy(struct bnx2 *bp, u8 port)
{
	u32 speed_arg = 0, pause_adv;

	pause_adv = bnx2_phy_get_pause_adv(bp);

	if (bp->autoneg & AUTONEG_SPEED) {
		speed_arg |= BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG;
		if (bp->advertising & ADVERTISED_10baseT_Half)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10HALF;
		if (bp->advertising & ADVERTISED_10baseT_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10FULL;
		if (bp->advertising & ADVERTISED_100baseT_Half)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100HALF;
		if (bp->advertising & ADVERTISED_100baseT_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100FULL;
		if (bp->advertising & ADVERTISED_1000baseT_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
		if (bp->advertising & ADVERTISED_2500baseX_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
	} else {
		if (bp->req_line_speed == SPEED_2500)
			speed_arg = BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
		else if (bp->req_line_speed == SPEED_1000)
			speed_arg = BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
		else if (bp->req_line_speed == SPEED_100) {
			if (bp->req_duplex == DUPLEX_FULL)
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100FULL;
			else
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100HALF;
		} else if (bp->req_line_speed == SPEED_10) {
			if (bp->req_duplex == DUPLEX_FULL)
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10FULL;
			else
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10HALF;
		}
	}

	if (pause_adv & (ADVERTISE_1000XPAUSE | ADVERTISE_PAUSE_CAP))
		speed_arg |= BNX2_NETLINK_SET_LINK_FC_SYM_PAUSE;
	if (pause_adv & (ADVERTISE_1000XPSE_ASYM | ADVERTISE_PAUSE_ASYM))
		speed_arg |= BNX2_NETLINK_SET_LINK_FC_ASYM_PAUSE;

	if (port == PORT_TP)
		speed_arg |= BNX2_NETLINK_SET_LINK_PHY_APP_REMOTE |
			     BNX2_NETLINK_SET_LINK_ETH_AT_WIRESPEED;

	REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB_ARG0, speed_arg);

	spin_unlock_bh(&bp->phy_lock);
	bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_CMD_SET_LINK, 0);
	spin_lock_bh(&bp->phy_lock);

	return 0;
}

static int
bnx2_setup_serdes_phy(struct bnx2 *bp, u8 port)
{
	u32 adv, bmcr;
	u32 new_adv = 0;

	if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
		return (bnx2_setup_remote_phy(bp, port));

	if (!(bp->autoneg & AUTONEG_SPEED)) {
		u32 new_bmcr;
		int force_link_down = 0;

		if (bp->req_line_speed == SPEED_2500) {
			if (!bnx2_test_and_enable_2g5(bp))
				force_link_down = 1;
		} else if (bp->req_line_speed == SPEED_1000) {
			if (bnx2_test_and_disable_2g5(bp))
				force_link_down = 1;
		}
		bnx2_read_phy(bp, bp->mii_adv, &adv);
		adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		new_bmcr = bmcr & ~BMCR_ANENABLE;
		new_bmcr |= BMCR_SPEED1000;

		if (CHIP_NUM(bp) == CHIP_NUM_5709) {
			if (bp->req_line_speed == SPEED_2500)
				bnx2_enable_forced_2g5(bp);
			else if (bp->req_line_speed == SPEED_1000) {
				bnx2_disable_forced_2g5(bp);
				new_bmcr &= ~0x2000;
			}

		} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
			if (bp->req_line_speed == SPEED_2500)
				new_bmcr |= BCM5708S_BMCR_FORCE_2500;
			else
				new_bmcr = bmcr & ~BCM5708S_BMCR_FORCE_2500;
		}

		if (bp->req_duplex == DUPLEX_FULL) {
			adv |= ADVERTISE_1000XFULL;
			new_bmcr |= BMCR_FULLDPLX;
		}
		else {
			adv |= ADVERTISE_1000XHALF;
			new_bmcr &= ~BMCR_FULLDPLX;
		}
		if ((new_bmcr != bmcr) || (force_link_down)) {
			/* Force a link down visible on the other side */
			if (bp->link_up) {
				bnx2_write_phy(bp, bp->mii_adv, adv &
					       ~(ADVERTISE_1000XFULL |
						 ADVERTISE_1000XHALF));
				bnx2_write_phy(bp, bp->mii_bmcr, bmcr |
					BMCR_ANRESTART | BMCR_ANENABLE);

				bp->link_up = 0;
				netif_carrier_off(bp->dev);
				bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
				bnx2_report_link(bp);
			}
			bnx2_write_phy(bp, bp->mii_adv, adv);
			bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
		} else {
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	bnx2_test_and_enable_2g5(bp);

	if (bp->advertising & ADVERTISED_1000baseT_Full)
		new_adv |= ADVERTISE_1000XFULL;

	new_adv |= bnx2_phy_get_pause_adv(bp);

	bnx2_read_phy(bp, bp->mii_adv, &adv);
	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	bp->serdes_an_pending = 0;
	if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
		/* Force a link down visible on the other side */
		if (bp->link_up) {
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
			spin_unlock_bh(&bp->phy_lock);
			msleep(20);
			spin_lock_bh(&bp->phy_lock);
		}

		bnx2_write_phy(bp, bp->mii_adv, new_adv);
		bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART |
			BMCR_ANENABLE);
		/* Speed up link-up time when the link partner
		 * does not autonegotiate, which is very common
		 * in blade servers. Some blade servers use
		 * IPMI for keyboard input and it's important
		 * to minimize link disruptions. Autoneg. involves
		 * exchanging base pages plus 3 next pages and
		 * normally completes in about 120 msec.
		 */
		bp->current_interval = SERDES_AN_TIMEOUT;
		bp->serdes_an_pending = 1;
		mod_timer(&bp->timer, jiffies + bp->current_interval);
	} else {
		bnx2_resolve_flow_ctrl(bp);
		bnx2_set_mac_link(bp);
	}

	return 0;
}

#define ETHTOOL_ALL_FIBRE_SPEED						\
	(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) ?			\
		(ADVERTISED_2500baseX_Full | ADVERTISED_1000baseT_Full) :\
		(ADVERTISED_1000baseT_Full)

#define ETHTOOL_ALL_COPPER_SPEED					\
	(ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |		\
	ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |		\
	ADVERTISED_1000baseT_Full)

#define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
	ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)

#define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)

static void
bnx2_set_default_remote_link(struct bnx2 *bp)
{
	u32 link;

	if (bp->phy_port == PORT_TP)
		link = REG_RD_IND(bp, bp->shmem_base + BNX2_RPHY_COPPER_LINK);
	else
		link = REG_RD_IND(bp, bp->shmem_base + BNX2_RPHY_SERDES_LINK);

	if (link & BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG) {
		bp->req_line_speed = 0;
		bp->autoneg |= AUTONEG_SPEED;
		bp->advertising = ADVERTISED_Autoneg;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
			bp->advertising |= ADVERTISED_10baseT_Half;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_10FULL)
			bp->advertising |= ADVERTISED_10baseT_Full;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
			bp->advertising |= ADVERTISED_100baseT_Half;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_100FULL)
			bp->advertising |= ADVERTISED_100baseT_Full;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
			bp->advertising |= ADVERTISED_1000baseT_Full;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
			bp->advertising |= ADVERTISED_2500baseX_Full;
	} else {
		bp->autoneg = 0;
		bp->advertising = 0;
		bp->req_duplex = DUPLEX_FULL;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_10) {
			bp->req_line_speed = SPEED_10;
			if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
				bp->req_duplex = DUPLEX_HALF;
		}
		if (link & BNX2_NETLINK_SET_LINK_SPEED_100) {
			bp->req_line_speed = SPEED_100;
			if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
				bp->req_duplex = DUPLEX_HALF;
		}
		if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
			bp->req_line_speed = SPEED_1000;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
			bp->req_line_speed = SPEED_2500;
	}
}

static void
bnx2_set_default_link(struct bnx2 *bp)
{
	if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
		return bnx2_set_default_remote_link(bp);

	bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
	bp->req_line_speed = 0;
	if (bp->phy_flags & PHY_SERDES_FLAG) {
		u32 reg;

		bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;

		reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG);
		reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
		if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
			bp->autoneg = 0;
			bp->req_line_speed = bp->line_speed = SPEED_1000;
			bp->req_duplex = DUPLEX_FULL;
		}
	} else
		bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
}

static void
bnx2_send_heart_beat(struct bnx2 *bp)
{
	u32 msg;
	u32 addr;

	spin_lock(&bp->indirect_lock);
	msg = (u32) (++bp->fw_drv_pulse_wr_seq & BNX2_DRV_PULSE_SEQ_MASK);
	addr = bp->shmem_base + BNX2_DRV_PULSE_MB;
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, addr);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW, msg);
	spin_unlock(&bp->indirect_lock);
}

static void
bnx2_remote_phy_event(struct bnx2 *bp)
{
	u32 msg;
	u8 link_up = bp->link_up;
	u8 old_port;

	msg = REG_RD_IND(bp, bp->shmem_base + BNX2_LINK_STATUS);

	if (msg & BNX2_LINK_STATUS_HEART_BEAT_EXPIRED)
		bnx2_send_heart_beat(bp);

	msg &= ~BNX2_LINK_STATUS_HEART_BEAT_EXPIRED;

	if ((msg & BNX2_LINK_STATUS_LINK_UP) == BNX2_LINK_STATUS_LINK_DOWN)
		bp->link_up = 0;
	else {
		u32 speed;

		bp->link_up = 1;
		speed = msg & BNX2_LINK_STATUS_SPEED_MASK;
		bp->duplex = DUPLEX_FULL;
		switch (speed) {
		case BNX2_LINK_STATUS_10HALF:
			bp->duplex = DUPLEX_HALF;
			/* fall through */
		case BNX2_LINK_STATUS_10FULL:
			bp->line_speed = SPEED_10;
			break;
		case BNX2_LINK_STATUS_100HALF:
			bp->duplex = DUPLEX_HALF;
			/* fall through */
		case BNX2_LINK_STATUS_100BASE_T4:
		case BNX2_LINK_STATUS_100FULL:
			bp->line_speed = SPEED_100;
			break;
		case BNX2_LINK_STATUS_1000HALF:
			bp->duplex = DUPLEX_HALF;
			/* fall through */
		case BNX2_LINK_STATUS_1000FULL:
			bp->line_speed = SPEED_1000;
			break;
		case BNX2_LINK_STATUS_2500HALF:
			bp->duplex = DUPLEX_HALF;
			/* fall through */
		case BNX2_LINK_STATUS_2500FULL:
			bp->line_speed = SPEED_2500;
			break;
		default:
			bp->line_speed = 0;
			break;
		}

		spin_lock(&bp->phy_lock);
		bp->flow_ctrl = 0;
		if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
		    (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
			if (bp->duplex == DUPLEX_FULL)
				bp->flow_ctrl = bp->req_flow_ctrl;
		} else {
			if (msg & BNX2_LINK_STATUS_TX_FC_ENABLED)
				bp->flow_ctrl |= FLOW_CTRL_TX;
			if (msg & BNX2_LINK_STATUS_RX_FC_ENABLED)
				bp->flow_ctrl |= FLOW_CTRL_RX;
		}

		old_port = bp->phy_port;
		if (msg & BNX2_LINK_STATUS_SERDES_LINK)
			bp->phy_port = PORT_FIBRE;
		else
			bp->phy_port = PORT_TP;

		if (old_port != bp->phy_port)
			bnx2_set_default_link(bp);

		spin_unlock(&bp->phy_lock);
	}
	if (bp->link_up != link_up)
		bnx2_report_link(bp);

	bnx2_set_mac_link(bp);
}

static int
bnx2_set_remote_link(struct bnx2 *bp)
{
	u32 evt_code;

	evt_code = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_EVT_CODE_MB);
	switch (evt_code) {
	case BNX2_FW_EVT_CODE_LINK_EVENT:
		bnx2_remote_phy_event(bp);
		break;
	case BNX2_FW_EVT_CODE_SW_TIMER_EXPIRATION_EVENT:
	default:
		bnx2_send_heart_beat(bp);
		break;
	}
	return 0;
}

static int
bnx2_setup_copper_phy(struct bnx2 *bp)
{
	u32 bmcr;
	u32 new_bmcr;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	if (bp->autoneg & AUTONEG_SPEED) {
		u32 adv_reg, adv1000_reg;
		u32 new_adv_reg = 0;
		u32 new_adv1000_reg = 0;

		bnx2_read_phy(bp, bp->mii_adv, &adv_reg);
		adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
			    ADVERTISE_PAUSE_ASYM);

		bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
		adv1000_reg &= PHY_ALL_1000_SPEED;

		if (bp->advertising & ADVERTISED_10baseT_Half)
			new_adv_reg |= ADVERTISE_10HALF;
		if (bp->advertising & ADVERTISED_10baseT_Full)
			new_adv_reg |= ADVERTISE_10FULL;
		if (bp->advertising & ADVERTISED_100baseT_Half)
			new_adv_reg |= ADVERTISE_100HALF;
		if (bp->advertising & ADVERTISED_100baseT_Full)
			new_adv_reg |= ADVERTISE_100FULL;
		if (bp->advertising & ADVERTISED_1000baseT_Full)
			new_adv1000_reg |= ADVERTISE_1000FULL;

		new_adv_reg |= ADVERTISE_CSMA;

		new_adv_reg |= bnx2_phy_get_pause_adv(bp);

		if ((adv1000_reg != new_adv1000_reg) ||
		    (adv_reg != new_adv_reg) ||
		    ((bmcr & BMCR_ANENABLE) == 0)) {

			bnx2_write_phy(bp, bp->mii_adv, new_adv_reg);
			bnx2_write_phy(bp, MII_CTRL1000, new_adv1000_reg);
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_ANRESTART |
				BMCR_ANENABLE);
		}
		else if (bp->link_up) {
			/* Flow ctrl may have changed from auto to forced */
			/* or vice-versa. */

			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	new_bmcr = 0;
	if (bp->req_line_speed == SPEED_100) {
		new_bmcr |= BMCR_SPEED100;
	}
	if (bp->req_duplex == DUPLEX_FULL) {
		new_bmcr |= BMCR_FULLDPLX;
	}
	if (new_bmcr != bmcr) {
		u32 bmsr;

		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);

		if (bmsr & BMSR_LSTATUS) {
			/* Force link down */
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
			spin_unlock_bh(&bp->phy_lock);
			msleep(50);
			spin_lock_bh(&bp->phy_lock);

			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
		}

		bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);

		/* Normally, the new speed is setup after the link has
		 * gone down and up again. In some cases, link will not go
		 * down so we need to set up the new speed here.
		 */
		if (bmsr & BMSR_LSTATUS) {
			bp->line_speed = bp->req_line_speed;
			bp->duplex = bp->req_duplex;
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
	} else {
		bnx2_resolve_flow_ctrl(bp);
		bnx2_set_mac_link(bp);
	}
	return 0;
}

static int
bnx2_setup_phy(struct bnx2 *bp, u8 port)
{
	if (bp->loopback == MAC_LOOPBACK)
		return 0;

	if (bp->phy_flags & PHY_SERDES_FLAG) {
		return (bnx2_setup_serdes_phy(bp, port));
	}
	else {
		return (bnx2_setup_copper_phy(bp));
	}
}

static int
bnx2_init_5709s_phy(struct bnx2 *bp)
{
	u32 val;

	bp->mii_bmcr = MII_BMCR + 0x10;
	bp->mii_bmsr = MII_BMSR + 0x10;
	bp->mii_bmsr1 = MII_BNX2_GP_TOP_AN_STATUS1;
	bp->mii_adv = MII_ADVERTISE + 0x10;
	bp->mii_lpa = MII_LPA + 0x10;
	bp->mii_up1 = MII_BNX2_OVER1G_UP1;

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_AER);
	bnx2_write_phy(bp, MII_BNX2_AER_AER, MII_BNX2_AER_AER_AN_MMD);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
	bnx2_reset_phy(bp);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_SERDES_DIG);

	bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, &val);
	val &= ~MII_BNX2_SD_1000XCTL1_AUTODET;
	val |= MII_BNX2_SD_1000XCTL1_FIBER;
	bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, val);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
	bnx2_read_phy(bp, MII_BNX2_OVER1G_UP1, &val);
	if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG)
		val |= BCM5708S_UP1_2G5;
	else
		val &= ~BCM5708S_UP1_2G5;
	bnx2_write_phy(bp, MII_BNX2_OVER1G_UP1, val);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_BAM_NXTPG);
	bnx2_read_phy(bp, MII_BNX2_BAM_NXTPG_CTL, &val);
	val |= MII_BNX2_NXTPG_CTL_T2 | MII_BNX2_NXTPG_CTL_BAM;
	bnx2_write_phy(bp, MII_BNX2_BAM_NXTPG_CTL, val);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_CL73_USERB0);

	val = MII_BNX2_CL73_BAM_EN | MII_BNX2_CL73_BAM_STA_MGR_EN |
	      MII_BNX2_CL73_BAM_NP_AFT_BP_EN;
	bnx2_write_phy(bp, MII_BNX2_CL73_BAM_CTL1, val);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	return 0;
}

static int
bnx2_init_5708s_phy(struct bnx2 *bp)
{
	u32 val;

	bnx2_reset_phy(bp);

	bp->mii_up1 = BCM5708S_UP1;

	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
	bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);

	bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
	val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);

	bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
	val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);

	if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) {
		bnx2_read_phy(bp, BCM5708S_UP1, &val);
		val |= BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, BCM5708S_UP1, val);
	}

	if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
		/* increase tx signal amplitude */
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
			       BCM5708S_BLK_ADDR_TX_MISC);
		bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
		val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
		bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
	}

	val = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG) &
	      BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;

	if (val) {
		u32 is_backplane;

		is_backplane = REG_RD_IND(bp, bp->shmem_base +
					  BNX2_SHARED_HW_CFG_CONFIG);
		if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_TX_MISC);
			bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_DIG);
		}
	}
	return 0;
}

static int
bnx2_init_5706s_phy(struct bnx2 *bp)
{
	bnx2_reset_phy(bp);

	bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;

	if (CHIP_NUM(bp) == CHIP_NUM_5706)
		REG_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300);

	if (bp->dev->mtu > 1500) {
		u32 val;

		/* Set extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);

		bnx2_write_phy(bp, 0x1c, 0x6c00);
		bnx2_read_phy(bp, 0x1c, &val);
		bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
	}
	else {
		u32 val;

		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val & ~0x4007);

		bnx2_write_phy(bp, 0x1c, 0x6c00);
		bnx2_read_phy(bp, 0x1c, &val);
		bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
	}

	return 0;
}

static int
bnx2_init_copper_phy(struct bnx2 *bp)
{
	u32 val;

	bnx2_reset_phy(bp);

	if (bp->phy_flags & PHY_CRC_FIX_FLAG) {
		bnx2_write_phy(bp, 0x18, 0x0c00);
		bnx2_write_phy(bp, 0x17, 0x000a);
		bnx2_write_phy(bp, 0x15, 0x310b);
		bnx2_write_phy(bp, 0x17, 0x201f);
		bnx2_write_phy(bp, 0x15, 0x9506);
		bnx2_write_phy(bp, 0x17, 0x401f);
		bnx2_write_phy(bp, 0x15, 0x14e2);
		bnx2_write_phy(bp, 0x18, 0x0400);
	}

	if (bp->phy_flags & PHY_DIS_EARLY_DAC_FLAG) {
		bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS,
			       MII_BNX2_DSP_EXPAND_REG | 0x8);
		bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
		val &= ~(1 << 8);
		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val);
	}

	if (bp->dev->mtu > 1500) {
		/* Set extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val | 0x4000);

		bnx2_read_phy(bp, 0x10, &val);
		bnx2_write_phy(bp, 0x10, val | 0x1);
	}
	else {
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val & ~0x4007);

		bnx2_read_phy(bp, 0x10, &val);
		bnx2_write_phy(bp, 0x10, val & ~0x1);
	}

	/* ethernet@wirespeed */
	bnx2_write_phy(bp, 0x18, 0x7007);
	bnx2_read_phy(bp, 0x18, &val);
	bnx2_write_phy(bp, 0x18, val | (1 << 15) | (1 << 4));
	return 0;
}

static int
bnx2_init_phy(struct bnx2 *bp)
{
	u32 val;
	int rc = 0;

	bp->phy_flags &= ~PHY_INT_MODE_MASK_FLAG;
	bp->phy_flags |= PHY_INT_MODE_LINK_READY_FLAG;

	bp->mii_bmcr = MII_BMCR;
	bp->mii_bmsr = MII_BMSR;
	bp->mii_bmsr1 = MII_BMSR;
	bp->mii_adv = MII_ADVERTISE;
	bp->mii_lpa = MII_LPA;

	REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);

	if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
		goto setup_phy;

	bnx2_read_phy(bp, MII_PHYSID1, &val);
	bp->phy_id = val << 16;
	bnx2_read_phy(bp, MII_PHYSID2, &val);
	bp->phy_id |= val & 0xffff;

	if (bp->phy_flags & PHY_SERDES_FLAG) {
		if (CHIP_NUM(bp) == CHIP_NUM_5706)
			rc = bnx2_init_5706s_phy(bp);
		else if (CHIP_NUM(bp) == CHIP_NUM_5708)
			rc = bnx2_init_5708s_phy(bp);
		else if (CHIP_NUM(bp) == CHIP_NUM_5709)
			rc = bnx2_init_5709s_phy(bp);
	}
	else {
		rc = bnx2_init_copper_phy(bp);
	}

setup_phy:
	if (!rc)
		rc = bnx2_setup_phy(bp, bp->phy_port);

	return rc;
}

static int
bnx2_set_mac_loopback(struct bnx2 *bp)
{
	u32 mac_mode;

	mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
	mac_mode &= ~BNX2_EMAC_MODE_PORT;
	mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
	REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
	bp->link_up = 1;
	return 0;
}

static int bnx2_test_link(struct bnx2 *);

static int
bnx2_set_phy_loopback(struct bnx2 *bp)
{
	u32 mac_mode;
	int rc, i;

	spin_lock_bh(&bp->phy_lock);
	rc = bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK | BMCR_FULLDPLX |
			    BMCR_SPEED1000);
	spin_unlock_bh(&bp->phy_lock);
	if (rc)
		return rc;

	for (i = 0; i < 10; i++) {
		if (bnx2_test_link(bp) == 0)
			break;
		msleep(100);
	}

	mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
	mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
		      BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
		      BNX2_EMAC_MODE_25G_MODE);

	mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
	REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
	bp->link_up = 1;
	return 0;
}

static int
bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int silent)
{
	int i;
	u32 val;

	bp->fw_wr_seq++;
	msg_data |= bp->fw_wr_seq;

	REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);

	/* wait for an acknowledgement. */
	for (i = 0; i < (FW_ACK_TIME_OUT_MS / 10); i++) {
		msleep(10);

		val = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_MB);

		if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
			break;
	}
	if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
		return 0;

	/* If we timed out, inform the firmware that this is the case. */
	if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
		if (!silent)
			printk(KERN_ERR PFX "fw sync timeout, reset code = "
					    "%x\n", msg_data);

		msg_data &= ~BNX2_DRV_MSG_CODE;
		msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;

		REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);

		return -EBUSY;
	}

	if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
		return -EIO;

	return 0;
}

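/* Driver/firmware handshake used by bnx2_fw_sync(): a sequence number is
 * or'ed into the message and written to the BNX2_DRV_MB mailbox in shared
 * memory; the bootcode echoes the sequence into BNX2_FW_MB once it has
 * accepted the command. Messages tagged BNX2_DRV_MSG_DATA_WAIT0 return
 * success without requiring the echo; otherwise a missing echo after
 * FW_ACK_TIME_OUT_MS is reported back with BNX2_DRV_MSG_CODE_FW_TIMEOUT.
 */
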
static int
bnx2_init_5709_context(struct bnx2 *bp)
{
	int i, ret = 0;
	u32 val;

	val = BNX2_CTX_COMMAND_ENABLED | BNX2_CTX_COMMAND_MEM_INIT | (1 << 12);
	val |= (BCM_PAGE_BITS - 8) << 16;
	REG_WR(bp, BNX2_CTX_COMMAND, val);
	for (i = 0; i < 10; i++) {
		val = REG_RD(bp, BNX2_CTX_COMMAND);
		if (!(val & BNX2_CTX_COMMAND_MEM_INIT))
			break;
		udelay(2);
	}
	if (val & BNX2_CTX_COMMAND_MEM_INIT)
		return -EBUSY;

	for (i = 0; i < bp->ctx_pages; i++) {
		int j;

		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0,
		       (bp->ctx_blk_mapping[i] & 0xffffffff) |
		       BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID);
		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA1,
		       (u64) bp->ctx_blk_mapping[i] >> 32);
		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL, i |
		       BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
		for (j = 0; j < 10; j++) {

			val = REG_RD(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL);
			if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
				break;
			udelay(5);
		}
		if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
			ret = -EBUSY;
			break;
		}
	}
	return ret;
}

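/* On the 5709 the context memory lives in host DRAM rather than on chip:
 * the loop above programs one host page table entry per context page,
 * writing the low DMA address (plus a VALID bit) and the high DMA
 * address, then kicking the entry with WRITE_REQ and polling until the
 * hardware clears the bit to confirm the entry was latched.
 */
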
2116 static void
2117 bnx2_init_context(struct bnx2 *bp)
2119 u32 vcid;
2121 vcid = 96;
2122 while (vcid) {
2123 u32 vcid_addr, pcid_addr, offset;
2124 int i;
2126 vcid--;
2128 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
2129 u32 new_vcid;
2131 vcid_addr = GET_PCID_ADDR(vcid);
2132 if (vcid & 0x8) {
2133 new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
2134 }
2135 else {
2136 new_vcid = vcid;
2137 }
2138 pcid_addr = GET_PCID_ADDR(new_vcid);
2139 }
2140 else {
2141 vcid_addr = GET_CID_ADDR(vcid);
2142 pcid_addr = vcid_addr;
2143 }
2145 for (i = 0; i < (CTX_SIZE / PHY_CTX_SIZE); i++) {
2146 vcid_addr += (i << PHY_CTX_SHIFT);
2147 pcid_addr += (i << PHY_CTX_SHIFT);
2149 REG_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
2150 REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
2152 /* Zero out the context. */
2153 for (offset = 0; offset < PHY_CTX_SIZE; offset += 4)
2154 CTX_WR(bp, vcid_addr, offset, 0);
2155 }
2156 }
2157 }
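/* 5706 A0 workaround: bnx2_alloc_bad_rbuf() below drains the RX buffer
 * allocator, saves the good buffers (address bit 9 clear) in a 512-entry
 * array, and frees only those back.  The bad blocks stay allocated and
 * are thereby removed from the free pool.
 */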
2159 static int
2160 bnx2_alloc_bad_rbuf(struct bnx2 *bp)
2161 {
2162 u16 *good_mbuf;
2163 u32 good_mbuf_cnt;
2164 u32 val;
2166 good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
2167 if (good_mbuf == NULL) {
2168 printk(KERN_ERR PFX "Failed to allocate memory in "
2169 "bnx2_alloc_bad_rbuf\n");
2170 return -ENOMEM;
2173 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
2174 BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);
2176 good_mbuf_cnt = 0;
2178 /* Allocate a bunch of mbufs and save the good ones in an array. */
2179 val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
2180 while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
2181 REG_WR_IND(bp, BNX2_RBUF_COMMAND, BNX2_RBUF_COMMAND_ALLOC_REQ);
2183 val = REG_RD_IND(bp, BNX2_RBUF_FW_BUF_ALLOC);
2185 val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;
2187 /* The addresses with Bit 9 set are bad memory blocks. */
2188 if (!(val & (1 << 9))) {
2189 good_mbuf[good_mbuf_cnt] = (u16) val;
2190 good_mbuf_cnt++;
2191 }
2193 val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
2194 }
2196 /* Free the good ones back to the mbuf pool thus discarding
2197 * all the bad ones. */
2198 while (good_mbuf_cnt) {
2199 good_mbuf_cnt--;
2201 val = good_mbuf[good_mbuf_cnt];
2202 val = (val << 9) | val | 1;
2204 REG_WR_IND(bp, BNX2_RBUF_FW_BUF_FREE, val);
2205 }
2206 kfree(good_mbuf);
2207 return 0;
2208 }
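/* The EMAC match registers split the station address big-endian style:
 * MATCH0 holds the two most significant bytes, MATCH1 the remaining four,
 * as bnx2_set_mac_addr() below shows.
 */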
2210 static void
2211 bnx2_set_mac_addr(struct bnx2 *bp)
2212 {
2213 u32 val;
2214 u8 *mac_addr = bp->dev->dev_addr;
2216 val = (mac_addr[0] << 8) | mac_addr[1];
2218 REG_WR(bp, BNX2_EMAC_MAC_MATCH0, val);
2220 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
2221 (mac_addr[4] << 8) | mac_addr[5];
2223 REG_WR(bp, BNX2_EMAC_MAC_MATCH1, val);
2224 }
2226 static inline int
2227 bnx2_alloc_rx_page(struct bnx2 *bp, u16 index)
2229 dma_addr_t mapping;
2230 struct sw_pg *rx_pg = &bp->rx_pg_ring[index];
2231 struct rx_bd *rxbd =
2232 &bp->rx_pg_desc_ring[RX_RING(index)][RX_IDX(index)];
2233 struct page *page = alloc_page(GFP_ATOMIC);
2235 if (!page)
2236 return -ENOMEM;
2237 mapping = pci_map_page(bp->pdev, page, 0, PAGE_SIZE,
2238 PCI_DMA_FROMDEVICE);
2239 rx_pg->page = page;
2240 pci_unmap_addr_set(rx_pg, mapping, mapping);
2241 rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
2242 rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
2243 return 0;
2244 }
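/* For the jumbo S/G receive path, whole pages are posted to a separate
 * page ring: bnx2_alloc_rx_page() above maps one page and writes its
 * 64-bit bus address into the corresponding rx_bd, split across the
 * _hi/_lo dwords.
 */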
2246 static void
2247 bnx2_free_rx_page(struct bnx2 *bp, u16 index)
2249 struct sw_pg *rx_pg = &bp->rx_pg_ring[index];
2250 struct page *page = rx_pg->page;
2252 if (!page)
2253 return;
2255 pci_unmap_page(bp->pdev, pci_unmap_addr(rx_pg, mapping), PAGE_SIZE,
2256 PCI_DMA_FROMDEVICE);
2258 __free_page(page);
2259 rx_pg->page = NULL;
2262 static inline int
2263 bnx2_alloc_rx_skb(struct bnx2 *bp, u16 index)
2265 struct sk_buff *skb;
2266 struct sw_bd *rx_buf = &bp->rx_buf_ring[index];
2267 dma_addr_t mapping;
2268 struct rx_bd *rxbd = &bp->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
2269 unsigned long align;
2271 skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
2272 if (skb == NULL) {
2273 return -ENOMEM;
2274 }
2276 if (unlikely((align = (unsigned long) skb->data & (BNX2_RX_ALIGN - 1))))
2277 skb_reserve(skb, BNX2_RX_ALIGN - align);
2279 mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
2280 PCI_DMA_FROMDEVICE);
2282 rx_buf->skb = skb;
2283 pci_unmap_addr_set(rx_buf, mapping, mapping);
2285 rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
2286 rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
2288 bp->rx_prod_bseq += bp->rx_buf_use_size;
2290 return 0;
2293 static int
2294 bnx2_phy_event_is_set(struct bnx2 *bp, u32 event)
2296 struct status_block *sblk = bp->status_blk;
2297 u32 new_link_state, old_link_state;
2298 int is_set = 1;
2300 new_link_state = sblk->status_attn_bits & event;
2301 old_link_state = sblk->status_attn_bits_ack & event;
2302 if (new_link_state != old_link_state) {
2303 if (new_link_state)
2304 REG_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD, event);
2305 else
2306 REG_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD, event);
2307 } else
2308 is_set = 0;
2310 return is_set;
2313 static void
2314 bnx2_phy_int(struct bnx2 *bp)
2316 if (bnx2_phy_event_is_set(bp, STATUS_ATTN_BITS_LINK_STATE)) {
2317 spin_lock(&bp->phy_lock);
2318 bnx2_set_link(bp);
2319 spin_unlock(&bp->phy_lock);
2321 if (bnx2_phy_event_is_set(bp, STATUS_ATTN_BITS_TIMER_ABORT))
2322 bnx2_set_remote_link(bp);
2326 static void
2327 bnx2_tx_int(struct bnx2 *bp)
2329 struct status_block *sblk = bp->status_blk;
2330 u16 hw_cons, sw_cons, sw_ring_cons;
2331 int tx_free_bd = 0;
2333 hw_cons = bp->hw_tx_cons = sblk->status_tx_quick_consumer_index0;
2334 if ((hw_cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT) {
2335 hw_cons++;
2337 sw_cons = bp->tx_cons;
2339 while (sw_cons != hw_cons) {
2340 struct sw_bd *tx_buf;
2341 struct sk_buff *skb;
2342 int i, last;
2344 sw_ring_cons = TX_RING_IDX(sw_cons);
2346 tx_buf = &bp->tx_buf_ring[sw_ring_cons];
2347 skb = tx_buf->skb;
2349 /* partial BD completions possible with TSO packets */
2350 if (skb_is_gso(skb)) {
2351 u16 last_idx, last_ring_idx;
2353 last_idx = sw_cons +
2354 skb_shinfo(skb)->nr_frags + 1;
2355 last_ring_idx = sw_ring_cons +
2356 skb_shinfo(skb)->nr_frags + 1;
2357 if (unlikely(last_ring_idx >= MAX_TX_DESC_CNT)) {
2358 last_idx++;
2359 }
2360 if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
2361 break;
2362 }
2363 }
2365 pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
2366 skb_headlen(skb), PCI_DMA_TODEVICE);
2368 tx_buf->skb = NULL;
2369 last = skb_shinfo(skb)->nr_frags;
2371 for (i = 0; i < last; i++) {
2372 sw_cons = NEXT_TX_BD(sw_cons);
2374 pci_unmap_page(bp->pdev,
2375 pci_unmap_addr(
2376 &bp->tx_buf_ring[TX_RING_IDX(sw_cons)],
2377 mapping),
2378 skb_shinfo(skb)->frags[i].size,
2379 PCI_DMA_TODEVICE);
2382 sw_cons = NEXT_TX_BD(sw_cons);
2384 tx_free_bd += last + 1;
2386 dev_kfree_skb(skb);
2388 hw_cons = bp->hw_tx_cons =
2389 sblk->status_tx_quick_consumer_index0;
2391 if ((hw_cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT) {
2392 hw_cons++;
2393 }
2394 }
2396 bp->tx_cons = sw_cons;
2397 /* Need to make the tx_cons update visible to bnx2_start_xmit()
2398 * before checking for netif_queue_stopped(). Without the
2399 * memory barrier, there is a small possibility that bnx2_start_xmit()
2400 * will miss it and cause the queue to be stopped forever.
2401 */
2402 smp_mb();
2404 if (unlikely(netif_queue_stopped(bp->dev)) &&
2405 (bnx2_tx_avail(bp) > bp->tx_wake_thresh)) {
2406 netif_tx_lock(bp->dev);
2407 if ((netif_queue_stopped(bp->dev)) &&
2408 (bnx2_tx_avail(bp) > bp->tx_wake_thresh))
2409 netif_wake_queue(bp->dev);
2410 netif_tx_unlock(bp->dev);
2411 }
2412 }
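/* bnx2_reuse_rx_skb_pages() below recycles up to 'count' page-ring
 * entries on error paths.  If a partially built skb is passed in, its
 * last page fragment is first detached, remapped, and handed back to the
 * consumer slot before the skb is freed.
 */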
2414 static void
2415 bnx2_reuse_rx_skb_pages(struct bnx2 *bp, struct sk_buff *skb, int count)
2417 struct sw_pg *cons_rx_pg, *prod_rx_pg;
2418 struct rx_bd *cons_bd, *prod_bd;
2419 dma_addr_t mapping;
2420 int i;
2421 u16 hw_prod = bp->rx_pg_prod, prod;
2422 u16 cons = bp->rx_pg_cons;
2424 for (i = 0; i < count; i++) {
2425 prod = RX_PG_RING_IDX(hw_prod);
2427 prod_rx_pg = &bp->rx_pg_ring[prod];
2428 cons_rx_pg = &bp->rx_pg_ring[cons];
2429 cons_bd = &bp->rx_pg_desc_ring[RX_RING(cons)][RX_IDX(cons)];
2430 prod_bd = &bp->rx_pg_desc_ring[RX_RING(prod)][RX_IDX(prod)];
2432 if (i == 0 && skb) {
2433 struct page *page;
2434 struct skb_shared_info *shinfo;
2436 shinfo = skb_shinfo(skb);
2437 shinfo->nr_frags--;
2438 page = shinfo->frags[shinfo->nr_frags].page;
2439 shinfo->frags[shinfo->nr_frags].page = NULL;
2440 mapping = pci_map_page(bp->pdev, page, 0, PAGE_SIZE,
2441 PCI_DMA_FROMDEVICE);
2442 cons_rx_pg->page = page;
2443 pci_unmap_addr_set(cons_rx_pg, mapping, mapping);
2444 dev_kfree_skb(skb);
2445 }
2446 if (prod != cons) {
2447 prod_rx_pg->page = cons_rx_pg->page;
2448 cons_rx_pg->page = NULL;
2449 pci_unmap_addr_set(prod_rx_pg, mapping,
2450 pci_unmap_addr(cons_rx_pg, mapping));
2452 prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
2453 prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
2454 }
2456 cons = RX_PG_RING_IDX(NEXT_RX_BD(cons));
2457 hw_prod = NEXT_RX_BD(hw_prod);
2458 }
2459 bp->rx_pg_prod = hw_prod;
2460 bp->rx_pg_cons = cons;
2461 }
2463 static inline void
2464 bnx2_reuse_rx_skb(struct bnx2 *bp, struct sk_buff *skb,
2465 u16 cons, u16 prod)
2467 struct sw_bd *cons_rx_buf, *prod_rx_buf;
2468 struct rx_bd *cons_bd, *prod_bd;
2470 cons_rx_buf = &bp->rx_buf_ring[cons];
2471 prod_rx_buf = &bp->rx_buf_ring[prod];
2473 pci_dma_sync_single_for_device(bp->pdev,
2474 pci_unmap_addr(cons_rx_buf, mapping),
2475 bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
2477 bp->rx_prod_bseq += bp->rx_buf_use_size;
2479 prod_rx_buf->skb = skb;
2481 if (cons == prod)
2482 return;
2484 pci_unmap_addr_set(prod_rx_buf, mapping,
2485 pci_unmap_addr(cons_rx_buf, mapping));
2487 cons_bd = &bp->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
2488 prod_bd = &bp->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
2489 prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
2490 prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
2491 }
2493 static int
2494 bnx2_rx_skb(struct bnx2 *bp, struct sk_buff *skb, unsigned int len,
2495 unsigned int hdr_len, dma_addr_t dma_addr, u32 ring_idx)
2497 int err;
2498 u16 prod = ring_idx & 0xffff;
2500 err = bnx2_alloc_rx_skb(bp, prod);
2501 if (unlikely(err)) {
2502 bnx2_reuse_rx_skb(bp, skb, (u16) (ring_idx >> 16), prod);
2503 if (hdr_len) {
2504 unsigned int raw_len = len + 4;
2505 int pages = PAGE_ALIGN(raw_len - hdr_len) >> PAGE_SHIFT;
2507 bnx2_reuse_rx_skb_pages(bp, NULL, pages);
2508 }
2509 return err;
2510 }
2512 skb_reserve(skb, bp->rx_offset);
2513 pci_unmap_single(bp->pdev, dma_addr, bp->rx_buf_use_size,
2514 PCI_DMA_FROMDEVICE);
2516 if (hdr_len == 0) {
2517 skb_put(skb, len);
2518 return 0;
2519 } else {
2520 unsigned int i, frag_len, frag_size, pages;
2521 struct sw_pg *rx_pg;
2522 u16 pg_cons = bp->rx_pg_cons;
2523 u16 pg_prod = bp->rx_pg_prod;
2525 frag_size = len + 4 - hdr_len;
2526 pages = PAGE_ALIGN(frag_size) >> PAGE_SHIFT;
2527 skb_put(skb, hdr_len);
2529 for (i = 0; i < pages; i++) {
2530 frag_len = min(frag_size, (unsigned int) PAGE_SIZE);
2531 if (unlikely(frag_len <= 4)) {
2532 unsigned int tail = 4 - frag_len;
2534 bp->rx_pg_cons = pg_cons;
2535 bp->rx_pg_prod = pg_prod;
2536 bnx2_reuse_rx_skb_pages(bp, NULL, pages - i);
2537 skb->len -= tail;
2538 if (i == 0) {
2539 skb->tail -= tail;
2540 } else {
2541 skb_frag_t *frag =
2542 &skb_shinfo(skb)->frags[i - 1];
2543 frag->size -= tail;
2544 skb->data_len -= tail;
2545 skb->truesize -= tail;
2546 }
2547 return 0;
2548 }
2549 rx_pg = &bp->rx_pg_ring[pg_cons];
2551 pci_unmap_page(bp->pdev, pci_unmap_addr(rx_pg, mapping),
2552 PAGE_SIZE, PCI_DMA_FROMDEVICE);
2554 if (i == pages - 1)
2555 frag_len -= 4;
2557 skb_fill_page_desc(skb, i, rx_pg->page, 0, frag_len);
2558 rx_pg->page = NULL;
2560 err = bnx2_alloc_rx_page(bp, RX_PG_RING_IDX(pg_prod));
2561 if (unlikely(err)) {
2562 bp->rx_pg_cons = pg_cons;
2563 bp->rx_pg_prod = pg_prod;
2564 bnx2_reuse_rx_skb_pages(bp, skb, pages - i);
2565 return err;
2566 }
2568 frag_size -= frag_len;
2569 skb->data_len += frag_len;
2570 skb->truesize += frag_len;
2571 skb->len += frag_len;
2573 pg_prod = NEXT_RX_BD(pg_prod);
2574 pg_cons = RX_PG_RING_IDX(NEXT_RX_BD(pg_cons));
2575 }
2576 bp->rx_pg_prod = pg_prod;
2577 bp->rx_pg_cons = pg_cons;
2578 }
2579 return 0;
2580 }
2582 static inline u16
2583 bnx2_get_hw_rx_cons(struct bnx2 *bp)
2585 u16 cons = bp->status_blk->status_rx_quick_consumer_index0;
2587 if (unlikely((cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT))
2588 cons++;
2589 return cons;
2590 }
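/* The hardware consumer index is bumped past values whose low byte is
 * all ones, presumably because the last descriptor of each rx ring page
 * is a chain pointer rather than a real buffer, so that slot never
 * carries a completion.
 */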
2592 static int
2593 bnx2_rx_int(struct bnx2 *bp, int budget)
2595 u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
2596 struct l2_fhdr *rx_hdr;
2597 int rx_pkt = 0, pg_ring_used = 0;
2599 hw_cons = bnx2_get_hw_rx_cons(bp);
2600 sw_cons = bp->rx_cons;
2601 sw_prod = bp->rx_prod;
2603 /* Memory barrier necessary as speculative reads of the rx
2604 * buffer can be ahead of the index in the status block
2605 */
2606 rmb();
2607 while (sw_cons != hw_cons) {
2608 unsigned int len, hdr_len;
2609 u32 status;
2610 struct sw_bd *rx_buf;
2611 struct sk_buff *skb;
2612 dma_addr_t dma_addr;
2614 sw_ring_cons = RX_RING_IDX(sw_cons);
2615 sw_ring_prod = RX_RING_IDX(sw_prod);
2617 rx_buf = &bp->rx_buf_ring[sw_ring_cons];
2618 skb = rx_buf->skb;
2620 rx_buf->skb = NULL;
2622 dma_addr = pci_unmap_addr(rx_buf, mapping);
2624 pci_dma_sync_single_for_cpu(bp->pdev, dma_addr,
2625 bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
2627 rx_hdr = (struct l2_fhdr *) skb->data;
2628 len = rx_hdr->l2_fhdr_pkt_len;
2630 if ((status = rx_hdr->l2_fhdr_status) &
2631 (L2_FHDR_ERRORS_BAD_CRC |
2632 L2_FHDR_ERRORS_PHY_DECODE |
2633 L2_FHDR_ERRORS_ALIGNMENT |
2634 L2_FHDR_ERRORS_TOO_SHORT |
2635 L2_FHDR_ERRORS_GIANT_FRAME)) {
2637 bnx2_reuse_rx_skb(bp, skb, sw_ring_cons, sw_ring_prod);
2638 goto next_rx;
2639 }
2640 hdr_len = 0;
2641 if (status & L2_FHDR_STATUS_SPLIT) {
2642 hdr_len = rx_hdr->l2_fhdr_ip_xsum;
2643 pg_ring_used = 1;
2644 } else if (len > bp->rx_jumbo_thresh) {
2645 hdr_len = bp->rx_jumbo_thresh;
2646 pg_ring_used = 1;
2647 }
2649 len -= 4;
2651 if (len <= bp->rx_copy_thresh) {
2652 struct sk_buff *new_skb;
2654 new_skb = netdev_alloc_skb(bp->dev, len + 2);
2655 if (new_skb == NULL) {
2656 bnx2_reuse_rx_skb(bp, skb, sw_ring_cons,
2657 sw_ring_prod);
2658 goto next_rx;
2659 }
2661 /* aligned copy */
2662 skb_copy_from_linear_data_offset(skb, bp->rx_offset - 2,
2663 new_skb->data, len + 2);
2664 skb_reserve(new_skb, 2);
2665 skb_put(new_skb, len);
2667 bnx2_reuse_rx_skb(bp, skb,
2668 sw_ring_cons, sw_ring_prod);
2670 skb = new_skb;
2671 } else if (unlikely(bnx2_rx_skb(bp, skb, len, hdr_len, dma_addr,
2672 (sw_ring_cons << 16) | sw_ring_prod)))
2673 goto next_rx;
2675 skb->protocol = eth_type_trans(skb, bp->dev);
2677 if ((len > (bp->dev->mtu + ETH_HLEN)) &&
2678 (ntohs(skb->protocol) != 0x8100)) {
2680 dev_kfree_skb(skb);
2681 goto next_rx;
2682 }
2685 skb->ip_summed = CHECKSUM_NONE;
2686 if (bp->rx_csum &&
2687 (status & (L2_FHDR_STATUS_TCP_SEGMENT |
2688 L2_FHDR_STATUS_UDP_DATAGRAM))) {
2690 if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
2691 L2_FHDR_ERRORS_UDP_XSUM)) == 0))
2692 skb->ip_summed = CHECKSUM_UNNECESSARY;
2693 }
2695 #ifdef BCM_VLAN
2696 if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) && (bp->vlgrp != 0)) {
2697 vlan_hwaccel_receive_skb(skb, bp->vlgrp,
2698 rx_hdr->l2_fhdr_vlan_tag);
2699 }
2700 else
2701 #endif
2702 netif_receive_skb(skb);
2704 bp->dev->last_rx = jiffies;
2705 rx_pkt++;
2707 next_rx:
2708 sw_cons = NEXT_RX_BD(sw_cons);
2709 sw_prod = NEXT_RX_BD(sw_prod);
2711 if (rx_pkt == budget)
2712 break;
2714 /* Refresh hw_cons to see if there is new work */
2715 if (sw_cons == hw_cons) {
2716 hw_cons = bnx2_get_hw_rx_cons(bp);
2717 rmb();
2718 }
2719 }
2720 bp->rx_cons = sw_cons;
2721 bp->rx_prod = sw_prod;
2723 if (pg_ring_used)
2724 REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_PG_BDIDX,
2725 bp->rx_pg_prod);
2727 REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, sw_prod);
2729 REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bp->rx_prod_bseq);
2731 mmiowb();
2733 return rx_pkt;
2734 }
2737 /* MSI ISR - The only difference between this and the INTx ISR
2738 * is that the MSI interrupt is always serviced.
2739 */
2740 static irqreturn_t
2741 bnx2_msi(int irq, void *dev_instance)
2743 struct net_device *dev = dev_instance;
2744 struct bnx2 *bp = netdev_priv(dev);
2746 prefetch(bp->status_blk);
2747 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2748 BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
2749 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
2751 /* Return here if interrupt is disabled. */
2752 if (unlikely(atomic_read(&bp->intr_sem) != 0))
2753 return IRQ_HANDLED;
2755 netif_rx_schedule(dev, &bp->napi);
2757 return IRQ_HANDLED;
2760 static irqreturn_t
2761 bnx2_msi_1shot(int irq, void *dev_instance)
2763 struct net_device *dev = dev_instance;
2764 struct bnx2 *bp = netdev_priv(dev);
2766 prefetch(bp->status_blk);
2768 /* Return here if interrupt is disabled. */
2769 if (unlikely(atomic_read(&bp->intr_sem) != 0))
2770 return IRQ_HANDLED;
2772 netif_rx_schedule(dev, &bp->napi);
2774 return IRQ_HANDLED;
2777 static irqreturn_t
2778 bnx2_interrupt(int irq, void *dev_instance)
2780 struct net_device *dev = dev_instance;
2781 struct bnx2 *bp = netdev_priv(dev);
2782 struct status_block *sblk = bp->status_blk;
2784 /* When using INTx, it is possible for the interrupt to arrive
2785 * at the CPU before the status block posted prior to the
2786 * interrupt. Reading a register will flush the status block.
2787 * When using MSI, the MSI message will always complete after
2788 * the status block write.
2789 */
2790 if ((sblk->status_idx == bp->last_status_idx) &&
2791 (REG_RD(bp, BNX2_PCICFG_MISC_STATUS) &
2792 BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
2793 return IRQ_NONE;
2795 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2796 BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
2797 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
2799 /* Read back to deassert IRQ immediately to avoid too many
2800 * spurious interrupts.
2801 */
2802 REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
2804 /* Return here if interrupt is shared and is disabled. */
2805 if (unlikely(atomic_read(&bp->intr_sem) != 0))
2806 return IRQ_HANDLED;
2808 if (netif_rx_schedule_prep(dev, &bp->napi)) {
2809 bp->last_status_idx = sblk->status_idx;
2810 __netif_rx_schedule(dev, &bp->napi);
2811 }
2813 return IRQ_HANDLED;
2814 }
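/* Summary of the three ISRs above: INTx must detect spurious/shared
 * interrupts (status index unchanged and INTA deasserted => IRQ_NONE)
 * and reads back the ack register to deassert the line; plain MSI always
 * acks and schedules NAPI; the one-shot MSI variant skips the ack
 * entirely, relying on the host coalescing one-shot mode set up in
 * bnx2_init_chip().
 */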
2816 #define STATUS_ATTN_EVENTS (STATUS_ATTN_BITS_LINK_STATE | \
2817 STATUS_ATTN_BITS_TIMER_ABORT)
2819 static inline int
2820 bnx2_has_work(struct bnx2 *bp)
2822 struct status_block *sblk = bp->status_blk;
2824 if ((bnx2_get_hw_rx_cons(bp) != bp->rx_cons) ||
2825 (sblk->status_tx_quick_consumer_index0 != bp->hw_tx_cons))
2826 return 1;
2828 if ((sblk->status_attn_bits & STATUS_ATTN_EVENTS) !=
2829 (sblk->status_attn_bits_ack & STATUS_ATTN_EVENTS))
2830 return 1;
2832 return 0;
2833 }
2835 static int bnx2_poll_work(struct bnx2 *bp, int work_done, int budget)
2837 struct status_block *sblk = bp->status_blk;
2838 u32 status_attn_bits = sblk->status_attn_bits;
2839 u32 status_attn_bits_ack = sblk->status_attn_bits_ack;
2841 if ((status_attn_bits & STATUS_ATTN_EVENTS) !=
2842 (status_attn_bits_ack & STATUS_ATTN_EVENTS)) {
2844 bnx2_phy_int(bp);
2846 /* This is needed to take care of transient status
2847 * during link changes.
2848 */
2849 REG_WR(bp, BNX2_HC_COMMAND,
2850 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
2851 REG_RD(bp, BNX2_HC_COMMAND);
2854 if (sblk->status_tx_quick_consumer_index0 != bp->hw_tx_cons)
2855 bnx2_tx_int(bp);
2857 if (bnx2_get_hw_rx_cons(bp) != bp->rx_cons)
2858 work_done += bnx2_rx_int(bp, budget - work_done);
2860 return work_done;
2861 }
2863 static int bnx2_poll(struct napi_struct *napi, int budget)
2865 struct bnx2 *bp = container_of(napi, struct bnx2, napi);
2866 int work_done = 0;
2867 struct status_block *sblk = bp->status_blk;
2869 while (1) {
2870 work_done = bnx2_poll_work(bp, work_done, budget);
2872 if (unlikely(work_done >= budget))
2873 break;
2875 /* bp->last_status_idx is used below to tell the hw how
2876 * much work has been processed, so we must read it before
2877 * checking for more work.
2878 */
2879 bp->last_status_idx = sblk->status_idx;
2880 rmb();
2881 if (likely(!bnx2_has_work(bp))) {
2882 netif_rx_complete(bp->dev, napi);
2883 if (likely(bp->flags & USING_MSI_FLAG)) {
2884 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2885 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
2886 bp->last_status_idx);
2887 break;
2888 }
2889 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2890 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
2891 BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
2892 bp->last_status_idx);
2894 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2895 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
2896 bp->last_status_idx);
2897 break;
2898 }
2899 }
2901 return work_done;
2902 }
2904 /* Called with rtnl_lock from vlan functions and also netif_tx_lock
2905 * from set_multicast.
2906 */
2907 static void
2908 bnx2_set_rx_mode(struct net_device *dev)
2910 struct bnx2 *bp = netdev_priv(dev);
2911 u32 rx_mode, sort_mode;
2912 int i;
2914 spin_lock_bh(&bp->phy_lock);
2916 rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
2917 BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
2918 sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
2919 #ifdef BCM_VLAN
2920 if (!bp->vlgrp && !(bp->flags & ASF_ENABLE_FLAG))
2921 rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
2922 #else
2923 if (!(bp->flags & ASF_ENABLE_FLAG))
2924 rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
2925 #endif
2926 if (dev->flags & IFF_PROMISC) {
2927 /* Promiscuous mode. */
2928 rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
2929 sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
2930 BNX2_RPM_SORT_USER0_PROM_VLAN;
2931 }
2932 else if (dev->flags & IFF_ALLMULTI) {
2933 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
2934 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
2935 0xffffffff);
2936 }
2937 sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
2938 }
2939 else {
2940 /* Accept one or more multicast(s). */
2941 struct dev_mc_list *mclist;
2942 u32 mc_filter[NUM_MC_HASH_REGISTERS];
2943 u32 regidx;
2944 u32 bit;
2945 u32 crc;
2947 memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);
2949 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
2950 i++, mclist = mclist->next) {
2952 crc = ether_crc_le(ETH_ALEN, mclist->dmi_addr);
2953 bit = crc & 0xff;
2954 regidx = (bit & 0xe0) >> 5;
2955 bit &= 0x1f;
2956 mc_filter[regidx] |= (1 << bit);
2957 }
2959 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
2960 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
2961 mc_filter[i]);
2962 }
2964 sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
2965 }
2967 if (rx_mode != bp->rx_mode) {
2968 bp->rx_mode = rx_mode;
2969 REG_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
2970 }
2972 REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
2973 REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
2974 REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);
2976 spin_unlock_bh(&bp->phy_lock);
2977 }
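/* Multicast filtering above hashes each address with ether_crc_le() and
 * uses the low 8 CRC bits to pick one of 256 filter bits spread across
 * the 8 32-bit BNX2_EMAC_MULTICAST_HASHn registers.  For example (value
 * illustrative), a CRC whose low byte is 0xb3 sets bit 19 (0xb3 & 0x1f)
 * of hash register 5 ((0xb3 & 0xe0) >> 5).
 */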
2979 static void
2980 load_rv2p_fw(struct bnx2 *bp, u32 *rv2p_code, u32 rv2p_code_len,
2981 u32 rv2p_proc)
2983 int i;
2984 u32 val;
2987 for (i = 0; i < rv2p_code_len; i += 8) {
2988 REG_WR(bp, BNX2_RV2P_INSTR_HIGH, cpu_to_le32(*rv2p_code));
2989 rv2p_code++;
2990 REG_WR(bp, BNX2_RV2P_INSTR_LOW, cpu_to_le32(*rv2p_code));
2991 rv2p_code++;
2993 if (rv2p_proc == RV2P_PROC1) {
2994 val = (i / 8) | BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
2995 REG_WR(bp, BNX2_RV2P_PROC1_ADDR_CMD, val);
2996 }
2997 else {
2998 val = (i / 8) | BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
2999 REG_WR(bp, BNX2_RV2P_PROC2_ADDR_CMD, val);
3000 }
3001 }
3003 /* Reset the processor, un-stall is done later. */
3004 if (rv2p_proc == RV2P_PROC1) {
3005 REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
3006 }
3007 else {
3008 REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
3009 }
3010 }
3012 static int
3013 load_cpu_fw(struct bnx2 *bp, struct cpu_reg *cpu_reg, struct fw_info *fw)
3015 u32 offset;
3016 u32 val;
3017 int rc;
3019 /* Halt the CPU. */
3020 val = REG_RD_IND(bp, cpu_reg->mode);
3021 val |= cpu_reg->mode_value_halt;
3022 REG_WR_IND(bp, cpu_reg->mode, val);
3023 REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);
3025 /* Load the Text area. */
3026 offset = cpu_reg->spad_base + (fw->text_addr - cpu_reg->mips_view_base);
3027 if (fw->gz_text) {
3028 int j;
3030 rc = zlib_inflate_blob(fw->text, FW_BUF_SIZE, fw->gz_text,
3031 fw->gz_text_len);
3032 if (rc < 0)
3033 return rc;
3035 for (j = 0; j < (fw->text_len / 4); j++, offset += 4) {
3036 REG_WR_IND(bp, offset, cpu_to_le32(fw->text[j]));
3040 /* Load the Data area. */
3041 offset = cpu_reg->spad_base + (fw->data_addr - cpu_reg->mips_view_base);
3042 if (fw->data) {
3043 int j;
3045 for (j = 0; j < (fw->data_len / 4); j++, offset += 4) {
3046 REG_WR_IND(bp, offset, fw->data[j]);
3050 /* Load the SBSS area. */
3051 offset = cpu_reg->spad_base + (fw->sbss_addr - cpu_reg->mips_view_base);
3052 if (fw->sbss_len) {
3053 int j;
3055 for (j = 0; j < (fw->sbss_len / 4); j++, offset += 4) {
3056 REG_WR_IND(bp, offset, 0);
3060 /* Load the BSS area. */
3061 offset = cpu_reg->spad_base + (fw->bss_addr - cpu_reg->mips_view_base);
3062 if (fw->bss_len) {
3063 int j;
3065 for (j = 0; j < (fw->bss_len/4); j++, offset += 4) {
3066 REG_WR_IND(bp, offset, 0);
3070 /* Load the Read-Only area. */
3071 offset = cpu_reg->spad_base +
3072 (fw->rodata_addr - cpu_reg->mips_view_base);
3073 if (fw->rodata) {
3074 int j;
3076 for (j = 0; j < (fw->rodata_len / 4); j++, offset += 4) {
3077 REG_WR_IND(bp, offset, fw->rodata[j]);
3081 /* Clear the pre-fetch instruction. */
3082 REG_WR_IND(bp, cpu_reg->inst, 0);
3083 REG_WR_IND(bp, cpu_reg->pc, fw->start_addr);
3085 /* Start the CPU. */
3086 val = REG_RD_IND(bp, cpu_reg->mode);
3087 val &= ~cpu_reg->mode_value_halt;
3088 REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);
3089 REG_WR_IND(bp, cpu_reg->mode, val);
3091 return 0;
3092 }
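/* load_cpu_fw() above follows the same sequence for every on-chip RISC
 * engine: halt the CPU, copy the (optionally gzip-compressed) text
 * section plus data/rodata into its scratchpad view, zero sbss/bss,
 * clear the prefetch instruction register, point the PC at the entry
 * address, and finally clear the halt bit to start it.
 */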
3094 static int
3095 bnx2_init_cpus(struct bnx2 *bp)
3097 struct cpu_reg cpu_reg;
3098 struct fw_info *fw;
3099 int rc, rv2p_len;
3100 void *text, *rv2p;
3102 /* Initialize the RV2P processor. */
3103 text = vmalloc(FW_BUF_SIZE);
3104 if (!text)
3105 return -ENOMEM;
3106 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3107 rv2p = bnx2_xi_rv2p_proc1;
3108 rv2p_len = sizeof(bnx2_xi_rv2p_proc1);
3109 } else {
3110 rv2p = bnx2_rv2p_proc1;
3111 rv2p_len = sizeof(bnx2_rv2p_proc1);
3113 rc = zlib_inflate_blob(text, FW_BUF_SIZE, rv2p, rv2p_len);
3114 if (rc < 0)
3115 goto init_cpu_err;
3117 load_rv2p_fw(bp, text, rc /* == len */, RV2P_PROC1);
3119 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3120 rv2p = bnx2_xi_rv2p_proc2;
3121 rv2p_len = sizeof(bnx2_xi_rv2p_proc2);
3122 } else {
3123 rv2p = bnx2_rv2p_proc2;
3124 rv2p_len = sizeof(bnx2_rv2p_proc2);
3126 rc = zlib_inflate_blob(text, FW_BUF_SIZE, rv2p, rv2p_len);
3127 if (rc < 0)
3128 goto init_cpu_err;
3130 load_rv2p_fw(bp, text, rc /* == len */, RV2P_PROC2);
3132 /* Initialize the RX Processor. */
3133 cpu_reg.mode = BNX2_RXP_CPU_MODE;
3134 cpu_reg.mode_value_halt = BNX2_RXP_CPU_MODE_SOFT_HALT;
3135 cpu_reg.mode_value_sstep = BNX2_RXP_CPU_MODE_STEP_ENA;
3136 cpu_reg.state = BNX2_RXP_CPU_STATE;
3137 cpu_reg.state_value_clear = 0xffffff;
3138 cpu_reg.gpr0 = BNX2_RXP_CPU_REG_FILE;
3139 cpu_reg.evmask = BNX2_RXP_CPU_EVENT_MASK;
3140 cpu_reg.pc = BNX2_RXP_CPU_PROGRAM_COUNTER;
3141 cpu_reg.inst = BNX2_RXP_CPU_INSTRUCTION;
3142 cpu_reg.bp = BNX2_RXP_CPU_HW_BREAKPOINT;
3143 cpu_reg.spad_base = BNX2_RXP_SCRATCH;
3144 cpu_reg.mips_view_base = 0x8000000;
3146 if (CHIP_NUM(bp) == CHIP_NUM_5709)
3147 fw = &bnx2_rxp_fw_09;
3148 else
3149 fw = &bnx2_rxp_fw_06;
3151 fw->text = text;
3152 rc = load_cpu_fw(bp, &cpu_reg, fw);
3153 if (rc)
3154 goto init_cpu_err;
3156 /* Initialize the TX Processor. */
3157 cpu_reg.mode = BNX2_TXP_CPU_MODE;
3158 cpu_reg.mode_value_halt = BNX2_TXP_CPU_MODE_SOFT_HALT;
3159 cpu_reg.mode_value_sstep = BNX2_TXP_CPU_MODE_STEP_ENA;
3160 cpu_reg.state = BNX2_TXP_CPU_STATE;
3161 cpu_reg.state_value_clear = 0xffffff;
3162 cpu_reg.gpr0 = BNX2_TXP_CPU_REG_FILE;
3163 cpu_reg.evmask = BNX2_TXP_CPU_EVENT_MASK;
3164 cpu_reg.pc = BNX2_TXP_CPU_PROGRAM_COUNTER;
3165 cpu_reg.inst = BNX2_TXP_CPU_INSTRUCTION;
3166 cpu_reg.bp = BNX2_TXP_CPU_HW_BREAKPOINT;
3167 cpu_reg.spad_base = BNX2_TXP_SCRATCH;
3168 cpu_reg.mips_view_base = 0x8000000;
3170 if (CHIP_NUM(bp) == CHIP_NUM_5709)
3171 fw = &bnx2_txp_fw_09;
3172 else
3173 fw = &bnx2_txp_fw_06;
3175 fw->text = text;
3176 rc = load_cpu_fw(bp, &cpu_reg, fw);
3177 if (rc)
3178 goto init_cpu_err;
3180 /* Initialize the TX Patch-up Processor. */
3181 cpu_reg.mode = BNX2_TPAT_CPU_MODE;
3182 cpu_reg.mode_value_halt = BNX2_TPAT_CPU_MODE_SOFT_HALT;
3183 cpu_reg.mode_value_sstep = BNX2_TPAT_CPU_MODE_STEP_ENA;
3184 cpu_reg.state = BNX2_TPAT_CPU_STATE;
3185 cpu_reg.state_value_clear = 0xffffff;
3186 cpu_reg.gpr0 = BNX2_TPAT_CPU_REG_FILE;
3187 cpu_reg.evmask = BNX2_TPAT_CPU_EVENT_MASK;
3188 cpu_reg.pc = BNX2_TPAT_CPU_PROGRAM_COUNTER;
3189 cpu_reg.inst = BNX2_TPAT_CPU_INSTRUCTION;
3190 cpu_reg.bp = BNX2_TPAT_CPU_HW_BREAKPOINT;
3191 cpu_reg.spad_base = BNX2_TPAT_SCRATCH;
3192 cpu_reg.mips_view_base = 0x8000000;
3194 if (CHIP_NUM(bp) == CHIP_NUM_5709)
3195 fw = &bnx2_tpat_fw_09;
3196 else
3197 fw = &bnx2_tpat_fw_06;
3199 fw->text = text;
3200 rc = load_cpu_fw(bp, &cpu_reg, fw);
3201 if (rc)
3202 goto init_cpu_err;
3204 /* Initialize the Completion Processor. */
3205 cpu_reg.mode = BNX2_COM_CPU_MODE;
3206 cpu_reg.mode_value_halt = BNX2_COM_CPU_MODE_SOFT_HALT;
3207 cpu_reg.mode_value_sstep = BNX2_COM_CPU_MODE_STEP_ENA;
3208 cpu_reg.state = BNX2_COM_CPU_STATE;
3209 cpu_reg.state_value_clear = 0xffffff;
3210 cpu_reg.gpr0 = BNX2_COM_CPU_REG_FILE;
3211 cpu_reg.evmask = BNX2_COM_CPU_EVENT_MASK;
3212 cpu_reg.pc = BNX2_COM_CPU_PROGRAM_COUNTER;
3213 cpu_reg.inst = BNX2_COM_CPU_INSTRUCTION;
3214 cpu_reg.bp = BNX2_COM_CPU_HW_BREAKPOINT;
3215 cpu_reg.spad_base = BNX2_COM_SCRATCH;
3216 cpu_reg.mips_view_base = 0x8000000;
3218 if (CHIP_NUM(bp) == CHIP_NUM_5709)
3219 fw = &bnx2_com_fw_09;
3220 else
3221 fw = &bnx2_com_fw_06;
3223 fw->text = text;
3224 rc = load_cpu_fw(bp, &cpu_reg, fw);
3225 if (rc)
3226 goto init_cpu_err;
3228 /* Initialize the Command Processor. */
3229 cpu_reg.mode = BNX2_CP_CPU_MODE;
3230 cpu_reg.mode_value_halt = BNX2_CP_CPU_MODE_SOFT_HALT;
3231 cpu_reg.mode_value_sstep = BNX2_CP_CPU_MODE_STEP_ENA;
3232 cpu_reg.state = BNX2_CP_CPU_STATE;
3233 cpu_reg.state_value_clear = 0xffffff;
3234 cpu_reg.gpr0 = BNX2_CP_CPU_REG_FILE;
3235 cpu_reg.evmask = BNX2_CP_CPU_EVENT_MASK;
3236 cpu_reg.pc = BNX2_CP_CPU_PROGRAM_COUNTER;
3237 cpu_reg.inst = BNX2_CP_CPU_INSTRUCTION;
3238 cpu_reg.bp = BNX2_CP_CPU_HW_BREAKPOINT;
3239 cpu_reg.spad_base = BNX2_CP_SCRATCH;
3240 cpu_reg.mips_view_base = 0x8000000;
3242 if (CHIP_NUM(bp) == CHIP_NUM_5709)
3243 fw = &bnx2_cp_fw_09;
3244 else
3245 fw = &bnx2_cp_fw_06;
3247 fw->text = text;
3248 rc = load_cpu_fw(bp, &cpu_reg, fw);
3250 init_cpu_err:
3251 vfree(text);
3252 return rc;
3255 static int
3256 bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
3258 u16 pmcsr;
3260 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
3262 switch (state) {
3263 case PCI_D0: {
3264 u32 val;
3266 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
3267 (pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
3268 PCI_PM_CTRL_PME_STATUS);
3270 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
3271 /* delay required during transition out of D3hot */
3272 msleep(20);
3274 val = REG_RD(bp, BNX2_EMAC_MODE);
3275 val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
3276 val &= ~BNX2_EMAC_MODE_MPKT;
3277 REG_WR(bp, BNX2_EMAC_MODE, val);
3279 val = REG_RD(bp, BNX2_RPM_CONFIG);
3280 val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
3281 REG_WR(bp, BNX2_RPM_CONFIG, val);
3282 break;
3283 }
3284 case PCI_D3hot: {
3285 int i;
3286 u32 val, wol_msg;
3288 if (bp->wol) {
3289 u32 advertising;
3290 u8 autoneg;
3292 autoneg = bp->autoneg;
3293 advertising = bp->advertising;
3295 if (bp->phy_port == PORT_TP) {
3296 bp->autoneg = AUTONEG_SPEED;
3297 bp->advertising = ADVERTISED_10baseT_Half |
3298 ADVERTISED_10baseT_Full |
3299 ADVERTISED_100baseT_Half |
3300 ADVERTISED_100baseT_Full |
3301 ADVERTISED_Autoneg;
3304 spin_lock_bh(&bp->phy_lock);
3305 bnx2_setup_phy(bp, bp->phy_port);
3306 spin_unlock_bh(&bp->phy_lock);
3308 bp->autoneg = autoneg;
3309 bp->advertising = advertising;
3311 bnx2_set_mac_addr(bp);
3313 val = REG_RD(bp, BNX2_EMAC_MODE);
3315 /* Enable port mode. */
3316 val &= ~BNX2_EMAC_MODE_PORT;
3317 val |= BNX2_EMAC_MODE_MPKT_RCVD |
3318 BNX2_EMAC_MODE_ACPI_RCVD |
3319 BNX2_EMAC_MODE_MPKT;
3320 if (bp->phy_port == PORT_TP)
3321 val |= BNX2_EMAC_MODE_PORT_MII;
3322 else {
3323 val |= BNX2_EMAC_MODE_PORT_GMII;
3324 if (bp->line_speed == SPEED_2500)
3325 val |= BNX2_EMAC_MODE_25G_MODE;
3328 REG_WR(bp, BNX2_EMAC_MODE, val);
3330 /* receive all multicast */
3331 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
3332 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
3333 0xffffffff);
3335 REG_WR(bp, BNX2_EMAC_RX_MODE,
3336 BNX2_EMAC_RX_MODE_SORT_MODE);
3338 val = 1 | BNX2_RPM_SORT_USER0_BC_EN |
3339 BNX2_RPM_SORT_USER0_MC_EN;
3340 REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
3341 REG_WR(bp, BNX2_RPM_SORT_USER0, val);
3342 REG_WR(bp, BNX2_RPM_SORT_USER0, val |
3343 BNX2_RPM_SORT_USER0_ENA);
3345 /* Need to enable EMAC and RPM for WOL. */
3346 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
3347 BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
3348 BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
3349 BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);
3351 val = REG_RD(bp, BNX2_RPM_CONFIG);
3352 val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
3353 REG_WR(bp, BNX2_RPM_CONFIG, val);
3355 wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
3356 }
3357 else {
3358 wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
3359 }
3361 if (!(bp->flags & NO_WOL_FLAG))
3362 bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg, 0);
3364 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
3365 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
3366 (CHIP_ID(bp) == CHIP_ID_5706_A1)) {
3368 if (bp->wol)
3369 pmcsr |= 3;
3370 }
3371 else {
3372 pmcsr |= 3;
3373 }
3374 if (bp->wol) {
3375 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
3376 }
3377 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
3378 pmcsr);
3380 /* No more memory access after this point until
3381 * device is brought back to D0.
3382 */
3383 udelay(50);
3384 break;
3385 }
3386 default:
3387 return -EINVAL;
3388 }
3389 return 0;
3390 }
3392 static int
3393 bnx2_acquire_nvram_lock(struct bnx2 *bp)
3395 u32 val;
3396 int j;
3398 /* Request access to the flash interface. */
3399 REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
3400 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3401 val = REG_RD(bp, BNX2_NVM_SW_ARB);
3402 if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
3403 break;
3405 udelay(5);
3406 }
3408 if (j >= NVRAM_TIMEOUT_COUNT)
3409 return -EBUSY;
3411 return 0;
3412 }
3414 static int
3415 bnx2_release_nvram_lock(struct bnx2 *bp)
3417 int j;
3418 u32 val;
3420 /* Relinquish nvram interface. */
3421 REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);
3423 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3424 val = REG_RD(bp, BNX2_NVM_SW_ARB);
3425 if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
3426 break;
3428 udelay(5);
3431 if (j >= NVRAM_TIMEOUT_COUNT)
3432 return -EBUSY;
3434 return 0;
3438 static int
3439 bnx2_enable_nvram_write(struct bnx2 *bp)
3441 u32 val;
3443 val = REG_RD(bp, BNX2_MISC_CFG);
3444 REG_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);
3446 if (bp->flash_info->flags & BNX2_NV_WREN) {
3447 int j;
3449 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3450 REG_WR(bp, BNX2_NVM_COMMAND,
3451 BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);
3453 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3454 udelay(5);
3456 val = REG_RD(bp, BNX2_NVM_COMMAND);
3457 if (val & BNX2_NVM_COMMAND_DONE)
3458 break;
3461 if (j >= NVRAM_TIMEOUT_COUNT)
3462 return -EBUSY;
3464 return 0;
3467 static void
3468 bnx2_disable_nvram_write(struct bnx2 *bp)
3470 u32 val;
3472 val = REG_RD(bp, BNX2_MISC_CFG);
3473 REG_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
3477 static void
3478 bnx2_enable_nvram_access(struct bnx2 *bp)
3480 u32 val;
3482 val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
3483 /* Enable both bits, even on read. */
3484 REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
3485 val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
3488 static void
3489 bnx2_disable_nvram_access(struct bnx2 *bp)
3491 u32 val;
3493 val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
3494 /* Disable both bits, even after read. */
3495 REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
3496 val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
3497 BNX2_NVM_ACCESS_ENABLE_WR_EN));
3500 static int
3501 bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
3503 u32 cmd;
3504 int j;
3506 if (bp->flash_info->flags & BNX2_NV_BUFFERED)
3507 /* Buffered flash, no erase needed */
3508 return 0;
3510 /* Build an erase command */
3511 cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
3512 BNX2_NVM_COMMAND_DOIT;
3514 /* Need to clear DONE bit separately. */
3515 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3517 /* Address of the NVRAM to read from. */
3518 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3520 /* Issue an erase command. */
3521 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3523 /* Wait for completion. */
3524 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3525 u32 val;
3527 udelay(5);
3529 val = REG_RD(bp, BNX2_NVM_COMMAND);
3530 if (val & BNX2_NVM_COMMAND_DONE)
3531 break;
3534 if (j >= NVRAM_TIMEOUT_COUNT)
3535 return -EBUSY;
3537 return 0;
3540 static int
3541 bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
3543 u32 cmd;
3544 int j;
3546 /* Build the command word. */
3547 cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;
3549 /* Calculate an offset of a buffered flash, not needed for 5709. */
3550 if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
3551 offset = ((offset / bp->flash_info->page_size) <<
3552 bp->flash_info->page_bits) +
3553 (offset % bp->flash_info->page_size);
3556 /* Need to clear DONE bit separately. */
3557 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3559 /* Address of the NVRAM to read from. */
3560 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3562 /* Issue a read command. */
3563 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3565 /* Wait for completion. */
3566 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3567 u32 val;
3569 udelay(5);
3571 val = REG_RD(bp, BNX2_NVM_COMMAND);
3572 if (val & BNX2_NVM_COMMAND_DONE) {
3573 val = REG_RD(bp, BNX2_NVM_READ);
3575 val = be32_to_cpu(val);
3576 memcpy(ret_val, &val, 4);
3577 break;
3578 }
3579 }
3580 if (j >= NVRAM_TIMEOUT_COUNT)
3581 return -EBUSY;
3583 return 0;
3584 }
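/* NVRAM dwords are accessed through one command register; the FIRST and
 * LAST flags bracket a burst.  For buffered parts the BNX2_NV_TRANSLATE
 * path above converts a linear offset into page/column form, e.g. with
 * an (illustrative) 264-byte page and page_bits = 9, offset 600 maps to
 * ((600 / 264) << 9) + (600 % 264) = 1096.
 */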
3587 static int
3588 bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
3590 u32 cmd, val32;
3591 int j;
3593 /* Build the command word. */
3594 cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;
3596 /* Calculate an offset of a buffered flash, not needed for 5709. */
3597 if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
3598 offset = ((offset / bp->flash_info->page_size) <<
3599 bp->flash_info->page_bits) +
3600 (offset % bp->flash_info->page_size);
3603 /* Need to clear DONE bit separately. */
3604 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3606 memcpy(&val32, val, 4);
3607 val32 = cpu_to_be32(val32);
3609 /* Write the data. */
3610 REG_WR(bp, BNX2_NVM_WRITE, val32);
3612 /* Address of the NVRAM to write to. */
3613 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3615 /* Issue the write command. */
3616 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3618 /* Wait for completion. */
3619 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3620 udelay(5);
3622 if (REG_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
3623 break;
3625 if (j >= NVRAM_TIMEOUT_COUNT)
3626 return -EBUSY;
3628 return 0;
3631 static int
3632 bnx2_init_nvram(struct bnx2 *bp)
3634 u32 val;
3635 int j, entry_count, rc = 0;
3636 struct flash_spec *flash;
3638 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3639 bp->flash_info = &flash_5709;
3640 goto get_flash_size;
3643 /* Determine the selected interface. */
3644 val = REG_RD(bp, BNX2_NVM_CFG1);
3646 entry_count = ARRAY_SIZE(flash_table);
3648 if (val & 0x40000000) {
3650 /* Flash interface has been reconfigured */
3651 for (j = 0, flash = &flash_table[0]; j < entry_count;
3652 j++, flash++) {
3653 if ((val & FLASH_BACKUP_STRAP_MASK) ==
3654 (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
3655 bp->flash_info = flash;
3656 break;
3657 }
3658 }
3659 }
3660 else {
3661 u32 mask;
3662 /* Not yet reconfigured */
3664 if (val & (1 << 23))
3665 mask = FLASH_BACKUP_STRAP_MASK;
3666 else
3667 mask = FLASH_STRAP_MASK;
3669 for (j = 0, flash = &flash_table[0]; j < entry_count;
3670 j++, flash++) {
3672 if ((val & mask) == (flash->strapping & mask)) {
3673 bp->flash_info = flash;
3675 /* Request access to the flash interface. */
3676 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
3677 return rc;
3679 /* Enable access to flash interface */
3680 bnx2_enable_nvram_access(bp);
3682 /* Reconfigure the flash interface */
3683 REG_WR(bp, BNX2_NVM_CFG1, flash->config1);
3684 REG_WR(bp, BNX2_NVM_CFG2, flash->config2);
3685 REG_WR(bp, BNX2_NVM_CFG3, flash->config3);
3686 REG_WR(bp, BNX2_NVM_WRITE1, flash->write1);
3688 /* Disable access to flash interface */
3689 bnx2_disable_nvram_access(bp);
3690 bnx2_release_nvram_lock(bp);
3692 break;
3693 }
3694 }
3695 } /* if (val & 0x40000000) */
3697 if (j == entry_count) {
3698 bp->flash_info = NULL;
3699 printk(KERN_ALERT PFX "Unknown flash/EEPROM type.\n");
3700 return -ENODEV;
3701 }
3703 get_flash_size:
3704 val = REG_RD_IND(bp, bp->shmem_base + BNX2_SHARED_HW_CFG_CONFIG2);
3705 val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
3706 if (val)
3707 bp->flash_size = val;
3708 else
3709 bp->flash_size = bp->flash_info->total_size;
3711 return rc;
3712 }
3714 static int
3715 bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
3716 int buf_size)
3718 int rc = 0;
3719 u32 cmd_flags, offset32, len32, extra;
3721 if (buf_size == 0)
3722 return 0;
3724 /* Request access to the flash interface. */
3725 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
3726 return rc;
3728 /* Enable access to flash interface */
3729 bnx2_enable_nvram_access(bp);
3731 len32 = buf_size;
3732 offset32 = offset;
3733 extra = 0;
3735 cmd_flags = 0;
3737 if (offset32 & 3) {
3738 u8 buf[4];
3739 u32 pre_len;
3741 offset32 &= ~3;
3742 pre_len = 4 - (offset & 3);
3744 if (pre_len >= len32) {
3745 pre_len = len32;
3746 cmd_flags = BNX2_NVM_COMMAND_FIRST |
3747 BNX2_NVM_COMMAND_LAST;
3748 }
3749 else {
3750 cmd_flags = BNX2_NVM_COMMAND_FIRST;
3751 }
3753 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3755 if (rc)
3756 return rc;
3758 memcpy(ret_buf, buf + (offset & 3), pre_len);
3760 offset32 += 4;
3761 ret_buf += pre_len;
3762 len32 -= pre_len;
3763 }
3764 if (len32 & 3) {
3765 extra = 4 - (len32 & 3);
3766 len32 = (len32 + 4) & ~3;
3767 }
3769 if (len32 == 4) {
3770 u8 buf[4];
3772 if (cmd_flags)
3773 cmd_flags = BNX2_NVM_COMMAND_LAST;
3774 else
3775 cmd_flags = BNX2_NVM_COMMAND_FIRST |
3776 BNX2_NVM_COMMAND_LAST;
3778 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3780 memcpy(ret_buf, buf, 4 - extra);
3781 }
3782 else if (len32 > 0) {
3783 u8 buf[4];
3785 /* Read the first word. */
3786 if (cmd_flags)
3787 cmd_flags = 0;
3788 else
3789 cmd_flags = BNX2_NVM_COMMAND_FIRST;
3791 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);
3793 /* Advance to the next dword. */
3794 offset32 += 4;
3795 ret_buf += 4;
3796 len32 -= 4;
3798 while (len32 > 4 && rc == 0) {
3799 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);
3801 /* Advance to the next dword. */
3802 offset32 += 4;
3803 ret_buf += 4;
3804 len32 -= 4;
3807 if (rc)
3808 return rc;
3810 cmd_flags = BNX2_NVM_COMMAND_LAST;
3811 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3813 memcpy(ret_buf, buf, 4 - extra);
3816 /* Disable access to flash interface */
3817 bnx2_disable_nvram_access(bp);
3819 bnx2_release_nvram_lock(bp);
3821 return rc;
3822 }
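/* bnx2_nvram_write() below widens the request to dword alignment (using
 * reads of the surrounding bytes), then works page by page: non-buffered
 * parts are read out in full, erased, and rewritten with the merged
 * data, while buffered parts can be written in place.
 */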
3824 static int
3825 bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
3826 int buf_size)
3828 u32 written, offset32, len32;
3829 u8 *buf, start[4], end[4], *align_buf = NULL, *flash_buffer = NULL;
3830 int rc = 0;
3831 int align_start, align_end;
3833 buf = data_buf;
3834 offset32 = offset;
3835 len32 = buf_size;
3836 align_start = align_end = 0;
3838 if ((align_start = (offset32 & 3))) {
3839 offset32 &= ~3;
3840 len32 += align_start;
3841 if (len32 < 4)
3842 len32 = 4;
3843 if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
3844 return rc;
3847 if (len32 & 3) {
3848 align_end = 4 - (len32 & 3);
3849 len32 += align_end;
3850 if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4, end, 4)))
3851 return rc;
3854 if (align_start || align_end) {
3855 align_buf = kmalloc(len32, GFP_KERNEL);
3856 if (align_buf == NULL)
3857 return -ENOMEM;
3858 if (align_start) {
3859 memcpy(align_buf, start, 4);
3860 }
3861 if (align_end) {
3862 memcpy(align_buf + len32 - 4, end, 4);
3863 }
3864 memcpy(align_buf + align_start, data_buf, buf_size);
3865 buf = align_buf;
3866 }
3868 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
3869 flash_buffer = kmalloc(264, GFP_KERNEL);
3870 if (flash_buffer == NULL) {
3871 rc = -ENOMEM;
3872 goto nvram_write_end;
3876 written = 0;
3877 while ((written < len32) && (rc == 0)) {
3878 u32 page_start, page_end, data_start, data_end;
3879 u32 addr, cmd_flags;
3880 int i;
3882 /* Find the page_start addr */
3883 page_start = offset32 + written;
3884 page_start -= (page_start % bp->flash_info->page_size);
3885 /* Find the page_end addr */
3886 page_end = page_start + bp->flash_info->page_size;
3887 /* Find the data_start addr */
3888 data_start = (written == 0) ? offset32 : page_start;
3889 /* Find the data_end addr */
3890 data_end = (page_end > offset32 + len32) ?
3891 (offset32 + len32) : page_end;
3893 /* Request access to the flash interface. */
3894 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
3895 goto nvram_write_end;
3897 /* Enable access to flash interface */
3898 bnx2_enable_nvram_access(bp);
3900 cmd_flags = BNX2_NVM_COMMAND_FIRST;
3901 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
3902 int j;
3904 /* Read the whole page into the buffer
3905 * (non-buffer flash only) */
3906 for (j = 0; j < bp->flash_info->page_size; j += 4) {
3907 if (j == (bp->flash_info->page_size - 4)) {
3908 cmd_flags |= BNX2_NVM_COMMAND_LAST;
3910 rc = bnx2_nvram_read_dword(bp,
3911 page_start + j,
3912 &flash_buffer[j],
3913 cmd_flags);
3915 if (rc)
3916 goto nvram_write_end;
3918 cmd_flags = 0;
3922 /* Enable writes to flash interface (unlock write-protect) */
3923 if ((rc = bnx2_enable_nvram_write(bp)) != 0)
3924 goto nvram_write_end;
3926 /* Loop to write back the buffer data from page_start to
3927 * data_start */
3928 i = 0;
3929 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
3930 /* Erase the page */
3931 if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
3932 goto nvram_write_end;
3934 /* Re-enable the write again for the actual write */
3935 bnx2_enable_nvram_write(bp);
3937 for (addr = page_start; addr < data_start;
3938 addr += 4, i += 4) {
3940 rc = bnx2_nvram_write_dword(bp, addr,
3941 &flash_buffer[i], cmd_flags);
3943 if (rc != 0)
3944 goto nvram_write_end;
3946 cmd_flags = 0;
3950 /* Loop to write the new data from data_start to data_end */
3951 for (addr = data_start; addr < data_end; addr += 4, i += 4) {
3952 if ((addr == page_end - 4) ||
3953 ((bp->flash_info->flags & BNX2_NV_BUFFERED) &&
3954 (addr == data_end - 4))) {
3956 cmd_flags |= BNX2_NVM_COMMAND_LAST;
3958 rc = bnx2_nvram_write_dword(bp, addr, buf,
3959 cmd_flags);
3961 if (rc != 0)
3962 goto nvram_write_end;
3964 cmd_flags = 0;
3965 buf += 4;
3968 /* Loop to write back the buffer data from data_end
3969 * to page_end */
3970 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
3971 for (addr = data_end; addr < page_end;
3972 addr += 4, i += 4) {
3974 if (addr == page_end-4) {
3975 cmd_flags = BNX2_NVM_COMMAND_LAST;
3977 rc = bnx2_nvram_write_dword(bp, addr,
3978 &flash_buffer[i], cmd_flags);
3980 if (rc != 0)
3981 goto nvram_write_end;
3983 cmd_flags = 0;
3987 /* Disable writes to flash interface (lock write-protect) */
3988 bnx2_disable_nvram_write(bp);
3990 /* Disable access to flash interface */
3991 bnx2_disable_nvram_access(bp);
3992 bnx2_release_nvram_lock(bp);
3994 /* Increment written */
3995 written += data_end - data_start;
3998 nvram_write_end:
3999 kfree(flash_buffer);
4000 kfree(align_buf);
4001 return rc;
4002 }
4004 static void
4005 bnx2_init_remote_phy(struct bnx2 *bp)
4007 u32 val;
4009 bp->phy_flags &= ~REMOTE_PHY_CAP_FLAG;
4010 if (!(bp->phy_flags & PHY_SERDES_FLAG))
4011 return;
4013 val = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_CAP_MB);
4014 if ((val & BNX2_FW_CAP_SIGNATURE_MASK) != BNX2_FW_CAP_SIGNATURE)
4015 return;
4017 if (val & BNX2_FW_CAP_REMOTE_PHY_CAPABLE) {
4018 bp->phy_flags |= REMOTE_PHY_CAP_FLAG;
4020 val = REG_RD_IND(bp, bp->shmem_base + BNX2_LINK_STATUS);
4021 if (val & BNX2_LINK_STATUS_SERDES_LINK)
4022 bp->phy_port = PORT_FIBRE;
4023 else
4024 bp->phy_port = PORT_TP;
4026 if (netif_running(bp->dev)) {
4027 u32 sig;
4029 if (val & BNX2_LINK_STATUS_LINK_UP) {
4030 bp->link_up = 1;
4031 netif_carrier_on(bp->dev);
4032 } else {
4033 bp->link_up = 0;
4034 netif_carrier_off(bp->dev);
4035 }
4036 sig = BNX2_DRV_ACK_CAP_SIGNATURE |
4037 BNX2_FW_CAP_REMOTE_PHY_CAPABLE;
4038 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_ACK_CAP_MB,
4039 sig);
4040 }
4041 }
4042 }
4044 static int
4045 bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
4047 u32 val;
4048 int i, rc = 0;
4049 u8 old_port;
4051 /* Wait for the current PCI transaction to complete before
4052 * issuing a reset. */
4053 REG_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
4054 BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
4055 BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
4056 BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
4057 BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
4058 val = REG_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
4059 udelay(5);
4061 /* Wait for the firmware to tell us it is ok to issue a reset. */
4062 bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1);
4064 /* Deposit a driver reset signature so the firmware knows that
4065 * this is a soft reset. */
4066 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_RESET_SIGNATURE,
4067 BNX2_DRV_RESET_SIGNATURE_MAGIC);
4069 /* Do a dummy read to force the chip to complete all current transactions
4070 * before we issue a reset. */
4071 val = REG_RD(bp, BNX2_MISC_ID);
4073 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4074 REG_WR(bp, BNX2_MISC_COMMAND, BNX2_MISC_COMMAND_SW_RESET);
4075 REG_RD(bp, BNX2_MISC_COMMAND);
4076 udelay(5);
4078 val = BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
4079 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
4081 pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, val);
4083 } else {
4084 val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
4085 BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
4086 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
4088 /* Chip reset. */
4089 REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);
4091 /* Reading back any register after chip reset will hang the
4092 * bus on 5706 A0 and A1. The msleep below provides plenty
4093 * of margin for write posting.
4094 */
4095 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
4096 (CHIP_ID(bp) == CHIP_ID_5706_A1))
4097 msleep(20);
4099 /* Reset takes approximately 30 usec */
4100 for (i = 0; i < 10; i++) {
4101 val = REG_RD(bp, BNX2_PCICFG_MISC_CONFIG);
4102 if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
4103 BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0)
4104 break;
4105 udelay(10);
4108 if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
4109 BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
4110 printk(KERN_ERR PFX "Chip reset did not complete\n");
4111 return -EBUSY;
4115 /* Make sure byte swapping is properly configured. */
4116 val = REG_RD(bp, BNX2_PCI_SWAP_DIAG0);
4117 if (val != 0x01020304) {
4118 printk(KERN_ERR PFX "Chip not in correct endian mode\n");
4119 return -ENODEV;
4120 }
4122 /* Wait for the firmware to finish its initialization. */
4123 rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 0);
4124 if (rc)
4125 return rc;
4127 spin_lock_bh(&bp->phy_lock);
4128 old_port = bp->phy_port;
4129 bnx2_init_remote_phy(bp);
4130 if ((bp->phy_flags & REMOTE_PHY_CAP_FLAG) && old_port != bp->phy_port)
4131 bnx2_set_default_remote_link(bp);
4132 spin_unlock_bh(&bp->phy_lock);
4134 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
4135 /* Adjust the voltage regulator to two steps lower. The default
4136 * of this register is 0x0000000e. */
4137 REG_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);
4139 /* Remove bad rbuf memory from the free pool. */
4140 rc = bnx2_alloc_bad_rbuf(bp);
4141 }
4143 return rc;
4144 }
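/* bnx2_init_chip() below redoes the post-reset setup in order: DMA
 * byte-swap configuration (with an extra control-word swap on big-endian
 * hosts), context initialization, CPU firmware load, MTU and backoff
 * seed programming, host coalescing tick values, and a final
 * BNX2_DRV_MSG_DATA_WAIT2 handshake before enabling the chip.
 */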
4146 static int
4147 bnx2_init_chip(struct bnx2 *bp)
4149 u32 val;
4150 int rc;
4152 /* Make sure the interrupt is not active. */
4153 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
4155 val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
4156 BNX2_DMA_CONFIG_DATA_WORD_SWAP |
4157 #ifdef __BIG_ENDIAN
4158 BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
4159 #endif
4160 BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
4161 DMA_READ_CHANS << 12 |
4162 DMA_WRITE_CHANS << 16;
4164 val |= (0x2 << 20) | (1 << 11);
4166 if ((bp->flags & PCIX_FLAG) && (bp->bus_speed_mhz == 133))
4167 val |= (1 << 23);
4169 if ((CHIP_NUM(bp) == CHIP_NUM_5706) &&
4170 (CHIP_ID(bp) != CHIP_ID_5706_A0) && !(bp->flags & PCIX_FLAG))
4171 val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;
4173 REG_WR(bp, BNX2_DMA_CONFIG, val);
4175 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
4176 val = REG_RD(bp, BNX2_TDMA_CONFIG);
4177 val |= BNX2_TDMA_CONFIG_ONE_DMA;
4178 REG_WR(bp, BNX2_TDMA_CONFIG, val);
4181 if (bp->flags & PCIX_FLAG) {
4182 u16 val16;
4184 pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
4185 &val16);
4186 pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
4187 val16 & ~PCI_X_CMD_ERO);
4190 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
4191 BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
4192 BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
4193 BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);
4195 /* Initialize context mapping and zero out the quick contexts. The
4196 * context block must have already been enabled. */
4197 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4198 rc = bnx2_init_5709_context(bp);
4199 if (rc)
4200 return rc;
4201 } else
4202 bnx2_init_context(bp);
4204 if ((rc = bnx2_init_cpus(bp)) != 0)
4205 return rc;
4207 bnx2_init_nvram(bp);
4209 bnx2_set_mac_addr(bp);
4211 val = REG_RD(bp, BNX2_MQ_CONFIG);
4212 val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
4213 val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
4214 if (CHIP_ID(bp) == CHIP_ID_5709_A0 || CHIP_ID(bp) == CHIP_ID_5709_A1)
4215 val |= BNX2_MQ_CONFIG_HALT_DIS;
4217 REG_WR(bp, BNX2_MQ_CONFIG, val);
4219 val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
4220 REG_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
4221 REG_WR(bp, BNX2_MQ_KNL_WIND_END, val);
4223 val = (BCM_PAGE_BITS - 8) << 24;
4224 REG_WR(bp, BNX2_RV2P_CONFIG, val);
4226 /* Configure page size. */
4227 val = REG_RD(bp, BNX2_TBDR_CONFIG);
4228 val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
4229 val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
4230 REG_WR(bp, BNX2_TBDR_CONFIG, val);
4232 val = bp->mac_addr[0] +
4233 (bp->mac_addr[1] << 8) +
4234 (bp->mac_addr[2] << 16) +
4235 bp->mac_addr[3] +
4236 (bp->mac_addr[4] << 8) +
4237 (bp->mac_addr[5] << 16);
4238 REG_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);
4240 /* Program the MTU. Also include 4 bytes for CRC32. */
4241 val = bp->dev->mtu + ETH_HLEN + 4;
4242 if (val > (MAX_ETHERNET_PACKET_SIZE + 4))
4243 val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
4244 REG_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);
4246 bp->last_status_idx = 0;
4247 bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE;
4249 /* Set up how to generate a link change interrupt. */
4250 REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
4252 REG_WR(bp, BNX2_HC_STATUS_ADDR_L,
4253 (u64) bp->status_blk_mapping & 0xffffffff);
4254 REG_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);
4256 REG_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
4257 (u64) bp->stats_blk_mapping & 0xffffffff);
4258 REG_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
4259 (u64) bp->stats_blk_mapping >> 32);
4261 REG_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
4262 (bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);
4264 REG_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
4265 (bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);
4267 REG_WR(bp, BNX2_HC_COMP_PROD_TRIP,
4268 (bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);
4270 REG_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);
4272 REG_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);
4274 REG_WR(bp, BNX2_HC_COM_TICKS,
4275 (bp->com_ticks_int << 16) | bp->com_ticks);
4277 REG_WR(bp, BNX2_HC_CMD_TICKS,
4278 (bp->cmd_ticks_int << 16) | bp->cmd_ticks);
4280 if (CHIP_NUM(bp) == CHIP_NUM_5708)
4281 REG_WR(bp, BNX2_HC_STATS_TICKS, 0);
4282 else
4283 REG_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks);
4284 REG_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8); /* 3ms */
4286 if (CHIP_ID(bp) == CHIP_ID_5706_A1)
4287 val = BNX2_HC_CONFIG_COLLECT_STATS;
4288 else {
4289 val = BNX2_HC_CONFIG_RX_TMR_MODE | BNX2_HC_CONFIG_TX_TMR_MODE |
4290 BNX2_HC_CONFIG_COLLECT_STATS;
4291 }
4293 if (bp->flags & ONE_SHOT_MSI_FLAG)
4294 val |= BNX2_HC_CONFIG_ONE_SHOT;
4296 REG_WR(bp, BNX2_HC_CONFIG, val);
4298 /* Clear internal stats counters. */
4299 REG_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);
4301 REG_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_EVENTS);
4303 /* Initialize the receive filter. */
4304 bnx2_set_rx_mode(bp->dev);
4306 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4307 val = REG_RD(bp, BNX2_MISC_NEW_CORE_CTL);
4308 val |= BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
4309 REG_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
4310 }
4311 rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
4312 0);
4314 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, BNX2_MISC_ENABLE_DEFAULT);
4315 REG_RD(bp, BNX2_MISC_ENABLE_SET_BITS);
4317 udelay(20);
4319 bp->hc_cmd = REG_RD(bp, BNX2_HC_COMMAND);
4321 return rc;
4322 }
4324 static void
4325 bnx2_init_tx_context(struct bnx2 *bp, u32 cid)
4327 u32 val, offset0, offset1, offset2, offset3;
4329 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4330 offset0 = BNX2_L2CTX_TYPE_XI;
4331 offset1 = BNX2_L2CTX_CMD_TYPE_XI;
4332 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
4333 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
4334 } else {
4335 offset0 = BNX2_L2CTX_TYPE;
4336 offset1 = BNX2_L2CTX_CMD_TYPE;
4337 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
4338 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
4340 val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
4341 CTX_WR(bp, GET_CID_ADDR(cid), offset0, val);
4343 val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
4344 CTX_WR(bp, GET_CID_ADDR(cid), offset1, val);
4346 val = (u64) bp->tx_desc_mapping >> 32;
4347 CTX_WR(bp, GET_CID_ADDR(cid), offset2, val);
4349 val = (u64) bp->tx_desc_mapping & 0xffffffff;
4350 CTX_WR(bp, GET_CID_ADDR(cid), offset3, val);
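/*
 * Reset the driver's TX ring state and chain the ring to itself:
 * the spare BD at index MAX_TX_DESC_CNT holds the DMA address of
 * the start of the ring, so the chip sees one circular BD chain.
 * The TX mailbox addresses are cached so the fast path can post
 * new producer index/byte sequence values with plain writes.
 */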
4353 static void
4354 bnx2_init_tx_ring(struct bnx2 *bp)
4356 struct tx_bd *txbd;
4357 u32 cid;
4359 bp->tx_wake_thresh = bp->tx_ring_size / 2;
4361 txbd = &bp->tx_desc_ring[MAX_TX_DESC_CNT];
4363 txbd->tx_bd_haddr_hi = (u64) bp->tx_desc_mapping >> 32;
4364 txbd->tx_bd_haddr_lo = (u64) bp->tx_desc_mapping & 0xffffffff;
4366 bp->tx_prod = 0;
4367 bp->tx_cons = 0;
4368 bp->hw_tx_cons = 0;
4369 bp->tx_prod_bseq = 0;
4371 cid = TX_CID;
4372 bp->tx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BIDX;
4373 bp->tx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BSEQ;
4375 bnx2_init_tx_context(bp, cid);
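/*
 * Initialize a set of RX BD ring pages as one circular chain.  All
 * usable BDs in a page get the same buffer size and START/END
 * flags; the final BD of each page is used as a pointer to the
 * next page, and the last page points back to the first.
 */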
4378 static void
4379 bnx2_init_rxbd_rings(struct rx_bd *rx_ring[], dma_addr_t dma[], u32 buf_size,
4380 int num_rings)
4382 int i;
4383 struct rx_bd *rxbd;
4385 for (i = 0; i < num_rings; i++) {
4386 int j;
4388 rxbd = &rx_ring[i][0];
4389 for (j = 0; j < MAX_RX_DESC_CNT; j++, rxbd++) {
4390 rxbd->rx_bd_len = buf_size;
4391 rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
4393 if (i == (num_rings - 1))
4394 j = 0;
4395 else
4396 j = i + 1;
4397 rxbd->rx_bd_haddr_hi = (u64) dma[j] >> 32;
4398 rxbd->rx_bd_haddr_lo = (u64) dma[j] & 0xffffffff;
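/*
 * Program the L2 RX context and pre-fill the rings.  With a
 * non-zero page ring (jumbo MTU), the paired buffer sizes and the
 * page-ring chain address are written into the context too; this
 * is what enables the scatter/gather RX path, with headers landing
 * in the normal ring and the rest of each frame in PAGE_SIZE
 * buffers.
 */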
4402 static void
4403 bnx2_init_rx_ring(struct bnx2 *bp)
4405 int i;
4406 u16 prod, ring_prod;
4407 u32 val, rx_cid_addr = GET_CID_ADDR(RX_CID);
4409 bp->rx_prod = 0;
4410 bp->rx_cons = 0;
4411 bp->rx_prod_bseq = 0;
4412 bp->rx_pg_prod = 0;
4413 bp->rx_pg_cons = 0;
4415 bnx2_init_rxbd_rings(bp->rx_desc_ring, bp->rx_desc_mapping,
4416 bp->rx_buf_use_size, bp->rx_max_ring);
4418 CTX_WR(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, 0);
4419 if (bp->rx_pg_ring_size) {
4420 bnx2_init_rxbd_rings(bp->rx_pg_desc_ring,
4421 bp->rx_pg_desc_mapping,
4422 PAGE_SIZE, bp->rx_max_pg_ring);
4423 val = (bp->rx_buf_use_size << 16) | PAGE_SIZE;
4424 CTX_WR(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, val);
4425 CTX_WR(bp, rx_cid_addr, BNX2_L2CTX_RBDC_KEY,
4426 BNX2_L2CTX_RBDC_JUMBO_KEY);
4428 val = (u64) bp->rx_pg_desc_mapping[0] >> 32;
4429 CTX_WR(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_HI, val);
4431 val = (u64) bp->rx_pg_desc_mapping[0] & 0xffffffff;
4432 CTX_WR(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_LO, val);
4434 if (CHIP_NUM(bp) == CHIP_NUM_5709)
4435 REG_WR(bp, BNX2_MQ_MAP_L2_3, BNX2_MQ_MAP_L2_3_DEFAULT);
4438 val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
4439 val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
4440 val |= 0x02 << 8;
4441 CTX_WR(bp, rx_cid_addr, BNX2_L2CTX_CTX_TYPE, val);
4443 val = (u64) bp->rx_desc_mapping[0] >> 32;
4444 CTX_WR(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val);
4446 val = (u64) bp->rx_desc_mapping[0] & 0xffffffff;
4447 CTX_WR(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val);
4449 ring_prod = prod = bp->rx_pg_prod;
4450 for (i = 0; i < bp->rx_pg_ring_size; i++) {
4451 if (bnx2_alloc_rx_page(bp, ring_prod) < 0)
4452 break;
4453 prod = NEXT_RX_BD(prod);
4454 ring_prod = RX_PG_RING_IDX(prod);
4456 bp->rx_pg_prod = prod;
4458 ring_prod = prod = bp->rx_prod;
4459 for (i = 0; i < bp->rx_ring_size; i++) {
4460 if (bnx2_alloc_rx_skb(bp, ring_prod) < 0) {
4461 break;
4463 prod = NEXT_RX_BD(prod);
4464 ring_prod = RX_RING_IDX(prod);
4466 bp->rx_prod = prod;
4468 REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_PG_BDIDX, bp->rx_pg_prod);
4469 REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, prod);
4471 REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bp->rx_prod_bseq);
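/*
 * Convert a requested descriptor count into a number of ring
 * pages, rounded up to the next power of two and capped at
 * max_size.  For example, assuming 255 usable BDs per page (the
 * usual 4 KiB page case), a 600-entry ring needs three pages and
 * is rounded up to four.
 */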
4474 static u32 bnx2_find_max_ring(u32 ring_size, u32 max_size)
4476 u32 max, num_rings = 1;
4478 while (ring_size > MAX_RX_DESC_CNT) {
4479 ring_size -= MAX_RX_DESC_CNT;
4480 num_rings++;
4482 /* round to next power of 2 */
4483 max = max_size;
4484 while ((max & num_rings) == 0)
4485 max >>= 1;
4487 if (num_rings != max)
4488 max <<= 1;
4490 return max;
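/*
 * Size the RX buffers for the current MTU.  When a worst-case
 * frame plus alignment and skb overhead no longer fits in one
 * page, the jumbo layout is used instead: the normal ring then
 * carries only RX_COPY_THRESH bytes of each frame (plus the
 * l2_fhdr area) and the remainder is scattered into the page ring
 * sized here.
 */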
4493 static void
4494 bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
4496 u32 rx_size, rx_space, jumbo_size;
4498 /* 8 for CRC and VLAN */
4499 rx_size = bp->dev->mtu + ETH_HLEN + bp->rx_offset + 8;
4501 rx_space = SKB_DATA_ALIGN(rx_size + BNX2_RX_ALIGN) + NET_SKB_PAD +
4502 sizeof(struct skb_shared_info);
4504 bp->rx_copy_thresh = RX_COPY_THRESH;
4505 bp->rx_pg_ring_size = 0;
4506 bp->rx_max_pg_ring = 0;
4507 bp->rx_max_pg_ring_idx = 0;
4508 if (rx_space > PAGE_SIZE) {
4509 int pages = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;
4511 jumbo_size = size * pages;
4512 if (jumbo_size > MAX_TOTAL_RX_PG_DESC_CNT)
4513 jumbo_size = MAX_TOTAL_RX_PG_DESC_CNT;
4515 bp->rx_pg_ring_size = jumbo_size;
4516 bp->rx_max_pg_ring = bnx2_find_max_ring(jumbo_size,
4517 MAX_RX_PG_RINGS);
4518 bp->rx_max_pg_ring_idx = (bp->rx_max_pg_ring * RX_DESC_CNT) - 1;
4519 rx_size = RX_COPY_THRESH + bp->rx_offset;
4520 bp->rx_copy_thresh = 0;
4523 bp->rx_buf_use_size = rx_size;
4524 /* hw alignment */
4525 bp->rx_buf_size = bp->rx_buf_use_size + BNX2_RX_ALIGN;
4526 bp->rx_jumbo_thresh = rx_size - bp->rx_offset;
4527 bp->rx_ring_size = size;
4528 bp->rx_max_ring = bnx2_find_max_ring(size, MAX_RX_RINGS);
4529 bp->rx_max_ring_idx = (bp->rx_max_ring * RX_DESC_CNT) - 1;
4532 static void
4533 bnx2_free_tx_skbs(struct bnx2 *bp)
4535 int i;
4537 if (bp->tx_buf_ring == NULL)
4538 return;
4540 for (i = 0; i < TX_DESC_CNT; ) {
4541 struct sw_bd *tx_buf = &bp->tx_buf_ring[i];
4542 struct sk_buff *skb = tx_buf->skb;
4543 int j, last;
4545 if (skb == NULL) {
4546 i++;
4547 continue;
4550 pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
4551 skb_headlen(skb), PCI_DMA_TODEVICE);
4553 tx_buf->skb = NULL;
4555 last = skb_shinfo(skb)->nr_frags;
4556 for (j = 0; j < last; j++) {
4557 tx_buf = &bp->tx_buf_ring[i + j + 1];
4558 pci_unmap_page(bp->pdev,
4559 pci_unmap_addr(tx_buf, mapping),
4560 skb_shinfo(skb)->frags[j].size,
4561 PCI_DMA_TODEVICE);
4563 dev_kfree_skb(skb);
4564 i += j + 1;
4569 static void
4570 bnx2_free_rx_skbs(struct bnx2 *bp)
4572 int i;
4574 if (bp->rx_buf_ring == NULL)
4575 return;
4577 for (i = 0; i < bp->rx_max_ring_idx; i++) {
4578 struct sw_bd *rx_buf = &bp->rx_buf_ring[i];
4579 struct sk_buff *skb = rx_buf->skb;
4581 if (skb == NULL)
4582 continue;
4584 pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
4585 bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
4587 rx_buf->skb = NULL;
4589 dev_kfree_skb(skb);
4591 for (i = 0; i < bp->rx_max_pg_ring_idx; i++)
4592 bnx2_free_rx_page(bp, i);
4595 static void
4596 bnx2_free_skbs(struct bnx2 *bp)
4598 bnx2_free_tx_skbs(bp);
4599 bnx2_free_rx_skbs(bp);
4602 static int
4603 bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
4605 int rc;
4607 rc = bnx2_reset_chip(bp, reset_code);
4608 bnx2_free_skbs(bp);
4609 if (rc)
4610 return rc;
4612 if ((rc = bnx2_init_chip(bp)) != 0)
4613 return rc;
4615 bnx2_init_tx_ring(bp);
4616 bnx2_init_rx_ring(bp);
4617 return 0;
4620 static int
4621 bnx2_init_nic(struct bnx2 *bp)
4623 int rc;
4625 if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
4626 return rc;
4628 spin_lock_bh(&bp->phy_lock);
4629 bnx2_init_phy(bp);
4630 bnx2_set_link(bp);
4631 spin_unlock_bh(&bp->phy_lock);
4632 return 0;
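/*
 * ethtool register self-test.  Each entry in reg_tbl is probed by
 * writing all-zeros and then all-ones: bits in rw_mask must read
 * back as written, bits in ro_mask must keep their original value,
 * and the register is restored afterwards.  Entries marked
 * BNX2_FL_NOT_5709 are skipped on the 5709.
 */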
4635 static int
4636 bnx2_test_registers(struct bnx2 *bp)
4638 int ret;
4639 int i, is_5709;
4640 static const struct {
4641 u16 offset;
4642 u16 flags;
4643 #define BNX2_FL_NOT_5709 1
4644 u32 rw_mask;
4645 u32 ro_mask;
4646 } reg_tbl[] = {
4647 { 0x006c, 0, 0x00000000, 0x0000003f },
4648 { 0x0090, 0, 0xffffffff, 0x00000000 },
4649 { 0x0094, 0, 0x00000000, 0x00000000 },
4651 { 0x0404, BNX2_FL_NOT_5709, 0x00003f00, 0x00000000 },
4652 { 0x0418, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4653 { 0x041c, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4654 { 0x0420, BNX2_FL_NOT_5709, 0x00000000, 0x80ffffff },
4655 { 0x0424, BNX2_FL_NOT_5709, 0x00000000, 0x00000000 },
4656 { 0x0428, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
4657 { 0x0450, BNX2_FL_NOT_5709, 0x00000000, 0x0000ffff },
4658 { 0x0454, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4659 { 0x0458, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4661 { 0x0808, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4662 { 0x0854, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4663 { 0x0868, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
4664 { 0x086c, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
4665 { 0x0870, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
4666 { 0x0874, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
4668 { 0x0c00, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
4669 { 0x0c04, BNX2_FL_NOT_5709, 0x00000000, 0x03ff0001 },
4670 { 0x0c08, BNX2_FL_NOT_5709, 0x0f0ff073, 0x00000000 },
4672 { 0x1000, 0, 0x00000000, 0x00000001 },
4673 { 0x1004, 0, 0x00000000, 0x000f0001 },
4675 { 0x1408, 0, 0x01c00800, 0x00000000 },
4676 { 0x149c, 0, 0x8000ffff, 0x00000000 },
4677 { 0x14a8, 0, 0x00000000, 0x000001ff },
4678 { 0x14ac, 0, 0x0fffffff, 0x10000000 },
4679 { 0x14b0, 0, 0x00000002, 0x00000001 },
4680 { 0x14b8, 0, 0x00000000, 0x00000000 },
4681 { 0x14c0, 0, 0x00000000, 0x00000009 },
4682 { 0x14c4, 0, 0x00003fff, 0x00000000 },
4683 { 0x14cc, 0, 0x00000000, 0x00000001 },
4684 { 0x14d0, 0, 0xffffffff, 0x00000000 },
4686 { 0x1800, 0, 0x00000000, 0x00000001 },
4687 { 0x1804, 0, 0x00000000, 0x00000003 },
4689 { 0x2800, 0, 0x00000000, 0x00000001 },
4690 { 0x2804, 0, 0x00000000, 0x00003f01 },
4691 { 0x2808, 0, 0x0f3f3f03, 0x00000000 },
4692 { 0x2810, 0, 0xffff0000, 0x00000000 },
4693 { 0x2814, 0, 0xffff0000, 0x00000000 },
4694 { 0x2818, 0, 0xffff0000, 0x00000000 },
4695 { 0x281c, 0, 0xffff0000, 0x00000000 },
4696 { 0x2834, 0, 0xffffffff, 0x00000000 },
4697 { 0x2840, 0, 0x00000000, 0xffffffff },
4698 { 0x2844, 0, 0x00000000, 0xffffffff },
4699 { 0x2848, 0, 0xffffffff, 0x00000000 },
4700 { 0x284c, 0, 0xf800f800, 0x07ff07ff },
4702 { 0x2c00, 0, 0x00000000, 0x00000011 },
4703 { 0x2c04, 0, 0x00000000, 0x00030007 },
4705 { 0x3c00, 0, 0x00000000, 0x00000001 },
4706 { 0x3c04, 0, 0x00000000, 0x00070000 },
4707 { 0x3c08, 0, 0x00007f71, 0x07f00000 },
4708 { 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
4709 { 0x3c10, 0, 0xffffffff, 0x00000000 },
4710 { 0x3c14, 0, 0x00000000, 0xffffffff },
4711 { 0x3c18, 0, 0x00000000, 0xffffffff },
4712 { 0x3c1c, 0, 0xfffff000, 0x00000000 },
4713 { 0x3c20, 0, 0xffffff00, 0x00000000 },
4715 { 0x5004, 0, 0x00000000, 0x0000007f },
4716 { 0x5008, 0, 0x0f0007ff, 0x00000000 },
4718 { 0x5c00, 0, 0x00000000, 0x00000001 },
4719 { 0x5c04, 0, 0x00000000, 0x0003000f },
4720 { 0x5c08, 0, 0x00000003, 0x00000000 },
4721 { 0x5c0c, 0, 0x0000fff8, 0x00000000 },
4722 { 0x5c10, 0, 0x00000000, 0xffffffff },
4723 { 0x5c80, 0, 0x00000000, 0x0f7113f1 },
4724 { 0x5c84, 0, 0x00000000, 0x0000f333 },
4725 { 0x5c88, 0, 0x00000000, 0x00077373 },
4726 { 0x5c8c, 0, 0x00000000, 0x0007f737 },
4728 { 0x6808, 0, 0x0000ff7f, 0x00000000 },
4729 { 0x680c, 0, 0xffffffff, 0x00000000 },
4730 { 0x6810, 0, 0xffffffff, 0x00000000 },
4731 { 0x6814, 0, 0xffffffff, 0x00000000 },
4732 { 0x6818, 0, 0xffffffff, 0x00000000 },
4733 { 0x681c, 0, 0xffffffff, 0x00000000 },
4734 { 0x6820, 0, 0x00ff00ff, 0x00000000 },
4735 { 0x6824, 0, 0x00ff00ff, 0x00000000 },
4736 { 0x6828, 0, 0x00ff00ff, 0x00000000 },
4737 { 0x682c, 0, 0x03ff03ff, 0x00000000 },
4738 { 0x6830, 0, 0x03ff03ff, 0x00000000 },
4739 { 0x6834, 0, 0x03ff03ff, 0x00000000 },
4740 { 0x6838, 0, 0x03ff03ff, 0x00000000 },
4741 { 0x683c, 0, 0x0000ffff, 0x00000000 },
4742 { 0x6840, 0, 0x00000ff0, 0x00000000 },
4743 { 0x6844, 0, 0x00ffff00, 0x00000000 },
4744 { 0x684c, 0, 0xffffffff, 0x00000000 },
4745 { 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
4746 { 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
4747 { 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
4748 { 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
4749 { 0x6908, 0, 0x00000000, 0x0001ff0f },
4750 { 0x690c, 0, 0x00000000, 0x0ffe00f0 },
4752 { 0xffff, 0, 0x00000000, 0x00000000 },
4753 };
4755 ret = 0;
4756 is_5709 = 0;
4757 if (CHIP_NUM(bp) == CHIP_NUM_5709)
4758 is_5709 = 1;
4760 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
4761 u32 offset, rw_mask, ro_mask, save_val, val;
4762 u16 flags = reg_tbl[i].flags;
4764 if (is_5709 && (flags & BNX2_FL_NOT_5709))
4765 continue;
4767 offset = (u32) reg_tbl[i].offset;
4768 rw_mask = reg_tbl[i].rw_mask;
4769 ro_mask = reg_tbl[i].ro_mask;
4771 save_val = readl(bp->regview + offset);
4773 writel(0, bp->regview + offset);
4775 val = readl(bp->regview + offset);
4776 if ((val & rw_mask) != 0) {
4777 goto reg_test_err;
4780 if ((val & ro_mask) != (save_val & ro_mask)) {
4781 goto reg_test_err;
4784 writel(0xffffffff, bp->regview + offset);
4786 val = readl(bp->regview + offset);
4787 if ((val & rw_mask) != rw_mask) {
4788 goto reg_test_err;
4791 if ((val & ro_mask) != (save_val & ro_mask)) {
4792 goto reg_test_err;
4795 writel(save_val, bp->regview + offset);
4796 continue;
4798 reg_test_err:
4799 writel(save_val, bp->regview + offset);
4800 ret = -ENODEV;
4801 break;
4803 return ret;
4806 static int
4807 bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
4809 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
4810 0xaaaaaaaa, 0xaa55aa55, 0x55aa55aa };
4811 int i;
4813 for (i = 0; i < sizeof(test_pattern) / 4; i++) {
4814 u32 offset;
4816 for (offset = 0; offset < size; offset += 4) {
4818 REG_WR_IND(bp, start + offset, test_pattern[i]);
4820 if (REG_RD_IND(bp, start + offset) !=
4821 test_pattern[i]) {
4822 return -ENODEV;
4826 return 0;
4829 static int
4830 bnx2_test_memory(struct bnx2 *bp)
4832 int ret = 0;
4833 int i;
4834 static struct mem_entry {
4835 u32 offset;
4836 u32 len;
4837 } mem_tbl_5706[] = {
4838 { 0x60000, 0x4000 },
4839 { 0xa0000, 0x3000 },
4840 { 0xe0000, 0x4000 },
4841 { 0x120000, 0x4000 },
4842 { 0x1a0000, 0x4000 },
4843 { 0x160000, 0x4000 },
4844 { 0xffffffff, 0 },
4845 },
4846 mem_tbl_5709[] = {
4847 { 0x60000, 0x4000 },
4848 { 0xa0000, 0x3000 },
4849 { 0xe0000, 0x4000 },
4850 { 0x120000, 0x4000 },
4851 { 0x1a0000, 0x4000 },
4852 { 0xffffffff, 0 },
4853 };
4854 struct mem_entry *mem_tbl;
4856 if (CHIP_NUM(bp) == CHIP_NUM_5709)
4857 mem_tbl = mem_tbl_5709;
4858 else
4859 mem_tbl = mem_tbl_5706;
4861 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
4862 if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
4863 mem_tbl[i].len)) != 0) {
4864 return ret;
4868 return ret;
4871 #define BNX2_MAC_LOOPBACK 0
4872 #define BNX2_PHY_LOOPBACK 1
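/*
 * Send one self-addressed frame through MAC or PHY loopback.  The
 * frame is posted as a single TX BD, coalescing is forced so the
 * status block updates right away, and the test passes only if the
 * frame comes back on the RX ring with the expected length, an
 * error-free l2_fhdr, and an intact payload pattern.
 */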
4874 static int
4875 bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
4877 unsigned int pkt_size, num_pkts, i;
4878 struct sk_buff *skb, *rx_skb;
4879 unsigned char *packet;
4880 u16 rx_start_idx, rx_idx;
4881 dma_addr_t map;
4882 struct tx_bd *txbd;
4883 struct sw_bd *rx_buf;
4884 struct l2_fhdr *rx_hdr;
4885 int ret = -ENODEV;
4887 if (loopback_mode == BNX2_MAC_LOOPBACK) {
4888 bp->loopback = MAC_LOOPBACK;
4889 bnx2_set_mac_loopback(bp);
4891 else if (loopback_mode == BNX2_PHY_LOOPBACK) {
4892 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
4893 return 0;
4895 bp->loopback = PHY_LOOPBACK;
4896 bnx2_set_phy_loopback(bp);
4898 else
4899 return -EINVAL;
4901 pkt_size = min(bp->dev->mtu + ETH_HLEN, bp->rx_jumbo_thresh - 4);
4902 skb = netdev_alloc_skb(bp->dev, pkt_size);
4903 if (!skb)
4904 return -ENOMEM;
4905 packet = skb_put(skb, pkt_size);
4906 memcpy(packet, bp->dev->dev_addr, 6);
4907 memset(packet + 6, 0x0, 8);
4908 for (i = 14; i < pkt_size; i++)
4909 packet[i] = (unsigned char) (i & 0xff);
4911 map = pci_map_single(bp->pdev, skb->data, pkt_size,
4912 PCI_DMA_TODEVICE);
4914 REG_WR(bp, BNX2_HC_COMMAND,
4915 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
4917 REG_RD(bp, BNX2_HC_COMMAND);
4919 udelay(5);
4920 rx_start_idx = bp->status_blk->status_rx_quick_consumer_index0;
4922 num_pkts = 0;
4924 txbd = &bp->tx_desc_ring[TX_RING_IDX(bp->tx_prod)];
4926 txbd->tx_bd_haddr_hi = (u64) map >> 32;
4927 txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
4928 txbd->tx_bd_mss_nbytes = pkt_size;
4929 txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;
4931 num_pkts++;
4932 bp->tx_prod = NEXT_TX_BD(bp->tx_prod);
4933 bp->tx_prod_bseq += pkt_size;
4935 REG_WR16(bp, bp->tx_bidx_addr, bp->tx_prod);
4936 REG_WR(bp, bp->tx_bseq_addr, bp->tx_prod_bseq);
4938 udelay(100);
4940 REG_WR(bp, BNX2_HC_COMMAND,
4941 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
4943 REG_RD(bp, BNX2_HC_COMMAND);
4945 udelay(5);
4947 pci_unmap_single(bp->pdev, map, pkt_size, PCI_DMA_TODEVICE);
4948 dev_kfree_skb(skb);
4950 if (bp->status_blk->status_tx_quick_consumer_index0 != bp->tx_prod) {
4951 goto loopback_test_done;
4954 rx_idx = bp->status_blk->status_rx_quick_consumer_index0;
4955 if (rx_idx != rx_start_idx + num_pkts) {
4956 goto loopback_test_done;
4959 rx_buf = &bp->rx_buf_ring[rx_start_idx];
4960 rx_skb = rx_buf->skb;
4962 rx_hdr = (struct l2_fhdr *) rx_skb->data;
4963 skb_reserve(rx_skb, bp->rx_offset);
4965 pci_dma_sync_single_for_cpu(bp->pdev,
4966 pci_unmap_addr(rx_buf, mapping),
4967 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
4969 if (rx_hdr->l2_fhdr_status &
4970 (L2_FHDR_ERRORS_BAD_CRC |
4971 L2_FHDR_ERRORS_PHY_DECODE |
4972 L2_FHDR_ERRORS_ALIGNMENT |
4973 L2_FHDR_ERRORS_TOO_SHORT |
4974 L2_FHDR_ERRORS_GIANT_FRAME)) {
4976 goto loopback_test_done;
4979 if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) {
4980 goto loopback_test_done;
4983 for (i = 14; i < pkt_size; i++) {
4984 if (*(rx_skb->data + i) != (unsigned char) (i & 0xff)) {
4985 goto loopback_test_done;
4989 ret = 0;
4991 loopback_test_done:
4992 bp->loopback = 0;
4993 return ret;
4996 #define BNX2_MAC_LOOPBACK_FAILED 1
4997 #define BNX2_PHY_LOOPBACK_FAILED 2
4998 #define BNX2_LOOPBACK_FAILED (BNX2_MAC_LOOPBACK_FAILED | \
4999 BNX2_PHY_LOOPBACK_FAILED)
5001 static int
5002 bnx2_test_loopback(struct bnx2 *bp)
5004 int rc = 0;
5006 if (!netif_running(bp->dev))
5007 return BNX2_LOOPBACK_FAILED;
5009 bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
5010 spin_lock_bh(&bp->phy_lock);
5011 bnx2_init_phy(bp);
5012 spin_unlock_bh(&bp->phy_lock);
5013 if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
5014 rc |= BNX2_MAC_LOOPBACK_FAILED;
5015 if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
5016 rc |= BNX2_PHY_LOOPBACK_FAILED;
5017 return rc;
5020 #define NVRAM_SIZE 0x200
5021 #define CRC32_RESIDUAL 0xdebb20e3
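/*
 * ethtool NVRAM self-test.  The word at offset 0 must carry the
 * 0x669955aa magic, and each of the two 256-byte blocks at offset
 * 0x100 must pass a CRC check: running CRC32 over a block that
 * appears to end in its own stored CRC leaves the fixed residual
 * 0xdebb20e3 when the data is intact.
 */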
5023 static int
5024 bnx2_test_nvram(struct bnx2 *bp)
5026 u32 buf[NVRAM_SIZE / 4];
5027 u8 *data = (u8 *) buf;
5028 int rc = 0;
5029 u32 magic, csum;
5031 if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
5032 goto test_nvram_done;
5034 magic = be32_to_cpu(buf[0]);
5035 if (magic != 0x669955aa) {
5036 rc = -ENODEV;
5037 goto test_nvram_done;
5040 if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0)
5041 goto test_nvram_done;
5043 csum = ether_crc_le(0x100, data);
5044 if (csum != CRC32_RESIDUAL) {
5045 rc = -ENODEV;
5046 goto test_nvram_done;
5049 csum = ether_crc_le(0x100, data + 0x100);
5050 if (csum != CRC32_RESIDUAL) {
5051 rc = -ENODEV;
5054 test_nvram_done:
5055 return rc;
5058 static int
5059 bnx2_test_link(struct bnx2 *bp)
5061 u32 bmsr;
5063 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG) {
5064 if (bp->link_up)
5065 return 0;
5066 return -ENODEV;
5068 spin_lock_bh(&bp->phy_lock);
5069 bnx2_enable_bmsr1(bp);
5070 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
5071 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
5072 bnx2_disable_bmsr1(bp);
5073 spin_unlock_bh(&bp->phy_lock);
5075 if (bmsr & BMSR_LSTATUS) {
5076 return 0;
5078 return -ENODEV;
5081 static int
5082 bnx2_test_intr(struct bnx2 *bp)
5084 int i;
5085 u16 status_idx;
5087 if (!netif_running(bp->dev))
5088 return -ENODEV;
5090 status_idx = REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;
5092 /* This register is not touched during run-time. */
5093 REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
5094 REG_RD(bp, BNX2_HC_COMMAND);
5096 for (i = 0; i < 10; i++) {
5097 if ((REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
5098 status_idx) {
5100 break;
5103 msleep_interruptible(10);
5105 if (i < 10)
5106 return 0;
5108 return -ENODEV;
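/*
 * 5706 SerDes poll.  If autonegotiation has produced no link but
 * the PHY reports signal detect without receiving config words,
 * the link partner is not autonegotiating, so the driver falls
 * back to forced 1000 Mb/s full duplex (parallel detection).
 * Once config words are seen again, autonegotiation is restored.
 */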
5111 static void
5112 bnx2_5706_serdes_timer(struct bnx2 *bp)
5114 spin_lock(&bp->phy_lock);
5115 if (bp->serdes_an_pending)
5116 bp->serdes_an_pending--;
5117 else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
5118 u32 bmcr;
5120 bp->current_interval = bp->timer_interval;
5122 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
5124 if (bmcr & BMCR_ANENABLE) {
5125 u32 phy1, phy2;
5127 bnx2_write_phy(bp, 0x1c, 0x7c00);
5128 bnx2_read_phy(bp, 0x1c, &phy1);
5130 bnx2_write_phy(bp, 0x17, 0x0f01);
5131 bnx2_read_phy(bp, 0x15, &phy2);
5132 bnx2_write_phy(bp, 0x17, 0x0f01);
5133 bnx2_read_phy(bp, 0x15, &phy2);
5135 if ((phy1 & 0x10) && /* SIGNAL DETECT */
5136 !(phy2 & 0x20)) { /* no CONFIG */
5138 bmcr &= ~BMCR_ANENABLE;
5139 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
5140 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
5141 bp->phy_flags |= PHY_PARALLEL_DETECT_FLAG;
5145 else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
5146 (bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)) {
5147 u32 phy2;
5149 bnx2_write_phy(bp, 0x17, 0x0f01);
5150 bnx2_read_phy(bp, 0x15, &phy2);
5151 if (phy2 & 0x20) {
5152 u32 bmcr;
5154 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
5155 bmcr |= BMCR_ANENABLE;
5156 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
5158 bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
5160 } else
5161 bp->current_interval = bp->timer_interval;
5163 spin_unlock(&bp->phy_lock);
5166 static void
5167 bnx2_5708_serdes_timer(struct bnx2 *bp)
5169 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
5170 return;
5172 if ((bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) == 0) {
5173 bp->serdes_an_pending = 0;
5174 return;
5177 spin_lock(&bp->phy_lock);
5178 if (bp->serdes_an_pending)
5179 bp->serdes_an_pending--;
5180 else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
5181 u32 bmcr;
5183 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
5184 if (bmcr & BMCR_ANENABLE) {
5185 bnx2_enable_forced_2g5(bp);
5186 bp->current_interval = SERDES_FORCED_TIMEOUT;
5187 } else {
5188 bnx2_disable_forced_2g5(bp);
5189 bp->serdes_an_pending = 2;
5190 bp->current_interval = bp->timer_interval;
5193 } else
5194 bp->current_interval = bp->timer_interval;
5196 spin_unlock(&bp->phy_lock);
5199 static void
5200 bnx2_timer(unsigned long data)
5202 struct bnx2 *bp = (struct bnx2 *) data;
5204 if (!netif_running(bp->dev))
5205 return;
5207 if (atomic_read(&bp->intr_sem) != 0)
5208 goto bnx2_restart_timer;
5210 bnx2_send_heart_beat(bp);
5212 bp->stats_blk->stat_FwRxDrop = REG_RD_IND(bp, BNX2_FW_RX_DROP_COUNT);
5214 /* Workaround for occasional corrupted statistics counters on the 5708. */
5215 if (CHIP_NUM(bp) == CHIP_NUM_5708 && bp->stats_ticks)
5216 REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd |
5217 BNX2_HC_COMMAND_STATS_NOW);
5219 if (bp->phy_flags & PHY_SERDES_FLAG) {
5220 if (CHIP_NUM(bp) == CHIP_NUM_5706)
5221 bnx2_5706_serdes_timer(bp);
5222 else
5223 bnx2_5708_serdes_timer(bp);
5226 bnx2_restart_timer:
5227 mod_timer(&bp->timer, jiffies + bp->current_interval);
5230 static int
5231 bnx2_request_irq(struct bnx2 *bp)
5233 struct net_device *dev = bp->dev;
5234 int rc = 0;
5236 if (bp->flags & USING_MSI_FLAG) {
5237 irq_handler_t fn = bnx2_msi;
5239 if (bp->flags & ONE_SHOT_MSI_FLAG)
5240 fn = bnx2_msi_1shot;
5242 rc = request_irq(bp->pdev->irq, fn, 0, dev->name, dev);
5243 } else
5244 rc = request_irq(bp->pdev->irq, bnx2_interrupt,
5245 IRQF_SHARED, dev->name, dev);
5246 return rc;
5249 static void
5250 bnx2_free_irq(struct bnx2 *bp)
5252 struct net_device *dev = bp->dev;
5254 if (bp->flags & USING_MSI_FLAG) {
5255 free_irq(bp->pdev->irq, dev);
5256 pci_disable_msi(bp->pdev);
5257 bp->flags &= ~(USING_MSI_FLAG | ONE_SHOT_MSI_FLAG);
5258 } else
5259 free_irq(bp->pdev->irq, dev);
5262 /* Called with rtnl_lock */
5263 static int
5264 bnx2_open(struct net_device *dev)
5266 struct bnx2 *bp = netdev_priv(dev);
5267 int rc;
5269 netif_carrier_off(dev);
5271 bnx2_set_power_state(bp, PCI_D0);
5272 bnx2_disable_int(bp);
5274 rc = bnx2_alloc_mem(bp);
5275 if (rc)
5276 return rc;
5278 napi_enable(&bp->napi);
5280 if ((bp->flags & MSI_CAP_FLAG) && !disable_msi) {
5281 if (pci_enable_msi(bp->pdev) == 0) {
5282 bp->flags |= USING_MSI_FLAG;
5283 if (CHIP_NUM(bp) == CHIP_NUM_5709)
5284 bp->flags |= ONE_SHOT_MSI_FLAG;
5287 rc = bnx2_request_irq(bp);
5289 if (rc) {
5290 napi_disable(&bp->napi);
5291 bnx2_free_mem(bp);
5292 return rc;
5295 rc = bnx2_init_nic(bp);
5297 if (rc) {
5298 napi_disable(&bp->napi);
5299 bnx2_free_irq(bp);
5300 bnx2_free_skbs(bp);
5301 bnx2_free_mem(bp);
5302 return rc;
5305 mod_timer(&bp->timer, jiffies + bp->current_interval);
5307 atomic_set(&bp->intr_sem, 0);
5309 bnx2_enable_int(bp);
5311 if (bp->flags & USING_MSI_FLAG) {
5312 /* Test MSI to make sure it is working
5313 * If MSI test fails, go back to INTx mode
5314 */
5315 if (bnx2_test_intr(bp) != 0) {
5316 printk(KERN_WARNING PFX "%s: No interrupt was generated"
5317 " using MSI, switching to INTx mode. Please"
5318 " report this failure to the PCI maintainer"
5319 " and include system chipset information.\n",
5320 bp->dev->name);
5322 bnx2_disable_int(bp);
5323 bnx2_free_irq(bp);
5325 rc = bnx2_init_nic(bp);
5327 if (!rc)
5328 rc = bnx2_request_irq(bp);
5330 if (rc) {
5331 napi_disable(&bp->napi);
5332 bnx2_free_skbs(bp);
5333 bnx2_free_mem(bp);
5334 del_timer_sync(&bp->timer);
5335 return rc;
5337 bnx2_enable_int(bp);
5340 if (bp->flags & USING_MSI_FLAG) {
5341 printk(KERN_INFO PFX "%s: using MSI\n", dev->name);
5344 netif_start_queue(dev);
5346 return 0;
5349 static void
5350 bnx2_reset_task(struct work_struct *work)
5352 struct bnx2 *bp = container_of(work, struct bnx2, reset_task);
5354 if (!netif_running(bp->dev))
5355 return;
5357 bp->in_reset_task = 1;
5358 bnx2_netif_stop(bp);
5360 bnx2_init_nic(bp);
5362 atomic_set(&bp->intr_sem, 1);
5363 bnx2_netif_start(bp);
5364 bp->in_reset_task = 0;
5367 static void
5368 bnx2_tx_timeout(struct net_device *dev)
5370 struct bnx2 *bp = netdev_priv(dev);
5372 /* This allows the netif to be shut down gracefully before resetting */
5373 schedule_work(&bp->reset_task);
5376 #ifdef BCM_VLAN
5377 /* Called with rtnl_lock */
5378 static void
5379 bnx2_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp)
5381 struct bnx2 *bp = netdev_priv(dev);
5383 bnx2_netif_stop(bp);
5385 bp->vlgrp = vlgrp;
5386 bnx2_set_rx_mode(dev);
5388 bnx2_netif_start(bp);
5390 #endif
5392 /* Called with netif_tx_lock.
5393 * bnx2_tx_int() runs without netif_tx_lock unless it needs to call
5394 * netif_wake_queue().
5395 */
5396 static int
5397 bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
5399 struct bnx2 *bp = netdev_priv(dev);
5400 dma_addr_t mapping;
5401 struct tx_bd *txbd;
5402 struct sw_bd *tx_buf;
5403 u32 len, vlan_tag_flags, last_frag, mss;
5404 u16 prod, ring_prod;
5405 int i;
5407 if (unlikely(bnx2_tx_avail(bp) < (skb_shinfo(skb)->nr_frags + 1))) {
5408 netif_stop_queue(dev);
5409 printk(KERN_ERR PFX "%s: BUG! Tx ring full when queue awake!\n",
5410 dev->name);
5412 return NETDEV_TX_BUSY;
5414 len = skb_headlen(skb);
5415 prod = bp->tx_prod;
5416 ring_prod = TX_RING_IDX(prod);
5418 vlan_tag_flags = 0;
5419 if (skb->ip_summed == CHECKSUM_PARTIAL) {
5420 vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
5423 if (bp->vlgrp != 0 && vlan_tx_tag_present(skb)) {
5424 vlan_tag_flags |=
5425 (TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
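/*
 * LSO/TSO setup: when gso_size is set, the MSS rides in the upper
 * 16 bits of tx_bd_mss_nbytes.  IPv4 headers get a zeroed checksum
 * and a seeded pseudo-header checksum for the chip to complete;
 * for IPv6, the TCP header offset is folded into the BD flag bits
 * below.
 */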
5427 if ((mss = skb_shinfo(skb)->gso_size)) {
5428 u32 tcp_opt_len, ip_tcp_len;
5429 struct iphdr *iph;
5431 vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;
5433 tcp_opt_len = tcp_optlen(skb);
5435 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
5436 u32 tcp_off = skb_transport_offset(skb) -
5437 sizeof(struct ipv6hdr) - ETH_HLEN;
5439 vlan_tag_flags |= ((tcp_opt_len >> 2) << 8) |
5440 TX_BD_FLAGS_SW_FLAGS;
5441 if (likely(tcp_off == 0))
5442 vlan_tag_flags &= ~TX_BD_FLAGS_TCP6_OFF0_MSK;
5443 else {
5444 tcp_off >>= 3;
5445 vlan_tag_flags |= ((tcp_off & 0x3) <<
5446 TX_BD_FLAGS_TCP6_OFF0_SHL) |
5447 ((tcp_off & 0x10) <<
5448 TX_BD_FLAGS_TCP6_OFF4_SHL);
5449 mss |= (tcp_off & 0xc) << TX_BD_TCP6_OFF2_SHL;
5451 } else {
5452 if (skb_header_cloned(skb) &&
5453 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
5454 dev_kfree_skb(skb);
5455 return NETDEV_TX_OK;
5458 ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
5460 iph = ip_hdr(skb);
5461 iph->check = 0;
5462 iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
5463 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
5464 iph->daddr, 0,
5465 IPPROTO_TCP,
5466 0);
5467 if (tcp_opt_len || (iph->ihl > 5)) {
5468 vlan_tag_flags |= ((iph->ihl - 5) +
5469 (tcp_opt_len >> 2)) << 8;
5472 } else
5473 mss = 0;
5475 mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE);
5477 tx_buf = &bp->tx_buf_ring[ring_prod];
5478 tx_buf->skb = skb;
5479 pci_unmap_addr_set(tx_buf, mapping, mapping);
5481 txbd = &bp->tx_desc_ring[ring_prod];
5483 txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
5484 txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
5485 txbd->tx_bd_mss_nbytes = len | (mss << 16);
5486 txbd->tx_bd_vlan_tag_flags = vlan_tag_flags | TX_BD_FLAGS_START;
5488 last_frag = skb_shinfo(skb)->nr_frags;
5490 for (i = 0; i < last_frag; i++) {
5491 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
5493 prod = NEXT_TX_BD(prod);
5494 ring_prod = TX_RING_IDX(prod);
5495 txbd = &bp->tx_desc_ring[ring_prod];
5497 len = frag->size;
5498 mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
5499 len, PCI_DMA_TODEVICE);
5500 pci_unmap_addr_set(&bp->tx_buf_ring[ring_prod],
5501 mapping, mapping);
5503 txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
5504 txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
5505 txbd->tx_bd_mss_nbytes = len | (mss << 16);
5506 txbd->tx_bd_vlan_tag_flags = vlan_tag_flags;
5509 txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;
5511 prod = NEXT_TX_BD(prod);
5512 bp->tx_prod_bseq += skb->len;
5514 REG_WR16(bp, bp->tx_bidx_addr, prod);
5515 REG_WR(bp, bp->tx_bseq_addr, bp->tx_prod_bseq);
5517 mmiowb();
5519 bp->tx_prod = prod;
5520 dev->trans_start = jiffies;
5522 if (unlikely(bnx2_tx_avail(bp) <= MAX_SKB_FRAGS)) {
5523 netif_stop_queue(dev);
5524 if (bnx2_tx_avail(bp) > bp->tx_wake_thresh)
5525 netif_wake_queue(dev);
5528 return NETDEV_TX_OK;
5531 /* Called with rtnl_lock */
5532 static int
5533 bnx2_close(struct net_device *dev)
5535 struct bnx2 *bp = netdev_priv(dev);
5536 u32 reset_code;
5538 /* Calling flush_scheduled_work() may deadlock because
5539 * linkwatch_event() may be on the workqueue and it will try to get
5540 * the rtnl_lock which we are holding.
5541 */
5542 while (bp->in_reset_task)
5543 msleep(1);
5545 bnx2_disable_int_sync(bp);
5546 napi_disable(&bp->napi);
5547 del_timer_sync(&bp->timer);
5548 if (bp->flags & NO_WOL_FLAG)
5549 reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
5550 else if (bp->wol)
5551 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
5552 else
5553 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
5554 bnx2_reset_chip(bp, reset_code);
5555 bnx2_free_irq(bp);
5556 bnx2_free_skbs(bp);
5557 bnx2_free_mem(bp);
5558 bp->link_up = 0;
5559 netif_carrier_off(bp->dev);
5560 bnx2_set_power_state(bp, PCI_D3hot);
5561 return 0;
5564 #define GET_NET_STATS64(ctr) \
5565 (unsigned long) ((unsigned long) (ctr##_hi) << 32) + \
5566 (unsigned long) (ctr##_lo)
5568 #define GET_NET_STATS32(ctr) \
5569 (ctr##_lo)
5571 #if (BITS_PER_LONG == 64)
5572 #define GET_NET_STATS GET_NET_STATS64
5573 #else
5574 #define GET_NET_STATS GET_NET_STATS32
5575 #endif
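/*
 * The chip exports 64-bit counters as hi/lo 32-bit pairs in the
 * statistics block; GET_NET_STATS folds them into an unsigned
 * long, using both halves on 64-bit hosts and only the low word
 * on 32-bit hosts to match struct net_device_stats.
 */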
5577 static struct net_device_stats *
5578 bnx2_get_stats(struct net_device *dev)
5580 struct bnx2 *bp = netdev_priv(dev);
5581 struct statistics_block *stats_blk = bp->stats_blk;
5582 struct net_device_stats *net_stats = &bp->net_stats;
5584 if (bp->stats_blk == NULL) {
5585 return net_stats;
5587 net_stats->rx_packets =
5588 GET_NET_STATS(stats_blk->stat_IfHCInUcastPkts) +
5589 GET_NET_STATS(stats_blk->stat_IfHCInMulticastPkts) +
5590 GET_NET_STATS(stats_blk->stat_IfHCInBroadcastPkts);
5592 net_stats->tx_packets =
5593 GET_NET_STATS(stats_blk->stat_IfHCOutUcastPkts) +
5594 GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts) +
5595 GET_NET_STATS(stats_blk->stat_IfHCOutBroadcastPkts);
5597 net_stats->rx_bytes =
5598 GET_NET_STATS(stats_blk->stat_IfHCInOctets);
5600 net_stats->tx_bytes =
5601 GET_NET_STATS(stats_blk->stat_IfHCOutOctets);
5603 net_stats->multicast =
5604 GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts);
5606 net_stats->collisions =
5607 (unsigned long) stats_blk->stat_EtherStatsCollisions;
5609 net_stats->rx_length_errors =
5610 (unsigned long) (stats_blk->stat_EtherStatsUndersizePkts +
5611 stats_blk->stat_EtherStatsOverrsizePkts);
5613 net_stats->rx_over_errors =
5614 (unsigned long) stats_blk->stat_IfInMBUFDiscards;
5616 net_stats->rx_frame_errors =
5617 (unsigned long) stats_blk->stat_Dot3StatsAlignmentErrors;
5619 net_stats->rx_crc_errors =
5620 (unsigned long) stats_blk->stat_Dot3StatsFCSErrors;
5622 net_stats->rx_errors = net_stats->rx_length_errors +
5623 net_stats->rx_over_errors + net_stats->rx_frame_errors +
5624 net_stats->rx_crc_errors;
5626 net_stats->tx_aborted_errors =
5627 (unsigned long) (stats_blk->stat_Dot3StatsExcessiveCollisions +
5628 stats_blk->stat_Dot3StatsLateCollisions);
5630 if ((CHIP_NUM(bp) == CHIP_NUM_5706) ||
5631 (CHIP_ID(bp) == CHIP_ID_5708_A0))
5632 net_stats->tx_carrier_errors = 0;
5633 else {
5634 net_stats->tx_carrier_errors =
5635 (unsigned long)
5636 stats_blk->stat_Dot3StatsCarrierSenseErrors;
5639 net_stats->tx_errors =
5640 (unsigned long)
5641 stats_blk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors
5642 +
5643 net_stats->tx_aborted_errors +
5644 net_stats->tx_carrier_errors;
5646 net_stats->rx_missed_errors =
5647 (unsigned long) (stats_blk->stat_IfInMBUFDiscards +
5648 stats_blk->stat_FwRxDrop);
5650 return net_stats;
5653 /* All ethtool functions called with rtnl_lock */
5655 static int
5656 bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
5658 struct bnx2 *bp = netdev_priv(dev);
5659 int support_serdes = 0, support_copper = 0;
5661 cmd->supported = SUPPORTED_Autoneg;
5662 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG) {
5663 support_serdes = 1;
5664 support_copper = 1;
5665 } else if (bp->phy_port == PORT_FIBRE)
5666 support_serdes = 1;
5667 else
5668 support_copper = 1;
5670 if (support_serdes) {
5671 cmd->supported |= SUPPORTED_1000baseT_Full |
5672 SUPPORTED_FIBRE;
5673 if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG)
5674 cmd->supported |= SUPPORTED_2500baseX_Full;
5677 if (support_copper) {
5678 cmd->supported |= SUPPORTED_10baseT_Half |
5679 SUPPORTED_10baseT_Full |
5680 SUPPORTED_100baseT_Half |
5681 SUPPORTED_100baseT_Full |
5682 SUPPORTED_1000baseT_Full |
5683 SUPPORTED_TP;
5687 spin_lock_bh(&bp->phy_lock);
5688 cmd->port = bp->phy_port;
5689 cmd->advertising = bp->advertising;
5691 if (bp->autoneg & AUTONEG_SPEED) {
5692 cmd->autoneg = AUTONEG_ENABLE;
5694 else {
5695 cmd->autoneg = AUTONEG_DISABLE;
5698 if (netif_carrier_ok(dev)) {
5699 cmd->speed = bp->line_speed;
5700 cmd->duplex = bp->duplex;
5702 else {
5703 cmd->speed = -1;
5704 cmd->duplex = -1;
5706 spin_unlock_bh(&bp->phy_lock);
5708 cmd->transceiver = XCVR_INTERNAL;
5709 cmd->phy_address = bp->phy_addr;
5711 return 0;
5714 static int
5715 bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
5717 struct bnx2 *bp = netdev_priv(dev);
5718 u8 autoneg = bp->autoneg;
5719 u8 req_duplex = bp->req_duplex;
5720 u16 req_line_speed = bp->req_line_speed;
5721 u32 advertising = bp->advertising;
5722 int err = -EINVAL;
5724 spin_lock_bh(&bp->phy_lock);
5726 if (cmd->port != PORT_TP && cmd->port != PORT_FIBRE)
5727 goto err_out_unlock;
5729 if (cmd->port != bp->phy_port && !(bp->phy_flags & REMOTE_PHY_CAP_FLAG))
5730 goto err_out_unlock;
5732 if (cmd->autoneg == AUTONEG_ENABLE) {
5733 autoneg |= AUTONEG_SPEED;
5735 cmd->advertising &= ETHTOOL_ALL_COPPER_SPEED;
5737 /* allow advertising 1 speed */
5738 if ((cmd->advertising == ADVERTISED_10baseT_Half) ||
5739 (cmd->advertising == ADVERTISED_10baseT_Full) ||
5740 (cmd->advertising == ADVERTISED_100baseT_Half) ||
5741 (cmd->advertising == ADVERTISED_100baseT_Full)) {
5743 if (cmd->port == PORT_FIBRE)
5744 goto err_out_unlock;
5746 advertising = cmd->advertising;
5748 } else if (cmd->advertising == ADVERTISED_2500baseX_Full) {
5749 if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) ||
5750 (cmd->port == PORT_TP))
5751 goto err_out_unlock;
5752 } else if (cmd->advertising == ADVERTISED_1000baseT_Full)
5753 advertising = cmd->advertising;
5754 else if (cmd->advertising == ADVERTISED_1000baseT_Half)
5755 goto err_out_unlock;
5756 else {
5757 if (cmd->port == PORT_FIBRE)
5758 advertising = ETHTOOL_ALL_FIBRE_SPEED;
5759 else
5760 advertising = ETHTOOL_ALL_COPPER_SPEED;
5762 advertising |= ADVERTISED_Autoneg;
5764 else {
5765 if (cmd->port == PORT_FIBRE) {
5766 if ((cmd->speed != SPEED_1000 &&
5767 cmd->speed != SPEED_2500) ||
5768 (cmd->duplex != DUPLEX_FULL))
5769 goto err_out_unlock;
5771 if (cmd->speed == SPEED_2500 &&
5772 !(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
5773 goto err_out_unlock;
5775 else if (cmd->speed == SPEED_1000 || cmd->speed == SPEED_2500)
5776 goto err_out_unlock;
5778 autoneg &= ~AUTONEG_SPEED;
5779 req_line_speed = cmd->speed;
5780 req_duplex = cmd->duplex;
5781 advertising = 0;
5784 bp->autoneg = autoneg;
5785 bp->advertising = advertising;
5786 bp->req_line_speed = req_line_speed;
5787 bp->req_duplex = req_duplex;
5789 err = bnx2_setup_phy(bp, cmd->port);
5791 err_out_unlock:
5792 spin_unlock_bh(&bp->phy_lock);
5794 return err;
5797 static void
5798 bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
5800 struct bnx2 *bp = netdev_priv(dev);
5802 strcpy(info->driver, DRV_MODULE_NAME);
5803 strcpy(info->version, DRV_MODULE_VERSION);
5804 strcpy(info->bus_info, pci_name(bp->pdev));
5805 strcpy(info->fw_version, bp->fw_version);
5808 #define BNX2_REGDUMP_LEN (32 * 1024)
5810 static int
5811 bnx2_get_regs_len(struct net_device *dev)
5813 return BNX2_REGDUMP_LEN;
5816 static void
5817 bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
5819 u32 *p = _p, i, offset;
5820 u8 *orig_p = _p;
5821 struct bnx2 *bp = netdev_priv(dev);
5822 u32 reg_boundaries[] = { 0x0000, 0x0098, 0x0400, 0x045c,
5823 0x0800, 0x0880, 0x0c00, 0x0c10,
5824 0x0c30, 0x0d08, 0x1000, 0x101c,
5825 0x1040, 0x1048, 0x1080, 0x10a4,
5826 0x1400, 0x1490, 0x1498, 0x14f0,
5827 0x1500, 0x155c, 0x1580, 0x15dc,
5828 0x1600, 0x1658, 0x1680, 0x16d8,
5829 0x1800, 0x1820, 0x1840, 0x1854,
5830 0x1880, 0x1894, 0x1900, 0x1984,
5831 0x1c00, 0x1c0c, 0x1c40, 0x1c54,
5832 0x1c80, 0x1c94, 0x1d00, 0x1d84,
5833 0x2000, 0x2030, 0x23c0, 0x2400,
5834 0x2800, 0x2820, 0x2830, 0x2850,
5835 0x2b40, 0x2c10, 0x2fc0, 0x3058,
5836 0x3c00, 0x3c94, 0x4000, 0x4010,
5837 0x4080, 0x4090, 0x43c0, 0x4458,
5838 0x4c00, 0x4c18, 0x4c40, 0x4c54,
5839 0x4fc0, 0x5010, 0x53c0, 0x5444,
5840 0x5c00, 0x5c18, 0x5c80, 0x5c90,
5841 0x5fc0, 0x6000, 0x6400, 0x6428,
5842 0x6800, 0x6848, 0x684c, 0x6860,
5843 0x6888, 0x6910, 0x8000 };
5845 regs->version = 0;
5847 memset(p, 0, BNX2_REGDUMP_LEN);
5849 if (!netif_running(bp->dev))
5850 return;
5852 i = 0;
5853 offset = reg_boundaries[0];
5854 p += offset;
5855 while (offset < BNX2_REGDUMP_LEN) {
5856 *p++ = REG_RD(bp, offset);
5857 offset += 4;
5858 if (offset == reg_boundaries[i + 1]) {
5859 offset = reg_boundaries[i + 2];
5860 p = (u32 *) (orig_p + offset);
5861 i += 2;
5866 static void
5867 bnx2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
5869 struct bnx2 *bp = netdev_priv(dev);
5871 if (bp->flags & NO_WOL_FLAG) {
5872 wol->supported = 0;
5873 wol->wolopts = 0;
5875 else {
5876 wol->supported = WAKE_MAGIC;
5877 if (bp->wol)
5878 wol->wolopts = WAKE_MAGIC;
5879 else
5880 wol->wolopts = 0;
5882 memset(&wol->sopass, 0, sizeof(wol->sopass));
5885 static int
5886 bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
5888 struct bnx2 *bp = netdev_priv(dev);
5890 if (wol->wolopts & ~WAKE_MAGIC)
5891 return -EINVAL;
5893 if (wol->wolopts & WAKE_MAGIC) {
5894 if (bp->flags & NO_WOL_FLAG)
5895 return -EINVAL;
5897 bp->wol = 1;
5899 else {
5900 bp->wol = 0;
5902 return 0;
5905 static int
5906 bnx2_nway_reset(struct net_device *dev)
5908 struct bnx2 *bp = netdev_priv(dev);
5909 u32 bmcr;
5911 if (!(bp->autoneg & AUTONEG_SPEED)) {
5912 return -EINVAL;
5915 spin_lock_bh(&bp->phy_lock);
5917 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG) {
5918 int rc;
5920 rc = bnx2_setup_remote_phy(bp, bp->phy_port);
5921 spin_unlock_bh(&bp->phy_lock);
5922 return rc;
5925 /* Force a link down visible on the other side */
5926 if (bp->phy_flags & PHY_SERDES_FLAG) {
5927 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
5928 spin_unlock_bh(&bp->phy_lock);
5930 msleep(20);
5932 spin_lock_bh(&bp->phy_lock);
5934 bp->current_interval = SERDES_AN_TIMEOUT;
5935 bp->serdes_an_pending = 1;
5936 mod_timer(&bp->timer, jiffies + bp->current_interval);
5939 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
5940 bmcr &= ~BMCR_LOOPBACK;
5941 bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART | BMCR_ANENABLE);
5943 spin_unlock_bh(&bp->phy_lock);
5945 return 0;
5948 static int
5949 bnx2_get_eeprom_len(struct net_device *dev)
5951 struct bnx2 *bp = netdev_priv(dev);
5953 if (bp->flash_info == NULL)
5954 return 0;
5956 return (int) bp->flash_size;
5959 static int
5960 bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
5961 u8 *eebuf)
5963 struct bnx2 *bp = netdev_priv(dev);
5964 int rc;
5966 /* parameters already validated in ethtool_get_eeprom */
5968 rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
5970 return rc;
5973 static int
5974 bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
5975 u8 *eebuf)
5977 struct bnx2 *bp = netdev_priv(dev);
5978 int rc;
5980 /* parameters already validated in ethtool_set_eeprom */
5982 rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
5984 return rc;
5987 static int
5988 bnx2_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
5990 struct bnx2 *bp = netdev_priv(dev);
5992 memset(coal, 0, sizeof(struct ethtool_coalesce));
5994 coal->rx_coalesce_usecs = bp->rx_ticks;
5995 coal->rx_max_coalesced_frames = bp->rx_quick_cons_trip;
5996 coal->rx_coalesce_usecs_irq = bp->rx_ticks_int;
5997 coal->rx_max_coalesced_frames_irq = bp->rx_quick_cons_trip_int;
5999 coal->tx_coalesce_usecs = bp->tx_ticks;
6000 coal->tx_max_coalesced_frames = bp->tx_quick_cons_trip;
6001 coal->tx_coalesce_usecs_irq = bp->tx_ticks_int;
6002 coal->tx_max_coalesced_frames_irq = bp->tx_quick_cons_trip_int;
6004 coal->stats_block_coalesce_usecs = bp->stats_ticks;
6006 return 0;
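/*
 * Clamp the requested coalescing parameters to what the host
 * coalescing block can hold: tick values are 10-bit (0x3ff) and
 * frame counts 8-bit (0xff).  On the 5708 the statistics ticks are
 * restricted to 0 or one second, consistent with the corrupted
 * counter workaround in bnx2_timer().
 */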
6009 static int
6010 bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
6012 struct bnx2 *bp = netdev_priv(dev);
6014 bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
6015 if (bp->rx_ticks > 0x3ff) bp->rx_ticks = 0x3ff;
6017 bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames;
6018 if (bp->rx_quick_cons_trip > 0xff) bp->rx_quick_cons_trip = 0xff;
6020 bp->rx_ticks_int = (u16) coal->rx_coalesce_usecs_irq;
6021 if (bp->rx_ticks_int > 0x3ff) bp->rx_ticks_int = 0x3ff;
6023 bp->rx_quick_cons_trip_int = (u16) coal->rx_max_coalesced_frames_irq;
6024 if (bp->rx_quick_cons_trip_int > 0xff)
6025 bp->rx_quick_cons_trip_int = 0xff;
6027 bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
6028 if (bp->tx_ticks > 0x3ff) bp->tx_ticks = 0x3ff;
6030 bp->tx_quick_cons_trip = (u16) coal->tx_max_coalesced_frames;
6031 if (bp->tx_quick_cons_trip > 0xff) bp->tx_quick_cons_trip = 0xff;
6033 bp->tx_ticks_int = (u16) coal->tx_coalesce_usecs_irq;
6034 if (bp->tx_ticks_int > 0x3ff) bp->tx_ticks_int = 0x3ff;
6036 bp->tx_quick_cons_trip_int = (u16) coal->tx_max_coalesced_frames_irq;
6037 if (bp->tx_quick_cons_trip_int > 0xff) bp->tx_quick_cons_trip_int =
6038 0xff;
6040 bp->stats_ticks = coal->stats_block_coalesce_usecs;
6041 if (CHIP_NUM(bp) == CHIP_NUM_5708) {
6042 if (bp->stats_ticks != 0 && bp->stats_ticks != USEC_PER_SEC)
6043 bp->stats_ticks = USEC_PER_SEC;
6045 if (bp->stats_ticks > BNX2_HC_STATS_TICKS_HC_STAT_TICKS)
6046 bp->stats_ticks = BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
6047 bp->stats_ticks &= BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
6049 if (netif_running(bp->dev)) {
6050 bnx2_netif_stop(bp);
6051 bnx2_init_nic(bp);
6052 bnx2_netif_start(bp);
6055 return 0;
6058 static void
6059 bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
6061 struct bnx2 *bp = netdev_priv(dev);
6063 ering->rx_max_pending = MAX_TOTAL_RX_DESC_CNT;
6064 ering->rx_mini_max_pending = 0;
6065 ering->rx_jumbo_max_pending = MAX_TOTAL_RX_PG_DESC_CNT;
6067 ering->rx_pending = bp->rx_ring_size;
6068 ering->rx_mini_pending = 0;
6069 ering->rx_jumbo_pending = bp->rx_pg_ring_size;
6071 ering->tx_max_pending = MAX_TX_DESC_CNT;
6072 ering->tx_pending = bp->tx_ring_size;
6075 static int
6076 bnx2_change_ring_size(struct bnx2 *bp, u32 rx, u32 tx)
6078 if (netif_running(bp->dev)) {
6079 bnx2_netif_stop(bp);
6080 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
6081 bnx2_free_skbs(bp);
6082 bnx2_free_mem(bp);
6085 bnx2_set_rx_ring_size(bp, rx);
6086 bp->tx_ring_size = tx;
6088 if (netif_running(bp->dev)) {
6089 int rc;
6091 rc = bnx2_alloc_mem(bp);
6092 if (rc)
6093 return rc;
6094 bnx2_init_nic(bp);
6095 bnx2_netif_start(bp);
6097 return 0;
6100 static int
6101 bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
6103 struct bnx2 *bp = netdev_priv(dev);
6104 int rc;
6106 if ((ering->rx_pending > MAX_TOTAL_RX_DESC_CNT) ||
6107 (ering->tx_pending > MAX_TX_DESC_CNT) ||
6108 (ering->tx_pending <= MAX_SKB_FRAGS)) {
6110 return -EINVAL;
6112 rc = bnx2_change_ring_size(bp, ering->rx_pending, ering->tx_pending);
6113 return rc;
6116 static void
6117 bnx2_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
6119 struct bnx2 *bp = netdev_priv(dev);
6121 epause->autoneg = ((bp->autoneg & AUTONEG_FLOW_CTRL) != 0);
6122 epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) != 0);
6123 epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) != 0);
6126 static int
6127 bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
6129 struct bnx2 *bp = netdev_priv(dev);
6131 bp->req_flow_ctrl = 0;
6132 if (epause->rx_pause)
6133 bp->req_flow_ctrl |= FLOW_CTRL_RX;
6134 if (epause->tx_pause)
6135 bp->req_flow_ctrl |= FLOW_CTRL_TX;
6137 if (epause->autoneg) {
6138 bp->autoneg |= AUTONEG_FLOW_CTRL;
6140 else {
6141 bp->autoneg &= ~AUTONEG_FLOW_CTRL;
6144 spin_lock_bh(&bp->phy_lock);
6146 bnx2_setup_phy(bp, bp->phy_port);
6148 spin_unlock_bh(&bp->phy_lock);
6150 return 0;
6153 static u32
6154 bnx2_get_rx_csum(struct net_device *dev)
6156 struct bnx2 *bp = netdev_priv(dev);
6158 return bp->rx_csum;
6161 static int
6162 bnx2_set_rx_csum(struct net_device *dev, u32 data)
6164 struct bnx2 *bp = netdev_priv(dev);
6166 bp->rx_csum = data;
6167 return 0;
6170 static int
6171 bnx2_set_tso(struct net_device *dev, u32 data)
6173 struct bnx2 *bp = netdev_priv(dev);
6175 if (data) {
6176 dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
6177 if (CHIP_NUM(bp) == CHIP_NUM_5709)
6178 dev->features |= NETIF_F_TSO6;
6179 } else
6180 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6 |
6181 NETIF_F_TSO_ECN);
6182 return 0;
6185 #define BNX2_NUM_STATS 46
6187 static struct {
6188 char string[ETH_GSTRING_LEN];
6189 } bnx2_stats_str_arr[BNX2_NUM_STATS] = {
6190 { "rx_bytes" },
6191 { "rx_error_bytes" },
6192 { "tx_bytes" },
6193 { "tx_error_bytes" },
6194 { "rx_ucast_packets" },
6195 { "rx_mcast_packets" },
6196 { "rx_bcast_packets" },
6197 { "tx_ucast_packets" },
6198 { "tx_mcast_packets" },
6199 { "tx_bcast_packets" },
6200 { "tx_mac_errors" },
6201 { "tx_carrier_errors" },
6202 { "rx_crc_errors" },
6203 { "rx_align_errors" },
6204 { "tx_single_collisions" },
6205 { "tx_multi_collisions" },
6206 { "tx_deferred" },
6207 { "tx_excess_collisions" },
6208 { "tx_late_collisions" },
6209 { "tx_total_collisions" },
6210 { "rx_fragments" },
6211 { "rx_jabbers" },
6212 { "rx_undersize_packets" },
6213 { "rx_oversize_packets" },
6214 { "rx_64_byte_packets" },
6215 { "rx_65_to_127_byte_packets" },
6216 { "rx_128_to_255_byte_packets" },
6217 { "rx_256_to_511_byte_packets" },
6218 { "rx_512_to_1023_byte_packets" },
6219 { "rx_1024_to_1522_byte_packets" },
6220 { "rx_1523_to_9022_byte_packets" },
6221 { "tx_64_byte_packets" },
6222 { "tx_65_to_127_byte_packets" },
6223 { "tx_128_to_255_byte_packets" },
6224 { "tx_256_to_511_byte_packets" },
6225 { "tx_512_to_1023_byte_packets" },
6226 { "tx_1024_to_1522_byte_packets" },
6227 { "tx_1523_to_9022_byte_packets" },
6228 { "rx_xon_frames" },
6229 { "rx_xoff_frames" },
6230 { "tx_xon_frames" },
6231 { "tx_xoff_frames" },
6232 { "rx_mac_ctrl_frames" },
6233 { "rx_filtered_packets" },
6234 { "rx_discards" },
6235 { "rx_fw_discards" },
6238 #define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4)
6240 static const unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
6241 STATS_OFFSET32(stat_IfHCInOctets_hi),
6242 STATS_OFFSET32(stat_IfHCInBadOctets_hi),
6243 STATS_OFFSET32(stat_IfHCOutOctets_hi),
6244 STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
6245 STATS_OFFSET32(stat_IfHCInUcastPkts_hi),
6246 STATS_OFFSET32(stat_IfHCInMulticastPkts_hi),
6247 STATS_OFFSET32(stat_IfHCInBroadcastPkts_hi),
6248 STATS_OFFSET32(stat_IfHCOutUcastPkts_hi),
6249 STATS_OFFSET32(stat_IfHCOutMulticastPkts_hi),
6250 STATS_OFFSET32(stat_IfHCOutBroadcastPkts_hi),
6251 STATS_OFFSET32(stat_emac_tx_stat_dot3statsinternalmactransmiterrors),
6252 STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
6253 STATS_OFFSET32(stat_Dot3StatsFCSErrors),
6254 STATS_OFFSET32(stat_Dot3StatsAlignmentErrors),
6255 STATS_OFFSET32(stat_Dot3StatsSingleCollisionFrames),
6256 STATS_OFFSET32(stat_Dot3StatsMultipleCollisionFrames),
6257 STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
6258 STATS_OFFSET32(stat_Dot3StatsExcessiveCollisions),
6259 STATS_OFFSET32(stat_Dot3StatsLateCollisions),
6260 STATS_OFFSET32(stat_EtherStatsCollisions),
6261 STATS_OFFSET32(stat_EtherStatsFragments),
6262 STATS_OFFSET32(stat_EtherStatsJabbers),
6263 STATS_OFFSET32(stat_EtherStatsUndersizePkts),
6264 STATS_OFFSET32(stat_EtherStatsOverrsizePkts),
6265 STATS_OFFSET32(stat_EtherStatsPktsRx64Octets),
6266 STATS_OFFSET32(stat_EtherStatsPktsRx65Octetsto127Octets),
6267 STATS_OFFSET32(stat_EtherStatsPktsRx128Octetsto255Octets),
6268 STATS_OFFSET32(stat_EtherStatsPktsRx256Octetsto511Octets),
6269 STATS_OFFSET32(stat_EtherStatsPktsRx512Octetsto1023Octets),
6270 STATS_OFFSET32(stat_EtherStatsPktsRx1024Octetsto1522Octets),
6271 STATS_OFFSET32(stat_EtherStatsPktsRx1523Octetsto9022Octets),
6272 STATS_OFFSET32(stat_EtherStatsPktsTx64Octets),
6273 STATS_OFFSET32(stat_EtherStatsPktsTx65Octetsto127Octets),
6274 STATS_OFFSET32(stat_EtherStatsPktsTx128Octetsto255Octets),
6275 STATS_OFFSET32(stat_EtherStatsPktsTx256Octetsto511Octets),
6276 STATS_OFFSET32(stat_EtherStatsPktsTx512Octetsto1023Octets),
6277 STATS_OFFSET32(stat_EtherStatsPktsTx1024Octetsto1522Octets),
6278 STATS_OFFSET32(stat_EtherStatsPktsTx1523Octetsto9022Octets),
6279 STATS_OFFSET32(stat_XonPauseFramesReceived),
6280 STATS_OFFSET32(stat_XoffPauseFramesReceived),
6281 STATS_OFFSET32(stat_OutXonSent),
6282 STATS_OFFSET32(stat_OutXoffSent),
6283 STATS_OFFSET32(stat_MacControlFramesReceived),
6284 STATS_OFFSET32(stat_IfInFramesL2FilterDiscards),
6285 STATS_OFFSET32(stat_IfInMBUFDiscards),
6286 STATS_OFFSET32(stat_FwRxDrop),
6287 };
6289 /* stat_IfHCInBadOctets and stat_Dot3StatsCarrierSenseErrors are
6290 * skipped because of errata.
6291 */
6292 static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
6293 8,0,8,8,8,8,8,8,8,8,
6294 4,0,4,4,4,4,4,4,4,4,
6295 4,4,4,4,4,4,4,4,4,4,
6296 4,4,4,4,4,4,4,4,4,4,
6297 4,4,4,4,4,4,
6298 };
6300 static u8 bnx2_5708_stats_len_arr[BNX2_NUM_STATS] = {
6301 8,0,8,8,8,8,8,8,8,8,
6302 4,4,4,4,4,4,4,4,4,4,
6303 4,4,4,4,4,4,4,4,4,4,
6304 4,4,4,4,4,4,4,4,4,4,
6305 4,4,4,4,4,4,
6306 };
6308 #define BNX2_NUM_TESTS 6
6310 static struct {
6311 char string[ETH_GSTRING_LEN];
6312 } bnx2_tests_str_arr[BNX2_NUM_TESTS] = {
6313 { "register_test (offline)" },
6314 { "memory_test (offline)" },
6315 { "loopback_test (offline)" },
6316 { "nvram_test (online)" },
6317 { "interrupt_test (online)" },
6318 { "link_test (online)" },
6321 static int
6322 bnx2_get_sset_count(struct net_device *dev, int sset)
6324 switch (sset) {
6325 case ETH_SS_TEST:
6326 return BNX2_NUM_TESTS;
6327 case ETH_SS_STATS:
6328 return BNX2_NUM_STATS;
6329 default:
6330 return -EOPNOTSUPP;
6334 static void
6335 bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
6337 struct bnx2 *bp = netdev_priv(dev);
6339 memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS);
6340 if (etest->flags & ETH_TEST_FL_OFFLINE) {
6341 int i;
6343 bnx2_netif_stop(bp);
6344 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
6345 bnx2_free_skbs(bp);
6347 if (bnx2_test_registers(bp) != 0) {
6348 buf[0] = 1;
6349 etest->flags |= ETH_TEST_FL_FAILED;
6351 if (bnx2_test_memory(bp) != 0) {
6352 buf[1] = 1;
6353 etest->flags |= ETH_TEST_FL_FAILED;
6355 if ((buf[2] = bnx2_test_loopback(bp)) != 0)
6356 etest->flags |= ETH_TEST_FL_FAILED;
6358 if (!netif_running(bp->dev)) {
6359 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
6361 else {
6362 bnx2_init_nic(bp);
6363 bnx2_netif_start(bp);
6366 /* wait for link up */
6367 for (i = 0; i < 7; i++) {
6368 if (bp->link_up)
6369 break;
6370 msleep_interruptible(1000);
6374 if (bnx2_test_nvram(bp) != 0) {
6375 buf[3] = 1;
6376 etest->flags |= ETH_TEST_FL_FAILED;
6378 if (bnx2_test_intr(bp) != 0) {
6379 buf[4] = 1;
6380 etest->flags |= ETH_TEST_FL_FAILED;
6383 if (bnx2_test_link(bp) != 0) {
6384 buf[5] = 1;
6385 etest->flags |= ETH_TEST_FL_FAILED;
6390 static void
6391 bnx2_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
6393 switch (stringset) {
6394 case ETH_SS_STATS:
6395 memcpy(buf, bnx2_stats_str_arr,
6396 sizeof(bnx2_stats_str_arr));
6397 break;
6398 case ETH_SS_TEST:
6399 memcpy(buf, bnx2_tests_str_arr,
6400 sizeof(bnx2_tests_str_arr));
6401 break;
6405 static void
6406 bnx2_get_ethtool_stats(struct net_device *dev,
6407 struct ethtool_stats *stats, u64 *buf)
6409 struct bnx2 *bp = netdev_priv(dev);
6410 int i;
6411 u32 *hw_stats = (u32 *) bp->stats_blk;
6412 u8 *stats_len_arr = NULL;
6414 if (hw_stats == NULL) {
6415 memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS);
6416 return;
6419 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
6420 (CHIP_ID(bp) == CHIP_ID_5706_A1) ||
6421 (CHIP_ID(bp) == CHIP_ID_5706_A2) ||
6422 (CHIP_ID(bp) == CHIP_ID_5708_A0))
6423 stats_len_arr = bnx2_5706_stats_len_arr;
6424 else
6425 stats_len_arr = bnx2_5708_stats_len_arr;
6427 for (i = 0; i < BNX2_NUM_STATS; i++) {
6428 if (stats_len_arr[i] == 0) {
6429 /* skip this counter */
6430 buf[i] = 0;
6431 continue;
6433 if (stats_len_arr[i] == 4) {
6434 /* 4-byte counter */
6435 buf[i] = (u64)
6436 *(hw_stats + bnx2_stats_offset_arr[i]);
6437 continue;
6439 /* 8-byte counter */
6440 buf[i] = (((u64) *(hw_stats +
6441 bnx2_stats_offset_arr[i])) << 32) +
6442 *(hw_stats + bnx2_stats_offset_arr[i] + 1);
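/*
 * ethtool LED identify.  The LED block is switched to MAC override
 * mode and toggled every 500 ms for data*2 iterations, i.e. roughly
 * `data' seconds of blinking (default 2), after which the saved
 * MISC_CFG value is restored.
 */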
6446 static int
6447 bnx2_phys_id(struct net_device *dev, u32 data)
6449 struct bnx2 *bp = netdev_priv(dev);
6450 int i;
6451 u32 save;
6453 if (data == 0)
6454 data = 2;
6456 save = REG_RD(bp, BNX2_MISC_CFG);
6457 REG_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);
6459 for (i = 0; i < (data * 2); i++) {
6460 if ((i % 2) == 0) {
6461 REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE);
6463 else {
6464 REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE |
6465 BNX2_EMAC_LED_1000MB_OVERRIDE |
6466 BNX2_EMAC_LED_100MB_OVERRIDE |
6467 BNX2_EMAC_LED_10MB_OVERRIDE |
6468 BNX2_EMAC_LED_TRAFFIC_OVERRIDE |
6469 BNX2_EMAC_LED_TRAFFIC);
6471 msleep_interruptible(500);
6472 if (signal_pending(current))
6473 break;
6475 REG_WR(bp, BNX2_EMAC_LED, 0);
6476 REG_WR(bp, BNX2_MISC_CFG, save);
6477 return 0;
6480 static int
6481 bnx2_set_tx_csum(struct net_device *dev, u32 data)
6483 struct bnx2 *bp = netdev_priv(dev);
6485 if (CHIP_NUM(bp) == CHIP_NUM_5709)
6486 return (ethtool_op_set_tx_ipv6_csum(dev, data));
6487 else
6488 return (ethtool_op_set_tx_csum(dev, data));
static const struct ethtool_ops bnx2_ethtool_ops = {
	.get_settings = bnx2_get_settings,
	.set_settings = bnx2_set_settings,
	.get_drvinfo = bnx2_get_drvinfo,
	.get_regs_len = bnx2_get_regs_len,
	.get_regs = bnx2_get_regs,
	.get_wol = bnx2_get_wol,
	.set_wol = bnx2_set_wol,
	.nway_reset = bnx2_nway_reset,
	.get_link = ethtool_op_get_link,
	.get_eeprom_len = bnx2_get_eeprom_len,
	.get_eeprom = bnx2_get_eeprom,
	.set_eeprom = bnx2_set_eeprom,
	.get_coalesce = bnx2_get_coalesce,
	.set_coalesce = bnx2_set_coalesce,
	.get_ringparam = bnx2_get_ringparam,
	.set_ringparam = bnx2_set_ringparam,
	.get_pauseparam = bnx2_get_pauseparam,
	.set_pauseparam = bnx2_set_pauseparam,
	.get_rx_csum = bnx2_get_rx_csum,
	.set_rx_csum = bnx2_set_rx_csum,
	.set_tx_csum = bnx2_set_tx_csum,
	.set_sg = ethtool_op_set_sg,
	.set_tso = bnx2_set_tso,
	.self_test = bnx2_self_test,
	.get_strings = bnx2_get_strings,
	.phys_id = bnx2_phys_id,
	.get_ethtool_stats = bnx2_get_ethtool_stats,
	.get_sset_count = bnx2_get_sset_count,
};

/* Called with rtnl_lock */
static int
bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *data = if_mii(ifr);
	struct bnx2 *bp = netdev_priv(dev);
	int err;

	switch(cmd) {
	case SIOCGMIIPHY:
		data->phy_id = bp->phy_addr;

		/* fallthru */
	case SIOCGMIIREG: {
		u32 mii_regval;

		if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
			return -EOPNOTSUPP;

		if (!netif_running(dev))
			return -EAGAIN;

		spin_lock_bh(&bp->phy_lock);
		err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval);
		spin_unlock_bh(&bp->phy_lock);

		data->val_out = mii_regval;

		return err;
	}

	case SIOCSMIIREG:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;

		if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
			return -EOPNOTSUPP;

		if (!netif_running(dev))
			return -EAGAIN;

		spin_lock_bh(&bp->phy_lock);
		err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in);
		spin_unlock_bh(&bp->phy_lock);

		return err;

	default:
		/* do nothing */
		break;
	}
	return -EOPNOTSUPP;
}

/* Called with rtnl_lock */
static int
bnx2_change_mac_addr(struct net_device *dev, void *p)
{
	struct sockaddr *addr = p;
	struct bnx2 *bp = netdev_priv(dev);

	if (!is_valid_ether_addr(addr->sa_data))
		return -EINVAL;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	if (netif_running(dev))
		bnx2_set_mac_addr(bp);

	return 0;
}

/* Called with rtnl_lock */
static int
bnx2_change_mtu(struct net_device *dev, int new_mtu)
{
	struct bnx2 *bp = netdev_priv(dev);

	if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) ||
		((new_mtu + ETH_HLEN) < MIN_ETHERNET_PACKET_SIZE))
		return -EINVAL;

	dev->mtu = new_mtu;
	return (bnx2_change_ring_size(bp, bp->rx_ring_size, bp->tx_ring_size));
}

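/* netpoll entry point (netconsole and friends): service the interrupt
 * handler directly with the device IRQ masked, for contexts where normal
 * interrupt delivery is unavailable.
 */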
#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
static void
poll_bnx2(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);

	disable_irq(bp->pdev->irq);
	bnx2_interrupt(bp->pdev->irq, dev);
	enable_irq(bp->pdev->irq);
}
#endif

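/* Determine whether a 5709 port is copper or SerDes.  The dual-media
 * bond id identifies single-media packages directly; otherwise the media
 * is decoded from the strap value (or its software override), which maps
 * differently for PCI function 0 and function 1.
 */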
static void __devinit
bnx2_get_5709_media(struct bnx2 *bp)
{
	u32 val = REG_RD(bp, BNX2_MISC_DUAL_MEDIA_CTRL);
	u32 bond_id = val & BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID;
	u32 strap;

	if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_C)
		return;
	else if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_S) {
		bp->phy_flags |= PHY_SERDES_FLAG;
		return;
	}

	if (val & BNX2_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE)
		strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL) >> 21;
	else
		strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP) >> 8;

	if (PCI_FUNC(bp->pdev->devfn) == 0) {
		switch (strap) {
		case 0x4:
		case 0x5:
		case 0x6:
			bp->phy_flags |= PHY_SERDES_FLAG;
			return;
		}
	} else {
		switch (strap) {
		case 0x1:
		case 0x2:
		case 0x4:
			bp->phy_flags |= PHY_SERDES_FLAG;
			return;
		}
	}
}

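/* Derive the host bus type and clock from the PCICFG misc status and
 * clock control registers: PCI-X detection, bus speed (the detected raw
 * clock is mapped to the nearest nominal speed), and 32- vs 64-bit width.
 */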
static void __devinit
bnx2_get_pci_speed(struct bnx2 *bp)
{
	u32 reg;

	reg = REG_RD(bp, BNX2_PCICFG_MISC_STATUS);
	if (reg & BNX2_PCICFG_MISC_STATUS_PCIX_DET) {
		u32 clkreg;

		bp->flags |= PCIX_FLAG;

		clkreg = REG_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS);

		clkreg &= BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
		switch (clkreg) {
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
			bp->bus_speed_mhz = 133;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
			bp->bus_speed_mhz = 100;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
			bp->bus_speed_mhz = 66;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
			bp->bus_speed_mhz = 50;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
			bp->bus_speed_mhz = 33;
			break;
		}
	}
	else {
		if (reg & BNX2_PCICFG_MISC_STATUS_M66EN)
			bp->bus_speed_mhz = 66;
		else
			bp->bus_speed_mhz = 33;
	}

	if (reg & BNX2_PCICFG_MISC_STATUS_32BIT_DET)
		bp->flags |= PCI_32BIT_FLAG;
}

static int __devinit
bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
{
	struct bnx2 *bp;
	unsigned long mem_len;
	int rc, i, j;
	u32 reg;
	u64 dma_mask, persist_dma_mask;

	SET_NETDEV_DEV(dev, &pdev->dev);
	bp = netdev_priv(dev);

	bp->flags = 0;
	bp->phy_flags = 0;

	/* enable device (incl. PCI PM wakeup), and bus-mastering */
	rc = pci_enable_device(pdev);
	if (rc) {
		dev_err(&pdev->dev, "Cannot enable PCI device, aborting.\n");
		goto err_out;
	}

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		dev_err(&pdev->dev,
			"Cannot find PCI device base address, aborting.\n");
		rc = -ENODEV;
		goto err_out_disable;
	}

	rc = pci_request_regions(pdev, DRV_MODULE_NAME);
	if (rc) {
		dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting.\n");
		goto err_out_disable;
	}

	pci_set_master(pdev);

	bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (bp->pm_cap == 0) {
		dev_err(&pdev->dev,
			"Cannot find power management capability, aborting.\n");
		rc = -EIO;
		goto err_out_release;
	}

	bp->dev = dev;
	bp->pdev = pdev;

	spin_lock_init(&bp->phy_lock);
	spin_lock_init(&bp->indirect_lock);
	INIT_WORK(&bp->reset_task, bnx2_reset_task);

	dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0);
	mem_len = MB_GET_CID_ADDR(TX_TSS_CID + 1);
	dev->mem_end = dev->mem_start + mem_len;
	dev->irq = pdev->irq;

	bp->regview = ioremap_nocache(dev->base_addr, mem_len);

	if (!bp->regview) {
		dev_err(&pdev->dev, "Cannot map register space, aborting.\n");
		rc = -ENOMEM;
		goto err_out_release;
	}

	/* Configure byte swap and enable write to the reg_window registers.
	 * Rely on CPU to do target byte swapping on big endian systems.
	 * The chip's target access swapping will not swap all accesses.
	 */
	pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG,
			       BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
			       BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);

	bnx2_set_power_state(bp, PCI_D0);

	bp->chip_id = REG_RD(bp, BNX2_MISC_ID);

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		if (pci_find_capability(pdev, PCI_CAP_ID_EXP) == 0) {
			dev_err(&pdev->dev,
				"Cannot find PCIE capability, aborting.\n");
			rc = -EIO;
			goto err_out_unmap;
		}
		bp->flags |= PCIE_FLAG;
	} else {
		bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
		if (bp->pcix_cap == 0) {
			dev_err(&pdev->dev,
				"Cannot find PCIX capability, aborting.\n");
			rc = -EIO;
			goto err_out_unmap;
		}
	}

	if (CHIP_ID(bp) != CHIP_ID_5706_A0 && CHIP_ID(bp) != CHIP_ID_5706_A1) {
		if (pci_find_capability(pdev, PCI_CAP_ID_MSI))
			bp->flags |= MSI_CAP_FLAG;
	}

	/* 5708 cannot support DMA addresses > 40-bit. */
	if (CHIP_NUM(bp) == CHIP_NUM_5708)
		persist_dma_mask = dma_mask = DMA_40BIT_MASK;
	else
		persist_dma_mask = dma_mask = DMA_64BIT_MASK;

	/* Configure DMA attributes. */
	if (pci_set_dma_mask(pdev, dma_mask) == 0) {
		dev->features |= NETIF_F_HIGHDMA;
		rc = pci_set_consistent_dma_mask(pdev, persist_dma_mask);
		if (rc) {
			dev_err(&pdev->dev,
				"pci_set_consistent_dma_mask failed, aborting.\n");
			goto err_out_unmap;
		}
	} else if ((rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) != 0) {
		dev_err(&pdev->dev, "System does not support DMA, aborting.\n");
		goto err_out_unmap;
	}

	if (!(bp->flags & PCIE_FLAG))
		bnx2_get_pci_speed(bp);

	/* 5706A0 may falsely detect SERR and PERR. */
	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
		reg = REG_RD(bp, PCI_COMMAND);
		reg &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
		REG_WR(bp, PCI_COMMAND, reg);
	}
	else if ((CHIP_ID(bp) == CHIP_ID_5706_A1) &&
		!(bp->flags & PCIX_FLAG)) {
		dev_err(&pdev->dev,
			"5706 A1 can only be used in a PCIX bus, aborting.\n");
		/* rc may still be 0 here; fail the probe explicitly */
		rc = -EIO;
		goto err_out_unmap;
	}

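	/* Set up NVRAM access, then locate the bootcode's shared memory
	 * window: newer bootcode publishes a per-function shared memory
	 * base in a signed header block; otherwise fall back to the fixed
	 * legacy address.
	 */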
	bnx2_init_nvram(bp);

	reg = REG_RD_IND(bp, BNX2_SHM_HDR_SIGNATURE);

	if ((reg & BNX2_SHM_HDR_SIGNATURE_SIG_MASK) ==
	    BNX2_SHM_HDR_SIGNATURE_SIG) {
		u32 off = PCI_FUNC(pdev->devfn) << 2;

		bp->shmem_base = REG_RD_IND(bp, BNX2_SHM_HDR_ADDR_0 + off);
	} else
		bp->shmem_base = HOST_VIEW_SHMEM_BASE;

	/* Get the permanent MAC address.  First we need to make sure the
	 * firmware is actually running.
	 */
	reg = REG_RD_IND(bp, bp->shmem_base + BNX2_DEV_INFO_SIGNATURE);

	if ((reg & BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
	    BNX2_DEV_INFO_SIGNATURE_MAGIC) {
		dev_err(&pdev->dev, "Firmware not running, aborting.\n");
		rc = -ENODEV;
		goto err_out_unmap;
	}

	/* Render each of the top three bytes of the bootcode revision word
	 * as a decimal number, dot-separated and without leading zeros,
	 * e.g. 0x010203xx becomes "1.2.3".
	 */
	reg = REG_RD_IND(bp, bp->shmem_base + BNX2_DEV_INFO_BC_REV);
	for (i = 0, j = 0; i < 3; i++) {
		u8 num, k, skip0;

		num = (u8) (reg >> (24 - (i * 8)));
		for (k = 100, skip0 = 1; k >= 1; num %= k, k /= 10) {
			if (num >= k || !skip0 || k == 1) {
				bp->fw_version[j++] = (num / k) + '0';
				skip0 = 0;
			}
		}
		if (i != 2)
			bp->fw_version[j++] = '.';
	}

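	/* Pick up the port features (WOL, ASF management firmware)
	 * advertised by the bootcode, and append the management firmware
	 * version, if any, to the version string.
	 */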
	reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_FEATURE);
	if (reg & BNX2_PORT_FEATURE_WOL_ENABLED)
		bp->wol = 1;

	if (reg & BNX2_PORT_FEATURE_ASF_ENABLED) {
		bp->flags |= ASF_ENABLE_FLAG;

		for (i = 0; i < 30; i++) {
			reg = REG_RD_IND(bp, bp->shmem_base +
					 BNX2_BC_STATE_CONDITION);
			if (reg & BNX2_CONDITION_MFW_RUN_MASK)
				break;
			msleep(10);
		}
	}
	reg = REG_RD_IND(bp, bp->shmem_base + BNX2_BC_STATE_CONDITION);
	reg &= BNX2_CONDITION_MFW_RUN_MASK;
	if (reg != BNX2_CONDITION_MFW_RUN_UNKNOWN &&
	    reg != BNX2_CONDITION_MFW_RUN_NONE) {
		int i;
		u32 addr = REG_RD_IND(bp, bp->shmem_base + BNX2_MFW_VER_PTR);

		bp->fw_version[j++] = ' ';
		for (i = 0; i < 3; i++) {
			reg = REG_RD_IND(bp, addr + i * 4);
			reg = swab32(reg);
			memcpy(&bp->fw_version[j], &reg, 4);
			j += 4;
		}
	}

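	/* The permanent MAC address is stored big-endian in two 32-bit
	 * shared memory words: the upper word holds bytes 0-1, the lower
	 * word bytes 2-5.
	 */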
	reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_MAC_UPPER);
	bp->mac_addr[0] = (u8) (reg >> 8);
	bp->mac_addr[1] = (u8) reg;

	reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_MAC_LOWER);
	bp->mac_addr[2] = (u8) (reg >> 24);
	bp->mac_addr[3] = (u8) (reg >> 16);
	bp->mac_addr[4] = (u8) (reg >> 8);
	bp->mac_addr[5] = (u8) reg;

	bp->rx_offset = sizeof(struct l2_fhdr) + 2;

	bp->tx_ring_size = MAX_TX_DESC_CNT;
	bnx2_set_rx_ring_size(bp, 255);

	bp->rx_csum = 1;

	/* Default interrupt coalescing parameters. */
	bp->tx_quick_cons_trip_int = 20;
	bp->tx_quick_cons_trip = 20;
	bp->tx_ticks_int = 80;
	bp->tx_ticks = 80;

	bp->rx_quick_cons_trip_int = 6;
	bp->rx_quick_cons_trip = 6;
	bp->rx_ticks_int = 18;
	bp->rx_ticks = 18;

	bp->stats_ticks = USEC_PER_SEC & BNX2_HC_STATS_TICKS_HC_STAT_TICKS;

	bp->timer_interval = HZ;
	bp->current_interval = HZ;

	bp->phy_addr = 1;

	/* Disable WOL support if we are running on a SERDES chip. */
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_get_5709_media(bp);
	else if (CHIP_BOND_ID(bp) & CHIP_BOND_ID_SERDES_BIT)
		bp->phy_flags |= PHY_SERDES_FLAG;

	bp->phy_port = PORT_TP;
	if (bp->phy_flags & PHY_SERDES_FLAG) {
		bp->phy_port = PORT_FIBRE;
		reg = REG_RD_IND(bp, bp->shmem_base +
				 BNX2_SHARED_HW_CFG_CONFIG);
		if (!(reg & BNX2_SHARED_HW_CFG_GIG_LINK_ON_VAUX)) {
			bp->flags |= NO_WOL_FLAG;
			bp->wol = 0;
		}
		if (CHIP_NUM(bp) != CHIP_NUM_5706) {
			bp->phy_addr = 2;
			if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G)
				bp->phy_flags |= PHY_2_5G_CAPABLE_FLAG;
		}
		bnx2_init_remote_phy(bp);

	} else if (CHIP_NUM(bp) == CHIP_NUM_5706 ||
		   CHIP_NUM(bp) == CHIP_NUM_5708)
		bp->phy_flags |= PHY_CRC_FIX_FLAG;
	else if (CHIP_NUM(bp) == CHIP_NUM_5709 &&
		 (CHIP_REV(bp) == CHIP_REV_Ax ||
		  CHIP_REV(bp) == CHIP_REV_Bx))
		bp->phy_flags |= PHY_DIS_EARLY_DAC_FLAG;

	if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
		bp->flags |= NO_WOL_FLAG;
		bp->wol = 0;
	}

	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
		bp->tx_quick_cons_trip_int =
			bp->tx_quick_cons_trip;
		bp->tx_ticks_int = bp->tx_ticks;
		bp->rx_quick_cons_trip_int =
			bp->rx_quick_cons_trip;
		bp->rx_ticks_int = bp->rx_ticks;
		bp->comp_prod_trip_int = bp->comp_prod_trip;
		bp->com_ticks_int = bp->com_ticks;
		bp->cmd_ticks_int = bp->cmd_ticks;
	}

	/* Disable MSI on 5706 if AMD 8132 bridge is found.
	 *
	 * MSI is defined to be 32-bit write.  The 5706 does 64-bit MSI writes
	 * with byte enables disabled on the unused 32-bit word.  This is legal
	 * but causes problems on the AMD 8132 which will eventually stop
	 * responding after a while.
	 *
	 * AMD believes this incompatibility is unique to the 5706, and
	 * prefers to locally disable MSI rather than globally disabling it.
	 */
	if (CHIP_NUM(bp) == CHIP_NUM_5706 && disable_msi == 0) {
		struct pci_dev *amd_8132 = NULL;

		while ((amd_8132 = pci_get_device(PCI_VENDOR_ID_AMD,
						  PCI_DEVICE_ID_AMD_8132_BRIDGE,
						  amd_8132))) {

			if (amd_8132->revision >= 0x10 &&
			    amd_8132->revision <= 0x13) {
				disable_msi = 1;
				pci_dev_put(amd_8132);
				break;
			}
		}
	}

	bnx2_set_default_link(bp);
	bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;

	init_timer(&bp->timer);
	bp->timer.expires = RUN_AT(bp->timer_interval);
	bp->timer.data = (unsigned long) bp;
	bp->timer.function = bnx2_timer;

	return 0;

err_out_unmap:
	if (bp->regview) {
		iounmap(bp->regview);
		bp->regview = NULL;
	}

err_out_release:
	pci_release_regions(pdev);

err_out_disable:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

err_out:
	return rc;
}

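/* Format a human-readable bus description, e.g. "PCI-X 64-bit 133MHz"
 * or "PCI Express", into the caller-supplied buffer.
 */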
static char * __devinit
bnx2_bus_string(struct bnx2 *bp, char *str)
{
	char *s = str;

	if (bp->flags & PCIE_FLAG) {
		s += sprintf(s, "PCI Express");
	} else {
		s += sprintf(s, "PCI");
		if (bp->flags & PCIX_FLAG)
			s += sprintf(s, "-X");
		if (bp->flags & PCI_32BIT_FLAG)
			s += sprintf(s, " 32-bit");
		else
			s += sprintf(s, " 64-bit");
		s += sprintf(s, " %dMHz", bp->bus_speed_mhz);
	}
	return str;
}

static int __devinit
bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	static int version_printed = 0;
	struct net_device *dev = NULL;
	struct bnx2 *bp;
	int rc;
	char str[40];
	DECLARE_MAC_BUF(mac);

	if (version_printed++ == 0)
		printk(KERN_INFO "%s", version);

	/* dev zeroed in init_etherdev */
	dev = alloc_etherdev(sizeof(*bp));

	if (!dev)
		return -ENOMEM;

	rc = bnx2_init_board(pdev, dev);
	if (rc < 0) {
		free_netdev(dev);
		return rc;
	}

	dev->open = bnx2_open;
	dev->hard_start_xmit = bnx2_start_xmit;
	dev->stop = bnx2_close;
	dev->get_stats = bnx2_get_stats;
	dev->set_multicast_list = bnx2_set_rx_mode;
	dev->do_ioctl = bnx2_ioctl;
	dev->set_mac_address = bnx2_change_mac_addr;
	dev->change_mtu = bnx2_change_mtu;
	dev->tx_timeout = bnx2_tx_timeout;
	dev->watchdog_timeo = TX_TIMEOUT;
#ifdef BCM_VLAN
	dev->vlan_rx_register = bnx2_vlan_rx_register;
#endif
	dev->ethtool_ops = &bnx2_ethtool_ops;

	bp = netdev_priv(dev);
	netif_napi_add(dev, &bp->napi, bnx2_poll, 64);

#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
	dev->poll_controller = poll_bnx2;
#endif

	pci_set_drvdata(pdev, dev);

	memcpy(dev->dev_addr, bp->mac_addr, 6);
	memcpy(dev->perm_addr, bp->mac_addr, 6);
	bp->name = board_info[ent->driver_data].name;

	dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		dev->features |= NETIF_F_IPV6_CSUM;

#ifdef BCM_VLAN
	dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
#endif
	dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		dev->features |= NETIF_F_TSO6;

	if ((rc = register_netdev(dev))) {
		dev_err(&pdev->dev, "Cannot register net device\n");
		if (bp->regview)
			iounmap(bp->regview);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
		pci_set_drvdata(pdev, NULL);
		free_netdev(dev);
		return rc;
	}

	printk(KERN_INFO "%s: %s (%c%d) %s found at mem %lx, "
		"IRQ %d, node addr %s\n",
		dev->name,
		bp->name,
		((CHIP_ID(bp) & 0xf000) >> 12) + 'A',
		((CHIP_ID(bp) & 0x0ff0) >> 4),
		bnx2_bus_string(bp, str),
		dev->base_addr,
		bp->pdev->irq, print_mac(mac, dev->dev_addr));

	return 0;
}

static void __devexit
bnx2_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	flush_scheduled_work();

	unregister_netdev(dev);

	if (bp->regview)
		iounmap(bp->regview);

	free_netdev(dev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}

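/* Quiesce the NIC for suspend: stop the data path, detach the netdev,
 * then tell the bootcode whether wake-on-LAN should stay armed before
 * dropping to the target power state.
 */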
static int
bnx2_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);
	u32 reset_code;

	/* PCI register 4 needs to be saved whether netif_running() or not.
	 * MSI address and data need to be saved if using MSI and
	 * netif_running().
	 */
	pci_save_state(pdev);
	if (!netif_running(dev))
		return 0;

	flush_scheduled_work();
	bnx2_netif_stop(bp);
	netif_device_detach(dev);
	del_timer_sync(&bp->timer);
	if (bp->flags & NO_WOL_FLAG)
		reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
	else if (bp->wol)
		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
	else
		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
	bnx2_reset_chip(bp, reset_code);
	bnx2_free_skbs(bp);
	bnx2_set_power_state(bp, pci_choose_state(pdev, state));
	return 0;
}

static int
bnx2_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	pci_restore_state(pdev);
	if (!netif_running(dev))
		return 0;

	bnx2_set_power_state(bp, PCI_D0);
	netif_device_attach(dev);
	bnx2_init_nic(bp);
	bnx2_netif_start(bp);
	return 0;
}

static struct pci_driver bnx2_pci_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= bnx2_pci_tbl,
	.probe		= bnx2_init_one,
	.remove		= __devexit_p(bnx2_remove_one),
	.suspend	= bnx2_suspend,
	.resume		= bnx2_resume,
};

static int __init bnx2_init(void)
{
	return pci_register_driver(&bnx2_pci_driver);
}

static void __exit bnx2_cleanup(void)
{
	pci_unregister_driver(&bnx2_pci_driver);
}

module_init(bnx2_init);
module_exit(bnx2_cleanup);