[BNX2]: Fix 5709 Serdes detection.
[usb.git] / drivers / net / bnx2.c
blob e325f933722565dceec585355665f912d91076d2
/* bnx2.c: Broadcom NX2 network driver.
 *
 * Copyright (c) 2004, 2005, 2006 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Written by: Michael Chan (mchan@broadcom.com)
 */
#include <linux/module.h>
#include <linux/moduleparam.h>

#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <asm/bitops.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <asm/page.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#ifdef NETIF_F_HW_VLAN_TX
#include <linux/if_vlan.h>
#define BCM_VLAN 1
#endif
#ifdef NETIF_F_TSO
#include <net/ip.h>
#include <net/tcp.h>
#include <net/checksum.h>
#define BCM_TSO 1
#endif
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/prefetch.h>
#include <linux/cache.h>
#include <linux/zlib.h>

#include "bnx2.h"
#include "bnx2_fw.h"
#include "bnx2_fw2.h"
#define DRV_MODULE_NAME		"bnx2"
#define PFX DRV_MODULE_NAME	": "
#define DRV_MODULE_VERSION	"1.5.2"
#define DRV_MODULE_RELDATE	"December 13, 2006"

#define RUN_AT(x) (jiffies + (x))

/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT  (5*HZ)

static const char version[] __devinitdata =
	"Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708 Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

static int disable_msi = 0;

module_param(disable_msi, int, 0);
MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
typedef enum {
	BCM5706 = 0,
	NC370T,
	NC370I,
	BCM5706S,
	NC370F,
	BCM5708,
	BCM5708S,
	BCM5709,
} board_t;

/* indexed by board_t, above */
static const struct {
	char *name;
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM5706 1000Base-T" },
	{ "HP NC370T Multifunction Gigabit Server Adapter" },
	{ "HP NC370i Multifunction Gigabit Server Adapter" },
	{ "Broadcom NetXtreme II BCM5706 1000Base-SX" },
	{ "HP NC370F Multifunction Gigabit Server Adapter" },
	{ "Broadcom NetXtreme II BCM5708 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5708 1000Base-SX" },
	{ "Broadcom NetXtreme II BCM5709 1000Base-T" },
};
static struct pci_device_id bnx2_pci_tbl[] = {
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
	  PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709 },
	{ 0, }
};
static struct flash_spec flash_table[] =
{
	/* Slow EEPROM */
	{0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
	 1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - slow"},
	/* Expansion entry 0001 */
	{0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0001"},
	/* Saifun SA25F010 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
	 "Non-buffered flash (128kB)"},
	/* Saifun SA25F020 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
	 "Non-buffered flash (256kB)"},
	/* Expansion entry 0100 */
	{0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0100"},
	/* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
	{0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
	 0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
	 "Entry 0101: ST M45PE10 (128kB non-buffered)"},
	/* Entry 0110: ST M45PE20 (non-buffered flash)*/
	{0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
	 0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
	 "Entry 0110: ST M45PE20 (256kB non-buffered)"},
	/* Saifun SA25F005 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
	 "Non-buffered flash (64kB)"},
	/* Fast EEPROM */
	{0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
	 1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - fast"},
	/* Expansion entry 1001 */
	{0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1001"},
	/* Expansion entry 1010 */
	{0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1010"},
	/* ATMEL AT45DB011B (buffered flash) */
	{0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
	 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
	 "Buffered flash (128kB)"},
	/* Expansion entry 1100 */
	{0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1100"},
	/* Expansion entry 1101 */
	{0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1101"},
	/* Atmel Expansion entry 1110 */
	{0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
	 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1110 (Atmel)"},
	/* ATMEL AT45DB021B (buffered flash) */
	{0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
	 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
	 "Buffered flash (256kB)"},
};
MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);

static inline u32 bnx2_tx_avail(struct bnx2 *bp)
{
	u32 diff;

	smp_mb();

	/* The ring uses 256 indices for 255 entries, one of them
	 * needs to be skipped.
	 */
	diff = bp->tx_prod - bp->tx_cons;
	if (unlikely(diff >= TX_DESC_CNT)) {
		diff &= 0xffff;
		if (diff == TX_DESC_CNT)
			diff = MAX_TX_DESC_CNT;
	}
	return (bp->tx_ring_size - diff);
}
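/* Worked example (illustrative, not part of the driver): tx_prod and
 * tx_cons are free-running 16-bit indices, so the subtraction above is
 * wrap-safe.  With TX_DESC_CNT = 256 and MAX_TX_DESC_CNT = 255:
 *
 *	tx_prod = 0x0102, tx_cons = 0x0002:  diff = 0x100 = TX_DESC_CNT,
 *	folded to 255, so tx_ring_size - 255 descriptors are available.
 *
 *	tx_prod = 0x0001, tx_cons = 0xffff:  the unsigned subtraction
 *	yields a value with high bits set; masking with 0xffff gives
 *	diff = 2, i.e. two descriptors are still in flight.
 */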
static u32
bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
{
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
	return (REG_RD(bp, BNX2_PCICFG_REG_WINDOW));
}

static void
bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
{
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
}

static void
bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
{
	offset += cid_addr;
	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		int i;

		REG_WR(bp, BNX2_CTX_CTX_DATA, val);
		REG_WR(bp, BNX2_CTX_CTX_CTRL,
		       offset | BNX2_CTX_CTX_CTRL_WRITE_REQ);
		for (i = 0; i < 5; i++) {
			u32 val;
			val = REG_RD(bp, BNX2_CTX_CTX_CTRL);
			if ((val & BNX2_CTX_CTX_CTRL_WRITE_REQ) == 0)
				break;
			udelay(5);
		}
	} else {
		REG_WR(bp, BNX2_CTX_DATA_ADR, offset);
		REG_WR(bp, BNX2_CTX_DATA, val);
	}
}
static int
bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
{
	u32 val1;
	int i, ret;

	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	val1 = (bp->phy_addr << 21) | (reg << 16) |
		BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
		BNX2_EMAC_MDIO_COMM_START_BUSY;
	REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

	for (i = 0; i < 50; i++) {
		udelay(10);

		val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
			udelay(5);

			val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
			val1 &= BNX2_EMAC_MDIO_COMM_DATA;

			break;
		}
	}

	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
		*val = 0x0;
		ret = -EBUSY;
	}
	else {
		*val = val1;
		ret = 0;
	}

	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	return ret;
}

static int
bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
{
	u32 val1;
	int i, ret;

	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	val1 = (bp->phy_addr << 21) | (reg << 16) | val |
		BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
		BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
	REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

	for (i = 0; i < 50; i++) {
		udelay(10);

		val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
			udelay(5);
			break;
		}
	}

	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
		ret = -EBUSY;
	else
		ret = 0;

	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	return ret;
}
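/* Sketch of the command word both MDIO routines assemble (inferred
 * from the shifts above, not from a register manual): bits 21 and up
 * carry the PHY address, bits 16-20 the MII register number, and the
 * low 16 bits the write data.  Writing BMCR_RESET (0x8000) to MII_BMCR
 * (register 0) on the PHY at address 1 would compose:
 *
 *	(1 << 21) | (0 << 16) | 0x8000 |
 *		BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
 *		BNX2_EMAC_MDIO_COMM_START_BUSY |
 *		BNX2_EMAC_MDIO_COMM_DISEXT
 *
 * START_BUSY self-clears when the serial MDIO transaction finishes,
 * which is the condition both polling loops above wait for.
 */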
static void
bnx2_disable_int(struct bnx2 *bp)
{
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
	       BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
	REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
}

static void
bnx2_enable_int(struct bnx2 *bp)
{
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
	       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
	       BNX2_PCICFG_INT_ACK_CMD_MASK_INT | bp->last_status_idx);

	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
	       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | bp->last_status_idx);

	REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
}

static void
bnx2_disable_int_sync(struct bnx2 *bp)
{
	atomic_inc(&bp->intr_sem);
	bnx2_disable_int(bp);
	synchronize_irq(bp->pdev->irq);
}

static void
bnx2_netif_stop(struct bnx2 *bp)
{
	bnx2_disable_int_sync(bp);
	if (netif_running(bp->dev)) {
		netif_poll_disable(bp->dev);
		netif_tx_disable(bp->dev);
		bp->dev->trans_start = jiffies;	/* prevent tx timeout */
	}
}

static void
bnx2_netif_start(struct bnx2 *bp)
{
	if (atomic_dec_and_test(&bp->intr_sem)) {
		if (netif_running(bp->dev)) {
			netif_wake_queue(bp->dev);
			netif_poll_enable(bp->dev);
			bnx2_enable_int(bp);
		}
	}
}
static void
bnx2_free_mem(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->ctx_pages; i++) {
		if (bp->ctx_blk[i]) {
			pci_free_consistent(bp->pdev, BCM_PAGE_SIZE,
					    bp->ctx_blk[i],
					    bp->ctx_blk_mapping[i]);
			bp->ctx_blk[i] = NULL;
		}
	}
	if (bp->status_blk) {
		pci_free_consistent(bp->pdev, bp->status_stats_size,
				    bp->status_blk, bp->status_blk_mapping);
		bp->status_blk = NULL;
		bp->stats_blk = NULL;
	}
	if (bp->tx_desc_ring) {
		pci_free_consistent(bp->pdev,
				    sizeof(struct tx_bd) * TX_DESC_CNT,
				    bp->tx_desc_ring, bp->tx_desc_mapping);
		bp->tx_desc_ring = NULL;
	}
	kfree(bp->tx_buf_ring);
	bp->tx_buf_ring = NULL;
	for (i = 0; i < bp->rx_max_ring; i++) {
		if (bp->rx_desc_ring[i])
			pci_free_consistent(bp->pdev,
					    sizeof(struct rx_bd) * RX_DESC_CNT,
					    bp->rx_desc_ring[i],
					    bp->rx_desc_mapping[i]);
		bp->rx_desc_ring[i] = NULL;
	}
	vfree(bp->rx_buf_ring);
	bp->rx_buf_ring = NULL;
}

static int
bnx2_alloc_mem(struct bnx2 *bp)
{
	int i, status_blk_size;

	bp->tx_buf_ring = kzalloc(sizeof(struct sw_bd) * TX_DESC_CNT,
				  GFP_KERNEL);
	if (bp->tx_buf_ring == NULL)
		return -ENOMEM;

	bp->tx_desc_ring = pci_alloc_consistent(bp->pdev,
						sizeof(struct tx_bd) *
						TX_DESC_CNT,
						&bp->tx_desc_mapping);
	if (bp->tx_desc_ring == NULL)
		goto alloc_mem_err;

	bp->rx_buf_ring = vmalloc(sizeof(struct sw_bd) * RX_DESC_CNT *
				  bp->rx_max_ring);
	if (bp->rx_buf_ring == NULL)
		goto alloc_mem_err;

	memset(bp->rx_buf_ring, 0, sizeof(struct sw_bd) * RX_DESC_CNT *
				   bp->rx_max_ring);

	for (i = 0; i < bp->rx_max_ring; i++) {
		bp->rx_desc_ring[i] =
			pci_alloc_consistent(bp->pdev,
					     sizeof(struct rx_bd) * RX_DESC_CNT,
					     &bp->rx_desc_mapping[i]);
		if (bp->rx_desc_ring[i] == NULL)
			goto alloc_mem_err;
	}

	/* Combine status and statistics blocks into one allocation. */
	status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
	bp->status_stats_size = status_blk_size +
				sizeof(struct statistics_block);

	bp->status_blk = pci_alloc_consistent(bp->pdev, bp->status_stats_size,
					      &bp->status_blk_mapping);
	if (bp->status_blk == NULL)
		goto alloc_mem_err;

	memset(bp->status_blk, 0, bp->status_stats_size);

	bp->stats_blk = (void *) ((unsigned long) bp->status_blk +
				  status_blk_size);

	bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		bp->ctx_pages = 0x2000 / BCM_PAGE_SIZE;
		if (bp->ctx_pages == 0)
			bp->ctx_pages = 1;
		for (i = 0; i < bp->ctx_pages; i++) {
			bp->ctx_blk[i] = pci_alloc_consistent(bp->pdev,
						BCM_PAGE_SIZE,
						&bp->ctx_blk_mapping[i]);
			if (bp->ctx_blk[i] == NULL)
				goto alloc_mem_err;
		}
	}
	return 0;

alloc_mem_err:
	bnx2_free_mem(bp);
	return -ENOMEM;
}
static void
bnx2_report_fw_link(struct bnx2 *bp)
{
	u32 fw_link_status = 0;

	if (bp->link_up) {
		u32 bmsr;

		switch (bp->line_speed) {
		case SPEED_10:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_10HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_10FULL;
			break;
		case SPEED_100:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_100HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_100FULL;
			break;
		case SPEED_1000:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_1000HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_1000FULL;
			break;
		case SPEED_2500:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_2500HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_2500FULL;
			break;
		}

		fw_link_status |= BNX2_LINK_STATUS_LINK_UP;

		if (bp->autoneg) {
			fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;

			bnx2_read_phy(bp, MII_BMSR, &bmsr);
			bnx2_read_phy(bp, MII_BMSR, &bmsr);

			if (!(bmsr & BMSR_ANEGCOMPLETE) ||
			    bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)
				fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
			else
				fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
		}
	}
	else
		fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;

	REG_WR_IND(bp, bp->shmem_base + BNX2_LINK_STATUS, fw_link_status);
}

static void
bnx2_report_link(struct bnx2 *bp)
{
	if (bp->link_up) {
		netif_carrier_on(bp->dev);
		printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);

		printk("%d Mbps ", bp->line_speed);

		if (bp->duplex == DUPLEX_FULL)
			printk("full duplex");
		else
			printk("half duplex");

		if (bp->flow_ctrl) {
			if (bp->flow_ctrl & FLOW_CTRL_RX) {
				printk(", receive ");
				if (bp->flow_ctrl & FLOW_CTRL_TX)
					printk("& transmit ");
			}
			else {
				printk(", transmit ");
			}
			printk("flow control ON");
		}
		printk("\n");
	}
	else {
		netif_carrier_off(bp->dev);
		printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
	}

	bnx2_report_fw_link(bp);
}
static void
bnx2_resolve_flow_ctrl(struct bnx2 *bp)
{
	u32 local_adv, remote_adv;

	bp->flow_ctrl = 0;
	if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
	    (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {

		if (bp->duplex == DUPLEX_FULL) {
			bp->flow_ctrl = bp->req_flow_ctrl;
		}
		return;
	}

	if (bp->duplex != DUPLEX_FULL) {
		return;
	}

	if ((bp->phy_flags & PHY_SERDES_FLAG) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5708)) {
		u32 val;

		bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
		if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_TX;
		if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_RX;
		return;
	}

	bnx2_read_phy(bp, MII_ADVERTISE, &local_adv);
	bnx2_read_phy(bp, MII_LPA, &remote_adv);

	if (bp->phy_flags & PHY_SERDES_FLAG) {
		u32 new_local_adv = 0;
		u32 new_remote_adv = 0;

		if (local_adv & ADVERTISE_1000XPAUSE)
			new_local_adv |= ADVERTISE_PAUSE_CAP;
		if (local_adv & ADVERTISE_1000XPSE_ASYM)
			new_local_adv |= ADVERTISE_PAUSE_ASYM;
		if (remote_adv & ADVERTISE_1000XPAUSE)
			new_remote_adv |= ADVERTISE_PAUSE_CAP;
		if (remote_adv & ADVERTISE_1000XPSE_ASYM)
			new_remote_adv |= ADVERTISE_PAUSE_ASYM;

		local_adv = new_local_adv;
		remote_adv = new_remote_adv;
	}

	/* See Table 28B-3 of 802.3ab-1999 spec. */
	if (local_adv & ADVERTISE_PAUSE_CAP) {
		if (local_adv & ADVERTISE_PAUSE_ASYM) {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
			else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
				bp->flow_ctrl = FLOW_CTRL_RX;
			}
		}
		else {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
		}
	}
	else if (local_adv & ADVERTISE_PAUSE_ASYM) {
		if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
		    (remote_adv & ADVERTISE_PAUSE_ASYM)) {

			bp->flow_ctrl = FLOW_CTRL_TX;
		}
	}
}
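/* The resolution above, restated as a table (after the 1000BASE-X
 * pause bits have been mapped onto PAUSE_CAP/PAUSE_ASYM); this is a
 * summary of Table 28B-3 referenced above:
 *
 *	local CAP  local ASYM  remote CAP  remote ASYM	flow_ctrl
 *	    1          0           1           -	TX | RX
 *	    1          1           1           -	TX | RX
 *	    1          1           0           1	RX
 *	    0          1           1           1	TX
 *	  (any other combination)			none
 */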
static int
bnx2_5708s_linkup(struct bnx2 *bp)
{
	u32 val;

	bp->link_up = 1;
	bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
	switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
	case BCM5708S_1000X_STAT1_SPEED_10:
		bp->line_speed = SPEED_10;
		break;
	case BCM5708S_1000X_STAT1_SPEED_100:
		bp->line_speed = SPEED_100;
		break;
	case BCM5708S_1000X_STAT1_SPEED_1G:
		bp->line_speed = SPEED_1000;
		break;
	case BCM5708S_1000X_STAT1_SPEED_2G5:
		bp->line_speed = SPEED_2500;
		break;
	}
	if (val & BCM5708S_1000X_STAT1_FD)
		bp->duplex = DUPLEX_FULL;
	else
		bp->duplex = DUPLEX_HALF;

	return 0;
}

static int
bnx2_5706s_linkup(struct bnx2 *bp)
{
	u32 bmcr, local_adv, remote_adv, common;

	bp->link_up = 1;
	bp->line_speed = SPEED_1000;

	bnx2_read_phy(bp, MII_BMCR, &bmcr);
	if (bmcr & BMCR_FULLDPLX) {
		bp->duplex = DUPLEX_FULL;
	}
	else {
		bp->duplex = DUPLEX_HALF;
	}

	if (!(bmcr & BMCR_ANENABLE)) {
		return 0;
	}

	bnx2_read_phy(bp, MII_ADVERTISE, &local_adv);
	bnx2_read_phy(bp, MII_LPA, &remote_adv);

	common = local_adv & remote_adv;
	if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {

		if (common & ADVERTISE_1000XFULL) {
			bp->duplex = DUPLEX_FULL;
		}
		else {
			bp->duplex = DUPLEX_HALF;
		}
	}

	return 0;
}

static int
bnx2_copper_linkup(struct bnx2 *bp)
{
	u32 bmcr;

	bnx2_read_phy(bp, MII_BMCR, &bmcr);
	if (bmcr & BMCR_ANENABLE) {
		u32 local_adv, remote_adv, common;

		bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
		bnx2_read_phy(bp, MII_STAT1000, &remote_adv);

		common = local_adv & (remote_adv >> 2);
		if (common & ADVERTISE_1000FULL) {
			bp->line_speed = SPEED_1000;
			bp->duplex = DUPLEX_FULL;
		}
		else if (common & ADVERTISE_1000HALF) {
			bp->line_speed = SPEED_1000;
			bp->duplex = DUPLEX_HALF;
		}
		else {
			bnx2_read_phy(bp, MII_ADVERTISE, &local_adv);
			bnx2_read_phy(bp, MII_LPA, &remote_adv);

			common = local_adv & remote_adv;
			if (common & ADVERTISE_100FULL) {
				bp->line_speed = SPEED_100;
				bp->duplex = DUPLEX_FULL;
			}
			else if (common & ADVERTISE_100HALF) {
				bp->line_speed = SPEED_100;
				bp->duplex = DUPLEX_HALF;
			}
			else if (common & ADVERTISE_10FULL) {
				bp->line_speed = SPEED_10;
				bp->duplex = DUPLEX_FULL;
			}
			else if (common & ADVERTISE_10HALF) {
				bp->line_speed = SPEED_10;
				bp->duplex = DUPLEX_HALF;
			}
			else {
				bp->line_speed = 0;
				bp->link_up = 0;
			}
		}
	}
	else {
		if (bmcr & BMCR_SPEED100) {
			bp->line_speed = SPEED_100;
		}
		else {
			bp->line_speed = SPEED_10;
		}
		if (bmcr & BMCR_FULLDPLX) {
			bp->duplex = DUPLEX_FULL;
		}
		else {
			bp->duplex = DUPLEX_HALF;
		}
	}

	return 0;
}
static int
bnx2_set_mac_link(struct bnx2 *bp)
{
	u32 val;

	REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
	if (bp->link_up && (bp->line_speed == SPEED_1000) &&
	    (bp->duplex == DUPLEX_HALF)) {
		REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
	}

	/* Configure the EMAC mode register. */
	val = REG_RD(bp, BNX2_EMAC_MODE);

	val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
		 BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
		 BNX2_EMAC_MODE_25G_MODE);

	if (bp->link_up) {
		switch (bp->line_speed) {
		case SPEED_10:
			if (CHIP_NUM(bp) != CHIP_NUM_5706) {
				val |= BNX2_EMAC_MODE_PORT_MII_10M;
				break;
			}
			/* fall through */
		case SPEED_100:
			val |= BNX2_EMAC_MODE_PORT_MII;
			break;
		case SPEED_2500:
			val |= BNX2_EMAC_MODE_25G_MODE;
			/* fall through */
		case SPEED_1000:
			val |= BNX2_EMAC_MODE_PORT_GMII;
			break;
		}
	}
	else {
		val |= BNX2_EMAC_MODE_PORT_GMII;
	}

	/* Set the MAC to operate in the appropriate duplex mode. */
	if (bp->duplex == DUPLEX_HALF)
		val |= BNX2_EMAC_MODE_HALF_DUPLEX;
	REG_WR(bp, BNX2_EMAC_MODE, val);

	/* Enable/disable rx PAUSE. */
	bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_RX)
		bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);

	/* Enable/disable tx PAUSE. */
	val = REG_RD(bp, BNX2_EMAC_TX_MODE);
	val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_TX)
		val |= BNX2_EMAC_TX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_TX_MODE, val);

	/* Acknowledge the interrupt. */
	REG_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);

	return 0;
}

static int
bnx2_set_link(struct bnx2 *bp)
{
	u32 bmsr;
	u8 link_up;

	if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) {
		bp->link_up = 1;
		return 0;
	}

	link_up = bp->link_up;

	bnx2_read_phy(bp, MII_BMSR, &bmsr);
	bnx2_read_phy(bp, MII_BMSR, &bmsr);

	if ((bp->phy_flags & PHY_SERDES_FLAG) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5706)) {
		u32 val;

		val = REG_RD(bp, BNX2_EMAC_STATUS);
		if (val & BNX2_EMAC_STATUS_LINK)
			bmsr |= BMSR_LSTATUS;
		else
			bmsr &= ~BMSR_LSTATUS;
	}

	if (bmsr & BMSR_LSTATUS) {
		bp->link_up = 1;

		if (bp->phy_flags & PHY_SERDES_FLAG) {
			if (CHIP_NUM(bp) == CHIP_NUM_5706)
				bnx2_5706s_linkup(bp);
			else if (CHIP_NUM(bp) == CHIP_NUM_5708)
				bnx2_5708s_linkup(bp);
		}
		else {
			bnx2_copper_linkup(bp);
		}
		bnx2_resolve_flow_ctrl(bp);
	}
	else {
		if ((bp->phy_flags & PHY_SERDES_FLAG) &&
		    (bp->autoneg & AUTONEG_SPEED)) {

			u32 bmcr;

			bnx2_read_phy(bp, MII_BMCR, &bmcr);
			bmcr &= ~BCM5708S_BMCR_FORCE_2500;
			if (!(bmcr & BMCR_ANENABLE)) {
				bnx2_write_phy(bp, MII_BMCR, bmcr |
					       BMCR_ANENABLE);
			}
		}
		bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
		bp->link_up = 0;
	}

	if (bp->link_up != link_up) {
		bnx2_report_link(bp);
	}

	bnx2_set_mac_link(bp);

	return 0;
}
static int
bnx2_reset_phy(struct bnx2 *bp)
{
	int i;
	u32 reg;

	bnx2_write_phy(bp, MII_BMCR, BMCR_RESET);

#define PHY_RESET_MAX_WAIT 100
	for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
		udelay(10);

		bnx2_read_phy(bp, MII_BMCR, &reg);
		if (!(reg & BMCR_RESET)) {
			udelay(20);
			break;
		}
	}
	if (i == PHY_RESET_MAX_WAIT) {
		return -EBUSY;
	}
	return 0;
}

static u32
bnx2_phy_get_pause_adv(struct bnx2 *bp)
{
	u32 adv = 0;

	if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
	    (FLOW_CTRL_RX | FLOW_CTRL_TX)) {

		if (bp->phy_flags & PHY_SERDES_FLAG) {
			adv = ADVERTISE_1000XPAUSE;
		}
		else {
			adv = ADVERTISE_PAUSE_CAP;
		}
	}
	else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
		if (bp->phy_flags & PHY_SERDES_FLAG) {
			adv = ADVERTISE_1000XPSE_ASYM;
		}
		else {
			adv = ADVERTISE_PAUSE_ASYM;
		}
	}
	else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
		if (bp->phy_flags & PHY_SERDES_FLAG) {
			adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
		}
		else {
			adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
		}
	}
	return adv;
}
static int
bnx2_setup_serdes_phy(struct bnx2 *bp)
{
	u32 adv, bmcr, up1;
	u32 new_adv = 0;

	if (!(bp->autoneg & AUTONEG_SPEED)) {
		u32 new_bmcr;
		int force_link_down = 0;

		bnx2_read_phy(bp, MII_ADVERTISE, &adv);
		adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);

		bnx2_read_phy(bp, MII_BMCR, &bmcr);
		new_bmcr = bmcr & ~(BMCR_ANENABLE | BCM5708S_BMCR_FORCE_2500);
		new_bmcr |= BMCR_SPEED1000;
		if (bp->req_line_speed == SPEED_2500) {
			new_bmcr |= BCM5708S_BMCR_FORCE_2500;
			bnx2_read_phy(bp, BCM5708S_UP1, &up1);
			if (!(up1 & BCM5708S_UP1_2G5)) {
				up1 |= BCM5708S_UP1_2G5;
				bnx2_write_phy(bp, BCM5708S_UP1, up1);
				force_link_down = 1;
			}
		} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
			bnx2_read_phy(bp, BCM5708S_UP1, &up1);
			if (up1 & BCM5708S_UP1_2G5) {
				up1 &= ~BCM5708S_UP1_2G5;
				bnx2_write_phy(bp, BCM5708S_UP1, up1);
				force_link_down = 1;
			}
		}

		if (bp->req_duplex == DUPLEX_FULL) {
			adv |= ADVERTISE_1000XFULL;
			new_bmcr |= BMCR_FULLDPLX;
		}
		else {
			adv |= ADVERTISE_1000XHALF;
			new_bmcr &= ~BMCR_FULLDPLX;
		}
		if ((new_bmcr != bmcr) || (force_link_down)) {
			/* Force a link down visible on the other side */
			if (bp->link_up) {
				bnx2_write_phy(bp, MII_ADVERTISE, adv &
					       ~(ADVERTISE_1000XFULL |
						 ADVERTISE_1000XHALF));
				bnx2_write_phy(bp, MII_BMCR, bmcr |
					       BMCR_ANRESTART | BMCR_ANENABLE);

				bp->link_up = 0;
				netif_carrier_off(bp->dev);
				bnx2_write_phy(bp, MII_BMCR, new_bmcr);
				bnx2_report_link(bp);
			}
			bnx2_write_phy(bp, MII_ADVERTISE, adv);
			bnx2_write_phy(bp, MII_BMCR, new_bmcr);
		}
		return 0;
	}

	if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) {
		bnx2_read_phy(bp, BCM5708S_UP1, &up1);
		up1 |= BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, BCM5708S_UP1, up1);
	}

	if (bp->advertising & ADVERTISED_1000baseT_Full)
		new_adv |= ADVERTISE_1000XFULL;

	new_adv |= bnx2_phy_get_pause_adv(bp);

	bnx2_read_phy(bp, MII_ADVERTISE, &adv);
	bnx2_read_phy(bp, MII_BMCR, &bmcr);

	bp->serdes_an_pending = 0;
	if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
		/* Force a link down visible on the other side */
		if (bp->link_up) {
			bnx2_write_phy(bp, MII_BMCR, BMCR_LOOPBACK);
			spin_unlock_bh(&bp->phy_lock);
			msleep(20);
			spin_lock_bh(&bp->phy_lock);
		}

		bnx2_write_phy(bp, MII_ADVERTISE, new_adv);
		bnx2_write_phy(bp, MII_BMCR, bmcr | BMCR_ANRESTART |
			       BMCR_ANENABLE);
		/* Speed up link-up time when the link partner
		 * does not autonegotiate which is very common
		 * in blade servers. Some blade servers use
		 * IPMI for keyboard input and it's important
		 * to minimize link disruptions. Autoneg. involves
		 * exchanging base pages plus 3 next pages and
		 * normally completes in about 120 msec.
		 */
		bp->current_interval = SERDES_AN_TIMEOUT;
		bp->serdes_an_pending = 1;
		mod_timer(&bp->timer, jiffies + bp->current_interval);
	}

	return 0;
}
#define ETHTOOL_ALL_FIBRE_SPEED						\
	(ADVERTISED_1000baseT_Full)

#define ETHTOOL_ALL_COPPER_SPEED					\
	(ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |		\
	ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |		\
	ADVERTISED_1000baseT_Full)

#define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
	ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)

#define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)
static int
bnx2_setup_copper_phy(struct bnx2 *bp)
{
	u32 bmcr;
	u32 new_bmcr;

	bnx2_read_phy(bp, MII_BMCR, &bmcr);

	if (bp->autoneg & AUTONEG_SPEED) {
		u32 adv_reg, adv1000_reg;
		u32 new_adv_reg = 0;
		u32 new_adv1000_reg = 0;

		bnx2_read_phy(bp, MII_ADVERTISE, &adv_reg);
		adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
			    ADVERTISE_PAUSE_ASYM);

		bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
		adv1000_reg &= PHY_ALL_1000_SPEED;

		if (bp->advertising & ADVERTISED_10baseT_Half)
			new_adv_reg |= ADVERTISE_10HALF;
		if (bp->advertising & ADVERTISED_10baseT_Full)
			new_adv_reg |= ADVERTISE_10FULL;
		if (bp->advertising & ADVERTISED_100baseT_Half)
			new_adv_reg |= ADVERTISE_100HALF;
		if (bp->advertising & ADVERTISED_100baseT_Full)
			new_adv_reg |= ADVERTISE_100FULL;
		if (bp->advertising & ADVERTISED_1000baseT_Full)
			new_adv1000_reg |= ADVERTISE_1000FULL;

		new_adv_reg |= ADVERTISE_CSMA;

		new_adv_reg |= bnx2_phy_get_pause_adv(bp);

		if ((adv1000_reg != new_adv1000_reg) ||
		    (adv_reg != new_adv_reg) ||
		    ((bmcr & BMCR_ANENABLE) == 0)) {

			bnx2_write_phy(bp, MII_ADVERTISE, new_adv_reg);
			bnx2_write_phy(bp, MII_CTRL1000, new_adv1000_reg);
			bnx2_write_phy(bp, MII_BMCR, BMCR_ANRESTART |
				       BMCR_ANENABLE);
		}
		else if (bp->link_up) {
			/* Flow ctrl may have changed from auto to forced */
			/* or vice-versa. */

			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	new_bmcr = 0;
	if (bp->req_line_speed == SPEED_100) {
		new_bmcr |= BMCR_SPEED100;
	}
	if (bp->req_duplex == DUPLEX_FULL) {
		new_bmcr |= BMCR_FULLDPLX;
	}
	if (new_bmcr != bmcr) {
		u32 bmsr;

		bnx2_read_phy(bp, MII_BMSR, &bmsr);
		bnx2_read_phy(bp, MII_BMSR, &bmsr);

		if (bmsr & BMSR_LSTATUS) {
			/* Force link down */
			bnx2_write_phy(bp, MII_BMCR, BMCR_LOOPBACK);
			spin_unlock_bh(&bp->phy_lock);
			msleep(50);
			spin_lock_bh(&bp->phy_lock);

			bnx2_read_phy(bp, MII_BMSR, &bmsr);
			bnx2_read_phy(bp, MII_BMSR, &bmsr);
		}

		bnx2_write_phy(bp, MII_BMCR, new_bmcr);

		/* Normally, the new speed is setup after the link has
		 * gone down and up again. In some cases, link will not go
		 * down so we need to set up the new speed here.
		 */
		if (bmsr & BMSR_LSTATUS) {
			bp->line_speed = bp->req_line_speed;
			bp->duplex = bp->req_duplex;
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
	}
	return 0;
}

static int
bnx2_setup_phy(struct bnx2 *bp)
{
	if (bp->loopback == MAC_LOOPBACK)
		return 0;

	if (bp->phy_flags & PHY_SERDES_FLAG) {
		return (bnx2_setup_serdes_phy(bp));
	}
	else {
		return (bnx2_setup_copper_phy(bp));
	}
}
static int
bnx2_init_5708s_phy(struct bnx2 *bp)
{
	u32 val;

	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
	bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);

	bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
	val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);

	bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
	val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);

	if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) {
		bnx2_read_phy(bp, BCM5708S_UP1, &val);
		val |= BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, BCM5708S_UP1, val);
	}

	if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
		/* increase tx signal amplitude */
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
			       BCM5708S_BLK_ADDR_TX_MISC);
		bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
		val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
		bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
	}

	val = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG) &
	      BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;

	if (val) {
		u32 is_backplane;

		is_backplane = REG_RD_IND(bp, bp->shmem_base +
					  BNX2_SHARED_HW_CFG_CONFIG);
		if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_TX_MISC);
			bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_DIG);
		}
	}
	return 0;
}

static int
bnx2_init_5706s_phy(struct bnx2 *bp)
{
	bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;

	if (CHIP_NUM(bp) == CHIP_NUM_5706)
		REG_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300);

	if (bp->dev->mtu > 1500) {
		u32 val;

		/* Set extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);

		bnx2_write_phy(bp, 0x1c, 0x6c00);
		bnx2_read_phy(bp, 0x1c, &val);
		bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
	}
	else {
		u32 val;

		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val & ~0x4007);

		bnx2_write_phy(bp, 0x1c, 0x6c00);
		bnx2_read_phy(bp, 0x1c, &val);
		bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
	}

	return 0;
}

static int
bnx2_init_copper_phy(struct bnx2 *bp)
{
	u32 val;

	if (bp->phy_flags & PHY_CRC_FIX_FLAG) {
		bnx2_write_phy(bp, 0x18, 0x0c00);
		bnx2_write_phy(bp, 0x17, 0x000a);
		bnx2_write_phy(bp, 0x15, 0x310b);
		bnx2_write_phy(bp, 0x17, 0x201f);
		bnx2_write_phy(bp, 0x15, 0x9506);
		bnx2_write_phy(bp, 0x17, 0x401f);
		bnx2_write_phy(bp, 0x15, 0x14e2);
		bnx2_write_phy(bp, 0x18, 0x0400);
	}

	if (bp->dev->mtu > 1500) {
		/* Set extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val | 0x4000);

		bnx2_read_phy(bp, 0x10, &val);
		bnx2_write_phy(bp, 0x10, val | 0x1);
	}
	else {
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val & ~0x4007);

		bnx2_read_phy(bp, 0x10, &val);
		bnx2_write_phy(bp, 0x10, val & ~0x1);
	}

	/* ethernet@wirespeed */
	bnx2_write_phy(bp, 0x18, 0x7007);
	bnx2_read_phy(bp, 0x18, &val);
	bnx2_write_phy(bp, 0x18, val | (1 << 15) | (1 << 4));
	return 0;
}
static int
bnx2_init_phy(struct bnx2 *bp)
{
	u32 val;
	int rc = 0;

	bp->phy_flags &= ~PHY_INT_MODE_MASK_FLAG;
	bp->phy_flags |= PHY_INT_MODE_LINK_READY_FLAG;

	REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);

	bnx2_reset_phy(bp);

	bnx2_read_phy(bp, MII_PHYSID1, &val);
	bp->phy_id = val << 16;
	bnx2_read_phy(bp, MII_PHYSID2, &val);
	bp->phy_id |= val & 0xffff;

	if (bp->phy_flags & PHY_SERDES_FLAG) {
		if (CHIP_NUM(bp) == CHIP_NUM_5706)
			rc = bnx2_init_5706s_phy(bp);
		else if (CHIP_NUM(bp) == CHIP_NUM_5708)
			rc = bnx2_init_5708s_phy(bp);
	}
	else {
		rc = bnx2_init_copper_phy(bp);
	}

	bnx2_setup_phy(bp);

	return rc;
}

static int
bnx2_set_mac_loopback(struct bnx2 *bp)
{
	u32 mac_mode;

	mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
	mac_mode &= ~BNX2_EMAC_MODE_PORT;
	mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
	REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
	bp->link_up = 1;
	return 0;
}

static int bnx2_test_link(struct bnx2 *);

static int
bnx2_set_phy_loopback(struct bnx2 *bp)
{
	u32 mac_mode;
	int rc, i;

	spin_lock_bh(&bp->phy_lock);
	rc = bnx2_write_phy(bp, MII_BMCR, BMCR_LOOPBACK | BMCR_FULLDPLX |
			    BMCR_SPEED1000);
	spin_unlock_bh(&bp->phy_lock);
	if (rc)
		return rc;

	for (i = 0; i < 10; i++) {
		if (bnx2_test_link(bp) == 0)
			break;
		msleep(100);
	}

	mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
	mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
		      BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
		      BNX2_EMAC_MODE_25G_MODE);

	mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
	REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
	bp->link_up = 1;
	return 0;
}
static int
bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int silent)
{
	int i;
	u32 val;

	bp->fw_wr_seq++;
	msg_data |= bp->fw_wr_seq;

	REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);

	/* wait for an acknowledgement. */
	for (i = 0; i < (FW_ACK_TIME_OUT_MS / 10); i++) {
		msleep(10);

		val = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_MB);

		if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
			break;
	}
	if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
		return 0;

	/* If we timed out, inform the firmware that this is the case. */
	if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
		if (!silent)
			printk(KERN_ERR PFX "fw sync timeout, reset code = "
					    "%x\n", msg_data);

		msg_data &= ~BNX2_DRV_MSG_CODE;
		msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;

		REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);

		return -EBUSY;
	}

	if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
		return -EIO;

	return 0;
}
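/* Usage sketch (illustrative; the message constants come from bnx2.h):
 * callers combine a BNX2_DRV_MSG_DATA_WAIT* phase with a
 * BNX2_DRV_MSG_CODE_* action and rely on the sequence number OR'ed in
 * above to match the request with the firmware's acknowledgement, e.g.
 *
 *	rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 0);
 */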
static int
bnx2_init_5709_context(struct bnx2 *bp)
{
	int i, ret = 0;
	u32 val;

	val = BNX2_CTX_COMMAND_ENABLED | BNX2_CTX_COMMAND_MEM_INIT | (1 << 12);
	val |= (BCM_PAGE_BITS - 8) << 16;
	REG_WR(bp, BNX2_CTX_COMMAND, val);
	for (i = 0; i < bp->ctx_pages; i++) {
		int j;

		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0,
		       (bp->ctx_blk_mapping[i] & 0xffffffff) |
		       BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID);
		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA1,
		       (u64) bp->ctx_blk_mapping[i] >> 32);
		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL, i |
		       BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
		for (j = 0; j < 10; j++) {

			val = REG_RD(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL);
			if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
				break;
			udelay(5);
		}
		if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
			ret = -EBUSY;
			break;
		}
	}
	return ret;
}

static void
bnx2_init_context(struct bnx2 *bp)
{
	u32 vcid;

	vcid = 96;
	while (vcid) {
		u32 vcid_addr, pcid_addr, offset;

		vcid--;

		if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
			u32 new_vcid;

			vcid_addr = GET_PCID_ADDR(vcid);
			if (vcid & 0x8) {
				new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
			}
			else {
				new_vcid = vcid;
			}
			pcid_addr = GET_PCID_ADDR(new_vcid);
		}
		else {
			vcid_addr = GET_CID_ADDR(vcid);
			pcid_addr = vcid_addr;
		}

		REG_WR(bp, BNX2_CTX_VIRT_ADDR, 0x00);
		REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);

		/* Zero out the context. */
		for (offset = 0; offset < PHY_CTX_SIZE; offset += 4) {
			CTX_WR(bp, 0x00, offset, 0);
		}

		REG_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
		REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
	}
}
static int
bnx2_alloc_bad_rbuf(struct bnx2 *bp)
{
	u16 *good_mbuf;
	u32 good_mbuf_cnt;
	u32 val;

	good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
	if (good_mbuf == NULL) {
		printk(KERN_ERR PFX "Failed to allocate memory in "
				    "bnx2_alloc_bad_rbuf\n");
		return -ENOMEM;
	}

	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
	       BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);

	good_mbuf_cnt = 0;

	/* Allocate a bunch of mbufs and save the good ones in an array. */
	val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
	while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
		REG_WR_IND(bp, BNX2_RBUF_COMMAND, BNX2_RBUF_COMMAND_ALLOC_REQ);

		val = REG_RD_IND(bp, BNX2_RBUF_FW_BUF_ALLOC);

		val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;

		/* The addresses with Bit 9 set are bad memory blocks. */
		if (!(val & (1 << 9))) {
			good_mbuf[good_mbuf_cnt] = (u16) val;
			good_mbuf_cnt++;
		}

		val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
	}

	/* Free the good ones back to the mbuf pool thus discarding
	 * all the bad ones. */
	while (good_mbuf_cnt) {
		good_mbuf_cnt--;

		val = good_mbuf[good_mbuf_cnt];
		val = (val << 9) | val | 1;

		REG_WR_IND(bp, BNX2_RBUF_FW_BUF_FREE, val);
	}
	kfree(good_mbuf);
	return 0;
}

static void
bnx2_set_mac_addr(struct bnx2 *bp)
{
	u32 val;
	u8 *mac_addr = bp->dev->dev_addr;

	val = (mac_addr[0] << 8) | mac_addr[1];

	REG_WR(bp, BNX2_EMAC_MAC_MATCH0, val);

	val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
		(mac_addr[4] << 8) | mac_addr[5];

	REG_WR(bp, BNX2_EMAC_MAC_MATCH1, val);
}
static inline int
bnx2_alloc_rx_skb(struct bnx2 *bp, u16 index)
{
	struct sk_buff *skb;
	struct sw_bd *rx_buf = &bp->rx_buf_ring[index];
	dma_addr_t mapping;
	struct rx_bd *rxbd = &bp->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
	unsigned long align;

	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
	if (skb == NULL) {
		return -ENOMEM;
	}

	if (unlikely((align = (unsigned long) skb->data & (BNX2_RX_ALIGN - 1))))
		skb_reserve(skb, BNX2_RX_ALIGN - align);

	mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
				 PCI_DMA_FROMDEVICE);

	rx_buf->skb = skb;
	pci_unmap_addr_set(rx_buf, mapping, mapping);

	rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
	rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;

	bp->rx_prod_bseq += bp->rx_buf_use_size;

	return 0;
}
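/* Alignment example (illustrative): BNX2_RX_ALIGN is a power of two,
 * so the math above rounds skb->data up to the next boundary.  If
 * BNX2_RX_ALIGN were 16 and skb->data ended in ...0x6, then align = 6
 * and skb_reserve(skb, 10) advances the data pointer to the next
 * 16-byte boundary before the buffer is DMA-mapped.
 */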
static void
bnx2_phy_int(struct bnx2 *bp)
{
	u32 new_link_state, old_link_state;

	new_link_state = bp->status_blk->status_attn_bits &
		STATUS_ATTN_BITS_LINK_STATE;
	old_link_state = bp->status_blk->status_attn_bits_ack &
		STATUS_ATTN_BITS_LINK_STATE;
	if (new_link_state != old_link_state) {
		if (new_link_state) {
			REG_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD,
			       STATUS_ATTN_BITS_LINK_STATE);
		}
		else {
			REG_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD,
			       STATUS_ATTN_BITS_LINK_STATE);
		}
		bnx2_set_link(bp);
	}
}

static void
bnx2_tx_int(struct bnx2 *bp)
{
	struct status_block *sblk = bp->status_blk;
	u16 hw_cons, sw_cons, sw_ring_cons;
	int tx_free_bd = 0;

	hw_cons = bp->hw_tx_cons = sblk->status_tx_quick_consumer_index0;
	if ((hw_cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT) {
		hw_cons++;
	}
	sw_cons = bp->tx_cons;

	while (sw_cons != hw_cons) {
		struct sw_bd *tx_buf;
		struct sk_buff *skb;
		int i, last;

		sw_ring_cons = TX_RING_IDX(sw_cons);

		tx_buf = &bp->tx_buf_ring[sw_ring_cons];
		skb = tx_buf->skb;
#ifdef BCM_TSO
		/* partial BD completions possible with TSO packets */
		if (skb_is_gso(skb)) {
			u16 last_idx, last_ring_idx;

			last_idx = sw_cons +
				skb_shinfo(skb)->nr_frags + 1;
			last_ring_idx = sw_ring_cons +
				skb_shinfo(skb)->nr_frags + 1;
			if (unlikely(last_ring_idx >= MAX_TX_DESC_CNT)) {
				last_idx++;
			}
			if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
				break;
			}
		}
#endif
		pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
				 skb_headlen(skb), PCI_DMA_TODEVICE);

		tx_buf->skb = NULL;
		last = skb_shinfo(skb)->nr_frags;

		for (i = 0; i < last; i++) {
			sw_cons = NEXT_TX_BD(sw_cons);

			pci_unmap_page(bp->pdev,
				pci_unmap_addr(
					&bp->tx_buf_ring[TX_RING_IDX(sw_cons)],
					mapping),
				skb_shinfo(skb)->frags[i].size,
				PCI_DMA_TODEVICE);
		}

		sw_cons = NEXT_TX_BD(sw_cons);

		tx_free_bd += last + 1;

		dev_kfree_skb(skb);

		hw_cons = bp->hw_tx_cons =
			sblk->status_tx_quick_consumer_index0;

		if ((hw_cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT) {
			hw_cons++;
		}
	}

	bp->tx_cons = sw_cons;
	/* Need to make the tx_cons update visible to bnx2_start_xmit()
	 * before checking for netif_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that bnx2_start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	if (unlikely(netif_queue_stopped(bp->dev)) &&
	    (bnx2_tx_avail(bp) > bp->tx_wake_thresh)) {
		netif_tx_lock(bp->dev);
		if ((netif_queue_stopped(bp->dev)) &&
		    (bnx2_tx_avail(bp) > bp->tx_wake_thresh))
			netif_wake_queue(bp->dev);
		netif_tx_unlock(bp->dev);
	}
}
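/* Note on the barrier pairing (explanatory): the smp_mb() above pairs
 * with the smp_mb() at the top of bnx2_tx_avail().  The xmit path
 * stops the queue and then re-checks bnx2_tx_avail(); with both
 * barriers in place, either that path sees the updated tx_cons or
 * this path sees the stopped queue, so a wake-up cannot be lost.
 */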
static inline void
bnx2_reuse_rx_skb(struct bnx2 *bp, struct sk_buff *skb,
	u16 cons, u16 prod)
{
	struct sw_bd *cons_rx_buf, *prod_rx_buf;
	struct rx_bd *cons_bd, *prod_bd;

	cons_rx_buf = &bp->rx_buf_ring[cons];
	prod_rx_buf = &bp->rx_buf_ring[prod];

	pci_dma_sync_single_for_device(bp->pdev,
		pci_unmap_addr(cons_rx_buf, mapping),
		bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

	bp->rx_prod_bseq += bp->rx_buf_use_size;

	prod_rx_buf->skb = skb;

	if (cons == prod)
		return;

	pci_unmap_addr_set(prod_rx_buf, mapping,
			   pci_unmap_addr(cons_rx_buf, mapping));

	cons_bd = &bp->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
	prod_bd = &bp->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
	prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
	prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
}

static int
bnx2_rx_int(struct bnx2 *bp, int budget)
{
	struct status_block *sblk = bp->status_blk;
	u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
	struct l2_fhdr *rx_hdr;
	int rx_pkt = 0;

	hw_cons = bp->hw_rx_cons = sblk->status_rx_quick_consumer_index0;
	if ((hw_cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT) {
		hw_cons++;
	}
	sw_cons = bp->rx_cons;
	sw_prod = bp->rx_prod;

	/* Memory barrier necessary as speculative reads of the rx
	 * buffer can be ahead of the index in the status block
	 */
	rmb();
	while (sw_cons != hw_cons) {
		unsigned int len;
		u32 status;
		struct sw_bd *rx_buf;
		struct sk_buff *skb;
		dma_addr_t dma_addr;

		sw_ring_cons = RX_RING_IDX(sw_cons);
		sw_ring_prod = RX_RING_IDX(sw_prod);

		rx_buf = &bp->rx_buf_ring[sw_ring_cons];
		skb = rx_buf->skb;

		rx_buf->skb = NULL;

		dma_addr = pci_unmap_addr(rx_buf, mapping);

		pci_dma_sync_single_for_cpu(bp->pdev, dma_addr,
			bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

		rx_hdr = (struct l2_fhdr *) skb->data;
		len = rx_hdr->l2_fhdr_pkt_len - 4;

		if ((status = rx_hdr->l2_fhdr_status) &
			(L2_FHDR_ERRORS_BAD_CRC |
			L2_FHDR_ERRORS_PHY_DECODE |
			L2_FHDR_ERRORS_ALIGNMENT |
			L2_FHDR_ERRORS_TOO_SHORT |
			L2_FHDR_ERRORS_GIANT_FRAME)) {

			goto reuse_rx;
		}

		/* Since we don't have a jumbo ring, copy small packets
		 * if mtu > 1500
		 */
		if ((bp->dev->mtu > 1500) && (len <= RX_COPY_THRESH)) {
			struct sk_buff *new_skb;

			new_skb = netdev_alloc_skb(bp->dev, len + 2);
			if (new_skb == NULL)
				goto reuse_rx;

			/* aligned copy */
			memcpy(new_skb->data,
			       skb->data + bp->rx_offset - 2,
			       len + 2);

			skb_reserve(new_skb, 2);
			skb_put(new_skb, len);

			bnx2_reuse_rx_skb(bp, skb,
				sw_ring_cons, sw_ring_prod);

			skb = new_skb;
		}
		else if (bnx2_alloc_rx_skb(bp, sw_ring_prod) == 0) {
			pci_unmap_single(bp->pdev, dma_addr,
				bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);

			skb_reserve(skb, bp->rx_offset);
			skb_put(skb, len);
		}
		else {
reuse_rx:
			bnx2_reuse_rx_skb(bp, skb,
				sw_ring_cons, sw_ring_prod);
			goto next_rx;
		}

		skb->protocol = eth_type_trans(skb, bp->dev);

		if ((len > (bp->dev->mtu + ETH_HLEN)) &&
		    (ntohs(skb->protocol) != 0x8100)) {

			dev_kfree_skb(skb);
			goto next_rx;

		}

		skb->ip_summed = CHECKSUM_NONE;
		if (bp->rx_csum &&
			(status & (L2_FHDR_STATUS_TCP_SEGMENT |
			L2_FHDR_STATUS_UDP_DATAGRAM))) {

			if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
					      L2_FHDR_ERRORS_UDP_XSUM)) == 0))
				skb->ip_summed = CHECKSUM_UNNECESSARY;
		}

#ifdef BCM_VLAN
		if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) && (bp->vlgrp != 0)) {
			vlan_hwaccel_receive_skb(skb, bp->vlgrp,
				rx_hdr->l2_fhdr_vlan_tag);
		}
		else
#endif
			netif_receive_skb(skb);

		bp->dev->last_rx = jiffies;
		rx_pkt++;

next_rx:
		sw_cons = NEXT_RX_BD(sw_cons);
		sw_prod = NEXT_RX_BD(sw_prod);

		if ((rx_pkt == budget))
			break;

		/* Refresh hw_cons to see if there is new work */
		if (sw_cons == hw_cons) {
			hw_cons = bp->hw_rx_cons =
				sblk->status_rx_quick_consumer_index0;
			if ((hw_cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT)
				hw_cons++;
			rmb();
		}
	}

	bp->rx_cons = sw_cons;
	bp->rx_prod = sw_prod;

	REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, sw_prod);

	REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bp->rx_prod_bseq);

	mmiowb();

	return rx_pkt;

}
/* MSI ISR - The only difference between this and the INTx ISR
 * is that the MSI interrupt is always serviced.
 */
static irqreturn_t
bnx2_msi(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct bnx2 *bp = netdev_priv(dev);

	prefetch(bp->status_blk);
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
	       BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
	       BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	/* Return here if interrupt is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	netif_rx_schedule(dev);

	return IRQ_HANDLED;
}

static irqreturn_t
bnx2_interrupt(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct bnx2 *bp = netdev_priv(dev);

	/* When using INTx, it is possible for the interrupt to arrive
	 * at the CPU before the status block posted prior to the
	 * interrupt. Reading a register will flush the status block.
	 * When using MSI, the MSI message will always complete after
	 * the status block write.
	 */
	if ((bp->status_blk->status_idx == bp->last_status_idx) &&
	    (REG_RD(bp, BNX2_PCICFG_MISC_STATUS) &
	     BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
		return IRQ_NONE;

	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
	       BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
	       BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	/* Return here if interrupt is shared and is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	netif_rx_schedule(dev);

	return IRQ_HANDLED;
}

static inline int
bnx2_has_work(struct bnx2 *bp)
{
	struct status_block *sblk = bp->status_blk;

	if ((sblk->status_rx_quick_consumer_index0 != bp->hw_rx_cons) ||
	    (sblk->status_tx_quick_consumer_index0 != bp->hw_tx_cons))
		return 1;

	if (((sblk->status_attn_bits & STATUS_ATTN_BITS_LINK_STATE) != 0) !=
	    bp->link_up)
		return 1;

	return 0;
}

static int
bnx2_poll(struct net_device *dev, int *budget)
{
	struct bnx2 *bp = netdev_priv(dev);

	if ((bp->status_blk->status_attn_bits &
	    STATUS_ATTN_BITS_LINK_STATE) !=
	    (bp->status_blk->status_attn_bits_ack &
	    STATUS_ATTN_BITS_LINK_STATE)) {

		spin_lock(&bp->phy_lock);
		bnx2_phy_int(bp);
		spin_unlock(&bp->phy_lock);

		/* This is needed to take care of transient status
		 * during link changes.
		 */
		REG_WR(bp, BNX2_HC_COMMAND,
		       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
		REG_RD(bp, BNX2_HC_COMMAND);
	}

	if (bp->status_blk->status_tx_quick_consumer_index0 != bp->hw_tx_cons)
		bnx2_tx_int(bp);

	if (bp->status_blk->status_rx_quick_consumer_index0 != bp->hw_rx_cons) {
		int orig_budget = *budget;
		int work_done;

		if (orig_budget > dev->quota)
			orig_budget = dev->quota;

		work_done = bnx2_rx_int(bp, orig_budget);
		*budget -= work_done;
		dev->quota -= work_done;
	}

	bp->last_status_idx = bp->status_blk->status_idx;
	rmb();

	if (!bnx2_has_work(bp)) {
		netif_rx_complete(dev);
		if (likely(bp->flags & USING_MSI_FLAG)) {
			REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
			       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
			       bp->last_status_idx);
			return 0;
		}
		REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
		       BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
		       bp->last_status_idx);

		REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
		       bp->last_status_idx);
		return 0;
	}

	return 1;
}
/* Called with rtnl_lock from vlan functions and also netif_tx_lock
 * from set_multicast.
 */
static void
bnx2_set_rx_mode(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 rx_mode, sort_mode;
	int i;

	spin_lock_bh(&bp->phy_lock);

	rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
				  BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
	sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
#ifdef BCM_VLAN
	if (!bp->vlgrp && !(bp->flags & ASF_ENABLE_FLAG))
		rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
#else
	if (!(bp->flags & ASF_ENABLE_FLAG))
		rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
#endif
	if (dev->flags & IFF_PROMISC) {
		/* Promiscuous mode. */
		rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
		sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
			     BNX2_RPM_SORT_USER0_PROM_VLAN;
	}
	else if (dev->flags & IFF_ALLMULTI) {
		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
			REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
			       0xffffffff);
		}
		sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
	}
	else {
		/* Accept one or more multicast(s). */
		struct dev_mc_list *mclist;
		u32 mc_filter[NUM_MC_HASH_REGISTERS];
		u32 regidx;
		u32 bit;
		u32 crc;

		memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);

		for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
		     i++, mclist = mclist->next) {

			crc = ether_crc_le(ETH_ALEN, mclist->dmi_addr);
			bit = crc & 0xff;
			regidx = (bit & 0xe0) >> 5;
			bit &= 0x1f;
			mc_filter[regidx] |= (1 << bit);
		}

		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
			REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
			       mc_filter[i]);
		}

		sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
	}

	if (rx_mode != bp->rx_mode) {
		bp->rx_mode = rx_mode;
		REG_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
	}

	REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
	REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
	REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);

	spin_unlock_bh(&bp->phy_lock);
}
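/* Worked example of the multicast hash above (illustrative): the low
 * 8 bits of the little-endian CRC select one of 256 filter bits.  If
 * ether_crc_le() returned a CRC whose low byte is 0x4d, then
 * bit = 0x4d, regidx = (0x4d & 0xe0) >> 5 = 2 and bit & 0x1f = 13, so
 * bit 13 of BNX2_EMAC_MULTICAST_HASH2 is set for that address.
 */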
2175 #define FW_BUF_SIZE 0x8000
2177 static int
2178 bnx2_gunzip_init(struct bnx2 *bp)
2180 if ((bp->gunzip_buf = vmalloc(FW_BUF_SIZE)) == NULL)
2181 goto gunzip_nomem1;
2183 if ((bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL)) == NULL)
2184 goto gunzip_nomem2;
2186 bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(), GFP_KERNEL);
2187 if (bp->strm->workspace == NULL)
2188 goto gunzip_nomem3;
2190 return 0;
2192 gunzip_nomem3:
2193 kfree(bp->strm);
2194 bp->strm = NULL;
2196 gunzip_nomem2:
2197 vfree(bp->gunzip_buf);
2198 bp->gunzip_buf = NULL;
2200 gunzip_nomem1:
2201 printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for "
2202 "uncompression.\n", bp->dev->name);
2203 return -ENOMEM;
2206 static void
2207 bnx2_gunzip_end(struct bnx2 *bp)
2209 kfree(bp->strm->workspace);
2211 kfree(bp->strm);
2212 bp->strm = NULL;
2214 if (bp->gunzip_buf) {
2215 vfree(bp->gunzip_buf);
2216 bp->gunzip_buf = NULL;
2220 static int
2221 bnx2_gunzip(struct bnx2 *bp, u8 *zbuf, int len, void **outbuf, int *outlen)
2223 int n, rc;
2225 /* check gzip header */
2226 if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED))
2227 return -EINVAL;
2229 n = 10;
2231 #define FNAME 0x8
2232 if (zbuf[3] & FNAME)
2233 while ((zbuf[n++] != 0) && (n < len));
2235 bp->strm->next_in = zbuf + n;
2236 bp->strm->avail_in = len - n;
2237 bp->strm->next_out = bp->gunzip_buf;
2238 bp->strm->avail_out = FW_BUF_SIZE;
2240 rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
2241 if (rc != Z_OK)
2242 return rc;
2244 rc = zlib_inflate(bp->strm, Z_FINISH);
2246 *outlen = FW_BUF_SIZE - bp->strm->avail_out;
2247 *outbuf = bp->gunzip_buf;
2249 if ((rc != Z_OK) && (rc != Z_STREAM_END))
2250 printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
2251 bp->dev->name, bp->strm->msg);
2253 zlib_inflateEnd(bp->strm);
2255 if (rc == Z_STREAM_END)
2256 return 0;
2258 return rc;
2259 }
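/* Illustrative sketch (hypothetical helper, not in the driver): the
 * header walk above amounts to finding where the raw deflate data
 * starts.  Like the code, it assumes FNAME is the only optional FLG
 * field present (FEXTRA, FCOMMENT and FHCRC are not handled):
 *
 *	static int gz_payload_off(const u8 *z, int len)
 *	{
 *		int n = 10;			// fixed gzip member header
 *
 *		if (z[0] != 0x1f || z[1] != 0x8b || z[2] != Z_DEFLATED)
 *			return -EINVAL;		// not a deflate gzip stream
 *		if (z[3] & 0x08)		// FNAME flag
 *			while (n < len && z[n++] != 0)
 *				;		// skip NUL-terminated name
 *		return n;
 *	}
 *
 * zlib_inflateInit2() is then called with -MAX_WBITS so zlib consumes
 * the raw deflate stream without looking for its own header.
 */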
2261 static void
2262 load_rv2p_fw(struct bnx2 *bp, u32 *rv2p_code, u32 rv2p_code_len,
2263 u32 rv2p_proc)
2265 int i;
2266 u32 val;
2269 for (i = 0; i < rv2p_code_len; i += 8) {
2270 REG_WR(bp, BNX2_RV2P_INSTR_HIGH, cpu_to_le32(*rv2p_code));
2271 rv2p_code++;
2272 REG_WR(bp, BNX2_RV2P_INSTR_LOW, cpu_to_le32(*rv2p_code));
2273 rv2p_code++;
2275 if (rv2p_proc == RV2P_PROC1) {
2276 val = (i / 8) | BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
2277 REG_WR(bp, BNX2_RV2P_PROC1_ADDR_CMD, val);
2279 else {
2280 val = (i / 8) | BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
2281 REG_WR(bp, BNX2_RV2P_PROC2_ADDR_CMD, val);
2285 /* Reset the processor, un-stall is done later. */
2286 if (rv2p_proc == RV2P_PROC1) {
2287 REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
2289 else {
2290 REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
2294 static int
2295 load_cpu_fw(struct bnx2 *bp, struct cpu_reg *cpu_reg, struct fw_info *fw)
2297 u32 offset;
2298 u32 val;
2299 int rc;
2301 /* Halt the CPU. */
2302 val = REG_RD_IND(bp, cpu_reg->mode);
2303 val |= cpu_reg->mode_value_halt;
2304 REG_WR_IND(bp, cpu_reg->mode, val);
2305 REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);
2307 /* Load the Text area. */
2308 offset = cpu_reg->spad_base + (fw->text_addr - cpu_reg->mips_view_base);
2309 if (fw->gz_text) {
2310 u32 text_len;
2311 void *text;
2313 rc = bnx2_gunzip(bp, fw->gz_text, fw->gz_text_len, &text,
2314 &text_len);
2315 if (rc)
2316 return rc;
2318 fw->text = text;
2320 if (fw->gz_text) {
2321 int j;
2323 for (j = 0; j < (fw->text_len / 4); j++, offset += 4) {
2324 REG_WR_IND(bp, offset, cpu_to_le32(fw->text[j]));
2328 /* Load the Data area. */
2329 offset = cpu_reg->spad_base + (fw->data_addr - cpu_reg->mips_view_base);
2330 if (fw->data) {
2331 int j;
2333 for (j = 0; j < (fw->data_len / 4); j++, offset += 4) {
2334 REG_WR_IND(bp, offset, fw->data[j]);
2338 /* Load the SBSS area. */
2339 offset = cpu_reg->spad_base + (fw->sbss_addr - cpu_reg->mips_view_base);
2340 if (fw->sbss) {
2341 int j;
2343 for (j = 0; j < (fw->sbss_len / 4); j++, offset += 4) {
2344 REG_WR_IND(bp, offset, fw->sbss[j]);
2348 /* Load the BSS area. */
2349 offset = cpu_reg->spad_base + (fw->bss_addr - cpu_reg->mips_view_base);
2350 if (fw->bss) {
2351 int j;
2353 for (j = 0; j < (fw->bss_len/4); j++, offset += 4) {
2354 REG_WR_IND(bp, offset, fw->bss[j]);
2358 /* Load the Read-Only area. */
2359 offset = cpu_reg->spad_base +
2360 (fw->rodata_addr - cpu_reg->mips_view_base);
2361 if (fw->rodata) {
2362 int j;
2364 for (j = 0; j < (fw->rodata_len / 4); j++, offset += 4) {
2365 REG_WR_IND(bp, offset, fw->rodata[j]);
2369 /* Clear the pre-fetch instruction. */
2370 REG_WR_IND(bp, cpu_reg->inst, 0);
2371 REG_WR_IND(bp, cpu_reg->pc, fw->start_addr);
2373 /* Start the CPU. */
2374 val = REG_RD_IND(bp, cpu_reg->mode);
2375 val &= ~cpu_reg->mode_value_halt;
2376 REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);
2377 REG_WR_IND(bp, cpu_reg->mode, val);
2379 return 0;
2382 static int
2383 bnx2_init_cpus(struct bnx2 *bp)
2385 struct cpu_reg cpu_reg;
2386 struct fw_info *fw;
2387 int rc = 0;
2388 void *text;
2389 u32 text_len;
2391 if ((rc = bnx2_gunzip_init(bp)) != 0)
2392 return rc;
2394 /* Initialize the RV2P processor. */
2395 rc = bnx2_gunzip(bp, bnx2_rv2p_proc1, sizeof(bnx2_rv2p_proc1), &text,
2396 &text_len);
2397 if (rc)
2398 goto init_cpu_err;
2400 load_rv2p_fw(bp, text, text_len, RV2P_PROC1);
2402 rc = bnx2_gunzip(bp, bnx2_rv2p_proc2, sizeof(bnx2_rv2p_proc2), &text,
2403 &text_len);
2404 if (rc)
2405 goto init_cpu_err;
2407 load_rv2p_fw(bp, text, text_len, RV2P_PROC2);
2409 /* Initialize the RX Processor. */
2410 cpu_reg.mode = BNX2_RXP_CPU_MODE;
2411 cpu_reg.mode_value_halt = BNX2_RXP_CPU_MODE_SOFT_HALT;
2412 cpu_reg.mode_value_sstep = BNX2_RXP_CPU_MODE_STEP_ENA;
2413 cpu_reg.state = BNX2_RXP_CPU_STATE;
2414 cpu_reg.state_value_clear = 0xffffff;
2415 cpu_reg.gpr0 = BNX2_RXP_CPU_REG_FILE;
2416 cpu_reg.evmask = BNX2_RXP_CPU_EVENT_MASK;
2417 cpu_reg.pc = BNX2_RXP_CPU_PROGRAM_COUNTER;
2418 cpu_reg.inst = BNX2_RXP_CPU_INSTRUCTION;
2419 cpu_reg.bp = BNX2_RXP_CPU_HW_BREAKPOINT;
2420 cpu_reg.spad_base = BNX2_RXP_SCRATCH;
2421 cpu_reg.mips_view_base = 0x8000000;
2423 if (CHIP_NUM(bp) == CHIP_NUM_5709)
2424 fw = &bnx2_rxp_fw_09;
2425 else
2426 fw = &bnx2_rxp_fw_06;
2428 rc = load_cpu_fw(bp, &cpu_reg, fw);
2429 if (rc)
2430 goto init_cpu_err;
2432 /* Initialize the TX Processor. */
2433 cpu_reg.mode = BNX2_TXP_CPU_MODE;
2434 cpu_reg.mode_value_halt = BNX2_TXP_CPU_MODE_SOFT_HALT;
2435 cpu_reg.mode_value_sstep = BNX2_TXP_CPU_MODE_STEP_ENA;
2436 cpu_reg.state = BNX2_TXP_CPU_STATE;
2437 cpu_reg.state_value_clear = 0xffffff;
2438 cpu_reg.gpr0 = BNX2_TXP_CPU_REG_FILE;
2439 cpu_reg.evmask = BNX2_TXP_CPU_EVENT_MASK;
2440 cpu_reg.pc = BNX2_TXP_CPU_PROGRAM_COUNTER;
2441 cpu_reg.inst = BNX2_TXP_CPU_INSTRUCTION;
2442 cpu_reg.bp = BNX2_TXP_CPU_HW_BREAKPOINT;
2443 cpu_reg.spad_base = BNX2_TXP_SCRATCH;
2444 cpu_reg.mips_view_base = 0x8000000;
2446 if (CHIP_NUM(bp) == CHIP_NUM_5709)
2447 fw = &bnx2_txp_fw_09;
2448 else
2449 fw = &bnx2_txp_fw_06;
2451 rc = load_cpu_fw(bp, &cpu_reg, fw);
2452 if (rc)
2453 goto init_cpu_err;
2455 /* Initialize the TX Patch-up Processor. */
2456 cpu_reg.mode = BNX2_TPAT_CPU_MODE;
2457 cpu_reg.mode_value_halt = BNX2_TPAT_CPU_MODE_SOFT_HALT;
2458 cpu_reg.mode_value_sstep = BNX2_TPAT_CPU_MODE_STEP_ENA;
2459 cpu_reg.state = BNX2_TPAT_CPU_STATE;
2460 cpu_reg.state_value_clear = 0xffffff;
2461 cpu_reg.gpr0 = BNX2_TPAT_CPU_REG_FILE;
2462 cpu_reg.evmask = BNX2_TPAT_CPU_EVENT_MASK;
2463 cpu_reg.pc = BNX2_TPAT_CPU_PROGRAM_COUNTER;
2464 cpu_reg.inst = BNX2_TPAT_CPU_INSTRUCTION;
2465 cpu_reg.bp = BNX2_TPAT_CPU_HW_BREAKPOINT;
2466 cpu_reg.spad_base = BNX2_TPAT_SCRATCH;
2467 cpu_reg.mips_view_base = 0x8000000;
2469 if (CHIP_NUM(bp) == CHIP_NUM_5709)
2470 fw = &bnx2_tpat_fw_09;
2471 else
2472 fw = &bnx2_tpat_fw_06;
2474 rc = load_cpu_fw(bp, &cpu_reg, fw);
2475 if (rc)
2476 goto init_cpu_err;
2478 /* Initialize the Completion Processor. */
2479 cpu_reg.mode = BNX2_COM_CPU_MODE;
2480 cpu_reg.mode_value_halt = BNX2_COM_CPU_MODE_SOFT_HALT;
2481 cpu_reg.mode_value_sstep = BNX2_COM_CPU_MODE_STEP_ENA;
2482 cpu_reg.state = BNX2_COM_CPU_STATE;
2483 cpu_reg.state_value_clear = 0xffffff;
2484 cpu_reg.gpr0 = BNX2_COM_CPU_REG_FILE;
2485 cpu_reg.evmask = BNX2_COM_CPU_EVENT_MASK;
2486 cpu_reg.pc = BNX2_COM_CPU_PROGRAM_COUNTER;
2487 cpu_reg.inst = BNX2_COM_CPU_INSTRUCTION;
2488 cpu_reg.bp = BNX2_COM_CPU_HW_BREAKPOINT;
2489 cpu_reg.spad_base = BNX2_COM_SCRATCH;
2490 cpu_reg.mips_view_base = 0x8000000;
2492 if (CHIP_NUM(bp) == CHIP_NUM_5709)
2493 fw = &bnx2_com_fw_09;
2494 else
2495 fw = &bnx2_com_fw_06;
2497 rc = load_cpu_fw(bp, &cpu_reg, fw);
2498 if (rc)
2499 goto init_cpu_err;
2501 /* Initialize the Command Processor. */
2502 cpu_reg.mode = BNX2_CP_CPU_MODE;
2503 cpu_reg.mode_value_halt = BNX2_CP_CPU_MODE_SOFT_HALT;
2504 cpu_reg.mode_value_sstep = BNX2_CP_CPU_MODE_STEP_ENA;
2505 cpu_reg.state = BNX2_CP_CPU_STATE;
2506 cpu_reg.state_value_clear = 0xffffff;
2507 cpu_reg.gpr0 = BNX2_CP_CPU_REG_FILE;
2508 cpu_reg.evmask = BNX2_CP_CPU_EVENT_MASK;
2509 cpu_reg.pc = BNX2_CP_CPU_PROGRAM_COUNTER;
2510 cpu_reg.inst = BNX2_CP_CPU_INSTRUCTION;
2511 cpu_reg.bp = BNX2_CP_CPU_HW_BREAKPOINT;
2512 cpu_reg.spad_base = BNX2_CP_SCRATCH;
2513 cpu_reg.mips_view_base = 0x8000000;
2515 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
2516 fw = &bnx2_cp_fw_09;
2518 rc = load_cpu_fw(bp, &cpu_reg, fw);
2519 if (rc)
2520 goto init_cpu_err;
2522 init_cpu_err:
2523 bnx2_gunzip_end(bp);
2524 return rc;
2527 static int
2528 bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
2530 u16 pmcsr;
2532 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
2534 switch (state) {
2535 case PCI_D0: {
2536 u32 val;
2538 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
2539 (pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
2540 PCI_PM_CTRL_PME_STATUS);
2542 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
2543 /* delay required during transition out of D3hot */
2544 msleep(20);
2546 val = REG_RD(bp, BNX2_EMAC_MODE);
2547 val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
2548 val &= ~BNX2_EMAC_MODE_MPKT;
2549 REG_WR(bp, BNX2_EMAC_MODE, val);
2551 val = REG_RD(bp, BNX2_RPM_CONFIG);
2552 val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
2553 REG_WR(bp, BNX2_RPM_CONFIG, val);
2554 break;
2556 case PCI_D3hot: {
2557 int i;
2558 u32 val, wol_msg;
2560 if (bp->wol) {
2561 u32 advertising;
2562 u8 autoneg;
2564 autoneg = bp->autoneg;
2565 advertising = bp->advertising;
2567 bp->autoneg = AUTONEG_SPEED;
2568 bp->advertising = ADVERTISED_10baseT_Half |
2569 ADVERTISED_10baseT_Full |
2570 ADVERTISED_100baseT_Half |
2571 ADVERTISED_100baseT_Full |
2572 ADVERTISED_Autoneg;
2574 bnx2_setup_copper_phy(bp);
2576 bp->autoneg = autoneg;
2577 bp->advertising = advertising;
2579 bnx2_set_mac_addr(bp);
2581 val = REG_RD(bp, BNX2_EMAC_MODE);
2583 /* Enable port mode. */
2584 val &= ~BNX2_EMAC_MODE_PORT;
2585 val |= BNX2_EMAC_MODE_PORT_MII |
2586 BNX2_EMAC_MODE_MPKT_RCVD |
2587 BNX2_EMAC_MODE_ACPI_RCVD |
2588 BNX2_EMAC_MODE_MPKT;
2590 REG_WR(bp, BNX2_EMAC_MODE, val);
2592 /* receive all multicast frames */
2593 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
2594 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
2595 0xffffffff);
2597 REG_WR(bp, BNX2_EMAC_RX_MODE,
2598 BNX2_EMAC_RX_MODE_SORT_MODE);
2600 val = 1 | BNX2_RPM_SORT_USER0_BC_EN |
2601 BNX2_RPM_SORT_USER0_MC_EN;
2602 REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
2603 REG_WR(bp, BNX2_RPM_SORT_USER0, val);
2604 REG_WR(bp, BNX2_RPM_SORT_USER0, val |
2605 BNX2_RPM_SORT_USER0_ENA);
2607 /* Need to enable EMAC and RPM for WOL. */
2608 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
2609 BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
2610 BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
2611 BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);
2613 val = REG_RD(bp, BNX2_RPM_CONFIG);
2614 val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
2615 REG_WR(bp, BNX2_RPM_CONFIG, val);
2617 wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
2619 else {
2620 wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
2623 if (!(bp->flags & NO_WOL_FLAG))
2624 bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg, 0);
2626 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
2627 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
2628 (CHIP_ID(bp) == CHIP_ID_5706_A1)) {
2630 if (bp->wol)
2631 pmcsr |= 3;
2633 else {
2634 pmcsr |= 3;
2636 if (bp->wol) {
2637 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
2639 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
2640 pmcsr);
2642 /* No more memory access after this point until
2643 * device is brought back to D0.
2644 */
2645 udelay(50);
2646 break;
2648 default:
2649 return -EINVAL;
2651 return 0;
2652 }
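/* Note: per the PCI PM spec the low two bits of PMCSR select the
 * power state (00 = D0, 11 = D3hot), so "pmcsr |= 3" above requests
 * D3hot.  On 5706 A0/A1 the state bits are left at D0 unless WOL is
 * armed, and PCI_PM_CTRL_PME_ENABLE is ORed in separately whenever
 * WOL is enabled.
 */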
2654 static int
2655 bnx2_acquire_nvram_lock(struct bnx2 *bp)
2657 u32 val;
2658 int j;
2660 /* Request access to the flash interface. */
2661 REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
2662 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2663 val = REG_RD(bp, BNX2_NVM_SW_ARB);
2664 if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
2665 break;
2667 udelay(5);
2670 if (j >= NVRAM_TIMEOUT_COUNT)
2671 return -EBUSY;
2673 return 0;
2676 static int
2677 bnx2_release_nvram_lock(struct bnx2 *bp)
2679 int j;
2680 u32 val;
2682 /* Relinquish nvram interface. */
2683 REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);
2685 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2686 val = REG_RD(bp, BNX2_NVM_SW_ARB);
2687 if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
2688 break;
2690 udelay(5);
2693 if (j >= NVRAM_TIMEOUT_COUNT)
2694 return -EBUSY;
2696 return 0;
2697 }
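/* Illustrative sketch (hypothetical helper, not in the driver): the
 * NVRAM lock, unlock and write-enable routines all repeat the same
 * bounded 5 us poll on an arbitration or DONE bit.  Factored out it
 * might look like:
 *
 *	static int bnx2_poll_bit(struct bnx2 *bp, u32 reg, u32 bit, int set)
 *	{
 *		int j;
 *
 *		for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
 *			u32 val = REG_RD(bp, reg);
 *
 *			if (!!(val & bit) == set)
 *				return 0;	// bit reached the wanted state
 *			udelay(5);
 *		}
 *		return -EBUSY;			// timed out
 *	}
 */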
2700 static int
2701 bnx2_enable_nvram_write(struct bnx2 *bp)
2703 u32 val;
2705 val = REG_RD(bp, BNX2_MISC_CFG);
2706 REG_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);
2708 if (!bp->flash_info->buffered) {
2709 int j;
2711 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
2712 REG_WR(bp, BNX2_NVM_COMMAND,
2713 BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);
2715 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2716 udelay(5);
2718 val = REG_RD(bp, BNX2_NVM_COMMAND);
2719 if (val & BNX2_NVM_COMMAND_DONE)
2720 break;
2723 if (j >= NVRAM_TIMEOUT_COUNT)
2724 return -EBUSY;
2726 return 0;
2729 static void
2730 bnx2_disable_nvram_write(struct bnx2 *bp)
2732 u32 val;
2734 val = REG_RD(bp, BNX2_MISC_CFG);
2735 REG_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
2739 static void
2740 bnx2_enable_nvram_access(struct bnx2 *bp)
2742 u32 val;
2744 val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
2745 /* Enable both bits, even on read. */
2746 REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
2747 val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
2750 static void
2751 bnx2_disable_nvram_access(struct bnx2 *bp)
2753 u32 val;
2755 val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
2756 /* Disable both bits, even after read. */
2757 REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
2758 val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
2759 BNX2_NVM_ACCESS_ENABLE_WR_EN));
2762 static int
2763 bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
2765 u32 cmd;
2766 int j;
2768 if (bp->flash_info->buffered)
2769 /* Buffered flash, no erase needed */
2770 return 0;
2772 /* Build an erase command */
2773 cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
2774 BNX2_NVM_COMMAND_DOIT;
2776 /* Need to clear DONE bit separately. */
2777 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
2779 /* Address of the NVRAM page to erase. */
2780 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
2782 /* Issue an erase command. */
2783 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
2785 /* Wait for completion. */
2786 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2787 u32 val;
2789 udelay(5);
2791 val = REG_RD(bp, BNX2_NVM_COMMAND);
2792 if (val & BNX2_NVM_COMMAND_DONE)
2793 break;
2796 if (j >= NVRAM_TIMEOUT_COUNT)
2797 return -EBUSY;
2799 return 0;
2802 static int
2803 bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
2805 u32 cmd;
2806 int j;
2808 /* Build the command word. */
2809 cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;
2811 /* Calculate an offset of a buffered flash. */
2812 if (bp->flash_info->buffered) {
2813 offset = ((offset / bp->flash_info->page_size) <<
2814 bp->flash_info->page_bits) +
2815 (offset % bp->flash_info->page_size);
2818 /* Need to clear DONE bit separately. */
2819 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
2821 /* Address of the NVRAM to read from. */
2822 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
2824 /* Issue a read command. */
2825 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
2827 /* Wait for completion. */
2828 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2829 u32 val;
2831 udelay(5);
2833 val = REG_RD(bp, BNX2_NVM_COMMAND);
2834 if (val & BNX2_NVM_COMMAND_DONE) {
2835 val = REG_RD(bp, BNX2_NVM_READ);
2837 val = be32_to_cpu(val);
2838 memcpy(ret_val, &val, 4);
2839 break;
2842 if (j >= NVRAM_TIMEOUT_COUNT)
2843 return -EBUSY;
2845 return 0;
2846 }
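/* Worked example: on a buffered (Atmel-style) part the linear offset
 * is split into a page number, shifted into the high address bits,
 * plus the byte offset within the page.  Assuming the 264-byte pages
 * and page_bits = 9 of the buffered entries in flash_table, offset
 * 300 becomes:
 *
 *	page  = 300 / 264 = 1
 *	in_pg = 300 % 264 = 36
 *	addr  = (1 << 9) + 36 = 0x224
 */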
2849 static int
2850 bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
2852 u32 cmd, val32;
2853 int j;
2855 /* Build the command word. */
2856 cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;
2858 /* Calculate an offset of a buffered flash. */
2859 if (bp->flash_info->buffered) {
2860 offset = ((offset / bp->flash_info->page_size) <<
2861 bp->flash_info->page_bits) +
2862 (offset % bp->flash_info->page_size);
2865 /* Need to clear DONE bit separately. */
2866 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
2868 memcpy(&val32, val, 4);
2869 val32 = cpu_to_be32(val32);
2871 /* Write the data. */
2872 REG_WR(bp, BNX2_NVM_WRITE, val32);
2874 /* Address of the NVRAM to write to. */
2875 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
2877 /* Issue the write command. */
2878 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
2880 /* Wait for completion. */
2881 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2882 udelay(5);
2884 if (REG_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
2885 break;
2887 if (j >= NVRAM_TIMEOUT_COUNT)
2888 return -EBUSY;
2890 return 0;
2893 static int
2894 bnx2_init_nvram(struct bnx2 *bp)
2896 u32 val;
2897 int j, entry_count, rc;
2898 struct flash_spec *flash;
2900 /* Determine the selected interface. */
2901 val = REG_RD(bp, BNX2_NVM_CFG1);
2903 entry_count = sizeof(flash_table) / sizeof(struct flash_spec);
2905 rc = 0;
2906 if (val & 0x40000000) {
2908 /* Flash interface has been reconfigured */
2909 for (j = 0, flash = &flash_table[0]; j < entry_count;
2910 j++, flash++) {
2911 if ((val & FLASH_BACKUP_STRAP_MASK) ==
2912 (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
2913 bp->flash_info = flash;
2914 break;
2918 else {
2919 u32 mask;
2920 /* Not yet reconfigured */
2922 if (val & (1 << 23))
2923 mask = FLASH_BACKUP_STRAP_MASK;
2924 else
2925 mask = FLASH_STRAP_MASK;
2927 for (j = 0, flash = &flash_table[0]; j < entry_count;
2928 j++, flash++) {
2930 if ((val & mask) == (flash->strapping & mask)) {
2931 bp->flash_info = flash;
2933 /* Request access to the flash interface. */
2934 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
2935 return rc;
2937 /* Enable access to flash interface */
2938 bnx2_enable_nvram_access(bp);
2940 /* Reconfigure the flash interface */
2941 REG_WR(bp, BNX2_NVM_CFG1, flash->config1);
2942 REG_WR(bp, BNX2_NVM_CFG2, flash->config2);
2943 REG_WR(bp, BNX2_NVM_CFG3, flash->config3);
2944 REG_WR(bp, BNX2_NVM_WRITE1, flash->write1);
2946 /* Disable access to flash interface */
2947 bnx2_disable_nvram_access(bp);
2948 bnx2_release_nvram_lock(bp);
2950 break;
2953 } /* if (val & 0x40000000) */
2955 if (j == entry_count) {
2956 bp->flash_info = NULL;
2957 printk(KERN_ALERT PFX "Unknown flash/EEPROM type.\n");
2958 return -ENODEV;
2961 val = REG_RD_IND(bp, bp->shmem_base + BNX2_SHARED_HW_CFG_CONFIG2);
2962 val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
2963 if (val)
2964 bp->flash_size = val;
2965 else
2966 bp->flash_size = bp->flash_info->total_size;
2968 return rc;
2971 static int
2972 bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
2973 int buf_size)
2975 int rc = 0;
2976 u32 cmd_flags, offset32, len32, extra;
2978 if (buf_size == 0)
2979 return 0;
2981 /* Request access to the flash interface. */
2982 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
2983 return rc;
2985 /* Enable access to flash interface */
2986 bnx2_enable_nvram_access(bp);
2988 len32 = buf_size;
2989 offset32 = offset;
2990 extra = 0;
2992 cmd_flags = 0;
2994 if (offset32 & 3) {
2995 u8 buf[4];
2996 u32 pre_len;
2998 offset32 &= ~3;
2999 pre_len = 4 - (offset & 3);
3001 if (pre_len >= len32) {
3002 pre_len = len32;
3003 cmd_flags = BNX2_NVM_COMMAND_FIRST |
3004 BNX2_NVM_COMMAND_LAST;
3006 else {
3007 cmd_flags = BNX2_NVM_COMMAND_FIRST;
3010 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3012 if (rc)
3013 return rc;
3015 memcpy(ret_buf, buf + (offset & 3), pre_len);
3017 offset32 += 4;
3018 ret_buf += pre_len;
3019 len32 -= pre_len;
3021 if (len32 & 3) {
3022 extra = 4 - (len32 & 3);
3023 len32 = (len32 + 4) & ~3;
3026 if (len32 == 4) {
3027 u8 buf[4];
3029 if (cmd_flags)
3030 cmd_flags = BNX2_NVM_COMMAND_LAST;
3031 else
3032 cmd_flags = BNX2_NVM_COMMAND_FIRST |
3033 BNX2_NVM_COMMAND_LAST;
3035 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3037 memcpy(ret_buf, buf, 4 - extra);
3039 else if (len32 > 0) {
3040 u8 buf[4];
3042 /* Read the first word. */
3043 if (cmd_flags)
3044 cmd_flags = 0;
3045 else
3046 cmd_flags = BNX2_NVM_COMMAND_FIRST;
3048 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);
3050 /* Advance to the next dword. */
3051 offset32 += 4;
3052 ret_buf += 4;
3053 len32 -= 4;
3055 while (len32 > 4 && rc == 0) {
3056 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);
3058 /* Advance to the next dword. */
3059 offset32 += 4;
3060 ret_buf += 4;
3061 len32 -= 4;
3064 if (rc)
3065 return rc;
3067 cmd_flags = BNX2_NVM_COMMAND_LAST;
3068 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3070 memcpy(ret_buf, buf, 4 - extra);
3073 /* Disable access to flash interface */
3074 bnx2_disable_nvram_access(bp);
3076 bnx2_release_nvram_lock(bp);
3078 return rc;
3079 }
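/* Worked example: a 3-byte read at offset 6 is widened to two dword
 * reads.  The head read fetches the dword at offset 4 and keeps bytes
 * 2-3 (pre_len = 2); the remaining length of 1 is padded to 4
 * (extra = 3) and the tail read fetches the dword at offset 8,
 * keeping only its first byte.  The FIRST/LAST command flags bracket
 * the whole sequence for the flash state machine.
 */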
3081 static int
3082 bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
3083 int buf_size)
3085 u32 written, offset32, len32;
3086 u8 *buf, start[4], end[4], *flash_buffer = NULL;
3087 int rc = 0;
3088 int align_start, align_end;
3090 buf = data_buf;
3091 offset32 = offset;
3092 len32 = buf_size;
3093 align_start = align_end = 0;
3095 if ((align_start = (offset32 & 3))) {
3096 offset32 &= ~3;
3097 len32 += (4 - align_start);
3098 if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
3099 return rc;
3102 if (len32 & 3) {
3103 if ((len32 > 4) || !align_start) {
3104 align_end = 4 - (len32 & 3);
3105 len32 += align_end;
3106 if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4,
3107 end, 4))) {
3108 return rc;
3113 if (align_start || align_end) {
3114 buf = kmalloc(len32, GFP_KERNEL);
3115 if (buf == NULL)
3116 return -ENOMEM;
3117 if (align_start) {
3118 memcpy(buf, start, 4);
3120 if (align_end) {
3121 memcpy(buf + len32 - 4, end, 4);
3123 memcpy(buf + align_start, data_buf, buf_size);
3126 if (bp->flash_info->buffered == 0) {
3127 flash_buffer = kmalloc(264, GFP_KERNEL);
3128 if (flash_buffer == NULL) {
3129 rc = -ENOMEM;
3130 goto nvram_write_end;
3134 written = 0;
3135 while ((written < len32) && (rc == 0)) {
3136 u32 page_start, page_end, data_start, data_end;
3137 u32 addr, cmd_flags;
3138 int i;
3140 /* Find the page_start addr */
3141 page_start = offset32 + written;
3142 page_start -= (page_start % bp->flash_info->page_size);
3143 /* Find the page_end addr */
3144 page_end = page_start + bp->flash_info->page_size;
3145 /* Find the data_start addr */
3146 data_start = (written == 0) ? offset32 : page_start;
3147 /* Find the data_end addr */
3148 data_end = (page_end > offset32 + len32) ?
3149 (offset32 + len32) : page_end;
3151 /* Request access to the flash interface. */
3152 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
3153 goto nvram_write_end;
3155 /* Enable access to flash interface */
3156 bnx2_enable_nvram_access(bp);
3158 cmd_flags = BNX2_NVM_COMMAND_FIRST;
3159 if (bp->flash_info->buffered == 0) {
3160 int j;
3162 /* Read the whole page into the buffer
3163 * (non-buffered flash only) */
3164 for (j = 0; j < bp->flash_info->page_size; j += 4) {
3165 if (j == (bp->flash_info->page_size - 4)) {
3166 cmd_flags |= BNX2_NVM_COMMAND_LAST;
3168 rc = bnx2_nvram_read_dword(bp,
3169 page_start + j,
3170 &flash_buffer[j],
3171 cmd_flags);
3173 if (rc)
3174 goto nvram_write_end;
3176 cmd_flags = 0;
3180 /* Enable writes to flash interface (unlock write-protect) */
3181 if ((rc = bnx2_enable_nvram_write(bp)) != 0)
3182 goto nvram_write_end;
3184 /* Erase the page */
3185 if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
3186 goto nvram_write_end;
3188 /* Re-enable the write again for the actual write */
3189 bnx2_enable_nvram_write(bp);
3191 /* Loop to write back the buffer data from page_start to
3192 * data_start */
3193 i = 0;
3194 if (bp->flash_info->buffered == 0) {
3195 for (addr = page_start; addr < data_start;
3196 addr += 4, i += 4) {
3198 rc = bnx2_nvram_write_dword(bp, addr,
3199 &flash_buffer[i], cmd_flags);
3201 if (rc != 0)
3202 goto nvram_write_end;
3204 cmd_flags = 0;
3208 /* Loop to write the new data from data_start to data_end */
3209 for (addr = data_start; addr < data_end; addr += 4, i += 4) {
3210 if ((addr == page_end - 4) ||
3211 ((bp->flash_info->buffered) &&
3212 (addr == data_end - 4))) {
3214 cmd_flags |= BNX2_NVM_COMMAND_LAST;
3216 rc = bnx2_nvram_write_dword(bp, addr, buf,
3217 cmd_flags);
3219 if (rc != 0)
3220 goto nvram_write_end;
3222 cmd_flags = 0;
3223 buf += 4;
3226 /* Loop to write back the buffer data from data_end
3227 * to page_end */
3228 if (bp->flash_info->buffered == 0) {
3229 for (addr = data_end; addr < page_end;
3230 addr += 4, i += 4) {
3232 if (addr == page_end-4) {
3233 cmd_flags = BNX2_NVM_COMMAND_LAST;
3235 rc = bnx2_nvram_write_dword(bp, addr,
3236 &flash_buffer[i], cmd_flags);
3238 if (rc != 0)
3239 goto nvram_write_end;
3241 cmd_flags = 0;
3245 /* Disable writes to flash interface (lock write-protect) */
3246 bnx2_disable_nvram_write(bp);
3248 /* Disable access to flash interface */
3249 bnx2_disable_nvram_access(bp);
3250 bnx2_release_nvram_lock(bp);
3252 /* Increment written */
3253 written += data_end - data_start;
3256 nvram_write_end:
3257 if (bp->flash_info->buffered == 0)
3258 kfree(flash_buffer);
3260 if (align_start || align_end)
3261 kfree(buf);
3262 return rc;
3263 }
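/* Worked example: writing 3 bytes at offset 5 gives align_start = 1
 * and, after padding, len32 = 8 with align_end = 2.  The temporary
 * buffer is assembled as
 *
 *	buf[0]    = old byte at offset 4	(from start[])
 *	buf[1..3] = the three new bytes		(copied at buf + align_start)
 *	buf[4..7] = old dword at offset 8	(from end[])
 *
 * so the dwords at offsets 4 and 8 can be rewritten whole without
 * clobbering the bytes around the new data.
 */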
3265 static int
3266 bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
3268 u32 val;
3269 int i, rc = 0;
3271 /* Wait for the current PCI transaction to complete before
3272 * issuing a reset. */
3273 REG_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
3274 BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
3275 BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
3276 BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
3277 BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
3278 val = REG_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
3279 udelay(5);
3281 /* Wait for the firmware to tell us it is ok to issue a reset. */
3282 bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1);
3284 /* Deposit a driver reset signature so the firmware knows that
3285 * this is a soft reset. */
3286 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_RESET_SIGNATURE,
3287 BNX2_DRV_RESET_SIGNATURE_MAGIC);
3289 /* Do a dummy read to force the chip to complete all current transactions
3290 * before we issue a reset. */
3291 val = REG_RD(bp, BNX2_MISC_ID);
3293 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3294 REG_WR(bp, BNX2_MISC_COMMAND, BNX2_MISC_COMMAND_SW_RESET);
3295 REG_RD(bp, BNX2_MISC_COMMAND);
3296 udelay(5);
3298 val = BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
3299 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
3301 pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, val);
3303 } else {
3304 val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3305 BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
3306 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
3308 /* Chip reset. */
3309 REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);
3311 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
3312 (CHIP_ID(bp) == CHIP_ID_5706_A1)) {
3313 current->state = TASK_UNINTERRUPTIBLE;
3314 schedule_timeout(HZ / 50);
3317 /* Reset takes approximately 30 usec */
3318 for (i = 0; i < 10; i++) {
3319 val = REG_RD(bp, BNX2_PCICFG_MISC_CONFIG);
3320 if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3321 BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0)
3322 break;
3323 udelay(10);
3326 if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3327 BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
3328 printk(KERN_ERR PFX "Chip reset did not complete\n");
3329 return -EBUSY;
3333 /* Make sure byte swapping is properly configured. */
3334 val = REG_RD(bp, BNX2_PCI_SWAP_DIAG0);
3335 if (val != 0x01020304) {
3336 printk(KERN_ERR PFX "Chip not in correct endian mode\n");
3337 return -ENODEV;
3340 /* Wait for the firmware to finish its initialization. */
3341 rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 0);
3342 if (rc)
3343 return rc;
3345 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
3346 /* Adjust the voltage regulator two steps lower. The default
3347 * value of this register is 0x0000000e. */
3348 REG_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);
3350 /* Remove bad rbuf memory from the free pool. */
3351 rc = bnx2_alloc_bad_rbuf(bp);
3354 return rc;
3357 static int
3358 bnx2_init_chip(struct bnx2 *bp)
3360 u32 val;
3361 int rc;
3363 /* Make sure the interrupt is not active. */
3364 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
3366 val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
3367 BNX2_DMA_CONFIG_DATA_WORD_SWAP |
3368 #ifdef __BIG_ENDIAN
3369 BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
3370 #endif
3371 BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
3372 DMA_READ_CHANS << 12 |
3373 DMA_WRITE_CHANS << 16;
3375 val |= (0x2 << 20) | (1 << 11);
3377 if ((bp->flags & PCIX_FLAG) && (bp->bus_speed_mhz == 133))
3378 val |= (1 << 23);
3380 if ((CHIP_NUM(bp) == CHIP_NUM_5706) &&
3381 (CHIP_ID(bp) != CHIP_ID_5706_A0) && !(bp->flags & PCIX_FLAG))
3382 val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;
3384 REG_WR(bp, BNX2_DMA_CONFIG, val);
3386 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
3387 val = REG_RD(bp, BNX2_TDMA_CONFIG);
3388 val |= BNX2_TDMA_CONFIG_ONE_DMA;
3389 REG_WR(bp, BNX2_TDMA_CONFIG, val);
3392 if (bp->flags & PCIX_FLAG) {
3393 u16 val16;
3395 pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
3396 &val16);
3397 pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
3398 val16 & ~PCI_X_CMD_ERO);
3401 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
3402 BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
3403 BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
3404 BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);
3406 /* Initialize context mapping and zero out the quick contexts. The
3407 * context block must have already been enabled. */
3408 if (CHIP_NUM(bp) == CHIP_NUM_5709)
3409 bnx2_init_5709_context(bp);
3410 else
3411 bnx2_init_context(bp);
3413 if ((rc = bnx2_init_cpus(bp)) != 0)
3414 return rc;
3416 bnx2_init_nvram(bp);
3418 bnx2_set_mac_addr(bp);
3420 val = REG_RD(bp, BNX2_MQ_CONFIG);
3421 val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
3422 val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
3423 REG_WR(bp, BNX2_MQ_CONFIG, val);
3425 val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
3426 REG_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
3427 REG_WR(bp, BNX2_MQ_KNL_WIND_END, val);
3429 val = (BCM_PAGE_BITS - 8) << 24;
3430 REG_WR(bp, BNX2_RV2P_CONFIG, val);
3432 /* Configure page size. */
3433 val = REG_RD(bp, BNX2_TBDR_CONFIG);
3434 val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
3435 val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
3436 REG_WR(bp, BNX2_TBDR_CONFIG, val);
3438 val = bp->mac_addr[0] +
3439 (bp->mac_addr[1] << 8) +
3440 (bp->mac_addr[2] << 16) +
3441 bp->mac_addr[3] +
3442 (bp->mac_addr[4] << 8) +
3443 (bp->mac_addr[5] << 16);
3444 REG_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);
3446 /* Program the MTU. Also include 4 bytes for CRC32. */
3447 val = bp->dev->mtu + ETH_HLEN + 4;
3448 if (val > (MAX_ETHERNET_PACKET_SIZE + 4))
3449 val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
3450 REG_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);
3452 bp->last_status_idx = 0;
3453 bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE;
3455 /* Set up how to generate a link change interrupt. */
3456 REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
3458 REG_WR(bp, BNX2_HC_STATUS_ADDR_L,
3459 (u64) bp->status_blk_mapping & 0xffffffff);
3460 REG_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);
3462 REG_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
3463 (u64) bp->stats_blk_mapping & 0xffffffff);
3464 REG_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
3465 (u64) bp->stats_blk_mapping >> 32);
3467 REG_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
3468 (bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);
3470 REG_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
3471 (bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);
3473 REG_WR(bp, BNX2_HC_COMP_PROD_TRIP,
3474 (bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);
3476 REG_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);
3478 REG_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);
3480 REG_WR(bp, BNX2_HC_COM_TICKS,
3481 (bp->com_ticks_int << 16) | bp->com_ticks);
3483 REG_WR(bp, BNX2_HC_CMD_TICKS,
3484 (bp->cmd_ticks_int << 16) | bp->cmd_ticks);
3486 REG_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks & 0xffff00);
3487 REG_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8); /* 3ms */
3489 if (CHIP_ID(bp) == CHIP_ID_5706_A1)
3490 REG_WR(bp, BNX2_HC_CONFIG, BNX2_HC_CONFIG_COLLECT_STATS);
3491 else {
3492 REG_WR(bp, BNX2_HC_CONFIG, BNX2_HC_CONFIG_RX_TMR_MODE |
3493 BNX2_HC_CONFIG_TX_TMR_MODE |
3494 BNX2_HC_CONFIG_COLLECT_STATS);
3497 /* Clear internal stats counters. */
3498 REG_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);
3500 REG_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_BITS_LINK_STATE);
3502 if (REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_FEATURE) &
3503 BNX2_PORT_FEATURE_ASF_ENABLED)
3504 bp->flags |= ASF_ENABLE_FLAG;
3506 /* Initialize the receive filter. */
3507 bnx2_set_rx_mode(bp->dev);
3509 rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
3512 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, 0x5ffffff);
3513 REG_RD(bp, BNX2_MISC_ENABLE_SET_BITS);
3515 udelay(20);
3517 bp->hc_cmd = REG_RD(bp, BNX2_HC_COMMAND);
3519 return rc;
3522 static void
3523 bnx2_init_tx_context(struct bnx2 *bp, u32 cid)
3525 u32 val, offset0, offset1, offset2, offset3;
3527 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3528 offset0 = BNX2_L2CTX_TYPE_XI;
3529 offset1 = BNX2_L2CTX_CMD_TYPE_XI;
3530 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
3531 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
3532 } else {
3533 offset0 = BNX2_L2CTX_TYPE;
3534 offset1 = BNX2_L2CTX_CMD_TYPE;
3535 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
3536 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
3538 val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
3539 CTX_WR(bp, GET_CID_ADDR(cid), offset0, val);
3541 val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
3542 CTX_WR(bp, GET_CID_ADDR(cid), offset1, val);
3544 val = (u64) bp->tx_desc_mapping >> 32;
3545 CTX_WR(bp, GET_CID_ADDR(cid), offset2, val);
3547 val = (u64) bp->tx_desc_mapping & 0xffffffff;
3548 CTX_WR(bp, GET_CID_ADDR(cid), offset3, val);
3551 static void
3552 bnx2_init_tx_ring(struct bnx2 *bp)
3554 struct tx_bd *txbd;
3555 u32 cid;
3557 bp->tx_wake_thresh = bp->tx_ring_size / 2;
3559 txbd = &bp->tx_desc_ring[MAX_TX_DESC_CNT];
3561 txbd->tx_bd_haddr_hi = (u64) bp->tx_desc_mapping >> 32;
3562 txbd->tx_bd_haddr_lo = (u64) bp->tx_desc_mapping & 0xffffffff;
3564 bp->tx_prod = 0;
3565 bp->tx_cons = 0;
3566 bp->hw_tx_cons = 0;
3567 bp->tx_prod_bseq = 0;
3569 cid = TX_CID;
3570 bp->tx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BIDX;
3571 bp->tx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BSEQ;
3573 bnx2_init_tx_context(bp, cid);
3576 static void
3577 bnx2_init_rx_ring(struct bnx2 *bp)
3579 struct rx_bd *rxbd;
3580 int i;
3581 u16 prod, ring_prod;
3582 u32 val;
3584 /* 8 for CRC and VLAN */
3585 bp->rx_buf_use_size = bp->dev->mtu + ETH_HLEN + bp->rx_offset + 8;
3586 /* hw alignment */
3587 bp->rx_buf_size = bp->rx_buf_use_size + BNX2_RX_ALIGN;
3589 ring_prod = prod = bp->rx_prod = 0;
3590 bp->rx_cons = 0;
3591 bp->hw_rx_cons = 0;
3592 bp->rx_prod_bseq = 0;
3594 for (i = 0; i < bp->rx_max_ring; i++) {
3595 int j;
3597 rxbd = &bp->rx_desc_ring[i][0];
3598 for (j = 0; j < MAX_RX_DESC_CNT; j++, rxbd++) {
3599 rxbd->rx_bd_len = bp->rx_buf_use_size;
3600 rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
3602 if (i == (bp->rx_max_ring - 1))
3603 j = 0;
3604 else
3605 j = i + 1;
3606 rxbd->rx_bd_haddr_hi = (u64) bp->rx_desc_mapping[j] >> 32;
3607 rxbd->rx_bd_haddr_lo = (u64) bp->rx_desc_mapping[j] &
3608 0xffffffff;
3611 val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
3612 val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
3613 val |= 0x02 << 8;
3614 CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_CTX_TYPE, val);
3616 val = (u64) bp->rx_desc_mapping[0] >> 32;
3617 CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_NX_BDHADDR_HI, val);
3619 val = (u64) bp->rx_desc_mapping[0] & 0xffffffff;
3620 CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_NX_BDHADDR_LO, val);
3622 for (i = 0; i < bp->rx_ring_size; i++) {
3623 if (bnx2_alloc_rx_skb(bp, ring_prod) < 0) {
3624 break;
3626 prod = NEXT_RX_BD(prod);
3627 ring_prod = RX_RING_IDX(prod);
3629 bp->rx_prod = prod;
3631 REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, prod);
3633 REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bp->rx_prod_bseq);
3636 static void
3637 bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
3639 u32 num_rings, max;
3641 bp->rx_ring_size = size;
3642 num_rings = 1;
3643 while (size > MAX_RX_DESC_CNT) {
3644 size -= MAX_RX_DESC_CNT;
3645 num_rings++;
3647 /* round to next power of 2 */
3648 max = MAX_RX_RINGS;
3649 while ((max & num_rings) == 0)
3650 max >>= 1;
3652 if (num_rings != max)
3653 max <<= 1;
3655 bp->rx_max_ring = max;
3656 bp->rx_max_ring_idx = (bp->rx_max_ring * RX_DESC_CNT) - 1;
3657 }
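/* Worked example (assuming MAX_RX_DESC_CNT = 255 and MAX_RX_RINGS = 4,
 * the values in this driver's header): size = 600 needs num_rings = 3.
 * The while loop shifts max down to 2, the highest power of two that
 * shares a bit with 3, and since num_rings != max it doubles back up,
 * leaving bp->rx_max_ring = 4.
 */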
3659 static void
3660 bnx2_free_tx_skbs(struct bnx2 *bp)
3662 int i;
3664 if (bp->tx_buf_ring == NULL)
3665 return;
3667 for (i = 0; i < TX_DESC_CNT; ) {
3668 struct sw_bd *tx_buf = &bp->tx_buf_ring[i];
3669 struct sk_buff *skb = tx_buf->skb;
3670 int j, last;
3672 if (skb == NULL) {
3673 i++;
3674 continue;
3677 pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
3678 skb_headlen(skb), PCI_DMA_TODEVICE);
3680 tx_buf->skb = NULL;
3682 last = skb_shinfo(skb)->nr_frags;
3683 for (j = 0; j < last; j++) {
3684 tx_buf = &bp->tx_buf_ring[i + j + 1];
3685 pci_unmap_page(bp->pdev,
3686 pci_unmap_addr(tx_buf, mapping),
3687 skb_shinfo(skb)->frags[j].size,
3688 PCI_DMA_TODEVICE);
3690 dev_kfree_skb(skb);
3691 i += j + 1;
3696 static void
3697 bnx2_free_rx_skbs(struct bnx2 *bp)
3699 int i;
3701 if (bp->rx_buf_ring == NULL)
3702 return;
3704 for (i = 0; i < bp->rx_max_ring_idx; i++) {
3705 struct sw_bd *rx_buf = &bp->rx_buf_ring[i];
3706 struct sk_buff *skb = rx_buf->skb;
3708 if (skb == NULL)
3709 continue;
3711 pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
3712 bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
3714 rx_buf->skb = NULL;
3716 dev_kfree_skb(skb);
3720 static void
3721 bnx2_free_skbs(struct bnx2 *bp)
3723 bnx2_free_tx_skbs(bp);
3724 bnx2_free_rx_skbs(bp);
3727 static int
3728 bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
3730 int rc;
3732 rc = bnx2_reset_chip(bp, reset_code);
3733 bnx2_free_skbs(bp);
3734 if (rc)
3735 return rc;
3737 if ((rc = bnx2_init_chip(bp)) != 0)
3738 return rc;
3740 bnx2_init_tx_ring(bp);
3741 bnx2_init_rx_ring(bp);
3742 return 0;
3745 static int
3746 bnx2_init_nic(struct bnx2 *bp)
3748 int rc;
3750 if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
3751 return rc;
3753 spin_lock_bh(&bp->phy_lock);
3754 bnx2_init_phy(bp);
3755 spin_unlock_bh(&bp->phy_lock);
3756 bnx2_set_link(bp);
3757 return 0;
3760 static int
3761 bnx2_test_registers(struct bnx2 *bp)
3763 int ret;
3764 int i;
3765 static const struct {
3766 u16 offset;
3767 u16 flags;
3768 u32 rw_mask;
3769 u32 ro_mask;
3770 } reg_tbl[] = {
3771 { 0x006c, 0, 0x00000000, 0x0000003f },
3772 { 0x0090, 0, 0xffffffff, 0x00000000 },
3773 { 0x0094, 0, 0x00000000, 0x00000000 },
3775 { 0x0404, 0, 0x00003f00, 0x00000000 },
3776 { 0x0418, 0, 0x00000000, 0xffffffff },
3777 { 0x041c, 0, 0x00000000, 0xffffffff },
3778 { 0x0420, 0, 0x00000000, 0x80ffffff },
3779 { 0x0424, 0, 0x00000000, 0x00000000 },
3780 { 0x0428, 0, 0x00000000, 0x00000001 },
3781 { 0x0450, 0, 0x00000000, 0x0000ffff },
3782 { 0x0454, 0, 0x00000000, 0xffffffff },
3783 { 0x0458, 0, 0x00000000, 0xffffffff },
3785 { 0x0808, 0, 0x00000000, 0xffffffff },
3786 { 0x0854, 0, 0x00000000, 0xffffffff },
3787 { 0x0868, 0, 0x00000000, 0x77777777 },
3788 { 0x086c, 0, 0x00000000, 0x77777777 },
3789 { 0x0870, 0, 0x00000000, 0x77777777 },
3790 { 0x0874, 0, 0x00000000, 0x77777777 },
3792 { 0x0c00, 0, 0x00000000, 0x00000001 },
3793 { 0x0c04, 0, 0x00000000, 0x03ff0001 },
3794 { 0x0c08, 0, 0x0f0ff073, 0x00000000 },
3796 { 0x1000, 0, 0x00000000, 0x00000001 },
3797 { 0x1004, 0, 0x00000000, 0x000f0001 },
3799 { 0x1408, 0, 0x01c00800, 0x00000000 },
3800 { 0x149c, 0, 0x8000ffff, 0x00000000 },
3801 { 0x14a8, 0, 0x00000000, 0x000001ff },
3802 { 0x14ac, 0, 0x0fffffff, 0x10000000 },
3803 { 0x14b0, 0, 0x00000002, 0x00000001 },
3804 { 0x14b8, 0, 0x00000000, 0x00000000 },
3805 { 0x14c0, 0, 0x00000000, 0x00000009 },
3806 { 0x14c4, 0, 0x00003fff, 0x00000000 },
3807 { 0x14cc, 0, 0x00000000, 0x00000001 },
3808 { 0x14d0, 0, 0xffffffff, 0x00000000 },
3810 { 0x1800, 0, 0x00000000, 0x00000001 },
3811 { 0x1804, 0, 0x00000000, 0x00000003 },
3813 { 0x2800, 0, 0x00000000, 0x00000001 },
3814 { 0x2804, 0, 0x00000000, 0x00003f01 },
3815 { 0x2808, 0, 0x0f3f3f03, 0x00000000 },
3816 { 0x2810, 0, 0xffff0000, 0x00000000 },
3817 { 0x2814, 0, 0xffff0000, 0x00000000 },
3818 { 0x2818, 0, 0xffff0000, 0x00000000 },
3819 { 0x281c, 0, 0xffff0000, 0x00000000 },
3820 { 0x2834, 0, 0xffffffff, 0x00000000 },
3821 { 0x2840, 0, 0x00000000, 0xffffffff },
3822 { 0x2844, 0, 0x00000000, 0xffffffff },
3823 { 0x2848, 0, 0xffffffff, 0x00000000 },
3824 { 0x284c, 0, 0xf800f800, 0x07ff07ff },
3826 { 0x2c00, 0, 0x00000000, 0x00000011 },
3827 { 0x2c04, 0, 0x00000000, 0x00030007 },
3829 { 0x3c00, 0, 0x00000000, 0x00000001 },
3830 { 0x3c04, 0, 0x00000000, 0x00070000 },
3831 { 0x3c08, 0, 0x00007f71, 0x07f00000 },
3832 { 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
3833 { 0x3c10, 0, 0xffffffff, 0x00000000 },
3834 { 0x3c14, 0, 0x00000000, 0xffffffff },
3835 { 0x3c18, 0, 0x00000000, 0xffffffff },
3836 { 0x3c1c, 0, 0xfffff000, 0x00000000 },
3837 { 0x3c20, 0, 0xffffff00, 0x00000000 },
3839 { 0x5004, 0, 0x00000000, 0x0000007f },
3840 { 0x5008, 0, 0x0f0007ff, 0x00000000 },
3841 { 0x500c, 0, 0xf800f800, 0x07ff07ff },
3843 { 0x5c00, 0, 0x00000000, 0x00000001 },
3844 { 0x5c04, 0, 0x00000000, 0x0003000f },
3845 { 0x5c08, 0, 0x00000003, 0x00000000 },
3846 { 0x5c0c, 0, 0x0000fff8, 0x00000000 },
3847 { 0x5c10, 0, 0x00000000, 0xffffffff },
3848 { 0x5c80, 0, 0x00000000, 0x0f7113f1 },
3849 { 0x5c84, 0, 0x00000000, 0x0000f333 },
3850 { 0x5c88, 0, 0x00000000, 0x00077373 },
3851 { 0x5c8c, 0, 0x00000000, 0x0007f737 },
3853 { 0x6808, 0, 0x0000ff7f, 0x00000000 },
3854 { 0x680c, 0, 0xffffffff, 0x00000000 },
3855 { 0x6810, 0, 0xffffffff, 0x00000000 },
3856 { 0x6814, 0, 0xffffffff, 0x00000000 },
3857 { 0x6818, 0, 0xffffffff, 0x00000000 },
3858 { 0x681c, 0, 0xffffffff, 0x00000000 },
3859 { 0x6820, 0, 0x00ff00ff, 0x00000000 },
3860 { 0x6824, 0, 0x00ff00ff, 0x00000000 },
3861 { 0x6828, 0, 0x00ff00ff, 0x00000000 },
3862 { 0x682c, 0, 0x03ff03ff, 0x00000000 },
3863 { 0x6830, 0, 0x03ff03ff, 0x00000000 },
3864 { 0x6834, 0, 0x03ff03ff, 0x00000000 },
3865 { 0x6838, 0, 0x03ff03ff, 0x00000000 },
3866 { 0x683c, 0, 0x0000ffff, 0x00000000 },
3867 { 0x6840, 0, 0x00000ff0, 0x00000000 },
3868 { 0x6844, 0, 0x00ffff00, 0x00000000 },
3869 { 0x684c, 0, 0xffffffff, 0x00000000 },
3870 { 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
3871 { 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
3872 { 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
3873 { 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
3874 { 0x6908, 0, 0x00000000, 0x0001ff0f },
3875 { 0x690c, 0, 0x00000000, 0x0ffe00f0 },
3877 { 0xffff, 0, 0x00000000, 0x00000000 },
3880 ret = 0;
3881 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
3882 u32 offset, rw_mask, ro_mask, save_val, val;
3884 offset = (u32) reg_tbl[i].offset;
3885 rw_mask = reg_tbl[i].rw_mask;
3886 ro_mask = reg_tbl[i].ro_mask;
3888 save_val = readl(bp->regview + offset);
3890 writel(0, bp->regview + offset);
3892 val = readl(bp->regview + offset);
3893 if ((val & rw_mask) != 0) {
3894 goto reg_test_err;
3897 if ((val & ro_mask) != (save_val & ro_mask)) {
3898 goto reg_test_err;
3901 writel(0xffffffff, bp->regview + offset);
3903 val = readl(bp->regview + offset);
3904 if ((val & rw_mask) != rw_mask) {
3905 goto reg_test_err;
3908 if ((val & ro_mask) != (save_val & ro_mask)) {
3909 goto reg_test_err;
3912 writel(save_val, bp->regview + offset);
3913 continue;
3915 reg_test_err:
3916 writel(save_val, bp->regview + offset);
3917 ret = -ENODEV;
3918 break;
3920 return ret;
3921 }
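/* Worked example: for the entry { 0x1408, 0, 0x01c00800, 0x00000000 }
 * the test writes 0 and expects the rw_mask bits to read back as 0,
 * then writes 0xffffffff and expects those same bits (0x01c00800) to
 * read back set.  Bits in ro_mask must instead keep the value saved
 * before the test, and the saved value is restored afterwards either
 * way.
 */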
3923 static int
3924 bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
3926 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
3927 0xaaaaaaaa, 0xaa55aa55, 0x55aa55aa };
3928 int i;
3930 for (i = 0; i < sizeof(test_pattern) / 4; i++) {
3931 u32 offset;
3933 for (offset = 0; offset < size; offset += 4) {
3935 REG_WR_IND(bp, start + offset, test_pattern[i]);
3937 if (REG_RD_IND(bp, start + offset) !=
3938 test_pattern[i]) {
3939 return -ENODEV;
3943 return 0;
3944 }
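/* Note: the six patterns (all-zeros, all-ones, 0x55/0xaa and their
 * 16-bit mixes) drive every cell in both directions and neighbouring
 * bits to opposite values, a quick screen for stuck-at and coupling
 * faults in the on-chip memories listed in mem_tbl below.
 */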
3946 static int
3947 bnx2_test_memory(struct bnx2 *bp)
3949 int ret = 0;
3950 int i;
3951 static const struct {
3952 u32 offset;
3953 u32 len;
3954 } mem_tbl[] = {
3955 { 0x60000, 0x4000 },
3956 { 0xa0000, 0x3000 },
3957 { 0xe0000, 0x4000 },
3958 { 0x120000, 0x4000 },
3959 { 0x1a0000, 0x4000 },
3960 { 0x160000, 0x4000 },
3961 { 0xffffffff, 0 },
3964 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
3965 if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
3966 mem_tbl[i].len)) != 0) {
3967 return ret;
3971 return ret;
3974 #define BNX2_MAC_LOOPBACK 0
3975 #define BNX2_PHY_LOOPBACK 1
3977 static int
3978 bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
3980 unsigned int pkt_size, num_pkts, i;
3981 struct sk_buff *skb, *rx_skb;
3982 unsigned char *packet;
3983 u16 rx_start_idx, rx_idx;
3984 dma_addr_t map;
3985 struct tx_bd *txbd;
3986 struct sw_bd *rx_buf;
3987 struct l2_fhdr *rx_hdr;
3988 int ret = -ENODEV;
3990 if (loopback_mode == BNX2_MAC_LOOPBACK) {
3991 bp->loopback = MAC_LOOPBACK;
3992 bnx2_set_mac_loopback(bp);
3994 else if (loopback_mode == BNX2_PHY_LOOPBACK) {
3995 bp->loopback = PHY_LOOPBACK;
3996 bnx2_set_phy_loopback(bp);
3998 else
3999 return -EINVAL;
4001 pkt_size = 1514;
4002 skb = netdev_alloc_skb(bp->dev, pkt_size);
4003 if (!skb)
4004 return -ENOMEM;
4005 packet = skb_put(skb, pkt_size);
4006 memcpy(packet, bp->dev->dev_addr, 6);
4007 memset(packet + 6, 0x0, 8);
4008 for (i = 14; i < pkt_size; i++)
4009 packet[i] = (unsigned char) (i & 0xff);
4011 map = pci_map_single(bp->pdev, skb->data, pkt_size,
4012 PCI_DMA_TODEVICE);
4014 REG_WR(bp, BNX2_HC_COMMAND,
4015 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
4017 REG_RD(bp, BNX2_HC_COMMAND);
4019 udelay(5);
4020 rx_start_idx = bp->status_blk->status_rx_quick_consumer_index0;
4022 num_pkts = 0;
4024 txbd = &bp->tx_desc_ring[TX_RING_IDX(bp->tx_prod)];
4026 txbd->tx_bd_haddr_hi = (u64) map >> 32;
4027 txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
4028 txbd->tx_bd_mss_nbytes = pkt_size;
4029 txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;
4031 num_pkts++;
4032 bp->tx_prod = NEXT_TX_BD(bp->tx_prod);
4033 bp->tx_prod_bseq += pkt_size;
4035 REG_WR16(bp, bp->tx_bidx_addr, bp->tx_prod);
4036 REG_WR(bp, bp->tx_bseq_addr, bp->tx_prod_bseq);
4038 udelay(100);
4040 REG_WR(bp, BNX2_HC_COMMAND,
4041 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
4043 REG_RD(bp, BNX2_HC_COMMAND);
4045 udelay(5);
4047 pci_unmap_single(bp->pdev, map, pkt_size, PCI_DMA_TODEVICE);
4048 dev_kfree_skb(skb);
4050 if (bp->status_blk->status_tx_quick_consumer_index0 != bp->tx_prod) {
4051 goto loopback_test_done;
4054 rx_idx = bp->status_blk->status_rx_quick_consumer_index0;
4055 if (rx_idx != rx_start_idx + num_pkts) {
4056 goto loopback_test_done;
4059 rx_buf = &bp->rx_buf_ring[rx_start_idx];
4060 rx_skb = rx_buf->skb;
4062 rx_hdr = (struct l2_fhdr *) rx_skb->data;
4063 skb_reserve(rx_skb, bp->rx_offset);
4065 pci_dma_sync_single_for_cpu(bp->pdev,
4066 pci_unmap_addr(rx_buf, mapping),
4067 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
4069 if (rx_hdr->l2_fhdr_status &
4070 (L2_FHDR_ERRORS_BAD_CRC |
4071 L2_FHDR_ERRORS_PHY_DECODE |
4072 L2_FHDR_ERRORS_ALIGNMENT |
4073 L2_FHDR_ERRORS_TOO_SHORT |
4074 L2_FHDR_ERRORS_GIANT_FRAME)) {
4076 goto loopback_test_done;
4079 if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) {
4080 goto loopback_test_done;
4083 for (i = 14; i < pkt_size; i++) {
4084 if (*(rx_skb->data + i) != (unsigned char) (i & 0xff)) {
4085 goto loopback_test_done;
4089 ret = 0;
4091 loopback_test_done:
4092 bp->loopback = 0;
4093 return ret;
4096 #define BNX2_MAC_LOOPBACK_FAILED 1
4097 #define BNX2_PHY_LOOPBACK_FAILED 2
4098 #define BNX2_LOOPBACK_FAILED (BNX2_MAC_LOOPBACK_FAILED | \
4099 BNX2_PHY_LOOPBACK_FAILED)
4101 static int
4102 bnx2_test_loopback(struct bnx2 *bp)
4104 int rc = 0;
4106 if (!netif_running(bp->dev))
4107 return BNX2_LOOPBACK_FAILED;
4109 bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
4110 spin_lock_bh(&bp->phy_lock);
4111 bnx2_init_phy(bp);
4112 spin_unlock_bh(&bp->phy_lock);
4113 if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
4114 rc |= BNX2_MAC_LOOPBACK_FAILED;
4115 if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
4116 rc |= BNX2_PHY_LOOPBACK_FAILED;
4117 return rc;
4120 #define NVRAM_SIZE 0x200
4121 #define CRC32_RESIDUAL 0xdebb20e3
4123 static int
4124 bnx2_test_nvram(struct bnx2 *bp)
4126 u32 buf[NVRAM_SIZE / 4];
4127 u8 *data = (u8 *) buf;
4128 int rc = 0;
4129 u32 magic, csum;
4131 if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
4132 goto test_nvram_done;
4134 magic = be32_to_cpu(buf[0]);
4135 if (magic != 0x669955aa) {
4136 rc = -ENODEV;
4137 goto test_nvram_done;
4140 if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0)
4141 goto test_nvram_done;
4143 csum = ether_crc_le(0x100, data);
4144 if (csum != CRC32_RESIDUAL) {
4145 rc = -ENODEV;
4146 goto test_nvram_done;
4149 csum = ether_crc_le(0x100, data + 0x100);
4150 if (csum != CRC32_RESIDUAL) {
4151 rc = -ENODEV;
4154 test_nvram_done:
4155 return rc;
4156 }
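/* Note: running ether_crc_le() over data followed by its own stored
 * little-endian CRC-32 yields the fixed residue 0xdebb20e3, so each
 * 0x100-byte half of the manufacturing info block can be validated
 * without knowing where its checksum field sits.
 */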
4158 static int
4159 bnx2_test_link(struct bnx2 *bp)
4161 u32 bmsr;
4163 spin_lock_bh(&bp->phy_lock);
4164 bnx2_read_phy(bp, MII_BMSR, &bmsr);
4165 bnx2_read_phy(bp, MII_BMSR, &bmsr);
4166 spin_unlock_bh(&bp->phy_lock);
4168 if (bmsr & BMSR_LSTATUS) {
4169 return 0;
4171 return -ENODEV;
4174 static int
4175 bnx2_test_intr(struct bnx2 *bp)
4177 int i;
4178 u16 status_idx;
4180 if (!netif_running(bp->dev))
4181 return -ENODEV;
4183 status_idx = REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;
4185 /* This register is not touched during run-time. */
4186 REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
4187 REG_RD(bp, BNX2_HC_COMMAND);
4189 for (i = 0; i < 10; i++) {
4190 if ((REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
4191 status_idx) {
4193 break;
4196 msleep_interruptible(10);
4198 if (i < 10)
4199 return 0;
4201 return -ENODEV;
4204 static void
4205 bnx2_5706_serdes_timer(struct bnx2 *bp)
4207 spin_lock(&bp->phy_lock);
4208 if (bp->serdes_an_pending)
4209 bp->serdes_an_pending--;
4210 else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
4211 u32 bmcr;
4213 bp->current_interval = bp->timer_interval;
4215 bnx2_read_phy(bp, MII_BMCR, &bmcr);
4217 if (bmcr & BMCR_ANENABLE) {
4218 u32 phy1, phy2;
4220 bnx2_write_phy(bp, 0x1c, 0x7c00);
4221 bnx2_read_phy(bp, 0x1c, &phy1);
4223 bnx2_write_phy(bp, 0x17, 0x0f01);
4224 bnx2_read_phy(bp, 0x15, &phy2);
4225 bnx2_write_phy(bp, 0x17, 0x0f01);
4226 bnx2_read_phy(bp, 0x15, &phy2);
4228 if ((phy1 & 0x10) && /* SIGNAL DETECT */
4229 !(phy2 & 0x20)) { /* no CONFIG */
4231 bmcr &= ~BMCR_ANENABLE;
4232 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
4233 bnx2_write_phy(bp, MII_BMCR, bmcr);
4234 bp->phy_flags |= PHY_PARALLEL_DETECT_FLAG;
4238 else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
4239 (bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)) {
4240 u32 phy2;
4242 bnx2_write_phy(bp, 0x17, 0x0f01);
4243 bnx2_read_phy(bp, 0x15, &phy2);
4244 if (phy2 & 0x20) {
4245 u32 bmcr;
4247 bnx2_read_phy(bp, MII_BMCR, &bmcr);
4248 bmcr |= BMCR_ANENABLE;
4249 bnx2_write_phy(bp, MII_BMCR, bmcr);
4251 bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
4253 } else
4254 bp->current_interval = bp->timer_interval;
4256 spin_unlock(&bp->phy_lock);
4259 static void
4260 bnx2_5708_serdes_timer(struct bnx2 *bp)
4262 if ((bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) == 0) {
4263 bp->serdes_an_pending = 0;
4264 return;
4267 spin_lock(&bp->phy_lock);
4268 if (bp->serdes_an_pending)
4269 bp->serdes_an_pending--;
4270 else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
4271 u32 bmcr;
4273 bnx2_read_phy(bp, MII_BMCR, &bmcr);
4275 if (bmcr & BMCR_ANENABLE) {
4276 bmcr &= ~BMCR_ANENABLE;
4277 bmcr |= BMCR_FULLDPLX | BCM5708S_BMCR_FORCE_2500;
4278 bnx2_write_phy(bp, MII_BMCR, bmcr);
4279 bp->current_interval = SERDES_FORCED_TIMEOUT;
4280 } else {
4281 bmcr &= ~(BMCR_FULLDPLX | BCM5708S_BMCR_FORCE_2500);
4282 bmcr |= BMCR_ANENABLE;
4283 bnx2_write_phy(bp, MII_BMCR, bmcr);
4284 bp->serdes_an_pending = 2;
4285 bp->current_interval = bp->timer_interval;
4288 } else
4289 bp->current_interval = bp->timer_interval;
4291 spin_unlock(&bp->phy_lock);
4294 static void
4295 bnx2_timer(unsigned long data)
4297 struct bnx2 *bp = (struct bnx2 *) data;
4298 u32 msg;
4300 if (!netif_running(bp->dev))
4301 return;
4303 if (atomic_read(&bp->intr_sem) != 0)
4304 goto bnx2_restart_timer;
4306 msg = (u32) ++bp->fw_drv_pulse_wr_seq;
4307 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_PULSE_MB, msg);
4309 bp->stats_blk->stat_FwRxDrop = REG_RD_IND(bp, BNX2_FW_RX_DROP_COUNT);
4311 if (bp->phy_flags & PHY_SERDES_FLAG) {
4312 if (CHIP_NUM(bp) == CHIP_NUM_5706)
4313 bnx2_5706_serdes_timer(bp);
4314 else if (CHIP_NUM(bp) == CHIP_NUM_5708)
4315 bnx2_5708_serdes_timer(bp);
4318 bnx2_restart_timer:
4319 mod_timer(&bp->timer, jiffies + bp->current_interval);
4322 /* Called with rtnl_lock */
4323 static int
4324 bnx2_open(struct net_device *dev)
4326 struct bnx2 *bp = netdev_priv(dev);
4327 int rc;
4329 bnx2_set_power_state(bp, PCI_D0);
4330 bnx2_disable_int(bp);
4332 rc = bnx2_alloc_mem(bp);
4333 if (rc)
4334 return rc;
4336 if ((CHIP_ID(bp) != CHIP_ID_5706_A0) &&
4337 (CHIP_ID(bp) != CHIP_ID_5706_A1) &&
4338 !disable_msi) {
4340 if (pci_enable_msi(bp->pdev) == 0) {
4341 bp->flags |= USING_MSI_FLAG;
4342 rc = request_irq(bp->pdev->irq, bnx2_msi, 0, dev->name,
4343 dev);
4345 else {
4346 rc = request_irq(bp->pdev->irq, bnx2_interrupt,
4347 IRQF_SHARED, dev->name, dev);
4350 else {
4351 rc = request_irq(bp->pdev->irq, bnx2_interrupt, IRQF_SHARED,
4352 dev->name, dev);
4354 if (rc) {
4355 bnx2_free_mem(bp);
4356 return rc;
4359 rc = bnx2_init_nic(bp);
4361 if (rc) {
4362 free_irq(bp->pdev->irq, dev);
4363 if (bp->flags & USING_MSI_FLAG) {
4364 pci_disable_msi(bp->pdev);
4365 bp->flags &= ~USING_MSI_FLAG;
4367 bnx2_free_skbs(bp);
4368 bnx2_free_mem(bp);
4369 return rc;
4372 mod_timer(&bp->timer, jiffies + bp->current_interval);
4374 atomic_set(&bp->intr_sem, 0);
4376 bnx2_enable_int(bp);
4378 if (bp->flags & USING_MSI_FLAG) {
4379 /* Test MSI to make sure it is working.
4380 * If the MSI test fails, fall back to INTx mode.
4381 */
4382 if (bnx2_test_intr(bp) != 0) {
4383 printk(KERN_WARNING PFX "%s: No interrupt was generated"
4384 " using MSI, switching to INTx mode. Please"
4385 " report this failure to the PCI maintainer"
4386 " and include system chipset information.\n",
4387 bp->dev->name);
4389 bnx2_disable_int(bp);
4390 free_irq(bp->pdev->irq, dev);
4391 pci_disable_msi(bp->pdev);
4392 bp->flags &= ~USING_MSI_FLAG;
4394 rc = bnx2_init_nic(bp);
4396 if (!rc) {
4397 rc = request_irq(bp->pdev->irq, bnx2_interrupt,
4398 IRQF_SHARED, dev->name, dev);
4400 if (rc) {
4401 bnx2_free_skbs(bp);
4402 bnx2_free_mem(bp);
4403 del_timer_sync(&bp->timer);
4404 return rc;
4406 bnx2_enable_int(bp);
4409 if (bp->flags & USING_MSI_FLAG) {
4410 printk(KERN_INFO PFX "%s: using MSI\n", dev->name);
4413 netif_start_queue(dev);
4415 return 0;
4418 static void
4419 bnx2_reset_task(struct work_struct *work)
4421 struct bnx2 *bp = container_of(work, struct bnx2, reset_task);
4423 if (!netif_running(bp->dev))
4424 return;
4426 bp->in_reset_task = 1;
4427 bnx2_netif_stop(bp);
4429 bnx2_init_nic(bp);
4431 atomic_set(&bp->intr_sem, 1);
4432 bnx2_netif_start(bp);
4433 bp->in_reset_task = 0;
4436 static void
4437 bnx2_tx_timeout(struct net_device *dev)
4439 struct bnx2 *bp = netdev_priv(dev);
4441 /* This allows the netif to be shut down gracefully before resetting */
4442 schedule_work(&bp->reset_task);
4445 #ifdef BCM_VLAN
4446 /* Called with rtnl_lock */
4447 static void
4448 bnx2_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp)
4450 struct bnx2 *bp = netdev_priv(dev);
4452 bnx2_netif_stop(bp);
4454 bp->vlgrp = vlgrp;
4455 bnx2_set_rx_mode(dev);
4457 bnx2_netif_start(bp);
4460 /* Called with rtnl_lock */
4461 static void
4462 bnx2_vlan_rx_kill_vid(struct net_device *dev, uint16_t vid)
4464 struct bnx2 *bp = netdev_priv(dev);
4466 bnx2_netif_stop(bp);
4468 if (bp->vlgrp)
4469 bp->vlgrp->vlan_devices[vid] = NULL;
4470 bnx2_set_rx_mode(dev);
4472 bnx2_netif_start(bp);
4474 #endif
4476 /* Called with netif_tx_lock.
4477 * bnx2_tx_int() runs without netif_tx_lock unless it needs to call
4478 * netif_wake_queue().
4479 */
4480 static int
4481 bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
4483 struct bnx2 *bp = netdev_priv(dev);
4484 dma_addr_t mapping;
4485 struct tx_bd *txbd;
4486 struct sw_bd *tx_buf;
4487 u32 len, vlan_tag_flags, last_frag, mss;
4488 u16 prod, ring_prod;
4489 int i;
4491 if (unlikely(bnx2_tx_avail(bp) < (skb_shinfo(skb)->nr_frags + 1))) {
4492 netif_stop_queue(dev);
4493 printk(KERN_ERR PFX "%s: BUG! Tx ring full when queue awake!\n",
4494 dev->name);
4496 return NETDEV_TX_BUSY;
4498 len = skb_headlen(skb);
4499 prod = bp->tx_prod;
4500 ring_prod = TX_RING_IDX(prod);
4502 vlan_tag_flags = 0;
4503 if (skb->ip_summed == CHECKSUM_PARTIAL) {
4504 vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
4507 if (bp->vlgrp != 0 && vlan_tx_tag_present(skb)) {
4508 vlan_tag_flags |=
4509 (TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
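
	/* For TSO, the chip segments the frame in hardware; the driver
	 * seeds the IP header with the per-segment total length,
	 * pre-computes the TCP pseudo-header checksum, and encodes the
	 * combined IP/TCP option length into the descriptor flags below.
	 */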
#ifdef BCM_TSO
	if ((mss = skb_shinfo(skb)->gso_size) &&
		(skb->len > (bp->dev->mtu + ETH_HLEN))) {
		u32 tcp_opt_len, ip_tcp_len;

		if (skb_header_cloned(skb) &&
		    pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
			dev_kfree_skb(skb);
			return NETDEV_TX_OK;
		}

		vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;

		tcp_opt_len = 0;
		if (skb->h.th->doff > 5) {
			tcp_opt_len = (skb->h.th->doff - 5) << 2;
		}

		ip_tcp_len = (skb->nh.iph->ihl << 2) + sizeof(struct tcphdr);

		skb->nh.iph->check = 0;
		skb->nh.iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
		skb->h.th->check =
			~csum_tcpudp_magic(skb->nh.iph->saddr,
					   skb->nh.iph->daddr,
					   0, IPPROTO_TCP, 0);

		if (tcp_opt_len || (skb->nh.iph->ihl > 5)) {
			vlan_tag_flags |= ((skb->nh.iph->ihl - 5) +
				(tcp_opt_len >> 2)) << 8;
		}
	}
	else
#endif
	{
		mss = 0;
	}

	mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE);

	tx_buf = &bp->tx_buf_ring[ring_prod];
	tx_buf->skb = skb;
	pci_unmap_addr_set(tx_buf, mapping, mapping);

	txbd = &bp->tx_desc_ring[ring_prod];

	txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
	txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
	txbd->tx_bd_mss_nbytes = len | (mss << 16);
	txbd->tx_bd_vlan_tag_flags = vlan_tag_flags | TX_BD_FLAGS_START;

	last_frag = skb_shinfo(skb)->nr_frags;

	for (i = 0; i < last_frag; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		prod = NEXT_TX_BD(prod);
		ring_prod = TX_RING_IDX(prod);
		txbd = &bp->tx_desc_ring[ring_prod];

		len = frag->size;
		mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
			len, PCI_DMA_TODEVICE);
		pci_unmap_addr_set(&bp->tx_buf_ring[ring_prod],
				mapping, mapping);

		txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
		txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
		txbd->tx_bd_mss_nbytes = len | (mss << 16);
		txbd->tx_bd_vlan_tag_flags = vlan_tag_flags;
	}
	txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;
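
	/* Advance the producer and publish the new index and running byte
	 * count to the chip.  mmiowb() keeps the two MMIO writes ordered
	 * across CPUs before netif_tx_lock is released.
	 */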
	prod = NEXT_TX_BD(prod);
	bp->tx_prod_bseq += skb->len;

	REG_WR16(bp, bp->tx_bidx_addr, prod);
	REG_WR(bp, bp->tx_bseq_addr, bp->tx_prod_bseq);

	mmiowb();

	bp->tx_prod = prod;
	dev->trans_start = jiffies;

	if (unlikely(bnx2_tx_avail(bp) <= MAX_SKB_FRAGS)) {
		netif_stop_queue(dev);
		if (bnx2_tx_avail(bp) > bp->tx_wake_thresh)
			netif_wake_queue(dev);
	}

	return NETDEV_TX_OK;
}
/* Called with rtnl_lock */
static int
bnx2_close(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 reset_code;

	/* Calling flush_scheduled_work() may deadlock because
	 * linkwatch_event() may be on the workqueue and it will try to get
	 * the rtnl_lock which we are holding.
	 */
	while (bp->in_reset_task)
		msleep(1);

	bnx2_netif_stop(bp);
	del_timer_sync(&bp->timer);
	if (bp->flags & NO_WOL_FLAG)
		reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
	else if (bp->wol)
		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
	else
		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
	bnx2_reset_chip(bp, reset_code);
	free_irq(bp->pdev->irq, dev);
	if (bp->flags & USING_MSI_FLAG) {
		pci_disable_msi(bp->pdev);
		bp->flags &= ~USING_MSI_FLAG;
	}
	bnx2_free_skbs(bp);
	bnx2_free_mem(bp);
	bp->link_up = 0;
	netif_carrier_off(bp->dev);
	bnx2_set_power_state(bp, PCI_D3hot);
	return 0;
}
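
/* Hardware maintains 64-bit counters as separate _hi/_lo 32-bit words.
 * On 64-bit hosts both halves are combined into an unsigned long; on
 * 32-bit hosts only the low word fits into struct net_device_stats, so
 * the high word is discarded.
 */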
#define GET_NET_STATS64(ctr)					\
	(unsigned long) ((unsigned long) (ctr##_hi) << 32) +	\
	(unsigned long) (ctr##_lo)

#define GET_NET_STATS32(ctr)		\
	(ctr##_lo)

#if (BITS_PER_LONG == 64)
#define GET_NET_STATS	GET_NET_STATS64
#else
#define GET_NET_STATS	GET_NET_STATS32
#endif
static struct net_device_stats *
bnx2_get_stats(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	struct statistics_block *stats_blk = bp->stats_blk;
	struct net_device_stats *net_stats = &bp->net_stats;

	if (bp->stats_blk == NULL) {
		return net_stats;
	}
	net_stats->rx_packets =
		GET_NET_STATS(stats_blk->stat_IfHCInUcastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCInMulticastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCInBroadcastPkts);

	net_stats->tx_packets =
		GET_NET_STATS(stats_blk->stat_IfHCOutUcastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCOutBroadcastPkts);

	net_stats->rx_bytes =
		GET_NET_STATS(stats_blk->stat_IfHCInOctets);

	net_stats->tx_bytes =
		GET_NET_STATS(stats_blk->stat_IfHCOutOctets);

	net_stats->multicast =
		GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts);

	net_stats->collisions =
		(unsigned long) stats_blk->stat_EtherStatsCollisions;

	net_stats->rx_length_errors =
		(unsigned long) (stats_blk->stat_EtherStatsUndersizePkts +
		stats_blk->stat_EtherStatsOverrsizePkts);

	net_stats->rx_over_errors =
		(unsigned long) stats_blk->stat_IfInMBUFDiscards;

	net_stats->rx_frame_errors =
		(unsigned long) stats_blk->stat_Dot3StatsAlignmentErrors;

	net_stats->rx_crc_errors =
		(unsigned long) stats_blk->stat_Dot3StatsFCSErrors;

	net_stats->rx_errors = net_stats->rx_length_errors +
		net_stats->rx_over_errors + net_stats->rx_frame_errors +
		net_stats->rx_crc_errors;

	net_stats->tx_aborted_errors =
		(unsigned long) (stats_blk->stat_Dot3StatsExcessiveCollisions +
		stats_blk->stat_Dot3StatsLateCollisions);

	if ((CHIP_NUM(bp) == CHIP_NUM_5706) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_A0))
		net_stats->tx_carrier_errors = 0;
	else {
		net_stats->tx_carrier_errors =
			(unsigned long)
			stats_blk->stat_Dot3StatsCarrierSenseErrors;
	}

	net_stats->tx_errors =
		(unsigned long)
		stats_blk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors +
		net_stats->tx_aborted_errors +
		net_stats->tx_carrier_errors;

	net_stats->rx_missed_errors =
		(unsigned long) (stats_blk->stat_IfInMBUFDiscards +
		stats_blk->stat_FwRxDrop);

	return net_stats;
}
/* All ethtool functions called with rtnl_lock */

static int
bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct bnx2 *bp = netdev_priv(dev);

	cmd->supported = SUPPORTED_Autoneg;
	if (bp->phy_flags & PHY_SERDES_FLAG) {
		cmd->supported |= SUPPORTED_1000baseT_Full |
			SUPPORTED_FIBRE;

		cmd->port = PORT_FIBRE;
	}
	else {
		cmd->supported |= SUPPORTED_10baseT_Half |
			SUPPORTED_10baseT_Full |
			SUPPORTED_100baseT_Half |
			SUPPORTED_100baseT_Full |
			SUPPORTED_1000baseT_Full |
			SUPPORTED_TP;

		cmd->port = PORT_TP;
	}

	cmd->advertising = bp->advertising;

	if (bp->autoneg & AUTONEG_SPEED) {
		cmd->autoneg = AUTONEG_ENABLE;
	}
	else {
		cmd->autoneg = AUTONEG_DISABLE;
	}

	if (netif_carrier_ok(dev)) {
		cmd->speed = bp->line_speed;
		cmd->duplex = bp->duplex;
	}
	else {
		cmd->speed = -1;
		cmd->duplex = -1;
	}

	cmd->transceiver = XCVR_INTERNAL;
	cmd->phy_address = bp->phy_addr;

	return 0;
}
static int
bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct bnx2 *bp = netdev_priv(dev);
	u8 autoneg = bp->autoneg;
	u8 req_duplex = bp->req_duplex;
	u16 req_line_speed = bp->req_line_speed;
	u32 advertising = bp->advertising;

	if (cmd->autoneg == AUTONEG_ENABLE) {
		autoneg |= AUTONEG_SPEED;

		cmd->advertising &= ETHTOOL_ALL_COPPER_SPEED;

		/* allow advertising 1 speed */
		if ((cmd->advertising == ADVERTISED_10baseT_Half) ||
		    (cmd->advertising == ADVERTISED_10baseT_Full) ||
		    (cmd->advertising == ADVERTISED_100baseT_Half) ||
		    (cmd->advertising == ADVERTISED_100baseT_Full)) {

			if (bp->phy_flags & PHY_SERDES_FLAG)
				return -EINVAL;

			advertising = cmd->advertising;
		}
		else if (cmd->advertising == ADVERTISED_1000baseT_Full) {
			advertising = cmd->advertising;
		}
		else if (cmd->advertising == ADVERTISED_1000baseT_Half) {
			return -EINVAL;
		}
		else {
			if (bp->phy_flags & PHY_SERDES_FLAG) {
				advertising = ETHTOOL_ALL_FIBRE_SPEED;
			}
			else {
				advertising = ETHTOOL_ALL_COPPER_SPEED;
			}
		}
		advertising |= ADVERTISED_Autoneg;
	}
	else {
		if (bp->phy_flags & PHY_SERDES_FLAG) {
			if ((cmd->speed != SPEED_1000 &&
			     cmd->speed != SPEED_2500) ||
			    (cmd->duplex != DUPLEX_FULL))
				return -EINVAL;

			if (cmd->speed == SPEED_2500 &&
			    !(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
				return -EINVAL;
		}
		else if (cmd->speed == SPEED_1000) {
			return -EINVAL;
		}
		autoneg &= ~AUTONEG_SPEED;
		req_line_speed = cmd->speed;
		req_duplex = cmd->duplex;
		advertising = 0;
	}

	bp->autoneg = autoneg;
	bp->advertising = advertising;
	bp->req_line_speed = req_line_speed;
	bp->req_duplex = req_duplex;

	spin_lock_bh(&bp->phy_lock);

	bnx2_setup_phy(bp);

	spin_unlock_bh(&bp->phy_lock);

	return 0;
}
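
/* The bootcode version is packed one byte per component; formatting it as
 * "x.y.z" below assumes each component is a single decimal digit.
 */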
static void
bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct bnx2 *bp = netdev_priv(dev);

	strcpy(info->driver, DRV_MODULE_NAME);
	strcpy(info->version, DRV_MODULE_VERSION);
	strcpy(info->bus_info, pci_name(bp->pdev));
	info->fw_version[0] = ((bp->fw_ver & 0xff000000) >> 24) + '0';
	info->fw_version[2] = ((bp->fw_ver & 0xff0000) >> 16) + '0';
	info->fw_version[4] = ((bp->fw_ver & 0xff00) >> 8) + '0';
	info->fw_version[1] = info->fw_version[3] = '.';
	info->fw_version[5] = 0;
}

#define BNX2_REGDUMP_LEN		(32 * 1024)

static int
bnx2_get_regs_len(struct net_device *dev)
{
	return BNX2_REGDUMP_LEN;
}

static void
bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
{
	u32 *p = _p, i, offset;
	u8 *orig_p = _p;
	struct bnx2 *bp = netdev_priv(dev);
	u32 reg_boundaries[] = { 0x0000, 0x0098, 0x0400, 0x045c,
				 0x0800, 0x0880, 0x0c00, 0x0c10,
				 0x0c30, 0x0d08, 0x1000, 0x101c,
				 0x1040, 0x1048, 0x1080, 0x10a4,
				 0x1400, 0x1490, 0x1498, 0x14f0,
				 0x1500, 0x155c, 0x1580, 0x15dc,
				 0x1600, 0x1658, 0x1680, 0x16d8,
				 0x1800, 0x1820, 0x1840, 0x1854,
				 0x1880, 0x1894, 0x1900, 0x1984,
				 0x1c00, 0x1c0c, 0x1c40, 0x1c54,
				 0x1c80, 0x1c94, 0x1d00, 0x1d84,
				 0x2000, 0x2030, 0x23c0, 0x2400,
				 0x2800, 0x2820, 0x2830, 0x2850,
				 0x2b40, 0x2c10, 0x2fc0, 0x3058,
				 0x3c00, 0x3c94, 0x4000, 0x4010,
				 0x4080, 0x4090, 0x43c0, 0x4458,
				 0x4c00, 0x4c18, 0x4c40, 0x4c54,
				 0x4fc0, 0x5010, 0x53c0, 0x5444,
				 0x5c00, 0x5c18, 0x5c80, 0x5c90,
				 0x5fc0, 0x6000, 0x6400, 0x6428,
				 0x6800, 0x6848, 0x684c, 0x6860,
				 0x6888, 0x6910, 0x8000 };
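
	/* reg_boundaries[] holds alternating start/end offsets of the
	 * readable register windows; the loop below dumps each window and
	 * leaves the unreadable gaps zero-filled.
	 */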
	regs->version = 0;

	memset(p, 0, BNX2_REGDUMP_LEN);

	if (!netif_running(bp->dev))
		return;

	i = 0;
	offset = reg_boundaries[0];
	p += offset;
	while (offset < BNX2_REGDUMP_LEN) {
		*p++ = REG_RD(bp, offset);
		offset += 4;
		if (offset == reg_boundaries[i + 1]) {
			offset = reg_boundaries[i + 2];
			p = (u32 *) (orig_p + offset);
			i += 2;
		}
	}
}

static void
bnx2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct bnx2 *bp = netdev_priv(dev);

	if (bp->flags & NO_WOL_FLAG) {
		wol->supported = 0;
		wol->wolopts = 0;
	}
	else {
		wol->supported = WAKE_MAGIC;
		if (bp->wol)
			wol->wolopts = WAKE_MAGIC;
		else
			wol->wolopts = 0;
	}
	memset(&wol->sopass, 0, sizeof(wol->sopass));
}

static int
bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct bnx2 *bp = netdev_priv(dev);

	if (wol->wolopts & ~WAKE_MAGIC)
		return -EINVAL;

	if (wol->wolopts & WAKE_MAGIC) {
		if (bp->flags & NO_WOL_FLAG)
			return -EINVAL;

		bp->wol = 1;
	}
	else {
		bp->wol = 0;
	}
	return 0;
}

static int
bnx2_nway_reset(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 bmcr;

	if (!(bp->autoneg & AUTONEG_SPEED)) {
		return -EINVAL;
	}

	spin_lock_bh(&bp->phy_lock);

	/* Force a link down visible on the other side */
	if (bp->phy_flags & PHY_SERDES_FLAG) {
		bnx2_write_phy(bp, MII_BMCR, BMCR_LOOPBACK);
		spin_unlock_bh(&bp->phy_lock);

		msleep(20);

		spin_lock_bh(&bp->phy_lock);

		bp->current_interval = SERDES_AN_TIMEOUT;
		bp->serdes_an_pending = 1;
		mod_timer(&bp->timer, jiffies + bp->current_interval);
	}

	bnx2_read_phy(bp, MII_BMCR, &bmcr);
	bmcr &= ~BMCR_LOOPBACK;
	bnx2_write_phy(bp, MII_BMCR, bmcr | BMCR_ANRESTART | BMCR_ANENABLE);

	spin_unlock_bh(&bp->phy_lock);

	return 0;
}

static int
bnx2_get_eeprom_len(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);

	if (bp->flash_info == NULL)
		return 0;

	return (int) bp->flash_size;
}

static int
bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
		u8 *eebuf)
{
	struct bnx2 *bp = netdev_priv(dev);
	int rc;

	/* parameters already validated in ethtool_get_eeprom */

	rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);

	return rc;
}

static int
bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
		u8 *eebuf)
{
	struct bnx2 *bp = netdev_priv(dev);
	int rc;

	/* parameters already validated in ethtool_set_eeprom */

	rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);

	return rc;
}

static int
bnx2_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
{
	struct bnx2 *bp = netdev_priv(dev);

	memset(coal, 0, sizeof(struct ethtool_coalesce));

	coal->rx_coalesce_usecs = bp->rx_ticks;
	coal->rx_max_coalesced_frames = bp->rx_quick_cons_trip;
	coal->rx_coalesce_usecs_irq = bp->rx_ticks_int;
	coal->rx_max_coalesced_frames_irq = bp->rx_quick_cons_trip_int;

	coal->tx_coalesce_usecs = bp->tx_ticks;
	coal->tx_max_coalesced_frames = bp->tx_quick_cons_trip;
	coal->tx_coalesce_usecs_irq = bp->tx_ticks_int;
	coal->tx_max_coalesced_frames_irq = bp->tx_quick_cons_trip_int;

	coal->stats_block_coalesce_usecs = bp->stats_ticks;

	return 0;
}

static int
bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
{
	struct bnx2 *bp = netdev_priv(dev);

	bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
	if (bp->rx_ticks > 0x3ff)
		bp->rx_ticks = 0x3ff;

	bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames;
	if (bp->rx_quick_cons_trip > 0xff)
		bp->rx_quick_cons_trip = 0xff;

	bp->rx_ticks_int = (u16) coal->rx_coalesce_usecs_irq;
	if (bp->rx_ticks_int > 0x3ff)
		bp->rx_ticks_int = 0x3ff;

	bp->rx_quick_cons_trip_int = (u16) coal->rx_max_coalesced_frames_irq;
	if (bp->rx_quick_cons_trip_int > 0xff)
		bp->rx_quick_cons_trip_int = 0xff;

	bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
	if (bp->tx_ticks > 0x3ff)
		bp->tx_ticks = 0x3ff;

	bp->tx_quick_cons_trip = (u16) coal->tx_max_coalesced_frames;
	if (bp->tx_quick_cons_trip > 0xff)
		bp->tx_quick_cons_trip = 0xff;

	bp->tx_ticks_int = (u16) coal->tx_coalesce_usecs_irq;
	if (bp->tx_ticks_int > 0x3ff)
		bp->tx_ticks_int = 0x3ff;

	bp->tx_quick_cons_trip_int = (u16) coal->tx_max_coalesced_frames_irq;
	if (bp->tx_quick_cons_trip_int > 0xff)
		bp->tx_quick_cons_trip_int = 0xff;

	bp->stats_ticks = coal->stats_block_coalesce_usecs;
	if (bp->stats_ticks > 0xffff00)
		bp->stats_ticks = 0xffff00;
	bp->stats_ticks &= 0xffff00;
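
	/* The coalescing values above only reach the hardware during chip
	 * init, so restart a running NIC for them to take effect.
	 */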
	if (netif_running(bp->dev)) {
		bnx2_netif_stop(bp);
		bnx2_init_nic(bp);
		bnx2_netif_start(bp);
	}

	return 0;
}

static void
bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
{
	struct bnx2 *bp = netdev_priv(dev);

	ering->rx_max_pending = MAX_TOTAL_RX_DESC_CNT;
	ering->rx_mini_max_pending = 0;
	ering->rx_jumbo_max_pending = 0;

	ering->rx_pending = bp->rx_ring_size;
	ering->rx_mini_pending = 0;
	ering->rx_jumbo_pending = 0;

	ering->tx_max_pending = MAX_TX_DESC_CNT;
	ering->tx_pending = bp->tx_ring_size;
}

static int
bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
{
	struct bnx2 *bp = netdev_priv(dev);

	if ((ering->rx_pending > MAX_TOTAL_RX_DESC_CNT) ||
	    (ering->tx_pending > MAX_TX_DESC_CNT) ||
	    (ering->tx_pending <= MAX_SKB_FRAGS)) {

		return -EINVAL;
	}
	if (netif_running(bp->dev)) {
		bnx2_netif_stop(bp);
		bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
		bnx2_free_skbs(bp);
		bnx2_free_mem(bp);
	}

	bnx2_set_rx_ring_size(bp, ering->rx_pending);
	bp->tx_ring_size = ering->tx_pending;

	if (netif_running(bp->dev)) {
		int rc;

		rc = bnx2_alloc_mem(bp);
		if (rc)
			return rc;
		bnx2_init_nic(bp);
		bnx2_netif_start(bp);
	}

	return 0;
}

static void
bnx2_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
{
	struct bnx2 *bp = netdev_priv(dev);

	epause->autoneg = ((bp->autoneg & AUTONEG_FLOW_CTRL) != 0);
	epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) != 0);
	epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) != 0);
}

static int
bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
{
	struct bnx2 *bp = netdev_priv(dev);

	bp->req_flow_ctrl = 0;
	if (epause->rx_pause)
		bp->req_flow_ctrl |= FLOW_CTRL_RX;
	if (epause->tx_pause)
		bp->req_flow_ctrl |= FLOW_CTRL_TX;

	if (epause->autoneg) {
		bp->autoneg |= AUTONEG_FLOW_CTRL;
	}
	else {
		bp->autoneg &= ~AUTONEG_FLOW_CTRL;
	}

	spin_lock_bh(&bp->phy_lock);

	bnx2_setup_phy(bp);

	spin_unlock_bh(&bp->phy_lock);

	return 0;
}

static u32
bnx2_get_rx_csum(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);

	return bp->rx_csum;
}

static int
bnx2_set_rx_csum(struct net_device *dev, u32 data)
{
	struct bnx2 *bp = netdev_priv(dev);

	bp->rx_csum = data;
	return 0;
}

static int
bnx2_set_tso(struct net_device *dev, u32 data)
{
	if (data)
		dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
	else
		dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
	return 0;
}

#define BNX2_NUM_STATS 46

static struct {
	char string[ETH_GSTRING_LEN];
} bnx2_stats_str_arr[BNX2_NUM_STATS] = {
	{ "rx_bytes" },
	{ "rx_error_bytes" },
	{ "tx_bytes" },
	{ "tx_error_bytes" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_mac_errors" },
	{ "tx_carrier_errors" },
	{ "rx_crc_errors" },
	{ "rx_align_errors" },
	{ "tx_single_collisions" },
	{ "tx_multi_collisions" },
	{ "tx_deferred" },
	{ "tx_excess_collisions" },
	{ "tx_late_collisions" },
	{ "tx_total_collisions" },
	{ "rx_fragments" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_oversize_packets" },
	{ "rx_64_byte_packets" },
	{ "rx_65_to_127_byte_packets" },
	{ "rx_128_to_255_byte_packets" },
	{ "rx_256_to_511_byte_packets" },
	{ "rx_512_to_1023_byte_packets" },
	{ "rx_1024_to_1522_byte_packets" },
	{ "rx_1523_to_9022_byte_packets" },
	{ "tx_64_byte_packets" },
	{ "tx_65_to_127_byte_packets" },
	{ "tx_128_to_255_byte_packets" },
	{ "tx_256_to_511_byte_packets" },
	{ "tx_512_to_1023_byte_packets" },
	{ "tx_1024_to_1522_byte_packets" },
	{ "tx_1523_to_9022_byte_packets" },
	{ "rx_xon_frames" },
	{ "rx_xoff_frames" },
	{ "tx_xon_frames" },
	{ "tx_xoff_frames" },
	{ "rx_mac_ctrl_frames" },
	{ "rx_filtered_packets" },
	{ "rx_discards" },
	{ "rx_fw_discards" },
};

#define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4)

static const unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
	STATS_OFFSET32(stat_IfHCInOctets_hi),
	STATS_OFFSET32(stat_IfHCInBadOctets_hi),
	STATS_OFFSET32(stat_IfHCOutOctets_hi),
	STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
	STATS_OFFSET32(stat_IfHCInUcastPkts_hi),
	STATS_OFFSET32(stat_IfHCInMulticastPkts_hi),
	STATS_OFFSET32(stat_IfHCInBroadcastPkts_hi),
	STATS_OFFSET32(stat_IfHCOutUcastPkts_hi),
	STATS_OFFSET32(stat_IfHCOutMulticastPkts_hi),
	STATS_OFFSET32(stat_IfHCOutBroadcastPkts_hi),
	STATS_OFFSET32(stat_emac_tx_stat_dot3statsinternalmactransmiterrors),
	STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
	STATS_OFFSET32(stat_Dot3StatsFCSErrors),
	STATS_OFFSET32(stat_Dot3StatsAlignmentErrors),
	STATS_OFFSET32(stat_Dot3StatsSingleCollisionFrames),
	STATS_OFFSET32(stat_Dot3StatsMultipleCollisionFrames),
	STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
	STATS_OFFSET32(stat_Dot3StatsExcessiveCollisions),
	STATS_OFFSET32(stat_Dot3StatsLateCollisions),
	STATS_OFFSET32(stat_EtherStatsCollisions),
	STATS_OFFSET32(stat_EtherStatsFragments),
	STATS_OFFSET32(stat_EtherStatsJabbers),
	STATS_OFFSET32(stat_EtherStatsUndersizePkts),
	STATS_OFFSET32(stat_EtherStatsOverrsizePkts),
	STATS_OFFSET32(stat_EtherStatsPktsRx64Octets),
	STATS_OFFSET32(stat_EtherStatsPktsRx65Octetsto127Octets),
	STATS_OFFSET32(stat_EtherStatsPktsRx128Octetsto255Octets),
	STATS_OFFSET32(stat_EtherStatsPktsRx256Octetsto511Octets),
	STATS_OFFSET32(stat_EtherStatsPktsRx512Octetsto1023Octets),
	STATS_OFFSET32(stat_EtherStatsPktsRx1024Octetsto1522Octets),
	STATS_OFFSET32(stat_EtherStatsPktsRx1523Octetsto9022Octets),
	STATS_OFFSET32(stat_EtherStatsPktsTx64Octets),
	STATS_OFFSET32(stat_EtherStatsPktsTx65Octetsto127Octets),
	STATS_OFFSET32(stat_EtherStatsPktsTx128Octetsto255Octets),
	STATS_OFFSET32(stat_EtherStatsPktsTx256Octetsto511Octets),
	STATS_OFFSET32(stat_EtherStatsPktsTx512Octetsto1023Octets),
	STATS_OFFSET32(stat_EtherStatsPktsTx1024Octetsto1522Octets),
	STATS_OFFSET32(stat_EtherStatsPktsTx1523Octetsto9022Octets),
	STATS_OFFSET32(stat_XonPauseFramesReceived),
	STATS_OFFSET32(stat_XoffPauseFramesReceived),
	STATS_OFFSET32(stat_OutXonSent),
	STATS_OFFSET32(stat_OutXoffSent),
	STATS_OFFSET32(stat_MacControlFramesReceived),
	STATS_OFFSET32(stat_IfInFramesL2FilterDiscards),
	STATS_OFFSET32(stat_IfInMBUFDiscards),
	STATS_OFFSET32(stat_FwRxDrop),
};

/* stat_IfHCInBadOctets and stat_Dot3StatsCarrierSenseErrors are
 * skipped because of errata.
 */
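
/* Counter widths in bytes for each entry of bnx2_stats_offset_arr[]:
 * 8 for _hi/_lo pairs, 4 for single 32-bit counters, 0 for counters
 * that must be skipped on that chip.
 */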
static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
	8,0,8,8,8,8,8,8,8,8,
	4,0,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,
};

static u8 bnx2_5708_stats_len_arr[BNX2_NUM_STATS] = {
	8,0,8,8,8,8,8,8,8,8,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,
};

#define BNX2_NUM_TESTS 6

static struct {
	char string[ETH_GSTRING_LEN];
} bnx2_tests_str_arr[BNX2_NUM_TESTS] = {
	{ "register_test (offline)" },
	{ "memory_test (offline)" },
	{ "loopback_test (offline)" },
	{ "nvram_test (online)" },
	{ "interrupt_test (online)" },
	{ "link_test (online)" },
};

static int
bnx2_self_test_count(struct net_device *dev)
{
	return BNX2_NUM_TESTS;
}

static void
bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
{
	struct bnx2 *bp = netdev_priv(dev);

	memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS);
	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		int i;

		bnx2_netif_stop(bp);
		bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
		bnx2_free_skbs(bp);

		if (bnx2_test_registers(bp) != 0) {
			buf[0] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		if (bnx2_test_memory(bp) != 0) {
			buf[1] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		if ((buf[2] = bnx2_test_loopback(bp)) != 0)
			etest->flags |= ETH_TEST_FL_FAILED;

		if (!netif_running(bp->dev)) {
			bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
		}
		else {
			bnx2_init_nic(bp);
			bnx2_netif_start(bp);
		}

		/* wait for link up */
		for (i = 0; i < 7; i++) {
			if (bp->link_up)
				break;
			msleep_interruptible(1000);
		}
	}

	if (bnx2_test_nvram(bp) != 0) {
		buf[3] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	if (bnx2_test_intr(bp) != 0) {
		buf[4] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}

	if (bnx2_test_link(bp) != 0) {
		buf[5] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
}

static void
bnx2_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
{
	switch (stringset) {
	case ETH_SS_STATS:
		memcpy(buf, bnx2_stats_str_arr,
			sizeof(bnx2_stats_str_arr));
		break;
	case ETH_SS_TEST:
		memcpy(buf, bnx2_tests_str_arr,
			sizeof(bnx2_tests_str_arr));
		break;
	}
}

static int
bnx2_get_stats_count(struct net_device *dev)
{
	return BNX2_NUM_STATS;
}

static void
bnx2_get_ethtool_stats(struct net_device *dev,
		struct ethtool_stats *stats, u64 *buf)
{
	struct bnx2 *bp = netdev_priv(dev);
	int i;
	u32 *hw_stats = (u32 *) bp->stats_blk;
	u8 *stats_len_arr = NULL;

	if (hw_stats == NULL) {
		memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS);
		return;
	}

	if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
	    (CHIP_ID(bp) == CHIP_ID_5706_A1) ||
	    (CHIP_ID(bp) == CHIP_ID_5706_A2) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_A0))
		stats_len_arr = bnx2_5706_stats_len_arr;
	else
		stats_len_arr = bnx2_5708_stats_len_arr;

	for (i = 0; i < BNX2_NUM_STATS; i++) {
		if (stats_len_arr[i] == 0) {
			/* skip this counter */
			buf[i] = 0;
			continue;
		}
		if (stats_len_arr[i] == 4) {
			/* 4-byte counter */
			buf[i] = (u64)
				*(hw_stats + bnx2_stats_offset_arr[i]);
			continue;
		}
		/* 8-byte counter */
		buf[i] = (((u64) *(hw_stats +
					bnx2_stats_offset_arr[i])) << 32) +
				*(hw_stats + bnx2_stats_offset_arr[i] + 1);
	}
}
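
/* ethtool -p support: blink the port LED for 'data' seconds (default 2)
 * by alternating the override bits in the EMAC LED register.
 */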
static int
bnx2_phys_id(struct net_device *dev, u32 data)
{
	struct bnx2 *bp = netdev_priv(dev);
	int i;
	u32 save;

	if (data == 0)
		data = 2;

	save = REG_RD(bp, BNX2_MISC_CFG);
	REG_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);

	for (i = 0; i < (data * 2); i++) {
		if ((i % 2) == 0) {
			REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE);
		}
		else {
			REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE |
				BNX2_EMAC_LED_1000MB_OVERRIDE |
				BNX2_EMAC_LED_100MB_OVERRIDE |
				BNX2_EMAC_LED_10MB_OVERRIDE |
				BNX2_EMAC_LED_TRAFFIC_OVERRIDE |
				BNX2_EMAC_LED_TRAFFIC);
		}
		msleep_interruptible(500);
		if (signal_pending(current))
			break;
	}
	REG_WR(bp, BNX2_EMAC_LED, 0);
	REG_WR(bp, BNX2_MISC_CFG, save);
	return 0;
}

static const struct ethtool_ops bnx2_ethtool_ops = {
	.get_settings		= bnx2_get_settings,
	.set_settings		= bnx2_set_settings,
	.get_drvinfo		= bnx2_get_drvinfo,
	.get_regs_len		= bnx2_get_regs_len,
	.get_regs		= bnx2_get_regs,
	.get_wol		= bnx2_get_wol,
	.set_wol		= bnx2_set_wol,
	.nway_reset		= bnx2_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_eeprom_len		= bnx2_get_eeprom_len,
	.get_eeprom		= bnx2_get_eeprom,
	.set_eeprom		= bnx2_set_eeprom,
	.get_coalesce		= bnx2_get_coalesce,
	.set_coalesce		= bnx2_set_coalesce,
	.get_ringparam		= bnx2_get_ringparam,
	.set_ringparam		= bnx2_set_ringparam,
	.get_pauseparam		= bnx2_get_pauseparam,
	.set_pauseparam		= bnx2_set_pauseparam,
	.get_rx_csum		= bnx2_get_rx_csum,
	.set_rx_csum		= bnx2_set_rx_csum,
	.get_tx_csum		= ethtool_op_get_tx_csum,
	.set_tx_csum		= ethtool_op_set_tx_csum,
	.get_sg			= ethtool_op_get_sg,
	.set_sg			= ethtool_op_set_sg,
#ifdef BCM_TSO
	.get_tso		= ethtool_op_get_tso,
	.set_tso		= bnx2_set_tso,
#endif
	.self_test_count	= bnx2_self_test_count,
	.self_test		= bnx2_self_test,
	.get_strings		= bnx2_get_strings,
	.phys_id		= bnx2_phys_id,
	.get_stats_count	= bnx2_get_stats_count,
	.get_ethtool_stats	= bnx2_get_ethtool_stats,
	.get_perm_addr		= ethtool_op_get_perm_addr,
};

/* Called with rtnl_lock */
static int
bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *data = if_mii(ifr);
	struct bnx2 *bp = netdev_priv(dev);
	int err;

	switch(cmd) {
	case SIOCGMIIPHY:
		data->phy_id = bp->phy_addr;

		/* fallthru */
	case SIOCGMIIREG: {
		u32 mii_regval;

		spin_lock_bh(&bp->phy_lock);
		err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval);
		spin_unlock_bh(&bp->phy_lock);

		data->val_out = mii_regval;

		return err;
	}

	case SIOCSMIIREG:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;

		spin_lock_bh(&bp->phy_lock);
		err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in);
		spin_unlock_bh(&bp->phy_lock);

		return err;

	default:
		/* do nothing */
		break;
	}
	return -EOPNOTSUPP;
}

/* Called with rtnl_lock */
static int
bnx2_change_mac_addr(struct net_device *dev, void *p)
{
	struct sockaddr *addr = p;
	struct bnx2 *bp = netdev_priv(dev);

	if (!is_valid_ether_addr(addr->sa_data))
		return -EINVAL;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	if (netif_running(dev))
		bnx2_set_mac_addr(bp);

	return 0;
}

/* Called with rtnl_lock */
static int
bnx2_change_mtu(struct net_device *dev, int new_mtu)
{
	struct bnx2 *bp = netdev_priv(dev);

	if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) ||
	    ((new_mtu + ETH_HLEN) < MIN_ETHERNET_PACKET_SIZE))
		return -EINVAL;

	dev->mtu = new_mtu;
	if (netif_running(dev)) {
		bnx2_netif_stop(bp);

		bnx2_init_nic(bp);

		bnx2_netif_start(bp);
	}
	return 0;
}

#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
static void
poll_bnx2(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);

	disable_irq(bp->pdev->irq);
	bnx2_interrupt(bp->pdev->irq, dev);
	enable_irq(bp->pdev->irq);
}
#endif
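
/* The 5709 bonds either a copper or a SerDes PHY.  Media is taken from
 * the bond ID when it is explicit; otherwise it is decoded from the strap
 * pins (or their software override), whose SerDes values differ between
 * the two PCI functions of the dual-port device.
 */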
static void __devinit
bnx2_get_5709_media(struct bnx2 *bp)
{
	u32 val = REG_RD(bp, BNX2_MISC_DUAL_MEDIA_CTRL);
	u32 bond_id = val & BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID;
	u32 strap;

	if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_C)
		return;
	else if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_S) {
		bp->phy_flags |= PHY_SERDES_FLAG;
		return;
	}

	if (val & BNX2_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE)
		strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL) >> 21;
	else
		strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP) >> 8;

	if (PCI_FUNC(bp->pdev->devfn) == 0) {
		switch (strap) {
		case 0x4:
		case 0x5:
		case 0x6:
			bp->phy_flags |= PHY_SERDES_FLAG;
			return;
		}
	} else {
		switch (strap) {
		case 0x1:
		case 0x2:
		case 0x4:
			bp->phy_flags |= PHY_SERDES_FLAG;
			return;
		}
	}
}
static int __devinit
bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
{
	struct bnx2 *bp;
	unsigned long mem_len;
	int rc;
	u32 reg;

	SET_MODULE_OWNER(dev);
	SET_NETDEV_DEV(dev, &pdev->dev);
	bp = netdev_priv(dev);

	bp->flags = 0;
	bp->phy_flags = 0;

	/* enable device (incl. PCI PM wakeup), and bus-mastering */
	rc = pci_enable_device(pdev);
	if (rc) {
		dev_err(&pdev->dev, "Cannot enable PCI device, aborting.\n");
		goto err_out;
	}

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		dev_err(&pdev->dev,
			"Cannot find PCI device base address, aborting.\n");
		rc = -ENODEV;
		goto err_out_disable;
	}

	rc = pci_request_regions(pdev, DRV_MODULE_NAME);
	if (rc) {
		dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting.\n");
		goto err_out_disable;
	}

	pci_set_master(pdev);

	bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (bp->pm_cap == 0) {
		dev_err(&pdev->dev,
			"Cannot find power management capability, aborting.\n");
		rc = -EIO;
		goto err_out_release;
	}
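
	/* Prefer 64-bit DMA addressing (DAC) when the platform supports it;
	 * otherwise fall back to a 32-bit mask, or fail if even that is
	 * unavailable.
	 */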
	if (pci_set_dma_mask(pdev, DMA_64BIT_MASK) == 0) {
		bp->flags |= USING_DAC_FLAG;
		if (pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK) != 0) {
			dev_err(&pdev->dev,
				"pci_set_consistent_dma_mask failed, aborting.\n");
			rc = -EIO;
			goto err_out_release;
		}
	}
	else if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) != 0) {
		dev_err(&pdev->dev, "System does not support DMA, aborting.\n");
		rc = -EIO;
		goto err_out_release;
	}

	bp->dev = dev;
	bp->pdev = pdev;

	spin_lock_init(&bp->phy_lock);
	INIT_WORK(&bp->reset_task, bnx2_reset_task);

	dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0);
	mem_len = MB_GET_CID_ADDR(TX_TSS_CID + 1);
	dev->mem_end = dev->mem_start + mem_len;
	dev->irq = pdev->irq;

	bp->regview = ioremap_nocache(dev->base_addr, mem_len);

	if (!bp->regview) {
		dev_err(&pdev->dev, "Cannot map register space, aborting.\n");
		rc = -ENOMEM;
		goto err_out_release;
	}

	/* Configure byte swap and enable write to the reg_window registers.
	 * Rely on CPU to do target byte swapping on big endian systems.
	 * The chip's target access swapping will not swap all accesses.
	 */
	pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG,
			       BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
			       BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);

	bnx2_set_power_state(bp, PCI_D0);

	bp->chip_id = REG_RD(bp, BNX2_MISC_ID);

	if (CHIP_NUM(bp) != CHIP_NUM_5709) {
		bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
		if (bp->pcix_cap == 0) {
			dev_err(&pdev->dev,
				"Cannot find PCIX capability, aborting.\n");
			rc = -EIO;
			goto err_out_unmap;
		}
	}

	/* Get bus information. */
	reg = REG_RD(bp, BNX2_PCICFG_MISC_STATUS);
	if (reg & BNX2_PCICFG_MISC_STATUS_PCIX_DET) {
		u32 clkreg;

		bp->flags |= PCIX_FLAG;

		clkreg = REG_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS);

		clkreg &= BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
		switch (clkreg) {
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
			bp->bus_speed_mhz = 133;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
			bp->bus_speed_mhz = 100;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
			bp->bus_speed_mhz = 66;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
			bp->bus_speed_mhz = 50;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
			bp->bus_speed_mhz = 33;
			break;
		}
	}
	else {
		if (reg & BNX2_PCICFG_MISC_STATUS_M66EN)
			bp->bus_speed_mhz = 66;
		else
			bp->bus_speed_mhz = 33;
	}

	if (reg & BNX2_PCICFG_MISC_STATUS_32BIT_DET)
		bp->flags |= PCI_32BIT_FLAG;

	/* 5706A0 may falsely detect SERR and PERR. */
	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
		reg = REG_RD(bp, PCI_COMMAND);
		reg &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
		REG_WR(bp, PCI_COMMAND, reg);
	}
	else if ((CHIP_ID(bp) == CHIP_ID_5706_A1) &&
		!(bp->flags & PCIX_FLAG)) {

		dev_err(&pdev->dev,
			"5706 A1 can only be used in a PCIX bus, aborting.\n");
		rc = -EIO;
		goto err_out_unmap;
	}

	bnx2_init_nvram(bp);

	reg = REG_RD_IND(bp, BNX2_SHM_HDR_SIGNATURE);

	if ((reg & BNX2_SHM_HDR_SIGNATURE_SIG_MASK) ==
	    BNX2_SHM_HDR_SIGNATURE_SIG)
		bp->shmem_base = REG_RD_IND(bp, BNX2_SHM_HDR_ADDR_0);
	else
		bp->shmem_base = HOST_VIEW_SHMEM_BASE;

	/* Get the permanent MAC address.  First we need to make sure the
	 * firmware is actually running.
	 */
	reg = REG_RD_IND(bp, bp->shmem_base + BNX2_DEV_INFO_SIGNATURE);

	if ((reg & BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
	    BNX2_DEV_INFO_SIGNATURE_MAGIC) {
		dev_err(&pdev->dev, "Firmware not running, aborting.\n");
		rc = -ENODEV;
		goto err_out_unmap;
	}

	bp->fw_ver = REG_RD_IND(bp, bp->shmem_base + BNX2_DEV_INFO_BC_REV);

	reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_MAC_UPPER);
	bp->mac_addr[0] = (u8) (reg >> 8);
	bp->mac_addr[1] = (u8) reg;

	reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_MAC_LOWER);
	bp->mac_addr[2] = (u8) (reg >> 24);
	bp->mac_addr[3] = (u8) (reg >> 16);
	bp->mac_addr[4] = (u8) (reg >> 8);
	bp->mac_addr[5] = (u8) reg;

	bp->tx_ring_size = MAX_TX_DESC_CNT;
	bnx2_set_rx_ring_size(bp, 255);

	bp->rx_csum = 1;

	bp->rx_offset = sizeof(struct l2_fhdr) + 2;

	bp->tx_quick_cons_trip_int = 20;
	bp->tx_quick_cons_trip = 20;
	bp->tx_ticks_int = 80;
	bp->tx_ticks = 80;

	bp->rx_quick_cons_trip_int = 6;
	bp->rx_quick_cons_trip = 6;
	bp->rx_ticks_int = 18;
	bp->rx_ticks = 18;

	bp->stats_ticks = 1000000 & 0xffff00;

	bp->timer_interval = HZ;
	bp->current_interval = HZ;

	bp->phy_addr = 1;

	/* Disable WOL support if we are running on a SERDES chip. */
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_get_5709_media(bp);
	else if (CHIP_BOND_ID(bp) & CHIP_BOND_ID_SERDES_BIT)
		bp->phy_flags |= PHY_SERDES_FLAG;

	if (bp->phy_flags & PHY_SERDES_FLAG) {
		bp->flags |= NO_WOL_FLAG;
		if (CHIP_NUM(bp) != CHIP_NUM_5706) {
			bp->phy_addr = 2;
			reg = REG_RD_IND(bp, bp->shmem_base +
					 BNX2_SHARED_HW_CFG_CONFIG);
			if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G)
				bp->phy_flags |= PHY_2_5G_CAPABLE_FLAG;
		}
	} else if (CHIP_NUM(bp) == CHIP_NUM_5706 ||
		   CHIP_NUM(bp) == CHIP_NUM_5708)
		bp->phy_flags |= PHY_CRC_FIX_FLAG;

	if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B1))
		bp->flags |= NO_WOL_FLAG;

	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
		bp->tx_quick_cons_trip_int =
			bp->tx_quick_cons_trip;
		bp->tx_ticks_int = bp->tx_ticks;
		bp->rx_quick_cons_trip_int =
			bp->rx_quick_cons_trip;
		bp->rx_ticks_int = bp->rx_ticks;
		bp->comp_prod_trip_int = bp->comp_prod_trip;
		bp->com_ticks_int = bp->com_ticks;
		bp->cmd_ticks_int = bp->cmd_ticks;
	}

	/* Disable MSI on 5706 if AMD 8132 bridge is found.
	 *
	 * MSI is defined to be 32-bit write.  The 5706 does 64-bit MSI writes
	 * with byte enables disabled on the unused 32-bit word.  This is legal
	 * but causes problems on the AMD 8132 which will eventually stop
	 * responding after a while.
	 *
	 * AMD believes this incompatibility is unique to the 5706, and
	 * prefers to locally disable MSI rather than globally disabling it
	 * using pci_msi_quirk.
	 */
	if (CHIP_NUM(bp) == CHIP_NUM_5706 && disable_msi == 0) {
		struct pci_dev *amd_8132 = NULL;

		while ((amd_8132 = pci_get_device(PCI_VENDOR_ID_AMD,
						  PCI_DEVICE_ID_AMD_8132_BRIDGE,
						  amd_8132))) {
			u8 rev;

			pci_read_config_byte(amd_8132, PCI_REVISION_ID, &rev);
			if (rev >= 0x10 && rev <= 0x13) {
				disable_msi = 1;
				pci_dev_put(amd_8132);
				break;
			}
		}
	}

	bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
	bp->req_line_speed = 0;
	if (bp->phy_flags & PHY_SERDES_FLAG) {
		bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;

		reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG);
		reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
		if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
			bp->autoneg = 0;
			bp->req_line_speed = bp->line_speed = SPEED_1000;
			bp->req_duplex = DUPLEX_FULL;
		}
	}
	else {
		bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
	}

	bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;

	init_timer(&bp->timer);
	bp->timer.expires = RUN_AT(bp->timer_interval);
	bp->timer.data = (unsigned long) bp;
	bp->timer.function = bnx2_timer;

	return 0;

err_out_unmap:
	if (bp->regview) {
		iounmap(bp->regview);
		bp->regview = NULL;
	}

err_out_release:
	pci_release_regions(pdev);

err_out_disable:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

err_out:
	return rc;
}
static int __devinit
bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	static int version_printed = 0;
	struct net_device *dev = NULL;
	struct bnx2 *bp;
	int rc, i;

	if (version_printed++ == 0)
		printk(KERN_INFO "%s", version);

	/* dev zeroed in init_etherdev */
	dev = alloc_etherdev(sizeof(*bp));

	if (!dev)
		return -ENOMEM;

	rc = bnx2_init_board(pdev, dev);
	if (rc < 0) {
		free_netdev(dev);
		return rc;
	}

	dev->open = bnx2_open;
	dev->hard_start_xmit = bnx2_start_xmit;
	dev->stop = bnx2_close;
	dev->get_stats = bnx2_get_stats;
	dev->set_multicast_list = bnx2_set_rx_mode;
	dev->do_ioctl = bnx2_ioctl;
	dev->set_mac_address = bnx2_change_mac_addr;
	dev->change_mtu = bnx2_change_mtu;
	dev->tx_timeout = bnx2_tx_timeout;
	dev->watchdog_timeo = TX_TIMEOUT;
#ifdef BCM_VLAN
	dev->vlan_rx_register = bnx2_vlan_rx_register;
	dev->vlan_rx_kill_vid = bnx2_vlan_rx_kill_vid;
#endif
	dev->poll = bnx2_poll;
	dev->ethtool_ops = &bnx2_ethtool_ops;
	dev->weight = 64;

	bp = netdev_priv(dev);

#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
	dev->poll_controller = poll_bnx2;
#endif

	if ((rc = register_netdev(dev))) {
		dev_err(&pdev->dev, "Cannot register net device\n");
		if (bp->regview)
			iounmap(bp->regview);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
		pci_set_drvdata(pdev, NULL);
		free_netdev(dev);
		return rc;
	}

	pci_set_drvdata(pdev, dev);

	memcpy(dev->dev_addr, bp->mac_addr, 6);
	memcpy(dev->perm_addr, bp->mac_addr, 6);
	bp->name = board_info[ent->driver_data].name;

	printk(KERN_INFO "%s: %s (%c%d) PCI%s %s %dMHz found at mem %lx, "
		"IRQ %d, ",
		dev->name,
		bp->name,
		((CHIP_ID(bp) & 0xf000) >> 12) + 'A',
		((CHIP_ID(bp) & 0x0ff0) >> 4),
		((bp->flags & PCIX_FLAG) ? "-X" : ""),
		((bp->flags & PCI_32BIT_FLAG) ? "32-bit" : "64-bit"),
		bp->bus_speed_mhz,
		dev->base_addr,
		bp->pdev->irq);

	printk("node addr ");
	for (i = 0; i < 6; i++)
		printk("%2.2x", dev->dev_addr[i]);
	printk("\n");

	dev->features |= NETIF_F_SG;
	if (bp->flags & USING_DAC_FLAG)
		dev->features |= NETIF_F_HIGHDMA;
	dev->features |= NETIF_F_IP_CSUM;
#ifdef BCM_VLAN
	dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
#endif
#ifdef BCM_TSO
	dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
#endif

	netif_carrier_off(bp->dev);

	return 0;
}
static void __devexit
bnx2_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	flush_scheduled_work();

	unregister_netdev(dev);

	if (bp->regview)
		iounmap(bp->regview);

	free_netdev(dev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}

static int
bnx2_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);
	u32 reset_code;

	if (!netif_running(dev))
		return 0;

	flush_scheduled_work();
	bnx2_netif_stop(bp);
	netif_device_detach(dev);
	del_timer_sync(&bp->timer);
	if (bp->flags & NO_WOL_FLAG)
		reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
	else if (bp->wol)
		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
	else
		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
	bnx2_reset_chip(bp, reset_code);
	bnx2_free_skbs(bp);
	bnx2_set_power_state(bp, pci_choose_state(pdev, state));
	return 0;
}

static int
bnx2_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	if (!netif_running(dev))
		return 0;

	bnx2_set_power_state(bp, PCI_D0);
	netif_device_attach(dev);
	bnx2_init_nic(bp);
	bnx2_netif_start(bp);
	return 0;
}

static struct pci_driver bnx2_pci_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= bnx2_pci_tbl,
	.probe		= bnx2_init_one,
	.remove		= __devexit_p(bnx2_remove_one),
	.suspend	= bnx2_suspend,
	.resume		= bnx2_resume,
};

static int __init bnx2_init(void)
{
	return pci_register_driver(&bnx2_pci_driver);
}

static void __exit bnx2_cleanup(void)
{
	pci_unregister_driver(&bnx2_pci_driver);
}

module_init(bnx2_init);
module_exit(bnx2_cleanup);