[BNX2]: Add ipv6 TSO and checksum for 5709.
/* bnx2.c: Broadcom NX2 network driver.
 *
 * Copyright (c) 2004, 2005, 2006 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Written by: Michael Chan  (mchan@broadcom.com)
 */
#include <linux/module.h>
#include <linux/moduleparam.h>

#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <asm/bitops.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <asm/page.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#ifdef NETIF_F_HW_VLAN_TX
#include <linux/if_vlan.h>
#define BCM_VLAN 1
#endif
#include <net/ip.h>
#include <net/tcp.h>
#include <net/checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/prefetch.h>
#include <linux/cache.h>
#include <linux/zlib.h>

#include "bnx2.h"
#include "bnx2_fw.h"
#include "bnx2_fw2.h"
#define DRV_MODULE_NAME		"bnx2"
#define PFX DRV_MODULE_NAME	": "
#define DRV_MODULE_VERSION	"1.5.8"
#define DRV_MODULE_RELDATE	"April 24, 2007"

#define RUN_AT(x) (jiffies + (x))

/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT  (5*HZ)

static const char version[] __devinitdata =
	"Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708 Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

static int disable_msi = 0;

module_param(disable_msi, int, 0);
MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
typedef enum {
	BCM5706 = 0,
	NC370T,
	NC370I,
	BCM5706S,
	NC370F,
	BCM5708,
	BCM5708S,
	BCM5709,
} board_t;
/* indexed by board_t, above */
static const struct {
	char *name;
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM5706 1000Base-T" },
	{ "HP NC370T Multifunction Gigabit Server Adapter" },
	{ "HP NC370i Multifunction Gigabit Server Adapter" },
	{ "Broadcom NetXtreme II BCM5706 1000Base-SX" },
	{ "HP NC370F Multifunction Gigabit Server Adapter" },
	{ "Broadcom NetXtreme II BCM5708 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5708 1000Base-SX" },
	{ "Broadcom NetXtreme II BCM5709 1000Base-T" },
	};
static struct pci_device_id bnx2_pci_tbl[] = {
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
	  PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709 },
	{ 0, }
};
static struct flash_spec flash_table[] =
{
	/* Slow EEPROM */
	{0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
	 1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - slow"},
	/* Expansion entry 0001 */
	{0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0001"},
	/* Saifun SA25F010 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
	 "Non-buffered flash (128kB)"},
	/* Saifun SA25F020 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
	 "Non-buffered flash (256kB)"},
	/* Expansion entry 0100 */
	{0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0100"},
	/* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
	{0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
	 0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
	 "Entry 0101: ST M45PE10 (128kB non-buffered)"},
	/* Entry 0110: ST M45PE20 (non-buffered flash)*/
	{0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
	 0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
	 "Entry 0110: ST M45PE20 (256kB non-buffered)"},
	/* Saifun SA25F005 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
	 "Non-buffered flash (64kB)"},
	/* Fast EEPROM */
	{0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
	 1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - fast"},
	/* Expansion entry 1001 */
	{0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1001"},
	/* Expansion entry 1010 */
	{0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1010"},
	/* ATMEL AT45DB011B (buffered flash) */
	{0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
	 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
	 "Buffered flash (128kB)"},
	/* Expansion entry 1100 */
	{0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1100"},
	/* Expansion entry 1101 */
	{0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1101"},
	/* Atmel Expansion entry 1110 */
	{0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
	 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1110 (Atmel)"},
	/* ATMEL AT45DB021B (buffered flash) */
	{0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
	 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
	 "Buffered flash (256kB)"},
};
MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
static inline u32 bnx2_tx_avail(struct bnx2 *bp)
{
	u32 diff;

	smp_mb();

	/* The ring uses 256 indices for 255 entries, one of them
	 * needs to be skipped.
	 */
	diff = bp->tx_prod - bp->tx_cons;
	if (unlikely(diff >= TX_DESC_CNT)) {
		diff &= 0xffff;
		if (diff == TX_DESC_CNT)
			diff = MAX_TX_DESC_CNT;
	}
	return (bp->tx_ring_size - diff);
}
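
/* Indirect register access: the target offset is programmed into the
 * PCI config register window first, then the data is read or written
 * through the window register.
 */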
static u32
bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
{
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
	return (REG_RD(bp, BNX2_PCICFG_REG_WINDOW));
}

static void
bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
{
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
}
static void
bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
{
	offset += cid_addr;
	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		int i;

		REG_WR(bp, BNX2_CTX_CTX_DATA, val);
		REG_WR(bp, BNX2_CTX_CTX_CTRL,
		       offset | BNX2_CTX_CTX_CTRL_WRITE_REQ);
		for (i = 0; i < 5; i++) {
			u32 val;
			val = REG_RD(bp, BNX2_CTX_CTX_CTRL);
			if ((val & BNX2_CTX_CTX_CTRL_WRITE_REQ) == 0)
				break;
			udelay(5);
		}
	} else {
		REG_WR(bp, BNX2_CTX_DATA_ADR, offset);
		REG_WR(bp, BNX2_CTX_DATA, val);
	}
}
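
/* MDIO access to the PHY.  Hardware auto-polling of the link must be
 * paused around a manual access; the command is then issued through
 * EMAC_MDIO_COMM and the START_BUSY bit is polled (up to 50 times)
 * until the chip clears it, otherwise the access fails with -EBUSY.
 */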
static int
bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
{
	u32 val1;
	int i, ret;

	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	val1 = (bp->phy_addr << 21) | (reg << 16) |
		BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
		BNX2_EMAC_MDIO_COMM_START_BUSY;
	REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

	for (i = 0; i < 50; i++) {
		udelay(10);

		val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
			udelay(5);

			val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
			val1 &= BNX2_EMAC_MDIO_COMM_DATA;

			break;
		}
	}

	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
		*val = 0x0;
		ret = -EBUSY;
	}
	else {
		*val = val1;
		ret = 0;
	}

	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	return ret;
}
static int
bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
{
	u32 val1;
	int i, ret;

	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	val1 = (bp->phy_addr << 21) | (reg << 16) | val |
		BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
		BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
	REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

	for (i = 0; i < 50; i++) {
		udelay(10);

		val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
			udelay(5);
			break;
		}
	}

	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
		ret = -EBUSY;
	else
		ret = 0;

	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	return ret;
}
static void
bnx2_disable_int(struct bnx2 *bp)
{
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
	       BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
	REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
}

static void
bnx2_enable_int(struct bnx2 *bp)
{
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
	       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
	       BNX2_PCICFG_INT_ACK_CMD_MASK_INT | bp->last_status_idx);

	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
	       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | bp->last_status_idx);

	REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
}
static void
bnx2_disable_int_sync(struct bnx2 *bp)
{
	atomic_inc(&bp->intr_sem);
	bnx2_disable_int(bp);
	synchronize_irq(bp->pdev->irq);
}

static void
bnx2_netif_stop(struct bnx2 *bp)
{
	bnx2_disable_int_sync(bp);
	if (netif_running(bp->dev)) {
		netif_poll_disable(bp->dev);
		netif_tx_disable(bp->dev);
		bp->dev->trans_start = jiffies;	/* prevent tx timeout */
	}
}

static void
bnx2_netif_start(struct bnx2 *bp)
{
	if (atomic_dec_and_test(&bp->intr_sem)) {
		if (netif_running(bp->dev)) {
			netif_wake_queue(bp->dev);
			netif_poll_enable(bp->dev);
			bnx2_enable_int(bp);
		}
	}
}
static void
bnx2_free_mem(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->ctx_pages; i++) {
		if (bp->ctx_blk[i]) {
			pci_free_consistent(bp->pdev, BCM_PAGE_SIZE,
					    bp->ctx_blk[i],
					    bp->ctx_blk_mapping[i]);
			bp->ctx_blk[i] = NULL;
		}
	}
	if (bp->status_blk) {
		pci_free_consistent(bp->pdev, bp->status_stats_size,
				    bp->status_blk, bp->status_blk_mapping);
		bp->status_blk = NULL;
		bp->stats_blk = NULL;
	}
	if (bp->tx_desc_ring) {
		pci_free_consistent(bp->pdev,
				    sizeof(struct tx_bd) * TX_DESC_CNT,
				    bp->tx_desc_ring, bp->tx_desc_mapping);
		bp->tx_desc_ring = NULL;
	}
	kfree(bp->tx_buf_ring);
	bp->tx_buf_ring = NULL;
	for (i = 0; i < bp->rx_max_ring; i++) {
		if (bp->rx_desc_ring[i])
			pci_free_consistent(bp->pdev,
					    sizeof(struct rx_bd) * RX_DESC_CNT,
					    bp->rx_desc_ring[i],
					    bp->rx_desc_mapping[i]);
		bp->rx_desc_ring[i] = NULL;
	}
	vfree(bp->rx_buf_ring);
	bp->rx_buf_ring = NULL;
}
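
/* Memory layout: the tx shadow ring is kzalloc'ed, the rx shadow ring
 * is vmalloc'ed (it can span many pages of struct sw_bd entries), and
 * the descriptor rings plus the combined status/statistics block come
 * from coherent DMA memory.  The 5709 additionally needs host context
 * memory, one coherent page per bp->ctx_blk[] entry.
 */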
static int
bnx2_alloc_mem(struct bnx2 *bp)
{
	int i, status_blk_size;

	bp->tx_buf_ring = kzalloc(sizeof(struct sw_bd) * TX_DESC_CNT,
				  GFP_KERNEL);
	if (bp->tx_buf_ring == NULL)
		return -ENOMEM;

	bp->tx_desc_ring = pci_alloc_consistent(bp->pdev,
						sizeof(struct tx_bd) *
						TX_DESC_CNT,
						&bp->tx_desc_mapping);
	if (bp->tx_desc_ring == NULL)
		goto alloc_mem_err;

	bp->rx_buf_ring = vmalloc(sizeof(struct sw_bd) * RX_DESC_CNT *
				  bp->rx_max_ring);
	if (bp->rx_buf_ring == NULL)
		goto alloc_mem_err;

	memset(bp->rx_buf_ring, 0, sizeof(struct sw_bd) * RX_DESC_CNT *
				   bp->rx_max_ring);

	for (i = 0; i < bp->rx_max_ring; i++) {
		bp->rx_desc_ring[i] =
			pci_alloc_consistent(bp->pdev,
					     sizeof(struct rx_bd) * RX_DESC_CNT,
					     &bp->rx_desc_mapping[i]);
		if (bp->rx_desc_ring[i] == NULL)
			goto alloc_mem_err;
	}

	/* Combine status and statistics blocks into one allocation. */
	status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
	bp->status_stats_size = status_blk_size +
				sizeof(struct statistics_block);

	bp->status_blk = pci_alloc_consistent(bp->pdev, bp->status_stats_size,
					      &bp->status_blk_mapping);
	if (bp->status_blk == NULL)
		goto alloc_mem_err;

	memset(bp->status_blk, 0, bp->status_stats_size);

	bp->stats_blk = (void *) ((unsigned long) bp->status_blk +
				  status_blk_size);

	bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		bp->ctx_pages = 0x2000 / BCM_PAGE_SIZE;
		if (bp->ctx_pages == 0)
			bp->ctx_pages = 1;
		for (i = 0; i < bp->ctx_pages; i++) {
			bp->ctx_blk[i] = pci_alloc_consistent(bp->pdev,
						BCM_PAGE_SIZE,
						&bp->ctx_blk_mapping[i]);
			if (bp->ctx_blk[i] == NULL)
				goto alloc_mem_err;
		}
	}
	return 0;

alloc_mem_err:
	bnx2_free_mem(bp);
	return -ENOMEM;
}
static void
bnx2_report_fw_link(struct bnx2 *bp)
{
	u32 fw_link_status = 0;

	if (bp->link_up) {
		u32 bmsr;

		switch (bp->line_speed) {
		case SPEED_10:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_10HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_10FULL;
			break;
		case SPEED_100:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_100HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_100FULL;
			break;
		case SPEED_1000:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_1000HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_1000FULL;
			break;
		case SPEED_2500:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_2500HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_2500FULL;
			break;
		}

		fw_link_status |= BNX2_LINK_STATUS_LINK_UP;

		if (bp->autoneg) {
			fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;

			bnx2_read_phy(bp, MII_BMSR, &bmsr);
			bnx2_read_phy(bp, MII_BMSR, &bmsr);

			if (!(bmsr & BMSR_ANEGCOMPLETE) ||
			    bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)
				fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
			else
				fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
		}
	}
	else
		fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;

	REG_WR_IND(bp, bp->shmem_base + BNX2_LINK_STATUS, fw_link_status);
}
static void
bnx2_report_link(struct bnx2 *bp)
{
	if (bp->link_up) {
		netif_carrier_on(bp->dev);
		printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);

		printk("%d Mbps ", bp->line_speed);

		if (bp->duplex == DUPLEX_FULL)
			printk("full duplex");
		else
			printk("half duplex");

		if (bp->flow_ctrl) {
			if (bp->flow_ctrl & FLOW_CTRL_RX) {
				printk(", receive ");
				if (bp->flow_ctrl & FLOW_CTRL_TX)
					printk("& transmit ");
			}
			else {
				printk(", transmit ");
			}
			printk("flow control ON");
		}
		printk("\n");
	}
	else {
		netif_carrier_off(bp->dev);
		printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
	}

	bnx2_report_fw_link(bp);
}
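
/* Resolve tx/rx pause from the local and remote autoneg advertisements
 * per Table 28B-3 of the 802.3ab-1999 spec.  1000Base-X pause bits are
 * first translated to their copper (ADVERTISE_PAUSE_*) equivalents so
 * one resolution table covers both serdes and copper.
 */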
static void
bnx2_resolve_flow_ctrl(struct bnx2 *bp)
{
	u32 local_adv, remote_adv;

	bp->flow_ctrl = 0;
	if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
	    (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {

		if (bp->duplex == DUPLEX_FULL) {
			bp->flow_ctrl = bp->req_flow_ctrl;
		}
		return;
	}

	if (bp->duplex != DUPLEX_FULL) {
		return;
	}

	if ((bp->phy_flags & PHY_SERDES_FLAG) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5708)) {
		u32 val;

		bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
		if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_TX;
		if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_RX;
		return;
	}

	bnx2_read_phy(bp, MII_ADVERTISE, &local_adv);
	bnx2_read_phy(bp, MII_LPA, &remote_adv);

	if (bp->phy_flags & PHY_SERDES_FLAG) {
		u32 new_local_adv = 0;
		u32 new_remote_adv = 0;

		if (local_adv & ADVERTISE_1000XPAUSE)
			new_local_adv |= ADVERTISE_PAUSE_CAP;
		if (local_adv & ADVERTISE_1000XPSE_ASYM)
			new_local_adv |= ADVERTISE_PAUSE_ASYM;
		if (remote_adv & ADVERTISE_1000XPAUSE)
			new_remote_adv |= ADVERTISE_PAUSE_CAP;
		if (remote_adv & ADVERTISE_1000XPSE_ASYM)
			new_remote_adv |= ADVERTISE_PAUSE_ASYM;

		local_adv = new_local_adv;
		remote_adv = new_remote_adv;
	}

	/* See Table 28B-3 of 802.3ab-1999 spec. */
	if (local_adv & ADVERTISE_PAUSE_CAP) {
		if (local_adv & ADVERTISE_PAUSE_ASYM) {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
			else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
				bp->flow_ctrl = FLOW_CTRL_RX;
			}
		}
		else {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
		}
	}
	else if (local_adv & ADVERTISE_PAUSE_ASYM) {
		if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
		    (remote_adv & ADVERTISE_PAUSE_ASYM)) {

			bp->flow_ctrl = FLOW_CTRL_TX;
		}
	}
}
static int
bnx2_5708s_linkup(struct bnx2 *bp)
{
	u32 val;

	bp->link_up = 1;
	bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
	switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
		case BCM5708S_1000X_STAT1_SPEED_10:
			bp->line_speed = SPEED_10;
			break;
		case BCM5708S_1000X_STAT1_SPEED_100:
			bp->line_speed = SPEED_100;
			break;
		case BCM5708S_1000X_STAT1_SPEED_1G:
			bp->line_speed = SPEED_1000;
			break;
		case BCM5708S_1000X_STAT1_SPEED_2G5:
			bp->line_speed = SPEED_2500;
			break;
	}
	if (val & BCM5708S_1000X_STAT1_FD)
		bp->duplex = DUPLEX_FULL;
	else
		bp->duplex = DUPLEX_HALF;

	return 0;
}
static int
bnx2_5706s_linkup(struct bnx2 *bp)
{
	u32 bmcr, local_adv, remote_adv, common;

	bp->link_up = 1;
	bp->line_speed = SPEED_1000;

	bnx2_read_phy(bp, MII_BMCR, &bmcr);
	if (bmcr & BMCR_FULLDPLX) {
		bp->duplex = DUPLEX_FULL;
	}
	else {
		bp->duplex = DUPLEX_HALF;
	}

	if (!(bmcr & BMCR_ANENABLE)) {
		return 0;
	}

	bnx2_read_phy(bp, MII_ADVERTISE, &local_adv);
	bnx2_read_phy(bp, MII_LPA, &remote_adv);

	common = local_adv & remote_adv;
	if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {

		if (common & ADVERTISE_1000XFULL) {
			bp->duplex = DUPLEX_FULL;
		}
		else {
			bp->duplex = DUPLEX_HALF;
		}
	}

	return 0;
}
static int
bnx2_copper_linkup(struct bnx2 *bp)
{
	u32 bmcr;

	bnx2_read_phy(bp, MII_BMCR, &bmcr);
	if (bmcr & BMCR_ANENABLE) {
		u32 local_adv, remote_adv, common;

		bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
		bnx2_read_phy(bp, MII_STAT1000, &remote_adv);

		common = local_adv & (remote_adv >> 2);
		if (common & ADVERTISE_1000FULL) {
			bp->line_speed = SPEED_1000;
			bp->duplex = DUPLEX_FULL;
		}
		else if (common & ADVERTISE_1000HALF) {
			bp->line_speed = SPEED_1000;
			bp->duplex = DUPLEX_HALF;
		}
		else {
			bnx2_read_phy(bp, MII_ADVERTISE, &local_adv);
			bnx2_read_phy(bp, MII_LPA, &remote_adv);

			common = local_adv & remote_adv;
			if (common & ADVERTISE_100FULL) {
				bp->line_speed = SPEED_100;
				bp->duplex = DUPLEX_FULL;
			}
			else if (common & ADVERTISE_100HALF) {
				bp->line_speed = SPEED_100;
				bp->duplex = DUPLEX_HALF;
			}
			else if (common & ADVERTISE_10FULL) {
				bp->line_speed = SPEED_10;
				bp->duplex = DUPLEX_FULL;
			}
			else if (common & ADVERTISE_10HALF) {
				bp->line_speed = SPEED_10;
				bp->duplex = DUPLEX_HALF;
			}
			else {
				bp->line_speed = 0;
				bp->link_up = 0;
			}
		}
	}
	else {
		if (bmcr & BMCR_SPEED100) {
			bp->line_speed = SPEED_100;
		}
		else {
			bp->line_speed = SPEED_10;
		}
		if (bmcr & BMCR_FULLDPLX) {
			bp->duplex = DUPLEX_FULL;
		}
		else {
			bp->duplex = DUPLEX_HALF;
		}
	}

	return 0;
}
static int
bnx2_set_mac_link(struct bnx2 *bp)
{
	u32 val;

	REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
	if (bp->link_up && (bp->line_speed == SPEED_1000) &&
	    (bp->duplex == DUPLEX_HALF)) {
		REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
	}

	/* Configure the EMAC mode register. */
	val = REG_RD(bp, BNX2_EMAC_MODE);

	val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
		BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
		BNX2_EMAC_MODE_25G_MODE);

	if (bp->link_up) {
		switch (bp->line_speed) {
			case SPEED_10:
				if (CHIP_NUM(bp) != CHIP_NUM_5706) {
					val |= BNX2_EMAC_MODE_PORT_MII_10M;
					break;
				}
				/* fall through */
			case SPEED_100:
				val |= BNX2_EMAC_MODE_PORT_MII;
				break;
			case SPEED_2500:
				val |= BNX2_EMAC_MODE_25G_MODE;
				/* fall through */
			case SPEED_1000:
				val |= BNX2_EMAC_MODE_PORT_GMII;
				break;
		}
	}
	else {
		val |= BNX2_EMAC_MODE_PORT_GMII;
	}

	/* Set the MAC to operate in the appropriate duplex mode. */
	if (bp->duplex == DUPLEX_HALF)
		val |= BNX2_EMAC_MODE_HALF_DUPLEX;
	REG_WR(bp, BNX2_EMAC_MODE, val);

	/* Enable/disable rx PAUSE. */
	bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_RX)
		bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);

	/* Enable/disable tx PAUSE. */
	val = REG_RD(bp, BNX2_EMAC_TX_MODE);
	val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_TX)
		val |= BNX2_EMAC_TX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_TX_MODE, val);

	/* Acknowledge the interrupt. */
	REG_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);

	return 0;
}
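
/* Central link state update: read BMSR twice (the latched link-down
 * bit needs a second read to reflect the current state), dispatch to
 * the chip-specific linkup handler, resolve flow control, and finally
 * reprogram the MAC to match.
 */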
static int
bnx2_set_link(struct bnx2 *bp)
{
	u32 bmsr;
	u8 link_up;

	if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) {
		bp->link_up = 1;
		return 0;
	}

	link_up = bp->link_up;

	bnx2_read_phy(bp, MII_BMSR, &bmsr);
	bnx2_read_phy(bp, MII_BMSR, &bmsr);

	if ((bp->phy_flags & PHY_SERDES_FLAG) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5706)) {
		u32 val;

		val = REG_RD(bp, BNX2_EMAC_STATUS);
		if (val & BNX2_EMAC_STATUS_LINK)
			bmsr |= BMSR_LSTATUS;
		else
			bmsr &= ~BMSR_LSTATUS;
	}

	if (bmsr & BMSR_LSTATUS) {
		bp->link_up = 1;

		if (bp->phy_flags & PHY_SERDES_FLAG) {
			if (CHIP_NUM(bp) == CHIP_NUM_5706)
				bnx2_5706s_linkup(bp);
			else if (CHIP_NUM(bp) == CHIP_NUM_5708)
				bnx2_5708s_linkup(bp);
		}
		else {
			bnx2_copper_linkup(bp);
		}
		bnx2_resolve_flow_ctrl(bp);
	}
	else {
		if ((bp->phy_flags & PHY_SERDES_FLAG) &&
		    (bp->autoneg & AUTONEG_SPEED)) {

			u32 bmcr;

			bnx2_read_phy(bp, MII_BMCR, &bmcr);
			bmcr &= ~BCM5708S_BMCR_FORCE_2500;
			if (!(bmcr & BMCR_ANENABLE)) {
				bnx2_write_phy(bp, MII_BMCR, bmcr |
					BMCR_ANENABLE);
			}
		}
		bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
		bp->link_up = 0;
	}

	if (bp->link_up != link_up) {
		bnx2_report_link(bp);
	}

	bnx2_set_mac_link(bp);

	return 0;
}
static int
bnx2_reset_phy(struct bnx2 *bp)
{
	int i;
	u32 reg;

	bnx2_write_phy(bp, MII_BMCR, BMCR_RESET);

#define PHY_RESET_MAX_WAIT 100
	for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
		udelay(10);

		bnx2_read_phy(bp, MII_BMCR, &reg);
		if (!(reg & BMCR_RESET)) {
			udelay(20);
			break;
		}
	}
	if (i == PHY_RESET_MAX_WAIT) {
		return -EBUSY;
	}
	return 0;
}
static u32
bnx2_phy_get_pause_adv(struct bnx2 *bp)
{
	u32 adv = 0;

	if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
	    (FLOW_CTRL_RX | FLOW_CTRL_TX)) {

		if (bp->phy_flags & PHY_SERDES_FLAG) {
			adv = ADVERTISE_1000XPAUSE;
		}
		else {
			adv = ADVERTISE_PAUSE_CAP;
		}
	}
	else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
		if (bp->phy_flags & PHY_SERDES_FLAG) {
			adv = ADVERTISE_1000XPSE_ASYM;
		}
		else {
			adv = ADVERTISE_PAUSE_ASYM;
		}
	}
	else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
		if (bp->phy_flags & PHY_SERDES_FLAG) {
			adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
		}
		else {
			adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
		}
	}
	return adv;
}
static int
bnx2_setup_serdes_phy(struct bnx2 *bp)
{
	u32 adv, bmcr, up1;
	u32 new_adv = 0;

	if (!(bp->autoneg & AUTONEG_SPEED)) {
		u32 new_bmcr;
		int force_link_down = 0;

		bnx2_read_phy(bp, MII_ADVERTISE, &adv);
		adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);

		bnx2_read_phy(bp, MII_BMCR, &bmcr);
		new_bmcr = bmcr & ~(BMCR_ANENABLE | BCM5708S_BMCR_FORCE_2500);
		new_bmcr |= BMCR_SPEED1000;
		if (bp->req_line_speed == SPEED_2500) {
			new_bmcr |= BCM5708S_BMCR_FORCE_2500;
			bnx2_read_phy(bp, BCM5708S_UP1, &up1);
			if (!(up1 & BCM5708S_UP1_2G5)) {
				up1 |= BCM5708S_UP1_2G5;
				bnx2_write_phy(bp, BCM5708S_UP1, up1);
				force_link_down = 1;
			}
		} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
			bnx2_read_phy(bp, BCM5708S_UP1, &up1);
			if (up1 & BCM5708S_UP1_2G5) {
				up1 &= ~BCM5708S_UP1_2G5;
				bnx2_write_phy(bp, BCM5708S_UP1, up1);
				force_link_down = 1;
			}
		}

		if (bp->req_duplex == DUPLEX_FULL) {
			adv |= ADVERTISE_1000XFULL;
			new_bmcr |= BMCR_FULLDPLX;
		}
		else {
			adv |= ADVERTISE_1000XHALF;
			new_bmcr &= ~BMCR_FULLDPLX;
		}
		if ((new_bmcr != bmcr) || (force_link_down)) {
			/* Force a link down visible on the other side */
			if (bp->link_up) {
				bnx2_write_phy(bp, MII_ADVERTISE, adv &
					       ~(ADVERTISE_1000XFULL |
						 ADVERTISE_1000XHALF));
				bnx2_write_phy(bp, MII_BMCR, bmcr |
					BMCR_ANRESTART | BMCR_ANENABLE);

				bp->link_up = 0;
				netif_carrier_off(bp->dev);
				bnx2_write_phy(bp, MII_BMCR, new_bmcr);
				bnx2_report_link(bp);
			}
			bnx2_write_phy(bp, MII_ADVERTISE, adv);
			bnx2_write_phy(bp, MII_BMCR, new_bmcr);
		}
		return 0;
	}

	if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) {
		bnx2_read_phy(bp, BCM5708S_UP1, &up1);
		up1 |= BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, BCM5708S_UP1, up1);
	}

	if (bp->advertising & ADVERTISED_1000baseT_Full)
		new_adv |= ADVERTISE_1000XFULL;

	new_adv |= bnx2_phy_get_pause_adv(bp);

	bnx2_read_phy(bp, MII_ADVERTISE, &adv);
	bnx2_read_phy(bp, MII_BMCR, &bmcr);

	bp->serdes_an_pending = 0;
	if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
		/* Force a link down visible on the other side */
		if (bp->link_up) {
			bnx2_write_phy(bp, MII_BMCR, BMCR_LOOPBACK);
			spin_unlock_bh(&bp->phy_lock);
			msleep(20);
			spin_lock_bh(&bp->phy_lock);
		}

		bnx2_write_phy(bp, MII_ADVERTISE, new_adv);
		bnx2_write_phy(bp, MII_BMCR, bmcr | BMCR_ANRESTART |
			BMCR_ANENABLE);
		/* Speed up link-up time when the link partner
		 * does not autonegotiate which is very common
		 * in blade servers.  Some blade servers use
		 * IPMI for keyboard input and it's important
		 * to minimize link disruptions.  Autoneg. involves
		 * exchanging base pages plus 3 next pages and
		 * normally completes in about 120 msec.
		 */
		bp->current_interval = SERDES_AN_TIMEOUT;
		bp->serdes_an_pending = 1;
		mod_timer(&bp->timer, jiffies + bp->current_interval);
	}

	return 0;
}
#define ETHTOOL_ALL_FIBRE_SPEED						\
	(ADVERTISED_1000baseT_Full)

#define ETHTOOL_ALL_COPPER_SPEED					\
	(ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |		\
	ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |		\
	ADVERTISED_1000baseT_Full)

#define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
	ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)

#define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)
static int
bnx2_setup_copper_phy(struct bnx2 *bp)
{
	u32 bmcr;
	u32 new_bmcr;

	bnx2_read_phy(bp, MII_BMCR, &bmcr);

	if (bp->autoneg & AUTONEG_SPEED) {
		u32 adv_reg, adv1000_reg;
		u32 new_adv_reg = 0;
		u32 new_adv1000_reg = 0;

		bnx2_read_phy(bp, MII_ADVERTISE, &adv_reg);
		adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
			ADVERTISE_PAUSE_ASYM);

		bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
		adv1000_reg &= PHY_ALL_1000_SPEED;

		if (bp->advertising & ADVERTISED_10baseT_Half)
			new_adv_reg |= ADVERTISE_10HALF;
		if (bp->advertising & ADVERTISED_10baseT_Full)
			new_adv_reg |= ADVERTISE_10FULL;
		if (bp->advertising & ADVERTISED_100baseT_Half)
			new_adv_reg |= ADVERTISE_100HALF;
		if (bp->advertising & ADVERTISED_100baseT_Full)
			new_adv_reg |= ADVERTISE_100FULL;
		if (bp->advertising & ADVERTISED_1000baseT_Full)
			new_adv1000_reg |= ADVERTISE_1000FULL;

		new_adv_reg |= ADVERTISE_CSMA;

		new_adv_reg |= bnx2_phy_get_pause_adv(bp);

		if ((adv1000_reg != new_adv1000_reg) ||
			(adv_reg != new_adv_reg) ||
			((bmcr & BMCR_ANENABLE) == 0)) {

			bnx2_write_phy(bp, MII_ADVERTISE, new_adv_reg);
			bnx2_write_phy(bp, MII_CTRL1000, new_adv1000_reg);
			bnx2_write_phy(bp, MII_BMCR, BMCR_ANRESTART |
				BMCR_ANENABLE);
		}
		else if (bp->link_up) {
			/* Flow ctrl may have changed from auto to forced */
			/* or vice-versa. */

			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	new_bmcr = 0;
	if (bp->req_line_speed == SPEED_100) {
		new_bmcr |= BMCR_SPEED100;
	}
	if (bp->req_duplex == DUPLEX_FULL) {
		new_bmcr |= BMCR_FULLDPLX;
	}
	if (new_bmcr != bmcr) {
		u32 bmsr;

		bnx2_read_phy(bp, MII_BMSR, &bmsr);
		bnx2_read_phy(bp, MII_BMSR, &bmsr);

		if (bmsr & BMSR_LSTATUS) {
			/* Force link down */
			bnx2_write_phy(bp, MII_BMCR, BMCR_LOOPBACK);
			spin_unlock_bh(&bp->phy_lock);
			msleep(50);
			spin_lock_bh(&bp->phy_lock);

			bnx2_read_phy(bp, MII_BMSR, &bmsr);
			bnx2_read_phy(bp, MII_BMSR, &bmsr);
		}

		bnx2_write_phy(bp, MII_BMCR, new_bmcr);

		/* Normally, the new speed is setup after the link has
		 * gone down and up again.  In some cases, link will not go
		 * down so we need to set up the new speed here.
		 */
		if (bmsr & BMSR_LSTATUS) {
			bp->line_speed = bp->req_line_speed;
			bp->duplex = bp->req_duplex;
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
	}
	return 0;
}
static int
bnx2_setup_phy(struct bnx2 *bp)
{
	if (bp->loopback == MAC_LOOPBACK)
		return 0;

	if (bp->phy_flags & PHY_SERDES_FLAG) {
		return (bnx2_setup_serdes_phy(bp));
	}
	else {
		return (bnx2_setup_copper_phy(bp));
	}
}
static int
bnx2_init_5708s_phy(struct bnx2 *bp)
{
	u32 val;

	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
	bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);

	bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
	val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);

	bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
	val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);

	if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) {
		bnx2_read_phy(bp, BCM5708S_UP1, &val);
		val |= BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, BCM5708S_UP1, val);
	}

	if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
		/* increase tx signal amplitude */
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
			       BCM5708S_BLK_ADDR_TX_MISC);
		bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
		val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
		bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
	}

	val = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG) &
	      BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;

	if (val) {
		u32 is_backplane;

		is_backplane = REG_RD_IND(bp, bp->shmem_base +
					  BNX2_SHARED_HW_CFG_CONFIG);
		if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_TX_MISC);
			bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_DIG);
		}
	}
	return 0;
}
static int
bnx2_init_5706s_phy(struct bnx2 *bp)
{
	bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;

	if (CHIP_NUM(bp) == CHIP_NUM_5706)
		REG_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300);

	if (bp->dev->mtu > 1500) {
		u32 val;

		/* Set extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);

		bnx2_write_phy(bp, 0x1c, 0x6c00);
		bnx2_read_phy(bp, 0x1c, &val);
		bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
	}
	else {
		u32 val;

		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val & ~0x4007);

		bnx2_write_phy(bp, 0x1c, 0x6c00);
		bnx2_read_phy(bp, 0x1c, &val);
		bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
	}

	return 0;
}
static int
bnx2_init_copper_phy(struct bnx2 *bp)
{
	u32 val;

	if (bp->phy_flags & PHY_CRC_FIX_FLAG) {
		bnx2_write_phy(bp, 0x18, 0x0c00);
		bnx2_write_phy(bp, 0x17, 0x000a);
		bnx2_write_phy(bp, 0x15, 0x310b);
		bnx2_write_phy(bp, 0x17, 0x201f);
		bnx2_write_phy(bp, 0x15, 0x9506);
		bnx2_write_phy(bp, 0x17, 0x401f);
		bnx2_write_phy(bp, 0x15, 0x14e2);
		bnx2_write_phy(bp, 0x18, 0x0400);
	}

	if (bp->phy_flags & PHY_DIS_EARLY_DAC_FLAG) {
		bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS,
			       MII_BNX2_DSP_EXPAND_REG | 0x8);
		bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
		val &= ~(1 << 8);
		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val);
	}

	if (bp->dev->mtu > 1500) {
		/* Set extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val | 0x4000);

		bnx2_read_phy(bp, 0x10, &val);
		bnx2_write_phy(bp, 0x10, val | 0x1);
	}
	else {
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val & ~0x4007);

		bnx2_read_phy(bp, 0x10, &val);
		bnx2_write_phy(bp, 0x10, val & ~0x1);
	}

	/* ethernet@wirespeed */
	bnx2_write_phy(bp, 0x18, 0x7007);
	bnx2_read_phy(bp, 0x18, &val);
	bnx2_write_phy(bp, 0x18, val | (1 << 15) | (1 << 4));
	return 0;
}
static int
bnx2_init_phy(struct bnx2 *bp)
{
	u32 val;
	int rc = 0;

	bp->phy_flags &= ~PHY_INT_MODE_MASK_FLAG;
	bp->phy_flags |= PHY_INT_MODE_LINK_READY_FLAG;

	REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);

	bnx2_reset_phy(bp);

	bnx2_read_phy(bp, MII_PHYSID1, &val);
	bp->phy_id = val << 16;
	bnx2_read_phy(bp, MII_PHYSID2, &val);
	bp->phy_id |= val & 0xffff;

	if (bp->phy_flags & PHY_SERDES_FLAG) {
		if (CHIP_NUM(bp) == CHIP_NUM_5706)
			rc = bnx2_init_5706s_phy(bp);
		else if (CHIP_NUM(bp) == CHIP_NUM_5708)
			rc = bnx2_init_5708s_phy(bp);
	}
	else {
		rc = bnx2_init_copper_phy(bp);
	}

	bnx2_setup_phy(bp);

	return rc;
}
static int
bnx2_set_mac_loopback(struct bnx2 *bp)
{
	u32 mac_mode;

	mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
	mac_mode &= ~BNX2_EMAC_MODE_PORT;
	mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
	REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
	bp->link_up = 1;
	return 0;
}

static int bnx2_test_link(struct bnx2 *);
static int
bnx2_set_phy_loopback(struct bnx2 *bp)
{
	u32 mac_mode;
	int rc, i;

	spin_lock_bh(&bp->phy_lock);
	rc = bnx2_write_phy(bp, MII_BMCR, BMCR_LOOPBACK | BMCR_FULLDPLX |
			    BMCR_SPEED1000);
	spin_unlock_bh(&bp->phy_lock);
	if (rc)
		return rc;

	for (i = 0; i < 10; i++) {
		if (bnx2_test_link(bp) == 0)
			break;
		msleep(100);
	}

	mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
	mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
		      BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
		      BNX2_EMAC_MODE_25G_MODE);

	mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
	REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
	bp->link_up = 1;
	return 0;
}
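
/* Driver <-> bootcode mailbox handshake: a sequence number is folded
 * into each message written to BNX2_DRV_MB, and the firmware echoes
 * it in BNX2_FW_MB once it has processed the message.  On timeout,
 * the firmware is informed via the FW_TIMEOUT code.
 */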
static int
bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int silent)
{
	int i;
	u32 val;

	bp->fw_wr_seq++;
	msg_data |= bp->fw_wr_seq;

	REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);

	/* wait for an acknowledgement. */
	for (i = 0; i < (FW_ACK_TIME_OUT_MS / 10); i++) {
		msleep(10);

		val = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_MB);

		if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
			break;
	}
	if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
		return 0;

	/* If we timed out, inform the firmware that this is the case. */
	if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
		if (!silent)
			printk(KERN_ERR PFX "fw sync timeout, reset code = "
					    "%x\n", msg_data);

		msg_data &= ~BNX2_DRV_MSG_CODE;
		msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;

		REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);

		return -EBUSY;
	}

	if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
		return -EIO;

	return 0;
}
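
/* The 5709 keeps its connection context in host memory.  Each page's
 * DMA address is written into the chip's host page table, and the
 * WRITE_REQ bit is polled until the hardware latches the entry.
 */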
static int
bnx2_init_5709_context(struct bnx2 *bp)
{
	int i, ret = 0;
	u32 val;

	val = BNX2_CTX_COMMAND_ENABLED | BNX2_CTX_COMMAND_MEM_INIT | (1 << 12);
	val |= (BCM_PAGE_BITS - 8) << 16;
	REG_WR(bp, BNX2_CTX_COMMAND, val);
	for (i = 0; i < bp->ctx_pages; i++) {
		int j;

		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0,
		       (bp->ctx_blk_mapping[i] & 0xffffffff) |
		       BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID);
		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA1,
		       (u64) bp->ctx_blk_mapping[i] >> 32);
		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL, i |
		       BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
		for (j = 0; j < 10; j++) {

			val = REG_RD(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL);
			if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
				break;
			udelay(5);
		}
		if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
			ret = -EBUSY;
			break;
		}
	}
	return ret;
}
static void
bnx2_init_context(struct bnx2 *bp)
{
	u32 vcid;

	vcid = 96;
	while (vcid) {
		u32 vcid_addr, pcid_addr, offset;

		vcid--;

		if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
			u32 new_vcid;

			vcid_addr = GET_PCID_ADDR(vcid);
			if (vcid & 0x8) {
				new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
			}
			else {
				new_vcid = vcid;
			}
			pcid_addr = GET_PCID_ADDR(new_vcid);
		}
		else {
			vcid_addr = GET_CID_ADDR(vcid);
			pcid_addr = vcid_addr;
		}

		REG_WR(bp, BNX2_CTX_VIRT_ADDR, 0x00);
		REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);

		/* Zero out the context. */
		for (offset = 0; offset < PHY_CTX_SIZE; offset += 4) {
			CTX_WR(bp, 0x00, offset, 0);
		}

		REG_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
		REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
	}
}
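
/* 5706 A0 workaround: RX buffer memory blocks whose address has bit 9
 * set are unreliable.  Allocate every free mbuf from the pool, record
 * the good ones, then free only those back -- the bad blocks remain
 * allocated and are never handed to the hardware.
 */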
static int
bnx2_alloc_bad_rbuf(struct bnx2 *bp)
{
	u16 *good_mbuf;
	u32 good_mbuf_cnt;
	u32 val;

	good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
	if (good_mbuf == NULL) {
		printk(KERN_ERR PFX "Failed to allocate memory in "
				    "bnx2_alloc_bad_rbuf\n");
		return -ENOMEM;
	}

	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
		BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);

	good_mbuf_cnt = 0;

	/* Allocate a bunch of mbufs and save the good ones in an array. */
	val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
	while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
		REG_WR_IND(bp, BNX2_RBUF_COMMAND, BNX2_RBUF_COMMAND_ALLOC_REQ);

		val = REG_RD_IND(bp, BNX2_RBUF_FW_BUF_ALLOC);

		val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;

		/* The addresses with Bit 9 set are bad memory blocks. */
		if (!(val & (1 << 9))) {
			good_mbuf[good_mbuf_cnt] = (u16) val;
			good_mbuf_cnt++;
		}

		val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
	}

	/* Free the good ones back to the mbuf pool thus discarding
	 * all the bad ones. */
	while (good_mbuf_cnt) {
		good_mbuf_cnt--;

		val = good_mbuf[good_mbuf_cnt];
		val = (val << 9) | val | 1;

		REG_WR_IND(bp, BNX2_RBUF_FW_BUF_FREE, val);
	}
	kfree(good_mbuf);
	return 0;
}
static void
bnx2_set_mac_addr(struct bnx2 *bp)
{
	u32 val;
	u8 *mac_addr = bp->dev->dev_addr;

	val = (mac_addr[0] << 8) | mac_addr[1];

	REG_WR(bp, BNX2_EMAC_MAC_MATCH0, val);

	val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
		(mac_addr[4] << 8) | mac_addr[5];

	REG_WR(bp, BNX2_EMAC_MAC_MATCH1, val);
}
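
/* Allocate and map one rx skb and point the corresponding rx_bd at
 * it.  The data pointer is aligned to BNX2_RX_ALIGN first, and the
 * producer byte-sequence counter (rx_prod_bseq) advances by the full
 * buffer size for the chip's flow accounting.
 */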
static inline int
bnx2_alloc_rx_skb(struct bnx2 *bp, u16 index)
{
	struct sk_buff *skb;
	struct sw_bd *rx_buf = &bp->rx_buf_ring[index];
	dma_addr_t mapping;
	struct rx_bd *rxbd = &bp->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
	unsigned long align;

	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
	if (skb == NULL) {
		return -ENOMEM;
	}

	if (unlikely((align = (unsigned long) skb->data & (BNX2_RX_ALIGN - 1))))
		skb_reserve(skb, BNX2_RX_ALIGN - align);

	mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
		PCI_DMA_FROMDEVICE);

	rx_buf->skb = skb;
	pci_unmap_addr_set(rx_buf, mapping, mapping);

	rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
	rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;

	bp->rx_prod_bseq += bp->rx_buf_use_size;

	return 0;
}
static void
bnx2_phy_int(struct bnx2 *bp)
{
	u32 new_link_state, old_link_state;

	new_link_state = bp->status_blk->status_attn_bits &
		STATUS_ATTN_BITS_LINK_STATE;
	old_link_state = bp->status_blk->status_attn_bits_ack &
		STATUS_ATTN_BITS_LINK_STATE;
	if (new_link_state != old_link_state) {
		if (new_link_state) {
			REG_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD,
				STATUS_ATTN_BITS_LINK_STATE);
		}
		else {
			REG_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD,
				STATUS_ATTN_BITS_LINK_STATE);
		}
		bnx2_set_link(bp);
	}
}
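
/* Reclaim completed tx descriptors.  With TSO the hardware can report
 * a partial completion, so a GSO packet is only unmapped and freed
 * once the consumer index has moved past its last descriptor.
 */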
static void
bnx2_tx_int(struct bnx2 *bp)
{
	struct status_block *sblk = bp->status_blk;
	u16 hw_cons, sw_cons, sw_ring_cons;
	int tx_free_bd = 0;

	hw_cons = bp->hw_tx_cons = sblk->status_tx_quick_consumer_index0;
	if ((hw_cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT) {
		hw_cons++;
	}
	sw_cons = bp->tx_cons;

	while (sw_cons != hw_cons) {
		struct sw_bd *tx_buf;
		struct sk_buff *skb;
		int i, last;

		sw_ring_cons = TX_RING_IDX(sw_cons);

		tx_buf = &bp->tx_buf_ring[sw_ring_cons];
		skb = tx_buf->skb;

		/* partial BD completions possible with TSO packets */
		if (skb_is_gso(skb)) {
			u16 last_idx, last_ring_idx;

			last_idx = sw_cons +
				skb_shinfo(skb)->nr_frags + 1;
			last_ring_idx = sw_ring_cons +
				skb_shinfo(skb)->nr_frags + 1;
			if (unlikely(last_ring_idx >= MAX_TX_DESC_CNT)) {
				last_idx++;
			}
			if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
				break;
			}
		}

		pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
			skb_headlen(skb), PCI_DMA_TODEVICE);

		tx_buf->skb = NULL;
		last = skb_shinfo(skb)->nr_frags;

		for (i = 0; i < last; i++) {
			sw_cons = NEXT_TX_BD(sw_cons);

			pci_unmap_page(bp->pdev,
				pci_unmap_addr(
					&bp->tx_buf_ring[TX_RING_IDX(sw_cons)],
					mapping),
				skb_shinfo(skb)->frags[i].size,
				PCI_DMA_TODEVICE);
		}

		sw_cons = NEXT_TX_BD(sw_cons);

		tx_free_bd += last + 1;

		dev_kfree_skb(skb);

		hw_cons = bp->hw_tx_cons =
			sblk->status_tx_quick_consumer_index0;

		if ((hw_cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT) {
			hw_cons++;
		}
	}

	bp->tx_cons = sw_cons;
	/* Need to make the tx_cons update visible to bnx2_start_xmit()
	 * before checking for netif_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that bnx2_start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	if (unlikely(netif_queue_stopped(bp->dev)) &&
		     (bnx2_tx_avail(bp) > bp->tx_wake_thresh)) {
		netif_tx_lock(bp->dev);
		if ((netif_queue_stopped(bp->dev)) &&
		    (bnx2_tx_avail(bp) > bp->tx_wake_thresh))
			netif_wake_queue(bp->dev);
		netif_tx_unlock(bp->dev);
	}
}
static inline void
bnx2_reuse_rx_skb(struct bnx2 *bp, struct sk_buff *skb,
	u16 cons, u16 prod)
{
	struct sw_bd *cons_rx_buf, *prod_rx_buf;
	struct rx_bd *cons_bd, *prod_bd;

	cons_rx_buf = &bp->rx_buf_ring[cons];
	prod_rx_buf = &bp->rx_buf_ring[prod];

	pci_dma_sync_single_for_device(bp->pdev,
		pci_unmap_addr(cons_rx_buf, mapping),
		bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

	bp->rx_prod_bseq += bp->rx_buf_use_size;

	prod_rx_buf->skb = skb;

	if (cons == prod)
		return;

	pci_unmap_addr_set(prod_rx_buf, mapping,
			pci_unmap_addr(cons_rx_buf, mapping));

	cons_bd = &bp->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
	prod_bd = &bp->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
	prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
	prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
}
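
/* RX path.  Error frames and allocation failures take the reuse_rx
 * path, which recycles the still-mapped buffer back onto the ring
 * instead of losing a descriptor.  When the MTU exceeds 1500, small
 * packets are copied into a fresh skb so the large buffer can be
 * recycled immediately.
 */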
static int
bnx2_rx_int(struct bnx2 *bp, int budget)
{
	struct status_block *sblk = bp->status_blk;
	u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
	struct l2_fhdr *rx_hdr;
	int rx_pkt = 0;

	hw_cons = bp->hw_rx_cons = sblk->status_rx_quick_consumer_index0;
	if ((hw_cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT) {
		hw_cons++;
	}
	sw_cons = bp->rx_cons;
	sw_prod = bp->rx_prod;

	/* Memory barrier necessary as speculative reads of the rx
	 * buffer can be ahead of the index in the status block
	 */
	rmb();
	while (sw_cons != hw_cons) {
		unsigned int len;
		u32 status;
		struct sw_bd *rx_buf;
		struct sk_buff *skb;
		dma_addr_t dma_addr;

		sw_ring_cons = RX_RING_IDX(sw_cons);
		sw_ring_prod = RX_RING_IDX(sw_prod);

		rx_buf = &bp->rx_buf_ring[sw_ring_cons];
		skb = rx_buf->skb;

		rx_buf->skb = NULL;

		dma_addr = pci_unmap_addr(rx_buf, mapping);

		pci_dma_sync_single_for_cpu(bp->pdev, dma_addr,
			bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

		rx_hdr = (struct l2_fhdr *) skb->data;
		len = rx_hdr->l2_fhdr_pkt_len - 4;

		if ((status = rx_hdr->l2_fhdr_status) &
			(L2_FHDR_ERRORS_BAD_CRC |
			L2_FHDR_ERRORS_PHY_DECODE |
			L2_FHDR_ERRORS_ALIGNMENT |
			L2_FHDR_ERRORS_TOO_SHORT |
			L2_FHDR_ERRORS_GIANT_FRAME)) {

			goto reuse_rx;
		}

		/* Since we don't have a jumbo ring, copy small packets
		 * if mtu > 1500
		 */
		if ((bp->dev->mtu > 1500) && (len <= RX_COPY_THRESH)) {
			struct sk_buff *new_skb;

			new_skb = netdev_alloc_skb(bp->dev, len + 2);
			if (new_skb == NULL)
				goto reuse_rx;

			/* aligned copy */
			skb_copy_from_linear_data_offset(skb, bp->rx_offset - 2,
				      new_skb->data, len + 2);
			skb_reserve(new_skb, 2);
			skb_put(new_skb, len);

			bnx2_reuse_rx_skb(bp, skb,
				sw_ring_cons, sw_ring_prod);

			skb = new_skb;
		}
		else if (bnx2_alloc_rx_skb(bp, sw_ring_prod) == 0) {
			pci_unmap_single(bp->pdev, dma_addr,
				bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);

			skb_reserve(skb, bp->rx_offset);
			skb_put(skb, len);
		}
		else {
reuse_rx:
			bnx2_reuse_rx_skb(bp, skb,
				sw_ring_cons, sw_ring_prod);
			goto next_rx;
		}

		skb->protocol = eth_type_trans(skb, bp->dev);

		if ((len > (bp->dev->mtu + ETH_HLEN)) &&
			(ntohs(skb->protocol) != 0x8100)) {

			dev_kfree_skb(skb);
			goto next_rx;

		}

		skb->ip_summed = CHECKSUM_NONE;
		if (bp->rx_csum &&
			(status & (L2_FHDR_STATUS_TCP_SEGMENT |
			L2_FHDR_STATUS_UDP_DATAGRAM))) {

			if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
					      L2_FHDR_ERRORS_UDP_XSUM)) == 0))
				skb->ip_summed = CHECKSUM_UNNECESSARY;
		}

#ifdef BCM_VLAN
		if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) && (bp->vlgrp != 0)) {
			vlan_hwaccel_receive_skb(skb, bp->vlgrp,
				rx_hdr->l2_fhdr_vlan_tag);
		}
		else
#endif
			netif_receive_skb(skb);

		bp->dev->last_rx = jiffies;
		rx_pkt++;

next_rx:
		sw_cons = NEXT_RX_BD(sw_cons);
		sw_prod = NEXT_RX_BD(sw_prod);

		if ((rx_pkt == budget))
			break;

		/* Refresh hw_cons to see if there is new work */
		if (sw_cons == hw_cons) {
			hw_cons = bp->hw_rx_cons =
				sblk->status_rx_quick_consumer_index0;
			if ((hw_cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT)
				hw_cons++;
			rmb();
		}
	}

	bp->rx_cons = sw_cons;
	bp->rx_prod = sw_prod;

	REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, sw_prod);

	REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bp->rx_prod_bseq);

	mmiowb();

	return rx_pkt;

}
/* MSI ISR - The only difference between this and the INTx ISR
 * is that the MSI interrupt is always serviced.
 */
static irqreturn_t
bnx2_msi(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct bnx2 *bp = netdev_priv(dev);

	prefetch(bp->status_blk);
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	/* Return here if interrupt is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	netif_rx_schedule(dev);

	return IRQ_HANDLED;
}

static irqreturn_t
bnx2_interrupt(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct bnx2 *bp = netdev_priv(dev);

	/* When using INTx, it is possible for the interrupt to arrive
	 * at the CPU before the status block posted prior to the
	 * interrupt.  Reading a register will flush the status block.
	 * When using MSI, the MSI message will always complete after
	 * the status block write.
	 */
	if ((bp->status_blk->status_idx == bp->last_status_idx) &&
	    (REG_RD(bp, BNX2_PCICFG_MISC_STATUS) &
	     BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
		return IRQ_NONE;

	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	/* Return here if interrupt is shared and is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	netif_rx_schedule(dev);

	return IRQ_HANDLED;
}
static inline int
bnx2_has_work(struct bnx2 *bp)
{
	struct status_block *sblk = bp->status_blk;

	if ((sblk->status_rx_quick_consumer_index0 != bp->hw_rx_cons) ||
	    (sblk->status_tx_quick_consumer_index0 != bp->hw_tx_cons))
		return 1;

	if ((sblk->status_attn_bits & STATUS_ATTN_BITS_LINK_STATE) !=
	    (sblk->status_attn_bits_ack & STATUS_ATTN_BITS_LINK_STATE))
		return 1;

	return 0;
}
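
/* NAPI poll: handle link attention first (forcing a coalesce-now
 * command to pick up transient status during link changes), then tx
 * completions, then rx up to the quota.  The status block is
 * rechecked via bnx2_has_work() before interrupts are re-enabled, to
 * close the race with a newly posted status index.
 */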
static int
bnx2_poll(struct net_device *dev, int *budget)
{
	struct bnx2 *bp = netdev_priv(dev);

	if ((bp->status_blk->status_attn_bits &
		STATUS_ATTN_BITS_LINK_STATE) !=
		(bp->status_blk->status_attn_bits_ack &
		STATUS_ATTN_BITS_LINK_STATE)) {

		spin_lock(&bp->phy_lock);
		bnx2_phy_int(bp);
		spin_unlock(&bp->phy_lock);

		/* This is needed to take care of transient status
		 * during link changes.
		 */
		REG_WR(bp, BNX2_HC_COMMAND,
		       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
		REG_RD(bp, BNX2_HC_COMMAND);
	}

	if (bp->status_blk->status_tx_quick_consumer_index0 != bp->hw_tx_cons)
		bnx2_tx_int(bp);

	if (bp->status_blk->status_rx_quick_consumer_index0 != bp->hw_rx_cons) {
		int orig_budget = *budget;
		int work_done;

		if (orig_budget > dev->quota)
			orig_budget = dev->quota;

		work_done = bnx2_rx_int(bp, orig_budget);
		*budget -= work_done;
		dev->quota -= work_done;
	}

	bp->last_status_idx = bp->status_blk->status_idx;
	rmb();

	if (!bnx2_has_work(bp)) {
		netif_rx_complete(dev);
		if (likely(bp->flags & USING_MSI_FLAG)) {
			REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
			       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
			       bp->last_status_idx);
			return 0;
		}
		REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
		       BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
		       bp->last_status_idx);

		REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
		       bp->last_status_idx);
		return 0;
	}

	return 1;
}
/* Called with rtnl_lock from vlan functions and also netif_tx_lock
 * from set_multicast.
 */
static void
bnx2_set_rx_mode(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 rx_mode, sort_mode;
	int i;

	spin_lock_bh(&bp->phy_lock);

	rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
				  BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
	sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
#ifdef BCM_VLAN
	if (!bp->vlgrp && !(bp->flags & ASF_ENABLE_FLAG))
		rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
#else
	if (!(bp->flags & ASF_ENABLE_FLAG))
		rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
#endif
	if (dev->flags & IFF_PROMISC) {
		/* Promiscuous mode. */
		rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
		sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
			     BNX2_RPM_SORT_USER0_PROM_VLAN;
	}
	else if (dev->flags & IFF_ALLMULTI) {
		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
			REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
			       0xffffffff);
		}
		sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
	}
	else {
		/* Accept one or more multicast(s). */
		struct dev_mc_list *mclist;
		u32 mc_filter[NUM_MC_HASH_REGISTERS];
		u32 regidx;
		u32 bit;
		u32 crc;

		memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);

		for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
		     i++, mclist = mclist->next) {

			crc = ether_crc_le(ETH_ALEN, mclist->dmi_addr);
			bit = crc & 0xff;
			regidx = (bit & 0xe0) >> 5;
			bit &= 0x1f;
			mc_filter[regidx] |= (1 << bit);
		}

		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
			REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
			       mc_filter[i]);
		}

		sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
	}

	if (rx_mode != bp->rx_mode) {
		bp->rx_mode = rx_mode;
		REG_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
	}

	REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
	REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
	REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);

	spin_unlock_bh(&bp->phy_lock);
}
#define FW_BUF_SIZE	0x8000

static int
bnx2_gunzip_init(struct bnx2 *bp)
{
	if ((bp->gunzip_buf = vmalloc(FW_BUF_SIZE)) == NULL)
		goto gunzip_nomem1;

	if ((bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL)) == NULL)
		goto gunzip_nomem2;

	bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(), GFP_KERNEL);
	if (bp->strm->workspace == NULL)
		goto gunzip_nomem3;

	return 0;

gunzip_nomem3:
	kfree(bp->strm);
	bp->strm = NULL;

gunzip_nomem2:
	vfree(bp->gunzip_buf);
	bp->gunzip_buf = NULL;

gunzip_nomem1:
	printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for "
			    "uncompression.\n", bp->dev->name);
	return -ENOMEM;
}
static void
bnx2_gunzip_end(struct bnx2 *bp)
{
	kfree(bp->strm->workspace);

	kfree(bp->strm);
	bp->strm = NULL;

	if (bp->gunzip_buf) {
		vfree(bp->gunzip_buf);
		bp->gunzip_buf = NULL;
	}
}
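
/* Decompress one gzip member into bp->gunzip_buf.  The 10-byte gzip
 * header (plus the optional original file name) is skipped by hand,
 * and zlib is run in raw-deflate mode (negative window bits) since
 * the kernel zlib does not parse gzip wrappers itself.
 */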
2223 static int
2224 bnx2_gunzip(struct bnx2 *bp, u8 *zbuf, int len, void **outbuf, int *outlen)
2226 int n, rc;
2228 /* check gzip header */
2229 if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED))
2230 return -EINVAL;
2232 n = 10;
2234 #define FNAME 0x8
2235 if (zbuf[3] & FNAME)
2236 while ((zbuf[n++] != 0) && (n < len));
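/* Note: a gzip member begins with a fixed 10-byte header; when the FNAME
 * flag is set, a NUL-terminated file name follows and is skipped above.
 * The payload is then a raw deflate stream, which is why inflateInit2()
 * below is given a negative window size (-MAX_WBITS): it tells zlib not
 * to expect a zlib/gzip wrapper.
 */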
2238 bp->strm->next_in = zbuf + n;
2239 bp->strm->avail_in = len - n;
2240 bp->strm->next_out = bp->gunzip_buf;
2241 bp->strm->avail_out = FW_BUF_SIZE;
2243 rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
2244 if (rc != Z_OK)
2245 return rc;
2247 rc = zlib_inflate(bp->strm, Z_FINISH);
2249 *outlen = FW_BUF_SIZE - bp->strm->avail_out;
2250 *outbuf = bp->gunzip_buf;
2252 if ((rc != Z_OK) && (rc != Z_STREAM_END))
2253 printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
2254 bp->dev->name, bp->strm->msg);
2256 zlib_inflateEnd(bp->strm);
2258 if (rc == Z_STREAM_END)
2259 return 0;
2261 return rc;
2262 }
2264 static void
2265 load_rv2p_fw(struct bnx2 *bp, u32 *rv2p_code, u32 rv2p_code_len,
2266 u32 rv2p_proc)
2267 {
2268 int i;
2269 u32 val;
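/* Note: RV2P instructions are 64 bits wide. The loop below writes the
 * two 32-bit halves into the INSTR_HIGH/INSTR_LOW staging registers,
 * then commits them to code word (i / 8) through the processor's
 * ADDR_CMD register with the RDWR bit set.
 */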
2272 for (i = 0; i < rv2p_code_len; i += 8) {
2273 REG_WR(bp, BNX2_RV2P_INSTR_HIGH, cpu_to_le32(*rv2p_code));
2274 rv2p_code++;
2275 REG_WR(bp, BNX2_RV2P_INSTR_LOW, cpu_to_le32(*rv2p_code));
2276 rv2p_code++;
2278 if (rv2p_proc == RV2P_PROC1) {
2279 val = (i / 8) | BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
2280 REG_WR(bp, BNX2_RV2P_PROC1_ADDR_CMD, val);
2281 }
2282 else {
2283 val = (i / 8) | BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
2284 REG_WR(bp, BNX2_RV2P_PROC2_ADDR_CMD, val);
2285 }
2286 }
2288 /* Reset the processor, un-stall is done later. */
2289 if (rv2p_proc == RV2P_PROC1) {
2290 REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
2291 }
2292 else {
2293 REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
2294 }
2295 }
2297 static int
2298 load_cpu_fw(struct bnx2 *bp, struct cpu_reg *cpu_reg, struct fw_info *fw)
2299 {
2300 u32 offset;
2301 u32 val;
2302 int rc;
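/* Note: each on-chip CPU sees its code and data at a MIPS virtual
 * address (mips_view_base), while the host reaches the same memory
 * through the scratchpad window at spad_base; every section address
 * from the fw_info header is therefore rebased before the indirect
 * writes below.
 */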
2304 /* Halt the CPU. */
2305 val = REG_RD_IND(bp, cpu_reg->mode);
2306 val |= cpu_reg->mode_value_halt;
2307 REG_WR_IND(bp, cpu_reg->mode, val);
2308 REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);
2310 /* Load the Text area. */
2311 offset = cpu_reg->spad_base + (fw->text_addr - cpu_reg->mips_view_base);
2312 if (fw->gz_text) {
2313 u32 text_len;
2314 void *text;
2316 rc = bnx2_gunzip(bp, fw->gz_text, fw->gz_text_len, &text,
2317 &text_len);
2318 if (rc)
2319 return rc;
2321 fw->text = text;
2322 }
2323 if (fw->gz_text) {
2324 int j;
2326 for (j = 0; j < (fw->text_len / 4); j++, offset += 4) {
2327 REG_WR_IND(bp, offset, cpu_to_le32(fw->text[j]));
2328 }
2329 }
2331 /* Load the Data area. */
2332 offset = cpu_reg->spad_base + (fw->data_addr - cpu_reg->mips_view_base);
2333 if (fw->data) {
2334 int j;
2336 for (j = 0; j < (fw->data_len / 4); j++, offset += 4) {
2337 REG_WR_IND(bp, offset, fw->data[j]);
2338 }
2339 }
2341 /* Load the SBSS area. */
2342 offset = cpu_reg->spad_base + (fw->sbss_addr - cpu_reg->mips_view_base);
2343 if (fw->sbss) {
2344 int j;
2346 for (j = 0; j < (fw->sbss_len / 4); j++, offset += 4) {
2347 REG_WR_IND(bp, offset, fw->sbss[j]);
2348 }
2349 }
2351 /* Load the BSS area. */
2352 offset = cpu_reg->spad_base + (fw->bss_addr - cpu_reg->mips_view_base);
2353 if (fw->bss) {
2354 int j;
2356 for (j = 0; j < (fw->bss_len/4); j++, offset += 4) {
2357 REG_WR_IND(bp, offset, fw->bss[j]);
2358 }
2359 }
2361 /* Load the Read-Only area. */
2362 offset = cpu_reg->spad_base +
2363 (fw->rodata_addr - cpu_reg->mips_view_base);
2364 if (fw->rodata) {
2365 int j;
2367 for (j = 0; j < (fw->rodata_len / 4); j++, offset += 4) {
2368 REG_WR_IND(bp, offset, fw->rodata[j]);
2369 }
2370 }
2372 /* Clear the pre-fetch instruction. */
2373 REG_WR_IND(bp, cpu_reg->inst, 0);
2374 REG_WR_IND(bp, cpu_reg->pc, fw->start_addr);
2376 /* Start the CPU. */
2377 val = REG_RD_IND(bp, cpu_reg->mode);
2378 val &= ~cpu_reg->mode_value_halt;
2379 REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);
2380 REG_WR_IND(bp, cpu_reg->mode, val);
2382 return 0;
2383 }
2385 static int
2386 bnx2_init_cpus(struct bnx2 *bp)
2387 {
2388 struct cpu_reg cpu_reg;
2389 struct fw_info *fw;
2390 int rc = 0;
2391 void *text;
2392 u32 text_len;
2394 if ((rc = bnx2_gunzip_init(bp)) != 0)
2395 return rc;
2397 /* Initialize the RV2P processor. */
2398 rc = bnx2_gunzip(bp, bnx2_rv2p_proc1, sizeof(bnx2_rv2p_proc1), &text,
2399 &text_len);
2400 if (rc)
2401 goto init_cpu_err;
2403 load_rv2p_fw(bp, text, text_len, RV2P_PROC1);
2405 rc = bnx2_gunzip(bp, bnx2_rv2p_proc2, sizeof(bnx2_rv2p_proc2), &text,
2406 &text_len);
2407 if (rc)
2408 goto init_cpu_err;
2410 load_rv2p_fw(bp, text, text_len, RV2P_PROC2);
2412 /* Initialize the RX Processor. */
2413 cpu_reg.mode = BNX2_RXP_CPU_MODE;
2414 cpu_reg.mode_value_halt = BNX2_RXP_CPU_MODE_SOFT_HALT;
2415 cpu_reg.mode_value_sstep = BNX2_RXP_CPU_MODE_STEP_ENA;
2416 cpu_reg.state = BNX2_RXP_CPU_STATE;
2417 cpu_reg.state_value_clear = 0xffffff;
2418 cpu_reg.gpr0 = BNX2_RXP_CPU_REG_FILE;
2419 cpu_reg.evmask = BNX2_RXP_CPU_EVENT_MASK;
2420 cpu_reg.pc = BNX2_RXP_CPU_PROGRAM_COUNTER;
2421 cpu_reg.inst = BNX2_RXP_CPU_INSTRUCTION;
2422 cpu_reg.bp = BNX2_RXP_CPU_HW_BREAKPOINT;
2423 cpu_reg.spad_base = BNX2_RXP_SCRATCH;
2424 cpu_reg.mips_view_base = 0x8000000;
2426 if (CHIP_NUM(bp) == CHIP_NUM_5709)
2427 fw = &bnx2_rxp_fw_09;
2428 else
2429 fw = &bnx2_rxp_fw_06;
2431 rc = load_cpu_fw(bp, &cpu_reg, fw);
2432 if (rc)
2433 goto init_cpu_err;
2435 /* Initialize the TX Processor. */
2436 cpu_reg.mode = BNX2_TXP_CPU_MODE;
2437 cpu_reg.mode_value_halt = BNX2_TXP_CPU_MODE_SOFT_HALT;
2438 cpu_reg.mode_value_sstep = BNX2_TXP_CPU_MODE_STEP_ENA;
2439 cpu_reg.state = BNX2_TXP_CPU_STATE;
2440 cpu_reg.state_value_clear = 0xffffff;
2441 cpu_reg.gpr0 = BNX2_TXP_CPU_REG_FILE;
2442 cpu_reg.evmask = BNX2_TXP_CPU_EVENT_MASK;
2443 cpu_reg.pc = BNX2_TXP_CPU_PROGRAM_COUNTER;
2444 cpu_reg.inst = BNX2_TXP_CPU_INSTRUCTION;
2445 cpu_reg.bp = BNX2_TXP_CPU_HW_BREAKPOINT;
2446 cpu_reg.spad_base = BNX2_TXP_SCRATCH;
2447 cpu_reg.mips_view_base = 0x8000000;
2449 if (CHIP_NUM(bp) == CHIP_NUM_5709)
2450 fw = &bnx2_txp_fw_09;
2451 else
2452 fw = &bnx2_txp_fw_06;
2454 rc = load_cpu_fw(bp, &cpu_reg, fw);
2455 if (rc)
2456 goto init_cpu_err;
2458 /* Initialize the TX Patch-up Processor. */
2459 cpu_reg.mode = BNX2_TPAT_CPU_MODE;
2460 cpu_reg.mode_value_halt = BNX2_TPAT_CPU_MODE_SOFT_HALT;
2461 cpu_reg.mode_value_sstep = BNX2_TPAT_CPU_MODE_STEP_ENA;
2462 cpu_reg.state = BNX2_TPAT_CPU_STATE;
2463 cpu_reg.state_value_clear = 0xffffff;
2464 cpu_reg.gpr0 = BNX2_TPAT_CPU_REG_FILE;
2465 cpu_reg.evmask = BNX2_TPAT_CPU_EVENT_MASK;
2466 cpu_reg.pc = BNX2_TPAT_CPU_PROGRAM_COUNTER;
2467 cpu_reg.inst = BNX2_TPAT_CPU_INSTRUCTION;
2468 cpu_reg.bp = BNX2_TPAT_CPU_HW_BREAKPOINT;
2469 cpu_reg.spad_base = BNX2_TPAT_SCRATCH;
2470 cpu_reg.mips_view_base = 0x8000000;
2472 if (CHIP_NUM(bp) == CHIP_NUM_5709)
2473 fw = &bnx2_tpat_fw_09;
2474 else
2475 fw = &bnx2_tpat_fw_06;
2477 rc = load_cpu_fw(bp, &cpu_reg, fw);
2478 if (rc)
2479 goto init_cpu_err;
2481 /* Initialize the Completion Processor. */
2482 cpu_reg.mode = BNX2_COM_CPU_MODE;
2483 cpu_reg.mode_value_halt = BNX2_COM_CPU_MODE_SOFT_HALT;
2484 cpu_reg.mode_value_sstep = BNX2_COM_CPU_MODE_STEP_ENA;
2485 cpu_reg.state = BNX2_COM_CPU_STATE;
2486 cpu_reg.state_value_clear = 0xffffff;
2487 cpu_reg.gpr0 = BNX2_COM_CPU_REG_FILE;
2488 cpu_reg.evmask = BNX2_COM_CPU_EVENT_MASK;
2489 cpu_reg.pc = BNX2_COM_CPU_PROGRAM_COUNTER;
2490 cpu_reg.inst = BNX2_COM_CPU_INSTRUCTION;
2491 cpu_reg.bp = BNX2_COM_CPU_HW_BREAKPOINT;
2492 cpu_reg.spad_base = BNX2_COM_SCRATCH;
2493 cpu_reg.mips_view_base = 0x8000000;
2495 if (CHIP_NUM(bp) == CHIP_NUM_5709)
2496 fw = &bnx2_com_fw_09;
2497 else
2498 fw = &bnx2_com_fw_06;
2500 rc = load_cpu_fw(bp, &cpu_reg, fw);
2501 if (rc)
2502 goto init_cpu_err;
2504 /* Initialize the Command Processor. */
2505 cpu_reg.mode = BNX2_CP_CPU_MODE;
2506 cpu_reg.mode_value_halt = BNX2_CP_CPU_MODE_SOFT_HALT;
2507 cpu_reg.mode_value_sstep = BNX2_CP_CPU_MODE_STEP_ENA;
2508 cpu_reg.state = BNX2_CP_CPU_STATE;
2509 cpu_reg.state_value_clear = 0xffffff;
2510 cpu_reg.gpr0 = BNX2_CP_CPU_REG_FILE;
2511 cpu_reg.evmask = BNX2_CP_CPU_EVENT_MASK;
2512 cpu_reg.pc = BNX2_CP_CPU_PROGRAM_COUNTER;
2513 cpu_reg.inst = BNX2_CP_CPU_INSTRUCTION;
2514 cpu_reg.bp = BNX2_CP_CPU_HW_BREAKPOINT;
2515 cpu_reg.spad_base = BNX2_CP_SCRATCH;
2516 cpu_reg.mips_view_base = 0x8000000;
2518 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
2519 fw = &bnx2_cp_fw_09;
2521 rc = load_cpu_fw(bp, &cpu_reg, fw);
2522 if (rc)
2523 goto init_cpu_err;
2524 }
2525 init_cpu_err:
2526 bnx2_gunzip_end(bp);
2527 return rc;
2528 }
2530 static int
2531 bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
2532 {
2533 u16 pmcsr;
2535 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
2537 switch (state) {
2538 case PCI_D0: {
2539 u32 val;
2541 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
2542 (pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
2543 PCI_PM_CTRL_PME_STATUS);
2545 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
2546 /* delay required during transition out of D3hot */
2547 msleep(20);
2549 val = REG_RD(bp, BNX2_EMAC_MODE);
2550 val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
2551 val &= ~BNX2_EMAC_MODE_MPKT;
2552 REG_WR(bp, BNX2_EMAC_MODE, val);
2554 val = REG_RD(bp, BNX2_RPM_CONFIG);
2555 val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
2556 REG_WR(bp, BNX2_RPM_CONFIG, val);
2557 break;
2558 }
2559 case PCI_D3hot: {
2560 int i;
2561 u32 val, wol_msg;
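/* Note on the WOL path below: the copper link is renegotiated down to
 * 10/100 to save power, the EMAC is set up to detect magic and ACPI
 * wakeup packets, broadcast/multicast are accepted so wakeup frames get
 * through, and the EMAC/RPM blocks stay enabled while the chip sleeps.
 */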
2563 if (bp->wol) {
2564 u32 advertising;
2565 u8 autoneg;
2567 autoneg = bp->autoneg;
2568 advertising = bp->advertising;
2570 bp->autoneg = AUTONEG_SPEED;
2571 bp->advertising = ADVERTISED_10baseT_Half |
2572 ADVERTISED_10baseT_Full |
2573 ADVERTISED_100baseT_Half |
2574 ADVERTISED_100baseT_Full |
2575 ADVERTISED_Autoneg;
2577 bnx2_setup_copper_phy(bp);
2579 bp->autoneg = autoneg;
2580 bp->advertising = advertising;
2582 bnx2_set_mac_addr(bp);
2584 val = REG_RD(bp, BNX2_EMAC_MODE);
2586 /* Enable port mode. */
2587 val &= ~BNX2_EMAC_MODE_PORT;
2588 val |= BNX2_EMAC_MODE_PORT_MII |
2589 BNX2_EMAC_MODE_MPKT_RCVD |
2590 BNX2_EMAC_MODE_ACPI_RCVD |
2591 BNX2_EMAC_MODE_MPKT;
2593 REG_WR(bp, BNX2_EMAC_MODE, val);
2595 /* receive all multicast */
2596 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
2597 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
2598 0xffffffff);
2599 }
2600 REG_WR(bp, BNX2_EMAC_RX_MODE,
2601 BNX2_EMAC_RX_MODE_SORT_MODE);
2603 val = 1 | BNX2_RPM_SORT_USER0_BC_EN |
2604 BNX2_RPM_SORT_USER0_MC_EN;
2605 REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
2606 REG_WR(bp, BNX2_RPM_SORT_USER0, val);
2607 REG_WR(bp, BNX2_RPM_SORT_USER0, val |
2608 BNX2_RPM_SORT_USER0_ENA);
2610 /* Need to enable EMAC and RPM for WOL. */
2611 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
2612 BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
2613 BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
2614 BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);
2616 val = REG_RD(bp, BNX2_RPM_CONFIG);
2617 val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
2618 REG_WR(bp, BNX2_RPM_CONFIG, val);
2620 wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
2621 }
2622 else {
2623 wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
2624 }
2626 if (!(bp->flags & NO_WOL_FLAG))
2627 bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg, 0);
2629 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
2630 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
2631 (CHIP_ID(bp) == CHIP_ID_5706_A1)) {
2633 if (bp->wol)
2634 pmcsr |= 3;
2635 }
2636 else {
2637 pmcsr |= 3;
2638 }
2639 if (bp->wol) {
2640 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
2641 }
2642 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
2643 pmcsr);
2645 /* No more memory access after this point until
2646 * device is brought back to D0.
2647 */
2648 udelay(50);
2649 break;
2650 }
2651 default:
2652 return -EINVAL;
2653 }
2654 return 0;
2655 }
2657 static int
2658 bnx2_acquire_nvram_lock(struct bnx2 *bp)
2659 {
2660 u32 val;
2661 int j;
2663 /* Request access to the flash interface. */
2664 REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
2665 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2666 val = REG_RD(bp, BNX2_NVM_SW_ARB);
2667 if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
2668 break;
2670 udelay(5);
2671 }
2673 if (j >= NVRAM_TIMEOUT_COUNT)
2674 return -EBUSY;
2676 return 0;
2677 }
2679 static int
2680 bnx2_release_nvram_lock(struct bnx2 *bp)
2681 {
2682 int j;
2683 u32 val;
2685 /* Relinquish nvram interface. */
2686 REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);
2688 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2689 val = REG_RD(bp, BNX2_NVM_SW_ARB);
2690 if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
2691 break;
2693 udelay(5);
2694 }
2696 if (j >= NVRAM_TIMEOUT_COUNT)
2697 return -EBUSY;
2699 return 0;
2700 }
2703 static int
2704 bnx2_enable_nvram_write(struct bnx2 *bp)
2705 {
2706 u32 val;
2708 val = REG_RD(bp, BNX2_MISC_CFG);
2709 REG_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);
2711 if (!bp->flash_info->buffered) {
2712 int j;
2714 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
2715 REG_WR(bp, BNX2_NVM_COMMAND,
2716 BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);
2718 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2719 udelay(5);
2721 val = REG_RD(bp, BNX2_NVM_COMMAND);
2722 if (val & BNX2_NVM_COMMAND_DONE)
2723 break;
2724 }
2726 if (j >= NVRAM_TIMEOUT_COUNT)
2727 return -EBUSY;
2728 }
2729 return 0;
2730 }
2732 static void
2733 bnx2_disable_nvram_write(struct bnx2 *bp)
2734 {
2735 u32 val;
2737 val = REG_RD(bp, BNX2_MISC_CFG);
2738 REG_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
2739 }
2742 static void
2743 bnx2_enable_nvram_access(struct bnx2 *bp)
2744 {
2745 u32 val;
2747 val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
2748 /* Enable both bits, even on read. */
2749 REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
2750 val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
2751 }
2753 static void
2754 bnx2_disable_nvram_access(struct bnx2 *bp)
2755 {
2756 u32 val;
2758 val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
2759 /* Disable both bits, even after read. */
2760 REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
2761 val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
2762 BNX2_NVM_ACCESS_ENABLE_WR_EN));
2763 }
2765 static int
2766 bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
2767 {
2768 u32 cmd;
2769 int j;
2771 if (bp->flash_info->buffered)
2772 /* Buffered flash, no erase needed */
2773 return 0;
2775 /* Build an erase command */
2776 cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
2777 BNX2_NVM_COMMAND_DOIT;
2779 /* Need to clear DONE bit separately. */
2780 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
2782 /* Address of the NVRAM to erase. */
2783 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
2785 /* Issue an erase command. */
2786 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
2788 /* Wait for completion. */
2789 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2790 u32 val;
2792 udelay(5);
2794 val = REG_RD(bp, BNX2_NVM_COMMAND);
2795 if (val & BNX2_NVM_COMMAND_DONE)
2796 break;
2797 }
2799 if (j >= NVRAM_TIMEOUT_COUNT)
2800 return -EBUSY;
2802 return 0;
2803 }
2805 static int
2806 bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
2807 {
2808 u32 cmd;
2809 int j;
2811 /* Build the command word. */
2812 cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;
2814 /* Calculate an offset of a buffered flash. */
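/* Note: buffered flash parts address memory as page:offset rather than
 * linearly, so the linear offset is split into a page number shifted
 * into position by page_bits plus the remainder within the page.
 */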
2815 if (bp->flash_info->buffered) {
2816 offset = ((offset / bp->flash_info->page_size) <<
2817 bp->flash_info->page_bits) +
2818 (offset % bp->flash_info->page_size);
2819 }
2821 /* Need to clear DONE bit separately. */
2822 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
2824 /* Address of the NVRAM to read from. */
2825 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
2827 /* Issue a read command. */
2828 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
2830 /* Wait for completion. */
2831 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2832 u32 val;
2834 udelay(5);
2836 val = REG_RD(bp, BNX2_NVM_COMMAND);
2837 if (val & BNX2_NVM_COMMAND_DONE) {
2838 val = REG_RD(bp, BNX2_NVM_READ);
2840 val = be32_to_cpu(val);
2841 memcpy(ret_val, &val, 4);
2842 break;
2843 }
2844 }
2845 if (j >= NVRAM_TIMEOUT_COUNT)
2846 return -EBUSY;
2848 return 0;
2849 }
2852 static int
2853 bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
2854 {
2855 u32 cmd, val32;
2856 int j;
2858 /* Build the command word. */
2859 cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;
2861 /* Calculate an offset of a buffered flash. */
2862 if (bp->flash_info->buffered) {
2863 offset = ((offset / bp->flash_info->page_size) <<
2864 bp->flash_info->page_bits) +
2865 (offset % bp->flash_info->page_size);
2866 }
2868 /* Need to clear DONE bit separately. */
2869 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
2871 memcpy(&val32, val, 4);
2872 val32 = cpu_to_be32(val32);
2874 /* Write the data. */
2875 REG_WR(bp, BNX2_NVM_WRITE, val32);
2877 /* Address of the NVRAM to write to. */
2878 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
2880 /* Issue the write command. */
2881 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
2883 /* Wait for completion. */
2884 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2885 udelay(5);
2887 if (REG_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
2888 break;
2889 }
2890 if (j >= NVRAM_TIMEOUT_COUNT)
2891 return -EBUSY;
2893 return 0;
2894 }
2896 static int
2897 bnx2_init_nvram(struct bnx2 *bp)
2898 {
2899 u32 val;
2900 int j, entry_count, rc;
2901 struct flash_spec *flash;
2903 /* Determine the selected interface. */
2904 val = REG_RD(bp, BNX2_NVM_CFG1);
2906 entry_count = sizeof(flash_table) / sizeof(struct flash_spec);
2908 rc = 0;
2909 if (val & 0x40000000) {
2911 /* Flash interface has been reconfigured */
2912 for (j = 0, flash = &flash_table[0]; j < entry_count;
2913 j++, flash++) {
2914 if ((val & FLASH_BACKUP_STRAP_MASK) ==
2915 (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
2916 bp->flash_info = flash;
2917 break;
2918 }
2919 }
2920 }
2921 else {
2922 u32 mask;
2923 /* Not yet reconfigured */
2925 if (val & (1 << 23))
2926 mask = FLASH_BACKUP_STRAP_MASK;
2927 else
2928 mask = FLASH_STRAP_MASK;
2930 for (j = 0, flash = &flash_table[0]; j < entry_count;
2931 j++, flash++) {
2933 if ((val & mask) == (flash->strapping & mask)) {
2934 bp->flash_info = flash;
2936 /* Request access to the flash interface. */
2937 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
2938 return rc;
2940 /* Enable access to flash interface */
2941 bnx2_enable_nvram_access(bp);
2943 /* Reconfigure the flash interface */
2944 REG_WR(bp, BNX2_NVM_CFG1, flash->config1);
2945 REG_WR(bp, BNX2_NVM_CFG2, flash->config2);
2946 REG_WR(bp, BNX2_NVM_CFG3, flash->config3);
2947 REG_WR(bp, BNX2_NVM_WRITE1, flash->write1);
2949 /* Disable access to flash interface */
2950 bnx2_disable_nvram_access(bp);
2951 bnx2_release_nvram_lock(bp);
2953 break;
2954 }
2955 }
2956 } /* if (val & 0x40000000) */
2958 if (j == entry_count) {
2959 bp->flash_info = NULL;
2960 printk(KERN_ALERT PFX "Unknown flash/EEPROM type.\n");
2961 return -ENODEV;
2962 }
2964 val = REG_RD_IND(bp, bp->shmem_base + BNX2_SHARED_HW_CFG_CONFIG2);
2965 val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
2966 if (val)
2967 bp->flash_size = val;
2968 else
2969 bp->flash_size = bp->flash_info->total_size;
2971 return rc;
2972 }
2974 static int
2975 bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
2976 int buf_size)
2977 {
2978 int rc = 0;
2979 u32 cmd_flags, offset32, len32, extra;
2981 if (buf_size == 0)
2982 return 0;
2984 /* Request access to the flash interface. */
2985 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
2986 return rc;
2988 /* Enable access to flash interface */
2989 bnx2_enable_nvram_access(bp);
2991 len32 = buf_size;
2992 offset32 = offset;
2993 extra = 0;
2995 cmd_flags = 0;
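/* Note: the NVRAM interface transfers whole 32-bit words framed by the
 * FIRST and LAST command flags. Unaligned leading and trailing bytes
 * are read through a 4-byte bounce buffer and copied out; the aligned
 * middle portion is read directly into ret_buf.
 */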
2997 if (offset32 & 3) {
2998 u8 buf[4];
2999 u32 pre_len;
3001 offset32 &= ~3;
3002 pre_len = 4 - (offset & 3);
3004 if (pre_len >= len32) {
3005 pre_len = len32;
3006 cmd_flags = BNX2_NVM_COMMAND_FIRST |
3007 BNX2_NVM_COMMAND_LAST;
3008 }
3009 else {
3010 cmd_flags = BNX2_NVM_COMMAND_FIRST;
3011 }
3013 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3015 if (rc)
3016 return rc;
3018 memcpy(ret_buf, buf + (offset & 3), pre_len);
3020 offset32 += 4;
3021 ret_buf += pre_len;
3022 len32 -= pre_len;
3023 }
3024 if (len32 & 3) {
3025 extra = 4 - (len32 & 3);
3026 len32 = (len32 + 4) & ~3;
3027 }
3029 if (len32 == 4) {
3030 u8 buf[4];
3032 if (cmd_flags)
3033 cmd_flags = BNX2_NVM_COMMAND_LAST;
3034 else
3035 cmd_flags = BNX2_NVM_COMMAND_FIRST |
3036 BNX2_NVM_COMMAND_LAST;
3038 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3040 memcpy(ret_buf, buf, 4 - extra);
3041 }
3042 else if (len32 > 0) {
3043 u8 buf[4];
3045 /* Read the first dword. */
3046 if (cmd_flags)
3047 cmd_flags = 0;
3048 else
3049 cmd_flags = BNX2_NVM_COMMAND_FIRST;
3051 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);
3053 /* Advance to the next dword. */
3054 offset32 += 4;
3055 ret_buf += 4;
3056 len32 -= 4;
3058 while (len32 > 4 && rc == 0) {
3059 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);
3061 /* Advance to the next dword. */
3062 offset32 += 4;
3063 ret_buf += 4;
3064 len32 -= 4;
3065 }
3067 if (rc)
3068 return rc;
3070 cmd_flags = BNX2_NVM_COMMAND_LAST;
3071 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3073 memcpy(ret_buf, buf, 4 - extra);
3074 }
3076 /* Disable access to flash interface */
3077 bnx2_disable_nvram_access(bp);
3079 bnx2_release_nvram_lock(bp);
3081 return rc;
3082 }
3084 static int
3085 bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
3086 int buf_size)
3087 {
3088 u32 written, offset32, len32;
3089 u8 *buf, start[4], end[4], *align_buf = NULL, *flash_buffer = NULL;
3090 int rc = 0;
3091 int align_start, align_end;
3093 buf = data_buf;
3094 offset32 = offset;
3095 len32 = buf_size;
3096 align_start = align_end = 0;
3098 if ((align_start = (offset32 & 3))) {
3099 offset32 &= ~3;
3100 len32 += align_start;
3101 if (len32 < 4)
3102 len32 = 4;
3103 if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
3104 return rc;
3105 }
3107 if (len32 & 3) {
3108 align_end = 4 - (len32 & 3);
3109 len32 += align_end;
3110 if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4, end, 4)))
3111 return rc;
3112 }
3114 if (align_start || align_end) {
3115 align_buf = kmalloc(len32, GFP_KERNEL);
3116 if (align_buf == NULL)
3117 return -ENOMEM;
3118 if (align_start) {
3119 memcpy(align_buf, start, 4);
3120 }
3121 if (align_end) {
3122 memcpy(align_buf + len32 - 4, end, 4);
3123 }
3124 memcpy(align_buf + align_start, data_buf, buf_size);
3125 buf = align_buf;
3126 }
3128 if (bp->flash_info->buffered == 0) {
3129 flash_buffer = kmalloc(264, GFP_KERNEL);
3130 if (flash_buffer == NULL) {
3131 rc = -ENOMEM;
3132 goto nvram_write_end;
3133 }
3134 }
3136 written = 0;
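/* Note: one flash page is written per iteration. Non-buffered parts
 * must be read, erased, and rewritten a full page at a time, so bytes
 * outside [data_start, data_end) are restored from flash_buffer around
 * the new data.
 */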
3137 while ((written < len32) && (rc == 0)) {
3138 u32 page_start, page_end, data_start, data_end;
3139 u32 addr, cmd_flags;
3140 int i;
3142 /* Find the page_start addr */
3143 page_start = offset32 + written;
3144 page_start -= (page_start % bp->flash_info->page_size);
3145 /* Find the page_end addr */
3146 page_end = page_start + bp->flash_info->page_size;
3147 /* Find the data_start addr */
3148 data_start = (written == 0) ? offset32 : page_start;
3149 /* Find the data_end addr */
3150 data_end = (page_end > offset32 + len32) ?
3151 (offset32 + len32) : page_end;
3153 /* Request access to the flash interface. */
3154 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
3155 goto nvram_write_end;
3157 /* Enable access to flash interface */
3158 bnx2_enable_nvram_access(bp);
3160 cmd_flags = BNX2_NVM_COMMAND_FIRST;
3161 if (bp->flash_info->buffered == 0) {
3162 int j;
3164 /* Read the whole page into the buffer
3165 * (non-buffered flash only) */
3166 for (j = 0; j < bp->flash_info->page_size; j += 4) {
3167 if (j == (bp->flash_info->page_size - 4)) {
3168 cmd_flags |= BNX2_NVM_COMMAND_LAST;
3169 }
3170 rc = bnx2_nvram_read_dword(bp,
3171 page_start + j,
3172 &flash_buffer[j],
3173 cmd_flags);
3175 if (rc)
3176 goto nvram_write_end;
3178 cmd_flags = 0;
3179 }
3180 }
3182 /* Enable writes to flash interface (unlock write-protect) */
3183 if ((rc = bnx2_enable_nvram_write(bp)) != 0)
3184 goto nvram_write_end;
3186 /* Loop to write back the buffer data from page_start to
3187 * data_start */
3188 i = 0;
3189 if (bp->flash_info->buffered == 0) {
3190 /* Erase the page */
3191 if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
3192 goto nvram_write_end;
3194 /* Re-enable the write again for the actual write */
3195 bnx2_enable_nvram_write(bp);
3197 for (addr = page_start; addr < data_start;
3198 addr += 4, i += 4) {
3200 rc = bnx2_nvram_write_dword(bp, addr,
3201 &flash_buffer[i], cmd_flags);
3203 if (rc != 0)
3204 goto nvram_write_end;
3206 cmd_flags = 0;
3207 }
3208 }
3210 /* Loop to write the new data from data_start to data_end */
3211 for (addr = data_start; addr < data_end; addr += 4, i += 4) {
3212 if ((addr == page_end - 4) ||
3213 ((bp->flash_info->buffered) &&
3214 (addr == data_end - 4))) {
3216 cmd_flags |= BNX2_NVM_COMMAND_LAST;
3217 }
3218 rc = bnx2_nvram_write_dword(bp, addr, buf,
3219 cmd_flags);
3221 if (rc != 0)
3222 goto nvram_write_end;
3224 cmd_flags = 0;
3225 buf += 4;
3226 }
3228 /* Loop to write back the buffer data from data_end
3229 * to page_end */
3230 if (bp->flash_info->buffered == 0) {
3231 for (addr = data_end; addr < page_end;
3232 addr += 4, i += 4) {
3234 if (addr == page_end-4) {
3235 cmd_flags = BNX2_NVM_COMMAND_LAST;
3236 }
3237 rc = bnx2_nvram_write_dword(bp, addr,
3238 &flash_buffer[i], cmd_flags);
3240 if (rc != 0)
3241 goto nvram_write_end;
3243 cmd_flags = 0;
3244 }
3245 }
3247 /* Disable writes to flash interface (lock write-protect) */
3248 bnx2_disable_nvram_write(bp);
3250 /* Disable access to flash interface */
3251 bnx2_disable_nvram_access(bp);
3252 bnx2_release_nvram_lock(bp);
3254 /* Increment written */
3255 written += data_end - data_start;
3256 }
3258 nvram_write_end:
3259 kfree(flash_buffer);
3260 kfree(align_buf);
3261 return rc;
3262 }
3264 static int
3265 bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
3266 {
3267 u32 val;
3268 int i, rc = 0;
3270 /* Wait for the current PCI transaction to complete before
3271 * issuing a reset. */
3272 REG_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
3273 BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
3274 BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
3275 BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
3276 BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
3277 val = REG_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
3278 udelay(5);
3280 /* Wait for the firmware to tell us it is ok to issue a reset. */
3281 bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1);
3283 /* Deposit a driver reset signature so the firmware knows that
3284 * this is a soft reset. */
3285 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_RESET_SIGNATURE,
3286 BNX2_DRV_RESET_SIGNATURE_MAGIC);
3288 /* Do a dummy read to force the chip to complete all current transactions
3289 * before we issue a reset. */
3290 val = REG_RD(bp, BNX2_MISC_ID);
3292 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3293 REG_WR(bp, BNX2_MISC_COMMAND, BNX2_MISC_COMMAND_SW_RESET);
3294 REG_RD(bp, BNX2_MISC_COMMAND);
3295 udelay(5);
3297 val = BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
3298 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
3300 pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, val);
3302 } else {
3303 val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3304 BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
3305 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
3307 /* Chip reset. */
3308 REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);
3310 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
3311 (CHIP_ID(bp) == CHIP_ID_5706_A1)) {
3312 current->state = TASK_UNINTERRUPTIBLE;
3313 schedule_timeout(HZ / 50);
3314 }
3316 /* Reset takes approximately 30 usec */
3317 for (i = 0; i < 10; i++) {
3318 val = REG_RD(bp, BNX2_PCICFG_MISC_CONFIG);
3319 if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3320 BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0)
3321 break;
3322 udelay(10);
3323 }
3325 if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3326 BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
3327 printk(KERN_ERR PFX "Chip reset did not complete\n");
3328 return -EBUSY;
3329 }
3330 }
3332 /* Make sure byte swapping is properly configured. */
3333 val = REG_RD(bp, BNX2_PCI_SWAP_DIAG0);
3334 if (val != 0x01020304) {
3335 printk(KERN_ERR PFX "Chip not in correct endian mode\n");
3336 return -ENODEV;
3337 }
3339 /* Wait for the firmware to finish its initialization. */
3340 rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 0);
3341 if (rc)
3342 return rc;
3344 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
3345 /* Adjust the voltage regulator to two steps lower. The default
3346 * of this register is 0x0000000e. */
3347 REG_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);
3349 /* Remove bad rbuf memory from the free pool. */
3350 rc = bnx2_alloc_bad_rbuf(bp);
3351 }
3353 return rc;
3354 }
3356 static int
3357 bnx2_init_chip(struct bnx2 *bp)
3358 {
3359 u32 val;
3360 int rc;
3362 /* Make sure the interrupt is not active. */
3363 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
3365 val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
3366 BNX2_DMA_CONFIG_DATA_WORD_SWAP |
3367 #ifdef __BIG_ENDIAN
3368 BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
3369 #endif
3370 BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
3371 DMA_READ_CHANS << 12 |
3372 DMA_WRITE_CHANS << 16;
3374 val |= (0x2 << 20) | (1 << 11);
3376 if ((bp->flags & PCIX_FLAG) && (bp->bus_speed_mhz == 133))
3377 val |= (1 << 23);
3379 if ((CHIP_NUM(bp) == CHIP_NUM_5706) &&
3380 (CHIP_ID(bp) != CHIP_ID_5706_A0) && !(bp->flags & PCIX_FLAG))
3381 val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;
3383 REG_WR(bp, BNX2_DMA_CONFIG, val);
3385 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
3386 val = REG_RD(bp, BNX2_TDMA_CONFIG);
3387 val |= BNX2_TDMA_CONFIG_ONE_DMA;
3388 REG_WR(bp, BNX2_TDMA_CONFIG, val);
3389 }
3391 if (bp->flags & PCIX_FLAG) {
3392 u16 val16;
3394 pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
3395 &val16);
3396 pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
3397 val16 & ~PCI_X_CMD_ERO);
3398 }
3400 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
3401 BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
3402 BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
3403 BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);
3405 /* Initialize context mapping and zero out the quick contexts. The
3406 * context block must have already been enabled. */
3407 if (CHIP_NUM(bp) == CHIP_NUM_5709)
3408 bnx2_init_5709_context(bp);
3409 else
3410 bnx2_init_context(bp);
3412 if ((rc = bnx2_init_cpus(bp)) != 0)
3413 return rc;
3415 bnx2_init_nvram(bp);
3417 bnx2_set_mac_addr(bp);
3419 val = REG_RD(bp, BNX2_MQ_CONFIG);
3420 val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
3421 val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
3422 if (CHIP_ID(bp) == CHIP_ID_5709_A0 || CHIP_ID(bp) == CHIP_ID_5709_A1)
3423 val |= BNX2_MQ_CONFIG_HALT_DIS;
3425 REG_WR(bp, BNX2_MQ_CONFIG, val);
3427 val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
3428 REG_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
3429 REG_WR(bp, BNX2_MQ_KNL_WIND_END, val);
3431 val = (BCM_PAGE_BITS - 8) << 24;
3432 REG_WR(bp, BNX2_RV2P_CONFIG, val);
3434 /* Configure page size. */
3435 val = REG_RD(bp, BNX2_TBDR_CONFIG);
3436 val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
3437 val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
3438 REG_WR(bp, BNX2_TBDR_CONFIG, val);
3440 val = bp->mac_addr[0] +
3441 (bp->mac_addr[1] << 8) +
3442 (bp->mac_addr[2] << 16) +
3443 bp->mac_addr[3] +
3444 (bp->mac_addr[4] << 8) +
3445 (bp->mac_addr[5] << 16);
3446 REG_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);
3448 /* Program the MTU. Also include 4 bytes for CRC32. */
3449 val = bp->dev->mtu + ETH_HLEN + 4;
3450 if (val > (MAX_ETHERNET_PACKET_SIZE + 4))
3451 val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
3452 REG_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);
3454 bp->last_status_idx = 0;
3455 bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE;
3457 /* Set up how to generate a link change interrupt. */
3458 REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
3460 REG_WR(bp, BNX2_HC_STATUS_ADDR_L,
3461 (u64) bp->status_blk_mapping & 0xffffffff);
3462 REG_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);
3464 REG_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
3465 (u64) bp->stats_blk_mapping & 0xffffffff);
3466 REG_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
3467 (u64) bp->stats_blk_mapping >> 32);
3469 REG_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
3470 (bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);
3472 REG_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
3473 (bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);
3475 REG_WR(bp, BNX2_HC_COMP_PROD_TRIP,
3476 (bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);
3478 REG_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);
3480 REG_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);
3482 REG_WR(bp, BNX2_HC_COM_TICKS,
3483 (bp->com_ticks_int << 16) | bp->com_ticks);
3485 REG_WR(bp, BNX2_HC_CMD_TICKS,
3486 (bp->cmd_ticks_int << 16) | bp->cmd_ticks);
3488 REG_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks & 0xffff00);
3489 REG_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8); /* 3ms */
3491 if (CHIP_ID(bp) == CHIP_ID_5706_A1)
3492 REG_WR(bp, BNX2_HC_CONFIG, BNX2_HC_CONFIG_COLLECT_STATS);
3493 else {
3494 REG_WR(bp, BNX2_HC_CONFIG, BNX2_HC_CONFIG_RX_TMR_MODE |
3495 BNX2_HC_CONFIG_TX_TMR_MODE |
3496 BNX2_HC_CONFIG_COLLECT_STATS);
3497 }
3499 /* Clear internal stats counters. */
3500 REG_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);
3502 REG_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_BITS_LINK_STATE);
3504 if (REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_FEATURE) &
3505 BNX2_PORT_FEATURE_ASF_ENABLED)
3506 bp->flags |= ASF_ENABLE_FLAG;
3508 /* Initialize the receive filter. */
3509 bnx2_set_rx_mode(bp->dev);
3511 rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
3512 0);
3514 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, 0x5ffffff);
3515 REG_RD(bp, BNX2_MISC_ENABLE_SET_BITS);
3517 udelay(20);
3519 bp->hc_cmd = REG_RD(bp, BNX2_HC_COMMAND);
3521 return rc;
3522 }
3524 static void
3525 bnx2_init_tx_context(struct bnx2 *bp, u32 cid)
3526 {
3527 u32 val, offset0, offset1, offset2, offset3;
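/* Note: the 5709 lays out its L2 transmit context differently (the _XI
 * offsets), so the per-chip offsets are chosen before the BD ring
 * address is written into context memory.
 */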
3529 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3530 offset0 = BNX2_L2CTX_TYPE_XI;
3531 offset1 = BNX2_L2CTX_CMD_TYPE_XI;
3532 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
3533 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
3534 } else {
3535 offset0 = BNX2_L2CTX_TYPE;
3536 offset1 = BNX2_L2CTX_CMD_TYPE;
3537 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
3538 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
3539 }
3540 val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
3541 CTX_WR(bp, GET_CID_ADDR(cid), offset0, val);
3543 val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
3544 CTX_WR(bp, GET_CID_ADDR(cid), offset1, val);
3546 val = (u64) bp->tx_desc_mapping >> 32;
3547 CTX_WR(bp, GET_CID_ADDR(cid), offset2, val);
3549 val = (u64) bp->tx_desc_mapping & 0xffffffff;
3550 CTX_WR(bp, GET_CID_ADDR(cid), offset3, val);
3551 }
3553 static void
3554 bnx2_init_tx_ring(struct bnx2 *bp)
3555 {
3556 struct tx_bd *txbd;
3557 u32 cid;
3559 bp->tx_wake_thresh = bp->tx_ring_size / 2;
3561 txbd = &bp->tx_desc_ring[MAX_TX_DESC_CNT];
3563 txbd->tx_bd_haddr_hi = (u64) bp->tx_desc_mapping >> 32;
3564 txbd->tx_bd_haddr_lo = (u64) bp->tx_desc_mapping & 0xffffffff;
3566 bp->tx_prod = 0;
3567 bp->tx_cons = 0;
3568 bp->hw_tx_cons = 0;
3569 bp->tx_prod_bseq = 0;
3571 cid = TX_CID;
3572 bp->tx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BIDX;
3573 bp->tx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BSEQ;
3575 bnx2_init_tx_context(bp, cid);
3576 }
3578 static void
3579 bnx2_init_rx_ring(struct bnx2 *bp)
3580 {
3581 struct rx_bd *rxbd;
3582 int i;
3583 u16 prod, ring_prod;
3584 u32 val;
3586 /* 8 for CRC and VLAN */
3587 bp->rx_buf_use_size = bp->dev->mtu + ETH_HLEN + bp->rx_offset + 8;
3588 /* hw alignment */
3589 bp->rx_buf_size = bp->rx_buf_use_size + BNX2_RX_ALIGN;
3591 ring_prod = prod = bp->rx_prod = 0;
3592 bp->rx_cons = 0;
3593 bp->hw_rx_cons = 0;
3594 bp->rx_prod_bseq = 0;
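/* Note: the RX ring spans bp->rx_max_ring pages. The first
 * MAX_RX_DESC_CNT descriptors of each page carry buffers; the final
 * descriptor is left as a link whose host address points at the next
 * page (wrapping back to page 0), forming one logical circular ring.
 */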
3596 for (i = 0; i < bp->rx_max_ring; i++) {
3597 int j;
3599 rxbd = &bp->rx_desc_ring[i][0];
3600 for (j = 0; j < MAX_RX_DESC_CNT; j++, rxbd++) {
3601 rxbd->rx_bd_len = bp->rx_buf_use_size;
3602 rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
3603 }
3604 if (i == (bp->rx_max_ring - 1))
3605 j = 0;
3606 else
3607 j = i + 1;
3608 rxbd->rx_bd_haddr_hi = (u64) bp->rx_desc_mapping[j] >> 32;
3609 rxbd->rx_bd_haddr_lo = (u64) bp->rx_desc_mapping[j] &
3610 0xffffffff;
3611 }
3613 val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
3614 val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
3615 val |= 0x02 << 8;
3616 CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_CTX_TYPE, val);
3618 val = (u64) bp->rx_desc_mapping[0] >> 32;
3619 CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_NX_BDHADDR_HI, val);
3621 val = (u64) bp->rx_desc_mapping[0] & 0xffffffff;
3622 CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_NX_BDHADDR_LO, val);
3624 for (i = 0; i < bp->rx_ring_size; i++) {
3625 if (bnx2_alloc_rx_skb(bp, ring_prod) < 0) {
3626 break;
3627 }
3628 prod = NEXT_RX_BD(prod);
3629 ring_prod = RX_RING_IDX(prod);
3630 }
3631 bp->rx_prod = prod;
3633 REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, prod);
3635 REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bp->rx_prod_bseq);
3636 }
3638 static void
3639 bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
3641 u32 num_rings, max;
3643 bp->rx_ring_size = size;
3644 num_rings = 1;
3645 while (size > MAX_RX_DESC_CNT) {
3646 size -= MAX_RX_DESC_CNT;
3647 num_rings++;
3648 }
3649 /* round to next power of 2 */
3650 max = MAX_RX_RINGS;
3651 while ((max & num_rings) == 0)
3652 max >>= 1;
3654 if (num_rings != max)
3655 max <<= 1;
3657 bp->rx_max_ring = max;
3658 bp->rx_max_ring_idx = (bp->rx_max_ring * RX_DESC_CNT) - 1;
3659 }
3661 static void
3662 bnx2_free_tx_skbs(struct bnx2 *bp)
3663 {
3664 int i;
3666 if (bp->tx_buf_ring == NULL)
3667 return;
3669 for (i = 0; i < TX_DESC_CNT; ) {
3670 struct sw_bd *tx_buf = &bp->tx_buf_ring[i];
3671 struct sk_buff *skb = tx_buf->skb;
3672 int j, last;
3674 if (skb == NULL) {
3675 i++;
3676 continue;
3677 }
3679 pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
3680 skb_headlen(skb), PCI_DMA_TODEVICE);
3682 tx_buf->skb = NULL;
3684 last = skb_shinfo(skb)->nr_frags;
3685 for (j = 0; j < last; j++) {
3686 tx_buf = &bp->tx_buf_ring[i + j + 1];
3687 pci_unmap_page(bp->pdev,
3688 pci_unmap_addr(tx_buf, mapping),
3689 skb_shinfo(skb)->frags[j].size,
3690 PCI_DMA_TODEVICE);
3691 }
3692 dev_kfree_skb(skb);
3693 i += j + 1;
3694 }
3695 }
3698 static void
3699 bnx2_free_rx_skbs(struct bnx2 *bp)
3700 {
3701 int i;
3703 if (bp->rx_buf_ring == NULL)
3704 return;
3706 for (i = 0; i < bp->rx_max_ring_idx; i++) {
3707 struct sw_bd *rx_buf = &bp->rx_buf_ring[i];
3708 struct sk_buff *skb = rx_buf->skb;
3710 if (skb == NULL)
3711 continue;
3713 pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
3714 bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
3716 rx_buf->skb = NULL;
3718 dev_kfree_skb(skb);
3719 }
3720 }
3722 static void
3723 bnx2_free_skbs(struct bnx2 *bp)
3724 {
3725 bnx2_free_tx_skbs(bp);
3726 bnx2_free_rx_skbs(bp);
3727 }
3729 static int
3730 bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
3731 {
3732 int rc;
3734 rc = bnx2_reset_chip(bp, reset_code);
3735 bnx2_free_skbs(bp);
3736 if (rc)
3737 return rc;
3739 if ((rc = bnx2_init_chip(bp)) != 0)
3740 return rc;
3742 bnx2_init_tx_ring(bp);
3743 bnx2_init_rx_ring(bp);
3744 return 0;
3745 }
3747 static int
3748 bnx2_init_nic(struct bnx2 *bp)
3749 {
3750 int rc;
3752 if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
3753 return rc;
3755 spin_lock_bh(&bp->phy_lock);
3756 bnx2_init_phy(bp);
3757 spin_unlock_bh(&bp->phy_lock);
3758 bnx2_set_link(bp);
3759 return 0;
3760 }
3762 static int
3763 bnx2_test_registers(struct bnx2 *bp)
3764 {
3765 int ret;
3766 int i, is_5709;
3767 static const struct {
3768 u16 offset;
3769 u16 flags;
3770 #define BNX2_FL_NOT_5709 1
3771 u32 rw_mask;
3772 u32 ro_mask;
3773 } reg_tbl[] = {
3774 { 0x006c, 0, 0x00000000, 0x0000003f },
3775 { 0x0090, 0, 0xffffffff, 0x00000000 },
3776 { 0x0094, 0, 0x00000000, 0x00000000 },
3778 { 0x0404, BNX2_FL_NOT_5709, 0x00003f00, 0x00000000 },
3779 { 0x0418, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
3780 { 0x041c, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
3781 { 0x0420, BNX2_FL_NOT_5709, 0x00000000, 0x80ffffff },
3782 { 0x0424, BNX2_FL_NOT_5709, 0x00000000, 0x00000000 },
3783 { 0x0428, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
3784 { 0x0450, BNX2_FL_NOT_5709, 0x00000000, 0x0000ffff },
3785 { 0x0454, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
3786 { 0x0458, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
3788 { 0x0808, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
3789 { 0x0854, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
3790 { 0x0868, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
3791 { 0x086c, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
3792 { 0x0870, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
3793 { 0x0874, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
3795 { 0x0c00, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
3796 { 0x0c04, BNX2_FL_NOT_5709, 0x00000000, 0x03ff0001 },
3797 { 0x0c08, BNX2_FL_NOT_5709, 0x0f0ff073, 0x00000000 },
3799 { 0x1000, 0, 0x00000000, 0x00000001 },
3800 { 0x1004, 0, 0x00000000, 0x000f0001 },
3802 { 0x1408, 0, 0x01c00800, 0x00000000 },
3803 { 0x149c, 0, 0x8000ffff, 0x00000000 },
3804 { 0x14a8, 0, 0x00000000, 0x000001ff },
3805 { 0x14ac, 0, 0x0fffffff, 0x10000000 },
3806 { 0x14b0, 0, 0x00000002, 0x00000001 },
3807 { 0x14b8, 0, 0x00000000, 0x00000000 },
3808 { 0x14c0, 0, 0x00000000, 0x00000009 },
3809 { 0x14c4, 0, 0x00003fff, 0x00000000 },
3810 { 0x14cc, 0, 0x00000000, 0x00000001 },
3811 { 0x14d0, 0, 0xffffffff, 0x00000000 },
3813 { 0x1800, 0, 0x00000000, 0x00000001 },
3814 { 0x1804, 0, 0x00000000, 0x00000003 },
3816 { 0x2800, 0, 0x00000000, 0x00000001 },
3817 { 0x2804, 0, 0x00000000, 0x00003f01 },
3818 { 0x2808, 0, 0x0f3f3f03, 0x00000000 },
3819 { 0x2810, 0, 0xffff0000, 0x00000000 },
3820 { 0x2814, 0, 0xffff0000, 0x00000000 },
3821 { 0x2818, 0, 0xffff0000, 0x00000000 },
3822 { 0x281c, 0, 0xffff0000, 0x00000000 },
3823 { 0x2834, 0, 0xffffffff, 0x00000000 },
3824 { 0x2840, 0, 0x00000000, 0xffffffff },
3825 { 0x2844, 0, 0x00000000, 0xffffffff },
3826 { 0x2848, 0, 0xffffffff, 0x00000000 },
3827 { 0x284c, 0, 0xf800f800, 0x07ff07ff },
3829 { 0x2c00, 0, 0x00000000, 0x00000011 },
3830 { 0x2c04, 0, 0x00000000, 0x00030007 },
3832 { 0x3c00, 0, 0x00000000, 0x00000001 },
3833 { 0x3c04, 0, 0x00000000, 0x00070000 },
3834 { 0x3c08, 0, 0x00007f71, 0x07f00000 },
3835 { 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
3836 { 0x3c10, 0, 0xffffffff, 0x00000000 },
3837 { 0x3c14, 0, 0x00000000, 0xffffffff },
3838 { 0x3c18, 0, 0x00000000, 0xffffffff },
3839 { 0x3c1c, 0, 0xfffff000, 0x00000000 },
3840 { 0x3c20, 0, 0xffffff00, 0x00000000 },
3842 { 0x5004, 0, 0x00000000, 0x0000007f },
3843 { 0x5008, 0, 0x0f0007ff, 0x00000000 },
3845 { 0x5c00, 0, 0x00000000, 0x00000001 },
3846 { 0x5c04, 0, 0x00000000, 0x0003000f },
3847 { 0x5c08, 0, 0x00000003, 0x00000000 },
3848 { 0x5c0c, 0, 0x0000fff8, 0x00000000 },
3849 { 0x5c10, 0, 0x00000000, 0xffffffff },
3850 { 0x5c80, 0, 0x00000000, 0x0f7113f1 },
3851 { 0x5c84, 0, 0x00000000, 0x0000f333 },
3852 { 0x5c88, 0, 0x00000000, 0x00077373 },
3853 { 0x5c8c, 0, 0x00000000, 0x0007f737 },
3855 { 0x6808, 0, 0x0000ff7f, 0x00000000 },
3856 { 0x680c, 0, 0xffffffff, 0x00000000 },
3857 { 0x6810, 0, 0xffffffff, 0x00000000 },
3858 { 0x6814, 0, 0xffffffff, 0x00000000 },
3859 { 0x6818, 0, 0xffffffff, 0x00000000 },
3860 { 0x681c, 0, 0xffffffff, 0x00000000 },
3861 { 0x6820, 0, 0x00ff00ff, 0x00000000 },
3862 { 0x6824, 0, 0x00ff00ff, 0x00000000 },
3863 { 0x6828, 0, 0x00ff00ff, 0x00000000 },
3864 { 0x682c, 0, 0x03ff03ff, 0x00000000 },
3865 { 0x6830, 0, 0x03ff03ff, 0x00000000 },
3866 { 0x6834, 0, 0x03ff03ff, 0x00000000 },
3867 { 0x6838, 0, 0x03ff03ff, 0x00000000 },
3868 { 0x683c, 0, 0x0000ffff, 0x00000000 },
3869 { 0x6840, 0, 0x00000ff0, 0x00000000 },
3870 { 0x6844, 0, 0x00ffff00, 0x00000000 },
3871 { 0x684c, 0, 0xffffffff, 0x00000000 },
3872 { 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
3873 { 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
3874 { 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
3875 { 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
3876 { 0x6908, 0, 0x00000000, 0x0001ff0f },
3877 { 0x690c, 0, 0x00000000, 0x0ffe00f0 },
3879 { 0xffff, 0, 0x00000000, 0x00000000 },
3880 };
3882 ret = 0;
3883 is_5709 = 0;
3884 if (CHIP_NUM(bp) == CHIP_NUM_5709)
3885 is_5709 = 1;
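/* Note: each register is probed by writing all-zeros and then all-ones,
 * checking that the read/write bits (rw_mask) actually toggle while the
 * read-only bits (ro_mask) keep their original value; the saved contents
 * are restored afterwards.
 */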
3887 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
3888 u32 offset, rw_mask, ro_mask, save_val, val;
3889 u16 flags = reg_tbl[i].flags;
3891 if (is_5709 && (flags & BNX2_FL_NOT_5709))
3892 continue;
3894 offset = (u32) reg_tbl[i].offset;
3895 rw_mask = reg_tbl[i].rw_mask;
3896 ro_mask = reg_tbl[i].ro_mask;
3898 save_val = readl(bp->regview + offset);
3900 writel(0, bp->regview + offset);
3902 val = readl(bp->regview + offset);
3903 if ((val & rw_mask) != 0) {
3904 goto reg_test_err;
3905 }
3907 if ((val & ro_mask) != (save_val & ro_mask)) {
3908 goto reg_test_err;
3909 }
3911 writel(0xffffffff, bp->regview + offset);
3913 val = readl(bp->regview + offset);
3914 if ((val & rw_mask) != rw_mask) {
3915 goto reg_test_err;
3916 }
3918 if ((val & ro_mask) != (save_val & ro_mask)) {
3919 goto reg_test_err;
3920 }
3922 writel(save_val, bp->regview + offset);
3923 continue;
3925 reg_test_err:
3926 writel(save_val, bp->regview + offset);
3927 ret = -ENODEV;
3928 break;
3929 }
3930 return ret;
3931 }
3933 static int
3934 bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
3935 {
3936 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
3937 0xaaaaaaaa , 0xaa55aa55, 0x55aa55aa };
3938 int i;
3940 for (i = 0; i < sizeof(test_pattern) / 4; i++) {
3941 u32 offset;
3943 for (offset = 0; offset < size; offset += 4) {
3945 REG_WR_IND(bp, start + offset, test_pattern[i]);
3947 if (REG_RD_IND(bp, start + offset) !=
3948 test_pattern[i]) {
3949 return -ENODEV;
3950 }
3951 }
3952 }
3953 return 0;
3954 }
3956 static int
3957 bnx2_test_memory(struct bnx2 *bp)
3958 {
3959 int ret = 0;
3960 int i;
3961 static struct mem_entry {
3962 u32 offset;
3963 u32 len;
3964 } mem_tbl_5706[] = {
3965 { 0x60000, 0x4000 },
3966 { 0xa0000, 0x3000 },
3967 { 0xe0000, 0x4000 },
3968 { 0x120000, 0x4000 },
3969 { 0x1a0000, 0x4000 },
3970 { 0x160000, 0x4000 },
3971 { 0xffffffff, 0 },
3972 },
3973 mem_tbl_5709[] = {
3974 { 0x60000, 0x4000 },
3975 { 0xa0000, 0x3000 },
3976 { 0xe0000, 0x4000 },
3977 { 0x120000, 0x4000 },
3978 { 0x1a0000, 0x4000 },
3979 { 0xffffffff, 0 },
3980 };
3981 struct mem_entry *mem_tbl;
3983 if (CHIP_NUM(bp) == CHIP_NUM_5709)
3984 mem_tbl = mem_tbl_5709;
3985 else
3986 mem_tbl = mem_tbl_5706;
3988 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
3989 if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
3990 mem_tbl[i].len)) != 0) {
3991 return ret;
3992 }
3993 }
3995 return ret;
3996 }
3998 #define BNX2_MAC_LOOPBACK 0
3999 #define BNX2_PHY_LOOPBACK 1
4001 static int
4002 bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
4003 {
4004 unsigned int pkt_size, num_pkts, i;
4005 struct sk_buff *skb, *rx_skb;
4006 unsigned char *packet;
4007 u16 rx_start_idx, rx_idx;
4008 dma_addr_t map;
4009 struct tx_bd *txbd;
4010 struct sw_bd *rx_buf;
4011 struct l2_fhdr *rx_hdr;
4012 int ret = -ENODEV;
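/* Note: the test builds a 1514-byte frame addressed to ourselves, posts
 * a single TX descriptor, forces an immediate coalescing pass, and then
 * verifies that exactly one packet arrived with no l2_fhdr errors and an
 * intact payload.
 */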
4014 if (loopback_mode == BNX2_MAC_LOOPBACK) {
4015 bp->loopback = MAC_LOOPBACK;
4016 bnx2_set_mac_loopback(bp);
4017 }
4018 else if (loopback_mode == BNX2_PHY_LOOPBACK) {
4019 bp->loopback = PHY_LOOPBACK;
4020 bnx2_set_phy_loopback(bp);
4021 }
4022 else
4023 return -EINVAL;
4025 pkt_size = 1514;
4026 skb = netdev_alloc_skb(bp->dev, pkt_size);
4027 if (!skb)
4028 return -ENOMEM;
4029 packet = skb_put(skb, pkt_size);
4030 memcpy(packet, bp->dev->dev_addr, 6);
4031 memset(packet + 6, 0x0, 8);
4032 for (i = 14; i < pkt_size; i++)
4033 packet[i] = (unsigned char) (i & 0xff);
4035 map = pci_map_single(bp->pdev, skb->data, pkt_size,
4036 PCI_DMA_TODEVICE);
4038 REG_WR(bp, BNX2_HC_COMMAND,
4039 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
4041 REG_RD(bp, BNX2_HC_COMMAND);
4043 udelay(5);
4044 rx_start_idx = bp->status_blk->status_rx_quick_consumer_index0;
4046 num_pkts = 0;
4048 txbd = &bp->tx_desc_ring[TX_RING_IDX(bp->tx_prod)];
4050 txbd->tx_bd_haddr_hi = (u64) map >> 32;
4051 txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
4052 txbd->tx_bd_mss_nbytes = pkt_size;
4053 txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;
4055 num_pkts++;
4056 bp->tx_prod = NEXT_TX_BD(bp->tx_prod);
4057 bp->tx_prod_bseq += pkt_size;
4059 REG_WR16(bp, bp->tx_bidx_addr, bp->tx_prod);
4060 REG_WR(bp, bp->tx_bseq_addr, bp->tx_prod_bseq);
4062 udelay(100);
4064 REG_WR(bp, BNX2_HC_COMMAND,
4065 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
4067 REG_RD(bp, BNX2_HC_COMMAND);
4069 udelay(5);
4071 pci_unmap_single(bp->pdev, map, pkt_size, PCI_DMA_TODEVICE);
4072 dev_kfree_skb(skb);
4074 if (bp->status_blk->status_tx_quick_consumer_index0 != bp->tx_prod) {
4075 goto loopback_test_done;
4076 }
4078 rx_idx = bp->status_blk->status_rx_quick_consumer_index0;
4079 if (rx_idx != rx_start_idx + num_pkts) {
4080 goto loopback_test_done;
4081 }
4083 rx_buf = &bp->rx_buf_ring[rx_start_idx];
4084 rx_skb = rx_buf->skb;
4086 rx_hdr = (struct l2_fhdr *) rx_skb->data;
4087 skb_reserve(rx_skb, bp->rx_offset);
4089 pci_dma_sync_single_for_cpu(bp->pdev,
4090 pci_unmap_addr(rx_buf, mapping),
4091 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
4093 if (rx_hdr->l2_fhdr_status &
4094 (L2_FHDR_ERRORS_BAD_CRC |
4095 L2_FHDR_ERRORS_PHY_DECODE |
4096 L2_FHDR_ERRORS_ALIGNMENT |
4097 L2_FHDR_ERRORS_TOO_SHORT |
4098 L2_FHDR_ERRORS_GIANT_FRAME)) {
4100 goto loopback_test_done;
4101 }
4103 if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) {
4104 goto loopback_test_done;
4105 }
4107 for (i = 14; i < pkt_size; i++) {
4108 if (*(rx_skb->data + i) != (unsigned char) (i & 0xff)) {
4109 goto loopback_test_done;
4110 }
4111 }
4113 ret = 0;
4115 loopback_test_done:
4116 bp->loopback = 0;
4117 return ret;
4118 }
4120 #define BNX2_MAC_LOOPBACK_FAILED 1
4121 #define BNX2_PHY_LOOPBACK_FAILED 2
4122 #define BNX2_LOOPBACK_FAILED (BNX2_MAC_LOOPBACK_FAILED | \
4123 BNX2_PHY_LOOPBACK_FAILED)
4125 static int
4126 bnx2_test_loopback(struct bnx2 *bp)
4127 {
4128 int rc = 0;
4130 if (!netif_running(bp->dev))
4131 return BNX2_LOOPBACK_FAILED;
4133 bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
4134 spin_lock_bh(&bp->phy_lock);
4135 bnx2_init_phy(bp);
4136 spin_unlock_bh(&bp->phy_lock);
4137 if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
4138 rc |= BNX2_MAC_LOOPBACK_FAILED;
4139 if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
4140 rc |= BNX2_PHY_LOOPBACK_FAILED;
4141 return rc;
4142 }
4144 #define NVRAM_SIZE 0x200
4145 #define CRC32_RESIDUAL 0xdebb20e3
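/* Note: running CRC32 over a block that ends with its own little-endian
 * CRC always yields the constant residual 0xdebb20e3, so each 0x100-byte
 * region below can be validated without extracting the stored checksum.
 */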
4147 static int
4148 bnx2_test_nvram(struct bnx2 *bp)
4149 {
4150 u32 buf[NVRAM_SIZE / 4];
4151 u8 *data = (u8 *) buf;
4152 int rc = 0;
4153 u32 magic, csum;
4155 if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
4156 goto test_nvram_done;
4158 magic = be32_to_cpu(buf[0]);
4159 if (magic != 0x669955aa) {
4160 rc = -ENODEV;
4161 goto test_nvram_done;
4162 }
4164 if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0)
4165 goto test_nvram_done;
4167 csum = ether_crc_le(0x100, data);
4168 if (csum != CRC32_RESIDUAL) {
4169 rc = -ENODEV;
4170 goto test_nvram_done;
4171 }
4173 csum = ether_crc_le(0x100, data + 0x100);
4174 if (csum != CRC32_RESIDUAL) {
4175 rc = -ENODEV;
4176 }
4178 test_nvram_done:
4179 return rc;
4180 }
4182 static int
4183 bnx2_test_link(struct bnx2 *bp)
4184 {
4185 u32 bmsr;
4187 spin_lock_bh(&bp->phy_lock);
4188 bnx2_read_phy(bp, MII_BMSR, &bmsr);
4189 bnx2_read_phy(bp, MII_BMSR, &bmsr);
4190 spin_unlock_bh(&bp->phy_lock);
4192 if (bmsr & BMSR_LSTATUS) {
4193 return 0;
4194 }
4195 return -ENODEV;
4196 }
4198 static int
4199 bnx2_test_intr(struct bnx2 *bp)
4200 {
4201 int i;
4202 u16 status_idx;
4204 if (!netif_running(bp->dev))
4205 return -ENODEV;
4207 status_idx = REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;
4209 /* This register is not touched during run-time. */
4210 REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
4211 REG_RD(bp, BNX2_HC_COMMAND);
4213 for (i = 0; i < 10; i++) {
4214 if ((REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
4215 status_idx) {
4217 break;
4218 }
4220 msleep_interruptible(10);
4221 }
4222 if (i < 10)
4223 return 0;
4225 return -ENODEV;
4226 }
4228 static void
4229 bnx2_5706_serdes_timer(struct bnx2 *bp)
4230 {
4231 spin_lock(&bp->phy_lock);
4232 if (bp->serdes_an_pending)
4233 bp->serdes_an_pending--;
4234 else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
4235 u32 bmcr;
4237 bp->current_interval = bp->timer_interval;
4239 bnx2_read_phy(bp, MII_BMCR, &bmcr);
4241 if (bmcr & BMCR_ANENABLE) {
4242 u32 phy1, phy2;
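/* Note: this appears to use the PHY's indirect register access (0x1c
 * selects a shadow bank, the 0x17/0x15 pair addresses an expansion
 * register). Signal detect without a received CONFIG word suggests the
 * partner is forcing 1G, so parallel detection forces 1000/full to match.
 */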
4244 bnx2_write_phy(bp, 0x1c, 0x7c00);
4245 bnx2_read_phy(bp, 0x1c, &phy1);
4247 bnx2_write_phy(bp, 0x17, 0x0f01);
4248 bnx2_read_phy(bp, 0x15, &phy2);
4249 bnx2_write_phy(bp, 0x17, 0x0f01);
4250 bnx2_read_phy(bp, 0x15, &phy2);
4252 if ((phy1 & 0x10) && /* SIGNAL DETECT */
4253 !(phy2 & 0x20)) { /* no CONFIG */
4255 bmcr &= ~BMCR_ANENABLE;
4256 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
4257 bnx2_write_phy(bp, MII_BMCR, bmcr);
4258 bp->phy_flags |= PHY_PARALLEL_DETECT_FLAG;
4259 }
4260 }
4261 }
4262 else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
4263 (bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)) {
4264 u32 phy2;
4266 bnx2_write_phy(bp, 0x17, 0x0f01);
4267 bnx2_read_phy(bp, 0x15, &phy2);
4268 if (phy2 & 0x20) {
4269 u32 bmcr;
4271 bnx2_read_phy(bp, MII_BMCR, &bmcr);
4272 bmcr |= BMCR_ANENABLE;
4273 bnx2_write_phy(bp, MII_BMCR, bmcr);
4275 bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
4276 }
4277 } else
4278 bp->current_interval = bp->timer_interval;
4280 spin_unlock(&bp->phy_lock);
4281 }
4283 static void
4284 bnx2_5708_serdes_timer(struct bnx2 *bp)
4285 {
4286 if ((bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) == 0) {
4287 bp->serdes_an_pending = 0;
4288 return;
4289 }
4291 spin_lock(&bp->phy_lock);
4292 if (bp->serdes_an_pending)
4293 bp->serdes_an_pending--;
4294 else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
4295 u32 bmcr;
4297 bnx2_read_phy(bp, MII_BMCR, &bmcr);
4299 if (bmcr & BMCR_ANENABLE) {
4300 bmcr &= ~BMCR_ANENABLE;
4301 bmcr |= BMCR_FULLDPLX | BCM5708S_BMCR_FORCE_2500;
4302 bnx2_write_phy(bp, MII_BMCR, bmcr);
4303 bp->current_interval = SERDES_FORCED_TIMEOUT;
4304 } else {
4305 bmcr &= ~(BMCR_FULLDPLX | BCM5708S_BMCR_FORCE_2500);
4306 bmcr |= BMCR_ANENABLE;
4307 bnx2_write_phy(bp, MII_BMCR, bmcr);
4308 bp->serdes_an_pending = 2;
4309 bp->current_interval = bp->timer_interval;
4310 }
4312 } else
4313 bp->current_interval = bp->timer_interval;
4315 spin_unlock(&bp->phy_lock);
4316 }
4318 static void
4319 bnx2_timer(unsigned long data)
4320 {
4321 struct bnx2 *bp = (struct bnx2 *) data;
4322 u32 msg;
4324 if (!netif_running(bp->dev))
4325 return;
4327 if (atomic_read(&bp->intr_sem) != 0)
4328 goto bnx2_restart_timer;
4330 msg = (u32) ++bp->fw_drv_pulse_wr_seq;
4331 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_PULSE_MB, msg);
4333 bp->stats_blk->stat_FwRxDrop = REG_RD_IND(bp, BNX2_FW_RX_DROP_COUNT);
4335 if (bp->phy_flags & PHY_SERDES_FLAG) {
4336 if (CHIP_NUM(bp) == CHIP_NUM_5706)
4337 bnx2_5706_serdes_timer(bp);
4338 else if (CHIP_NUM(bp) == CHIP_NUM_5708)
4339 bnx2_5708_serdes_timer(bp);
4340 }
4342 bnx2_restart_timer:
4343 mod_timer(&bp->timer, jiffies + bp->current_interval);
4344 }
4346 /* Called with rtnl_lock */
4347 static int
4348 bnx2_open(struct net_device *dev)
4349 {
4350 struct bnx2 *bp = netdev_priv(dev);
4351 int rc;
4353 netif_carrier_off(dev);
4355 bnx2_set_power_state(bp, PCI_D0);
4356 bnx2_disable_int(bp);
4358 rc = bnx2_alloc_mem(bp);
4359 if (rc)
4360 return rc;
4362 if ((CHIP_ID(bp) != CHIP_ID_5706_A0) &&
4363 (CHIP_ID(bp) != CHIP_ID_5706_A1) &&
4364 !disable_msi) {
4366 if (pci_enable_msi(bp->pdev) == 0) {
4367 bp->flags |= USING_MSI_FLAG;
4368 rc = request_irq(bp->pdev->irq, bnx2_msi, 0, dev->name,
4369 dev);
4370 }
4371 else {
4372 rc = request_irq(bp->pdev->irq, bnx2_interrupt,
4373 IRQF_SHARED, dev->name, dev);
4374 }
4375 }
4376 else {
4377 rc = request_irq(bp->pdev->irq, bnx2_interrupt, IRQF_SHARED,
4378 dev->name, dev);
4379 }
4380 if (rc) {
4381 bnx2_free_mem(bp);
4382 return rc;
4383 }
4385 rc = bnx2_init_nic(bp);
4387 if (rc) {
4388 free_irq(bp->pdev->irq, dev);
4389 if (bp->flags & USING_MSI_FLAG) {
4390 pci_disable_msi(bp->pdev);
4391 bp->flags &= ~USING_MSI_FLAG;
4392 }
4393 bnx2_free_skbs(bp);
4394 bnx2_free_mem(bp);
4395 return rc;
4396 }
4398 mod_timer(&bp->timer, jiffies + bp->current_interval);
4400 atomic_set(&bp->intr_sem, 0);
4402 bnx2_enable_int(bp);
4404 if (bp->flags & USING_MSI_FLAG) {
4405 /* Test MSI to make sure it is working
4406 * If MSI test fails, go back to INTx mode
4407 */
4408 if (bnx2_test_intr(bp) != 0) {
4409 printk(KERN_WARNING PFX "%s: No interrupt was generated"
4410 " using MSI, switching to INTx mode. Please"
4411 " report this failure to the PCI maintainer"
4412 " and include system chipset information.\n",
4413 bp->dev->name);
4415 bnx2_disable_int(bp);
4416 free_irq(bp->pdev->irq, dev);
4417 pci_disable_msi(bp->pdev);
4418 bp->flags &= ~USING_MSI_FLAG;
4420 rc = bnx2_init_nic(bp);
4422 if (!rc) {
4423 rc = request_irq(bp->pdev->irq, bnx2_interrupt,
4424 IRQF_SHARED, dev->name, dev);
4425 }
4426 if (rc) {
4427 bnx2_free_skbs(bp);
4428 bnx2_free_mem(bp);
4429 del_timer_sync(&bp->timer);
4430 return rc;
4431 }
4432 bnx2_enable_int(bp);
4433 }
4434 }
4435 if (bp->flags & USING_MSI_FLAG) {
4436 printk(KERN_INFO PFX "%s: using MSI\n", dev->name);
4437 }
4439 netif_start_queue(dev);
4441 return 0;
4442 }
4444 static void
4445 bnx2_reset_task(struct work_struct *work)
4446 {
4447 struct bnx2 *bp = container_of(work, struct bnx2, reset_task);
4449 if (!netif_running(bp->dev))
4450 return;
4452 bp->in_reset_task = 1;
4453 bnx2_netif_stop(bp);
4455 bnx2_init_nic(bp);
4457 atomic_set(&bp->intr_sem, 1);
4458 bnx2_netif_start(bp);
4459 bp->in_reset_task = 0;
4460 }
4462 static void
4463 bnx2_tx_timeout(struct net_device *dev)
4464 {
4465 struct bnx2 *bp = netdev_priv(dev);
4467 /* This allows the netif to be shut down gracefully before resetting */
4468 schedule_work(&bp->reset_task);
4469 }
4471 #ifdef BCM_VLAN
4472 /* Called with rtnl_lock */
4473 static void
4474 bnx2_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp)
4475 {
4476 struct bnx2 *bp = netdev_priv(dev);
4478 bnx2_netif_stop(bp);
4480 bp->vlgrp = vlgrp;
4481 bnx2_set_rx_mode(dev);
4483 bnx2_netif_start(bp);
4484 }
4486 /* Called with rtnl_lock */
4487 static void
4488 bnx2_vlan_rx_kill_vid(struct net_device *dev, uint16_t vid)
4490 struct bnx2 *bp = netdev_priv(dev);
4492 bnx2_netif_stop(bp);
4493 vlan_group_set_device(bp->vlgrp, vid, NULL);
4494 bnx2_set_rx_mode(dev);
4496 bnx2_netif_start(bp);
4498 #endif
/* Called with netif_tx_lock.
 * bnx2_tx_int() runs without netif_tx_lock unless it needs to call
 * netif_wake_queue().
 */
static int
bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	dma_addr_t mapping;
	struct tx_bd *txbd;
	struct sw_bd *tx_buf;
	u32 len, vlan_tag_flags, last_frag, mss;
	u16 prod, ring_prod;
	int i;

	if (unlikely(bnx2_tx_avail(bp) < (skb_shinfo(skb)->nr_frags + 1))) {
		netif_stop_queue(dev);
		printk(KERN_ERR PFX "%s: BUG! Tx ring full when queue awake!\n",
			dev->name);

		return NETDEV_TX_BUSY;
	}
	len = skb_headlen(skb);
	prod = bp->tx_prod;
	ring_prod = TX_RING_IDX(prod);

	vlan_tag_flags = 0;
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
	}

	if (bp->vlgrp != 0 && vlan_tx_tag_present(skb)) {
		vlan_tag_flags |=
			(TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
	}
	if ((mss = skb_shinfo(skb)->gso_size) &&
	    (skb->len > (bp->dev->mtu + ETH_HLEN))) {
		u32 tcp_opt_len, ip_tcp_len;
		struct iphdr *iph;

		vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;

		tcp_opt_len = tcp_optlen(skb);
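
		/* For IPv6 TSO the chip needs the TCP header offset relative
		 * to the end of a basic (40-byte) IPv6 header, since
		 * extension headers can push the TCP header further out.
		 * That offset, in 8-byte units, is scattered across spare
		 * bits of the BD flags and mss fields below; it is 0 when
		 * no extension headers are present.
		 */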
		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
			u32 tcp_off = skb_transport_offset(skb) -
				      sizeof(struct ipv6hdr) - ETH_HLEN;

			vlan_tag_flags |= ((tcp_opt_len >> 2) << 8) |
					  TX_BD_FLAGS_SW_FLAGS;
			if (likely(tcp_off == 0))
				vlan_tag_flags &= ~TX_BD_FLAGS_TCP6_OFF0_MSK;
			else {
				tcp_off >>= 3;
				vlan_tag_flags |= ((tcp_off & 0x3) <<
						   TX_BD_FLAGS_TCP6_OFF0_SHL) |
						  ((tcp_off & 0x10) <<
						   TX_BD_FLAGS_TCP6_OFF4_SHL);
				mss |= (tcp_off & 0xc) << TX_BD_TCP6_OFF2_SHL;
			}
		} else {
			if (skb_header_cloned(skb) &&
			    pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
				dev_kfree_skb(skb);
				return NETDEV_TX_OK;
			}

			ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);

			iph = ip_hdr(skb);
			iph->check = 0;
			iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
			tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
								 iph->daddr, 0,
								 IPPROTO_TCP,
								 0);
			if (tcp_opt_len || (iph->ihl > 5)) {
				vlan_tag_flags |= ((iph->ihl - 5) +
						   (tcp_opt_len >> 2)) << 8;
			}
		}
	} else
		mss = 0;

	mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE);

	tx_buf = &bp->tx_buf_ring[ring_prod];
	tx_buf->skb = skb;
	pci_unmap_addr_set(tx_buf, mapping, mapping);

	txbd = &bp->tx_desc_ring[ring_prod];

	txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
	txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
	txbd->tx_bd_mss_nbytes = len | (mss << 16);
	txbd->tx_bd_vlan_tag_flags = vlan_tag_flags | TX_BD_FLAGS_START;

	last_frag = skb_shinfo(skb)->nr_frags;

	for (i = 0; i < last_frag; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		prod = NEXT_TX_BD(prod);
		ring_prod = TX_RING_IDX(prod);
		txbd = &bp->tx_desc_ring[ring_prod];

		len = frag->size;
		mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
			len, PCI_DMA_TODEVICE);
		pci_unmap_addr_set(&bp->tx_buf_ring[ring_prod],
				mapping, mapping);

		txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
		txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
		txbd->tx_bd_mss_nbytes = len | (mss << 16);
		txbd->tx_bd_vlan_tag_flags = vlan_tag_flags;
	}
	txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;

	prod = NEXT_TX_BD(prod);
	bp->tx_prod_bseq += skb->len;

	REG_WR16(bp, bp->tx_bidx_addr, prod);
	REG_WR(bp, bp->tx_bseq_addr, bp->tx_prod_bseq);

	mmiowb();

	bp->tx_prod = prod;
	dev->trans_start = jiffies;

	if (unlikely(bnx2_tx_avail(bp) <= MAX_SKB_FRAGS)) {
		netif_stop_queue(dev);
		if (bnx2_tx_avail(bp) > bp->tx_wake_thresh)
			netif_wake_queue(dev);
	}

	return NETDEV_TX_OK;
}
/* Called with rtnl_lock */
static int
bnx2_close(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 reset_code;

	/* Calling flush_scheduled_work() may deadlock because
	 * linkwatch_event() may be on the workqueue and it will try to get
	 * the rtnl_lock which we are holding.
	 */
	while (bp->in_reset_task)
		msleep(1);

	bnx2_netif_stop(bp);
	del_timer_sync(&bp->timer);
	if (bp->flags & NO_WOL_FLAG)
		reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
	else if (bp->wol)
		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
	else
		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
	bnx2_reset_chip(bp, reset_code);
	free_irq(bp->pdev->irq, dev);
	if (bp->flags & USING_MSI_FLAG) {
		pci_disable_msi(bp->pdev);
		bp->flags &= ~USING_MSI_FLAG;
	}
	bnx2_free_skbs(bp);
	bnx2_free_mem(bp);
	bp->link_up = 0;
	netif_carrier_off(bp->dev);
	bnx2_set_power_state(bp, PCI_D3hot);
	return 0;
}
#define GET_NET_STATS64(ctr)					\
	(unsigned long) ((unsigned long) (ctr##_hi) << 32) +	\
	(unsigned long) (ctr##_lo)

#define GET_NET_STATS32(ctr)	\
	(ctr##_lo)

#if (BITS_PER_LONG == 64)
#define GET_NET_STATS	GET_NET_STATS64
#else
#define GET_NET_STATS	GET_NET_STATS32
#endif
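
/* GET_NET_STATS() folds the chip's split 64-bit counters into an
 * unsigned long.  For example, GET_NET_STATS(stats_blk->stat_IfHCInOctets)
 * pastes to stat_IfHCInOctets_hi / stat_IfHCInOctets_lo and, on 64-bit
 * hosts, combines them as (hi << 32) + lo; 32-bit hosts just take lo.
 */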
static struct net_device_stats *
bnx2_get_stats(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	struct statistics_block *stats_blk = bp->stats_blk;
	struct net_device_stats *net_stats = &bp->net_stats;

	if (bp->stats_blk == NULL) {
		return net_stats;
	}
	net_stats->rx_packets =
		GET_NET_STATS(stats_blk->stat_IfHCInUcastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCInMulticastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCInBroadcastPkts);

	net_stats->tx_packets =
		GET_NET_STATS(stats_blk->stat_IfHCOutUcastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCOutBroadcastPkts);

	net_stats->rx_bytes =
		GET_NET_STATS(stats_blk->stat_IfHCInOctets);

	net_stats->tx_bytes =
		GET_NET_STATS(stats_blk->stat_IfHCOutOctets);

	net_stats->multicast =
		GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts);

	net_stats->collisions =
		(unsigned long) stats_blk->stat_EtherStatsCollisions;

	net_stats->rx_length_errors =
		(unsigned long) (stats_blk->stat_EtherStatsUndersizePkts +
		stats_blk->stat_EtherStatsOverrsizePkts);

	net_stats->rx_over_errors =
		(unsigned long) stats_blk->stat_IfInMBUFDiscards;

	net_stats->rx_frame_errors =
		(unsigned long) stats_blk->stat_Dot3StatsAlignmentErrors;

	net_stats->rx_crc_errors =
		(unsigned long) stats_blk->stat_Dot3StatsFCSErrors;

	net_stats->rx_errors = net_stats->rx_length_errors +
		net_stats->rx_over_errors + net_stats->rx_frame_errors +
		net_stats->rx_crc_errors;

	net_stats->tx_aborted_errors =
		(unsigned long) (stats_blk->stat_Dot3StatsExcessiveCollisions +
		stats_blk->stat_Dot3StatsLateCollisions);

	if ((CHIP_NUM(bp) == CHIP_NUM_5706) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_A0))
		net_stats->tx_carrier_errors = 0;
	else {
		net_stats->tx_carrier_errors =
			(unsigned long)
			stats_blk->stat_Dot3StatsCarrierSenseErrors;
	}

	net_stats->tx_errors =
		(unsigned long)
		stats_blk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors
		+
		net_stats->tx_aborted_errors +
		net_stats->tx_carrier_errors;

	net_stats->rx_missed_errors =
		(unsigned long) (stats_blk->stat_IfInMBUFDiscards +
		stats_blk->stat_FwRxDrop);

	return net_stats;
}
/* All ethtool functions called with rtnl_lock */

static int
bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct bnx2 *bp = netdev_priv(dev);

	cmd->supported = SUPPORTED_Autoneg;
	if (bp->phy_flags & PHY_SERDES_FLAG) {
		cmd->supported |= SUPPORTED_1000baseT_Full |
			SUPPORTED_FIBRE;

		cmd->port = PORT_FIBRE;
	}
	else {
		cmd->supported |= SUPPORTED_10baseT_Half |
			SUPPORTED_10baseT_Full |
			SUPPORTED_100baseT_Half |
			SUPPORTED_100baseT_Full |
			SUPPORTED_1000baseT_Full |
			SUPPORTED_TP;

		cmd->port = PORT_TP;
	}

	cmd->advertising = bp->advertising;

	if (bp->autoneg & AUTONEG_SPEED) {
		cmd->autoneg = AUTONEG_ENABLE;
	}
	else {
		cmd->autoneg = AUTONEG_DISABLE;
	}

	if (netif_carrier_ok(dev)) {
		cmd->speed = bp->line_speed;
		cmd->duplex = bp->duplex;
	}
	else {
		cmd->speed = -1;
		cmd->duplex = -1;
	}

	cmd->transceiver = XCVR_INTERNAL;
	cmd->phy_address = bp->phy_addr;

	return 0;
}
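
/* bnx2_set_settings() backs `ethtool -s`, e.g. (interface name is just
 * an example):
 *
 *	ethtool -s eth0 speed 100 duplex full autoneg off
 *
 * Single-speed advertising and forced speeds are validated against the
 * PHY type (copper vs. SerDes) before the PHY is reprogrammed.
 */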
static int
bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct bnx2 *bp = netdev_priv(dev);
	u8 autoneg = bp->autoneg;
	u8 req_duplex = bp->req_duplex;
	u16 req_line_speed = bp->req_line_speed;
	u32 advertising = bp->advertising;

	if (cmd->autoneg == AUTONEG_ENABLE) {
		autoneg |= AUTONEG_SPEED;

		cmd->advertising &= ETHTOOL_ALL_COPPER_SPEED;

		/* allow advertising 1 speed */
		if ((cmd->advertising == ADVERTISED_10baseT_Half) ||
		    (cmd->advertising == ADVERTISED_10baseT_Full) ||
		    (cmd->advertising == ADVERTISED_100baseT_Half) ||
		    (cmd->advertising == ADVERTISED_100baseT_Full)) {

			if (bp->phy_flags & PHY_SERDES_FLAG)
				return -EINVAL;

			advertising = cmd->advertising;
		}
		else if (cmd->advertising == ADVERTISED_1000baseT_Full) {
			advertising = cmd->advertising;
		}
		else if (cmd->advertising == ADVERTISED_1000baseT_Half) {
			return -EINVAL;
		}
		else {
			if (bp->phy_flags & PHY_SERDES_FLAG) {
				advertising = ETHTOOL_ALL_FIBRE_SPEED;
			}
			else {
				advertising = ETHTOOL_ALL_COPPER_SPEED;
			}
		}
		advertising |= ADVERTISED_Autoneg;
	}
	else {
		if (bp->phy_flags & PHY_SERDES_FLAG) {
			if ((cmd->speed != SPEED_1000 &&
			     cmd->speed != SPEED_2500) ||
			    (cmd->duplex != DUPLEX_FULL))
				return -EINVAL;

			if (cmd->speed == SPEED_2500 &&
			    !(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
				return -EINVAL;
		}
		else if (cmd->speed == SPEED_1000) {
			return -EINVAL;
		}
		autoneg &= ~AUTONEG_SPEED;
		req_line_speed = cmd->speed;
		req_duplex = cmd->duplex;
		advertising = 0;
	}

	bp->autoneg = autoneg;
	bp->advertising = advertising;
	bp->req_line_speed = req_line_speed;
	bp->req_duplex = req_duplex;

	spin_lock_bh(&bp->phy_lock);

	bnx2_setup_phy(bp);

	spin_unlock_bh(&bp->phy_lock);

	return 0;
}
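
/* bp->fw_ver packs the bootcode revision one byte per field; each byte
 * is turned into a single ASCII digit below, so the reported firmware
 * version has the form "x.y.z".
 */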
static void
bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct bnx2 *bp = netdev_priv(dev);

	strcpy(info->driver, DRV_MODULE_NAME);
	strcpy(info->version, DRV_MODULE_VERSION);
	strcpy(info->bus_info, pci_name(bp->pdev));
	info->fw_version[0] = ((bp->fw_ver & 0xff000000) >> 24) + '0';
	info->fw_version[2] = ((bp->fw_ver & 0xff0000) >> 16) + '0';
	info->fw_version[4] = ((bp->fw_ver & 0xff00) >> 8) + '0';
	info->fw_version[1] = info->fw_version[3] = '.';
	info->fw_version[5] = 0;
}
#define BNX2_REGDUMP_LEN		(32 * 1024)

static int
bnx2_get_regs_len(struct net_device *dev)
{
	return BNX2_REGDUMP_LEN;
}
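
/* reg_boundaries[] below lists pairs of [start, end) offsets of register
 * ranges that are safe to read.  The dump walks each range and skips the
 * gaps, leaving them zeroed in the 32 KB buffer; `ethtool -d eth0` (eth0
 * as an example) is the usual way to retrieve it.
 */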
static void
bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
{
	u32 *p = _p, i, offset;
	u8 *orig_p = _p;
	struct bnx2 *bp = netdev_priv(dev);
	u32 reg_boundaries[] = { 0x0000, 0x0098, 0x0400, 0x045c,
				 0x0800, 0x0880, 0x0c00, 0x0c10,
				 0x0c30, 0x0d08, 0x1000, 0x101c,
				 0x1040, 0x1048, 0x1080, 0x10a4,
				 0x1400, 0x1490, 0x1498, 0x14f0,
				 0x1500, 0x155c, 0x1580, 0x15dc,
				 0x1600, 0x1658, 0x1680, 0x16d8,
				 0x1800, 0x1820, 0x1840, 0x1854,
				 0x1880, 0x1894, 0x1900, 0x1984,
				 0x1c00, 0x1c0c, 0x1c40, 0x1c54,
				 0x1c80, 0x1c94, 0x1d00, 0x1d84,
				 0x2000, 0x2030, 0x23c0, 0x2400,
				 0x2800, 0x2820, 0x2830, 0x2850,
				 0x2b40, 0x2c10, 0x2fc0, 0x3058,
				 0x3c00, 0x3c94, 0x4000, 0x4010,
				 0x4080, 0x4090, 0x43c0, 0x4458,
				 0x4c00, 0x4c18, 0x4c40, 0x4c54,
				 0x4fc0, 0x5010, 0x53c0, 0x5444,
				 0x5c00, 0x5c18, 0x5c80, 0x5c90,
				 0x5fc0, 0x6000, 0x6400, 0x6428,
				 0x6800, 0x6848, 0x684c, 0x6860,
				 0x6888, 0x6910, 0x8000 };

	regs->version = 0;

	memset(p, 0, BNX2_REGDUMP_LEN);

	if (!netif_running(bp->dev))
		return;

	i = 0;
	offset = reg_boundaries[0];
	p += offset;
	while (offset < BNX2_REGDUMP_LEN) {
		*p++ = REG_RD(bp, offset);
		offset += 4;
		if (offset == reg_boundaries[i + 1]) {
			offset = reg_boundaries[i + 2];
			p = (u32 *) (orig_p + offset);
			i += 2;
		}
	}
}
static void
bnx2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct bnx2 *bp = netdev_priv(dev);

	if (bp->flags & NO_WOL_FLAG) {
		wol->supported = 0;
		wol->wolopts = 0;
	}
	else {
		wol->supported = WAKE_MAGIC;
		if (bp->wol)
			wol->wolopts = WAKE_MAGIC;
		else
			wol->wolopts = 0;
	}
	memset(&wol->sopass, 0, sizeof(wol->sopass));
}
static int
bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct bnx2 *bp = netdev_priv(dev);

	if (wol->wolopts & ~WAKE_MAGIC)
		return -EINVAL;

	if (wol->wolopts & WAKE_MAGIC) {
		if (bp->flags & NO_WOL_FLAG)
			return -EINVAL;

		bp->wol = 1;
	}
	else {
		bp->wol = 0;
	}
	return 0;
}
static int
bnx2_nway_reset(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 bmcr;

	if (!(bp->autoneg & AUTONEG_SPEED)) {
		return -EINVAL;
	}

	spin_lock_bh(&bp->phy_lock);

	/* Force a link down visible on the other side */
	if (bp->phy_flags & PHY_SERDES_FLAG) {
		bnx2_write_phy(bp, MII_BMCR, BMCR_LOOPBACK);
		spin_unlock_bh(&bp->phy_lock);

		msleep(20);

		spin_lock_bh(&bp->phy_lock);

		bp->current_interval = SERDES_AN_TIMEOUT;
		bp->serdes_an_pending = 1;
		mod_timer(&bp->timer, jiffies + bp->current_interval);
	}

	bnx2_read_phy(bp, MII_BMCR, &bmcr);
	bmcr &= ~BMCR_LOOPBACK;
	bnx2_write_phy(bp, MII_BMCR, bmcr | BMCR_ANRESTART | BMCR_ANENABLE);

	spin_unlock_bh(&bp->phy_lock);

	return 0;
}
static int
bnx2_get_eeprom_len(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);

	if (bp->flash_info == NULL)
		return 0;

	return (int) bp->flash_size;
}
static int
bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
		u8 *eebuf)
{
	struct bnx2 *bp = netdev_priv(dev);
	int rc;

	/* parameters already validated in ethtool_get_eeprom */

	rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);

	return rc;
}
static int
bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
		u8 *eebuf)
{
	struct bnx2 *bp = netdev_priv(dev);
	int rc;

	/* parameters already validated in ethtool_set_eeprom */

	rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);

	return rc;
}
static int
bnx2_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
{
	struct bnx2 *bp = netdev_priv(dev);

	memset(coal, 0, sizeof(struct ethtool_coalesce));

	coal->rx_coalesce_usecs = bp->rx_ticks;
	coal->rx_max_coalesced_frames = bp->rx_quick_cons_trip;
	coal->rx_coalesce_usecs_irq = bp->rx_ticks_int;
	coal->rx_max_coalesced_frames_irq = bp->rx_quick_cons_trip_int;

	coal->tx_coalesce_usecs = bp->tx_ticks;
	coal->tx_max_coalesced_frames = bp->tx_quick_cons_trip;
	coal->tx_coalesce_usecs_irq = bp->tx_ticks_int;
	coal->tx_max_coalesced_frames_irq = bp->tx_quick_cons_trip_int;

	coal->stats_block_coalesce_usecs = bp->stats_ticks;

	return 0;
}
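
/* bnx2_set_coalesce() clamps each value to what the hardware supports:
 * tick counts to 0x3ff microseconds and frame counts to 0xff.  A typical
 * tuning command (interface name is just an example):
 *
 *	ethtool -C eth0 rx-usecs 18 rx-frames 6 tx-usecs 80 tx-frames 20
 *
 * maps onto bp->rx_ticks, bp->rx_quick_cons_trip, bp->tx_ticks and
 * bp->tx_quick_cons_trip below; the NIC is reinitialized if it is up.
 */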
static int
bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
{
	struct bnx2 *bp = netdev_priv(dev);

	bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
	if (bp->rx_ticks > 0x3ff) bp->rx_ticks = 0x3ff;

	bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames;
	if (bp->rx_quick_cons_trip > 0xff) bp->rx_quick_cons_trip = 0xff;

	bp->rx_ticks_int = (u16) coal->rx_coalesce_usecs_irq;
	if (bp->rx_ticks_int > 0x3ff) bp->rx_ticks_int = 0x3ff;

	bp->rx_quick_cons_trip_int = (u16) coal->rx_max_coalesced_frames_irq;
	if (bp->rx_quick_cons_trip_int > 0xff)
		bp->rx_quick_cons_trip_int = 0xff;

	bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
	if (bp->tx_ticks > 0x3ff) bp->tx_ticks = 0x3ff;

	bp->tx_quick_cons_trip = (u16) coal->tx_max_coalesced_frames;
	if (bp->tx_quick_cons_trip > 0xff) bp->tx_quick_cons_trip = 0xff;

	bp->tx_ticks_int = (u16) coal->tx_coalesce_usecs_irq;
	if (bp->tx_ticks_int > 0x3ff) bp->tx_ticks_int = 0x3ff;

	bp->tx_quick_cons_trip_int = (u16) coal->tx_max_coalesced_frames_irq;
	if (bp->tx_quick_cons_trip_int > 0xff) bp->tx_quick_cons_trip_int =
		0xff;

	bp->stats_ticks = coal->stats_block_coalesce_usecs;
	if (bp->stats_ticks > 0xffff00) bp->stats_ticks = 0xffff00;
	bp->stats_ticks &= 0xffff00;

	if (netif_running(bp->dev)) {
		bnx2_netif_stop(bp);
		bnx2_init_nic(bp);
		bnx2_netif_start(bp);
	}

	return 0;
}
static void
bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
{
	struct bnx2 *bp = netdev_priv(dev);

	ering->rx_max_pending = MAX_TOTAL_RX_DESC_CNT;
	ering->rx_mini_max_pending = 0;
	ering->rx_jumbo_max_pending = 0;

	ering->rx_pending = bp->rx_ring_size;
	ering->rx_mini_pending = 0;
	ering->rx_jumbo_pending = 0;

	ering->tx_max_pending = MAX_TX_DESC_CNT;
	ering->tx_pending = bp->tx_ring_size;
}
static int
bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
{
	struct bnx2 *bp = netdev_priv(dev);

	if ((ering->rx_pending > MAX_TOTAL_RX_DESC_CNT) ||
	    (ering->tx_pending > MAX_TX_DESC_CNT) ||
	    (ering->tx_pending <= MAX_SKB_FRAGS)) {

		return -EINVAL;
	}
	if (netif_running(bp->dev)) {
		bnx2_netif_stop(bp);
		bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
		bnx2_free_skbs(bp);
		bnx2_free_mem(bp);
	}

	bnx2_set_rx_ring_size(bp, ering->rx_pending);
	bp->tx_ring_size = ering->tx_pending;

	if (netif_running(bp->dev)) {
		int rc;

		rc = bnx2_alloc_mem(bp);
		if (rc)
			return rc;
		bnx2_init_nic(bp);
		bnx2_netif_start(bp);
	}

	return 0;
}
static void
bnx2_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
{
	struct bnx2 *bp = netdev_priv(dev);

	epause->autoneg = ((bp->autoneg & AUTONEG_FLOW_CTRL) != 0);
	epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) != 0);
	epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) != 0);
}
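
/* Flow control can be forced or autonegotiated, e.g. (example name):
 *
 *	ethtool -A eth0 autoneg on rx on tx on
 *
 * With autoneg on, the request is folded into bp->autoneg and resolved
 * by bnx2_setup_phy(); otherwise req_flow_ctrl is applied directly.
 */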
static int
bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
{
	struct bnx2 *bp = netdev_priv(dev);

	bp->req_flow_ctrl = 0;
	if (epause->rx_pause)
		bp->req_flow_ctrl |= FLOW_CTRL_RX;
	if (epause->tx_pause)
		bp->req_flow_ctrl |= FLOW_CTRL_TX;

	if (epause->autoneg) {
		bp->autoneg |= AUTONEG_FLOW_CTRL;
	}
	else {
		bp->autoneg &= ~AUTONEG_FLOW_CTRL;
	}

	spin_lock_bh(&bp->phy_lock);

	bnx2_setup_phy(bp);

	spin_unlock_bh(&bp->phy_lock);

	return 0;
}
static u32
bnx2_get_rx_csum(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);

	return bp->rx_csum;
}

static int
bnx2_set_rx_csum(struct net_device *dev, u32 data)
{
	struct bnx2 *bp = netdev_priv(dev);

	bp->rx_csum = data;
	return 0;
}
static int
bnx2_set_tso(struct net_device *dev, u32 data)
{
	struct bnx2 *bp = netdev_priv(dev);

	if (data) {
		dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
		if (CHIP_NUM(bp) == CHIP_NUM_5709)
			dev->features |= NETIF_F_TSO6;
	} else
		dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6 |
				   NETIF_F_TSO_ECN);
	return 0;
}
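
/* Note that IPv6 TSO (NETIF_F_TSO6) is only offered on the 5709, matching
 * the feature setup in bnx2_init_one(); toggling is the usual
 * `ethtool -K eth0 tso on|off` (eth0 as an example).
 */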
#define BNX2_NUM_STATS 46

static struct {
	char string[ETH_GSTRING_LEN];
} bnx2_stats_str_arr[BNX2_NUM_STATS] = {
	{ "rx_bytes" },
	{ "rx_error_bytes" },
	{ "tx_bytes" },
	{ "tx_error_bytes" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_mac_errors" },
	{ "tx_carrier_errors" },
	{ "rx_crc_errors" },
	{ "rx_align_errors" },
	{ "tx_single_collisions" },
	{ "tx_multi_collisions" },
	{ "tx_deferred" },
	{ "tx_excess_collisions" },
	{ "tx_late_collisions" },
	{ "tx_total_collisions" },
	{ "rx_fragments" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_oversize_packets" },
	{ "rx_64_byte_packets" },
	{ "rx_65_to_127_byte_packets" },
	{ "rx_128_to_255_byte_packets" },
	{ "rx_256_to_511_byte_packets" },
	{ "rx_512_to_1023_byte_packets" },
	{ "rx_1024_to_1522_byte_packets" },
	{ "rx_1523_to_9022_byte_packets" },
	{ "tx_64_byte_packets" },
	{ "tx_65_to_127_byte_packets" },
	{ "tx_128_to_255_byte_packets" },
	{ "tx_256_to_511_byte_packets" },
	{ "tx_512_to_1023_byte_packets" },
	{ "tx_1024_to_1522_byte_packets" },
	{ "tx_1523_to_9022_byte_packets" },
	{ "rx_xon_frames" },
	{ "rx_xoff_frames" },
	{ "tx_xon_frames" },
	{ "tx_xoff_frames" },
	{ "rx_mac_ctrl_frames" },
	{ "rx_filtered_packets" },
	{ "rx_discards" },
	{ "rx_fw_discards" },
};
#define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4)

static const unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
    STATS_OFFSET32(stat_IfHCInOctets_hi),
    STATS_OFFSET32(stat_IfHCInBadOctets_hi),
    STATS_OFFSET32(stat_IfHCOutOctets_hi),
    STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
    STATS_OFFSET32(stat_IfHCInUcastPkts_hi),
    STATS_OFFSET32(stat_IfHCInMulticastPkts_hi),
    STATS_OFFSET32(stat_IfHCInBroadcastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutUcastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutMulticastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutBroadcastPkts_hi),
    STATS_OFFSET32(stat_emac_tx_stat_dot3statsinternalmactransmiterrors),
    STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
    STATS_OFFSET32(stat_Dot3StatsFCSErrors),
    STATS_OFFSET32(stat_Dot3StatsAlignmentErrors),
    STATS_OFFSET32(stat_Dot3StatsSingleCollisionFrames),
    STATS_OFFSET32(stat_Dot3StatsMultipleCollisionFrames),
    STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
    STATS_OFFSET32(stat_Dot3StatsExcessiveCollisions),
    STATS_OFFSET32(stat_Dot3StatsLateCollisions),
    STATS_OFFSET32(stat_EtherStatsCollisions),
    STATS_OFFSET32(stat_EtherStatsFragments),
    STATS_OFFSET32(stat_EtherStatsJabbers),
    STATS_OFFSET32(stat_EtherStatsUndersizePkts),
    STATS_OFFSET32(stat_EtherStatsOverrsizePkts),
    STATS_OFFSET32(stat_EtherStatsPktsRx64Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx65Octetsto127Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx128Octetsto255Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx256Octetsto511Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx512Octetsto1023Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx1024Octetsto1522Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx1523Octetsto9022Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx64Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx65Octetsto127Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx128Octetsto255Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx256Octetsto511Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx512Octetsto1023Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx1024Octetsto1522Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx1523Octetsto9022Octets),
    STATS_OFFSET32(stat_XonPauseFramesReceived),
    STATS_OFFSET32(stat_XoffPauseFramesReceived),
    STATS_OFFSET32(stat_OutXonSent),
    STATS_OFFSET32(stat_OutXoffSent),
    STATS_OFFSET32(stat_MacControlFramesReceived),
    STATS_OFFSET32(stat_IfInFramesL2FilterDiscards),
    STATS_OFFSET32(stat_IfInMBUFDiscards),
    STATS_OFFSET32(stat_FwRxDrop),
};
/* stat_IfHCInBadOctets and stat_Dot3StatsCarrierSenseErrors are
 * skipped because of errata.
 */
static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
	8,0,8,8,8,8,8,8,8,8,
	4,0,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,
};

static u8 bnx2_5708_stats_len_arr[BNX2_NUM_STATS] = {
	8,0,8,8,8,8,8,8,8,8,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,
};
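
/* Each entry gives the width of the corresponding hardware counter:
 * 8 for a split hi/lo 64-bit counter, 4 for a plain 32-bit one, and 0
 * for a counter that bnx2_get_ethtool_stats() must skip and report as 0.
 */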
#define BNX2_NUM_TESTS 6

static struct {
	char string[ETH_GSTRING_LEN];
} bnx2_tests_str_arr[BNX2_NUM_TESTS] = {
	{ "register_test (offline)" },
	{ "memory_test (offline)" },
	{ "loopback_test (offline)" },
	{ "nvram_test (online)" },
	{ "interrupt_test (online)" },
	{ "link_test (online)" },
};

static int
bnx2_self_test_count(struct net_device *dev)
{
	return BNX2_NUM_TESTS;
}
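
/* `ethtool -t eth0 offline` (eth0 as an example) runs all six tests;
 * without the offline flag only the nvram, interrupt and link tests run.
 * The offline pass resets the chip, so the NIC is reinitialized (waiting
 * up to 7 seconds for link) before the online tests follow.
 */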
static void
bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
{
	struct bnx2 *bp = netdev_priv(dev);

	memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS);
	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		int i;

		bnx2_netif_stop(bp);
		bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
		bnx2_free_skbs(bp);

		if (bnx2_test_registers(bp) != 0) {
			buf[0] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		if (bnx2_test_memory(bp) != 0) {
			buf[1] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		if ((buf[2] = bnx2_test_loopback(bp)) != 0)
			etest->flags |= ETH_TEST_FL_FAILED;

		if (!netif_running(bp->dev)) {
			bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
		}
		else {
			bnx2_init_nic(bp);
			bnx2_netif_start(bp);
		}

		/* wait for link up */
		for (i = 0; i < 7; i++) {
			if (bp->link_up)
				break;
			msleep_interruptible(1000);
		}
	}

	if (bnx2_test_nvram(bp) != 0) {
		buf[3] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	if (bnx2_test_intr(bp) != 0) {
		buf[4] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}

	if (bnx2_test_link(bp) != 0) {
		buf[5] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
}
static void
bnx2_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
{
	switch (stringset) {
	case ETH_SS_STATS:
		memcpy(buf, bnx2_stats_str_arr,
			sizeof(bnx2_stats_str_arr));
		break;
	case ETH_SS_TEST:
		memcpy(buf, bnx2_tests_str_arr,
			sizeof(bnx2_tests_str_arr));
		break;
	}
}

static int
bnx2_get_stats_count(struct net_device *dev)
{
	return BNX2_NUM_STATS;
}
static void
bnx2_get_ethtool_stats(struct net_device *dev,
		struct ethtool_stats *stats, u64 *buf)
{
	struct bnx2 *bp = netdev_priv(dev);
	int i;
	u32 *hw_stats = (u32 *) bp->stats_blk;
	u8 *stats_len_arr = NULL;

	if (hw_stats == NULL) {
		memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS);
		return;
	}

	if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
	    (CHIP_ID(bp) == CHIP_ID_5706_A1) ||
	    (CHIP_ID(bp) == CHIP_ID_5706_A2) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_A0))
		stats_len_arr = bnx2_5706_stats_len_arr;
	else
		stats_len_arr = bnx2_5708_stats_len_arr;

	for (i = 0; i < BNX2_NUM_STATS; i++) {
		if (stats_len_arr[i] == 0) {
			/* skip this counter */
			buf[i] = 0;
			continue;
		}
		if (stats_len_arr[i] == 4) {
			/* 4-byte counter */
			buf[i] = (u64)
				*(hw_stats + bnx2_stats_offset_arr[i]);
			continue;
		}
		/* 8-byte counter */
		buf[i] = (((u64) *(hw_stats +
					bnx2_stats_offset_arr[i])) << 32) +
				*(hw_stats + bnx2_stats_offset_arr[i] + 1);
	}
}
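
/* bnx2_phys_id() backs `ethtool -p eth0 N` (eth0 as an example): it
 * overrides the LED control for roughly N seconds, toggling the port
 * LEDs every 500 ms, then restores the saved BNX2_MISC_CFG value.
 */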
static int
bnx2_phys_id(struct net_device *dev, u32 data)
{
	struct bnx2 *bp = netdev_priv(dev);
	int i;
	u32 save;

	if (data == 0)
		data = 2;

	save = REG_RD(bp, BNX2_MISC_CFG);
	REG_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);

	for (i = 0; i < (data * 2); i++) {
		if ((i % 2) == 0) {
			REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE);
		}
		else {
			REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE |
				BNX2_EMAC_LED_1000MB_OVERRIDE |
				BNX2_EMAC_LED_100MB_OVERRIDE |
				BNX2_EMAC_LED_10MB_OVERRIDE |
				BNX2_EMAC_LED_TRAFFIC_OVERRIDE |
				BNX2_EMAC_LED_TRAFFIC);
		}
		msleep_interruptible(500);
		if (signal_pending(current))
			break;
	}
	REG_WR(bp, BNX2_EMAC_LED, 0);
	REG_WR(bp, BNX2_MISC_CFG, save);
	return 0;
}
static int
bnx2_set_tx_csum(struct net_device *dev, u32 data)
{
	struct bnx2 *bp = netdev_priv(dev);

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		return (ethtool_op_set_tx_hw_csum(dev, data));
	else
		return (ethtool_op_set_tx_csum(dev, data));
}
static const struct ethtool_ops bnx2_ethtool_ops = {
	.get_settings		= bnx2_get_settings,
	.set_settings		= bnx2_set_settings,
	.get_drvinfo		= bnx2_get_drvinfo,
	.get_regs_len		= bnx2_get_regs_len,
	.get_regs		= bnx2_get_regs,
	.get_wol		= bnx2_get_wol,
	.set_wol		= bnx2_set_wol,
	.nway_reset		= bnx2_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_eeprom_len		= bnx2_get_eeprom_len,
	.get_eeprom		= bnx2_get_eeprom,
	.set_eeprom		= bnx2_set_eeprom,
	.get_coalesce		= bnx2_get_coalesce,
	.set_coalesce		= bnx2_set_coalesce,
	.get_ringparam		= bnx2_get_ringparam,
	.set_ringparam		= bnx2_set_ringparam,
	.get_pauseparam		= bnx2_get_pauseparam,
	.set_pauseparam		= bnx2_set_pauseparam,
	.get_rx_csum		= bnx2_get_rx_csum,
	.set_rx_csum		= bnx2_set_rx_csum,
	.get_tx_csum		= ethtool_op_get_tx_csum,
	.set_tx_csum		= bnx2_set_tx_csum,
	.get_sg			= ethtool_op_get_sg,
	.set_sg			= ethtool_op_set_sg,
	.get_tso		= ethtool_op_get_tso,
	.set_tso		= bnx2_set_tso,
	.self_test_count	= bnx2_self_test_count,
	.self_test		= bnx2_self_test,
	.get_strings		= bnx2_get_strings,
	.phys_id		= bnx2_phys_id,
	.get_stats_count	= bnx2_get_stats_count,
	.get_ethtool_stats	= bnx2_get_ethtool_stats,
	.get_perm_addr		= ethtool_op_get_perm_addr,
};
/* Called with rtnl_lock */
static int
bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *data = if_mii(ifr);
	struct bnx2 *bp = netdev_priv(dev);
	int err;

	switch(cmd) {
	case SIOCGMIIPHY:
		data->phy_id = bp->phy_addr;

		/* fallthru */
	case SIOCGMIIREG: {
		u32 mii_regval;

		if (!netif_running(dev))
			return -EAGAIN;

		spin_lock_bh(&bp->phy_lock);
		err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval);
		spin_unlock_bh(&bp->phy_lock);

		data->val_out = mii_regval;

		return err;
	}

	case SIOCSMIIREG:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;

		if (!netif_running(dev))
			return -EAGAIN;

		spin_lock_bh(&bp->phy_lock);
		err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in);
		spin_unlock_bh(&bp->phy_lock);

		return err;

	default:
		/* do nothing */
		break;
	}
	return -EOPNOTSUPP;
}
/* Called with rtnl_lock */
static int
bnx2_change_mac_addr(struct net_device *dev, void *p)
{
	struct sockaddr *addr = p;
	struct bnx2 *bp = netdev_priv(dev);

	if (!is_valid_ether_addr(addr->sa_data))
		return -EINVAL;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	if (netif_running(dev))
		bnx2_set_mac_addr(bp);

	return 0;
}
/* Called with rtnl_lock */
static int
bnx2_change_mtu(struct net_device *dev, int new_mtu)
{
	struct bnx2 *bp = netdev_priv(dev);

	if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) ||
	    ((new_mtu + ETH_HLEN) < MIN_ETHERNET_PACKET_SIZE))
		return -EINVAL;

	dev->mtu = new_mtu;
	if (netif_running(dev)) {
		bnx2_netif_stop(bp);

		bnx2_init_nic(bp);

		bnx2_netif_start(bp);
	}
	return 0;
}
#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
static void
poll_bnx2(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);

	disable_irq(bp->pdev->irq);
	bnx2_interrupt(bp->pdev->irq, dev);
	enable_irq(bp->pdev->irq);
}
#endif
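
/* The 5709 is a dual-media device; whether a given port is copper or
 * SerDes is decoded below from the bond id and (possibly overridden)
 * strap bits in BNX2_MISC_DUAL_MEDIA_CTRL, with a different strap
 * mapping for PCI function 0 and function 1.
 */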
static void __devinit
bnx2_get_5709_media(struct bnx2 *bp)
{
	u32 val = REG_RD(bp, BNX2_MISC_DUAL_MEDIA_CTRL);
	u32 bond_id = val & BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID;
	u32 strap;

	if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_C)
		return;
	else if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_S) {
		bp->phy_flags |= PHY_SERDES_FLAG;
		return;
	}

	if (val & BNX2_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE)
		strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL) >> 21;
	else
		strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP) >> 8;

	if (PCI_FUNC(bp->pdev->devfn) == 0) {
		switch (strap) {
		case 0x4:
		case 0x5:
		case 0x6:
			bp->phy_flags |= PHY_SERDES_FLAG;
			return;
		}
	} else {
		switch (strap) {
		case 0x1:
		case 0x2:
		case 0x4:
			bp->phy_flags |= PHY_SERDES_FLAG;
			return;
		}
	}
}
static int __devinit
bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
{
	struct bnx2 *bp;
	unsigned long mem_len;
	int rc;
	u32 reg;
	u64 dma_mask, persist_dma_mask;

	SET_MODULE_OWNER(dev);
	SET_NETDEV_DEV(dev, &pdev->dev);
	bp = netdev_priv(dev);

	bp->flags = 0;
	bp->phy_flags = 0;

	/* enable device (incl. PCI PM wakeup), and bus-mastering */
	rc = pci_enable_device(pdev);
	if (rc) {
		dev_err(&pdev->dev, "Cannot enable PCI device, aborting.\n");
		goto err_out;
	}

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		dev_err(&pdev->dev,
			"Cannot find PCI device base address, aborting.\n");
		rc = -ENODEV;
		goto err_out_disable;
	}

	rc = pci_request_regions(pdev, DRV_MODULE_NAME);
	if (rc) {
		dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting.\n");
		goto err_out_disable;
	}

	pci_set_master(pdev);

	bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (bp->pm_cap == 0) {
		dev_err(&pdev->dev,
			"Cannot find power management capability, aborting.\n");
		rc = -EIO;
		goto err_out_release;
	}

	bp->dev = dev;
	bp->pdev = pdev;

	spin_lock_init(&bp->phy_lock);
	INIT_WORK(&bp->reset_task, bnx2_reset_task);

	dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0);
	mem_len = MB_GET_CID_ADDR(TX_TSS_CID + 1);
	dev->mem_end = dev->mem_start + mem_len;
	dev->irq = pdev->irq;

	bp->regview = ioremap_nocache(dev->base_addr, mem_len);

	if (!bp->regview) {
		dev_err(&pdev->dev, "Cannot map register space, aborting.\n");
		rc = -ENOMEM;
		goto err_out_release;
	}

	/* Configure byte swap and enable write to the reg_window registers.
	 * Rely on CPU to do target byte swapping on big endian systems
	 * The chip's target access swapping will not swap all accesses
	 */
	pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG,
			       BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
			       BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);

	bnx2_set_power_state(bp, PCI_D0);

	bp->chip_id = REG_RD(bp, BNX2_MISC_ID);

	if (CHIP_NUM(bp) != CHIP_NUM_5709) {
		bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
		if (bp->pcix_cap == 0) {
			dev_err(&pdev->dev,
				"Cannot find PCIX capability, aborting.\n");
			rc = -EIO;
			goto err_out_unmap;
		}
	}

	/* 5708 cannot support DMA addresses > 40-bit. */
	if (CHIP_NUM(bp) == CHIP_NUM_5708)
		persist_dma_mask = dma_mask = DMA_40BIT_MASK;
	else
		persist_dma_mask = dma_mask = DMA_64BIT_MASK;

	/* Configure DMA attributes. */
	if (pci_set_dma_mask(pdev, dma_mask) == 0) {
		dev->features |= NETIF_F_HIGHDMA;
		rc = pci_set_consistent_dma_mask(pdev, persist_dma_mask);
		if (rc) {
			dev_err(&pdev->dev,
				"pci_set_consistent_dma_mask failed, aborting.\n");
			goto err_out_unmap;
		}
	} else if ((rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) != 0) {
		dev_err(&pdev->dev, "System does not support DMA, aborting.\n");
		goto err_out_unmap;
	}

	/* Get bus information. */
	reg = REG_RD(bp, BNX2_PCICFG_MISC_STATUS);
	if (reg & BNX2_PCICFG_MISC_STATUS_PCIX_DET) {
		u32 clkreg;

		bp->flags |= PCIX_FLAG;

		clkreg = REG_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS);

		clkreg &= BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
		switch (clkreg) {
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
			bp->bus_speed_mhz = 133;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
			bp->bus_speed_mhz = 100;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
			bp->bus_speed_mhz = 66;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
			bp->bus_speed_mhz = 50;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
			bp->bus_speed_mhz = 33;
			break;
		}
	}
	else {
		if (reg & BNX2_PCICFG_MISC_STATUS_M66EN)
			bp->bus_speed_mhz = 66;
		else
			bp->bus_speed_mhz = 33;
	}

	if (reg & BNX2_PCICFG_MISC_STATUS_32BIT_DET)
		bp->flags |= PCI_32BIT_FLAG;

	/* 5706A0 may falsely detect SERR and PERR. */
	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
		reg = REG_RD(bp, PCI_COMMAND);
		reg &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
		REG_WR(bp, PCI_COMMAND, reg);
	}
	else if ((CHIP_ID(bp) == CHIP_ID_5706_A1) &&
		!(bp->flags & PCIX_FLAG)) {

		dev_err(&pdev->dev,
			"5706 A1 can only be used in a PCIX bus, aborting.\n");
		goto err_out_unmap;
	}

	bnx2_init_nvram(bp);

	reg = REG_RD_IND(bp, BNX2_SHM_HDR_SIGNATURE);

	if ((reg & BNX2_SHM_HDR_SIGNATURE_SIG_MASK) ==
	    BNX2_SHM_HDR_SIGNATURE_SIG) {
		u32 off = PCI_FUNC(pdev->devfn) << 2;

		bp->shmem_base = REG_RD_IND(bp, BNX2_SHM_HDR_ADDR_0 + off);
	} else
		bp->shmem_base = HOST_VIEW_SHMEM_BASE;

	/* Get the permanent MAC address.  First we need to make sure the
	 * firmware is actually running.
	 */
	reg = REG_RD_IND(bp, bp->shmem_base + BNX2_DEV_INFO_SIGNATURE);

	if ((reg & BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
	    BNX2_DEV_INFO_SIGNATURE_MAGIC) {
		dev_err(&pdev->dev, "Firmware not running, aborting.\n");
		rc = -ENODEV;
		goto err_out_unmap;
	}

	bp->fw_ver = REG_RD_IND(bp, bp->shmem_base + BNX2_DEV_INFO_BC_REV);

	reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_MAC_UPPER);
	bp->mac_addr[0] = (u8) (reg >> 8);
	bp->mac_addr[1] = (u8) reg;

	reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_MAC_LOWER);
	bp->mac_addr[2] = (u8) (reg >> 24);
	bp->mac_addr[3] = (u8) (reg >> 16);
	bp->mac_addr[4] = (u8) (reg >> 8);
	bp->mac_addr[5] = (u8) reg;

	bp->tx_ring_size = MAX_TX_DESC_CNT;
	bnx2_set_rx_ring_size(bp, 255);

	bp->rx_csum = 1;

	bp->rx_offset = sizeof(struct l2_fhdr) + 2;

	bp->tx_quick_cons_trip_int = 20;
	bp->tx_quick_cons_trip = 20;
	bp->tx_ticks_int = 80;
	bp->tx_ticks = 80;

	bp->rx_quick_cons_trip_int = 6;
	bp->rx_quick_cons_trip = 6;
	bp->rx_ticks_int = 18;
	bp->rx_ticks = 18;

	bp->stats_ticks = 1000000 & 0xffff00;

	bp->timer_interval = HZ;
	bp->current_interval = HZ;

	bp->phy_addr = 1;

	/* Disable WOL support if we are running on a SERDES chip. */
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_get_5709_media(bp);
	else if (CHIP_BOND_ID(bp) & CHIP_BOND_ID_SERDES_BIT)
		bp->phy_flags |= PHY_SERDES_FLAG;

	if (bp->phy_flags & PHY_SERDES_FLAG) {
		bp->flags |= NO_WOL_FLAG;
		if (CHIP_NUM(bp) != CHIP_NUM_5706) {
			bp->phy_addr = 2;
			reg = REG_RD_IND(bp, bp->shmem_base +
					 BNX2_SHARED_HW_CFG_CONFIG);
			if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G)
				bp->phy_flags |= PHY_2_5G_CAPABLE_FLAG;
		}
	} else if (CHIP_NUM(bp) == CHIP_NUM_5706 ||
		   CHIP_NUM(bp) == CHIP_NUM_5708)
		bp->phy_flags |= PHY_CRC_FIX_FLAG;
	else if (CHIP_ID(bp) == CHIP_ID_5709_A0)
		bp->phy_flags |= PHY_DIS_EARLY_DAC_FLAG;

	if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B1))
		bp->flags |= NO_WOL_FLAG;

	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
		bp->tx_quick_cons_trip_int =
			bp->tx_quick_cons_trip;
		bp->tx_ticks_int = bp->tx_ticks;
		bp->rx_quick_cons_trip_int =
			bp->rx_quick_cons_trip;
		bp->rx_ticks_int = bp->rx_ticks;
		bp->comp_prod_trip_int = bp->comp_prod_trip;
		bp->com_ticks_int = bp->com_ticks;
		bp->cmd_ticks_int = bp->cmd_ticks;
	}

	/* Disable MSI on 5706 if AMD 8132 bridge is found.
	 *
	 * MSI is defined to be 32-bit write.  The 5706 does 64-bit MSI writes
	 * with byte enables disabled on the unused 32-bit word.  This is legal
	 * but causes problems on the AMD 8132 which will eventually stop
	 * responding after a while.
	 *
	 * AMD believes this incompatibility is unique to the 5706, and
	 * prefers to locally disable MSI rather than globally disabling it.
	 */
	if (CHIP_NUM(bp) == CHIP_NUM_5706 && disable_msi == 0) {
		struct pci_dev *amd_8132 = NULL;

		while ((amd_8132 = pci_get_device(PCI_VENDOR_ID_AMD,
						  PCI_DEVICE_ID_AMD_8132_BRIDGE,
						  amd_8132))) {
			u8 rev;

			pci_read_config_byte(amd_8132, PCI_REVISION_ID, &rev);
			if (rev >= 0x10 && rev <= 0x13) {
				disable_msi = 1;
				pci_dev_put(amd_8132);
				break;
			}
		}
	}

	bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
	bp->req_line_speed = 0;
	if (bp->phy_flags & PHY_SERDES_FLAG) {
		bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;

		reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG);
		reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
		if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
			bp->autoneg = 0;
			bp->req_line_speed = bp->line_speed = SPEED_1000;
			bp->req_duplex = DUPLEX_FULL;
		}
	}
	else {
		bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
	}

	bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;

	init_timer(&bp->timer);
	bp->timer.expires = RUN_AT(bp->timer_interval);
	bp->timer.data = (unsigned long) bp;
	bp->timer.function = bnx2_timer;

	return 0;

err_out_unmap:
	if (bp->regview) {
		iounmap(bp->regview);
		bp->regview = NULL;
	}

err_out_release:
	pci_release_regions(pdev);

err_out_disable:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

err_out:
	return rc;
}
static int __devinit
bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	static int version_printed = 0;
	struct net_device *dev = NULL;
	struct bnx2 *bp;
	int rc, i;

	if (version_printed++ == 0)
		printk(KERN_INFO "%s", version);

	/* dev zeroed in init_etherdev */
	dev = alloc_etherdev(sizeof(*bp));

	if (!dev)
		return -ENOMEM;

	rc = bnx2_init_board(pdev, dev);
	if (rc < 0) {
		free_netdev(dev);
		return rc;
	}

	dev->open = bnx2_open;
	dev->hard_start_xmit = bnx2_start_xmit;
	dev->stop = bnx2_close;
	dev->get_stats = bnx2_get_stats;
	dev->set_multicast_list = bnx2_set_rx_mode;
	dev->do_ioctl = bnx2_ioctl;
	dev->set_mac_address = bnx2_change_mac_addr;
	dev->change_mtu = bnx2_change_mtu;
	dev->tx_timeout = bnx2_tx_timeout;
	dev->watchdog_timeo = TX_TIMEOUT;
#ifdef BCM_VLAN
	dev->vlan_rx_register = bnx2_vlan_rx_register;
	dev->vlan_rx_kill_vid = bnx2_vlan_rx_kill_vid;
#endif
	dev->poll = bnx2_poll;
	dev->ethtool_ops = &bnx2_ethtool_ops;
	dev->weight = 64;

	bp = netdev_priv(dev);

#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
	dev->poll_controller = poll_bnx2;
#endif

	pci_set_drvdata(pdev, dev);

	memcpy(dev->dev_addr, bp->mac_addr, 6);
	memcpy(dev->perm_addr, bp->mac_addr, 6);
	bp->name = board_info[ent->driver_data].name;

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		dev->features |= NETIF_F_HW_CSUM | NETIF_F_SG;
	else
		dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
#ifdef BCM_VLAN
	dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
#endif
	dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		dev->features |= NETIF_F_TSO6;

	if ((rc = register_netdev(dev))) {
		dev_err(&pdev->dev, "Cannot register net device\n");
		if (bp->regview)
			iounmap(bp->regview);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
		pci_set_drvdata(pdev, NULL);
		free_netdev(dev);
		return rc;
	}

	printk(KERN_INFO "%s: %s (%c%d) PCI%s %s %dMHz found at mem %lx, "
		"IRQ %d, ",
		dev->name,
		bp->name,
		((CHIP_ID(bp) & 0xf000) >> 12) + 'A',
		((CHIP_ID(bp) & 0x0ff0) >> 4),
		((bp->flags & PCIX_FLAG) ? "-X" : ""),
		((bp->flags & PCI_32BIT_FLAG) ? "32-bit" : "64-bit"),
		bp->bus_speed_mhz,
		dev->base_addr,
		bp->pdev->irq);

	printk("node addr ");
	for (i = 0; i < 6; i++)
		printk("%2.2x", dev->dev_addr[i]);
	printk("\n");

	return 0;
}
static void __devexit
bnx2_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	flush_scheduled_work();

	unregister_netdev(dev);

	if (bp->regview)
		iounmap(bp->regview);

	free_netdev(dev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}
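
/* PCI power management hooks.  bnx2_suspend() quiesces the NIC and picks
 * the same unload/WOL reset code as bnx2_close(); bnx2_resume() restores
 * PCI state and brings the NIC back up if it was running.
 */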
static int
bnx2_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);
	u32 reset_code;

	if (!netif_running(dev))
		return 0;

	flush_scheduled_work();
	bnx2_netif_stop(bp);
	netif_device_detach(dev);
	del_timer_sync(&bp->timer);
	if (bp->flags & NO_WOL_FLAG)
		reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
	else if (bp->wol)
		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
	else
		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
	bnx2_reset_chip(bp, reset_code);
	bnx2_free_skbs(bp);
	pci_save_state(pdev);
	bnx2_set_power_state(bp, pci_choose_state(pdev, state));
	return 0;
}
static int
bnx2_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	if (!netif_running(dev))
		return 0;

	pci_restore_state(pdev);
	bnx2_set_power_state(bp, PCI_D0);
	netif_device_attach(dev);
	bnx2_init_nic(bp);
	bnx2_netif_start(bp);
	return 0;
}
static struct pci_driver bnx2_pci_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= bnx2_pci_tbl,
	.probe		= bnx2_init_one,
	.remove		= __devexit_p(bnx2_remove_one),
	.suspend	= bnx2_suspend,
	.resume		= bnx2_resume,
};

static int __init bnx2_init(void)
{
	return pci_register_driver(&bnx2_pci_driver);
}

static void __exit bnx2_cleanup(void)
{
	pci_unregister_driver(&bnx2_pci_driver);
}

module_init(bnx2_init);
module_exit(bnx2_cleanup);