chelsio: more rx speedup
[linux-2.6/openmoko-kernel/knife-kernel.git] / drivers / net / bnx2.c
blob8e96154be031a113896bb4ba80521f8e01409f6e
1 /* bnx2.c: Broadcom NX2 network driver.
3 * Copyright (c) 2004, 2005, 2006 Broadcom Corporation
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
9 * Written by: Michael Chan (mchan@broadcom.com)
13 #include <linux/module.h>
14 #include <linux/moduleparam.h>
16 #include <linux/kernel.h>
17 #include <linux/timer.h>
18 #include <linux/errno.h>
19 #include <linux/ioport.h>
20 #include <linux/slab.h>
21 #include <linux/vmalloc.h>
22 #include <linux/interrupt.h>
23 #include <linux/pci.h>
24 #include <linux/init.h>
25 #include <linux/netdevice.h>
26 #include <linux/etherdevice.h>
27 #include <linux/skbuff.h>
28 #include <linux/dma-mapping.h>
29 #include <asm/bitops.h>
30 #include <asm/io.h>
31 #include <asm/irq.h>
32 #include <linux/delay.h>
33 #include <asm/byteorder.h>
34 #include <asm/page.h>
35 #include <linux/time.h>
36 #include <linux/ethtool.h>
37 #include <linux/mii.h>
38 #ifdef NETIF_F_HW_VLAN_TX
39 #include <linux/if_vlan.h>
40 #define BCM_VLAN 1
41 #endif
42 #include <net/ip.h>
43 #include <net/tcp.h>
44 #include <net/checksum.h>
45 #define BCM_TSO 1
46 #include <linux/workqueue.h>
47 #include <linux/crc32.h>
48 #include <linux/prefetch.h>
49 #include <linux/cache.h>
50 #include <linux/zlib.h>
52 #include "bnx2.h"
53 #include "bnx2_fw.h"
54 #include "bnx2_fw2.h"
56 #define DRV_MODULE_NAME "bnx2"
57 #define PFX DRV_MODULE_NAME ": "
58 #define DRV_MODULE_VERSION "1.5.5"
59 #define DRV_MODULE_RELDATE "February 1, 2007"
61 #define RUN_AT(x) (jiffies + (x))
63 /* Time in jiffies before concluding the transmitter is hung. */
64 #define TX_TIMEOUT (5*HZ)
66 static const char version[] __devinitdata =
67 "Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
69 MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
70 MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708 Driver");
71 MODULE_LICENSE("GPL");
72 MODULE_VERSION(DRV_MODULE_VERSION);
74 static int disable_msi = 0;
76 module_param(disable_msi, int, 0);
77 MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
79 typedef enum {
80 BCM5706 = 0,
81 NC370T,
82 NC370I,
83 BCM5706S,
84 NC370F,
85 BCM5708,
86 BCM5708S,
87 BCM5709,
88 } board_t;
90 /* indexed by board_t, above */
91 static const struct {
92 char *name;
93 } board_info[] __devinitdata = {
94 { "Broadcom NetXtreme II BCM5706 1000Base-T" },
95 { "HP NC370T Multifunction Gigabit Server Adapter" },
96 { "HP NC370i Multifunction Gigabit Server Adapter" },
97 { "Broadcom NetXtreme II BCM5706 1000Base-SX" },
98 { "HP NC370F Multifunction Gigabit Server Adapter" },
99 { "Broadcom NetXtreme II BCM5708 1000Base-T" },
100 { "Broadcom NetXtreme II BCM5708 1000Base-SX" },
101 { "Broadcom NetXtreme II BCM5709 1000Base-T" },
104 static struct pci_device_id bnx2_pci_tbl[] = {
105 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
106 PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
107 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
108 PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
109 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
110 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
111 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
112 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
113 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
114 PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
115 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
116 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
117 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
118 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
119 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709,
120 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709 },
121 { 0, }
124 static struct flash_spec flash_table[] =
126 /* Slow EEPROM */
127 {0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
128 1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
129 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
130 "EEPROM - slow"},
131 /* Expansion entry 0001 */
132 {0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
133 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
134 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
135 "Entry 0001"},
136 /* Saifun SA25F010 (non-buffered flash) */
137 /* strap, cfg1, & write1 need updates */
138 {0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
139 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
140 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
141 "Non-buffered flash (128kB)"},
142 /* Saifun SA25F020 (non-buffered flash) */
143 /* strap, cfg1, & write1 need updates */
144 {0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
145 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
146 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
147 "Non-buffered flash (256kB)"},
148 /* Expansion entry 0100 */
149 {0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
150 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
151 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
152 "Entry 0100"},
153 /* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
154 {0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
155 0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
156 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
157 "Entry 0101: ST M45PE10 (128kB non-bufferred)"},
158 /* Entry 0110: ST M45PE20 (non-buffered flash)*/
159 {0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
160 0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
161 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
162 "Entry 0110: ST M45PE20 (256kB non-bufferred)"},
163 /* Saifun SA25F005 (non-buffered flash) */
164 /* strap, cfg1, & write1 need updates */
165 {0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
166 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
167 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
168 "Non-buffered flash (64kB)"},
169 /* Fast EEPROM */
170 {0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
171 1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
172 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
173 "EEPROM - fast"},
174 /* Expansion entry 1001 */
175 {0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
176 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
177 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
178 "Entry 1001"},
179 /* Expansion entry 1010 */
180 {0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
181 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
182 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
183 "Entry 1010"},
184 /* ATMEL AT45DB011B (buffered flash) */
185 {0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
186 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
187 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
188 "Buffered flash (128kB)"},
189 /* Expansion entry 1100 */
190 {0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
191 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
192 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
193 "Entry 1100"},
194 /* Expansion entry 1101 */
195 {0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
196 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
197 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
198 "Entry 1101"},
199 /* Ateml Expansion entry 1110 */
200 {0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
201 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
202 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
203 "Entry 1110 (Atmel)"},
204 /* ATMEL AT45DB021B (buffered flash) */
205 {0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
206 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
207 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
208 "Buffered flash (256kB)"},
211 MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
213 static inline u32 bnx2_tx_avail(struct bnx2 *bp)
215 u32 diff;
217 smp_mb();
219 /* The ring uses 256 indices for 255 entries, one of them
220 * needs to be skipped.
222 diff = bp->tx_prod - bp->tx_cons;
223 if (unlikely(diff >= TX_DESC_CNT)) {
224 diff &= 0xffff;
225 if (diff == TX_DESC_CNT)
226 diff = MAX_TX_DESC_CNT;
228 return (bp->tx_ring_size - diff);
231 static u32
232 bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
234 REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
235 return (REG_RD(bp, BNX2_PCICFG_REG_WINDOW));
238 static void
239 bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
241 REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
242 REG_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
245 static void
246 bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
248 offset += cid_addr;
249 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
250 int i;
252 REG_WR(bp, BNX2_CTX_CTX_DATA, val);
253 REG_WR(bp, BNX2_CTX_CTX_CTRL,
254 offset | BNX2_CTX_CTX_CTRL_WRITE_REQ);
255 for (i = 0; i < 5; i++) {
256 u32 val;
257 val = REG_RD(bp, BNX2_CTX_CTX_CTRL);
258 if ((val & BNX2_CTX_CTX_CTRL_WRITE_REQ) == 0)
259 break;
260 udelay(5);
262 } else {
263 REG_WR(bp, BNX2_CTX_DATA_ADR, offset);
264 REG_WR(bp, BNX2_CTX_DATA, val);
268 static int
269 bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
271 u32 val1;
272 int i, ret;
274 if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
275 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
276 val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;
278 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
279 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
281 udelay(40);
284 val1 = (bp->phy_addr << 21) | (reg << 16) |
285 BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
286 BNX2_EMAC_MDIO_COMM_START_BUSY;
287 REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);
289 for (i = 0; i < 50; i++) {
290 udelay(10);
292 val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
293 if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
294 udelay(5);
296 val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
297 val1 &= BNX2_EMAC_MDIO_COMM_DATA;
299 break;
303 if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
304 *val = 0x0;
305 ret = -EBUSY;
307 else {
308 *val = val1;
309 ret = 0;
312 if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
313 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
314 val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;
316 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
317 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
319 udelay(40);
322 return ret;
325 static int
326 bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
328 u32 val1;
329 int i, ret;
331 if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
332 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
333 val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;
335 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
336 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
338 udelay(40);
341 val1 = (bp->phy_addr << 21) | (reg << 16) | val |
342 BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
343 BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
344 REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);
346 for (i = 0; i < 50; i++) {
347 udelay(10);
349 val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
350 if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
351 udelay(5);
352 break;
356 if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
357 ret = -EBUSY;
358 else
359 ret = 0;
361 if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
362 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
363 val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;
365 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
366 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
368 udelay(40);
371 return ret;
374 static void
375 bnx2_disable_int(struct bnx2 *bp)
377 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
378 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
379 REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
382 static void
383 bnx2_enable_int(struct bnx2 *bp)
385 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
386 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
387 BNX2_PCICFG_INT_ACK_CMD_MASK_INT | bp->last_status_idx);
389 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
390 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | bp->last_status_idx);
392 REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
395 static void
396 bnx2_disable_int_sync(struct bnx2 *bp)
398 atomic_inc(&bp->intr_sem);
399 bnx2_disable_int(bp);
400 synchronize_irq(bp->pdev->irq);
403 static void
404 bnx2_netif_stop(struct bnx2 *bp)
406 bnx2_disable_int_sync(bp);
407 if (netif_running(bp->dev)) {
408 netif_poll_disable(bp->dev);
409 netif_tx_disable(bp->dev);
410 bp->dev->trans_start = jiffies; /* prevent tx timeout */
414 static void
415 bnx2_netif_start(struct bnx2 *bp)
417 if (atomic_dec_and_test(&bp->intr_sem)) {
418 if (netif_running(bp->dev)) {
419 netif_wake_queue(bp->dev);
420 netif_poll_enable(bp->dev);
421 bnx2_enable_int(bp);
426 static void
427 bnx2_free_mem(struct bnx2 *bp)
429 int i;
431 for (i = 0; i < bp->ctx_pages; i++) {
432 if (bp->ctx_blk[i]) {
433 pci_free_consistent(bp->pdev, BCM_PAGE_SIZE,
434 bp->ctx_blk[i],
435 bp->ctx_blk_mapping[i]);
436 bp->ctx_blk[i] = NULL;
439 if (bp->status_blk) {
440 pci_free_consistent(bp->pdev, bp->status_stats_size,
441 bp->status_blk, bp->status_blk_mapping);
442 bp->status_blk = NULL;
443 bp->stats_blk = NULL;
445 if (bp->tx_desc_ring) {
446 pci_free_consistent(bp->pdev,
447 sizeof(struct tx_bd) * TX_DESC_CNT,
448 bp->tx_desc_ring, bp->tx_desc_mapping);
449 bp->tx_desc_ring = NULL;
451 kfree(bp->tx_buf_ring);
452 bp->tx_buf_ring = NULL;
453 for (i = 0; i < bp->rx_max_ring; i++) {
454 if (bp->rx_desc_ring[i])
455 pci_free_consistent(bp->pdev,
456 sizeof(struct rx_bd) * RX_DESC_CNT,
457 bp->rx_desc_ring[i],
458 bp->rx_desc_mapping[i]);
459 bp->rx_desc_ring[i] = NULL;
461 vfree(bp->rx_buf_ring);
462 bp->rx_buf_ring = NULL;
465 static int
466 bnx2_alloc_mem(struct bnx2 *bp)
468 int i, status_blk_size;
470 bp->tx_buf_ring = kzalloc(sizeof(struct sw_bd) * TX_DESC_CNT,
471 GFP_KERNEL);
472 if (bp->tx_buf_ring == NULL)
473 return -ENOMEM;
475 bp->tx_desc_ring = pci_alloc_consistent(bp->pdev,
476 sizeof(struct tx_bd) *
477 TX_DESC_CNT,
478 &bp->tx_desc_mapping);
479 if (bp->tx_desc_ring == NULL)
480 goto alloc_mem_err;
482 bp->rx_buf_ring = vmalloc(sizeof(struct sw_bd) * RX_DESC_CNT *
483 bp->rx_max_ring);
484 if (bp->rx_buf_ring == NULL)
485 goto alloc_mem_err;
487 memset(bp->rx_buf_ring, 0, sizeof(struct sw_bd) * RX_DESC_CNT *
488 bp->rx_max_ring);
490 for (i = 0; i < bp->rx_max_ring; i++) {
491 bp->rx_desc_ring[i] =
492 pci_alloc_consistent(bp->pdev,
493 sizeof(struct rx_bd) * RX_DESC_CNT,
494 &bp->rx_desc_mapping[i]);
495 if (bp->rx_desc_ring[i] == NULL)
496 goto alloc_mem_err;
500 /* Combine status and statistics blocks into one allocation. */
501 status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
502 bp->status_stats_size = status_blk_size +
503 sizeof(struct statistics_block);
505 bp->status_blk = pci_alloc_consistent(bp->pdev, bp->status_stats_size,
506 &bp->status_blk_mapping);
507 if (bp->status_blk == NULL)
508 goto alloc_mem_err;
510 memset(bp->status_blk, 0, bp->status_stats_size);
512 bp->stats_blk = (void *) ((unsigned long) bp->status_blk +
513 status_blk_size);
515 bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;
517 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
518 bp->ctx_pages = 0x2000 / BCM_PAGE_SIZE;
519 if (bp->ctx_pages == 0)
520 bp->ctx_pages = 1;
521 for (i = 0; i < bp->ctx_pages; i++) {
522 bp->ctx_blk[i] = pci_alloc_consistent(bp->pdev,
523 BCM_PAGE_SIZE,
524 &bp->ctx_blk_mapping[i]);
525 if (bp->ctx_blk[i] == NULL)
526 goto alloc_mem_err;
529 return 0;
531 alloc_mem_err:
532 bnx2_free_mem(bp);
533 return -ENOMEM;
536 static void
537 bnx2_report_fw_link(struct bnx2 *bp)
539 u32 fw_link_status = 0;
541 if (bp->link_up) {
542 u32 bmsr;
544 switch (bp->line_speed) {
545 case SPEED_10:
546 if (bp->duplex == DUPLEX_HALF)
547 fw_link_status = BNX2_LINK_STATUS_10HALF;
548 else
549 fw_link_status = BNX2_LINK_STATUS_10FULL;
550 break;
551 case SPEED_100:
552 if (bp->duplex == DUPLEX_HALF)
553 fw_link_status = BNX2_LINK_STATUS_100HALF;
554 else
555 fw_link_status = BNX2_LINK_STATUS_100FULL;
556 break;
557 case SPEED_1000:
558 if (bp->duplex == DUPLEX_HALF)
559 fw_link_status = BNX2_LINK_STATUS_1000HALF;
560 else
561 fw_link_status = BNX2_LINK_STATUS_1000FULL;
562 break;
563 case SPEED_2500:
564 if (bp->duplex == DUPLEX_HALF)
565 fw_link_status = BNX2_LINK_STATUS_2500HALF;
566 else
567 fw_link_status = BNX2_LINK_STATUS_2500FULL;
568 break;
571 fw_link_status |= BNX2_LINK_STATUS_LINK_UP;
573 if (bp->autoneg) {
574 fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;
576 bnx2_read_phy(bp, MII_BMSR, &bmsr);
577 bnx2_read_phy(bp, MII_BMSR, &bmsr);
579 if (!(bmsr & BMSR_ANEGCOMPLETE) ||
580 bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)
581 fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
582 else
583 fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
586 else
587 fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;
589 REG_WR_IND(bp, bp->shmem_base + BNX2_LINK_STATUS, fw_link_status);
592 static void
593 bnx2_report_link(struct bnx2 *bp)
595 if (bp->link_up) {
596 netif_carrier_on(bp->dev);
597 printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);
599 printk("%d Mbps ", bp->line_speed);
601 if (bp->duplex == DUPLEX_FULL)
602 printk("full duplex");
603 else
604 printk("half duplex");
606 if (bp->flow_ctrl) {
607 if (bp->flow_ctrl & FLOW_CTRL_RX) {
608 printk(", receive ");
609 if (bp->flow_ctrl & FLOW_CTRL_TX)
610 printk("& transmit ");
612 else {
613 printk(", transmit ");
615 printk("flow control ON");
617 printk("\n");
619 else {
620 netif_carrier_off(bp->dev);
621 printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
624 bnx2_report_fw_link(bp);
627 static void
628 bnx2_resolve_flow_ctrl(struct bnx2 *bp)
630 u32 local_adv, remote_adv;
632 bp->flow_ctrl = 0;
633 if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
634 (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
636 if (bp->duplex == DUPLEX_FULL) {
637 bp->flow_ctrl = bp->req_flow_ctrl;
639 return;
642 if (bp->duplex != DUPLEX_FULL) {
643 return;
646 if ((bp->phy_flags & PHY_SERDES_FLAG) &&
647 (CHIP_NUM(bp) == CHIP_NUM_5708)) {
648 u32 val;
650 bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
651 if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
652 bp->flow_ctrl |= FLOW_CTRL_TX;
653 if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
654 bp->flow_ctrl |= FLOW_CTRL_RX;
655 return;
658 bnx2_read_phy(bp, MII_ADVERTISE, &local_adv);
659 bnx2_read_phy(bp, MII_LPA, &remote_adv);
661 if (bp->phy_flags & PHY_SERDES_FLAG) {
662 u32 new_local_adv = 0;
663 u32 new_remote_adv = 0;
665 if (local_adv & ADVERTISE_1000XPAUSE)
666 new_local_adv |= ADVERTISE_PAUSE_CAP;
667 if (local_adv & ADVERTISE_1000XPSE_ASYM)
668 new_local_adv |= ADVERTISE_PAUSE_ASYM;
669 if (remote_adv & ADVERTISE_1000XPAUSE)
670 new_remote_adv |= ADVERTISE_PAUSE_CAP;
671 if (remote_adv & ADVERTISE_1000XPSE_ASYM)
672 new_remote_adv |= ADVERTISE_PAUSE_ASYM;
674 local_adv = new_local_adv;
675 remote_adv = new_remote_adv;
678 /* See Table 28B-3 of 802.3ab-1999 spec. */
679 if (local_adv & ADVERTISE_PAUSE_CAP) {
680 if(local_adv & ADVERTISE_PAUSE_ASYM) {
681 if (remote_adv & ADVERTISE_PAUSE_CAP) {
682 bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
684 else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
685 bp->flow_ctrl = FLOW_CTRL_RX;
688 else {
689 if (remote_adv & ADVERTISE_PAUSE_CAP) {
690 bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
694 else if (local_adv & ADVERTISE_PAUSE_ASYM) {
695 if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
696 (remote_adv & ADVERTISE_PAUSE_ASYM)) {
698 bp->flow_ctrl = FLOW_CTRL_TX;
703 static int
704 bnx2_5708s_linkup(struct bnx2 *bp)
706 u32 val;
708 bp->link_up = 1;
709 bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
710 switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
711 case BCM5708S_1000X_STAT1_SPEED_10:
712 bp->line_speed = SPEED_10;
713 break;
714 case BCM5708S_1000X_STAT1_SPEED_100:
715 bp->line_speed = SPEED_100;
716 break;
717 case BCM5708S_1000X_STAT1_SPEED_1G:
718 bp->line_speed = SPEED_1000;
719 break;
720 case BCM5708S_1000X_STAT1_SPEED_2G5:
721 bp->line_speed = SPEED_2500;
722 break;
724 if (val & BCM5708S_1000X_STAT1_FD)
725 bp->duplex = DUPLEX_FULL;
726 else
727 bp->duplex = DUPLEX_HALF;
729 return 0;
732 static int
733 bnx2_5706s_linkup(struct bnx2 *bp)
735 u32 bmcr, local_adv, remote_adv, common;
737 bp->link_up = 1;
738 bp->line_speed = SPEED_1000;
740 bnx2_read_phy(bp, MII_BMCR, &bmcr);
741 if (bmcr & BMCR_FULLDPLX) {
742 bp->duplex = DUPLEX_FULL;
744 else {
745 bp->duplex = DUPLEX_HALF;
748 if (!(bmcr & BMCR_ANENABLE)) {
749 return 0;
752 bnx2_read_phy(bp, MII_ADVERTISE, &local_adv);
753 bnx2_read_phy(bp, MII_LPA, &remote_adv);
755 common = local_adv & remote_adv;
756 if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {
758 if (common & ADVERTISE_1000XFULL) {
759 bp->duplex = DUPLEX_FULL;
761 else {
762 bp->duplex = DUPLEX_HALF;
766 return 0;
769 static int
770 bnx2_copper_linkup(struct bnx2 *bp)
772 u32 bmcr;
774 bnx2_read_phy(bp, MII_BMCR, &bmcr);
775 if (bmcr & BMCR_ANENABLE) {
776 u32 local_adv, remote_adv, common;
778 bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
779 bnx2_read_phy(bp, MII_STAT1000, &remote_adv);
781 common = local_adv & (remote_adv >> 2);
782 if (common & ADVERTISE_1000FULL) {
783 bp->line_speed = SPEED_1000;
784 bp->duplex = DUPLEX_FULL;
786 else if (common & ADVERTISE_1000HALF) {
787 bp->line_speed = SPEED_1000;
788 bp->duplex = DUPLEX_HALF;
790 else {
791 bnx2_read_phy(bp, MII_ADVERTISE, &local_adv);
792 bnx2_read_phy(bp, MII_LPA, &remote_adv);
794 common = local_adv & remote_adv;
795 if (common & ADVERTISE_100FULL) {
796 bp->line_speed = SPEED_100;
797 bp->duplex = DUPLEX_FULL;
799 else if (common & ADVERTISE_100HALF) {
800 bp->line_speed = SPEED_100;
801 bp->duplex = DUPLEX_HALF;
803 else if (common & ADVERTISE_10FULL) {
804 bp->line_speed = SPEED_10;
805 bp->duplex = DUPLEX_FULL;
807 else if (common & ADVERTISE_10HALF) {
808 bp->line_speed = SPEED_10;
809 bp->duplex = DUPLEX_HALF;
811 else {
812 bp->line_speed = 0;
813 bp->link_up = 0;
817 else {
818 if (bmcr & BMCR_SPEED100) {
819 bp->line_speed = SPEED_100;
821 else {
822 bp->line_speed = SPEED_10;
824 if (bmcr & BMCR_FULLDPLX) {
825 bp->duplex = DUPLEX_FULL;
827 else {
828 bp->duplex = DUPLEX_HALF;
832 return 0;
835 static int
836 bnx2_set_mac_link(struct bnx2 *bp)
838 u32 val;
840 REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
841 if (bp->link_up && (bp->line_speed == SPEED_1000) &&
842 (bp->duplex == DUPLEX_HALF)) {
843 REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
846 /* Configure the EMAC mode register. */
847 val = REG_RD(bp, BNX2_EMAC_MODE);
849 val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
850 BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
851 BNX2_EMAC_MODE_25G_MODE);
853 if (bp->link_up) {
854 switch (bp->line_speed) {
855 case SPEED_10:
856 if (CHIP_NUM(bp) != CHIP_NUM_5706) {
857 val |= BNX2_EMAC_MODE_PORT_MII_10M;
858 break;
860 /* fall through */
861 case SPEED_100:
862 val |= BNX2_EMAC_MODE_PORT_MII;
863 break;
864 case SPEED_2500:
865 val |= BNX2_EMAC_MODE_25G_MODE;
866 /* fall through */
867 case SPEED_1000:
868 val |= BNX2_EMAC_MODE_PORT_GMII;
869 break;
872 else {
873 val |= BNX2_EMAC_MODE_PORT_GMII;
876 /* Set the MAC to operate in the appropriate duplex mode. */
877 if (bp->duplex == DUPLEX_HALF)
878 val |= BNX2_EMAC_MODE_HALF_DUPLEX;
879 REG_WR(bp, BNX2_EMAC_MODE, val);
881 /* Enable/disable rx PAUSE. */
882 bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;
884 if (bp->flow_ctrl & FLOW_CTRL_RX)
885 bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
886 REG_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);
888 /* Enable/disable tx PAUSE. */
889 val = REG_RD(bp, BNX2_EMAC_TX_MODE);
890 val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;
892 if (bp->flow_ctrl & FLOW_CTRL_TX)
893 val |= BNX2_EMAC_TX_MODE_FLOW_EN;
894 REG_WR(bp, BNX2_EMAC_TX_MODE, val);
896 /* Acknowledge the interrupt. */
897 REG_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);
899 return 0;
902 static int
903 bnx2_set_link(struct bnx2 *bp)
905 u32 bmsr;
906 u8 link_up;
908 if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) {
909 bp->link_up = 1;
910 return 0;
913 link_up = bp->link_up;
915 bnx2_read_phy(bp, MII_BMSR, &bmsr);
916 bnx2_read_phy(bp, MII_BMSR, &bmsr);
918 if ((bp->phy_flags & PHY_SERDES_FLAG) &&
919 (CHIP_NUM(bp) == CHIP_NUM_5706)) {
920 u32 val;
922 val = REG_RD(bp, BNX2_EMAC_STATUS);
923 if (val & BNX2_EMAC_STATUS_LINK)
924 bmsr |= BMSR_LSTATUS;
925 else
926 bmsr &= ~BMSR_LSTATUS;
929 if (bmsr & BMSR_LSTATUS) {
930 bp->link_up = 1;
932 if (bp->phy_flags & PHY_SERDES_FLAG) {
933 if (CHIP_NUM(bp) == CHIP_NUM_5706)
934 bnx2_5706s_linkup(bp);
935 else if (CHIP_NUM(bp) == CHIP_NUM_5708)
936 bnx2_5708s_linkup(bp);
938 else {
939 bnx2_copper_linkup(bp);
941 bnx2_resolve_flow_ctrl(bp);
943 else {
944 if ((bp->phy_flags & PHY_SERDES_FLAG) &&
945 (bp->autoneg & AUTONEG_SPEED)) {
947 u32 bmcr;
949 bnx2_read_phy(bp, MII_BMCR, &bmcr);
950 bmcr &= ~BCM5708S_BMCR_FORCE_2500;
951 if (!(bmcr & BMCR_ANENABLE)) {
952 bnx2_write_phy(bp, MII_BMCR, bmcr |
953 BMCR_ANENABLE);
956 bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
957 bp->link_up = 0;
960 if (bp->link_up != link_up) {
961 bnx2_report_link(bp);
964 bnx2_set_mac_link(bp);
966 return 0;
969 static int
970 bnx2_reset_phy(struct bnx2 *bp)
972 int i;
973 u32 reg;
975 bnx2_write_phy(bp, MII_BMCR, BMCR_RESET);
977 #define PHY_RESET_MAX_WAIT 100
978 for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
979 udelay(10);
981 bnx2_read_phy(bp, MII_BMCR, &reg);
982 if (!(reg & BMCR_RESET)) {
983 udelay(20);
984 break;
987 if (i == PHY_RESET_MAX_WAIT) {
988 return -EBUSY;
990 return 0;
993 static u32
994 bnx2_phy_get_pause_adv(struct bnx2 *bp)
996 u32 adv = 0;
998 if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
999 (FLOW_CTRL_RX | FLOW_CTRL_TX)) {
1001 if (bp->phy_flags & PHY_SERDES_FLAG) {
1002 adv = ADVERTISE_1000XPAUSE;
1004 else {
1005 adv = ADVERTISE_PAUSE_CAP;
1008 else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
1009 if (bp->phy_flags & PHY_SERDES_FLAG) {
1010 adv = ADVERTISE_1000XPSE_ASYM;
1012 else {
1013 adv = ADVERTISE_PAUSE_ASYM;
1016 else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
1017 if (bp->phy_flags & PHY_SERDES_FLAG) {
1018 adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1020 else {
1021 adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1024 return adv;
1027 static int
1028 bnx2_setup_serdes_phy(struct bnx2 *bp)
1030 u32 adv, bmcr, up1;
1031 u32 new_adv = 0;
1033 if (!(bp->autoneg & AUTONEG_SPEED)) {
1034 u32 new_bmcr;
1035 int force_link_down = 0;
1037 bnx2_read_phy(bp, MII_ADVERTISE, &adv);
1038 adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);
1040 bnx2_read_phy(bp, MII_BMCR, &bmcr);
1041 new_bmcr = bmcr & ~(BMCR_ANENABLE | BCM5708S_BMCR_FORCE_2500);
1042 new_bmcr |= BMCR_SPEED1000;
1043 if (bp->req_line_speed == SPEED_2500) {
1044 new_bmcr |= BCM5708S_BMCR_FORCE_2500;
1045 bnx2_read_phy(bp, BCM5708S_UP1, &up1);
1046 if (!(up1 & BCM5708S_UP1_2G5)) {
1047 up1 |= BCM5708S_UP1_2G5;
1048 bnx2_write_phy(bp, BCM5708S_UP1, up1);
1049 force_link_down = 1;
1051 } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
1052 bnx2_read_phy(bp, BCM5708S_UP1, &up1);
1053 if (up1 & BCM5708S_UP1_2G5) {
1054 up1 &= ~BCM5708S_UP1_2G5;
1055 bnx2_write_phy(bp, BCM5708S_UP1, up1);
1056 force_link_down = 1;
1060 if (bp->req_duplex == DUPLEX_FULL) {
1061 adv |= ADVERTISE_1000XFULL;
1062 new_bmcr |= BMCR_FULLDPLX;
1064 else {
1065 adv |= ADVERTISE_1000XHALF;
1066 new_bmcr &= ~BMCR_FULLDPLX;
1068 if ((new_bmcr != bmcr) || (force_link_down)) {
1069 /* Force a link down visible on the other side */
1070 if (bp->link_up) {
1071 bnx2_write_phy(bp, MII_ADVERTISE, adv &
1072 ~(ADVERTISE_1000XFULL |
1073 ADVERTISE_1000XHALF));
1074 bnx2_write_phy(bp, MII_BMCR, bmcr |
1075 BMCR_ANRESTART | BMCR_ANENABLE);
1077 bp->link_up = 0;
1078 netif_carrier_off(bp->dev);
1079 bnx2_write_phy(bp, MII_BMCR, new_bmcr);
1080 bnx2_report_link(bp);
1082 bnx2_write_phy(bp, MII_ADVERTISE, adv);
1083 bnx2_write_phy(bp, MII_BMCR, new_bmcr);
1085 return 0;
1088 if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) {
1089 bnx2_read_phy(bp, BCM5708S_UP1, &up1);
1090 up1 |= BCM5708S_UP1_2G5;
1091 bnx2_write_phy(bp, BCM5708S_UP1, up1);
1094 if (bp->advertising & ADVERTISED_1000baseT_Full)
1095 new_adv |= ADVERTISE_1000XFULL;
1097 new_adv |= bnx2_phy_get_pause_adv(bp);
1099 bnx2_read_phy(bp, MII_ADVERTISE, &adv);
1100 bnx2_read_phy(bp, MII_BMCR, &bmcr);
1102 bp->serdes_an_pending = 0;
1103 if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
1104 /* Force a link down visible on the other side */
1105 if (bp->link_up) {
1106 bnx2_write_phy(bp, MII_BMCR, BMCR_LOOPBACK);
1107 spin_unlock_bh(&bp->phy_lock);
1108 msleep(20);
1109 spin_lock_bh(&bp->phy_lock);
1112 bnx2_write_phy(bp, MII_ADVERTISE, new_adv);
1113 bnx2_write_phy(bp, MII_BMCR, bmcr | BMCR_ANRESTART |
1114 BMCR_ANENABLE);
1115 /* Speed up link-up time when the link partner
1116 * does not autonegotiate which is very common
1117 * in blade servers. Some blade servers use
1118 * IPMI for kerboard input and it's important
1119 * to minimize link disruptions. Autoneg. involves
1120 * exchanging base pages plus 3 next pages and
1121 * normally completes in about 120 msec.
1123 bp->current_interval = SERDES_AN_TIMEOUT;
1124 bp->serdes_an_pending = 1;
1125 mod_timer(&bp->timer, jiffies + bp->current_interval);
1128 return 0;
1131 #define ETHTOOL_ALL_FIBRE_SPEED \
1132 (ADVERTISED_1000baseT_Full)
1134 #define ETHTOOL_ALL_COPPER_SPEED \
1135 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
1136 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
1137 ADVERTISED_1000baseT_Full)
1139 #define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
1140 ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)
1142 #define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)
1144 static int
1145 bnx2_setup_copper_phy(struct bnx2 *bp)
1147 u32 bmcr;
1148 u32 new_bmcr;
1150 bnx2_read_phy(bp, MII_BMCR, &bmcr);
1152 if (bp->autoneg & AUTONEG_SPEED) {
1153 u32 adv_reg, adv1000_reg;
1154 u32 new_adv_reg = 0;
1155 u32 new_adv1000_reg = 0;
1157 bnx2_read_phy(bp, MII_ADVERTISE, &adv_reg);
1158 adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
1159 ADVERTISE_PAUSE_ASYM);
1161 bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
1162 adv1000_reg &= PHY_ALL_1000_SPEED;
1164 if (bp->advertising & ADVERTISED_10baseT_Half)
1165 new_adv_reg |= ADVERTISE_10HALF;
1166 if (bp->advertising & ADVERTISED_10baseT_Full)
1167 new_adv_reg |= ADVERTISE_10FULL;
1168 if (bp->advertising & ADVERTISED_100baseT_Half)
1169 new_adv_reg |= ADVERTISE_100HALF;
1170 if (bp->advertising & ADVERTISED_100baseT_Full)
1171 new_adv_reg |= ADVERTISE_100FULL;
1172 if (bp->advertising & ADVERTISED_1000baseT_Full)
1173 new_adv1000_reg |= ADVERTISE_1000FULL;
1175 new_adv_reg |= ADVERTISE_CSMA;
1177 new_adv_reg |= bnx2_phy_get_pause_adv(bp);
1179 if ((adv1000_reg != new_adv1000_reg) ||
1180 (adv_reg != new_adv_reg) ||
1181 ((bmcr & BMCR_ANENABLE) == 0)) {
1183 bnx2_write_phy(bp, MII_ADVERTISE, new_adv_reg);
1184 bnx2_write_phy(bp, MII_CTRL1000, new_adv1000_reg);
1185 bnx2_write_phy(bp, MII_BMCR, BMCR_ANRESTART |
1186 BMCR_ANENABLE);
1188 else if (bp->link_up) {
1189 /* Flow ctrl may have changed from auto to forced */
1190 /* or vice-versa. */
1192 bnx2_resolve_flow_ctrl(bp);
1193 bnx2_set_mac_link(bp);
1195 return 0;
1198 new_bmcr = 0;
1199 if (bp->req_line_speed == SPEED_100) {
1200 new_bmcr |= BMCR_SPEED100;
1202 if (bp->req_duplex == DUPLEX_FULL) {
1203 new_bmcr |= BMCR_FULLDPLX;
1205 if (new_bmcr != bmcr) {
1206 u32 bmsr;
1208 bnx2_read_phy(bp, MII_BMSR, &bmsr);
1209 bnx2_read_phy(bp, MII_BMSR, &bmsr);
1211 if (bmsr & BMSR_LSTATUS) {
1212 /* Force link down */
1213 bnx2_write_phy(bp, MII_BMCR, BMCR_LOOPBACK);
1214 spin_unlock_bh(&bp->phy_lock);
1215 msleep(50);
1216 spin_lock_bh(&bp->phy_lock);
1218 bnx2_read_phy(bp, MII_BMSR, &bmsr);
1219 bnx2_read_phy(bp, MII_BMSR, &bmsr);
1222 bnx2_write_phy(bp, MII_BMCR, new_bmcr);
1224 /* Normally, the new speed is setup after the link has
1225 * gone down and up again. In some cases, link will not go
1226 * down so we need to set up the new speed here.
1228 if (bmsr & BMSR_LSTATUS) {
1229 bp->line_speed = bp->req_line_speed;
1230 bp->duplex = bp->req_duplex;
1231 bnx2_resolve_flow_ctrl(bp);
1232 bnx2_set_mac_link(bp);
1235 return 0;
1238 static int
1239 bnx2_setup_phy(struct bnx2 *bp)
1241 if (bp->loopback == MAC_LOOPBACK)
1242 return 0;
1244 if (bp->phy_flags & PHY_SERDES_FLAG) {
1245 return (bnx2_setup_serdes_phy(bp));
1247 else {
1248 return (bnx2_setup_copper_phy(bp));
1252 static int
1253 bnx2_init_5708s_phy(struct bnx2 *bp)
1255 u32 val;
1257 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
1258 bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
1259 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
1261 bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
1262 val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
1263 bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);
1265 bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
1266 val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
1267 bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);
1269 if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) {
1270 bnx2_read_phy(bp, BCM5708S_UP1, &val);
1271 val |= BCM5708S_UP1_2G5;
1272 bnx2_write_phy(bp, BCM5708S_UP1, val);
1275 if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
1276 (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
1277 (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
1278 /* increase tx signal amplitude */
1279 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
1280 BCM5708S_BLK_ADDR_TX_MISC);
1281 bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
1282 val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
1283 bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
1284 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
1287 val = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG) &
1288 BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;
1290 if (val) {
1291 u32 is_backplane;
1293 is_backplane = REG_RD_IND(bp, bp->shmem_base +
1294 BNX2_SHARED_HW_CFG_CONFIG);
1295 if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
1296 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
1297 BCM5708S_BLK_ADDR_TX_MISC);
1298 bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
1299 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
1300 BCM5708S_BLK_ADDR_DIG);
1303 return 0;
1306 static int
1307 bnx2_init_5706s_phy(struct bnx2 *bp)
1309 bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
1311 if (CHIP_NUM(bp) == CHIP_NUM_5706)
1312 REG_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300);
1314 if (bp->dev->mtu > 1500) {
1315 u32 val;
1317 /* Set extended packet length bit */
1318 bnx2_write_phy(bp, 0x18, 0x7);
1319 bnx2_read_phy(bp, 0x18, &val);
1320 bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);
1322 bnx2_write_phy(bp, 0x1c, 0x6c00);
1323 bnx2_read_phy(bp, 0x1c, &val);
1324 bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
1326 else {
1327 u32 val;
1329 bnx2_write_phy(bp, 0x18, 0x7);
1330 bnx2_read_phy(bp, 0x18, &val);
1331 bnx2_write_phy(bp, 0x18, val & ~0x4007);
1333 bnx2_write_phy(bp, 0x1c, 0x6c00);
1334 bnx2_read_phy(bp, 0x1c, &val);
1335 bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
1338 return 0;
1341 static int
1342 bnx2_init_copper_phy(struct bnx2 *bp)
1344 u32 val;
1346 if (bp->phy_flags & PHY_CRC_FIX_FLAG) {
1347 bnx2_write_phy(bp, 0x18, 0x0c00);
1348 bnx2_write_phy(bp, 0x17, 0x000a);
1349 bnx2_write_phy(bp, 0x15, 0x310b);
1350 bnx2_write_phy(bp, 0x17, 0x201f);
1351 bnx2_write_phy(bp, 0x15, 0x9506);
1352 bnx2_write_phy(bp, 0x17, 0x401f);
1353 bnx2_write_phy(bp, 0x15, 0x14e2);
1354 bnx2_write_phy(bp, 0x18, 0x0400);
1357 if (bp->phy_flags & PHY_DIS_EARLY_DAC_FLAG) {
1358 bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS,
1359 MII_BNX2_DSP_EXPAND_REG | 0x8);
1360 bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
1361 val &= ~(1 << 8);
1362 bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val);
1365 if (bp->dev->mtu > 1500) {
1366 /* Set extended packet length bit */
1367 bnx2_write_phy(bp, 0x18, 0x7);
1368 bnx2_read_phy(bp, 0x18, &val);
1369 bnx2_write_phy(bp, 0x18, val | 0x4000);
1371 bnx2_read_phy(bp, 0x10, &val);
1372 bnx2_write_phy(bp, 0x10, val | 0x1);
1374 else {
1375 bnx2_write_phy(bp, 0x18, 0x7);
1376 bnx2_read_phy(bp, 0x18, &val);
1377 bnx2_write_phy(bp, 0x18, val & ~0x4007);
1379 bnx2_read_phy(bp, 0x10, &val);
1380 bnx2_write_phy(bp, 0x10, val & ~0x1);
1383 /* ethernet@wirespeed */
1384 bnx2_write_phy(bp, 0x18, 0x7007);
1385 bnx2_read_phy(bp, 0x18, &val);
1386 bnx2_write_phy(bp, 0x18, val | (1 << 15) | (1 << 4));
1387 return 0;
1391 static int
1392 bnx2_init_phy(struct bnx2 *bp)
1394 u32 val;
1395 int rc = 0;
1397 bp->phy_flags &= ~PHY_INT_MODE_MASK_FLAG;
1398 bp->phy_flags |= PHY_INT_MODE_LINK_READY_FLAG;
1400 REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
1402 bnx2_reset_phy(bp);
1404 bnx2_read_phy(bp, MII_PHYSID1, &val);
1405 bp->phy_id = val << 16;
1406 bnx2_read_phy(bp, MII_PHYSID2, &val);
1407 bp->phy_id |= val & 0xffff;
1409 if (bp->phy_flags & PHY_SERDES_FLAG) {
1410 if (CHIP_NUM(bp) == CHIP_NUM_5706)
1411 rc = bnx2_init_5706s_phy(bp);
1412 else if (CHIP_NUM(bp) == CHIP_NUM_5708)
1413 rc = bnx2_init_5708s_phy(bp);
1415 else {
1416 rc = bnx2_init_copper_phy(bp);
1419 bnx2_setup_phy(bp);
1421 return rc;
1424 static int
1425 bnx2_set_mac_loopback(struct bnx2 *bp)
1427 u32 mac_mode;
1429 mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
1430 mac_mode &= ~BNX2_EMAC_MODE_PORT;
1431 mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
1432 REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
1433 bp->link_up = 1;
1434 return 0;
1437 static int bnx2_test_link(struct bnx2 *);
1439 static int
1440 bnx2_set_phy_loopback(struct bnx2 *bp)
1442 u32 mac_mode;
1443 int rc, i;
1445 spin_lock_bh(&bp->phy_lock);
1446 rc = bnx2_write_phy(bp, MII_BMCR, BMCR_LOOPBACK | BMCR_FULLDPLX |
1447 BMCR_SPEED1000);
1448 spin_unlock_bh(&bp->phy_lock);
1449 if (rc)
1450 return rc;
1452 for (i = 0; i < 10; i++) {
1453 if (bnx2_test_link(bp) == 0)
1454 break;
1455 msleep(100);
1458 mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
1459 mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
1460 BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
1461 BNX2_EMAC_MODE_25G_MODE);
1463 mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
1464 REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
1465 bp->link_up = 1;
1466 return 0;
1469 static int
1470 bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int silent)
1472 int i;
1473 u32 val;
1475 bp->fw_wr_seq++;
1476 msg_data |= bp->fw_wr_seq;
1478 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);
1480 /* wait for an acknowledgement. */
1481 for (i = 0; i < (FW_ACK_TIME_OUT_MS / 10); i++) {
1482 msleep(10);
1484 val = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_MB);
1486 if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
1487 break;
1489 if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
1490 return 0;
1492 /* If we timed out, inform the firmware that this is the case. */
1493 if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
1494 if (!silent)
1495 printk(KERN_ERR PFX "fw sync timeout, reset code = "
1496 "%x\n", msg_data);
1498 msg_data &= ~BNX2_DRV_MSG_CODE;
1499 msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;
1501 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);
1503 return -EBUSY;
1506 if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
1507 return -EIO;
1509 return 0;
1512 static int
1513 bnx2_init_5709_context(struct bnx2 *bp)
1515 int i, ret = 0;
1516 u32 val;
1518 val = BNX2_CTX_COMMAND_ENABLED | BNX2_CTX_COMMAND_MEM_INIT | (1 << 12);
1519 val |= (BCM_PAGE_BITS - 8) << 16;
1520 REG_WR(bp, BNX2_CTX_COMMAND, val);
1521 for (i = 0; i < bp->ctx_pages; i++) {
1522 int j;
1524 REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0,
1525 (bp->ctx_blk_mapping[i] & 0xffffffff) |
1526 BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID);
1527 REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA1,
1528 (u64) bp->ctx_blk_mapping[i] >> 32);
1529 REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL, i |
1530 BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
1531 for (j = 0; j < 10; j++) {
1533 val = REG_RD(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL);
1534 if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
1535 break;
1536 udelay(5);
1538 if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
1539 ret = -EBUSY;
1540 break;
1543 return ret;
1546 static void
1547 bnx2_init_context(struct bnx2 *bp)
1549 u32 vcid;
1551 vcid = 96;
1552 while (vcid) {
1553 u32 vcid_addr, pcid_addr, offset;
1555 vcid--;
1557 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
1558 u32 new_vcid;
1560 vcid_addr = GET_PCID_ADDR(vcid);
1561 if (vcid & 0x8) {
1562 new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
1564 else {
1565 new_vcid = vcid;
1567 pcid_addr = GET_PCID_ADDR(new_vcid);
1569 else {
1570 vcid_addr = GET_CID_ADDR(vcid);
1571 pcid_addr = vcid_addr;
1574 REG_WR(bp, BNX2_CTX_VIRT_ADDR, 0x00);
1575 REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
1577 /* Zero out the context. */
1578 for (offset = 0; offset < PHY_CTX_SIZE; offset += 4) {
1579 CTX_WR(bp, 0x00, offset, 0);
1582 REG_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
1583 REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
1587 static int
1588 bnx2_alloc_bad_rbuf(struct bnx2 *bp)
1590 u16 *good_mbuf;
1591 u32 good_mbuf_cnt;
1592 u32 val;
1594 good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
1595 if (good_mbuf == NULL) {
1596 printk(KERN_ERR PFX "Failed to allocate memory in "
1597 "bnx2_alloc_bad_rbuf\n");
1598 return -ENOMEM;
1601 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
1602 BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);
1604 good_mbuf_cnt = 0;
1606 /* Allocate a bunch of mbufs and save the good ones in an array. */
1607 val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
1608 while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
1609 REG_WR_IND(bp, BNX2_RBUF_COMMAND, BNX2_RBUF_COMMAND_ALLOC_REQ);
1611 val = REG_RD_IND(bp, BNX2_RBUF_FW_BUF_ALLOC);
1613 val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;
1615 /* The addresses with Bit 9 set are bad memory blocks. */
1616 if (!(val & (1 << 9))) {
1617 good_mbuf[good_mbuf_cnt] = (u16) val;
1618 good_mbuf_cnt++;
1621 val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
1624 /* Free the good ones back to the mbuf pool thus discarding
1625 * all the bad ones. */
1626 while (good_mbuf_cnt) {
1627 good_mbuf_cnt--;
1629 val = good_mbuf[good_mbuf_cnt];
1630 val = (val << 9) | val | 1;
1632 REG_WR_IND(bp, BNX2_RBUF_FW_BUF_FREE, val);
1634 kfree(good_mbuf);
1635 return 0;
1638 static void
1639 bnx2_set_mac_addr(struct bnx2 *bp)
1641 u32 val;
1642 u8 *mac_addr = bp->dev->dev_addr;
1644 val = (mac_addr[0] << 8) | mac_addr[1];
1646 REG_WR(bp, BNX2_EMAC_MAC_MATCH0, val);
1648 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
1649 (mac_addr[4] << 8) | mac_addr[5];
1651 REG_WR(bp, BNX2_EMAC_MAC_MATCH1, val);
1654 static inline int
1655 bnx2_alloc_rx_skb(struct bnx2 *bp, u16 index)
1657 struct sk_buff *skb;
1658 struct sw_bd *rx_buf = &bp->rx_buf_ring[index];
1659 dma_addr_t mapping;
1660 struct rx_bd *rxbd = &bp->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
1661 unsigned long align;
1663 skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
1664 if (skb == NULL) {
1665 return -ENOMEM;
1668 if (unlikely((align = (unsigned long) skb->data & (BNX2_RX_ALIGN - 1))))
1669 skb_reserve(skb, BNX2_RX_ALIGN - align);
1671 mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
1672 PCI_DMA_FROMDEVICE);
1674 rx_buf->skb = skb;
1675 pci_unmap_addr_set(rx_buf, mapping, mapping);
1677 rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
1678 rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
1680 bp->rx_prod_bseq += bp->rx_buf_use_size;
1682 return 0;
1685 static void
1686 bnx2_phy_int(struct bnx2 *bp)
1688 u32 new_link_state, old_link_state;
1690 new_link_state = bp->status_blk->status_attn_bits &
1691 STATUS_ATTN_BITS_LINK_STATE;
1692 old_link_state = bp->status_blk->status_attn_bits_ack &
1693 STATUS_ATTN_BITS_LINK_STATE;
1694 if (new_link_state != old_link_state) {
1695 if (new_link_state) {
1696 REG_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD,
1697 STATUS_ATTN_BITS_LINK_STATE);
1699 else {
1700 REG_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD,
1701 STATUS_ATTN_BITS_LINK_STATE);
1703 bnx2_set_link(bp);
1707 static void
1708 bnx2_tx_int(struct bnx2 *bp)
1710 struct status_block *sblk = bp->status_blk;
1711 u16 hw_cons, sw_cons, sw_ring_cons;
1712 int tx_free_bd = 0;
1714 hw_cons = bp->hw_tx_cons = sblk->status_tx_quick_consumer_index0;
1715 if ((hw_cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT) {
1716 hw_cons++;
1718 sw_cons = bp->tx_cons;
1720 while (sw_cons != hw_cons) {
1721 struct sw_bd *tx_buf;
1722 struct sk_buff *skb;
1723 int i, last;
1725 sw_ring_cons = TX_RING_IDX(sw_cons);
1727 tx_buf = &bp->tx_buf_ring[sw_ring_cons];
1728 skb = tx_buf->skb;
1730 /* partial BD completions possible with TSO packets */
1731 if (skb_is_gso(skb)) {
1732 u16 last_idx, last_ring_idx;
1734 last_idx = sw_cons +
1735 skb_shinfo(skb)->nr_frags + 1;
1736 last_ring_idx = sw_ring_cons +
1737 skb_shinfo(skb)->nr_frags + 1;
1738 if (unlikely(last_ring_idx >= MAX_TX_DESC_CNT)) {
1739 last_idx++;
1741 if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
1742 break;
1746 pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
1747 skb_headlen(skb), PCI_DMA_TODEVICE);
1749 tx_buf->skb = NULL;
1750 last = skb_shinfo(skb)->nr_frags;
1752 for (i = 0; i < last; i++) {
1753 sw_cons = NEXT_TX_BD(sw_cons);
1755 pci_unmap_page(bp->pdev,
1756 pci_unmap_addr(
1757 &bp->tx_buf_ring[TX_RING_IDX(sw_cons)],
1758 mapping),
1759 skb_shinfo(skb)->frags[i].size,
1760 PCI_DMA_TODEVICE);
1763 sw_cons = NEXT_TX_BD(sw_cons);
1765 tx_free_bd += last + 1;
1767 dev_kfree_skb(skb);
1769 hw_cons = bp->hw_tx_cons =
1770 sblk->status_tx_quick_consumer_index0;
1772 if ((hw_cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT) {
1773 hw_cons++;
1777 bp->tx_cons = sw_cons;
1778 /* Need to make the tx_cons update visible to bnx2_start_xmit()
1779 * before checking for netif_queue_stopped(). Without the
1780 * memory barrier, there is a small possibility that bnx2_start_xmit()
1781 * will miss it and cause the queue to be stopped forever.
1783 smp_mb();
1785 if (unlikely(netif_queue_stopped(bp->dev)) &&
1786 (bnx2_tx_avail(bp) > bp->tx_wake_thresh)) {
1787 netif_tx_lock(bp->dev);
1788 if ((netif_queue_stopped(bp->dev)) &&
1789 (bnx2_tx_avail(bp) > bp->tx_wake_thresh))
1790 netif_wake_queue(bp->dev);
1791 netif_tx_unlock(bp->dev);
1795 static inline void
1796 bnx2_reuse_rx_skb(struct bnx2 *bp, struct sk_buff *skb,
1797 u16 cons, u16 prod)
1799 struct sw_bd *cons_rx_buf, *prod_rx_buf;
1800 struct rx_bd *cons_bd, *prod_bd;
1802 cons_rx_buf = &bp->rx_buf_ring[cons];
1803 prod_rx_buf = &bp->rx_buf_ring[prod];
1805 pci_dma_sync_single_for_device(bp->pdev,
1806 pci_unmap_addr(cons_rx_buf, mapping),
1807 bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
1809 bp->rx_prod_bseq += bp->rx_buf_use_size;
1811 prod_rx_buf->skb = skb;
1813 if (cons == prod)
1814 return;
1816 pci_unmap_addr_set(prod_rx_buf, mapping,
1817 pci_unmap_addr(cons_rx_buf, mapping));
1819 cons_bd = &bp->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
1820 prod_bd = &bp->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
1821 prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
1822 prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
1825 static int
1826 bnx2_rx_int(struct bnx2 *bp, int budget)
1828 struct status_block *sblk = bp->status_blk;
1829 u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
1830 struct l2_fhdr *rx_hdr;
1831 int rx_pkt = 0;
1833 hw_cons = bp->hw_rx_cons = sblk->status_rx_quick_consumer_index0;
1834 if ((hw_cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT) {
1835 hw_cons++;
1837 sw_cons = bp->rx_cons;
1838 sw_prod = bp->rx_prod;
1840 /* Memory barrier necessary as speculative reads of the rx
1841 * buffer can be ahead of the index in the status block
1843 rmb();
1844 while (sw_cons != hw_cons) {
1845 unsigned int len;
1846 u32 status;
1847 struct sw_bd *rx_buf;
1848 struct sk_buff *skb;
1849 dma_addr_t dma_addr;
1851 sw_ring_cons = RX_RING_IDX(sw_cons);
1852 sw_ring_prod = RX_RING_IDX(sw_prod);
1854 rx_buf = &bp->rx_buf_ring[sw_ring_cons];
1855 skb = rx_buf->skb;
1857 rx_buf->skb = NULL;
1859 dma_addr = pci_unmap_addr(rx_buf, mapping);
1861 pci_dma_sync_single_for_cpu(bp->pdev, dma_addr,
1862 bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
1864 rx_hdr = (struct l2_fhdr *) skb->data;
1865 len = rx_hdr->l2_fhdr_pkt_len - 4;
1867 if ((status = rx_hdr->l2_fhdr_status) &
1868 (L2_FHDR_ERRORS_BAD_CRC |
1869 L2_FHDR_ERRORS_PHY_DECODE |
1870 L2_FHDR_ERRORS_ALIGNMENT |
1871 L2_FHDR_ERRORS_TOO_SHORT |
1872 L2_FHDR_ERRORS_GIANT_FRAME)) {
1874 goto reuse_rx;
1877 /* Since we don't have a jumbo ring, copy small packets
1878 * if mtu > 1500
1880 if ((bp->dev->mtu > 1500) && (len <= RX_COPY_THRESH)) {
1881 struct sk_buff *new_skb;
1883 new_skb = netdev_alloc_skb(bp->dev, len + 2);
1884 if (new_skb == NULL)
1885 goto reuse_rx;
1887 /* aligned copy */
1888 memcpy(new_skb->data,
1889 skb->data + bp->rx_offset - 2,
1890 len + 2);
1892 skb_reserve(new_skb, 2);
1893 skb_put(new_skb, len);
1895 bnx2_reuse_rx_skb(bp, skb,
1896 sw_ring_cons, sw_ring_prod);
1898 skb = new_skb;
1900 else if (bnx2_alloc_rx_skb(bp, sw_ring_prod) == 0) {
1901 pci_unmap_single(bp->pdev, dma_addr,
1902 bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
1904 skb_reserve(skb, bp->rx_offset);
1905 skb_put(skb, len);
1907 else {
1908 reuse_rx:
1909 bnx2_reuse_rx_skb(bp, skb,
1910 sw_ring_cons, sw_ring_prod);
1911 goto next_rx;
1914 skb->protocol = eth_type_trans(skb, bp->dev);
1916 if ((len > (bp->dev->mtu + ETH_HLEN)) &&
1917 (ntohs(skb->protocol) != 0x8100)) {
1919 dev_kfree_skb(skb);
1920 goto next_rx;
1924 skb->ip_summed = CHECKSUM_NONE;
1925 if (bp->rx_csum &&
1926 (status & (L2_FHDR_STATUS_TCP_SEGMENT |
1927 L2_FHDR_STATUS_UDP_DATAGRAM))) {
1929 if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
1930 L2_FHDR_ERRORS_UDP_XSUM)) == 0))
1931 skb->ip_summed = CHECKSUM_UNNECESSARY;
1934 #ifdef BCM_VLAN
1935 if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) && (bp->vlgrp != 0)) {
1936 vlan_hwaccel_receive_skb(skb, bp->vlgrp,
1937 rx_hdr->l2_fhdr_vlan_tag);
1939 else
1940 #endif
1941 netif_receive_skb(skb);
1943 bp->dev->last_rx = jiffies;
1944 rx_pkt++;
1946 next_rx:
1947 sw_cons = NEXT_RX_BD(sw_cons);
1948 sw_prod = NEXT_RX_BD(sw_prod);
1950 if ((rx_pkt == budget))
1951 break;
1953 /* Refresh hw_cons to see if there is new work */
1954 if (sw_cons == hw_cons) {
1955 hw_cons = bp->hw_rx_cons =
1956 sblk->status_rx_quick_consumer_index0;
1957 if ((hw_cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT)
1958 hw_cons++;
1959 rmb();
1962 bp->rx_cons = sw_cons;
1963 bp->rx_prod = sw_prod;
1965 REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, sw_prod);
1967 REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bp->rx_prod_bseq);
1969 mmiowb();
1971 return rx_pkt;
1975 /* MSI ISR - The only difference between this and the INTx ISR
1976 * is that the MSI interrupt is always serviced.
1978 static irqreturn_t
1979 bnx2_msi(int irq, void *dev_instance)
1981 struct net_device *dev = dev_instance;
1982 struct bnx2 *bp = netdev_priv(dev);
1984 prefetch(bp->status_blk);
1985 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
1986 BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
1987 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
1989 /* Return here if interrupt is disabled. */
1990 if (unlikely(atomic_read(&bp->intr_sem) != 0))
1991 return IRQ_HANDLED;
1993 netif_rx_schedule(dev);
1995 return IRQ_HANDLED;
1998 static irqreturn_t
1999 bnx2_interrupt(int irq, void *dev_instance)
2001 struct net_device *dev = dev_instance;
2002 struct bnx2 *bp = netdev_priv(dev);
2004 /* When using INTx, it is possible for the interrupt to arrive
2005 * at the CPU before the status block posted prior to the
2006 * interrupt. Reading a register will flush the status block.
2007 * When using MSI, the MSI message will always complete after
2008 * the status block write.
2010 if ((bp->status_blk->status_idx == bp->last_status_idx) &&
2011 (REG_RD(bp, BNX2_PCICFG_MISC_STATUS) &
2012 BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
2013 return IRQ_NONE;
2015 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2016 BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
2017 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
2019 /* Return here if interrupt is shared and is disabled. */
2020 if (unlikely(atomic_read(&bp->intr_sem) != 0))
2021 return IRQ_HANDLED;
2023 netif_rx_schedule(dev);
2025 return IRQ_HANDLED;
2028 static inline int
2029 bnx2_has_work(struct bnx2 *bp)
2031 struct status_block *sblk = bp->status_blk;
2033 if ((sblk->status_rx_quick_consumer_index0 != bp->hw_rx_cons) ||
2034 (sblk->status_tx_quick_consumer_index0 != bp->hw_tx_cons))
2035 return 1;
2037 if (((sblk->status_attn_bits & STATUS_ATTN_BITS_LINK_STATE) != 0) !=
2038 bp->link_up)
2039 return 1;
2041 return 0;
2044 static int
2045 bnx2_poll(struct net_device *dev, int *budget)
2047 struct bnx2 *bp = netdev_priv(dev);
2049 if ((bp->status_blk->status_attn_bits &
2050 STATUS_ATTN_BITS_LINK_STATE) !=
2051 (bp->status_blk->status_attn_bits_ack &
2052 STATUS_ATTN_BITS_LINK_STATE)) {
2054 spin_lock(&bp->phy_lock);
2055 bnx2_phy_int(bp);
2056 spin_unlock(&bp->phy_lock);
2058 /* This is needed to take care of transient status
2059 * during link changes.
2061 REG_WR(bp, BNX2_HC_COMMAND,
2062 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
2063 REG_RD(bp, BNX2_HC_COMMAND);
2066 if (bp->status_blk->status_tx_quick_consumer_index0 != bp->hw_tx_cons)
2067 bnx2_tx_int(bp);
2069 if (bp->status_blk->status_rx_quick_consumer_index0 != bp->hw_rx_cons) {
2070 int orig_budget = *budget;
2071 int work_done;
2073 if (orig_budget > dev->quota)
2074 orig_budget = dev->quota;
2076 work_done = bnx2_rx_int(bp, orig_budget);
2077 *budget -= work_done;
2078 dev->quota -= work_done;
2081 bp->last_status_idx = bp->status_blk->status_idx;
2082 rmb();
2084 if (!bnx2_has_work(bp)) {
2085 netif_rx_complete(dev);
2086 if (likely(bp->flags & USING_MSI_FLAG)) {
2087 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2088 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
2089 bp->last_status_idx);
2090 return 0;
2092 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2093 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
2094 BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
2095 bp->last_status_idx);
2097 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2098 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
2099 bp->last_status_idx);
2100 return 0;
2103 return 1;
2106 /* Called with rtnl_lock from vlan functions and also netif_tx_lock
2107 * from set_multicast.
2109 static void
2110 bnx2_set_rx_mode(struct net_device *dev)
2112 struct bnx2 *bp = netdev_priv(dev);
2113 u32 rx_mode, sort_mode;
2114 int i;
2116 spin_lock_bh(&bp->phy_lock);
2118 rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
2119 BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
2120 sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
2121 #ifdef BCM_VLAN
2122 if (!bp->vlgrp && !(bp->flags & ASF_ENABLE_FLAG))
2123 rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
2124 #else
2125 if (!(bp->flags & ASF_ENABLE_FLAG))
2126 rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
2127 #endif
2128 if (dev->flags & IFF_PROMISC) {
2129 /* Promiscuous mode. */
2130 rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
2131 sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
2132 BNX2_RPM_SORT_USER0_PROM_VLAN;
2134 else if (dev->flags & IFF_ALLMULTI) {
2135 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
2136 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
2137 0xffffffff);
2139 sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
2141 else {
2142 /* Accept one or more multicast(s). */
2143 struct dev_mc_list *mclist;
2144 u32 mc_filter[NUM_MC_HASH_REGISTERS];
2145 u32 regidx;
2146 u32 bit;
2147 u32 crc;
2149 memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);
2151 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
2152 i++, mclist = mclist->next) {
2154 crc = ether_crc_le(ETH_ALEN, mclist->dmi_addr);
2155 bit = crc & 0xff;
2156 regidx = (bit & 0xe0) >> 5;
2157 bit &= 0x1f;
2158 mc_filter[regidx] |= (1 << bit);
2161 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
2162 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
2163 mc_filter[i]);
2166 sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
2169 if (rx_mode != bp->rx_mode) {
2170 bp->rx_mode = rx_mode;
2171 REG_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
2174 REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
2175 REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
2176 REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);
2178 spin_unlock_bh(&bp->phy_lock);
2181 #define FW_BUF_SIZE 0x8000
2183 static int
2184 bnx2_gunzip_init(struct bnx2 *bp)
2186 if ((bp->gunzip_buf = vmalloc(FW_BUF_SIZE)) == NULL)
2187 goto gunzip_nomem1;
2189 if ((bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL)) == NULL)
2190 goto gunzip_nomem2;
2192 bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(), GFP_KERNEL);
2193 if (bp->strm->workspace == NULL)
2194 goto gunzip_nomem3;
2196 return 0;
2198 gunzip_nomem3:
2199 kfree(bp->strm);
2200 bp->strm = NULL;
2202 gunzip_nomem2:
2203 vfree(bp->gunzip_buf);
2204 bp->gunzip_buf = NULL;
2206 gunzip_nomem1:
2207 printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for "
2208 "uncompression.\n", bp->dev->name);
2209 return -ENOMEM;
2212 static void
2213 bnx2_gunzip_end(struct bnx2 *bp)
2215 kfree(bp->strm->workspace);
2217 kfree(bp->strm);
2218 bp->strm = NULL;
2220 if (bp->gunzip_buf) {
2221 vfree(bp->gunzip_buf);
2222 bp->gunzip_buf = NULL;
2226 static int
2227 bnx2_gunzip(struct bnx2 *bp, u8 *zbuf, int len, void **outbuf, int *outlen)
2229 int n, rc;
2231 /* check gzip header */
2232 if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED))
2233 return -EINVAL;
2235 n = 10;
2237 #define FNAME 0x8
2238 if (zbuf[3] & FNAME)
2239 while ((zbuf[n++] != 0) && (n < len));
2241 bp->strm->next_in = zbuf + n;
2242 bp->strm->avail_in = len - n;
2243 bp->strm->next_out = bp->gunzip_buf;
2244 bp->strm->avail_out = FW_BUF_SIZE;
2246 rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
2247 if (rc != Z_OK)
2248 return rc;
2250 rc = zlib_inflate(bp->strm, Z_FINISH);
2252 *outlen = FW_BUF_SIZE - bp->strm->avail_out;
2253 *outbuf = bp->gunzip_buf;
2255 if ((rc != Z_OK) && (rc != Z_STREAM_END))
2256 printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
2257 bp->dev->name, bp->strm->msg);
2259 zlib_inflateEnd(bp->strm);
2261 if (rc == Z_STREAM_END)
2262 return 0;
2264 return rc;
2267 static void
2268 load_rv2p_fw(struct bnx2 *bp, u32 *rv2p_code, u32 rv2p_code_len,
2269 u32 rv2p_proc)
2271 int i;
2272 u32 val;
2275 for (i = 0; i < rv2p_code_len; i += 8) {
2276 REG_WR(bp, BNX2_RV2P_INSTR_HIGH, cpu_to_le32(*rv2p_code));
2277 rv2p_code++;
2278 REG_WR(bp, BNX2_RV2P_INSTR_LOW, cpu_to_le32(*rv2p_code));
2279 rv2p_code++;
2281 if (rv2p_proc == RV2P_PROC1) {
2282 val = (i / 8) | BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
2283 REG_WR(bp, BNX2_RV2P_PROC1_ADDR_CMD, val);
2285 else {
2286 val = (i / 8) | BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
2287 REG_WR(bp, BNX2_RV2P_PROC2_ADDR_CMD, val);
2291 /* Reset the processor, un-stall is done later. */
2292 if (rv2p_proc == RV2P_PROC1) {
2293 REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
2295 else {
2296 REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
2300 static int
2301 load_cpu_fw(struct bnx2 *bp, struct cpu_reg *cpu_reg, struct fw_info *fw)
2303 u32 offset;
2304 u32 val;
2305 int rc;
2307 /* Halt the CPU. */
2308 val = REG_RD_IND(bp, cpu_reg->mode);
2309 val |= cpu_reg->mode_value_halt;
2310 REG_WR_IND(bp, cpu_reg->mode, val);
2311 REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);
2313 /* Load the Text area. */
2314 offset = cpu_reg->spad_base + (fw->text_addr - cpu_reg->mips_view_base);
2315 if (fw->gz_text) {
2316 u32 text_len;
2317 void *text;
2319 rc = bnx2_gunzip(bp, fw->gz_text, fw->gz_text_len, &text,
2320 &text_len);
2321 if (rc)
2322 return rc;
2324 fw->text = text;
2326 if (fw->gz_text) {
2327 int j;
2329 for (j = 0; j < (fw->text_len / 4); j++, offset += 4) {
2330 REG_WR_IND(bp, offset, cpu_to_le32(fw->text[j]));
2334 /* Load the Data area. */
2335 offset = cpu_reg->spad_base + (fw->data_addr - cpu_reg->mips_view_base);
2336 if (fw->data) {
2337 int j;
2339 for (j = 0; j < (fw->data_len / 4); j++, offset += 4) {
2340 REG_WR_IND(bp, offset, fw->data[j]);
2344 /* Load the SBSS area. */
2345 offset = cpu_reg->spad_base + (fw->sbss_addr - cpu_reg->mips_view_base);
2346 if (fw->sbss) {
2347 int j;
2349 for (j = 0; j < (fw->sbss_len / 4); j++, offset += 4) {
2350 REG_WR_IND(bp, offset, fw->sbss[j]);
2354 /* Load the BSS area. */
2355 offset = cpu_reg->spad_base + (fw->bss_addr - cpu_reg->mips_view_base);
2356 if (fw->bss) {
2357 int j;
2359 for (j = 0; j < (fw->bss_len/4); j++, offset += 4) {
2360 REG_WR_IND(bp, offset, fw->bss[j]);
2364 /* Load the Read-Only area. */
2365 offset = cpu_reg->spad_base +
2366 (fw->rodata_addr - cpu_reg->mips_view_base);
2367 if (fw->rodata) {
2368 int j;
2370 for (j = 0; j < (fw->rodata_len / 4); j++, offset += 4) {
2371 REG_WR_IND(bp, offset, fw->rodata[j]);
2375 /* Clear the pre-fetch instruction. */
2376 REG_WR_IND(bp, cpu_reg->inst, 0);
2377 REG_WR_IND(bp, cpu_reg->pc, fw->start_addr);
2379 /* Start the CPU. */
2380 val = REG_RD_IND(bp, cpu_reg->mode);
2381 val &= ~cpu_reg->mode_value_halt;
2382 REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);
2383 REG_WR_IND(bp, cpu_reg->mode, val);
2385 return 0;
2388 static int
2389 bnx2_init_cpus(struct bnx2 *bp)
2391 struct cpu_reg cpu_reg;
2392 struct fw_info *fw;
2393 int rc = 0;
2394 void *text;
2395 u32 text_len;
2397 if ((rc = bnx2_gunzip_init(bp)) != 0)
2398 return rc;
2400 /* Initialize the RV2P processor. */
2401 rc = bnx2_gunzip(bp, bnx2_rv2p_proc1, sizeof(bnx2_rv2p_proc1), &text,
2402 &text_len);
2403 if (rc)
2404 goto init_cpu_err;
2406 load_rv2p_fw(bp, text, text_len, RV2P_PROC1);
2408 rc = bnx2_gunzip(bp, bnx2_rv2p_proc2, sizeof(bnx2_rv2p_proc2), &text,
2409 &text_len);
2410 if (rc)
2411 goto init_cpu_err;
2413 load_rv2p_fw(bp, text, text_len, RV2P_PROC2);
2415 /* Initialize the RX Processor. */
2416 cpu_reg.mode = BNX2_RXP_CPU_MODE;
2417 cpu_reg.mode_value_halt = BNX2_RXP_CPU_MODE_SOFT_HALT;
2418 cpu_reg.mode_value_sstep = BNX2_RXP_CPU_MODE_STEP_ENA;
2419 cpu_reg.state = BNX2_RXP_CPU_STATE;
2420 cpu_reg.state_value_clear = 0xffffff;
2421 cpu_reg.gpr0 = BNX2_RXP_CPU_REG_FILE;
2422 cpu_reg.evmask = BNX2_RXP_CPU_EVENT_MASK;
2423 cpu_reg.pc = BNX2_RXP_CPU_PROGRAM_COUNTER;
2424 cpu_reg.inst = BNX2_RXP_CPU_INSTRUCTION;
2425 cpu_reg.bp = BNX2_RXP_CPU_HW_BREAKPOINT;
2426 cpu_reg.spad_base = BNX2_RXP_SCRATCH;
2427 cpu_reg.mips_view_base = 0x8000000;
2429 if (CHIP_NUM(bp) == CHIP_NUM_5709)
2430 fw = &bnx2_rxp_fw_09;
2431 else
2432 fw = &bnx2_rxp_fw_06;
2434 rc = load_cpu_fw(bp, &cpu_reg, fw);
2435 if (rc)
2436 goto init_cpu_err;
2438 /* Initialize the TX Processor. */
2439 cpu_reg.mode = BNX2_TXP_CPU_MODE;
2440 cpu_reg.mode_value_halt = BNX2_TXP_CPU_MODE_SOFT_HALT;
2441 cpu_reg.mode_value_sstep = BNX2_TXP_CPU_MODE_STEP_ENA;
2442 cpu_reg.state = BNX2_TXP_CPU_STATE;
2443 cpu_reg.state_value_clear = 0xffffff;
2444 cpu_reg.gpr0 = BNX2_TXP_CPU_REG_FILE;
2445 cpu_reg.evmask = BNX2_TXP_CPU_EVENT_MASK;
2446 cpu_reg.pc = BNX2_TXP_CPU_PROGRAM_COUNTER;
2447 cpu_reg.inst = BNX2_TXP_CPU_INSTRUCTION;
2448 cpu_reg.bp = BNX2_TXP_CPU_HW_BREAKPOINT;
2449 cpu_reg.spad_base = BNX2_TXP_SCRATCH;
2450 cpu_reg.mips_view_base = 0x8000000;
2452 if (CHIP_NUM(bp) == CHIP_NUM_5709)
2453 fw = &bnx2_txp_fw_09;
2454 else
2455 fw = &bnx2_txp_fw_06;
2457 rc = load_cpu_fw(bp, &cpu_reg, fw);
2458 if (rc)
2459 goto init_cpu_err;
2461 /* Initialize the TX Patch-up Processor. */
2462 cpu_reg.mode = BNX2_TPAT_CPU_MODE;
2463 cpu_reg.mode_value_halt = BNX2_TPAT_CPU_MODE_SOFT_HALT;
2464 cpu_reg.mode_value_sstep = BNX2_TPAT_CPU_MODE_STEP_ENA;
2465 cpu_reg.state = BNX2_TPAT_CPU_STATE;
2466 cpu_reg.state_value_clear = 0xffffff;
2467 cpu_reg.gpr0 = BNX2_TPAT_CPU_REG_FILE;
2468 cpu_reg.evmask = BNX2_TPAT_CPU_EVENT_MASK;
2469 cpu_reg.pc = BNX2_TPAT_CPU_PROGRAM_COUNTER;
2470 cpu_reg.inst = BNX2_TPAT_CPU_INSTRUCTION;
2471 cpu_reg.bp = BNX2_TPAT_CPU_HW_BREAKPOINT;
2472 cpu_reg.spad_base = BNX2_TPAT_SCRATCH;
2473 cpu_reg.mips_view_base = 0x8000000;
2475 if (CHIP_NUM(bp) == CHIP_NUM_5709)
2476 fw = &bnx2_tpat_fw_09;
2477 else
2478 fw = &bnx2_tpat_fw_06;
2480 rc = load_cpu_fw(bp, &cpu_reg, fw);
2481 if (rc)
2482 goto init_cpu_err;
2484 /* Initialize the Completion Processor. */
2485 cpu_reg.mode = BNX2_COM_CPU_MODE;
2486 cpu_reg.mode_value_halt = BNX2_COM_CPU_MODE_SOFT_HALT;
2487 cpu_reg.mode_value_sstep = BNX2_COM_CPU_MODE_STEP_ENA;
2488 cpu_reg.state = BNX2_COM_CPU_STATE;
2489 cpu_reg.state_value_clear = 0xffffff;
2490 cpu_reg.gpr0 = BNX2_COM_CPU_REG_FILE;
2491 cpu_reg.evmask = BNX2_COM_CPU_EVENT_MASK;
2492 cpu_reg.pc = BNX2_COM_CPU_PROGRAM_COUNTER;
2493 cpu_reg.inst = BNX2_COM_CPU_INSTRUCTION;
2494 cpu_reg.bp = BNX2_COM_CPU_HW_BREAKPOINT;
2495 cpu_reg.spad_base = BNX2_COM_SCRATCH;
2496 cpu_reg.mips_view_base = 0x8000000;
2498 if (CHIP_NUM(bp) == CHIP_NUM_5709)
2499 fw = &bnx2_com_fw_09;
2500 else
2501 fw = &bnx2_com_fw_06;
2503 rc = load_cpu_fw(bp, &cpu_reg, fw);
2504 if (rc)
2505 goto init_cpu_err;
2507 /* Initialize the Command Processor. */
2508 cpu_reg.mode = BNX2_CP_CPU_MODE;
2509 cpu_reg.mode_value_halt = BNX2_CP_CPU_MODE_SOFT_HALT;
2510 cpu_reg.mode_value_sstep = BNX2_CP_CPU_MODE_STEP_ENA;
2511 cpu_reg.state = BNX2_CP_CPU_STATE;
2512 cpu_reg.state_value_clear = 0xffffff;
2513 cpu_reg.gpr0 = BNX2_CP_CPU_REG_FILE;
2514 cpu_reg.evmask = BNX2_CP_CPU_EVENT_MASK;
2515 cpu_reg.pc = BNX2_CP_CPU_PROGRAM_COUNTER;
2516 cpu_reg.inst = BNX2_CP_CPU_INSTRUCTION;
2517 cpu_reg.bp = BNX2_CP_CPU_HW_BREAKPOINT;
2518 cpu_reg.spad_base = BNX2_CP_SCRATCH;
2519 cpu_reg.mips_view_base = 0x8000000;
2521 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
2522 fw = &bnx2_cp_fw_09;
2524 rc = load_cpu_fw(bp, &cpu_reg, fw);
2525 if (rc)
2526 goto init_cpu_err;
2528 init_cpu_err:
2529 bnx2_gunzip_end(bp);
2530 return rc;
2533 static int
2534 bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
2536 u16 pmcsr;
2538 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
2540 switch (state) {
2541 case PCI_D0: {
2542 u32 val;
2544 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
2545 (pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
2546 PCI_PM_CTRL_PME_STATUS);
2548 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
2549 /* delay required during transition out of D3hot */
2550 msleep(20);
2552 val = REG_RD(bp, BNX2_EMAC_MODE);
2553 val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
2554 val &= ~BNX2_EMAC_MODE_MPKT;
2555 REG_WR(bp, BNX2_EMAC_MODE, val);
2557 val = REG_RD(bp, BNX2_RPM_CONFIG);
2558 val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
2559 REG_WR(bp, BNX2_RPM_CONFIG, val);
2560 break;
2562 case PCI_D3hot: {
2563 int i;
2564 u32 val, wol_msg;
2566 if (bp->wol) {
2567 u32 advertising;
2568 u8 autoneg;
2570 autoneg = bp->autoneg;
2571 advertising = bp->advertising;
2573 bp->autoneg = AUTONEG_SPEED;
2574 bp->advertising = ADVERTISED_10baseT_Half |
2575 ADVERTISED_10baseT_Full |
2576 ADVERTISED_100baseT_Half |
2577 ADVERTISED_100baseT_Full |
2578 ADVERTISED_Autoneg;
2580 bnx2_setup_copper_phy(bp);
2582 bp->autoneg = autoneg;
2583 bp->advertising = advertising;
2585 bnx2_set_mac_addr(bp);
2587 val = REG_RD(bp, BNX2_EMAC_MODE);
2589 /* Enable port mode. */
2590 val &= ~BNX2_EMAC_MODE_PORT;
2591 val |= BNX2_EMAC_MODE_PORT_MII |
2592 BNX2_EMAC_MODE_MPKT_RCVD |
2593 BNX2_EMAC_MODE_ACPI_RCVD |
2594 BNX2_EMAC_MODE_MPKT;
2596 REG_WR(bp, BNX2_EMAC_MODE, val);
2598 /* receive all multicast */
2599 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
2600 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
2601 0xffffffff);
2603 REG_WR(bp, BNX2_EMAC_RX_MODE,
2604 BNX2_EMAC_RX_MODE_SORT_MODE);
2606 val = 1 | BNX2_RPM_SORT_USER0_BC_EN |
2607 BNX2_RPM_SORT_USER0_MC_EN;
2608 REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
2609 REG_WR(bp, BNX2_RPM_SORT_USER0, val);
2610 REG_WR(bp, BNX2_RPM_SORT_USER0, val |
2611 BNX2_RPM_SORT_USER0_ENA);
2613 /* Need to enable EMAC and RPM for WOL. */
2614 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
2615 BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
2616 BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
2617 BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);
2619 val = REG_RD(bp, BNX2_RPM_CONFIG);
2620 val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
2621 REG_WR(bp, BNX2_RPM_CONFIG, val);
2623 wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
2625 else {
2626 wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
2629 if (!(bp->flags & NO_WOL_FLAG))
2630 bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg, 0);
2632 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
2633 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
2634 (CHIP_ID(bp) == CHIP_ID_5706_A1)) {
2636 if (bp->wol)
2637 pmcsr |= 3;
2639 else {
2640 pmcsr |= 3;
2642 if (bp->wol) {
2643 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
2645 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
2646 pmcsr);
2648 /* No more memory access after this point until
2649 * device is brought back to D0.
2651 udelay(50);
2652 break;
2654 default:
2655 return -EINVAL;
2657 return 0;
2660 static int
2661 bnx2_acquire_nvram_lock(struct bnx2 *bp)
2663 u32 val;
2664 int j;
2666 /* Request access to the flash interface. */
2667 REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
2668 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2669 val = REG_RD(bp, BNX2_NVM_SW_ARB);
2670 if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
2671 break;
2673 udelay(5);
2676 if (j >= NVRAM_TIMEOUT_COUNT)
2677 return -EBUSY;
2679 return 0;
2682 static int
2683 bnx2_release_nvram_lock(struct bnx2 *bp)
2685 int j;
2686 u32 val;
2688 /* Relinquish nvram interface. */
2689 REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);
2691 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2692 val = REG_RD(bp, BNX2_NVM_SW_ARB);
2693 if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
2694 break;
2696 udelay(5);
2699 if (j >= NVRAM_TIMEOUT_COUNT)
2700 return -EBUSY;
2702 return 0;
2706 static int
2707 bnx2_enable_nvram_write(struct bnx2 *bp)
2709 u32 val;
2711 val = REG_RD(bp, BNX2_MISC_CFG);
2712 REG_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);
2714 if (!bp->flash_info->buffered) {
2715 int j;
2717 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
2718 REG_WR(bp, BNX2_NVM_COMMAND,
2719 BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);
2721 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2722 udelay(5);
2724 val = REG_RD(bp, BNX2_NVM_COMMAND);
2725 if (val & BNX2_NVM_COMMAND_DONE)
2726 break;
2729 if (j >= NVRAM_TIMEOUT_COUNT)
2730 return -EBUSY;
2732 return 0;
2735 static void
2736 bnx2_disable_nvram_write(struct bnx2 *bp)
2738 u32 val;
2740 val = REG_RD(bp, BNX2_MISC_CFG);
2741 REG_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
2745 static void
2746 bnx2_enable_nvram_access(struct bnx2 *bp)
2748 u32 val;
2750 val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
2751 /* Enable both bits, even on read. */
2752 REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
2753 val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
2756 static void
2757 bnx2_disable_nvram_access(struct bnx2 *bp)
2759 u32 val;
2761 val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
2762 /* Disable both bits, even after read. */
2763 REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
2764 val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
2765 BNX2_NVM_ACCESS_ENABLE_WR_EN));
2768 static int
2769 bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
2771 u32 cmd;
2772 int j;
2774 if (bp->flash_info->buffered)
2775 /* Buffered flash, no erase needed */
2776 return 0;
2778 /* Build an erase command */
2779 cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
2780 BNX2_NVM_COMMAND_DOIT;
2782 /* Need to clear DONE bit separately. */
2783 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
2785 /* Address of the NVRAM to read from. */
2786 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
2788 /* Issue an erase command. */
2789 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
2791 /* Wait for completion. */
2792 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2793 u32 val;
2795 udelay(5);
2797 val = REG_RD(bp, BNX2_NVM_COMMAND);
2798 if (val & BNX2_NVM_COMMAND_DONE)
2799 break;
2802 if (j >= NVRAM_TIMEOUT_COUNT)
2803 return -EBUSY;
2805 return 0;
2808 static int
2809 bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
2811 u32 cmd;
2812 int j;
2814 /* Build the command word. */
2815 cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;
2817 /* Calculate an offset of a buffered flash. */
2818 if (bp->flash_info->buffered) {
2819 offset = ((offset / bp->flash_info->page_size) <<
2820 bp->flash_info->page_bits) +
2821 (offset % bp->flash_info->page_size);
2824 /* Need to clear DONE bit separately. */
2825 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
2827 /* Address of the NVRAM to read from. */
2828 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
2830 /* Issue a read command. */
2831 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
2833 /* Wait for completion. */
2834 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2835 u32 val;
2837 udelay(5);
2839 val = REG_RD(bp, BNX2_NVM_COMMAND);
2840 if (val & BNX2_NVM_COMMAND_DONE) {
2841 val = REG_RD(bp, BNX2_NVM_READ);
2843 val = be32_to_cpu(val);
2844 memcpy(ret_val, &val, 4);
2845 break;
2848 if (j >= NVRAM_TIMEOUT_COUNT)
2849 return -EBUSY;
2851 return 0;
2855 static int
2856 bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
2858 u32 cmd, val32;
2859 int j;
2861 /* Build the command word. */
2862 cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;
2864 /* Calculate an offset of a buffered flash. */
2865 if (bp->flash_info->buffered) {
2866 offset = ((offset / bp->flash_info->page_size) <<
2867 bp->flash_info->page_bits) +
2868 (offset % bp->flash_info->page_size);
2871 /* Need to clear DONE bit separately. */
2872 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
2874 memcpy(&val32, val, 4);
2875 val32 = cpu_to_be32(val32);
2877 /* Write the data. */
2878 REG_WR(bp, BNX2_NVM_WRITE, val32);
2880 /* Address of the NVRAM to write to. */
2881 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
2883 /* Issue the write command. */
2884 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
2886 /* Wait for completion. */
2887 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2888 udelay(5);
2890 if (REG_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
2891 break;
2893 if (j >= NVRAM_TIMEOUT_COUNT)
2894 return -EBUSY;
2896 return 0;
2899 static int
2900 bnx2_init_nvram(struct bnx2 *bp)
2902 u32 val;
2903 int j, entry_count, rc;
2904 struct flash_spec *flash;
2906 /* Determine the selected interface. */
2907 val = REG_RD(bp, BNX2_NVM_CFG1);
2909 entry_count = sizeof(flash_table) / sizeof(struct flash_spec);
2911 rc = 0;
2912 if (val & 0x40000000) {
2914 /* Flash interface has been reconfigured */
2915 for (j = 0, flash = &flash_table[0]; j < entry_count;
2916 j++, flash++) {
2917 if ((val & FLASH_BACKUP_STRAP_MASK) ==
2918 (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
2919 bp->flash_info = flash;
2920 break;
2924 else {
2925 u32 mask;
2926 /* Not yet been reconfigured */
2928 if (val & (1 << 23))
2929 mask = FLASH_BACKUP_STRAP_MASK;
2930 else
2931 mask = FLASH_STRAP_MASK;
2933 for (j = 0, flash = &flash_table[0]; j < entry_count;
2934 j++, flash++) {
2936 if ((val & mask) == (flash->strapping & mask)) {
2937 bp->flash_info = flash;
2939 /* Request access to the flash interface. */
2940 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
2941 return rc;
2943 /* Enable access to flash interface */
2944 bnx2_enable_nvram_access(bp);
2946 /* Reconfigure the flash interface */
2947 REG_WR(bp, BNX2_NVM_CFG1, flash->config1);
2948 REG_WR(bp, BNX2_NVM_CFG2, flash->config2);
2949 REG_WR(bp, BNX2_NVM_CFG3, flash->config3);
2950 REG_WR(bp, BNX2_NVM_WRITE1, flash->write1);
2952 /* Disable access to flash interface */
2953 bnx2_disable_nvram_access(bp);
2954 bnx2_release_nvram_lock(bp);
2956 break;
2959 } /* if (val & 0x40000000) */
2961 if (j == entry_count) {
2962 bp->flash_info = NULL;
2963 printk(KERN_ALERT PFX "Unknown flash/EEPROM type.\n");
2964 return -ENODEV;
2967 val = REG_RD_IND(bp, bp->shmem_base + BNX2_SHARED_HW_CFG_CONFIG2);
2968 val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
2969 if (val)
2970 bp->flash_size = val;
2971 else
2972 bp->flash_size = bp->flash_info->total_size;
2974 return rc;
2977 static int
2978 bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
2979 int buf_size)
2981 int rc = 0;
2982 u32 cmd_flags, offset32, len32, extra;
2984 if (buf_size == 0)
2985 return 0;
2987 /* Request access to the flash interface. */
2988 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
2989 return rc;
2991 /* Enable access to flash interface */
2992 bnx2_enable_nvram_access(bp);
2994 len32 = buf_size;
2995 offset32 = offset;
2996 extra = 0;
2998 cmd_flags = 0;
3000 if (offset32 & 3) {
3001 u8 buf[4];
3002 u32 pre_len;
3004 offset32 &= ~3;
3005 pre_len = 4 - (offset & 3);
3007 if (pre_len >= len32) {
3008 pre_len = len32;
3009 cmd_flags = BNX2_NVM_COMMAND_FIRST |
3010 BNX2_NVM_COMMAND_LAST;
3012 else {
3013 cmd_flags = BNX2_NVM_COMMAND_FIRST;
3016 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3018 if (rc)
3019 return rc;
3021 memcpy(ret_buf, buf + (offset & 3), pre_len);
3023 offset32 += 4;
3024 ret_buf += pre_len;
3025 len32 -= pre_len;
3027 if (len32 & 3) {
3028 extra = 4 - (len32 & 3);
3029 len32 = (len32 + 4) & ~3;
3032 if (len32 == 4) {
3033 u8 buf[4];
3035 if (cmd_flags)
3036 cmd_flags = BNX2_NVM_COMMAND_LAST;
3037 else
3038 cmd_flags = BNX2_NVM_COMMAND_FIRST |
3039 BNX2_NVM_COMMAND_LAST;
3041 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3043 memcpy(ret_buf, buf, 4 - extra);
3045 else if (len32 > 0) {
3046 u8 buf[4];
3048 /* Read the first word. */
3049 if (cmd_flags)
3050 cmd_flags = 0;
3051 else
3052 cmd_flags = BNX2_NVM_COMMAND_FIRST;
3054 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);
3056 /* Advance to the next dword. */
3057 offset32 += 4;
3058 ret_buf += 4;
3059 len32 -= 4;
3061 while (len32 > 4 && rc == 0) {
3062 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);
3064 /* Advance to the next dword. */
3065 offset32 += 4;
3066 ret_buf += 4;
3067 len32 -= 4;
3070 if (rc)
3071 return rc;
3073 cmd_flags = BNX2_NVM_COMMAND_LAST;
3074 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3076 memcpy(ret_buf, buf, 4 - extra);
3079 /* Disable access to flash interface */
3080 bnx2_disable_nvram_access(bp);
3082 bnx2_release_nvram_lock(bp);
3084 return rc;
3087 static int
3088 bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
3089 int buf_size)
3091 u32 written, offset32, len32;
3092 u8 *buf, start[4], end[4], *align_buf = NULL, *flash_buffer = NULL;
3093 int rc = 0;
3094 int align_start, align_end;
3096 buf = data_buf;
3097 offset32 = offset;
3098 len32 = buf_size;
3099 align_start = align_end = 0;
3101 if ((align_start = (offset32 & 3))) {
3102 offset32 &= ~3;
3103 len32 += (4 - align_start);
3104 if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
3105 return rc;
3108 if (len32 & 3) {
3109 if ((len32 > 4) || !align_start) {
3110 align_end = 4 - (len32 & 3);
3111 len32 += align_end;
3112 if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4,
3113 end, 4))) {
3114 return rc;
3119 if (align_start || align_end) {
3120 align_buf = kmalloc(len32, GFP_KERNEL);
3121 if (align_buf == NULL)
3122 return -ENOMEM;
3123 if (align_start) {
3124 memcpy(align_buf, start, 4);
3126 if (align_end) {
3127 memcpy(align_buf + len32 - 4, end, 4);
3129 memcpy(align_buf + align_start, data_buf, buf_size);
3130 buf = align_buf;
3133 if (bp->flash_info->buffered == 0) {
3134 flash_buffer = kmalloc(264, GFP_KERNEL);
3135 if (flash_buffer == NULL) {
3136 rc = -ENOMEM;
3137 goto nvram_write_end;
3141 written = 0;
3142 while ((written < len32) && (rc == 0)) {
3143 u32 page_start, page_end, data_start, data_end;
3144 u32 addr, cmd_flags;
3145 int i;
3147 /* Find the page_start addr */
3148 page_start = offset32 + written;
3149 page_start -= (page_start % bp->flash_info->page_size);
3150 /* Find the page_end addr */
3151 page_end = page_start + bp->flash_info->page_size;
3152 /* Find the data_start addr */
3153 data_start = (written == 0) ? offset32 : page_start;
3154 /* Find the data_end addr */
3155 data_end = (page_end > offset32 + len32) ?
3156 (offset32 + len32) : page_end;
3158 /* Request access to the flash interface. */
3159 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
3160 goto nvram_write_end;
3162 /* Enable access to flash interface */
3163 bnx2_enable_nvram_access(bp);
3165 cmd_flags = BNX2_NVM_COMMAND_FIRST;
3166 if (bp->flash_info->buffered == 0) {
3167 int j;
3169 /* Read the whole page into the buffer
3170 * (non-buffer flash only) */
3171 for (j = 0; j < bp->flash_info->page_size; j += 4) {
3172 if (j == (bp->flash_info->page_size - 4)) {
3173 cmd_flags |= BNX2_NVM_COMMAND_LAST;
3175 rc = bnx2_nvram_read_dword(bp,
3176 page_start + j,
3177 &flash_buffer[j],
3178 cmd_flags);
3180 if (rc)
3181 goto nvram_write_end;
3183 cmd_flags = 0;
3187 /* Enable writes to flash interface (unlock write-protect) */
3188 if ((rc = bnx2_enable_nvram_write(bp)) != 0)
3189 goto nvram_write_end;
3191 /* Erase the page */
3192 if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
3193 goto nvram_write_end;
3195 /* Re-enable the write again for the actual write */
3196 bnx2_enable_nvram_write(bp);
3198 /* Loop to write back the buffer data from page_start to
3199 * data_start */
3200 i = 0;
3201 if (bp->flash_info->buffered == 0) {
3202 for (addr = page_start; addr < data_start;
3203 addr += 4, i += 4) {
3205 rc = bnx2_nvram_write_dword(bp, addr,
3206 &flash_buffer[i], cmd_flags);
3208 if (rc != 0)
3209 goto nvram_write_end;
3211 cmd_flags = 0;
3215 /* Loop to write the new data from data_start to data_end */
3216 for (addr = data_start; addr < data_end; addr += 4, i += 4) {
3217 if ((addr == page_end - 4) ||
3218 ((bp->flash_info->buffered) &&
3219 (addr == data_end - 4))) {
3221 cmd_flags |= BNX2_NVM_COMMAND_LAST;
3223 rc = bnx2_nvram_write_dword(bp, addr, buf,
3224 cmd_flags);
3226 if (rc != 0)
3227 goto nvram_write_end;
3229 cmd_flags = 0;
3230 buf += 4;
3233 /* Loop to write back the buffer data from data_end
3234 * to page_end */
3235 if (bp->flash_info->buffered == 0) {
3236 for (addr = data_end; addr < page_end;
3237 addr += 4, i += 4) {
3239 if (addr == page_end-4) {
3240 cmd_flags = BNX2_NVM_COMMAND_LAST;
3242 rc = bnx2_nvram_write_dword(bp, addr,
3243 &flash_buffer[i], cmd_flags);
3245 if (rc != 0)
3246 goto nvram_write_end;
3248 cmd_flags = 0;
3252 /* Disable writes to flash interface (lock write-protect) */
3253 bnx2_disable_nvram_write(bp);
3255 /* Disable access to flash interface */
3256 bnx2_disable_nvram_access(bp);
3257 bnx2_release_nvram_lock(bp);
3259 /* Increment written */
3260 written += data_end - data_start;
3263 nvram_write_end:
3264 kfree(flash_buffer);
3265 kfree(align_buf);
3266 return rc;
3269 static int
3270 bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
3272 u32 val;
3273 int i, rc = 0;
3275 /* Wait for the current PCI transaction to complete before
3276 * issuing a reset. */
3277 REG_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
3278 BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
3279 BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
3280 BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
3281 BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
3282 val = REG_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
3283 udelay(5);
3285 /* Wait for the firmware to tell us it is ok to issue a reset. */
3286 bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1);
3288 /* Deposit a driver reset signature so the firmware knows that
3289 * this is a soft reset. */
3290 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_RESET_SIGNATURE,
3291 BNX2_DRV_RESET_SIGNATURE_MAGIC);
3293 /* Do a dummy read to force the chip to complete all current transaction
3294 * before we issue a reset. */
3295 val = REG_RD(bp, BNX2_MISC_ID);
3297 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3298 REG_WR(bp, BNX2_MISC_COMMAND, BNX2_MISC_COMMAND_SW_RESET);
3299 REG_RD(bp, BNX2_MISC_COMMAND);
3300 udelay(5);
3302 val = BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
3303 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
3305 pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, val);
3307 } else {
3308 val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3309 BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
3310 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
3312 /* Chip reset. */
3313 REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);
3315 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
3316 (CHIP_ID(bp) == CHIP_ID_5706_A1)) {
3317 current->state = TASK_UNINTERRUPTIBLE;
3318 schedule_timeout(HZ / 50);
3321 /* Reset takes approximate 30 usec */
3322 for (i = 0; i < 10; i++) {
3323 val = REG_RD(bp, BNX2_PCICFG_MISC_CONFIG);
3324 if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3325 BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0)
3326 break;
3327 udelay(10);
3330 if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3331 BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
3332 printk(KERN_ERR PFX "Chip reset did not complete\n");
3333 return -EBUSY;
3337 /* Make sure byte swapping is properly configured. */
3338 val = REG_RD(bp, BNX2_PCI_SWAP_DIAG0);
3339 if (val != 0x01020304) {
3340 printk(KERN_ERR PFX "Chip not in correct endian mode\n");
3341 return -ENODEV;
3344 /* Wait for the firmware to finish its initialization. */
3345 rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 0);
3346 if (rc)
3347 return rc;
3349 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
3350 /* Adjust the voltage regular to two steps lower. The default
3351 * of this register is 0x0000000e. */
3352 REG_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);
3354 /* Remove bad rbuf memory from the free pool. */
3355 rc = bnx2_alloc_bad_rbuf(bp);
3358 return rc;
3361 static int
3362 bnx2_init_chip(struct bnx2 *bp)
3364 u32 val;
3365 int rc;
3367 /* Make sure the interrupt is not active. */
3368 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
3370 val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
3371 BNX2_DMA_CONFIG_DATA_WORD_SWAP |
3372 #ifdef __BIG_ENDIAN
3373 BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
3374 #endif
3375 BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
3376 DMA_READ_CHANS << 12 |
3377 DMA_WRITE_CHANS << 16;
3379 val |= (0x2 << 20) | (1 << 11);
3381 if ((bp->flags & PCIX_FLAG) && (bp->bus_speed_mhz == 133))
3382 val |= (1 << 23);
3384 if ((CHIP_NUM(bp) == CHIP_NUM_5706) &&
3385 (CHIP_ID(bp) != CHIP_ID_5706_A0) && !(bp->flags & PCIX_FLAG))
3386 val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;
3388 REG_WR(bp, BNX2_DMA_CONFIG, val);
3390 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
3391 val = REG_RD(bp, BNX2_TDMA_CONFIG);
3392 val |= BNX2_TDMA_CONFIG_ONE_DMA;
3393 REG_WR(bp, BNX2_TDMA_CONFIG, val);
3396 if (bp->flags & PCIX_FLAG) {
3397 u16 val16;
3399 pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
3400 &val16);
3401 pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
3402 val16 & ~PCI_X_CMD_ERO);
3405 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
3406 BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
3407 BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
3408 BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);
3410 /* Initialize context mapping and zero out the quick contexts. The
3411 * context block must have already been enabled. */
3412 if (CHIP_NUM(bp) == CHIP_NUM_5709)
3413 bnx2_init_5709_context(bp);
3414 else
3415 bnx2_init_context(bp);
3417 if ((rc = bnx2_init_cpus(bp)) != 0)
3418 return rc;
3420 bnx2_init_nvram(bp);
3422 bnx2_set_mac_addr(bp);
3424 val = REG_RD(bp, BNX2_MQ_CONFIG);
3425 val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
3426 val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
3427 REG_WR(bp, BNX2_MQ_CONFIG, val);
3429 val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
3430 REG_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
3431 REG_WR(bp, BNX2_MQ_KNL_WIND_END, val);
3433 val = (BCM_PAGE_BITS - 8) << 24;
3434 REG_WR(bp, BNX2_RV2P_CONFIG, val);
3436 /* Configure page size. */
3437 val = REG_RD(bp, BNX2_TBDR_CONFIG);
3438 val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
3439 val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
3440 REG_WR(bp, BNX2_TBDR_CONFIG, val);
3442 val = bp->mac_addr[0] +
3443 (bp->mac_addr[1] << 8) +
3444 (bp->mac_addr[2] << 16) +
3445 bp->mac_addr[3] +
3446 (bp->mac_addr[4] << 8) +
3447 (bp->mac_addr[5] << 16);
3448 REG_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);
3450 /* Program the MTU. Also include 4 bytes for CRC32. */
3451 val = bp->dev->mtu + ETH_HLEN + 4;
3452 if (val > (MAX_ETHERNET_PACKET_SIZE + 4))
3453 val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
3454 REG_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);
3456 bp->last_status_idx = 0;
3457 bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE;
3459 /* Set up how to generate a link change interrupt. */
3460 REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
3462 REG_WR(bp, BNX2_HC_STATUS_ADDR_L,
3463 (u64) bp->status_blk_mapping & 0xffffffff);
3464 REG_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);
3466 REG_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
3467 (u64) bp->stats_blk_mapping & 0xffffffff);
3468 REG_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
3469 (u64) bp->stats_blk_mapping >> 32);
3471 REG_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
3472 (bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);
3474 REG_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
3475 (bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);
3477 REG_WR(bp, BNX2_HC_COMP_PROD_TRIP,
3478 (bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);
3480 REG_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);
3482 REG_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);
3484 REG_WR(bp, BNX2_HC_COM_TICKS,
3485 (bp->com_ticks_int << 16) | bp->com_ticks);
3487 REG_WR(bp, BNX2_HC_CMD_TICKS,
3488 (bp->cmd_ticks_int << 16) | bp->cmd_ticks);
3490 REG_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks & 0xffff00);
3491 REG_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8); /* 3ms */
3493 if (CHIP_ID(bp) == CHIP_ID_5706_A1)
3494 REG_WR(bp, BNX2_HC_CONFIG, BNX2_HC_CONFIG_COLLECT_STATS);
3495 else {
3496 REG_WR(bp, BNX2_HC_CONFIG, BNX2_HC_CONFIG_RX_TMR_MODE |
3497 BNX2_HC_CONFIG_TX_TMR_MODE |
3498 BNX2_HC_CONFIG_COLLECT_STATS);
3501 /* Clear internal stats counters. */
3502 REG_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);
3504 REG_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_BITS_LINK_STATE);
3506 if (REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_FEATURE) &
3507 BNX2_PORT_FEATURE_ASF_ENABLED)
3508 bp->flags |= ASF_ENABLE_FLAG;
3510 /* Initialize the receive filter. */
3511 bnx2_set_rx_mode(bp->dev);
3513 rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
3516 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, 0x5ffffff);
3517 REG_RD(bp, BNX2_MISC_ENABLE_SET_BITS);
3519 udelay(20);
3521 bp->hc_cmd = REG_RD(bp, BNX2_HC_COMMAND);
3523 return rc;
3526 static void
3527 bnx2_init_tx_context(struct bnx2 *bp, u32 cid)
3529 u32 val, offset0, offset1, offset2, offset3;
3531 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3532 offset0 = BNX2_L2CTX_TYPE_XI;
3533 offset1 = BNX2_L2CTX_CMD_TYPE_XI;
3534 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
3535 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
3536 } else {
3537 offset0 = BNX2_L2CTX_TYPE;
3538 offset1 = BNX2_L2CTX_CMD_TYPE;
3539 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
3540 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
3542 val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
3543 CTX_WR(bp, GET_CID_ADDR(cid), offset0, val);
3545 val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
3546 CTX_WR(bp, GET_CID_ADDR(cid), offset1, val);
3548 val = (u64) bp->tx_desc_mapping >> 32;
3549 CTX_WR(bp, GET_CID_ADDR(cid), offset2, val);
3551 val = (u64) bp->tx_desc_mapping & 0xffffffff;
3552 CTX_WR(bp, GET_CID_ADDR(cid), offset3, val);
3555 static void
3556 bnx2_init_tx_ring(struct bnx2 *bp)
3558 struct tx_bd *txbd;
3559 u32 cid;
3561 bp->tx_wake_thresh = bp->tx_ring_size / 2;
3563 txbd = &bp->tx_desc_ring[MAX_TX_DESC_CNT];
3565 txbd->tx_bd_haddr_hi = (u64) bp->tx_desc_mapping >> 32;
3566 txbd->tx_bd_haddr_lo = (u64) bp->tx_desc_mapping & 0xffffffff;
3568 bp->tx_prod = 0;
3569 bp->tx_cons = 0;
3570 bp->hw_tx_cons = 0;
3571 bp->tx_prod_bseq = 0;
3573 cid = TX_CID;
3574 bp->tx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BIDX;
3575 bp->tx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BSEQ;
3577 bnx2_init_tx_context(bp, cid);
3580 static void
3581 bnx2_init_rx_ring(struct bnx2 *bp)
3583 struct rx_bd *rxbd;
3584 int i;
3585 u16 prod, ring_prod;
3586 u32 val;
3588 /* 8 for CRC and VLAN */
3589 bp->rx_buf_use_size = bp->dev->mtu + ETH_HLEN + bp->rx_offset + 8;
3590 /* hw alignment */
3591 bp->rx_buf_size = bp->rx_buf_use_size + BNX2_RX_ALIGN;
3593 ring_prod = prod = bp->rx_prod = 0;
3594 bp->rx_cons = 0;
3595 bp->hw_rx_cons = 0;
3596 bp->rx_prod_bseq = 0;
3598 for (i = 0; i < bp->rx_max_ring; i++) {
3599 int j;
3601 rxbd = &bp->rx_desc_ring[i][0];
3602 for (j = 0; j < MAX_RX_DESC_CNT; j++, rxbd++) {
3603 rxbd->rx_bd_len = bp->rx_buf_use_size;
3604 rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
3606 if (i == (bp->rx_max_ring - 1))
3607 j = 0;
3608 else
3609 j = i + 1;
3610 rxbd->rx_bd_haddr_hi = (u64) bp->rx_desc_mapping[j] >> 32;
3611 rxbd->rx_bd_haddr_lo = (u64) bp->rx_desc_mapping[j] &
3612 0xffffffff;
3615 val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
3616 val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
3617 val |= 0x02 << 8;
3618 CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_CTX_TYPE, val);
3620 val = (u64) bp->rx_desc_mapping[0] >> 32;
3621 CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_NX_BDHADDR_HI, val);
3623 val = (u64) bp->rx_desc_mapping[0] & 0xffffffff;
3624 CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_NX_BDHADDR_LO, val);
3626 for (i = 0; i < bp->rx_ring_size; i++) {
3627 if (bnx2_alloc_rx_skb(bp, ring_prod) < 0) {
3628 break;
3630 prod = NEXT_RX_BD(prod);
3631 ring_prod = RX_RING_IDX(prod);
3633 bp->rx_prod = prod;
3635 REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, prod);
3637 REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bp->rx_prod_bseq);
3640 static void
3641 bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
3643 u32 num_rings, max;
3645 bp->rx_ring_size = size;
3646 num_rings = 1;
3647 while (size > MAX_RX_DESC_CNT) {
3648 size -= MAX_RX_DESC_CNT;
3649 num_rings++;
3651 /* round to next power of 2 */
3652 max = MAX_RX_RINGS;
3653 while ((max & num_rings) == 0)
3654 max >>= 1;
3656 if (num_rings != max)
3657 max <<= 1;
3659 bp->rx_max_ring = max;
3660 bp->rx_max_ring_idx = (bp->rx_max_ring * RX_DESC_CNT) - 1;
3663 static void
3664 bnx2_free_tx_skbs(struct bnx2 *bp)
3666 int i;
3668 if (bp->tx_buf_ring == NULL)
3669 return;
3671 for (i = 0; i < TX_DESC_CNT; ) {
3672 struct sw_bd *tx_buf = &bp->tx_buf_ring[i];
3673 struct sk_buff *skb = tx_buf->skb;
3674 int j, last;
3676 if (skb == NULL) {
3677 i++;
3678 continue;
3681 pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
3682 skb_headlen(skb), PCI_DMA_TODEVICE);
3684 tx_buf->skb = NULL;
3686 last = skb_shinfo(skb)->nr_frags;
3687 for (j = 0; j < last; j++) {
3688 tx_buf = &bp->tx_buf_ring[i + j + 1];
3689 pci_unmap_page(bp->pdev,
3690 pci_unmap_addr(tx_buf, mapping),
3691 skb_shinfo(skb)->frags[j].size,
3692 PCI_DMA_TODEVICE);
3694 dev_kfree_skb(skb);
3695 i += j + 1;
3700 static void
3701 bnx2_free_rx_skbs(struct bnx2 *bp)
3703 int i;
3705 if (bp->rx_buf_ring == NULL)
3706 return;
3708 for (i = 0; i < bp->rx_max_ring_idx; i++) {
3709 struct sw_bd *rx_buf = &bp->rx_buf_ring[i];
3710 struct sk_buff *skb = rx_buf->skb;
3712 if (skb == NULL)
3713 continue;
3715 pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
3716 bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
3718 rx_buf->skb = NULL;
3720 dev_kfree_skb(skb);
3724 static void
3725 bnx2_free_skbs(struct bnx2 *bp)
3727 bnx2_free_tx_skbs(bp);
3728 bnx2_free_rx_skbs(bp);
3731 static int
3732 bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
3734 int rc;
3736 rc = bnx2_reset_chip(bp, reset_code);
3737 bnx2_free_skbs(bp);
3738 if (rc)
3739 return rc;
3741 if ((rc = bnx2_init_chip(bp)) != 0)
3742 return rc;
3744 bnx2_init_tx_ring(bp);
3745 bnx2_init_rx_ring(bp);
3746 return 0;
3749 static int
3750 bnx2_init_nic(struct bnx2 *bp)
3752 int rc;
3754 if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
3755 return rc;
3757 spin_lock_bh(&bp->phy_lock);
3758 bnx2_init_phy(bp);
3759 spin_unlock_bh(&bp->phy_lock);
3760 bnx2_set_link(bp);
3761 return 0;
3764 static int
3765 bnx2_test_registers(struct bnx2 *bp)
3767 int ret;
3768 int i;
3769 static const struct {
3770 u16 offset;
3771 u16 flags;
3772 u32 rw_mask;
3773 u32 ro_mask;
3774 } reg_tbl[] = {
3775 { 0x006c, 0, 0x00000000, 0x0000003f },
3776 { 0x0090, 0, 0xffffffff, 0x00000000 },
3777 { 0x0094, 0, 0x00000000, 0x00000000 },
3779 { 0x0404, 0, 0x00003f00, 0x00000000 },
3780 { 0x0418, 0, 0x00000000, 0xffffffff },
3781 { 0x041c, 0, 0x00000000, 0xffffffff },
3782 { 0x0420, 0, 0x00000000, 0x80ffffff },
3783 { 0x0424, 0, 0x00000000, 0x00000000 },
3784 { 0x0428, 0, 0x00000000, 0x00000001 },
3785 { 0x0450, 0, 0x00000000, 0x0000ffff },
3786 { 0x0454, 0, 0x00000000, 0xffffffff },
3787 { 0x0458, 0, 0x00000000, 0xffffffff },
3789 { 0x0808, 0, 0x00000000, 0xffffffff },
3790 { 0x0854, 0, 0x00000000, 0xffffffff },
3791 { 0x0868, 0, 0x00000000, 0x77777777 },
3792 { 0x086c, 0, 0x00000000, 0x77777777 },
3793 { 0x0870, 0, 0x00000000, 0x77777777 },
3794 { 0x0874, 0, 0x00000000, 0x77777777 },
3796 { 0x0c00, 0, 0x00000000, 0x00000001 },
3797 { 0x0c04, 0, 0x00000000, 0x03ff0001 },
3798 { 0x0c08, 0, 0x0f0ff073, 0x00000000 },
3800 { 0x1000, 0, 0x00000000, 0x00000001 },
3801 { 0x1004, 0, 0x00000000, 0x000f0001 },
3803 { 0x1408, 0, 0x01c00800, 0x00000000 },
3804 { 0x149c, 0, 0x8000ffff, 0x00000000 },
3805 { 0x14a8, 0, 0x00000000, 0x000001ff },
3806 { 0x14ac, 0, 0x0fffffff, 0x10000000 },
3807 { 0x14b0, 0, 0x00000002, 0x00000001 },
3808 { 0x14b8, 0, 0x00000000, 0x00000000 },
3809 { 0x14c0, 0, 0x00000000, 0x00000009 },
3810 { 0x14c4, 0, 0x00003fff, 0x00000000 },
3811 { 0x14cc, 0, 0x00000000, 0x00000001 },
3812 { 0x14d0, 0, 0xffffffff, 0x00000000 },
3814 { 0x1800, 0, 0x00000000, 0x00000001 },
3815 { 0x1804, 0, 0x00000000, 0x00000003 },
3817 { 0x2800, 0, 0x00000000, 0x00000001 },
3818 { 0x2804, 0, 0x00000000, 0x00003f01 },
3819 { 0x2808, 0, 0x0f3f3f03, 0x00000000 },
3820 { 0x2810, 0, 0xffff0000, 0x00000000 },
3821 { 0x2814, 0, 0xffff0000, 0x00000000 },
3822 { 0x2818, 0, 0xffff0000, 0x00000000 },
3823 { 0x281c, 0, 0xffff0000, 0x00000000 },
3824 { 0x2834, 0, 0xffffffff, 0x00000000 },
3825 { 0x2840, 0, 0x00000000, 0xffffffff },
3826 { 0x2844, 0, 0x00000000, 0xffffffff },
3827 { 0x2848, 0, 0xffffffff, 0x00000000 },
3828 { 0x284c, 0, 0xf800f800, 0x07ff07ff },
3830 { 0x2c00, 0, 0x00000000, 0x00000011 },
3831 { 0x2c04, 0, 0x00000000, 0x00030007 },
3833 { 0x3c00, 0, 0x00000000, 0x00000001 },
3834 { 0x3c04, 0, 0x00000000, 0x00070000 },
3835 { 0x3c08, 0, 0x00007f71, 0x07f00000 },
3836 { 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
3837 { 0x3c10, 0, 0xffffffff, 0x00000000 },
3838 { 0x3c14, 0, 0x00000000, 0xffffffff },
3839 { 0x3c18, 0, 0x00000000, 0xffffffff },
3840 { 0x3c1c, 0, 0xfffff000, 0x00000000 },
3841 { 0x3c20, 0, 0xffffff00, 0x00000000 },
3843 { 0x5004, 0, 0x00000000, 0x0000007f },
3844 { 0x5008, 0, 0x0f0007ff, 0x00000000 },
3845 { 0x500c, 0, 0xf800f800, 0x07ff07ff },
3847 { 0x5c00, 0, 0x00000000, 0x00000001 },
3848 { 0x5c04, 0, 0x00000000, 0x0003000f },
3849 { 0x5c08, 0, 0x00000003, 0x00000000 },
3850 { 0x5c0c, 0, 0x0000fff8, 0x00000000 },
3851 { 0x5c10, 0, 0x00000000, 0xffffffff },
3852 { 0x5c80, 0, 0x00000000, 0x0f7113f1 },
3853 { 0x5c84, 0, 0x00000000, 0x0000f333 },
3854 { 0x5c88, 0, 0x00000000, 0x00077373 },
3855 { 0x5c8c, 0, 0x00000000, 0x0007f737 },
3857 { 0x6808, 0, 0x0000ff7f, 0x00000000 },
3858 { 0x680c, 0, 0xffffffff, 0x00000000 },
3859 { 0x6810, 0, 0xffffffff, 0x00000000 },
3860 { 0x6814, 0, 0xffffffff, 0x00000000 },
3861 { 0x6818, 0, 0xffffffff, 0x00000000 },
3862 { 0x681c, 0, 0xffffffff, 0x00000000 },
3863 { 0x6820, 0, 0x00ff00ff, 0x00000000 },
3864 { 0x6824, 0, 0x00ff00ff, 0x00000000 },
3865 { 0x6828, 0, 0x00ff00ff, 0x00000000 },
3866 { 0x682c, 0, 0x03ff03ff, 0x00000000 },
3867 { 0x6830, 0, 0x03ff03ff, 0x00000000 },
3868 { 0x6834, 0, 0x03ff03ff, 0x00000000 },
3869 { 0x6838, 0, 0x03ff03ff, 0x00000000 },
3870 { 0x683c, 0, 0x0000ffff, 0x00000000 },
3871 { 0x6840, 0, 0x00000ff0, 0x00000000 },
3872 { 0x6844, 0, 0x00ffff00, 0x00000000 },
3873 { 0x684c, 0, 0xffffffff, 0x00000000 },
3874 { 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
3875 { 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
3876 { 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
3877 { 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
3878 { 0x6908, 0, 0x00000000, 0x0001ff0f },
3879 { 0x690c, 0, 0x00000000, 0x0ffe00f0 },
3881 { 0xffff, 0, 0x00000000, 0x00000000 },
3884 ret = 0;
3885 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
3886 u32 offset, rw_mask, ro_mask, save_val, val;
3888 offset = (u32) reg_tbl[i].offset;
3889 rw_mask = reg_tbl[i].rw_mask;
3890 ro_mask = reg_tbl[i].ro_mask;
3892 save_val = readl(bp->regview + offset);
3894 writel(0, bp->regview + offset);
3896 val = readl(bp->regview + offset);
3897 if ((val & rw_mask) != 0) {
3898 goto reg_test_err;
3901 if ((val & ro_mask) != (save_val & ro_mask)) {
3902 goto reg_test_err;
3905 writel(0xffffffff, bp->regview + offset);
3907 val = readl(bp->regview + offset);
3908 if ((val & rw_mask) != rw_mask) {
3909 goto reg_test_err;
3912 if ((val & ro_mask) != (save_val & ro_mask)) {
3913 goto reg_test_err;
3916 writel(save_val, bp->regview + offset);
3917 continue;
3919 reg_test_err:
3920 writel(save_val, bp->regview + offset);
3921 ret = -ENODEV;
3922 break;
3924 return ret;
3927 static int
3928 bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
3930 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
3931 0xaaaaaaaa , 0xaa55aa55, 0x55aa55aa };
3932 int i;
3934 for (i = 0; i < sizeof(test_pattern) / 4; i++) {
3935 u32 offset;
3937 for (offset = 0; offset < size; offset += 4) {
3939 REG_WR_IND(bp, start + offset, test_pattern[i]);
3941 if (REG_RD_IND(bp, start + offset) !=
3942 test_pattern[i]) {
3943 return -ENODEV;
3947 return 0;
3950 static int
3951 bnx2_test_memory(struct bnx2 *bp)
3953 int ret = 0;
3954 int i;
3955 static const struct {
3956 u32 offset;
3957 u32 len;
3958 } mem_tbl[] = {
3959 { 0x60000, 0x4000 },
3960 { 0xa0000, 0x3000 },
3961 { 0xe0000, 0x4000 },
3962 { 0x120000, 0x4000 },
3963 { 0x1a0000, 0x4000 },
3964 { 0x160000, 0x4000 },
3965 { 0xffffffff, 0 },
3968 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
3969 if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
3970 mem_tbl[i].len)) != 0) {
3971 return ret;
3975 return ret;
3978 #define BNX2_MAC_LOOPBACK 0
3979 #define BNX2_PHY_LOOPBACK 1
3981 static int
3982 bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
3984 unsigned int pkt_size, num_pkts, i;
3985 struct sk_buff *skb, *rx_skb;
3986 unsigned char *packet;
3987 u16 rx_start_idx, rx_idx;
3988 dma_addr_t map;
3989 struct tx_bd *txbd;
3990 struct sw_bd *rx_buf;
3991 struct l2_fhdr *rx_hdr;
3992 int ret = -ENODEV;
3994 if (loopback_mode == BNX2_MAC_LOOPBACK) {
3995 bp->loopback = MAC_LOOPBACK;
3996 bnx2_set_mac_loopback(bp);
3998 else if (loopback_mode == BNX2_PHY_LOOPBACK) {
3999 bp->loopback = PHY_LOOPBACK;
4000 bnx2_set_phy_loopback(bp);
4002 else
4003 return -EINVAL;
4005 pkt_size = 1514;
4006 skb = netdev_alloc_skb(bp->dev, pkt_size);
4007 if (!skb)
4008 return -ENOMEM;
4009 packet = skb_put(skb, pkt_size);
4010 memcpy(packet, bp->dev->dev_addr, 6);
4011 memset(packet + 6, 0x0, 8);
4012 for (i = 14; i < pkt_size; i++)
4013 packet[i] = (unsigned char) (i & 0xff);
4015 map = pci_map_single(bp->pdev, skb->data, pkt_size,
4016 PCI_DMA_TODEVICE);
4018 REG_WR(bp, BNX2_HC_COMMAND,
4019 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
4021 REG_RD(bp, BNX2_HC_COMMAND);
4023 udelay(5);
4024 rx_start_idx = bp->status_blk->status_rx_quick_consumer_index0;
4026 num_pkts = 0;
4028 txbd = &bp->tx_desc_ring[TX_RING_IDX(bp->tx_prod)];
4030 txbd->tx_bd_haddr_hi = (u64) map >> 32;
4031 txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
4032 txbd->tx_bd_mss_nbytes = pkt_size;
4033 txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;
4035 num_pkts++;
4036 bp->tx_prod = NEXT_TX_BD(bp->tx_prod);
4037 bp->tx_prod_bseq += pkt_size;
4039 REG_WR16(bp, bp->tx_bidx_addr, bp->tx_prod);
4040 REG_WR(bp, bp->tx_bseq_addr, bp->tx_prod_bseq);
4042 udelay(100);
4044 REG_WR(bp, BNX2_HC_COMMAND,
4045 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
4047 REG_RD(bp, BNX2_HC_COMMAND);
4049 udelay(5);
4051 pci_unmap_single(bp->pdev, map, pkt_size, PCI_DMA_TODEVICE);
4052 dev_kfree_skb(skb);
4054 if (bp->status_blk->status_tx_quick_consumer_index0 != bp->tx_prod) {
4055 goto loopback_test_done;
4058 rx_idx = bp->status_blk->status_rx_quick_consumer_index0;
4059 if (rx_idx != rx_start_idx + num_pkts) {
4060 goto loopback_test_done;
4063 rx_buf = &bp->rx_buf_ring[rx_start_idx];
4064 rx_skb = rx_buf->skb;
4066 rx_hdr = (struct l2_fhdr *) rx_skb->data;
4067 skb_reserve(rx_skb, bp->rx_offset);
4069 pci_dma_sync_single_for_cpu(bp->pdev,
4070 pci_unmap_addr(rx_buf, mapping),
4071 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
4073 if (rx_hdr->l2_fhdr_status &
4074 (L2_FHDR_ERRORS_BAD_CRC |
4075 L2_FHDR_ERRORS_PHY_DECODE |
4076 L2_FHDR_ERRORS_ALIGNMENT |
4077 L2_FHDR_ERRORS_TOO_SHORT |
4078 L2_FHDR_ERRORS_GIANT_FRAME)) {
4080 goto loopback_test_done;
4083 if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) {
4084 goto loopback_test_done;
4087 for (i = 14; i < pkt_size; i++) {
4088 if (*(rx_skb->data + i) != (unsigned char) (i & 0xff)) {
4089 goto loopback_test_done;
4093 ret = 0;
4095 loopback_test_done:
4096 bp->loopback = 0;
4097 return ret;
4100 #define BNX2_MAC_LOOPBACK_FAILED 1
4101 #define BNX2_PHY_LOOPBACK_FAILED 2
4102 #define BNX2_LOOPBACK_FAILED (BNX2_MAC_LOOPBACK_FAILED | \
4103 BNX2_PHY_LOOPBACK_FAILED)
4105 static int
4106 bnx2_test_loopback(struct bnx2 *bp)
4108 int rc = 0;
4110 if (!netif_running(bp->dev))
4111 return BNX2_LOOPBACK_FAILED;
4113 bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
4114 spin_lock_bh(&bp->phy_lock);
4115 bnx2_init_phy(bp);
4116 spin_unlock_bh(&bp->phy_lock);
4117 if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
4118 rc |= BNX2_MAC_LOOPBACK_FAILED;
4119 if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
4120 rc |= BNX2_PHY_LOOPBACK_FAILED;
4121 return rc;
4124 #define NVRAM_SIZE 0x200
4125 #define CRC32_RESIDUAL 0xdebb20e3
4127 static int
4128 bnx2_test_nvram(struct bnx2 *bp)
4130 u32 buf[NVRAM_SIZE / 4];
4131 u8 *data = (u8 *) buf;
4132 int rc = 0;
4133 u32 magic, csum;
4135 if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
4136 goto test_nvram_done;
4138 magic = be32_to_cpu(buf[0]);
4139 if (magic != 0x669955aa) {
4140 rc = -ENODEV;
4141 goto test_nvram_done;
4144 if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0)
4145 goto test_nvram_done;
4147 csum = ether_crc_le(0x100, data);
4148 if (csum != CRC32_RESIDUAL) {
4149 rc = -ENODEV;
4150 goto test_nvram_done;
4153 csum = ether_crc_le(0x100, data + 0x100);
4154 if (csum != CRC32_RESIDUAL) {
4155 rc = -ENODEV;
4158 test_nvram_done:
4159 return rc;
4162 static int
4163 bnx2_test_link(struct bnx2 *bp)
4165 u32 bmsr;
4167 spin_lock_bh(&bp->phy_lock);
4168 bnx2_read_phy(bp, MII_BMSR, &bmsr);
4169 bnx2_read_phy(bp, MII_BMSR, &bmsr);
4170 spin_unlock_bh(&bp->phy_lock);
4172 if (bmsr & BMSR_LSTATUS) {
4173 return 0;
4175 return -ENODEV;
4178 static int
4179 bnx2_test_intr(struct bnx2 *bp)
4181 int i;
4182 u16 status_idx;
4184 if (!netif_running(bp->dev))
4185 return -ENODEV;
4187 status_idx = REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;
4189 /* This register is not touched during run-time. */
4190 REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
4191 REG_RD(bp, BNX2_HC_COMMAND);
4193 for (i = 0; i < 10; i++) {
4194 if ((REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
4195 status_idx) {
4197 break;
4200 msleep_interruptible(10);
4202 if (i < 10)
4203 return 0;
4205 return -ENODEV;
4208 static void
4209 bnx2_5706_serdes_timer(struct bnx2 *bp)
4211 spin_lock(&bp->phy_lock);
4212 if (bp->serdes_an_pending)
4213 bp->serdes_an_pending--;
4214 else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
4215 u32 bmcr;
4217 bp->current_interval = bp->timer_interval;
4219 bnx2_read_phy(bp, MII_BMCR, &bmcr);
4221 if (bmcr & BMCR_ANENABLE) {
4222 u32 phy1, phy2;
4224 bnx2_write_phy(bp, 0x1c, 0x7c00);
4225 bnx2_read_phy(bp, 0x1c, &phy1);
4227 bnx2_write_phy(bp, 0x17, 0x0f01);
4228 bnx2_read_phy(bp, 0x15, &phy2);
4229 bnx2_write_phy(bp, 0x17, 0x0f01);
4230 bnx2_read_phy(bp, 0x15, &phy2);
4232 if ((phy1 & 0x10) && /* SIGNAL DETECT */
4233 !(phy2 & 0x20)) { /* no CONFIG */
4235 bmcr &= ~BMCR_ANENABLE;
4236 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
4237 bnx2_write_phy(bp, MII_BMCR, bmcr);
4238 bp->phy_flags |= PHY_PARALLEL_DETECT_FLAG;
4242 else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
4243 (bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)) {
4244 u32 phy2;
4246 bnx2_write_phy(bp, 0x17, 0x0f01);
4247 bnx2_read_phy(bp, 0x15, &phy2);
4248 if (phy2 & 0x20) {
4249 u32 bmcr;
4251 bnx2_read_phy(bp, MII_BMCR, &bmcr);
4252 bmcr |= BMCR_ANENABLE;
4253 bnx2_write_phy(bp, MII_BMCR, bmcr);
4255 bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
4257 } else
4258 bp->current_interval = bp->timer_interval;
4260 spin_unlock(&bp->phy_lock);
4263 static void
4264 bnx2_5708_serdes_timer(struct bnx2 *bp)
4266 if ((bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) == 0) {
4267 bp->serdes_an_pending = 0;
4268 return;
4271 spin_lock(&bp->phy_lock);
4272 if (bp->serdes_an_pending)
4273 bp->serdes_an_pending--;
4274 else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
4275 u32 bmcr;
4277 bnx2_read_phy(bp, MII_BMCR, &bmcr);
4279 if (bmcr & BMCR_ANENABLE) {
4280 bmcr &= ~BMCR_ANENABLE;
4281 bmcr |= BMCR_FULLDPLX | BCM5708S_BMCR_FORCE_2500;
4282 bnx2_write_phy(bp, MII_BMCR, bmcr);
4283 bp->current_interval = SERDES_FORCED_TIMEOUT;
4284 } else {
4285 bmcr &= ~(BMCR_FULLDPLX | BCM5708S_BMCR_FORCE_2500);
4286 bmcr |= BMCR_ANENABLE;
4287 bnx2_write_phy(bp, MII_BMCR, bmcr);
4288 bp->serdes_an_pending = 2;
4289 bp->current_interval = bp->timer_interval;
4292 } else
4293 bp->current_interval = bp->timer_interval;
4295 spin_unlock(&bp->phy_lock);
4298 static void
4299 bnx2_timer(unsigned long data)
4301 struct bnx2 *bp = (struct bnx2 *) data;
4302 u32 msg;
4304 if (!netif_running(bp->dev))
4305 return;
4307 if (atomic_read(&bp->intr_sem) != 0)
4308 goto bnx2_restart_timer;
4310 msg = (u32) ++bp->fw_drv_pulse_wr_seq;
4311 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_PULSE_MB, msg);
4313 bp->stats_blk->stat_FwRxDrop = REG_RD_IND(bp, BNX2_FW_RX_DROP_COUNT);
4315 if (bp->phy_flags & PHY_SERDES_FLAG) {
4316 if (CHIP_NUM(bp) == CHIP_NUM_5706)
4317 bnx2_5706_serdes_timer(bp);
4318 else if (CHIP_NUM(bp) == CHIP_NUM_5708)
4319 bnx2_5708_serdes_timer(bp);
4322 bnx2_restart_timer:
4323 mod_timer(&bp->timer, jiffies + bp->current_interval);
4326 /* Called with rtnl_lock */
4327 static int
4328 bnx2_open(struct net_device *dev)
4330 struct bnx2 *bp = netdev_priv(dev);
4331 int rc;
4333 bnx2_set_power_state(bp, PCI_D0);
4334 bnx2_disable_int(bp);
4336 rc = bnx2_alloc_mem(bp);
4337 if (rc)
4338 return rc;
4340 if ((CHIP_ID(bp) != CHIP_ID_5706_A0) &&
4341 (CHIP_ID(bp) != CHIP_ID_5706_A1) &&
4342 !disable_msi) {
4344 if (pci_enable_msi(bp->pdev) == 0) {
4345 bp->flags |= USING_MSI_FLAG;
4346 rc = request_irq(bp->pdev->irq, bnx2_msi, 0, dev->name,
4347 dev);
4349 else {
4350 rc = request_irq(bp->pdev->irq, bnx2_interrupt,
4351 IRQF_SHARED, dev->name, dev);
4354 else {
4355 rc = request_irq(bp->pdev->irq, bnx2_interrupt, IRQF_SHARED,
4356 dev->name, dev);
4358 if (rc) {
4359 bnx2_free_mem(bp);
4360 return rc;
4363 rc = bnx2_init_nic(bp);
4365 if (rc) {
4366 free_irq(bp->pdev->irq, dev);
4367 if (bp->flags & USING_MSI_FLAG) {
4368 pci_disable_msi(bp->pdev);
4369 bp->flags &= ~USING_MSI_FLAG;
4371 bnx2_free_skbs(bp);
4372 bnx2_free_mem(bp);
4373 return rc;
4376 mod_timer(&bp->timer, jiffies + bp->current_interval);
4378 atomic_set(&bp->intr_sem, 0);
4380 bnx2_enable_int(bp);
4382 if (bp->flags & USING_MSI_FLAG) {
4383 /* Test MSI to make sure it is working
4384 * If MSI test fails, go back to INTx mode
4386 if (bnx2_test_intr(bp) != 0) {
4387 printk(KERN_WARNING PFX "%s: No interrupt was generated"
4388 " using MSI, switching to INTx mode. Please"
4389 " report this failure to the PCI maintainer"
4390 " and include system chipset information.\n",
4391 bp->dev->name);
4393 bnx2_disable_int(bp);
4394 free_irq(bp->pdev->irq, dev);
4395 pci_disable_msi(bp->pdev);
4396 bp->flags &= ~USING_MSI_FLAG;
4398 rc = bnx2_init_nic(bp);
4400 if (!rc) {
4401 rc = request_irq(bp->pdev->irq, bnx2_interrupt,
4402 IRQF_SHARED, dev->name, dev);
4404 if (rc) {
4405 bnx2_free_skbs(bp);
4406 bnx2_free_mem(bp);
4407 del_timer_sync(&bp->timer);
4408 return rc;
4410 bnx2_enable_int(bp);
4413 if (bp->flags & USING_MSI_FLAG) {
4414 printk(KERN_INFO PFX "%s: using MSI\n", dev->name);
4417 netif_start_queue(dev);
4419 return 0;
4422 static void
4423 bnx2_reset_task(struct work_struct *work)
4425 struct bnx2 *bp = container_of(work, struct bnx2, reset_task);
4427 if (!netif_running(bp->dev))
4428 return;
4430 bp->in_reset_task = 1;
4431 bnx2_netif_stop(bp);
4433 bnx2_init_nic(bp);
4435 atomic_set(&bp->intr_sem, 1);
4436 bnx2_netif_start(bp);
4437 bp->in_reset_task = 0;
4440 static void
4441 bnx2_tx_timeout(struct net_device *dev)
4443 struct bnx2 *bp = netdev_priv(dev);
4445 /* This allows the netif to be shutdown gracefully before resetting */
4446 schedule_work(&bp->reset_task);
4449 #ifdef BCM_VLAN
4450 /* Called with rtnl_lock */
4451 static void
4452 bnx2_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp)
4454 struct bnx2 *bp = netdev_priv(dev);
4456 bnx2_netif_stop(bp);
4458 bp->vlgrp = vlgrp;
4459 bnx2_set_rx_mode(dev);
4461 bnx2_netif_start(bp);
4464 /* Called with rtnl_lock */
4465 static void
4466 bnx2_vlan_rx_kill_vid(struct net_device *dev, uint16_t vid)
4468 struct bnx2 *bp = netdev_priv(dev);
4470 bnx2_netif_stop(bp);
4472 if (bp->vlgrp)
4473 bp->vlgrp->vlan_devices[vid] = NULL;
4474 bnx2_set_rx_mode(dev);
4476 bnx2_netif_start(bp);
4478 #endif
4480 /* Called with netif_tx_lock.
4481 * bnx2_tx_int() runs without netif_tx_lock unless it needs to call
4482 * netif_wake_queue().
4484 static int
4485 bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
4487 struct bnx2 *bp = netdev_priv(dev);
4488 dma_addr_t mapping;
4489 struct tx_bd *txbd;
4490 struct sw_bd *tx_buf;
4491 u32 len, vlan_tag_flags, last_frag, mss;
4492 u16 prod, ring_prod;
4493 int i;
4495 if (unlikely(bnx2_tx_avail(bp) < (skb_shinfo(skb)->nr_frags + 1))) {
4496 netif_stop_queue(dev);
4497 printk(KERN_ERR PFX "%s: BUG! Tx ring full when queue awake!\n",
4498 dev->name);
4500 return NETDEV_TX_BUSY;
4502 len = skb_headlen(skb);
4503 prod = bp->tx_prod;
4504 ring_prod = TX_RING_IDX(prod);
4506 vlan_tag_flags = 0;
4507 if (skb->ip_summed == CHECKSUM_PARTIAL) {
4508 vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
4511 if (bp->vlgrp != 0 && vlan_tx_tag_present(skb)) {
4512 vlan_tag_flags |=
4513 (TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
4515 if ((mss = skb_shinfo(skb)->gso_size) &&
4516 (skb->len > (bp->dev->mtu + ETH_HLEN))) {
4517 u32 tcp_opt_len, ip_tcp_len;
4519 if (skb_header_cloned(skb) &&
4520 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
4521 dev_kfree_skb(skb);
4522 return NETDEV_TX_OK;
4525 tcp_opt_len = ((skb->h.th->doff - 5) * 4);
4526 vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;
4528 tcp_opt_len = 0;
4529 if (skb->h.th->doff > 5) {
4530 tcp_opt_len = (skb->h.th->doff - 5) << 2;
4532 ip_tcp_len = (skb->nh.iph->ihl << 2) + sizeof(struct tcphdr);
4534 skb->nh.iph->check = 0;
4535 skb->nh.iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
4536 skb->h.th->check =
4537 ~csum_tcpudp_magic(skb->nh.iph->saddr,
4538 skb->nh.iph->daddr,
4539 0, IPPROTO_TCP, 0);
4541 if (tcp_opt_len || (skb->nh.iph->ihl > 5)) {
4542 vlan_tag_flags |= ((skb->nh.iph->ihl - 5) +
4543 (tcp_opt_len >> 2)) << 8;
4546 else
4548 mss = 0;
4551 mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE);
4553 tx_buf = &bp->tx_buf_ring[ring_prod];
4554 tx_buf->skb = skb;
4555 pci_unmap_addr_set(tx_buf, mapping, mapping);
4557 txbd = &bp->tx_desc_ring[ring_prod];
4559 txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
4560 txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
4561 txbd->tx_bd_mss_nbytes = len | (mss << 16);
4562 txbd->tx_bd_vlan_tag_flags = vlan_tag_flags | TX_BD_FLAGS_START;
4564 last_frag = skb_shinfo(skb)->nr_frags;
4566 for (i = 0; i < last_frag; i++) {
4567 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4569 prod = NEXT_TX_BD(prod);
4570 ring_prod = TX_RING_IDX(prod);
4571 txbd = &bp->tx_desc_ring[ring_prod];
4573 len = frag->size;
4574 mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
4575 len, PCI_DMA_TODEVICE);
4576 pci_unmap_addr_set(&bp->tx_buf_ring[ring_prod],
4577 mapping, mapping);
4579 txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
4580 txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
4581 txbd->tx_bd_mss_nbytes = len | (mss << 16);
4582 txbd->tx_bd_vlan_tag_flags = vlan_tag_flags;
4585 txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;
4587 prod = NEXT_TX_BD(prod);
4588 bp->tx_prod_bseq += skb->len;
4590 REG_WR16(bp, bp->tx_bidx_addr, prod);
4591 REG_WR(bp, bp->tx_bseq_addr, bp->tx_prod_bseq);
4593 mmiowb();
4595 bp->tx_prod = prod;
4596 dev->trans_start = jiffies;
4598 if (unlikely(bnx2_tx_avail(bp) <= MAX_SKB_FRAGS)) {
4599 netif_stop_queue(dev);
4600 if (bnx2_tx_avail(bp) > bp->tx_wake_thresh)
4601 netif_wake_queue(dev);
4604 return NETDEV_TX_OK;
4607 /* Called with rtnl_lock */
4608 static int
4609 bnx2_close(struct net_device *dev)
4611 struct bnx2 *bp = netdev_priv(dev);
4612 u32 reset_code;
4614 /* Calling flush_scheduled_work() may deadlock because
4615 * linkwatch_event() may be on the workqueue and it will try to get
4616 * the rtnl_lock which we are holding.
4618 while (bp->in_reset_task)
4619 msleep(1);
4621 bnx2_netif_stop(bp);
4622 del_timer_sync(&bp->timer);
4623 if (bp->flags & NO_WOL_FLAG)
4624 reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
4625 else if (bp->wol)
4626 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
4627 else
4628 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
4629 bnx2_reset_chip(bp, reset_code);
4630 free_irq(bp->pdev->irq, dev);
4631 if (bp->flags & USING_MSI_FLAG) {
4632 pci_disable_msi(bp->pdev);
4633 bp->flags &= ~USING_MSI_FLAG;
4635 bnx2_free_skbs(bp);
4636 bnx2_free_mem(bp);
4637 bp->link_up = 0;
4638 netif_carrier_off(bp->dev);
4639 bnx2_set_power_state(bp, PCI_D3hot);
4640 return 0;
4643 #define GET_NET_STATS64(ctr) \
4644 (unsigned long) ((unsigned long) (ctr##_hi) << 32) + \
4645 (unsigned long) (ctr##_lo)
4647 #define GET_NET_STATS32(ctr) \
4648 (ctr##_lo)
4650 #if (BITS_PER_LONG == 64)
4651 #define GET_NET_STATS GET_NET_STATS64
4652 #else
4653 #define GET_NET_STATS GET_NET_STATS32
4654 #endif
4656 static struct net_device_stats *
4657 bnx2_get_stats(struct net_device *dev)
4659 struct bnx2 *bp = netdev_priv(dev);
4660 struct statistics_block *stats_blk = bp->stats_blk;
4661 struct net_device_stats *net_stats = &bp->net_stats;
4663 if (bp->stats_blk == NULL) {
4664 return net_stats;
4666 net_stats->rx_packets =
4667 GET_NET_STATS(stats_blk->stat_IfHCInUcastPkts) +
4668 GET_NET_STATS(stats_blk->stat_IfHCInMulticastPkts) +
4669 GET_NET_STATS(stats_blk->stat_IfHCInBroadcastPkts);
4671 net_stats->tx_packets =
4672 GET_NET_STATS(stats_blk->stat_IfHCOutUcastPkts) +
4673 GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts) +
4674 GET_NET_STATS(stats_blk->stat_IfHCOutBroadcastPkts);
4676 net_stats->rx_bytes =
4677 GET_NET_STATS(stats_blk->stat_IfHCInOctets);
4679 net_stats->tx_bytes =
4680 GET_NET_STATS(stats_blk->stat_IfHCOutOctets);
4682 net_stats->multicast =
4683 GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts);
4685 net_stats->collisions =
4686 (unsigned long) stats_blk->stat_EtherStatsCollisions;
4688 net_stats->rx_length_errors =
4689 (unsigned long) (stats_blk->stat_EtherStatsUndersizePkts +
4690 stats_blk->stat_EtherStatsOverrsizePkts);
4692 net_stats->rx_over_errors =
4693 (unsigned long) stats_blk->stat_IfInMBUFDiscards;
4695 net_stats->rx_frame_errors =
4696 (unsigned long) stats_blk->stat_Dot3StatsAlignmentErrors;
4698 net_stats->rx_crc_errors =
4699 (unsigned long) stats_blk->stat_Dot3StatsFCSErrors;
4701 net_stats->rx_errors = net_stats->rx_length_errors +
4702 net_stats->rx_over_errors + net_stats->rx_frame_errors +
4703 net_stats->rx_crc_errors;
4705 net_stats->tx_aborted_errors =
4706 (unsigned long) (stats_blk->stat_Dot3StatsExcessiveCollisions +
4707 stats_blk->stat_Dot3StatsLateCollisions);
4709 if ((CHIP_NUM(bp) == CHIP_NUM_5706) ||
4710 (CHIP_ID(bp) == CHIP_ID_5708_A0))
4711 net_stats->tx_carrier_errors = 0;
4712 else {
4713 net_stats->tx_carrier_errors =
4714 (unsigned long)
4715 stats_blk->stat_Dot3StatsCarrierSenseErrors;
4718 net_stats->tx_errors =
4719 (unsigned long)
4720 stats_blk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors
4722 net_stats->tx_aborted_errors +
4723 net_stats->tx_carrier_errors;
4725 net_stats->rx_missed_errors =
4726 (unsigned long) (stats_blk->stat_IfInMBUFDiscards +
4727 stats_blk->stat_FwRxDrop);
4729 return net_stats;
4732 /* All ethtool functions called with rtnl_lock */
4734 static int
4735 bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
4737 struct bnx2 *bp = netdev_priv(dev);
4739 cmd->supported = SUPPORTED_Autoneg;
4740 if (bp->phy_flags & PHY_SERDES_FLAG) {
4741 cmd->supported |= SUPPORTED_1000baseT_Full |
4742 SUPPORTED_FIBRE;
4744 cmd->port = PORT_FIBRE;
4746 else {
4747 cmd->supported |= SUPPORTED_10baseT_Half |
4748 SUPPORTED_10baseT_Full |
4749 SUPPORTED_100baseT_Half |
4750 SUPPORTED_100baseT_Full |
4751 SUPPORTED_1000baseT_Full |
4752 SUPPORTED_TP;
4754 cmd->port = PORT_TP;
4757 cmd->advertising = bp->advertising;
4759 if (bp->autoneg & AUTONEG_SPEED) {
4760 cmd->autoneg = AUTONEG_ENABLE;
4762 else {
4763 cmd->autoneg = AUTONEG_DISABLE;
4766 if (netif_carrier_ok(dev)) {
4767 cmd->speed = bp->line_speed;
4768 cmd->duplex = bp->duplex;
4770 else {
4771 cmd->speed = -1;
4772 cmd->duplex = -1;
4775 cmd->transceiver = XCVR_INTERNAL;
4776 cmd->phy_address = bp->phy_addr;
4778 return 0;
4781 static int
4782 bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
4784 struct bnx2 *bp = netdev_priv(dev);
4785 u8 autoneg = bp->autoneg;
4786 u8 req_duplex = bp->req_duplex;
4787 u16 req_line_speed = bp->req_line_speed;
4788 u32 advertising = bp->advertising;
4790 if (cmd->autoneg == AUTONEG_ENABLE) {
4791 autoneg |= AUTONEG_SPEED;
4793 cmd->advertising &= ETHTOOL_ALL_COPPER_SPEED;
4795 /* allow advertising 1 speed */
4796 if ((cmd->advertising == ADVERTISED_10baseT_Half) ||
4797 (cmd->advertising == ADVERTISED_10baseT_Full) ||
4798 (cmd->advertising == ADVERTISED_100baseT_Half) ||
4799 (cmd->advertising == ADVERTISED_100baseT_Full)) {
4801 if (bp->phy_flags & PHY_SERDES_FLAG)
4802 return -EINVAL;
4804 advertising = cmd->advertising;
4807 else if (cmd->advertising == ADVERTISED_1000baseT_Full) {
4808 advertising = cmd->advertising;
4810 else if (cmd->advertising == ADVERTISED_1000baseT_Half) {
4811 return -EINVAL;
4813 else {
4814 if (bp->phy_flags & PHY_SERDES_FLAG) {
4815 advertising = ETHTOOL_ALL_FIBRE_SPEED;
4817 else {
4818 advertising = ETHTOOL_ALL_COPPER_SPEED;
4821 advertising |= ADVERTISED_Autoneg;
4823 else {
4824 if (bp->phy_flags & PHY_SERDES_FLAG) {
4825 if ((cmd->speed != SPEED_1000 &&
4826 cmd->speed != SPEED_2500) ||
4827 (cmd->duplex != DUPLEX_FULL))
4828 return -EINVAL;
4830 if (cmd->speed == SPEED_2500 &&
4831 !(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
4832 return -EINVAL;
4834 else if (cmd->speed == SPEED_1000) {
4835 return -EINVAL;
4837 autoneg &= ~AUTONEG_SPEED;
4838 req_line_speed = cmd->speed;
4839 req_duplex = cmd->duplex;
4840 advertising = 0;
4843 bp->autoneg = autoneg;
4844 bp->advertising = advertising;
4845 bp->req_line_speed = req_line_speed;
4846 bp->req_duplex = req_duplex;
4848 spin_lock_bh(&bp->phy_lock);
4850 bnx2_setup_phy(bp);
4852 spin_unlock_bh(&bp->phy_lock);
4854 return 0;
4857 static void
4858 bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
4860 struct bnx2 *bp = netdev_priv(dev);
4862 strcpy(info->driver, DRV_MODULE_NAME);
4863 strcpy(info->version, DRV_MODULE_VERSION);
4864 strcpy(info->bus_info, pci_name(bp->pdev));
4865 info->fw_version[0] = ((bp->fw_ver & 0xff000000) >> 24) + '0';
4866 info->fw_version[2] = ((bp->fw_ver & 0xff0000) >> 16) + '0';
4867 info->fw_version[4] = ((bp->fw_ver & 0xff00) >> 8) + '0';
4868 info->fw_version[1] = info->fw_version[3] = '.';
4869 info->fw_version[5] = 0;
4872 #define BNX2_REGDUMP_LEN (32 * 1024)
4874 static int
4875 bnx2_get_regs_len(struct net_device *dev)
4877 return BNX2_REGDUMP_LEN;
4880 static void
4881 bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
4883 u32 *p = _p, i, offset;
4884 u8 *orig_p = _p;
4885 struct bnx2 *bp = netdev_priv(dev);
4886 u32 reg_boundaries[] = { 0x0000, 0x0098, 0x0400, 0x045c,
4887 0x0800, 0x0880, 0x0c00, 0x0c10,
4888 0x0c30, 0x0d08, 0x1000, 0x101c,
4889 0x1040, 0x1048, 0x1080, 0x10a4,
4890 0x1400, 0x1490, 0x1498, 0x14f0,
4891 0x1500, 0x155c, 0x1580, 0x15dc,
4892 0x1600, 0x1658, 0x1680, 0x16d8,
4893 0x1800, 0x1820, 0x1840, 0x1854,
4894 0x1880, 0x1894, 0x1900, 0x1984,
4895 0x1c00, 0x1c0c, 0x1c40, 0x1c54,
4896 0x1c80, 0x1c94, 0x1d00, 0x1d84,
4897 0x2000, 0x2030, 0x23c0, 0x2400,
4898 0x2800, 0x2820, 0x2830, 0x2850,
4899 0x2b40, 0x2c10, 0x2fc0, 0x3058,
4900 0x3c00, 0x3c94, 0x4000, 0x4010,
4901 0x4080, 0x4090, 0x43c0, 0x4458,
4902 0x4c00, 0x4c18, 0x4c40, 0x4c54,
4903 0x4fc0, 0x5010, 0x53c0, 0x5444,
4904 0x5c00, 0x5c18, 0x5c80, 0x5c90,
4905 0x5fc0, 0x6000, 0x6400, 0x6428,
4906 0x6800, 0x6848, 0x684c, 0x6860,
4907 0x6888, 0x6910, 0x8000 };
4909 regs->version = 0;
4911 memset(p, 0, BNX2_REGDUMP_LEN);
4913 if (!netif_running(bp->dev))
4914 return;
4916 i = 0;
4917 offset = reg_boundaries[0];
4918 p += offset;
4919 while (offset < BNX2_REGDUMP_LEN) {
4920 *p++ = REG_RD(bp, offset);
4921 offset += 4;
4922 if (offset == reg_boundaries[i + 1]) {
4923 offset = reg_boundaries[i + 2];
4924 p = (u32 *) (orig_p + offset);
4925 i += 2;
4930 static void
4931 bnx2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
4933 struct bnx2 *bp = netdev_priv(dev);
4935 if (bp->flags & NO_WOL_FLAG) {
4936 wol->supported = 0;
4937 wol->wolopts = 0;
4939 else {
4940 wol->supported = WAKE_MAGIC;
4941 if (bp->wol)
4942 wol->wolopts = WAKE_MAGIC;
4943 else
4944 wol->wolopts = 0;
4946 memset(&wol->sopass, 0, sizeof(wol->sopass));
4949 static int
4950 bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
4952 struct bnx2 *bp = netdev_priv(dev);
4954 if (wol->wolopts & ~WAKE_MAGIC)
4955 return -EINVAL;
4957 if (wol->wolopts & WAKE_MAGIC) {
4958 if (bp->flags & NO_WOL_FLAG)
4959 return -EINVAL;
4961 bp->wol = 1;
4963 else {
4964 bp->wol = 0;
4966 return 0;
4969 static int
4970 bnx2_nway_reset(struct net_device *dev)
4972 struct bnx2 *bp = netdev_priv(dev);
4973 u32 bmcr;
4975 if (!(bp->autoneg & AUTONEG_SPEED)) {
4976 return -EINVAL;
4979 spin_lock_bh(&bp->phy_lock);
4981 /* Force a link down visible on the other side */
4982 if (bp->phy_flags & PHY_SERDES_FLAG) {
4983 bnx2_write_phy(bp, MII_BMCR, BMCR_LOOPBACK);
4984 spin_unlock_bh(&bp->phy_lock);
4986 msleep(20);
4988 spin_lock_bh(&bp->phy_lock);
4990 bp->current_interval = SERDES_AN_TIMEOUT;
4991 bp->serdes_an_pending = 1;
4992 mod_timer(&bp->timer, jiffies + bp->current_interval);
4995 bnx2_read_phy(bp, MII_BMCR, &bmcr);
4996 bmcr &= ~BMCR_LOOPBACK;
4997 bnx2_write_phy(bp, MII_BMCR, bmcr | BMCR_ANRESTART | BMCR_ANENABLE);
4999 spin_unlock_bh(&bp->phy_lock);
5001 return 0;
5004 static int
5005 bnx2_get_eeprom_len(struct net_device *dev)
5007 struct bnx2 *bp = netdev_priv(dev);
5009 if (bp->flash_info == NULL)
5010 return 0;
5012 return (int) bp->flash_size;
5015 static int
5016 bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
5017 u8 *eebuf)
5019 struct bnx2 *bp = netdev_priv(dev);
5020 int rc;
5022 /* parameters already validated in ethtool_get_eeprom */
5024 rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
5026 return rc;
5029 static int
5030 bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
5031 u8 *eebuf)
5033 struct bnx2 *bp = netdev_priv(dev);
5034 int rc;
5036 /* parameters already validated in ethtool_set_eeprom */
5038 rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
5040 return rc;
5043 static int
5044 bnx2_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
5046 struct bnx2 *bp = netdev_priv(dev);
5048 memset(coal, 0, sizeof(struct ethtool_coalesce));
5050 coal->rx_coalesce_usecs = bp->rx_ticks;
5051 coal->rx_max_coalesced_frames = bp->rx_quick_cons_trip;
5052 coal->rx_coalesce_usecs_irq = bp->rx_ticks_int;
5053 coal->rx_max_coalesced_frames_irq = bp->rx_quick_cons_trip_int;
5055 coal->tx_coalesce_usecs = bp->tx_ticks;
5056 coal->tx_max_coalesced_frames = bp->tx_quick_cons_trip;
5057 coal->tx_coalesce_usecs_irq = bp->tx_ticks_int;
5058 coal->tx_max_coalesced_frames_irq = bp->tx_quick_cons_trip_int;
5060 coal->stats_block_coalesce_usecs = bp->stats_ticks;
5062 return 0;
5065 static int
5066 bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
5068 struct bnx2 *bp = netdev_priv(dev);
5070 bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
5071 if (bp->rx_ticks > 0x3ff) bp->rx_ticks = 0x3ff;
5073 bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames;
5074 if (bp->rx_quick_cons_trip > 0xff) bp->rx_quick_cons_trip = 0xff;
5076 bp->rx_ticks_int = (u16) coal->rx_coalesce_usecs_irq;
5077 if (bp->rx_ticks_int > 0x3ff) bp->rx_ticks_int = 0x3ff;
5079 bp->rx_quick_cons_trip_int = (u16) coal->rx_max_coalesced_frames_irq;
5080 if (bp->rx_quick_cons_trip_int > 0xff)
5081 bp->rx_quick_cons_trip_int = 0xff;
5083 bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
5084 if (bp->tx_ticks > 0x3ff) bp->tx_ticks = 0x3ff;
5086 bp->tx_quick_cons_trip = (u16) coal->tx_max_coalesced_frames;
5087 if (bp->tx_quick_cons_trip > 0xff) bp->tx_quick_cons_trip = 0xff;
5089 bp->tx_ticks_int = (u16) coal->tx_coalesce_usecs_irq;
5090 if (bp->tx_ticks_int > 0x3ff) bp->tx_ticks_int = 0x3ff;
5092 bp->tx_quick_cons_trip_int = (u16) coal->tx_max_coalesced_frames_irq;
5093 if (bp->tx_quick_cons_trip_int > 0xff) bp->tx_quick_cons_trip_int =
5094 0xff;
5096 bp->stats_ticks = coal->stats_block_coalesce_usecs;
5097 if (bp->stats_ticks > 0xffff00) bp->stats_ticks = 0xffff00;
5098 bp->stats_ticks &= 0xffff00;
5100 if (netif_running(bp->dev)) {
5101 bnx2_netif_stop(bp);
5102 bnx2_init_nic(bp);
5103 bnx2_netif_start(bp);
5106 return 0;
5109 static void
5110 bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
5112 struct bnx2 *bp = netdev_priv(dev);
5114 ering->rx_max_pending = MAX_TOTAL_RX_DESC_CNT;
5115 ering->rx_mini_max_pending = 0;
5116 ering->rx_jumbo_max_pending = 0;
5118 ering->rx_pending = bp->rx_ring_size;
5119 ering->rx_mini_pending = 0;
5120 ering->rx_jumbo_pending = 0;
5122 ering->tx_max_pending = MAX_TX_DESC_CNT;
5123 ering->tx_pending = bp->tx_ring_size;
5126 static int
5127 bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
5129 struct bnx2 *bp = netdev_priv(dev);
5131 if ((ering->rx_pending > MAX_TOTAL_RX_DESC_CNT) ||
5132 (ering->tx_pending > MAX_TX_DESC_CNT) ||
5133 (ering->tx_pending <= MAX_SKB_FRAGS)) {
5135 return -EINVAL;
5137 if (netif_running(bp->dev)) {
5138 bnx2_netif_stop(bp);
5139 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
5140 bnx2_free_skbs(bp);
5141 bnx2_free_mem(bp);
5144 bnx2_set_rx_ring_size(bp, ering->rx_pending);
5145 bp->tx_ring_size = ering->tx_pending;
5147 if (netif_running(bp->dev)) {
5148 int rc;
5150 rc = bnx2_alloc_mem(bp);
5151 if (rc)
5152 return rc;
5153 bnx2_init_nic(bp);
5154 bnx2_netif_start(bp);
5157 return 0;
5160 static void
5161 bnx2_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
5163 struct bnx2 *bp = netdev_priv(dev);
5165 epause->autoneg = ((bp->autoneg & AUTONEG_FLOW_CTRL) != 0);
5166 epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) != 0);
5167 epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) != 0);
5170 static int
5171 bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
5173 struct bnx2 *bp = netdev_priv(dev);
5175 bp->req_flow_ctrl = 0;
5176 if (epause->rx_pause)
5177 bp->req_flow_ctrl |= FLOW_CTRL_RX;
5178 if (epause->tx_pause)
5179 bp->req_flow_ctrl |= FLOW_CTRL_TX;
5181 if (epause->autoneg) {
5182 bp->autoneg |= AUTONEG_FLOW_CTRL;
5184 else {
5185 bp->autoneg &= ~AUTONEG_FLOW_CTRL;
5188 spin_lock_bh(&bp->phy_lock);
5190 bnx2_setup_phy(bp);
5192 spin_unlock_bh(&bp->phy_lock);
5194 return 0;
5197 static u32
5198 bnx2_get_rx_csum(struct net_device *dev)
5200 struct bnx2 *bp = netdev_priv(dev);
5202 return bp->rx_csum;
5205 static int
5206 bnx2_set_rx_csum(struct net_device *dev, u32 data)
5208 struct bnx2 *bp = netdev_priv(dev);
5210 bp->rx_csum = data;
5211 return 0;
5214 static int
5215 bnx2_set_tso(struct net_device *dev, u32 data)
5217 if (data)
5218 dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
5219 else
5220 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
5221 return 0;
5224 #define BNX2_NUM_STATS 46
5226 static struct {
5227 char string[ETH_GSTRING_LEN];
5228 } bnx2_stats_str_arr[BNX2_NUM_STATS] = {
5229 { "rx_bytes" },
5230 { "rx_error_bytes" },
5231 { "tx_bytes" },
5232 { "tx_error_bytes" },
5233 { "rx_ucast_packets" },
5234 { "rx_mcast_packets" },
5235 { "rx_bcast_packets" },
5236 { "tx_ucast_packets" },
5237 { "tx_mcast_packets" },
5238 { "tx_bcast_packets" },
5239 { "tx_mac_errors" },
5240 { "tx_carrier_errors" },
5241 { "rx_crc_errors" },
5242 { "rx_align_errors" },
5243 { "tx_single_collisions" },
5244 { "tx_multi_collisions" },
5245 { "tx_deferred" },
5246 { "tx_excess_collisions" },
5247 { "tx_late_collisions" },
5248 { "tx_total_collisions" },
5249 { "rx_fragments" },
5250 { "rx_jabbers" },
5251 { "rx_undersize_packets" },
5252 { "rx_oversize_packets" },
5253 { "rx_64_byte_packets" },
5254 { "rx_65_to_127_byte_packets" },
5255 { "rx_128_to_255_byte_packets" },
5256 { "rx_256_to_511_byte_packets" },
5257 { "rx_512_to_1023_byte_packets" },
5258 { "rx_1024_to_1522_byte_packets" },
5259 { "rx_1523_to_9022_byte_packets" },
5260 { "tx_64_byte_packets" },
5261 { "tx_65_to_127_byte_packets" },
5262 { "tx_128_to_255_byte_packets" },
5263 { "tx_256_to_511_byte_packets" },
5264 { "tx_512_to_1023_byte_packets" },
5265 { "tx_1024_to_1522_byte_packets" },
5266 { "tx_1523_to_9022_byte_packets" },
5267 { "rx_xon_frames" },
5268 { "rx_xoff_frames" },
5269 { "tx_xon_frames" },
5270 { "tx_xoff_frames" },
5271 { "rx_mac_ctrl_frames" },
5272 { "rx_filtered_packets" },
5273 { "rx_discards" },
5274 { "rx_fw_discards" },
5277 #define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4)
5279 static const unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
5280 STATS_OFFSET32(stat_IfHCInOctets_hi),
5281 STATS_OFFSET32(stat_IfHCInBadOctets_hi),
5282 STATS_OFFSET32(stat_IfHCOutOctets_hi),
5283 STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
5284 STATS_OFFSET32(stat_IfHCInUcastPkts_hi),
5285 STATS_OFFSET32(stat_IfHCInMulticastPkts_hi),
5286 STATS_OFFSET32(stat_IfHCInBroadcastPkts_hi),
5287 STATS_OFFSET32(stat_IfHCOutUcastPkts_hi),
5288 STATS_OFFSET32(stat_IfHCOutMulticastPkts_hi),
5289 STATS_OFFSET32(stat_IfHCOutBroadcastPkts_hi),
5290 STATS_OFFSET32(stat_emac_tx_stat_dot3statsinternalmactransmiterrors),
5291 STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
5292 STATS_OFFSET32(stat_Dot3StatsFCSErrors),
5293 STATS_OFFSET32(stat_Dot3StatsAlignmentErrors),
5294 STATS_OFFSET32(stat_Dot3StatsSingleCollisionFrames),
5295 STATS_OFFSET32(stat_Dot3StatsMultipleCollisionFrames),
5296 STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
5297 STATS_OFFSET32(stat_Dot3StatsExcessiveCollisions),
5298 STATS_OFFSET32(stat_Dot3StatsLateCollisions),
5299 STATS_OFFSET32(stat_EtherStatsCollisions),
5300 STATS_OFFSET32(stat_EtherStatsFragments),
5301 STATS_OFFSET32(stat_EtherStatsJabbers),
5302 STATS_OFFSET32(stat_EtherStatsUndersizePkts),
5303 STATS_OFFSET32(stat_EtherStatsOverrsizePkts),
5304 STATS_OFFSET32(stat_EtherStatsPktsRx64Octets),
5305 STATS_OFFSET32(stat_EtherStatsPktsRx65Octetsto127Octets),
5306 STATS_OFFSET32(stat_EtherStatsPktsRx128Octetsto255Octets),
5307 STATS_OFFSET32(stat_EtherStatsPktsRx256Octetsto511Octets),
5308 STATS_OFFSET32(stat_EtherStatsPktsRx512Octetsto1023Octets),
5309 STATS_OFFSET32(stat_EtherStatsPktsRx1024Octetsto1522Octets),
5310 STATS_OFFSET32(stat_EtherStatsPktsRx1523Octetsto9022Octets),
5311 STATS_OFFSET32(stat_EtherStatsPktsTx64Octets),
5312 STATS_OFFSET32(stat_EtherStatsPktsTx65Octetsto127Octets),
5313 STATS_OFFSET32(stat_EtherStatsPktsTx128Octetsto255Octets),
5314 STATS_OFFSET32(stat_EtherStatsPktsTx256Octetsto511Octets),
5315 STATS_OFFSET32(stat_EtherStatsPktsTx512Octetsto1023Octets),
5316 STATS_OFFSET32(stat_EtherStatsPktsTx1024Octetsto1522Octets),
5317 STATS_OFFSET32(stat_EtherStatsPktsTx1523Octetsto9022Octets),
5318 STATS_OFFSET32(stat_XonPauseFramesReceived),
5319 STATS_OFFSET32(stat_XoffPauseFramesReceived),
5320 STATS_OFFSET32(stat_OutXonSent),
5321 STATS_OFFSET32(stat_OutXoffSent),
5322 STATS_OFFSET32(stat_MacControlFramesReceived),
5323 STATS_OFFSET32(stat_IfInFramesL2FilterDiscards),
5324 STATS_OFFSET32(stat_IfInMBUFDiscards),
5325 STATS_OFFSET32(stat_FwRxDrop),
5328 /* stat_IfHCInBadOctets and stat_Dot3StatsCarrierSenseErrors are
5329 * skipped because of errata.
5331 static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
5332 8,0,8,8,8,8,8,8,8,8,
5333 4,0,4,4,4,4,4,4,4,4,
5334 4,4,4,4,4,4,4,4,4,4,
5335 4,4,4,4,4,4,4,4,4,4,
5336 4,4,4,4,4,4,
5339 static u8 bnx2_5708_stats_len_arr[BNX2_NUM_STATS] = {
5340 8,0,8,8,8,8,8,8,8,8,
5341 4,4,4,4,4,4,4,4,4,4,
5342 4,4,4,4,4,4,4,4,4,4,
5343 4,4,4,4,4,4,4,4,4,4,
5344 4,4,4,4,4,4,
5347 #define BNX2_NUM_TESTS 6
5349 static struct {
5350 char string[ETH_GSTRING_LEN];
5351 } bnx2_tests_str_arr[BNX2_NUM_TESTS] = {
5352 { "register_test (offline)" },
5353 { "memory_test (offline)" },
5354 { "loopback_test (offline)" },
5355 { "nvram_test (online)" },
5356 { "interrupt_test (online)" },
5357 { "link_test (online)" },
5360 static int
5361 bnx2_self_test_count(struct net_device *dev)
5363 return BNX2_NUM_TESTS;
5366 static void
5367 bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
5369 struct bnx2 *bp = netdev_priv(dev);
5371 memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS);
5372 if (etest->flags & ETH_TEST_FL_OFFLINE) {
5373 int i;
5375 bnx2_netif_stop(bp);
5376 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
5377 bnx2_free_skbs(bp);
5379 if (bnx2_test_registers(bp) != 0) {
5380 buf[0] = 1;
5381 etest->flags |= ETH_TEST_FL_FAILED;
5383 if (bnx2_test_memory(bp) != 0) {
5384 buf[1] = 1;
5385 etest->flags |= ETH_TEST_FL_FAILED;
5387 if ((buf[2] = bnx2_test_loopback(bp)) != 0)
5388 etest->flags |= ETH_TEST_FL_FAILED;
5390 if (!netif_running(bp->dev)) {
5391 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
5393 else {
5394 bnx2_init_nic(bp);
5395 bnx2_netif_start(bp);
5398 /* wait for link up */
5399 for (i = 0; i < 7; i++) {
5400 if (bp->link_up)
5401 break;
5402 msleep_interruptible(1000);
5406 if (bnx2_test_nvram(bp) != 0) {
5407 buf[3] = 1;
5408 etest->flags |= ETH_TEST_FL_FAILED;
5410 if (bnx2_test_intr(bp) != 0) {
5411 buf[4] = 1;
5412 etest->flags |= ETH_TEST_FL_FAILED;
5415 if (bnx2_test_link(bp) != 0) {
5416 buf[5] = 1;
5417 etest->flags |= ETH_TEST_FL_FAILED;
5422 static void
5423 bnx2_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
5425 switch (stringset) {
5426 case ETH_SS_STATS:
5427 memcpy(buf, bnx2_stats_str_arr,
5428 sizeof(bnx2_stats_str_arr));
5429 break;
5430 case ETH_SS_TEST:
5431 memcpy(buf, bnx2_tests_str_arr,
5432 sizeof(bnx2_tests_str_arr));
5433 break;
5437 static int
5438 bnx2_get_stats_count(struct net_device *dev)
5440 return BNX2_NUM_STATS;
5443 static void
5444 bnx2_get_ethtool_stats(struct net_device *dev,
5445 struct ethtool_stats *stats, u64 *buf)
5447 struct bnx2 *bp = netdev_priv(dev);
5448 int i;
5449 u32 *hw_stats = (u32 *) bp->stats_blk;
5450 u8 *stats_len_arr = NULL;
5452 if (hw_stats == NULL) {
5453 memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS);
5454 return;
5457 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
5458 (CHIP_ID(bp) == CHIP_ID_5706_A1) ||
5459 (CHIP_ID(bp) == CHIP_ID_5706_A2) ||
5460 (CHIP_ID(bp) == CHIP_ID_5708_A0))
5461 stats_len_arr = bnx2_5706_stats_len_arr;
5462 else
5463 stats_len_arr = bnx2_5708_stats_len_arr;
5465 for (i = 0; i < BNX2_NUM_STATS; i++) {
5466 if (stats_len_arr[i] == 0) {
5467 /* skip this counter */
5468 buf[i] = 0;
5469 continue;
5471 if (stats_len_arr[i] == 4) {
5472 /* 4-byte counter */
5473 buf[i] = (u64)
5474 *(hw_stats + bnx2_stats_offset_arr[i]);
5475 continue;
5477 /* 8-byte counter */
5478 buf[i] = (((u64) *(hw_stats +
5479 bnx2_stats_offset_arr[i])) << 32) +
5480 *(hw_stats + bnx2_stats_offset_arr[i] + 1);
5484 static int
5485 bnx2_phys_id(struct net_device *dev, u32 data)
5487 struct bnx2 *bp = netdev_priv(dev);
5488 int i;
5489 u32 save;
5491 if (data == 0)
5492 data = 2;
5494 save = REG_RD(bp, BNX2_MISC_CFG);
5495 REG_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);
5497 for (i = 0; i < (data * 2); i++) {
5498 if ((i % 2) == 0) {
5499 REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE);
5501 else {
5502 REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE |
5503 BNX2_EMAC_LED_1000MB_OVERRIDE |
5504 BNX2_EMAC_LED_100MB_OVERRIDE |
5505 BNX2_EMAC_LED_10MB_OVERRIDE |
5506 BNX2_EMAC_LED_TRAFFIC_OVERRIDE |
5507 BNX2_EMAC_LED_TRAFFIC);
5509 msleep_interruptible(500);
5510 if (signal_pending(current))
5511 break;
5513 REG_WR(bp, BNX2_EMAC_LED, 0);
5514 REG_WR(bp, BNX2_MISC_CFG, save);
5515 return 0;
5518 static const struct ethtool_ops bnx2_ethtool_ops = {
5519 .get_settings = bnx2_get_settings,
5520 .set_settings = bnx2_set_settings,
5521 .get_drvinfo = bnx2_get_drvinfo,
5522 .get_regs_len = bnx2_get_regs_len,
5523 .get_regs = bnx2_get_regs,
5524 .get_wol = bnx2_get_wol,
5525 .set_wol = bnx2_set_wol,
5526 .nway_reset = bnx2_nway_reset,
5527 .get_link = ethtool_op_get_link,
5528 .get_eeprom_len = bnx2_get_eeprom_len,
5529 .get_eeprom = bnx2_get_eeprom,
5530 .set_eeprom = bnx2_set_eeprom,
5531 .get_coalesce = bnx2_get_coalesce,
5532 .set_coalesce = bnx2_set_coalesce,
5533 .get_ringparam = bnx2_get_ringparam,
5534 .set_ringparam = bnx2_set_ringparam,
5535 .get_pauseparam = bnx2_get_pauseparam,
5536 .set_pauseparam = bnx2_set_pauseparam,
5537 .get_rx_csum = bnx2_get_rx_csum,
5538 .set_rx_csum = bnx2_set_rx_csum,
5539 .get_tx_csum = ethtool_op_get_tx_csum,
5540 .set_tx_csum = ethtool_op_set_tx_csum,
5541 .get_sg = ethtool_op_get_sg,
5542 .set_sg = ethtool_op_set_sg,
5543 .get_tso = ethtool_op_get_tso,
5544 .set_tso = bnx2_set_tso,
5545 .self_test_count = bnx2_self_test_count,
5546 .self_test = bnx2_self_test,
5547 .get_strings = bnx2_get_strings,
5548 .phys_id = bnx2_phys_id,
5549 .get_stats_count = bnx2_get_stats_count,
5550 .get_ethtool_stats = bnx2_get_ethtool_stats,
5551 .get_perm_addr = ethtool_op_get_perm_addr,
5554 /* Called with rtnl_lock */
5555 static int
5556 bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
5558 struct mii_ioctl_data *data = if_mii(ifr);
5559 struct bnx2 *bp = netdev_priv(dev);
5560 int err;
5562 switch(cmd) {
5563 case SIOCGMIIPHY:
5564 data->phy_id = bp->phy_addr;
5566 /* fallthru */
5567 case SIOCGMIIREG: {
5568 u32 mii_regval;
5570 spin_lock_bh(&bp->phy_lock);
5571 err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval);
5572 spin_unlock_bh(&bp->phy_lock);
5574 data->val_out = mii_regval;
5576 return err;
5579 case SIOCSMIIREG:
5580 if (!capable(CAP_NET_ADMIN))
5581 return -EPERM;
5583 spin_lock_bh(&bp->phy_lock);
5584 err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in);
5585 spin_unlock_bh(&bp->phy_lock);
5587 return err;
5589 default:
5590 /* do nothing */
5591 break;
5593 return -EOPNOTSUPP;
5596 /* Called with rtnl_lock */
5597 static int
5598 bnx2_change_mac_addr(struct net_device *dev, void *p)
5600 struct sockaddr *addr = p;
5601 struct bnx2 *bp = netdev_priv(dev);
5603 if (!is_valid_ether_addr(addr->sa_data))
5604 return -EINVAL;
5606 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
5607 if (netif_running(dev))
5608 bnx2_set_mac_addr(bp);
5610 return 0;
5613 /* Called with rtnl_lock */
5614 static int
5615 bnx2_change_mtu(struct net_device *dev, int new_mtu)
5617 struct bnx2 *bp = netdev_priv(dev);
5619 if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) ||
5620 ((new_mtu + ETH_HLEN) < MIN_ETHERNET_PACKET_SIZE))
5621 return -EINVAL;
5623 dev->mtu = new_mtu;
5624 if (netif_running(dev)) {
5625 bnx2_netif_stop(bp);
5627 bnx2_init_nic(bp);
5629 bnx2_netif_start(bp);
5631 return 0;
5634 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
5635 static void
5636 poll_bnx2(struct net_device *dev)
5638 struct bnx2 *bp = netdev_priv(dev);
5640 disable_irq(bp->pdev->irq);
5641 bnx2_interrupt(bp->pdev->irq, dev);
5642 enable_irq(bp->pdev->irq);
5644 #endif
5646 static void __devinit
5647 bnx2_get_5709_media(struct bnx2 *bp)
5649 u32 val = REG_RD(bp, BNX2_MISC_DUAL_MEDIA_CTRL);
5650 u32 bond_id = val & BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID;
5651 u32 strap;
5653 if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_C)
5654 return;
5655 else if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_S) {
5656 bp->phy_flags |= PHY_SERDES_FLAG;
5657 return;
5660 if (val & BNX2_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE)
5661 strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL) >> 21;
5662 else
5663 strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP) >> 8;
5665 if (PCI_FUNC(bp->pdev->devfn) == 0) {
5666 switch (strap) {
5667 case 0x4:
5668 case 0x5:
5669 case 0x6:
5670 bp->phy_flags |= PHY_SERDES_FLAG;
5671 return;
5673 } else {
5674 switch (strap) {
5675 case 0x1:
5676 case 0x2:
5677 case 0x4:
5678 bp->phy_flags |= PHY_SERDES_FLAG;
5679 return;
5684 static int __devinit
5685 bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
5687 struct bnx2 *bp;
5688 unsigned long mem_len;
5689 int rc;
5690 u32 reg;
5692 SET_MODULE_OWNER(dev);
5693 SET_NETDEV_DEV(dev, &pdev->dev);
5694 bp = netdev_priv(dev);
5696 bp->flags = 0;
5697 bp->phy_flags = 0;
5699 /* enable device (incl. PCI PM wakeup), and bus-mastering */
5700 rc = pci_enable_device(pdev);
5701 if (rc) {
5702 dev_err(&pdev->dev, "Cannot enable PCI device, aborting.");
5703 goto err_out;
5706 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
5707 dev_err(&pdev->dev,
5708 "Cannot find PCI device base address, aborting.\n");
5709 rc = -ENODEV;
5710 goto err_out_disable;
5713 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
5714 if (rc) {
5715 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting.\n");
5716 goto err_out_disable;
5719 pci_set_master(pdev);
5721 bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
5722 if (bp->pm_cap == 0) {
5723 dev_err(&pdev->dev,
5724 "Cannot find power management capability, aborting.\n");
5725 rc = -EIO;
5726 goto err_out_release;
5729 if (pci_set_dma_mask(pdev, DMA_64BIT_MASK) == 0) {
5730 bp->flags |= USING_DAC_FLAG;
5731 if (pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK) != 0) {
5732 dev_err(&pdev->dev,
5733 "pci_set_consistent_dma_mask failed, aborting.\n");
5734 rc = -EIO;
5735 goto err_out_release;
5738 else if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) != 0) {
5739 dev_err(&pdev->dev, "System does not support DMA, aborting.\n");
5740 rc = -EIO;
5741 goto err_out_release;
5744 bp->dev = dev;
5745 bp->pdev = pdev;
5747 spin_lock_init(&bp->phy_lock);
5748 INIT_WORK(&bp->reset_task, bnx2_reset_task);
5750 dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0);
5751 mem_len = MB_GET_CID_ADDR(TX_TSS_CID + 1);
5752 dev->mem_end = dev->mem_start + mem_len;
5753 dev->irq = pdev->irq;
5755 bp->regview = ioremap_nocache(dev->base_addr, mem_len);
5757 if (!bp->regview) {
5758 dev_err(&pdev->dev, "Cannot map register space, aborting.\n");
5759 rc = -ENOMEM;
5760 goto err_out_release;
5763 /* Configure byte swap and enable write to the reg_window registers.
5764 * Rely on CPU to do target byte swapping on big endian systems
5765 * The chip's target access swapping will not swap all accesses
5767 pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG,
5768 BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
5769 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);
5771 bnx2_set_power_state(bp, PCI_D0);
5773 bp->chip_id = REG_RD(bp, BNX2_MISC_ID);
5775 if (CHIP_NUM(bp) != CHIP_NUM_5709) {
5776 bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
5777 if (bp->pcix_cap == 0) {
5778 dev_err(&pdev->dev,
5779 "Cannot find PCIX capability, aborting.\n");
5780 rc = -EIO;
5781 goto err_out_unmap;
5785 /* Get bus information. */
5786 reg = REG_RD(bp, BNX2_PCICFG_MISC_STATUS);
5787 if (reg & BNX2_PCICFG_MISC_STATUS_PCIX_DET) {
5788 u32 clkreg;
5790 bp->flags |= PCIX_FLAG;
5792 clkreg = REG_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS);
5794 clkreg &= BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
5795 switch (clkreg) {
5796 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
5797 bp->bus_speed_mhz = 133;
5798 break;
5800 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
5801 bp->bus_speed_mhz = 100;
5802 break;
5804 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
5805 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
5806 bp->bus_speed_mhz = 66;
5807 break;
5809 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
5810 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
5811 bp->bus_speed_mhz = 50;
5812 break;
5814 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
5815 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
5816 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
5817 bp->bus_speed_mhz = 33;
5818 break;
5821 else {
5822 if (reg & BNX2_PCICFG_MISC_STATUS_M66EN)
5823 bp->bus_speed_mhz = 66;
5824 else
5825 bp->bus_speed_mhz = 33;
5828 if (reg & BNX2_PCICFG_MISC_STATUS_32BIT_DET)
5829 bp->flags |= PCI_32BIT_FLAG;
5831 /* 5706A0 may falsely detect SERR and PERR. */
5832 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
5833 reg = REG_RD(bp, PCI_COMMAND);
5834 reg &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
5835 REG_WR(bp, PCI_COMMAND, reg);
5837 else if ((CHIP_ID(bp) == CHIP_ID_5706_A1) &&
5838 !(bp->flags & PCIX_FLAG)) {
5840 dev_err(&pdev->dev,
5841 "5706 A1 can only be used in a PCIX bus, aborting.\n");
5842 goto err_out_unmap;
5845 bnx2_init_nvram(bp);
5847 reg = REG_RD_IND(bp, BNX2_SHM_HDR_SIGNATURE);
5849 if ((reg & BNX2_SHM_HDR_SIGNATURE_SIG_MASK) ==
5850 BNX2_SHM_HDR_SIGNATURE_SIG) {
5851 u32 off = PCI_FUNC(pdev->devfn) << 2;
5853 bp->shmem_base = REG_RD_IND(bp, BNX2_SHM_HDR_ADDR_0 + off);
5854 } else
5855 bp->shmem_base = HOST_VIEW_SHMEM_BASE;
5857 /* Get the permanent MAC address. First we need to make sure the
5858 * firmware is actually running.
5860 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_DEV_INFO_SIGNATURE);
5862 if ((reg & BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
5863 BNX2_DEV_INFO_SIGNATURE_MAGIC) {
5864 dev_err(&pdev->dev, "Firmware not running, aborting.\n");
5865 rc = -ENODEV;
5866 goto err_out_unmap;
5869 bp->fw_ver = REG_RD_IND(bp, bp->shmem_base + BNX2_DEV_INFO_BC_REV);
5871 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_MAC_UPPER);
5872 bp->mac_addr[0] = (u8) (reg >> 8);
5873 bp->mac_addr[1] = (u8) reg;
5875 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_MAC_LOWER);
5876 bp->mac_addr[2] = (u8) (reg >> 24);
5877 bp->mac_addr[3] = (u8) (reg >> 16);
5878 bp->mac_addr[4] = (u8) (reg >> 8);
5879 bp->mac_addr[5] = (u8) reg;
5881 bp->tx_ring_size = MAX_TX_DESC_CNT;
5882 bnx2_set_rx_ring_size(bp, 255);
5884 bp->rx_csum = 1;
5886 bp->rx_offset = sizeof(struct l2_fhdr) + 2;
5888 bp->tx_quick_cons_trip_int = 20;
5889 bp->tx_quick_cons_trip = 20;
5890 bp->tx_ticks_int = 80;
5891 bp->tx_ticks = 80;
5893 bp->rx_quick_cons_trip_int = 6;
5894 bp->rx_quick_cons_trip = 6;
5895 bp->rx_ticks_int = 18;
5896 bp->rx_ticks = 18;
5898 bp->stats_ticks = 1000000 & 0xffff00;
5900 bp->timer_interval = HZ;
5901 bp->current_interval = HZ;
5903 bp->phy_addr = 1;
5905 /* Disable WOL support if we are running on a SERDES chip. */
5906 if (CHIP_NUM(bp) == CHIP_NUM_5709)
5907 bnx2_get_5709_media(bp);
5908 else if (CHIP_BOND_ID(bp) & CHIP_BOND_ID_SERDES_BIT)
5909 bp->phy_flags |= PHY_SERDES_FLAG;
5911 if (bp->phy_flags & PHY_SERDES_FLAG) {
5912 bp->flags |= NO_WOL_FLAG;
5913 if (CHIP_NUM(bp) != CHIP_NUM_5706) {
5914 bp->phy_addr = 2;
5915 reg = REG_RD_IND(bp, bp->shmem_base +
5916 BNX2_SHARED_HW_CFG_CONFIG);
5917 if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G)
5918 bp->phy_flags |= PHY_2_5G_CAPABLE_FLAG;
5920 } else if (CHIP_NUM(bp) == CHIP_NUM_5706 ||
5921 CHIP_NUM(bp) == CHIP_NUM_5708)
5922 bp->phy_flags |= PHY_CRC_FIX_FLAG;
5923 else if (CHIP_ID(bp) == CHIP_ID_5709_A0)
5924 bp->phy_flags |= PHY_DIS_EARLY_DAC_FLAG;
5926 if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
5927 (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
5928 (CHIP_ID(bp) == CHIP_ID_5708_B1))
5929 bp->flags |= NO_WOL_FLAG;
5931 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
5932 bp->tx_quick_cons_trip_int =
5933 bp->tx_quick_cons_trip;
5934 bp->tx_ticks_int = bp->tx_ticks;
5935 bp->rx_quick_cons_trip_int =
5936 bp->rx_quick_cons_trip;
5937 bp->rx_ticks_int = bp->rx_ticks;
5938 bp->comp_prod_trip_int = bp->comp_prod_trip;
5939 bp->com_ticks_int = bp->com_ticks;
5940 bp->cmd_ticks_int = bp->cmd_ticks;
5943 /* Disable MSI on 5706 if AMD 8132 bridge is found.
5945 * MSI is defined to be 32-bit write. The 5706 does 64-bit MSI writes
5946 * with byte enables disabled on the unused 32-bit word. This is legal
5947 * but causes problems on the AMD 8132 which will eventually stop
5948 * responding after a while.
5950 * AMD believes this incompatibility is unique to the 5706, and
5951 * prefers to locally disable MSI rather than globally disabling it
5952 * using pci_msi_quirk.
5954 if (CHIP_NUM(bp) == CHIP_NUM_5706 && disable_msi == 0) {
5955 struct pci_dev *amd_8132 = NULL;
5957 while ((amd_8132 = pci_get_device(PCI_VENDOR_ID_AMD,
5958 PCI_DEVICE_ID_AMD_8132_BRIDGE,
5959 amd_8132))) {
5960 u8 rev;
5962 pci_read_config_byte(amd_8132, PCI_REVISION_ID, &rev);
5963 if (rev >= 0x10 && rev <= 0x13) {
5964 disable_msi = 1;
5965 pci_dev_put(amd_8132);
5966 break;
5971 bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
5972 bp->req_line_speed = 0;
5973 if (bp->phy_flags & PHY_SERDES_FLAG) {
5974 bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;
5976 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG);
5977 reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
5978 if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
5979 bp->autoneg = 0;
5980 bp->req_line_speed = bp->line_speed = SPEED_1000;
5981 bp->req_duplex = DUPLEX_FULL;
5984 else {
5985 bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
5988 bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
5990 init_timer(&bp->timer);
5991 bp->timer.expires = RUN_AT(bp->timer_interval);
5992 bp->timer.data = (unsigned long) bp;
5993 bp->timer.function = bnx2_timer;
5995 return 0;
5997 err_out_unmap:
5998 if (bp->regview) {
5999 iounmap(bp->regview);
6000 bp->regview = NULL;
6003 err_out_release:
6004 pci_release_regions(pdev);
6006 err_out_disable:
6007 pci_disable_device(pdev);
6008 pci_set_drvdata(pdev, NULL);
6010 err_out:
6011 return rc;
6014 static int __devinit
6015 bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
6017 static int version_printed = 0;
6018 struct net_device *dev = NULL;
6019 struct bnx2 *bp;
6020 int rc, i;
6022 if (version_printed++ == 0)
6023 printk(KERN_INFO "%s", version);
6025 /* dev zeroed in init_etherdev */
6026 dev = alloc_etherdev(sizeof(*bp));
6028 if (!dev)
6029 return -ENOMEM;
6031 rc = bnx2_init_board(pdev, dev);
6032 if (rc < 0) {
6033 free_netdev(dev);
6034 return rc;
6037 dev->open = bnx2_open;
6038 dev->hard_start_xmit = bnx2_start_xmit;
6039 dev->stop = bnx2_close;
6040 dev->get_stats = bnx2_get_stats;
6041 dev->set_multicast_list = bnx2_set_rx_mode;
6042 dev->do_ioctl = bnx2_ioctl;
6043 dev->set_mac_address = bnx2_change_mac_addr;
6044 dev->change_mtu = bnx2_change_mtu;
6045 dev->tx_timeout = bnx2_tx_timeout;
6046 dev->watchdog_timeo = TX_TIMEOUT;
6047 #ifdef BCM_VLAN
6048 dev->vlan_rx_register = bnx2_vlan_rx_register;
6049 dev->vlan_rx_kill_vid = bnx2_vlan_rx_kill_vid;
6050 #endif
6051 dev->poll = bnx2_poll;
6052 dev->ethtool_ops = &bnx2_ethtool_ops;
6053 dev->weight = 64;
6055 bp = netdev_priv(dev);
6057 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
6058 dev->poll_controller = poll_bnx2;
6059 #endif
6061 if ((rc = register_netdev(dev))) {
6062 dev_err(&pdev->dev, "Cannot register net device\n");
6063 if (bp->regview)
6064 iounmap(bp->regview);
6065 pci_release_regions(pdev);
6066 pci_disable_device(pdev);
6067 pci_set_drvdata(pdev, NULL);
6068 free_netdev(dev);
6069 return rc;
6072 pci_set_drvdata(pdev, dev);
6074 memcpy(dev->dev_addr, bp->mac_addr, 6);
6075 memcpy(dev->perm_addr, bp->mac_addr, 6);
6076 bp->name = board_info[ent->driver_data].name,
6077 printk(KERN_INFO "%s: %s (%c%d) PCI%s %s %dMHz found at mem %lx, "
6078 "IRQ %d, ",
6079 dev->name,
6080 bp->name,
6081 ((CHIP_ID(bp) & 0xf000) >> 12) + 'A',
6082 ((CHIP_ID(bp) & 0x0ff0) >> 4),
6083 ((bp->flags & PCIX_FLAG) ? "-X" : ""),
6084 ((bp->flags & PCI_32BIT_FLAG) ? "32-bit" : "64-bit"),
6085 bp->bus_speed_mhz,
6086 dev->base_addr,
6087 bp->pdev->irq);
6089 printk("node addr ");
6090 for (i = 0; i < 6; i++)
6091 printk("%2.2x", dev->dev_addr[i]);
6092 printk("\n");
6094 dev->features |= NETIF_F_SG;
6095 if (bp->flags & USING_DAC_FLAG)
6096 dev->features |= NETIF_F_HIGHDMA;
6097 dev->features |= NETIF_F_IP_CSUM;
6098 #ifdef BCM_VLAN
6099 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
6100 #endif
6101 dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
6103 netif_carrier_off(bp->dev);
6105 return 0;
6108 static void __devexit
6109 bnx2_remove_one(struct pci_dev *pdev)
6111 struct net_device *dev = pci_get_drvdata(pdev);
6112 struct bnx2 *bp = netdev_priv(dev);
6114 flush_scheduled_work();
6116 unregister_netdev(dev);
6118 if (bp->regview)
6119 iounmap(bp->regview);
6121 free_netdev(dev);
6122 pci_release_regions(pdev);
6123 pci_disable_device(pdev);
6124 pci_set_drvdata(pdev, NULL);
6127 static int
6128 bnx2_suspend(struct pci_dev *pdev, pm_message_t state)
6130 struct net_device *dev = pci_get_drvdata(pdev);
6131 struct bnx2 *bp = netdev_priv(dev);
6132 u32 reset_code;
6134 if (!netif_running(dev))
6135 return 0;
6137 flush_scheduled_work();
6138 bnx2_netif_stop(bp);
6139 netif_device_detach(dev);
6140 del_timer_sync(&bp->timer);
6141 if (bp->flags & NO_WOL_FLAG)
6142 reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
6143 else if (bp->wol)
6144 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
6145 else
6146 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
6147 bnx2_reset_chip(bp, reset_code);
6148 bnx2_free_skbs(bp);
6149 bnx2_set_power_state(bp, pci_choose_state(pdev, state));
6150 return 0;
6153 static int
6154 bnx2_resume(struct pci_dev *pdev)
6156 struct net_device *dev = pci_get_drvdata(pdev);
6157 struct bnx2 *bp = netdev_priv(dev);
6159 if (!netif_running(dev))
6160 return 0;
6162 bnx2_set_power_state(bp, PCI_D0);
6163 netif_device_attach(dev);
6164 bnx2_init_nic(bp);
6165 bnx2_netif_start(bp);
6166 return 0;
6169 static struct pci_driver bnx2_pci_driver = {
6170 .name = DRV_MODULE_NAME,
6171 .id_table = bnx2_pci_tbl,
6172 .probe = bnx2_init_one,
6173 .remove = __devexit_p(bnx2_remove_one),
6174 .suspend = bnx2_suspend,
6175 .resume = bnx2_resume,
6178 static int __init bnx2_init(void)
6180 return pci_register_driver(&bnx2_pci_driver);
6183 static void __exit bnx2_cleanup(void)
6185 pci_unregister_driver(&bnx2_pci_driver);
6188 module_init(bnx2_init);
6189 module_exit(bnx2_cleanup);