1 /* bnx2.c: Broadcom NX2 network driver.
3 * Copyright (c) 2004, 2005, 2006 Broadcom Corporation
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
9 * Written by: Michael Chan (mchan@broadcom.com)
10 */
12 #include "bnx2.h"
13 #include "bnx2_fw.h"
15 #define DRV_MODULE_NAME "bnx2"
16 #define PFX DRV_MODULE_NAME ": "
17 #define DRV_MODULE_VERSION "1.4.38"
18 #define DRV_MODULE_RELDATE "February 10, 2006"
20 #define RUN_AT(x) (jiffies + (x))
22 /* Time in jiffies before concluding the transmitter is hung. */
23 #define TX_TIMEOUT (5*HZ)
25 static char version[] __devinitdata =
26 "Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
28 MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
29 MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708 Driver");
30 MODULE_LICENSE("GPL");
31 MODULE_VERSION(DRV_MODULE_VERSION);
33 static int disable_msi = 0;
35 module_param(disable_msi, int, 0);
36 MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
38 typedef enum {
39 BCM5706 = 0,
40 NC370T,
41 NC370I,
42 BCM5706S,
43 NC370F,
44 BCM5708,
45 BCM5708S,
46 } board_t;
48 /* indexed by board_t, above */
49 static const struct {
50 char *name;
51 } board_info[] __devinitdata = {
52 { "Broadcom NetXtreme II BCM5706 1000Base-T" },
53 { "HP NC370T Multifunction Gigabit Server Adapter" },
54 { "HP NC370i Multifunction Gigabit Server Adapter" },
55 { "Broadcom NetXtreme II BCM5706 1000Base-SX" },
56 { "HP NC370F Multifunction Gigabit Server Adapter" },
57 { "Broadcom NetXtreme II BCM5708 1000Base-T" },
58 { "Broadcom NetXtreme II BCM5708 1000Base-SX" },
61 static struct pci_device_id bnx2_pci_tbl[] = {
62 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
63 PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
64 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
65 PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
66 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
67 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
68 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
69 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
70 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
71 PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
72 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
73 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
74 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
75 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
76 { 0, }
79 static struct flash_spec flash_table[] =
81 /* Slow EEPROM */
82 {0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
83 1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
84 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
85 "EEPROM - slow"},
86 /* Expansion entry 0001 */
87 {0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
88 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
89 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
90 "Entry 0001"},
91 /* Saifun SA25F010 (non-buffered flash) */
92 /* strap, cfg1, & write1 need updates */
93 {0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
94 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
95 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
96 "Non-buffered flash (128kB)"},
97 /* Saifun SA25F020 (non-buffered flash) */
98 /* strap, cfg1, & write1 need updates */
99 {0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
100 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
101 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
102 "Non-buffered flash (256kB)"},
103 /* Expansion entry 0100 */
104 {0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
105 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
106 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
107 "Entry 0100"},
108 /* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
109 {0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
110 0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
111 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
112 "Entry 0101: ST M45PE10 (128kB non-bufferred)"},
113 /* Entry 0110: ST M45PE20 (non-buffered flash)*/
114 {0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
115 0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
116 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
117 "Entry 0110: ST M45PE20 (256kB non-bufferred)"},
118 /* Saifun SA25F005 (non-buffered flash) */
119 /* strap, cfg1, & write1 need updates */
120 {0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
121 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
122 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
123 "Non-buffered flash (64kB)"},
124 /* Fast EEPROM */
125 {0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
126 1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
127 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
128 "EEPROM - fast"},
129 /* Expansion entry 1001 */
130 {0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
131 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
132 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
133 "Entry 1001"},
134 /* Expansion entry 1010 */
135 {0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
136 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
137 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
138 "Entry 1010"},
139 /* ATMEL AT45DB011B (buffered flash) */
140 {0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
141 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
142 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
143 "Buffered flash (128kB)"},
144 /* Expansion entry 1100 */
145 {0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
146 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
147 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
148 "Entry 1100"},
149 /* Expansion entry 1101 */
150 {0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
151 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
152 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
153 "Entry 1101"},
154 /* Atmel Expansion entry 1110 */
155 {0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
156 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
157 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
158 "Entry 1110 (Atmel)"},
159 /* ATMEL AT45DB021B (buffered flash) */
160 {0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
161 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
162 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
163 "Buffered flash (256kB)"},
166 MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
168 static inline u32 bnx2_tx_avail(struct bnx2 *bp)
170 u32 diff = TX_RING_IDX(bp->tx_prod) - TX_RING_IDX(bp->tx_cons);
172 if (diff > MAX_TX_DESC_CNT)
173 diff = (diff & MAX_TX_DESC_CNT) - 1;
174 return (bp->tx_ring_size - diff);
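/*
 * Worked example (illustrative): assuming a 256-entry BD page where
 * MAX_TX_DESC_CNT is 255 and the last BD of each page is a next-page
 * pointer, a producer index of 5 and a consumer index of 250 give
 * diff = 5 - 250 = 0xffffff0b as u32; masking with MAX_TX_DESC_CNT
 * yields 11, and subtracting 1 skips the next-page BD crossed during
 * the wrap, so 10 BDs are in flight and tx_ring_size - 10 are free.
 */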
177 static u32
178 bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
180 REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
181 return (REG_RD(bp, BNX2_PCICFG_REG_WINDOW));
184 static void
185 bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
187 REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
188 REG_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
191 static void
192 bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
194 offset += cid_addr;
195 REG_WR(bp, BNX2_CTX_DATA_ADR, offset);
196 REG_WR(bp, BNX2_CTX_DATA, val);
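/*
 * Indirect access pattern: the chip exposes a single address/data
 * window, so every indirect read or write is a two-register sequence
 * (set the window address, then touch the data register).  The two
 * steps are not atomic, so callers are expected to serialize.  A
 * hypothetical usage, with offsets that appear later in this file:
 *
 *   bnx2_reg_wr_ind(bp, bp->shmem_base + BNX2_DRV_MB, msg);
 *   val = bnx2_reg_rd_ind(bp, bp->shmem_base + BNX2_FW_MB);
 */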
199 static int
200 bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
202 u32 val1;
203 int i, ret;
205 if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
206 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
207 val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;
209 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
210 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
212 udelay(40);
215 val1 = (bp->phy_addr << 21) | (reg << 16) |
216 BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
217 BNX2_EMAC_MDIO_COMM_START_BUSY;
218 REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);
220 for (i = 0; i < 50; i++) {
221 udelay(10);
223 val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
224 if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
225 udelay(5);
227 val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
228 val1 &= BNX2_EMAC_MDIO_COMM_DATA;
230 break;
234 if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
235 *val = 0x0;
236 ret = -EBUSY;
238 else {
239 *val = val1;
240 ret = 0;
243 if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
244 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
245 val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;
247 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
248 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
250 udelay(40);
253 return ret;
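/*
 * The MDIO command word above follows the clause-22 frame layout:
 * bits 21-25 hold the PHY address and bits 16-20 the register number.
 * For example, phy_addr 1 and reg 1 (MII_BMSR) produce
 * (1 << 21) | (1 << 16) = 0x00210000, OR'ed with the READ command,
 * DISEXT and START_BUSY control bits before being written to
 * BNX2_EMAC_MDIO_COMM.
 */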
256 static int
257 bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
259 u32 val1;
260 int i, ret;
262 if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
263 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
264 val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;
266 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
267 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
269 udelay(40);
272 val1 = (bp->phy_addr << 21) | (reg << 16) | val |
273 BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
274 BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
275 REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);
277 for (i = 0; i < 50; i++) {
278 udelay(10);
280 val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
281 if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
282 udelay(5);
283 break;
287 if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
288 ret = -EBUSY;
289 else
290 ret = 0;
292 if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
293 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
294 val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;
296 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
297 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
299 udelay(40);
302 return ret;
305 static void
306 bnx2_disable_int(struct bnx2 *bp)
308 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
309 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
310 REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
313 static void
314 bnx2_enable_int(struct bnx2 *bp)
316 u32 val;
318 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
319 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
320 BNX2_PCICFG_INT_ACK_CMD_MASK_INT | bp->last_status_idx);
322 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
323 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | bp->last_status_idx);
325 val = REG_RD(bp, BNX2_HC_COMMAND);
326 REG_WR(bp, BNX2_HC_COMMAND, val | BNX2_HC_COMMAND_COAL_NOW);
329 static void
330 bnx2_disable_int_sync(struct bnx2 *bp)
332 atomic_inc(&bp->intr_sem);
333 bnx2_disable_int(bp);
334 synchronize_irq(bp->pdev->irq);
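/*
 * bp->intr_sem acts as a disable-nesting count: it is incremented here
 * before masking the interrupt and decremented in bnx2_netif_start(),
 * and both ISRs bail out while it is non-zero.  synchronize_irq()
 * then guarantees no handler is still running on another CPU once
 * this returns.
 */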
337 static void
338 bnx2_netif_stop(struct bnx2 *bp)
340 bnx2_disable_int_sync(bp);
341 if (netif_running(bp->dev)) {
342 netif_poll_disable(bp->dev);
343 netif_tx_disable(bp->dev);
344 bp->dev->trans_start = jiffies; /* prevent tx timeout */
348 static void
349 bnx2_netif_start(struct bnx2 *bp)
351 if (atomic_dec_and_test(&bp->intr_sem)) {
352 if (netif_running(bp->dev)) {
353 netif_wake_queue(bp->dev);
354 netif_poll_enable(bp->dev);
355 bnx2_enable_int(bp);
360 static void
361 bnx2_free_mem(struct bnx2 *bp)
363 int i;
365 if (bp->stats_blk) {
366 pci_free_consistent(bp->pdev, sizeof(struct statistics_block),
367 bp->stats_blk, bp->stats_blk_mapping);
368 bp->stats_blk = NULL;
370 if (bp->status_blk) {
371 pci_free_consistent(bp->pdev, sizeof(struct status_block),
372 bp->status_blk, bp->status_blk_mapping);
373 bp->status_blk = NULL;
375 if (bp->tx_desc_ring) {
376 pci_free_consistent(bp->pdev,
377 sizeof(struct tx_bd) * TX_DESC_CNT,
378 bp->tx_desc_ring, bp->tx_desc_mapping);
379 bp->tx_desc_ring = NULL;
381 kfree(bp->tx_buf_ring);
382 bp->tx_buf_ring = NULL;
383 for (i = 0; i < bp->rx_max_ring; i++) {
384 if (bp->rx_desc_ring[i])
385 pci_free_consistent(bp->pdev,
386 sizeof(struct rx_bd) * RX_DESC_CNT,
387 bp->rx_desc_ring[i],
388 bp->rx_desc_mapping[i]);
389 bp->rx_desc_ring[i] = NULL;
391 vfree(bp->rx_buf_ring);
392 bp->rx_buf_ring = NULL;
395 static int
396 bnx2_alloc_mem(struct bnx2 *bp)
398 int i;
400 bp->tx_buf_ring = kmalloc(sizeof(struct sw_bd) * TX_DESC_CNT,
401 GFP_KERNEL);
402 if (bp->tx_buf_ring == NULL)
403 return -ENOMEM;
405 memset(bp->tx_buf_ring, 0, sizeof(struct sw_bd) * TX_DESC_CNT);
406 bp->tx_desc_ring = pci_alloc_consistent(bp->pdev,
407 sizeof(struct tx_bd) *
408 TX_DESC_CNT,
409 &bp->tx_desc_mapping);
410 if (bp->tx_desc_ring == NULL)
411 goto alloc_mem_err;
413 bp->rx_buf_ring = vmalloc(sizeof(struct sw_bd) * RX_DESC_CNT *
414 bp->rx_max_ring);
415 if (bp->rx_buf_ring == NULL)
416 goto alloc_mem_err;
418 memset(bp->rx_buf_ring, 0, sizeof(struct sw_bd) * RX_DESC_CNT *
419 bp->rx_max_ring);
421 for (i = 0; i < bp->rx_max_ring; i++) {
422 bp->rx_desc_ring[i] =
423 pci_alloc_consistent(bp->pdev,
424 sizeof(struct rx_bd) * RX_DESC_CNT,
425 &bp->rx_desc_mapping[i]);
426 if (bp->rx_desc_ring[i] == NULL)
427 goto alloc_mem_err;
431 bp->status_blk = pci_alloc_consistent(bp->pdev,
432 sizeof(struct status_block),
433 &bp->status_blk_mapping);
434 if (bp->status_blk == NULL)
435 goto alloc_mem_err;
437 memset(bp->status_blk, 0, sizeof(struct status_block));
439 bp->stats_blk = pci_alloc_consistent(bp->pdev,
440 sizeof(struct statistics_block),
441 &bp->stats_blk_mapping);
442 if (bp->stats_blk == NULL)
443 goto alloc_mem_err;
445 memset(bp->stats_blk, 0, sizeof(struct statistics_block));
447 return 0;
449 alloc_mem_err:
450 bnx2_free_mem(bp);
451 return -ENOMEM;
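/*
 * A minimal sketch of the goto-unwind convention used above: each
 * allocation is checked in order, and any failure jumps to a single
 * exit label that frees whatever was already set up (bnx2_free_mem
 * NULL-checks every pointer, so it is safe on a partially built state):
 *
 *   a = alloc(...); if (!a) goto err;
 *   b = alloc(...); if (!b) goto err;
 *   return 0;
 *   err: free_all(); return -ENOMEM;
 */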
454 static void
455 bnx2_report_fw_link(struct bnx2 *bp)
457 u32 fw_link_status = 0;
459 if (bp->link_up) {
460 u32 bmsr;
462 switch (bp->line_speed) {
463 case SPEED_10:
464 if (bp->duplex == DUPLEX_HALF)
465 fw_link_status = BNX2_LINK_STATUS_10HALF;
466 else
467 fw_link_status = BNX2_LINK_STATUS_10FULL;
468 break;
469 case SPEED_100:
470 if (bp->duplex == DUPLEX_HALF)
471 fw_link_status = BNX2_LINK_STATUS_100HALF;
472 else
473 fw_link_status = BNX2_LINK_STATUS_100FULL;
474 break;
475 case SPEED_1000:
476 if (bp->duplex == DUPLEX_HALF)
477 fw_link_status = BNX2_LINK_STATUS_1000HALF;
478 else
479 fw_link_status = BNX2_LINK_STATUS_1000FULL;
480 break;
481 case SPEED_2500:
482 if (bp->duplex == DUPLEX_HALF)
483 fw_link_status = BNX2_LINK_STATUS_2500HALF;
484 else
485 fw_link_status = BNX2_LINK_STATUS_2500FULL;
486 break;
489 fw_link_status |= BNX2_LINK_STATUS_LINK_UP;
491 if (bp->autoneg) {
492 fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;
494 bnx2_read_phy(bp, MII_BMSR, &bmsr);
495 bnx2_read_phy(bp, MII_BMSR, &bmsr);
497 if (!(bmsr & BMSR_ANEGCOMPLETE) ||
498 bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)
499 fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
500 else
501 fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
504 else
505 fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;
507 REG_WR_IND(bp, bp->shmem_base + BNX2_LINK_STATUS, fw_link_status);
510 static void
511 bnx2_report_link(struct bnx2 *bp)
513 if (bp->link_up) {
514 netif_carrier_on(bp->dev);
515 printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);
517 printk("%d Mbps ", bp->line_speed);
519 if (bp->duplex == DUPLEX_FULL)
520 printk("full duplex");
521 else
522 printk("half duplex");
524 if (bp->flow_ctrl) {
525 if (bp->flow_ctrl & FLOW_CTRL_RX) {
526 printk(", receive ");
527 if (bp->flow_ctrl & FLOW_CTRL_TX)
528 printk("& transmit ");
530 else {
531 printk(", transmit ");
533 printk("flow control ON");
535 printk("\n");
537 else {
538 netif_carrier_off(bp->dev);
539 printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
542 bnx2_report_fw_link(bp);
545 static void
546 bnx2_resolve_flow_ctrl(struct bnx2 *bp)
548 u32 local_adv, remote_adv;
550 bp->flow_ctrl = 0;
551 if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
552 (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
554 if (bp->duplex == DUPLEX_FULL) {
555 bp->flow_ctrl = bp->req_flow_ctrl;
557 return;
560 if (bp->duplex != DUPLEX_FULL) {
561 return;
564 if ((bp->phy_flags & PHY_SERDES_FLAG) &&
565 (CHIP_NUM(bp) == CHIP_NUM_5708)) {
566 u32 val;
568 bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
569 if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
570 bp->flow_ctrl |= FLOW_CTRL_TX;
571 if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
572 bp->flow_ctrl |= FLOW_CTRL_RX;
573 return;
576 bnx2_read_phy(bp, MII_ADVERTISE, &local_adv);
577 bnx2_read_phy(bp, MII_LPA, &remote_adv);
579 if (bp->phy_flags & PHY_SERDES_FLAG) {
580 u32 new_local_adv = 0;
581 u32 new_remote_adv = 0;
583 if (local_adv & ADVERTISE_1000XPAUSE)
584 new_local_adv |= ADVERTISE_PAUSE_CAP;
585 if (local_adv & ADVERTISE_1000XPSE_ASYM)
586 new_local_adv |= ADVERTISE_PAUSE_ASYM;
587 if (remote_adv & ADVERTISE_1000XPAUSE)
588 new_remote_adv |= ADVERTISE_PAUSE_CAP;
589 if (remote_adv & ADVERTISE_1000XPSE_ASYM)
590 new_remote_adv |= ADVERTISE_PAUSE_ASYM;
592 local_adv = new_local_adv;
593 remote_adv = new_remote_adv;
596 /* See Table 28B-3 of 802.3ab-1999 spec. */
597 if (local_adv & ADVERTISE_PAUSE_CAP) {
598 if(local_adv & ADVERTISE_PAUSE_ASYM) {
599 if (remote_adv & ADVERTISE_PAUSE_CAP) {
600 bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
602 else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
603 bp->flow_ctrl = FLOW_CTRL_RX;
606 else {
607 if (remote_adv & ADVERTISE_PAUSE_CAP) {
608 bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
612 else if (local_adv & ADVERTISE_PAUSE_ASYM) {
613 if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
614 (remote_adv & ADVERTISE_PAUSE_ASYM)) {
616 bp->flow_ctrl = FLOW_CTRL_TX;
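/*
 * Pause resolution per 802.3ab-1999 Table 28B-3, as implemented above
 * (CAP = symmetric pause bit, ASYM = asymmetric pause bit):
 *
 *   local CAP+ASYM, remote CAP        -> FLOW_CTRL_TX | FLOW_CTRL_RX
 *   local CAP+ASYM, remote ASYM only  -> FLOW_CTRL_RX
 *   local CAP only,  remote CAP       -> FLOW_CTRL_TX | FLOW_CTRL_RX
 *   local ASYM only, remote CAP+ASYM  -> FLOW_CTRL_TX
 *   anything else                     -> no pause
 */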
621 static int
622 bnx2_5708s_linkup(struct bnx2 *bp)
624 u32 val;
626 bp->link_up = 1;
627 bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
628 switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
629 case BCM5708S_1000X_STAT1_SPEED_10:
630 bp->line_speed = SPEED_10;
631 break;
632 case BCM5708S_1000X_STAT1_SPEED_100:
633 bp->line_speed = SPEED_100;
634 break;
635 case BCM5708S_1000X_STAT1_SPEED_1G:
636 bp->line_speed = SPEED_1000;
637 break;
638 case BCM5708S_1000X_STAT1_SPEED_2G5:
639 bp->line_speed = SPEED_2500;
640 break;
642 if (val & BCM5708S_1000X_STAT1_FD)
643 bp->duplex = DUPLEX_FULL;
644 else
645 bp->duplex = DUPLEX_HALF;
647 return 0;
650 static int
651 bnx2_5706s_linkup(struct bnx2 *bp)
653 u32 bmcr, local_adv, remote_adv, common;
655 bp->link_up = 1;
656 bp->line_speed = SPEED_1000;
658 bnx2_read_phy(bp, MII_BMCR, &bmcr);
659 if (bmcr & BMCR_FULLDPLX) {
660 bp->duplex = DUPLEX_FULL;
662 else {
663 bp->duplex = DUPLEX_HALF;
666 if (!(bmcr & BMCR_ANENABLE)) {
667 return 0;
670 bnx2_read_phy(bp, MII_ADVERTISE, &local_adv);
671 bnx2_read_phy(bp, MII_LPA, &remote_adv);
673 common = local_adv & remote_adv;
674 if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {
676 if (common & ADVERTISE_1000XFULL) {
677 bp->duplex = DUPLEX_FULL;
679 else {
680 bp->duplex = DUPLEX_HALF;
684 return 0;
687 static int
688 bnx2_copper_linkup(struct bnx2 *bp)
690 u32 bmcr;
692 bnx2_read_phy(bp, MII_BMCR, &bmcr);
693 if (bmcr & BMCR_ANENABLE) {
694 u32 local_adv, remote_adv, common;
696 bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
697 bnx2_read_phy(bp, MII_STAT1000, &remote_adv);
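/*
 * The link-partner 1000BASE-T ability bits in MII_STAT1000 sit two
 * bit positions above the matching advertisement bits in MII_CTRL1000
 * (e.g. 1000FULL is bit 9 in CTRL1000 but bit 11 in STAT1000), so
 * shifting remote_adv right by 2 lets both words be masked with the
 * same ADVERTISE_1000FULL/ADVERTISE_1000HALF constants.
 */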
699 common = local_adv & (remote_adv >> 2);
700 if (common & ADVERTISE_1000FULL) {
701 bp->line_speed = SPEED_1000;
702 bp->duplex = DUPLEX_FULL;
704 else if (common & ADVERTISE_1000HALF) {
705 bp->line_speed = SPEED_1000;
706 bp->duplex = DUPLEX_HALF;
708 else {
709 bnx2_read_phy(bp, MII_ADVERTISE, &local_adv);
710 bnx2_read_phy(bp, MII_LPA, &remote_adv);
712 common = local_adv & remote_adv;
713 if (common & ADVERTISE_100FULL) {
714 bp->line_speed = SPEED_100;
715 bp->duplex = DUPLEX_FULL;
717 else if (common & ADVERTISE_100HALF) {
718 bp->line_speed = SPEED_100;
719 bp->duplex = DUPLEX_HALF;
721 else if (common & ADVERTISE_10FULL) {
722 bp->line_speed = SPEED_10;
723 bp->duplex = DUPLEX_FULL;
725 else if (common & ADVERTISE_10HALF) {
726 bp->line_speed = SPEED_10;
727 bp->duplex = DUPLEX_HALF;
729 else {
730 bp->line_speed = 0;
731 bp->link_up = 0;
735 else {
736 if (bmcr & BMCR_SPEED100) {
737 bp->line_speed = SPEED_100;
739 else {
740 bp->line_speed = SPEED_10;
742 if (bmcr & BMCR_FULLDPLX) {
743 bp->duplex = DUPLEX_FULL;
745 else {
746 bp->duplex = DUPLEX_HALF;
750 return 0;
753 static int
754 bnx2_set_mac_link(struct bnx2 *bp)
756 u32 val;
758 REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
759 if (bp->link_up && (bp->line_speed == SPEED_1000) &&
760 (bp->duplex == DUPLEX_HALF)) {
761 REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
764 /* Configure the EMAC mode register. */
765 val = REG_RD(bp, BNX2_EMAC_MODE);
767 val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
768 BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
769 BNX2_EMAC_MODE_25G);
771 if (bp->link_up) {
772 switch (bp->line_speed) {
773 case SPEED_10:
774 if (CHIP_NUM(bp) == CHIP_NUM_5708) {
775 val |= BNX2_EMAC_MODE_PORT_MII_10;
776 break;
778 /* fall through */
779 case SPEED_100:
780 val |= BNX2_EMAC_MODE_PORT_MII;
781 break;
782 case SPEED_2500:
783 val |= BNX2_EMAC_MODE_25G;
784 /* fall through */
785 case SPEED_1000:
786 val |= BNX2_EMAC_MODE_PORT_GMII;
787 break;
790 else {
791 val |= BNX2_EMAC_MODE_PORT_GMII;
794 /* Set the MAC to operate in the appropriate duplex mode. */
795 if (bp->duplex == DUPLEX_HALF)
796 val |= BNX2_EMAC_MODE_HALF_DUPLEX;
797 REG_WR(bp, BNX2_EMAC_MODE, val);
799 /* Enable/disable rx PAUSE. */
800 bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;
802 if (bp->flow_ctrl & FLOW_CTRL_RX)
803 bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
804 REG_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);
806 /* Enable/disable tx PAUSE. */
807 val = REG_RD(bp, BNX2_EMAC_TX_MODE);
808 val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;
810 if (bp->flow_ctrl & FLOW_CTRL_TX)
811 val |= BNX2_EMAC_TX_MODE_FLOW_EN;
812 REG_WR(bp, BNX2_EMAC_TX_MODE, val);
814 /* Acknowledge the interrupt. */
815 REG_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);
817 return 0;
820 static int
821 bnx2_set_link(struct bnx2 *bp)
823 u32 bmsr;
824 u8 link_up;
826 if (bp->loopback == MAC_LOOPBACK) {
827 bp->link_up = 1;
828 return 0;
831 link_up = bp->link_up;
833 bnx2_read_phy(bp, MII_BMSR, &bmsr);
834 bnx2_read_phy(bp, MII_BMSR, &bmsr);
836 if ((bp->phy_flags & PHY_SERDES_FLAG) &&
837 (CHIP_NUM(bp) == CHIP_NUM_5706)) {
838 u32 val;
840 val = REG_RD(bp, BNX2_EMAC_STATUS);
841 if (val & BNX2_EMAC_STATUS_LINK)
842 bmsr |= BMSR_LSTATUS;
843 else
844 bmsr &= ~BMSR_LSTATUS;
847 if (bmsr & BMSR_LSTATUS) {
848 bp->link_up = 1;
850 if (bp->phy_flags & PHY_SERDES_FLAG) {
851 if (CHIP_NUM(bp) == CHIP_NUM_5706)
852 bnx2_5706s_linkup(bp);
853 else if (CHIP_NUM(bp) == CHIP_NUM_5708)
854 bnx2_5708s_linkup(bp);
856 else {
857 bnx2_copper_linkup(bp);
859 bnx2_resolve_flow_ctrl(bp);
861 else {
862 if ((bp->phy_flags & PHY_SERDES_FLAG) &&
863 (bp->autoneg & AUTONEG_SPEED)) {
865 u32 bmcr;
867 bnx2_read_phy(bp, MII_BMCR, &bmcr);
868 if (!(bmcr & BMCR_ANENABLE)) {
869 bnx2_write_phy(bp, MII_BMCR, bmcr |
870 BMCR_ANENABLE);
873 bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
874 bp->link_up = 0;
877 if (bp->link_up != link_up) {
878 bnx2_report_link(bp);
881 bnx2_set_mac_link(bp);
883 return 0;
886 static int
887 bnx2_reset_phy(struct bnx2 *bp)
889 int i;
890 u32 reg;
892 bnx2_write_phy(bp, MII_BMCR, BMCR_RESET);
894 #define PHY_RESET_MAX_WAIT 100
895 for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
896 udelay(10);
898 bnx2_read_phy(bp, MII_BMCR, &reg);
899 if (!(reg & BMCR_RESET)) {
900 udelay(20);
901 break;
904 if (i == PHY_RESET_MAX_WAIT) {
905 return -EBUSY;
907 return 0;
910 static u32
911 bnx2_phy_get_pause_adv(struct bnx2 *bp)
913 u32 adv = 0;
915 if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
916 (FLOW_CTRL_RX | FLOW_CTRL_TX)) {
918 if (bp->phy_flags & PHY_SERDES_FLAG) {
919 adv = ADVERTISE_1000XPAUSE;
921 else {
922 adv = ADVERTISE_PAUSE_CAP;
925 else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
926 if (bp->phy_flags & PHY_SERDES_FLAG) {
927 adv = ADVERTISE_1000XPSE_ASYM;
929 else {
930 adv = ADVERTISE_PAUSE_ASYM;
933 else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
934 if (bp->phy_flags & PHY_SERDES_FLAG) {
935 adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
937 else {
938 adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
941 return adv;
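/*
 * The requested flow control maps onto pause advertisement bits
 * roughly as follows (1000X variants for SerDes, PAUSE_CAP/ASYM for
 * copper):
 *
 *   req TX|RX -> PAUSE_CAP            (symmetric pause)
 *   req TX    -> PAUSE_ASYM           (we may pause the partner only)
 *   req RX    -> PAUSE_CAP|PAUSE_ASYM (accept pause either way)
 */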
944 static int
945 bnx2_setup_serdes_phy(struct bnx2 *bp)
947 u32 adv, bmcr, up1;
948 u32 new_adv = 0;
950 if (!(bp->autoneg & AUTONEG_SPEED)) {
951 u32 new_bmcr;
952 int force_link_down = 0;
954 if (CHIP_NUM(bp) == CHIP_NUM_5708) {
955 bnx2_read_phy(bp, BCM5708S_UP1, &up1);
956 if (up1 & BCM5708S_UP1_2G5) {
957 up1 &= ~BCM5708S_UP1_2G5;
958 bnx2_write_phy(bp, BCM5708S_UP1, up1);
959 force_link_down = 1;
963 bnx2_read_phy(bp, MII_ADVERTISE, &adv);
964 adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);
966 bnx2_read_phy(bp, MII_BMCR, &bmcr);
967 new_bmcr = bmcr & ~BMCR_ANENABLE;
968 new_bmcr |= BMCR_SPEED1000;
969 if (bp->req_duplex == DUPLEX_FULL) {
970 adv |= ADVERTISE_1000XFULL;
971 new_bmcr |= BMCR_FULLDPLX;
973 else {
974 adv |= ADVERTISE_1000XHALF;
975 new_bmcr &= ~BMCR_FULLDPLX;
977 if ((new_bmcr != bmcr) || (force_link_down)) {
978 /* Force a link down visible on the other side */
979 if (bp->link_up) {
980 bnx2_write_phy(bp, MII_ADVERTISE, adv &
981 ~(ADVERTISE_1000XFULL |
982 ADVERTISE_1000XHALF));
983 bnx2_write_phy(bp, MII_BMCR, bmcr |
984 BMCR_ANRESTART | BMCR_ANENABLE);
986 bp->link_up = 0;
987 netif_carrier_off(bp->dev);
988 bnx2_write_phy(bp, MII_BMCR, new_bmcr);
990 bnx2_write_phy(bp, MII_ADVERTISE, adv);
991 bnx2_write_phy(bp, MII_BMCR, new_bmcr);
993 return 0;
996 if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) {
997 bnx2_read_phy(bp, BCM5708S_UP1, &up1);
998 up1 |= BCM5708S_UP1_2G5;
999 bnx2_write_phy(bp, BCM5708S_UP1, up1);
1002 if (bp->advertising & ADVERTISED_1000baseT_Full)
1003 new_adv |= ADVERTISE_1000XFULL;
1005 new_adv |= bnx2_phy_get_pause_adv(bp);
1007 bnx2_read_phy(bp, MII_ADVERTISE, &adv);
1008 bnx2_read_phy(bp, MII_BMCR, &bmcr);
1010 bp->serdes_an_pending = 0;
1011 if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
1012 /* Force a link down visible on the other side */
1013 if (bp->link_up) {
1014 int i;
1016 bnx2_write_phy(bp, MII_BMCR, BMCR_LOOPBACK);
1017 for (i = 0; i < 110; i++) {
1018 udelay(100);
1022 bnx2_write_phy(bp, MII_ADVERTISE, new_adv);
1023 bnx2_write_phy(bp, MII_BMCR, bmcr | BMCR_ANRESTART |
1024 BMCR_ANENABLE);
1025 if (CHIP_NUM(bp) == CHIP_NUM_5706) {
1026 /* Speed up link-up time when the link partner
1027 * does not autonegotiate which is very common
1028 * in blade servers. Some blade servers use
1029 * IPMI for keyboard input and it's important
1030 * to minimize link disruptions. Autonegotiation
1031 * involves exchanging base pages plus 3 next pages
1032 * and normally completes in about 120 msec.
1033 */
1034 bp->current_interval = SERDES_AN_TIMEOUT;
1035 bp->serdes_an_pending = 1;
1036 mod_timer(&bp->timer, jiffies + bp->current_interval);
1040 return 0;
1043 #define ETHTOOL_ALL_FIBRE_SPEED \
1044 (ADVERTISED_1000baseT_Full)
1046 #define ETHTOOL_ALL_COPPER_SPEED \
1047 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
1048 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
1049 ADVERTISED_1000baseT_Full)
1051 #define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
1052 ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)
1054 #define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)
1056 static int
1057 bnx2_setup_copper_phy(struct bnx2 *bp)
1059 u32 bmcr;
1060 u32 new_bmcr;
1062 bnx2_read_phy(bp, MII_BMCR, &bmcr);
1064 if (bp->autoneg & AUTONEG_SPEED) {
1065 u32 adv_reg, adv1000_reg;
1066 u32 new_adv_reg = 0;
1067 u32 new_adv1000_reg = 0;
1069 bnx2_read_phy(bp, MII_ADVERTISE, &adv_reg);
1070 adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
1071 ADVERTISE_PAUSE_ASYM);
1073 bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
1074 adv1000_reg &= PHY_ALL_1000_SPEED;
1076 if (bp->advertising & ADVERTISED_10baseT_Half)
1077 new_adv_reg |= ADVERTISE_10HALF;
1078 if (bp->advertising & ADVERTISED_10baseT_Full)
1079 new_adv_reg |= ADVERTISE_10FULL;
1080 if (bp->advertising & ADVERTISED_100baseT_Half)
1081 new_adv_reg |= ADVERTISE_100HALF;
1082 if (bp->advertising & ADVERTISED_100baseT_Full)
1083 new_adv_reg |= ADVERTISE_100FULL;
1084 if (bp->advertising & ADVERTISED_1000baseT_Full)
1085 new_adv1000_reg |= ADVERTISE_1000FULL;
1087 new_adv_reg |= ADVERTISE_CSMA;
1089 new_adv_reg |= bnx2_phy_get_pause_adv(bp);
1091 if ((adv1000_reg != new_adv1000_reg) ||
1092 (adv_reg != new_adv_reg) ||
1093 ((bmcr & BMCR_ANENABLE) == 0)) {
1095 bnx2_write_phy(bp, MII_ADVERTISE, new_adv_reg);
1096 bnx2_write_phy(bp, MII_CTRL1000, new_adv1000_reg);
1097 bnx2_write_phy(bp, MII_BMCR, BMCR_ANRESTART |
1098 BMCR_ANENABLE);
1100 else if (bp->link_up) {
1101 /* Flow ctrl may have changed from auto to forced */
1102 /* or vice-versa. */
1104 bnx2_resolve_flow_ctrl(bp);
1105 bnx2_set_mac_link(bp);
1107 return 0;
1110 new_bmcr = 0;
1111 if (bp->req_line_speed == SPEED_100) {
1112 new_bmcr |= BMCR_SPEED100;
1114 if (bp->req_duplex == DUPLEX_FULL) {
1115 new_bmcr |= BMCR_FULLDPLX;
1117 if (new_bmcr != bmcr) {
1118 u32 bmsr;
1119 int i = 0;
1121 bnx2_read_phy(bp, MII_BMSR, &bmsr);
1122 bnx2_read_phy(bp, MII_BMSR, &bmsr);
1124 if (bmsr & BMSR_LSTATUS) {
1125 /* Force link down */
1126 bnx2_write_phy(bp, MII_BMCR, BMCR_LOOPBACK);
1127 do {
1128 udelay(100);
1129 bnx2_read_phy(bp, MII_BMSR, &bmsr);
1130 bnx2_read_phy(bp, MII_BMSR, &bmsr);
1131 i++;
1132 } while ((bmsr & BMSR_LSTATUS) && (i < 620));
1135 bnx2_write_phy(bp, MII_BMCR, new_bmcr);
1137 /* Normally, the new speed is set up after the link has
1138 * gone down and up again. In some cases, link will not go
1139 * down so we need to set up the new speed here.
1140 */
1141 if (bmsr & BMSR_LSTATUS) {
1142 bp->line_speed = bp->req_line_speed;
1143 bp->duplex = bp->req_duplex;
1144 bnx2_resolve_flow_ctrl(bp);
1145 bnx2_set_mac_link(bp);
1148 return 0;
1151 static int
1152 bnx2_setup_phy(struct bnx2 *bp)
1154 if (bp->loopback == MAC_LOOPBACK)
1155 return 0;
1157 if (bp->phy_flags & PHY_SERDES_FLAG) {
1158 return (bnx2_setup_serdes_phy(bp));
1160 else {
1161 return (bnx2_setup_copper_phy(bp));
1165 static int
1166 bnx2_init_5708s_phy(struct bnx2 *bp)
1168 u32 val;
1170 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
1171 bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
1172 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
1174 bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
1175 val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
1176 bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);
1178 bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
1179 val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
1180 bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);
1182 if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) {
1183 bnx2_read_phy(bp, BCM5708S_UP1, &val);
1184 val |= BCM5708S_UP1_2G5;
1185 bnx2_write_phy(bp, BCM5708S_UP1, val);
1188 if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
1189 (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
1190 (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
1191 /* increase tx signal amplitude */
1192 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
1193 BCM5708S_BLK_ADDR_TX_MISC);
1194 bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
1195 val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
1196 bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
1197 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
1200 val = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG) &
1201 BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;
1203 if (val) {
1204 u32 is_backplane;
1206 is_backplane = REG_RD_IND(bp, bp->shmem_base +
1207 BNX2_SHARED_HW_CFG_CONFIG);
1208 if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
1209 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
1210 BCM5708S_BLK_ADDR_TX_MISC);
1211 bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
1212 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
1213 BCM5708S_BLK_ADDR_DIG);
1216 return 0;
1219 static int
1220 bnx2_init_5706s_phy(struct bnx2 *bp)
1222 bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
1224 if (CHIP_NUM(bp) == CHIP_NUM_5706) {
1225 REG_WR(bp, BNX2_MISC_UNUSED0, 0x300);
1228 if (bp->dev->mtu > 1500) {
1229 u32 val;
1231 /* Set extended packet length bit */
1232 bnx2_write_phy(bp, 0x18, 0x7);
1233 bnx2_read_phy(bp, 0x18, &val);
1234 bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);
1236 bnx2_write_phy(bp, 0x1c, 0x6c00);
1237 bnx2_read_phy(bp, 0x1c, &val);
1238 bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
1240 else {
1241 u32 val;
1243 bnx2_write_phy(bp, 0x18, 0x7);
1244 bnx2_read_phy(bp, 0x18, &val);
1245 bnx2_write_phy(bp, 0x18, val & ~0x4007);
1247 bnx2_write_phy(bp, 0x1c, 0x6c00);
1248 bnx2_read_phy(bp, 0x1c, &val);
1249 bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
1252 return 0;
1255 static int
1256 bnx2_init_copper_phy(struct bnx2 *bp)
1258 u32 val;
1260 bp->phy_flags |= PHY_CRC_FIX_FLAG;
1262 if (bp->phy_flags & PHY_CRC_FIX_FLAG) {
1263 bnx2_write_phy(bp, 0x18, 0x0c00);
1264 bnx2_write_phy(bp, 0x17, 0x000a);
1265 bnx2_write_phy(bp, 0x15, 0x310b);
1266 bnx2_write_phy(bp, 0x17, 0x201f);
1267 bnx2_write_phy(bp, 0x15, 0x9506);
1268 bnx2_write_phy(bp, 0x17, 0x401f);
1269 bnx2_write_phy(bp, 0x15, 0x14e2);
1270 bnx2_write_phy(bp, 0x18, 0x0400);
1273 if (bp->dev->mtu > 1500) {
1274 /* Set extended packet length bit */
1275 bnx2_write_phy(bp, 0x18, 0x7);
1276 bnx2_read_phy(bp, 0x18, &val);
1277 bnx2_write_phy(bp, 0x18, val | 0x4000);
1279 bnx2_read_phy(bp, 0x10, &val);
1280 bnx2_write_phy(bp, 0x10, val | 0x1);
1282 else {
1283 bnx2_write_phy(bp, 0x18, 0x7);
1284 bnx2_read_phy(bp, 0x18, &val);
1285 bnx2_write_phy(bp, 0x18, val & ~0x4007);
1287 bnx2_read_phy(bp, 0x10, &val);
1288 bnx2_write_phy(bp, 0x10, val & ~0x1);
1291 /* ethernet@wirespeed */
1292 bnx2_write_phy(bp, 0x18, 0x7007);
1293 bnx2_read_phy(bp, 0x18, &val);
1294 bnx2_write_phy(bp, 0x18, val | (1 << 15) | (1 << 4));
1295 return 0;
1299 static int
1300 bnx2_init_phy(struct bnx2 *bp)
1302 u32 val;
1303 int rc = 0;
1305 bp->phy_flags &= ~PHY_INT_MODE_MASK_FLAG;
1306 bp->phy_flags |= PHY_INT_MODE_LINK_READY_FLAG;
1308 REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
1310 bnx2_reset_phy(bp);
1312 bnx2_read_phy(bp, MII_PHYSID1, &val);
1313 bp->phy_id = val << 16;
1314 bnx2_read_phy(bp, MII_PHYSID2, &val);
1315 bp->phy_id |= val & 0xffff;
1317 if (bp->phy_flags & PHY_SERDES_FLAG) {
1318 if (CHIP_NUM(bp) == CHIP_NUM_5706)
1319 rc = bnx2_init_5706s_phy(bp);
1320 else if (CHIP_NUM(bp) == CHIP_NUM_5708)
1321 rc = bnx2_init_5708s_phy(bp);
1323 else {
1324 rc = bnx2_init_copper_phy(bp);
1327 bnx2_setup_phy(bp);
1329 return rc;
1332 static int
1333 bnx2_set_mac_loopback(struct bnx2 *bp)
1335 u32 mac_mode;
1337 mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
1338 mac_mode &= ~BNX2_EMAC_MODE_PORT;
1339 mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
1340 REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
1341 bp->link_up = 1;
1342 return 0;
1345 static int bnx2_test_link(struct bnx2 *);
1347 static int
1348 bnx2_set_phy_loopback(struct bnx2 *bp)
1350 u32 mac_mode;
1351 int rc, i;
1353 spin_lock_bh(&bp->phy_lock);
1354 rc = bnx2_write_phy(bp, MII_BMCR, BMCR_LOOPBACK | BMCR_FULLDPLX |
1355 BMCR_SPEED1000);
1356 spin_unlock_bh(&bp->phy_lock);
1357 if (rc)
1358 return rc;
1360 for (i = 0; i < 10; i++) {
1361 if (bnx2_test_link(bp) == 0)
1362 break;
1363 udelay(10);
1366 mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
1367 mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
1368 BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
1369 BNX2_EMAC_MODE_25G);
1371 mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
1372 REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
1373 bp->link_up = 1;
1374 return 0;
1377 static int
1378 bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int silent)
1380 int i;
1381 u32 val;
1383 bp->fw_wr_seq++;
1384 msg_data |= bp->fw_wr_seq;
1386 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);
1388 /* wait for an acknowledgement. */
1389 for (i = 0; i < (FW_ACK_TIME_OUT_MS / 10); i++) {
1390 msleep(10);
1392 val = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_MB);
1394 if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
1395 break;
1397 if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
1398 return 0;
1400 /* If we timed out, inform the firmware that this is the case. */
1401 if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
1402 if (!silent)
1403 printk(KERN_ERR PFX "fw sync timeout, reset code = "
1404 "%x\n", msg_data);
1406 msg_data &= ~BNX2_DRV_MSG_CODE;
1407 msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;
1409 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);
1411 return -EBUSY;
1414 if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
1415 return -EIO;
1417 return 0;
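/*
 * The mailbox handshake above works on sequence numbers: the driver
 * bumps fw_wr_seq, embeds it in the low bits of the DRV_MB word, and
 * polls FW_MB until the firmware echoes the same sequence in its ACK
 * field.  A hypothetical caller would look like:
 *
 *   rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 0);
 *
 * where -EBUSY means the firmware never acknowledged and -EIO means
 * it acknowledged with a non-OK status.
 */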
1420 static void
1421 bnx2_init_context(struct bnx2 *bp)
1423 u32 vcid;
1425 vcid = 96;
1426 while (vcid) {
1427 u32 vcid_addr, pcid_addr, offset;
1429 vcid--;
1431 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
1432 u32 new_vcid;
1434 vcid_addr = GET_PCID_ADDR(vcid);
1435 if (vcid & 0x8) {
1436 new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
1438 else {
1439 new_vcid = vcid;
1441 pcid_addr = GET_PCID_ADDR(new_vcid);
1443 else {
1444 vcid_addr = GET_CID_ADDR(vcid);
1445 pcid_addr = vcid_addr;
1448 REG_WR(bp, BNX2_CTX_VIRT_ADDR, 0x00);
1449 REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
1451 /* Zero out the context. */
1452 for (offset = 0; offset < PHY_CTX_SIZE; offset += 4) {
1453 CTX_WR(bp, 0x00, offset, 0);
1456 REG_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
1457 REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
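/*
 * On the 5706 A0 the context memory is remapped, so virtual CIDs with
 * bit 3 set are relocated: e.g. vcid 0x0b maps to
 * 0x60 + (0x0b & 0xf0) + (0x0b & 0x7) = 0x63, while vcid 0x07 (bit 3
 * clear) keeps its identity mapping.  Later chips use the CID address
 * directly for both the virtual address and the page table entry.
 */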
1461 static int
1462 bnx2_alloc_bad_rbuf(struct bnx2 *bp)
1464 u16 *good_mbuf;
1465 u32 good_mbuf_cnt;
1466 u32 val;
1468 good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
1469 if (good_mbuf == NULL) {
1470 printk(KERN_ERR PFX "Failed to allocate memory in "
1471 "bnx2_alloc_bad_rbuf\n");
1472 return -ENOMEM;
1475 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
1476 BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);
1478 good_mbuf_cnt = 0;
1480 /* Allocate a bunch of mbufs and save the good ones in an array. */
1481 val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
1482 while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
1483 REG_WR_IND(bp, BNX2_RBUF_COMMAND, BNX2_RBUF_COMMAND_ALLOC_REQ);
1485 val = REG_RD_IND(bp, BNX2_RBUF_FW_BUF_ALLOC);
1487 val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;
1489 /* The addresses with Bit 9 set are bad memory blocks. */
1490 if (!(val & (1 << 9))) {
1491 good_mbuf[good_mbuf_cnt] = (u16) val;
1492 good_mbuf_cnt++;
1495 val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
1498 /* Free the good ones back to the mbuf pool thus discarding
1499 * all the bad ones. */
1500 while (good_mbuf_cnt) {
1501 good_mbuf_cnt--;
1503 val = good_mbuf[good_mbuf_cnt];
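/* The free command appears to carry the mbuf handle in both bit
 * fields plus a valid bit, e.g. handle 0x12 becomes
 * (0x12 << 9) | 0x12 | 1 = 0x2413.
 */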
1504 val = (val << 9) | val | 1;
1506 REG_WR_IND(bp, BNX2_RBUF_FW_BUF_FREE, val);
1508 kfree(good_mbuf);
1509 return 0;
1512 static void
1513 bnx2_set_mac_addr(struct bnx2 *bp)
1515 u32 val;
1516 u8 *mac_addr = bp->dev->dev_addr;
1518 val = (mac_addr[0] << 8) | mac_addr[1];
1520 REG_WR(bp, BNX2_EMAC_MAC_MATCH0, val);
1522 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
1523 (mac_addr[4] << 8) | mac_addr[5];
1525 REG_WR(bp, BNX2_EMAC_MAC_MATCH1, val);
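/*
 * The MAC address is split across two match registers, high-order
 * bytes first: e.g. 00:10:18:2a:bc:de loads MATCH0 with 0x0010 and
 * MATCH1 with 0x182abcde.
 */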
1528 static inline int
1529 bnx2_alloc_rx_skb(struct bnx2 *bp, u16 index)
1531 struct sk_buff *skb;
1532 struct sw_bd *rx_buf = &bp->rx_buf_ring[index];
1533 dma_addr_t mapping;
1534 struct rx_bd *rxbd = &bp->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
1535 unsigned long align;
1537 skb = dev_alloc_skb(bp->rx_buf_size);
1538 if (skb == NULL) {
1539 return -ENOMEM;
1542 if (unlikely((align = (unsigned long) skb->data & 0x7))) {
1543 skb_reserve(skb, 8 - align);
1546 skb->dev = bp->dev;
1547 mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
1548 PCI_DMA_FROMDEVICE);
1550 rx_buf->skb = skb;
1551 pci_unmap_addr_set(rx_buf, mapping, mapping);
1553 rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
1554 rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
1556 bp->rx_prod_bseq += bp->rx_buf_use_size;
1558 return 0;
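/*
 * The 64-bit DMA address is split across the two 32-bit halves of the
 * rx BD: e.g. a mapping of 0x0000000123456780 stores 0x1 in
 * rx_bd_haddr_hi and 0x23456780 in rx_bd_haddr_lo.
 */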
1561 static void
1562 bnx2_phy_int(struct bnx2 *bp)
1564 u32 new_link_state, old_link_state;
1566 new_link_state = bp->status_blk->status_attn_bits &
1567 STATUS_ATTN_BITS_LINK_STATE;
1568 old_link_state = bp->status_blk->status_attn_bits_ack &
1569 STATUS_ATTN_BITS_LINK_STATE;
1570 if (new_link_state != old_link_state) {
1571 if (new_link_state) {
1572 REG_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD,
1573 STATUS_ATTN_BITS_LINK_STATE);
1575 else {
1576 REG_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD,
1577 STATUS_ATTN_BITS_LINK_STATE);
1579 bnx2_set_link(bp);
1583 static void
1584 bnx2_tx_int(struct bnx2 *bp)
1586 struct status_block *sblk = bp->status_blk;
1587 u16 hw_cons, sw_cons, sw_ring_cons;
1588 int tx_free_bd = 0;
1590 hw_cons = bp->hw_tx_cons = sblk->status_tx_quick_consumer_index0;
1591 if ((hw_cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT) {
1592 hw_cons++;
1594 sw_cons = bp->tx_cons;
1596 while (sw_cons != hw_cons) {
1597 struct sw_bd *tx_buf;
1598 struct sk_buff *skb;
1599 int i, last;
1601 sw_ring_cons = TX_RING_IDX(sw_cons);
1603 tx_buf = &bp->tx_buf_ring[sw_ring_cons];
1604 skb = tx_buf->skb;
1605 #ifdef BCM_TSO
1606 /* partial BD completions possible with TSO packets */
1607 if (skb_shinfo(skb)->tso_size) {
1608 u16 last_idx, last_ring_idx;
1610 last_idx = sw_cons +
1611 skb_shinfo(skb)->nr_frags + 1;
1612 last_ring_idx = sw_ring_cons +
1613 skb_shinfo(skb)->nr_frags + 1;
1614 if (unlikely(last_ring_idx >= MAX_TX_DESC_CNT)) {
1615 last_idx++;
1617 if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
1618 break;
1621 #endif
1622 pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
1623 skb_headlen(skb), PCI_DMA_TODEVICE);
1625 tx_buf->skb = NULL;
1626 last = skb_shinfo(skb)->nr_frags;
1628 for (i = 0; i < last; i++) {
1629 sw_cons = NEXT_TX_BD(sw_cons);
1631 pci_unmap_page(bp->pdev,
1632 pci_unmap_addr(
1633 &bp->tx_buf_ring[TX_RING_IDX(sw_cons)],
1634 mapping),
1635 skb_shinfo(skb)->frags[i].size,
1636 PCI_DMA_TODEVICE);
1639 sw_cons = NEXT_TX_BD(sw_cons);
1641 tx_free_bd += last + 1;
1643 dev_kfree_skb_irq(skb);
1645 hw_cons = bp->hw_tx_cons =
1646 sblk->status_tx_quick_consumer_index0;
1648 if ((hw_cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT) {
1649 hw_cons++;
1653 bp->tx_cons = sw_cons;
1655 if (unlikely(netif_queue_stopped(bp->dev))) {
1656 spin_lock(&bp->tx_lock);
1657 if ((netif_queue_stopped(bp->dev)) &&
1658 (bnx2_tx_avail(bp) > MAX_SKB_FRAGS)) {
1660 netif_wake_queue(bp->dev);
1662 spin_unlock(&bp->tx_lock);
1666 static inline void
1667 bnx2_reuse_rx_skb(struct bnx2 *bp, struct sk_buff *skb,
1668 u16 cons, u16 prod)
1670 struct sw_bd *cons_rx_buf, *prod_rx_buf;
1671 struct rx_bd *cons_bd, *prod_bd;
1673 cons_rx_buf = &bp->rx_buf_ring[cons];
1674 prod_rx_buf = &bp->rx_buf_ring[prod];
1676 pci_dma_sync_single_for_device(bp->pdev,
1677 pci_unmap_addr(cons_rx_buf, mapping),
1678 bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
1680 bp->rx_prod_bseq += bp->rx_buf_use_size;
1682 prod_rx_buf->skb = skb;
1684 if (cons == prod)
1685 return;
1687 pci_unmap_addr_set(prod_rx_buf, mapping,
1688 pci_unmap_addr(cons_rx_buf, mapping));
1690 cons_bd = &bp->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
1691 prod_bd = &bp->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
1692 prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
1693 prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
1696 static int
1697 bnx2_rx_int(struct bnx2 *bp, int budget)
1699 struct status_block *sblk = bp->status_blk;
1700 u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
1701 struct l2_fhdr *rx_hdr;
1702 int rx_pkt = 0;
1704 hw_cons = bp->hw_rx_cons = sblk->status_rx_quick_consumer_index0;
1705 if ((hw_cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT) {
1706 hw_cons++;
1708 sw_cons = bp->rx_cons;
1709 sw_prod = bp->rx_prod;
1711 /* Memory barrier necessary as speculative reads of the rx
1712 * buffer can be ahead of the index in the status block
1713 */
1714 rmb();
1715 while (sw_cons != hw_cons) {
1716 unsigned int len;
1717 u32 status;
1718 struct sw_bd *rx_buf;
1719 struct sk_buff *skb;
1720 dma_addr_t dma_addr;
1722 sw_ring_cons = RX_RING_IDX(sw_cons);
1723 sw_ring_prod = RX_RING_IDX(sw_prod);
1725 rx_buf = &bp->rx_buf_ring[sw_ring_cons];
1726 skb = rx_buf->skb;
1728 rx_buf->skb = NULL;
1730 dma_addr = pci_unmap_addr(rx_buf, mapping);
1732 pci_dma_sync_single_for_cpu(bp->pdev, dma_addr,
1733 bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
1735 rx_hdr = (struct l2_fhdr *) skb->data;
1736 len = rx_hdr->l2_fhdr_pkt_len - 4;
1738 if ((status = rx_hdr->l2_fhdr_status) &
1739 (L2_FHDR_ERRORS_BAD_CRC |
1740 L2_FHDR_ERRORS_PHY_DECODE |
1741 L2_FHDR_ERRORS_ALIGNMENT |
1742 L2_FHDR_ERRORS_TOO_SHORT |
1743 L2_FHDR_ERRORS_GIANT_FRAME)) {
1745 goto reuse_rx;
1748 /* Since we don't have a jumbo ring, copy small packets
1749 * if mtu > 1500
1750 */
1751 if ((bp->dev->mtu > 1500) && (len <= RX_COPY_THRESH)) {
1752 struct sk_buff *new_skb;
1754 new_skb = dev_alloc_skb(len + 2);
1755 if (new_skb == NULL)
1756 goto reuse_rx;
1758 /* aligned copy */
1759 memcpy(new_skb->data,
1760 skb->data + bp->rx_offset - 2,
1761 len + 2);
1763 skb_reserve(new_skb, 2);
1764 skb_put(new_skb, len);
1765 new_skb->dev = bp->dev;
1767 bnx2_reuse_rx_skb(bp, skb,
1768 sw_ring_cons, sw_ring_prod);
1770 skb = new_skb;
1772 else if (bnx2_alloc_rx_skb(bp, sw_ring_prod) == 0) {
1773 pci_unmap_single(bp->pdev, dma_addr,
1774 bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
1776 skb_reserve(skb, bp->rx_offset);
1777 skb_put(skb, len);
1779 else {
1780 reuse_rx:
1781 bnx2_reuse_rx_skb(bp, skb,
1782 sw_ring_cons, sw_ring_prod);
1783 goto next_rx;
1786 skb->protocol = eth_type_trans(skb, bp->dev);
1788 if ((len > (bp->dev->mtu + ETH_HLEN)) &&
1789 (htons(skb->protocol) != 0x8100)) {
1791 dev_kfree_skb_irq(skb);
1792 goto next_rx;
1796 skb->ip_summed = CHECKSUM_NONE;
1797 if (bp->rx_csum &&
1798 (status & (L2_FHDR_STATUS_TCP_SEGMENT |
1799 L2_FHDR_STATUS_UDP_DATAGRAM))) {
1801 if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
1802 L2_FHDR_ERRORS_UDP_XSUM)) == 0))
1803 skb->ip_summed = CHECKSUM_UNNECESSARY;
1806 #ifdef BCM_VLAN
1807 if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) && (bp->vlgrp != 0)) {
1808 vlan_hwaccel_receive_skb(skb, bp->vlgrp,
1809 rx_hdr->l2_fhdr_vlan_tag);
1811 else
1812 #endif
1813 netif_receive_skb(skb);
1815 bp->dev->last_rx = jiffies;
1816 rx_pkt++;
1818 next_rx:
1819 sw_cons = NEXT_RX_BD(sw_cons);
1820 sw_prod = NEXT_RX_BD(sw_prod);
1822 if ((rx_pkt == budget))
1823 break;
1825 /* Refresh hw_cons to see if there is new work */
1826 if (sw_cons == hw_cons) {
1827 hw_cons = bp->hw_rx_cons =
1828 sblk->status_rx_quick_consumer_index0;
1829 if ((hw_cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT)
1830 hw_cons++;
1831 rmb();
1834 bp->rx_cons = sw_cons;
1835 bp->rx_prod = sw_prod;
1837 REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, sw_prod);
1839 REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bp->rx_prod_bseq);
1841 mmiowb();
1843 return rx_pkt;
1847 /* MSI ISR - The only difference between this and the INTx ISR
1848 * is that the MSI interrupt is always serviced.
1849 */
1850 static irqreturn_t
1851 bnx2_msi(int irq, void *dev_instance, struct pt_regs *regs)
1853 struct net_device *dev = dev_instance;
1854 struct bnx2 *bp = netdev_priv(dev);
1856 prefetch(bp->status_blk);
1857 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
1858 BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
1859 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
1861 /* Return here if interrupt is disabled. */
1862 if (unlikely(atomic_read(&bp->intr_sem) != 0))
1863 return IRQ_HANDLED;
1865 netif_rx_schedule(dev);
1867 return IRQ_HANDLED;
1870 static irqreturn_t
1871 bnx2_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
1873 struct net_device *dev = dev_instance;
1874 struct bnx2 *bp = netdev_priv(dev);
1876 /* When using INTx, it is possible for the interrupt to arrive
1877 * at the CPU before the status block posted prior to the
1878 * interrupt. Reading a register will flush the status block.
1879 * When using MSI, the MSI message will always complete after
1880 * the status block write.
1881 */
1882 if ((bp->status_blk->status_idx == bp->last_status_idx) &&
1883 (REG_RD(bp, BNX2_PCICFG_MISC_STATUS) &
1884 BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
1885 return IRQ_NONE;
1887 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
1888 BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
1889 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
1891 /* Return here if interrupt is shared and is disabled. */
1892 if (unlikely(atomic_read(&bp->intr_sem) != 0))
1893 return IRQ_HANDLED;
1895 netif_rx_schedule(dev);
1897 return IRQ_HANDLED;
1900 static inline int
1901 bnx2_has_work(struct bnx2 *bp)
1903 struct status_block *sblk = bp->status_blk;
1905 if ((sblk->status_rx_quick_consumer_index0 != bp->hw_rx_cons) ||
1906 (sblk->status_tx_quick_consumer_index0 != bp->hw_tx_cons))
1907 return 1;
1909 if (((sblk->status_attn_bits & STATUS_ATTN_BITS_LINK_STATE) != 0) !=
1910 bp->link_up)
1911 return 1;
1913 return 0;
1916 static int
1917 bnx2_poll(struct net_device *dev, int *budget)
1919 struct bnx2 *bp = netdev_priv(dev);
1921 if ((bp->status_blk->status_attn_bits &
1922 STATUS_ATTN_BITS_LINK_STATE) !=
1923 (bp->status_blk->status_attn_bits_ack &
1924 STATUS_ATTN_BITS_LINK_STATE)) {
1926 spin_lock(&bp->phy_lock);
1927 bnx2_phy_int(bp);
1928 spin_unlock(&bp->phy_lock);
1931 if (bp->status_blk->status_tx_quick_consumer_index0 != bp->hw_tx_cons)
1932 bnx2_tx_int(bp);
1934 if (bp->status_blk->status_rx_quick_consumer_index0 != bp->hw_rx_cons) {
1935 int orig_budget = *budget;
1936 int work_done;
1938 if (orig_budget > dev->quota)
1939 orig_budget = dev->quota;
1941 work_done = bnx2_rx_int(bp, orig_budget);
1942 *budget -= work_done;
1943 dev->quota -= work_done;
1946 bp->last_status_idx = bp->status_blk->status_idx;
1947 rmb();
1949 if (!bnx2_has_work(bp)) {
1950 netif_rx_complete(dev);
1951 if (likely(bp->flags & USING_MSI_FLAG)) {
1952 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
1953 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
1954 bp->last_status_idx);
1955 return 0;
1957 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
1958 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
1959 BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
1960 bp->last_status_idx);
1962 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
1963 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
1964 bp->last_status_idx);
1965 return 0;
1968 return 1;
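/*
 * NAPI contract, as used above: when all work fits in the budget, the
 * poll routine calls netif_rx_complete(), re-enables the interrupt via
 * INT_ACK_CMD and returns 0; returning 1 keeps the device on the poll
 * list so bnx2_poll() is called again.  *budget and dev->quota are
 * both decremented by the number of rx packets actually processed.
 */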
1971 /* Called with rtnl_lock from vlan functions and also dev->xmit_lock
1972 * from set_multicast.
1973 */
1974 static void
1975 bnx2_set_rx_mode(struct net_device *dev)
1977 struct bnx2 *bp = netdev_priv(dev);
1978 u32 rx_mode, sort_mode;
1979 int i;
1981 spin_lock_bh(&bp->phy_lock);
1983 rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
1984 BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
1985 sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
1986 #ifdef BCM_VLAN
1987 if (!bp->vlgrp && !(bp->flags & ASF_ENABLE_FLAG))
1988 rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
1989 #else
1990 if (!(bp->flags & ASF_ENABLE_FLAG))
1991 rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
1992 #endif
1993 if (dev->flags & IFF_PROMISC) {
1994 /* Promiscuous mode. */
1995 rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
1996 sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN;
1998 else if (dev->flags & IFF_ALLMULTI) {
1999 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
2000 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
2001 0xffffffff);
2003 sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
2005 else {
2006 /* Accept one or more multicast(s). */
2007 struct dev_mc_list *mclist;
2008 u32 mc_filter[NUM_MC_HASH_REGISTERS];
2009 u32 regidx;
2010 u32 bit;
2011 u32 crc;
2013 memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);
2015 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
2016 i++, mclist = mclist->next) {
2018 crc = ether_crc_le(ETH_ALEN, mclist->dmi_addr);
2019 bit = crc & 0xff;
2020 regidx = (bit & 0xe0) >> 5;
2021 bit &= 0x1f;
2022 mc_filter[regidx] |= (1 << bit);
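/*
 * Hash bucket selection, worked through: the low 8 bits of the
 * little-endian CRC pick one of 256 filter bits spread over the 8
 * 32-bit hash registers.  E.g. crc & 0xff = 0x6b gives
 * regidx = (0x6b & 0xe0) >> 5 = 3 and bit = 0x6b & 0x1f = 0x0b, so
 * mc_filter[3] |= 1 << 11.
 */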
2025 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
2026 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
2027 mc_filter[i]);
2030 sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
2033 if (rx_mode != bp->rx_mode) {
2034 bp->rx_mode = rx_mode;
2035 REG_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
2038 REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
2039 REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
2040 REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);
2042 spin_unlock_bh(&bp->phy_lock);
2045 static void
2046 load_rv2p_fw(struct bnx2 *bp, u32 *rv2p_code, u32 rv2p_code_len,
2047 u32 rv2p_proc)
2049 int i;
2050 u32 val;
2053 for (i = 0; i < rv2p_code_len; i += 8) {
2054 REG_WR(bp, BNX2_RV2P_INSTR_HIGH, *rv2p_code);
2055 rv2p_code++;
2056 REG_WR(bp, BNX2_RV2P_INSTR_LOW, *rv2p_code);
2057 rv2p_code++;
2059 if (rv2p_proc == RV2P_PROC1) {
2060 val = (i / 8) | BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
2061 REG_WR(bp, BNX2_RV2P_PROC1_ADDR_CMD, val);
2063 else {
2064 val = (i / 8) | BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
2065 REG_WR(bp, BNX2_RV2P_PROC2_ADDR_CMD, val);
2069 /* Reset the processor, un-stall is done later. */
2070 if (rv2p_proc == RV2P_PROC1) {
2071 REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
2073 else {
2074 REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
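/*
 * Each RV2P instruction is 64 bits wide, written as a HIGH/LOW word
 * pair and then committed to instruction slot i/8 by the ADDR_CMD
 * write; hence the loop above advances i by 8 bytes per instruction.
 */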
2078 static void
2079 load_cpu_fw(struct bnx2 *bp, struct cpu_reg *cpu_reg, struct fw_info *fw)
2081 u32 offset;
2082 u32 val;
2084 /* Halt the CPU. */
2085 val = REG_RD_IND(bp, cpu_reg->mode);
2086 val |= cpu_reg->mode_value_halt;
2087 REG_WR_IND(bp, cpu_reg->mode, val);
2088 REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);
2090 /* Load the Text area. */
2091 offset = cpu_reg->spad_base + (fw->text_addr - cpu_reg->mips_view_base);
2092 if (fw->text) {
2093 int j;
2095 for (j = 0; j < (fw->text_len / 4); j++, offset += 4) {
2096 REG_WR_IND(bp, offset, fw->text[j]);
2100 /* Load the Data area. */
2101 offset = cpu_reg->spad_base + (fw->data_addr - cpu_reg->mips_view_base);
2102 if (fw->data) {
2103 int j;
2105 for (j = 0; j < (fw->data_len / 4); j++, offset += 4) {
2106 REG_WR_IND(bp, offset, fw->data[j]);
2110 /* Load the SBSS area. */
2111 offset = cpu_reg->spad_base + (fw->sbss_addr - cpu_reg->mips_view_base);
2112 if (fw->sbss) {
2113 int j;
2115 for (j = 0; j < (fw->sbss_len / 4); j++, offset += 4) {
2116 REG_WR_IND(bp, offset, fw->sbss[j]);
2120 /* Load the BSS area. */
2121 offset = cpu_reg->spad_base + (fw->bss_addr - cpu_reg->mips_view_base);
2122 if (fw->bss) {
2123 int j;
2125 for (j = 0; j < (fw->bss_len/4); j++, offset += 4) {
2126 REG_WR_IND(bp, offset, fw->bss[j]);
2130 /* Load the Read-Only area. */
2131 offset = cpu_reg->spad_base +
2132 (fw->rodata_addr - cpu_reg->mips_view_base);
2133 if (fw->rodata) {
2134 int j;
2136 for (j = 0; j < (fw->rodata_len / 4); j++, offset += 4) {
2137 REG_WR_IND(bp, offset, fw->rodata[j]);
2141 /* Clear the pre-fetch instruction. */
2142 REG_WR_IND(bp, cpu_reg->inst, 0);
2143 REG_WR_IND(bp, cpu_reg->pc, fw->start_addr);
2145 /* Start the CPU. */
2146 val = REG_RD_IND(bp, cpu_reg->mode);
2147 val &= ~cpu_reg->mode_value_halt;
2148 REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);
2149 REG_WR_IND(bp, cpu_reg->mode, val);
2152 static void
2153 bnx2_init_cpus(struct bnx2 *bp)
2155 struct cpu_reg cpu_reg;
2156 struct fw_info fw;
2158 /* Initialize the RV2P processor. */
2159 load_rv2p_fw(bp, bnx2_rv2p_proc1, sizeof(bnx2_rv2p_proc1), RV2P_PROC1);
2160 load_rv2p_fw(bp, bnx2_rv2p_proc2, sizeof(bnx2_rv2p_proc2), RV2P_PROC2);
2162 /* Initialize the RX Processor. */
2163 cpu_reg.mode = BNX2_RXP_CPU_MODE;
2164 cpu_reg.mode_value_halt = BNX2_RXP_CPU_MODE_SOFT_HALT;
2165 cpu_reg.mode_value_sstep = BNX2_RXP_CPU_MODE_STEP_ENA;
2166 cpu_reg.state = BNX2_RXP_CPU_STATE;
2167 cpu_reg.state_value_clear = 0xffffff;
2168 cpu_reg.gpr0 = BNX2_RXP_CPU_REG_FILE;
2169 cpu_reg.evmask = BNX2_RXP_CPU_EVENT_MASK;
2170 cpu_reg.pc = BNX2_RXP_CPU_PROGRAM_COUNTER;
2171 cpu_reg.inst = BNX2_RXP_CPU_INSTRUCTION;
2172 cpu_reg.bp = BNX2_RXP_CPU_HW_BREAKPOINT;
2173 cpu_reg.spad_base = BNX2_RXP_SCRATCH;
2174 cpu_reg.mips_view_base = 0x8000000;
2176 fw.ver_major = bnx2_RXP_b06FwReleaseMajor;
2177 fw.ver_minor = bnx2_RXP_b06FwReleaseMinor;
2178 fw.ver_fix = bnx2_RXP_b06FwReleaseFix;
2179 fw.start_addr = bnx2_RXP_b06FwStartAddr;
2181 fw.text_addr = bnx2_RXP_b06FwTextAddr;
2182 fw.text_len = bnx2_RXP_b06FwTextLen;
2183 fw.text_index = 0;
2184 fw.text = bnx2_RXP_b06FwText;
2186 fw.data_addr = bnx2_RXP_b06FwDataAddr;
2187 fw.data_len = bnx2_RXP_b06FwDataLen;
2188 fw.data_index = 0;
2189 fw.data = bnx2_RXP_b06FwData;
2191 fw.sbss_addr = bnx2_RXP_b06FwSbssAddr;
2192 fw.sbss_len = bnx2_RXP_b06FwSbssLen;
2193 fw.sbss_index = 0;
2194 fw.sbss = bnx2_RXP_b06FwSbss;
2196 fw.bss_addr = bnx2_RXP_b06FwBssAddr;
2197 fw.bss_len = bnx2_RXP_b06FwBssLen;
2198 fw.bss_index = 0;
2199 fw.bss = bnx2_RXP_b06FwBss;
2201 fw.rodata_addr = bnx2_RXP_b06FwRodataAddr;
2202 fw.rodata_len = bnx2_RXP_b06FwRodataLen;
2203 fw.rodata_index = 0;
2204 fw.rodata = bnx2_RXP_b06FwRodata;
2206 load_cpu_fw(bp, &cpu_reg, &fw);
2208 /* Initialize the TX Processor. */
2209 cpu_reg.mode = BNX2_TXP_CPU_MODE;
2210 cpu_reg.mode_value_halt = BNX2_TXP_CPU_MODE_SOFT_HALT;
2211 cpu_reg.mode_value_sstep = BNX2_TXP_CPU_MODE_STEP_ENA;
2212 cpu_reg.state = BNX2_TXP_CPU_STATE;
2213 cpu_reg.state_value_clear = 0xffffff;
2214 cpu_reg.gpr0 = BNX2_TXP_CPU_REG_FILE;
2215 cpu_reg.evmask = BNX2_TXP_CPU_EVENT_MASK;
2216 cpu_reg.pc = BNX2_TXP_CPU_PROGRAM_COUNTER;
2217 cpu_reg.inst = BNX2_TXP_CPU_INSTRUCTION;
2218 cpu_reg.bp = BNX2_TXP_CPU_HW_BREAKPOINT;
2219 cpu_reg.spad_base = BNX2_TXP_SCRATCH;
2220 cpu_reg.mips_view_base = 0x8000000;
2222 fw.ver_major = bnx2_TXP_b06FwReleaseMajor;
2223 fw.ver_minor = bnx2_TXP_b06FwReleaseMinor;
2224 fw.ver_fix = bnx2_TXP_b06FwReleaseFix;
2225 fw.start_addr = bnx2_TXP_b06FwStartAddr;
2227 fw.text_addr = bnx2_TXP_b06FwTextAddr;
2228 fw.text_len = bnx2_TXP_b06FwTextLen;
2229 fw.text_index = 0;
2230 fw.text = bnx2_TXP_b06FwText;
2232 fw.data_addr = bnx2_TXP_b06FwDataAddr;
2233 fw.data_len = bnx2_TXP_b06FwDataLen;
2234 fw.data_index = 0;
2235 fw.data = bnx2_TXP_b06FwData;
2237 fw.sbss_addr = bnx2_TXP_b06FwSbssAddr;
2238 fw.sbss_len = bnx2_TXP_b06FwSbssLen;
2239 fw.sbss_index = 0;
2240 fw.sbss = bnx2_TXP_b06FwSbss;
2242 fw.bss_addr = bnx2_TXP_b06FwBssAddr;
2243 fw.bss_len = bnx2_TXP_b06FwBssLen;
2244 fw.bss_index = 0;
2245 fw.bss = bnx2_TXP_b06FwBss;
2247 fw.rodata_addr = bnx2_TXP_b06FwRodataAddr;
2248 fw.rodata_len = bnx2_TXP_b06FwRodataLen;
2249 fw.rodata_index = 0;
2250 fw.rodata = bnx2_TXP_b06FwRodata;
2252 load_cpu_fw(bp, &cpu_reg, &fw);
2254 /* Initialize the TX Patch-up Processor. */
2255 cpu_reg.mode = BNX2_TPAT_CPU_MODE;
2256 cpu_reg.mode_value_halt = BNX2_TPAT_CPU_MODE_SOFT_HALT;
2257 cpu_reg.mode_value_sstep = BNX2_TPAT_CPU_MODE_STEP_ENA;
2258 cpu_reg.state = BNX2_TPAT_CPU_STATE;
2259 cpu_reg.state_value_clear = 0xffffff;
2260 cpu_reg.gpr0 = BNX2_TPAT_CPU_REG_FILE;
2261 cpu_reg.evmask = BNX2_TPAT_CPU_EVENT_MASK;
2262 cpu_reg.pc = BNX2_TPAT_CPU_PROGRAM_COUNTER;
2263 cpu_reg.inst = BNX2_TPAT_CPU_INSTRUCTION;
2264 cpu_reg.bp = BNX2_TPAT_CPU_HW_BREAKPOINT;
2265 cpu_reg.spad_base = BNX2_TPAT_SCRATCH;
2266 cpu_reg.mips_view_base = 0x8000000;
2268 fw.ver_major = bnx2_TPAT_b06FwReleaseMajor;
2269 fw.ver_minor = bnx2_TPAT_b06FwReleaseMinor;
2270 fw.ver_fix = bnx2_TPAT_b06FwReleaseFix;
2271 fw.start_addr = bnx2_TPAT_b06FwStartAddr;
2273 fw.text_addr = bnx2_TPAT_b06FwTextAddr;
2274 fw.text_len = bnx2_TPAT_b06FwTextLen;
2275 fw.text_index = 0;
2276 fw.text = bnx2_TPAT_b06FwText;
2278 fw.data_addr = bnx2_TPAT_b06FwDataAddr;
2279 fw.data_len = bnx2_TPAT_b06FwDataLen;
2280 fw.data_index = 0;
2281 fw.data = bnx2_TPAT_b06FwData;
2283 fw.sbss_addr = bnx2_TPAT_b06FwSbssAddr;
2284 fw.sbss_len = bnx2_TPAT_b06FwSbssLen;
2285 fw.sbss_index = 0;
2286 fw.sbss = bnx2_TPAT_b06FwSbss;
2288 fw.bss_addr = bnx2_TPAT_b06FwBssAddr;
2289 fw.bss_len = bnx2_TPAT_b06FwBssLen;
2290 fw.bss_index = 0;
2291 fw.bss = bnx2_TPAT_b06FwBss;
2293 fw.rodata_addr = bnx2_TPAT_b06FwRodataAddr;
2294 fw.rodata_len = bnx2_TPAT_b06FwRodataLen;
2295 fw.rodata_index = 0;
2296 fw.rodata = bnx2_TPAT_b06FwRodata;
2298 load_cpu_fw(bp, &cpu_reg, &fw);
2300 /* Initialize the Completion Processor. */
2301 cpu_reg.mode = BNX2_COM_CPU_MODE;
2302 cpu_reg.mode_value_halt = BNX2_COM_CPU_MODE_SOFT_HALT;
2303 cpu_reg.mode_value_sstep = BNX2_COM_CPU_MODE_STEP_ENA;
2304 cpu_reg.state = BNX2_COM_CPU_STATE;
2305 cpu_reg.state_value_clear = 0xffffff;
2306 cpu_reg.gpr0 = BNX2_COM_CPU_REG_FILE;
2307 cpu_reg.evmask = BNX2_COM_CPU_EVENT_MASK;
2308 cpu_reg.pc = BNX2_COM_CPU_PROGRAM_COUNTER;
2309 cpu_reg.inst = BNX2_COM_CPU_INSTRUCTION;
2310 cpu_reg.bp = BNX2_COM_CPU_HW_BREAKPOINT;
2311 cpu_reg.spad_base = BNX2_COM_SCRATCH;
2312 cpu_reg.mips_view_base = 0x8000000;
2314 fw.ver_major = bnx2_COM_b06FwReleaseMajor;
2315 fw.ver_minor = bnx2_COM_b06FwReleaseMinor;
2316 fw.ver_fix = bnx2_COM_b06FwReleaseFix;
2317 fw.start_addr = bnx2_COM_b06FwStartAddr;
2319 fw.text_addr = bnx2_COM_b06FwTextAddr;
2320 fw.text_len = bnx2_COM_b06FwTextLen;
2321 fw.text_index = 0;
2322 fw.text = bnx2_COM_b06FwText;
2324 fw.data_addr = bnx2_COM_b06FwDataAddr;
2325 fw.data_len = bnx2_COM_b06FwDataLen;
2326 fw.data_index = 0;
2327 fw.data = bnx2_COM_b06FwData;
2329 fw.sbss_addr = bnx2_COM_b06FwSbssAddr;
2330 fw.sbss_len = bnx2_COM_b06FwSbssLen;
2331 fw.sbss_index = 0;
2332 fw.sbss = bnx2_COM_b06FwSbss;
2334 fw.bss_addr = bnx2_COM_b06FwBssAddr;
2335 fw.bss_len = bnx2_COM_b06FwBssLen;
2336 fw.bss_index = 0;
2337 fw.bss = bnx2_COM_b06FwBss;
2339 fw.rodata_addr = bnx2_COM_b06FwRodataAddr;
2340 fw.rodata_len = bnx2_COM_b06FwRodataLen;
2341 fw.rodata_index = 0;
2342 fw.rodata = bnx2_COM_b06FwRodata;
2344 load_cpu_fw(bp, &cpu_reg, &fw);
2345 }
2348 static int
2349 bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
2350 {
2351 u16 pmcsr;
2353 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
2355 switch (state) {
2356 case PCI_D0: {
2357 u32 val;
2359 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
2360 (pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
2361 PCI_PM_CTRL_PME_STATUS);
2363 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
2364 /* delay required during transition out of D3hot */
2365 msleep(20);
2367 val = REG_RD(bp, BNX2_EMAC_MODE);
2368 val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
2369 val &= ~BNX2_EMAC_MODE_MPKT;
2370 REG_WR(bp, BNX2_EMAC_MODE, val);
2372 val = REG_RD(bp, BNX2_RPM_CONFIG);
2373 val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
2374 REG_WR(bp, BNX2_RPM_CONFIG, val);
2375 break;
2376 }
2377 case PCI_D3hot: {
2378 int i;
2379 u32 val, wol_msg;
2381 if (bp->wol) {
2382 u32 advertising;
2383 u8 autoneg;
2385 autoneg = bp->autoneg;
2386 advertising = bp->advertising;
2388 bp->autoneg = AUTONEG_SPEED;
2389 bp->advertising = ADVERTISED_10baseT_Half |
2390 ADVERTISED_10baseT_Full |
2391 ADVERTISED_100baseT_Half |
2392 ADVERTISED_100baseT_Full |
2393 ADVERTISED_Autoneg;
2395 bnx2_setup_copper_phy(bp);
2397 bp->autoneg = autoneg;
2398 bp->advertising = advertising;
2400 bnx2_set_mac_addr(bp);
2402 val = REG_RD(bp, BNX2_EMAC_MODE);
2404 /* Enable port mode. */
2405 val &= ~BNX2_EMAC_MODE_PORT;
2406 val |= BNX2_EMAC_MODE_PORT_MII |
2407 BNX2_EMAC_MODE_MPKT_RCVD |
2408 BNX2_EMAC_MODE_ACPI_RCVD |
2409 BNX2_EMAC_MODE_MPKT;
2411 REG_WR(bp, BNX2_EMAC_MODE, val);
2413 /* receive all multicast */
2414 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
2415 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
2416 0xffffffff);
2417 }
2418 REG_WR(bp, BNX2_EMAC_RX_MODE,
2419 BNX2_EMAC_RX_MODE_SORT_MODE);
2421 val = 1 | BNX2_RPM_SORT_USER0_BC_EN |
2422 BNX2_RPM_SORT_USER0_MC_EN;
2423 REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
2424 REG_WR(bp, BNX2_RPM_SORT_USER0, val);
2425 REG_WR(bp, BNX2_RPM_SORT_USER0, val |
2426 BNX2_RPM_SORT_USER0_ENA);
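/* The sort-user register is programmed with a clear-load-enable
 * sequence: zero it, write the BC/MC sort rules, then write the same
 * value again with the enable bit set.  The ordering appears to be a
 * hardware convention carried over from Broadcom's reference code; it
 * is not documented in this file. */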
2428 /* Need to enable EMAC and RPM for WOL. */
2429 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
2430 BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
2431 BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
2432 BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);
2434 val = REG_RD(bp, BNX2_RPM_CONFIG);
2435 val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
2436 REG_WR(bp, BNX2_RPM_CONFIG, val);
2438 wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
2439 }
2440 else {
2441 wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
2442 }
2444 if (!(bp->flags & NO_WOL_FLAG))
2445 bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg, 0);
2447 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
2448 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
2449 (CHIP_ID(bp) == CHIP_ID_5706_A1)) {
2451 if (bp->wol)
2452 pmcsr |= 3;
2453 }
2454 else {
2455 pmcsr |= 3;
2456 }
2457 if (bp->wol) {
2458 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
2459 }
2460 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
2461 pmcsr);
2463 /* No more memory access after this point until
2464 * device is brought back to D0.
2465 */
2466 udelay(50);
2467 break;
2468 }
2469 default:
2470 return -EINVAL;
2471 }
2472 return 0;
2473 }
2475 static int
2476 bnx2_acquire_nvram_lock(struct bnx2 *bp)
2477 {
2478 u32 val;
2479 int j;
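/* NVRAM access is shared with the bootcode, so it is arbitrated in
 * hardware: the driver requests grant 2 via NVM_SW_ARB and then polls
 * (up to NVRAM_TIMEOUT_COUNT passes of 5 usec) until the ARB2 status
 * bit confirms ownership. */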
2481 /* Request access to the flash interface. */
2482 REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
2483 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2484 val = REG_RD(bp, BNX2_NVM_SW_ARB);
2485 if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
2486 break;
2488 udelay(5);
2489 }
2491 if (j >= NVRAM_TIMEOUT_COUNT)
2492 return -EBUSY;
2494 return 0;
2495 }
2497 static int
2498 bnx2_release_nvram_lock(struct bnx2 *bp)
2499 {
2500 int j;
2501 u32 val;
2503 /* Relinquish nvram interface. */
2504 REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);
2506 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2507 val = REG_RD(bp, BNX2_NVM_SW_ARB);
2508 if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
2509 break;
2511 udelay(5);
2512 }
2514 if (j >= NVRAM_TIMEOUT_COUNT)
2515 return -EBUSY;
2517 return 0;
2518 }
2521 static int
2522 bnx2_enable_nvram_write(struct bnx2 *bp)
2523 {
2524 u32 val;
2526 val = REG_RD(bp, BNX2_MISC_CFG);
2527 REG_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);
2529 if (!bp->flash_info->buffered) {
2530 int j;
2532 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
2533 REG_WR(bp, BNX2_NVM_COMMAND,
2534 BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);
2536 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2537 udelay(5);
2539 val = REG_RD(bp, BNX2_NVM_COMMAND);
2540 if (val & BNX2_NVM_COMMAND_DONE)
2541 break;
2542 }
2544 if (j >= NVRAM_TIMEOUT_COUNT)
2545 return -EBUSY;
2546 }
2547 return 0;
2548 }
2550 static void
2551 bnx2_disable_nvram_write(struct bnx2 *bp)
2552 {
2553 u32 val;
2555 val = REG_RD(bp, BNX2_MISC_CFG);
2556 REG_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
2557 }
2560 static void
2561 bnx2_enable_nvram_access(struct bnx2 *bp)
2562 {
2563 u32 val;
2565 val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
2566 /* Enable both bits, even on read. */
2567 REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
2568 val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
2569 }
2571 static void
2572 bnx2_disable_nvram_access(struct bnx2 *bp)
2573 {
2574 u32 val;
2576 val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
2577 /* Disable both bits, even after read. */
2578 REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
2579 val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
2580 BNX2_NVM_ACCESS_ENABLE_WR_EN));
2581 }
2583 static int
2584 bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
2585 {
2586 u32 cmd;
2587 int j;
2589 if (bp->flash_info->buffered)
2590 /* Buffered flash, no erase needed */
2591 return 0;
2593 /* Build an erase command */
2594 cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
2595 BNX2_NVM_COMMAND_DOIT;
2597 /* Need to clear DONE bit separately. */
2598 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
2600 /* Address of the NVRAM to read from. */
2601 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
2603 /* Issue an erase command. */
2604 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
2606 /* Wait for completion. */
2607 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2608 u32 val;
2610 udelay(5);
2612 val = REG_RD(bp, BNX2_NVM_COMMAND);
2613 if (val & BNX2_NVM_COMMAND_DONE)
2614 break;
2615 }
2617 if (j >= NVRAM_TIMEOUT_COUNT)
2618 return -EBUSY;
2620 return 0;
2621 }
2623 static int
2624 bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
2625 {
2626 u32 cmd;
2627 int j;
2629 /* Build the command word. */
2630 cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;
2632 /* Calculate an offset of a buffered flash. */
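/* Buffered parts are addressed as (page number << page_bits) plus the
 * byte offset within the page, so the linear offset supplied by the
 * caller is split into that page/byte pair here before being written
 * to NVM_ADDR. */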
2633 if (bp->flash_info->buffered) {
2634 offset = ((offset / bp->flash_info->page_size) <<
2635 bp->flash_info->page_bits) +
2636 (offset % bp->flash_info->page_size);
2637 }
2639 /* Need to clear DONE bit separately. */
2640 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
2642 /* Address of the NVRAM to read from. */
2643 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
2645 /* Issue a read command. */
2646 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
2648 /* Wait for completion. */
2649 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2650 u32 val;
2652 udelay(5);
2654 val = REG_RD(bp, BNX2_NVM_COMMAND);
2655 if (val & BNX2_NVM_COMMAND_DONE) {
2656 val = REG_RD(bp, BNX2_NVM_READ);
2658 val = be32_to_cpu(val);
2659 memcpy(ret_val, &val, 4);
2660 break;
2661 }
2662 }
2663 if (j >= NVRAM_TIMEOUT_COUNT)
2664 return -EBUSY;
2666 return 0;
2667 }
2670 static int
2671 bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
2672 {
2673 u32 cmd, val32;
2674 int j;
2676 /* Build the command word. */
2677 cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;
2679 /* Calculate an offset of a buffered flash. */
2680 if (bp->flash_info->buffered) {
2681 offset = ((offset / bp->flash_info->page_size) <<
2682 bp->flash_info->page_bits) +
2683 (offset % bp->flash_info->page_size);
2684 }
2686 /* Need to clear DONE bit separately. */
2687 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
2689 memcpy(&val32, val, 4);
2690 val32 = cpu_to_be32(val32);
2692 /* Write the data. */
2693 REG_WR(bp, BNX2_NVM_WRITE, val32);
2695 /* Address of the NVRAM to write to. */
2696 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
2698 /* Issue the write command. */
2699 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
2701 /* Wait for completion. */
2702 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2703 udelay(5);
2705 if (REG_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
2706 break;
2707 }
2708 if (j >= NVRAM_TIMEOUT_COUNT)
2709 return -EBUSY;
2711 return 0;
2712 }
2714 static int
2715 bnx2_init_nvram(struct bnx2 *bp)
2716 {
2717 u32 val;
2718 int j, entry_count, rc;
2719 struct flash_spec *flash;
2721 /* Determine the selected interface. */
2722 val = REG_RD(bp, BNX2_NVM_CFG1);
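/* NVM_CFG1 reflects the strapping pins that identify the attached
 * flash/EEPROM part.  The strap value is matched against the entries
 * of flash_table[] (defined near the top of this file) to pick the
 * timings, page geometry and write method of the fitted part. */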
2724 entry_count = sizeof(flash_table) / sizeof(struct flash_spec);
2726 rc = 0;
2727 if (val & 0x40000000) {
2729 /* Flash interface has been reconfigured */
2730 for (j = 0, flash = &flash_table[0]; j < entry_count;
2731 j++, flash++) {
2732 if ((val & FLASH_BACKUP_STRAP_MASK) ==
2733 (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
2734 bp->flash_info = flash;
2735 break;
2736 }
2737 }
2738 }
2739 else {
2740 u32 mask;
2741 /* Not yet been reconfigured */
2743 if (val & (1 << 23))
2744 mask = FLASH_BACKUP_STRAP_MASK;
2745 else
2746 mask = FLASH_STRAP_MASK;
2748 for (j = 0, flash = &flash_table[0]; j < entry_count;
2749 j++, flash++) {
2751 if ((val & mask) == (flash->strapping & mask)) {
2752 bp->flash_info = flash;
2754 /* Request access to the flash interface. */
2755 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
2756 return rc;
2758 /* Enable access to flash interface */
2759 bnx2_enable_nvram_access(bp);
2761 /* Reconfigure the flash interface */
2762 REG_WR(bp, BNX2_NVM_CFG1, flash->config1);
2763 REG_WR(bp, BNX2_NVM_CFG2, flash->config2);
2764 REG_WR(bp, BNX2_NVM_CFG3, flash->config3);
2765 REG_WR(bp, BNX2_NVM_WRITE1, flash->write1);
2767 /* Disable access to flash interface */
2768 bnx2_disable_nvram_access(bp);
2769 bnx2_release_nvram_lock(bp);
2771 break;
2772 }
2773 }
2774 } /* if (val & 0x40000000) */
2776 if (j == entry_count) {
2777 bp->flash_info = NULL;
2778 printk(KERN_ALERT PFX "Unknown flash/EEPROM type.\n");
2779 return -ENODEV;
2780 }
2782 val = REG_RD_IND(bp, bp->shmem_base + BNX2_SHARED_HW_CFG_CONFIG2);
2783 val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
2784 if (val)
2785 bp->flash_size = val;
2786 else
2787 bp->flash_size = bp->flash_info->total_size;
2789 return rc;
2790 }
2792 static int
2793 bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
2794 int buf_size)
2795 {
2796 int rc = 0;
2797 u32 cmd_flags, offset32, len32, extra;
2799 if (buf_size == 0)
2800 return 0;
2802 /* Request access to the flash interface. */
2803 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
2804 return rc;
2806 /* Enable access to flash interface */
2807 bnx2_enable_nvram_access(bp);
2809 len32 = buf_size;
2810 offset32 = offset;
2811 extra = 0;
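/* The NVRAM interface transfers whole dwords, so an arbitrary
 * (offset, length) read is split into three phases: a leading partial
 * dword, a run of aligned dwords copied straight into ret_buf, and a
 * trailing partial dword.  The FIRST/LAST command flags bracket the
 * whole sequence so the controller treats it as one access. */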
2813 cmd_flags = 0;
2815 if (offset32 & 3) {
2816 u8 buf[4];
2817 u32 pre_len;
2819 offset32 &= ~3;
2820 pre_len = 4 - (offset & 3);
2822 if (pre_len >= len32) {
2823 pre_len = len32;
2824 cmd_flags = BNX2_NVM_COMMAND_FIRST |
2825 BNX2_NVM_COMMAND_LAST;
2826 }
2827 else {
2828 cmd_flags = BNX2_NVM_COMMAND_FIRST;
2829 }
2831 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
2833 if (rc)
2834 return rc;
2836 memcpy(ret_buf, buf + (offset & 3), pre_len);
2838 offset32 += 4;
2839 ret_buf += pre_len;
2840 len32 -= pre_len;
2841 }
2842 if (len32 & 3) {
2843 extra = 4 - (len32 & 3);
2844 len32 = (len32 + 4) & ~3;
2845 }
2847 if (len32 == 4) {
2848 u8 buf[4];
2850 if (cmd_flags)
2851 cmd_flags = BNX2_NVM_COMMAND_LAST;
2852 else
2853 cmd_flags = BNX2_NVM_COMMAND_FIRST |
2854 BNX2_NVM_COMMAND_LAST;
2856 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
2858 memcpy(ret_buf, buf, 4 - extra);
2859 }
2860 else if (len32 > 0) {
2861 u8 buf[4];
2863 /* Read the first word. */
2864 if (cmd_flags)
2865 cmd_flags = 0;
2866 else
2867 cmd_flags = BNX2_NVM_COMMAND_FIRST;
2869 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);
2871 /* Advance to the next dword. */
2872 offset32 += 4;
2873 ret_buf += 4;
2874 len32 -= 4;
2876 while (len32 > 4 && rc == 0) {
2877 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);
2879 /* Advance to the next dword. */
2880 offset32 += 4;
2881 ret_buf += 4;
2882 len32 -= 4;
2883 }
2885 if (rc)
2886 return rc;
2888 cmd_flags = BNX2_NVM_COMMAND_LAST;
2889 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
2891 memcpy(ret_buf, buf, 4 - extra);
2892 }
2894 /* Disable access to flash interface */
2895 bnx2_disable_nvram_access(bp);
2897 bnx2_release_nvram_lock(bp);
2899 return rc;
2900 }
2902 static int
2903 bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
2904 int buf_size)
2905 {
2906 u32 written, offset32, len32;
2907 u8 *buf, start[4], end[4];
2908 int rc = 0;
2909 int align_start, align_end;
2911 buf = data_buf;
2912 offset32 = offset;
2913 len32 = buf_size;
2914 align_start = align_end = 0;
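/* Writes use the same dword granularity, handled as read-modify-write:
 * if either end of the request is unaligned, the bordering dwords are
 * read first and merged with the caller's data into a temporary buffer
 * so that only whole dwords are ever programmed. */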
2916 if ((align_start = (offset32 & 3))) {
2917 offset32 &= ~3;
2918 len32 += align_start;
2919 if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
2920 return rc;
2921 }
2923 if (len32 & 3) {
2924 if ((len32 > 4) || !align_start) {
2925 align_end = 4 - (len32 & 3);
2926 len32 += align_end;
2927 if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4,
2928 end, 4))) {
2929 return rc;
2930 }
2931 }
2932 }
2934 if (align_start || align_end) {
2935 buf = kmalloc(len32, GFP_KERNEL);
2936 if (buf == NULL)
2937 return -ENOMEM;
2938 if (align_start) {
2939 memcpy(buf, start, 4);
2940 }
2941 if (align_end) {
2942 memcpy(buf + len32 - 4, end, 4);
2943 }
2944 memcpy(buf + align_start, data_buf, buf_size);
2945 }
2947 written = 0;
2948 while ((written < len32) && (rc == 0)) {
2949 u32 page_start, page_end, data_start, data_end;
2950 u32 addr, cmd_flags;
2951 int i;
2952 u8 flash_buffer[264];
2954 /* Find the page_start addr */
2955 page_start = offset32 + written;
2956 page_start -= (page_start % bp->flash_info->page_size);
2957 /* Find the page_end addr */
2958 page_end = page_start + bp->flash_info->page_size;
2959 /* Find the data_start addr */
2960 data_start = (written == 0) ? offset32 : page_start;
2961 /* Find the data_end addr */
2962 data_end = (page_end > offset32 + len32) ?
2963 (offset32 + len32) : page_end;
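/* Each iteration handles one flash page: for non-buffered parts the
 * page is read into flash_buffer[], erased, and rewritten with the old
 * data outside [data_start, data_end) preserved around the new bytes.
 * Buffered parts skip the erase and the save/restore. */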
2965 /* Request access to the flash interface. */
2966 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
2967 goto nvram_write_end;
2969 /* Enable access to flash interface */
2970 bnx2_enable_nvram_access(bp);
2972 cmd_flags = BNX2_NVM_COMMAND_FIRST;
2973 if (bp->flash_info->buffered == 0) {
2974 int j;
2976 /* Read the whole page into the buffer
2977 * (non-buffer flash only) */
2978 for (j = 0; j < bp->flash_info->page_size; j += 4) {
2979 if (j == (bp->flash_info->page_size - 4)) {
2980 cmd_flags |= BNX2_NVM_COMMAND_LAST;
2981 }
2982 rc = bnx2_nvram_read_dword(bp,
2983 page_start + j,
2984 &flash_buffer[j],
2985 cmd_flags);
2987 if (rc)
2988 goto nvram_write_end;
2990 cmd_flags = 0;
2991 }
2992 }
2994 /* Enable writes to flash interface (unlock write-protect) */
2995 if ((rc = bnx2_enable_nvram_write(bp)) != 0)
2996 goto nvram_write_end;
2998 /* Erase the page */
2999 if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
3000 goto nvram_write_end;
3002 /* Re-enable the write again for the actual write */
3003 bnx2_enable_nvram_write(bp);
3005 /* Loop to write back the buffer data from page_start to
3006 * data_start */
3007 i = 0;
3008 if (bp->flash_info->buffered == 0) {
3009 for (addr = page_start; addr < data_start;
3010 addr += 4, i += 4) {
3012 rc = bnx2_nvram_write_dword(bp, addr,
3013 &flash_buffer[i], cmd_flags);
3015 if (rc != 0)
3016 goto nvram_write_end;
3018 cmd_flags = 0;
3019 }
3020 }
3022 /* Loop to write the new data from data_start to data_end */
3023 for (addr = data_start; addr < data_end; addr += 4, i++) {
3024 if ((addr == page_end - 4) ||
3025 ((bp->flash_info->buffered) &&
3026 (addr == data_end - 4))) {
3028 cmd_flags |= BNX2_NVM_COMMAND_LAST;
3029 }
3030 rc = bnx2_nvram_write_dword(bp, addr, buf,
3031 cmd_flags);
3033 if (rc != 0)
3034 goto nvram_write_end;
3036 cmd_flags = 0;
3037 buf += 4;
3038 }
3040 /* Loop to write back the buffer data from data_end
3041 * to page_end */
3042 if (bp->flash_info->buffered == 0) {
3043 for (addr = data_end; addr < page_end;
3044 addr += 4, i += 4) {
3046 if (addr == page_end-4) {
3047 cmd_flags = BNX2_NVM_COMMAND_LAST;
3048 }
3049 rc = bnx2_nvram_write_dword(bp, addr,
3050 &flash_buffer[i], cmd_flags);
3052 if (rc != 0)
3053 goto nvram_write_end;
3055 cmd_flags = 0;
3056 }
3057 }
3059 /* Disable writes to flash interface (lock write-protect) */
3060 bnx2_disable_nvram_write(bp);
3062 /* Disable access to flash interface */
3063 bnx2_disable_nvram_access(bp);
3064 bnx2_release_nvram_lock(bp);
3066 /* Increment written */
3067 written += data_end - data_start;
3068 }
3070 nvram_write_end:
3071 if (align_start || align_end)
3072 kfree(buf);
3073 return rc;
3074 }
3076 static int
3077 bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
3078 {
3079 u32 val;
3080 int i, rc = 0;
3082 /* Wait for the current PCI transaction to complete before
3083 * issuing a reset. */
3084 REG_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
3085 BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
3086 BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
3087 BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
3088 BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
3089 val = REG_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
3090 udelay(5);
3092 /* Wait for the firmware to tell us it is ok to issue a reset. */
3093 bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1);
3095 /* Deposit a driver reset signature so the firmware knows that
3096 * this is a soft reset. */
3097 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_RESET_SIGNATURE,
3098 BNX2_DRV_RESET_SIGNATURE_MAGIC);
3100 /* Do a dummy read to force the chip to complete all current
3101 * transactions before we issue a reset. */
3102 val = REG_RD(bp, BNX2_MISC_ID);
3104 val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3105 BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
3106 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
3108 /* Chip reset. */
3109 REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);
3111 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
3112 (CHIP_ID(bp) == CHIP_ID_5706_A1))
3113 msleep(15);
3115 /* Reset takes approximately 30 usec */
3116 for (i = 0; i < 10; i++) {
3117 val = REG_RD(bp, BNX2_PCICFG_MISC_CONFIG);
3118 if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3119 BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0) {
3120 break;
3121 }
3122 udelay(10);
3123 }
3125 if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3126 BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
3127 printk(KERN_ERR PFX "Chip reset did not complete\n");
3128 return -EBUSY;
3129 }
3131 /* Make sure byte swapping is properly configured. */
3132 val = REG_RD(bp, BNX2_PCI_SWAP_DIAG0);
3133 if (val != 0x01020304) {
3134 printk(KERN_ERR PFX "Chip not in correct endian mode\n");
3135 return -ENODEV;
3136 }
3138 /* Wait for the firmware to finish its initialization. */
3139 rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 0);
3140 if (rc)
3141 return rc;
3143 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
3144 /* Adjust the voltage regulator to two steps lower. The default
3145 * of this register is 0x0000000e. */
3146 REG_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);
3148 /* Remove bad rbuf memory from the free pool. */
3149 rc = bnx2_alloc_bad_rbuf(bp);
3150 }
3152 return rc;
3153 }
3155 static int
3156 bnx2_init_chip(struct bnx2 *bp)
3157 {
3158 u32 val;
3159 int rc;
3161 /* Make sure the interrupt is not active. */
3162 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
3164 val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
3165 BNX2_DMA_CONFIG_DATA_WORD_SWAP |
3166 #ifdef __BIG_ENDIAN
3167 BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
3168 #endif
3169 BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
3170 DMA_READ_CHANS << 12 |
3171 DMA_WRITE_CHANS << 16;
3173 val |= (0x2 << 20) | (1 << 11);
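/* The (0x2 << 20) | (1 << 11) bits are undocumented DMA_CONFIG tuning
 * values carried over from Broadcom's reference code; their exact
 * meaning is not spelled out in the public register definitions. */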
3175 if ((bp->flags & PCIX_FLAG) && (bp->bus_speed_mhz == 133))
3176 val |= (1 << 23);
3178 if ((CHIP_NUM(bp) == CHIP_NUM_5706) &&
3179 (CHIP_ID(bp) != CHIP_ID_5706_A0) && !(bp->flags & PCIX_FLAG))
3180 val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;
3182 REG_WR(bp, BNX2_DMA_CONFIG, val);
3184 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
3185 val = REG_RD(bp, BNX2_TDMA_CONFIG);
3186 val |= BNX2_TDMA_CONFIG_ONE_DMA;
3187 REG_WR(bp, BNX2_TDMA_CONFIG, val);
3188 }
3190 if (bp->flags & PCIX_FLAG) {
3191 u16 val16;
3193 pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
3194 &val16);
3195 pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
3196 val16 & ~PCI_X_CMD_ERO);
3197 }
3199 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
3200 BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
3201 BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
3202 BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);
3204 /* Initialize context mapping and zero out the quick contexts. The
3205 * context block must have already been enabled. */
3206 bnx2_init_context(bp);
3208 bnx2_init_cpus(bp);
3209 bnx2_init_nvram(bp);
3211 bnx2_set_mac_addr(bp);
3213 val = REG_RD(bp, BNX2_MQ_CONFIG);
3214 val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
3215 val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
3216 REG_WR(bp, BNX2_MQ_CONFIG, val);
3218 val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
3219 REG_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
3220 REG_WR(bp, BNX2_MQ_KNL_WIND_END, val);
3222 val = (BCM_PAGE_BITS - 8) << 24;
3223 REG_WR(bp, BNX2_RV2P_CONFIG, val);
3225 /* Configure page size. */
3226 val = REG_RD(bp, BNX2_TBDR_CONFIG);
3227 val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
3228 val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
3229 REG_WR(bp, BNX2_TBDR_CONFIG, val);
3231 val = bp->mac_addr[0] +
3232 (bp->mac_addr[1] << 8) +
3233 (bp->mac_addr[2] << 16) +
3234 bp->mac_addr[3] +
3235 (bp->mac_addr[4] << 8) +
3236 (bp->mac_addr[5] << 16);
3237 REG_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);
3239 /* Program the MTU. Also include 4 bytes for CRC32. */
3240 val = bp->dev->mtu + ETH_HLEN + 4;
3241 if (val > (MAX_ETHERNET_PACKET_SIZE + 4))
3242 val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
3243 REG_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);
3245 bp->last_status_idx = 0;
3246 bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE;
3248 /* Set up how to generate a link change interrupt. */
3249 REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
3251 REG_WR(bp, BNX2_HC_STATUS_ADDR_L,
3252 (u64) bp->status_blk_mapping & 0xffffffff);
3253 REG_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);
3255 REG_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
3256 (u64) bp->stats_blk_mapping & 0xffffffff);
3257 REG_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
3258 (u64) bp->stats_blk_mapping >> 32);
3260 REG_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
3261 (bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);
3263 REG_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
3264 (bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);
3266 REG_WR(bp, BNX2_HC_COMP_PROD_TRIP,
3267 (bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);
3269 REG_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);
3271 REG_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);
3273 REG_WR(bp, BNX2_HC_COM_TICKS,
3274 (bp->com_ticks_int << 16) | bp->com_ticks);
3276 REG_WR(bp, BNX2_HC_CMD_TICKS,
3277 (bp->cmd_ticks_int << 16) | bp->cmd_ticks);
3279 REG_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks & 0xffff00);
3280 REG_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8); /* 3ms */
3282 if (CHIP_ID(bp) == CHIP_ID_5706_A1)
3283 REG_WR(bp, BNX2_HC_CONFIG, BNX2_HC_CONFIG_COLLECT_STATS);
3284 else {
3285 REG_WR(bp, BNX2_HC_CONFIG, BNX2_HC_CONFIG_RX_TMR_MODE |
3286 BNX2_HC_CONFIG_TX_TMR_MODE |
3287 BNX2_HC_CONFIG_COLLECT_STATS);
3288 }
3290 /* Clear internal stats counters. */
3291 REG_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);
3293 REG_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_BITS_LINK_STATE);
3295 if (REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_FEATURE) &
3296 BNX2_PORT_FEATURE_ASF_ENABLED)
3297 bp->flags |= ASF_ENABLE_FLAG;
3299 /* Initialize the receive filter. */
3300 bnx2_set_rx_mode(bp->dev);
3302 rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
3303 0);
3305 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, 0x5ffffff);
3306 REG_RD(bp, BNX2_MISC_ENABLE_SET_BITS);
3308 udelay(20);
3310 return rc;
3311 }
3314 static void
3315 bnx2_init_tx_ring(struct bnx2 *bp)
3316 {
3317 struct tx_bd *txbd;
3318 u32 val;
3320 txbd = &bp->tx_desc_ring[MAX_TX_DESC_CNT];
3322 txbd->tx_bd_haddr_hi = (u64) bp->tx_desc_mapping >> 32;
3323 txbd->tx_bd_haddr_lo = (u64) bp->tx_desc_mapping & 0xffffffff;
3325 bp->tx_prod = 0;
3326 bp->tx_cons = 0;
3327 bp->hw_tx_cons = 0;
3328 bp->tx_prod_bseq = 0;
3330 val = BNX2_L2CTX_TYPE_TYPE_L2;
3331 val |= BNX2_L2CTX_TYPE_SIZE_L2;
3332 CTX_WR(bp, GET_CID_ADDR(TX_CID), BNX2_L2CTX_TYPE, val);
3334 val = BNX2_L2CTX_CMD_TYPE_TYPE_L2;
3335 val |= 8 << 16;
3336 CTX_WR(bp, GET_CID_ADDR(TX_CID), BNX2_L2CTX_CMD_TYPE, val);
3338 val = (u64) bp->tx_desc_mapping >> 32;
3339 CTX_WR(bp, GET_CID_ADDR(TX_CID), BNX2_L2CTX_TBDR_BHADDR_HI, val);
3341 val = (u64) bp->tx_desc_mapping & 0xffffffff;
3342 CTX_WR(bp, GET_CID_ADDR(TX_CID), BNX2_L2CTX_TBDR_BHADDR_LO, val);
3343 }
3345 static void
3346 bnx2_init_rx_ring(struct bnx2 *bp)
3347 {
3348 struct rx_bd *rxbd;
3349 int i;
3350 u16 prod, ring_prod;
3351 u32 val;
3353 /* 8 for CRC and VLAN */
3354 bp->rx_buf_use_size = bp->dev->mtu + ETH_HLEN + bp->rx_offset + 8;
3355 /* 8 for alignment */
3356 bp->rx_buf_size = bp->rx_buf_use_size + 8;
3358 ring_prod = prod = bp->rx_prod = 0;
3359 bp->rx_cons = 0;
3360 bp->hw_rx_cons = 0;
3361 bp->rx_prod_bseq = 0;
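/* The rx ring is a chain of rx_max_ring descriptor pages.  The loop
 * below fills every BD with the buffer size, then points the last BD
 * of each page at the first BD of the next page (wrapping back to page
 * 0), so the hardware sees one circular ring. */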
3363 for (i = 0; i < bp->rx_max_ring; i++) {
3364 int j;
3366 rxbd = &bp->rx_desc_ring[i][0];
3367 for (j = 0; j < MAX_RX_DESC_CNT; j++, rxbd++) {
3368 rxbd->rx_bd_len = bp->rx_buf_use_size;
3369 rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
3370 }
3371 if (i == (bp->rx_max_ring - 1))
3372 j = 0;
3373 else
3374 j = i + 1;
3375 rxbd->rx_bd_haddr_hi = (u64) bp->rx_desc_mapping[j] >> 32;
3376 rxbd->rx_bd_haddr_lo = (u64) bp->rx_desc_mapping[j] &
3377 0xffffffff;
3378 }
3380 val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
3381 val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
3382 val |= 0x02 << 8;
3383 CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_CTX_TYPE, val);
3385 val = (u64) bp->rx_desc_mapping[0] >> 32;
3386 CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_NX_BDHADDR_HI, val);
3388 val = (u64) bp->rx_desc_mapping[0] & 0xffffffff;
3389 CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_NX_BDHADDR_LO, val);
3391 for (i = 0; i < bp->rx_ring_size; i++) {
3392 if (bnx2_alloc_rx_skb(bp, ring_prod) < 0) {
3393 break;
3394 }
3395 prod = NEXT_RX_BD(prod);
3396 ring_prod = RX_RING_IDX(prod);
3397 }
3398 bp->rx_prod = prod;
3400 REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, prod);
3402 REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bp->rx_prod_bseq);
3403 }
3405 static void
3406 bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
3407 {
3408 u32 num_rings, max;
3410 bp->rx_ring_size = size;
3411 num_rings = 1;
3412 while (size > MAX_RX_DESC_CNT) {
3413 size -= MAX_RX_DESC_CNT;
3414 num_rings++;
3415 }
3416 /* round to next power of 2 */
3417 max = MAX_RX_RINGS;
3418 while ((max & num_rings) == 0)
3419 max >>= 1;
3421 if (num_rings != max)
3422 max <<= 1;
3424 bp->rx_max_ring = max;
3425 bp->rx_max_ring_idx = (bp->rx_max_ring * RX_DESC_CNT) - 1;
3426 }
3428 static void
3429 bnx2_free_tx_skbs(struct bnx2 *bp)
3430 {
3431 int i;
3433 if (bp->tx_buf_ring == NULL)
3434 return;
3436 for (i = 0; i < TX_DESC_CNT; ) {
3437 struct sw_bd *tx_buf = &bp->tx_buf_ring[i];
3438 struct sk_buff *skb = tx_buf->skb;
3439 int j, last;
3441 if (skb == NULL) {
3442 i++;
3443 continue;
3444 }
3446 pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
3447 skb_headlen(skb), PCI_DMA_TODEVICE);
3449 tx_buf->skb = NULL;
3451 last = skb_shinfo(skb)->nr_frags;
3452 for (j = 0; j < last; j++) {
3453 tx_buf = &bp->tx_buf_ring[i + j + 1];
3454 pci_unmap_page(bp->pdev,
3455 pci_unmap_addr(tx_buf, mapping),
3456 skb_shinfo(skb)->frags[j].size,
3457 PCI_DMA_TODEVICE);
3458 }
3459 dev_kfree_skb_any(skb);
3460 i += j + 1;
3461 }
3462 }
3465 static void
3466 bnx2_free_rx_skbs(struct bnx2 *bp)
3467 {
3468 int i;
3470 if (bp->rx_buf_ring == NULL)
3471 return;
3473 for (i = 0; i < bp->rx_max_ring_idx; i++) {
3474 struct sw_bd *rx_buf = &bp->rx_buf_ring[i];
3475 struct sk_buff *skb = rx_buf->skb;
3477 if (skb == NULL)
3478 continue;
3480 pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
3481 bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
3483 rx_buf->skb = NULL;
3485 dev_kfree_skb_any(skb);
3486 }
3487 }
3489 static void
3490 bnx2_free_skbs(struct bnx2 *bp)
3491 {
3492 bnx2_free_tx_skbs(bp);
3493 bnx2_free_rx_skbs(bp);
3494 }
3496 static int
3497 bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
3498 {
3499 int rc;
3501 rc = bnx2_reset_chip(bp, reset_code);
3502 bnx2_free_skbs(bp);
3503 if (rc)
3504 return rc;
3506 bnx2_init_chip(bp);
3507 bnx2_init_tx_ring(bp);
3508 bnx2_init_rx_ring(bp);
3509 return 0;
3510 }
3512 static int
3513 bnx2_init_nic(struct bnx2 *bp)
3514 {
3515 int rc;
3517 if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
3518 return rc;
3520 bnx2_init_phy(bp);
3521 bnx2_set_link(bp);
3522 return 0;
3523 }
3525 static int
3526 bnx2_test_registers(struct bnx2 *bp)
3527 {
3528 int ret;
3529 int i;
3530 static const struct {
3531 u16 offset;
3532 u16 flags;
3533 u32 rw_mask;
3534 u32 ro_mask;
3535 } reg_tbl[] = {
3536 { 0x006c, 0, 0x00000000, 0x0000003f },
3537 { 0x0090, 0, 0xffffffff, 0x00000000 },
3538 { 0x0094, 0, 0x00000000, 0x00000000 },
3540 { 0x0404, 0, 0x00003f00, 0x00000000 },
3541 { 0x0418, 0, 0x00000000, 0xffffffff },
3542 { 0x041c, 0, 0x00000000, 0xffffffff },
3543 { 0x0420, 0, 0x00000000, 0x80ffffff },
3544 { 0x0424, 0, 0x00000000, 0x00000000 },
3545 { 0x0428, 0, 0x00000000, 0x00000001 },
3546 { 0x0450, 0, 0x00000000, 0x0000ffff },
3547 { 0x0454, 0, 0x00000000, 0xffffffff },
3548 { 0x0458, 0, 0x00000000, 0xffffffff },
3550 { 0x0808, 0, 0x00000000, 0xffffffff },
3551 { 0x0854, 0, 0x00000000, 0xffffffff },
3552 { 0x0868, 0, 0x00000000, 0x77777777 },
3553 { 0x086c, 0, 0x00000000, 0x77777777 },
3554 { 0x0870, 0, 0x00000000, 0x77777777 },
3555 { 0x0874, 0, 0x00000000, 0x77777777 },
3557 { 0x0c00, 0, 0x00000000, 0x00000001 },
3558 { 0x0c04, 0, 0x00000000, 0x03ff0001 },
3559 { 0x0c08, 0, 0x0f0ff073, 0x00000000 },
3561 { 0x1000, 0, 0x00000000, 0x00000001 },
3562 { 0x1004, 0, 0x00000000, 0x000f0001 },
3564 { 0x1408, 0, 0x01c00800, 0x00000000 },
3565 { 0x149c, 0, 0x8000ffff, 0x00000000 },
3566 { 0x14a8, 0, 0x00000000, 0x000001ff },
3567 { 0x14ac, 0, 0x0fffffff, 0x10000000 },
3568 { 0x14b0, 0, 0x00000002, 0x00000001 },
3569 { 0x14b8, 0, 0x00000000, 0x00000000 },
3570 { 0x14c0, 0, 0x00000000, 0x00000009 },
3571 { 0x14c4, 0, 0x00003fff, 0x00000000 },
3572 { 0x14cc, 0, 0x00000000, 0x00000001 },
3573 { 0x14d0, 0, 0xffffffff, 0x00000000 },
3575 { 0x1800, 0, 0x00000000, 0x00000001 },
3576 { 0x1804, 0, 0x00000000, 0x00000003 },
3578 { 0x2800, 0, 0x00000000, 0x00000001 },
3579 { 0x2804, 0, 0x00000000, 0x00003f01 },
3580 { 0x2808, 0, 0x0f3f3f03, 0x00000000 },
3581 { 0x2810, 0, 0xffff0000, 0x00000000 },
3582 { 0x2814, 0, 0xffff0000, 0x00000000 },
3583 { 0x2818, 0, 0xffff0000, 0x00000000 },
3584 { 0x281c, 0, 0xffff0000, 0x00000000 },
3585 { 0x2834, 0, 0xffffffff, 0x00000000 },
3586 { 0x2840, 0, 0x00000000, 0xffffffff },
3587 { 0x2844, 0, 0x00000000, 0xffffffff },
3588 { 0x2848, 0, 0xffffffff, 0x00000000 },
3589 { 0x284c, 0, 0xf800f800, 0x07ff07ff },
3591 { 0x2c00, 0, 0x00000000, 0x00000011 },
3592 { 0x2c04, 0, 0x00000000, 0x00030007 },
3594 { 0x3c00, 0, 0x00000000, 0x00000001 },
3595 { 0x3c04, 0, 0x00000000, 0x00070000 },
3596 { 0x3c08, 0, 0x00007f71, 0x07f00000 },
3597 { 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
3598 { 0x3c10, 0, 0xffffffff, 0x00000000 },
3599 { 0x3c14, 0, 0x00000000, 0xffffffff },
3600 { 0x3c18, 0, 0x00000000, 0xffffffff },
3601 { 0x3c1c, 0, 0xfffff000, 0x00000000 },
3602 { 0x3c20, 0, 0xffffff00, 0x00000000 },
3604 { 0x5004, 0, 0x00000000, 0x0000007f },
3605 { 0x5008, 0, 0x0f0007ff, 0x00000000 },
3606 { 0x500c, 0, 0xf800f800, 0x07ff07ff },
3608 { 0x5c00, 0, 0x00000000, 0x00000001 },
3609 { 0x5c04, 0, 0x00000000, 0x0003000f },
3610 { 0x5c08, 0, 0x00000003, 0x00000000 },
3611 { 0x5c0c, 0, 0x0000fff8, 0x00000000 },
3612 { 0x5c10, 0, 0x00000000, 0xffffffff },
3613 { 0x5c80, 0, 0x00000000, 0x0f7113f1 },
3614 { 0x5c84, 0, 0x00000000, 0x0000f333 },
3615 { 0x5c88, 0, 0x00000000, 0x00077373 },
3616 { 0x5c8c, 0, 0x00000000, 0x0007f737 },
3618 { 0x6808, 0, 0x0000ff7f, 0x00000000 },
3619 { 0x680c, 0, 0xffffffff, 0x00000000 },
3620 { 0x6810, 0, 0xffffffff, 0x00000000 },
3621 { 0x6814, 0, 0xffffffff, 0x00000000 },
3622 { 0x6818, 0, 0xffffffff, 0x00000000 },
3623 { 0x681c, 0, 0xffffffff, 0x00000000 },
3624 { 0x6820, 0, 0x00ff00ff, 0x00000000 },
3625 { 0x6824, 0, 0x00ff00ff, 0x00000000 },
3626 { 0x6828, 0, 0x00ff00ff, 0x00000000 },
3627 { 0x682c, 0, 0x03ff03ff, 0x00000000 },
3628 { 0x6830, 0, 0x03ff03ff, 0x00000000 },
3629 { 0x6834, 0, 0x03ff03ff, 0x00000000 },
3630 { 0x6838, 0, 0x03ff03ff, 0x00000000 },
3631 { 0x683c, 0, 0x0000ffff, 0x00000000 },
3632 { 0x6840, 0, 0x00000ff0, 0x00000000 },
3633 { 0x6844, 0, 0x00ffff00, 0x00000000 },
3634 { 0x684c, 0, 0xffffffff, 0x00000000 },
3635 { 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
3636 { 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
3637 { 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
3638 { 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
3639 { 0x6908, 0, 0x00000000, 0x0001ff0f },
3640 { 0x690c, 0, 0x00000000, 0x0ffe00f0 },
3642 { 0xffff, 0, 0x00000000, 0x00000000 },
3643 };
3645 ret = 0;
3646 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
3647 u32 offset, rw_mask, ro_mask, save_val, val;
3649 offset = (u32) reg_tbl[i].offset;
3650 rw_mask = reg_tbl[i].rw_mask;
3651 ro_mask = reg_tbl[i].ro_mask;
3653 save_val = readl(bp->regview + offset);
3655 writel(0, bp->regview + offset);
3657 val = readl(bp->regview + offset);
3658 if ((val & rw_mask) != 0) {
3659 goto reg_test_err;
3660 }
3662 if ((val & ro_mask) != (save_val & ro_mask)) {
3663 goto reg_test_err;
3664 }
3666 writel(0xffffffff, bp->regview + offset);
3668 val = readl(bp->regview + offset);
3669 if ((val & rw_mask) != rw_mask) {
3670 goto reg_test_err;
3671 }
3673 if ((val & ro_mask) != (save_val & ro_mask)) {
3674 goto reg_test_err;
3675 }
3677 writel(save_val, bp->regview + offset);
3678 continue;
3680 reg_test_err:
3681 writel(save_val, bp->regview + offset);
3682 ret = -ENODEV;
3683 break;
3684 }
3685 return ret;
3686 }
3688 static int
3689 bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
3690 {
3691 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
3692 0xaaaaaaaa, 0xaa55aa55, 0x55aa55aa };
3693 int i;
3695 for (i = 0; i < sizeof(test_pattern) / 4; i++) {
3696 u32 offset;
3698 for (offset = 0; offset < size; offset += 4) {
3700 REG_WR_IND(bp, start + offset, test_pattern[i]);
3702 if (REG_RD_IND(bp, start + offset) !=
3703 test_pattern[i]) {
3704 return -ENODEV;
3705 }
3706 }
3707 }
3708 return 0;
3709 }
3711 static int
3712 bnx2_test_memory(struct bnx2 *bp)
3713 {
3714 int ret = 0;
3715 int i;
3716 static const struct {
3717 u32 offset;
3718 u32 len;
3719 } mem_tbl[] = {
3720 { 0x60000, 0x4000 },
3721 { 0xa0000, 0x3000 },
3722 { 0xe0000, 0x4000 },
3723 { 0x120000, 0x4000 },
3724 { 0x1a0000, 0x4000 },
3725 { 0x160000, 0x4000 },
3726 { 0xffffffff, 0 },
3727 };
3729 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
3730 if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
3731 mem_tbl[i].len)) != 0) {
3732 return ret;
3733 }
3734 }
3736 return ret;
3737 }
3739 #define BNX2_MAC_LOOPBACK 0
3740 #define BNX2_PHY_LOOPBACK 1
3742 static int
3743 bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
3744 {
3745 unsigned int pkt_size, num_pkts, i;
3746 struct sk_buff *skb, *rx_skb;
3747 unsigned char *packet;
3748 u16 rx_start_idx, rx_idx;
3749 u32 val;
3750 dma_addr_t map;
3751 struct tx_bd *txbd;
3752 struct sw_bd *rx_buf;
3753 struct l2_fhdr *rx_hdr;
3754 int ret = -ENODEV;
3756 if (loopback_mode == BNX2_MAC_LOOPBACK) {
3757 bp->loopback = MAC_LOOPBACK;
3758 bnx2_set_mac_loopback(bp);
3759 }
3760 else if (loopback_mode == BNX2_PHY_LOOPBACK) {
3761 bp->loopback = 0;
3762 bnx2_set_phy_loopback(bp);
3763 }
3764 else
3765 return -EINVAL;
3767 pkt_size = 1514;
3768 skb = dev_alloc_skb(pkt_size);
3769 if (!skb)
3770 return -ENOMEM;
3771 packet = skb_put(skb, pkt_size);
3772 memcpy(packet, bp->mac_addr, 6);
3773 memset(packet + 6, 0x0, 8);
3774 for (i = 14; i < pkt_size; i++)
3775 packet[i] = (unsigned char) (i & 0xff);
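/* The loopback frame is addressed to our own MAC with a zeroed
 * source/type region and an incrementing byte pattern in the payload,
 * which the receive path further down checks byte-for-byte. */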
3777 map = pci_map_single(bp->pdev, skb->data, pkt_size,
3778 PCI_DMA_TODEVICE);
3780 val = REG_RD(bp, BNX2_HC_COMMAND);
3781 REG_WR(bp, BNX2_HC_COMMAND, val | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
3782 REG_RD(bp, BNX2_HC_COMMAND);
3784 udelay(5);
3785 rx_start_idx = bp->status_blk->status_rx_quick_consumer_index0;
3787 num_pkts = 0;
3789 txbd = &bp->tx_desc_ring[TX_RING_IDX(bp->tx_prod)];
3791 txbd->tx_bd_haddr_hi = (u64) map >> 32;
3792 txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
3793 txbd->tx_bd_mss_nbytes = pkt_size;
3794 txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;
3796 num_pkts++;
3797 bp->tx_prod = NEXT_TX_BD(bp->tx_prod);
3798 bp->tx_prod_bseq += pkt_size;
3800 REG_WR16(bp, MB_TX_CID_ADDR + BNX2_L2CTX_TX_HOST_BIDX, bp->tx_prod);
3801 REG_WR(bp, MB_TX_CID_ADDR + BNX2_L2CTX_TX_HOST_BSEQ, bp->tx_prod_bseq);
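/* Ring the tx mailbox doorbell with the new producer index and byte
 * count, then force an immediate coalescing pass (COAL_NOW_WO_INT) so
 * the status block is updated without waiting for a real interrupt. */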
3803 udelay(100);
3805 val = REG_RD(bp, BNX2_HC_COMMAND);
3806 REG_WR(bp, BNX2_HC_COMMAND, val | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
3807 REG_RD(bp, BNX2_HC_COMMAND);
3809 udelay(5);
3811 pci_unmap_single(bp->pdev, map, pkt_size, PCI_DMA_TODEVICE);
3812 dev_kfree_skb_irq(skb);
3814 if (bp->status_blk->status_tx_quick_consumer_index0 != bp->tx_prod) {
3815 goto loopback_test_done;
3816 }
3818 rx_idx = bp->status_blk->status_rx_quick_consumer_index0;
3819 if (rx_idx != rx_start_idx + num_pkts) {
3820 goto loopback_test_done;
3821 }
3823 rx_buf = &bp->rx_buf_ring[rx_start_idx];
3824 rx_skb = rx_buf->skb;
3826 rx_hdr = (struct l2_fhdr *) rx_skb->data;
3827 skb_reserve(rx_skb, bp->rx_offset);
3829 pci_dma_sync_single_for_cpu(bp->pdev,
3830 pci_unmap_addr(rx_buf, mapping),
3831 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
3833 if (rx_hdr->l2_fhdr_status &
3834 (L2_FHDR_ERRORS_BAD_CRC |
3835 L2_FHDR_ERRORS_PHY_DECODE |
3836 L2_FHDR_ERRORS_ALIGNMENT |
3837 L2_FHDR_ERRORS_TOO_SHORT |
3838 L2_FHDR_ERRORS_GIANT_FRAME)) {
3840 goto loopback_test_done;
3841 }
3843 if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) {
3844 goto loopback_test_done;
3845 }
3847 for (i = 14; i < pkt_size; i++) {
3848 if (*(rx_skb->data + i) != (unsigned char) (i & 0xff)) {
3849 goto loopback_test_done;
3850 }
3851 }
3853 ret = 0;
3855 loopback_test_done:
3856 bp->loopback = 0;
3857 return ret;
3858 }
3860 #define BNX2_MAC_LOOPBACK_FAILED 1
3861 #define BNX2_PHY_LOOPBACK_FAILED 2
3862 #define BNX2_LOOPBACK_FAILED (BNX2_MAC_LOOPBACK_FAILED | \
3863 BNX2_PHY_LOOPBACK_FAILED)
3865 static int
3866 bnx2_test_loopback(struct bnx2 *bp)
3867 {
3868 int rc = 0;
3870 if (!netif_running(bp->dev))
3871 return BNX2_LOOPBACK_FAILED;
3873 bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
3874 spin_lock_bh(&bp->phy_lock);
3875 bnx2_init_phy(bp);
3876 spin_unlock_bh(&bp->phy_lock);
3877 if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
3878 rc |= BNX2_MAC_LOOPBACK_FAILED;
3879 if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
3880 rc |= BNX2_PHY_LOOPBACK_FAILED;
3881 return rc;
3882 }
3884 #define NVRAM_SIZE 0x200
3885 #define CRC32_RESIDUAL 0xdebb20e3
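/* 0xdebb20e3 is the standard CRC32 residual: running the CRC over a
 * block together with its appended (inverted) CRC yields this constant
 * when the data is intact, so the checks below need no stored value. */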
3887 static int
3888 bnx2_test_nvram(struct bnx2 *bp)
3889 {
3890 u32 buf[NVRAM_SIZE / 4];
3891 u8 *data = (u8 *) buf;
3892 int rc = 0;
3893 u32 magic, csum;
3895 if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
3896 goto test_nvram_done;
3898 magic = be32_to_cpu(buf[0]);
3899 if (magic != 0x669955aa) {
3900 rc = -ENODEV;
3901 goto test_nvram_done;
3902 }
3904 if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0)
3905 goto test_nvram_done;
3907 csum = ether_crc_le(0x100, data);
3908 if (csum != CRC32_RESIDUAL) {
3909 rc = -ENODEV;
3910 goto test_nvram_done;
3911 }
3913 csum = ether_crc_le(0x100, data + 0x100);
3914 if (csum != CRC32_RESIDUAL) {
3915 rc = -ENODEV;
3916 }
3918 test_nvram_done:
3919 return rc;
3920 }
3922 static int
3923 bnx2_test_link(struct bnx2 *bp)
3924 {
3925 u32 bmsr;
3927 spin_lock_bh(&bp->phy_lock);
3928 bnx2_read_phy(bp, MII_BMSR, &bmsr);
3929 bnx2_read_phy(bp, MII_BMSR, &bmsr);
3930 spin_unlock_bh(&bp->phy_lock);
3932 if (bmsr & BMSR_LSTATUS) {
3933 return 0;
3934 }
3935 return -ENODEV;
3936 }
3938 static int
3939 bnx2_test_intr(struct bnx2 *bp)
3940 {
3941 int i;
3942 u32 val;
3943 u16 status_idx;
3945 if (!netif_running(bp->dev))
3946 return -ENODEV;
3948 status_idx = REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;
3950 /* This register is not touched during run-time. */
3951 val = REG_RD(bp, BNX2_HC_COMMAND);
3952 REG_WR(bp, BNX2_HC_COMMAND, val | BNX2_HC_COMMAND_COAL_NOW);
3953 REG_RD(bp, BNX2_HC_COMMAND);
3955 for (i = 0; i < 10; i++) {
3956 if ((REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
3957 status_idx) {
3959 break;
3960 }
3962 msleep_interruptible(10);
3963 }
3964 if (i < 10)
3965 return 0;
3967 return -ENODEV;
3968 }
3970 static void
3971 bnx2_timer(unsigned long data)
3972 {
3973 struct bnx2 *bp = (struct bnx2 *) data;
3974 u32 msg;
3976 if (!netif_running(bp->dev))
3977 return;
3979 if (atomic_read(&bp->intr_sem) != 0)
3980 goto bnx2_restart_timer;
3982 msg = (u32) ++bp->fw_drv_pulse_wr_seq;
3983 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_PULSE_MB, msg);
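/* This periodic write is the driver heartbeat: the bootcode watches
 * DRV_PULSE_MB and can conclude the driver has died if the sequence
 * number stops advancing. */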
3985 if ((bp->phy_flags & PHY_SERDES_FLAG) &&
3986 (CHIP_NUM(bp) == CHIP_NUM_5706)) {
3988 spin_lock(&bp->phy_lock);
3989 if (bp->serdes_an_pending) {
3990 bp->serdes_an_pending--;
3991 }
3992 else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
3993 u32 bmcr;
3995 bp->current_interval = bp->timer_interval;
3997 bnx2_read_phy(bp, MII_BMCR, &bmcr);
3999 if (bmcr & BMCR_ANENABLE) {
4000 u32 phy1, phy2;
4002 bnx2_write_phy(bp, 0x1c, 0x7c00);
4003 bnx2_read_phy(bp, 0x1c, &phy1);
4005 bnx2_write_phy(bp, 0x17, 0x0f01);
4006 bnx2_read_phy(bp, 0x15, &phy2);
4007 bnx2_write_phy(bp, 0x17, 0x0f01);
4008 bnx2_read_phy(bp, 0x15, &phy2);
4010 if ((phy1 & 0x10) && /* SIGNAL DETECT */
4011 !(phy2 & 0x20)) { /* no CONFIG */
4013 bmcr &= ~BMCR_ANENABLE;
4014 bmcr |= BMCR_SPEED1000 |
4015 BMCR_FULLDPLX;
4016 bnx2_write_phy(bp, MII_BMCR, bmcr);
4017 bp->phy_flags |=
4018 PHY_PARALLEL_DETECT_FLAG;
4019 }
4020 }
4021 }
4022 else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
4023 (bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)) {
4024 u32 phy2;
4026 bnx2_write_phy(bp, 0x17, 0x0f01);
4027 bnx2_read_phy(bp, 0x15, &phy2);
4028 if (phy2 & 0x20) {
4029 u32 bmcr;
4031 bnx2_read_phy(bp, MII_BMCR, &bmcr);
4032 bmcr |= BMCR_ANENABLE;
4033 bnx2_write_phy(bp, MII_BMCR, bmcr);
4035 bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
4036 }
4037 }
4039 else
4040 bp->current_interval = bp->timer_interval;
4042 spin_unlock(&bp->phy_lock);
4043 }
4045 bnx2_restart_timer:
4046 mod_timer(&bp->timer, jiffies + bp->current_interval);
4047 }
4049 /* Called with rtnl_lock */
4050 static int
4051 bnx2_open(struct net_device *dev)
4052 {
4053 struct bnx2 *bp = netdev_priv(dev);
4054 int rc;
4056 bnx2_set_power_state(bp, PCI_D0);
4057 bnx2_disable_int(bp);
4059 rc = bnx2_alloc_mem(bp);
4060 if (rc)
4061 return rc;
4063 if ((CHIP_ID(bp) != CHIP_ID_5706_A0) &&
4064 (CHIP_ID(bp) != CHIP_ID_5706_A1) &&
4065 !disable_msi) {
4067 if (pci_enable_msi(bp->pdev) == 0) {
4068 bp->flags |= USING_MSI_FLAG;
4069 rc = request_irq(bp->pdev->irq, bnx2_msi, 0, dev->name,
4070 dev);
4071 }
4072 else {
4073 rc = request_irq(bp->pdev->irq, bnx2_interrupt,
4074 SA_SHIRQ, dev->name, dev);
4075 }
4076 }
4077 else {
4078 rc = request_irq(bp->pdev->irq, bnx2_interrupt, SA_SHIRQ,
4079 dev->name, dev);
4080 }
4081 if (rc) {
4082 bnx2_free_mem(bp);
4083 return rc;
4084 }
4086 rc = bnx2_init_nic(bp);
4088 if (rc) {
4089 free_irq(bp->pdev->irq, dev);
4090 if (bp->flags & USING_MSI_FLAG) {
4091 pci_disable_msi(bp->pdev);
4092 bp->flags &= ~USING_MSI_FLAG;
4093 }
4094 bnx2_free_skbs(bp);
4095 bnx2_free_mem(bp);
4096 return rc;
4097 }
4099 mod_timer(&bp->timer, jiffies + bp->current_interval);
4101 atomic_set(&bp->intr_sem, 0);
4103 bnx2_enable_int(bp);
4105 if (bp->flags & USING_MSI_FLAG) {
4106 /* Test MSI to make sure it is working
4107 * If MSI test fails, go back to INTx mode.
4108 */
4109 if (bnx2_test_intr(bp) != 0) {
4110 printk(KERN_WARNING PFX "%s: No interrupt was generated"
4111 " using MSI, switching to INTx mode. Please"
4112 " report this failure to the PCI maintainer"
4113 " and include system chipset information.\n",
4114 bp->dev->name);
4116 bnx2_disable_int(bp);
4117 free_irq(bp->pdev->irq, dev);
4118 pci_disable_msi(bp->pdev);
4119 bp->flags &= ~USING_MSI_FLAG;
4121 rc = bnx2_init_nic(bp);
4123 if (!rc) {
4124 rc = request_irq(bp->pdev->irq, bnx2_interrupt,
4125 SA_SHIRQ, dev->name, dev);
4126 }
4127 if (rc) {
4128 bnx2_free_skbs(bp);
4129 bnx2_free_mem(bp);
4130 del_timer_sync(&bp->timer);
4131 return rc;
4132 }
4133 bnx2_enable_int(bp);
4134 }
4135 }
4136 if (bp->flags & USING_MSI_FLAG) {
4137 printk(KERN_INFO PFX "%s: using MSI\n", dev->name);
4138 }
4140 netif_start_queue(dev);
4142 return 0;
4143 }
4145 static void
4146 bnx2_reset_task(void *data)
4147 {
4148 struct bnx2 *bp = data;
4150 if (!netif_running(bp->dev))
4151 return;
4153 bp->in_reset_task = 1;
4154 bnx2_netif_stop(bp);
4156 bnx2_init_nic(bp);
4158 atomic_set(&bp->intr_sem, 1);
4159 bnx2_netif_start(bp);
4160 bp->in_reset_task = 0;
4161 }
4163 static void
4164 bnx2_tx_timeout(struct net_device *dev)
4165 {
4166 struct bnx2 *bp = netdev_priv(dev);
4168 /* This allows the netif to be shutdown gracefully before resetting */
4169 schedule_work(&bp->reset_task);
4170 }
4172 #ifdef BCM_VLAN
4173 /* Called with rtnl_lock */
4174 static void
4175 bnx2_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp)
4176 {
4177 struct bnx2 *bp = netdev_priv(dev);
4179 bnx2_netif_stop(bp);
4181 bp->vlgrp = vlgrp;
4182 bnx2_set_rx_mode(dev);
4184 bnx2_netif_start(bp);
4185 }
4187 /* Called with rtnl_lock */
4188 static void
4189 bnx2_vlan_rx_kill_vid(struct net_device *dev, uint16_t vid)
4190 {
4191 struct bnx2 *bp = netdev_priv(dev);
4193 bnx2_netif_stop(bp);
4195 if (bp->vlgrp)
4196 bp->vlgrp->vlan_devices[vid] = NULL;
4197 bnx2_set_rx_mode(dev);
4199 bnx2_netif_start(bp);
4200 }
4201 #endif
4203 /* Called with dev->xmit_lock.
4204 * hard_start_xmit is pseudo-lockless - a lock is only required when
4205 * the tx queue is full. This way, we get the benefit of lockless
4206 * operations most of the time without the complexities to handle
4207 * netif_stop_queue/wake_queue race conditions.
4208 */
4209 static int
4210 bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
4211 {
4212 struct bnx2 *bp = netdev_priv(dev);
4213 dma_addr_t mapping;
4214 struct tx_bd *txbd;
4215 struct sw_bd *tx_buf;
4216 u32 len, vlan_tag_flags, last_frag, mss;
4217 u16 prod, ring_prod;
4218 int i;
4220 if (unlikely(bnx2_tx_avail(bp) < (skb_shinfo(skb)->nr_frags + 1))) {
4221 netif_stop_queue(dev);
4222 printk(KERN_ERR PFX "%s: BUG! Tx ring full when queue awake!\n",
4223 dev->name);
4225 return NETDEV_TX_BUSY;
4226 }
4227 len = skb_headlen(skb);
4228 prod = bp->tx_prod;
4229 ring_prod = TX_RING_IDX(prod);
4231 vlan_tag_flags = 0;
4232 if (skb->ip_summed == CHECKSUM_HW) {
4233 vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
4234 }
4236 if (bp->vlgrp != 0 && vlan_tx_tag_present(skb)) {
4237 vlan_tag_flags |=
4238 (TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
4239 }
4240 #ifdef BCM_TSO
4241 if ((mss = skb_shinfo(skb)->tso_size) &&
4242 (skb->len > (bp->dev->mtu + ETH_HLEN))) {
4243 u32 tcp_opt_len, ip_tcp_len;
4245 if (skb_header_cloned(skb) &&
4246 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
4247 dev_kfree_skb(skb);
4248 return NETDEV_TX_OK;
4249 }
4251 tcp_opt_len = ((skb->h.th->doff - 5) * 4);
4252 vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;
4254 tcp_opt_len = 0;
4255 if (skb->h.th->doff > 5) {
4256 tcp_opt_len = (skb->h.th->doff - 5) << 2;
4257 }
4258 ip_tcp_len = (skb->nh.iph->ihl << 2) + sizeof(struct tcphdr);
4260 skb->nh.iph->check = 0;
4261 skb->nh.iph->tot_len = ntohs(mss + ip_tcp_len + tcp_opt_len);
4262 skb->h.th->check =
4263 ~csum_tcpudp_magic(skb->nh.iph->saddr,
4264 skb->nh.iph->daddr,
4265 0, IPPROTO_TCP, 0);
4267 if (tcp_opt_len || (skb->nh.iph->ihl > 5)) {
4268 vlan_tag_flags |= ((skb->nh.iph->ihl - 5) +
4269 (tcp_opt_len >> 2)) << 8;
4270 }
4271 }
4272 else
4273 #endif
4274 {
4275 mss = 0;
4276 }
4278 mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE);
4280 tx_buf = &bp->tx_buf_ring[ring_prod];
4281 tx_buf->skb = skb;
4282 pci_unmap_addr_set(tx_buf, mapping, mapping);
4284 txbd = &bp->tx_desc_ring[ring_prod];
4286 txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
4287 txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
4288 txbd->tx_bd_mss_nbytes = len | (mss << 16);
4289 txbd->tx_bd_vlan_tag_flags = vlan_tag_flags | TX_BD_FLAGS_START;
4291 last_frag = skb_shinfo(skb)->nr_frags;
4293 for (i = 0; i < last_frag; i++) {
4294 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4296 prod = NEXT_TX_BD(prod);
4297 ring_prod = TX_RING_IDX(prod);
4298 txbd = &bp->tx_desc_ring[ring_prod];
4300 len = frag->size;
4301 mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
4302 len, PCI_DMA_TODEVICE);
4303 pci_unmap_addr_set(&bp->tx_buf_ring[ring_prod],
4304 mapping, mapping);
4306 txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
4307 txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
4308 txbd->tx_bd_mss_nbytes = len | (mss << 16);
4309 txbd->tx_bd_vlan_tag_flags = vlan_tag_flags;
4310 }
4312 txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;
4314 prod = NEXT_TX_BD(prod);
4315 bp->tx_prod_bseq += skb->len;
4317 REG_WR16(bp, MB_TX_CID_ADDR + BNX2_L2CTX_TX_HOST_BIDX, prod);
4318 REG_WR(bp, MB_TX_CID_ADDR + BNX2_L2CTX_TX_HOST_BSEQ, bp->tx_prod_bseq);
4320 mmiowb();
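/* mmiowb() orders the doorbell writes above against MMIO writes that
 * other CPUs may issue after acquiring a lock we release later; on
 * strongly-ordered platforms it compiles away. */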
4322 bp->tx_prod = prod;
4323 dev->trans_start = jiffies;
4325 if (unlikely(bnx2_tx_avail(bp) <= MAX_SKB_FRAGS)) {
4326 spin_lock(&bp->tx_lock);
4327 netif_stop_queue(dev);
4329 if (bnx2_tx_avail(bp) > MAX_SKB_FRAGS)
4330 netif_wake_queue(dev);
4331 spin_unlock(&bp->tx_lock);
4332 }
4334 return NETDEV_TX_OK;
4335 }
4337 /* Called with rtnl_lock */
4338 static int
4339 bnx2_close(struct net_device *dev)
4340 {
4341 struct bnx2 *bp = netdev_priv(dev);
4342 u32 reset_code;
4344 /* Calling flush_scheduled_work() may deadlock because
4345 * linkwatch_event() may be on the workqueue and it will try to get
4346 * the rtnl_lock which we are holding.
4347 */
4348 while (bp->in_reset_task)
4349 msleep(1);
4351 bnx2_netif_stop(bp);
4352 del_timer_sync(&bp->timer);
4353 if (bp->flags & NO_WOL_FLAG)
4354 reset_code = BNX2_DRV_MSG_CODE_UNLOAD;
4355 else if (bp->wol)
4356 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
4357 else
4358 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
4359 bnx2_reset_chip(bp, reset_code);
4360 free_irq(bp->pdev->irq, dev);
4361 if (bp->flags & USING_MSI_FLAG) {
4362 pci_disable_msi(bp->pdev);
4363 bp->flags &= ~USING_MSI_FLAG;
4364 }
4365 bnx2_free_skbs(bp);
4366 bnx2_free_mem(bp);
4367 bp->link_up = 0;
4368 netif_carrier_off(bp->dev);
4369 bnx2_set_power_state(bp, PCI_D3hot);
4370 return 0;
4371 }
4373 #define GET_NET_STATS64(ctr) \
4374 (unsigned long) ((unsigned long) (ctr##_hi) << 32) + \
4375 (unsigned long) (ctr##_lo)
4377 #define GET_NET_STATS32(ctr) \
4378 (ctr##_lo)
4380 #if (BITS_PER_LONG == 64)
4381 #define GET_NET_STATS GET_NET_STATS64
4382 #else
4383 #define GET_NET_STATS GET_NET_STATS32
4384 #endif
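/* The chip keeps 64-bit counters as hi/lo register pairs.  On 64-bit
 * hosts both halves are combined; on 32-bit hosts only the low word is
 * reported, since struct net_device_stats uses unsigned long. */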
4386 static struct net_device_stats *
4387 bnx2_get_stats(struct net_device *dev)
4388 {
4389 struct bnx2 *bp = netdev_priv(dev);
4390 struct statistics_block *stats_blk = bp->stats_blk;
4391 struct net_device_stats *net_stats = &bp->net_stats;
4393 if (bp->stats_blk == NULL) {
4394 return net_stats;
4395 }
4396 net_stats->rx_packets =
4397 GET_NET_STATS(stats_blk->stat_IfHCInUcastPkts) +
4398 GET_NET_STATS(stats_blk->stat_IfHCInMulticastPkts) +
4399 GET_NET_STATS(stats_blk->stat_IfHCInBroadcastPkts);
4401 net_stats->tx_packets =
4402 GET_NET_STATS(stats_blk->stat_IfHCOutUcastPkts) +
4403 GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts) +
4404 GET_NET_STATS(stats_blk->stat_IfHCOutBroadcastPkts);
4406 net_stats->rx_bytes =
4407 GET_NET_STATS(stats_blk->stat_IfHCInOctets);
4409 net_stats->tx_bytes =
4410 GET_NET_STATS(stats_blk->stat_IfHCOutOctets);
4412 net_stats->multicast =
4413 GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts);
4415 net_stats->collisions =
4416 (unsigned long) stats_blk->stat_EtherStatsCollisions;
4418 net_stats->rx_length_errors =
4419 (unsigned long) (stats_blk->stat_EtherStatsUndersizePkts +
4420 stats_blk->stat_EtherStatsOverrsizePkts);
4422 net_stats->rx_over_errors =
4423 (unsigned long) stats_blk->stat_IfInMBUFDiscards;
4425 net_stats->rx_frame_errors =
4426 (unsigned long) stats_blk->stat_Dot3StatsAlignmentErrors;
4428 net_stats->rx_crc_errors =
4429 (unsigned long) stats_blk->stat_Dot3StatsFCSErrors;
4431 net_stats->rx_errors = net_stats->rx_length_errors +
4432 net_stats->rx_over_errors + net_stats->rx_frame_errors +
4433 net_stats->rx_crc_errors;
4435 net_stats->tx_aborted_errors =
4436 (unsigned long) (stats_blk->stat_Dot3StatsExcessiveCollisions +
4437 stats_blk->stat_Dot3StatsLateCollisions);
4439 if ((CHIP_NUM(bp) == CHIP_NUM_5706) ||
4440 (CHIP_ID(bp) == CHIP_ID_5708_A0))
4441 net_stats->tx_carrier_errors = 0;
4442 else {
4443 net_stats->tx_carrier_errors =
4444 (unsigned long)
4445 stats_blk->stat_Dot3StatsCarrierSenseErrors;
4446 }
4448 net_stats->tx_errors =
4449 (unsigned long)
4450 stats_blk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors
4451 +
4452 net_stats->tx_aborted_errors +
4453 net_stats->tx_carrier_errors;
4455 return net_stats;
4456 }
4458 /* All ethtool functions called with rtnl_lock */
4460 static int
4461 bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
4462 {
4463 struct bnx2 *bp = netdev_priv(dev);
4465 cmd->supported = SUPPORTED_Autoneg;
4466 if (bp->phy_flags & PHY_SERDES_FLAG) {
4467 cmd->supported |= SUPPORTED_1000baseT_Full |
4468 SUPPORTED_FIBRE;
4470 cmd->port = PORT_FIBRE;
4472 else {
4473 cmd->supported |= SUPPORTED_10baseT_Half |
4474 SUPPORTED_10baseT_Full |
4475 SUPPORTED_100baseT_Half |
4476 SUPPORTED_100baseT_Full |
4477 SUPPORTED_1000baseT_Full |
4478 SUPPORTED_TP;
4480 cmd->port = PORT_TP;
4483 cmd->advertising = bp->advertising;
4485 if (bp->autoneg & AUTONEG_SPEED) {
4486 cmd->autoneg = AUTONEG_ENABLE;
4488 else {
4489 cmd->autoneg = AUTONEG_DISABLE;
4492 if (netif_carrier_ok(dev)) {
4493 cmd->speed = bp->line_speed;
4494 cmd->duplex = bp->duplex;
4496 else {
4497 cmd->speed = -1;
4498 cmd->duplex = -1;
4501 cmd->transceiver = XCVR_INTERNAL;
4502 cmd->phy_address = bp->phy_addr;
4504 return 0;
4507 static int
4508 bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
4510 struct bnx2 *bp = netdev_priv(dev);
4511 u8 autoneg = bp->autoneg;
4512 u8 req_duplex = bp->req_duplex;
4513 u16 req_line_speed = bp->req_line_speed;
4514 u32 advertising = bp->advertising;
4516 if (cmd->autoneg == AUTONEG_ENABLE) {
4517 autoneg |= AUTONEG_SPEED;
4519 cmd->advertising &= ETHTOOL_ALL_COPPER_SPEED;
4521 /* allow advertising 1 speed */
4522 if ((cmd->advertising == ADVERTISED_10baseT_Half) ||
4523 (cmd->advertising == ADVERTISED_10baseT_Full) ||
4524 (cmd->advertising == ADVERTISED_100baseT_Half) ||
4525 (cmd->advertising == ADVERTISED_100baseT_Full)) {
4527 if (bp->phy_flags & PHY_SERDES_FLAG)
4528 return -EINVAL;
4530 advertising = cmd->advertising;
4533 else if (cmd->advertising == ADVERTISED_1000baseT_Full) {
4534 advertising = cmd->advertising;
4536 else if (cmd->advertising == ADVERTISED_1000baseT_Half) {
4537 return -EINVAL;
4539 else {
4540 if (bp->phy_flags & PHY_SERDES_FLAG) {
4541 advertising = ETHTOOL_ALL_FIBRE_SPEED;
4543 else {
4544 advertising = ETHTOOL_ALL_COPPER_SPEED;
4547 advertising |= ADVERTISED_Autoneg;
4549 else {
4550 if (bp->phy_flags & PHY_SERDES_FLAG) {
4551 if ((cmd->speed != SPEED_1000) ||
4552 (cmd->duplex != DUPLEX_FULL)) {
4553 return -EINVAL;
4556 else if (cmd->speed == SPEED_1000) {
4557 return -EINVAL;
4559 autoneg &= ~AUTONEG_SPEED;
4560 req_line_speed = cmd->speed;
4561 req_duplex = cmd->duplex;
4562 advertising = 0;
4565 bp->autoneg = autoneg;
4566 bp->advertising = advertising;
4567 bp->req_line_speed = req_line_speed;
4568 bp->req_duplex = req_duplex;
4570 spin_lock_bh(&bp->phy_lock);
4572 bnx2_setup_phy(bp);
4574 spin_unlock_bh(&bp->phy_lock);
4576 return 0;
4579 static void
4580 bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
4582 struct bnx2 *bp = netdev_priv(dev);
4584 strcpy(info->driver, DRV_MODULE_NAME);
4585 strcpy(info->version, DRV_MODULE_VERSION);
4586 strcpy(info->bus_info, pci_name(bp->pdev));
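/* bp->fw_ver packs the bootcode revision one byte per field; the
 * lines below render its top three bytes as "major.minor.fix",
 * e.g. 0x01020300 -> "1.2.3" (each field is assumed single-digit,
 * since every byte is converted with a bare + '0').
 */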
4587 info->fw_version[0] = ((bp->fw_ver & 0xff000000) >> 24) + '0';
4588 info->fw_version[2] = ((bp->fw_ver & 0xff0000) >> 16) + '0';
4589 info->fw_version[4] = ((bp->fw_ver & 0xff00) >> 8) + '0';
4590 info->fw_version[1] = info->fw_version[3] = '.';
4591 info->fw_version[5] = 0;
4594 #define BNX2_REGDUMP_LEN (32 * 1024)
4596 static int
4597 bnx2_get_regs_len(struct net_device *dev)
4599 return BNX2_REGDUMP_LEN;
4602 static void
4603 bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
4605 u32 *p = _p, i, offset;
4606 u8 *orig_p = _p;
4607 struct bnx2 *bp = netdev_priv(dev);
4608 u32 reg_boundaries[] = { 0x0000, 0x0098, 0x0400, 0x045c,
4609 0x0800, 0x0880, 0x0c00, 0x0c10,
4610 0x0c30, 0x0d08, 0x1000, 0x101c,
4611 0x1040, 0x1048, 0x1080, 0x10a4,
4612 0x1400, 0x1490, 0x1498, 0x14f0,
4613 0x1500, 0x155c, 0x1580, 0x15dc,
4614 0x1600, 0x1658, 0x1680, 0x16d8,
4615 0x1800, 0x1820, 0x1840, 0x1854,
4616 0x1880, 0x1894, 0x1900, 0x1984,
4617 0x1c00, 0x1c0c, 0x1c40, 0x1c54,
4618 0x1c80, 0x1c94, 0x1d00, 0x1d84,
4619 0x2000, 0x2030, 0x23c0, 0x2400,
4620 0x2800, 0x2820, 0x2830, 0x2850,
4621 0x2b40, 0x2c10, 0x2fc0, 0x3058,
4622 0x3c00, 0x3c94, 0x4000, 0x4010,
4623 0x4080, 0x4090, 0x43c0, 0x4458,
4624 0x4c00, 0x4c18, 0x4c40, 0x4c54,
4625 0x4fc0, 0x5010, 0x53c0, 0x5444,
4626 0x5c00, 0x5c18, 0x5c80, 0x5c90,
4627 0x5fc0, 0x6000, 0x6400, 0x6428,
4628 0x6800, 0x6848, 0x684c, 0x6860,
4629 0x6888, 0x6910, 0x8000 };
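/* reg_boundaries[] lists [start, end) pairs of readable register
 * windows; the loop below copies each window 32 bits at a time and
 * leaves the unreadable gaps as the zeros from the memset(), so the
 * dump remains a flat 32k image.
 */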
4631 regs->version = 0;
4633 memset(p, 0, BNX2_REGDUMP_LEN);
4635 if (!netif_running(bp->dev))
4636 return;
4638 i = 0;
4639 offset = reg_boundaries[0];
4640 p += offset;
4641 while (offset < BNX2_REGDUMP_LEN) {
4642 *p++ = REG_RD(bp, offset);
4643 offset += 4;
4644 if (offset == reg_boundaries[i + 1]) {
4645 offset = reg_boundaries[i + 2];
4646 p = (u32 *) (orig_p + offset);
4647 i += 2;
4652 static void
4653 bnx2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
4655 struct bnx2 *bp = netdev_priv(dev);
4657 if (bp->flags & NO_WOL_FLAG) {
4658 wol->supported = 0;
4659 wol->wolopts = 0;
4661 else {
4662 wol->supported = WAKE_MAGIC;
4663 if (bp->wol)
4664 wol->wolopts = WAKE_MAGIC;
4665 else
4666 wol->wolopts = 0;
4668 memset(&wol->sopass, 0, sizeof(wol->sopass));
4671 static int
4672 bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
4674 struct bnx2 *bp = netdev_priv(dev);
4676 if (wol->wolopts & ~WAKE_MAGIC)
4677 return -EINVAL;
4679 if (wol->wolopts & WAKE_MAGIC) {
4680 if (bp->flags & NO_WOL_FLAG)
4681 return -EINVAL;
4683 bp->wol = 1;
4685 else {
4686 bp->wol = 0;
4688 return 0;
4691 static int
4692 bnx2_nway_reset(struct net_device *dev)
4694 struct bnx2 *bp = netdev_priv(dev);
4695 u32 bmcr;
4697 if (!(bp->autoneg & AUTONEG_SPEED)) {
4698 return -EINVAL;
4701 spin_lock_bh(&bp->phy_lock);
4703 /* Force a link down visible on the other side */
4704 if (bp->phy_flags & PHY_SERDES_FLAG) {
4705 bnx2_write_phy(bp, MII_BMCR, BMCR_LOOPBACK);
4706 spin_unlock_bh(&bp->phy_lock);
4708 msleep(20);
4710 spin_lock_bh(&bp->phy_lock);
4711 if (CHIP_NUM(bp) == CHIP_NUM_5706) {
4712 bp->current_interval = SERDES_AN_TIMEOUT;
4713 bp->serdes_an_pending = 1;
4714 mod_timer(&bp->timer, jiffies + bp->current_interval);
4718 bnx2_read_phy(bp, MII_BMCR, &bmcr);
4719 bmcr &= ~BMCR_LOOPBACK;
4720 bnx2_write_phy(bp, MII_BMCR, bmcr | BMCR_ANRESTART | BMCR_ANENABLE);
4722 spin_unlock_bh(&bp->phy_lock);
4724 return 0;
4727 static int
4728 bnx2_get_eeprom_len(struct net_device *dev)
4730 struct bnx2 *bp = netdev_priv(dev);
4732 if (bp->flash_info == NULL)
4733 return 0;
4735 return (int) bp->flash_size;
4738 static int
4739 bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
4740 u8 *eebuf)
4742 struct bnx2 *bp = netdev_priv(dev);
4743 int rc;
4745 /* parameters already validated in ethtool_get_eeprom */
4747 rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
4749 return rc;
4752 static int
4753 bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
4754 u8 *eebuf)
4756 struct bnx2 *bp = netdev_priv(dev);
4757 int rc;
4759 /* parameters already validated in ethtool_set_eeprom */
4761 rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
4763 return rc;
4766 static int
4767 bnx2_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
4769 struct bnx2 *bp = netdev_priv(dev);
4771 memset(coal, 0, sizeof(struct ethtool_coalesce));
4773 coal->rx_coalesce_usecs = bp->rx_ticks;
4774 coal->rx_max_coalesced_frames = bp->rx_quick_cons_trip;
4775 coal->rx_coalesce_usecs_irq = bp->rx_ticks_int;
4776 coal->rx_max_coalesced_frames_irq = bp->rx_quick_cons_trip_int;
4778 coal->tx_coalesce_usecs = bp->tx_ticks;
4779 coal->tx_max_coalesced_frames = bp->tx_quick_cons_trip;
4780 coal->tx_coalesce_usecs_irq = bp->tx_ticks_int;
4781 coal->tx_max_coalesced_frames_irq = bp->tx_quick_cons_trip_int;
4783 coal->stats_block_coalesce_usecs = bp->stats_ticks;
4785 return 0;
4788 static int
4789 bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
4791 struct bnx2 *bp = netdev_priv(dev);
4793 bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
4794 if (bp->rx_ticks > 0x3ff) bp->rx_ticks = 0x3ff;
4796 bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames;
4797 if (bp->rx_quick_cons_trip > 0xff) bp->rx_quick_cons_trip = 0xff;
4799 bp->rx_ticks_int = (u16) coal->rx_coalesce_usecs_irq;
4800 if (bp->rx_ticks_int > 0x3ff) bp->rx_ticks_int = 0x3ff;
4802 bp->rx_quick_cons_trip_int = (u16) coal->rx_max_coalesced_frames_irq;
4803 if (bp->rx_quick_cons_trip_int > 0xff)
4804 bp->rx_quick_cons_trip_int = 0xff;
4806 bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
4807 if (bp->tx_ticks > 0x3ff) bp->tx_ticks = 0x3ff;
4809 bp->tx_quick_cons_trip = (u16) coal->tx_max_coalesced_frames;
4810 if (bp->tx_quick_cons_trip > 0xff) bp->tx_quick_cons_trip = 0xff;
4812 bp->tx_ticks_int = (u16) coal->tx_coalesce_usecs_irq;
4813 if (bp->tx_ticks_int > 0x3ff) bp->tx_ticks_int = 0x3ff;
4815 bp->tx_quick_cons_trip_int = (u16) coal->tx_max_coalesced_frames_irq;
4816 if (bp->tx_quick_cons_trip_int > 0xff)
4817 bp->tx_quick_cons_trip_int = 0xff;
4819 bp->stats_ticks = coal->stats_block_coalesce_usecs;
4820 if (bp->stats_ticks > 0xffff00) bp->stats_ticks = 0xffff00;
4821 bp->stats_ticks &= 0xffff00;
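/* The clamps above match the apparent width of the chip's
 * coalescing fields: tick values fit in 10 bits (0x3ff), frame
 * trip counts in 8 bits (0xff), and the statistics tick value
 * keeps only whole multiples of 256 (low byte masked off).
 */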
4823 if (netif_running(bp->dev)) {
4824 bnx2_netif_stop(bp);
4825 bnx2_init_nic(bp);
4826 bnx2_netif_start(bp);
4829 return 0;
4832 static void
4833 bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
4835 struct bnx2 *bp = netdev_priv(dev);
4837 ering->rx_max_pending = MAX_TOTAL_RX_DESC_CNT;
4838 ering->rx_mini_max_pending = 0;
4839 ering->rx_jumbo_max_pending = 0;
4841 ering->rx_pending = bp->rx_ring_size;
4842 ering->rx_mini_pending = 0;
4843 ering->rx_jumbo_pending = 0;
4845 ering->tx_max_pending = MAX_TX_DESC_CNT;
4846 ering->tx_pending = bp->tx_ring_size;
4849 static int
4850 bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
4852 struct bnx2 *bp = netdev_priv(dev);
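/* Reject rings the hardware cannot support; in particular the TX
 * ring must hold more than MAX_SKB_FRAGS descriptors so that one
 * maximally-fragmented skb can always be queued.
 */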
4854 if ((ering->rx_pending > MAX_TOTAL_RX_DESC_CNT) ||
4855 (ering->tx_pending > MAX_TX_DESC_CNT) ||
4856 (ering->tx_pending <= MAX_SKB_FRAGS)) {
4858 return -EINVAL;
4860 if (netif_running(bp->dev)) {
4861 bnx2_netif_stop(bp);
4862 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
4863 bnx2_free_skbs(bp);
4864 bnx2_free_mem(bp);
4867 bnx2_set_rx_ring_size(bp, ering->rx_pending);
4868 bp->tx_ring_size = ering->tx_pending;
4870 if (netif_running(bp->dev)) {
4871 int rc;
4873 rc = bnx2_alloc_mem(bp);
4874 if (rc)
4875 return rc;
4876 bnx2_init_nic(bp);
4877 bnx2_netif_start(bp);
4880 return 0;
4883 static void
4884 bnx2_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
4886 struct bnx2 *bp = netdev_priv(dev);
4888 epause->autoneg = ((bp->autoneg & AUTONEG_FLOW_CTRL) != 0);
4889 epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) != 0);
4890 epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) != 0);
4893 static int
4894 bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
4896 struct bnx2 *bp = netdev_priv(dev);
4898 bp->req_flow_ctrl = 0;
4899 if (epause->rx_pause)
4900 bp->req_flow_ctrl |= FLOW_CTRL_RX;
4901 if (epause->tx_pause)
4902 bp->req_flow_ctrl |= FLOW_CTRL_TX;
4904 if (epause->autoneg) {
4905 bp->autoneg |= AUTONEG_FLOW_CTRL;
4907 else {
4908 bp->autoneg &= ~AUTONEG_FLOW_CTRL;
4911 spin_lock_bh(&bp->phy_lock);
4913 bnx2_setup_phy(bp);
4915 spin_unlock_bh(&bp->phy_lock);
4917 return 0;
4920 static u32
4921 bnx2_get_rx_csum(struct net_device *dev)
4923 struct bnx2 *bp = netdev_priv(dev);
4925 return bp->rx_csum;
4928 static int
4929 bnx2_set_rx_csum(struct net_device *dev, u32 data)
4931 struct bnx2 *bp = netdev_priv(dev);
4933 bp->rx_csum = data;
4934 return 0;
4937 #define BNX2_NUM_STATS 45
4939 static struct {
4940 char string[ETH_GSTRING_LEN];
4941 } bnx2_stats_str_arr[BNX2_NUM_STATS] = {
4942 { "rx_bytes" },
4943 { "rx_error_bytes" },
4944 { "tx_bytes" },
4945 { "tx_error_bytes" },
4946 { "rx_ucast_packets" },
4947 { "rx_mcast_packets" },
4948 { "rx_bcast_packets" },
4949 { "tx_ucast_packets" },
4950 { "tx_mcast_packets" },
4951 { "tx_bcast_packets" },
4952 { "tx_mac_errors" },
4953 { "tx_carrier_errors" },
4954 { "rx_crc_errors" },
4955 { "rx_align_errors" },
4956 { "tx_single_collisions" },
4957 { "tx_multi_collisions" },
4958 { "tx_deferred" },
4959 { "tx_excess_collisions" },
4960 { "tx_late_collisions" },
4961 { "tx_total_collisions" },
4962 { "rx_fragments" },
4963 { "rx_jabbers" },
4964 { "rx_undersize_packets" },
4965 { "rx_oversize_packets" },
4966 { "rx_64_byte_packets" },
4967 { "rx_65_to_127_byte_packets" },
4968 { "rx_128_to_255_byte_packets" },
4969 { "rx_256_to_511_byte_packets" },
4970 { "rx_512_to_1023_byte_packets" },
4971 { "rx_1024_to_1522_byte_packets" },
4972 { "rx_1523_to_9022_byte_packets" },
4973 { "tx_64_byte_packets" },
4974 { "tx_65_to_127_byte_packets" },
4975 { "tx_128_to_255_byte_packets" },
4976 { "tx_256_to_511_byte_packets" },
4977 { "tx_512_to_1023_byte_packets" },
4978 { "tx_1024_to_1522_byte_packets" },
4979 { "tx_1523_to_9022_byte_packets" },
4980 { "rx_xon_frames" },
4981 { "rx_xoff_frames" },
4982 { "tx_xon_frames" },
4983 { "tx_xoff_frames" },
4984 { "rx_mac_ctrl_frames" },
4985 { "rx_filtered_packets" },
4986 { "rx_discards" },
4989 #define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4)
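/* Offsets are in 32-bit words so they can index the u32 view of the
 * statistics block used by bnx2_get_ethtool_stats(); for 64-bit
 * counters the offset names the _hi word and the matching _lo word
 * sits at offset + 1.
 */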
4991 static const unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
4992 STATS_OFFSET32(stat_IfHCInOctets_hi),
4993 STATS_OFFSET32(stat_IfHCInBadOctets_hi),
4994 STATS_OFFSET32(stat_IfHCOutOctets_hi),
4995 STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
4996 STATS_OFFSET32(stat_IfHCInUcastPkts_hi),
4997 STATS_OFFSET32(stat_IfHCInMulticastPkts_hi),
4998 STATS_OFFSET32(stat_IfHCInBroadcastPkts_hi),
4999 STATS_OFFSET32(stat_IfHCOutUcastPkts_hi),
5000 STATS_OFFSET32(stat_IfHCOutMulticastPkts_hi),
5001 STATS_OFFSET32(stat_IfHCOutBroadcastPkts_hi),
5002 STATS_OFFSET32(stat_emac_tx_stat_dot3statsinternalmactransmiterrors),
5003 STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
5004 STATS_OFFSET32(stat_Dot3StatsFCSErrors),
5005 STATS_OFFSET32(stat_Dot3StatsAlignmentErrors),
5006 STATS_OFFSET32(stat_Dot3StatsSingleCollisionFrames),
5007 STATS_OFFSET32(stat_Dot3StatsMultipleCollisionFrames),
5008 STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
5009 STATS_OFFSET32(stat_Dot3StatsExcessiveCollisions),
5010 STATS_OFFSET32(stat_Dot3StatsLateCollisions),
5011 STATS_OFFSET32(stat_EtherStatsCollisions),
5012 STATS_OFFSET32(stat_EtherStatsFragments),
5013 STATS_OFFSET32(stat_EtherStatsJabbers),
5014 STATS_OFFSET32(stat_EtherStatsUndersizePkts),
5015 STATS_OFFSET32(stat_EtherStatsOverrsizePkts),
5016 STATS_OFFSET32(stat_EtherStatsPktsRx64Octets),
5017 STATS_OFFSET32(stat_EtherStatsPktsRx65Octetsto127Octets),
5018 STATS_OFFSET32(stat_EtherStatsPktsRx128Octetsto255Octets),
5019 STATS_OFFSET32(stat_EtherStatsPktsRx256Octetsto511Octets),
5020 STATS_OFFSET32(stat_EtherStatsPktsRx512Octetsto1023Octets),
5021 STATS_OFFSET32(stat_EtherStatsPktsRx1024Octetsto1522Octets),
5022 STATS_OFFSET32(stat_EtherStatsPktsRx1523Octetsto9022Octets),
5023 STATS_OFFSET32(stat_EtherStatsPktsTx64Octets),
5024 STATS_OFFSET32(stat_EtherStatsPktsTx65Octetsto127Octets),
5025 STATS_OFFSET32(stat_EtherStatsPktsTx128Octetsto255Octets),
5026 STATS_OFFSET32(stat_EtherStatsPktsTx256Octetsto511Octets),
5027 STATS_OFFSET32(stat_EtherStatsPktsTx512Octetsto1023Octets),
5028 STATS_OFFSET32(stat_EtherStatsPktsTx1024Octetsto1522Octets),
5029 STATS_OFFSET32(stat_EtherStatsPktsTx1523Octetsto9022Octets),
5030 STATS_OFFSET32(stat_XonPauseFramesReceived),
5031 STATS_OFFSET32(stat_XoffPauseFramesReceived),
5032 STATS_OFFSET32(stat_OutXonSent),
5033 STATS_OFFSET32(stat_OutXoffSent),
5034 STATS_OFFSET32(stat_MacControlFramesReceived),
5035 STATS_OFFSET32(stat_IfInFramesL2FilterDiscards),
5036 STATS_OFFSET32(stat_IfInMBUFDiscards),
5039 /* stat_IfHCInBadOctets and stat_Dot3StatsCarrierSenseErrors are
5040 * skipped because of errata. */
5042 static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
5043 8,0,8,8,8,8,8,8,8,8,
5044 4,0,4,4,4,4,4,4,4,4,
5045 4,4,4,4,4,4,4,4,4,4,
5046 4,4,4,4,4,4,4,4,4,4,
5047 4,4,4,4,4,
5050 static u8 bnx2_5708_stats_len_arr[BNX2_NUM_STATS] = {
5051 8,0,8,8,8,8,8,8,8,8,
5052 4,4,4,4,4,4,4,4,4,4,
5053 4,4,4,4,4,4,4,4,4,4,
5054 4,4,4,4,4,4,4,4,4,4,
5055 4,4,4,4,4,
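/* Width tables for the two chip families: 8 = 64-bit counter
 * (hi/lo pair), 4 = 32-bit counter, 0 = counter skipped because of
 * the errata noted above and reported as zero.
 */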
5058 #define BNX2_NUM_TESTS 6
5060 static struct {
5061 char string[ETH_GSTRING_LEN];
5062 } bnx2_tests_str_arr[BNX2_NUM_TESTS] = {
5063 { "register_test (offline)" },
5064 { "memory_test (offline)" },
5065 { "loopback_test (offline)" },
5066 { "nvram_test (online)" },
5067 { "interrupt_test (online)" },
5068 { "link_test (online)" },
5071 static int
5072 bnx2_self_test_count(struct net_device *dev)
5074 return BNX2_NUM_TESTS;
5077 static void
5078 bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
5080 struct bnx2 *bp = netdev_priv(dev);
5082 memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS);
5083 if (etest->flags & ETH_TEST_FL_OFFLINE) {
5084 bnx2_netif_stop(bp);
5085 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
5086 bnx2_free_skbs(bp);
5088 if (bnx2_test_registers(bp) != 0) {
5089 buf[0] = 1;
5090 etest->flags |= ETH_TEST_FL_FAILED;
5092 if (bnx2_test_memory(bp) != 0) {
5093 buf[1] = 1;
5094 etest->flags |= ETH_TEST_FL_FAILED;
5096 if ((buf[2] = bnx2_test_loopback(bp)) != 0)
5097 etest->flags |= ETH_TEST_FL_FAILED;
5099 if (!netif_running(bp->dev)) {
5100 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
5102 else {
5103 bnx2_init_nic(bp);
5104 bnx2_netif_start(bp);
5107 /* wait for link up */
5108 msleep_interruptible(3000);
5109 if ((!bp->link_up) && !(bp->phy_flags & PHY_SERDES_FLAG))
5110 msleep_interruptible(4000);
5113 if (bnx2_test_nvram(bp) != 0) {
5114 buf[3] = 1;
5115 etest->flags |= ETH_TEST_FL_FAILED;
5117 if (bnx2_test_intr(bp) != 0) {
5118 buf[4] = 1;
5119 etest->flags |= ETH_TEST_FL_FAILED;
5122 if (bnx2_test_link(bp) != 0) {
5123 buf[5] = 1;
5124 etest->flags |= ETH_TEST_FL_FAILED;
5129 static void
5130 bnx2_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
5132 switch (stringset) {
5133 case ETH_SS_STATS:
5134 memcpy(buf, bnx2_stats_str_arr,
5135 sizeof(bnx2_stats_str_arr));
5136 break;
5137 case ETH_SS_TEST:
5138 memcpy(buf, bnx2_tests_str_arr,
5139 sizeof(bnx2_tests_str_arr));
5140 break;
5144 static int
5145 bnx2_get_stats_count(struct net_device *dev)
5147 return BNX2_NUM_STATS;
5150 static void
5151 bnx2_get_ethtool_stats(struct net_device *dev,
5152 struct ethtool_stats *stats, u64 *buf)
5154 struct bnx2 *bp = netdev_priv(dev);
5155 int i;
5156 u32 *hw_stats = (u32 *) bp->stats_blk;
5157 u8 *stats_len_arr = NULL;
5159 if (hw_stats == NULL) {
5160 memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS);
5161 return;
5164 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
5165 (CHIP_ID(bp) == CHIP_ID_5706_A1) ||
5166 (CHIP_ID(bp) == CHIP_ID_5706_A2) ||
5167 (CHIP_ID(bp) == CHIP_ID_5708_A0))
5168 stats_len_arr = bnx2_5706_stats_len_arr;
5169 else
5170 stats_len_arr = bnx2_5708_stats_len_arr;
5172 for (i = 0; i < BNX2_NUM_STATS; i++) {
5173 if (stats_len_arr[i] == 0) {
5174 /* skip this counter */
5175 buf[i] = 0;
5176 continue;
5178 if (stats_len_arr[i] == 4) {
5179 /* 4-byte counter */
5180 buf[i] = (u64)
5181 *(hw_stats + bnx2_stats_offset_arr[i]);
5182 continue;
5184 /* 8-byte counter */
5185 buf[i] = (((u64) *(hw_stats +
5186 bnx2_stats_offset_arr[i])) << 32) +
5187 *(hw_stats + bnx2_stats_offset_arr[i] + 1);
5191 static int
5192 bnx2_phys_id(struct net_device *dev, u32 data)
5194 struct bnx2 *bp = netdev_priv(dev);
5195 int i;
5196 u32 save;
5198 if (data == 0)
5199 data = 2;
5201 save = REG_RD(bp, BNX2_MISC_CFG);
5202 REG_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);
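/* Drive the port LED manually: the loop below toggles it every
 * 500 ms, so 'data' (ethtool's duration in seconds) produces that
 * many on/off blinks, with 0 treated as 2 seconds rather than
 * "blink until interrupted". The LED mode saved in 'save' is
 * restored on the way out.
 */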
5204 for (i = 0; i < (data * 2); i++) {
5205 if ((i % 2) == 0) {
5206 REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE);
5208 else {
5209 REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE |
5210 BNX2_EMAC_LED_1000MB_OVERRIDE |
5211 BNX2_EMAC_LED_100MB_OVERRIDE |
5212 BNX2_EMAC_LED_10MB_OVERRIDE |
5213 BNX2_EMAC_LED_TRAFFIC_OVERRIDE |
5214 BNX2_EMAC_LED_TRAFFIC);
5216 msleep_interruptible(500);
5217 if (signal_pending(current))
5218 break;
5220 REG_WR(bp, BNX2_EMAC_LED, 0);
5221 REG_WR(bp, BNX2_MISC_CFG, save);
5222 return 0;
5225 static struct ethtool_ops bnx2_ethtool_ops = {
5226 .get_settings = bnx2_get_settings,
5227 .set_settings = bnx2_set_settings,
5228 .get_drvinfo = bnx2_get_drvinfo,
5229 .get_regs_len = bnx2_get_regs_len,
5230 .get_regs = bnx2_get_regs,
5231 .get_wol = bnx2_get_wol,
5232 .set_wol = bnx2_set_wol,
5233 .nway_reset = bnx2_nway_reset,
5234 .get_link = ethtool_op_get_link,
5235 .get_eeprom_len = bnx2_get_eeprom_len,
5236 .get_eeprom = bnx2_get_eeprom,
5237 .set_eeprom = bnx2_set_eeprom,
5238 .get_coalesce = bnx2_get_coalesce,
5239 .set_coalesce = bnx2_set_coalesce,
5240 .get_ringparam = bnx2_get_ringparam,
5241 .set_ringparam = bnx2_set_ringparam,
5242 .get_pauseparam = bnx2_get_pauseparam,
5243 .set_pauseparam = bnx2_set_pauseparam,
5244 .get_rx_csum = bnx2_get_rx_csum,
5245 .set_rx_csum = bnx2_set_rx_csum,
5246 .get_tx_csum = ethtool_op_get_tx_csum,
5247 .set_tx_csum = ethtool_op_set_tx_csum,
5248 .get_sg = ethtool_op_get_sg,
5249 .set_sg = ethtool_op_set_sg,
5250 #ifdef BCM_TSO
5251 .get_tso = ethtool_op_get_tso,
5252 .set_tso = ethtool_op_set_tso,
5253 #endif
5254 .self_test_count = bnx2_self_test_count,
5255 .self_test = bnx2_self_test,
5256 .get_strings = bnx2_get_strings,
5257 .phys_id = bnx2_phys_id,
5258 .get_stats_count = bnx2_get_stats_count,
5259 .get_ethtool_stats = bnx2_get_ethtool_stats,
5260 .get_perm_addr = ethtool_op_get_perm_addr,
5263 /* Called with rtnl_lock */
5264 static int
5265 bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
5267 struct mii_ioctl_data *data = if_mii(ifr);
5268 struct bnx2 *bp = netdev_priv(dev);
5269 int err;
5271 switch(cmd) {
5272 case SIOCGMIIPHY:
5273 data->phy_id = bp->phy_addr;
5275 /* fallthru */
5276 case SIOCGMIIREG: {
5277 u32 mii_regval;
5279 spin_lock_bh(&bp->phy_lock);
5280 err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval);
5281 spin_unlock_bh(&bp->phy_lock);
5283 data->val_out = mii_regval;
5285 return err;
5288 case SIOCSMIIREG:
5289 if (!capable(CAP_NET_ADMIN))
5290 return -EPERM;
5292 spin_lock_bh(&bp->phy_lock);
5293 err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in);
5294 spin_unlock_bh(&bp->phy_lock);
5296 return err;
5298 default:
5299 /* do nothing */
5300 break;
5302 return -EOPNOTSUPP;
5305 /* Called with rtnl_lock */
5306 static int
5307 bnx2_change_mac_addr(struct net_device *dev, void *p)
5309 struct sockaddr *addr = p;
5310 struct bnx2 *bp = netdev_priv(dev);
5312 if (!is_valid_ether_addr(addr->sa_data))
5313 return -EINVAL;
5315 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
5316 if (netif_running(dev))
5317 bnx2_set_mac_addr(bp);
5319 return 0;
5322 /* Called with rtnl_lock */
5323 static int
5324 bnx2_change_mtu(struct net_device *dev, int new_mtu)
5326 struct bnx2 *bp = netdev_priv(dev);
5328 if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) ||
5329 ((new_mtu + ETH_HLEN) < MIN_ETHERNET_PACKET_SIZE))
5330 return -EINVAL;
5332 dev->mtu = new_mtu;
5333 if (netif_running(dev)) {
5334 bnx2_netif_stop(bp);
5336 bnx2_init_nic(bp);
5338 bnx2_netif_start(bp);
5340 return 0;
5343 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
5344 static void
5345 poll_bnx2(struct net_device *dev)
5347 struct bnx2 *bp = netdev_priv(dev);
5349 disable_irq(bp->pdev->irq);
5350 bnx2_interrupt(bp->pdev->irq, dev, NULL);
5351 enable_irq(bp->pdev->irq);
5353 #endif
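/* poll_bnx2() lets netpoll clients such as netconsole pump the
 * interrupt handler with the IRQ line disabled, for contexts where
 * normal interrupt delivery cannot be relied on.
 */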
5355 static int __devinit
5356 bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
5358 struct bnx2 *bp;
5359 unsigned long mem_len;
5360 int rc;
5361 u32 reg;
5363 SET_MODULE_OWNER(dev);
5364 SET_NETDEV_DEV(dev, &pdev->dev);
5365 bp = netdev_priv(dev);
5367 bp->flags = 0;
5368 bp->phy_flags = 0;
5370 /* enable device (incl. PCI PM wakeup), and bus-mastering */
5371 rc = pci_enable_device(pdev);
5372 if (rc) {
5373 printk(KERN_ERR PFX "Cannot enable PCI device, aborting.\n");
5374 goto err_out;
5377 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
5378 printk(KERN_ERR PFX "Cannot find PCI device base address, "
5379 "aborting.\n");
5380 rc = -ENODEV;
5381 goto err_out_disable;
5384 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
5385 if (rc) {
5386 printk(KERN_ERR PFX "Cannot obtain PCI resources, aborting.\n");
5387 goto err_out_disable;
5390 pci_set_master(pdev);
5392 bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
5393 if (bp->pm_cap == 0) {
5394 printk(KERN_ERR PFX "Cannot find power management capability, "
5395 "aborting.\n");
5396 rc = -EIO;
5397 goto err_out_release;
5400 bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
5401 if (bp->pcix_cap == 0) {
5402 printk(KERN_ERR PFX "Cannot find PCIX capability, aborting.\n");
5403 rc = -EIO;
5404 goto err_out_release;
5407 if (pci_set_dma_mask(pdev, DMA_64BIT_MASK) == 0) {
5408 bp->flags |= USING_DAC_FLAG;
5409 if (pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK) != 0) {
5410 printk(KERN_ERR PFX "pci_set_consistent_dma_mask "
5411 "failed, aborting.\n");
5412 rc = -EIO;
5413 goto err_out_release;
5416 else if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) != 0) {
5417 printk(KERN_ERR PFX "System does not support DMA, aborting.\n");
5418 rc = -EIO;
5419 goto err_out_release;
5422 bp->dev = dev;
5423 bp->pdev = pdev;
5425 spin_lock_init(&bp->phy_lock);
5426 spin_lock_init(&bp->tx_lock);
5427 INIT_WORK(&bp->reset_task, bnx2_reset_task, bp);
5429 dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0);
5430 mem_len = MB_GET_CID_ADDR(17);
5431 dev->mem_end = dev->mem_start + mem_len;
5432 dev->irq = pdev->irq;
5434 bp->regview = ioremap_nocache(dev->base_addr, mem_len);
5436 if (!bp->regview) {
5437 printk(KERN_ERR PFX "Cannot map register space, aborting.\n");
5438 rc = -ENOMEM;
5439 goto err_out_release;
5442 /* Configure byte swap and enable write to the reg_window registers.
5443 * Rely on CPU to do target byte swapping on big endian systems
5444 * The chip's target access swapping will not swap all accesses. */
5446 pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG,
5447 BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
5448 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);
5450 bnx2_set_power_state(bp, PCI_D0);
5452 bp->chip_id = REG_RD(bp, BNX2_MISC_ID);
5454 /* Get bus information. */
5455 reg = REG_RD(bp, BNX2_PCICFG_MISC_STATUS);
5456 if (reg & BNX2_PCICFG_MISC_STATUS_PCIX_DET) {
5457 u32 clkreg;
5459 bp->flags |= PCIX_FLAG;
5461 clkreg = REG_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS);
5463 clkreg &= BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
5464 switch (clkreg) {
5465 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
5466 bp->bus_speed_mhz = 133;
5467 break;
5469 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
5470 bp->bus_speed_mhz = 100;
5471 break;
5473 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
5474 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
5475 bp->bus_speed_mhz = 66;
5476 break;
5478 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
5479 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
5480 bp->bus_speed_mhz = 50;
5481 break;
5483 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
5484 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
5485 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
5486 bp->bus_speed_mhz = 33;
5487 break;
5490 else {
5491 if (reg & BNX2_PCICFG_MISC_STATUS_M66EN)
5492 bp->bus_speed_mhz = 66;
5493 else
5494 bp->bus_speed_mhz = 33;
5497 if (reg & BNX2_PCICFG_MISC_STATUS_32BIT_DET)
5498 bp->flags |= PCI_32BIT_FLAG;
5500 /* 5706A0 may falsely detect SERR and PERR. */
5501 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
5502 reg = REG_RD(bp, PCI_COMMAND);
5503 reg &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
5504 REG_WR(bp, PCI_COMMAND, reg);
5506 else if ((CHIP_ID(bp) == CHIP_ID_5706_A1) &&
5507 !(bp->flags & PCIX_FLAG)) {
5509 printk(KERN_ERR PFX "5706 A1 can only be used in a PCIX bus, "
5510 "aborting.\n");
/* rc may still be 0 at this point, which would make the probe
 * appear to succeed; report an error explicitly.
 */
rc = -EPERM;
5511 goto err_out_unmap;
5514 bnx2_init_nvram(bp);
5516 reg = REG_RD_IND(bp, BNX2_SHM_HDR_SIGNATURE);
5518 if ((reg & BNX2_SHM_HDR_SIGNATURE_SIG_MASK) ==
5519 BNX2_SHM_HDR_SIGNATURE_SIG)
5520 bp->shmem_base = REG_RD_IND(bp, BNX2_SHM_HDR_ADDR_0);
5521 else
5522 bp->shmem_base = HOST_VIEW_SHMEM_BASE;
5524 /* Get the permanent MAC address. First we need to make sure the
5525 * firmware is actually running. */
5527 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_DEV_INFO_SIGNATURE);
5529 if ((reg & BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
5530 BNX2_DEV_INFO_SIGNATURE_MAGIC) {
5531 printk(KERN_ERR PFX "Firmware not running, aborting.\n");
5532 rc = -ENODEV;
5533 goto err_out_unmap;
5536 bp->fw_ver = REG_RD_IND(bp, bp->shmem_base + BNX2_DEV_INFO_BC_REV);
5538 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_MAC_UPPER);
5539 bp->mac_addr[0] = (u8) (reg >> 8);
5540 bp->mac_addr[1] = (u8) reg;
5542 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_MAC_LOWER);
5543 bp->mac_addr[2] = (u8) (reg >> 24);
5544 bp->mac_addr[3] = (u8) (reg >> 16);
5545 bp->mac_addr[4] = (u8) (reg >> 8);
5546 bp->mac_addr[5] = (u8) reg;
5548 bp->tx_ring_size = MAX_TX_DESC_CNT;
5549 bnx2_set_rx_ring_size(bp, 100);
5551 bp->rx_csum = 1;
5553 bp->rx_offset = sizeof(struct l2_fhdr) + 2;
5555 bp->tx_quick_cons_trip_int = 20;
5556 bp->tx_quick_cons_trip = 20;
5557 bp->tx_ticks_int = 80;
5558 bp->tx_ticks = 80;
5560 bp->rx_quick_cons_trip_int = 6;
5561 bp->rx_quick_cons_trip = 6;
5562 bp->rx_ticks_int = 18;
5563 bp->rx_ticks = 18;
5565 bp->stats_ticks = 1000000 & 0xffff00;
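/* Default tuning: statistics block updates roughly once per second
 * (1000000 usec with the low byte cleared, matching the 0xffff00
 * mask bnx2_set_coalesce() enforces).
 */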
5567 bp->timer_interval = HZ;
5568 bp->current_interval = HZ;
5570 bp->phy_addr = 1;
5572 /* Disable WOL support if we are running on a SERDES chip. */
5573 if (CHIP_BOND_ID(bp) & CHIP_BOND_ID_SERDES_BIT) {
5574 bp->phy_flags |= PHY_SERDES_FLAG;
5575 bp->flags |= NO_WOL_FLAG;
5576 if (CHIP_NUM(bp) == CHIP_NUM_5708) {
5577 bp->phy_addr = 2;
5578 reg = REG_RD_IND(bp, bp->shmem_base +
5579 BNX2_SHARED_HW_CFG_CONFIG);
5580 if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G)
5581 bp->phy_flags |= PHY_2_5G_CAPABLE_FLAG;
5585 if (CHIP_NUM(bp) == CHIP_NUM_5708)
5586 bp->flags |= NO_WOL_FLAG;
5588 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
5589 bp->tx_quick_cons_trip_int =
5590 bp->tx_quick_cons_trip;
5591 bp->tx_ticks_int = bp->tx_ticks;
5592 bp->rx_quick_cons_trip_int =
5593 bp->rx_quick_cons_trip;
5594 bp->rx_ticks_int = bp->rx_ticks;
5595 bp->comp_prod_trip_int = bp->comp_prod_trip;
5596 bp->com_ticks_int = bp->com_ticks;
5597 bp->cmd_ticks_int = bp->cmd_ticks;
5600 bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
5601 bp->req_line_speed = 0;
5602 if (bp->phy_flags & PHY_SERDES_FLAG) {
5603 bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;
5605 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG);
5606 reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
5607 if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
5608 bp->autoneg = 0;
5609 bp->req_line_speed = bp->line_speed = SPEED_1000;
5610 bp->req_duplex = DUPLEX_FULL;
5613 else {
5614 bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
5617 bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
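/* Link defaults: autonegotiate speed and flow control, advertise
 * every supported speed, and request pause in both directions; a
 * SERDES port strapped for fixed 1G (the PORT_HW_CFG check above)
 * instead forces 1000/full with autoneg off.
 */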
5619 init_timer(&bp->timer);
5620 bp->timer.expires = RUN_AT(bp->timer_interval);
5621 bp->timer.data = (unsigned long) bp;
5622 bp->timer.function = bnx2_timer;
5624 return 0;
5626 err_out_unmap:
5627 if (bp->regview) {
5628 iounmap(bp->regview);
5629 bp->regview = NULL;
5632 err_out_release:
5633 pci_release_regions(pdev);
5635 err_out_disable:
5636 pci_disable_device(pdev);
5637 pci_set_drvdata(pdev, NULL);
5639 err_out:
5640 return rc;
5643 static int __devinit
5644 bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
5646 static int version_printed = 0;
5647 struct net_device *dev = NULL;
5648 struct bnx2 *bp;
5649 int rc, i;
5651 if (version_printed++ == 0)
5652 printk(KERN_INFO "%s", version);
5654 /* dev zeroed in init_etherdev */
5655 dev = alloc_etherdev(sizeof(*bp));
5657 if (!dev)
5658 return -ENOMEM;
5660 rc = bnx2_init_board(pdev, dev);
5661 if (rc < 0) {
5662 free_netdev(dev);
5663 return rc;
5666 dev->open = bnx2_open;
5667 dev->hard_start_xmit = bnx2_start_xmit;
5668 dev->stop = bnx2_close;
5669 dev->get_stats = bnx2_get_stats;
5670 dev->set_multicast_list = bnx2_set_rx_mode;
5671 dev->do_ioctl = bnx2_ioctl;
5672 dev->set_mac_address = bnx2_change_mac_addr;
5673 dev->change_mtu = bnx2_change_mtu;
5674 dev->tx_timeout = bnx2_tx_timeout;
5675 dev->watchdog_timeo = TX_TIMEOUT;
5676 #ifdef BCM_VLAN
5677 dev->vlan_rx_register = bnx2_vlan_rx_register;
5678 dev->vlan_rx_kill_vid = bnx2_vlan_rx_kill_vid;
5679 #endif
5680 dev->poll = bnx2_poll;
5681 dev->ethtool_ops = &bnx2_ethtool_ops;
5682 dev->weight = 64;
5684 bp = netdev_priv(dev);
5686 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
5687 dev->poll_controller = poll_bnx2;
5688 #endif
5690 if ((rc = register_netdev(dev))) {
5691 printk(KERN_ERR PFX "Cannot register net device\n");
5692 if (bp->regview)
5693 iounmap(bp->regview);
5694 pci_release_regions(pdev);
5695 pci_disable_device(pdev);
5696 pci_set_drvdata(pdev, NULL);
5697 free_netdev(dev);
5698 return rc;
5701 pci_set_drvdata(pdev, dev);
5703 memcpy(dev->dev_addr, bp->mac_addr, 6);
5704 memcpy(dev->perm_addr, bp->mac_addr, 6);
5705 bp->name = board_info[ent->driver_data].name;
5706 printk(KERN_INFO "%s: %s (%c%d) PCI%s %s %dMHz found at mem %lx, "
5707 "IRQ %d, ",
5708 dev->name,
5709 bp->name,
5710 ((CHIP_ID(bp) & 0xf000) >> 12) + 'A',
5711 ((CHIP_ID(bp) & 0x0ff0) >> 4),
5712 ((bp->flags & PCIX_FLAG) ? "-X" : ""),
5713 ((bp->flags & PCI_32BIT_FLAG) ? "32-bit" : "64-bit"),
5714 bp->bus_speed_mhz,
5715 dev->base_addr,
5716 bp->pdev->irq);
5718 printk("node addr ");
5719 for (i = 0; i < 6; i++)
5720 printk("%2.2x", dev->dev_addr[i]);
5721 printk("\n");
5723 dev->features |= NETIF_F_SG;
5724 if (bp->flags & USING_DAC_FLAG)
5725 dev->features |= NETIF_F_HIGHDMA;
5726 dev->features |= NETIF_F_IP_CSUM;
5727 #ifdef BCM_VLAN
5728 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
5729 #endif
5730 #ifdef BCM_TSO
5731 dev->features |= NETIF_F_TSO;
5732 #endif
5734 netif_carrier_off(bp->dev);
5736 return 0;
5739 static void __devexit
5740 bnx2_remove_one(struct pci_dev *pdev)
5742 struct net_device *dev = pci_get_drvdata(pdev);
5743 struct bnx2 *bp = netdev_priv(dev);
5745 flush_scheduled_work();
5747 unregister_netdev(dev);
5749 if (bp->regview)
5750 iounmap(bp->regview);
5752 free_netdev(dev);
5753 pci_release_regions(pdev);
5754 pci_disable_device(pdev);
5755 pci_set_drvdata(pdev, NULL);
5758 static int
5759 bnx2_suspend(struct pci_dev *pdev, pm_message_t state)
5761 struct net_device *dev = pci_get_drvdata(pdev);
5762 struct bnx2 *bp = netdev_priv(dev);
5763 u32 reset_code;
5765 if (!netif_running(dev))
5766 return 0;
5768 flush_scheduled_work();
5769 bnx2_netif_stop(bp);
5770 netif_device_detach(dev);
5771 del_timer_sync(&bp->timer);
5772 if (bp->flags & NO_WOL_FLAG)
5773 reset_code = BNX2_DRV_MSG_CODE_UNLOAD;
5774 else if (bp->wol)
5775 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
5776 else
5777 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
5778 bnx2_reset_chip(bp, reset_code);
5779 bnx2_free_skbs(bp);
5780 bnx2_set_power_state(bp, pci_choose_state(pdev, state));
5781 return 0;
5784 static int
5785 bnx2_resume(struct pci_dev *pdev)
5787 struct net_device *dev = pci_get_drvdata(pdev);
5788 struct bnx2 *bp = netdev_priv(dev);
5790 if (!netif_running(dev))
5791 return 0;
5793 bnx2_set_power_state(bp, PCI_D0);
5794 netif_device_attach(dev);
5795 bnx2_init_nic(bp);
5796 bnx2_netif_start(bp);
5797 return 0;
5800 static struct pci_driver bnx2_pci_driver = {
5801 .name = DRV_MODULE_NAME,
5802 .id_table = bnx2_pci_tbl,
5803 .probe = bnx2_init_one,
5804 .remove = __devexit_p(bnx2_remove_one),
5805 .suspend = bnx2_suspend,
5806 .resume = bnx2_resume,
5809 static int __init bnx2_init(void)
5811 return pci_module_init(&bnx2_pci_driver);
5814 static void __exit bnx2_cleanup(void)
5816 pci_unregister_driver(&bnx2_pci_driver);
5819 module_init(bnx2_init);
5820 module_exit(bnx2_cleanup);