/* bnx2.c: Broadcom NX2 network driver.
 *
 * Copyright (c) 2004, 2005, 2006 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Written by: Michael Chan  (mchan@broadcom.com)
 */
#include <linux/module.h>
#include <linux/moduleparam.h>

#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <asm/bitops.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <asm/page.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#ifdef NETIF_F_HW_VLAN_TX
#include <linux/if_vlan.h>
#define BCM_VLAN 1
#endif
#include <net/ip.h>
#include <net/tcp.h>
#include <net/checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/prefetch.h>
#include <linux/cache.h>
#include <linux/zlib.h>

#include "bnx2.h"
#include "bnx2_fw.h"
#include "bnx2_fw2.h"
#define DRV_MODULE_NAME		"bnx2"
#define PFX DRV_MODULE_NAME	": "
#define DRV_MODULE_VERSION	"1.5.8.1"
#define DRV_MODULE_RELDATE	"May 7, 2007"

#define RUN_AT(x) (jiffies + (x))

/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT  (5*HZ)
static const char version[] __devinitdata =
	"Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708 Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
static int disable_msi = 0;

module_param(disable_msi, int, 0);
MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
typedef enum {
	BCM5706 = 0,
	NC370T,
	NC370I,
	BCM5706S,
	NC370F,
	BCM5708,
	BCM5708S,
	BCM5709,
} board_t;

/* indexed by board_t, above */
static const struct {
	char *name;
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM5706 1000Base-T" },
	{ "HP NC370T Multifunction Gigabit Server Adapter" },
	{ "HP NC370i Multifunction Gigabit Server Adapter" },
	{ "Broadcom NetXtreme II BCM5706 1000Base-SX" },
	{ "HP NC370F Multifunction Gigabit Server Adapter" },
	{ "Broadcom NetXtreme II BCM5708 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5708 1000Base-SX" },
	{ "Broadcom NetXtreme II BCM5709 1000Base-T" },
	};
static struct pci_device_id bnx2_pci_tbl[] = {
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
	  PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709 },
	{ 0, }
};
static struct flash_spec flash_table[] =
{
	/* Slow EEPROM */
	{0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
	 1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - slow"},
	/* Expansion entry 0001 */
	{0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0001"},
	/* Saifun SA25F010 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
	 "Non-buffered flash (128kB)"},
	/* Saifun SA25F020 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
	 "Non-buffered flash (256kB)"},
	/* Expansion entry 0100 */
	{0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0100"},
	/* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
	{0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
	 0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
	 "Entry 0101: ST M45PE10 (128kB non-buffered)"},
	/* Entry 0110: ST M45PE20 (non-buffered flash)*/
	{0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
	 0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
	 "Entry 0110: ST M45PE20 (256kB non-buffered)"},
	/* Saifun SA25F005 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
	 "Non-buffered flash (64kB)"},
	/* Fast EEPROM */
	{0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
	 1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - fast"},
	/* Expansion entry 1001 */
	{0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1001"},
	/* Expansion entry 1010 */
	{0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1010"},
	/* ATMEL AT45DB011B (buffered flash) */
	{0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
	 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
	 "Buffered flash (128kB)"},
	/* Expansion entry 1100 */
	{0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1100"},
	/* Expansion entry 1101 */
	{0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1101"},
	/* Atmel Expansion entry 1110 */
	{0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
	 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1110 (Atmel)"},
	/* ATMEL AT45DB021B (buffered flash) */
	{0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
	 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
	 "Buffered flash (256kB)"},
};
MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
static inline u32
bnx2_tx_avail(struct bnx2 *bp)
{
	u32 diff;

	smp_mb();

	/* The ring uses 256 indices for 255 entries, one of them
	 * needs to be skipped.
	 */
	diff = bp->tx_prod - bp->tx_cons;
	if (unlikely(diff >= TX_DESC_CNT)) {
		diff &= 0xffff;
		if (diff == TX_DESC_CNT)
			diff = MAX_TX_DESC_CNT;
	}
	return (bp->tx_ring_size - diff);
}
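/* Worked example of the ring arithmetic above (editor's note, not part
 * of the original driver): tx_prod and tx_cons are free-running 16-bit
 * counters, so the subtraction wraps.  With tx_prod = 0x0005 and
 * tx_cons = 0xfffa, masking the difference with 0xffff recovers 11 BDs
 * in flight, and bnx2_tx_avail() returns tx_ring_size - 11.  When all
 * 256 indices are consumed (diff == TX_DESC_CNT), only 255 real
 * entries are in use, hence the clamp to MAX_TX_DESC_CNT.
 */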
static u32
bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
{
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
	return (REG_RD(bp, BNX2_PCICFG_REG_WINDOW));
}
static void
bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
{
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
}
static void
bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
{
	offset += cid_addr;
	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		int i;

		REG_WR(bp, BNX2_CTX_CTX_DATA, val);
		REG_WR(bp, BNX2_CTX_CTX_CTRL,
		       offset | BNX2_CTX_CTX_CTRL_WRITE_REQ);
		for (i = 0; i < 5; i++) {
			u32 val;
			val = REG_RD(bp, BNX2_CTX_CTX_CTRL);
			if ((val & BNX2_CTX_CTX_CTRL_WRITE_REQ) == 0)
				break;
			udelay(5);
		}
	} else {
		REG_WR(bp, BNX2_CTX_DATA_ADR, offset);
		REG_WR(bp, BNX2_CTX_DATA, val);
	}
}
static int
bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
{
	u32 val1;
	int i, ret;

	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	val1 = (bp->phy_addr << 21) | (reg << 16) |
		BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
		BNX2_EMAC_MDIO_COMM_START_BUSY;
	REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

	for (i = 0; i < 50; i++) {
		udelay(10);

		val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
			udelay(5);

			val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
			val1 &= BNX2_EMAC_MDIO_COMM_DATA;

			break;
		}
	}

	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
		*val = 0x0;
		ret = -EBUSY;
	}
	else {
		*val = val1;
		ret = 0;
	}

	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	return ret;
}
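/* Editor's note on the timing above (an inference, not an original
 * comment): the MDIO frame packs the PHY address into bits 25:21 and
 * the register into bits 20:16, clause-22 style, and the busy-wait
 * polls BNX2_EMAC_MDIO_COMM up to 50 times with a 10 usec delay per
 * pass, i.e. roughly a 500 usec budget for START_BUSY to clear before
 * the access is abandoned with -EBUSY.
 */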
static int
bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
{
	u32 val1;
	int i, ret;

	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	val1 = (bp->phy_addr << 21) | (reg << 16) | val |
		BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
		BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
	REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

	for (i = 0; i < 50; i++) {
		udelay(10);

		val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
			udelay(5);
			break;
		}
	}

	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
		ret = -EBUSY;
	else
		ret = 0;

	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	return ret;
}
static void
bnx2_disable_int(struct bnx2 *bp)
{
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
	       BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
	REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
}
static void
bnx2_enable_int(struct bnx2 *bp)
{
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
	       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
	       BNX2_PCICFG_INT_ACK_CMD_MASK_INT | bp->last_status_idx);

	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
	       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | bp->last_status_idx);

	REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
}
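/* Editor's reading of the sequence above (an interpretation, not an
 * original comment): the first write acknowledges events up to
 * last_status_idx with interrupts still masked, the second write
 * repeats the ack with the mask dropped, and the COAL_NOW command
 * makes the host coalescing block fire immediately if new events
 * arrived while interrupts were disabled.
 */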
static void
bnx2_disable_int_sync(struct bnx2 *bp)
{
	atomic_inc(&bp->intr_sem);
	bnx2_disable_int(bp);
	synchronize_irq(bp->pdev->irq);
}
static void
bnx2_netif_stop(struct bnx2 *bp)
{
	bnx2_disable_int_sync(bp);
	if (netif_running(bp->dev)) {
		netif_poll_disable(bp->dev);
		netif_tx_disable(bp->dev);
		bp->dev->trans_start = jiffies;	/* prevent tx timeout */
	}
}
static void
bnx2_netif_start(struct bnx2 *bp)
{
	if (atomic_dec_and_test(&bp->intr_sem)) {
		if (netif_running(bp->dev)) {
			netif_wake_queue(bp->dev);
			netif_poll_enable(bp->dev);
			bnx2_enable_int(bp);
		}
	}
}
static void
bnx2_free_mem(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->ctx_pages; i++) {
		if (bp->ctx_blk[i]) {
			pci_free_consistent(bp->pdev, BCM_PAGE_SIZE,
					    bp->ctx_blk[i],
					    bp->ctx_blk_mapping[i]);
			bp->ctx_blk[i] = NULL;
		}
	}
	if (bp->status_blk) {
		pci_free_consistent(bp->pdev, bp->status_stats_size,
				    bp->status_blk, bp->status_blk_mapping);
		bp->status_blk = NULL;
		bp->stats_blk = NULL;
	}
	if (bp->tx_desc_ring) {
		pci_free_consistent(bp->pdev,
				    sizeof(struct tx_bd) * TX_DESC_CNT,
				    bp->tx_desc_ring, bp->tx_desc_mapping);
		bp->tx_desc_ring = NULL;
	}
	kfree(bp->tx_buf_ring);
	bp->tx_buf_ring = NULL;
	for (i = 0; i < bp->rx_max_ring; i++) {
		if (bp->rx_desc_ring[i])
			pci_free_consistent(bp->pdev,
					    sizeof(struct rx_bd) * RX_DESC_CNT,
					    bp->rx_desc_ring[i],
					    bp->rx_desc_mapping[i]);
		bp->rx_desc_ring[i] = NULL;
	}
	vfree(bp->rx_buf_ring);
	bp->rx_buf_ring = NULL;
}
static int
bnx2_alloc_mem(struct bnx2 *bp)
{
	int i, status_blk_size;

	bp->tx_buf_ring = kzalloc(sizeof(struct sw_bd) * TX_DESC_CNT,
				  GFP_KERNEL);
	if (bp->tx_buf_ring == NULL)
		return -ENOMEM;

	bp->tx_desc_ring = pci_alloc_consistent(bp->pdev,
						sizeof(struct tx_bd) *
						TX_DESC_CNT,
						&bp->tx_desc_mapping);
	if (bp->tx_desc_ring == NULL)
		goto alloc_mem_err;

	bp->rx_buf_ring = vmalloc(sizeof(struct sw_bd) * RX_DESC_CNT *
				  bp->rx_max_ring);
	if (bp->rx_buf_ring == NULL)
		goto alloc_mem_err;

	memset(bp->rx_buf_ring, 0, sizeof(struct sw_bd) * RX_DESC_CNT *
				   bp->rx_max_ring);

	for (i = 0; i < bp->rx_max_ring; i++) {
		bp->rx_desc_ring[i] =
			pci_alloc_consistent(bp->pdev,
					     sizeof(struct rx_bd) * RX_DESC_CNT,
					     &bp->rx_desc_mapping[i]);
		if (bp->rx_desc_ring[i] == NULL)
			goto alloc_mem_err;
	}

	/* Combine status and statistics blocks into one allocation. */
	status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
	bp->status_stats_size = status_blk_size +
				sizeof(struct statistics_block);

	bp->status_blk = pci_alloc_consistent(bp->pdev, bp->status_stats_size,
					      &bp->status_blk_mapping);
	if (bp->status_blk == NULL)
		goto alloc_mem_err;

	memset(bp->status_blk, 0, bp->status_stats_size);

	bp->stats_blk = (void *) ((unsigned long) bp->status_blk +
				  status_blk_size);

	bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		bp->ctx_pages = 0x2000 / BCM_PAGE_SIZE;
		if (bp->ctx_pages == 0)
			bp->ctx_pages = 1;
		for (i = 0; i < bp->ctx_pages; i++) {
			bp->ctx_blk[i] = pci_alloc_consistent(bp->pdev,
						BCM_PAGE_SIZE,
						&bp->ctx_blk_mapping[i]);
			if (bp->ctx_blk[i] == NULL)
				goto alloc_mem_err;
		}
	}
	return 0;

alloc_mem_err:
	bnx2_free_mem(bp);
	return -ENOMEM;
}
static void
bnx2_report_fw_link(struct bnx2 *bp)
{
	u32 fw_link_status = 0;

	if (bp->link_up) {
		u32 bmsr;

		switch (bp->line_speed) {
		case SPEED_10:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_10HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_10FULL;
			break;
		case SPEED_100:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_100HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_100FULL;
			break;
		case SPEED_1000:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_1000HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_1000FULL;
			break;
		case SPEED_2500:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_2500HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_2500FULL;
			break;
		}

		fw_link_status |= BNX2_LINK_STATUS_LINK_UP;

		if (bp->autoneg) {
			fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;

			bnx2_read_phy(bp, MII_BMSR, &bmsr);
			bnx2_read_phy(bp, MII_BMSR, &bmsr);

			if (!(bmsr & BMSR_ANEGCOMPLETE) ||
			    bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)
				fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
			else
				fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
		}
	}
	else
		fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;

	REG_WR_IND(bp, bp->shmem_base + BNX2_LINK_STATUS, fw_link_status);
}
static void
bnx2_report_link(struct bnx2 *bp)
{
	if (bp->link_up) {
		netif_carrier_on(bp->dev);
		printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);

		printk("%d Mbps ", bp->line_speed);

		if (bp->duplex == DUPLEX_FULL)
			printk("full duplex");
		else
			printk("half duplex");

		if (bp->flow_ctrl) {
			if (bp->flow_ctrl & FLOW_CTRL_RX) {
				printk(", receive ");
				if (bp->flow_ctrl & FLOW_CTRL_TX)
					printk("& transmit ");
			}
			else {
				printk(", transmit ");
			}
			printk("flow control ON");
		}
		printk("\n");
	}
	else {
		netif_carrier_off(bp->dev);
		printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
	}

	bnx2_report_fw_link(bp);
}
static void
bnx2_resolve_flow_ctrl(struct bnx2 *bp)
{
	u32 local_adv, remote_adv;

	bp->flow_ctrl = 0;
	if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
		(AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {

		if (bp->duplex == DUPLEX_FULL) {
			bp->flow_ctrl = bp->req_flow_ctrl;
		}
		return;
	}

	if (bp->duplex != DUPLEX_FULL) {
		return;
	}

	if ((bp->phy_flags & PHY_SERDES_FLAG) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5708)) {
		u32 val;

		bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
		if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_TX;
		if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_RX;
		return;
	}

	bnx2_read_phy(bp, MII_ADVERTISE, &local_adv);
	bnx2_read_phy(bp, MII_LPA, &remote_adv);

	if (bp->phy_flags & PHY_SERDES_FLAG) {
		u32 new_local_adv = 0;
		u32 new_remote_adv = 0;

		if (local_adv & ADVERTISE_1000XPAUSE)
			new_local_adv |= ADVERTISE_PAUSE_CAP;
		if (local_adv & ADVERTISE_1000XPSE_ASYM)
			new_local_adv |= ADVERTISE_PAUSE_ASYM;
		if (remote_adv & ADVERTISE_1000XPAUSE)
			new_remote_adv |= ADVERTISE_PAUSE_CAP;
		if (remote_adv & ADVERTISE_1000XPSE_ASYM)
			new_remote_adv |= ADVERTISE_PAUSE_ASYM;

		local_adv = new_local_adv;
		remote_adv = new_remote_adv;
	}

	/* See Table 28B-3 of 802.3ab-1999 spec. */
	if (local_adv & ADVERTISE_PAUSE_CAP) {
		if (local_adv & ADVERTISE_PAUSE_ASYM) {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
			else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
				bp->flow_ctrl = FLOW_CTRL_RX;
			}
		}
		else {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
		}
	}
	else if (local_adv & ADVERTISE_PAUSE_ASYM) {
		if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
			(remote_adv & ADVERTISE_PAUSE_ASYM)) {
			bp->flow_ctrl = FLOW_CTRL_TX;
		}
	}
}
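/* Editor's summary of the resolution implemented above (not an
 * original comment), with P = ADVERTISE_PAUSE_CAP and A =
 * ADVERTISE_PAUSE_ASYM after the 1000X bits are remapped:
 *
 *	local P       + remote P		-> TX | RX
 *	local P,A     + remote A (no P)		-> RX only
 *	local A only  + remote P,A		-> TX only
 *	anything else				-> no pause
 */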
static int
bnx2_5708s_linkup(struct bnx2 *bp)
{
	u32 val;

	bp->link_up = 1;
	bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
	switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
		case BCM5708S_1000X_STAT1_SPEED_10:
			bp->line_speed = SPEED_10;
			break;
		case BCM5708S_1000X_STAT1_SPEED_100:
			bp->line_speed = SPEED_100;
			break;
		case BCM5708S_1000X_STAT1_SPEED_1G:
			bp->line_speed = SPEED_1000;
			break;
		case BCM5708S_1000X_STAT1_SPEED_2G5:
			bp->line_speed = SPEED_2500;
			break;
	}
	if (val & BCM5708S_1000X_STAT1_FD)
		bp->duplex = DUPLEX_FULL;
	else
		bp->duplex = DUPLEX_HALF;

	return 0;
}
static int
bnx2_5706s_linkup(struct bnx2 *bp)
{
	u32 bmcr, local_adv, remote_adv, common;

	bp->link_up = 1;
	bp->line_speed = SPEED_1000;

	bnx2_read_phy(bp, MII_BMCR, &bmcr);
	if (bmcr & BMCR_FULLDPLX) {
		bp->duplex = DUPLEX_FULL;
	}
	else {
		bp->duplex = DUPLEX_HALF;
	}

	if (!(bmcr & BMCR_ANENABLE)) {
		return 0;
	}

	bnx2_read_phy(bp, MII_ADVERTISE, &local_adv);
	bnx2_read_phy(bp, MII_LPA, &remote_adv);

	common = local_adv & remote_adv;
	if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {

		if (common & ADVERTISE_1000XFULL) {
			bp->duplex = DUPLEX_FULL;
		}
		else {
			bp->duplex = DUPLEX_HALF;
		}
	}

	return 0;
}
static int
bnx2_copper_linkup(struct bnx2 *bp)
{
	u32 bmcr;

	bnx2_read_phy(bp, MII_BMCR, &bmcr);
	if (bmcr & BMCR_ANENABLE) {
		u32 local_adv, remote_adv, common;

		bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
		bnx2_read_phy(bp, MII_STAT1000, &remote_adv);

		common = local_adv & (remote_adv >> 2);
		if (common & ADVERTISE_1000FULL) {
			bp->line_speed = SPEED_1000;
			bp->duplex = DUPLEX_FULL;
		}
		else if (common & ADVERTISE_1000HALF) {
			bp->line_speed = SPEED_1000;
			bp->duplex = DUPLEX_HALF;
		}
		else {
			bnx2_read_phy(bp, MII_ADVERTISE, &local_adv);
			bnx2_read_phy(bp, MII_LPA, &remote_adv);

			common = local_adv & remote_adv;
			if (common & ADVERTISE_100FULL) {
				bp->line_speed = SPEED_100;
				bp->duplex = DUPLEX_FULL;
			}
			else if (common & ADVERTISE_100HALF) {
				bp->line_speed = SPEED_100;
				bp->duplex = DUPLEX_HALF;
			}
			else if (common & ADVERTISE_10FULL) {
				bp->line_speed = SPEED_10;
				bp->duplex = DUPLEX_FULL;
			}
			else if (common & ADVERTISE_10HALF) {
				bp->line_speed = SPEED_10;
				bp->duplex = DUPLEX_HALF;
			}
			else {
				bp->line_speed = 0;
				bp->link_up = 0;
			}
		}
	}
	else {
		if (bmcr & BMCR_SPEED100) {
			bp->line_speed = SPEED_100;
		}
		else {
			bp->line_speed = SPEED_10;
		}
		if (bmcr & BMCR_FULLDPLX) {
			bp->duplex = DUPLEX_FULL;
		}
		else {
			bp->duplex = DUPLEX_HALF;
		}
	}

	return 0;
}
static void
bnx2_set_mac_link(struct bnx2 *bp)
{
	u32 val;

	REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
	if (bp->link_up && (bp->line_speed == SPEED_1000) &&
		(bp->duplex == DUPLEX_HALF)) {
		REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
	}

	/* Configure the EMAC mode register. */
	val = REG_RD(bp, BNX2_EMAC_MODE);

	val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
		BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
		BNX2_EMAC_MODE_25G_MODE);

	if (bp->link_up) {
		switch (bp->line_speed) {
			case SPEED_10:
				if (CHIP_NUM(bp) != CHIP_NUM_5706) {
					val |= BNX2_EMAC_MODE_PORT_MII_10M;
					break;
				}
				/* fall through */
			case SPEED_100:
				val |= BNX2_EMAC_MODE_PORT_MII;
				break;
			case SPEED_2500:
				val |= BNX2_EMAC_MODE_25G_MODE;
				/* fall through */
			case SPEED_1000:
				val |= BNX2_EMAC_MODE_PORT_GMII;
				break;
		}
	}
	else {
		val |= BNX2_EMAC_MODE_PORT_GMII;
	}

	/* Set the MAC to operate in the appropriate duplex mode. */
	if (bp->duplex == DUPLEX_HALF)
		val |= BNX2_EMAC_MODE_HALF_DUPLEX;
	REG_WR(bp, BNX2_EMAC_MODE, val);

	/* Enable/disable rx PAUSE. */
	bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_RX)
		bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);

	/* Enable/disable tx PAUSE. */
	val = REG_RD(bp, BNX2_EMAC_TX_MODE);
	val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_TX)
		val |= BNX2_EMAC_TX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_TX_MODE, val);

	/* Acknowledge the interrupt. */
	REG_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);
}
static int
bnx2_set_link(struct bnx2 *bp)
{
	u32 bmsr;
	u8 link_up;

	if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) {
		bp->link_up = 1;
		return 0;
	}

	link_up = bp->link_up;

	bnx2_read_phy(bp, MII_BMSR, &bmsr);
	bnx2_read_phy(bp, MII_BMSR, &bmsr);

	if ((bp->phy_flags & PHY_SERDES_FLAG) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5706)) {
		u32 val;

		val = REG_RD(bp, BNX2_EMAC_STATUS);
		if (val & BNX2_EMAC_STATUS_LINK)
			bmsr |= BMSR_LSTATUS;
		else
			bmsr &= ~BMSR_LSTATUS;
	}

	if (bmsr & BMSR_LSTATUS) {
		bp->link_up = 1;

		if (bp->phy_flags & PHY_SERDES_FLAG) {
			if (CHIP_NUM(bp) == CHIP_NUM_5706)
				bnx2_5706s_linkup(bp);
			else if (CHIP_NUM(bp) == CHIP_NUM_5708)
				bnx2_5708s_linkup(bp);
		}
		else {
			bnx2_copper_linkup(bp);
		}
		bnx2_resolve_flow_ctrl(bp);
	}
	else {
		if ((bp->phy_flags & PHY_SERDES_FLAG) &&
			(bp->autoneg & AUTONEG_SPEED)) {

			u32 bmcr;

			bnx2_read_phy(bp, MII_BMCR, &bmcr);
			bmcr &= ~BCM5708S_BMCR_FORCE_2500;
			if (!(bmcr & BMCR_ANENABLE)) {
				bnx2_write_phy(bp, MII_BMCR, bmcr |
					BMCR_ANENABLE);
			}
		}
		bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
		bp->link_up = 0;
	}

	if (bp->link_up != link_up) {
		bnx2_report_link(bp);
	}

	bnx2_set_mac_link(bp);

	return 0;
}
static int
bnx2_reset_phy(struct bnx2 *bp)
{
	int i;
	u32 reg;

	bnx2_write_phy(bp, MII_BMCR, BMCR_RESET);

#define PHY_RESET_MAX_WAIT 100
	for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
		udelay(10);

		bnx2_read_phy(bp, MII_BMCR, &reg);
		if (!(reg & BMCR_RESET)) {
			udelay(20);
			break;
		}
	}
	if (i == PHY_RESET_MAX_WAIT) {
		return -EBUSY;
	}
	return 0;
}
static u32
bnx2_phy_get_pause_adv(struct bnx2 *bp)
{
	u32 adv = 0;

	if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
		(FLOW_CTRL_RX | FLOW_CTRL_TX)) {

		if (bp->phy_flags & PHY_SERDES_FLAG) {
			adv = ADVERTISE_1000XPAUSE;
		}
		else {
			adv = ADVERTISE_PAUSE_CAP;
		}
	}
	else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
		if (bp->phy_flags & PHY_SERDES_FLAG) {
			adv = ADVERTISE_1000XPSE_ASYM;
		}
		else {
			adv = ADVERTISE_PAUSE_ASYM;
		}
	}
	else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
		if (bp->phy_flags & PHY_SERDES_FLAG) {
			adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
		}
		else {
			adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
		}
	}
	return adv;
}
static int
bnx2_setup_serdes_phy(struct bnx2 *bp)
{
	u32 adv, bmcr, up1;
	u32 new_adv = 0;

	if (!(bp->autoneg & AUTONEG_SPEED)) {
		u32 new_bmcr;
		int force_link_down = 0;

		bnx2_read_phy(bp, MII_ADVERTISE, &adv);
		adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);

		bnx2_read_phy(bp, MII_BMCR, &bmcr);
		new_bmcr = bmcr & ~(BMCR_ANENABLE | BCM5708S_BMCR_FORCE_2500);
		new_bmcr |= BMCR_SPEED1000;
		if (bp->req_line_speed == SPEED_2500) {
			new_bmcr |= BCM5708S_BMCR_FORCE_2500;
			bnx2_read_phy(bp, BCM5708S_UP1, &up1);
			if (!(up1 & BCM5708S_UP1_2G5)) {
				up1 |= BCM5708S_UP1_2G5;
				bnx2_write_phy(bp, BCM5708S_UP1, up1);
				force_link_down = 1;
			}
		} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
			bnx2_read_phy(bp, BCM5708S_UP1, &up1);
			if (up1 & BCM5708S_UP1_2G5) {
				up1 &= ~BCM5708S_UP1_2G5;
				bnx2_write_phy(bp, BCM5708S_UP1, up1);
				force_link_down = 1;
			}
		}

		if (bp->req_duplex == DUPLEX_FULL) {
			adv |= ADVERTISE_1000XFULL;
			new_bmcr |= BMCR_FULLDPLX;
		}
		else {
			adv |= ADVERTISE_1000XHALF;
			new_bmcr &= ~BMCR_FULLDPLX;
		}
		if ((new_bmcr != bmcr) || (force_link_down)) {
			/* Force a link down visible on the other side */
			if (bp->link_up) {
				bnx2_write_phy(bp, MII_ADVERTISE, adv &
					       ~(ADVERTISE_1000XFULL |
						 ADVERTISE_1000XHALF));
				bnx2_write_phy(bp, MII_BMCR, bmcr |
					BMCR_ANRESTART | BMCR_ANENABLE);

				bp->link_up = 0;
				netif_carrier_off(bp->dev);
				bnx2_write_phy(bp, MII_BMCR, new_bmcr);
				bnx2_report_link(bp);
			}
			bnx2_write_phy(bp, MII_ADVERTISE, adv);
			bnx2_write_phy(bp, MII_BMCR, new_bmcr);
		}
		return 0;
	}

	if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) {
		bnx2_read_phy(bp, BCM5708S_UP1, &up1);
		up1 |= BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, BCM5708S_UP1, up1);
	}

	if (bp->advertising & ADVERTISED_1000baseT_Full)
		new_adv |= ADVERTISE_1000XFULL;

	new_adv |= bnx2_phy_get_pause_adv(bp);

	bnx2_read_phy(bp, MII_ADVERTISE, &adv);
	bnx2_read_phy(bp, MII_BMCR, &bmcr);

	bp->serdes_an_pending = 0;
	if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
		/* Force a link down visible on the other side */
		if (bp->link_up) {
			bnx2_write_phy(bp, MII_BMCR, BMCR_LOOPBACK);
			spin_unlock_bh(&bp->phy_lock);
			msleep(20);
			spin_lock_bh(&bp->phy_lock);
		}

		bnx2_write_phy(bp, MII_ADVERTISE, new_adv);
		bnx2_write_phy(bp, MII_BMCR, bmcr | BMCR_ANRESTART |
			BMCR_ANENABLE);
		/* Speed up link-up time when the link partner
		 * does not autonegotiate which is very common
		 * in blade servers. Some blade servers use
		 * IPMI for keyboard input and it's important
		 * to minimize link disruptions. Autoneg. involves
		 * exchanging base pages plus 3 next pages and
		 * normally completes in about 120 msec.
		 */
		bp->current_interval = SERDES_AN_TIMEOUT;
		bp->serdes_an_pending = 1;
		mod_timer(&bp->timer, jiffies + bp->current_interval);
	}

	return 0;
}
#define ETHTOOL_ALL_FIBRE_SPEED						\
	(ADVERTISED_1000baseT_Full)

#define ETHTOOL_ALL_COPPER_SPEED					\
	(ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |		\
	ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |		\
	ADVERTISED_1000baseT_Full)

#define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
	ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)

#define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)
static int
bnx2_setup_copper_phy(struct bnx2 *bp)
{
	u32 bmcr;
	u32 new_bmcr;

	bnx2_read_phy(bp, MII_BMCR, &bmcr);

	if (bp->autoneg & AUTONEG_SPEED) {
		u32 adv_reg, adv1000_reg;
		u32 new_adv_reg = 0;
		u32 new_adv1000_reg = 0;

		bnx2_read_phy(bp, MII_ADVERTISE, &adv_reg);
		adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
			ADVERTISE_PAUSE_ASYM);

		bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
		adv1000_reg &= PHY_ALL_1000_SPEED;

		if (bp->advertising & ADVERTISED_10baseT_Half)
			new_adv_reg |= ADVERTISE_10HALF;
		if (bp->advertising & ADVERTISED_10baseT_Full)
			new_adv_reg |= ADVERTISE_10FULL;
		if (bp->advertising & ADVERTISED_100baseT_Half)
			new_adv_reg |= ADVERTISE_100HALF;
		if (bp->advertising & ADVERTISED_100baseT_Full)
			new_adv_reg |= ADVERTISE_100FULL;
		if (bp->advertising & ADVERTISED_1000baseT_Full)
			new_adv1000_reg |= ADVERTISE_1000FULL;

		new_adv_reg |= ADVERTISE_CSMA;

		new_adv_reg |= bnx2_phy_get_pause_adv(bp);

		if ((adv1000_reg != new_adv1000_reg) ||
			(adv_reg != new_adv_reg) ||
			((bmcr & BMCR_ANENABLE) == 0)) {

			bnx2_write_phy(bp, MII_ADVERTISE, new_adv_reg);
			bnx2_write_phy(bp, MII_CTRL1000, new_adv1000_reg);
			bnx2_write_phy(bp, MII_BMCR, BMCR_ANRESTART |
				BMCR_ANENABLE);
		}
		else if (bp->link_up) {
			/* Flow ctrl may have changed from auto to forced
			 * or vice-versa.
			 */
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	new_bmcr = 0;
	if (bp->req_line_speed == SPEED_100) {
		new_bmcr |= BMCR_SPEED100;
	}
	if (bp->req_duplex == DUPLEX_FULL) {
		new_bmcr |= BMCR_FULLDPLX;
	}
	if (new_bmcr != bmcr) {
		u32 bmsr;

		bnx2_read_phy(bp, MII_BMSR, &bmsr);
		bnx2_read_phy(bp, MII_BMSR, &bmsr);

		if (bmsr & BMSR_LSTATUS) {
			/* Force link down */
			bnx2_write_phy(bp, MII_BMCR, BMCR_LOOPBACK);
			spin_unlock_bh(&bp->phy_lock);
			msleep(50);
			spin_lock_bh(&bp->phy_lock);

			bnx2_read_phy(bp, MII_BMSR, &bmsr);
			bnx2_read_phy(bp, MII_BMSR, &bmsr);
		}

		bnx2_write_phy(bp, MII_BMCR, new_bmcr);

		/* Normally, the new speed is setup after the link has
		 * gone down and up again. In some cases, link will not go
		 * down so we need to set up the new speed here.
		 */
		if (bmsr & BMSR_LSTATUS) {
			bp->line_speed = bp->req_line_speed;
			bp->duplex = bp->req_duplex;
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
	}
	return 0;
}
static int
bnx2_setup_phy(struct bnx2 *bp)
{
	if (bp->loopback == MAC_LOOPBACK)
		return 0;

	if (bp->phy_flags & PHY_SERDES_FLAG) {
		return (bnx2_setup_serdes_phy(bp));
	}
	else {
		return (bnx2_setup_copper_phy(bp));
	}
}
static int
bnx2_init_5708s_phy(struct bnx2 *bp)
{
	u32 val;

	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
	bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);

	bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
	val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);

	bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
	val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);

	if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) {
		bnx2_read_phy(bp, BCM5708S_UP1, &val);
		val |= BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, BCM5708S_UP1, val);
	}

	if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
		/* increase tx signal amplitude */
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
			       BCM5708S_BLK_ADDR_TX_MISC);
		bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
		val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
		bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
	}

	val = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG) &
	      BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;

	if (val) {
		u32 is_backplane;

		is_backplane = REG_RD_IND(bp, bp->shmem_base +
					  BNX2_SHARED_HW_CFG_CONFIG);
		if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_TX_MISC);
			bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_DIG);
		}
	}
	return 0;
}
static int
bnx2_init_5706s_phy(struct bnx2 *bp)
{
	bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;

	if (CHIP_NUM(bp) == CHIP_NUM_5706)
		REG_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300);

	if (bp->dev->mtu > 1500) {
		u32 val;

		/* Set extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);

		bnx2_write_phy(bp, 0x1c, 0x6c00);
		bnx2_read_phy(bp, 0x1c, &val);
		bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
	}
	else {
		u32 val;

		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val & ~0x4007);

		bnx2_write_phy(bp, 0x1c, 0x6c00);
		bnx2_read_phy(bp, 0x1c, &val);
		bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
	}

	return 0;
}
static int
bnx2_init_copper_phy(struct bnx2 *bp)
{
	u32 val;

	if (bp->phy_flags & PHY_CRC_FIX_FLAG) {
		bnx2_write_phy(bp, 0x18, 0x0c00);
		bnx2_write_phy(bp, 0x17, 0x000a);
		bnx2_write_phy(bp, 0x15, 0x310b);
		bnx2_write_phy(bp, 0x17, 0x201f);
		bnx2_write_phy(bp, 0x15, 0x9506);
		bnx2_write_phy(bp, 0x17, 0x401f);
		bnx2_write_phy(bp, 0x15, 0x14e2);
		bnx2_write_phy(bp, 0x18, 0x0400);
	}

	if (bp->phy_flags & PHY_DIS_EARLY_DAC_FLAG) {
		bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS,
			       MII_BNX2_DSP_EXPAND_REG | 0x8);
		bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
		val &= ~(1 << 8);
		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val);
	}

	if (bp->dev->mtu > 1500) {
		/* Set extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val | 0x4000);

		bnx2_read_phy(bp, 0x10, &val);
		bnx2_write_phy(bp, 0x10, val | 0x1);
	}
	else {
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val & ~0x4007);

		bnx2_read_phy(bp, 0x10, &val);
		bnx2_write_phy(bp, 0x10, val & ~0x1);
	}

	/* ethernet@wirespeed */
	bnx2_write_phy(bp, 0x18, 0x7007);
	bnx2_read_phy(bp, 0x18, &val);
	bnx2_write_phy(bp, 0x18, val | (1 << 15) | (1 << 4));
	return 0;
}
static int
bnx2_init_phy(struct bnx2 *bp)
{
	u32 val;
	int rc = 0;

	bp->phy_flags &= ~PHY_INT_MODE_MASK_FLAG;
	bp->phy_flags |= PHY_INT_MODE_LINK_READY_FLAG;

	REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);

	bnx2_reset_phy(bp);

	bnx2_read_phy(bp, MII_PHYSID1, &val);
	bp->phy_id = val << 16;
	bnx2_read_phy(bp, MII_PHYSID2, &val);
	bp->phy_id |= val & 0xffff;

	if (bp->phy_flags & PHY_SERDES_FLAG) {
		if (CHIP_NUM(bp) == CHIP_NUM_5706)
			rc = bnx2_init_5706s_phy(bp);
		else if (CHIP_NUM(bp) == CHIP_NUM_5708)
			rc = bnx2_init_5708s_phy(bp);
	}
	else {
		rc = bnx2_init_copper_phy(bp);
	}

	bnx2_setup_phy(bp);

	return rc;
}
static int
bnx2_set_mac_loopback(struct bnx2 *bp)
{
	u32 mac_mode;

	mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
	mac_mode &= ~BNX2_EMAC_MODE_PORT;
	mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
	REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
	bp->link_up = 1;
	return 0;
}
static int bnx2_test_link(struct bnx2 *);

static int
bnx2_set_phy_loopback(struct bnx2 *bp)
{
	u32 mac_mode;
	int rc, i;

	spin_lock_bh(&bp->phy_lock);
	rc = bnx2_write_phy(bp, MII_BMCR, BMCR_LOOPBACK | BMCR_FULLDPLX |
			    BMCR_SPEED1000);
	spin_unlock_bh(&bp->phy_lock);
	if (rc)
		return rc;

	for (i = 0; i < 10; i++) {
		if (bnx2_test_link(bp) == 0)
			break;
		msleep(100);
	}

	mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
	mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
		      BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
		      BNX2_EMAC_MODE_25G_MODE);

	mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
	REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
	bp->link_up = 1;
	return 0;
}
static int
bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int silent)
{
	int i;
	u32 val;

	bp->fw_wr_seq++;
	msg_data |= bp->fw_wr_seq;

	REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);

	/* wait for an acknowledgement. */
	for (i = 0; i < (FW_ACK_TIME_OUT_MS / 10); i++) {
		msleep(10);

		val = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_MB);

		if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
			break;
	}
	if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
		return 0;

	/* If we timed out, inform the firmware that this is the case. */
	if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
		if (!silent)
			printk(KERN_ERR PFX "fw sync timeout, reset code = "
					    "%x\n", msg_data);

		msg_data &= ~BNX2_DRV_MSG_CODE;
		msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;

		REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);

		return -EBUSY;
	}

	if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
		return -EIO;

	return 0;
}
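/* Editor's sketch of the handshake above (not an original comment):
 * the driver stamps each request with a sequence number (bp->fw_wr_seq,
 * carried in the BNX2_DRV_MSG_SEQ field), posts it to the BNX2_DRV_MB
 * mailbox in shared memory, then polls BNX2_FW_MB until the firmware
 * echoes the same sequence in BNX2_FW_MSG_ACK or FW_ACK_TIME_OUT_MS
 * elapses, in which case a FW_TIMEOUT code is posted back.
 */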
static int
bnx2_init_5709_context(struct bnx2 *bp)
{
	int i, ret = 0;
	u32 val;

	val = BNX2_CTX_COMMAND_ENABLED | BNX2_CTX_COMMAND_MEM_INIT | (1 << 12);
	val |= (BCM_PAGE_BITS - 8) << 16;
	REG_WR(bp, BNX2_CTX_COMMAND, val);
	for (i = 0; i < bp->ctx_pages; i++) {
		int j;

		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0,
		       (bp->ctx_blk_mapping[i] & 0xffffffff) |
		       BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID);
		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA1,
		       (u64) bp->ctx_blk_mapping[i] >> 32);
		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL, i |
		       BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
		for (j = 0; j < 10; j++) {

			val = REG_RD(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL);
			if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
				break;
			udelay(5);
		}
		if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
			ret = -EBUSY;
			break;
		}
	}
	return ret;
}
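/* Editor's note on the sequence above (an inference from the register
 * names, not an original comment): each 5709 context page is handed to
 * the chip by splitting its 64-bit DMA address across the two
 * HOST_PAGE_TBL_DATA registers, latching it into page-table slot i
 * with a WRITE_REQ, and then polling up to 10 times for the request
 * bit to clear before declaring the write stuck.
 */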
static void
bnx2_init_context(struct bnx2 *bp)
{
	u32 vcid;

	vcid = 96;
	while (vcid) {
		u32 vcid_addr, pcid_addr, offset;

		vcid--;

		if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
			u32 new_vcid;

			vcid_addr = GET_PCID_ADDR(vcid);
			if (vcid & 0x8) {
				new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
			}
			else {
				new_vcid = vcid;
			}
			pcid_addr = GET_PCID_ADDR(new_vcid);
		}
		else {
			vcid_addr = GET_CID_ADDR(vcid);
			pcid_addr = vcid_addr;
		}

		REG_WR(bp, BNX2_CTX_VIRT_ADDR, 0x00);
		REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);

		/* Zero out the context. */
		for (offset = 0; offset < PHY_CTX_SIZE; offset += 4) {
			CTX_WR(bp, 0x00, offset, 0);
		}

		REG_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
		REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
	}
}
static int
bnx2_alloc_bad_rbuf(struct bnx2 *bp)
{
	u16 *good_mbuf;
	u32 good_mbuf_cnt;
	u32 val;

	good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
	if (good_mbuf == NULL) {
		printk(KERN_ERR PFX "Failed to allocate memory in "
				    "bnx2_alloc_bad_rbuf\n");
		return -ENOMEM;
	}

	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
		BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);

	good_mbuf_cnt = 0;

	/* Allocate a bunch of mbufs and save the good ones in an array. */
	val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
	while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
		REG_WR_IND(bp, BNX2_RBUF_COMMAND, BNX2_RBUF_COMMAND_ALLOC_REQ);

		val = REG_RD_IND(bp, BNX2_RBUF_FW_BUF_ALLOC);

		val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;

		/* The addresses with Bit 9 set are bad memory blocks. */
		if (!(val & (1 << 9))) {
			good_mbuf[good_mbuf_cnt] = (u16) val;
			good_mbuf_cnt++;
		}

		val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
	}

	/* Free the good ones back to the mbuf pool thus discarding
	 * all the bad ones. */
	while (good_mbuf_cnt) {
		good_mbuf_cnt--;

		val = good_mbuf[good_mbuf_cnt];
		val = (val << 9) | val | 1;

		REG_WR_IND(bp, BNX2_RBUF_FW_BUF_FREE, val);
	}
	kfree(good_mbuf);
	return 0;
}
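/* Editor's note on the workaround above (not an original comment): the
 * firmware allocator hands back rx mbuf cluster addresses one at a
 * time; clusters whose address has bit 9 set map to known-bad internal
 * memory, so only the others are recorded and later returned to the
 * pool.  The bad clusters are simply never freed, permanently
 * quarantining them from the rx path.
 */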
static void
bnx2_set_mac_addr(struct bnx2 *bp)
{
	u32 val;
	u8 *mac_addr = bp->dev->dev_addr;

	val = (mac_addr[0] << 8) | mac_addr[1];

	REG_WR(bp, BNX2_EMAC_MAC_MATCH0, val);

	val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
		(mac_addr[4] << 8) | mac_addr[5];

	REG_WR(bp, BNX2_EMAC_MAC_MATCH1, val);
}
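/* Illustrative example of the register split above (editor's note, the
 * address is hypothetical): for the MAC address 00:10:18:2e:5f:01, the
 * code writes 0x00000010 to BNX2_EMAC_MAC_MATCH0 (the two high-order
 * octets) and 0x182e5f01 to BNX2_EMAC_MAC_MATCH1 (the four low-order
 * octets).
 */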
static inline int
bnx2_alloc_rx_skb(struct bnx2 *bp, u16 index)
{
	struct sk_buff *skb;
	struct sw_bd *rx_buf = &bp->rx_buf_ring[index];
	dma_addr_t mapping;
	struct rx_bd *rxbd = &bp->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
	unsigned long align;

	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
	if (skb == NULL) {
		return -ENOMEM;
	}

	if (unlikely((align = (unsigned long) skb->data & (BNX2_RX_ALIGN - 1))))
		skb_reserve(skb, BNX2_RX_ALIGN - align);

	mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
		PCI_DMA_FROMDEVICE);

	rx_buf->skb = skb;
	pci_unmap_addr_set(rx_buf, mapping, mapping);

	rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
	rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;

	bp->rx_prod_bseq += bp->rx_buf_use_size;

	return 0;
}
static void
bnx2_phy_int(struct bnx2 *bp)
{
	u32 new_link_state, old_link_state;

	new_link_state = bp->status_blk->status_attn_bits &
		STATUS_ATTN_BITS_LINK_STATE;
	old_link_state = bp->status_blk->status_attn_bits_ack &
		STATUS_ATTN_BITS_LINK_STATE;
	if (new_link_state != old_link_state) {
		if (new_link_state) {
			REG_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD,
				STATUS_ATTN_BITS_LINK_STATE);
		}
		else {
			REG_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD,
				STATUS_ATTN_BITS_LINK_STATE);
		}
		bnx2_set_link(bp);
	}
}
static void
bnx2_tx_int(struct bnx2 *bp)
{
	struct status_block *sblk = bp->status_blk;
	u16 hw_cons, sw_cons, sw_ring_cons;
	int tx_free_bd = 0;

	hw_cons = bp->hw_tx_cons = sblk->status_tx_quick_consumer_index0;
	if ((hw_cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT) {
		hw_cons++;
	}
	sw_cons = bp->tx_cons;

	while (sw_cons != hw_cons) {
		struct sw_bd *tx_buf;
		struct sk_buff *skb;
		int i, last;

		sw_ring_cons = TX_RING_IDX(sw_cons);

		tx_buf = &bp->tx_buf_ring[sw_ring_cons];
		skb = tx_buf->skb;

		/* partial BD completions possible with TSO packets */
		if (skb_is_gso(skb)) {
			u16 last_idx, last_ring_idx;

			last_idx = sw_cons +
				skb_shinfo(skb)->nr_frags + 1;
			last_ring_idx = sw_ring_cons +
				skb_shinfo(skb)->nr_frags + 1;
			if (unlikely(last_ring_idx >= MAX_TX_DESC_CNT)) {
				last_idx++;
			}
			if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
				break;
			}
		}

		pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
			skb_headlen(skb), PCI_DMA_TODEVICE);

		tx_buf->skb = NULL;
		last = skb_shinfo(skb)->nr_frags;

		for (i = 0; i < last; i++) {
			sw_cons = NEXT_TX_BD(sw_cons);

			pci_unmap_page(bp->pdev,
				pci_unmap_addr(
					&bp->tx_buf_ring[TX_RING_IDX(sw_cons)],
					mapping),
				skb_shinfo(skb)->frags[i].size,
				PCI_DMA_TODEVICE);
		}

		sw_cons = NEXT_TX_BD(sw_cons);

		tx_free_bd += last + 1;

		dev_kfree_skb(skb);

		hw_cons = bp->hw_tx_cons =
			sblk->status_tx_quick_consumer_index0;

		if ((hw_cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT) {
			hw_cons++;
		}
	}

	bp->tx_cons = sw_cons;
	/* Need to make the tx_cons update visible to bnx2_start_xmit()
	 * before checking for netif_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that bnx2_start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	if (unlikely(netif_queue_stopped(bp->dev)) &&
		     (bnx2_tx_avail(bp) > bp->tx_wake_thresh)) {
		netif_tx_lock(bp->dev);
		if ((netif_queue_stopped(bp->dev)) &&
		    (bnx2_tx_avail(bp) > bp->tx_wake_thresh))
			netif_wake_queue(bp->dev);
		netif_tx_unlock(bp->dev);
	}
}
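/* Editor's note on the wake-up dance above (an interpretation, not an
 * original comment): the transmit path stops the queue when the ring
 * fills and then re-checks bnx2_tx_avail(); the smp_mb() pairs with
 * that path so the tx_cons update is visible before
 * netif_queue_stopped() is tested here, and the re-check under
 * netif_tx_lock closes the remaining race with a concurrent transmit.
 */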
static inline void
bnx2_reuse_rx_skb(struct bnx2 *bp, struct sk_buff *skb,
	u16 cons, u16 prod)
{
	struct sw_bd *cons_rx_buf, *prod_rx_buf;
	struct rx_bd *cons_bd, *prod_bd;

	cons_rx_buf = &bp->rx_buf_ring[cons];
	prod_rx_buf = &bp->rx_buf_ring[prod];

	pci_dma_sync_single_for_device(bp->pdev,
		pci_unmap_addr(cons_rx_buf, mapping),
		bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

	bp->rx_prod_bseq += bp->rx_buf_use_size;

	prod_rx_buf->skb = skb;

	if (cons == prod)
		return;

	pci_unmap_addr_set(prod_rx_buf, mapping,
			pci_unmap_addr(cons_rx_buf, mapping));

	cons_bd = &bp->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
	prod_bd = &bp->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
	prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
	prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
}
static int
bnx2_rx_int(struct bnx2 *bp, int budget)
{
	struct status_block *sblk = bp->status_blk;
	u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
	struct l2_fhdr *rx_hdr;
	int rx_pkt = 0;

	hw_cons = bp->hw_rx_cons = sblk->status_rx_quick_consumer_index0;
	if ((hw_cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT) {
		hw_cons++;
	}
	sw_cons = bp->rx_cons;
	sw_prod = bp->rx_prod;

	/* Memory barrier necessary as speculative reads of the rx
	 * buffer can be ahead of the index in the status block
	 */
	rmb();
	while (sw_cons != hw_cons) {
		unsigned int len;
		u32 status;
		struct sw_bd *rx_buf;
		struct sk_buff *skb;
		dma_addr_t dma_addr;

		sw_ring_cons = RX_RING_IDX(sw_cons);
		sw_ring_prod = RX_RING_IDX(sw_prod);

		rx_buf = &bp->rx_buf_ring[sw_ring_cons];
		skb = rx_buf->skb;

		rx_buf->skb = NULL;

		dma_addr = pci_unmap_addr(rx_buf, mapping);

		pci_dma_sync_single_for_cpu(bp->pdev, dma_addr,
			bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

		rx_hdr = (struct l2_fhdr *) skb->data;
		len = rx_hdr->l2_fhdr_pkt_len - 4;

		if ((status = rx_hdr->l2_fhdr_status) &
			(L2_FHDR_ERRORS_BAD_CRC |
			L2_FHDR_ERRORS_PHY_DECODE |
			L2_FHDR_ERRORS_ALIGNMENT |
			L2_FHDR_ERRORS_TOO_SHORT |
			L2_FHDR_ERRORS_GIANT_FRAME)) {

			goto reuse_rx;
		}

		/* Since we don't have a jumbo ring, copy small packets
		 * if mtu > 1500
		 */
		if ((bp->dev->mtu > 1500) && (len <= RX_COPY_THRESH)) {
			struct sk_buff *new_skb;

			new_skb = netdev_alloc_skb(bp->dev, len + 2);
			if (new_skb == NULL)
				goto reuse_rx;

			/* aligned copy */
			memcpy(new_skb->data,
				skb->data + bp->rx_offset - 2,
				len + 2);

			skb_reserve(new_skb, 2);
			skb_put(new_skb, len);

			bnx2_reuse_rx_skb(bp, skb,
				sw_ring_cons, sw_ring_prod);

			skb = new_skb;
		}
		else if (bnx2_alloc_rx_skb(bp, sw_ring_prod) == 0) {
			pci_unmap_single(bp->pdev, dma_addr,
				bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);

			skb_reserve(skb, bp->rx_offset);
			skb_put(skb, len);
		}
		else {
reuse_rx:
			bnx2_reuse_rx_skb(bp, skb,
				sw_ring_cons, sw_ring_prod);
			goto next_rx;
		}

		skb->protocol = eth_type_trans(skb, bp->dev);

		if ((len > (bp->dev->mtu + ETH_HLEN)) &&
			(ntohs(skb->protocol) != 0x8100)) {

			dev_kfree_skb(skb);
			goto next_rx;

		}

		skb->ip_summed = CHECKSUM_NONE;
		if (bp->rx_csum &&
			(status & (L2_FHDR_STATUS_TCP_SEGMENT |
			L2_FHDR_STATUS_UDP_DATAGRAM))) {

			if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
					      L2_FHDR_ERRORS_UDP_XSUM)) == 0))
				skb->ip_summed = CHECKSUM_UNNECESSARY;
		}

#ifdef BCM_VLAN
		if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) && (bp->vlgrp != 0)) {
			vlan_hwaccel_receive_skb(skb, bp->vlgrp,
				rx_hdr->l2_fhdr_vlan_tag);
		}
		else
#endif
			netif_receive_skb(skb);

		bp->dev->last_rx = jiffies;
		rx_pkt++;

next_rx:
		sw_cons = NEXT_RX_BD(sw_cons);
		sw_prod = NEXT_RX_BD(sw_prod);

		if ((rx_pkt == budget))
			break;

		/* Refresh hw_cons to see if there is new work */
		if (sw_cons == hw_cons) {
			hw_cons = bp->hw_rx_cons =
				sblk->status_rx_quick_consumer_index0;
			if ((hw_cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT)
				hw_cons++;
			rmb();
		}
	}
	bp->rx_cons = sw_cons;
	bp->rx_prod = sw_prod;

	REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, sw_prod);

	REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bp->rx_prod_bseq);

	mmiowb();

	return rx_pkt;

}
/* MSI ISR - The only difference between this and the INTx ISR
 * is that the MSI interrupt is always serviced.
 */
static irqreturn_t
bnx2_msi(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct bnx2 *bp = netdev_priv(dev);

	prefetch(bp->status_blk);
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	/* Return here if interrupt is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	netif_rx_schedule(dev);

	return IRQ_HANDLED;
}
static irqreturn_t
bnx2_interrupt(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct bnx2 *bp = netdev_priv(dev);

	/* When using INTx, it is possible for the interrupt to arrive
	 * at the CPU before the status block posted prior to the
	 * interrupt. Reading a register will flush the status block.
	 * When using MSI, the MSI message will always complete after
	 * the status block write.
	 */
	if ((bp->status_blk->status_idx == bp->last_status_idx) &&
	    (REG_RD(bp, BNX2_PCICFG_MISC_STATUS) &
	     BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
		return IRQ_NONE;

	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	/* Return here if interrupt is shared and is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	netif_rx_schedule(dev);

	return IRQ_HANDLED;
}
static inline int
bnx2_has_work(struct bnx2 *bp)
{
	struct status_block *sblk = bp->status_blk;

	if ((sblk->status_rx_quick_consumer_index0 != bp->hw_rx_cons) ||
	    (sblk->status_tx_quick_consumer_index0 != bp->hw_tx_cons))
		return 1;

	if ((sblk->status_attn_bits & STATUS_ATTN_BITS_LINK_STATE) !=
	    (sblk->status_attn_bits_ack & STATUS_ATTN_BITS_LINK_STATE))
		return 1;

	return 0;
}
static int
bnx2_poll(struct net_device *dev, int *budget)
{
	struct bnx2 *bp = netdev_priv(dev);

	if ((bp->status_blk->status_attn_bits &
		STATUS_ATTN_BITS_LINK_STATE) !=
		(bp->status_blk->status_attn_bits_ack &
		STATUS_ATTN_BITS_LINK_STATE)) {

		spin_lock(&bp->phy_lock);
		bnx2_phy_int(bp);
		spin_unlock(&bp->phy_lock);

		/* This is needed to take care of transient status
		 * during link changes.
		 */
		REG_WR(bp, BNX2_HC_COMMAND,
		       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
		REG_RD(bp, BNX2_HC_COMMAND);
	}

	if (bp->status_blk->status_tx_quick_consumer_index0 != bp->hw_tx_cons)
		bnx2_tx_int(bp);

	if (bp->status_blk->status_rx_quick_consumer_index0 != bp->hw_rx_cons) {
		int orig_budget = *budget;
		int work_done;

		if (orig_budget > dev->quota)
			orig_budget = dev->quota;

		work_done = bnx2_rx_int(bp, orig_budget);
		*budget -= work_done;
		dev->quota -= work_done;
	}

	bp->last_status_idx = bp->status_blk->status_idx;
	rmb();

	if (!bnx2_has_work(bp)) {
		netif_rx_complete(dev);
		if (likely(bp->flags & USING_MSI_FLAG)) {
			REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
			       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
			       bp->last_status_idx);
			return 0;
		}
		REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
		       BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
		       bp->last_status_idx);

		REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
		       bp->last_status_idx);
		return 0;
	}

	return 1;
}
/* Called with rtnl_lock from vlan functions and also netif_tx_lock
 * from set_multicast.
 */
static void
bnx2_set_rx_mode(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 rx_mode, sort_mode;
	int i;

	spin_lock_bh(&bp->phy_lock);

	rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
				  BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
	sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
#ifdef BCM_VLAN
	if (!bp->vlgrp && !(bp->flags & ASF_ENABLE_FLAG))
		rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
#else
	if (!(bp->flags & ASF_ENABLE_FLAG))
		rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
#endif
	if (dev->flags & IFF_PROMISC) {
		/* Promiscuous mode. */
		rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
		sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
			     BNX2_RPM_SORT_USER0_PROM_VLAN;
	}
	else if (dev->flags & IFF_ALLMULTI) {
		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
			REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
			       0xffffffff);
		}
		sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
	}
	else {
		/* Accept one or more multicast(s). */
		struct dev_mc_list *mclist;
		u32 mc_filter[NUM_MC_HASH_REGISTERS];
		u32 regidx;
		u32 bit;
		u32 crc;

		memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);

		for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
		     i++, mclist = mclist->next) {

			crc = ether_crc_le(ETH_ALEN, mclist->dmi_addr);
			bit = crc & 0xff;
			regidx = (bit & 0xe0) >> 5;
			bit &= 0x1f;
			mc_filter[regidx] |= (1 << bit);
		}

		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
			REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
			       mc_filter[i]);
		}

		sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
	}

	if (rx_mode != bp->rx_mode) {
		bp->rx_mode = rx_mode;
		REG_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
	}

	REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
	REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
	REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);

	spin_unlock_bh(&bp->phy_lock);
}
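/* Editor's worked example of the multicast hash above (not an original
 * comment): ether_crc_le() of the 6-byte address yields a 32-bit CRC;
 * the low 8 bits index a 256-bit filter spread across the
 * NUM_MC_HASH_REGISTERS (8) 32-bit hash registers, with bits 7:5
 * selecting the register and bits 4:0 the bit within it.  A CRC ending
 * in 0x4b, say, sets bit 11 of hash register 2.
 */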
#define FW_BUF_SIZE	0x8000

static int
bnx2_gunzip_init(struct bnx2 *bp)
{
	if ((bp->gunzip_buf = vmalloc(FW_BUF_SIZE)) == NULL)
		goto gunzip_nomem1;

	if ((bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL)) == NULL)
		goto gunzip_nomem2;

	bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(), GFP_KERNEL);
	if (bp->strm->workspace == NULL)
		goto gunzip_nomem3;

	return 0;

gunzip_nomem3:
	kfree(bp->strm);
	bp->strm = NULL;

gunzip_nomem2:
	vfree(bp->gunzip_buf);
	bp->gunzip_buf = NULL;

gunzip_nomem1:
	printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for "
			    "uncompression.\n", bp->dev->name);
	return -ENOMEM;
}
static void
bnx2_gunzip_end(struct bnx2 *bp)
{
	kfree(bp->strm->workspace);

	kfree(bp->strm);
	bp->strm = NULL;

	if (bp->gunzip_buf) {
		vfree(bp->gunzip_buf);
		bp->gunzip_buf = NULL;
	}
}
static int
bnx2_gunzip(struct bnx2 *bp, u8 *zbuf, int len, void **outbuf, int *outlen)
{
	int n, rc;

	/* check gzip header */
	if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED))
		return -EINVAL;

	n = 10;

#define FNAME	0x8
	if (zbuf[3] & FNAME)
		while ((zbuf[n++] != 0) && (n < len));

	bp->strm->next_in = zbuf + n;
	bp->strm->avail_in = len - n;
	bp->strm->next_out = bp->gunzip_buf;
	bp->strm->avail_out = FW_BUF_SIZE;

	rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
	if (rc != Z_OK)
		return rc;

	rc = zlib_inflate(bp->strm, Z_FINISH);

	*outlen = FW_BUF_SIZE - bp->strm->avail_out;
	*outbuf = bp->gunzip_buf;

	if ((rc != Z_OK) && (rc != Z_STREAM_END))
		printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
		       bp->dev->name, bp->strm->msg);

	zlib_inflateEnd(bp->strm);

	if (rc == Z_STREAM_END)
		return 0;

	return rc;
}
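/* Editor's note on the zlib usage above (not an original comment): the
 * embedded firmware images carry a gzip header (0x1f 0x8b, deflate
 * method), which the kernel's zlib_inflate() does not parse, so the
 * 10-byte header, plus any null-terminated FNAME string, is skipped by
 * hand and zlib_inflateInit2() is called with a negative window-bits
 * value (-MAX_WBITS) to request a raw deflate stream.
 */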
static void
load_rv2p_fw(struct bnx2 *bp, u32 *rv2p_code, u32 rv2p_code_len,
	u32 rv2p_proc)
{
	int i;
	u32 val;

	for (i = 0; i < rv2p_code_len; i += 8) {
		REG_WR(bp, BNX2_RV2P_INSTR_HIGH, cpu_to_le32(*rv2p_code));
		rv2p_code++;
		REG_WR(bp, BNX2_RV2P_INSTR_LOW, cpu_to_le32(*rv2p_code));
		rv2p_code++;

		if (rv2p_proc == RV2P_PROC1) {
			val = (i / 8) | BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
			REG_WR(bp, BNX2_RV2P_PROC1_ADDR_CMD, val);
		}
		else {
			val = (i / 8) | BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
			REG_WR(bp, BNX2_RV2P_PROC2_ADDR_CMD, val);
		}
	}

	/* Reset the processor, un-stall is done later. */
	if (rv2p_proc == RV2P_PROC1) {
		REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
	}
	else {
		REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
	}
}
static int
load_cpu_fw(struct bnx2 *bp, struct cpu_reg *cpu_reg, struct fw_info *fw)
{
	u32 offset;
	u32 val;
	int rc;

	/* Halt the CPU. */
	val = REG_RD_IND(bp, cpu_reg->mode);
	val |= cpu_reg->mode_value_halt;
	REG_WR_IND(bp, cpu_reg->mode, val);
	REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);

	/* Load the Text area. */
	offset = cpu_reg->spad_base + (fw->text_addr - cpu_reg->mips_view_base);
	if (fw->gz_text) {
		u32 text_len;
		void *text;

		rc = bnx2_gunzip(bp, fw->gz_text, fw->gz_text_len, &text,
				 &text_len);
		if (rc)
			return rc;

		fw->text = text;
	}
	if (fw->gz_text) {
		int j;

		for (j = 0; j < (fw->text_len / 4); j++, offset += 4) {
			REG_WR_IND(bp, offset, cpu_to_le32(fw->text[j]));
		}
	}

	/* Load the Data area. */
	offset = cpu_reg->spad_base + (fw->data_addr - cpu_reg->mips_view_base);
	if (fw->data) {
		int j;

		for (j = 0; j < (fw->data_len / 4); j++, offset += 4) {
			REG_WR_IND(bp, offset, fw->data[j]);
		}
	}

	/* Load the SBSS area. */
	offset = cpu_reg->spad_base + (fw->sbss_addr - cpu_reg->mips_view_base);
	if (fw->sbss) {
		int j;

		for (j = 0; j < (fw->sbss_len / 4); j++, offset += 4) {
			REG_WR_IND(bp, offset, fw->sbss[j]);
		}
	}

	/* Load the BSS area. */
	offset = cpu_reg->spad_base + (fw->bss_addr - cpu_reg->mips_view_base);
	if (fw->bss) {
		int j;

		for (j = 0; j < (fw->bss_len/4); j++, offset += 4) {
			REG_WR_IND(bp, offset, fw->bss[j]);
		}
	}

	/* Load the Read-Only area. */
	offset = cpu_reg->spad_base +
		(fw->rodata_addr - cpu_reg->mips_view_base);
	if (fw->rodata) {
		int j;

		for (j = 0; j < (fw->rodata_len / 4); j++, offset += 4) {
			REG_WR_IND(bp, offset, fw->rodata[j]);
		}
	}

	/* Clear the pre-fetch instruction. */
	REG_WR_IND(bp, cpu_reg->inst, 0);
	REG_WR_IND(bp, cpu_reg->pc, fw->start_addr);

	/* Start the CPU. */
	val = REG_RD_IND(bp, cpu_reg->mode);
	val &= ~cpu_reg->mode_value_halt;
	REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);
	REG_WR_IND(bp, cpu_reg->mode, val);

	return 0;
}
static int
bnx2_init_cpus(struct bnx2 *bp)
{
	struct cpu_reg cpu_reg;
	struct fw_info *fw;
	int rc = 0;
	void *text;
	u32 text_len;

	if ((rc = bnx2_gunzip_init(bp)) != 0)
		return rc;

	/* Initialize the RV2P processor. */
	rc = bnx2_gunzip(bp, bnx2_rv2p_proc1, sizeof(bnx2_rv2p_proc1), &text,
			 &text_len);
	if (rc)
		goto init_cpu_err;

	load_rv2p_fw(bp, text, text_len, RV2P_PROC1);

	rc = bnx2_gunzip(bp, bnx2_rv2p_proc2, sizeof(bnx2_rv2p_proc2), &text,
			 &text_len);
	if (rc)
		goto init_cpu_err;

	load_rv2p_fw(bp, text, text_len, RV2P_PROC2);

	/* Initialize the RX Processor. */
	cpu_reg.mode = BNX2_RXP_CPU_MODE;
	cpu_reg.mode_value_halt = BNX2_RXP_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX2_RXP_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX2_RXP_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX2_RXP_CPU_REG_FILE;
	cpu_reg.evmask = BNX2_RXP_CPU_EVENT_MASK;
	cpu_reg.pc = BNX2_RXP_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX2_RXP_CPU_INSTRUCTION;
	cpu_reg.bp = BNX2_RXP_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX2_RXP_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		fw = &bnx2_rxp_fw_09;
	else
		fw = &bnx2_rxp_fw_06;

	rc = load_cpu_fw(bp, &cpu_reg, fw);
	if (rc)
		goto init_cpu_err;

	/* Initialize the TX Processor. */
	cpu_reg.mode = BNX2_TXP_CPU_MODE;
	cpu_reg.mode_value_halt = BNX2_TXP_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX2_TXP_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX2_TXP_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX2_TXP_CPU_REG_FILE;
	cpu_reg.evmask = BNX2_TXP_CPU_EVENT_MASK;
	cpu_reg.pc = BNX2_TXP_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX2_TXP_CPU_INSTRUCTION;
	cpu_reg.bp = BNX2_TXP_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX2_TXP_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		fw = &bnx2_txp_fw_09;
	else
		fw = &bnx2_txp_fw_06;

	rc = load_cpu_fw(bp, &cpu_reg, fw);
	if (rc)
		goto init_cpu_err;

	/* Initialize the TX Patch-up Processor. */
	cpu_reg.mode = BNX2_TPAT_CPU_MODE;
	cpu_reg.mode_value_halt = BNX2_TPAT_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX2_TPAT_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX2_TPAT_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX2_TPAT_CPU_REG_FILE;
	cpu_reg.evmask = BNX2_TPAT_CPU_EVENT_MASK;
	cpu_reg.pc = BNX2_TPAT_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX2_TPAT_CPU_INSTRUCTION;
	cpu_reg.bp = BNX2_TPAT_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX2_TPAT_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		fw = &bnx2_tpat_fw_09;
	else
		fw = &bnx2_tpat_fw_06;

	rc = load_cpu_fw(bp, &cpu_reg, fw);
	if (rc)
		goto init_cpu_err;

	/* Initialize the Completion Processor. */
	cpu_reg.mode = BNX2_COM_CPU_MODE;
	cpu_reg.mode_value_halt = BNX2_COM_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX2_COM_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX2_COM_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX2_COM_CPU_REG_FILE;
	cpu_reg.evmask = BNX2_COM_CPU_EVENT_MASK;
	cpu_reg.pc = BNX2_COM_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX2_COM_CPU_INSTRUCTION;
	cpu_reg.bp = BNX2_COM_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX2_COM_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		fw = &bnx2_com_fw_09;
	else
		fw = &bnx2_com_fw_06;

	rc = load_cpu_fw(bp, &cpu_reg, fw);
	if (rc)
		goto init_cpu_err;

	/* Initialize the Command Processor. */
	cpu_reg.mode = BNX2_CP_CPU_MODE;
	cpu_reg.mode_value_halt = BNX2_CP_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX2_CP_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX2_CP_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX2_CP_CPU_REG_FILE;
	cpu_reg.evmask = BNX2_CP_CPU_EVENT_MASK;
	cpu_reg.pc = BNX2_CP_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX2_CP_CPU_INSTRUCTION;
	cpu_reg.bp = BNX2_CP_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX2_CP_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		fw = &bnx2_cp_fw_09;

		rc = load_cpu_fw(bp, &cpu_reg, fw);
		if (rc)
			goto init_cpu_err;
	}

init_cpu_err:
	bnx2_gunzip_end(bp);
	return rc;
}
static int
bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
{
	u16 pmcsr;

	pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);

	switch (state) {
	case PCI_D0: {
		u32 val;

		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
			(pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
			PCI_PM_CTRL_PME_STATUS);

		if (pmcsr & PCI_PM_CTRL_STATE_MASK)
			/* delay required during transition out of D3hot */
			msleep(20);

		val = REG_RD(bp, BNX2_EMAC_MODE);
		val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
		val &= ~BNX2_EMAC_MODE_MPKT;
		REG_WR(bp, BNX2_EMAC_MODE, val);

		val = REG_RD(bp, BNX2_RPM_CONFIG);
		val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
		REG_WR(bp, BNX2_RPM_CONFIG, val);
		break;
	}
	case PCI_D3hot: {
		int i;
		u32 val, wol_msg;

		if (bp->wol) {
			u32 advertising;
			u8 autoneg;

			autoneg = bp->autoneg;
			advertising = bp->advertising;

			bp->autoneg = AUTONEG_SPEED;
			bp->advertising = ADVERTISED_10baseT_Half |
				ADVERTISED_10baseT_Full |
				ADVERTISED_100baseT_Half |
				ADVERTISED_100baseT_Full |
				ADVERTISED_Autoneg;

			bnx2_setup_copper_phy(bp);

			bp->autoneg = autoneg;
			bp->advertising = advertising;

			bnx2_set_mac_addr(bp);

			val = REG_RD(bp, BNX2_EMAC_MODE);

			/* Enable port mode. */
			val &= ~BNX2_EMAC_MODE_PORT;
			val |= BNX2_EMAC_MODE_PORT_MII |
			       BNX2_EMAC_MODE_MPKT_RCVD |
			       BNX2_EMAC_MODE_ACPI_RCVD |
			       BNX2_EMAC_MODE_MPKT;

			REG_WR(bp, BNX2_EMAC_MODE, val);

			/* receive all multicast */
			for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
				REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
				       0xffffffff);
			}
			REG_WR(bp, BNX2_EMAC_RX_MODE,
			       BNX2_EMAC_RX_MODE_SORT_MODE);

			val = 1 | BNX2_RPM_SORT_USER0_BC_EN |
			      BNX2_RPM_SORT_USER0_MC_EN;
			REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
			REG_WR(bp, BNX2_RPM_SORT_USER0, val);
			REG_WR(bp, BNX2_RPM_SORT_USER0, val |
			       BNX2_RPM_SORT_USER0_ENA);

			/* Need to enable EMAC and RPM for WOL. */
			REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
			       BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
			       BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
			       BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);

			val = REG_RD(bp, BNX2_RPM_CONFIG);
			val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
			REG_WR(bp, BNX2_RPM_CONFIG, val);

			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
		}
		else {
			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
		}

		if (!(bp->flags & NO_WOL_FLAG))
			bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg, 0);

		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
		if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
		    (CHIP_ID(bp) == CHIP_ID_5706_A1)) {
			if (bp->wol)
				pmcsr |= 3;
		}
		else {
			pmcsr |= 3;
		}
		if (bp->wol)
			pmcsr |= PCI_PM_CTRL_PME_ENABLE;

		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
				      pmcsr);

		/* No more memory access after this point until
		 * device is brought back to D0.
		 */
		udelay(50);
		break;
	}
	default:
		return -EINVAL;
	}
	return 0;
}
static int
bnx2_acquire_nvram_lock(struct bnx2 *bp)
{
	u32 val;
	int j;

	/* Request access to the flash interface. */
	REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		val = REG_RD(bp, BNX2_NVM_SW_ARB);
		if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
			break;

		udelay(5);
	}

	if (j >= NVRAM_TIMEOUT_COUNT)
		return -EBUSY;

	return 0;
}

static int
bnx2_release_nvram_lock(struct bnx2 *bp)
{
	int j;
	u32 val;

	/* Relinquish nvram interface. */
	REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);

	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		val = REG_RD(bp, BNX2_NVM_SW_ARB);
		if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
			break;

		udelay(5);
	}

	if (j >= NVRAM_TIMEOUT_COUNT)
		return -EBUSY;

	return 0;
}

static int
bnx2_enable_nvram_write(struct bnx2 *bp)
{
	u32 val;

	val = REG_RD(bp, BNX2_MISC_CFG);
	REG_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);

	if (!bp->flash_info->buffered) {
		int j;

		REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
		REG_WR(bp, BNX2_NVM_COMMAND,
		       BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);

		for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
			udelay(5);

			val = REG_RD(bp, BNX2_NVM_COMMAND);
			if (val & BNX2_NVM_COMMAND_DONE)
				break;
		}

		if (j >= NVRAM_TIMEOUT_COUNT)
			return -EBUSY;
	}
	return 0;
}

static void
bnx2_disable_nvram_write(struct bnx2 *bp)
{
	u32 val;

	val = REG_RD(bp, BNX2_MISC_CFG);
	REG_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
}

static void
bnx2_enable_nvram_access(struct bnx2 *bp)
{
	u32 val;

	val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
	/* Enable both bits, even on read. */
	REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
	       val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
}

static void
bnx2_disable_nvram_access(struct bnx2 *bp)
{
	u32 val;

	val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
	/* Disable both bits, even after read. */
	REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
	       val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
		       BNX2_NVM_ACCESS_ENABLE_WR_EN));
}
static int
bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
{
	u32 cmd;
	int j;

	if (bp->flash_info->buffered)
		/* Buffered flash, no erase needed */
		return 0;

	/* Build an erase command */
	cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
	      BNX2_NVM_COMMAND_DOIT;

	/* Need to clear DONE bit separately. */
	REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);

	/* Address of the NVRAM to read from. */
	REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);

	/* Issue an erase command. */
	REG_WR(bp, BNX2_NVM_COMMAND, cmd);

	/* Wait for completion. */
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		u32 val;

		udelay(5);

		val = REG_RD(bp, BNX2_NVM_COMMAND);
		if (val & BNX2_NVM_COMMAND_DONE)
			break;
	}

	if (j >= NVRAM_TIMEOUT_COUNT)
		return -EBUSY;

	return 0;
}

static int
bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
{
	u32 cmd;
	int j;

	/* Build the command word. */
	cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;

	/* Calculate an offset of a buffered flash. */
	if (bp->flash_info->buffered) {
		offset = ((offset / bp->flash_info->page_size) <<
			   bp->flash_info->page_bits) +
			  (offset % bp->flash_info->page_size);
	}

	/* Need to clear DONE bit separately. */
	REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);

	/* Address of the NVRAM to read from. */
	REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);

	/* Issue a read command. */
	REG_WR(bp, BNX2_NVM_COMMAND, cmd);

	/* Wait for completion. */
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		u32 val;

		udelay(5);

		val = REG_RD(bp, BNX2_NVM_COMMAND);
		if (val & BNX2_NVM_COMMAND_DONE) {
			val = REG_RD(bp, BNX2_NVM_READ);

			val = be32_to_cpu(val);
			memcpy(ret_val, &val, 4);
			break;
		}
	}
	if (j >= NVRAM_TIMEOUT_COUNT)
		return -EBUSY;

	return 0;
}

static int
bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
{
	u32 cmd, val32;
	int j;

	/* Build the command word. */
	cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;

	/* Calculate an offset of a buffered flash. */
	if (bp->flash_info->buffered) {
		offset = ((offset / bp->flash_info->page_size) <<
			   bp->flash_info->page_bits) +
			  (offset % bp->flash_info->page_size);
	}

	/* Need to clear DONE bit separately. */
	REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);

	memcpy(&val32, val, 4);
	val32 = cpu_to_be32(val32);

	/* Write the data. */
	REG_WR(bp, BNX2_NVM_WRITE, val32);

	/* Address of the NVRAM to write to. */
	REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);

	/* Issue the write command. */
	REG_WR(bp, BNX2_NVM_COMMAND, cmd);

	/* Wait for completion. */
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		udelay(5);

		if (REG_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
			break;
	}
	if (j >= NVRAM_TIMEOUT_COUNT)
		return -EBUSY;

	return 0;
}
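/*
 * Editor's note on the buffered-flash translation in
 * bnx2_nvram_read_dword()/bnx2_nvram_write_dword() above (illustrative
 * sketch, geometry values assumed, not taken from this file): buffered
 * parts take a command address of the form
 * (page_number << page_bits) + byte_within_page rather than a flat
 * offset.  Assuming page_size = 264 and page_bits = 9, a linear offset
 * of 1000 becomes:
 *
 *	page   = 1000 / 264 = 3
 *	within = 1000 % 264 = 208
 *	cmd    = (3 << 9) + 208 = 0x6d0
 */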
static int
bnx2_init_nvram(struct bnx2 *bp)
{
	u32 val;
	int j, entry_count, rc;
	struct flash_spec *flash;

	/* Determine the selected interface. */
	val = REG_RD(bp, BNX2_NVM_CFG1);

	entry_count = sizeof(flash_table) / sizeof(struct flash_spec);

	rc = 0;
	if (val & 0x40000000) {

		/* Flash interface has been reconfigured */
		for (j = 0, flash = &flash_table[0]; j < entry_count;
		     j++, flash++) {
			if ((val & FLASH_BACKUP_STRAP_MASK) ==
			    (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
				bp->flash_info = flash;
				break;
			}
		}
	}
	else {
		u32 mask;
		/* Not yet been reconfigured */

		if (val & (1 << 23))
			mask = FLASH_BACKUP_STRAP_MASK;
		else
			mask = FLASH_STRAP_MASK;

		for (j = 0, flash = &flash_table[0]; j < entry_count;
		     j++, flash++) {

			if ((val & mask) == (flash->strapping & mask)) {
				bp->flash_info = flash;

				/* Request access to the flash interface. */
				if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
					return rc;

				/* Enable access to flash interface */
				bnx2_enable_nvram_access(bp);

				/* Reconfigure the flash interface */
				REG_WR(bp, BNX2_NVM_CFG1, flash->config1);
				REG_WR(bp, BNX2_NVM_CFG2, flash->config2);
				REG_WR(bp, BNX2_NVM_CFG3, flash->config3);
				REG_WR(bp, BNX2_NVM_WRITE1, flash->write1);

				/* Disable access to flash interface */
				bnx2_disable_nvram_access(bp);
				bnx2_release_nvram_lock(bp);

				break;
			}
		}
	} /* if (val & 0x40000000) */

	if (j == entry_count) {
		bp->flash_info = NULL;
		printk(KERN_ALERT PFX "Unknown flash/EEPROM type.\n");
		return -ENODEV;
	}

	val = REG_RD_IND(bp, bp->shmem_base + BNX2_SHARED_HW_CFG_CONFIG2);
	val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
	if (val)
		bp->flash_size = val;
	else
		bp->flash_size = bp->flash_info->total_size;

	return rc;
}
static int
bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
		int buf_size)
{
	int rc = 0;
	u32 cmd_flags, offset32, len32, extra;

	if (buf_size == 0)
		return 0;

	/* Request access to the flash interface. */
	if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
		return rc;

	/* Enable access to flash interface */
	bnx2_enable_nvram_access(bp);

	len32 = buf_size;
	offset32 = offset;
	extra = 0;

	cmd_flags = 0;

	if (offset32 & 3) {
		u8 buf[4];
		u32 pre_len;

		offset32 &= ~3;
		pre_len = 4 - (offset & 3);

		if (pre_len >= len32) {
			pre_len = len32;
			cmd_flags = BNX2_NVM_COMMAND_FIRST |
				    BNX2_NVM_COMMAND_LAST;
		}
		else {
			cmd_flags = BNX2_NVM_COMMAND_FIRST;
		}

		rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);

		if (rc)
			return rc;

		memcpy(ret_buf, buf + (offset & 3), pre_len);

		offset32 += 4;
		ret_buf += pre_len;
		len32 -= pre_len;
	}
	if (len32 & 3) {
		extra = 4 - (len32 & 3);
		len32 = (len32 + 4) & ~3;
	}

	if (len32 == 4) {
		u8 buf[4];

		if (cmd_flags)
			cmd_flags = BNX2_NVM_COMMAND_LAST;
		else
			cmd_flags = BNX2_NVM_COMMAND_FIRST |
				    BNX2_NVM_COMMAND_LAST;

		rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);

		memcpy(ret_buf, buf, 4 - extra);
	}
	else if (len32 > 0) {
		u8 buf[4];

		/* Read the first word. */
		if (cmd_flags)
			cmd_flags = 0;
		else
			cmd_flags = BNX2_NVM_COMMAND_FIRST;

		rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);

		/* Advance to the next dword. */
		offset32 += 4;
		ret_buf += 4;
		len32 -= 4;

		while (len32 > 4 && rc == 0) {
			rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);

			/* Advance to the next dword. */
			offset32 += 4;
			ret_buf += 4;
			len32 -= 4;
		}

		if (rc == 0) {
			cmd_flags = BNX2_NVM_COMMAND_LAST;
			rc = bnx2_nvram_read_dword(bp, offset32, buf,
						   cmd_flags);

			memcpy(ret_buf, buf, 4 - extra);
		}
	}

	/* Disable access to flash interface */
	bnx2_disable_nvram_access(bp);

	bnx2_release_nvram_lock(bp);

	return rc;
}
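/*
 * Editor's note, a worked example of the alignment handling in
 * bnx2_nvram_read() above: the NVRAM interface only moves whole,
 * 4-byte-aligned dwords, so a request for 10 bytes at offset 6 is
 * issued as three dword commands:
 *
 *	dword @ 4  (FIRST) -> keep bytes 6-7   (pre_len = 2)
 *	dword @ 8          -> keep bytes 8-11
 *	dword @ 12 (LAST)  -> keep bytes 12-15
 *
 * The FIRST/LAST command flags bracket the burst for the flash
 * controller; partial dwords are staged through the local buf[4].
 */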
static int
bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
		int buf_size)
{
	u32 written, offset32, len32;
	u8 *buf, start[4], end[4], *align_buf = NULL, *flash_buffer = NULL;
	int rc = 0;
	int align_start, align_end;

	buf = data_buf;
	offset32 = offset;
	len32 = buf_size;
	align_start = align_end = 0;

	if ((align_start = (offset32 & 3))) {
		offset32 &= ~3;
		len32 += align_start;
		if (len32 < 4)
			len32 = 4;
		if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
			return rc;
	}

	if (len32 & 3) {
		align_end = 4 - (len32 & 3);
		len32 += align_end;
		if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4, end, 4)))
			return rc;
	}

	if (align_start || align_end) {
		align_buf = kmalloc(len32, GFP_KERNEL);
		if (align_buf == NULL)
			return -ENOMEM;
		if (align_start) {
			memcpy(align_buf, start, 4);
		}
		if (align_end) {
			memcpy(align_buf + len32 - 4, end, 4);
		}
		memcpy(align_buf + align_start, data_buf, buf_size);
		buf = align_buf;
	}

	if (bp->flash_info->buffered == 0) {
		flash_buffer = kmalloc(264, GFP_KERNEL);
		if (flash_buffer == NULL) {
			rc = -ENOMEM;
			goto nvram_write_end;
		}
	}

	written = 0;
	while ((written < len32) && (rc == 0)) {
		u32 page_start, page_end, data_start, data_end;
		u32 addr, cmd_flags;
		int i;

		/* Find the page_start addr */
		page_start = offset32 + written;
		page_start -= (page_start % bp->flash_info->page_size);
		/* Find the page_end addr */
		page_end = page_start + bp->flash_info->page_size;
		/* Find the data_start addr */
		data_start = (written == 0) ? offset32 : page_start;
		/* Find the data_end addr */
		data_end = (page_end > offset32 + len32) ?
			(offset32 + len32) : page_end;

		/* Request access to the flash interface. */
		if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
			goto nvram_write_end;

		/* Enable access to flash interface */
		bnx2_enable_nvram_access(bp);

		cmd_flags = BNX2_NVM_COMMAND_FIRST;
		if (bp->flash_info->buffered == 0) {
			int j;

			/* Read the whole page into the buffer
			 * (non-buffer flash only) */
			for (j = 0; j < bp->flash_info->page_size; j += 4) {
				if (j == (bp->flash_info->page_size - 4)) {
					cmd_flags |= BNX2_NVM_COMMAND_LAST;
				}
				rc = bnx2_nvram_read_dword(bp,
					page_start + j,
					&flash_buffer[j],
					cmd_flags);

				if (rc)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Enable writes to flash interface (unlock write-protect) */
		if ((rc = bnx2_enable_nvram_write(bp)) != 0)
			goto nvram_write_end;

		/* Loop to write back the buffer data from page_start to
		 * data_start */
		i = 0;
		if (bp->flash_info->buffered == 0) {
			/* Erase the page */
			if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
				goto nvram_write_end;

			/* Re-enable the write again for the actual write */
			bnx2_enable_nvram_write(bp);

			for (addr = page_start; addr < data_start;
				addr += 4, i += 4) {

				rc = bnx2_nvram_write_dword(bp, addr,
					&flash_buffer[i], cmd_flags);

				if (rc != 0)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Loop to write the new data from data_start to data_end */
		for (addr = data_start; addr < data_end; addr += 4, i += 4) {
			if ((addr == page_end - 4) ||
				((bp->flash_info->buffered) &&
				 (addr == data_end - 4))) {

				cmd_flags |= BNX2_NVM_COMMAND_LAST;
			}
			rc = bnx2_nvram_write_dword(bp, addr, buf,
				cmd_flags);

			if (rc != 0)
				goto nvram_write_end;

			cmd_flags = 0;
			buf += 4;
		}

		/* Loop to write back the buffer data from data_end
		 * to page_end */
		if (bp->flash_info->buffered == 0) {
			for (addr = data_end; addr < page_end;
				addr += 4, i += 4) {

				if (addr == page_end - 4) {
					cmd_flags = BNX2_NVM_COMMAND_LAST;
				}
				rc = bnx2_nvram_write_dword(bp, addr,
					&flash_buffer[i], cmd_flags);

				if (rc != 0)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Disable writes to flash interface (lock write-protect) */
		bnx2_disable_nvram_write(bp);

		/* Disable access to flash interface */
		bnx2_disable_nvram_access(bp);
		bnx2_release_nvram_lock(bp);

		/* Increment written */
		written += data_end - data_start;
	}

nvram_write_end:
	kfree(flash_buffer);
	kfree(align_buf);
	return rc;
}
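/*
 * Editor's note (summary of the loop above, no new behavior): for
 * non-buffered flash each pass of the while loop is a full
 * read-modify-write of one physical page: read the page into
 * flash_buffer, erase it, then rewrite the unchanged prefix
 * [page_start, data_start), the new bytes [data_start, data_end), and
 * the unchanged suffix [data_end, page_end).  Buffered parts can
 * overwrite in place, which is why they skip the erase and both
 * write-back loops.
 */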
static int
bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
{
	u32 val;
	int i, rc = 0;

	/* Wait for the current PCI transaction to complete before
	 * issuing a reset. */
	REG_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
	       BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
	       BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
	       BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
	       BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
	val = REG_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
	udelay(5);

	/* Wait for the firmware to tell us it is ok to issue a reset. */
	bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1);

	/* Deposit a driver reset signature so the firmware knows that
	 * this is a soft reset. */
	REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_RESET_SIGNATURE,
		   BNX2_DRV_RESET_SIGNATURE_MAGIC);

	/* Do a dummy read to force the chip to complete all current
	 * transactions before we issue a reset. */
	val = REG_RD(bp, BNX2_MISC_ID);

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		REG_WR(bp, BNX2_MISC_COMMAND, BNX2_MISC_COMMAND_SW_RESET);
		REG_RD(bp, BNX2_MISC_COMMAND);
		udelay(5);

		val = BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
		      BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;

		pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, val);

	} else {
		val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
		      BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
		      BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;

		/* Chip reset. */
		REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);

		if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
		    (CHIP_ID(bp) == CHIP_ID_5706_A1)) {
			current->state = TASK_UNINTERRUPTIBLE;
			schedule_timeout(HZ / 50);
		}

		/* Reset takes approximately 30 usec */
		for (i = 0; i < 10; i++) {
			val = REG_RD(bp, BNX2_PCICFG_MISC_CONFIG);
			if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
				    BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0)
				break;

			udelay(10);
		}

		if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
			   BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
			printk(KERN_ERR PFX "Chip reset did not complete\n");
			return -EBUSY;
		}
	}

	/* Make sure byte swapping is properly configured. */
	val = REG_RD(bp, BNX2_PCI_SWAP_DIAG0);
	if (val != 0x01020304) {
		printk(KERN_ERR PFX "Chip not in correct endian mode\n");
		return -ENODEV;
	}

	/* Wait for the firmware to finish its initialization. */
	rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 0);
	if (rc)
		return rc;

	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
		/* Adjust the voltage regulator to two steps lower.  The
		 * default of this register is 0x0000000e. */
		REG_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);

		/* Remove bad rbuf memory from the free pool. */
		rc = bnx2_alloc_bad_rbuf(bp);
	}

	return rc;
}
static int
bnx2_init_chip(struct bnx2 *bp)
{
	u32 val;
	int rc;

	/* Make sure the interrupt is not active. */
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
	      BNX2_DMA_CONFIG_DATA_WORD_SWAP |
#ifdef __BIG_ENDIAN
	      BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
#endif
	      BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
	      DMA_READ_CHANS << 12 |
	      DMA_WRITE_CHANS << 16;

	val |= (0x2 << 20) | (1 << 11);

	if ((bp->flags & PCIX_FLAG) && (bp->bus_speed_mhz == 133))
		val |= (1 << 23);

	if ((CHIP_NUM(bp) == CHIP_NUM_5706) &&
	    (CHIP_ID(bp) != CHIP_ID_5706_A0) && !(bp->flags & PCIX_FLAG))
		val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;

	REG_WR(bp, BNX2_DMA_CONFIG, val);

	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
		val = REG_RD(bp, BNX2_TDMA_CONFIG);
		val |= BNX2_TDMA_CONFIG_ONE_DMA;
		REG_WR(bp, BNX2_TDMA_CONFIG, val);
	}

	if (bp->flags & PCIX_FLAG) {
		u16 val16;

		pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
				     &val16);
		pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
				      val16 & ~PCI_X_CMD_ERO);
	}

	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
	       BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
	       BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
	       BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);

	/* Initialize context mapping and zero out the quick contexts.  The
	 * context block must have already been enabled. */
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_init_5709_context(bp);
	else
		bnx2_init_context(bp);

	if ((rc = bnx2_init_cpus(bp)) != 0)
		return rc;

	bnx2_init_nvram(bp);

	bnx2_set_mac_addr(bp);

	val = REG_RD(bp, BNX2_MQ_CONFIG);
	val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
	val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
	if (CHIP_ID(bp) == CHIP_ID_5709_A0 || CHIP_ID(bp) == CHIP_ID_5709_A1)
		val |= BNX2_MQ_CONFIG_HALT_DIS;

	REG_WR(bp, BNX2_MQ_CONFIG, val);

	val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
	REG_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
	REG_WR(bp, BNX2_MQ_KNL_WIND_END, val);

	val = (BCM_PAGE_BITS - 8) << 24;
	REG_WR(bp, BNX2_RV2P_CONFIG, val);

	/* Configure page size. */
	val = REG_RD(bp, BNX2_TBDR_CONFIG);
	val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
	val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
	REG_WR(bp, BNX2_TBDR_CONFIG, val);

	val = bp->mac_addr[0] +
	      (bp->mac_addr[1] << 8) +
	      (bp->mac_addr[2] << 16) +
	      bp->mac_addr[3] +
	      (bp->mac_addr[4] << 8) +
	      (bp->mac_addr[5] << 16);
	REG_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);

	/* Program the MTU.  Also include 4 bytes for CRC32. */
	val = bp->dev->mtu + ETH_HLEN + 4;
	if (val > (MAX_ETHERNET_PACKET_SIZE + 4))
		val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
	REG_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);

	bp->last_status_idx = 0;
	bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE;

	/* Set up how to generate a link change interrupt. */
	REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);

	REG_WR(bp, BNX2_HC_STATUS_ADDR_L,
	       (u64) bp->status_blk_mapping & 0xffffffff);
	REG_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);

	REG_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
	       (u64) bp->stats_blk_mapping & 0xffffffff);
	REG_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
	       (u64) bp->stats_blk_mapping >> 32);

	REG_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
	       (bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);

	REG_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
	       (bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);

	REG_WR(bp, BNX2_HC_COMP_PROD_TRIP,
	       (bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);

	REG_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);

	REG_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);

	REG_WR(bp, BNX2_HC_COM_TICKS,
	       (bp->com_ticks_int << 16) | bp->com_ticks);

	REG_WR(bp, BNX2_HC_CMD_TICKS,
	       (bp->cmd_ticks_int << 16) | bp->cmd_ticks);

	REG_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks & 0xffff00);
	REG_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8);	/* 3ms */

	if (CHIP_ID(bp) == CHIP_ID_5706_A1)
		REG_WR(bp, BNX2_HC_CONFIG, BNX2_HC_CONFIG_COLLECT_STATS);
	else
		REG_WR(bp, BNX2_HC_CONFIG, BNX2_HC_CONFIG_RX_TMR_MODE |
		       BNX2_HC_CONFIG_TX_TMR_MODE |
		       BNX2_HC_CONFIG_COLLECT_STATS);

	/* Clear internal stats counters. */
	REG_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);

	REG_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_BITS_LINK_STATE);

	if (REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_FEATURE) &
	    BNX2_PORT_FEATURE_ASF_ENABLED)
		bp->flags |= ASF_ENABLE_FLAG;

	/* Initialize the receive filter. */
	bnx2_set_rx_mode(bp->dev);

	rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
			  0);

	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, 0x5ffffff);
	REG_RD(bp, BNX2_MISC_ENABLE_SET_BITS);

	udelay(20);

	bp->hc_cmd = REG_RD(bp, BNX2_HC_COMMAND);

	return rc;
}
static void
bnx2_init_tx_context(struct bnx2 *bp, u32 cid)
{
	u32 val, offset0, offset1, offset2, offset3;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		offset0 = BNX2_L2CTX_TYPE_XI;
		offset1 = BNX2_L2CTX_CMD_TYPE_XI;
		offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
		offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
	} else {
		offset0 = BNX2_L2CTX_TYPE;
		offset1 = BNX2_L2CTX_CMD_TYPE;
		offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
		offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
	}
	val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
	CTX_WR(bp, GET_CID_ADDR(cid), offset0, val);

	val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
	CTX_WR(bp, GET_CID_ADDR(cid), offset1, val);

	val = (u64) bp->tx_desc_mapping >> 32;
	CTX_WR(bp, GET_CID_ADDR(cid), offset2, val);

	val = (u64) bp->tx_desc_mapping & 0xffffffff;
	CTX_WR(bp, GET_CID_ADDR(cid), offset3, val);
}

static void
bnx2_init_tx_ring(struct bnx2 *bp)
{
	struct tx_bd *txbd;
	u32 cid;

	bp->tx_wake_thresh = bp->tx_ring_size / 2;

	txbd = &bp->tx_desc_ring[MAX_TX_DESC_CNT];

	txbd->tx_bd_haddr_hi = (u64) bp->tx_desc_mapping >> 32;
	txbd->tx_bd_haddr_lo = (u64) bp->tx_desc_mapping & 0xffffffff;

	bp->tx_prod = 0;
	bp->tx_cons = 0;
	bp->hw_tx_cons = 0;
	bp->tx_prod_bseq = 0;

	cid = TX_CID;
	bp->tx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BIDX;
	bp->tx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BSEQ;

	bnx2_init_tx_context(bp, cid);
}
static void
bnx2_init_rx_ring(struct bnx2 *bp)
{
	struct rx_bd *rxbd;
	int i;
	u16 prod, ring_prod;
	u32 val;

	/* 8 for CRC and VLAN */
	bp->rx_buf_use_size = bp->dev->mtu + ETH_HLEN + bp->rx_offset + 8;
	/* hw alignment */
	bp->rx_buf_size = bp->rx_buf_use_size + BNX2_RX_ALIGN;

	ring_prod = prod = bp->rx_prod = 0;
	bp->rx_cons = 0;
	bp->hw_rx_cons = 0;
	bp->rx_prod_bseq = 0;

	for (i = 0; i < bp->rx_max_ring; i++) {
		int j;

		rxbd = &bp->rx_desc_ring[i][0];
		for (j = 0; j < MAX_RX_DESC_CNT; j++, rxbd++) {
			rxbd->rx_bd_len = bp->rx_buf_use_size;
			rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
		}
		if (i == (bp->rx_max_ring - 1))
			j = 0;
		else
			j = i + 1;
		rxbd->rx_bd_haddr_hi = (u64) bp->rx_desc_mapping[j] >> 32;
		rxbd->rx_bd_haddr_lo = (u64) bp->rx_desc_mapping[j] &
				       0xffffffff;
	}

	val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
	val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
	val |= 0x02 << 8;
	CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_CTX_TYPE, val);

	val = (u64) bp->rx_desc_mapping[0] >> 32;
	CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_NX_BDHADDR_HI, val);

	val = (u64) bp->rx_desc_mapping[0] & 0xffffffff;
	CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_NX_BDHADDR_LO, val);

	for (i = 0; i < bp->rx_ring_size; i++) {
		if (bnx2_alloc_rx_skb(bp, ring_prod) < 0) {
			break;
		}
		prod = NEXT_RX_BD(prod);
		ring_prod = RX_RING_IDX(prod);
	}
	bp->rx_prod = prod;

	REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, prod);

	REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bp->rx_prod_bseq);
}
static void
bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
{
	u32 num_rings, max;

	bp->rx_ring_size = size;
	num_rings = 1;
	while (size > MAX_RX_DESC_CNT) {
		size -= MAX_RX_DESC_CNT;
		num_rings++;
	}
	/* round to next power of 2 */
	max = MAX_RX_RINGS;
	while ((max & num_rings) == 0)
		max >>= 1;

	if (num_rings != max)
		max <<= 1;

	bp->rx_max_ring = max;
	bp->rx_max_ring_idx = (bp->rx_max_ring * RX_DESC_CNT) - 1;
}
static void
bnx2_free_tx_skbs(struct bnx2 *bp)
{
	int i;

	if (bp->tx_buf_ring == NULL)
		return;

	for (i = 0; i < TX_DESC_CNT; ) {
		struct sw_bd *tx_buf = &bp->tx_buf_ring[i];
		struct sk_buff *skb = tx_buf->skb;
		int j, last;

		if (skb == NULL) {
			i++;
			continue;
		}

		pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
			skb_headlen(skb), PCI_DMA_TODEVICE);

		tx_buf->skb = NULL;

		last = skb_shinfo(skb)->nr_frags;
		for (j = 0; j < last; j++) {
			tx_buf = &bp->tx_buf_ring[i + j + 1];
			pci_unmap_page(bp->pdev,
				pci_unmap_addr(tx_buf, mapping),
				skb_shinfo(skb)->frags[j].size,
				PCI_DMA_TODEVICE);
		}
		dev_kfree_skb(skb);
		i += j + 1;
	}
}

static void
bnx2_free_rx_skbs(struct bnx2 *bp)
{
	int i;

	if (bp->rx_buf_ring == NULL)
		return;

	for (i = 0; i < bp->rx_max_ring_idx; i++) {
		struct sw_bd *rx_buf = &bp->rx_buf_ring[i];
		struct sk_buff *skb = rx_buf->skb;

		if (skb == NULL)
			continue;

		pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
			bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);

		rx_buf->skb = NULL;

		dev_kfree_skb(skb);
	}
}

static void
bnx2_free_skbs(struct bnx2 *bp)
{
	bnx2_free_tx_skbs(bp);
	bnx2_free_rx_skbs(bp);
}

static int
bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
{
	int rc;

	rc = bnx2_reset_chip(bp, reset_code);
	bnx2_free_skbs(bp);
	if (rc)
		return rc;

	if ((rc = bnx2_init_chip(bp)) != 0)
		return rc;

	bnx2_init_tx_ring(bp);
	bnx2_init_rx_ring(bp);
	return 0;
}

static int
bnx2_init_nic(struct bnx2 *bp)
{
	int rc;

	if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
		return rc;

	spin_lock_bh(&bp->phy_lock);
	bnx2_init_phy(bp);
	bnx2_set_link(bp);
	spin_unlock_bh(&bp->phy_lock);
	return 0;
}
static int
bnx2_test_registers(struct bnx2 *bp)
{
	int ret;
	int i;
	static const struct {
		u16   offset;
		u16   flags;
		u32   rw_mask;
		u32   ro_mask;
	} reg_tbl[] = {
		{ 0x006c, 0, 0x00000000, 0x0000003f },
		{ 0x0090, 0, 0xffffffff, 0x00000000 },
		{ 0x0094, 0, 0x00000000, 0x00000000 },

		{ 0x0404, 0, 0x00003f00, 0x00000000 },
		{ 0x0418, 0, 0x00000000, 0xffffffff },
		{ 0x041c, 0, 0x00000000, 0xffffffff },
		{ 0x0420, 0, 0x00000000, 0x80ffffff },
		{ 0x0424, 0, 0x00000000, 0x00000000 },
		{ 0x0428, 0, 0x00000000, 0x00000001 },
		{ 0x0450, 0, 0x00000000, 0x0000ffff },
		{ 0x0454, 0, 0x00000000, 0xffffffff },
		{ 0x0458, 0, 0x00000000, 0xffffffff },

		{ 0x0808, 0, 0x00000000, 0xffffffff },
		{ 0x0854, 0, 0x00000000, 0xffffffff },
		{ 0x0868, 0, 0x00000000, 0x77777777 },
		{ 0x086c, 0, 0x00000000, 0x77777777 },
		{ 0x0870, 0, 0x00000000, 0x77777777 },
		{ 0x0874, 0, 0x00000000, 0x77777777 },

		{ 0x0c00, 0, 0x00000000, 0x00000001 },
		{ 0x0c04, 0, 0x00000000, 0x03ff0001 },
		{ 0x0c08, 0, 0x0f0ff073, 0x00000000 },

		{ 0x1000, 0, 0x00000000, 0x00000001 },
		{ 0x1004, 0, 0x00000000, 0x000f0001 },

		{ 0x1408, 0, 0x01c00800, 0x00000000 },
		{ 0x149c, 0, 0x8000ffff, 0x00000000 },
		{ 0x14a8, 0, 0x00000000, 0x000001ff },
		{ 0x14ac, 0, 0x0fffffff, 0x10000000 },
		{ 0x14b0, 0, 0x00000002, 0x00000001 },
		{ 0x14b8, 0, 0x00000000, 0x00000000 },
		{ 0x14c0, 0, 0x00000000, 0x00000009 },
		{ 0x14c4, 0, 0x00003fff, 0x00000000 },
		{ 0x14cc, 0, 0x00000000, 0x00000001 },
		{ 0x14d0, 0, 0xffffffff, 0x00000000 },

		{ 0x1800, 0, 0x00000000, 0x00000001 },
		{ 0x1804, 0, 0x00000000, 0x00000003 },

		{ 0x2800, 0, 0x00000000, 0x00000001 },
		{ 0x2804, 0, 0x00000000, 0x00003f01 },
		{ 0x2808, 0, 0x0f3f3f03, 0x00000000 },
		{ 0x2810, 0, 0xffff0000, 0x00000000 },
		{ 0x2814, 0, 0xffff0000, 0x00000000 },
		{ 0x2818, 0, 0xffff0000, 0x00000000 },
		{ 0x281c, 0, 0xffff0000, 0x00000000 },
		{ 0x2834, 0, 0xffffffff, 0x00000000 },
		{ 0x2840, 0, 0x00000000, 0xffffffff },
		{ 0x2844, 0, 0x00000000, 0xffffffff },
		{ 0x2848, 0, 0xffffffff, 0x00000000 },
		{ 0x284c, 0, 0xf800f800, 0x07ff07ff },

		{ 0x2c00, 0, 0x00000000, 0x00000011 },
		{ 0x2c04, 0, 0x00000000, 0x00030007 },

		{ 0x3c00, 0, 0x00000000, 0x00000001 },
		{ 0x3c04, 0, 0x00000000, 0x00070000 },
		{ 0x3c08, 0, 0x00007f71, 0x07f00000 },
		{ 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
		{ 0x3c10, 0, 0xffffffff, 0x00000000 },
		{ 0x3c14, 0, 0x00000000, 0xffffffff },
		{ 0x3c18, 0, 0x00000000, 0xffffffff },
		{ 0x3c1c, 0, 0xfffff000, 0x00000000 },
		{ 0x3c20, 0, 0xffffff00, 0x00000000 },

		{ 0x5004, 0, 0x00000000, 0x0000007f },
		{ 0x5008, 0, 0x0f0007ff, 0x00000000 },
		{ 0x500c, 0, 0xf800f800, 0x07ff07ff },

		{ 0x5c00, 0, 0x00000000, 0x00000001 },
		{ 0x5c04, 0, 0x00000000, 0x0003000f },
		{ 0x5c08, 0, 0x00000003, 0x00000000 },
		{ 0x5c0c, 0, 0x0000fff8, 0x00000000 },
		{ 0x5c10, 0, 0x00000000, 0xffffffff },
		{ 0x5c80, 0, 0x00000000, 0x0f7113f1 },
		{ 0x5c84, 0, 0x00000000, 0x0000f333 },
		{ 0x5c88, 0, 0x00000000, 0x00077373 },
		{ 0x5c8c, 0, 0x00000000, 0x0007f737 },

		{ 0x6808, 0, 0x0000ff7f, 0x00000000 },
		{ 0x680c, 0, 0xffffffff, 0x00000000 },
		{ 0x6810, 0, 0xffffffff, 0x00000000 },
		{ 0x6814, 0, 0xffffffff, 0x00000000 },
		{ 0x6818, 0, 0xffffffff, 0x00000000 },
		{ 0x681c, 0, 0xffffffff, 0x00000000 },
		{ 0x6820, 0, 0x00ff00ff, 0x00000000 },
		{ 0x6824, 0, 0x00ff00ff, 0x00000000 },
		{ 0x6828, 0, 0x00ff00ff, 0x00000000 },
		{ 0x682c, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6830, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6834, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6838, 0, 0x03ff03ff, 0x00000000 },
		{ 0x683c, 0, 0x0000ffff, 0x00000000 },
		{ 0x6840, 0, 0x00000ff0, 0x00000000 },
		{ 0x6844, 0, 0x00ffff00, 0x00000000 },
		{ 0x684c, 0, 0xffffffff, 0x00000000 },
		{ 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6908, 0, 0x00000000, 0x0001ff0f },
		{ 0x690c, 0, 0x00000000, 0x0ffe00f0 },

		{ 0xffff, 0, 0x00000000, 0x00000000 },
	};

	ret = 0;
	for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
		u32 offset, rw_mask, ro_mask, save_val, val;

		offset = (u32) reg_tbl[i].offset;
		rw_mask = reg_tbl[i].rw_mask;
		ro_mask = reg_tbl[i].ro_mask;

		save_val = readl(bp->regview + offset);

		writel(0, bp->regview + offset);

		val = readl(bp->regview + offset);
		if ((val & rw_mask) != 0) {
			goto reg_test_err;
		}

		if ((val & ro_mask) != (save_val & ro_mask)) {
			goto reg_test_err;
		}

		writel(0xffffffff, bp->regview + offset);

		val = readl(bp->regview + offset);
		if ((val & rw_mask) != rw_mask) {
			goto reg_test_err;
		}

		if ((val & ro_mask) != (save_val & ro_mask)) {
			goto reg_test_err;
		}

		writel(save_val, bp->regview + offset);
		continue;

reg_test_err:
		writel(save_val, bp->regview + offset);
		ret = -ENODEV;
		break;
	}
	return ret;
}
static int
bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
{
	static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
		0xaaaaaaaa , 0xaa55aa55, 0x55aa55aa };
	int i;

	for (i = 0; i < sizeof(test_pattern) / 4; i++) {
		u32 offset;

		for (offset = 0; offset < size; offset += 4) {

			REG_WR_IND(bp, start + offset, test_pattern[i]);

			if (REG_RD_IND(bp, start + offset) !=
				test_pattern[i]) {
				return -ENODEV;
			}
		}
	}
	return 0;
}

static int
bnx2_test_memory(struct bnx2 *bp)
{
	int ret = 0;
	int i;
	static const struct {
		u32   offset;
		u32   len;
	} mem_tbl[] = {
		{ 0x60000,  0x4000 },
		{ 0xa0000,  0x3000 },
		{ 0xe0000,  0x4000 },
		{ 0x120000, 0x4000 },
		{ 0x1a0000, 0x4000 },
		{ 0x160000, 0x4000 },
		{ 0xffffffff, 0    },
	};

	for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
		if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
			mem_tbl[i].len)) != 0) {
			return ret;
		}
	}

	return ret;
}
#define BNX2_MAC_LOOPBACK	0
#define BNX2_PHY_LOOPBACK	1

static int
bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
{
	unsigned int pkt_size, num_pkts, i;
	struct sk_buff *skb, *rx_skb;
	unsigned char *packet;
	u16 rx_start_idx, rx_idx;
	dma_addr_t map;
	struct tx_bd *txbd;
	struct sw_bd *rx_buf;
	struct l2_fhdr *rx_hdr;
	int ret = -ENODEV;

	if (loopback_mode == BNX2_MAC_LOOPBACK) {
		bp->loopback = MAC_LOOPBACK;
		bnx2_set_mac_loopback(bp);
	}
	else if (loopback_mode == BNX2_PHY_LOOPBACK) {
		bp->loopback = PHY_LOOPBACK;
		bnx2_set_phy_loopback(bp);
	}
	else
		return -EINVAL;

	pkt_size = 1514;
	skb = netdev_alloc_skb(bp->dev, pkt_size);
	if (!skb)
		return -ENOMEM;
	packet = skb_put(skb, pkt_size);
	memcpy(packet, bp->dev->dev_addr, 6);
	memset(packet + 6, 0x0, 8);
	for (i = 14; i < pkt_size; i++)
		packet[i] = (unsigned char) (i & 0xff);

	map = pci_map_single(bp->pdev, skb->data, pkt_size,
		PCI_DMA_TODEVICE);

	REG_WR(bp, BNX2_HC_COMMAND,
	       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);

	REG_RD(bp, BNX2_HC_COMMAND);

	udelay(5);
	rx_start_idx = bp->status_blk->status_rx_quick_consumer_index0;

	num_pkts = 0;

	txbd = &bp->tx_desc_ring[TX_RING_IDX(bp->tx_prod)];

	txbd->tx_bd_haddr_hi = (u64) map >> 32;
	txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
	txbd->tx_bd_mss_nbytes = pkt_size;
	txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;

	num_pkts++;
	bp->tx_prod = NEXT_TX_BD(bp->tx_prod);
	bp->tx_prod_bseq += pkt_size;

	REG_WR16(bp, bp->tx_bidx_addr, bp->tx_prod);
	REG_WR(bp, bp->tx_bseq_addr, bp->tx_prod_bseq);

	udelay(100);

	REG_WR(bp, BNX2_HC_COMMAND,
	       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);

	REG_RD(bp, BNX2_HC_COMMAND);

	udelay(5);

	pci_unmap_single(bp->pdev, map, pkt_size, PCI_DMA_TODEVICE);
	dev_kfree_skb(skb);

	if (bp->status_blk->status_tx_quick_consumer_index0 != bp->tx_prod) {
		goto loopback_test_done;
	}

	rx_idx = bp->status_blk->status_rx_quick_consumer_index0;
	if (rx_idx != rx_start_idx + num_pkts) {
		goto loopback_test_done;
	}

	rx_buf = &bp->rx_buf_ring[rx_start_idx];
	rx_skb = rx_buf->skb;

	rx_hdr = (struct l2_fhdr *) rx_skb->data;
	skb_reserve(rx_skb, bp->rx_offset);

	pci_dma_sync_single_for_cpu(bp->pdev,
		pci_unmap_addr(rx_buf, mapping),
		bp->rx_buf_size, PCI_DMA_FROMDEVICE);

	if (rx_hdr->l2_fhdr_status &
		(L2_FHDR_ERRORS_BAD_CRC |
		L2_FHDR_ERRORS_PHY_DECODE |
		L2_FHDR_ERRORS_ALIGNMENT |
		L2_FHDR_ERRORS_TOO_SHORT |
		L2_FHDR_ERRORS_GIANT_FRAME)) {

		goto loopback_test_done;
	}

	if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) {
		goto loopback_test_done;
	}

	for (i = 14; i < pkt_size; i++) {
		if (*(rx_skb->data + i) != (unsigned char) (i & 0xff)) {
			goto loopback_test_done;
		}
	}

	ret = 0;

loopback_test_done:
	bp->loopback = 0;
	return ret;
}
#define BNX2_MAC_LOOPBACK_FAILED	1
#define BNX2_PHY_LOOPBACK_FAILED	2
#define BNX2_LOOPBACK_FAILED		(BNX2_MAC_LOOPBACK_FAILED |	\
					 BNX2_PHY_LOOPBACK_FAILED)

static int
bnx2_test_loopback(struct bnx2 *bp)
{
	int rc = 0;

	if (!netif_running(bp->dev))
		return BNX2_LOOPBACK_FAILED;

	bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
	spin_lock_bh(&bp->phy_lock);
	bnx2_init_phy(bp);
	spin_unlock_bh(&bp->phy_lock);
	if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
		rc |= BNX2_MAC_LOOPBACK_FAILED;
	if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
		rc |= BNX2_PHY_LOOPBACK_FAILED;
	return rc;
}
#define NVRAM_SIZE 0x200
#define CRC32_RESIDUAL 0xdebb20e3

static int
bnx2_test_nvram(struct bnx2 *bp)
{
	u32 buf[NVRAM_SIZE / 4];
	u8 *data = (u8 *) buf;
	int rc = 0;
	u32 magic, csum;

	if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
		goto test_nvram_done;

	magic = be32_to_cpu(buf[0]);
	if (magic != 0x669955aa) {
		rc = -ENODEV;
		goto test_nvram_done;
	}

	if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0)
		goto test_nvram_done;

	csum = ether_crc_le(0x100, data);
	if (csum != CRC32_RESIDUAL) {
		rc = -ENODEV;
		goto test_nvram_done;
	}

	csum = ether_crc_le(0x100, data + 0x100);
	if (csum != CRC32_RESIDUAL) {
		rc = -ENODEV;
	}

test_nvram_done:
	return rc;
}
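/*
 * Editor's note on the checksum test above: each 0x100-byte NVRAM block
 * ends with the inverted CRC32 of the preceding bytes, and running
 * CRC32 across data plus its appended inverted checksum always yields
 * the constant residual 0xdebb20e3 for an intact block.  Comparing
 * ether_crc_le() of the whole block against CRC32_RESIDUAL therefore
 * verifies integrity without extracting the stored checksum.
 */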
static int
bnx2_test_link(struct bnx2 *bp)
{
	u32 bmsr;

	spin_lock_bh(&bp->phy_lock);
	/* BMSR latches link status; read twice for the current state. */
	bnx2_read_phy(bp, MII_BMSR, &bmsr);
	bnx2_read_phy(bp, MII_BMSR, &bmsr);
	spin_unlock_bh(&bp->phy_lock);

	if (bmsr & BMSR_LSTATUS) {
		return 0;
	}
	return -ENODEV;
}

static int
bnx2_test_intr(struct bnx2 *bp)
{
	int i;
	u16 status_idx;

	if (!netif_running(bp->dev))
		return -ENODEV;

	status_idx = REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;

	/* This register is not touched during run-time. */
	REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
	REG_RD(bp, BNX2_HC_COMMAND);

	for (i = 0; i < 10; i++) {
		if ((REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
			status_idx) {

			break;
		}

		msleep_interruptible(10);
	}
	if (i < 10)
		return 0;

	return -ENODEV;
}
static void
bnx2_5706_serdes_timer(struct bnx2 *bp)
{
	spin_lock(&bp->phy_lock);
	if (bp->serdes_an_pending)
		bp->serdes_an_pending--;
	else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
		u32 bmcr;

		bp->current_interval = bp->timer_interval;

		bnx2_read_phy(bp, MII_BMCR, &bmcr);

		if (bmcr & BMCR_ANENABLE) {
			u32 phy1, phy2;

			bnx2_write_phy(bp, 0x1c, 0x7c00);
			bnx2_read_phy(bp, 0x1c, &phy1);

			bnx2_write_phy(bp, 0x17, 0x0f01);
			bnx2_read_phy(bp, 0x15, &phy2);
			bnx2_write_phy(bp, 0x17, 0x0f01);
			bnx2_read_phy(bp, 0x15, &phy2);

			if ((phy1 & 0x10) &&	/* SIGNAL DETECT */
				!(phy2 & 0x20)) {	/* no CONFIG */

				bmcr &= ~BMCR_ANENABLE;
				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
				bnx2_write_phy(bp, MII_BMCR, bmcr);
				bp->phy_flags |= PHY_PARALLEL_DETECT_FLAG;
			}
		}
	}
	else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
		 (bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)) {
		u32 phy2;

		bnx2_write_phy(bp, 0x17, 0x0f01);
		bnx2_read_phy(bp, 0x15, &phy2);
		if (phy2 & 0x20) {
			u32 bmcr;

			bnx2_read_phy(bp, MII_BMCR, &bmcr);
			bmcr |= BMCR_ANENABLE;
			bnx2_write_phy(bp, MII_BMCR, bmcr);

			bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
		}
	} else
		bp->current_interval = bp->timer_interval;

	spin_unlock(&bp->phy_lock);
}

static void
bnx2_5708_serdes_timer(struct bnx2 *bp)
{
	if ((bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) == 0) {
		bp->serdes_an_pending = 0;
		return;
	}

	spin_lock(&bp->phy_lock);
	if (bp->serdes_an_pending)
		bp->serdes_an_pending--;
	else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
		u32 bmcr;

		bnx2_read_phy(bp, MII_BMCR, &bmcr);

		if (bmcr & BMCR_ANENABLE) {
			bmcr &= ~BMCR_ANENABLE;
			bmcr |= BMCR_FULLDPLX | BCM5708S_BMCR_FORCE_2500;
			bnx2_write_phy(bp, MII_BMCR, bmcr);
			bp->current_interval = SERDES_FORCED_TIMEOUT;
		} else {
			bmcr &= ~(BMCR_FULLDPLX | BCM5708S_BMCR_FORCE_2500);
			bmcr |= BMCR_ANENABLE;
			bnx2_write_phy(bp, MII_BMCR, bmcr);
			bp->serdes_an_pending = 2;
			bp->current_interval = bp->timer_interval;
		}

	} else
		bp->current_interval = bp->timer_interval;

	spin_unlock(&bp->phy_lock);
}
static void
bnx2_timer(unsigned long data)
{
	struct bnx2 *bp = (struct bnx2 *) data;
	u32 msg;

	if (!netif_running(bp->dev))
		return;

	if (atomic_read(&bp->intr_sem) != 0)
		goto bnx2_restart_timer;

	msg = (u32) ++bp->fw_drv_pulse_wr_seq;
	REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_PULSE_MB, msg);

	bp->stats_blk->stat_FwRxDrop = REG_RD_IND(bp, BNX2_FW_RX_DROP_COUNT);

	if (bp->phy_flags & PHY_SERDES_FLAG) {
		if (CHIP_NUM(bp) == CHIP_NUM_5706)
			bnx2_5706_serdes_timer(bp);
		else if (CHIP_NUM(bp) == CHIP_NUM_5708)
			bnx2_5708_serdes_timer(bp);
	}

bnx2_restart_timer:
	mod_timer(&bp->timer, jiffies + bp->current_interval);
}
/* Called with rtnl_lock */
static int
bnx2_open(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	int rc;

	bnx2_set_power_state(bp, PCI_D0);
	bnx2_disable_int(bp);

	rc = bnx2_alloc_mem(bp);
	if (rc)
		return rc;

	if ((CHIP_ID(bp) != CHIP_ID_5706_A0) &&
	    (CHIP_ID(bp) != CHIP_ID_5706_A1) &&
	    !disable_msi) {

		if (pci_enable_msi(bp->pdev) == 0) {
			bp->flags |= USING_MSI_FLAG;
			rc = request_irq(bp->pdev->irq, bnx2_msi, 0, dev->name,
					 dev);
		}
		else {
			rc = request_irq(bp->pdev->irq, bnx2_interrupt,
					 IRQF_SHARED, dev->name, dev);
		}
	}
	else {
		rc = request_irq(bp->pdev->irq, bnx2_interrupt, IRQF_SHARED,
				 dev->name, dev);
	}
	if (rc) {
		bnx2_free_mem(bp);
		return rc;
	}

	rc = bnx2_init_nic(bp);

	if (rc) {
		free_irq(bp->pdev->irq, dev);
		if (bp->flags & USING_MSI_FLAG) {
			pci_disable_msi(bp->pdev);
			bp->flags &= ~USING_MSI_FLAG;
		}
		bnx2_free_skbs(bp);
		bnx2_free_mem(bp);
		return rc;
	}

	mod_timer(&bp->timer, jiffies + bp->current_interval);

	atomic_set(&bp->intr_sem, 0);

	bnx2_enable_int(bp);

	if (bp->flags & USING_MSI_FLAG) {
		/* Test MSI to make sure it is working
		 * If MSI test fails, go back to INTx mode
		 */
		if (bnx2_test_intr(bp) != 0) {
			printk(KERN_WARNING PFX "%s: No interrupt was generated"
			       " using MSI, switching to INTx mode. Please"
			       " report this failure to the PCI maintainer"
			       " and include system chipset information.\n",
			       bp->dev->name);

			bnx2_disable_int(bp);
			free_irq(bp->pdev->irq, dev);
			pci_disable_msi(bp->pdev);
			bp->flags &= ~USING_MSI_FLAG;

			rc = bnx2_init_nic(bp);

			if (!rc) {
				rc = request_irq(bp->pdev->irq, bnx2_interrupt,
					IRQF_SHARED, dev->name, dev);
			}
			if (rc) {
				bnx2_free_skbs(bp);
				bnx2_free_mem(bp);
				del_timer_sync(&bp->timer);
				return rc;
			}
			bnx2_enable_int(bp);
		}
	}
	if (bp->flags & USING_MSI_FLAG) {
		printk(KERN_INFO PFX "%s: using MSI\n", dev->name);
	}

	netif_start_queue(dev);

	return 0;
}
static void
bnx2_reset_task(struct work_struct *work)
{
	struct bnx2 *bp = container_of(work, struct bnx2, reset_task);

	if (!netif_running(bp->dev))
		return;

	bp->in_reset_task = 1;
	bnx2_netif_stop(bp);

	bnx2_init_nic(bp);

	atomic_set(&bp->intr_sem, 1);
	bnx2_netif_start(bp);
	bp->in_reset_task = 0;
}

static void
bnx2_tx_timeout(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);

	/* This allows the netif to be shutdown gracefully before resetting */
	schedule_work(&bp->reset_task);
}

/* Called with rtnl_lock */
static void
bnx2_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp)
{
	struct bnx2 *bp = netdev_priv(dev);

	bnx2_netif_stop(bp);

	bp->vlgrp = vlgrp;
	bnx2_set_rx_mode(dev);

	bnx2_netif_start(bp);
}

/* Called with rtnl_lock */
static void
bnx2_vlan_rx_kill_vid(struct net_device *dev, uint16_t vid)
{
	struct bnx2 *bp = netdev_priv(dev);

	bnx2_netif_stop(bp);
	vlan_group_set_device(bp->vlgrp, vid, NULL);
	bnx2_set_rx_mode(dev);

	bnx2_netif_start(bp);
}
/* Called with netif_tx_lock.
 * bnx2_tx_int() runs without netif_tx_lock unless it needs to call
 * netif_wake_queue().
 */
static int
bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	dma_addr_t mapping;
	struct tx_bd *txbd;
	struct sw_bd *tx_buf;
	u32 len, vlan_tag_flags, last_frag, mss;
	u16 prod, ring_prod;
	int i;

	if (unlikely(bnx2_tx_avail(bp) < (skb_shinfo(skb)->nr_frags + 1))) {
		netif_stop_queue(dev);
		printk(KERN_ERR PFX "%s: BUG! Tx ring full when queue awake!\n",
			dev->name);

		return NETDEV_TX_BUSY;
	}
	len = skb_headlen(skb);
	prod = bp->tx_prod;
	ring_prod = TX_RING_IDX(prod);

	vlan_tag_flags = 0;
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
	}

	if (bp->vlgrp != 0 && vlan_tx_tag_present(skb)) {
		vlan_tag_flags |=
			(TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
	}
	if ((mss = skb_shinfo(skb)->gso_size)) {
		u32 tcp_opt_len, ip_tcp_len;

		if (skb_header_cloned(skb) &&
		    pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
			dev_kfree_skb(skb);
			return NETDEV_TX_OK;
		}

		vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;

		tcp_opt_len = 0;
		if (skb->h.th->doff > 5) {
			tcp_opt_len = (skb->h.th->doff - 5) << 2;
		}
		ip_tcp_len = (skb->nh.iph->ihl << 2) + sizeof(struct tcphdr);

		skb->nh.iph->check = 0;
		skb->nh.iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
		skb->h.th->check =
			~csum_tcpudp_magic(skb->nh.iph->saddr,
					   skb->nh.iph->daddr,
					   0, IPPROTO_TCP, 0);

		if (tcp_opt_len || (skb->nh.iph->ihl > 5)) {
			vlan_tag_flags |= ((skb->nh.iph->ihl - 5) +
				(tcp_opt_len >> 2)) << 8;
		}
	}
	else {
		mss = 0;
	}

	mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE);

	tx_buf = &bp->tx_buf_ring[ring_prod];
	tx_buf->skb = skb;
	pci_unmap_addr_set(tx_buf, mapping, mapping);

	txbd = &bp->tx_desc_ring[ring_prod];

	txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
	txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
	txbd->tx_bd_mss_nbytes = len | (mss << 16);
	txbd->tx_bd_vlan_tag_flags = vlan_tag_flags | TX_BD_FLAGS_START;

	last_frag = skb_shinfo(skb)->nr_frags;

	for (i = 0; i < last_frag; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		prod = NEXT_TX_BD(prod);
		ring_prod = TX_RING_IDX(prod);
		txbd = &bp->tx_desc_ring[ring_prod];

		len = frag->size;
		mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
			len, PCI_DMA_TODEVICE);
		pci_unmap_addr_set(&bp->tx_buf_ring[ring_prod],
				mapping, mapping);

		txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
		txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
		txbd->tx_bd_mss_nbytes = len | (mss << 16);
		txbd->tx_bd_vlan_tag_flags = vlan_tag_flags;

	}
	txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;

	prod = NEXT_TX_BD(prod);
	bp->tx_prod_bseq += skb->len;

	REG_WR16(bp, bp->tx_bidx_addr, prod);
	REG_WR(bp, bp->tx_bseq_addr, bp->tx_prod_bseq);

	bp->tx_prod = prod;
	dev->trans_start = jiffies;

	if (unlikely(bnx2_tx_avail(bp) <= MAX_SKB_FRAGS)) {
		netif_stop_queue(dev);
		if (bnx2_tx_avail(bp) > bp->tx_wake_thresh)
			netif_wake_queue(dev);
	}

	return NETDEV_TX_OK;
}
/* Called with rtnl_lock */
static int
bnx2_close(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 reset_code;

	/* Calling flush_scheduled_work() may deadlock because
	 * linkwatch_event() may be on the workqueue and it will try to get
	 * the rtnl_lock which we are holding.
	 */
	while (bp->in_reset_task)
		msleep(1);

	bnx2_netif_stop(bp);
	del_timer_sync(&bp->timer);
	if (bp->flags & NO_WOL_FLAG)
		reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
	else if (bp->wol)
		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
	else
		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
	bnx2_reset_chip(bp, reset_code);
	free_irq(bp->pdev->irq, dev);
	if (bp->flags & USING_MSI_FLAG) {
		pci_disable_msi(bp->pdev);
		bp->flags &= ~USING_MSI_FLAG;
	}
	bnx2_free_skbs(bp);
	bnx2_free_mem(bp);
	bp->link_up = 0;
	netif_carrier_off(bp->dev);
	bnx2_set_power_state(bp, PCI_D3hot);
	return 0;
}
#define GET_NET_STATS64(ctr)					\
	(unsigned long) ((unsigned long) (ctr##_hi) << 32) +	\
	(unsigned long) (ctr##_lo)

#define GET_NET_STATS32(ctr)	\
	(ctr##_lo)

#if (BITS_PER_LONG == 64)
#define GET_NET_STATS	GET_NET_STATS64
#else
#define GET_NET_STATS	GET_NET_STATS32
#endif

static struct net_device_stats *
bnx2_get_stats(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	struct statistics_block *stats_blk = bp->stats_blk;
	struct net_device_stats *net_stats = &bp->net_stats;

	if (bp->stats_blk == NULL) {
		return net_stats;
	}
	net_stats->rx_packets =
		GET_NET_STATS(stats_blk->stat_IfHCInUcastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCInMulticastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCInBroadcastPkts);

	net_stats->tx_packets =
		GET_NET_STATS(stats_blk->stat_IfHCOutUcastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCOutBroadcastPkts);

	net_stats->rx_bytes =
		GET_NET_STATS(stats_blk->stat_IfHCInOctets);

	net_stats->tx_bytes =
		GET_NET_STATS(stats_blk->stat_IfHCOutOctets);

	net_stats->multicast =
		GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts);

	net_stats->collisions =
		(unsigned long) stats_blk->stat_EtherStatsCollisions;

	net_stats->rx_length_errors =
		(unsigned long) (stats_blk->stat_EtherStatsUndersizePkts +
		stats_blk->stat_EtherStatsOverrsizePkts);

	net_stats->rx_over_errors =
		(unsigned long) stats_blk->stat_IfInMBUFDiscards;

	net_stats->rx_frame_errors =
		(unsigned long) stats_blk->stat_Dot3StatsAlignmentErrors;

	net_stats->rx_crc_errors =
		(unsigned long) stats_blk->stat_Dot3StatsFCSErrors;

	net_stats->rx_errors = net_stats->rx_length_errors +
		net_stats->rx_over_errors + net_stats->rx_frame_errors +
		net_stats->rx_crc_errors;

	net_stats->tx_aborted_errors =
		(unsigned long) (stats_blk->stat_Dot3StatsExcessiveCollisions +
		stats_blk->stat_Dot3StatsLateCollisions);

	if ((CHIP_NUM(bp) == CHIP_NUM_5706) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_A0))
		net_stats->tx_carrier_errors = 0;
	else {
		net_stats->tx_carrier_errors =
			(unsigned long)
			stats_blk->stat_Dot3StatsCarrierSenseErrors;
	}

	net_stats->tx_errors =
		(unsigned long)
		stats_blk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors
		+
		net_stats->tx_aborted_errors +
		net_stats->tx_carrier_errors;

	net_stats->rx_missed_errors =
		(unsigned long) (stats_blk->stat_IfInMBUFDiscards +
		stats_blk->stat_FwRxDrop);

	return net_stats;
}
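/*
 * Editor's note, a worked example of the GET_NET_STATS macros above:
 * the chip keeps 64-bit counters as _hi/_lo register pairs.  On a
 * 64-bit kernel, for stat_IfHCInOctets_hi = 0x2 and
 * stat_IfHCInOctets_lo = 0x80000000:
 *
 *	((unsigned long) 0x2 << 32) + 0x80000000 = 0x280000000
 *
 * On 32-bit kernels GET_NET_STATS32 keeps only the _lo word, since the
 * unsigned long fields of struct net_device_stats cannot hold more.
 */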
/* All ethtool functions called with rtnl_lock */

static int
bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct bnx2 *bp = netdev_priv(dev);

	cmd->supported = SUPPORTED_Autoneg;
	if (bp->phy_flags & PHY_SERDES_FLAG) {
		cmd->supported |= SUPPORTED_1000baseT_Full |
			SUPPORTED_FIBRE;

		cmd->port = PORT_FIBRE;
	}
	else {
		cmd->supported |= SUPPORTED_10baseT_Half |
			SUPPORTED_10baseT_Full |
			SUPPORTED_100baseT_Half |
			SUPPORTED_100baseT_Full |
			SUPPORTED_1000baseT_Full |
			SUPPORTED_TP;

		cmd->port = PORT_TP;
	}

	cmd->advertising = bp->advertising;

	if (bp->autoneg & AUTONEG_SPEED) {
		cmd->autoneg = AUTONEG_ENABLE;
	}
	else {
		cmd->autoneg = AUTONEG_DISABLE;
	}

	if (netif_carrier_ok(dev)) {
		cmd->speed = bp->line_speed;
		cmd->duplex = bp->duplex;
	}
	else {
		cmd->speed = -1;
		cmd->duplex = -1;
	}

	cmd->transceiver = XCVR_INTERNAL;
	cmd->phy_address = bp->phy_addr;

	return 0;
}

static int
bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct bnx2 *bp = netdev_priv(dev);
	u8 autoneg = bp->autoneg;
	u8 req_duplex = bp->req_duplex;
	u16 req_line_speed = bp->req_line_speed;
	u32 advertising = bp->advertising;

	if (cmd->autoneg == AUTONEG_ENABLE) {
		autoneg |= AUTONEG_SPEED;

		cmd->advertising &= ETHTOOL_ALL_COPPER_SPEED;

		/* allow advertising 1 speed */
		if ((cmd->advertising == ADVERTISED_10baseT_Half) ||
			(cmd->advertising == ADVERTISED_10baseT_Full) ||
			(cmd->advertising == ADVERTISED_100baseT_Half) ||
			(cmd->advertising == ADVERTISED_100baseT_Full)) {

			if (bp->phy_flags & PHY_SERDES_FLAG)
				return -EINVAL;

			advertising = cmd->advertising;

		}
		else if (cmd->advertising == ADVERTISED_1000baseT_Full) {
			advertising = cmd->advertising;
		}
		else if (cmd->advertising == ADVERTISED_1000baseT_Half) {
			return -EINVAL;
		}
		else {
			if (bp->phy_flags & PHY_SERDES_FLAG) {
				advertising = ETHTOOL_ALL_FIBRE_SPEED;
			}
			else {
				advertising = ETHTOOL_ALL_COPPER_SPEED;
			}
		}
		advertising |= ADVERTISED_Autoneg;
	}
	else {
		if (bp->phy_flags & PHY_SERDES_FLAG) {
			if ((cmd->speed != SPEED_1000 &&
			     cmd->speed != SPEED_2500) ||
			    (cmd->duplex != DUPLEX_FULL))
				return -EINVAL;

			if (cmd->speed == SPEED_2500 &&
			    !(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
				return -EINVAL;
		}
		else if (cmd->speed == SPEED_1000) {
			return -EINVAL;
		}
		autoneg &= ~AUTONEG_SPEED;
		req_line_speed = cmd->speed;
		req_duplex = cmd->duplex;
		advertising = 0;
	}

	bp->autoneg = autoneg;
	bp->advertising = advertising;
	bp->req_line_speed = req_line_speed;
	bp->req_duplex = req_duplex;

	spin_lock_bh(&bp->phy_lock);

	bnx2_setup_phy(bp);

	spin_unlock_bh(&bp->phy_lock);

	return 0;
}
static void
bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct bnx2 *bp = netdev_priv(dev);

	strcpy(info->driver, DRV_MODULE_NAME);
	strcpy(info->version, DRV_MODULE_VERSION);
	strcpy(info->bus_info, pci_name(bp->pdev));
	info->fw_version[0] = ((bp->fw_ver & 0xff000000) >> 24) + '0';
	info->fw_version[2] = ((bp->fw_ver & 0xff0000) >> 16) + '0';
	info->fw_version[4] = ((bp->fw_ver & 0xff00) >> 8) + '0';
	info->fw_version[1] = info->fw_version[3] = '.';
	info->fw_version[5] = 0;
}

#define BNX2_REGDUMP_LEN		(32 * 1024)

static int
bnx2_get_regs_len(struct net_device *dev)
{
	return BNX2_REGDUMP_LEN;
}

static void
bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
{
	u32 *p = _p, i, offset;
	u8 *orig_p = _p;
	struct bnx2 *bp = netdev_priv(dev);
	u32 reg_boundaries[] = { 0x0000, 0x0098, 0x0400, 0x045c,
				 0x0800, 0x0880, 0x0c00, 0x0c10,
				 0x0c30, 0x0d08, 0x1000, 0x101c,
				 0x1040, 0x1048, 0x1080, 0x10a4,
				 0x1400, 0x1490, 0x1498, 0x14f0,
				 0x1500, 0x155c, 0x1580, 0x15dc,
				 0x1600, 0x1658, 0x1680, 0x16d8,
				 0x1800, 0x1820, 0x1840, 0x1854,
				 0x1880, 0x1894, 0x1900, 0x1984,
				 0x1c00, 0x1c0c, 0x1c40, 0x1c54,
				 0x1c80, 0x1c94, 0x1d00, 0x1d84,
				 0x2000, 0x2030, 0x23c0, 0x2400,
				 0x2800, 0x2820, 0x2830, 0x2850,
				 0x2b40, 0x2c10, 0x2fc0, 0x3058,
				 0x3c00, 0x3c94, 0x4000, 0x4010,
				 0x4080, 0x4090, 0x43c0, 0x4458,
				 0x4c00, 0x4c18, 0x4c40, 0x4c54,
				 0x4fc0, 0x5010, 0x53c0, 0x5444,
				 0x5c00, 0x5c18, 0x5c80, 0x5c90,
				 0x5fc0, 0x6000, 0x6400, 0x6428,
				 0x6800, 0x6848, 0x684c, 0x6860,
				 0x6888, 0x6910, 0x8000 };

	regs->version = 0;

	memset(p, 0, BNX2_REGDUMP_LEN);

	if (!netif_running(bp->dev))
		return;

	i = 0;
	offset = reg_boundaries[0];
	p += offset;
	while (offset < BNX2_REGDUMP_LEN) {
		*p++ = REG_RD(bp, offset);
		offset += 4;
		if (offset == reg_boundaries[i + 1]) {
			offset = reg_boundaries[i + 2];
			p = (u32 *) (orig_p + offset);
			i += 2;
		}
	}
}
4928 bnx2_get_wol(struct net_device
*dev
, struct ethtool_wolinfo
*wol
)
4930 struct bnx2
*bp
= netdev_priv(dev
);
4932 if (bp
->flags
& NO_WOL_FLAG
) {
4937 wol
->supported
= WAKE_MAGIC
;
4939 wol
->wolopts
= WAKE_MAGIC
;
4943 memset(&wol
->sopass
, 0, sizeof(wol
->sopass
));
4947 bnx2_set_wol(struct net_device
*dev
, struct ethtool_wolinfo
*wol
)
4949 struct bnx2
*bp
= netdev_priv(dev
);
4951 if (wol
->wolopts
& ~WAKE_MAGIC
)
4954 if (wol
->wolopts
& WAKE_MAGIC
) {
4955 if (bp
->flags
& NO_WOL_FLAG
)

static int
bnx2_nway_reset(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 bmcr;

	if (!(bp->autoneg & AUTONEG_SPEED)) {
		return -EINVAL;
	}

	spin_lock_bh(&bp->phy_lock);

	/* Force a link down visible on the other side */
	if (bp->phy_flags & PHY_SERDES_FLAG) {
		bnx2_write_phy(bp, MII_BMCR, BMCR_LOOPBACK);
		spin_unlock_bh(&bp->phy_lock);

		msleep(20);

		spin_lock_bh(&bp->phy_lock);

		bp->current_interval = SERDES_AN_TIMEOUT;
		bp->serdes_an_pending = 1;
		mod_timer(&bp->timer, jiffies + bp->current_interval);
	}

	bnx2_read_phy(bp, MII_BMCR, &bmcr);
	bmcr &= ~BMCR_LOOPBACK;
	bnx2_write_phy(bp, MII_BMCR, bmcr | BMCR_ANRESTART | BMCR_ANENABLE);

	spin_unlock_bh(&bp->phy_lock);

	return 0;
}
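
/* The "EEPROM" exposed through ethtool is really the NVRAM/flash part
 * behind the chip; reads and writes go through the bnx2_nvram_*
 * helpers.
 */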

static int
bnx2_get_eeprom_len(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);

	if (bp->flash_info == NULL)
		return 0;

	return (int) bp->flash_size;
}

static int
bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
		u8 *eebuf)
{
	struct bnx2 *bp = netdev_priv(dev);
	int rc;

	/* parameters already validated in ethtool_get_eeprom */

	rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);

	return rc;
}

static int
bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
		u8 *eebuf)
{
	struct bnx2 *bp = netdev_priv(dev);
	int rc;

	/* parameters already validated in ethtool_set_eeprom */

	rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);

	return rc;
}

static int
bnx2_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
{
	struct bnx2 *bp = netdev_priv(dev);

	memset(coal, 0, sizeof(struct ethtool_coalesce));

	coal->rx_coalesce_usecs = bp->rx_ticks;
	coal->rx_max_coalesced_frames = bp->rx_quick_cons_trip;
	coal->rx_coalesce_usecs_irq = bp->rx_ticks_int;
	coal->rx_max_coalesced_frames_irq = bp->rx_quick_cons_trip_int;

	coal->tx_coalesce_usecs = bp->tx_ticks;
	coal->tx_max_coalesced_frames = bp->tx_quick_cons_trip;
	coal->tx_coalesce_usecs_irq = bp->tx_ticks_int;
	coal->tx_max_coalesced_frames_irq = bp->tx_quick_cons_trip_int;

	coal->stats_block_coalesce_usecs = bp->stats_ticks;

	return 0;
}
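
/* Hardware limits for the coalescing knobs: tick values are 10-bit
 * (clamped to 0x3ff), frame counts are 8-bit (clamped to 0xff), and the
 * statistics tick value is forced to a multiple of 0x100.  For example,
 * "ethtool -C eth0 rx-usecs 18 rx-frames 6" programs the rx defaults
 * established in bnx2_init_board().
 */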

static int
bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
{
	struct bnx2 *bp = netdev_priv(dev);

	bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
	if (bp->rx_ticks > 0x3ff) bp->rx_ticks = 0x3ff;

	bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames;
	if (bp->rx_quick_cons_trip > 0xff) bp->rx_quick_cons_trip = 0xff;

	bp->rx_ticks_int = (u16) coal->rx_coalesce_usecs_irq;
	if (bp->rx_ticks_int > 0x3ff) bp->rx_ticks_int = 0x3ff;

	bp->rx_quick_cons_trip_int = (u16) coal->rx_max_coalesced_frames_irq;
	if (bp->rx_quick_cons_trip_int > 0xff)
		bp->rx_quick_cons_trip_int = 0xff;

	bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
	if (bp->tx_ticks > 0x3ff) bp->tx_ticks = 0x3ff;

	bp->tx_quick_cons_trip = (u16) coal->tx_max_coalesced_frames;
	if (bp->tx_quick_cons_trip > 0xff) bp->tx_quick_cons_trip = 0xff;

	bp->tx_ticks_int = (u16) coal->tx_coalesce_usecs_irq;
	if (bp->tx_ticks_int > 0x3ff) bp->tx_ticks_int = 0x3ff;

	bp->tx_quick_cons_trip_int = (u16) coal->tx_max_coalesced_frames_irq;
	if (bp->tx_quick_cons_trip_int > 0xff) bp->tx_quick_cons_trip_int =
		0xff;

	bp->stats_ticks = coal->stats_block_coalesce_usecs;
	if (bp->stats_ticks > 0xffff00) bp->stats_ticks = 0xffff00;
	bp->stats_ticks &= 0xffff00;

	if (netif_running(bp->dev)) {
		bnx2_netif_stop(bp);
		bnx2_init_nic(bp);
		bnx2_netif_start(bp);
	}

	return 0;
}
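
/* Ring sizes can only change with a full reinit: the rings live in DMA
 * memory sized when the device is brought up, so bnx2_set_ringparam()
 * stops the NIC, frees and reallocates that memory, then restarts it.
 */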

static void
bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
{
	struct bnx2 *bp = netdev_priv(dev);

	ering->rx_max_pending = MAX_TOTAL_RX_DESC_CNT;
	ering->rx_mini_max_pending = 0;
	ering->rx_jumbo_max_pending = 0;

	ering->rx_pending = bp->rx_ring_size;
	ering->rx_mini_pending = 0;
	ering->rx_jumbo_pending = 0;

	ering->tx_max_pending = MAX_TX_DESC_CNT;
	ering->tx_pending = bp->tx_ring_size;
}

static int
bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
{
	struct bnx2 *bp = netdev_priv(dev);

	if ((ering->rx_pending > MAX_TOTAL_RX_DESC_CNT) ||
		(ering->tx_pending > MAX_TX_DESC_CNT) ||
		(ering->tx_pending <= MAX_SKB_FRAGS)) {

		return -EINVAL;
	}
	if (netif_running(bp->dev)) {
		bnx2_netif_stop(bp);
		bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
		bnx2_free_skbs(bp);
		bnx2_free_mem(bp);
	}

	bnx2_set_rx_ring_size(bp, ering->rx_pending);
	bp->tx_ring_size = ering->tx_pending;

	if (netif_running(bp->dev)) {
		int rc;

		rc = bnx2_alloc_mem(bp);
		if (rc)
			return rc;
		bnx2_init_nic(bp);
		bnx2_netif_start(bp);
	}

	return 0;
}

static void
bnx2_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
{
	struct bnx2 *bp = netdev_priv(dev);

	epause->autoneg = ((bp->autoneg & AUTONEG_FLOW_CTRL) != 0);
	epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) != 0);
	epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) != 0);
}

static int
bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
{
	struct bnx2 *bp = netdev_priv(dev);

	bp->req_flow_ctrl = 0;
	if (epause->rx_pause)
		bp->req_flow_ctrl |= FLOW_CTRL_RX;
	if (epause->tx_pause)
		bp->req_flow_ctrl |= FLOW_CTRL_TX;

	if (epause->autoneg) {
		bp->autoneg |= AUTONEG_FLOW_CTRL;
	}
	else {
		bp->autoneg &= ~AUTONEG_FLOW_CTRL;
	}

	spin_lock_bh(&bp->phy_lock);

	bnx2_setup_phy(bp);

	spin_unlock_bh(&bp->phy_lock);

	return 0;
}

static u32
bnx2_get_rx_csum(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);

	return bp->rx_csum;
}

static int
bnx2_set_rx_csum(struct net_device *dev, u32 data)
{
	struct bnx2 *bp = netdev_priv(dev);

	bp->rx_csum = data;
	return 0;
}

static int
bnx2_set_tso(struct net_device *dev, u32 data)
{
	if (data)
		dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
	else
		dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
	return 0;
}
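
/* The three arrays that follow are parallel, indexed 0..BNX2_NUM_STATS-1:
 * bnx2_stats_str_arr gives the ethtool -S name, bnx2_stats_offset_arr the
 * 32-bit word offset into the hardware statistics block, and the per-chip
 * *_stats_len_arr the counter width in bytes (0 means skipped).
 */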

#define BNX2_NUM_STATS 46

static struct {
	char string[ETH_GSTRING_LEN];
} bnx2_stats_str_arr[BNX2_NUM_STATS] = {
	{ "rx_bytes" },
	{ "rx_error_bytes" },
	{ "tx_bytes" },
	{ "tx_error_bytes" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_mac_errors" },
	{ "tx_carrier_errors" },
	{ "rx_crc_errors" },
	{ "rx_align_errors" },
	{ "tx_single_collisions" },
	{ "tx_multi_collisions" },
	{ "tx_deferred_frames" },
	{ "tx_excess_collisions" },
	{ "tx_late_collisions" },
	{ "tx_total_collisions" },
	{ "rx_fragments" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_oversize_packets" },
	{ "rx_64_byte_packets" },
	{ "rx_65_to_127_byte_packets" },
	{ "rx_128_to_255_byte_packets" },
	{ "rx_256_to_511_byte_packets" },
	{ "rx_512_to_1023_byte_packets" },
	{ "rx_1024_to_1522_byte_packets" },
	{ "rx_1523_to_9022_byte_packets" },
	{ "tx_64_byte_packets" },
	{ "tx_65_to_127_byte_packets" },
	{ "tx_128_to_255_byte_packets" },
	{ "tx_256_to_511_byte_packets" },
	{ "tx_512_to_1023_byte_packets" },
	{ "tx_1024_to_1522_byte_packets" },
	{ "tx_1523_to_9022_byte_packets" },
	{ "rx_xon_frames" },
	{ "rx_xoff_frames" },
	{ "tx_xon_frames" },
	{ "tx_xoff_frames" },
	{ "rx_mac_ctrl_frames" },
	{ "rx_filtered_packets" },
	{ "rx_discards" },
	{ "rx_fw_discards" },
};

#define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4)

static const unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
	STATS_OFFSET32(stat_IfHCInOctets_hi),
	STATS_OFFSET32(stat_IfHCInBadOctets_hi),
	STATS_OFFSET32(stat_IfHCOutOctets_hi),
	STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
	STATS_OFFSET32(stat_IfHCInUcastPkts_hi),
	STATS_OFFSET32(stat_IfHCInMulticastPkts_hi),
	STATS_OFFSET32(stat_IfHCInBroadcastPkts_hi),
	STATS_OFFSET32(stat_IfHCOutUcastPkts_hi),
	STATS_OFFSET32(stat_IfHCOutMulticastPkts_hi),
	STATS_OFFSET32(stat_IfHCOutBroadcastPkts_hi),
	STATS_OFFSET32(stat_emac_tx_stat_dot3statsinternalmactransmiterrors),
	STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
	STATS_OFFSET32(stat_Dot3StatsFCSErrors),
	STATS_OFFSET32(stat_Dot3StatsAlignmentErrors),
	STATS_OFFSET32(stat_Dot3StatsSingleCollisionFrames),
	STATS_OFFSET32(stat_Dot3StatsMultipleCollisionFrames),
	STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
	STATS_OFFSET32(stat_Dot3StatsExcessiveCollisions),
	STATS_OFFSET32(stat_Dot3StatsLateCollisions),
	STATS_OFFSET32(stat_EtherStatsCollisions),
	STATS_OFFSET32(stat_EtherStatsFragments),
	STATS_OFFSET32(stat_EtherStatsJabbers),
	STATS_OFFSET32(stat_EtherStatsUndersizePkts),
	STATS_OFFSET32(stat_EtherStatsOverrsizePkts),
	STATS_OFFSET32(stat_EtherStatsPktsRx64Octets),
	STATS_OFFSET32(stat_EtherStatsPktsRx65Octetsto127Octets),
	STATS_OFFSET32(stat_EtherStatsPktsRx128Octetsto255Octets),
	STATS_OFFSET32(stat_EtherStatsPktsRx256Octetsto511Octets),
	STATS_OFFSET32(stat_EtherStatsPktsRx512Octetsto1023Octets),
	STATS_OFFSET32(stat_EtherStatsPktsRx1024Octetsto1522Octets),
	STATS_OFFSET32(stat_EtherStatsPktsRx1523Octetsto9022Octets),
	STATS_OFFSET32(stat_EtherStatsPktsTx64Octets),
	STATS_OFFSET32(stat_EtherStatsPktsTx65Octetsto127Octets),
	STATS_OFFSET32(stat_EtherStatsPktsTx128Octetsto255Octets),
	STATS_OFFSET32(stat_EtherStatsPktsTx256Octetsto511Octets),
	STATS_OFFSET32(stat_EtherStatsPktsTx512Octetsto1023Octets),
	STATS_OFFSET32(stat_EtherStatsPktsTx1024Octetsto1522Octets),
	STATS_OFFSET32(stat_EtherStatsPktsTx1523Octetsto9022Octets),
	STATS_OFFSET32(stat_XonPauseFramesReceived),
	STATS_OFFSET32(stat_XoffPauseFramesReceived),
	STATS_OFFSET32(stat_OutXonSent),
	STATS_OFFSET32(stat_OutXoffSent),
	STATS_OFFSET32(stat_MacControlFramesReceived),
	STATS_OFFSET32(stat_IfInFramesL2FilterDiscards),
	STATS_OFFSET32(stat_IfInMBUFDiscards),
	STATS_OFFSET32(stat_FwRxDrop),
};

/* stat_IfHCInBadOctets and stat_Dot3StatsCarrierSenseErrors are
 * skipped because of errata.
 */
static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
	8,0,8,8,8,8,8,8,8,8,
	4,0,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,
};

static u8 bnx2_5708_stats_len_arr[BNX2_NUM_STATS] = {
	8,0,8,8,8,8,8,8,8,8,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,
};
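
/* Self-test plumbing for ethtool -t: the first three tests disturb the
 * chip and only run when offline testing is requested; the last three
 * are safe while the interface is up.
 */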

#define BNX2_NUM_TESTS 6

static struct {
	char string[ETH_GSTRING_LEN];
} bnx2_tests_str_arr[BNX2_NUM_TESTS] = {
	{ "register_test (offline)" },
	{ "memory_test (offline)" },
	{ "loopback_test (offline)" },
	{ "nvram_test (online)" },
	{ "interrupt_test (online)" },
	{ "link_test (online)" },
};

static int
bnx2_self_test_count(struct net_device *dev)
{
	return BNX2_NUM_TESTS;
}

static void
bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
{
	struct bnx2 *bp = netdev_priv(dev);

	memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS);
	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		int i;

		bnx2_netif_stop(bp);
		bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
		bnx2_free_skbs(bp);

		if (bnx2_test_registers(bp) != 0) {
			buf[0] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		if (bnx2_test_memory(bp) != 0) {
			buf[1] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		if ((buf[2] = bnx2_test_loopback(bp)) != 0)
			etest->flags |= ETH_TEST_FL_FAILED;

		if (!netif_running(bp->dev)) {
			bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
		}
		else {
			bnx2_init_nic(bp);
			bnx2_netif_start(bp);
		}

		/* wait for link up */
		for (i = 0; i < 7; i++) {
			if (bp->link_up)
				break;
			msleep_interruptible(1000);
		}
	}

	if (bnx2_test_nvram(bp) != 0) {
		buf[3] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	if (bnx2_test_intr(bp) != 0) {
		buf[4] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}

	if (bnx2_test_link(bp) != 0) {
		buf[5] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
}

static void
bnx2_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
{
	switch (stringset) {
	case ETH_SS_STATS:
		memcpy(buf, bnx2_stats_str_arr,
			sizeof(bnx2_stats_str_arr));
		break;
	case ETH_SS_TEST:
		memcpy(buf, bnx2_tests_str_arr,
			sizeof(bnx2_tests_str_arr));
		break;
	}
}

static int
bnx2_get_stats_count(struct net_device *dev)
{
	return BNX2_NUM_STATS;
}

static void
bnx2_get_ethtool_stats(struct net_device *dev,
		struct ethtool_stats *stats, u64 *buf)
{
	struct bnx2 *bp = netdev_priv(dev);
	int i;
	u32 *hw_stats = (u32 *) bp->stats_blk;
	u8 *stats_len_arr = NULL;

	if (hw_stats == NULL) {
		memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS);
		return;
	}

	if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
	    (CHIP_ID(bp) == CHIP_ID_5706_A1) ||
	    (CHIP_ID(bp) == CHIP_ID_5706_A2) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_A0))
		stats_len_arr = bnx2_5706_stats_len_arr;
	else
		stats_len_arr = bnx2_5708_stats_len_arr;

	for (i = 0; i < BNX2_NUM_STATS; i++) {
		if (stats_len_arr[i] == 0) {
			/* skip this counter */
			buf[i] = 0;
			continue;
		}
		if (stats_len_arr[i] == 4) {
			/* 4-byte counter */
			buf[i] = (u64)
				*(hw_stats + bnx2_stats_offset_arr[i]);
			continue;
		}
		/* 8-byte counter */
		buf[i] = (((u64) *(hw_stats +
				   bnx2_stats_offset_arr[i])) << 32) +
			 *(hw_stats + bnx2_stats_offset_arr[i] + 1);
	}
}
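
/* ethtool -p: blink the port LED by overriding the EMAC LED controls for
 * roughly `data' seconds (one on/off cycle per second), then restore the
 * saved LED mode.
 */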

static int
bnx2_phys_id(struct net_device *dev, u32 data)
{
	struct bnx2 *bp = netdev_priv(dev);
	int i;
	u32 save;

	if (data == 0)
		data = 2;

	save = REG_RD(bp, BNX2_MISC_CFG);
	REG_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);

	for (i = 0; i < (data * 2); i++) {
		if ((i % 2) == 0) {
			REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE);
		}
		else {
			REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE |
				BNX2_EMAC_LED_1000MB_OVERRIDE |
				BNX2_EMAC_LED_100MB_OVERRIDE |
				BNX2_EMAC_LED_10MB_OVERRIDE |
				BNX2_EMAC_LED_TRAFFIC_OVERRIDE |
				BNX2_EMAC_LED_TRAFFIC);
		}
		msleep_interruptible(500);
		if (signal_pending(current))
			break;
	}
	REG_WR(bp, BNX2_EMAC_LED, 0);
	REG_WR(bp, BNX2_MISC_CFG, save);
	return 0;
}

static const struct ethtool_ops bnx2_ethtool_ops = {
	.get_settings		= bnx2_get_settings,
	.set_settings		= bnx2_set_settings,
	.get_drvinfo		= bnx2_get_drvinfo,
	.get_regs_len		= bnx2_get_regs_len,
	.get_regs		= bnx2_get_regs,
	.get_wol		= bnx2_get_wol,
	.set_wol		= bnx2_set_wol,
	.nway_reset		= bnx2_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_eeprom_len		= bnx2_get_eeprom_len,
	.get_eeprom		= bnx2_get_eeprom,
	.set_eeprom		= bnx2_set_eeprom,
	.get_coalesce		= bnx2_get_coalesce,
	.set_coalesce		= bnx2_set_coalesce,
	.get_ringparam		= bnx2_get_ringparam,
	.set_ringparam		= bnx2_set_ringparam,
	.get_pauseparam		= bnx2_get_pauseparam,
	.set_pauseparam		= bnx2_set_pauseparam,
	.get_rx_csum		= bnx2_get_rx_csum,
	.set_rx_csum		= bnx2_set_rx_csum,
	.get_tx_csum		= ethtool_op_get_tx_csum,
	.set_tx_csum		= ethtool_op_set_tx_csum,
	.get_sg			= ethtool_op_get_sg,
	.set_sg			= ethtool_op_set_sg,
	.get_tso		= ethtool_op_get_tso,
	.set_tso		= bnx2_set_tso,
	.self_test_count	= bnx2_self_test_count,
	.self_test		= bnx2_self_test,
	.get_strings		= bnx2_get_strings,
	.phys_id		= bnx2_phys_id,
	.get_stats_count	= bnx2_get_stats_count,
	.get_ethtool_stats	= bnx2_get_ethtool_stats,
	.get_perm_addr		= ethtool_op_get_perm_addr,
};
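
/* Hooked up in bnx2_init_one() via dev->ethtool_ops. */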

/* Called with rtnl_lock */
static int
bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *data = if_mii(ifr);
	struct bnx2 *bp = netdev_priv(dev);
	int err;

	switch(cmd) {
	case SIOCGMIIPHY:
		data->phy_id = bp->phy_addr;

		/* fallthru */
	case SIOCGMIIREG: {
		u32 mii_regval;

		if (!netif_running(dev))
			return -EAGAIN;

		spin_lock_bh(&bp->phy_lock);
		err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval);
		spin_unlock_bh(&bp->phy_lock);

		data->val_out = mii_regval;

		return err;
	}

	case SIOCSMIIREG:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;

		if (!netif_running(dev))
			return -EAGAIN;

		spin_lock_bh(&bp->phy_lock);
		err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in);
		spin_unlock_bh(&bp->phy_lock);

		return err;

	default:
		/* do nothing */
		break;
	}
	return -EOPNOTSUPP;
}

/* Called with rtnl_lock */
static int
bnx2_change_mac_addr(struct net_device *dev, void *p)
{
	struct sockaddr *addr = p;
	struct bnx2 *bp = netdev_priv(dev);

	if (!is_valid_ether_addr(addr->sa_data))
		return -EINVAL;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	if (netif_running(dev))
		bnx2_set_mac_addr(bp);

	return 0;
}

/* Called with rtnl_lock */
static int
bnx2_change_mtu(struct net_device *dev, int new_mtu)
{
	struct bnx2 *bp = netdev_priv(dev);

	if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) ||
		((new_mtu + ETH_HLEN) < MIN_ETHERNET_PACKET_SIZE))
		return -EINVAL;

	dev->mtu = new_mtu;
	if (netif_running(dev)) {
		bnx2_netif_stop(bp);

		bnx2_init_nic(bp);

		bnx2_netif_start(bp);
	}
	return 0;
}

#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
static void
poll_bnx2(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);

	disable_irq(bp->pdev->irq);
	bnx2_interrupt(bp->pdev->irq, dev);
	enable_irq(bp->pdev->irq);
}
#endif
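
/* The 5709 is a dual-media part: whether a port is copper or SERDES
 * depends on the bond id and, when that is ambiguous, on strap pins
 * (optionally overridden through BNX2_MISC_DUAL_MEDIA_CTRL), decoded
 * per PCI function below.
 */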

static void __devinit
bnx2_get_5709_media(struct bnx2 *bp)
{
	u32 val = REG_RD(bp, BNX2_MISC_DUAL_MEDIA_CTRL);
	u32 bond_id = val & BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID;
	u32 strap;

	if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_C)
		return;
	else if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_S) {
		bp->phy_flags |= PHY_SERDES_FLAG;
		return;
	}

	if (val & BNX2_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE)
		strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL) >> 21;
	else
		strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP) >> 8;

	if (PCI_FUNC(bp->pdev->devfn) == 0) {
		switch (strap) {
		case 0x4:
		case 0x5:
		case 0x6:
			bp->phy_flags |= PHY_SERDES_FLAG;
			return;
		}
	} else {
		switch (strap) {
		case 0x1:
		case 0x2:
		case 0x4:
			bp->phy_flags |= PHY_SERDES_FLAG;
			return;
		}
	}
}
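
/* One-time board setup: enable the PCI device, map BAR 0, pick the DMA
 * masks, detect bus type and speed, read the permanent MAC address from
 * shared memory, and establish link and coalescing defaults.  Interrupts
 * are not requested here; that happens when the device is opened.
 */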

static int __devinit
bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
{
	struct bnx2 *bp;
	unsigned long mem_len;
	int rc;
	u32 reg;

	SET_MODULE_OWNER(dev);
	SET_NETDEV_DEV(dev, &pdev->dev);
	bp = netdev_priv(dev);

	bp->flags = 0;
	bp->phy_flags = 0;

	/* enable device (incl. PCI PM wakeup), and bus-mastering */
	rc = pci_enable_device(pdev);
	if (rc) {
		dev_err(&pdev->dev, "Cannot enable PCI device, aborting.\n");
		goto err_out;
	}

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		dev_err(&pdev->dev,
			"Cannot find PCI device base address, aborting.\n");
		rc = -ENODEV;
		goto err_out_disable;
	}

	rc = pci_request_regions(pdev, DRV_MODULE_NAME);
	if (rc) {
		dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting.\n");
		goto err_out_disable;
	}

	pci_set_master(pdev);

	bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (bp->pm_cap == 0) {
		dev_err(&pdev->dev,
			"Cannot find power management capability, aborting.\n");
		rc = -EIO;
		goto err_out_release;
	}

	if (pci_set_dma_mask(pdev, DMA_64BIT_MASK) == 0) {
		bp->flags |= USING_DAC_FLAG;
		if (pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK) != 0) {
			dev_err(&pdev->dev,
				"pci_set_consistent_dma_mask failed, aborting.\n");
			rc = -EIO;
			goto err_out_release;
		}
	}
	else if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) != 0) {
		dev_err(&pdev->dev, "System does not support DMA, aborting.\n");
		rc = -EIO;
		goto err_out_release;
	}

	bp->dev = dev;
	bp->pdev = pdev;

	spin_lock_init(&bp->phy_lock);
	INIT_WORK(&bp->reset_task, bnx2_reset_task);

	dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0);
	mem_len = MB_GET_CID_ADDR(TX_TSS_CID + 1);
	dev->mem_end = dev->mem_start + mem_len;
	dev->irq = pdev->irq;

	bp->regview = ioremap_nocache(dev->base_addr, mem_len);

	if (!bp->regview) {
		dev_err(&pdev->dev, "Cannot map register space, aborting.\n");
		rc = -ENOMEM;
		goto err_out_release;
	}

	/* Configure byte swap and enable write to the reg_window registers.
	 * Rely on CPU to do target byte swapping on big endian systems
	 * The chip's target access swapping will not swap all accesses
	 */
	pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG,
			       BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
			       BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);

	bnx2_set_power_state(bp, PCI_D0);

	bp->chip_id = REG_RD(bp, BNX2_MISC_ID);

	if (CHIP_NUM(bp) != CHIP_NUM_5709) {
		bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
		if (bp->pcix_cap == 0) {
			dev_err(&pdev->dev,
				"Cannot find PCIX capability, aborting.\n");
			rc = -EIO;
			goto err_out_unmap;
		}
	}

	/* Get bus information. */
	reg = REG_RD(bp, BNX2_PCICFG_MISC_STATUS);
	if (reg & BNX2_PCICFG_MISC_STATUS_PCIX_DET) {
		u32 clkreg;

		bp->flags |= PCIX_FLAG;

		clkreg = REG_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS);

		clkreg &= BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
		switch (clkreg) {
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
			bp->bus_speed_mhz = 133;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
			bp->bus_speed_mhz = 100;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
			bp->bus_speed_mhz = 66;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
			bp->bus_speed_mhz = 50;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
			bp->bus_speed_mhz = 33;
			break;
		}
	}
	else {
		if (reg & BNX2_PCICFG_MISC_STATUS_M66EN)
			bp->bus_speed_mhz = 66;
		else
			bp->bus_speed_mhz = 33;
	}

	if (reg & BNX2_PCICFG_MISC_STATUS_32BIT_DET)
		bp->flags |= PCI_32BIT_FLAG;

	/* 5706A0 may falsely detect SERR and PERR. */
	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
		reg = REG_RD(bp, PCI_COMMAND);
		reg &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
		REG_WR(bp, PCI_COMMAND, reg);
	}
	else if ((CHIP_ID(bp) == CHIP_ID_5706_A1) &&
		!(bp->flags & PCIX_FLAG)) {

		dev_err(&pdev->dev,
			"5706 A1 can only be used in a PCIX bus, aborting.\n");
		goto err_out_unmap;
	}

	bnx2_init_nvram(bp);

	reg = REG_RD_IND(bp, BNX2_SHM_HDR_SIGNATURE);

	if ((reg & BNX2_SHM_HDR_SIGNATURE_SIG_MASK) ==
	    BNX2_SHM_HDR_SIGNATURE_SIG) {
		u32 off = PCI_FUNC(pdev->devfn) << 2;

		bp->shmem_base = REG_RD_IND(bp, BNX2_SHM_HDR_ADDR_0 + off);
	} else
		bp->shmem_base = HOST_VIEW_SHMEM_BASE;

	/* Get the permanent MAC address.  First we need to make sure the
	 * firmware is actually running.
	 */
	reg = REG_RD_IND(bp, bp->shmem_base + BNX2_DEV_INFO_SIGNATURE);

	if ((reg & BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
	    BNX2_DEV_INFO_SIGNATURE_MAGIC) {
		dev_err(&pdev->dev, "Firmware not running, aborting.\n");
		rc = -ENODEV;
		goto err_out_unmap;
	}

	bp->fw_ver = REG_RD_IND(bp, bp->shmem_base + BNX2_DEV_INFO_BC_REV);

	reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_MAC_UPPER);
	bp->mac_addr[0] = (u8) (reg >> 8);
	bp->mac_addr[1] = (u8) reg;

	reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_MAC_LOWER);
	bp->mac_addr[2] = (u8) (reg >> 24);
	bp->mac_addr[3] = (u8) (reg >> 16);
	bp->mac_addr[4] = (u8) (reg >> 8);
	bp->mac_addr[5] = (u8) reg;

	bp->tx_ring_size = MAX_TX_DESC_CNT;
	bnx2_set_rx_ring_size(bp, 255);

	bp->rx_csum = 1;

	bp->rx_offset = sizeof(struct l2_fhdr) + 2;

	bp->tx_quick_cons_trip_int = 20;
	bp->tx_quick_cons_trip = 20;
	bp->tx_ticks_int = 80;
	bp->tx_ticks = 80;

	bp->rx_quick_cons_trip_int = 6;
	bp->rx_quick_cons_trip = 6;
	bp->rx_ticks_int = 18;
	bp->rx_ticks = 18;

	bp->stats_ticks = 1000000 & 0xffff00;

	bp->timer_interval = HZ;
	bp->current_interval = HZ;

	bp->phy_addr = 1;

	/* Disable WOL support if we are running on a SERDES chip. */
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_get_5709_media(bp);
	else if (CHIP_BOND_ID(bp) & CHIP_BOND_ID_SERDES_BIT)
		bp->phy_flags |= PHY_SERDES_FLAG;

	if (bp->phy_flags & PHY_SERDES_FLAG) {
		bp->flags |= NO_WOL_FLAG;
		if (CHIP_NUM(bp) != CHIP_NUM_5706) {
			bp->phy_addr = 2;
			reg = REG_RD_IND(bp, bp->shmem_base +
					 BNX2_SHARED_HW_CFG_CONFIG);
			if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G)
				bp->phy_flags |= PHY_2_5G_CAPABLE_FLAG;
		}
	} else if (CHIP_NUM(bp) == CHIP_NUM_5706 ||
		   CHIP_NUM(bp) == CHIP_NUM_5708)
		bp->phy_flags |= PHY_CRC_FIX_FLAG;
	else if (CHIP_ID(bp) == CHIP_ID_5709_A0)
		bp->phy_flags |= PHY_DIS_EARLY_DAC_FLAG;

	if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B1))
		bp->flags |= NO_WOL_FLAG;

	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
		bp->tx_quick_cons_trip_int =
			bp->tx_quick_cons_trip;
		bp->tx_ticks_int = bp->tx_ticks;
		bp->rx_quick_cons_trip_int =
			bp->rx_quick_cons_trip;
		bp->rx_ticks_int = bp->rx_ticks;
		bp->comp_prod_trip_int = bp->comp_prod_trip;
		bp->com_ticks_int = bp->com_ticks;
		bp->cmd_ticks_int = bp->cmd_ticks;
	}

	/* Disable MSI on 5706 if AMD 8132 bridge is found.
	 *
	 * MSI is defined to be 32-bit write.  The 5706 does 64-bit MSI writes
	 * with byte enables disabled on the unused 32-bit word.  This is legal
	 * but causes problems on the AMD 8132 which will eventually stop
	 * responding after a while.
	 *
	 * AMD believes this incompatibility is unique to the 5706, and
	 * prefers to locally disable MSI rather than globally disabling it.
	 */
	if (CHIP_NUM(bp) == CHIP_NUM_5706 && disable_msi == 0) {
		struct pci_dev *amd_8132 = NULL;

		while ((amd_8132 = pci_get_device(PCI_VENDOR_ID_AMD,
						  PCI_DEVICE_ID_AMD_8132_BRIDGE,
						  amd_8132))) {
			u8 rev;

			pci_read_config_byte(amd_8132, PCI_REVISION_ID, &rev);
			if (rev >= 0x10 && rev <= 0x13) {
				disable_msi = 1;
				pci_dev_put(amd_8132);
				break;
			}
		}
	}

	bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
	bp->req_line_speed = 0;
	if (bp->phy_flags & PHY_SERDES_FLAG) {
		bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;

		reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG);
		reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
		if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
			bp->autoneg = 0;
			bp->req_line_speed = bp->line_speed = SPEED_1000;
			bp->req_duplex = DUPLEX_FULL;
		}
	}
	else {
		bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
	}

	bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;

	init_timer(&bp->timer);
	bp->timer.expires = RUN_AT(bp->timer_interval);
	bp->timer.data = (unsigned long) bp;
	bp->timer.function = bnx2_timer;

	return 0;

err_out_unmap:
	if (bp->regview) {
		iounmap(bp->regview);
		bp->regview = NULL;
	}

err_out_release:
	pci_release_regions(pdev);

err_out_disable:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

err_out:
	return rc;
}
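
/* PCI probe entry point: allocate the net_device, run bnx2_init_board(),
 * fill in the netdev method pointers, and register with the stack.  A
 * register_netdev() failure unwinds the board setup by hand.
 */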

static int __devinit
bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	static int version_printed = 0;
	struct net_device *dev = NULL;
	struct bnx2 *bp;
	int rc, i;

	if (version_printed++ == 0)
		printk(KERN_INFO "%s", version);

	/* dev zeroed in init_etherdev */
	dev = alloc_etherdev(sizeof(*bp));

	if (!dev)
		return -ENOMEM;

	rc = bnx2_init_board(pdev, dev);
	if (rc < 0) {
		free_netdev(dev);
		return rc;
	}

	dev->open = bnx2_open;
	dev->hard_start_xmit = bnx2_start_xmit;
	dev->stop = bnx2_close;
	dev->get_stats = bnx2_get_stats;
	dev->set_multicast_list = bnx2_set_rx_mode;
	dev->do_ioctl = bnx2_ioctl;
	dev->set_mac_address = bnx2_change_mac_addr;
	dev->change_mtu = bnx2_change_mtu;
	dev->tx_timeout = bnx2_tx_timeout;
	dev->watchdog_timeo = TX_TIMEOUT;
#ifdef BCM_VLAN
	dev->vlan_rx_register = bnx2_vlan_rx_register;
	dev->vlan_rx_kill_vid = bnx2_vlan_rx_kill_vid;
#endif
	dev->poll = bnx2_poll;
	dev->ethtool_ops = &bnx2_ethtool_ops;
	dev->weight = 64;

	bp = netdev_priv(dev);

#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
	dev->poll_controller = poll_bnx2;
#endif

	if ((rc = register_netdev(dev))) {
		dev_err(&pdev->dev, "Cannot register net device\n");
		if (bp->regview)
			iounmap(bp->regview);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
		pci_set_drvdata(pdev, NULL);
		free_netdev(dev);
		return rc;
	}

	pci_set_drvdata(pdev, dev);

	memcpy(dev->dev_addr, bp->mac_addr, 6);
	memcpy(dev->perm_addr, bp->mac_addr, 6);
	bp->name = board_info[ent->driver_data].name;
	printk(KERN_INFO "%s: %s (%c%d) PCI%s %s %dMHz found at mem %lx, "
		"IRQ %d, ",
		dev->name,
		bp->name,
		((CHIP_ID(bp) & 0xf000) >> 12) + 'A',
		((CHIP_ID(bp) & 0x0ff0) >> 4),
		((bp->flags & PCIX_FLAG) ? "-X" : ""),
		((bp->flags & PCI_32BIT_FLAG) ? "32-bit" : "64-bit"),
		bp->bus_speed_mhz,
		dev->base_addr,
		bp->pdev->irq);

	printk("node addr ");
	for (i = 0; i < 6; i++)
		printk("%2.2x", dev->dev_addr[i]);
	printk("\n");

	dev->features |= NETIF_F_SG;
	if (bp->flags & USING_DAC_FLAG)
		dev->features |= NETIF_F_HIGHDMA;
	dev->features |= NETIF_F_IP_CSUM;
#ifdef BCM_VLAN
	dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
#endif
	dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;

	netif_carrier_off(bp->dev);

	return 0;
}

static void __devexit
bnx2_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	flush_scheduled_work();

	unregister_netdev(dev);

	if (bp->regview)
		iounmap(bp->regview);

	free_netdev(dev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}

static int
bnx2_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);
	u32 reset_code;

	if (!netif_running(dev))
		return 0;

	flush_scheduled_work();
	bnx2_netif_stop(bp);
	netif_device_detach(dev);
	del_timer_sync(&bp->timer);
	if (bp->flags & NO_WOL_FLAG)
		reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
	else if (bp->wol)
		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
	else
		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
	bnx2_reset_chip(bp, reset_code);
	bnx2_free_skbs(bp);
	pci_save_state(pdev);
	bnx2_set_power_state(bp, pci_choose_state(pdev, state));
	return 0;
}

static int
bnx2_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	if (!netif_running(dev))
		return 0;

	pci_restore_state(pdev);
	bnx2_set_power_state(bp, PCI_D0);
	netif_device_attach(dev);
	bnx2_init_nic(bp);
	bnx2_netif_start(bp);
	return 0;
}
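
/* Tie the probe/remove and power-management callbacks into the PCI core;
 * module init/exit below just register and unregister this driver.
 */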

static struct pci_driver bnx2_pci_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= bnx2_pci_tbl,
	.probe		= bnx2_init_one,
	.remove		= __devexit_p(bnx2_remove_one),
	.suspend	= bnx2_suspend,
	.resume		= bnx2_resume,
};

static int __init bnx2_init(void)
{
	return pci_register_driver(&bnx2_pci_driver);
}

static void __exit bnx2_cleanup(void)
{
	pci_unregister_driver(&bnx2_pci_driver);
}

module_init(bnx2_init);
module_exit(bnx2_cleanup);