/* bnx2.c: Broadcom NX2 network driver.
 *
 * Copyright (c) 2004, 2005, 2006 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Written by: Michael Chan  (mchan@broadcom.com)
 */
#include <linux/module.h>
#include <linux/moduleparam.h>

#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <asm/bitops.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <asm/page.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#ifdef NETIF_F_HW_VLAN_TX
#include <linux/if_vlan.h>
#define BCM_VLAN 1
#endif
#include <net/ip.h>
#include <net/tcp.h>
#include <net/checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/prefetch.h>
#include <linux/cache.h>
#include <linux/zlib.h>

#include "bnx2.h"
#include "bnx2_fw.h"
#include "bnx2_fw2.h"
#define DRV_MODULE_NAME		"bnx2"
#define PFX DRV_MODULE_NAME	": "
#define DRV_MODULE_VERSION	"1.5.8"
#define DRV_MODULE_RELDATE	"April 24, 2007"

#define RUN_AT(x) (jiffies + (x))

/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT  (5*HZ)
static const char version[] __devinitdata =
	"Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708 Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

static int disable_msi = 0;

module_param(disable_msi, int, 0);
MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
typedef enum {
	BCM5706 = 0,
	NC370T,
	NC370I,
	BCM5706S,
	NC370F,
	BCM5708,
	BCM5708S,
	BCM5709,
} board_t;

/* indexed by board_t, above */
static struct {
	char *name;
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM5706 1000Base-T" },
	{ "HP NC370T Multifunction Gigabit Server Adapter" },
	{ "HP NC370i Multifunction Gigabit Server Adapter" },
	{ "Broadcom NetXtreme II BCM5706 1000Base-SX" },
	{ "HP NC370F Multifunction Gigabit Server Adapter" },
	{ "Broadcom NetXtreme II BCM5708 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5708 1000Base-SX" },
	{ "Broadcom NetXtreme II BCM5709 1000Base-T" },
};
static struct pci_device_id bnx2_pci_tbl[] = {
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
	  PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709 },
	{ 0, }
};
static struct flash_spec flash_table[] =
{
	/* Slow EEPROM */
	{0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
	 1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - slow"},
	/* Expansion entry 0001 */
	{0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0001"},
	/* Saifun SA25F010 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
	 "Non-buffered flash (128kB)"},
	/* Saifun SA25F020 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
	 "Non-buffered flash (256kB)"},
	/* Expansion entry 0100 */
	{0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0100"},
	/* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
	{0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
	 0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
	 "Entry 0101: ST M45PE10 (128kB non-buffered)"},
	/* Entry 0110: ST M45PE20 (non-buffered flash) */
	{0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
	 0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
	 "Entry 0110: ST M45PE20 (256kB non-buffered)"},
	/* Saifun SA25F005 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
	 "Non-buffered flash (64kB)"},
	/* Fast EEPROM */
	{0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
	 1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - fast"},
	/* Expansion entry 1001 */
	{0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1001"},
	/* Expansion entry 1010 */
	{0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1010"},
	/* ATMEL AT45DB011B (buffered flash) */
	{0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
	 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
	 "Buffered flash (128kB)"},
	/* Expansion entry 1100 */
	{0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1100"},
	/* Expansion entry 1101 */
	{0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1101"},
	/* Atmel Expansion entry 1110 */
	{0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
	 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1110 (Atmel)"},
	/* ATMEL AT45DB021B (buffered flash) */
	{0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
	 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
	 "Buffered flash (256kB)"},
};
MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
static inline u32
bnx2_tx_avail(struct bnx2 *bp)
{
	u32 diff;

	smp_mb();

	/* The ring uses 256 indices for 255 entries, one of them
	 * needs to be skipped.
	 */
	diff = bp->tx_prod - bp->tx_cons;
	if (unlikely(diff >= TX_DESC_CNT)) {
		diff &= 0xffff;
		if (diff == TX_DESC_CNT)
			diff = MAX_TX_DESC_CNT;
	}
	return (bp->tx_ring_size - diff);
}
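/* Worked example of the arithmetic above (illustrative values only):
 * tx_prod and tx_cons wrap like 16-bit counters, so tx_prod = 0x0003
 * and tx_cons = 0xfffe yield diff = 5 after the mask, and the function
 * reports tx_ring_size - 5 free descriptors.
 */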
static u32
bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
{
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
	return (REG_RD(bp, BNX2_PCICFG_REG_WINDOW));
}
static void
bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
{
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
}
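/* The two helpers above implement windowed (indirect) register access:
 * the target offset is first latched into the PCICFG window address
 * register, after which BNX2_PCICFG_REG_WINDOW reads or writes the
 * selected location.  Callers are expected to serialize use of the
 * shared window.
 */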
static void
bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
{
	offset += cid_addr;
	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		int i;

		REG_WR(bp, BNX2_CTX_CTX_DATA, val);
		REG_WR(bp, BNX2_CTX_CTX_CTRL,
		       offset | BNX2_CTX_CTX_CTRL_WRITE_REQ);
		for (i = 0; i < 5; i++) {
			u32 val;
			val = REG_RD(bp, BNX2_CTX_CTX_CTRL);
			if ((val & BNX2_CTX_CTX_CTRL_WRITE_REQ) == 0)
				break;
			udelay(5);
		}
	} else {
		REG_WR(bp, BNX2_CTX_DATA_ADR, offset);
		REG_WR(bp, BNX2_CTX_DATA, val);
	}
}
static int
bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
{
	u32 val1;
	int i, ret;

	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	val1 = (bp->phy_addr << 21) | (reg << 16) |
		BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
		BNX2_EMAC_MDIO_COMM_START_BUSY;
	REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

	for (i = 0; i < 50; i++) {
		udelay(10);

		val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
			udelay(5);

			val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
			val1 &= BNX2_EMAC_MDIO_COMM_DATA;

			break;
		}
	}

	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
		*val = 0x0;
		ret = -EBUSY;
	}
	else {
		*val = val1;
		ret = 0;
	}

	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	return ret;
}
static int
bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
{
	u32 val1;
	int i, ret;

	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	val1 = (bp->phy_addr << 21) | (reg << 16) | val |
		BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
		BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
	REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

	for (i = 0; i < 50; i++) {
		udelay(10);

		val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
			udelay(5);
			break;
		}
	}

	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
		ret = -EBUSY;
	else
		ret = 0;

	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	return ret;
}
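/* Field layout of the BNX2_EMAC_MDIO_COMM word, as implied by the
 * shifts in the two PHY accessors above: bits 21 and up select the PHY
 * address, bits 16-20 the PHY register, and the low 16 bits carry the
 * data.  START_BUSY both kicks off the transaction and acts as the
 * completion flag polled by the loops.
 */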
static void
bnx2_disable_int(struct bnx2 *bp)
{
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
	       BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
	REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
}
static void
bnx2_enable_int(struct bnx2 *bp)
{
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
	       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
	       BNX2_PCICFG_INT_ACK_CMD_MASK_INT | bp->last_status_idx);

	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
	       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | bp->last_status_idx);

	REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
}
static void
bnx2_disable_int_sync(struct bnx2 *bp)
{
	atomic_inc(&bp->intr_sem);
	bnx2_disable_int(bp);
	synchronize_irq(bp->pdev->irq);
}
static void
bnx2_netif_stop(struct bnx2 *bp)
{
	bnx2_disable_int_sync(bp);
	if (netif_running(bp->dev)) {
		netif_poll_disable(bp->dev);
		netif_tx_disable(bp->dev);
		bp->dev->trans_start = jiffies;	/* prevent tx timeout */
	}
}
static void
bnx2_netif_start(struct bnx2 *bp)
{
	if (atomic_dec_and_test(&bp->intr_sem)) {
		if (netif_running(bp->dev)) {
			netif_wake_queue(bp->dev);
			netif_poll_enable(bp->dev);
			bnx2_enable_int(bp);
		}
	}
}
static void
bnx2_free_mem(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->ctx_pages; i++) {
		if (bp->ctx_blk[i]) {
			pci_free_consistent(bp->pdev, BCM_PAGE_SIZE,
					    bp->ctx_blk[i],
					    bp->ctx_blk_mapping[i]);
			bp->ctx_blk[i] = NULL;
		}
	}
	if (bp->status_blk) {
		pci_free_consistent(bp->pdev, bp->status_stats_size,
				    bp->status_blk, bp->status_blk_mapping);
		bp->status_blk = NULL;
		bp->stats_blk = NULL;
	}
	if (bp->tx_desc_ring) {
		pci_free_consistent(bp->pdev,
				    sizeof(struct tx_bd) * TX_DESC_CNT,
				    bp->tx_desc_ring, bp->tx_desc_mapping);
		bp->tx_desc_ring = NULL;
	}
	kfree(bp->tx_buf_ring);
	bp->tx_buf_ring = NULL;
	for (i = 0; i < bp->rx_max_ring; i++) {
		if (bp->rx_desc_ring[i])
			pci_free_consistent(bp->pdev,
					    sizeof(struct rx_bd) * RX_DESC_CNT,
					    bp->rx_desc_ring[i],
					    bp->rx_desc_mapping[i]);
		bp->rx_desc_ring[i] = NULL;
	}
	vfree(bp->rx_buf_ring);
	bp->rx_buf_ring = NULL;
}
static int
bnx2_alloc_mem(struct bnx2 *bp)
{
	int i, status_blk_size;

	bp->tx_buf_ring = kzalloc(sizeof(struct sw_bd) * TX_DESC_CNT,
				  GFP_KERNEL);
	if (bp->tx_buf_ring == NULL)
		return -ENOMEM;

	bp->tx_desc_ring = pci_alloc_consistent(bp->pdev,
						sizeof(struct tx_bd) *
						TX_DESC_CNT,
						&bp->tx_desc_mapping);
	if (bp->tx_desc_ring == NULL)
		goto alloc_mem_err;

	bp->rx_buf_ring = vmalloc(sizeof(struct sw_bd) * RX_DESC_CNT *
				  bp->rx_max_ring);
	if (bp->rx_buf_ring == NULL)
		goto alloc_mem_err;

	memset(bp->rx_buf_ring, 0, sizeof(struct sw_bd) * RX_DESC_CNT *
				   bp->rx_max_ring);

	for (i = 0; i < bp->rx_max_ring; i++) {
		bp->rx_desc_ring[i] =
			pci_alloc_consistent(bp->pdev,
					     sizeof(struct rx_bd) * RX_DESC_CNT,
					     &bp->rx_desc_mapping[i]);
		if (bp->rx_desc_ring[i] == NULL)
			goto alloc_mem_err;
	}

	/* Combine status and statistics blocks into one allocation. */
	status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
	bp->status_stats_size = status_blk_size +
				sizeof(struct statistics_block);

	bp->status_blk = pci_alloc_consistent(bp->pdev, bp->status_stats_size,
					      &bp->status_blk_mapping);
	if (bp->status_blk == NULL)
		goto alloc_mem_err;

	memset(bp->status_blk, 0, bp->status_stats_size);

	bp->stats_blk = (void *) ((unsigned long) bp->status_blk +
				  status_blk_size);

	bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		bp->ctx_pages = 0x2000 / BCM_PAGE_SIZE;
		if (bp->ctx_pages == 0)
			bp->ctx_pages = 1;
		for (i = 0; i < bp->ctx_pages; i++) {
			bp->ctx_blk[i] = pci_alloc_consistent(bp->pdev,
						BCM_PAGE_SIZE,
						&bp->ctx_blk_mapping[i]);
			if (bp->ctx_blk[i] == NULL)
				goto alloc_mem_err;
		}
	}
	return 0;

alloc_mem_err:
	bnx2_free_mem(bp);
	return -ENOMEM;
}
static void
bnx2_report_fw_link(struct bnx2 *bp)
{
	u32 fw_link_status = 0;

	if (bp->link_up) {
		u32 bmsr;

		switch (bp->line_speed) {
		case SPEED_10:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_10HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_10FULL;
			break;
		case SPEED_100:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_100HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_100FULL;
			break;
		case SPEED_1000:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_1000HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_1000FULL;
			break;
		case SPEED_2500:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_2500HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_2500FULL;
			break;
		}

		fw_link_status |= BNX2_LINK_STATUS_LINK_UP;

		if (bp->autoneg) {
			fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;

			bnx2_read_phy(bp, MII_BMSR, &bmsr);
			bnx2_read_phy(bp, MII_BMSR, &bmsr);

			if (!(bmsr & BMSR_ANEGCOMPLETE) ||
			    bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)
				fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
			else
				fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
		}
	}
	else
		fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;

	REG_WR_IND(bp, bp->shmem_base + BNX2_LINK_STATUS, fw_link_status);
}
static void
bnx2_report_link(struct bnx2 *bp)
{
	if (bp->link_up) {
		netif_carrier_on(bp->dev);
		printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);

		printk("%d Mbps ", bp->line_speed);

		if (bp->duplex == DUPLEX_FULL)
			printk("full duplex");
		else
			printk("half duplex");

		if (bp->flow_ctrl) {
			if (bp->flow_ctrl & FLOW_CTRL_RX) {
				printk(", receive ");
				if (bp->flow_ctrl & FLOW_CTRL_TX)
					printk("& transmit ");
			}
			else {
				printk(", transmit ");
			}
			printk("flow control ON");
		}
		printk("\n");
	}
	else {
		netif_carrier_off(bp->dev);
		printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
	}

	bnx2_report_fw_link(bp);
}
static void
bnx2_resolve_flow_ctrl(struct bnx2 *bp)
{
	u32 local_adv, remote_adv;

	bp->flow_ctrl = 0;
	if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
	    (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {

		if (bp->duplex == DUPLEX_FULL) {
			bp->flow_ctrl = bp->req_flow_ctrl;
		}
		return;
	}

	if (bp->duplex != DUPLEX_FULL) {
		return;
	}

	if ((bp->phy_flags & PHY_SERDES_FLAG) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5708)) {
		u32 val;

		bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
		if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_TX;
		if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_RX;
		return;
	}

	bnx2_read_phy(bp, MII_ADVERTISE, &local_adv);
	bnx2_read_phy(bp, MII_LPA, &remote_adv);

	if (bp->phy_flags & PHY_SERDES_FLAG) {
		u32 new_local_adv = 0;
		u32 new_remote_adv = 0;

		if (local_adv & ADVERTISE_1000XPAUSE)
			new_local_adv |= ADVERTISE_PAUSE_CAP;
		if (local_adv & ADVERTISE_1000XPSE_ASYM)
			new_local_adv |= ADVERTISE_PAUSE_ASYM;
		if (remote_adv & ADVERTISE_1000XPAUSE)
			new_remote_adv |= ADVERTISE_PAUSE_CAP;
		if (remote_adv & ADVERTISE_1000XPSE_ASYM)
			new_remote_adv |= ADVERTISE_PAUSE_ASYM;

		local_adv = new_local_adv;
		remote_adv = new_remote_adv;
	}

	/* See Table 28B-3 of 802.3ab-1999 spec. */
	if (local_adv & ADVERTISE_PAUSE_CAP) {
		if (local_adv & ADVERTISE_PAUSE_ASYM) {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
			else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
				bp->flow_ctrl = FLOW_CTRL_RX;
			}
		}
		else {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
		}
	}
	else if (local_adv & ADVERTISE_PAUSE_ASYM) {
		if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
		    (remote_adv & ADVERTISE_PAUSE_ASYM)) {
			bp->flow_ctrl = FLOW_CTRL_TX;
		}
	}
}
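/* Resolution implemented above (local adv / remote adv -> resolved
 * flow control), per 802.3 Table 28B-3:
 *
 *   CAP          +  CAP            ->  TX | RX
 *   CAP | ASYM   +  ASYM only      ->  RX only
 *   ASYM only    +  CAP | ASYM     ->  TX only
 *   anything else                  ->  none
 */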
static int
bnx2_5708s_linkup(struct bnx2 *bp)
{
	u32 val;

	bp->link_up = 1;
	bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
	switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
	case BCM5708S_1000X_STAT1_SPEED_10:
		bp->line_speed = SPEED_10;
		break;
	case BCM5708S_1000X_STAT1_SPEED_100:
		bp->line_speed = SPEED_100;
		break;
	case BCM5708S_1000X_STAT1_SPEED_1G:
		bp->line_speed = SPEED_1000;
		break;
	case BCM5708S_1000X_STAT1_SPEED_2G5:
		bp->line_speed = SPEED_2500;
		break;
	}
	if (val & BCM5708S_1000X_STAT1_FD)
		bp->duplex = DUPLEX_FULL;
	else
		bp->duplex = DUPLEX_HALF;

	return 0;
}
static int
bnx2_5706s_linkup(struct bnx2 *bp)
{
	u32 bmcr, local_adv, remote_adv, common;

	bp->link_up = 1;
	bp->line_speed = SPEED_1000;

	bnx2_read_phy(bp, MII_BMCR, &bmcr);
	if (bmcr & BMCR_FULLDPLX) {
		bp->duplex = DUPLEX_FULL;
	}
	else {
		bp->duplex = DUPLEX_HALF;
	}

	if (!(bmcr & BMCR_ANENABLE)) {
		return 0;
	}

	bnx2_read_phy(bp, MII_ADVERTISE, &local_adv);
	bnx2_read_phy(bp, MII_LPA, &remote_adv);

	common = local_adv & remote_adv;
	if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {

		if (common & ADVERTISE_1000XFULL) {
			bp->duplex = DUPLEX_FULL;
		}
		else {
			bp->duplex = DUPLEX_HALF;
		}
	}

	return 0;
}
static int
bnx2_copper_linkup(struct bnx2 *bp)
{
	u32 bmcr;

	bnx2_read_phy(bp, MII_BMCR, &bmcr);
	if (bmcr & BMCR_ANENABLE) {
		u32 local_adv, remote_adv, common;

		bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
		bnx2_read_phy(bp, MII_STAT1000, &remote_adv);

		common = local_adv & (remote_adv >> 2);
		if (common & ADVERTISE_1000FULL) {
			bp->line_speed = SPEED_1000;
			bp->duplex = DUPLEX_FULL;
		}
		else if (common & ADVERTISE_1000HALF) {
			bp->line_speed = SPEED_1000;
			bp->duplex = DUPLEX_HALF;
		}
		else {
			bnx2_read_phy(bp, MII_ADVERTISE, &local_adv);
			bnx2_read_phy(bp, MII_LPA, &remote_adv);

			common = local_adv & remote_adv;
			if (common & ADVERTISE_100FULL) {
				bp->line_speed = SPEED_100;
				bp->duplex = DUPLEX_FULL;
			}
			else if (common & ADVERTISE_100HALF) {
				bp->line_speed = SPEED_100;
				bp->duplex = DUPLEX_HALF;
			}
			else if (common & ADVERTISE_10FULL) {
				bp->line_speed = SPEED_10;
				bp->duplex = DUPLEX_FULL;
			}
			else if (common & ADVERTISE_10HALF) {
				bp->line_speed = SPEED_10;
				bp->duplex = DUPLEX_HALF;
			}
			else {
				bp->line_speed = 0;
				bp->link_up = 0;
			}
		}
	}
	else {
		if (bmcr & BMCR_SPEED100) {
			bp->line_speed = SPEED_100;
		}
		else {
			bp->line_speed = SPEED_10;
		}
		if (bmcr & BMCR_FULLDPLX) {
			bp->duplex = DUPLEX_FULL;
		}
		else {
			bp->duplex = DUPLEX_HALF;
		}
	}

	return 0;
}
static void
bnx2_set_mac_link(struct bnx2 *bp)
{
	u32 val;

	REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
	if (bp->link_up && (bp->line_speed == SPEED_1000) &&
		(bp->duplex == DUPLEX_HALF)) {
		REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
	}

	/* Configure the EMAC mode register. */
	val = REG_RD(bp, BNX2_EMAC_MODE);

	val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
		BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
		BNX2_EMAC_MODE_25G_MODE);

	if (bp->link_up) {
		switch (bp->line_speed) {
		case SPEED_10:
			if (CHIP_NUM(bp) != CHIP_NUM_5706) {
				val |= BNX2_EMAC_MODE_PORT_MII_10M;
				break;
			}
			/* fall through */
		case SPEED_100:
			val |= BNX2_EMAC_MODE_PORT_MII;
			break;
		case SPEED_2500:
			val |= BNX2_EMAC_MODE_25G_MODE;
			/* fall through */
		case SPEED_1000:
			val |= BNX2_EMAC_MODE_PORT_GMII;
			break;
		}
	}
	else {
		val |= BNX2_EMAC_MODE_PORT_GMII;
	}

	/* Set the MAC to operate in the appropriate duplex mode. */
	if (bp->duplex == DUPLEX_HALF)
		val |= BNX2_EMAC_MODE_HALF_DUPLEX;
	REG_WR(bp, BNX2_EMAC_MODE, val);

	/* Enable/disable rx PAUSE. */
	bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_RX)
		bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);

	/* Enable/disable tx PAUSE. */
	val = REG_RD(bp, BNX2_EMAC_TX_MODE);
	val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_TX)
		val |= BNX2_EMAC_TX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_TX_MODE, val);

	/* Acknowledge the interrupt. */
	REG_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);
}
static int
bnx2_set_link(struct bnx2 *bp)
{
	u32 bmsr;
	u8 link_up;

	if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) {
		bp->link_up = 1;
		return 0;
	}

	link_up = bp->link_up;

	bnx2_read_phy(bp, MII_BMSR, &bmsr);
	bnx2_read_phy(bp, MII_BMSR, &bmsr);

	if ((bp->phy_flags & PHY_SERDES_FLAG) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5706)) {
		u32 val;

		val = REG_RD(bp, BNX2_EMAC_STATUS);
		if (val & BNX2_EMAC_STATUS_LINK)
			bmsr |= BMSR_LSTATUS;
		else
			bmsr &= ~BMSR_LSTATUS;
	}

	if (bmsr & BMSR_LSTATUS) {
		bp->link_up = 1;

		if (bp->phy_flags & PHY_SERDES_FLAG) {
			if (CHIP_NUM(bp) == CHIP_NUM_5706)
				bnx2_5706s_linkup(bp);
			else if (CHIP_NUM(bp) == CHIP_NUM_5708)
				bnx2_5708s_linkup(bp);
		}
		else {
			bnx2_copper_linkup(bp);
		}
		bnx2_resolve_flow_ctrl(bp);
	}
	else {
		if ((bp->phy_flags & PHY_SERDES_FLAG) &&
		    (bp->autoneg & AUTONEG_SPEED)) {
			u32 bmcr;

			bnx2_read_phy(bp, MII_BMCR, &bmcr);
			bmcr &= ~BCM5708S_BMCR_FORCE_2500;
			if (!(bmcr & BMCR_ANENABLE)) {
				bnx2_write_phy(bp, MII_BMCR, bmcr |
					BMCR_ANENABLE);
			}
		}
		bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
		bp->link_up = 0;
	}

	if (bp->link_up != link_up) {
		bnx2_report_link(bp);
	}

	bnx2_set_mac_link(bp);

	return 0;
}
static int
bnx2_reset_phy(struct bnx2 *bp)
{
	int i;
	u32 reg;

	bnx2_write_phy(bp, MII_BMCR, BMCR_RESET);

#define PHY_RESET_MAX_WAIT 100
	for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
		udelay(10);

		bnx2_read_phy(bp, MII_BMCR, &reg);
		if (!(reg & BMCR_RESET)) {
			udelay(20);
			break;
		}
	}
	if (i == PHY_RESET_MAX_WAIT) {
		return -EBUSY;
	}
	return 0;
}
static u32
bnx2_phy_get_pause_adv(struct bnx2 *bp)
{
	u32 adv = 0;

	if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
		(FLOW_CTRL_RX | FLOW_CTRL_TX)) {

		if (bp->phy_flags & PHY_SERDES_FLAG) {
			adv = ADVERTISE_1000XPAUSE;
		}
		else {
			adv = ADVERTISE_PAUSE_CAP;
		}
	}
	else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
		if (bp->phy_flags & PHY_SERDES_FLAG) {
			adv = ADVERTISE_1000XPSE_ASYM;
		}
		else {
			adv = ADVERTISE_PAUSE_ASYM;
		}
	}
	else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
		if (bp->phy_flags & PHY_SERDES_FLAG) {
			adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
		}
		else {
			adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
		}
	}
	return adv;
}
static int
bnx2_setup_serdes_phy(struct bnx2 *bp)
{
	u32 adv, bmcr, up1;
	u32 new_adv = 0;

	if (!(bp->autoneg & AUTONEG_SPEED)) {
		u32 new_bmcr;
		int force_link_down = 0;

		bnx2_read_phy(bp, MII_ADVERTISE, &adv);
		adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);

		bnx2_read_phy(bp, MII_BMCR, &bmcr);
		new_bmcr = bmcr & ~(BMCR_ANENABLE | BCM5708S_BMCR_FORCE_2500);
		new_bmcr |= BMCR_SPEED1000;
		if (bp->req_line_speed == SPEED_2500) {
			new_bmcr |= BCM5708S_BMCR_FORCE_2500;
			bnx2_read_phy(bp, BCM5708S_UP1, &up1);
			if (!(up1 & BCM5708S_UP1_2G5)) {
				up1 |= BCM5708S_UP1_2G5;
				bnx2_write_phy(bp, BCM5708S_UP1, up1);
				force_link_down = 1;
			}
		} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
			bnx2_read_phy(bp, BCM5708S_UP1, &up1);
			if (up1 & BCM5708S_UP1_2G5) {
				up1 &= ~BCM5708S_UP1_2G5;
				bnx2_write_phy(bp, BCM5708S_UP1, up1);
				force_link_down = 1;
			}
		}

		if (bp->req_duplex == DUPLEX_FULL) {
			adv |= ADVERTISE_1000XFULL;
			new_bmcr |= BMCR_FULLDPLX;
		}
		else {
			adv |= ADVERTISE_1000XHALF;
			new_bmcr &= ~BMCR_FULLDPLX;
		}
		if ((new_bmcr != bmcr) || (force_link_down)) {
			/* Force a link down visible on the other side */
			if (bp->link_up) {
				bnx2_write_phy(bp, MII_ADVERTISE, adv &
					       ~(ADVERTISE_1000XFULL |
						 ADVERTISE_1000XHALF));
				bnx2_write_phy(bp, MII_BMCR, bmcr |
					BMCR_ANRESTART | BMCR_ANENABLE);

				bp->link_up = 0;
				netif_carrier_off(bp->dev);
				bnx2_write_phy(bp, MII_BMCR, new_bmcr);
				bnx2_report_link(bp);
			}
			bnx2_write_phy(bp, MII_ADVERTISE, adv);
			bnx2_write_phy(bp, MII_BMCR, new_bmcr);
		}
		return 0;
	}

	if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) {
		bnx2_read_phy(bp, BCM5708S_UP1, &up1);
		up1 |= BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, BCM5708S_UP1, up1);
	}

	if (bp->advertising & ADVERTISED_1000baseT_Full)
		new_adv |= ADVERTISE_1000XFULL;

	new_adv |= bnx2_phy_get_pause_adv(bp);

	bnx2_read_phy(bp, MII_ADVERTISE, &adv);
	bnx2_read_phy(bp, MII_BMCR, &bmcr);

	bp->serdes_an_pending = 0;
	if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
		/* Force a link down visible on the other side */
		if (bp->link_up) {
			bnx2_write_phy(bp, MII_BMCR, BMCR_LOOPBACK);
			spin_unlock_bh(&bp->phy_lock);
			msleep(20);
			spin_lock_bh(&bp->phy_lock);
		}

		bnx2_write_phy(bp, MII_ADVERTISE, new_adv);
		bnx2_write_phy(bp, MII_BMCR, bmcr | BMCR_ANRESTART |
			BMCR_ANENABLE);
		/* Speed up link-up time when the link partner
		 * does not autonegotiate which is very common
		 * in blade servers. Some blade servers use
		 * IPMI for keyboard input and it's important
		 * to minimize link disruptions. Autoneg. involves
		 * exchanging base pages plus 3 next pages and
		 * normally completes in about 120 msec.
		 */
		bp->current_interval = SERDES_AN_TIMEOUT;
		bp->serdes_an_pending = 1;
		mod_timer(&bp->timer, jiffies + bp->current_interval);
	}

	return 0;
}
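/* As the comment above notes, a full 1000BASE-X autoneg exchange takes
 * roughly 120 msec, so SERDES_AN_TIMEOUT lets the timer routine fall
 * back to parallel detection when the partner never negotiates (for
 * example a forced-speed blade-chassis switch port).
 */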
#define ETHTOOL_ALL_FIBRE_SPEED						\
	(ADVERTISED_1000baseT_Full)

#define ETHTOOL_ALL_COPPER_SPEED					\
	(ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |		\
	ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |		\
	ADVERTISED_1000baseT_Full)

#define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
	ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)

#define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)
static int
bnx2_setup_copper_phy(struct bnx2 *bp)
{
	u32 bmcr;
	u32 new_bmcr;

	bnx2_read_phy(bp, MII_BMCR, &bmcr);

	if (bp->autoneg & AUTONEG_SPEED) {
		u32 adv_reg, adv1000_reg;
		u32 new_adv_reg = 0;
		u32 new_adv1000_reg = 0;

		bnx2_read_phy(bp, MII_ADVERTISE, &adv_reg);
		adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
			ADVERTISE_PAUSE_ASYM);

		bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
		adv1000_reg &= PHY_ALL_1000_SPEED;

		if (bp->advertising & ADVERTISED_10baseT_Half)
			new_adv_reg |= ADVERTISE_10HALF;
		if (bp->advertising & ADVERTISED_10baseT_Full)
			new_adv_reg |= ADVERTISE_10FULL;
		if (bp->advertising & ADVERTISED_100baseT_Half)
			new_adv_reg |= ADVERTISE_100HALF;
		if (bp->advertising & ADVERTISED_100baseT_Full)
			new_adv_reg |= ADVERTISE_100FULL;
		if (bp->advertising & ADVERTISED_1000baseT_Full)
			new_adv1000_reg |= ADVERTISE_1000FULL;

		new_adv_reg |= ADVERTISE_CSMA;

		new_adv_reg |= bnx2_phy_get_pause_adv(bp);

		if ((adv1000_reg != new_adv1000_reg) ||
			(adv_reg != new_adv_reg) ||
			((bmcr & BMCR_ANENABLE) == 0)) {

			bnx2_write_phy(bp, MII_ADVERTISE, new_adv_reg);
			bnx2_write_phy(bp, MII_CTRL1000, new_adv1000_reg);
			bnx2_write_phy(bp, MII_BMCR, BMCR_ANRESTART |
				BMCR_ANENABLE);
		}
		else if (bp->link_up) {
			/* Flow ctrl may have changed from auto to forced */
			/* or vice-versa. */

			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	new_bmcr = 0;
	if (bp->req_line_speed == SPEED_100) {
		new_bmcr |= BMCR_SPEED100;
	}
	if (bp->req_duplex == DUPLEX_FULL) {
		new_bmcr |= BMCR_FULLDPLX;
	}
	if (new_bmcr != bmcr) {
		u32 bmsr;

		bnx2_read_phy(bp, MII_BMSR, &bmsr);
		bnx2_read_phy(bp, MII_BMSR, &bmsr);

		if (bmsr & BMSR_LSTATUS) {
			/* Force link down */
			bnx2_write_phy(bp, MII_BMCR, BMCR_LOOPBACK);
			spin_unlock_bh(&bp->phy_lock);
			msleep(50);
			spin_lock_bh(&bp->phy_lock);

			bnx2_read_phy(bp, MII_BMSR, &bmsr);
			bnx2_read_phy(bp, MII_BMSR, &bmsr);
		}

		bnx2_write_phy(bp, MII_BMCR, new_bmcr);

		/* Normally, the new speed is setup after the link has
		 * gone down and up again. In some cases, link will not go
		 * down so we need to set up the new speed here.
		 */
		if (bmsr & BMSR_LSTATUS) {
			bp->line_speed = bp->req_line_speed;
			bp->duplex = bp->req_duplex;
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
	}
	return 0;
}
static int
bnx2_setup_phy(struct bnx2 *bp)
{
	if (bp->loopback == MAC_LOOPBACK)
		return 0;

	if (bp->phy_flags & PHY_SERDES_FLAG) {
		return (bnx2_setup_serdes_phy(bp));
	}
	else {
		return (bnx2_setup_copper_phy(bp));
	}
}
static int
bnx2_init_5708s_phy(struct bnx2 *bp)
{
	u32 val;

	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
	bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);

	bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
	val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);

	bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
	val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);

	if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) {
		bnx2_read_phy(bp, BCM5708S_UP1, &val);
		val |= BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, BCM5708S_UP1, val);
	}

	if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
		/* increase tx signal amplitude */
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
			       BCM5708S_BLK_ADDR_TX_MISC);
		bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
		val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
		bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
	}

	val = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG) &
	      BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;

	if (val) {
		u32 is_backplane;

		is_backplane = REG_RD_IND(bp, bp->shmem_base +
					  BNX2_SHARED_HW_CFG_CONFIG);
		if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_TX_MISC);
			bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_DIG);
		}
	}
	return 0;
}
static int
bnx2_init_5706s_phy(struct bnx2 *bp)
{
	bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;

	if (CHIP_NUM(bp) == CHIP_NUM_5706)
		REG_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300);

	if (bp->dev->mtu > 1500) {
		u32 val;

		/* Set extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);

		bnx2_write_phy(bp, 0x1c, 0x6c00);
		bnx2_read_phy(bp, 0x1c, &val);
		bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
	}
	else {
		u32 val;

		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val & ~0x4007);

		bnx2_write_phy(bp, 0x1c, 0x6c00);
		bnx2_read_phy(bp, 0x1c, &val);
		bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
	}

	return 0;
}
static int
bnx2_init_copper_phy(struct bnx2 *bp)
{
	u32 val;

	if (bp->phy_flags & PHY_CRC_FIX_FLAG) {
		bnx2_write_phy(bp, 0x18, 0x0c00);
		bnx2_write_phy(bp, 0x17, 0x000a);
		bnx2_write_phy(bp, 0x15, 0x310b);
		bnx2_write_phy(bp, 0x17, 0x201f);
		bnx2_write_phy(bp, 0x15, 0x9506);
		bnx2_write_phy(bp, 0x17, 0x401f);
		bnx2_write_phy(bp, 0x15, 0x14e2);
		bnx2_write_phy(bp, 0x18, 0x0400);
	}

	if (bp->phy_flags & PHY_DIS_EARLY_DAC_FLAG) {
		bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS,
			       MII_BNX2_DSP_EXPAND_REG | 0x8);
		bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
		val &= ~(1 << 8);
		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val);
	}

	if (bp->dev->mtu > 1500) {
		/* Set extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val | 0x4000);

		bnx2_read_phy(bp, 0x10, &val);
		bnx2_write_phy(bp, 0x10, val | 0x1);
	}
	else {
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val & ~0x4007);

		bnx2_read_phy(bp, 0x10, &val);
		bnx2_write_phy(bp, 0x10, val & ~0x1);
	}

	/* ethernet@wirespeed */
	bnx2_write_phy(bp, 0x18, 0x7007);
	bnx2_read_phy(bp, 0x18, &val);
	bnx2_write_phy(bp, 0x18, val | (1 << 15) | (1 << 4));
	return 0;
}
static int
bnx2_init_phy(struct bnx2 *bp)
{
	u32 val;
	int rc = 0;

	bp->phy_flags &= ~PHY_INT_MODE_MASK_FLAG;
	bp->phy_flags |= PHY_INT_MODE_LINK_READY_FLAG;

	REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);

	bnx2_reset_phy(bp);

	bnx2_read_phy(bp, MII_PHYSID1, &val);
	bp->phy_id = val << 16;
	bnx2_read_phy(bp, MII_PHYSID2, &val);
	bp->phy_id |= val & 0xffff;

	if (bp->phy_flags & PHY_SERDES_FLAG) {
		if (CHIP_NUM(bp) == CHIP_NUM_5706)
			rc = bnx2_init_5706s_phy(bp);
		else if (CHIP_NUM(bp) == CHIP_NUM_5708)
			rc = bnx2_init_5708s_phy(bp);
	}
	else {
		rc = bnx2_init_copper_phy(bp);
	}

	bnx2_setup_phy(bp);

	return rc;
}
static int
bnx2_set_mac_loopback(struct bnx2 *bp)
{
	u32 mac_mode;

	mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
	mac_mode &= ~BNX2_EMAC_MODE_PORT;
	mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
	REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
	bp->link_up = 1;
	return 0;
}
static int bnx2_test_link(struct bnx2 *);

static int
bnx2_set_phy_loopback(struct bnx2 *bp)
{
	u32 mac_mode;
	int rc, i;

	spin_lock_bh(&bp->phy_lock);
	rc = bnx2_write_phy(bp, MII_BMCR, BMCR_LOOPBACK | BMCR_FULLDPLX |
			    BMCR_SPEED1000);
	spin_unlock_bh(&bp->phy_lock);
	if (rc)
		return rc;

	for (i = 0; i < 10; i++) {
		if (bnx2_test_link(bp) == 0)
			break;
		msleep(100);
	}

	mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
	mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
		      BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
		      BNX2_EMAC_MODE_25G_MODE);

	mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
	REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
	bp->link_up = 1;
	return 0;
}
static int
bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int silent)
{
	int i;
	u32 val;

	bp->fw_wr_seq++;
	msg_data |= bp->fw_wr_seq;

	REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);

	/* wait for an acknowledgement. */
	for (i = 0; i < (FW_ACK_TIME_OUT_MS / 10); i++) {
		msleep(10);

		val = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_MB);

		if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
			break;
	}
	if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
		return 0;

	/* If we timed out, inform the firmware that this is the case. */
	if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
		if (!silent)
			printk(KERN_ERR PFX "fw sync timeout, reset code = "
					    "%x\n", msg_data);

		msg_data &= ~BNX2_DRV_MSG_CODE;
		msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;

		REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);

		return -EBUSY;
	}

	if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
		return -EIO;

	return 0;
}
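/* Handshake sequence above: the driver bumps fw_wr_seq, posts the
 * message through the shared-memory BNX2_DRV_MB mailbox, then polls
 * BNX2_FW_MB until the firmware echoes the same sequence number in its
 * ACK field.  A missed echo is reported back to the firmware with
 * BNX2_DRV_MSG_CODE_FW_TIMEOUT.
 */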
static int
bnx2_init_5709_context(struct bnx2 *bp)
{
	int i, ret = 0;
	u32 val;

	val = BNX2_CTX_COMMAND_ENABLED | BNX2_CTX_COMMAND_MEM_INIT | (1 << 12);
	val |= (BCM_PAGE_BITS - 8) << 16;
	REG_WR(bp, BNX2_CTX_COMMAND, val);
	for (i = 0; i < bp->ctx_pages; i++) {
		int j;

		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0,
		       (bp->ctx_blk_mapping[i] & 0xffffffff) |
		       BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID);
		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA1,
		       (u64) bp->ctx_blk_mapping[i] >> 32);
		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL, i |
		       BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
		for (j = 0; j < 10; j++) {

			val = REG_RD(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL);
			if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
				break;
			udelay(5);
		}
		if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
			ret = -EBUSY;
			break;
		}
	}
	return ret;
}
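/* Each loop iteration above publishes one host context page to the
 * 5709: the low and high halves of the page's DMA address go into the
 * page-table DATA registers, the entry index plus WRITE_REQ commits it
 * through the CTRL register, and WRITE_REQ doubles as the completion
 * bit that is polled until the chip clears it.
 */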
static void
bnx2_init_context(struct bnx2 *bp)
{
	u32 vcid;

	vcid = 96;
	while (vcid) {
		u32 vcid_addr, pcid_addr, offset;

		vcid--;

		if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
			u32 new_vcid;

			vcid_addr = GET_PCID_ADDR(vcid);
			if (vcid & 0x8) {
				new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
			}
			else {
				new_vcid = vcid;
			}
			pcid_addr = GET_PCID_ADDR(new_vcid);
		}
		else {
			vcid_addr = GET_CID_ADDR(vcid);
			pcid_addr = vcid_addr;
		}

		REG_WR(bp, BNX2_CTX_VIRT_ADDR, 0x00);
		REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);

		/* Zero out the context. */
		for (offset = 0; offset < PHY_CTX_SIZE; offset += 4) {
			CTX_WR(bp, 0x00, offset, 0);
		}

		REG_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
		REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
	}
}
static int
bnx2_alloc_bad_rbuf(struct bnx2 *bp)
{
	u16 *good_mbuf;
	u32 good_mbuf_cnt;
	u32 val;

	good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
	if (good_mbuf == NULL) {
		printk(KERN_ERR PFX "Failed to allocate memory in "
				    "bnx2_alloc_bad_rbuf\n");
		return -ENOMEM;
	}

	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
		BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);

	good_mbuf_cnt = 0;

	/* Allocate a bunch of mbufs and save the good ones in an array. */
	val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
	while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
		REG_WR_IND(bp, BNX2_RBUF_COMMAND, BNX2_RBUF_COMMAND_ALLOC_REQ);

		val = REG_RD_IND(bp, BNX2_RBUF_FW_BUF_ALLOC);

		val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;

		/* The addresses with Bit 9 set are bad memory blocks. */
		if (!(val & (1 << 9))) {
			good_mbuf[good_mbuf_cnt] = (u16) val;
			good_mbuf_cnt++;
		}

		val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
	}

	/* Free the good ones back to the mbuf pool thus discarding
	 * all the bad ones. */
	while (good_mbuf_cnt) {
		good_mbuf_cnt--;

		val = good_mbuf[good_mbuf_cnt];
		val = (val << 9) | val | 1;

		REG_WR_IND(bp, BNX2_RBUF_FW_BUF_FREE, val);
	}
	kfree(good_mbuf);
	return 0;
}
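/* Note on the free command above: the mbuf cluster number is packed
 * into both halves of the word ((val << 9) | val) together with an
 * enable bit.  The duplicated-field format is a hardware detail
 * inferred from the allocation loop; only known-good clusters are
 * written back, which is what quarantines the bad memory blocks.
 */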
static void
bnx2_set_mac_addr(struct bnx2 *bp)
{
	u32 val;
	u8 *mac_addr = bp->dev->dev_addr;

	val = (mac_addr[0] << 8) | mac_addr[1];

	REG_WR(bp, BNX2_EMAC_MAC_MATCH0, val);

	val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
		(mac_addr[4] << 8) | mac_addr[5];

	REG_WR(bp, BNX2_EMAC_MAC_MATCH1, val);
}
static inline int
bnx2_alloc_rx_skb(struct bnx2 *bp, u16 index)
{
	struct sk_buff *skb;
	struct sw_bd *rx_buf = &bp->rx_buf_ring[index];
	dma_addr_t mapping;
	struct rx_bd *rxbd = &bp->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
	unsigned long align;

	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
	if (skb == NULL) {
		return -ENOMEM;
	}

	if (unlikely((align = (unsigned long) skb->data & (BNX2_RX_ALIGN - 1))))
		skb_reserve(skb, BNX2_RX_ALIGN - align);

	mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
		PCI_DMA_FROMDEVICE);

	rx_buf->skb = skb;
	pci_unmap_addr_set(rx_buf, mapping, mapping);

	rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
	rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;

	bp->rx_prod_bseq += bp->rx_buf_use_size;

	return 0;
}
static void
bnx2_phy_int(struct bnx2 *bp)
{
	u32 new_link_state, old_link_state;

	new_link_state = bp->status_blk->status_attn_bits &
		STATUS_ATTN_BITS_LINK_STATE;
	old_link_state = bp->status_blk->status_attn_bits_ack &
		STATUS_ATTN_BITS_LINK_STATE;
	if (new_link_state != old_link_state) {
		if (new_link_state) {
			REG_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD,
				STATUS_ATTN_BITS_LINK_STATE);
		}
		else {
			REG_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD,
				STATUS_ATTN_BITS_LINK_STATE);
		}
		bnx2_set_link(bp);
	}
}
static void
bnx2_tx_int(struct bnx2 *bp)
{
	struct status_block *sblk = bp->status_blk;
	u16 hw_cons, sw_cons, sw_ring_cons;
	int tx_free_bd = 0;

	hw_cons = bp->hw_tx_cons = sblk->status_tx_quick_consumer_index0;
	if ((hw_cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT) {
		hw_cons++;
	}
	sw_cons = bp->tx_cons;

	while (sw_cons != hw_cons) {
		struct sw_bd *tx_buf;
		struct sk_buff *skb;
		int i, last;

		sw_ring_cons = TX_RING_IDX(sw_cons);

		tx_buf = &bp->tx_buf_ring[sw_ring_cons];
		skb = tx_buf->skb;

		/* partial BD completions possible with TSO packets */
		if (skb_is_gso(skb)) {
			u16 last_idx, last_ring_idx;

			last_idx = sw_cons +
				skb_shinfo(skb)->nr_frags + 1;
			last_ring_idx = sw_ring_cons +
				skb_shinfo(skb)->nr_frags + 1;
			if (unlikely(last_ring_idx >= MAX_TX_DESC_CNT)) {
				last_idx++;
			}
			if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
				break;
			}
		}

		pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
			skb_headlen(skb), PCI_DMA_TODEVICE);

		tx_buf->skb = NULL;
		last = skb_shinfo(skb)->nr_frags;

		for (i = 0; i < last; i++) {
			sw_cons = NEXT_TX_BD(sw_cons);

			pci_unmap_page(bp->pdev,
				pci_unmap_addr(
					&bp->tx_buf_ring[TX_RING_IDX(sw_cons)],
					mapping),
				skb_shinfo(skb)->frags[i].size,
				PCI_DMA_TODEVICE);
		}

		sw_cons = NEXT_TX_BD(sw_cons);

		tx_free_bd += last + 1;

		dev_kfree_skb(skb);

		hw_cons = bp->hw_tx_cons =
			sblk->status_tx_quick_consumer_index0;

		if ((hw_cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT) {
			hw_cons++;
		}
	}

	bp->tx_cons = sw_cons;
	/* Need to make the tx_cons update visible to bnx2_start_xmit()
	 * before checking for netif_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that bnx2_start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	if (unlikely(netif_queue_stopped(bp->dev)) &&
		     (bnx2_tx_avail(bp) > bp->tx_wake_thresh)) {
		netif_tx_lock(bp->dev);
		if ((netif_queue_stopped(bp->dev)) &&
		    (bnx2_tx_avail(bp) > bp->tx_wake_thresh))
			netif_wake_queue(bp->dev);
		netif_tx_unlock(bp->dev);
	}
}
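/* The smp_mb() above pairs with the smp_mb() in bnx2_tx_avail() on the
 * transmit path: the consumer publishes tx_cons before testing
 * netif_queue_stopped(), and the re-check under netif_tx_lock() closes
 * the remaining race window, so a queue wakeup cannot be lost.
 */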
static inline void
bnx2_reuse_rx_skb(struct bnx2 *bp, struct sk_buff *skb,
	u16 cons, u16 prod)
{
	struct sw_bd *cons_rx_buf, *prod_rx_buf;
	struct rx_bd *cons_bd, *prod_bd;

	cons_rx_buf = &bp->rx_buf_ring[cons];
	prod_rx_buf = &bp->rx_buf_ring[prod];

	pci_dma_sync_single_for_device(bp->pdev,
		pci_unmap_addr(cons_rx_buf, mapping),
		bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

	bp->rx_prod_bseq += bp->rx_buf_use_size;

	prod_rx_buf->skb = skb;

	if (cons == prod)
		return;

	pci_unmap_addr_set(prod_rx_buf, mapping,
			pci_unmap_addr(cons_rx_buf, mapping));

	cons_bd = &bp->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
	prod_bd = &bp->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
	prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
	prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
}
static int
bnx2_rx_int(struct bnx2 *bp, int budget)
{
	struct status_block *sblk = bp->status_blk;
	u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
	struct l2_fhdr *rx_hdr;
	int rx_pkt = 0;

	hw_cons = bp->hw_rx_cons = sblk->status_rx_quick_consumer_index0;
	if ((hw_cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT) {
		hw_cons++;
	}
	sw_cons = bp->rx_cons;
	sw_prod = bp->rx_prod;

	/* Memory barrier necessary as speculative reads of the rx
	 * buffer can be ahead of the index in the status block
	 */
	rmb();
	while (sw_cons != hw_cons) {
		unsigned int len;
		u32 status;
		struct sw_bd *rx_buf;
		struct sk_buff *skb;
		dma_addr_t dma_addr;

		sw_ring_cons = RX_RING_IDX(sw_cons);
		sw_ring_prod = RX_RING_IDX(sw_prod);

		rx_buf = &bp->rx_buf_ring[sw_ring_cons];

		skb = rx_buf->skb;

		rx_buf->skb = NULL;

		dma_addr = pci_unmap_addr(rx_buf, mapping);

		pci_dma_sync_single_for_cpu(bp->pdev, dma_addr,
			bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

		rx_hdr = (struct l2_fhdr *) skb->data;
		len = rx_hdr->l2_fhdr_pkt_len - 4;

		if ((status = rx_hdr->l2_fhdr_status) &
			(L2_FHDR_ERRORS_BAD_CRC |
			L2_FHDR_ERRORS_PHY_DECODE |
			L2_FHDR_ERRORS_ALIGNMENT |
			L2_FHDR_ERRORS_TOO_SHORT |
			L2_FHDR_ERRORS_GIANT_FRAME)) {

			goto reuse_rx;
		}

		/* Since we don't have a jumbo ring, copy small packets
		 * if mtu > 1500
		 */
		if ((bp->dev->mtu > 1500) && (len <= RX_COPY_THRESH)) {
			struct sk_buff *new_skb;

			new_skb = netdev_alloc_skb(bp->dev, len + 2);
			if (new_skb == NULL)
				goto reuse_rx;

			/* aligned copy */
			skb_copy_from_linear_data_offset(skb, bp->rx_offset - 2,
				      new_skb->data, len + 2);
			skb_reserve(new_skb, 2);
			skb_put(new_skb, len);

			bnx2_reuse_rx_skb(bp, skb,
				sw_ring_cons, sw_ring_prod);

			skb = new_skb;
		}
		else if (bnx2_alloc_rx_skb(bp, sw_ring_prod) == 0) {
			pci_unmap_single(bp->pdev, dma_addr,
				bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);

			skb_reserve(skb, bp->rx_offset);
			skb_put(skb, len);
		}
		else {
reuse_rx:
			bnx2_reuse_rx_skb(bp, skb,
				sw_ring_cons, sw_ring_prod);
			goto next_rx;
		}

		skb->protocol = eth_type_trans(skb, bp->dev);

		if ((len > (bp->dev->mtu + ETH_HLEN)) &&
			(ntohs(skb->protocol) != 0x8100)) {

			dev_kfree_skb(skb);
			goto next_rx;

		}

		skb->ip_summed = CHECKSUM_NONE;
		if (bp->rx_csum &&
			(status & (L2_FHDR_STATUS_TCP_SEGMENT |
			L2_FHDR_STATUS_UDP_DATAGRAM))) {

			if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
					      L2_FHDR_ERRORS_UDP_XSUM)) == 0))
				skb->ip_summed = CHECKSUM_UNNECESSARY;
		}

#ifdef BCM_VLAN
		if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) && (bp->vlgrp != 0)) {
			vlan_hwaccel_receive_skb(skb, bp->vlgrp,
				rx_hdr->l2_fhdr_vlan_tag);
		}
		else
#endif
			netif_receive_skb(skb);

		bp->dev->last_rx = jiffies;
		rx_pkt++;

next_rx:
		sw_cons = NEXT_RX_BD(sw_cons);
		sw_prod = NEXT_RX_BD(sw_prod);

		if ((rx_pkt == budget))
			break;

		/* Refresh hw_cons to see if there is new work */
		if (sw_cons == hw_cons) {
			hw_cons = bp->hw_rx_cons =
				sblk->status_rx_quick_consumer_index0;
			if ((hw_cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT)
				hw_cons++;
			rmb();
		}
	}
	bp->rx_cons = sw_cons;
	bp->rx_prod = sw_prod;

	REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, sw_prod);

	REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bp->rx_prod_bseq);

	return rx_pkt;
}
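/* Copy-break strategy in the receive loop above: with a jumbo MTU
 * every ring buffer is large, so frames up to RX_COPY_THRESH are
 * copied into a small freshly-allocated skb and the original buffer is
 * recycled via bnx2_reuse_rx_skb(), avoiding a full unmap/remap cycle
 * for small packets.
 */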
/* MSI ISR - The only difference between this and the INTx ISR
 * is that the MSI interrupt is always serviced.
 */
static irqreturn_t
bnx2_msi(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct bnx2 *bp = netdev_priv(dev);

	prefetch(bp->status_blk);
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	/* Return here if interrupt is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	netif_rx_schedule(dev);

	return IRQ_HANDLED;
}
static irqreturn_t
bnx2_interrupt(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct bnx2 *bp = netdev_priv(dev);

	/* When using INTx, it is possible for the interrupt to arrive
	 * at the CPU before the status block posted prior to the
	 * interrupt. Reading a register will flush the status block.
	 * When using MSI, the MSI message will always complete after
	 * the status block write.
	 */
	if ((bp->status_blk->status_idx == bp->last_status_idx) &&
	    (REG_RD(bp, BNX2_PCICFG_MISC_STATUS) &
	     BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
		return IRQ_NONE;

	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	/* Return here if interrupt is shared and is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	netif_rx_schedule(dev);

	return IRQ_HANDLED;
}
static inline int
bnx2_has_work(struct bnx2 *bp)
{
	struct status_block *sblk = bp->status_blk;

	if ((sblk->status_rx_quick_consumer_index0 != bp->hw_rx_cons) ||
	    (sblk->status_tx_quick_consumer_index0 != bp->hw_tx_cons))
		return 1;

	if ((sblk->status_attn_bits & STATUS_ATTN_BITS_LINK_STATE) !=
	    (sblk->status_attn_bits_ack & STATUS_ATTN_BITS_LINK_STATE))
		return 1;

	return 0;
}
static int
bnx2_poll(struct net_device *dev, int *budget)
{
	struct bnx2 *bp = netdev_priv(dev);

	if ((bp->status_blk->status_attn_bits &
	    STATUS_ATTN_BITS_LINK_STATE) !=
	    (bp->status_blk->status_attn_bits_ack &
	    STATUS_ATTN_BITS_LINK_STATE)) {

		spin_lock(&bp->phy_lock);
		bnx2_phy_int(bp);
		spin_unlock(&bp->phy_lock);

		/* This is needed to take care of transient status
		 * during link changes.
		 */
		REG_WR(bp, BNX2_HC_COMMAND,
		       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
		REG_RD(bp, BNX2_HC_COMMAND);
	}

	if (bp->status_blk->status_tx_quick_consumer_index0 != bp->hw_tx_cons)
		bnx2_tx_int(bp);

	if (bp->status_blk->status_rx_quick_consumer_index0 != bp->hw_rx_cons) {
		int orig_budget = *budget;
		int work_done;

		if (orig_budget > dev->quota)
			orig_budget = dev->quota;

		work_done = bnx2_rx_int(bp, orig_budget);
		*budget -= work_done;
		dev->quota -= work_done;
	}

	bp->last_status_idx = bp->status_blk->status_idx;
	rmb();

	if (!bnx2_has_work(bp)) {
		netif_rx_complete(dev);
		if (likely(bp->flags & USING_MSI_FLAG)) {
			REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
			       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
			       bp->last_status_idx);
			return 0;
		}

		REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
		       BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
		       bp->last_status_idx);

		REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
		       bp->last_status_idx);
		return 0;
	}

	return 1;
}
/* Called with rtnl_lock from vlan functions and also netif_tx_lock
 * from set_multicast.
 */
static void
bnx2_set_rx_mode(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 rx_mode, sort_mode;
	int i;

	spin_lock_bh(&bp->phy_lock);

	rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
				  BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
	sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
#ifdef BCM_VLAN
	if (!bp->vlgrp && !(bp->flags & ASF_ENABLE_FLAG))
		rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
#else
	if (!(bp->flags & ASF_ENABLE_FLAG))
		rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
#endif
	if (dev->flags & IFF_PROMISC) {
		/* Promiscuous mode. */
		rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
		sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
			     BNX2_RPM_SORT_USER0_PROM_VLAN;
	}
	else if (dev->flags & IFF_ALLMULTI) {
		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
			REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
			       0xffffffff);
		}
		sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
	}
	else {
		/* Accept one or more multicast(s). */
		struct dev_mc_list *mclist;
		u32 mc_filter[NUM_MC_HASH_REGISTERS];
		u32 regidx;
		u32 bit;
		u32 crc;

		memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);

		for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
		     i++, mclist = mclist->next) {

			crc = ether_crc_le(ETH_ALEN, mclist->dmi_addr);
			bit = crc & 0xff;
			regidx = (bit & 0xe0) >> 5;
			bit &= 0x1f;
			mc_filter[regidx] |= (1 << bit);
		}

		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
			REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
			       mc_filter[i]);
		}

		sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
	}

	if (rx_mode != bp->rx_mode) {
		bp->rx_mode = rx_mode;
		REG_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
	}

	REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
	REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
	REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);

	spin_unlock_bh(&bp->phy_lock);
}
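/* Hash walk-through for the multicast filter above, with an
 * illustrative CRC: if ether_crc_le() of the address ends in 0x5a,
 * then bit = 0x5a, regidx = (0x5a & 0xe0) >> 5 = 2 and
 * bit &= 0x1f = 0x1a, so bit 26 of BNX2_EMAC_MULTICAST_HASH2 is set.
 */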
#define FW_BUF_SIZE	0x8000

static int
bnx2_gunzip_init(struct bnx2 *bp)
{
	if ((bp->gunzip_buf = vmalloc(FW_BUF_SIZE)) == NULL)
		goto gunzip_nomem1;

	if ((bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL)) == NULL)
		goto gunzip_nomem2;

	bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(), GFP_KERNEL);
	if (bp->strm->workspace == NULL)
		goto gunzip_nomem3;

	return 0;

gunzip_nomem3:
	kfree(bp->strm);
	bp->strm = NULL;

gunzip_nomem2:
	vfree(bp->gunzip_buf);
	bp->gunzip_buf = NULL;

gunzip_nomem1:
	printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for "
			    "uncompression.\n", bp->dev->name);
	return -ENOMEM;
}
static void
bnx2_gunzip_end(struct bnx2 *bp)
{
	kfree(bp->strm->workspace);

	kfree(bp->strm);
	bp->strm = NULL;

	if (bp->gunzip_buf) {
		vfree(bp->gunzip_buf);
		bp->gunzip_buf = NULL;
	}
}
static int
bnx2_gunzip(struct bnx2 *bp, u8 *zbuf, int len, void **outbuf, int *outlen)
{
	int n, rc;

	/* check gzip header */
	if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED))
		return -EINVAL;

	n = 10;

#define FNAME	0x8
	if (zbuf[3] & FNAME)
		while ((zbuf[n++] != 0) && (n < len));

	bp->strm->next_in = zbuf + n;
	bp->strm->avail_in = len - n;
	bp->strm->next_out = bp->gunzip_buf;
	bp->strm->avail_out = FW_BUF_SIZE;

	rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
	if (rc != Z_OK)
		return rc;

	rc = zlib_inflate(bp->strm, Z_FINISH);

	*outlen = FW_BUF_SIZE - bp->strm->avail_out;
	*outbuf = bp->gunzip_buf;

	if ((rc != Z_OK) && (rc != Z_STREAM_END))
		printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
		       bp->dev->name, bp->strm->msg);

	zlib_inflateEnd(bp->strm);

	if (rc == Z_STREAM_END)
		return 0;

	return rc;
}
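/* Note on the zlib usage above: initializing with a negative window
 * size (-MAX_WBITS) selects raw deflate mode, so the gzip wrapper (the
 * 0x1f 0x8b magic, the Z_DEFLATED method byte and the optional FNAME
 * field) is checked and skipped by hand rather than parsed by zlib.
 */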
static void
load_rv2p_fw(struct bnx2 *bp, u32 *rv2p_code, u32 rv2p_code_len,
	u32 rv2p_proc)
{
	int i;
	u32 val;

	for (i = 0; i < rv2p_code_len; i += 8) {
		REG_WR(bp, BNX2_RV2P_INSTR_HIGH, cpu_to_le32(*rv2p_code));
		rv2p_code++;
		REG_WR(bp, BNX2_RV2P_INSTR_LOW, cpu_to_le32(*rv2p_code));
		rv2p_code++;

		if (rv2p_proc == RV2P_PROC1) {
			val = (i / 8) | BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
			REG_WR(bp, BNX2_RV2P_PROC1_ADDR_CMD, val);
		}
		else {
			val = (i / 8) | BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
			REG_WR(bp, BNX2_RV2P_PROC2_ADDR_CMD, val);
		}
	}

	/* Reset the processor, un-stall is done later. */
	if (rv2p_proc == RV2P_PROC1) {
		REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
	}
	else {
		REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
	}
}
static int
load_cpu_fw(struct bnx2 *bp, struct cpu_reg *cpu_reg, struct fw_info *fw)
{
	u32 offset;
	u32 val;
	int rc;

	/* Halt the CPU. */
	val = REG_RD_IND(bp, cpu_reg->mode);
	val |= cpu_reg->mode_value_halt;
	REG_WR_IND(bp, cpu_reg->mode, val);
	REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);

	/* Load the Text area. */
	offset = cpu_reg->spad_base + (fw->text_addr - cpu_reg->mips_view_base);
	if (fw->gz_text) {
		u32 text_len;
		void *text;

		rc = bnx2_gunzip(bp, fw->gz_text, fw->gz_text_len, &text,
				 &text_len);
		if (rc)
			return rc;

		fw->text = text;
	}
	if (fw->gz_text) {
		int j;

		for (j = 0; j < (fw->text_len / 4); j++, offset += 4) {
			REG_WR_IND(bp, offset, cpu_to_le32(fw->text[j]));
		}
	}

	/* Load the Data area. */
	offset = cpu_reg->spad_base + (fw->data_addr - cpu_reg->mips_view_base);
	if (fw->data) {
		int j;

		for (j = 0; j < (fw->data_len / 4); j++, offset += 4) {
			REG_WR_IND(bp, offset, fw->data[j]);
		}
	}

	/* Load the SBSS area. */
	offset = cpu_reg->spad_base + (fw->sbss_addr - cpu_reg->mips_view_base);
	if (fw->sbss) {
		int j;

		for (j = 0; j < (fw->sbss_len / 4); j++, offset += 4) {
			REG_WR_IND(bp, offset, fw->sbss[j]);
		}
	}

	/* Load the BSS area. */
	offset = cpu_reg->spad_base + (fw->bss_addr - cpu_reg->mips_view_base);
	if (fw->bss) {
		int j;

		for (j = 0; j < (fw->bss_len/4); j++, offset += 4) {
			REG_WR_IND(bp, offset, fw->bss[j]);
		}
	}

	/* Load the Read-Only area. */
	offset = cpu_reg->spad_base +
		(fw->rodata_addr - cpu_reg->mips_view_base);
	if (fw->rodata) {
		int j;

		for (j = 0; j < (fw->rodata_len / 4); j++, offset += 4) {
			REG_WR_IND(bp, offset, fw->rodata[j]);
		}
	}

	/* Clear the pre-fetch instruction. */
	REG_WR_IND(bp, cpu_reg->inst, 0);
	REG_WR_IND(bp, cpu_reg->pc, fw->start_addr);

	/* Start the CPU. */
	val = REG_RD_IND(bp, cpu_reg->mode);
	val &= ~cpu_reg->mode_value_halt;
	REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);
	REG_WR_IND(bp, cpu_reg->mode, val);

	return 0;
}
static int
bnx2_init_cpus(struct bnx2 *bp)
{
	struct cpu_reg cpu_reg;
	struct fw_info *fw;
	int rc = 0;
	void *text;
	u32 text_len;

	if ((rc = bnx2_gunzip_init(bp)) != 0)
		return rc;

	/* Initialize the RV2P processor. */
	rc = bnx2_gunzip(bp, bnx2_rv2p_proc1, sizeof(bnx2_rv2p_proc1), &text,
			 &text_len);
	if (rc)
		goto init_cpu_err;

	load_rv2p_fw(bp, text, text_len, RV2P_PROC1);

	rc = bnx2_gunzip(bp, bnx2_rv2p_proc2, sizeof(bnx2_rv2p_proc2), &text,
			 &text_len);
	if (rc)
		goto init_cpu_err;

	load_rv2p_fw(bp, text, text_len, RV2P_PROC2);

	/* Initialize the RX Processor. */
	cpu_reg.mode = BNX2_RXP_CPU_MODE;
	cpu_reg.mode_value_halt = BNX2_RXP_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX2_RXP_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX2_RXP_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX2_RXP_CPU_REG_FILE;
	cpu_reg.evmask = BNX2_RXP_CPU_EVENT_MASK;
	cpu_reg.pc = BNX2_RXP_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX2_RXP_CPU_INSTRUCTION;
	cpu_reg.bp = BNX2_RXP_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX2_RXP_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		fw = &bnx2_rxp_fw_09;
	else
		fw = &bnx2_rxp_fw_06;

	rc = load_cpu_fw(bp, &cpu_reg, fw);
	if (rc)
		goto init_cpu_err;

	/* Initialize the TX Processor. */
	cpu_reg.mode = BNX2_TXP_CPU_MODE;
	cpu_reg.mode_value_halt = BNX2_TXP_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX2_TXP_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX2_TXP_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX2_TXP_CPU_REG_FILE;
	cpu_reg.evmask = BNX2_TXP_CPU_EVENT_MASK;
	cpu_reg.pc = BNX2_TXP_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX2_TXP_CPU_INSTRUCTION;
	cpu_reg.bp = BNX2_TXP_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX2_TXP_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		fw = &bnx2_txp_fw_09;
	else
		fw = &bnx2_txp_fw_06;

	rc = load_cpu_fw(bp, &cpu_reg, fw);
	if (rc)
		goto init_cpu_err;

	/* Initialize the TX Patch-up Processor. */
	cpu_reg.mode = BNX2_TPAT_CPU_MODE;
	cpu_reg.mode_value_halt = BNX2_TPAT_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX2_TPAT_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX2_TPAT_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX2_TPAT_CPU_REG_FILE;
	cpu_reg.evmask = BNX2_TPAT_CPU_EVENT_MASK;
	cpu_reg.pc = BNX2_TPAT_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX2_TPAT_CPU_INSTRUCTION;
	cpu_reg.bp = BNX2_TPAT_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX2_TPAT_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		fw = &bnx2_tpat_fw_09;
	else
		fw = &bnx2_tpat_fw_06;

	rc = load_cpu_fw(bp, &cpu_reg, fw);
	if (rc)
		goto init_cpu_err;

	/* Initialize the Completion Processor. */
	cpu_reg.mode = BNX2_COM_CPU_MODE;
	cpu_reg.mode_value_halt = BNX2_COM_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX2_COM_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX2_COM_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX2_COM_CPU_REG_FILE;
	cpu_reg.evmask = BNX2_COM_CPU_EVENT_MASK;
	cpu_reg.pc = BNX2_COM_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX2_COM_CPU_INSTRUCTION;
	cpu_reg.bp = BNX2_COM_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX2_COM_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		fw = &bnx2_com_fw_09;
	else
		fw = &bnx2_com_fw_06;

	rc = load_cpu_fw(bp, &cpu_reg, fw);
	if (rc)
		goto init_cpu_err;

	/* Initialize the Command Processor. */
	cpu_reg.mode = BNX2_CP_CPU_MODE;
	cpu_reg.mode_value_halt = BNX2_CP_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX2_CP_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX2_CP_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX2_CP_CPU_REG_FILE;
	cpu_reg.evmask = BNX2_CP_CPU_EVENT_MASK;
	cpu_reg.pc = BNX2_CP_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX2_CP_CPU_INSTRUCTION;
	cpu_reg.bp = BNX2_CP_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX2_CP_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		fw = &bnx2_cp_fw_09;

		rc = load_cpu_fw(bp, &cpu_reg, fw);
		if (rc)
			goto init_cpu_err;
	}
init_cpu_err:
	bnx2_gunzip_end(bp);
	return rc;
}
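
/* bnx2_set_power_state() handles two transitions: back to D0 (clear
 * the PME status, then undo the magic-packet/ACPI receive modes), and
 * down to D3hot, where the MAC is reprogrammed for Wake-on-LAN:
 * autoneg is temporarily restricted to 10/100, port mode, multicast
 * hash and RPM sorting are set up to pass wakeup frames, and the
 * firmware is told whether to suspend with or without WoL before the
 * new power state is written to PCI_PM_CTRL.
 */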
static int
bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
{
	u16 pmcsr;

	pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);

	switch (state) {
	case PCI_D0: {
		u32 val;

		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
			(pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
			PCI_PM_CTRL_PME_STATUS);

		if (pmcsr & PCI_PM_CTRL_STATE_MASK)
			/* delay required during transition out of D3hot */
			msleep(20);

		val = REG_RD(bp, BNX2_EMAC_MODE);
		val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
		val &= ~BNX2_EMAC_MODE_MPKT;
		REG_WR(bp, BNX2_EMAC_MODE, val);

		val = REG_RD(bp, BNX2_RPM_CONFIG);
		val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
		REG_WR(bp, BNX2_RPM_CONFIG, val);
		break;
	}
	case PCI_D3hot: {
		int i;
		u32 val, wol_msg;

		if (bp->wol) {
			u32 advertising;
			u8 autoneg;

			autoneg = bp->autoneg;
			advertising = bp->advertising;

			bp->autoneg = AUTONEG_SPEED;
			bp->advertising = ADVERTISED_10baseT_Half |
				ADVERTISED_10baseT_Full |
				ADVERTISED_100baseT_Half |
				ADVERTISED_100baseT_Full |
				ADVERTISED_Autoneg;

			bnx2_setup_copper_phy(bp);

			bp->autoneg = autoneg;
			bp->advertising = advertising;

			bnx2_set_mac_addr(bp);

			val = REG_RD(bp, BNX2_EMAC_MODE);

			/* Enable port mode. */
			val &= ~BNX2_EMAC_MODE_PORT;
			val |= BNX2_EMAC_MODE_PORT_MII |
			       BNX2_EMAC_MODE_MPKT_RCVD |
			       BNX2_EMAC_MODE_ACPI_RCVD |
			       BNX2_EMAC_MODE_MPKT;

			REG_WR(bp, BNX2_EMAC_MODE, val);

			/* receive all multicast */
			for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
				REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
				       0xffffffff);
			}
			REG_WR(bp, BNX2_EMAC_RX_MODE,
			       BNX2_EMAC_RX_MODE_SORT_MODE);

			val = 1 | BNX2_RPM_SORT_USER0_BC_EN |
			      BNX2_RPM_SORT_USER0_MC_EN;
			REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
			REG_WR(bp, BNX2_RPM_SORT_USER0, val);
			REG_WR(bp, BNX2_RPM_SORT_USER0, val |
			       BNX2_RPM_SORT_USER0_ENA);

			/* Need to enable EMAC and RPM for WOL. */
			REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
			       BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
			       BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
			       BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);

			val = REG_RD(bp, BNX2_RPM_CONFIG);
			val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
			REG_WR(bp, BNX2_RPM_CONFIG, val);

			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
		}
		else {
			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
		}

		if (!(bp->flags & NO_WOL_FLAG))
			bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg, 0);

		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
		if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
		    (CHIP_ID(bp) == CHIP_ID_5706_A1)) {
			if (bp->wol)
				pmcsr |= 3;
		}
		else {
			pmcsr |= 3;
		}
		if (bp->wol)
			pmcsr |= PCI_PM_CTRL_PME_ENABLE;

		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
				      pmcsr);

		/* No more memory access after this point until
		 * device is brought back to D0.
		 */
		break;
	}
	default:
		return -EINVAL;
	}
	return 0;
}
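
/* NVRAM access is arbitrated in hardware.  The driver requests the
 * flash interface by setting its request bit in BNX2_NVM_SW_ARB and
 * then polls the corresponding grant bit (ARB_ARB2) up to
 * NVRAM_TIMEOUT_COUNT times; the release path clears the request and
 * waits for the grant bit to drop so other agents can take over.
 */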
static int
bnx2_acquire_nvram_lock(struct bnx2 *bp)
{
	u32 val;
	int j;

	/* Request access to the flash interface. */
	REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		val = REG_RD(bp, BNX2_NVM_SW_ARB);
		if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
			break;

		udelay(5);
	}

	if (j >= NVRAM_TIMEOUT_COUNT)
		return -EBUSY;

	return 0;
}

static int
bnx2_release_nvram_lock(struct bnx2 *bp)
{
	int j;
	u32 val;

	/* Relinquish nvram interface. */
	REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);

	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		val = REG_RD(bp, BNX2_NVM_SW_ARB);
		if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
			break;

		udelay(5);
	}

	if (j >= NVRAM_TIMEOUT_COUNT)
		return -EBUSY;

	return 0;
}

static int
bnx2_enable_nvram_write(struct bnx2 *bp)
{
	u32 val;

	val = REG_RD(bp, BNX2_MISC_CFG);
	REG_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);

	if (!bp->flash_info->buffered) {
		int j;

		REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
		REG_WR(bp, BNX2_NVM_COMMAND,
		       BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);

		for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
			udelay(5);

			val = REG_RD(bp, BNX2_NVM_COMMAND);
			if (val & BNX2_NVM_COMMAND_DONE)
				break;
		}

		if (j >= NVRAM_TIMEOUT_COUNT)
			return -EBUSY;
	}
	return 0;
}

static void
bnx2_disable_nvram_write(struct bnx2 *bp)
{
	u32 val;

	val = REG_RD(bp, BNX2_MISC_CFG);
	REG_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
}


static void
bnx2_enable_nvram_access(struct bnx2 *bp)
{
	u32 val;

	val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
	/* Enable both bits, even on read. */
	REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
	       val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
}

static void
bnx2_disable_nvram_access(struct bnx2 *bp)
{
	u32 val;

	val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
	/* Disable both bits, even after read. */
	REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
		val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
			BNX2_NVM_ACCESS_ENABLE_WR_EN));
}
static int
bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
{
	u32 cmd;
	int j;

	if (bp->flash_info->buffered)
		/* Buffered flash, no erase needed */
		return 0;

	/* Build an erase command */
	cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
	      BNX2_NVM_COMMAND_DOIT;

	/* Need to clear DONE bit separately. */
	REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);

	/* Address of the NVRAM to read from. */
	REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);

	/* Issue an erase command. */
	REG_WR(bp, BNX2_NVM_COMMAND, cmd);

	/* Wait for completion. */
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		u32 val;

		udelay(5);

		val = REG_RD(bp, BNX2_NVM_COMMAND);
		if (val & BNX2_NVM_COMMAND_DONE)
			break;
	}

	if (j >= NVRAM_TIMEOUT_COUNT)
		return -EBUSY;

	return 0;
}
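
/* Buffered (page-oriented) flash parts do not take a flat byte
 * address.  The dword accessors below translate a linear offset into
 * (page index << page_bits) + byte-within-page.  For example, a part
 * with 264-byte pages addressed with page_bits = 9 (the SEEPROM-style
 * entries in flash_table) maps linear offset 600 to page 2, byte 72,
 * i.e. (2 << 9) + 72 = 0x448.
 */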
static int
bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
{
	u32 cmd;
	int j;

	/* Build the command word. */
	cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;

	/* Calculate an offset of a buffered flash. */
	if (bp->flash_info->buffered) {
		offset = ((offset / bp->flash_info->page_size) <<
			   bp->flash_info->page_bits) +
			  (offset % bp->flash_info->page_size);
	}

	/* Need to clear DONE bit separately. */
	REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);

	/* Address of the NVRAM to read from. */
	REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);

	/* Issue a read command. */
	REG_WR(bp, BNX2_NVM_COMMAND, cmd);

	/* Wait for completion. */
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		u32 val;

		udelay(5);

		val = REG_RD(bp, BNX2_NVM_COMMAND);
		if (val & BNX2_NVM_COMMAND_DONE) {
			val = REG_RD(bp, BNX2_NVM_READ);

			val = be32_to_cpu(val);
			memcpy(ret_val, &val, 4);
			break;
		}
	}
	if (j >= NVRAM_TIMEOUT_COUNT)
		return -EBUSY;

	return 0;
}
static int
bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
{
	u32 cmd;
	u32 val32;
	int j;

	/* Build the command word. */
	cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;

	/* Calculate an offset of a buffered flash. */
	if (bp->flash_info->buffered) {
		offset = ((offset / bp->flash_info->page_size) <<
			  bp->flash_info->page_bits) +
			 (offset % bp->flash_info->page_size);
	}

	/* Need to clear DONE bit separately. */
	REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);

	memcpy(&val32, val, 4);
	val32 = cpu_to_be32(val32);

	/* Write the data. */
	REG_WR(bp, BNX2_NVM_WRITE, val32);

	/* Address of the NVRAM to write to. */
	REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);

	/* Issue the write command. */
	REG_WR(bp, BNX2_NVM_COMMAND, cmd);

	/* Wait for completion. */
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		udelay(5);

		if (REG_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
			break;
	}
	if (j >= NVRAM_TIMEOUT_COUNT)
		return -EBUSY;

	return 0;
}
static int
bnx2_init_nvram(struct bnx2 *bp)
{
	u32 val;
	int j, entry_count, rc;
	struct flash_spec *flash;

	/* Determine the selected interface. */
	val = REG_RD(bp, BNX2_NVM_CFG1);

	entry_count = sizeof(flash_table) / sizeof(struct flash_spec);

	rc = 0;
	if (val & 0x40000000) {

		/* Flash interface has been reconfigured */
		for (j = 0, flash = &flash_table[0]; j < entry_count;
		     j++, flash++) {
			if ((val & FLASH_BACKUP_STRAP_MASK) ==
			    (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
				bp->flash_info = flash;
				break;
			}
		}
	}
	else {
		u32 mask;
		/* Not yet been reconfigured */

		if (val & (1 << 23))
			mask = FLASH_BACKUP_STRAP_MASK;
		else
			mask = FLASH_STRAP_MASK;

		for (j = 0, flash = &flash_table[0]; j < entry_count;
		     j++, flash++) {

			if ((val & mask) == (flash->strapping & mask)) {
				bp->flash_info = flash;

				/* Request access to the flash interface. */
				if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
					return rc;

				/* Enable access to flash interface */
				bnx2_enable_nvram_access(bp);

				/* Reconfigure the flash interface */
				REG_WR(bp, BNX2_NVM_CFG1, flash->config1);
				REG_WR(bp, BNX2_NVM_CFG2, flash->config2);
				REG_WR(bp, BNX2_NVM_CFG3, flash->config3);
				REG_WR(bp, BNX2_NVM_WRITE1, flash->write1);

				/* Disable access to flash interface */
				bnx2_disable_nvram_access(bp);
				bnx2_release_nvram_lock(bp);

				break;
			}
		}
	} /* if (val & 0x40000000) */

	if (j == entry_count) {
		bp->flash_info = NULL;
		printk(KERN_ALERT PFX "Unknown flash/EEPROM type.\n");
		return -ENODEV;
	}

	val = REG_RD_IND(bp, bp->shmem_base + BNX2_SHARED_HW_CFG_CONFIG2);
	val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
	if (val)
		bp->flash_size = val;
	else
		bp->flash_size = bp->flash_info->total_size;

	return rc;
}
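
/* The NVRAM interface transfers whole dwords, framed by the FIRST and
 * LAST command flags.  bnx2_nvram_read() below therefore handles three
 * cases: a misaligned head (read the covering dword and copy out
 * pre_len bytes), a misaligned tail (round len32 up and drop the extra
 * bytes after the last read), and the aligned middle, which streams
 * dword by dword.
 */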
static int
bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
		int buf_size)
{
	int rc = 0;
	u32 cmd_flags, offset32, len32, extra;

	if (buf_size == 0)
		return 0;

	/* Request access to the flash interface. */
	if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
		return rc;

	/* Enable access to flash interface */
	bnx2_enable_nvram_access(bp);

	len32 = buf_size;
	offset32 = offset;
	extra = 0;

	cmd_flags = 0;

	if (offset32 & 3) {
		u8 buf[4];
		u32 pre_len;

		offset32 &= ~3;
		pre_len = 4 - (offset & 3);

		if (pre_len >= len32) {
			pre_len = len32;
			cmd_flags = BNX2_NVM_COMMAND_FIRST |
				    BNX2_NVM_COMMAND_LAST;
		}
		else {
			cmd_flags = BNX2_NVM_COMMAND_FIRST;
		}

		rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);

		if (rc)
			return rc;

		memcpy(ret_buf, buf + (offset & 3), pre_len);

		offset32 += 4;
		ret_buf += pre_len;
		len32 -= pre_len;
	}
	if (len32 & 3) {
		extra = 4 - (len32 & 3);
		len32 = (len32 + 4) & ~3;
	}

	if (len32 == 4) {
		u8 buf[4];

		if (cmd_flags)
			cmd_flags = BNX2_NVM_COMMAND_LAST;
		else
			cmd_flags = BNX2_NVM_COMMAND_FIRST |
				    BNX2_NVM_COMMAND_LAST;

		rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);

		memcpy(ret_buf, buf, 4 - extra);
	}
	else if (len32 > 0) {
		u8 buf[4];

		/* Read the first word. */
		if (cmd_flags)
			cmd_flags = 0;
		else
			cmd_flags = BNX2_NVM_COMMAND_FIRST;

		rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);

		/* Advance to the next dword. */
		offset32 += 4;
		ret_buf += 4;
		len32 -= 4;

		while (len32 > 4 && rc == 0) {
			rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);

			/* Advance to the next dword. */
			offset32 += 4;
			ret_buf += 4;
			len32 -= 4;
		}

		if (rc)
			return rc;

		cmd_flags = BNX2_NVM_COMMAND_LAST;
		rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);

		memcpy(ret_buf, buf, 4 - extra);
	}

	/* Disable access to flash interface */
	bnx2_disable_nvram_access(bp);

	bnx2_release_nvram_lock(bp);

	return rc;
}
static int
bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
		int buf_size)
{
	u32 written, offset32, len32;
	u8 *buf, start[4], end[4], *align_buf = NULL, *flash_buffer = NULL;
	int rc = 0;
	int align_start, align_end;

	buf = data_buf;
	offset32 = offset;
	len32 = buf_size;
	align_start = align_end = 0;

	if ((align_start = (offset32 & 3))) {
		offset32 &= ~3;
		len32 += align_start;
		if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
			return rc;
	}

	if (len32 & 3) {
		align_end = 4 - (len32 & 3);
		len32 += align_end;
		if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4, end, 4)))
			return rc;
	}

	if (align_start || align_end) {
		align_buf = kmalloc(len32, GFP_KERNEL);
		if (align_buf == NULL)
			return -ENOMEM;
		if (align_start) {
			memcpy(align_buf, start, 4);
		}
		if (align_end) {
			memcpy(align_buf + len32 - 4, end, 4);
		}
		memcpy(align_buf + align_start, data_buf, buf_size);
		buf = align_buf;
	}

	if (bp->flash_info->buffered == 0) {
		flash_buffer = kmalloc(264, GFP_KERNEL);
		if (flash_buffer == NULL) {
			rc = -ENOMEM;
			goto nvram_write_end;
		}
	}

	written = 0;
	while ((written < len32) && (rc == 0)) {
		u32 page_start, page_end, data_start, data_end;
		u32 addr, cmd_flags;
		int i;

		/* Find the page_start addr */
		page_start = offset32 + written;
		page_start -= (page_start % bp->flash_info->page_size);
		/* Find the page_end addr */
		page_end = page_start + bp->flash_info->page_size;
		/* Find the data_start addr */
		data_start = (written == 0) ? offset32 : page_start;
		/* Find the data_end addr */
		data_end = (page_end > offset32 + len32) ?
			(offset32 + len32) : page_end;

		/* Request access to the flash interface. */
		if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
			goto nvram_write_end;

		/* Enable access to flash interface */
		bnx2_enable_nvram_access(bp);

		cmd_flags = BNX2_NVM_COMMAND_FIRST;
		if (bp->flash_info->buffered == 0) {
			int j;

			/* Read the whole page into the buffer
			 * (non-buffer flash only) */
			for (j = 0; j < bp->flash_info->page_size; j += 4) {
				if (j == (bp->flash_info->page_size - 4)) {
					cmd_flags |= BNX2_NVM_COMMAND_LAST;
				}
				rc = bnx2_nvram_read_dword(bp,
					page_start + j,
					&flash_buffer[j],
					cmd_flags);

				if (rc)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Enable writes to flash interface (unlock write-protect) */
		if ((rc = bnx2_enable_nvram_write(bp)) != 0)
			goto nvram_write_end;

		/* Loop to write back the buffer data from page_start to
		 * data_start */
		i = 0;
		if (bp->flash_info->buffered == 0) {
			/* Erase the page */
			if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
				goto nvram_write_end;

			/* Re-enable the write again for the actual write */
			bnx2_enable_nvram_write(bp);

			for (addr = page_start; addr < data_start;
				addr += 4, i += 4) {

				rc = bnx2_nvram_write_dword(bp, addr,
					&flash_buffer[i], cmd_flags);

				if (rc != 0)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Loop to write the new data from data_start to data_end */
		for (addr = data_start; addr < data_end; addr += 4, i += 4) {
			if ((addr == page_end - 4) ||
				((bp->flash_info->buffered) &&
				 (addr == data_end - 4))) {

				cmd_flags |= BNX2_NVM_COMMAND_LAST;
			}
			rc = bnx2_nvram_write_dword(bp, addr, buf,
				cmd_flags);

			if (rc != 0)
				goto nvram_write_end;

			cmd_flags = 0;
			buf += 4;
		}

		/* Loop to write back the buffer data from data_end
		 * to page_end */
		if (bp->flash_info->buffered == 0) {
			for (addr = data_end; addr < page_end;
				addr += 4, i += 4) {

				if (addr == page_end - 4) {
					cmd_flags = BNX2_NVM_COMMAND_LAST;
				}
				rc = bnx2_nvram_write_dword(bp, addr,
					&flash_buffer[i], cmd_flags);

				if (rc != 0)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Disable writes to flash interface (lock write-protect) */
		bnx2_disable_nvram_write(bp);

		/* Disable access to flash interface */
		bnx2_disable_nvram_access(bp);
		bnx2_release_nvram_lock(bp);

		/* Increment written */
		written += data_end - data_start;
	}

nvram_write_end:
	kfree(flash_buffer);
	kfree(align_buf);
	return rc;
}
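
/* Chip reset sequence: quiesce DMA and host coalescing, handshake with
 * the bootcode (bnx2_fw_sync with WAIT0), deposit the driver reset
 * signature in shared memory so the firmware treats this as a soft
 * reset, issue the core reset (via BNX2_MISC_COMMAND on the 5709, via
 * PCICFG on older chips), then poll until the reset bits clear and
 * verify byte swapping through BNX2_PCI_SWAP_DIAG0.
 */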
static int
bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
{
	u32 val;
	int i, rc = 0;

	/* Wait for the current PCI transaction to complete before
	 * issuing a reset. */
	REG_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
	       BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
	       BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
	       BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
	       BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
	val = REG_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
	udelay(5);

	/* Wait for the firmware to tell us it is ok to issue a reset. */
	bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1);

	/* Deposit a driver reset signature so the firmware knows that
	 * this is a soft reset. */
	REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_RESET_SIGNATURE,
		   BNX2_DRV_RESET_SIGNATURE_MAGIC);

	/* Do a dummy read to force the chip to complete all current transaction
	 * before we issue a reset. */
	val = REG_RD(bp, BNX2_MISC_ID);

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		REG_WR(bp, BNX2_MISC_COMMAND, BNX2_MISC_COMMAND_SW_RESET);
		REG_RD(bp, BNX2_MISC_COMMAND);
		udelay(5);

		val = BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
		      BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;

		pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, val);

	} else {
		val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
		      BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
		      BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;

		/* Chip reset. */
		REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);

		if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
		    (CHIP_ID(bp) == CHIP_ID_5706_A1)) {
			current->state = TASK_UNINTERRUPTIBLE;
			schedule_timeout(HZ / 50);
		}

		/* Reset takes approximate 30 usec */
		for (i = 0; i < 10; i++) {
			val = REG_RD(bp, BNX2_PCICFG_MISC_CONFIG);
			if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
				    BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0)
				break;

			udelay(10);
		}

		if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
			   BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
			printk(KERN_ERR PFX "Chip reset did not complete\n");
			return -EBUSY;
		}
	}

	/* Make sure byte swapping is properly configured. */
	val = REG_RD(bp, BNX2_PCI_SWAP_DIAG0);
	if (val != 0x01020304) {
		printk(KERN_ERR PFX "Chip not in correct endian mode\n");
		return -ENODEV;
	}

	/* Wait for the firmware to finish its initialization. */
	rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 0);
	if (rc)
		return rc;

	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
		/* Adjust the voltage regular to two steps lower.  The default
		 * of this register is 0x0000000e. */
		REG_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);

		/* Remove bad rbuf memory from the free pool. */
		rc = bnx2_alloc_bad_rbuf(bp);
	}

	return rc;
}
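
/* bnx2_init_chip() re-programs everything a chip reset wipes out: DMA
 * byte/word swapping and channel counts, the context memory, the
 * internal CPUs, NVRAM strapping, the MAC address and MTU, and all the
 * host-coalescing trip counts and tick timers, before a final fw_sync
 * tells the bootcode the driver is fully up.
 */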
static int
bnx2_init_chip(struct bnx2 *bp)
{
	u32 val;
	int rc;

	/* Make sure the interrupt is not active. */
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
	      BNX2_DMA_CONFIG_DATA_WORD_SWAP |
#ifdef __BIG_ENDIAN
	      BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
#endif
	      BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
	      DMA_READ_CHANS << 12 |
	      DMA_WRITE_CHANS << 16;

	val |= (0x2 << 20) | (1 << 11);

	if ((bp->flags & PCIX_FLAG) && (bp->bus_speed_mhz == 133))
		val |= (1 << 23);

	if ((CHIP_NUM(bp) == CHIP_NUM_5706) &&
	    (CHIP_ID(bp) != CHIP_ID_5706_A0) && !(bp->flags & PCIX_FLAG))
		val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;

	REG_WR(bp, BNX2_DMA_CONFIG, val);

	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
		val = REG_RD(bp, BNX2_TDMA_CONFIG);
		val |= BNX2_TDMA_CONFIG_ONE_DMA;
		REG_WR(bp, BNX2_TDMA_CONFIG, val);
	}

	if (bp->flags & PCIX_FLAG) {
		u16 val16;

		pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
				     &val16);
		pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
				      val16 & ~PCI_X_CMD_ERO);
	}

	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
	       BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
	       BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
	       BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);

	/* Initialize context mapping and zero out the quick contexts.  The
	 * context block must have already been enabled. */
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_init_5709_context(bp);
	else
		bnx2_init_context(bp);

	if ((rc = bnx2_init_cpus(bp)) != 0)
		return rc;

	bnx2_init_nvram(bp);

	bnx2_set_mac_addr(bp);

	val = REG_RD(bp, BNX2_MQ_CONFIG);
	val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
	val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
	if (CHIP_ID(bp) == CHIP_ID_5709_A0 || CHIP_ID(bp) == CHIP_ID_5709_A1)
		val |= BNX2_MQ_CONFIG_HALT_DIS;

	REG_WR(bp, BNX2_MQ_CONFIG, val);

	val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
	REG_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
	REG_WR(bp, BNX2_MQ_KNL_WIND_END, val);

	val = (BCM_PAGE_BITS - 8) << 24;
	REG_WR(bp, BNX2_RV2P_CONFIG, val);

	/* Configure page size. */
	val = REG_RD(bp, BNX2_TBDR_CONFIG);
	val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
	val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
	REG_WR(bp, BNX2_TBDR_CONFIG, val);

	val = bp->mac_addr[0] +
	      (bp->mac_addr[1] << 8) +
	      (bp->mac_addr[2] << 16) +
	      bp->mac_addr[3] +
	      (bp->mac_addr[4] << 8) +
	      (bp->mac_addr[5] << 16);
	REG_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);

	/* Program the MTU.  Also include 4 bytes for CRC32. */
	val = bp->dev->mtu + ETH_HLEN + 4;
	if (val > (MAX_ETHERNET_PACKET_SIZE + 4))
		val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
	REG_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);

	bp->last_status_idx = 0;
	bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE;

	/* Set up how to generate a link change interrupt. */
	REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);

	REG_WR(bp, BNX2_HC_STATUS_ADDR_L,
	       (u64) bp->status_blk_mapping & 0xffffffff);
	REG_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);

	REG_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
	       (u64) bp->stats_blk_mapping & 0xffffffff);
	REG_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
	       (u64) bp->stats_blk_mapping >> 32);

	REG_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
	       (bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);

	REG_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
	       (bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);

	REG_WR(bp, BNX2_HC_COMP_PROD_TRIP,
	       (bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);

	REG_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);

	REG_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);

	REG_WR(bp, BNX2_HC_COM_TICKS,
	       (bp->com_ticks_int << 16) | bp->com_ticks);

	REG_WR(bp, BNX2_HC_CMD_TICKS,
	       (bp->cmd_ticks_int << 16) | bp->cmd_ticks);

	REG_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks & 0xffff00);
	REG_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8);  /* 3ms */

	if (CHIP_ID(bp) == CHIP_ID_5706_A1)
		REG_WR(bp, BNX2_HC_CONFIG, BNX2_HC_CONFIG_COLLECT_STATS);
	else
		REG_WR(bp, BNX2_HC_CONFIG, BNX2_HC_CONFIG_RX_TMR_MODE |
		       BNX2_HC_CONFIG_TX_TMR_MODE |
		       BNX2_HC_CONFIG_COLLECT_STATS);

	/* Clear internal stats counters. */
	REG_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);

	REG_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_BITS_LINK_STATE);

	if (REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_FEATURE) &
	    BNX2_PORT_FEATURE_ASF_ENABLED)
		bp->flags |= ASF_ENABLE_FLAG;

	/* Initialize the receive filter. */
	bnx2_set_rx_mode(bp->dev);

	rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
			  0);

	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, 0x5ffffff);
	REG_RD(bp, BNX2_MISC_ENABLE_SET_BITS);

	udelay(20);

	bp->hc_cmd = REG_RD(bp, BNX2_HC_COMMAND);

	return rc;
}
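
/* The 5709 (Xinan) uses a different L2 context layout, so the TX
 * context field offsets are chosen per chip before the type, command
 * type and buffer-descriptor base address are written into the context
 * memory for this cid.
 */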
static void
bnx2_init_tx_context(struct bnx2 *bp, u32 cid)
{
	u32 val, offset0, offset1, offset2, offset3;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		offset0 = BNX2_L2CTX_TYPE_XI;
		offset1 = BNX2_L2CTX_CMD_TYPE_XI;
		offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
		offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
	} else {
		offset0 = BNX2_L2CTX_TYPE;
		offset1 = BNX2_L2CTX_CMD_TYPE;
		offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
		offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
	}
	val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
	CTX_WR(bp, GET_CID_ADDR(cid), offset0, val);

	val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
	CTX_WR(bp, GET_CID_ADDR(cid), offset1, val);

	val = (u64) bp->tx_desc_mapping >> 32;
	CTX_WR(bp, GET_CID_ADDR(cid), offset2, val);

	val = (u64) bp->tx_desc_mapping & 0xffffffff;
	CTX_WR(bp, GET_CID_ADDR(cid), offset3, val);
}

static void
bnx2_init_tx_ring(struct bnx2 *bp)
{
	struct tx_bd *txbd;
	u32 cid;

	bp->tx_wake_thresh = bp->tx_ring_size / 2;

	txbd = &bp->tx_desc_ring[MAX_TX_DESC_CNT];

	txbd->tx_bd_haddr_hi = (u64) bp->tx_desc_mapping >> 32;
	txbd->tx_bd_haddr_lo = (u64) bp->tx_desc_mapping & 0xffffffff;

	bp->tx_prod = 0;
	bp->tx_cons = 0;
	bp->hw_tx_cons = 0;
	bp->tx_prod_bseq = 0;

	cid = TX_CID;
	bp->tx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BIDX;
	bp->tx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BSEQ;

	bnx2_init_tx_context(bp, cid);
}
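
/* The RX ring is an array of buffer-descriptor pages chained together:
 * the last BD of each page carries the DMA address of the next page
 * (wrapping to page 0), so the hardware can follow the ring without
 * host intervention.  The TX ring above uses the same trick, with the
 * BD at MAX_TX_DESC_CNT pointing back to the base of the ring.
 */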
static void
bnx2_init_rx_ring(struct bnx2 *bp)
{
	struct rx_bd *rxbd;
	int i;
	u16 prod, ring_prod;
	u32 val;

	/* 8 for CRC and VLAN */
	bp->rx_buf_use_size = bp->dev->mtu + ETH_HLEN + bp->rx_offset + 8;
	/* hw alignment */
	bp->rx_buf_size = bp->rx_buf_use_size + BNX2_RX_ALIGN;

	ring_prod = prod = bp->rx_prod = 0;
	bp->rx_cons = 0;
	bp->hw_rx_cons = 0;
	bp->rx_prod_bseq = 0;

	for (i = 0; i < bp->rx_max_ring; i++) {
		int j;

		rxbd = &bp->rx_desc_ring[i][0];
		for (j = 0; j < MAX_RX_DESC_CNT; j++, rxbd++) {
			rxbd->rx_bd_len = bp->rx_buf_use_size;
			rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
		}
		if (i == (bp->rx_max_ring - 1))
			j = 0;
		else
			j = i + 1;
		rxbd->rx_bd_haddr_hi = (u64) bp->rx_desc_mapping[j] >> 32;
		rxbd->rx_bd_haddr_lo = (u64) bp->rx_desc_mapping[j] &
				       0xffffffff;
	}

	val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
	val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
	CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_CTX_TYPE, val);

	val = (u64) bp->rx_desc_mapping[0] >> 32;
	CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_NX_BDHADDR_HI, val);

	val = (u64) bp->rx_desc_mapping[0] & 0xffffffff;
	CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_NX_BDHADDR_LO, val);

	for (i = 0; i < bp->rx_ring_size; i++) {
		if (bnx2_alloc_rx_skb(bp, ring_prod) < 0) {
			break;
		}
		prod = NEXT_RX_BD(prod);
		ring_prod = RX_RING_IDX(prod);
	}
	bp->rx_prod = prod;

	REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, prod);

	REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bp->rx_prod_bseq);
}

static void
bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
{
	u32 num_rings, max;

	bp->rx_ring_size = size;
	num_rings = 1;
	while (size > MAX_RX_DESC_CNT) {
		size -= MAX_RX_DESC_CNT;
		num_rings++;
	}
	/* round to next power of 2 */
	max = MAX_RX_RINGS;
	while ((max & num_rings) == 0)
		max >>= 1;

	if (num_rings != max)
		max <<= 1;

	bp->rx_max_ring = max;
	bp->rx_max_ring_idx = (bp->rx_max_ring * RX_DESC_CNT) - 1;
}
static void
bnx2_free_tx_skbs(struct bnx2 *bp)
{
	int i;

	if (bp->tx_buf_ring == NULL)
		return;

	for (i = 0; i < TX_DESC_CNT; ) {
		struct sw_bd *tx_buf = &bp->tx_buf_ring[i];
		struct sk_buff *skb = tx_buf->skb;
		int j, last;

		if (skb == NULL) {
			i++;
			continue;
		}

		pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
			skb_headlen(skb), PCI_DMA_TODEVICE);

		tx_buf->skb = NULL;

		last = skb_shinfo(skb)->nr_frags;
		for (j = 0; j < last; j++) {
			tx_buf = &bp->tx_buf_ring[i + j + 1];
			pci_unmap_page(bp->pdev,
				pci_unmap_addr(tx_buf, mapping),
				skb_shinfo(skb)->frags[j].size,
				PCI_DMA_TODEVICE);
		}
		dev_kfree_skb(skb);
		i += j + 1;
	}
}

static void
bnx2_free_rx_skbs(struct bnx2 *bp)
{
	int i;

	if (bp->rx_buf_ring == NULL)
		return;

	for (i = 0; i < bp->rx_max_ring_idx; i++) {
		struct sw_bd *rx_buf = &bp->rx_buf_ring[i];
		struct sk_buff *skb = rx_buf->skb;

		if (skb == NULL)
			continue;

		pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
			bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);

		rx_buf->skb = NULL;

		dev_kfree_skb(skb);
	}
}

static void
bnx2_free_skbs(struct bnx2 *bp)
{
	bnx2_free_tx_skbs(bp);
	bnx2_free_rx_skbs(bp);
}

static int
bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
{
	int rc;

	rc = bnx2_reset_chip(bp, reset_code);
	bnx2_free_skbs(bp);
	if (rc)
		return rc;

	if ((rc = bnx2_init_chip(bp)) != 0)
		return rc;

	bnx2_init_tx_ring(bp);
	bnx2_init_rx_ring(bp);
	return 0;
}

static int
bnx2_init_nic(struct bnx2 *bp)
{
	int rc;

	if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
		return rc;

	spin_lock_bh(&bp->phy_lock);
	bnx2_init_phy(bp);
	spin_unlock_bh(&bp->phy_lock);
	return 0;
}
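
/* Register self-test: each table entry gives a read/write mask and a
 * read-only mask for an offset.  Writing 0 and then 0xffffffff must
 * leave the read-only bits untouched (they compare against the saved
 * value), while the read/write bits must follow what was written; the
 * original value is restored afterwards either way.
 */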
static int
bnx2_test_registers(struct bnx2 *bp)
{
	int ret;
	int i;
	static const struct {
		u16   offset;
		u16   flags;
		u32   rw_mask;
		u32   ro_mask;
	} reg_tbl[] = {
		{ 0x006c, 0, 0x00000000, 0x0000003f },
		{ 0x0090, 0, 0xffffffff, 0x00000000 },
		{ 0x0094, 0, 0x00000000, 0x00000000 },

		{ 0x0404, 0, 0x00003f00, 0x00000000 },
		{ 0x0418, 0, 0x00000000, 0xffffffff },
		{ 0x041c, 0, 0x00000000, 0xffffffff },
		{ 0x0420, 0, 0x00000000, 0x80ffffff },
		{ 0x0424, 0, 0x00000000, 0x00000000 },
		{ 0x0428, 0, 0x00000000, 0x00000001 },
		{ 0x0450, 0, 0x00000000, 0x0000ffff },
		{ 0x0454, 0, 0x00000000, 0xffffffff },
		{ 0x0458, 0, 0x00000000, 0xffffffff },

		{ 0x0808, 0, 0x00000000, 0xffffffff },
		{ 0x0854, 0, 0x00000000, 0xffffffff },
		{ 0x0868, 0, 0x00000000, 0x77777777 },
		{ 0x086c, 0, 0x00000000, 0x77777777 },
		{ 0x0870, 0, 0x00000000, 0x77777777 },
		{ 0x0874, 0, 0x00000000, 0x77777777 },

		{ 0x0c00, 0, 0x00000000, 0x00000001 },
		{ 0x0c04, 0, 0x00000000, 0x03ff0001 },
		{ 0x0c08, 0, 0x0f0ff073, 0x00000000 },

		{ 0x1000, 0, 0x00000000, 0x00000001 },
		{ 0x1004, 0, 0x00000000, 0x000f0001 },

		{ 0x1408, 0, 0x01c00800, 0x00000000 },
		{ 0x149c, 0, 0x8000ffff, 0x00000000 },
		{ 0x14a8, 0, 0x00000000, 0x000001ff },
		{ 0x14ac, 0, 0x0fffffff, 0x10000000 },
		{ 0x14b0, 0, 0x00000002, 0x00000001 },
		{ 0x14b8, 0, 0x00000000, 0x00000000 },
		{ 0x14c0, 0, 0x00000000, 0x00000009 },
		{ 0x14c4, 0, 0x00003fff, 0x00000000 },
		{ 0x14cc, 0, 0x00000000, 0x00000001 },
		{ 0x14d0, 0, 0xffffffff, 0x00000000 },

		{ 0x1800, 0, 0x00000000, 0x00000001 },
		{ 0x1804, 0, 0x00000000, 0x00000003 },

		{ 0x2800, 0, 0x00000000, 0x00000001 },
		{ 0x2804, 0, 0x00000000, 0x00003f01 },
		{ 0x2808, 0, 0x0f3f3f03, 0x00000000 },
		{ 0x2810, 0, 0xffff0000, 0x00000000 },
		{ 0x2814, 0, 0xffff0000, 0x00000000 },
		{ 0x2818, 0, 0xffff0000, 0x00000000 },
		{ 0x281c, 0, 0xffff0000, 0x00000000 },
		{ 0x2834, 0, 0xffffffff, 0x00000000 },
		{ 0x2840, 0, 0x00000000, 0xffffffff },
		{ 0x2844, 0, 0x00000000, 0xffffffff },
		{ 0x2848, 0, 0xffffffff, 0x00000000 },
		{ 0x284c, 0, 0xf800f800, 0x07ff07ff },

		{ 0x2c00, 0, 0x00000000, 0x00000011 },
		{ 0x2c04, 0, 0x00000000, 0x00030007 },

		{ 0x3c00, 0, 0x00000000, 0x00000001 },
		{ 0x3c04, 0, 0x00000000, 0x00070000 },
		{ 0x3c08, 0, 0x00007f71, 0x07f00000 },
		{ 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
		{ 0x3c10, 0, 0xffffffff, 0x00000000 },
		{ 0x3c14, 0, 0x00000000, 0xffffffff },
		{ 0x3c18, 0, 0x00000000, 0xffffffff },
		{ 0x3c1c, 0, 0xfffff000, 0x00000000 },
		{ 0x3c20, 0, 0xffffff00, 0x00000000 },

		{ 0x5004, 0, 0x00000000, 0x0000007f },
		{ 0x5008, 0, 0x0f0007ff, 0x00000000 },
		{ 0x500c, 0, 0xf800f800, 0x07ff07ff },

		{ 0x5c00, 0, 0x00000000, 0x00000001 },
		{ 0x5c04, 0, 0x00000000, 0x0003000f },
		{ 0x5c08, 0, 0x00000003, 0x00000000 },
		{ 0x5c0c, 0, 0x0000fff8, 0x00000000 },
		{ 0x5c10, 0, 0x00000000, 0xffffffff },
		{ 0x5c80, 0, 0x00000000, 0x0f7113f1 },
		{ 0x5c84, 0, 0x00000000, 0x0000f333 },
		{ 0x5c88, 0, 0x00000000, 0x00077373 },
		{ 0x5c8c, 0, 0x00000000, 0x0007f737 },

		{ 0x6808, 0, 0x0000ff7f, 0x00000000 },
		{ 0x680c, 0, 0xffffffff, 0x00000000 },
		{ 0x6810, 0, 0xffffffff, 0x00000000 },
		{ 0x6814, 0, 0xffffffff, 0x00000000 },
		{ 0x6818, 0, 0xffffffff, 0x00000000 },
		{ 0x681c, 0, 0xffffffff, 0x00000000 },
		{ 0x6820, 0, 0x00ff00ff, 0x00000000 },
		{ 0x6824, 0, 0x00ff00ff, 0x00000000 },
		{ 0x6828, 0, 0x00ff00ff, 0x00000000 },
		{ 0x682c, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6830, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6834, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6838, 0, 0x03ff03ff, 0x00000000 },
		{ 0x683c, 0, 0x0000ffff, 0x00000000 },
		{ 0x6840, 0, 0x00000ff0, 0x00000000 },
		{ 0x6844, 0, 0x00ffff00, 0x00000000 },
		{ 0x684c, 0, 0xffffffff, 0x00000000 },
		{ 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6908, 0, 0x00000000, 0x0001ff0f },
		{ 0x690c, 0, 0x00000000, 0x0ffe00f0 },

		{ 0xffff, 0, 0x00000000, 0x00000000 },
	};

	ret = 0;
	for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
		u32 offset, rw_mask, ro_mask, save_val, val;

		offset = (u32) reg_tbl[i].offset;
		rw_mask = reg_tbl[i].rw_mask;
		ro_mask = reg_tbl[i].ro_mask;

		save_val = readl(bp->regview + offset);

		writel(0, bp->regview + offset);

		val = readl(bp->regview + offset);
		if ((val & rw_mask) != 0) {
			goto reg_test_err;
		}

		if ((val & ro_mask) != (save_val & ro_mask)) {
			goto reg_test_err;
		}

		writel(0xffffffff, bp->regview + offset);

		val = readl(bp->regview + offset);
		if ((val & rw_mask) != rw_mask) {
			goto reg_test_err;
		}

		if ((val & ro_mask) != (save_val & ro_mask)) {
			goto reg_test_err;
		}

		writel(save_val, bp->regview + offset);
		continue;

reg_test_err:
		writel(save_val, bp->regview + offset);
		ret = -ENODEV;
		break;
	}
	return ret;
}
static int
bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
{
	static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
		0xaaaaaaaa, 0xaa55aa55, 0x55aa55aa };
	int i;

	for (i = 0; i < sizeof(test_pattern) / 4; i++) {
		u32 offset;

		for (offset = 0; offset < size; offset += 4) {

			REG_WR_IND(bp, start + offset, test_pattern[i]);

			if (REG_RD_IND(bp, start + offset) !=
				test_pattern[i]) {
				return -ENODEV;
			}
		}
	}
	return 0;
}

static int
bnx2_test_memory(struct bnx2 *bp)
{
	int ret = 0;
	int i;
	static const struct {
		u32   offset;
		u32   len;
	} mem_tbl[] = {
		{ 0x60000,  0x4000 },
		{ 0xa0000,  0x3000 },
		{ 0xe0000,  0x4000 },
		{ 0x120000, 0x4000 },
		{ 0x1a0000, 0x4000 },
		{ 0x160000, 0x4000 },
		{ 0xffffffff, 0    },
	};

	for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
		if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
			mem_tbl[i].len)) != 0) {
			return ret;
		}
	}

	return ret;
}
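
/* Loopback self-test: a single frame (destination = our own MAC, a
 * counting byte pattern in the payload) is placed on the TX ring with
 * the MAC or PHY looped back on itself, the coalescing block is kicked
 * manually, and the test passes only if exactly one frame comes back
 * on the RX ring with no l2_fhdr errors, the expected length, and an
 * intact payload.
 */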
#define BNX2_MAC_LOOPBACK	0
#define BNX2_PHY_LOOPBACK	1

static int
bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
{
	unsigned int pkt_size, num_pkts, i;
	struct sk_buff *skb, *rx_skb;
	unsigned char *packet;
	u16 rx_start_idx, rx_idx;
	dma_addr_t map;
	struct tx_bd *txbd;
	struct sw_bd *rx_buf;
	struct l2_fhdr *rx_hdr;
	int ret = -ENODEV;

	if (loopback_mode == BNX2_MAC_LOOPBACK) {
		bp->loopback = MAC_LOOPBACK;
		bnx2_set_mac_loopback(bp);
	}
	else if (loopback_mode == BNX2_PHY_LOOPBACK) {
		bp->loopback = PHY_LOOPBACK;
		bnx2_set_phy_loopback(bp);
	}
	else
		return -EINVAL;

	pkt_size = 1514;
	skb = netdev_alloc_skb(bp->dev, pkt_size);
	if (!skb)
		return -ENOMEM;
	packet = skb_put(skb, pkt_size);
	memcpy(packet, bp->dev->dev_addr, 6);
	memset(packet + 6, 0x0, 8);
	for (i = 14; i < pkt_size; i++)
		packet[i] = (unsigned char) (i & 0xff);

	map = pci_map_single(bp->pdev, skb->data, pkt_size,
		PCI_DMA_TODEVICE);

	REG_WR(bp, BNX2_HC_COMMAND,
	       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);

	REG_RD(bp, BNX2_HC_COMMAND);

	udelay(5);
	rx_start_idx = bp->status_blk->status_rx_quick_consumer_index0;

	num_pkts = 0;

	txbd = &bp->tx_desc_ring[TX_RING_IDX(bp->tx_prod)];

	txbd->tx_bd_haddr_hi = (u64) map >> 32;
	txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
	txbd->tx_bd_mss_nbytes = pkt_size;
	txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;

	num_pkts++;
	bp->tx_prod = NEXT_TX_BD(bp->tx_prod);
	bp->tx_prod_bseq += pkt_size;

	REG_WR16(bp, bp->tx_bidx_addr, bp->tx_prod);
	REG_WR(bp, bp->tx_bseq_addr, bp->tx_prod_bseq);

	udelay(100);

	REG_WR(bp, BNX2_HC_COMMAND,
	       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);

	REG_RD(bp, BNX2_HC_COMMAND);

	udelay(5);

	pci_unmap_single(bp->pdev, map, pkt_size, PCI_DMA_TODEVICE);
	dev_kfree_skb(skb);

	if (bp->status_blk->status_tx_quick_consumer_index0 != bp->tx_prod) {
		goto loopback_test_done;
	}

	rx_idx = bp->status_blk->status_rx_quick_consumer_index0;
	if (rx_idx != rx_start_idx + num_pkts) {
		goto loopback_test_done;
	}

	rx_buf = &bp->rx_buf_ring[rx_start_idx];
	rx_skb = rx_buf->skb;

	rx_hdr = (struct l2_fhdr *) rx_skb->data;
	skb_reserve(rx_skb, bp->rx_offset);

	pci_dma_sync_single_for_cpu(bp->pdev,
		pci_unmap_addr(rx_buf, mapping),
		bp->rx_buf_size, PCI_DMA_FROMDEVICE);

	if (rx_hdr->l2_fhdr_status &
		(L2_FHDR_ERRORS_BAD_CRC |
		L2_FHDR_ERRORS_PHY_DECODE |
		L2_FHDR_ERRORS_ALIGNMENT |
		L2_FHDR_ERRORS_TOO_SHORT |
		L2_FHDR_ERRORS_GIANT_FRAME)) {

		goto loopback_test_done;
	}

	if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) {
		goto loopback_test_done;
	}

	for (i = 14; i < pkt_size; i++) {
		if (*(rx_skb->data + i) != (unsigned char) (i & 0xff)) {
			goto loopback_test_done;
		}
	}

	ret = 0;

loopback_test_done:
	bp->loopback = 0;
	return ret;
}
#define BNX2_MAC_LOOPBACK_FAILED	1
#define BNX2_PHY_LOOPBACK_FAILED	2
#define BNX2_LOOPBACK_FAILED		(BNX2_MAC_LOOPBACK_FAILED |	\
					 BNX2_PHY_LOOPBACK_FAILED)

static int
bnx2_test_loopback(struct bnx2 *bp)
{
	int rc = 0;

	if (!netif_running(bp->dev))
		return BNX2_LOOPBACK_FAILED;

	bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
	spin_lock_bh(&bp->phy_lock);
	bnx2_init_phy(bp);
	spin_unlock_bh(&bp->phy_lock);
	if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
		rc |= BNX2_MAC_LOOPBACK_FAILED;
	if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
		rc |= BNX2_PHY_LOOPBACK_FAILED;
	return rc;
}
#define NVRAM_SIZE 0x200
#define CRC32_RESIDUAL 0xdebb20e3

static int
bnx2_test_nvram(struct bnx2 *bp)
{
	u32 buf[NVRAM_SIZE / 4];
	u8 *data = (u8 *) buf;
	int rc = 0;
	u32 magic, csum;

	if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
		goto test_nvram_done;

	magic = be32_to_cpu(buf[0]);
	if (magic != 0x669955aa) {
		rc = -ENODEV;
		goto test_nvram_done;
	}

	if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0)
		goto test_nvram_done;

	csum = ether_crc_le(0x100, data);
	if (csum != CRC32_RESIDUAL) {
		rc = -ENODEV;
		goto test_nvram_done;
	}

	csum = ether_crc_le(0x100, data + 0x100);
	if (csum != CRC32_RESIDUAL) {
		rc = -ENODEV;
	}

test_nvram_done:
	return rc;
}

static int
bnx2_test_link(struct bnx2 *bp)
{
	u32 bmsr;

	spin_lock_bh(&bp->phy_lock);
	bnx2_read_phy(bp, MII_BMSR, &bmsr);
	bnx2_read_phy(bp, MII_BMSR, &bmsr);
	spin_unlock_bh(&bp->phy_lock);

	if (bmsr & BMSR_LSTATUS) {
		return 0;
	}
	return -ENODEV;
}

static int
bnx2_test_intr(struct bnx2 *bp)
{
	int i;
	u16 status_idx;

	if (!netif_running(bp->dev))
		return -ENODEV;

	status_idx = REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;

	/* This register is not touched during run-time. */
	REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
	REG_RD(bp, BNX2_HC_COMMAND);

	for (i = 0; i < 10; i++) {
		if ((REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
			status_idx) {

			break;
		}

		msleep_interruptible(10);
	}
	if (i < 10)
		return 0;

	return -ENODEV;
}
static void
bnx2_5706_serdes_timer(struct bnx2 *bp)
{
	spin_lock(&bp->phy_lock);
	if (bp->serdes_an_pending)
		bp->serdes_an_pending--;
	else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
		u32 bmcr;

		bp->current_interval = bp->timer_interval;

		bnx2_read_phy(bp, MII_BMCR, &bmcr);

		if (bmcr & BMCR_ANENABLE) {
			u32 phy1, phy2;

			bnx2_write_phy(bp, 0x1c, 0x7c00);
			bnx2_read_phy(bp, 0x1c, &phy1);

			bnx2_write_phy(bp, 0x17, 0x0f01);
			bnx2_read_phy(bp, 0x15, &phy2);
			bnx2_write_phy(bp, 0x17, 0x0f01);
			bnx2_read_phy(bp, 0x15, &phy2);

			if ((phy1 & 0x10) &&	/* SIGNAL DETECT */
				!(phy2 & 0x20)) {	/* no CONFIG */

				bmcr &= ~BMCR_ANENABLE;
				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
				bnx2_write_phy(bp, MII_BMCR, bmcr);
				bp->phy_flags |= PHY_PARALLEL_DETECT_FLAG;
			}
		}
	}
	else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
		 (bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)) {
		u32 phy2;

		bnx2_write_phy(bp, 0x17, 0x0f01);
		bnx2_read_phy(bp, 0x15, &phy2);
		if (phy2 & 0x20) {
			u32 bmcr;

			bnx2_read_phy(bp, MII_BMCR, &bmcr);
			bmcr |= BMCR_ANENABLE;
			bnx2_write_phy(bp, MII_BMCR, bmcr);

			bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
		}
	} else
		bp->current_interval = bp->timer_interval;

	spin_unlock(&bp->phy_lock);
}

static void
bnx2_5708_serdes_timer(struct bnx2 *bp)
{
	if ((bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) == 0) {
		bp->serdes_an_pending = 0;
		return;
	}

	spin_lock(&bp->phy_lock);
	if (bp->serdes_an_pending)
		bp->serdes_an_pending--;
	else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
		u32 bmcr;

		bnx2_read_phy(bp, MII_BMCR, &bmcr);

		if (bmcr & BMCR_ANENABLE) {
			bmcr &= ~BMCR_ANENABLE;
			bmcr |= BMCR_FULLDPLX | BCM5708S_BMCR_FORCE_2500;
			bnx2_write_phy(bp, MII_BMCR, bmcr);
			bp->current_interval = SERDES_FORCED_TIMEOUT;
		} else {
			bmcr &= ~(BMCR_FULLDPLX | BCM5708S_BMCR_FORCE_2500);
			bmcr |= BMCR_ANENABLE;
			bnx2_write_phy(bp, MII_BMCR, bmcr);
			bp->serdes_an_pending = 2;
			bp->current_interval = bp->timer_interval;
		}

	} else
		bp->current_interval = bp->timer_interval;

	spin_unlock(&bp->phy_lock);
}
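
/* The periodic timer doubles as a driver heartbeat: every interval it
 * increments the pulse sequence number in shared memory
 * (BNX2_DRV_PULSE_MB) so the bootcode knows the driver is alive, picks
 * up the firmware RX drop count for the stats, and runs the SerDes
 * state machines on 5706/5708 parts.
 */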
static void
bnx2_timer(unsigned long data)
{
	u32 msg;
	struct bnx2 *bp = (struct bnx2 *) data;

	if (!netif_running(bp->dev))
		return;

	if (atomic_read(&bp->intr_sem) != 0)
		goto bnx2_restart_timer;

	msg = (u32) ++bp->fw_drv_pulse_wr_seq;
	REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_PULSE_MB, msg);

	bp->stats_blk->stat_FwRxDrop = REG_RD_IND(bp, BNX2_FW_RX_DROP_COUNT);

	if (bp->phy_flags & PHY_SERDES_FLAG) {
		if (CHIP_NUM(bp) == CHIP_NUM_5706)
			bnx2_5706_serdes_timer(bp);
		else if (CHIP_NUM(bp) == CHIP_NUM_5708)
			bnx2_5708_serdes_timer(bp);
	}

bnx2_restart_timer:
	mod_timer(&bp->timer, jiffies + bp->current_interval);
}
/* Called with rtnl_lock */
static int
bnx2_open(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	int rc;

	bnx2_set_power_state(bp, PCI_D0);
	bnx2_disable_int(bp);

	rc = bnx2_alloc_mem(bp);
	if (rc)
		return rc;

	if ((CHIP_ID(bp) != CHIP_ID_5706_A0) &&
	    (CHIP_ID(bp) != CHIP_ID_5706_A1) &&
	    !disable_msi) {

		if (pci_enable_msi(bp->pdev) == 0) {
			bp->flags |= USING_MSI_FLAG;
			rc = request_irq(bp->pdev->irq, bnx2_msi, 0, dev->name,
					dev);
		}
		else {
			rc = request_irq(bp->pdev->irq, bnx2_interrupt,
					IRQF_SHARED, dev->name, dev);
		}
	}
	else {
		rc = request_irq(bp->pdev->irq, bnx2_interrupt, IRQF_SHARED,
				dev->name, dev);
	}
	if (rc) {
		bnx2_free_mem(bp);
		return rc;
	}

	rc = bnx2_init_nic(bp);

	if (rc) {
		free_irq(bp->pdev->irq, dev);
		if (bp->flags & USING_MSI_FLAG) {
			pci_disable_msi(bp->pdev);
			bp->flags &= ~USING_MSI_FLAG;
		}
		bnx2_free_skbs(bp);
		bnx2_free_mem(bp);
		return rc;
	}

	mod_timer(&bp->timer, jiffies + bp->current_interval);

	atomic_set(&bp->intr_sem, 0);

	bnx2_enable_int(bp);

	if (bp->flags & USING_MSI_FLAG) {
		/* Test MSI to make sure it is working
		 * If MSI test fails, go back to INTx mode
		 */
		if (bnx2_test_intr(bp) != 0) {
			printk(KERN_WARNING PFX "%s: No interrupt was generated"
			       " using MSI, switching to INTx mode. Please"
			       " report this failure to the PCI maintainer"
			       " and include system chipset information.\n",
			       bp->dev->name);

			bnx2_disable_int(bp);
			free_irq(bp->pdev->irq, dev);
			pci_disable_msi(bp->pdev);
			bp->flags &= ~USING_MSI_FLAG;

			rc = bnx2_init_nic(bp);

			if (!rc) {
				rc = request_irq(bp->pdev->irq, bnx2_interrupt,
					IRQF_SHARED, dev->name, dev);
			}
			if (rc) {
				bnx2_free_skbs(bp);
				bnx2_free_mem(bp);
				del_timer_sync(&bp->timer);
				return rc;
			}
			bnx2_enable_int(bp);
		}
	}
	if (bp->flags & USING_MSI_FLAG) {
		printk(KERN_INFO PFX "%s: using MSI\n", dev->name);
	}

	netif_start_queue(dev);

	return 0;
}
static void
bnx2_reset_task(struct work_struct *work)
{
	struct bnx2 *bp = container_of(work, struct bnx2, reset_task);

	if (!netif_running(bp->dev))
		return;

	bp->in_reset_task = 1;
	bnx2_netif_stop(bp);

	bnx2_init_nic(bp);

	atomic_set(&bp->intr_sem, 1);
	bnx2_netif_start(bp);
	bp->in_reset_task = 0;
}

static void
bnx2_tx_timeout(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);

	/* This allows the netif to be shutdown gracefully before resetting */
	schedule_work(&bp->reset_task);
}

/* Called with rtnl_lock */
static void
bnx2_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp)
{
	struct bnx2 *bp = netdev_priv(dev);

	bnx2_netif_stop(bp);

	bp->vlgrp = vlgrp;
	bnx2_set_rx_mode(dev);

	bnx2_netif_start(bp);
}

/* Called with rtnl_lock */
static void
bnx2_vlan_rx_kill_vid(struct net_device *dev, uint16_t vid)
{
	struct bnx2 *bp = netdev_priv(dev);

	bnx2_netif_stop(bp);
	vlan_group_set_device(bp->vlgrp, vid, NULL);
	bnx2_set_rx_mode(dev);

	bnx2_netif_start(bp);
}
/* Called with netif_tx_lock.
 * bnx2_tx_int() runs without netif_tx_lock unless it needs to call
 * netif_wake_queue().
 */
static int
bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	dma_addr_t mapping;
	struct tx_bd *txbd;
	struct sw_bd *tx_buf;
	u32 len, vlan_tag_flags, last_frag, mss;
	u16 prod, ring_prod;
	int i;

	if (unlikely(bnx2_tx_avail(bp) < (skb_shinfo(skb)->nr_frags + 1))) {
		netif_stop_queue(dev);
		printk(KERN_ERR PFX "%s: BUG! Tx ring full when queue awake!\n",
			dev->name);

		return NETDEV_TX_BUSY;
	}
	len = skb_headlen(skb);
	prod = bp->tx_prod;
	ring_prod = TX_RING_IDX(prod);

	vlan_tag_flags = 0;
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
	}

	if (bp->vlgrp != 0 && vlan_tx_tag_present(skb)) {
		vlan_tag_flags |=
			(TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
	}
	if ((mss = skb_shinfo(skb)->gso_size) &&
		(skb->len > (bp->dev->mtu + ETH_HLEN))) {
		u32 tcp_opt_len, ip_tcp_len;
		struct iphdr *iph;

		if (skb_header_cloned(skb) &&
		    pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
			dev_kfree_skb(skb);
			return NETDEV_TX_OK;
		}

		vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;

		tcp_opt_len = 0;
		if (tcp_hdr(skb)->doff > 5)
			tcp_opt_len = tcp_optlen(skb);

		ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);

		iph = ip_hdr(skb);
		iph->check = 0;
		iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
		tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
							 iph->daddr, 0, 0, 0);

		if (tcp_opt_len || (iph->ihl > 5)) {
			vlan_tag_flags |= ((iph->ihl - 5) +
					   (tcp_opt_len >> 2)) << 8;
		}
	}
	else {
		mss = 0;
	}

	mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE);

	tx_buf = &bp->tx_buf_ring[ring_prod];
	tx_buf->skb = skb;
	pci_unmap_addr_set(tx_buf, mapping, mapping);

	txbd = &bp->tx_desc_ring[ring_prod];

	txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
	txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
	txbd->tx_bd_mss_nbytes = len | (mss << 16);
	txbd->tx_bd_vlan_tag_flags = vlan_tag_flags | TX_BD_FLAGS_START;

	last_frag = skb_shinfo(skb)->nr_frags;

	for (i = 0; i < last_frag; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		prod = NEXT_TX_BD(prod);
		ring_prod = TX_RING_IDX(prod);
		txbd = &bp->tx_desc_ring[ring_prod];

		len = frag->size;
		mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
			len, PCI_DMA_TODEVICE);
		pci_unmap_addr_set(&bp->tx_buf_ring[ring_prod],
				mapping, mapping);

		txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
		txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
		txbd->tx_bd_mss_nbytes = len | (mss << 16);
		txbd->tx_bd_vlan_tag_flags = vlan_tag_flags;

	}
	txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;

	prod = NEXT_TX_BD(prod);
	bp->tx_prod_bseq += skb->len;

	REG_WR16(bp, bp->tx_bidx_addr, prod);
	REG_WR(bp, bp->tx_bseq_addr, bp->tx_prod_bseq);

	mmiowb();

	bp->tx_prod = prod;
	dev->trans_start = jiffies;

	if (unlikely(bnx2_tx_avail(bp) <= MAX_SKB_FRAGS)) {
		netif_stop_queue(dev);
		if (bnx2_tx_avail(bp) > bp->tx_wake_thresh)
			netif_wake_queue(dev);
	}

	return NETDEV_TX_OK;
}
/* Called with rtnl_lock */
static int
bnx2_close(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 reset_code;

	/* Calling flush_scheduled_work() may deadlock because
	 * linkwatch_event() may be on the workqueue and it will try to get
	 * the rtnl_lock which we are holding.
	 */
	while (bp->in_reset_task)
		msleep(1);

	bnx2_netif_stop(bp);
	del_timer_sync(&bp->timer);
	if (bp->flags & NO_WOL_FLAG)
		reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
	else if (bp->wol)
		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
	else
		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
	bnx2_reset_chip(bp, reset_code);
	free_irq(bp->pdev->irq, dev);
	if (bp->flags & USING_MSI_FLAG) {
		pci_disable_msi(bp->pdev);
		bp->flags &= ~USING_MSI_FLAG;
	}
	bnx2_free_skbs(bp);
	bnx2_free_mem(bp);
	bp->link_up = 0;
	netif_carrier_off(bp->dev);
	bnx2_set_power_state(bp, PCI_D3hot);
	return 0;
}
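
/* The hardware keeps its counters as 32-bit hi/lo pairs.  On 64-bit
 * kernels GET_NET_STATS folds both halves into one unsigned long; on
 * 32-bit kernels only the low half fits, so just the _lo word is used.
 */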
#define GET_NET_STATS64(ctr)					\
	(unsigned long) ((unsigned long) (ctr##_hi) << 32) +	\
	(unsigned long) (ctr##_lo)

#define GET_NET_STATS32(ctr)		\
	(ctr##_lo)

#if (BITS_PER_LONG == 64)
#define GET_NET_STATS	GET_NET_STATS64
#else
#define GET_NET_STATS	GET_NET_STATS32
#endif

static struct net_device_stats *
bnx2_get_stats(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	struct statistics_block *stats_blk = bp->stats_blk;
	struct net_device_stats *net_stats = &bp->net_stats;

	if (bp->stats_blk == NULL) {
		return net_stats;
	}
	net_stats->rx_packets =
		GET_NET_STATS(stats_blk->stat_IfHCInUcastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCInMulticastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCInBroadcastPkts);

	net_stats->tx_packets =
		GET_NET_STATS(stats_blk->stat_IfHCOutUcastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCOutBroadcastPkts);

	net_stats->rx_bytes =
		GET_NET_STATS(stats_blk->stat_IfHCInOctets);

	net_stats->tx_bytes =
		GET_NET_STATS(stats_blk->stat_IfHCOutOctets);

	net_stats->multicast =
		GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts);

	net_stats->collisions =
		(unsigned long) stats_blk->stat_EtherStatsCollisions;

	net_stats->rx_length_errors =
		(unsigned long) (stats_blk->stat_EtherStatsUndersizePkts +
		stats_blk->stat_EtherStatsOverrsizePkts);

	net_stats->rx_over_errors =
		(unsigned long) stats_blk->stat_IfInMBUFDiscards;

	net_stats->rx_frame_errors =
		(unsigned long) stats_blk->stat_Dot3StatsAlignmentErrors;

	net_stats->rx_crc_errors =
		(unsigned long) stats_blk->stat_Dot3StatsFCSErrors;

	net_stats->rx_errors = net_stats->rx_length_errors +
		net_stats->rx_over_errors + net_stats->rx_frame_errors +
		net_stats->rx_crc_errors;

	net_stats->tx_aborted_errors =
		(unsigned long) (stats_blk->stat_Dot3StatsExcessiveCollisions +
		stats_blk->stat_Dot3StatsLateCollisions);

	if ((CHIP_NUM(bp) == CHIP_NUM_5706) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_A0))
		net_stats->tx_carrier_errors = 0;
	else {
		net_stats->tx_carrier_errors =
			(unsigned long)
			stats_blk->stat_Dot3StatsCarrierSenseErrors;
	}

	net_stats->tx_errors =
		(unsigned long)
		stats_blk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors
		+
		net_stats->tx_aborted_errors +
		net_stats->tx_carrier_errors;

	net_stats->rx_missed_errors =
		(unsigned long) (stats_blk->stat_IfInMBUFDiscards +
		stats_blk->stat_FwRxDrop);

	return net_stats;
}
/* All ethtool functions called with rtnl_lock */

static int
bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct bnx2 *bp = netdev_priv(dev);

	cmd->supported = SUPPORTED_Autoneg;
	if (bp->phy_flags & PHY_SERDES_FLAG) {
		cmd->supported |= SUPPORTED_1000baseT_Full |
			SUPPORTED_FIBRE;

		cmd->port = PORT_FIBRE;
	}
	else {
		cmd->supported |= SUPPORTED_10baseT_Half |
			SUPPORTED_10baseT_Full |
			SUPPORTED_100baseT_Half |
			SUPPORTED_100baseT_Full |
			SUPPORTED_1000baseT_Full |
			SUPPORTED_TP;

		cmd->port = PORT_TP;
	}

	cmd->advertising = bp->advertising;

	if (bp->autoneg & AUTONEG_SPEED) {
		cmd->autoneg = AUTONEG_ENABLE;
	}
	else {
		cmd->autoneg = AUTONEG_DISABLE;
	}

	if (netif_carrier_ok(dev)) {
		cmd->speed = bp->line_speed;
		cmd->duplex = bp->duplex;
	}
	else {
		cmd->speed = -1;
		cmd->duplex = -1;
	}

	cmd->transceiver = XCVR_INTERNAL;
	cmd->phy_address = bp->phy_addr;

	return 0;
}

static int
bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct bnx2 *bp = netdev_priv(dev);
	u8 autoneg = bp->autoneg;
	u8 req_duplex = bp->req_duplex;
	u16 req_line_speed = bp->req_line_speed;
	u32 advertising = bp->advertising;

	if (cmd->autoneg == AUTONEG_ENABLE) {
		autoneg |= AUTONEG_SPEED;

		cmd->advertising &= ETHTOOL_ALL_COPPER_SPEED;

		/* allow advertising 1 speed */
		if ((cmd->advertising == ADVERTISED_10baseT_Half) ||
			(cmd->advertising == ADVERTISED_10baseT_Full) ||
			(cmd->advertising == ADVERTISED_100baseT_Half) ||
			(cmd->advertising == ADVERTISED_100baseT_Full)) {

			if (bp->phy_flags & PHY_SERDES_FLAG)
				return -EINVAL;

			advertising = cmd->advertising;

		}
		else if (cmd->advertising == ADVERTISED_1000baseT_Full) {
			advertising = cmd->advertising;
		}
		else if (cmd->advertising == ADVERTISED_1000baseT_Half) {
			return -EINVAL;
		}
		else {
			if (bp->phy_flags & PHY_SERDES_FLAG) {
				advertising = ETHTOOL_ALL_FIBRE_SPEED;
			}
			else {
				advertising = ETHTOOL_ALL_COPPER_SPEED;
			}
		}
		advertising |= ADVERTISED_Autoneg;
	}
	else {
		if (bp->phy_flags & PHY_SERDES_FLAG) {
			if ((cmd->speed != SPEED_1000 &&
			     cmd->speed != SPEED_2500) ||
			    (cmd->duplex != DUPLEX_FULL))
				return -EINVAL;

			if (cmd->speed == SPEED_2500 &&
			    !(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
				return -EINVAL;
		}
		else if (cmd->speed == SPEED_1000) {
			return -EINVAL;
		}
		autoneg &= ~AUTONEG_SPEED;
		req_line_speed = cmd->speed;
		req_duplex = cmd->duplex;
		advertising = 0;
	}

	bp->autoneg = autoneg;
	bp->advertising = advertising;
	bp->req_line_speed = req_line_speed;
	bp->req_duplex = req_duplex;

	spin_lock_bh(&bp->phy_lock);

	bnx2_setup_phy(bp);

	spin_unlock_bh(&bp->phy_lock);

	return 0;
}
static void
bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct bnx2 *bp = netdev_priv(dev);

	strcpy(info->driver, DRV_MODULE_NAME);
	strcpy(info->version, DRV_MODULE_VERSION);
	strcpy(info->bus_info, pci_name(bp->pdev));
	info->fw_version[0] = ((bp->fw_ver & 0xff000000) >> 24) + '0';
	info->fw_version[2] = ((bp->fw_ver & 0xff0000) >> 16) + '0';
	info->fw_version[4] = ((bp->fw_ver & 0xff00) >> 8) + '0';
	info->fw_version[1] = info->fw_version[3] = '.';
	info->fw_version[5] = 0;
}

#define BNX2_REGDUMP_LEN		(32 * 1024)

static int
bnx2_get_regs_len(struct net_device *dev)
{
	return BNX2_REGDUMP_LEN;
}

static void
bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
{
	u32 *p = _p, i, offset;
	u8 *orig_p = _p;
	struct bnx2 *bp = netdev_priv(dev);
	u32 reg_boundaries[] = { 0x0000, 0x0098, 0x0400, 0x045c,
				 0x0800, 0x0880, 0x0c00, 0x0c10,
				 0x0c30, 0x0d08, 0x1000, 0x101c,
				 0x1040, 0x1048, 0x1080, 0x10a4,
				 0x1400, 0x1490, 0x1498, 0x14f0,
				 0x1500, 0x155c, 0x1580, 0x15dc,
				 0x1600, 0x1658, 0x1680, 0x16d8,
				 0x1800, 0x1820, 0x1840, 0x1854,
				 0x1880, 0x1894, 0x1900, 0x1984,
				 0x1c00, 0x1c0c, 0x1c40, 0x1c54,
				 0x1c80, 0x1c94, 0x1d00, 0x1d84,
				 0x2000, 0x2030, 0x23c0, 0x2400,
				 0x2800, 0x2820, 0x2830, 0x2850,
				 0x2b40, 0x2c10, 0x2fc0, 0x3058,
				 0x3c00, 0x3c94, 0x4000, 0x4010,
				 0x4080, 0x4090, 0x43c0, 0x4458,
				 0x4c00, 0x4c18, 0x4c40, 0x4c54,
				 0x4fc0, 0x5010, 0x53c0, 0x5444,
				 0x5c00, 0x5c18, 0x5c80, 0x5c90,
				 0x5fc0, 0x6000, 0x6400, 0x6428,
				 0x6800, 0x6848, 0x684c, 0x6860,
				 0x6888, 0x6910, 0x8000 };

	regs->version = 0;

	memset(p, 0, BNX2_REGDUMP_LEN);

	if (!netif_running(bp->dev))
		return;

	i = 0;
	offset = reg_boundaries[0];
	while (offset < BNX2_REGDUMP_LEN) {
		*p++ = REG_RD(bp, offset);
		offset += 4;
		if (offset == reg_boundaries[i + 1]) {
			offset = reg_boundaries[i + 2];
			p = (u32 *) (orig_p + offset);
			i += 2;
		}
	}
}
4926 bnx2_get_wol(struct net_device
*dev
, struct ethtool_wolinfo
*wol
)
4928 struct bnx2
*bp
= netdev_priv(dev
);
4930 if (bp
->flags
& NO_WOL_FLAG
) {
4935 wol
->supported
= WAKE_MAGIC
;
4937 wol
->wolopts
= WAKE_MAGIC
;
4941 memset(&wol
->sopass
, 0, sizeof(wol
->sopass
));
4945 bnx2_set_wol(struct net_device
*dev
, struct ethtool_wolinfo
*wol
)
4947 struct bnx2
*bp
= netdev_priv(dev
);
4949 if (wol
->wolopts
& ~WAKE_MAGIC
)
4952 if (wol
->wolopts
& WAKE_MAGIC
) {
4953 if (bp
->flags
& NO_WOL_FLAG
)
static int
bnx2_nway_reset(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 bmcr;

	if (!(bp->autoneg & AUTONEG_SPEED)) {
		return -EINVAL;
	}

	spin_lock_bh(&bp->phy_lock);

	/* Force a link down visible on the other side */
	if (bp->phy_flags & PHY_SERDES_FLAG) {
		bnx2_write_phy(bp, MII_BMCR, BMCR_LOOPBACK);
		spin_unlock_bh(&bp->phy_lock);

		msleep(20);

		spin_lock_bh(&bp->phy_lock);

		bp->current_interval = SERDES_AN_TIMEOUT;
		bp->serdes_an_pending = 1;
		mod_timer(&bp->timer, jiffies + bp->current_interval);
	}

	bnx2_read_phy(bp, MII_BMCR, &bmcr);
	bmcr &= ~BMCR_LOOPBACK;
	bnx2_write_phy(bp, MII_BMCR, bmcr | BMCR_ANRESTART | BMCR_ANENABLE);

	spin_unlock_bh(&bp->phy_lock);

	return 0;
}
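/*
 * For SerDes, writing BMCR_LOOPBACK above drops the link long enough
 * for the partner to notice the restart; serdes_an_pending then arms
 * the periodic timer, presumably so the timer handler can fall back
 * to a forced link mode if autonegotiation has not completed within
 * SERDES_AN_TIMEOUT.
 */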
static int
bnx2_get_eeprom_len(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);

	if (bp->flash_info == NULL)
		return 0;

	return (int) bp->flash_size;
}
static int
bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
		u8 *eebuf)
{
	struct bnx2 *bp = netdev_priv(dev);
	int rc;

	/* parameters already validated in ethtool_get_eeprom */

	rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);

	return rc;
}

static int
bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
		u8 *eebuf)
{
	struct bnx2 *bp = netdev_priv(dev);
	int rc;

	/* parameters already validated in ethtool_set_eeprom */

	rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);

	return rc;
}
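/*
 * Both handlers go straight to the chip's NVRAM interface; from
 * userspace, e.g. `ethtool -e eth0` exercises the read path and
 * `ethtool -E` the write path (device name illustrative).
 */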
static int
bnx2_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
{
	struct bnx2 *bp = netdev_priv(dev);

	memset(coal, 0, sizeof(struct ethtool_coalesce));

	coal->rx_coalesce_usecs = bp->rx_ticks;
	coal->rx_max_coalesced_frames = bp->rx_quick_cons_trip;
	coal->rx_coalesce_usecs_irq = bp->rx_ticks_int;
	coal->rx_max_coalesced_frames_irq = bp->rx_quick_cons_trip_int;

	coal->tx_coalesce_usecs = bp->tx_ticks;
	coal->tx_max_coalesced_frames = bp->tx_quick_cons_trip;
	coal->tx_coalesce_usecs_irq = bp->tx_ticks_int;
	coal->tx_max_coalesced_frames_irq = bp->tx_quick_cons_trip_int;

	coal->stats_block_coalesce_usecs = bp->stats_ticks;

	return 0;
}
static int
bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
{
	struct bnx2 *bp = netdev_priv(dev);

	bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
	if (bp->rx_ticks > 0x3ff) bp->rx_ticks = 0x3ff;

	bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames;
	if (bp->rx_quick_cons_trip > 0xff) bp->rx_quick_cons_trip = 0xff;

	bp->rx_ticks_int = (u16) coal->rx_coalesce_usecs_irq;
	if (bp->rx_ticks_int > 0x3ff) bp->rx_ticks_int = 0x3ff;

	bp->rx_quick_cons_trip_int = (u16) coal->rx_max_coalesced_frames_irq;
	if (bp->rx_quick_cons_trip_int > 0xff)
		bp->rx_quick_cons_trip_int = 0xff;

	bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
	if (bp->tx_ticks > 0x3ff) bp->tx_ticks = 0x3ff;

	bp->tx_quick_cons_trip = (u16) coal->tx_max_coalesced_frames;
	if (bp->tx_quick_cons_trip > 0xff) bp->tx_quick_cons_trip = 0xff;

	bp->tx_ticks_int = (u16) coal->tx_coalesce_usecs_irq;
	if (bp->tx_ticks_int > 0x3ff) bp->tx_ticks_int = 0x3ff;

	bp->tx_quick_cons_trip_int = (u16) coal->tx_max_coalesced_frames_irq;
	if (bp->tx_quick_cons_trip_int > 0xff) bp->tx_quick_cons_trip_int =
		0xff;

	bp->stats_ticks = coal->stats_block_coalesce_usecs;
	if (bp->stats_ticks > 0xffff00) bp->stats_ticks = 0xffff00;
	bp->stats_ticks &= 0xffff00;

	if (netif_running(bp->dev)) {
		bnx2_netif_stop(bp);
		bnx2_init_nic(bp);
		bnx2_netif_start(bp);
	}

	return 0;
}
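/*
 * The clamps above reflect hardware field widths: tick values are
 * 10-bit (max 0x3ff usec) and frame-count trip values 8-bit (max
 * 0xff).  A typical invocation from userspace (hypothetical values)
 * would be:
 *
 *	ethtool -C eth0 rx-usecs 18 rx-frames 6 tx-usecs 80 tx-frames 20
 */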
static void
bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
{
	struct bnx2 *bp = netdev_priv(dev);

	ering->rx_max_pending = MAX_TOTAL_RX_DESC_CNT;
	ering->rx_mini_max_pending = 0;
	ering->rx_jumbo_max_pending = 0;

	ering->rx_pending = bp->rx_ring_size;
	ering->rx_mini_pending = 0;
	ering->rx_jumbo_pending = 0;

	ering->tx_max_pending = MAX_TX_DESC_CNT;
	ering->tx_pending = bp->tx_ring_size;
}
static int
bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
{
	struct bnx2 *bp = netdev_priv(dev);

	if ((ering->rx_pending > MAX_TOTAL_RX_DESC_CNT) ||
	    (ering->tx_pending > MAX_TX_DESC_CNT) ||
	    (ering->tx_pending <= MAX_SKB_FRAGS)) {

		return -EINVAL;
	}
	if (netif_running(bp->dev)) {
		bnx2_netif_stop(bp);
		bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
		bnx2_free_skbs(bp);
		bnx2_free_mem(bp);
	}

	bnx2_set_rx_ring_size(bp, ering->rx_pending);
	bp->tx_ring_size = ering->tx_pending;

	if (netif_running(bp->dev)) {
		int rc;

		rc = bnx2_alloc_mem(bp);
		if (rc)
			return rc;
		bnx2_init_nic(bp);
		bnx2_netif_start(bp);
	}

	return 0;
}
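/*
 * tx_pending must exceed MAX_SKB_FRAGS because a maximally fragmented
 * skb needs one TX descriptor per fragment plus one for the linear
 * header; a smaller ring could never accept even a single such packet.
 */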
static void
bnx2_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
{
	struct bnx2 *bp = netdev_priv(dev);

	epause->autoneg = ((bp->autoneg & AUTONEG_FLOW_CTRL) != 0);
	epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) != 0);
	epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) != 0);
}
static int
bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
{
	struct bnx2 *bp = netdev_priv(dev);

	bp->req_flow_ctrl = 0;
	if (epause->rx_pause)
		bp->req_flow_ctrl |= FLOW_CTRL_RX;
	if (epause->tx_pause)
		bp->req_flow_ctrl |= FLOW_CTRL_TX;

	if (epause->autoneg) {
		bp->autoneg |= AUTONEG_FLOW_CTRL;
	}
	else {
		bp->autoneg &= ~AUTONEG_FLOW_CTRL;
	}

	spin_lock_bh(&bp->phy_lock);

	bnx2_setup_phy(bp);

	spin_unlock_bh(&bp->phy_lock);

	return 0;
}
static u32
bnx2_get_rx_csum(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);

	return bp->rx_csum;
}

static int
bnx2_set_rx_csum(struct net_device *dev, u32 data)
{
	struct bnx2 *bp = netdev_priv(dev);

	bp->rx_csum = data;
	return 0;
}

static int
bnx2_set_tso(struct net_device *dev, u32 data)
{
	if (data)
		dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
	else
		dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
	return 0;
}
#define BNX2_NUM_STATS 46

static struct {
	char string[ETH_GSTRING_LEN];
} bnx2_stats_str_arr[BNX2_NUM_STATS] = {
	{ "rx_bytes" },
	{ "rx_error_bytes" },
	{ "tx_bytes" },
	{ "tx_error_bytes" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_mac_errors" },
	{ "tx_carrier_errors" },
	{ "rx_crc_errors" },
	{ "rx_align_errors" },
	{ "tx_single_collisions" },
	{ "tx_multi_collisions" },
	{ "tx_deferred_frames" },
	{ "tx_excess_collisions" },
	{ "tx_late_collisions" },
	{ "tx_total_collisions" },
	{ "rx_fragments" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_oversize_packets" },
	{ "rx_64_byte_packets" },
	{ "rx_65_to_127_byte_packets" },
	{ "rx_128_to_255_byte_packets" },
	{ "rx_256_to_511_byte_packets" },
	{ "rx_512_to_1023_byte_packets" },
	{ "rx_1024_to_1522_byte_packets" },
	{ "rx_1523_to_9022_byte_packets" },
	{ "tx_64_byte_packets" },
	{ "tx_65_to_127_byte_packets" },
	{ "tx_128_to_255_byte_packets" },
	{ "tx_256_to_511_byte_packets" },
	{ "tx_512_to_1023_byte_packets" },
	{ "tx_1024_to_1522_byte_packets" },
	{ "tx_1523_to_9022_byte_packets" },
	{ "rx_xon_frames" },
	{ "rx_xoff_frames" },
	{ "tx_xon_frames" },
	{ "tx_xoff_frames" },
	{ "rx_mac_ctrl_frames" },
	{ "rx_filtered_packets" },
	{ "rx_discards" },
	{ "rx_fw_discards" },
};
#define STATS_OFFSET32(offset_name) \
	(offsetof(struct statistics_block, offset_name) / 4)

static const unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
    STATS_OFFSET32(stat_IfHCInOctets_hi),
    STATS_OFFSET32(stat_IfHCInBadOctets_hi),
    STATS_OFFSET32(stat_IfHCOutOctets_hi),
    STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
    STATS_OFFSET32(stat_IfHCInUcastPkts_hi),
    STATS_OFFSET32(stat_IfHCInMulticastPkts_hi),
    STATS_OFFSET32(stat_IfHCInBroadcastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutUcastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutMulticastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutBroadcastPkts_hi),
    STATS_OFFSET32(stat_emac_tx_stat_dot3statsinternalmactransmiterrors),
    STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
    STATS_OFFSET32(stat_Dot3StatsFCSErrors),
    STATS_OFFSET32(stat_Dot3StatsAlignmentErrors),
    STATS_OFFSET32(stat_Dot3StatsSingleCollisionFrames),
    STATS_OFFSET32(stat_Dot3StatsMultipleCollisionFrames),
    STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
    STATS_OFFSET32(stat_Dot3StatsExcessiveCollisions),
    STATS_OFFSET32(stat_Dot3StatsLateCollisions),
    STATS_OFFSET32(stat_EtherStatsCollisions),
    STATS_OFFSET32(stat_EtherStatsFragments),
    STATS_OFFSET32(stat_EtherStatsJabbers),
    STATS_OFFSET32(stat_EtherStatsUndersizePkts),
    STATS_OFFSET32(stat_EtherStatsOverrsizePkts),
    STATS_OFFSET32(stat_EtherStatsPktsRx64Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx65Octetsto127Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx128Octetsto255Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx256Octetsto511Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx512Octetsto1023Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx1024Octetsto1522Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx1523Octetsto9022Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx64Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx65Octetsto127Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx128Octetsto255Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx256Octetsto511Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx512Octetsto1023Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx1024Octetsto1522Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx1523Octetsto9022Octets),
    STATS_OFFSET32(stat_XonPauseFramesReceived),
    STATS_OFFSET32(stat_XoffPauseFramesReceived),
    STATS_OFFSET32(stat_OutXonSent),
    STATS_OFFSET32(stat_OutXoffSent),
    STATS_OFFSET32(stat_MacControlFramesReceived),
    STATS_OFFSET32(stat_IfInFramesL2FilterDiscards),
    STATS_OFFSET32(stat_IfInMBUFDiscards),
    STATS_OFFSET32(stat_FwRxDrop),
};
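/*
 * STATS_OFFSET32() converts a statistics_block member offset from
 * bytes to 32-bit words; bnx2_get_ethtool_stats() below walks the
 * block through a u32 pointer, so these entries index directly into
 * that view.
 */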
/* stat_IfHCInBadOctets and stat_Dot3StatsCarrierSenseErrors are
 * skipped because of errata.
 */
static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
	8,0,8,8,8,8,8,8,8,8,
	4,0,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,
};

static u8 bnx2_5708_stats_len_arr[BNX2_NUM_STATS] = {
	8,0,8,8,8,8,8,8,8,8,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,
};
#define BNX2_NUM_TESTS 6

static struct {
	char string[ETH_GSTRING_LEN];
} bnx2_tests_str_arr[BNX2_NUM_TESTS] = {
	{ "register_test (offline)" },
	{ "memory_test (offline)" },
	{ "loopback_test (offline)" },
	{ "nvram_test (online)" },
	{ "interrupt_test (online)" },
	{ "link_test (online)" },
};

static int
bnx2_self_test_count(struct net_device *dev)
{
	return BNX2_NUM_TESTS;
}
static void
bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
{
	struct bnx2 *bp = netdev_priv(dev);

	memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS);
	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		int i;

		bnx2_netif_stop(bp);
		bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
		bnx2_free_skbs(bp);

		if (bnx2_test_registers(bp) != 0) {
			buf[0] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		if (bnx2_test_memory(bp) != 0) {
			buf[1] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		if ((buf[2] = bnx2_test_loopback(bp)) != 0)
			etest->flags |= ETH_TEST_FL_FAILED;

		if (!netif_running(bp->dev)) {
			bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
		}
		else {
			bnx2_init_nic(bp);
			bnx2_netif_start(bp);
		}

		/* wait for link up */
		for (i = 0; i < 7; i++) {
			if (bp->link_up)
				break;
			msleep_interruptible(1000);
		}
	}

	if (bnx2_test_nvram(bp) != 0) {
		buf[3] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	if (bnx2_test_intr(bp) != 0) {
		buf[4] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}

	if (bnx2_test_link(bp) != 0) {
		buf[5] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
}
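/*
 * buf[0..2] carry the offline register/memory/loopback results and
 * buf[3..5] the online nvram/interrupt/link results, matching the
 * ordering of bnx2_tests_str_arr above.
 */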
static void
bnx2_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
{
	switch (stringset) {
	case ETH_SS_STATS:
		memcpy(buf, bnx2_stats_str_arr,
			sizeof(bnx2_stats_str_arr));
		break;
	case ETH_SS_TEST:
		memcpy(buf, bnx2_tests_str_arr,
			sizeof(bnx2_tests_str_arr));
		break;
	}
}

static int
bnx2_get_stats_count(struct net_device *dev)
{
	return BNX2_NUM_STATS;
}
static void
bnx2_get_ethtool_stats(struct net_device *dev,
		struct ethtool_stats *stats, u64 *buf)
{
	struct bnx2 *bp = netdev_priv(dev);
	int i;
	u32 *hw_stats = (u32 *) bp->stats_blk;
	u8 *stats_len_arr = NULL;

	if (hw_stats == NULL) {
		memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS);
		return;
	}

	if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
	    (CHIP_ID(bp) == CHIP_ID_5706_A1) ||
	    (CHIP_ID(bp) == CHIP_ID_5706_A2) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_A0))
		stats_len_arr = bnx2_5706_stats_len_arr;
	else
		stats_len_arr = bnx2_5708_stats_len_arr;

	for (i = 0; i < BNX2_NUM_STATS; i++) {
		if (stats_len_arr[i] == 0) {
			/* skip this counter */
			buf[i] = 0;
			continue;
		}
		if (stats_len_arr[i] == 4) {
			/* 4-byte counter */
			buf[i] = (u64)
				*(hw_stats + bnx2_stats_offset_arr[i]);
			continue;
		}
		/* 8-byte counter */
		buf[i] = (((u64) *(hw_stats +
					bnx2_stats_offset_arr[i])) << 32) +
				*(hw_stats + bnx2_stats_offset_arr[i] + 1);
	}
}
static int
bnx2_phys_id(struct net_device *dev, u32 data)
{
	struct bnx2 *bp = netdev_priv(dev);
	int i;
	u32 save;

	if (data == 0)
		data = 2;

	save = REG_RD(bp, BNX2_MISC_CFG);
	REG_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);

	for (i = 0; i < (data * 2); i++) {
		if ((i % 2) == 0) {
			REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE);
		}
		else {
			REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE |
				BNX2_EMAC_LED_1000MB_OVERRIDE |
				BNX2_EMAC_LED_100MB_OVERRIDE |
				BNX2_EMAC_LED_10MB_OVERRIDE |
				BNX2_EMAC_LED_TRAFFIC_OVERRIDE |
				BNX2_EMAC_LED_TRAFFIC);
		}
		msleep_interruptible(500);
		if (signal_pending(current))
			break;
	}
	REG_WR(bp, BNX2_EMAC_LED, 0);
	REG_WR(bp, BNX2_MISC_CFG, save);
	return 0;
}
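/*
 * Backs `ethtool -p eth0 <seconds>` (device name illustrative): the
 * LEDs alternate between all-off and all-on every 500 ms, i.e. for
 * roughly `data` seconds in total, unless a signal interrupts the
 * loop early.
 */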
static const struct ethtool_ops bnx2_ethtool_ops = {
	.get_settings		= bnx2_get_settings,
	.set_settings		= bnx2_set_settings,
	.get_drvinfo		= bnx2_get_drvinfo,
	.get_regs_len		= bnx2_get_regs_len,
	.get_regs		= bnx2_get_regs,
	.get_wol		= bnx2_get_wol,
	.set_wol		= bnx2_set_wol,
	.nway_reset		= bnx2_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_eeprom_len		= bnx2_get_eeprom_len,
	.get_eeprom		= bnx2_get_eeprom,
	.set_eeprom		= bnx2_set_eeprom,
	.get_coalesce		= bnx2_get_coalesce,
	.set_coalesce		= bnx2_set_coalesce,
	.get_ringparam		= bnx2_get_ringparam,
	.set_ringparam		= bnx2_set_ringparam,
	.get_pauseparam		= bnx2_get_pauseparam,
	.set_pauseparam		= bnx2_set_pauseparam,
	.get_rx_csum		= bnx2_get_rx_csum,
	.set_rx_csum		= bnx2_set_rx_csum,
	.get_tx_csum		= ethtool_op_get_tx_csum,
	.set_tx_csum		= ethtool_op_set_tx_csum,
	.get_sg			= ethtool_op_get_sg,
	.set_sg			= ethtool_op_set_sg,
	.get_tso		= ethtool_op_get_tso,
	.set_tso		= bnx2_set_tso,
	.self_test_count	= bnx2_self_test_count,
	.self_test		= bnx2_self_test,
	.get_strings		= bnx2_get_strings,
	.phys_id		= bnx2_phys_id,
	.get_stats_count	= bnx2_get_stats_count,
	.get_ethtool_stats	= bnx2_get_ethtool_stats,
	.get_perm_addr		= ethtool_op_get_perm_addr,
};
/* Called with rtnl_lock */
static int
bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *data = if_mii(ifr);
	struct bnx2 *bp = netdev_priv(dev);
	int err;

	switch(cmd) {
	case SIOCGMIIPHY:
		data->phy_id = bp->phy_addr;

		/* fallthru */
	case SIOCGMIIREG: {
		u32 mii_regval;

		spin_lock_bh(&bp->phy_lock);
		err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval);
		spin_unlock_bh(&bp->phy_lock);

		data->val_out = mii_regval;

		return err;
	}

	case SIOCSMIIREG:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;

		spin_lock_bh(&bp->phy_lock);
		err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in);
		spin_unlock_bh(&bp->phy_lock);

		return err;

	default:
		/* do nothing */
		break;
	}
	return -EOPNOTSUPP;
}
/* Called with rtnl_lock */
static int
bnx2_change_mac_addr(struct net_device *dev, void *p)
{
	struct sockaddr *addr = p;
	struct bnx2 *bp = netdev_priv(dev);

	if (!is_valid_ether_addr(addr->sa_data))
		return -EINVAL;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	if (netif_running(dev))
		bnx2_set_mac_addr(bp);

	return 0;
}
/* Called with rtnl_lock */
static int
bnx2_change_mtu(struct net_device *dev, int new_mtu)
{
	struct bnx2 *bp = netdev_priv(dev);

	if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) ||
		((new_mtu + ETH_HLEN) < MIN_ETHERNET_PACKET_SIZE))
		return -EINVAL;

	dev->mtu = new_mtu;
	if (netif_running(dev)) {
		bnx2_netif_stop(bp);

		bnx2_init_nic(bp);

		bnx2_netif_start(bp);
	}
	return 0;
}
#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
static void
poll_bnx2(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);

	disable_irq(bp->pdev->irq);
	bnx2_interrupt(bp->pdev->irq, dev);
	enable_irq(bp->pdev->irq);
}
#endif
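/*
 * Netpoll path (e.g. netconsole): with the device IRQ masked, the
 * interrupt handler is invoked synchronously so packets can still be
 * drained when normal interrupt delivery is unavailable.
 */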
static void __devinit
bnx2_get_5709_media(struct bnx2 *bp)
{
	u32 val = REG_RD(bp, BNX2_MISC_DUAL_MEDIA_CTRL);
	u32 bond_id = val & BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID;
	u32 strap;

	if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_C)
		return;
	else if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_S) {
		bp->phy_flags |= PHY_SERDES_FLAG;
		return;
	}

	if (val & BNX2_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE)
		strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL) >> 21;
	else
		strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP) >> 8;

	if (PCI_FUNC(bp->pdev->devfn) == 0) {
		switch (strap) {
		case 0x4:
		case 0x5:
		case 0x6:
			bp->phy_flags |= PHY_SERDES_FLAG;
			return;
		}
	} else {
		switch (strap) {
		case 0x1:
		case 0x2:
		case 0x4:
			bp->phy_flags |= PHY_SERDES_FLAG;
			return;
		}
	}
}
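/*
 * On the dual-media 5709, the bond ID pins give a definitive answer
 * for copper ("C") and SerDes ("S") bonded parts; otherwise the PHY
 * strap value, optionally overridden by software, is decoded per PCI
 * function to select the media type, defaulting to copper when no
 * SerDes strap matches.
 */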
static int __devinit
bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
{
	struct bnx2 *bp;
	unsigned long mem_len;
	int rc;
	u32 reg;

	SET_MODULE_OWNER(dev);
	SET_NETDEV_DEV(dev, &pdev->dev);
	bp = netdev_priv(dev);

	bp->flags = 0;
	bp->phy_flags = 0;

	/* enable device (incl. PCI PM wakeup), and bus-mastering */
	rc = pci_enable_device(pdev);
	if (rc) {
		dev_err(&pdev->dev, "Cannot enable PCI device, aborting.");
		goto err_out;
	}

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		dev_err(&pdev->dev,
			"Cannot find PCI device base address, aborting.\n");
		rc = -ENODEV;
		goto err_out_disable;
	}

	rc = pci_request_regions(pdev, DRV_MODULE_NAME);
	if (rc) {
		dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting.\n");
		goto err_out_disable;
	}

	pci_set_master(pdev);

	bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (bp->pm_cap == 0) {
		dev_err(&pdev->dev,
			"Cannot find power management capability, aborting.\n");
		rc = -EIO;
		goto err_out_release;
	}

	if (pci_set_dma_mask(pdev, DMA_64BIT_MASK) == 0) {
		bp->flags |= USING_DAC_FLAG;
		if (pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK) != 0) {
			dev_err(&pdev->dev,
				"pci_set_consistent_dma_mask failed, aborting.\n");
			rc = -EIO;
			goto err_out_release;
		}
	}
	else if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) != 0) {
		dev_err(&pdev->dev, "System does not support DMA, aborting.\n");
		rc = -EIO;
		goto err_out_release;
	}
	bp->dev = dev;
	bp->pdev = pdev;

	spin_lock_init(&bp->phy_lock);
	INIT_WORK(&bp->reset_task, bnx2_reset_task);

	dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0);
	mem_len = MB_GET_CID_ADDR(TX_TSS_CID + 1);
	dev->mem_end = dev->mem_start + mem_len;
	dev->irq = pdev->irq;

	bp->regview = ioremap_nocache(dev->base_addr, mem_len);

	if (!bp->regview) {
		dev_err(&pdev->dev, "Cannot map register space, aborting.\n");
		rc = -ENOMEM;
		goto err_out_release;
	}

	/* Configure byte swap and enable write to the reg_window registers.
	 * Rely on CPU to do target byte swapping on big endian systems.
	 * The chip's target access swapping will not swap all accesses.
	 */
	pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG,
			       BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
			       BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);

	bnx2_set_power_state(bp, PCI_D0);

	bp->chip_id = REG_RD(bp, BNX2_MISC_ID);

	if (CHIP_NUM(bp) != CHIP_NUM_5709) {
		bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
		if (bp->pcix_cap == 0) {
			dev_err(&pdev->dev,
				"Cannot find PCIX capability, aborting.\n");
			rc = -EIO;
			goto err_out_unmap;
		}
	}
	/* Get bus information. */
	reg = REG_RD(bp, BNX2_PCICFG_MISC_STATUS);
	if (reg & BNX2_PCICFG_MISC_STATUS_PCIX_DET) {
		u32 clkreg;

		bp->flags |= PCIX_FLAG;

		clkreg = REG_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS);

		clkreg &= BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
		switch (clkreg) {
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
			bp->bus_speed_mhz = 133;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
			bp->bus_speed_mhz = 100;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
			bp->bus_speed_mhz = 66;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
			bp->bus_speed_mhz = 50;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
			bp->bus_speed_mhz = 33;
			break;
		}
	}
	else {
		if (reg & BNX2_PCICFG_MISC_STATUS_M66EN)
			bp->bus_speed_mhz = 66;
		else
			bp->bus_speed_mhz = 33;
	}

	if (reg & BNX2_PCICFG_MISC_STATUS_32BIT_DET)
		bp->flags |= PCI_32BIT_FLAG;

	/* 5706A0 may falsely detect SERR and PERR. */
	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
		reg = REG_RD(bp, PCI_COMMAND);
		reg &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
		REG_WR(bp, PCI_COMMAND, reg);
	}
	else if ((CHIP_ID(bp) == CHIP_ID_5706_A1) &&
		!(bp->flags & PCIX_FLAG)) {

		dev_err(&pdev->dev,
			"5706 A1 can only be used in a PCIX bus, aborting.\n");
		goto err_out_unmap;
	}
	bnx2_init_nvram(bp);

	reg = REG_RD_IND(bp, BNX2_SHM_HDR_SIGNATURE);

	if ((reg & BNX2_SHM_HDR_SIGNATURE_SIG_MASK) ==
	    BNX2_SHM_HDR_SIGNATURE_SIG) {
		u32 off = PCI_FUNC(pdev->devfn) << 2;

		bp->shmem_base = REG_RD_IND(bp, BNX2_SHM_HDR_ADDR_0 + off);
	} else
		bp->shmem_base = HOST_VIEW_SHMEM_BASE;

	/* Get the permanent MAC address.  First we need to make sure the
	 * firmware is actually running.
	 */
	reg = REG_RD_IND(bp, bp->shmem_base + BNX2_DEV_INFO_SIGNATURE);

	if ((reg & BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
	    BNX2_DEV_INFO_SIGNATURE_MAGIC) {
		dev_err(&pdev->dev, "Firmware not running, aborting.\n");
		rc = -ENODEV;
		goto err_out_unmap;
	}

	bp->fw_ver = REG_RD_IND(bp, bp->shmem_base + BNX2_DEV_INFO_BC_REV);

	reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_MAC_UPPER);
	bp->mac_addr[0] = (u8) (reg >> 8);
	bp->mac_addr[1] = (u8) reg;

	reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_MAC_LOWER);
	bp->mac_addr[2] = (u8) (reg >> 24);
	bp->mac_addr[3] = (u8) (reg >> 16);
	bp->mac_addr[4] = (u8) (reg >> 8);
	bp->mac_addr[5] = (u8) reg;
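	/*
	 * The permanent MAC address is stored big-endian in two
	 * shared-memory words: the upper word holds bytes 0-1 in its
	 * low 16 bits, the lower word bytes 2-5.  E.g. upper
	 * 0x00000010 and lower 0x18360700 yield 00:10:18:36:07:00
	 * (a hypothetical Broadcom-range address).
	 */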
	bp->tx_ring_size = MAX_TX_DESC_CNT;
	bnx2_set_rx_ring_size(bp, 255);

	bp->rx_csum = 1;

	bp->rx_offset = sizeof(struct l2_fhdr) + 2;

	bp->tx_quick_cons_trip_int = 20;
	bp->tx_quick_cons_trip = 20;
	bp->tx_ticks_int = 80;
	bp->tx_ticks = 80;

	bp->rx_quick_cons_trip_int = 6;
	bp->rx_quick_cons_trip = 6;
	bp->rx_ticks_int = 18;
	bp->rx_ticks = 18;

	bp->stats_ticks = 1000000 & 0xffff00;

	bp->timer_interval = HZ;
	bp->current_interval = HZ;

	bp->phy_addr = 1;

	/* Disable WOL support if we are running on a SERDES chip. */
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_get_5709_media(bp);
	else if (CHIP_BOND_ID(bp) & CHIP_BOND_ID_SERDES_BIT)
		bp->phy_flags |= PHY_SERDES_FLAG;

	if (bp->phy_flags & PHY_SERDES_FLAG) {
		bp->flags |= NO_WOL_FLAG;
		if (CHIP_NUM(bp) != CHIP_NUM_5706) {
			bp->phy_addr = 2;
			reg = REG_RD_IND(bp, bp->shmem_base +
					 BNX2_SHARED_HW_CFG_CONFIG);
			if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G)
				bp->phy_flags |= PHY_2_5G_CAPABLE_FLAG;
		}
	} else if (CHIP_NUM(bp) == CHIP_NUM_5706 ||
		   CHIP_NUM(bp) == CHIP_NUM_5708)
		bp->phy_flags |= PHY_CRC_FIX_FLAG;
	else if (CHIP_ID(bp) == CHIP_ID_5709_A0)
		bp->phy_flags |= PHY_DIS_EARLY_DAC_FLAG;

	if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B1))
		bp->flags |= NO_WOL_FLAG;

	/* 5706 A0: mirror the normal coalescing parameters into the
	 * in-interrupt ones, i.e. no separate in-interrupt settings.
	 */
	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
		bp->tx_quick_cons_trip_int =
			bp->tx_quick_cons_trip;
		bp->tx_ticks_int = bp->tx_ticks;
		bp->rx_quick_cons_trip_int =
			bp->rx_quick_cons_trip;
		bp->rx_ticks_int = bp->rx_ticks;
		bp->comp_prod_trip_int = bp->comp_prod_trip;
		bp->com_ticks_int = bp->com_ticks;
		bp->cmd_ticks_int = bp->cmd_ticks;
	}
	/* Disable MSI on 5706 if AMD 8132 bridge is found.
	 *
	 * MSI is defined to be 32-bit write.  The 5706 does 64-bit MSI writes
	 * with byte enables disabled on the unused 32-bit word.  This is legal
	 * but causes problems on the AMD 8132 which will eventually stop
	 * responding after a while.
	 *
	 * AMD believes this incompatibility is unique to the 5706, and
	 * prefers to locally disable MSI rather than globally disabling it.
	 */
	if (CHIP_NUM(bp) == CHIP_NUM_5706 && disable_msi == 0) {
		struct pci_dev *amd_8132 = NULL;

		while ((amd_8132 = pci_get_device(PCI_VENDOR_ID_AMD,
						  PCI_DEVICE_ID_AMD_8132_BRIDGE,
						  amd_8132))) {
			u8 rev;

			pci_read_config_byte(amd_8132, PCI_REVISION_ID, &rev);
			if (rev >= 0x10 && rev <= 0x13) {
				disable_msi = 1;
				pci_dev_put(amd_8132);
				break;
			}
		}
	}

	bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
	bp->req_line_speed = 0;
	if (bp->phy_flags & PHY_SERDES_FLAG) {
		bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;

		reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG);
		reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
		if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
			bp->autoneg = 0;
			bp->req_line_speed = bp->line_speed = SPEED_1000;
			bp->req_duplex = DUPLEX_FULL;
		}
	}
	else {
		bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
	}

	bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;

	init_timer(&bp->timer);
	bp->timer.expires = RUN_AT(bp->timer_interval);
	bp->timer.data = (unsigned long) bp;
	bp->timer.function = bnx2_timer;

	return 0;

err_out_unmap:
	if (bp->regview) {
		iounmap(bp->regview);
		bp->regview = NULL;
	}

err_out_release:
	pci_release_regions(pdev);

err_out_disable:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

err_out:
	return rc;
}
static int __devinit
bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	static int version_printed = 0;
	struct net_device *dev = NULL;
	struct bnx2 *bp;
	int rc, i;

	if (version_printed++ == 0)
		printk(KERN_INFO "%s", version);

	/* dev zeroed in init_etherdev */
	dev = alloc_etherdev(sizeof(*bp));

	if (!dev)
		return -ENOMEM;

	rc = bnx2_init_board(pdev, dev);
	if (rc < 0) {
		free_netdev(dev);
		return rc;
	}

	dev->open = bnx2_open;
	dev->hard_start_xmit = bnx2_start_xmit;
	dev->stop = bnx2_close;
	dev->get_stats = bnx2_get_stats;
	dev->set_multicast_list = bnx2_set_rx_mode;
	dev->do_ioctl = bnx2_ioctl;
	dev->set_mac_address = bnx2_change_mac_addr;
	dev->change_mtu = bnx2_change_mtu;
	dev->tx_timeout = bnx2_tx_timeout;
	dev->watchdog_timeo = TX_TIMEOUT;
#ifdef BCM_VLAN
	dev->vlan_rx_register = bnx2_vlan_rx_register;
	dev->vlan_rx_kill_vid = bnx2_vlan_rx_kill_vid;
#endif
	dev->poll = bnx2_poll;
	dev->ethtool_ops = &bnx2_ethtool_ops;
	dev->weight = 64;

	bp = netdev_priv(dev);

#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
	dev->poll_controller = poll_bnx2;
#endif

	if ((rc = register_netdev(dev))) {
		dev_err(&pdev->dev, "Cannot register net device\n");
		if (bp->regview)
			iounmap(bp->regview);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
		pci_set_drvdata(pdev, NULL);
		free_netdev(dev);
		return rc;
	}

	pci_set_drvdata(pdev, dev);

	memcpy(dev->dev_addr, bp->mac_addr, 6);
	memcpy(dev->perm_addr, bp->mac_addr, 6);
	bp->name = board_info[ent->driver_data].name;
	printk(KERN_INFO "%s: %s (%c%d) PCI%s %s %dMHz found at mem %lx, "
		"IRQ %d, ",
		dev->name,
		bp->name,
		((CHIP_ID(bp) & 0xf000) >> 12) + 'A',
		((CHIP_ID(bp) & 0x0ff0) >> 4),
		((bp->flags & PCIX_FLAG) ? "-X" : ""),
		((bp->flags & PCI_32BIT_FLAG) ? "32-bit" : "64-bit"),
		bp->bus_speed_mhz,
		dev->base_addr,
		bp->pdev->irq);

	printk("node addr ");
	for (i = 0; i < 6; i++)
		printk("%2.2x", dev->dev_addr[i]);
	printk("\n");

	dev->features |= NETIF_F_SG;
	if (bp->flags & USING_DAC_FLAG)
		dev->features |= NETIF_F_HIGHDMA;
	dev->features |= NETIF_F_IP_CSUM;
#ifdef BCM_VLAN
	dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
#endif
	dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;

	netif_carrier_off(bp->dev);

	return 0;
}
static void __devexit
bnx2_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	flush_scheduled_work();

	unregister_netdev(dev);

	if (bp->regview)
		iounmap(bp->regview);

	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	free_netdev(dev);
}
static int
bnx2_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);
	u32 reset_code;

	if (!netif_running(dev))
		return 0;

	flush_scheduled_work();
	bnx2_netif_stop(bp);
	netif_device_detach(dev);
	del_timer_sync(&bp->timer);
	if (bp->flags & NO_WOL_FLAG)
		reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
	else if (bp->wol)
		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
	else
		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
	bnx2_reset_chip(bp, reset_code);
	bnx2_free_skbs(bp);
	bnx2_set_power_state(bp, pci_choose_state(pdev, state));
	return 0;
}
static int
bnx2_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	if (!netif_running(dev))
		return 0;

	bnx2_set_power_state(bp, PCI_D0);
	netif_device_attach(dev);
	bnx2_init_nic(bp);
	bnx2_netif_start(bp);
	return 0;
}
static struct pci_driver bnx2_pci_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= bnx2_pci_tbl,
	.probe		= bnx2_init_one,
	.remove		= __devexit_p(bnx2_remove_one),
	.suspend	= bnx2_suspend,
	.resume		= bnx2_resume,
};

static int __init bnx2_init(void)
{
	return pci_register_driver(&bnx2_pci_driver);
}

static void __exit bnx2_cleanup(void)
{
	pci_unregister_driver(&bnx2_pci_driver);
}

module_init(bnx2_init);
module_exit(bnx2_cleanup);