1 /* bnx2.c: Broadcom NX2 network driver.
3 * Copyright (c) 2004-2008 Broadcom Corporation
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
9 * Written by: Michael Chan (mchan@broadcom.com)
13 #include <linux/module.h>
14 #include <linux/moduleparam.h>
16 #include <linux/kernel.h>
17 #include <linux/timer.h>
18 #include <linux/errno.h>
19 #include <linux/ioport.h>
20 #include <linux/slab.h>
21 #include <linux/vmalloc.h>
22 #include <linux/interrupt.h>
23 #include <linux/pci.h>
24 #include <linux/init.h>
25 #include <linux/netdevice.h>
26 #include <linux/etherdevice.h>
27 #include <linux/skbuff.h>
28 #include <linux/dma-mapping.h>
29 #include <linux/bitops.h>
30 #include <asm/io.h>
31 #include <asm/irq.h>
32 #include <linux/delay.h>
33 #include <asm/byteorder.h>
34 #include <asm/page.h>
35 #include <linux/time.h>
36 #include <linux/ethtool.h>
37 #include <linux/mii.h>
38 #ifdef NETIF_F_HW_VLAN_TX
39 #include <linux/if_vlan.h>
40 #define BCM_VLAN 1
41 #endif
42 #include <net/ip.h>
43 #include <net/tcp.h>
44 #include <net/checksum.h>
45 #include <linux/workqueue.h>
46 #include <linux/crc32.h>
47 #include <linux/prefetch.h>
48 #include <linux/cache.h>
49 #include <linux/zlib.h>
51 #include "bnx2.h"
52 #include "bnx2_fw.h"
53 #include "bnx2_fw2.h"
55 #define FW_BUF_SIZE 0x10000
57 #define DRV_MODULE_NAME "bnx2"
58 #define PFX DRV_MODULE_NAME ": "
60 #define DRV_MODULE_VERSION "1.7.4"
61 #define DRV_MODULE_RELDATE "February 18, 2008"
67 #define RUN_AT(x) (jiffies + (x))
69 /* Time in jiffies before concluding the transmitter is hung. */
70 #define TX_TIMEOUT (5*HZ)
72 static char version[] __devinitdata =
73 "Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
75 MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
76 MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708 Driver");
77 MODULE_LICENSE("GPL");
78 MODULE_VERSION(DRV_MODULE_VERSION);
80 static int disable_msi = 0;
82 module_param(disable_msi, int, 0);
83 MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
85 typedef enum {
86 BCM5706 = 0,
87 NC370T,
88 NC370I,
89 BCM5706S,
90 NC370F,
91 BCM5708,
92 BCM5708S,
93 BCM5709,
94 BCM5709S,
95 } board_t;
97 /* indexed by board_t, above */
98 static struct {
99 char *name;
100 } board_info[] __devinitdata = {
101 { "Broadcom NetXtreme II BCM5706 1000Base-T" },
102 { "HP NC370T Multifunction Gigabit Server Adapter" },
103 { "HP NC370i Multifunction Gigabit Server Adapter" },
104 { "Broadcom NetXtreme II BCM5706 1000Base-SX" },
105 { "HP NC370F Multifunction Gigabit Server Adapter" },
106 { "Broadcom NetXtreme II BCM5708 1000Base-T" },
107 { "Broadcom NetXtreme II BCM5708 1000Base-SX" },
108 { "Broadcom NetXtreme II BCM5709 1000Base-T" },
109 { "Broadcom NetXtreme II BCM5709 1000Base-SX" },
112 static struct pci_device_id bnx2_pci_tbl[] = {
113 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
114 PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
115 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
116 PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
117 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
118 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
119 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
120 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
121 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
122 PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
123 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
124 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
125 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
126 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
127 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709,
128 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709 },
129 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709S,
130 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709S },
131 { 0, }
134 static struct flash_spec flash_table[] =
136 #define BUFFERED_FLAGS (BNX2_NV_BUFFERED | BNX2_NV_TRANSLATE)
137 #define NONBUFFERED_FLAGS (BNX2_NV_WREN)
138 /* Slow EEPROM */
139 {0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
140 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
141 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
142 "EEPROM - slow"},
143 /* Expansion entry 0001 */
144 {0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
145 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
146 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
147 "Entry 0001"},
148 /* Saifun SA25F010 (non-buffered flash) */
149 /* strap, cfg1, & write1 need updates */
150 {0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
151 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
152 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
153 "Non-buffered flash (128kB)"},
154 /* Saifun SA25F020 (non-buffered flash) */
155 /* strap, cfg1, & write1 need updates */
156 {0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
157 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
158 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
159 "Non-buffered flash (256kB)"},
160 /* Expansion entry 0100 */
161 {0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
162 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
163 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
164 "Entry 0100"},
165 /* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
166 {0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
167 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
168 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
169 "Entry 0101: ST M45PE10 (128kB non-bufferred)"},
170 /* Entry 0110: ST M45PE20 (non-buffered flash)*/
171 {0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
172 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
173 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
174 "Entry 0110: ST M45PE20 (256kB non-bufferred)"},
175 /* Saifun SA25F005 (non-buffered flash) */
176 /* strap, cfg1, & write1 need updates */
177 {0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
178 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
179 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
180 "Non-buffered flash (64kB)"},
181 /* Fast EEPROM */
182 {0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
183 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
184 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
185 "EEPROM - fast"},
186 /* Expansion entry 1001 */
187 {0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
188 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
189 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
190 "Entry 1001"},
191 /* Expansion entry 1010 */
192 {0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
193 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
194 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
195 "Entry 1010"},
196 /* ATMEL AT45DB011B (buffered flash) */
197 {0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
198 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
199 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
200 "Buffered flash (128kB)"},
201 /* Expansion entry 1100 */
202 {0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
203 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
204 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
205 "Entry 1100"},
206 /* Expansion entry 1101 */
207 {0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
208 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
209 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
210 "Entry 1101"},
211 /* Atmel Expansion entry 1110 */
212 {0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
213 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
214 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
215 "Entry 1110 (Atmel)"},
216 /* ATMEL AT45DB021B (buffered flash) */
217 {0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
218 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
219 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
220 "Buffered flash (256kB)"},
223 static struct flash_spec flash_5709 = {
224 .flags = BNX2_NV_BUFFERED,
225 .page_bits = BCM5709_FLASH_PAGE_BITS,
226 .page_size = BCM5709_FLASH_PAGE_SIZE,
227 .addr_mask = BCM5709_FLASH_BYTE_ADDR_MASK,
228 .total_size = BUFFERED_FLASH_TOTAL_SIZE*2,
229 .name = "5709 Buffered flash (256kB)",
232 MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
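/* Return the number of free tx descriptors.  The hardware producer and
 * consumer indices use 256 slots for 255 usable entries, so a count that
 * crosses a 256-entry boundary is adjusted before computing the result.
 */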
234 static inline u32 bnx2_tx_avail(struct bnx2 *bp, struct bnx2_napi *bnapi)
236 u32 diff;
238 smp_mb();
240 /* The ring uses 256 indices for 255 entries, one of them
241 * needs to be skipped.
243 diff = bp->tx_prod - bnapi->tx_cons;
244 if (unlikely(diff >= TX_DESC_CNT)) {
245 diff &= 0xffff;
246 if (diff == TX_DESC_CNT)
247 diff = MAX_TX_DESC_CNT;
249 return (bp->tx_ring_size - diff);
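/* The accessors below go through the PCICFG register window; the window
 * address and data cycles must not be interleaved, hence indirect_lock
 * is held around each address/data pair.
 */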
252 static u32
253 bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
255 u32 val;
257 spin_lock_bh(&bp->indirect_lock);
258 REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
259 val = REG_RD(bp, BNX2_PCICFG_REG_WINDOW);
260 spin_unlock_bh(&bp->indirect_lock);
261 return val;
264 static void
265 bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
267 spin_lock_bh(&bp->indirect_lock);
268 REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
269 REG_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
270 spin_unlock_bh(&bp->indirect_lock);
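/* Shared memory at bp->shmem_base is the driver/firmware mailbox area
 * and is reached through the same indirect register window.
 */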
273 static void
274 bnx2_shmem_wr(struct bnx2 *bp, u32 offset, u32 val)
276 bnx2_reg_wr_ind(bp, bp->shmem_base + offset, val);
279 static u32
280 bnx2_shmem_rd(struct bnx2 *bp, u32 offset)
282 return (bnx2_reg_rd_ind(bp, bp->shmem_base + offset));
285 static void
286 bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
288 offset += cid_addr;
289 spin_lock_bh(&bp->indirect_lock);
290 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
291 int i;
293 REG_WR(bp, BNX2_CTX_CTX_DATA, val);
294 REG_WR(bp, BNX2_CTX_CTX_CTRL,
295 offset | BNX2_CTX_CTX_CTRL_WRITE_REQ);
296 for (i = 0; i < 5; i++) {
297 u32 val;
298 val = REG_RD(bp, BNX2_CTX_CTX_CTRL);
299 if ((val & BNX2_CTX_CTX_CTRL_WRITE_REQ) == 0)
300 break;
301 udelay(5);
303 } else {
304 REG_WR(bp, BNX2_CTX_DATA_ADR, offset);
305 REG_WR(bp, BNX2_CTX_DATA, val);
307 spin_unlock_bh(&bp->indirect_lock);
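/* MII access through the EMAC MDIO interface.  Hardware auto-polling is
 * paused around a manual read/write, and completion is detected by
 * polling the START_BUSY bit (up to 50 iterations of 10us each).
 */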
310 static int
311 bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
313 u32 val1;
314 int i, ret;
316 if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
317 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
318 val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;
320 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
321 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
323 udelay(40);
326 val1 = (bp->phy_addr << 21) | (reg << 16) |
327 BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
328 BNX2_EMAC_MDIO_COMM_START_BUSY;
329 REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);
331 for (i = 0; i < 50; i++) {
332 udelay(10);
334 val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
335 if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
336 udelay(5);
338 val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
339 val1 &= BNX2_EMAC_MDIO_COMM_DATA;
341 break;
345 if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
346 *val = 0x0;
347 ret = -EBUSY;
349 else {
350 *val = val1;
351 ret = 0;
354 if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
355 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
356 val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;
358 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
359 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
361 udelay(40);
364 return ret;
367 static int
368 bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
370 u32 val1;
371 int i, ret;
373 if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
374 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
375 val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;
377 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
378 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
380 udelay(40);
383 val1 = (bp->phy_addr << 21) | (reg << 16) | val |
384 BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
385 BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
386 REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);
388 for (i = 0; i < 50; i++) {
389 udelay(10);
391 val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
392 if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
393 udelay(5);
394 break;
398 if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
399 ret = -EBUSY;
400 else
401 ret = 0;
403 if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
404 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
405 val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;
407 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
408 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
410 udelay(40);
413 return ret;
416 static void
417 bnx2_disable_int(struct bnx2 *bp)
419 int i;
420 struct bnx2_napi *bnapi;
422 for (i = 0; i < bp->irq_nvecs; i++) {
423 bnapi = &bp->bnx2_napi[i];
424 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
425 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
427 REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
430 static void
431 bnx2_enable_int(struct bnx2 *bp)
433 int i;
434 struct bnx2_napi *bnapi;
436 for (i = 0; i < bp->irq_nvecs; i++) {
437 bnapi = &bp->bnx2_napi[i];
439 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
440 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
441 BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
442 bnapi->last_status_idx);
444 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
445 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
446 bnapi->last_status_idx);
448 REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
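/* Mask the interrupt and wait for any handler already running on
 * another CPU to complete before returning.
 */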
451 static void
452 bnx2_disable_int_sync(struct bnx2 *bp)
454 int i;
456 atomic_inc(&bp->intr_sem);
457 bnx2_disable_int(bp);
458 for (i = 0; i < bp->irq_nvecs; i++)
459 synchronize_irq(bp->irq_tbl[i].vector);
462 static void
463 bnx2_napi_disable(struct bnx2 *bp)
465 int i;
467 for (i = 0; i < bp->irq_nvecs; i++)
468 napi_disable(&bp->bnx2_napi[i].napi);
471 static void
472 bnx2_napi_enable(struct bnx2 *bp)
474 int i;
476 for (i = 0; i < bp->irq_nvecs; i++)
477 napi_enable(&bp->bnx2_napi[i].napi);
480 static void
481 bnx2_netif_stop(struct bnx2 *bp)
483 bnx2_disable_int_sync(bp);
484 if (netif_running(bp->dev)) {
485 bnx2_napi_disable(bp);
486 netif_tx_disable(bp->dev);
487 bp->dev->trans_start = jiffies; /* prevent tx timeout */
491 static void
492 bnx2_netif_start(struct bnx2 *bp)
494 if (atomic_dec_and_test(&bp->intr_sem)) {
495 if (netif_running(bp->dev)) {
496 netif_wake_queue(bp->dev);
497 bnx2_napi_enable(bp);
498 bnx2_enable_int(bp);
503 static void
504 bnx2_free_mem(struct bnx2 *bp)
506 int i;
508 for (i = 0; i < bp->ctx_pages; i++) {
509 if (bp->ctx_blk[i]) {
510 pci_free_consistent(bp->pdev, BCM_PAGE_SIZE,
511 bp->ctx_blk[i],
512 bp->ctx_blk_mapping[i]);
513 bp->ctx_blk[i] = NULL;
516 if (bp->status_blk) {
517 pci_free_consistent(bp->pdev, bp->status_stats_size,
518 bp->status_blk, bp->status_blk_mapping);
519 bp->status_blk = NULL;
520 bp->stats_blk = NULL;
522 if (bp->tx_desc_ring) {
523 pci_free_consistent(bp->pdev, TXBD_RING_SIZE,
524 bp->tx_desc_ring, bp->tx_desc_mapping);
525 bp->tx_desc_ring = NULL;
527 kfree(bp->tx_buf_ring);
528 bp->tx_buf_ring = NULL;
529 for (i = 0; i < bp->rx_max_ring; i++) {
530 if (bp->rx_desc_ring[i])
531 pci_free_consistent(bp->pdev, RXBD_RING_SIZE,
532 bp->rx_desc_ring[i],
533 bp->rx_desc_mapping[i]);
534 bp->rx_desc_ring[i] = NULL;
536 vfree(bp->rx_buf_ring);
537 bp->rx_buf_ring = NULL;
538 for (i = 0; i < bp->rx_max_pg_ring; i++) {
539 if (bp->rx_pg_desc_ring[i])
540 pci_free_consistent(bp->pdev, RXBD_RING_SIZE,
541 bp->rx_pg_desc_ring[i],
542 bp->rx_pg_desc_mapping[i]);
543 bp->rx_pg_desc_ring[i] = NULL;
545 if (bp->rx_pg_ring)
546 vfree(bp->rx_pg_ring);
547 bp->rx_pg_ring = NULL;
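/* Allocate the tx/rx rings, the combined status + statistics block and,
 * on 5709, the host-resident context pages.  Any failure unwinds through
 * bnx2_free_mem().
 */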
550 static int
551 bnx2_alloc_mem(struct bnx2 *bp)
553 int i, status_blk_size;
555 bp->tx_buf_ring = kzalloc(SW_TXBD_RING_SIZE, GFP_KERNEL);
556 if (bp->tx_buf_ring == NULL)
557 return -ENOMEM;
559 bp->tx_desc_ring = pci_alloc_consistent(bp->pdev, TXBD_RING_SIZE,
560 &bp->tx_desc_mapping);
561 if (bp->tx_desc_ring == NULL)
562 goto alloc_mem_err;
564 bp->rx_buf_ring = vmalloc(SW_RXBD_RING_SIZE * bp->rx_max_ring);
565 if (bp->rx_buf_ring == NULL)
566 goto alloc_mem_err;
568 memset(bp->rx_buf_ring, 0, SW_RXBD_RING_SIZE * bp->rx_max_ring);
570 for (i = 0; i < bp->rx_max_ring; i++) {
571 bp->rx_desc_ring[i] =
572 pci_alloc_consistent(bp->pdev, RXBD_RING_SIZE,
573 &bp->rx_desc_mapping[i]);
574 if (bp->rx_desc_ring[i] == NULL)
575 goto alloc_mem_err;
579 if (bp->rx_pg_ring_size) {
580 bp->rx_pg_ring = vmalloc(SW_RXPG_RING_SIZE *
581 bp->rx_max_pg_ring);
582 if (bp->rx_pg_ring == NULL)
583 goto alloc_mem_err;
585 memset(bp->rx_pg_ring, 0, SW_RXPG_RING_SIZE *
586 bp->rx_max_pg_ring);
589 for (i = 0; i < bp->rx_max_pg_ring; i++) {
590 bp->rx_pg_desc_ring[i] =
591 pci_alloc_consistent(bp->pdev, RXBD_RING_SIZE,
592 &bp->rx_pg_desc_mapping[i]);
593 if (bp->rx_pg_desc_ring[i] == NULL)
594 goto alloc_mem_err;
598 /* Combine status and statistics blocks into one allocation. */
599 status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
600 if (bp->flags & BNX2_FLAG_MSIX_CAP)
601 status_blk_size = L1_CACHE_ALIGN(BNX2_MAX_MSIX_HW_VEC *
602 BNX2_SBLK_MSIX_ALIGN_SIZE);
603 bp->status_stats_size = status_blk_size +
604 sizeof(struct statistics_block);
606 bp->status_blk = pci_alloc_consistent(bp->pdev, bp->status_stats_size,
607 &bp->status_blk_mapping);
608 if (bp->status_blk == NULL)
609 goto alloc_mem_err;
611 memset(bp->status_blk, 0, bp->status_stats_size);
613 bp->bnx2_napi[0].status_blk = bp->status_blk;
614 if (bp->flags & BNX2_FLAG_MSIX_CAP) {
615 for (i = 1; i < BNX2_MAX_MSIX_VEC; i++) {
616 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
618 bnapi->status_blk_msix = (void *)
619 ((unsigned long) bp->status_blk +
620 BNX2_SBLK_MSIX_ALIGN_SIZE * i);
621 bnapi->int_num = i << 24;
625 bp->stats_blk = (void *) ((unsigned long) bp->status_blk +
626 status_blk_size);
628 bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;
630 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
631 bp->ctx_pages = 0x2000 / BCM_PAGE_SIZE;
632 if (bp->ctx_pages == 0)
633 bp->ctx_pages = 1;
634 for (i = 0; i < bp->ctx_pages; i++) {
635 bp->ctx_blk[i] = pci_alloc_consistent(bp->pdev,
636 BCM_PAGE_SIZE,
637 &bp->ctx_blk_mapping[i]);
638 if (bp->ctx_blk[i] == NULL)
639 goto alloc_mem_err;
642 return 0;
644 alloc_mem_err:
645 bnx2_free_mem(bp);
646 return -ENOMEM;
649 static void
650 bnx2_report_fw_link(struct bnx2 *bp)
652 u32 fw_link_status = 0;
654 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
655 return;
657 if (bp->link_up) {
658 u32 bmsr;
660 switch (bp->line_speed) {
661 case SPEED_10:
662 if (bp->duplex == DUPLEX_HALF)
663 fw_link_status = BNX2_LINK_STATUS_10HALF;
664 else
665 fw_link_status = BNX2_LINK_STATUS_10FULL;
666 break;
667 case SPEED_100:
668 if (bp->duplex == DUPLEX_HALF)
669 fw_link_status = BNX2_LINK_STATUS_100HALF;
670 else
671 fw_link_status = BNX2_LINK_STATUS_100FULL;
672 break;
673 case SPEED_1000:
674 if (bp->duplex == DUPLEX_HALF)
675 fw_link_status = BNX2_LINK_STATUS_1000HALF;
676 else
677 fw_link_status = BNX2_LINK_STATUS_1000FULL;
678 break;
679 case SPEED_2500:
680 if (bp->duplex == DUPLEX_HALF)
681 fw_link_status = BNX2_LINK_STATUS_2500HALF;
682 else
683 fw_link_status = BNX2_LINK_STATUS_2500FULL;
684 break;
687 fw_link_status |= BNX2_LINK_STATUS_LINK_UP;
689 if (bp->autoneg) {
690 fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;
692 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
693 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
695 if (!(bmsr & BMSR_ANEGCOMPLETE) ||
696 bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)
697 fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
698 else
699 fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
702 else
703 fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;
705 bnx2_shmem_wr(bp, BNX2_LINK_STATUS, fw_link_status);
708 static char *
709 bnx2_xceiver_str(struct bnx2 *bp)
711 return ((bp->phy_port == PORT_FIBRE) ? "SerDes" :
712 ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) ? "Remote Copper" :
713 "Copper"));
716 static void
717 bnx2_report_link(struct bnx2 *bp)
719 if (bp->link_up) {
720 netif_carrier_on(bp->dev);
721 printk(KERN_INFO PFX "%s NIC %s Link is Up, ", bp->dev->name,
722 bnx2_xceiver_str(bp));
724 printk("%d Mbps ", bp->line_speed);
726 if (bp->duplex == DUPLEX_FULL)
727 printk("full duplex");
728 else
729 printk("half duplex");
731 if (bp->flow_ctrl) {
732 if (bp->flow_ctrl & FLOW_CTRL_RX) {
733 printk(", receive ");
734 if (bp->flow_ctrl & FLOW_CTRL_TX)
735 printk("& transmit ");
737 else {
738 printk(", transmit ");
740 printk("flow control ON");
742 printk("\n");
744 else {
745 netif_carrier_off(bp->dev);
746 printk(KERN_ERR PFX "%s NIC %s Link is Down\n", bp->dev->name,
747 bnx2_xceiver_str(bp));
750 bnx2_report_fw_link(bp);
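/* Resolve the tx/rx pause configuration from the local and link-partner
 * advertisements per 802.3 Table 28B-3; for example, a local PAUSE+ASYM
 * advertisement against a partner advertising only ASYM resolves to
 * rx-only flow control.
 */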
753 static void
754 bnx2_resolve_flow_ctrl(struct bnx2 *bp)
756 u32 local_adv, remote_adv;
758 bp->flow_ctrl = 0;
759 if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
760 (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
762 if (bp->duplex == DUPLEX_FULL) {
763 bp->flow_ctrl = bp->req_flow_ctrl;
765 return;
768 if (bp->duplex != DUPLEX_FULL) {
769 return;
772 if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
773 (CHIP_NUM(bp) == CHIP_NUM_5708)) {
774 u32 val;
776 bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
777 if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
778 bp->flow_ctrl |= FLOW_CTRL_TX;
779 if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
780 bp->flow_ctrl |= FLOW_CTRL_RX;
781 return;
784 bnx2_read_phy(bp, bp->mii_adv, &local_adv);
785 bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
787 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
788 u32 new_local_adv = 0;
789 u32 new_remote_adv = 0;
791 if (local_adv & ADVERTISE_1000XPAUSE)
792 new_local_adv |= ADVERTISE_PAUSE_CAP;
793 if (local_adv & ADVERTISE_1000XPSE_ASYM)
794 new_local_adv |= ADVERTISE_PAUSE_ASYM;
795 if (remote_adv & ADVERTISE_1000XPAUSE)
796 new_remote_adv |= ADVERTISE_PAUSE_CAP;
797 if (remote_adv & ADVERTISE_1000XPSE_ASYM)
798 new_remote_adv |= ADVERTISE_PAUSE_ASYM;
800 local_adv = new_local_adv;
801 remote_adv = new_remote_adv;
804 /* See Table 28B-3 of 802.3ab-1999 spec. */
805 if (local_adv & ADVERTISE_PAUSE_CAP) {
806 if (local_adv & ADVERTISE_PAUSE_ASYM) {
807 if (remote_adv & ADVERTISE_PAUSE_CAP) {
808 bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
810 else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
811 bp->flow_ctrl = FLOW_CTRL_RX;
814 else {
815 if (remote_adv & ADVERTISE_PAUSE_CAP) {
816 bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
820 else if (local_adv & ADVERTISE_PAUSE_ASYM) {
821 if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
822 (remote_adv & ADVERTISE_PAUSE_ASYM)) {
824 bp->flow_ctrl = FLOW_CTRL_TX;
829 static int
830 bnx2_5709s_linkup(struct bnx2 *bp)
832 u32 val, speed;
834 bp->link_up = 1;
836 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_GP_STATUS);
837 bnx2_read_phy(bp, MII_BNX2_GP_TOP_AN_STATUS1, &val);
838 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
840 if ((bp->autoneg & AUTONEG_SPEED) == 0) {
841 bp->line_speed = bp->req_line_speed;
842 bp->duplex = bp->req_duplex;
843 return 0;
845 speed = val & MII_BNX2_GP_TOP_AN_SPEED_MSK;
846 switch (speed) {
847 case MII_BNX2_GP_TOP_AN_SPEED_10:
848 bp->line_speed = SPEED_10;
849 break;
850 case MII_BNX2_GP_TOP_AN_SPEED_100:
851 bp->line_speed = SPEED_100;
852 break;
853 case MII_BNX2_GP_TOP_AN_SPEED_1G:
854 case MII_BNX2_GP_TOP_AN_SPEED_1GKV:
855 bp->line_speed = SPEED_1000;
856 break;
857 case MII_BNX2_GP_TOP_AN_SPEED_2_5G:
858 bp->line_speed = SPEED_2500;
859 break;
861 if (val & MII_BNX2_GP_TOP_AN_FD)
862 bp->duplex = DUPLEX_FULL;
863 else
864 bp->duplex = DUPLEX_HALF;
865 return 0;
868 static int
869 bnx2_5708s_linkup(struct bnx2 *bp)
871 u32 val;
873 bp->link_up = 1;
874 bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
875 switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
876 case BCM5708S_1000X_STAT1_SPEED_10:
877 bp->line_speed = SPEED_10;
878 break;
879 case BCM5708S_1000X_STAT1_SPEED_100:
880 bp->line_speed = SPEED_100;
881 break;
882 case BCM5708S_1000X_STAT1_SPEED_1G:
883 bp->line_speed = SPEED_1000;
884 break;
885 case BCM5708S_1000X_STAT1_SPEED_2G5:
886 bp->line_speed = SPEED_2500;
887 break;
889 if (val & BCM5708S_1000X_STAT1_FD)
890 bp->duplex = DUPLEX_FULL;
891 else
892 bp->duplex = DUPLEX_HALF;
894 return 0;
897 static int
898 bnx2_5706s_linkup(struct bnx2 *bp)
900 u32 bmcr, local_adv, remote_adv, common;
902 bp->link_up = 1;
903 bp->line_speed = SPEED_1000;
905 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
906 if (bmcr & BMCR_FULLDPLX) {
907 bp->duplex = DUPLEX_FULL;
909 else {
910 bp->duplex = DUPLEX_HALF;
913 if (!(bmcr & BMCR_ANENABLE)) {
914 return 0;
917 bnx2_read_phy(bp, bp->mii_adv, &local_adv);
918 bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
920 common = local_adv & remote_adv;
921 if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {
923 if (common & ADVERTISE_1000XFULL) {
924 bp->duplex = DUPLEX_FULL;
926 else {
927 bp->duplex = DUPLEX_HALF;
931 return 0;
934 static int
935 bnx2_copper_linkup(struct bnx2 *bp)
937 u32 bmcr;
939 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
940 if (bmcr & BMCR_ANENABLE) {
941 u32 local_adv, remote_adv, common;
943 bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
944 bnx2_read_phy(bp, MII_STAT1000, &remote_adv);
946 common = local_adv & (remote_adv >> 2);
947 if (common & ADVERTISE_1000FULL) {
948 bp->line_speed = SPEED_1000;
949 bp->duplex = DUPLEX_FULL;
951 else if (common & ADVERTISE_1000HALF) {
952 bp->line_speed = SPEED_1000;
953 bp->duplex = DUPLEX_HALF;
955 else {
956 bnx2_read_phy(bp, bp->mii_adv, &local_adv);
957 bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
959 common = local_adv & remote_adv;
960 if (common & ADVERTISE_100FULL) {
961 bp->line_speed = SPEED_100;
962 bp->duplex = DUPLEX_FULL;
964 else if (common & ADVERTISE_100HALF) {
965 bp->line_speed = SPEED_100;
966 bp->duplex = DUPLEX_HALF;
968 else if (common & ADVERTISE_10FULL) {
969 bp->line_speed = SPEED_10;
970 bp->duplex = DUPLEX_FULL;
972 else if (common & ADVERTISE_10HALF) {
973 bp->line_speed = SPEED_10;
974 bp->duplex = DUPLEX_HALF;
976 else {
977 bp->line_speed = 0;
978 bp->link_up = 0;
982 else {
983 if (bmcr & BMCR_SPEED100) {
984 bp->line_speed = SPEED_100;
986 else {
987 bp->line_speed = SPEED_10;
989 if (bmcr & BMCR_FULLDPLX) {
990 bp->duplex = DUPLEX_FULL;
992 else {
993 bp->duplex = DUPLEX_HALF;
997 return 0;
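/* Program L2 rx context 0.  On 5709, flow-control water marks derived
 * from the rx ring size are scaled and packed into the context type
 * word (hi_water is capped at the 4-bit field maximum of 0xf).
 */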
1000 static void
1001 bnx2_init_rx_context0(struct bnx2 *bp)
1003 u32 val, rx_cid_addr = GET_CID_ADDR(RX_CID);
1005 val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
1006 val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
1007 val |= 0x02 << 8;
1009 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1010 u32 lo_water, hi_water;
1012 if (bp->flow_ctrl & FLOW_CTRL_TX)
1013 lo_water = BNX2_L2CTX_LO_WATER_MARK_DEFAULT;
1014 else
1015 lo_water = BNX2_L2CTX_LO_WATER_MARK_DIS;
1016 if (lo_water >= bp->rx_ring_size)
1017 lo_water = 0;
1019 hi_water = bp->rx_ring_size / 4;
1021 if (hi_water <= lo_water)
1022 lo_water = 0;
1024 hi_water /= BNX2_L2CTX_HI_WATER_MARK_SCALE;
1025 lo_water /= BNX2_L2CTX_LO_WATER_MARK_SCALE;
1027 if (hi_water > 0xf)
1028 hi_water = 0xf;
1029 else if (hi_water == 0)
1030 lo_water = 0;
1031 val |= lo_water | (hi_water << BNX2_L2CTX_HI_WATER_MARK_SHIFT);
1033 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_CTX_TYPE, val);
1036 static int
1037 bnx2_set_mac_link(struct bnx2 *bp)
1039 u32 val;
1041 REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
1042 if (bp->link_up && (bp->line_speed == SPEED_1000) &&
1043 (bp->duplex == DUPLEX_HALF)) {
1044 REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
1047 /* Configure the EMAC mode register. */
1048 val = REG_RD(bp, BNX2_EMAC_MODE);
1050 val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
1051 BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
1052 BNX2_EMAC_MODE_25G_MODE);
1054 if (bp->link_up) {
1055 switch (bp->line_speed) {
1056 case SPEED_10:
1057 if (CHIP_NUM(bp) != CHIP_NUM_5706) {
1058 val |= BNX2_EMAC_MODE_PORT_MII_10M;
1059 break;
1061 /* fall through */
1062 case SPEED_100:
1063 val |= BNX2_EMAC_MODE_PORT_MII;
1064 break;
1065 case SPEED_2500:
1066 val |= BNX2_EMAC_MODE_25G_MODE;
1067 /* fall through */
1068 case SPEED_1000:
1069 val |= BNX2_EMAC_MODE_PORT_GMII;
1070 break;
1073 else {
1074 val |= BNX2_EMAC_MODE_PORT_GMII;
1077 /* Set the MAC to operate in the appropriate duplex mode. */
1078 if (bp->duplex == DUPLEX_HALF)
1079 val |= BNX2_EMAC_MODE_HALF_DUPLEX;
1080 REG_WR(bp, BNX2_EMAC_MODE, val);
1082 /* Enable/disable rx PAUSE. */
1083 bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;
1085 if (bp->flow_ctrl & FLOW_CTRL_RX)
1086 bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
1087 REG_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);
1089 /* Enable/disable tx PAUSE. */
1090 val = REG_RD(bp, BNX2_EMAC_TX_MODE);
1091 val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;
1093 if (bp->flow_ctrl & FLOW_CTRL_TX)
1094 val |= BNX2_EMAC_TX_MODE_FLOW_EN;
1095 REG_WR(bp, BNX2_EMAC_TX_MODE, val);
1097 /* Acknowledge the interrupt. */
1098 REG_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);
1100 if (CHIP_NUM(bp) == CHIP_NUM_5709)
1101 bnx2_init_rx_context0(bp);
1103 return 0;
1106 static void
1107 bnx2_enable_bmsr1(struct bnx2 *bp)
1109 if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1110 (CHIP_NUM(bp) == CHIP_NUM_5709))
1111 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1112 MII_BNX2_BLK_ADDR_GP_STATUS);
1115 static void
1116 bnx2_disable_bmsr1(struct bnx2 *bp)
1118 if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1119 (CHIP_NUM(bp) == CHIP_NUM_5709))
1120 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1121 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1124 static int
1125 bnx2_test_and_enable_2g5(struct bnx2 *bp)
1127 u32 up1;
1128 int ret = 1;
1130 if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1131 return 0;
1133 if (bp->autoneg & AUTONEG_SPEED)
1134 bp->advertising |= ADVERTISED_2500baseX_Full;
1136 if (CHIP_NUM(bp) == CHIP_NUM_5709)
1137 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
1139 bnx2_read_phy(bp, bp->mii_up1, &up1);
1140 if (!(up1 & BCM5708S_UP1_2G5)) {
1141 up1 |= BCM5708S_UP1_2G5;
1142 bnx2_write_phy(bp, bp->mii_up1, up1);
1143 ret = 0;
1146 if (CHIP_NUM(bp) == CHIP_NUM_5709)
1147 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1148 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1150 return ret;
1153 static int
1154 bnx2_test_and_disable_2g5(struct bnx2 *bp)
1156 u32 up1;
1157 int ret = 0;
1159 if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1160 return 0;
1162 if (CHIP_NUM(bp) == CHIP_NUM_5709)
1163 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
1165 bnx2_read_phy(bp, bp->mii_up1, &up1);
1166 if (up1 & BCM5708S_UP1_2G5) {
1167 up1 &= ~BCM5708S_UP1_2G5;
1168 bnx2_write_phy(bp, bp->mii_up1, up1);
1169 ret = 1;
1172 if (CHIP_NUM(bp) == CHIP_NUM_5709)
1173 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1174 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1176 return ret;
1179 static void
1180 bnx2_enable_forced_2g5(struct bnx2 *bp)
1182 u32 bmcr;
1184 if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1185 return;
1187 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1188 u32 val;
1190 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1191 MII_BNX2_BLK_ADDR_SERDES_DIG);
1192 bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
1193 val &= ~MII_BNX2_SD_MISC1_FORCE_MSK;
1194 val |= MII_BNX2_SD_MISC1_FORCE | MII_BNX2_SD_MISC1_FORCE_2_5G;
1195 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
1197 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1198 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1199 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1201 } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
1202 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1203 bmcr |= BCM5708S_BMCR_FORCE_2500;
1206 if (bp->autoneg & AUTONEG_SPEED) {
1207 bmcr &= ~BMCR_ANENABLE;
1208 if (bp->req_duplex == DUPLEX_FULL)
1209 bmcr |= BMCR_FULLDPLX;
1211 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1214 static void
1215 bnx2_disable_forced_2g5(struct bnx2 *bp)
1217 u32 bmcr;
1219 if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1220 return;
1222 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1223 u32 val;
1225 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1226 MII_BNX2_BLK_ADDR_SERDES_DIG);
1227 bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
1228 val &= ~MII_BNX2_SD_MISC1_FORCE;
1229 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
1231 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1232 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1233 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1235 } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
1236 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1237 bmcr &= ~BCM5708S_BMCR_FORCE_2500;
1240 if (bp->autoneg & AUTONEG_SPEED)
1241 bmcr |= BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_ANRESTART;
1242 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1245 static void
1246 bnx2_5706s_force_link_dn(struct bnx2 *bp, int start)
1248 u32 val;
1250 bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_SERDES_CTL);
1251 bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
1252 if (start)
1253 bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val & 0xff0f);
1254 else
1255 bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val | 0xc0);
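/* Re-check PHY link state and reprogram the MAC (speed, duplex, pause)
 * when it changes; remote-PHY configurations are handled entirely by
 * the firmware and return early.
 */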
1258 static int
1259 bnx2_set_link(struct bnx2 *bp)
1261 u32 bmsr;
1262 u8 link_up;
1264 if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) {
1265 bp->link_up = 1;
1266 return 0;
1269 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
1270 return 0;
1272 link_up = bp->link_up;
1274 bnx2_enable_bmsr1(bp);
1275 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
1276 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
1277 bnx2_disable_bmsr1(bp);
1279 if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1280 (CHIP_NUM(bp) == CHIP_NUM_5706)) {
1284 u32 val, an_dbg;
1287 if (bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN) {
1288 bnx2_5706s_force_link_dn(bp, 0);
1289 bp->phy_flags &= ~BNX2_PHY_FLAG_FORCED_DOWN;
1291 val = REG_RD(bp, BNX2_EMAC_STATUS);
1296 bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
1297 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
1298 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
1300 if ((val & BNX2_EMAC_STATUS_LINK) &&
1301 !(an_dbg & MISC_SHDW_AN_DBG_NOSYNC))
1303 bmsr |= BMSR_LSTATUS;
1304 else
1305 bmsr &= ~BMSR_LSTATUS;
1308 if (bmsr & BMSR_LSTATUS) {
1309 bp->link_up = 1;
1311 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1312 if (CHIP_NUM(bp) == CHIP_NUM_5706)
1313 bnx2_5706s_linkup(bp);
1314 else if (CHIP_NUM(bp) == CHIP_NUM_5708)
1315 bnx2_5708s_linkup(bp);
1316 else if (CHIP_NUM(bp) == CHIP_NUM_5709)
1317 bnx2_5709s_linkup(bp);
1319 else {
1320 bnx2_copper_linkup(bp);
1322 bnx2_resolve_flow_ctrl(bp);
1324 else {
1325 if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1326 (bp->autoneg & AUTONEG_SPEED))
1327 bnx2_disable_forced_2g5(bp);
1329 if (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT) {
1330 u32 bmcr;
1332 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1333 bmcr |= BMCR_ANENABLE;
1334 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1336 bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
1338 bp->link_up = 0;
1341 if (bp->link_up != link_up) {
1342 bnx2_report_link(bp);
1345 bnx2_set_mac_link(bp);
1347 return 0;
1350 static int
1351 bnx2_reset_phy(struct bnx2 *bp)
1353 int i;
1354 u32 reg;
1356 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_RESET);
1358 #define PHY_RESET_MAX_WAIT 100
1359 for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
1360 udelay(10);
1362 bnx2_read_phy(bp, bp->mii_bmcr, &reg);
1363 if (!(reg & BMCR_RESET)) {
1364 udelay(20);
1365 break;
1368 if (i == PHY_RESET_MAX_WAIT) {
1369 return -EBUSY;
1371 return 0;
1374 static u32
1375 bnx2_phy_get_pause_adv(struct bnx2 *bp)
1377 u32 adv = 0;
1379 if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
1380 (FLOW_CTRL_RX | FLOW_CTRL_TX)) {
1382 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1383 adv = ADVERTISE_1000XPAUSE;
1385 else {
1386 adv = ADVERTISE_PAUSE_CAP;
1389 else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
1390 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1391 adv = ADVERTISE_1000XPSE_ASYM;
1393 else {
1394 adv = ADVERTISE_PAUSE_ASYM;
1397 else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
1398 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1399 adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1401 else {
1402 adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1405 return adv;
1408 static int bnx2_fw_sync(struct bnx2 *, u32, int);
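/* With BNX2_PHY_FLAG_REMOTE_PHY_CAP the PHY is owned by the management
 * firmware: the requested speed and pause settings are encoded into
 * BNX2_DRV_MB_ARG0 and applied via a SET_LINK firmware command.
 */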
1410 static int
1411 bnx2_setup_remote_phy(struct bnx2 *bp, u8 port)
1413 u32 speed_arg = 0, pause_adv;
1415 pause_adv = bnx2_phy_get_pause_adv(bp);
1417 if (bp->autoneg & AUTONEG_SPEED) {
1418 speed_arg |= BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG;
1419 if (bp->advertising & ADVERTISED_10baseT_Half)
1420 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10HALF;
1421 if (bp->advertising & ADVERTISED_10baseT_Full)
1422 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10FULL;
1423 if (bp->advertising & ADVERTISED_100baseT_Half)
1424 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100HALF;
1425 if (bp->advertising & ADVERTISED_100baseT_Full)
1426 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100FULL;
1427 if (bp->advertising & ADVERTISED_1000baseT_Full)
1428 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
1429 if (bp->advertising & ADVERTISED_2500baseX_Full)
1430 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
1431 } else {
1432 if (bp->req_line_speed == SPEED_2500)
1433 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
1434 else if (bp->req_line_speed == SPEED_1000)
1435 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
1436 else if (bp->req_line_speed == SPEED_100) {
1437 if (bp->req_duplex == DUPLEX_FULL)
1438 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100FULL;
1439 else
1440 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100HALF;
1441 } else if (bp->req_line_speed == SPEED_10) {
1442 if (bp->req_duplex == DUPLEX_FULL)
1443 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10FULL;
1444 else
1445 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10HALF;
1449 if (pause_adv & (ADVERTISE_1000XPAUSE | ADVERTISE_PAUSE_CAP))
1450 speed_arg |= BNX2_NETLINK_SET_LINK_FC_SYM_PAUSE;
1451 if (pause_adv & (ADVERTISE_1000XPSE_ASYM | ADVERTISE_PAUSE_ASYM))
1452 speed_arg |= BNX2_NETLINK_SET_LINK_FC_ASYM_PAUSE;
1454 if (port == PORT_TP)
1455 speed_arg |= BNX2_NETLINK_SET_LINK_PHY_APP_REMOTE |
1456 BNX2_NETLINK_SET_LINK_ETH_AT_WIRESPEED;
1458 bnx2_shmem_wr(bp, BNX2_DRV_MB_ARG0, speed_arg);
1460 spin_unlock_bh(&bp->phy_lock);
1461 bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_CMD_SET_LINK, 0);
1462 spin_lock_bh(&bp->phy_lock);
1464 return 0;
1467 static int
1468 bnx2_setup_serdes_phy(struct bnx2 *bp, u8 port)
1470 u32 adv, bmcr;
1471 u32 new_adv = 0;
1473 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
1474 return (bnx2_setup_remote_phy(bp, port));
1476 if (!(bp->autoneg & AUTONEG_SPEED)) {
1477 u32 new_bmcr;
1478 int force_link_down = 0;
1480 if (bp->req_line_speed == SPEED_2500) {
1481 if (!bnx2_test_and_enable_2g5(bp))
1482 force_link_down = 1;
1483 } else if (bp->req_line_speed == SPEED_1000) {
1484 if (bnx2_test_and_disable_2g5(bp))
1485 force_link_down = 1;
1487 bnx2_read_phy(bp, bp->mii_adv, &adv);
1488 adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);
1490 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1491 new_bmcr = bmcr & ~BMCR_ANENABLE;
1492 new_bmcr |= BMCR_SPEED1000;
1494 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1495 if (bp->req_line_speed == SPEED_2500)
1496 bnx2_enable_forced_2g5(bp);
1497 else if (bp->req_line_speed == SPEED_1000) {
1498 bnx2_disable_forced_2g5(bp);
1499 new_bmcr &= ~0x2000;
1502 } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
1503 if (bp->req_line_speed == SPEED_2500)
1504 new_bmcr |= BCM5708S_BMCR_FORCE_2500;
1505 else
1506 new_bmcr = bmcr & ~BCM5708S_BMCR_FORCE_2500;
1509 if (bp->req_duplex == DUPLEX_FULL) {
1510 adv |= ADVERTISE_1000XFULL;
1511 new_bmcr |= BMCR_FULLDPLX;
1513 else {
1514 adv |= ADVERTISE_1000XHALF;
1515 new_bmcr &= ~BMCR_FULLDPLX;
1517 if ((new_bmcr != bmcr) || (force_link_down)) {
1518 /* Force a link down visible on the other side */
1519 if (bp->link_up) {
1520 bnx2_write_phy(bp, bp->mii_adv, adv &
1521 ~(ADVERTISE_1000XFULL |
1522 ADVERTISE_1000XHALF));
1523 bnx2_write_phy(bp, bp->mii_bmcr, bmcr |
1524 BMCR_ANRESTART | BMCR_ANENABLE);
1526 bp->link_up = 0;
1527 netif_carrier_off(bp->dev);
1528 bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
1529 bnx2_report_link(bp);
1531 bnx2_write_phy(bp, bp->mii_adv, adv);
1532 bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
1533 } else {
1534 bnx2_resolve_flow_ctrl(bp);
1535 bnx2_set_mac_link(bp);
1537 return 0;
1540 bnx2_test_and_enable_2g5(bp);
1542 if (bp->advertising & ADVERTISED_1000baseT_Full)
1543 new_adv |= ADVERTISE_1000XFULL;
1545 new_adv |= bnx2_phy_get_pause_adv(bp);
1547 bnx2_read_phy(bp, bp->mii_adv, &adv);
1548 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1550 bp->serdes_an_pending = 0;
1551 if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
1552 /* Force a link down visible on the other side */
1553 if (bp->link_up) {
1554 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
1555 spin_unlock_bh(&bp->phy_lock);
1556 msleep(20);
1557 spin_lock_bh(&bp->phy_lock);
1560 bnx2_write_phy(bp, bp->mii_adv, new_adv);
1561 bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART |
1562 BMCR_ANENABLE);
1563 /* Speed up link-up time when the link partner
1564 * does not autonegotiate which is very common
1565 * in blade servers. Some blade servers use
1566 * IPMI for keyboard input and it's important
1567 * to minimize link disruptions. Autonegotiation involves
1568 * exchanging base pages plus 3 next pages and
1569 * normally completes in about 120 msec.
1571 bp->current_interval = SERDES_AN_TIMEOUT;
1572 bp->serdes_an_pending = 1;
1573 mod_timer(&bp->timer, jiffies + bp->current_interval);
1574 } else {
1575 bnx2_resolve_flow_ctrl(bp);
1576 bnx2_set_mac_link(bp);
1579 return 0;
1582 #define ETHTOOL_ALL_FIBRE_SPEED \
1583 (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) ? \
1584 (ADVERTISED_2500baseX_Full | ADVERTISED_1000baseT_Full) :\
1585 (ADVERTISED_1000baseT_Full)
1587 #define ETHTOOL_ALL_COPPER_SPEED \
1588 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
1589 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
1590 ADVERTISED_1000baseT_Full)
1592 #define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
1593 ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)
1595 #define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)
1597 static void
1598 bnx2_set_default_remote_link(struct bnx2 *bp)
1600 u32 link;
1602 if (bp->phy_port == PORT_TP)
1603 link = bnx2_shmem_rd(bp, BNX2_RPHY_COPPER_LINK);
1604 else
1605 link = bnx2_shmem_rd(bp, BNX2_RPHY_SERDES_LINK);
1607 if (link & BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG) {
1608 bp->req_line_speed = 0;
1609 bp->autoneg |= AUTONEG_SPEED;
1610 bp->advertising = ADVERTISED_Autoneg;
1611 if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
1612 bp->advertising |= ADVERTISED_10baseT_Half;
1613 if (link & BNX2_NETLINK_SET_LINK_SPEED_10FULL)
1614 bp->advertising |= ADVERTISED_10baseT_Full;
1615 if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
1616 bp->advertising |= ADVERTISED_100baseT_Half;
1617 if (link & BNX2_NETLINK_SET_LINK_SPEED_100FULL)
1618 bp->advertising |= ADVERTISED_100baseT_Full;
1619 if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
1620 bp->advertising |= ADVERTISED_1000baseT_Full;
1621 if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
1622 bp->advertising |= ADVERTISED_2500baseX_Full;
1623 } else {
1624 bp->autoneg = 0;
1625 bp->advertising = 0;
1626 bp->req_duplex = DUPLEX_FULL;
1627 if (link & BNX2_NETLINK_SET_LINK_SPEED_10) {
1628 bp->req_line_speed = SPEED_10;
1629 if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
1630 bp->req_duplex = DUPLEX_HALF;
1632 if (link & BNX2_NETLINK_SET_LINK_SPEED_100) {
1633 bp->req_line_speed = SPEED_100;
1634 if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
1635 bp->req_duplex = DUPLEX_HALF;
1637 if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
1638 bp->req_line_speed = SPEED_1000;
1639 if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
1640 bp->req_line_speed = SPEED_2500;
1644 static void
1645 bnx2_set_default_link(struct bnx2 *bp)
1647 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
1648 return bnx2_set_default_remote_link(bp);
1650 bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
1651 bp->req_line_speed = 0;
1652 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1653 u32 reg;
1655 bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;
1657 reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG);
1658 reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
1659 if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
1660 bp->autoneg = 0;
1661 bp->req_line_speed = bp->line_speed = SPEED_1000;
1662 bp->req_duplex = DUPLEX_FULL;
1664 } else
1665 bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
1668 static void
1669 bnx2_send_heart_beat(struct bnx2 *bp)
1671 u32 msg;
1672 u32 addr;
1674 spin_lock(&bp->indirect_lock);
1675 msg = (u32) (++bp->fw_drv_pulse_wr_seq & BNX2_DRV_PULSE_SEQ_MASK);
1676 addr = bp->shmem_base + BNX2_DRV_PULSE_MB;
1677 REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, addr);
1678 REG_WR(bp, BNX2_PCICFG_REG_WINDOW, msg);
1679 spin_unlock(&bp->indirect_lock);
1682 static void
1683 bnx2_remote_phy_event(struct bnx2 *bp)
1685 u32 msg;
1686 u8 link_up = bp->link_up;
1687 u8 old_port;
1689 msg = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);
1691 if (msg & BNX2_LINK_STATUS_HEART_BEAT_EXPIRED)
1692 bnx2_send_heart_beat(bp);
1694 msg &= ~BNX2_LINK_STATUS_HEART_BEAT_EXPIRED;
1696 if ((msg & BNX2_LINK_STATUS_LINK_UP) == BNX2_LINK_STATUS_LINK_DOWN)
1697 bp->link_up = 0;
1698 else {
1699 u32 speed;
1701 bp->link_up = 1;
1702 speed = msg & BNX2_LINK_STATUS_SPEED_MASK;
1703 bp->duplex = DUPLEX_FULL;
1704 switch (speed) {
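/* each _HALF case intentionally falls through to the matching
 * _FULL case below it to pick up the line speed */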
1705 case BNX2_LINK_STATUS_10HALF:
1706 bp->duplex = DUPLEX_HALF;
1707 case BNX2_LINK_STATUS_10FULL:
1708 bp->line_speed = SPEED_10;
1709 break;
1710 case BNX2_LINK_STATUS_100HALF:
1711 bp->duplex = DUPLEX_HALF;
1712 case BNX2_LINK_STATUS_100BASE_T4:
1713 case BNX2_LINK_STATUS_100FULL:
1714 bp->line_speed = SPEED_100;
1715 break;
1716 case BNX2_LINK_STATUS_1000HALF:
1717 bp->duplex = DUPLEX_HALF;
1718 case BNX2_LINK_STATUS_1000FULL:
1719 bp->line_speed = SPEED_1000;
1720 break;
1721 case BNX2_LINK_STATUS_2500HALF:
1722 bp->duplex = DUPLEX_HALF;
1723 case BNX2_LINK_STATUS_2500FULL:
1724 bp->line_speed = SPEED_2500;
1725 break;
1726 default:
1727 bp->line_speed = 0;
1728 break;
1731 spin_lock(&bp->phy_lock);
1732 bp->flow_ctrl = 0;
1733 if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
1734 (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
1735 if (bp->duplex == DUPLEX_FULL)
1736 bp->flow_ctrl = bp->req_flow_ctrl;
1737 } else {
1738 if (msg & BNX2_LINK_STATUS_TX_FC_ENABLED)
1739 bp->flow_ctrl |= FLOW_CTRL_TX;
1740 if (msg & BNX2_LINK_STATUS_RX_FC_ENABLED)
1741 bp->flow_ctrl |= FLOW_CTRL_RX;
1744 old_port = bp->phy_port;
1745 if (msg & BNX2_LINK_STATUS_SERDES_LINK)
1746 bp->phy_port = PORT_FIBRE;
1747 else
1748 bp->phy_port = PORT_TP;
1750 if (old_port != bp->phy_port)
1751 bnx2_set_default_link(bp);
1753 spin_unlock(&bp->phy_lock);
1755 if (bp->link_up != link_up)
1756 bnx2_report_link(bp);
1758 bnx2_set_mac_link(bp);
1761 static int
1762 bnx2_set_remote_link(struct bnx2 *bp)
1764 u32 evt_code;
1766 evt_code = bnx2_shmem_rd(bp, BNX2_FW_EVT_CODE_MB);
1767 switch (evt_code) {
1768 case BNX2_FW_EVT_CODE_LINK_EVENT:
1769 bnx2_remote_phy_event(bp);
1770 break;
1771 case BNX2_FW_EVT_CODE_SW_TIMER_EXPIRATION_EVENT:
1772 default:
1773 bnx2_send_heart_beat(bp);
1774 break;
1776 return 0;
1779 static int
1780 bnx2_setup_copper_phy(struct bnx2 *bp)
1782 u32 bmcr;
1783 u32 new_bmcr;
1785 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1787 if (bp->autoneg & AUTONEG_SPEED) {
1788 u32 adv_reg, adv1000_reg;
1789 u32 new_adv_reg = 0;
1790 u32 new_adv1000_reg = 0;
1792 bnx2_read_phy(bp, bp->mii_adv, &adv_reg);
1793 adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
1794 ADVERTISE_PAUSE_ASYM);
1796 bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
1797 adv1000_reg &= PHY_ALL_1000_SPEED;
1799 if (bp->advertising & ADVERTISED_10baseT_Half)
1800 new_adv_reg |= ADVERTISE_10HALF;
1801 if (bp->advertising & ADVERTISED_10baseT_Full)
1802 new_adv_reg |= ADVERTISE_10FULL;
1803 if (bp->advertising & ADVERTISED_100baseT_Half)
1804 new_adv_reg |= ADVERTISE_100HALF;
1805 if (bp->advertising & ADVERTISED_100baseT_Full)
1806 new_adv_reg |= ADVERTISE_100FULL;
1807 if (bp->advertising & ADVERTISED_1000baseT_Full)
1808 new_adv1000_reg |= ADVERTISE_1000FULL;
1810 new_adv_reg |= ADVERTISE_CSMA;
1812 new_adv_reg |= bnx2_phy_get_pause_adv(bp);
1814 if ((adv1000_reg != new_adv1000_reg) ||
1815 (adv_reg != new_adv_reg) ||
1816 ((bmcr & BMCR_ANENABLE) == 0)) {
1818 bnx2_write_phy(bp, bp->mii_adv, new_adv_reg);
1819 bnx2_write_phy(bp, MII_CTRL1000, new_adv1000_reg);
1820 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_ANRESTART |
1821 BMCR_ANENABLE);
1823 else if (bp->link_up) {
1824 /* Flow ctrl may have changed from auto to forced */
1825 /* or vice-versa. */
1827 bnx2_resolve_flow_ctrl(bp);
1828 bnx2_set_mac_link(bp);
1830 return 0;
1833 new_bmcr = 0;
1834 if (bp->req_line_speed == SPEED_100) {
1835 new_bmcr |= BMCR_SPEED100;
1837 if (bp->req_duplex == DUPLEX_FULL) {
1838 new_bmcr |= BMCR_FULLDPLX;
1840 if (new_bmcr != bmcr) {
1841 u32 bmsr;
1843 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
1844 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
1846 if (bmsr & BMSR_LSTATUS) {
1847 /* Force link down */
1848 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
1849 spin_unlock_bh(&bp->phy_lock);
1850 msleep(50);
1851 spin_lock_bh(&bp->phy_lock);
1853 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
1854 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
1857 bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
1859 /* Normally, the new speed is set up after the link has
1860 * gone down and up again. In some cases, link will not go
1861 * down so we need to set up the new speed here.
1863 if (bmsr & BMSR_LSTATUS) {
1864 bp->line_speed = bp->req_line_speed;
1865 bp->duplex = bp->req_duplex;
1866 bnx2_resolve_flow_ctrl(bp);
1867 bnx2_set_mac_link(bp);
1869 } else {
1870 bnx2_resolve_flow_ctrl(bp);
1871 bnx2_set_mac_link(bp);
1873 return 0;
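/* Top-level PHY setup: MAC loopback skips PHY programming; otherwise
 * dispatch to the SerDes or copper path.
 */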
1876 static int
1877 bnx2_setup_phy(struct bnx2 *bp, u8 port)
1879 if (bp->loopback == MAC_LOOPBACK)
1880 return 0;
1882 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1883 return (bnx2_setup_serdes_phy(bp, port));
1885 else {
1886 return (bnx2_setup_copper_phy(bp));
1890 static int
1891 bnx2_init_5709s_phy(struct bnx2 *bp)
1893 u32 val;
1895 bp->mii_bmcr = MII_BMCR + 0x10;
1896 bp->mii_bmsr = MII_BMSR + 0x10;
1897 bp->mii_bmsr1 = MII_BNX2_GP_TOP_AN_STATUS1;
1898 bp->mii_adv = MII_ADVERTISE + 0x10;
1899 bp->mii_lpa = MII_LPA + 0x10;
1900 bp->mii_up1 = MII_BNX2_OVER1G_UP1;
1902 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_AER);
1903 bnx2_write_phy(bp, MII_BNX2_AER_AER, MII_BNX2_AER_AER_AN_MMD);
1905 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1906 bnx2_reset_phy(bp);
1908 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_SERDES_DIG);
1910 bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, &val);
1911 val &= ~MII_BNX2_SD_1000XCTL1_AUTODET;
1912 val |= MII_BNX2_SD_1000XCTL1_FIBER;
1913 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, val);
1915 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
1916 bnx2_read_phy(bp, MII_BNX2_OVER1G_UP1, &val);
1917 if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
1918 val |= BCM5708S_UP1_2G5;
1919 else
1920 val &= ~BCM5708S_UP1_2G5;
1921 bnx2_write_phy(bp, MII_BNX2_OVER1G_UP1, val);
1923 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_BAM_NXTPG);
1924 bnx2_read_phy(bp, MII_BNX2_BAM_NXTPG_CTL, &val);
1925 val |= MII_BNX2_NXTPG_CTL_T2 | MII_BNX2_NXTPG_CTL_BAM;
1926 bnx2_write_phy(bp, MII_BNX2_BAM_NXTPG_CTL, val);
1928 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_CL73_USERB0);
1930 val = MII_BNX2_CL73_BAM_EN | MII_BNX2_CL73_BAM_STA_MGR_EN |
1931 MII_BNX2_CL73_BAM_NP_AFT_BP_EN;
1932 bnx2_write_phy(bp, MII_BNX2_CL73_BAM_CTL1, val);
1934 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1936 return 0;
1939 static int
1940 bnx2_init_5708s_phy(struct bnx2 *bp)
1942 u32 val;
1944 bnx2_reset_phy(bp);
1946 bp->mii_up1 = BCM5708S_UP1;
1948 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
1949 bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
1950 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
1952 bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
1953 val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
1954 bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);
1956 bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
1957 val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
1958 bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);
1960 if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) {
1961 bnx2_read_phy(bp, BCM5708S_UP1, &val);
1962 val |= BCM5708S_UP1_2G5;
1963 bnx2_write_phy(bp, BCM5708S_UP1, val);
1966 if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
1967 (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
1968 (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
1969 /* increase tx signal amplitude */
1970 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
1971 BCM5708S_BLK_ADDR_TX_MISC);
1972 bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
1973 val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
1974 bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
1975 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
1978 val = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG) &
1979 BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;
1981 if (val) {
1982 u32 is_backplane;
1984 is_backplane = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
1985 if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
1986 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
1987 BCM5708S_BLK_ADDR_TX_MISC);
1988 bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
1989 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
1990 BCM5708S_BLK_ADDR_DIG);
1993 return 0;
1996 static int
1997 bnx2_init_5706s_phy(struct bnx2 *bp)
1999 bnx2_reset_phy(bp);
2001 bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
2003 if (CHIP_NUM(bp) == CHIP_NUM_5706)
2004 REG_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300);
2006 if (bp->dev->mtu > 1500) {
2007 u32 val;
2009 /* Set extended packet length bit */
2010 bnx2_write_phy(bp, 0x18, 0x7);
2011 bnx2_read_phy(bp, 0x18, &val);
2012 bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);
2014 bnx2_write_phy(bp, 0x1c, 0x6c00);
2015 bnx2_read_phy(bp, 0x1c, &val);
2016 bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
2018 else {
2019 u32 val;
2021 bnx2_write_phy(bp, 0x18, 0x7);
2022 bnx2_read_phy(bp, 0x18, &val);
2023 bnx2_write_phy(bp, 0x18, val & ~0x4007);
2025 bnx2_write_phy(bp, 0x1c, 0x6c00);
2026 bnx2_read_phy(bp, 0x1c, &val);
2027 bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
2030 return 0;
2033 static int
2034 bnx2_init_copper_phy(struct bnx2 *bp)
2036 u32 val;
2038 bnx2_reset_phy(bp);
2040 if (bp->phy_flags & BNX2_PHY_FLAG_CRC_FIX) {
2041 bnx2_write_phy(bp, 0x18, 0x0c00);
2042 bnx2_write_phy(bp, 0x17, 0x000a);
2043 bnx2_write_phy(bp, 0x15, 0x310b);
2044 bnx2_write_phy(bp, 0x17, 0x201f);
2045 bnx2_write_phy(bp, 0x15, 0x9506);
2046 bnx2_write_phy(bp, 0x17, 0x401f);
2047 bnx2_write_phy(bp, 0x15, 0x14e2);
2048 bnx2_write_phy(bp, 0x18, 0x0400);
2051 if (bp->phy_flags & BNX2_PHY_FLAG_DIS_EARLY_DAC) {
2052 bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS,
2053 MII_BNX2_DSP_EXPAND_REG | 0x8);
2054 bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
2055 val &= ~(1 << 8);
2056 bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val);
2059 if (bp->dev->mtu > 1500) {
2060 /* Set extended packet length bit */
2061 bnx2_write_phy(bp, 0x18, 0x7);
2062 bnx2_read_phy(bp, 0x18, &val);
2063 bnx2_write_phy(bp, 0x18, val | 0x4000);
2065 bnx2_read_phy(bp, 0x10, &val);
2066 bnx2_write_phy(bp, 0x10, val | 0x1);
2068 else {
2069 bnx2_write_phy(bp, 0x18, 0x7);
2070 bnx2_read_phy(bp, 0x18, &val);
2071 bnx2_write_phy(bp, 0x18, val & ~0x4007);
2073 bnx2_read_phy(bp, 0x10, &val);
2074 bnx2_write_phy(bp, 0x10, val & ~0x1);
2077 /* ethernet@wirespeed */
2078 bnx2_write_phy(bp, 0x18, 0x7007);
2079 bnx2_read_phy(bp, 0x18, &val);
2080 bnx2_write_phy(bp, 0x18, val | (1 << 15) | (1 << 4));
2081 return 0;
2085 static int
2086 bnx2_init_phy(struct bnx2 *bp)
2088 u32 val;
2089 int rc = 0;
2091 bp->phy_flags &= ~BNX2_PHY_FLAG_INT_MODE_MASK;
2092 bp->phy_flags |= BNX2_PHY_FLAG_INT_MODE_LINK_READY;
2094 bp->mii_bmcr = MII_BMCR;
2095 bp->mii_bmsr = MII_BMSR;
2096 bp->mii_bmsr1 = MII_BMSR;
2097 bp->mii_adv = MII_ADVERTISE;
2098 bp->mii_lpa = MII_LPA;
2100 REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
2102 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
2103 goto setup_phy;
2105 bnx2_read_phy(bp, MII_PHYSID1, &val);
2106 bp->phy_id = val << 16;
2107 bnx2_read_phy(bp, MII_PHYSID2, &val);
2108 bp->phy_id |= val & 0xffff;
2110 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
2111 if (CHIP_NUM(bp) == CHIP_NUM_5706)
2112 rc = bnx2_init_5706s_phy(bp);
2113 else if (CHIP_NUM(bp) == CHIP_NUM_5708)
2114 rc = bnx2_init_5708s_phy(bp);
2115 else if (CHIP_NUM(bp) == CHIP_NUM_5709)
2116 rc = bnx2_init_5709s_phy(bp);
2117 }
2118 else {
2119 rc = bnx2_init_copper_phy(bp);
2120 }
2122 setup_phy:
2123 if (!rc)
2124 rc = bnx2_setup_phy(bp, bp->phy_port);
2126 return rc;
2127 }
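/* Force the EMAC into internal MAC loopback mode for self-test. */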
2129 static int
2130 bnx2_set_mac_loopback(struct bnx2 *bp)
2132 u32 mac_mode;
2134 mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
2135 mac_mode &= ~BNX2_EMAC_MODE_PORT;
2136 mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
2137 REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
2138 bp->link_up = 1;
2139 return 0;
2140 }
2142 static int bnx2_test_link(struct bnx2 *);
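/* Put the PHY in loopback at 1000 Mbps full duplex, wait briefly for the
 * link to settle, and force the EMAC into GMII port mode.
 */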
2144 static int
2145 bnx2_set_phy_loopback(struct bnx2 *bp)
2147 u32 mac_mode;
2148 int rc, i;
2150 spin_lock_bh(&bp->phy_lock);
2151 rc = bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK | BMCR_FULLDPLX |
2152 BMCR_SPEED1000);
2153 spin_unlock_bh(&bp->phy_lock);
2154 if (rc)
2155 return rc;
2157 for (i = 0; i < 10; i++) {
2158 if (bnx2_test_link(bp) == 0)
2159 break;
2160 msleep(100);
2161 }
2163 mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
2164 mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
2165 BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
2166 BNX2_EMAC_MODE_25G_MODE);
2168 mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
2169 REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
2170 bp->link_up = 1;
2171 return 0;
2172 }
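/* Post a sequenced message to the firmware mailbox and poll for the
 * acknowledgement; on timeout, report the failure to the firmware and
 * return -EBUSY.
 */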
2174 static int
2175 bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int silent)
2177 int i;
2178 u32 val;
2180 bp->fw_wr_seq++;
2181 msg_data |= bp->fw_wr_seq;
2183 bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);
2185 /* wait for an acknowledgement. */
2186 for (i = 0; i < (FW_ACK_TIME_OUT_MS / 10); i++) {
2187 msleep(10);
2189 val = bnx2_shmem_rd(bp, BNX2_FW_MB);
2191 if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
2192 break;
2193 }
2194 if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
2195 return 0;
2197 /* If we timed out, inform the firmware that this is the case. */
2198 if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
2199 if (!silent)
2200 printk(KERN_ERR PFX "fw sync timeout, reset code = "
2201 "%x\n", msg_data);
2203 msg_data &= ~BNX2_DRV_MSG_CODE;
2204 msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;
2206 bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);
2208 return -EBUSY;
2209 }
2211 if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
2212 return -EIO;
2214 return 0;
2215 }
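/* Enable the 5709 context memory and program the host page table with
 * the DMA addresses of the context blocks allocated by the driver.
 */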
2217 static int
2218 bnx2_init_5709_context(struct bnx2 *bp)
2220 int i, ret = 0;
2221 u32 val;
2223 val = BNX2_CTX_COMMAND_ENABLED | BNX2_CTX_COMMAND_MEM_INIT | (1 << 12);
2224 val |= (BCM_PAGE_BITS - 8) << 16;
2225 REG_WR(bp, BNX2_CTX_COMMAND, val);
2226 for (i = 0; i < 10; i++) {
2227 val = REG_RD(bp, BNX2_CTX_COMMAND);
2228 if (!(val & BNX2_CTX_COMMAND_MEM_INIT))
2229 break;
2230 udelay(2);
2231 }
2232 if (val & BNX2_CTX_COMMAND_MEM_INIT)
2233 return -EBUSY;
2235 for (i = 0; i < bp->ctx_pages; i++) {
2236 int j;
2238 REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0,
2239 (bp->ctx_blk_mapping[i] & 0xffffffff) |
2240 BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID);
2241 REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA1,
2242 (u64) bp->ctx_blk_mapping[i] >> 32);
2243 REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL, i |
2244 BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
2245 for (j = 0; j < 10; j++) {
2247 val = REG_RD(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL);
2248 if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
2249 break;
2250 udelay(5);
2251 }
2252 if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
2253 ret = -EBUSY;
2254 break;
2255 }
2256 }
2257 return ret;
2258 }
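/* Zero out all 96 contexts on pre-5709 chips, remapping context IDs on
 * the 5706 A0 to work around its context addressing quirk.
 */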
2260 static void
2261 bnx2_init_context(struct bnx2 *bp)
2263 u32 vcid;
2265 vcid = 96;
2266 while (vcid) {
2267 u32 vcid_addr, pcid_addr, offset;
2268 int i;
2270 vcid--;
2272 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
2273 u32 new_vcid;
2275 vcid_addr = GET_PCID_ADDR(vcid);
2276 if (vcid & 0x8) {
2277 new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
2278 }
2279 else {
2280 new_vcid = vcid;
2281 }
2282 pcid_addr = GET_PCID_ADDR(new_vcid);
2283 }
2284 else {
2285 vcid_addr = GET_CID_ADDR(vcid);
2286 pcid_addr = vcid_addr;
2287 }
2289 for (i = 0; i < (CTX_SIZE / PHY_CTX_SIZE); i++) {
2290 vcid_addr += (i << PHY_CTX_SHIFT);
2291 pcid_addr += (i << PHY_CTX_SHIFT);
2293 REG_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
2294 REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
2296 /* Zero out the context. */
2297 for (offset = 0; offset < PHY_CTX_SIZE; offset += 4)
2298 bnx2_ctx_wr(bp, vcid_addr, offset, 0);
2299 }
2300 }
2301 }
2303 static int
2304 bnx2_alloc_bad_rbuf(struct bnx2 *bp)
2306 u16 *good_mbuf;
2307 u32 good_mbuf_cnt;
2308 u32 val;
2310 good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
2311 if (good_mbuf == NULL) {
2312 printk(KERN_ERR PFX "Failed to allocate memory in "
2313 "bnx2_alloc_bad_rbuf\n");
2314 return -ENOMEM;
2315 }
2317 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
2318 BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);
2320 good_mbuf_cnt = 0;
2322 /* Allocate a bunch of mbufs and save the good ones in an array. */
2323 val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
2324 while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
2325 bnx2_reg_wr_ind(bp, BNX2_RBUF_COMMAND,
2326 BNX2_RBUF_COMMAND_ALLOC_REQ);
2328 val = bnx2_reg_rd_ind(bp, BNX2_RBUF_FW_BUF_ALLOC);
2330 val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;
2332 /* The addresses with Bit 9 set are bad memory blocks. */
2333 if (!(val & (1 << 9))) {
2334 good_mbuf[good_mbuf_cnt] = (u16) val;
2335 good_mbuf_cnt++;
2336 }
2338 val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
2339 }
2341 /* Free the good ones back to the mbuf pool, thus discarding
2342 * all the bad ones. */
2343 while (good_mbuf_cnt) {
2344 good_mbuf_cnt--;
2346 val = good_mbuf[good_mbuf_cnt];
2347 val = (val << 9) | val | 1;
2349 bnx2_reg_wr_ind(bp, BNX2_RBUF_FW_BUF_FREE, val);
2351 kfree(good_mbuf);
2352 return 0;
2353 }
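/* Program the device's MAC address into the EMAC perfect-match registers. */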
2355 static void
2356 bnx2_set_mac_addr(struct bnx2 *bp)
2358 u32 val;
2359 u8 *mac_addr = bp->dev->dev_addr;
2361 val = (mac_addr[0] << 8) | mac_addr[1];
2363 REG_WR(bp, BNX2_EMAC_MAC_MATCH0, val);
2365 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
2366 (mac_addr[4] << 8) | mac_addr[5];
2368 REG_WR(bp, BNX2_EMAC_MAC_MATCH1, val);
2369 }
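/* Allocate and DMA-map a page for the rx page ring and point the ring
 * descriptor at the given index to it.
 */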
2371 static inline int
2372 bnx2_alloc_rx_page(struct bnx2 *bp, u16 index)
2374 dma_addr_t mapping;
2375 struct sw_pg *rx_pg = &bp->rx_pg_ring[index];
2376 struct rx_bd *rxbd =
2377 &bp->rx_pg_desc_ring[RX_RING(index)][RX_IDX(index)];
2378 struct page *page = alloc_page(GFP_ATOMIC);
2380 if (!page)
2381 return -ENOMEM;
2382 mapping = pci_map_page(bp->pdev, page, 0, PAGE_SIZE,
2383 PCI_DMA_FROMDEVICE);
2384 rx_pg->page = page;
2385 pci_unmap_addr_set(rx_pg, mapping, mapping);
2386 rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
2387 rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
2388 return 0;
2391 static void
2392 bnx2_free_rx_page(struct bnx2 *bp, u16 index)
2394 struct sw_pg *rx_pg = &bp->rx_pg_ring[index];
2395 struct page *page = rx_pg->page;
2397 if (!page)
2398 return;
2400 pci_unmap_page(bp->pdev, pci_unmap_addr(rx_pg, mapping), PAGE_SIZE,
2401 PCI_DMA_FROMDEVICE);
2403 __free_page(page);
2404 rx_pg->page = NULL;
2405 }
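/* Allocate an rx skb, align its data, DMA-map it, and attach it to the
 * rx ring descriptor at the given index.
 */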
2407 static inline int
2408 bnx2_alloc_rx_skb(struct bnx2 *bp, struct bnx2_napi *bnapi, u16 index)
2410 struct sk_buff *skb;
2411 struct sw_bd *rx_buf = &bp->rx_buf_ring[index];
2412 dma_addr_t mapping;
2413 struct rx_bd *rxbd = &bp->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
2414 unsigned long align;
2416 skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
2417 if (skb == NULL) {
2418 return -ENOMEM;
2419 }
2421 if (unlikely((align = (unsigned long) skb->data & (BNX2_RX_ALIGN - 1))))
2422 skb_reserve(skb, BNX2_RX_ALIGN - align);
2424 mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
2425 PCI_DMA_FROMDEVICE);
2427 rx_buf->skb = skb;
2428 pci_unmap_addr_set(rx_buf, mapping, mapping);
2430 rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
2431 rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
2433 bnapi->rx_prod_bseq += bp->rx_buf_use_size;
2435 return 0;
2438 static int
2439 bnx2_phy_event_is_set(struct bnx2 *bp, struct bnx2_napi *bnapi, u32 event)
2441 struct status_block *sblk = bnapi->status_blk;
2442 u32 new_link_state, old_link_state;
2443 int is_set = 1;
2445 new_link_state = sblk->status_attn_bits & event;
2446 old_link_state = sblk->status_attn_bits_ack & event;
2447 if (new_link_state != old_link_state) {
2448 if (new_link_state)
2449 REG_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD, event);
2450 else
2451 REG_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD, event);
2452 } else
2453 is_set = 0;
2455 return is_set;
2458 static void
2459 bnx2_phy_int(struct bnx2 *bp, struct bnx2_napi *bnapi)
2461 if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_LINK_STATE)) {
2462 spin_lock(&bp->phy_lock);
2463 bnx2_set_link(bp);
2464 spin_unlock(&bp->phy_lock);
2465 }
2466 if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_TIMER_ABORT))
2467 bnx2_set_remote_link(bp);
2471 static inline u16
2472 bnx2_get_hw_tx_cons(struct bnx2_napi *bnapi)
2474 u16 cons;
2476 if (bnapi->int_num == 0)
2477 cons = bnapi->status_blk->status_tx_quick_consumer_index0;
2478 else
2479 cons = bnapi->status_blk_msix->status_tx_quick_consumer_index;
2481 if (unlikely((cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT))
2482 cons++;
2483 return cons;
2484 }
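/* Reclaim tx buffers completed by the hardware, up to the budget, and
 * wake a stopped queue once enough descriptors are free again.
 */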
2486 static int
2487 bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
2489 u16 hw_cons, sw_cons, sw_ring_cons;
2490 int tx_pkt = 0;
2492 hw_cons = bnx2_get_hw_tx_cons(bnapi);
2493 sw_cons = bnapi->tx_cons;
2495 while (sw_cons != hw_cons) {
2496 struct sw_bd *tx_buf;
2497 struct sk_buff *skb;
2498 int i, last;
2500 sw_ring_cons = TX_RING_IDX(sw_cons);
2502 tx_buf = &bp->tx_buf_ring[sw_ring_cons];
2503 skb = tx_buf->skb;
2505 /* partial BD completions possible with TSO packets */
2506 if (skb_is_gso(skb)) {
2507 u16 last_idx, last_ring_idx;
2509 last_idx = sw_cons +
2510 skb_shinfo(skb)->nr_frags + 1;
2511 last_ring_idx = sw_ring_cons +
2512 skb_shinfo(skb)->nr_frags + 1;
2513 if (unlikely(last_ring_idx >= MAX_TX_DESC_CNT)) {
2514 last_idx++;
2515 }
2516 if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
2517 break;
2518 }
2519 }
2521 pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
2522 skb_headlen(skb), PCI_DMA_TODEVICE);
2524 tx_buf->skb = NULL;
2525 last = skb_shinfo(skb)->nr_frags;
2527 for (i = 0; i < last; i++) {
2528 sw_cons = NEXT_TX_BD(sw_cons);
2530 pci_unmap_page(bp->pdev,
2531 pci_unmap_addr(
2532 &bp->tx_buf_ring[TX_RING_IDX(sw_cons)],
2533 mapping),
2534 skb_shinfo(skb)->frags[i].size,
2535 PCI_DMA_TODEVICE);
2536 }
2538 sw_cons = NEXT_TX_BD(sw_cons);
2540 dev_kfree_skb(skb);
2541 tx_pkt++;
2542 if (tx_pkt == budget)
2543 break;
2545 hw_cons = bnx2_get_hw_tx_cons(bnapi);
2546 }
2548 bnapi->hw_tx_cons = hw_cons;
2549 bnapi->tx_cons = sw_cons;
2550 /* Need to make the tx_cons update visible to bnx2_start_xmit()
2551 * before checking for netif_queue_stopped(). Without the
2552 * memory barrier, there is a small possibility that bnx2_start_xmit()
2553 * will miss it and cause the queue to be stopped forever.
2554 */
2555 smp_mb();
2557 if (unlikely(netif_queue_stopped(bp->dev)) &&
2558 (bnx2_tx_avail(bp, bnapi) > bp->tx_wake_thresh)) {
2559 netif_tx_lock(bp->dev);
2560 if ((netif_queue_stopped(bp->dev)) &&
2561 (bnx2_tx_avail(bp, bnapi) > bp->tx_wake_thresh))
2562 netif_wake_queue(bp->dev);
2563 netif_tx_unlock(bp->dev);
2564 }
2565 return tx_pkt;
2566 }
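/* Recycle rx pages from the consumer back to the producer end of the
 * page ring, optionally stealing back the last page attached to an skb.
 */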
2568 static void
2569 bnx2_reuse_rx_skb_pages(struct bnx2 *bp, struct bnx2_napi *bnapi,
2570 struct sk_buff *skb, int count)
2572 struct sw_pg *cons_rx_pg, *prod_rx_pg;
2573 struct rx_bd *cons_bd, *prod_bd;
2574 dma_addr_t mapping;
2575 int i;
2576 u16 hw_prod = bnapi->rx_pg_prod, prod;
2577 u16 cons = bnapi->rx_pg_cons;
2579 for (i = 0; i < count; i++) {
2580 prod = RX_PG_RING_IDX(hw_prod);
2582 prod_rx_pg = &bp->rx_pg_ring[prod];
2583 cons_rx_pg = &bp->rx_pg_ring[cons];
2584 cons_bd = &bp->rx_pg_desc_ring[RX_RING(cons)][RX_IDX(cons)];
2585 prod_bd = &bp->rx_pg_desc_ring[RX_RING(prod)][RX_IDX(prod)];
2587 if (i == 0 && skb) {
2588 struct page *page;
2589 struct skb_shared_info *shinfo;
2591 shinfo = skb_shinfo(skb);
2592 shinfo->nr_frags--;
2593 page = shinfo->frags[shinfo->nr_frags].page;
2594 shinfo->frags[shinfo->nr_frags].page = NULL;
2595 mapping = pci_map_page(bp->pdev, page, 0, PAGE_SIZE,
2596 PCI_DMA_FROMDEVICE);
2597 cons_rx_pg->page = page;
2598 pci_unmap_addr_set(cons_rx_pg, mapping, mapping);
2599 dev_kfree_skb(skb);
2600 }
2601 if (prod != cons) {
2602 prod_rx_pg->page = cons_rx_pg->page;
2603 cons_rx_pg->page = NULL;
2604 pci_unmap_addr_set(prod_rx_pg, mapping,
2605 pci_unmap_addr(cons_rx_pg, mapping));
2607 prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
2608 prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
2609 }
2611 cons = RX_PG_RING_IDX(NEXT_RX_BD(cons));
2612 hw_prod = NEXT_RX_BD(hw_prod);
2613 }
2614 bnapi->rx_pg_prod = hw_prod;
2615 bnapi->rx_pg_cons = cons;
2618 static inline void
2619 bnx2_reuse_rx_skb(struct bnx2 *bp, struct bnx2_napi *bnapi, struct sk_buff *skb,
2620 u16 cons, u16 prod)
2622 struct sw_bd *cons_rx_buf, *prod_rx_buf;
2623 struct rx_bd *cons_bd, *prod_bd;
2625 cons_rx_buf = &bp->rx_buf_ring[cons];
2626 prod_rx_buf = &bp->rx_buf_ring[prod];
2628 pci_dma_sync_single_for_device(bp->pdev,
2629 pci_unmap_addr(cons_rx_buf, mapping),
2630 bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
2632 bnapi->rx_prod_bseq += bp->rx_buf_use_size;
2634 prod_rx_buf->skb = skb;
2636 if (cons == prod)
2637 return;
2639 pci_unmap_addr_set(prod_rx_buf, mapping,
2640 pci_unmap_addr(cons_rx_buf, mapping));
2642 cons_bd = &bp->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
2643 prod_bd = &bp->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
2644 prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
2645 prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
2646 }
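/* Finish receiving one packet: replenish the rx ring, unmap the buffer,
 * and for split or jumbo frames attach the page-ring fragments to the
 * skb, trimming the 4-byte CRC from the tail.
 */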
2648 static int
2649 bnx2_rx_skb(struct bnx2 *bp, struct bnx2_napi *bnapi, struct sk_buff *skb,
2650 unsigned int len, unsigned int hdr_len, dma_addr_t dma_addr,
2651 u32 ring_idx)
2653 int err;
2654 u16 prod = ring_idx & 0xffff;
2656 err = bnx2_alloc_rx_skb(bp, bnapi, prod);
2657 if (unlikely(err)) {
2658 bnx2_reuse_rx_skb(bp, bnapi, skb, (u16) (ring_idx >> 16), prod);
2659 if (hdr_len) {
2660 unsigned int raw_len = len + 4;
2661 int pages = PAGE_ALIGN(raw_len - hdr_len) >> PAGE_SHIFT;
2663 bnx2_reuse_rx_skb_pages(bp, bnapi, NULL, pages);
2664 }
2665 return err;
2666 }
2668 skb_reserve(skb, bp->rx_offset);
2669 pci_unmap_single(bp->pdev, dma_addr, bp->rx_buf_use_size,
2670 PCI_DMA_FROMDEVICE);
2672 if (hdr_len == 0) {
2673 skb_put(skb, len);
2674 return 0;
2675 } else {
2676 unsigned int i, frag_len, frag_size, pages;
2677 struct sw_pg *rx_pg;
2678 u16 pg_cons = bnapi->rx_pg_cons;
2679 u16 pg_prod = bnapi->rx_pg_prod;
2681 frag_size = len + 4 - hdr_len;
2682 pages = PAGE_ALIGN(frag_size) >> PAGE_SHIFT;
2683 skb_put(skb, hdr_len);
2685 for (i = 0; i < pages; i++) {
2686 frag_len = min(frag_size, (unsigned int) PAGE_SIZE);
2687 if (unlikely(frag_len <= 4)) {
2688 unsigned int tail = 4 - frag_len;
2690 bnapi->rx_pg_cons = pg_cons;
2691 bnapi->rx_pg_prod = pg_prod;
2692 bnx2_reuse_rx_skb_pages(bp, bnapi, NULL,
2693 pages - i);
2694 skb->len -= tail;
2695 if (i == 0) {
2696 skb->tail -= tail;
2697 } else {
2698 skb_frag_t *frag =
2699 &skb_shinfo(skb)->frags[i - 1];
2700 frag->size -= tail;
2701 skb->data_len -= tail;
2702 skb->truesize -= tail;
2703 }
2704 return 0;
2705 }
2706 rx_pg = &bp->rx_pg_ring[pg_cons];
2708 pci_unmap_page(bp->pdev, pci_unmap_addr(rx_pg, mapping),
2709 PAGE_SIZE, PCI_DMA_FROMDEVICE);
2711 if (i == pages - 1)
2712 frag_len -= 4;
2714 skb_fill_page_desc(skb, i, rx_pg->page, 0, frag_len);
2715 rx_pg->page = NULL;
2717 err = bnx2_alloc_rx_page(bp, RX_PG_RING_IDX(pg_prod));
2718 if (unlikely(err)) {
2719 bnapi->rx_pg_cons = pg_cons;
2720 bnapi->rx_pg_prod = pg_prod;
2721 bnx2_reuse_rx_skb_pages(bp, bnapi, skb,
2722 pages - i);
2723 return err;
2724 }
2726 frag_size -= frag_len;
2727 skb->data_len += frag_len;
2728 skb->truesize += frag_len;
2729 skb->len += frag_len;
2731 pg_prod = NEXT_RX_BD(pg_prod);
2732 pg_cons = RX_PG_RING_IDX(NEXT_RX_BD(pg_cons));
2733 }
2734 bnapi->rx_pg_prod = pg_prod;
2735 bnapi->rx_pg_cons = pg_cons;
2736 }
2737 return 0;
2738 }
2740 static inline u16
2741 bnx2_get_hw_rx_cons(struct bnx2_napi *bnapi)
2743 u16 cons = bnapi->status_blk->status_rx_quick_consumer_index0;
2745 if (unlikely((cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT))
2746 cons++;
2747 return cons;
2748 }
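/* Main rx handler: walk the rx ring up to the hardware consumer index,
 * copying small packets and passing large ones up by reference, then
 * tell the chip the new producer indices.
 */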
2750 static int
2751 bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
2753 u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
2754 struct l2_fhdr *rx_hdr;
2755 int rx_pkt = 0, pg_ring_used = 0;
2757 hw_cons = bnx2_get_hw_rx_cons(bnapi);
2758 sw_cons = bnapi->rx_cons;
2759 sw_prod = bnapi->rx_prod;
2761 /* Memory barrier necessary as speculative reads of the rx
2762 * buffer can be ahead of the index in the status block.
2763 */
2764 rmb();
2765 while (sw_cons != hw_cons) {
2766 unsigned int len, hdr_len;
2767 u32 status;
2768 struct sw_bd *rx_buf;
2769 struct sk_buff *skb;
2770 dma_addr_t dma_addr;
2772 sw_ring_cons = RX_RING_IDX(sw_cons);
2773 sw_ring_prod = RX_RING_IDX(sw_prod);
2775 rx_buf = &bp->rx_buf_ring[sw_ring_cons];
2776 skb = rx_buf->skb;
2778 rx_buf->skb = NULL;
2780 dma_addr = pci_unmap_addr(rx_buf, mapping);
2782 pci_dma_sync_single_for_cpu(bp->pdev, dma_addr,
2783 bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
2785 rx_hdr = (struct l2_fhdr *) skb->data;
2786 len = rx_hdr->l2_fhdr_pkt_len;
2788 if ((status = rx_hdr->l2_fhdr_status) &
2789 (L2_FHDR_ERRORS_BAD_CRC |
2790 L2_FHDR_ERRORS_PHY_DECODE |
2791 L2_FHDR_ERRORS_ALIGNMENT |
2792 L2_FHDR_ERRORS_TOO_SHORT |
2793 L2_FHDR_ERRORS_GIANT_FRAME)) {
2795 bnx2_reuse_rx_skb(bp, bnapi, skb, sw_ring_cons,
2796 sw_ring_prod);
2797 goto next_rx;
2798 }
2799 hdr_len = 0;
2800 if (status & L2_FHDR_STATUS_SPLIT) {
2801 hdr_len = rx_hdr->l2_fhdr_ip_xsum;
2802 pg_ring_used = 1;
2803 } else if (len > bp->rx_jumbo_thresh) {
2804 hdr_len = bp->rx_jumbo_thresh;
2805 pg_ring_used = 1;
2806 }
2808 len -= 4;
2810 if (len <= bp->rx_copy_thresh) {
2811 struct sk_buff *new_skb;
2813 new_skb = netdev_alloc_skb(bp->dev, len + 2);
2814 if (new_skb == NULL) {
2815 bnx2_reuse_rx_skb(bp, bnapi, skb, sw_ring_cons,
2816 sw_ring_prod);
2817 goto next_rx;
2818 }
2820 /* aligned copy */
2821 skb_copy_from_linear_data_offset(skb, bp->rx_offset - 2,
2822 new_skb->data, len + 2);
2823 skb_reserve(new_skb, 2);
2824 skb_put(new_skb, len);
2826 bnx2_reuse_rx_skb(bp, bnapi, skb,
2827 sw_ring_cons, sw_ring_prod);
2829 skb = new_skb;
2830 } else if (unlikely(bnx2_rx_skb(bp, bnapi, skb, len, hdr_len,
2831 dma_addr, (sw_ring_cons << 16) | sw_ring_prod)))
2832 goto next_rx;
2834 skb->protocol = eth_type_trans(skb, bp->dev);
2836 if ((len > (bp->dev->mtu + ETH_HLEN)) &&
2837 (ntohs(skb->protocol) != 0x8100)) {
2839 dev_kfree_skb(skb);
2840 goto next_rx;
2841 }
2844 skb->ip_summed = CHECKSUM_NONE;
2845 if (bp->rx_csum &&
2846 (status & (L2_FHDR_STATUS_TCP_SEGMENT |
2847 L2_FHDR_STATUS_UDP_DATAGRAM))) {
2849 if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
2850 L2_FHDR_ERRORS_UDP_XSUM)) == 0))
2851 skb->ip_summed = CHECKSUM_UNNECESSARY;
2852 }
2854 #ifdef BCM_VLAN
2855 if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) && bp->vlgrp) {
2856 vlan_hwaccel_receive_skb(skb, bp->vlgrp,
2857 rx_hdr->l2_fhdr_vlan_tag);
2858 }
2859 else
2860 #endif
2861 netif_receive_skb(skb);
2863 bp->dev->last_rx = jiffies;
2864 rx_pkt++;
2866 next_rx:
2867 sw_cons = NEXT_RX_BD(sw_cons);
2868 sw_prod = NEXT_RX_BD(sw_prod);
2870 if ((rx_pkt == budget))
2871 break;
2873 /* Refresh hw_cons to see if there is new work */
2874 if (sw_cons == hw_cons) {
2875 hw_cons = bnx2_get_hw_rx_cons(bnapi);
2876 rmb();
2877 }
2878 }
2879 bnapi->rx_cons = sw_cons;
2880 bnapi->rx_prod = sw_prod;
2882 if (pg_ring_used)
2883 REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_PG_BDIDX,
2884 bnapi->rx_pg_prod);
2886 REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, sw_prod);
2888 REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bnapi->rx_prod_bseq);
2890 mmiowb();
2892 return rx_pkt;
2896 /* MSI ISR - The only difference between this and the INTx ISR
2897 * is that the MSI interrupt is always serviced.
2898 */
2899 static irqreturn_t
2900 bnx2_msi(int irq, void *dev_instance)
2902 struct net_device *dev = dev_instance;
2903 struct bnx2 *bp = netdev_priv(dev);
2904 struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
2906 prefetch(bnapi->status_blk);
2907 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2908 BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
2909 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
2911 /* Return here if interrupt is disabled. */
2912 if (unlikely(atomic_read(&bp->intr_sem) != 0))
2913 return IRQ_HANDLED;
2915 netif_rx_schedule(dev, &bnapi->napi);
2917 return IRQ_HANDLED;
2920 static irqreturn_t
2921 bnx2_msi_1shot(int irq, void *dev_instance)
2923 struct net_device *dev = dev_instance;
2924 struct bnx2 *bp = netdev_priv(dev);
2925 struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
2927 prefetch(bnapi->status_blk);
2929 /* Return here if interrupt is disabled. */
2930 if (unlikely(atomic_read(&bp->intr_sem) != 0))
2931 return IRQ_HANDLED;
2933 netif_rx_schedule(dev, &bnapi->napi);
2935 return IRQ_HANDLED;
2938 static irqreturn_t
2939 bnx2_interrupt(int irq, void *dev_instance)
2941 struct net_device *dev = dev_instance;
2942 struct bnx2 *bp = netdev_priv(dev);
2943 struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
2944 struct status_block *sblk = bnapi->status_blk;
2946 /* When using INTx, it is possible for the interrupt to arrive
2947 * at the CPU before the status block posted prior to the
2948 * interrupt. Reading a register will flush the status block.
2949 * When using MSI, the MSI message will always complete after
2950 * the status block write.
2951 */
2952 if ((sblk->status_idx == bnapi->last_status_idx) &&
2953 (REG_RD(bp, BNX2_PCICFG_MISC_STATUS) &
2954 BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
2955 return IRQ_NONE;
2957 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2958 BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
2959 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
2961 /* Read back to deassert IRQ immediately to avoid too many
2962 * spurious interrupts.
2963 */
2964 REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
2966 /* Return here if interrupt is shared and is disabled. */
2967 if (unlikely(atomic_read(&bp->intr_sem) != 0))
2968 return IRQ_HANDLED;
2970 if (netif_rx_schedule_prep(dev, &bnapi->napi)) {
2971 bnapi->last_status_idx = sblk->status_idx;
2972 __netif_rx_schedule(dev, &bnapi->napi);
2973 }
2975 return IRQ_HANDLED;
2978 static irqreturn_t
2979 bnx2_tx_msix(int irq, void *dev_instance)
2981 struct net_device *dev = dev_instance;
2982 struct bnx2 *bp = netdev_priv(dev);
2983 struct bnx2_napi *bnapi = &bp->bnx2_napi[BNX2_TX_VEC];
2985 prefetch(bnapi->status_blk_msix);
2987 /* Return here if interrupt is disabled. */
2988 if (unlikely(atomic_read(&bp->intr_sem) != 0))
2989 return IRQ_HANDLED;
2991 netif_rx_schedule(dev, &bnapi->napi);
2992 return IRQ_HANDLED;
2995 #define STATUS_ATTN_EVENTS (STATUS_ATTN_BITS_LINK_STATE | \
2996 STATUS_ATTN_BITS_TIMER_ABORT)
2998 static inline int
2999 bnx2_has_work(struct bnx2_napi *bnapi)
3001 struct status_block *sblk = bnapi->status_blk;
3003 if ((bnx2_get_hw_rx_cons(bnapi) != bnapi->rx_cons) ||
3004 (bnx2_get_hw_tx_cons(bnapi) != bnapi->hw_tx_cons))
3005 return 1;
3007 if ((sblk->status_attn_bits & STATUS_ATTN_EVENTS) !=
3008 (sblk->status_attn_bits_ack & STATUS_ATTN_EVENTS))
3009 return 1;
3011 return 0;
3014 static int bnx2_tx_poll(struct napi_struct *napi, int budget)
3016 struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
3017 struct bnx2 *bp = bnapi->bp;
3018 int work_done = 0;
3019 struct status_block_msix *sblk = bnapi->status_blk_msix;
3021 do {
3022 work_done += bnx2_tx_int(bp, bnapi, budget - work_done);
3023 if (unlikely(work_done >= budget))
3024 return work_done;
3026 bnapi->last_status_idx = sblk->status_idx;
3027 rmb();
3028 } while (bnx2_get_hw_tx_cons(bnapi) != bnapi->hw_tx_cons);
3030 netif_rx_complete(bp->dev, napi);
3031 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
3032 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
3033 bnapi->last_status_idx);
3034 return work_done;
3037 static int bnx2_poll_work(struct bnx2 *bp, struct bnx2_napi *bnapi,
3038 int work_done, int budget)
3040 struct status_block *sblk = bnapi->status_blk;
3041 u32 status_attn_bits = sblk->status_attn_bits;
3042 u32 status_attn_bits_ack = sblk->status_attn_bits_ack;
3044 if ((status_attn_bits & STATUS_ATTN_EVENTS) !=
3045 (status_attn_bits_ack & STATUS_ATTN_EVENTS)) {
3047 bnx2_phy_int(bp, bnapi);
3049 /* This is needed to take care of transient status
3050 * during link changes.
3051 */
3052 REG_WR(bp, BNX2_HC_COMMAND,
3053 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
3054 REG_RD(bp, BNX2_HC_COMMAND);
3055 }
3057 if (bnx2_get_hw_tx_cons(bnapi) != bnapi->hw_tx_cons)
3058 bnx2_tx_int(bp, bnapi, 0);
3060 if (bnx2_get_hw_rx_cons(bnapi) != bnapi->rx_cons)
3061 work_done += bnx2_rx_int(bp, bnapi, budget - work_done);
3063 return work_done;
3066 static int bnx2_poll(struct napi_struct *napi, int budget)
3068 struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
3069 struct bnx2 *bp = bnapi->bp;
3070 int work_done = 0;
3071 struct status_block *sblk = bnapi->status_blk;
3073 while (1) {
3074 work_done = bnx2_poll_work(bp, bnapi, work_done, budget);
3076 if (unlikely(work_done >= budget))
3077 break;
3079 /* bnapi->last_status_idx is used below to tell the hw how
3080 * much work has been processed, so we must read it before
3081 * checking for more work.
3082 */
3083 bnapi->last_status_idx = sblk->status_idx;
3084 rmb();
3085 if (likely(!bnx2_has_work(bnapi))) {
3086 netif_rx_complete(bp->dev, napi);
3087 if (likely(bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)) {
3088 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3089 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
3090 bnapi->last_status_idx);
3091 break;
3092 }
3093 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3094 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
3095 BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
3096 bnapi->last_status_idx);
3098 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3099 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
3100 bnapi->last_status_idx);
3101 break;
3102 }
3103 }
3105 return work_done;
3108 /* Called with rtnl_lock from vlan functions and also netif_tx_lock
3109 * from set_multicast.
3110 */
3111 static void
3112 bnx2_set_rx_mode(struct net_device *dev)
3114 struct bnx2 *bp = netdev_priv(dev);
3115 u32 rx_mode, sort_mode;
3116 int i;
3118 spin_lock_bh(&bp->phy_lock);
3120 rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
3121 BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
3122 sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
3123 #ifdef BCM_VLAN
3124 if (!bp->vlgrp && !(bp->flags & BNX2_FLAG_ASF_ENABLE))
3125 rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
3126 #else
3127 if (!(bp->flags & BNX2_FLAG_ASF_ENABLE))
3128 rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
3129 #endif
3130 if (dev->flags & IFF_PROMISC) {
3131 /* Promiscuous mode. */
3132 rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
3133 sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
3134 BNX2_RPM_SORT_USER0_PROM_VLAN;
3135 }
3136 else if (dev->flags & IFF_ALLMULTI) {
3137 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
3138 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
3139 0xffffffff);
3140 }
3141 sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
3142 }
3143 else {
3144 /* Accept one or more multicast(s). */
3145 struct dev_mc_list *mclist;
3146 u32 mc_filter[NUM_MC_HASH_REGISTERS];
3147 u32 regidx;
3148 u32 bit;
3149 u32 crc;
3151 memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);
3153 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
3154 i++, mclist = mclist->next) {
3156 crc = ether_crc_le(ETH_ALEN, mclist->dmi_addr);
3157 bit = crc & 0xff;
3158 regidx = (bit & 0xe0) >> 5;
3159 bit &= 0x1f;
3160 mc_filter[regidx] |= (1 << bit);
3161 }
3163 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
3164 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
3165 mc_filter[i]);
3166 }
3168 sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
3169 }
3171 if (rx_mode != bp->rx_mode) {
3172 bp->rx_mode = rx_mode;
3173 REG_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
3174 }
3176 REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
3177 REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
3178 REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);
3180 spin_unlock_bh(&bp->phy_lock);
3183 static void
3184 load_rv2p_fw(struct bnx2 *bp, __le32 *rv2p_code, u32 rv2p_code_len,
3185 u32 rv2p_proc)
3187 int i;
3188 u32 val;
3191 for (i = 0; i < rv2p_code_len; i += 8) {
3192 REG_WR(bp, BNX2_RV2P_INSTR_HIGH, le32_to_cpu(*rv2p_code));
3193 rv2p_code++;
3194 REG_WR(bp, BNX2_RV2P_INSTR_LOW, le32_to_cpu(*rv2p_code));
3195 rv2p_code++;
3197 if (rv2p_proc == RV2P_PROC1) {
3198 val = (i / 8) | BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
3199 REG_WR(bp, BNX2_RV2P_PROC1_ADDR_CMD, val);
3200 }
3201 else {
3202 val = (i / 8) | BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
3203 REG_WR(bp, BNX2_RV2P_PROC2_ADDR_CMD, val);
3204 }
3205 }
3207 /* Reset the processor, un-stall is done later. */
3208 if (rv2p_proc == RV2P_PROC1) {
3209 REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
3210 }
3211 else {
3212 REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
3213 }
3214 }
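/* Load one on-chip CPU's firmware: halt the CPU, copy the text, data,
 * sbss, bss and rodata sections into its scratchpad, set the program
 * counter, and restart it.
 */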
3216 static int
3217 load_cpu_fw(struct bnx2 *bp, struct cpu_reg *cpu_reg, struct fw_info *fw)
3219 u32 offset;
3220 u32 val;
3221 int rc;
3223 /* Halt the CPU. */
3224 val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
3225 val |= cpu_reg->mode_value_halt;
3226 bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
3227 bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
3229 /* Load the Text area. */
3230 offset = cpu_reg->spad_base + (fw->text_addr - cpu_reg->mips_view_base);
3231 if (fw->gz_text) {
3232 int j;
3234 rc = zlib_inflate_blob(fw->text, FW_BUF_SIZE, fw->gz_text,
3235 fw->gz_text_len);
3236 if (rc < 0)
3237 return rc;
3239 for (j = 0; j < (fw->text_len / 4); j++, offset += 4) {
3240 bnx2_reg_wr_ind(bp, offset, le32_to_cpu(fw->text[j]));
3241 }
3242 }
3244 /* Load the Data area. */
3245 offset = cpu_reg->spad_base + (fw->data_addr - cpu_reg->mips_view_base);
3246 if (fw->data) {
3247 int j;
3249 for (j = 0; j < (fw->data_len / 4); j++, offset += 4) {
3250 bnx2_reg_wr_ind(bp, offset, fw->data[j]);
3251 }
3252 }
3254 /* Load the SBSS area. */
3255 offset = cpu_reg->spad_base + (fw->sbss_addr - cpu_reg->mips_view_base);
3256 if (fw->sbss_len) {
3257 int j;
3259 for (j = 0; j < (fw->sbss_len / 4); j++, offset += 4) {
3260 bnx2_reg_wr_ind(bp, offset, 0);
3261 }
3262 }
3264 /* Load the BSS area. */
3265 offset = cpu_reg->spad_base + (fw->bss_addr - cpu_reg->mips_view_base);
3266 if (fw->bss_len) {
3267 int j;
3269 for (j = 0; j < (fw->bss_len/4); j++, offset += 4) {
3270 bnx2_reg_wr_ind(bp, offset, 0);
3271 }
3272 }
3274 /* Load the Read-Only area. */
3275 offset = cpu_reg->spad_base +
3276 (fw->rodata_addr - cpu_reg->mips_view_base);
3277 if (fw->rodata) {
3278 int j;
3280 for (j = 0; j < (fw->rodata_len / 4); j++, offset += 4) {
3281 bnx2_reg_wr_ind(bp, offset, fw->rodata[j]);
3282 }
3283 }
3285 /* Clear the pre-fetch instruction. */
3286 bnx2_reg_wr_ind(bp, cpu_reg->inst, 0);
3287 bnx2_reg_wr_ind(bp, cpu_reg->pc, fw->start_addr);
3289 /* Start the CPU. */
3290 val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
3291 val &= ~cpu_reg->mode_value_halt;
3292 bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
3293 bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
3295 return 0;
3296 }
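/* Load firmware into the two RV2P engines and the RXP, TXP, TPAT, COM
 * and CP processors, decompressing each image into a shared buffer.
 */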
3298 static int
3299 bnx2_init_cpus(struct bnx2 *bp)
3301 struct cpu_reg cpu_reg;
3302 struct fw_info *fw;
3303 int rc, rv2p_len;
3304 void *text, *rv2p;
3306 /* Initialize the RV2P processor. */
3307 text = vmalloc(FW_BUF_SIZE);
3308 if (!text)
3309 return -ENOMEM;
3310 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3311 rv2p = bnx2_xi_rv2p_proc1;
3312 rv2p_len = sizeof(bnx2_xi_rv2p_proc1);
3313 } else {
3314 rv2p = bnx2_rv2p_proc1;
3315 rv2p_len = sizeof(bnx2_rv2p_proc1);
3316 }
3317 rc = zlib_inflate_blob(text, FW_BUF_SIZE, rv2p, rv2p_len);
3318 if (rc < 0)
3319 goto init_cpu_err;
3321 load_rv2p_fw(bp, text, rc /* == len */, RV2P_PROC1);
3323 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3324 rv2p = bnx2_xi_rv2p_proc2;
3325 rv2p_len = sizeof(bnx2_xi_rv2p_proc2);
3326 } else {
3327 rv2p = bnx2_rv2p_proc2;
3328 rv2p_len = sizeof(bnx2_rv2p_proc2);
3329 }
3330 rc = zlib_inflate_blob(text, FW_BUF_SIZE, rv2p, rv2p_len);
3331 if (rc < 0)
3332 goto init_cpu_err;
3334 load_rv2p_fw(bp, text, rc /* == len */, RV2P_PROC2);
3336 /* Initialize the RX Processor. */
3337 cpu_reg.mode = BNX2_RXP_CPU_MODE;
3338 cpu_reg.mode_value_halt = BNX2_RXP_CPU_MODE_SOFT_HALT;
3339 cpu_reg.mode_value_sstep = BNX2_RXP_CPU_MODE_STEP_ENA;
3340 cpu_reg.state = BNX2_RXP_CPU_STATE;
3341 cpu_reg.state_value_clear = 0xffffff;
3342 cpu_reg.gpr0 = BNX2_RXP_CPU_REG_FILE;
3343 cpu_reg.evmask = BNX2_RXP_CPU_EVENT_MASK;
3344 cpu_reg.pc = BNX2_RXP_CPU_PROGRAM_COUNTER;
3345 cpu_reg.inst = BNX2_RXP_CPU_INSTRUCTION;
3346 cpu_reg.bp = BNX2_RXP_CPU_HW_BREAKPOINT;
3347 cpu_reg.spad_base = BNX2_RXP_SCRATCH;
3348 cpu_reg.mips_view_base = 0x8000000;
3350 if (CHIP_NUM(bp) == CHIP_NUM_5709)
3351 fw = &bnx2_rxp_fw_09;
3352 else
3353 fw = &bnx2_rxp_fw_06;
3355 fw->text = text;
3356 rc = load_cpu_fw(bp, &cpu_reg, fw);
3357 if (rc)
3358 goto init_cpu_err;
3360 /* Initialize the TX Processor. */
3361 cpu_reg.mode = BNX2_TXP_CPU_MODE;
3362 cpu_reg.mode_value_halt = BNX2_TXP_CPU_MODE_SOFT_HALT;
3363 cpu_reg.mode_value_sstep = BNX2_TXP_CPU_MODE_STEP_ENA;
3364 cpu_reg.state = BNX2_TXP_CPU_STATE;
3365 cpu_reg.state_value_clear = 0xffffff;
3366 cpu_reg.gpr0 = BNX2_TXP_CPU_REG_FILE;
3367 cpu_reg.evmask = BNX2_TXP_CPU_EVENT_MASK;
3368 cpu_reg.pc = BNX2_TXP_CPU_PROGRAM_COUNTER;
3369 cpu_reg.inst = BNX2_TXP_CPU_INSTRUCTION;
3370 cpu_reg.bp = BNX2_TXP_CPU_HW_BREAKPOINT;
3371 cpu_reg.spad_base = BNX2_TXP_SCRATCH;
3372 cpu_reg.mips_view_base = 0x8000000;
3374 if (CHIP_NUM(bp) == CHIP_NUM_5709)
3375 fw = &bnx2_txp_fw_09;
3376 else
3377 fw = &bnx2_txp_fw_06;
3379 fw->text = text;
3380 rc = load_cpu_fw(bp, &cpu_reg, fw);
3381 if (rc)
3382 goto init_cpu_err;
3384 /* Initialize the TX Patch-up Processor. */
3385 cpu_reg.mode = BNX2_TPAT_CPU_MODE;
3386 cpu_reg.mode_value_halt = BNX2_TPAT_CPU_MODE_SOFT_HALT;
3387 cpu_reg.mode_value_sstep = BNX2_TPAT_CPU_MODE_STEP_ENA;
3388 cpu_reg.state = BNX2_TPAT_CPU_STATE;
3389 cpu_reg.state_value_clear = 0xffffff;
3390 cpu_reg.gpr0 = BNX2_TPAT_CPU_REG_FILE;
3391 cpu_reg.evmask = BNX2_TPAT_CPU_EVENT_MASK;
3392 cpu_reg.pc = BNX2_TPAT_CPU_PROGRAM_COUNTER;
3393 cpu_reg.inst = BNX2_TPAT_CPU_INSTRUCTION;
3394 cpu_reg.bp = BNX2_TPAT_CPU_HW_BREAKPOINT;
3395 cpu_reg.spad_base = BNX2_TPAT_SCRATCH;
3396 cpu_reg.mips_view_base = 0x8000000;
3398 if (CHIP_NUM(bp) == CHIP_NUM_5709)
3399 fw = &bnx2_tpat_fw_09;
3400 else
3401 fw = &bnx2_tpat_fw_06;
3403 fw->text = text;
3404 rc = load_cpu_fw(bp, &cpu_reg, fw);
3405 if (rc)
3406 goto init_cpu_err;
3408 /* Initialize the Completion Processor. */
3409 cpu_reg.mode = BNX2_COM_CPU_MODE;
3410 cpu_reg.mode_value_halt = BNX2_COM_CPU_MODE_SOFT_HALT;
3411 cpu_reg.mode_value_sstep = BNX2_COM_CPU_MODE_STEP_ENA;
3412 cpu_reg.state = BNX2_COM_CPU_STATE;
3413 cpu_reg.state_value_clear = 0xffffff;
3414 cpu_reg.gpr0 = BNX2_COM_CPU_REG_FILE;
3415 cpu_reg.evmask = BNX2_COM_CPU_EVENT_MASK;
3416 cpu_reg.pc = BNX2_COM_CPU_PROGRAM_COUNTER;
3417 cpu_reg.inst = BNX2_COM_CPU_INSTRUCTION;
3418 cpu_reg.bp = BNX2_COM_CPU_HW_BREAKPOINT;
3419 cpu_reg.spad_base = BNX2_COM_SCRATCH;
3420 cpu_reg.mips_view_base = 0x8000000;
3422 if (CHIP_NUM(bp) == CHIP_NUM_5709)
3423 fw = &bnx2_com_fw_09;
3424 else
3425 fw = &bnx2_com_fw_06;
3427 fw->text = text;
3428 rc = load_cpu_fw(bp, &cpu_reg, fw);
3429 if (rc)
3430 goto init_cpu_err;
3432 /* Initialize the Command Processor. */
3433 cpu_reg.mode = BNX2_CP_CPU_MODE;
3434 cpu_reg.mode_value_halt = BNX2_CP_CPU_MODE_SOFT_HALT;
3435 cpu_reg.mode_value_sstep = BNX2_CP_CPU_MODE_STEP_ENA;
3436 cpu_reg.state = BNX2_CP_CPU_STATE;
3437 cpu_reg.state_value_clear = 0xffffff;
3438 cpu_reg.gpr0 = BNX2_CP_CPU_REG_FILE;
3439 cpu_reg.evmask = BNX2_CP_CPU_EVENT_MASK;
3440 cpu_reg.pc = BNX2_CP_CPU_PROGRAM_COUNTER;
3441 cpu_reg.inst = BNX2_CP_CPU_INSTRUCTION;
3442 cpu_reg.bp = BNX2_CP_CPU_HW_BREAKPOINT;
3443 cpu_reg.spad_base = BNX2_CP_SCRATCH;
3444 cpu_reg.mips_view_base = 0x8000000;
3446 if (CHIP_NUM(bp) == CHIP_NUM_5709)
3447 fw = &bnx2_cp_fw_09;
3448 else
3449 fw = &bnx2_cp_fw_06;
3451 fw->text = text;
3452 rc = load_cpu_fw(bp, &cpu_reg, fw);
3454 init_cpu_err:
3455 vfree(text);
3456 return rc;
3457 }
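/* Move the chip between D0 and D3hot, setting up WoL filters and
 * notifying the firmware before powering down.
 */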
3459 static int
3460 bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
3462 u16 pmcsr;
3464 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
3466 switch (state) {
3467 case PCI_D0: {
3468 u32 val;
3470 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
3471 (pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
3472 PCI_PM_CTRL_PME_STATUS);
3474 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
3475 /* delay required during transition out of D3hot */
3476 msleep(20);
3478 val = REG_RD(bp, BNX2_EMAC_MODE);
3479 val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
3480 val &= ~BNX2_EMAC_MODE_MPKT;
3481 REG_WR(bp, BNX2_EMAC_MODE, val);
3483 val = REG_RD(bp, BNX2_RPM_CONFIG);
3484 val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
3485 REG_WR(bp, BNX2_RPM_CONFIG, val);
3486 break;
3487 }
3488 case PCI_D3hot: {
3489 int i;
3490 u32 val, wol_msg;
3492 if (bp->wol) {
3493 u32 advertising;
3494 u8 autoneg;
3496 autoneg = bp->autoneg;
3497 advertising = bp->advertising;
3499 if (bp->phy_port == PORT_TP) {
3500 bp->autoneg = AUTONEG_SPEED;
3501 bp->advertising = ADVERTISED_10baseT_Half |
3502 ADVERTISED_10baseT_Full |
3503 ADVERTISED_100baseT_Half |
3504 ADVERTISED_100baseT_Full |
3505 ADVERTISED_Autoneg;
3506 }
3508 spin_lock_bh(&bp->phy_lock);
3509 bnx2_setup_phy(bp, bp->phy_port);
3510 spin_unlock_bh(&bp->phy_lock);
3512 bp->autoneg = autoneg;
3513 bp->advertising = advertising;
3515 bnx2_set_mac_addr(bp);
3517 val = REG_RD(bp, BNX2_EMAC_MODE);
3519 /* Enable port mode. */
3520 val &= ~BNX2_EMAC_MODE_PORT;
3521 val |= BNX2_EMAC_MODE_MPKT_RCVD |
3522 BNX2_EMAC_MODE_ACPI_RCVD |
3523 BNX2_EMAC_MODE_MPKT;
3524 if (bp->phy_port == PORT_TP)
3525 val |= BNX2_EMAC_MODE_PORT_MII;
3526 else {
3527 val |= BNX2_EMAC_MODE_PORT_GMII;
3528 if (bp->line_speed == SPEED_2500)
3529 val |= BNX2_EMAC_MODE_25G_MODE;
3530 }
3532 REG_WR(bp, BNX2_EMAC_MODE, val);
3534 /* receive all multicast */
3535 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
3536 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
3537 0xffffffff);
3538 }
3539 REG_WR(bp, BNX2_EMAC_RX_MODE,
3540 BNX2_EMAC_RX_MODE_SORT_MODE);
3542 val = 1 | BNX2_RPM_SORT_USER0_BC_EN |
3543 BNX2_RPM_SORT_USER0_MC_EN;
3544 REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
3545 REG_WR(bp, BNX2_RPM_SORT_USER0, val);
3546 REG_WR(bp, BNX2_RPM_SORT_USER0, val |
3547 BNX2_RPM_SORT_USER0_ENA);
3549 /* Need to enable EMAC and RPM for WOL. */
3550 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
3551 BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
3552 BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
3553 BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);
3555 val = REG_RD(bp, BNX2_RPM_CONFIG);
3556 val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
3557 REG_WR(bp, BNX2_RPM_CONFIG, val);
3559 wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
3560 }
3561 else {
3562 wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
3563 }
3565 if (!(bp->flags & BNX2_FLAG_NO_WOL))
3566 bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg, 0);
3568 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
3569 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
3570 (CHIP_ID(bp) == CHIP_ID_5706_A1)) {
3572 if (bp->wol)
3573 pmcsr |= 3;
3574 }
3575 else {
3576 pmcsr |= 3;
3577 }
3578 if (bp->wol) {
3579 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
3580 }
3581 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
3582 pmcsr);
3584 /* No more memory access after this point until
3585 * device is brought back to D0.
3586 */
3587 udelay(50);
3588 break;
3589 }
3590 default:
3591 return -EINVAL;
3592 }
3593 return 0;
3594 }
3596 static int
3597 bnx2_acquire_nvram_lock(struct bnx2 *bp)
3599 u32 val;
3600 int j;
3602 /* Request access to the flash interface. */
3603 REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
3604 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3605 val = REG_RD(bp, BNX2_NVM_SW_ARB);
3606 if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
3607 break;
3609 udelay(5);
3610 }
3612 if (j >= NVRAM_TIMEOUT_COUNT)
3613 return -EBUSY;
3615 return 0;
3618 static int
3619 bnx2_release_nvram_lock(struct bnx2 *bp)
3621 int j;
3622 u32 val;
3624 /* Relinquish nvram interface. */
3625 REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);
3627 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3628 val = REG_RD(bp, BNX2_NVM_SW_ARB);
3629 if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
3630 break;
3632 udelay(5);
3633 }
3635 if (j >= NVRAM_TIMEOUT_COUNT)
3636 return -EBUSY;
3638 return 0;
3642 static int
3643 bnx2_enable_nvram_write(struct bnx2 *bp)
3645 u32 val;
3647 val = REG_RD(bp, BNX2_MISC_CFG);
3648 REG_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);
3650 if (bp->flash_info->flags & BNX2_NV_WREN) {
3651 int j;
3653 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3654 REG_WR(bp, BNX2_NVM_COMMAND,
3655 BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);
3657 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3658 udelay(5);
3660 val = REG_RD(bp, BNX2_NVM_COMMAND);
3661 if (val & BNX2_NVM_COMMAND_DONE)
3662 break;
3663 }
3665 if (j >= NVRAM_TIMEOUT_COUNT)
3666 return -EBUSY;
3667 }
3668 return 0;
3671 static void
3672 bnx2_disable_nvram_write(struct bnx2 *bp)
3674 u32 val;
3676 val = REG_RD(bp, BNX2_MISC_CFG);
3677 REG_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
3681 static void
3682 bnx2_enable_nvram_access(struct bnx2 *bp)
3684 u32 val;
3686 val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
3687 /* Enable both bits, even on read. */
3688 REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
3689 val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
3692 static void
3693 bnx2_disable_nvram_access(struct bnx2 *bp)
3695 u32 val;
3697 val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
3698 /* Disable both bits, even after read. */
3699 REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
3700 val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
3701 BNX2_NVM_ACCESS_ENABLE_WR_EN));
3704 static int
3705 bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
3707 u32 cmd;
3708 int j;
3710 if (bp->flash_info->flags & BNX2_NV_BUFFERED)
3711 /* Buffered flash, no erase needed */
3712 return 0;
3714 /* Build an erase command */
3715 cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
3716 BNX2_NVM_COMMAND_DOIT;
3718 /* Need to clear DONE bit separately. */
3719 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3721 /* Address of the NVRAM page to erase. */
3722 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3724 /* Issue an erase command. */
3725 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3727 /* Wait for completion. */
3728 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3729 u32 val;
3731 udelay(5);
3733 val = REG_RD(bp, BNX2_NVM_COMMAND);
3734 if (val & BNX2_NVM_COMMAND_DONE)
3735 break;
3736 }
3738 if (j >= NVRAM_TIMEOUT_COUNT)
3739 return -EBUSY;
3741 return 0;
3744 static int
3745 bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
3747 u32 cmd;
3748 int j;
3750 /* Build the command word. */
3751 cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;
3753 /* Calculate the offset within a buffered flash; not needed for the 5709. */
3754 if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
3755 offset = ((offset / bp->flash_info->page_size) <<
3756 bp->flash_info->page_bits) +
3757 (offset % bp->flash_info->page_size);
3758 }
3760 /* Need to clear DONE bit separately. */
3761 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3763 /* Address of the NVRAM to read from. */
3764 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3766 /* Issue a read command. */
3767 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3769 /* Wait for completion. */
3770 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3771 u32 val;
3773 udelay(5);
3775 val = REG_RD(bp, BNX2_NVM_COMMAND);
3776 if (val & BNX2_NVM_COMMAND_DONE) {
3777 __be32 v = cpu_to_be32(REG_RD(bp, BNX2_NVM_READ));
3778 memcpy(ret_val, &v, 4);
3779 break;
3780 }
3781 }
3782 if (j >= NVRAM_TIMEOUT_COUNT)
3783 return -EBUSY;
3785 return 0;
3789 static int
3790 bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
3792 u32 cmd;
3793 __be32 val32;
3794 int j;
3796 /* Build the command word. */
3797 cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;
3799 /* Calculate the offset within a buffered flash; not needed for the 5709. */
3800 if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
3801 offset = ((offset / bp->flash_info->page_size) <<
3802 bp->flash_info->page_bits) +
3803 (offset % bp->flash_info->page_size);
3804 }
3806 /* Need to clear DONE bit separately. */
3807 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3809 memcpy(&val32, val, 4);
3811 /* Write the data. */
3812 REG_WR(bp, BNX2_NVM_WRITE, be32_to_cpu(val32));
3814 /* Address of the NVRAM to write to. */
3815 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3817 /* Issue the write command. */
3818 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3820 /* Wait for completion. */
3821 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3822 udelay(5);
3824 if (REG_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
3825 break;
3826 }
3827 if (j >= NVRAM_TIMEOUT_COUNT)
3828 return -EBUSY;
3830 return 0;
3833 static int
3834 bnx2_init_nvram(struct bnx2 *bp)
3836 u32 val;
3837 int j, entry_count, rc = 0;
3838 struct flash_spec *flash;
3840 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3841 bp->flash_info = &flash_5709;
3842 goto get_flash_size;
3843 }
3845 /* Determine the selected interface. */
3846 val = REG_RD(bp, BNX2_NVM_CFG1);
3848 entry_count = ARRAY_SIZE(flash_table);
3850 if (val & 0x40000000) {
3852 /* Flash interface has been reconfigured */
3853 for (j = 0, flash = &flash_table[0]; j < entry_count;
3854 j++, flash++) {
3855 if ((val & FLASH_BACKUP_STRAP_MASK) ==
3856 (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
3857 bp->flash_info = flash;
3858 break;
3859 }
3860 }
3861 }
3862 else {
3863 u32 mask;
3864 /* Not yet reconfigured */
3866 if (val & (1 << 23))
3867 mask = FLASH_BACKUP_STRAP_MASK;
3868 else
3869 mask = FLASH_STRAP_MASK;
3871 for (j = 0, flash = &flash_table[0]; j < entry_count;
3872 j++, flash++) {
3874 if ((val & mask) == (flash->strapping & mask)) {
3875 bp->flash_info = flash;
3877 /* Request access to the flash interface. */
3878 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
3879 return rc;
3881 /* Enable access to flash interface */
3882 bnx2_enable_nvram_access(bp);
3884 /* Reconfigure the flash interface */
3885 REG_WR(bp, BNX2_NVM_CFG1, flash->config1);
3886 REG_WR(bp, BNX2_NVM_CFG2, flash->config2);
3887 REG_WR(bp, BNX2_NVM_CFG3, flash->config3);
3888 REG_WR(bp, BNX2_NVM_WRITE1, flash->write1);
3890 /* Disable access to flash interface */
3891 bnx2_disable_nvram_access(bp);
3892 bnx2_release_nvram_lock(bp);
3894 break;
3895 }
3896 }
3897 } /* if (val & 0x40000000) */
3899 if (j == entry_count) {
3900 bp->flash_info = NULL;
3901 printk(KERN_ALERT PFX "Unknown flash/EEPROM type.\n");
3902 return -ENODEV;
3903 }
3905 get_flash_size:
3906 val = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG2);
3907 val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
3908 if (val)
3909 bp->flash_size = val;
3910 else
3911 bp->flash_size = bp->flash_info->total_size;
3913 return rc;
3916 static int
3917 bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
3918 int buf_size)
3920 int rc = 0;
3921 u32 cmd_flags, offset32, len32, extra;
3923 if (buf_size == 0)
3924 return 0;
3926 /* Request access to the flash interface. */
3927 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
3928 return rc;
3930 /* Enable access to flash interface */
3931 bnx2_enable_nvram_access(bp);
3933 len32 = buf_size;
3934 offset32 = offset;
3935 extra = 0;
3937 cmd_flags = 0;
3939 if (offset32 & 3) {
3940 u8 buf[4];
3941 u32 pre_len;
3943 offset32 &= ~3;
3944 pre_len = 4 - (offset & 3);
3946 if (pre_len >= len32) {
3947 pre_len = len32;
3948 cmd_flags = BNX2_NVM_COMMAND_FIRST |
3949 BNX2_NVM_COMMAND_LAST;
3950 }
3951 else {
3952 cmd_flags = BNX2_NVM_COMMAND_FIRST;
3953 }
3955 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3957 if (rc)
3958 return rc;
3960 memcpy(ret_buf, buf + (offset & 3), pre_len);
3962 offset32 += 4;
3963 ret_buf += pre_len;
3964 len32 -= pre_len;
3965 }
3966 if (len32 & 3) {
3967 extra = 4 - (len32 & 3);
3968 len32 = (len32 + 4) & ~3;
3969 }
3971 if (len32 == 4) {
3972 u8 buf[4];
3974 if (cmd_flags)
3975 cmd_flags = BNX2_NVM_COMMAND_LAST;
3976 else
3977 cmd_flags = BNX2_NVM_COMMAND_FIRST |
3978 BNX2_NVM_COMMAND_LAST;
3980 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3982 memcpy(ret_buf, buf, 4 - extra);
3983 }
3984 else if (len32 > 0) {
3985 u8 buf[4];
3987 /* Read the first word. */
3988 if (cmd_flags)
3989 cmd_flags = 0;
3990 else
3991 cmd_flags = BNX2_NVM_COMMAND_FIRST;
3993 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);
3995 /* Advance to the next dword. */
3996 offset32 += 4;
3997 ret_buf += 4;
3998 len32 -= 4;
4000 while (len32 > 4 && rc == 0) {
4001 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);
4003 /* Advance to the next dword. */
4004 offset32 += 4;
4005 ret_buf += 4;
4006 len32 -= 4;
4007 }
4009 if (rc)
4010 return rc;
4012 cmd_flags = BNX2_NVM_COMMAND_LAST;
4013 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4015 memcpy(ret_buf, buf, 4 - extra);
4016 }
4018 /* Disable access to flash interface */
4019 bnx2_disable_nvram_access(bp);
4021 bnx2_release_nvram_lock(bp);
4023 return rc;
4026 static int
4027 bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
4028 int buf_size)
4030 u32 written, offset32, len32;
4031 u8 *buf, start[4], end[4], *align_buf = NULL, *flash_buffer = NULL;
4032 int rc = 0;
4033 int align_start, align_end;
4035 buf = data_buf;
4036 offset32 = offset;
4037 len32 = buf_size;
4038 align_start = align_end = 0;
4040 if ((align_start = (offset32 & 3))) {
4041 offset32 &= ~3;
4042 len32 += align_start;
4043 if (len32 < 4)
4044 len32 = 4;
4045 if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
4046 return rc;
4049 if (len32 & 3) {
4050 align_end = 4 - (len32 & 3);
4051 len32 += align_end;
4052 if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4, end, 4)))
4053 return rc;
4056 if (align_start || align_end) {
4057 align_buf = kmalloc(len32, GFP_KERNEL);
4058 if (align_buf == NULL)
4059 return -ENOMEM;
4060 if (align_start) {
4061 memcpy(align_buf, start, 4);
4062 }
4063 if (align_end) {
4064 memcpy(align_buf + len32 - 4, end, 4);
4065 }
4066 memcpy(align_buf + align_start, data_buf, buf_size);
4067 buf = align_buf;
4068 }
4070 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4071 flash_buffer = kmalloc(264, GFP_KERNEL);
4072 if (flash_buffer == NULL) {
4073 rc = -ENOMEM;
4074 goto nvram_write_end;
4075 }
4076 }
4078 written = 0;
4079 while ((written < len32) && (rc == 0)) {
4080 u32 page_start, page_end, data_start, data_end;
4081 u32 addr, cmd_flags;
4082 int i;
4084 /* Find the page_start addr */
4085 page_start = offset32 + written;
4086 page_start -= (page_start % bp->flash_info->page_size);
4087 /* Find the page_end addr */
4088 page_end = page_start + bp->flash_info->page_size;
4089 /* Find the data_start addr */
4090 data_start = (written == 0) ? offset32 : page_start;
4091 /* Find the data_end addr */
4092 data_end = (page_end > offset32 + len32) ?
4093 (offset32 + len32) : page_end;
4095 /* Request access to the flash interface. */
4096 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
4097 goto nvram_write_end;
4099 /* Enable access to flash interface */
4100 bnx2_enable_nvram_access(bp);
4102 cmd_flags = BNX2_NVM_COMMAND_FIRST;
4103 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4104 int j;
4106 /* Read the whole page into the buffer
4107 * (non-buffered flash only) */
4108 for (j = 0; j < bp->flash_info->page_size; j += 4) {
4109 if (j == (bp->flash_info->page_size - 4)) {
4110 cmd_flags |= BNX2_NVM_COMMAND_LAST;
4111 }
4112 rc = bnx2_nvram_read_dword(bp,
4113 page_start + j,
4114 &flash_buffer[j],
4115 cmd_flags);
4117 if (rc)
4118 goto nvram_write_end;
4120 cmd_flags = 0;
4121 }
4122 }
4124 /* Enable writes to flash interface (unlock write-protect) */
4125 if ((rc = bnx2_enable_nvram_write(bp)) != 0)
4126 goto nvram_write_end;
4128 /* Loop to write back the buffer data from page_start to
4129 * data_start */
4130 i = 0;
4131 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4132 /* Erase the page */
4133 if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
4134 goto nvram_write_end;
4136 /* Re-enable the write again for the actual write */
4137 bnx2_enable_nvram_write(bp);
4139 for (addr = page_start; addr < data_start;
4140 addr += 4, i += 4) {
4142 rc = bnx2_nvram_write_dword(bp, addr,
4143 &flash_buffer[i], cmd_flags);
4145 if (rc != 0)
4146 goto nvram_write_end;
4148 cmd_flags = 0;
4149 }
4150 }
4152 /* Loop to write the new data from data_start to data_end */
4153 for (addr = data_start; addr < data_end; addr += 4, i += 4) {
4154 if ((addr == page_end - 4) ||
4155 ((bp->flash_info->flags & BNX2_NV_BUFFERED) &&
4156 (addr == data_end - 4))) {
4158 cmd_flags |= BNX2_NVM_COMMAND_LAST;
4159 }
4160 rc = bnx2_nvram_write_dword(bp, addr, buf,
4161 cmd_flags);
4163 if (rc != 0)
4164 goto nvram_write_end;
4166 cmd_flags = 0;
4167 buf += 4;
4168 }
4170 /* Loop to write back the buffer data from data_end
4171 * to page_end */
4172 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4173 for (addr = data_end; addr < page_end;
4174 addr += 4, i += 4) {
4176 if (addr == page_end - 4) {
4177 cmd_flags = BNX2_NVM_COMMAND_LAST;
4178 }
4179 rc = bnx2_nvram_write_dword(bp, addr,
4180 &flash_buffer[i], cmd_flags);
4182 if (rc != 0)
4183 goto nvram_write_end;
4185 cmd_flags = 0;
4186 }
4187 }
4189 /* Disable writes to flash interface (lock write-protect) */
4190 bnx2_disable_nvram_write(bp);
4192 /* Disable access to flash interface */
4193 bnx2_disable_nvram_access(bp);
4194 bnx2_release_nvram_lock(bp);
4196 /* Increment written */
4197 written += data_end - data_start;
4198 }
4200 nvram_write_end:
4201 kfree(flash_buffer);
4202 kfree(align_buf);
4203 return rc;
4204 }
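/* Detect firmware-managed (remote) PHY support on SerDes devices and
 * pick up the initial link state reported by the firmware.
 */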
4206 static void
4207 bnx2_init_remote_phy(struct bnx2 *bp)
4209 u32 val;
4211 bp->phy_flags &= ~BNX2_PHY_FLAG_REMOTE_PHY_CAP;
4212 if (!(bp->phy_flags & BNX2_PHY_FLAG_SERDES))
4213 return;
4215 val = bnx2_shmem_rd(bp, BNX2_FW_CAP_MB);
4216 if ((val & BNX2_FW_CAP_SIGNATURE_MASK) != BNX2_FW_CAP_SIGNATURE)
4217 return;
4219 if (val & BNX2_FW_CAP_REMOTE_PHY_CAPABLE) {
4220 bp->phy_flags |= BNX2_PHY_FLAG_REMOTE_PHY_CAP;
4222 val = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);
4223 if (val & BNX2_LINK_STATUS_SERDES_LINK)
4224 bp->phy_port = PORT_FIBRE;
4225 else
4226 bp->phy_port = PORT_TP;
4228 if (netif_running(bp->dev)) {
4229 u32 sig;
4231 if (val & BNX2_LINK_STATUS_LINK_UP) {
4232 bp->link_up = 1;
4233 netif_carrier_on(bp->dev);
4234 } else {
4235 bp->link_up = 0;
4236 netif_carrier_off(bp->dev);
4237 }
4238 sig = BNX2_DRV_ACK_CAP_SIGNATURE |
4239 BNX2_FW_CAP_REMOTE_PHY_CAPABLE;
4240 bnx2_shmem_wr(bp, BNX2_DRV_ACK_CAP_MB, sig);
4241 }
4242 }
4243 }
4245 static void
4246 bnx2_setup_msix_tbl(struct bnx2 *bp)
4248 REG_WR(bp, BNX2_PCI_GRC_WINDOW_ADDR, BNX2_PCI_GRC_WINDOW_ADDR_SEP_WIN);
4250 REG_WR(bp, BNX2_PCI_GRC_WINDOW2_ADDR, BNX2_MSIX_TABLE_ADDR);
4251 REG_WR(bp, BNX2_PCI_GRC_WINDOW3_ADDR, BNX2_MSIX_PBA_ADDR);
4252 }
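/* Soft-reset the chip: quiesce DMA, handshake with the firmware, issue
 * the core reset, then reapply chip-specific fixups.
 */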
4254 static int
4255 bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
4257 u32 val;
4258 int i, rc = 0;
4259 u8 old_port;
4261 /* Wait for the current PCI transaction to complete before
4262 * issuing a reset. */
4263 REG_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
4264 BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
4265 BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
4266 BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
4267 BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
4268 val = REG_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
4269 udelay(5);
4271 /* Wait for the firmware to tell us it is ok to issue a reset. */
4272 bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1);
4274 /* Deposit a driver reset signature so the firmware knows that
4275 * this is a soft reset. */
4276 bnx2_shmem_wr(bp, BNX2_DRV_RESET_SIGNATURE,
4277 BNX2_DRV_RESET_SIGNATURE_MAGIC);
4279 /* Do a dummy read to force the chip to complete all current transactions
4280 * before we issue a reset. */
4281 val = REG_RD(bp, BNX2_MISC_ID);
4283 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4284 REG_WR(bp, BNX2_MISC_COMMAND, BNX2_MISC_COMMAND_SW_RESET);
4285 REG_RD(bp, BNX2_MISC_COMMAND);
4286 udelay(5);
4288 val = BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
4289 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
4291 pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, val);
4293 } else {
4294 val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
4295 BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
4296 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
4298 /* Chip reset. */
4299 REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);
4301 /* Reading back any register after chip reset will hang the
4302 * bus on 5706 A0 and A1. The msleep below provides plenty
4303 * of margin for write posting.
4304 */
4305 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
4306 (CHIP_ID(bp) == CHIP_ID_5706_A1))
4307 msleep(20);
4309 /* Reset takes approximately 30 usec */
4310 for (i = 0; i < 10; i++) {
4311 val = REG_RD(bp, BNX2_PCICFG_MISC_CONFIG);
4312 if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
4313 BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0)
4314 break;
4315 udelay(10);
4316 }
4318 if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
4319 BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
4320 printk(KERN_ERR PFX "Chip reset did not complete\n");
4321 return -EBUSY;
4322 }
4323 }
4325 /* Make sure byte swapping is properly configured. */
4326 val = REG_RD(bp, BNX2_PCI_SWAP_DIAG0);
4327 if (val != 0x01020304) {
4328 printk(KERN_ERR PFX "Chip not in correct endian mode\n");
4329 return -ENODEV;
4330 }
4332 /* Wait for the firmware to finish its initialization. */
4333 rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 0);
4334 if (rc)
4335 return rc;
4337 spin_lock_bh(&bp->phy_lock);
4338 old_port = bp->phy_port;
4339 bnx2_init_remote_phy(bp);
4340 if ((bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) &&
4341 old_port != bp->phy_port)
4342 bnx2_set_default_remote_link(bp);
4343 spin_unlock_bh(&bp->phy_lock);
4345 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
4346 /* Adjust the voltage regulator two steps lower. The default
4347 * value of this register is 0x0000000e. */
4348 REG_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);
4350 /* Remove bad rbuf memory from the free pool. */
4351 rc = bnx2_alloc_bad_rbuf(bp);
4354 if (bp->flags & BNX2_FLAG_USING_MSIX)
4355 bnx2_setup_msix_tbl(bp);
4357 return rc;
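/* Bring the freshly reset chip to an operational state: program DMA
 * byte/word swapping for the host endianness, load the internal RISC
 * CPUs, seed the EMAC backoff from the MAC address, program the MTU,
 * and configure the host coalescing block before enabling the default
 * engines and handshaking WAIT2 with the firmware. */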
4360 static int
4361 bnx2_init_chip(struct bnx2 *bp)
4363 u32 val;
4364 int rc, i;
4366 /* Make sure the interrupt is not active. */
4367 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
4369 val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
4370 BNX2_DMA_CONFIG_DATA_WORD_SWAP |
4371 #ifdef __BIG_ENDIAN
4372 BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
4373 #endif
4374 BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
4375 DMA_READ_CHANS << 12 |
4376 DMA_WRITE_CHANS << 16;
4378 val |= (0x2 << 20) | (1 << 11);
4380 if ((bp->flags & BNX2_FLAG_PCIX) && (bp->bus_speed_mhz == 133))
4381 val |= (1 << 23);
4383 if ((CHIP_NUM(bp) == CHIP_NUM_5706) &&
4384 (CHIP_ID(bp) != CHIP_ID_5706_A0) && !(bp->flags & BNX2_FLAG_PCIX))
4385 val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;
4387 REG_WR(bp, BNX2_DMA_CONFIG, val);
4389 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
4390 val = REG_RD(bp, BNX2_TDMA_CONFIG);
4391 val |= BNX2_TDMA_CONFIG_ONE_DMA;
4392 REG_WR(bp, BNX2_TDMA_CONFIG, val);
4395 if (bp->flags & BNX2_FLAG_PCIX) {
4396 u16 val16;
4398 pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
4399 &val16);
4400 pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
4401 val16 & ~PCI_X_CMD_ERO);
4404 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
4405 BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
4406 BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
4407 BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);
4409 /* Initialize context mapping and zero out the quick contexts. The
4410 * context block must have already been enabled. */
4411 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4412 rc = bnx2_init_5709_context(bp);
4413 if (rc)
4414 return rc;
4415 } else
4416 bnx2_init_context(bp);
4418 if ((rc = bnx2_init_cpus(bp)) != 0)
4419 return rc;
4421 bnx2_init_nvram(bp);
4423 bnx2_set_mac_addr(bp);
4425 val = REG_RD(bp, BNX2_MQ_CONFIG);
4426 val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
4427 val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
4428 if (CHIP_ID(bp) == CHIP_ID_5709_A0 || CHIP_ID(bp) == CHIP_ID_5709_A1)
4429 val |= BNX2_MQ_CONFIG_HALT_DIS;
4431 REG_WR(bp, BNX2_MQ_CONFIG, val);
4433 val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
4434 REG_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
4435 REG_WR(bp, BNX2_MQ_KNL_WIND_END, val);
4437 val = (BCM_PAGE_BITS - 8) << 24;
4438 REG_WR(bp, BNX2_RV2P_CONFIG, val);
4440 /* Configure page size. */
4441 val = REG_RD(bp, BNX2_TBDR_CONFIG);
4442 val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
4443 val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
4444 REG_WR(bp, BNX2_TBDR_CONFIG, val);
4446 val = bp->mac_addr[0] +
4447 (bp->mac_addr[1] << 8) +
4448 (bp->mac_addr[2] << 16) +
4449 bp->mac_addr[3] +
4450 (bp->mac_addr[4] << 8) +
4451 (bp->mac_addr[5] << 16);
4452 REG_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);
4454 /* Program the MTU. Also include 4 bytes for CRC32. */
4455 val = bp->dev->mtu + ETH_HLEN + 4;
4456 if (val > (MAX_ETHERNET_PACKET_SIZE + 4))
4457 val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
4458 REG_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);
4460 for (i = 0; i < BNX2_MAX_MSIX_VEC; i++)
4461 bp->bnx2_napi[i].last_status_idx = 0;
4463 bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE;
4465 /* Set up how to generate a link change interrupt. */
4466 REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
4468 REG_WR(bp, BNX2_HC_STATUS_ADDR_L,
4469 (u64) bp->status_blk_mapping & 0xffffffff);
4470 REG_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);
4472 REG_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
4473 (u64) bp->stats_blk_mapping & 0xffffffff);
4474 REG_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
4475 (u64) bp->stats_blk_mapping >> 32);
4477 REG_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
4478 (bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);
4480 REG_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
4481 (bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);
4483 REG_WR(bp, BNX2_HC_COMP_PROD_TRIP,
4484 (bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);
4486 REG_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);
4488 REG_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);
4490 REG_WR(bp, BNX2_HC_COM_TICKS,
4491 (bp->com_ticks_int << 16) | bp->com_ticks);
4493 REG_WR(bp, BNX2_HC_CMD_TICKS,
4494 (bp->cmd_ticks_int << 16) | bp->cmd_ticks);
4496 if (CHIP_NUM(bp) == CHIP_NUM_5708)
4497 REG_WR(bp, BNX2_HC_STATS_TICKS, 0);
4498 else
4499 REG_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks);
4500 REG_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8); /* 3ms */
4502 if (CHIP_ID(bp) == CHIP_ID_5706_A1)
4503 val = BNX2_HC_CONFIG_COLLECT_STATS;
4504 else {
4505 val = BNX2_HC_CONFIG_RX_TMR_MODE | BNX2_HC_CONFIG_TX_TMR_MODE |
4506 BNX2_HC_CONFIG_COLLECT_STATS;
4509 if (bp->flags & BNX2_FLAG_USING_MSIX) {
4510 u32 base = ((BNX2_TX_VEC - 1) * BNX2_HC_SB_CONFIG_SIZE) +
4511 BNX2_HC_SB_CONFIG_1;
4513 REG_WR(bp, BNX2_HC_MSIX_BIT_VECTOR,
4514 BNX2_HC_MSIX_BIT_VECTOR_VAL);
4516 REG_WR(bp, base,
4517 BNX2_HC_SB_CONFIG_1_TX_TMR_MODE |
4518 BNX2_HC_SB_CONFIG_1_ONE_SHOT);
4520 REG_WR(bp, base + BNX2_HC_TX_QUICK_CONS_TRIP_OFF,
4521 (bp->tx_quick_cons_trip_int << 16) |
4522 bp->tx_quick_cons_trip);
4524 REG_WR(bp, base + BNX2_HC_TX_TICKS_OFF,
4525 (bp->tx_ticks_int << 16) | bp->tx_ticks);
4527 val |= BNX2_HC_CONFIG_SB_ADDR_INC_128B;
4530 if (bp->flags & BNX2_FLAG_ONE_SHOT_MSI)
4531 val |= BNX2_HC_CONFIG_ONE_SHOT;
4533 REG_WR(bp, BNX2_HC_CONFIG, val);
4535 /* Clear internal stats counters. */
4536 REG_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);
4538 REG_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_EVENTS);
4540 /* Initialize the receive filter. */
4541 bnx2_set_rx_mode(bp->dev);
4543 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4544 val = REG_RD(bp, BNX2_MISC_NEW_CORE_CTL);
4545 val |= BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
4546 REG_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
4548 rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET, 0);
4551 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, BNX2_MISC_ENABLE_DEFAULT);
4552 REG_RD(bp, BNX2_MISC_ENABLE_SET_BITS);
4554 udelay(20);
4556 bp->hc_cmd = REG_RD(bp, BNX2_HC_COMMAND);
4558 return rc;
4561 static void
4562 bnx2_clear_ring_states(struct bnx2 *bp)
4564 struct bnx2_napi *bnapi;
4565 int i;
4567 for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
4568 bnapi = &bp->bnx2_napi[i];
4570 bnapi->tx_cons = 0;
4571 bnapi->hw_tx_cons = 0;
4572 bnapi->rx_prod_bseq = 0;
4573 bnapi->rx_prod = 0;
4574 bnapi->rx_cons = 0;
4575 bnapi->rx_pg_prod = 0;
4576 bnapi->rx_pg_cons = 0;
4580 static void
4581 bnx2_init_tx_context(struct bnx2 *bp, u32 cid)
4583 u32 val, offset0, offset1, offset2, offset3;
4584 u32 cid_addr = GET_CID_ADDR(cid);
4586 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4587 offset0 = BNX2_L2CTX_TYPE_XI;
4588 offset1 = BNX2_L2CTX_CMD_TYPE_XI;
4589 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
4590 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
4591 } else {
4592 offset0 = BNX2_L2CTX_TYPE;
4593 offset1 = BNX2_L2CTX_CMD_TYPE;
4594 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
4595 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
4597 val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
4598 bnx2_ctx_wr(bp, cid_addr, offset0, val);
4600 val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
4601 bnx2_ctx_wr(bp, cid_addr, offset1, val);
4603 val = (u64) bp->tx_desc_mapping >> 32;
4604 bnx2_ctx_wr(bp, cid_addr, offset2, val);
4606 val = (u64) bp->tx_desc_mapping & 0xffffffff;
4607 bnx2_ctx_wr(bp, cid_addr, offset3, val);
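/* Set up the single TX ring: the last BD in the ring page is used as a
 * chain pointer back to the start of the ring, and the host producer
 * index/sequence mailboxes are derived from the ring's CID (TX_TSS_CID
 * when MSI-X steers TX completions to their own vector). */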
4610 static void
4611 bnx2_init_tx_ring(struct bnx2 *bp)
4613 struct tx_bd *txbd;
4614 u32 cid = TX_CID;
4615 struct bnx2_napi *bnapi;
4617 bp->tx_vec = 0;
4618 if (bp->flags & BNX2_FLAG_USING_MSIX) {
4619 cid = TX_TSS_CID;
4620 bp->tx_vec = BNX2_TX_VEC;
4621 REG_WR(bp, BNX2_TSCH_TSS_CFG, BNX2_TX_INT_NUM |
4622 (TX_TSS_CID << 7));
4624 bnapi = &bp->bnx2_napi[bp->tx_vec];
4626 bp->tx_wake_thresh = bp->tx_ring_size / 2;
4628 txbd = &bp->tx_desc_ring[MAX_TX_DESC_CNT];
4630 txbd->tx_bd_haddr_hi = (u64) bp->tx_desc_mapping >> 32;
4631 txbd->tx_bd_haddr_lo = (u64) bp->tx_desc_mapping & 0xffffffff;
4633 bp->tx_prod = 0;
4634 bp->tx_prod_bseq = 0;
4636 bp->tx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BIDX;
4637 bp->tx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BSEQ;
4639 bnx2_init_tx_context(bp, cid);
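/* Each RX BD page forms one segment of the ring: the loop below fills
 * all but the last BD of every page with buffer descriptors and turns
 * the final BD into a chain pointer to the next page, with the last
 * page pointing back to page 0 to close the circle. */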
4642 static void
4643 bnx2_init_rxbd_rings(struct rx_bd *rx_ring[], dma_addr_t dma[], u32 buf_size,
4644 int num_rings)
4646 int i;
4647 struct rx_bd *rxbd;
4649 for (i = 0; i < num_rings; i++) {
4650 int j;
4652 rxbd = &rx_ring[i][0];
4653 for (j = 0; j < MAX_RX_DESC_CNT; j++, rxbd++) {
4654 rxbd->rx_bd_len = buf_size;
4655 rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
4657 if (i == (num_rings - 1))
4658 j = 0;
4659 else
4660 j = i + 1;
4661 rxbd->rx_bd_haddr_hi = (u64) dma[j] >> 32;
4662 rxbd->rx_bd_haddr_lo = (u64) dma[j] & 0xffffffff;
4666 static void
4667 bnx2_init_rx_ring(struct bnx2 *bp)
4669 int i;
4670 u16 prod, ring_prod;
4671 u32 val, rx_cid_addr = GET_CID_ADDR(RX_CID);
4672 struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
4674 bnx2_init_rxbd_rings(bp->rx_desc_ring, bp->rx_desc_mapping,
4675 bp->rx_buf_use_size, bp->rx_max_ring);
4677 bnx2_init_rx_context0(bp);
4679 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4680 val = REG_RD(bp, BNX2_MQ_MAP_L2_5);
4681 REG_WR(bp, BNX2_MQ_MAP_L2_5, val | BNX2_MQ_MAP_L2_5_ARM);
4684 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, 0);
4685 if (bp->rx_pg_ring_size) {
4686 bnx2_init_rxbd_rings(bp->rx_pg_desc_ring,
4687 bp->rx_pg_desc_mapping,
4688 PAGE_SIZE, bp->rx_max_pg_ring);
4689 val = (bp->rx_buf_use_size << 16) | PAGE_SIZE;
4690 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, val);
4691 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_RBDC_KEY,
4692 BNX2_L2CTX_RBDC_JUMBO_KEY);
4694 val = (u64) bp->rx_pg_desc_mapping[0] >> 32;
4695 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_HI, val);
4697 val = (u64) bp->rx_pg_desc_mapping[0] & 0xffffffff;
4698 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_LO, val);
4700 if (CHIP_NUM(bp) == CHIP_NUM_5709)
4701 REG_WR(bp, BNX2_MQ_MAP_L2_3, BNX2_MQ_MAP_L2_3_DEFAULT);
4704 val = (u64) bp->rx_desc_mapping[0] >> 32;
4705 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val);
4707 val = (u64) bp->rx_desc_mapping[0] & 0xffffffff;
4708 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val);
4710 ring_prod = prod = bnapi->rx_pg_prod;
4711 for (i = 0; i < bp->rx_pg_ring_size; i++) {
4712 if (bnx2_alloc_rx_page(bp, ring_prod) < 0)
4713 break;
4714 prod = NEXT_RX_BD(prod);
4715 ring_prod = RX_PG_RING_IDX(prod);
4717 bnapi->rx_pg_prod = prod;
4719 ring_prod = prod = bnapi->rx_prod;
4720 for (i = 0; i < bp->rx_ring_size; i++) {
4721 if (bnx2_alloc_rx_skb(bp, bnapi, ring_prod) < 0) {
4722 break;
4724 prod = NEXT_RX_BD(prod);
4725 ring_prod = RX_RING_IDX(prod);
4727 bnapi->rx_prod = prod;
4729 REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_PG_BDIDX,
4730 bnapi->rx_pg_prod);
4731 REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, prod);
4733 REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bnapi->rx_prod_bseq);
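/* Round a requested descriptor count up to a power-of-two number of
 * ring pages; max_size (itself a power of two) bounds the search.
 * E.g. a request spanning three pages comes back as four. */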
4736 static u32 bnx2_find_max_ring(u32 ring_size, u32 max_size)
4738 u32 max, num_rings = 1;
4740 while (ring_size > MAX_RX_DESC_CNT) {
4741 ring_size -= MAX_RX_DESC_CNT;
4742 num_rings++;
4744 /* round to next power of 2 */
4745 max = max_size;
4746 while ((max & num_rings) == 0)
4747 max >>= 1;
4749 if (num_rings != max)
4750 max <<= 1;
4752 return max;
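/* Pick the RX buffer geometry for the current MTU.  When a full frame
 * plus overhead no longer fits in one page (and jumbo handling is not
 * broken on this chip), only the header portion stays in the normal BD
 * ring and the rest of each frame lands in the page ring sized here. */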
4755 static void
4756 bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
4758 u32 rx_size, rx_space, jumbo_size;
4760 /* 8 for CRC and VLAN */
4761 rx_size = bp->dev->mtu + ETH_HLEN + bp->rx_offset + 8;
4763 rx_space = SKB_DATA_ALIGN(rx_size + BNX2_RX_ALIGN) + NET_SKB_PAD +
4764 sizeof(struct skb_shared_info);
4766 bp->rx_copy_thresh = RX_COPY_THRESH;
4767 bp->rx_pg_ring_size = 0;
4768 bp->rx_max_pg_ring = 0;
4769 bp->rx_max_pg_ring_idx = 0;
4770 if ((rx_space > PAGE_SIZE) && !(bp->flags & BNX2_FLAG_JUMBO_BROKEN)) {
4771 int pages = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;
4773 jumbo_size = size * pages;
4774 if (jumbo_size > MAX_TOTAL_RX_PG_DESC_CNT)
4775 jumbo_size = MAX_TOTAL_RX_PG_DESC_CNT;
4777 bp->rx_pg_ring_size = jumbo_size;
4778 bp->rx_max_pg_ring = bnx2_find_max_ring(jumbo_size,
4779 MAX_RX_PG_RINGS);
4780 bp->rx_max_pg_ring_idx = (bp->rx_max_pg_ring * RX_DESC_CNT) - 1;
4781 rx_size = RX_COPY_THRESH + bp->rx_offset;
4782 bp->rx_copy_thresh = 0;
4785 bp->rx_buf_use_size = rx_size;
4786 /* hw alignment */
4787 bp->rx_buf_size = bp->rx_buf_use_size + BNX2_RX_ALIGN;
4788 bp->rx_jumbo_thresh = rx_size - bp->rx_offset;
4789 bp->rx_ring_size = size;
4790 bp->rx_max_ring = bnx2_find_max_ring(size, MAX_RX_RINGS);
4791 bp->rx_max_ring_idx = (bp->rx_max_ring * RX_DESC_CNT) - 1;
4794 static void
4795 bnx2_free_tx_skbs(struct bnx2 *bp)
4797 int i;
4799 if (bp->tx_buf_ring == NULL)
4800 return;
4802 for (i = 0; i < TX_DESC_CNT; ) {
4803 struct sw_bd *tx_buf = &bp->tx_buf_ring[i];
4804 struct sk_buff *skb = tx_buf->skb;
4805 int j, last;
4807 if (skb == NULL) {
4808 i++;
4809 continue;
4812 pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
4813 skb_headlen(skb), PCI_DMA_TODEVICE);
4815 tx_buf->skb = NULL;
4817 last = skb_shinfo(skb)->nr_frags;
4818 for (j = 0; j < last; j++) {
4819 tx_buf = &bp->tx_buf_ring[i + j + 1];
4820 pci_unmap_page(bp->pdev,
4821 pci_unmap_addr(tx_buf, mapping),
4822 skb_shinfo(skb)->frags[j].size,
4823 PCI_DMA_TODEVICE);
4825 dev_kfree_skb(skb);
4826 i += j + 1;
4831 static void
4832 bnx2_free_rx_skbs(struct bnx2 *bp)
4834 int i;
4836 if (bp->rx_buf_ring == NULL)
4837 return;
4839 for (i = 0; i < bp->rx_max_ring_idx; i++) {
4840 struct sw_bd *rx_buf = &bp->rx_buf_ring[i];
4841 struct sk_buff *skb = rx_buf->skb;
4843 if (skb == NULL)
4844 continue;
4846 pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
4847 bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
4849 rx_buf->skb = NULL;
4851 dev_kfree_skb(skb);
4853 for (i = 0; i < bp->rx_max_pg_ring_idx; i++)
4854 bnx2_free_rx_page(bp, i);
4857 static void
4858 bnx2_free_skbs(struct bnx2 *bp)
4860 bnx2_free_tx_skbs(bp);
4861 bnx2_free_rx_skbs(bp);
4864 static int
4865 bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
4867 int rc;
4869 rc = bnx2_reset_chip(bp, reset_code);
4870 bnx2_free_skbs(bp);
4871 if (rc)
4872 return rc;
4874 if ((rc = bnx2_init_chip(bp)) != 0)
4875 return rc;
4877 bnx2_clear_ring_states(bp);
4878 bnx2_init_tx_ring(bp);
4879 bnx2_init_rx_ring(bp);
4880 return 0;
4883 static int
4884 bnx2_init_nic(struct bnx2 *bp)
4886 int rc;
4888 if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
4889 return rc;
4891 spin_lock_bh(&bp->phy_lock);
4892 bnx2_init_phy(bp);
4893 bnx2_set_link(bp);
4894 spin_unlock_bh(&bp->phy_lock);
4895 return 0;
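/* Register self-test: for each table entry, write zero and then
 * all-ones, checking that read/write bits (rw_mask) accept the value
 * while read-only bits (ro_mask) keep their saved contents; the
 * original value is restored in every exit path. */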
4898 static int
4899 bnx2_test_registers(struct bnx2 *bp)
4901 int ret;
4902 int i, is_5709;
4903 static const struct {
4904 u16 offset;
4905 u16 flags;
4906 #define BNX2_FL_NOT_5709 1
4907 u32 rw_mask;
4908 u32 ro_mask;
4909 } reg_tbl[] = {
4910 { 0x006c, 0, 0x00000000, 0x0000003f },
4911 { 0x0090, 0, 0xffffffff, 0x00000000 },
4912 { 0x0094, 0, 0x00000000, 0x00000000 },
4914 { 0x0404, BNX2_FL_NOT_5709, 0x00003f00, 0x00000000 },
4915 { 0x0418, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4916 { 0x041c, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4917 { 0x0420, BNX2_FL_NOT_5709, 0x00000000, 0x80ffffff },
4918 { 0x0424, BNX2_FL_NOT_5709, 0x00000000, 0x00000000 },
4919 { 0x0428, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
4920 { 0x0450, BNX2_FL_NOT_5709, 0x00000000, 0x0000ffff },
4921 { 0x0454, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4922 { 0x0458, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4924 { 0x0808, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4925 { 0x0854, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4926 { 0x0868, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
4927 { 0x086c, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
4928 { 0x0870, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
4929 { 0x0874, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
4931 { 0x0c00, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
4932 { 0x0c04, BNX2_FL_NOT_5709, 0x00000000, 0x03ff0001 },
4933 { 0x0c08, BNX2_FL_NOT_5709, 0x0f0ff073, 0x00000000 },
4935 { 0x1000, 0, 0x00000000, 0x00000001 },
4936 { 0x1004, 0, 0x00000000, 0x000f0001 },
4938 { 0x1408, 0, 0x01c00800, 0x00000000 },
4939 { 0x149c, 0, 0x8000ffff, 0x00000000 },
4940 { 0x14a8, 0, 0x00000000, 0x000001ff },
4941 { 0x14ac, 0, 0x0fffffff, 0x10000000 },
4942 { 0x14b0, 0, 0x00000002, 0x00000001 },
4943 { 0x14b8, 0, 0x00000000, 0x00000000 },
4944 { 0x14c0, 0, 0x00000000, 0x00000009 },
4945 { 0x14c4, 0, 0x00003fff, 0x00000000 },
4946 { 0x14cc, 0, 0x00000000, 0x00000001 },
4947 { 0x14d0, 0, 0xffffffff, 0x00000000 },
4949 { 0x1800, 0, 0x00000000, 0x00000001 },
4950 { 0x1804, 0, 0x00000000, 0x00000003 },
4952 { 0x2800, 0, 0x00000000, 0x00000001 },
4953 { 0x2804, 0, 0x00000000, 0x00003f01 },
4954 { 0x2808, 0, 0x0f3f3f03, 0x00000000 },
4955 { 0x2810, 0, 0xffff0000, 0x00000000 },
4956 { 0x2814, 0, 0xffff0000, 0x00000000 },
4957 { 0x2818, 0, 0xffff0000, 0x00000000 },
4958 { 0x281c, 0, 0xffff0000, 0x00000000 },
4959 { 0x2834, 0, 0xffffffff, 0x00000000 },
4960 { 0x2840, 0, 0x00000000, 0xffffffff },
4961 { 0x2844, 0, 0x00000000, 0xffffffff },
4962 { 0x2848, 0, 0xffffffff, 0x00000000 },
4963 { 0x284c, 0, 0xf800f800, 0x07ff07ff },
4965 { 0x2c00, 0, 0x00000000, 0x00000011 },
4966 { 0x2c04, 0, 0x00000000, 0x00030007 },
4968 { 0x3c00, 0, 0x00000000, 0x00000001 },
4969 { 0x3c04, 0, 0x00000000, 0x00070000 },
4970 { 0x3c08, 0, 0x00007f71, 0x07f00000 },
4971 { 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
4972 { 0x3c10, 0, 0xffffffff, 0x00000000 },
4973 { 0x3c14, 0, 0x00000000, 0xffffffff },
4974 { 0x3c18, 0, 0x00000000, 0xffffffff },
4975 { 0x3c1c, 0, 0xfffff000, 0x00000000 },
4976 { 0x3c20, 0, 0xffffff00, 0x00000000 },
4978 { 0x5004, 0, 0x00000000, 0x0000007f },
4979 { 0x5008, 0, 0x0f0007ff, 0x00000000 },
4981 { 0x5c00, 0, 0x00000000, 0x00000001 },
4982 { 0x5c04, 0, 0x00000000, 0x0003000f },
4983 { 0x5c08, 0, 0x00000003, 0x00000000 },
4984 { 0x5c0c, 0, 0x0000fff8, 0x00000000 },
4985 { 0x5c10, 0, 0x00000000, 0xffffffff },
4986 { 0x5c80, 0, 0x00000000, 0x0f7113f1 },
4987 { 0x5c84, 0, 0x00000000, 0x0000f333 },
4988 { 0x5c88, 0, 0x00000000, 0x00077373 },
4989 { 0x5c8c, 0, 0x00000000, 0x0007f737 },
4991 { 0x6808, 0, 0x0000ff7f, 0x00000000 },
4992 { 0x680c, 0, 0xffffffff, 0x00000000 },
4993 { 0x6810, 0, 0xffffffff, 0x00000000 },
4994 { 0x6814, 0, 0xffffffff, 0x00000000 },
4995 { 0x6818, 0, 0xffffffff, 0x00000000 },
4996 { 0x681c, 0, 0xffffffff, 0x00000000 },
4997 { 0x6820, 0, 0x00ff00ff, 0x00000000 },
4998 { 0x6824, 0, 0x00ff00ff, 0x00000000 },
4999 { 0x6828, 0, 0x00ff00ff, 0x00000000 },
5000 { 0x682c, 0, 0x03ff03ff, 0x00000000 },
5001 { 0x6830, 0, 0x03ff03ff, 0x00000000 },
5002 { 0x6834, 0, 0x03ff03ff, 0x00000000 },
5003 { 0x6838, 0, 0x03ff03ff, 0x00000000 },
5004 { 0x683c, 0, 0x0000ffff, 0x00000000 },
5005 { 0x6840, 0, 0x00000ff0, 0x00000000 },
5006 { 0x6844, 0, 0x00ffff00, 0x00000000 },
5007 { 0x684c, 0, 0xffffffff, 0x00000000 },
5008 { 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
5009 { 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
5010 { 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
5011 { 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
5012 { 0x6908, 0, 0x00000000, 0x0001ff0f },
5013 { 0x690c, 0, 0x00000000, 0x0ffe00f0 },
5015 { 0xffff, 0, 0x00000000, 0x00000000 },
5018 ret = 0;
5019 is_5709 = 0;
5020 if (CHIP_NUM(bp) == CHIP_NUM_5709)
5021 is_5709 = 1;
5023 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
5024 u32 offset, rw_mask, ro_mask, save_val, val;
5025 u16 flags = reg_tbl[i].flags;
5027 if (is_5709 && (flags & BNX2_FL_NOT_5709))
5028 continue;
5030 offset = (u32) reg_tbl[i].offset;
5031 rw_mask = reg_tbl[i].rw_mask;
5032 ro_mask = reg_tbl[i].ro_mask;
5034 save_val = readl(bp->regview + offset);
5036 writel(0, bp->regview + offset);
5038 val = readl(bp->regview + offset);
5039 if ((val & rw_mask) != 0) {
5040 goto reg_test_err;
5043 if ((val & ro_mask) != (save_val & ro_mask)) {
5044 goto reg_test_err;
5047 writel(0xffffffff, bp->regview + offset);
5049 val = readl(bp->regview + offset);
5050 if ((val & rw_mask) != rw_mask) {
5051 goto reg_test_err;
5054 if ((val & ro_mask) != (save_val & ro_mask)) {
5055 goto reg_test_err;
5058 writel(save_val, bp->regview + offset);
5059 continue;
5061 reg_test_err:
5062 writel(save_val, bp->regview + offset);
5063 ret = -ENODEV;
5064 break;
5066 return ret;
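/* Walk the given internal memory window once per test pattern, writing
 * and reading back each 32-bit word through the indirect register
 * interface. */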
5069 static int
5070 bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
5072 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
5073 0xaaaaaaaa, 0xaa55aa55, 0x55aa55aa };
5074 int i;
5076 for (i = 0; i < sizeof(test_pattern) / 4; i++) {
5077 u32 offset;
5079 for (offset = 0; offset < size; offset += 4) {
5081 bnx2_reg_wr_ind(bp, start + offset, test_pattern[i]);
5083 if (bnx2_reg_rd_ind(bp, start + offset) !=
5084 test_pattern[i]) {
5085 return -ENODEV;
5089 return 0;
5092 static int
5093 bnx2_test_memory(struct bnx2 *bp)
5095 int ret = 0;
5096 int i;
5097 static struct mem_entry {
5098 u32 offset;
5099 u32 len;
5100 } mem_tbl_5706[] = {
5101 { 0x60000, 0x4000 },
5102 { 0xa0000, 0x3000 },
5103 { 0xe0000, 0x4000 },
5104 { 0x120000, 0x4000 },
5105 { 0x1a0000, 0x4000 },
5106 { 0x160000, 0x4000 },
5107 { 0xffffffff, 0 },
5109 mem_tbl_5709[] = {
5110 { 0x60000, 0x4000 },
5111 { 0xa0000, 0x3000 },
5112 { 0xe0000, 0x4000 },
5113 { 0x120000, 0x4000 },
5114 { 0x1a0000, 0x4000 },
5115 { 0xffffffff, 0 },
5117 struct mem_entry *mem_tbl;
5119 if (CHIP_NUM(bp) == CHIP_NUM_5709)
5120 mem_tbl = mem_tbl_5709;
5121 else
5122 mem_tbl = mem_tbl_5706;
5124 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
5125 if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
5126 mem_tbl[i].len)) != 0) {
5127 return ret;
5131 return ret;
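/* Loopback self-test: send one self-addressed frame with either the
 * MAC or the PHY looped back, force host coalescing so the completion
 * posts immediately, then check the received frame's status bits,
 * length and payload byte pattern. */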
5134 #define BNX2_MAC_LOOPBACK 0
5135 #define BNX2_PHY_LOOPBACK 1
5137 static int
5138 bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
5140 unsigned int pkt_size, num_pkts, i;
5141 struct sk_buff *skb, *rx_skb;
5142 unsigned char *packet;
5143 u16 rx_start_idx, rx_idx;
5144 dma_addr_t map;
5145 struct tx_bd *txbd;
5146 struct sw_bd *rx_buf;
5147 struct l2_fhdr *rx_hdr;
5148 int ret = -ENODEV;
5149 struct bnx2_napi *bnapi = &bp->bnx2_napi[0], *tx_napi;
5151 tx_napi = bnapi;
5152 if (bp->flags & BNX2_FLAG_USING_MSIX)
5153 tx_napi = &bp->bnx2_napi[BNX2_TX_VEC];
5155 if (loopback_mode == BNX2_MAC_LOOPBACK) {
5156 bp->loopback = MAC_LOOPBACK;
5157 bnx2_set_mac_loopback(bp);
5159 else if (loopback_mode == BNX2_PHY_LOOPBACK) {
5160 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
5161 return 0;
5163 bp->loopback = PHY_LOOPBACK;
5164 bnx2_set_phy_loopback(bp);
5166 else
5167 return -EINVAL;
5169 pkt_size = min(bp->dev->mtu + ETH_HLEN, bp->rx_jumbo_thresh - 4);
5170 skb = netdev_alloc_skb(bp->dev, pkt_size);
5171 if (!skb)
5172 return -ENOMEM;
5173 packet = skb_put(skb, pkt_size);
5174 memcpy(packet, bp->dev->dev_addr, 6);
5175 memset(packet + 6, 0x0, 8);
5176 for (i = 14; i < pkt_size; i++)
5177 packet[i] = (unsigned char) (i & 0xff);
5179 map = pci_map_single(bp->pdev, skb->data, pkt_size,
5180 PCI_DMA_TODEVICE);
5182 REG_WR(bp, BNX2_HC_COMMAND,
5183 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
5185 REG_RD(bp, BNX2_HC_COMMAND);
5187 udelay(5);
5188 rx_start_idx = bnx2_get_hw_rx_cons(bnapi);
5190 num_pkts = 0;
5192 txbd = &bp->tx_desc_ring[TX_RING_IDX(bp->tx_prod)];
5194 txbd->tx_bd_haddr_hi = (u64) map >> 32;
5195 txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
5196 txbd->tx_bd_mss_nbytes = pkt_size;
5197 txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;
5199 num_pkts++;
5200 bp->tx_prod = NEXT_TX_BD(bp->tx_prod);
5201 bp->tx_prod_bseq += pkt_size;
5203 REG_WR16(bp, bp->tx_bidx_addr, bp->tx_prod);
5204 REG_WR(bp, bp->tx_bseq_addr, bp->tx_prod_bseq);
5206 udelay(100);
5208 REG_WR(bp, BNX2_HC_COMMAND,
5209 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
5211 REG_RD(bp, BNX2_HC_COMMAND);
5213 udelay(5);
5215 pci_unmap_single(bp->pdev, map, pkt_size, PCI_DMA_TODEVICE);
5216 dev_kfree_skb(skb);
5218 if (bnx2_get_hw_tx_cons(tx_napi) != bp->tx_prod)
5219 goto loopback_test_done;
5221 rx_idx = bnx2_get_hw_rx_cons(bnapi);
5222 if (rx_idx != rx_start_idx + num_pkts) {
5223 goto loopback_test_done;
5226 rx_buf = &bp->rx_buf_ring[rx_start_idx];
5227 rx_skb = rx_buf->skb;
5229 rx_hdr = (struct l2_fhdr *) rx_skb->data;
5230 skb_reserve(rx_skb, bp->rx_offset);
5232 pci_dma_sync_single_for_cpu(bp->pdev,
5233 pci_unmap_addr(rx_buf, mapping),
5234 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
5236 if (rx_hdr->l2_fhdr_status &
5237 (L2_FHDR_ERRORS_BAD_CRC |
5238 L2_FHDR_ERRORS_PHY_DECODE |
5239 L2_FHDR_ERRORS_ALIGNMENT |
5240 L2_FHDR_ERRORS_TOO_SHORT |
5241 L2_FHDR_ERRORS_GIANT_FRAME)) {
5243 goto loopback_test_done;
5246 if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) {
5247 goto loopback_test_done;
5250 for (i = 14; i < pkt_size; i++) {
5251 if (*(rx_skb->data + i) != (unsigned char) (i & 0xff)) {
5252 goto loopback_test_done;
5256 ret = 0;
5258 loopback_test_done:
5259 bp->loopback = 0;
5260 return ret;
5263 #define BNX2_MAC_LOOPBACK_FAILED 1
5264 #define BNX2_PHY_LOOPBACK_FAILED 2
5265 #define BNX2_LOOPBACK_FAILED (BNX2_MAC_LOOPBACK_FAILED | \
5266 BNX2_PHY_LOOPBACK_FAILED)
5268 static int
5269 bnx2_test_loopback(struct bnx2 *bp)
5271 int rc = 0;
5273 if (!netif_running(bp->dev))
5274 return BNX2_LOOPBACK_FAILED;
5276 bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
5277 spin_lock_bh(&bp->phy_lock);
5278 bnx2_init_phy(bp);
5279 spin_unlock_bh(&bp->phy_lock);
5280 if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
5281 rc |= BNX2_MAC_LOOPBACK_FAILED;
5282 if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
5283 rc |= BNX2_PHY_LOOPBACK_FAILED;
5284 return rc;
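/* NVRAM self-test: verify the magic value at offset 0, then CRC both
 * 0x100-byte blocks starting at offset 0x100.  Each block apparently
 * embeds its own CRC32, so running the CRC over data plus stored
 * checksum must produce the fixed CRC32 residual. */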
5287 #define NVRAM_SIZE 0x200
5288 #define CRC32_RESIDUAL 0xdebb20e3
5290 static int
5291 bnx2_test_nvram(struct bnx2 *bp)
5293 __be32 buf[NVRAM_SIZE / 4];
5294 u8 *data = (u8 *) buf;
5295 int rc = 0;
5296 u32 magic, csum;
5298 if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
5299 goto test_nvram_done;
5301 magic = be32_to_cpu(buf[0]);
5302 if (magic != 0x669955aa) {
5303 rc = -ENODEV;
5304 goto test_nvram_done;
5307 if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0)
5308 goto test_nvram_done;
5310 csum = ether_crc_le(0x100, data);
5311 if (csum != CRC32_RESIDUAL) {
5312 rc = -ENODEV;
5313 goto test_nvram_done;
5316 csum = ether_crc_le(0x100, data + 0x100);
5317 if (csum != CRC32_RESIDUAL) {
5318 rc = -ENODEV;
5321 test_nvram_done:
5322 return rc;
5325 static int
5326 bnx2_test_link(struct bnx2 *bp)
5328 u32 bmsr;
5330 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
5331 if (bp->link_up)
5332 return 0;
5333 return -ENODEV;
5335 spin_lock_bh(&bp->phy_lock);
5336 bnx2_enable_bmsr1(bp);
5337 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
5338 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
5339 bnx2_disable_bmsr1(bp);
5340 spin_unlock_bh(&bp->phy_lock);
5342 if (bmsr & BMSR_LSTATUS) {
5343 return 0;
5345 return -ENODEV;
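/* MSI sanity check used by bnx2_open(): force a host coalescing cycle
 * and poll the status index for up to roughly 100 ms to confirm that
 * an interrupt was actually delivered. */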
5348 static int
5349 bnx2_test_intr(struct bnx2 *bp)
5351 int i;
5352 u16 status_idx;
5354 if (!netif_running(bp->dev))
5355 return -ENODEV;
5357 status_idx = REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;
5359 /* This register is not touched during run-time. */
5360 REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
5361 REG_RD(bp, BNX2_HC_COMMAND);
5363 for (i = 0; i < 10; i++) {
5364 if ((REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
5365 status_idx) {
5367 break;
5370 msleep_interruptible(10);
5372 if (i < 10)
5373 return 0;
5375 return -ENODEV;
5380 /* Determining link for parallel detection. */
5382 static int
5383 bnx2_5706_serdes_has_link(struct bnx2 *bp)
5385 u32 mode_ctl, an_dbg, exp;
5389 if (bp->phy_flags & BNX2_PHY_FLAG_NO_PARALLEL)
5390 return 0;
5393 bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_MODE_CTL);
5394 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &mode_ctl);
5396 if (!(mode_ctl & MISC_SHDW_MODE_CTL_SIG_DET))
5397 return 0;
5399 bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
5400 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
5401 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
5403 if (an_dbg & (MISC_SHDW_AN_DBG_NOSYNC | MISC_SHDW_AN_DBG_RUDI_INVALID))
5404 return 0;
5406 bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_REG1);
5407 bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);
5408 bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);
5410 if (exp & MII_EXPAND_REG1_RUDI_C) /* receiving CONFIG */
5411 return 0;
5413 return 1;
5416 static void
5417 bnx2_5706_serdes_timer(struct bnx2 *bp)
5419 int check_link = 1;
5421 spin_lock(&bp->phy_lock);
5432 if (bp->serdes_an_pending) {
5433 bp->serdes_an_pending--;
5434 check_link = 0;
5435 } else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
5436 u32 bmcr;
5438 bp->current_interval = bp->timer_interval;
5440 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
5442 if (bmcr & BMCR_ANENABLE) {
5443 if (bnx2_5706_serdes_has_link(bp)) {
5444 bmcr &= ~BMCR_ANENABLE;
5445 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
5446 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
5447 bp->phy_flags |= BNX2_PHY_FLAG_PARALLEL_DETECT;
5451 else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
5452 (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)) {
5453 u32 phy2;
5459 bnx2_write_phy(bp, 0x17, 0x0f01);
5460 bnx2_read_phy(bp, 0x15, &phy2);
5461 if (phy2 & 0x20) {
5462 u32 bmcr;
5464 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
5465 bmcr |= BMCR_ANENABLE;
5466 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
5468 bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
5470 } else
5471 bp->current_interval = bp->timer_interval;
5476 if (check_link) {
5478 u32 val;
5480 bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
5481 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);
5482 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);
5490 if (bp->link_up && (val & MISC_SHDW_AN_DBG_NOSYNC)) {
5491 if (!(bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN)) {
5492 bnx2_5706s_force_link_dn(bp, 1);
5493 bp->phy_flags |= BNX2_PHY_FLAG_FORCED_DOWN;
5494 } else
5495 bnx2_set_link(bp);
5496 } else if (!bp->link_up && !(val & MISC_SHDW_AN_DBG_NOSYNC))
5497 bnx2_set_link(bp);
5500 spin_unlock(&bp->phy_lock);
5503 static void
5504 bnx2_5708_serdes_timer(struct bnx2 *bp)
5506 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
5507 return;
5509 if ((bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) == 0) {
5510 bp->serdes_an_pending = 0;
5511 return;
5514 spin_lock(&bp->phy_lock);
5515 if (bp->serdes_an_pending)
5516 bp->serdes_an_pending--;
5517 else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
5518 u32 bmcr;
5520 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
5521 if (bmcr & BMCR_ANENABLE) {
5522 bnx2_enable_forced_2g5(bp);
5523 bp->current_interval = SERDES_FORCED_TIMEOUT;
5524 } else {
5525 bnx2_disable_forced_2g5(bp);
5526 bp->serdes_an_pending = 2;
5527 bp->current_interval = bp->timer_interval;
5530 } else
5531 bp->current_interval = bp->timer_interval;
5533 spin_unlock(&bp->phy_lock);
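/* Periodic housekeeping, rearmed every current_interval jiffies: send
 * the driver heartbeat to the bootcode, mirror the firmware RX drop
 * counter into the stats block, and run the SerDes link workarounds
 * for the 5706/5708. */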
5536 static void
5537 bnx2_timer(unsigned long data)
5539 struct bnx2 *bp = (struct bnx2 *) data;
5541 if (!netif_running(bp->dev))
5542 return;
5544 if (atomic_read(&bp->intr_sem) != 0)
5545 goto bnx2_restart_timer;
5547 bnx2_send_heart_beat(bp);
5549 bp->stats_blk->stat_FwRxDrop =
5550 bnx2_reg_rd_ind(bp, BNX2_FW_RX_DROP_COUNT);
5552 /* work around occasionally corrupted counters */
5553 if (CHIP_NUM(bp) == CHIP_NUM_5708 && bp->stats_ticks)
5554 REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd |
5555 BNX2_HC_COMMAND_STATS_NOW);
5557 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
5558 if (CHIP_NUM(bp) == CHIP_NUM_5706)
5559 bnx2_5706_serdes_timer(bp);
5560 else
5561 bnx2_5708_serdes_timer(bp);
5564 bnx2_restart_timer:
5565 mod_timer(&bp->timer, jiffies + bp->current_interval);
5568 static int
5569 bnx2_request_irq(struct bnx2 *bp)
5571 struct net_device *dev = bp->dev;
5572 unsigned long flags;
5573 struct bnx2_irq *irq;
5574 int rc = 0, i;
5576 if (bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)
5577 flags = 0;
5578 else
5579 flags = IRQF_SHARED;
5581 for (i = 0; i < bp->irq_nvecs; i++) {
5582 irq = &bp->irq_tbl[i];
5583 rc = request_irq(irq->vector, irq->handler, flags, irq->name,
5584 dev);
5585 if (rc)
5586 break;
5587 irq->requested = 1;
5589 return rc;
5592 static void
5593 bnx2_free_irq(struct bnx2 *bp)
5595 struct net_device *dev = bp->dev;
5596 struct bnx2_irq *irq;
5597 int i;
5599 for (i = 0; i < bp->irq_nvecs; i++) {
5600 irq = &bp->irq_tbl[i];
5601 if (irq->requested)
5602 free_irq(irq->vector, dev);
5603 irq->requested = 0;
5605 if (bp->flags & BNX2_FLAG_USING_MSI)
5606 pci_disable_msi(bp->pdev);
5607 else if (bp->flags & BNX2_FLAG_USING_MSIX)
5608 pci_disable_msix(bp->pdev);
5610 bp->flags &= ~(BNX2_FLAG_USING_MSI_OR_MSIX | BNX2_FLAG_ONE_SHOT_MSI);
5613 static void
5614 bnx2_enable_msix(struct bnx2 *bp)
5616 int i, rc;
5617 struct msix_entry msix_ent[BNX2_MAX_MSIX_VEC];
5619 bnx2_setup_msix_tbl(bp);
5620 REG_WR(bp, BNX2_PCI_MSIX_CONTROL, BNX2_MAX_MSIX_HW_VEC - 1);
5621 REG_WR(bp, BNX2_PCI_MSIX_TBL_OFF_BIR, BNX2_PCI_GRC_WINDOW2_BASE);
5622 REG_WR(bp, BNX2_PCI_MSIX_PBA_OFF_BIT, BNX2_PCI_GRC_WINDOW3_BASE);
5624 for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
5625 msix_ent[i].entry = i;
5626 msix_ent[i].vector = 0;
5629 rc = pci_enable_msix(bp->pdev, msix_ent, BNX2_MAX_MSIX_VEC);
5630 if (rc != 0)
5631 return;
5633 bp->irq_tbl[BNX2_BASE_VEC].handler = bnx2_msi_1shot;
5634 bp->irq_tbl[BNX2_TX_VEC].handler = bnx2_tx_msix;
5636 strcpy(bp->irq_tbl[BNX2_BASE_VEC].name, bp->dev->name);
5637 strcat(bp->irq_tbl[BNX2_BASE_VEC].name, "-base");
5638 strcpy(bp->irq_tbl[BNX2_TX_VEC].name, bp->dev->name);
5639 strcat(bp->irq_tbl[BNX2_TX_VEC].name, "-tx");
5641 bp->irq_nvecs = BNX2_MAX_MSIX_VEC;
5642 bp->flags |= BNX2_FLAG_USING_MSIX | BNX2_FLAG_ONE_SHOT_MSI;
5643 for (i = 0; i < BNX2_MAX_MSIX_VEC; i++)
5644 bp->irq_tbl[i].vector = msix_ent[i].vector;
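/* Choose the interrupt mode: default to INTx on the PCI vector, then
 * upgrade to MSI-X (separate base and TX vectors) or plain MSI when
 * the chip supports it and MSI has not been disabled.  One-shot mode
 * is used with MSI-X and with MSI on the 5709. */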
5647 static void
5648 bnx2_setup_int_mode(struct bnx2 *bp, int dis_msi)
5650 bp->irq_tbl[0].handler = bnx2_interrupt;
5651 strcpy(bp->irq_tbl[0].name, bp->dev->name);
5652 bp->irq_nvecs = 1;
5653 bp->irq_tbl[0].vector = bp->pdev->irq;
5655 if ((bp->flags & BNX2_FLAG_MSIX_CAP) && !dis_msi)
5656 bnx2_enable_msix(bp);
5658 if ((bp->flags & BNX2_FLAG_MSI_CAP) && !dis_msi &&
5659 !(bp->flags & BNX2_FLAG_USING_MSIX)) {
5660 if (pci_enable_msi(bp->pdev) == 0) {
5661 bp->flags |= BNX2_FLAG_USING_MSI;
5662 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
5663 bp->flags |= BNX2_FLAG_ONE_SHOT_MSI;
5664 bp->irq_tbl[0].handler = bnx2_msi_1shot;
5665 } else
5666 bp->irq_tbl[0].handler = bnx2_msi;
5668 bp->irq_tbl[0].vector = bp->pdev->irq;
5673 /* Called with rtnl_lock */
5674 static int
5675 bnx2_open(struct net_device *dev)
5677 struct bnx2 *bp = netdev_priv(dev);
5678 int rc;
5680 netif_carrier_off(dev);
5682 bnx2_set_power_state(bp, PCI_D0);
5683 bnx2_disable_int(bp);
5685 rc = bnx2_alloc_mem(bp);
5686 if (rc)
5687 return rc;
5689 bnx2_setup_int_mode(bp, disable_msi);
5690 bnx2_napi_enable(bp);
5691 rc = bnx2_request_irq(bp);
5693 if (rc) {
5694 bnx2_napi_disable(bp);
5695 bnx2_free_mem(bp);
5696 return rc;
5699 rc = bnx2_init_nic(bp);
5701 if (rc) {
5702 bnx2_napi_disable(bp);
5703 bnx2_free_irq(bp);
5704 bnx2_free_skbs(bp);
5705 bnx2_free_mem(bp);
5706 return rc;
5709 mod_timer(&bp->timer, jiffies + bp->current_interval);
5711 atomic_set(&bp->intr_sem, 0);
5713 bnx2_enable_int(bp);
5715 if (bp->flags & BNX2_FLAG_USING_MSI) {
5716 /* Test MSI to make sure it is working.
5717 * If the MSI test fails, go back to INTx mode. */
5719 if (bnx2_test_intr(bp) != 0) {
5720 printk(KERN_WARNING PFX "%s: No interrupt was generated"
5721 " using MSI, switching to INTx mode. Please"
5722 " report this failure to the PCI maintainer"
5723 " and include system chipset information.\n",
5724 bp->dev->name);
5726 bnx2_disable_int(bp);
5727 bnx2_free_irq(bp);
5729 bnx2_setup_int_mode(bp, 1);
5731 rc = bnx2_init_nic(bp);
5733 if (!rc)
5734 rc = bnx2_request_irq(bp);
5736 if (rc) {
5737 bnx2_napi_disable(bp);
5738 bnx2_free_skbs(bp);
5739 bnx2_free_mem(bp);
5740 del_timer_sync(&bp->timer);
5741 return rc;
5743 bnx2_enable_int(bp);
5746 if (bp->flags & BNX2_FLAG_USING_MSI)
5747 printk(KERN_INFO PFX "%s: using MSI\n", dev->name);
5748 else if (bp->flags & BNX2_FLAG_USING_MSIX)
5749 printk(KERN_INFO PFX "%s: using MSIX\n", dev->name);
5751 netif_start_queue(dev);
5753 return 0;
5756 static void
5757 bnx2_reset_task(struct work_struct *work)
5759 struct bnx2 *bp = container_of(work, struct bnx2, reset_task);
5761 if (!netif_running(bp->dev))
5762 return;
5764 bp->in_reset_task = 1;
5765 bnx2_netif_stop(bp);
5767 bnx2_init_nic(bp);
5769 atomic_set(&bp->intr_sem, 1);
5770 bnx2_netif_start(bp);
5771 bp->in_reset_task = 0;
5774 static void
5775 bnx2_tx_timeout(struct net_device *dev)
5777 struct bnx2 *bp = netdev_priv(dev);
5779 /* This allows the netif to be shut down gracefully before resetting. */
5780 schedule_work(&bp->reset_task);
5783 #ifdef BCM_VLAN
5784 /* Called with rtnl_lock */
5785 static void
5786 bnx2_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp)
5788 struct bnx2 *bp = netdev_priv(dev);
5790 bnx2_netif_stop(bp);
5792 bp->vlgrp = vlgrp;
5793 bnx2_set_rx_mode(dev);
5795 bnx2_netif_start(bp);
5797 #endif
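/* TX path overview: the linear part of the skb and each page fragment
 * get their own buffer descriptor; checksum, VLAN and LSO parameters
 * travel in the per-BD vlan_tag_flags/mss fields, and the new producer
 * index and byte sequence are posted to the TX mailbox. */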
5799 /* Called with netif_tx_lock.
5800 * bnx2_tx_int() runs without netif_tx_lock unless it needs to call
5801 * netif_wake_queue(). */
5803 static int
5804 bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
5806 struct bnx2 *bp = netdev_priv(dev);
5807 dma_addr_t mapping;
5808 struct tx_bd *txbd;
5809 struct sw_bd *tx_buf;
5810 u32 len, vlan_tag_flags, last_frag, mss;
5811 u16 prod, ring_prod;
5812 int i;
5813 struct bnx2_napi *bnapi = &bp->bnx2_napi[bp->tx_vec];
5815 if (unlikely(bnx2_tx_avail(bp, bnapi) <
5816 (skb_shinfo(skb)->nr_frags + 1))) {
5817 netif_stop_queue(dev);
5818 printk(KERN_ERR PFX "%s: BUG! Tx ring full when queue awake!\n",
5819 dev->name);
5821 return NETDEV_TX_BUSY;
5823 len = skb_headlen(skb);
5824 prod = bp->tx_prod;
5825 ring_prod = TX_RING_IDX(prod);
5827 vlan_tag_flags = 0;
5828 if (skb->ip_summed == CHECKSUM_PARTIAL) {
5829 vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
5832 if (bp->vlgrp && vlan_tx_tag_present(skb)) {
5833 vlan_tag_flags |=
5834 (TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
5836 if ((mss = skb_shinfo(skb)->gso_size)) {
5837 u32 tcp_opt_len, ip_tcp_len;
5838 struct iphdr *iph;
5840 vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;
5842 tcp_opt_len = tcp_optlen(skb);
5844 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
5845 u32 tcp_off = skb_transport_offset(skb) -
5846 sizeof(struct ipv6hdr) - ETH_HLEN;
5848 vlan_tag_flags |= ((tcp_opt_len >> 2) << 8) |
5849 TX_BD_FLAGS_SW_FLAGS;
5850 if (likely(tcp_off == 0))
5851 vlan_tag_flags &= ~TX_BD_FLAGS_TCP6_OFF0_MSK;
5852 else {
5853 tcp_off >>= 3;
5854 vlan_tag_flags |= ((tcp_off & 0x3) <<
5855 TX_BD_FLAGS_TCP6_OFF0_SHL) |
5856 ((tcp_off & 0x10) <<
5857 TX_BD_FLAGS_TCP6_OFF4_SHL);
5858 mss |= (tcp_off & 0xc) << TX_BD_TCP6_OFF2_SHL;
5860 } else {
5861 if (skb_header_cloned(skb) &&
5862 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
5863 dev_kfree_skb(skb);
5864 return NETDEV_TX_OK;
5867 ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
5869 iph = ip_hdr(skb);
5870 iph->check = 0;
5871 iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
5872 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
5873 iph->daddr, 0,
5874 IPPROTO_TCP, 0);
5876 if (tcp_opt_len || (iph->ihl > 5)) {
5877 vlan_tag_flags |= ((iph->ihl - 5) +
5878 (tcp_opt_len >> 2)) << 8;
5881 } else
5882 mss = 0;
5884 mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE);
5886 tx_buf = &bp->tx_buf_ring[ring_prod];
5887 tx_buf->skb = skb;
5888 pci_unmap_addr_set(tx_buf, mapping, mapping);
5890 txbd = &bp->tx_desc_ring[ring_prod];
5892 txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
5893 txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
5894 txbd->tx_bd_mss_nbytes = len | (mss << 16);
5895 txbd->tx_bd_vlan_tag_flags = vlan_tag_flags | TX_BD_FLAGS_START;
5897 last_frag = skb_shinfo(skb)->nr_frags;
5899 for (i = 0; i < last_frag; i++) {
5900 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
5902 prod = NEXT_TX_BD(prod);
5903 ring_prod = TX_RING_IDX(prod);
5904 txbd = &bp->tx_desc_ring[ring_prod];
5906 len = frag->size;
5907 mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
5908 len, PCI_DMA_TODEVICE);
5909 pci_unmap_addr_set(&bp->tx_buf_ring[ring_prod],
5910 mapping, mapping);
5912 txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
5913 txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
5914 txbd->tx_bd_mss_nbytes = len | (mss << 16);
5915 txbd->tx_bd_vlan_tag_flags = vlan_tag_flags;
5918 txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;
5920 prod = NEXT_TX_BD(prod);
5921 bp->tx_prod_bseq += skb->len;
5923 REG_WR16(bp, bp->tx_bidx_addr, prod);
5924 REG_WR(bp, bp->tx_bseq_addr, bp->tx_prod_bseq);
5926 mmiowb();
5928 bp->tx_prod = prod;
5929 dev->trans_start = jiffies;
5931 if (unlikely(bnx2_tx_avail(bp, bnapi) <= MAX_SKB_FRAGS)) {
5932 netif_stop_queue(dev);
5933 if (bnx2_tx_avail(bp, bnapi) > bp->tx_wake_thresh)
5934 netif_wake_queue(dev);
5937 return NETDEV_TX_OK;
5940 /* Called with rtnl_lock */
5941 static int
5942 bnx2_close(struct net_device *dev)
5944 struct bnx2 *bp = netdev_priv(dev);
5945 u32 reset_code;
5947 /* Calling flush_scheduled_work() may deadlock because
5948 * linkwatch_event() may be on the workqueue and it will try to get
5949 * the rtnl_lock which we are holding.
5951 while (bp->in_reset_task)
5952 msleep(1);
5954 bnx2_disable_int_sync(bp);
5955 bnx2_napi_disable(bp);
5956 del_timer_sync(&bp->timer);
5957 if (bp->flags & BNX2_FLAG_NO_WOL)
5958 reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
5959 else if (bp->wol)
5960 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
5961 else
5962 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
5963 bnx2_reset_chip(bp, reset_code);
5964 bnx2_free_irq(bp);
5965 bnx2_free_skbs(bp);
5966 bnx2_free_mem(bp);
5967 bp->link_up = 0;
5968 netif_carrier_off(bp->dev);
5969 bnx2_set_power_state(bp, PCI_D3hot);
5970 return 0;
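/* The chip exports 64-bit counters as _hi/_lo register pairs.  On
 * 64-bit hosts both halves are combined; on 32-bit hosts only the low
 * word is reported, since an unsigned long cannot hold the full
 * counter. */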
5973 #define GET_NET_STATS64(ctr) \
5974 (unsigned long) ((unsigned long) (ctr##_hi) << 32) + \
5975 (unsigned long) (ctr##_lo)
5977 #define GET_NET_STATS32(ctr) \
5978 (ctr##_lo)
5980 #if (BITS_PER_LONG == 64)
5981 #define GET_NET_STATS GET_NET_STATS64
5982 #else
5983 #define GET_NET_STATS GET_NET_STATS32
5984 #endif
5986 static struct net_device_stats *
5987 bnx2_get_stats(struct net_device *dev)
5989 struct bnx2 *bp = netdev_priv(dev);
5990 struct statistics_block *stats_blk = bp->stats_blk;
5991 struct net_device_stats *net_stats = &bp->net_stats;
5993 if (bp->stats_blk == NULL) {
5994 return net_stats;
5996 net_stats->rx_packets =
5997 GET_NET_STATS(stats_blk->stat_IfHCInUcastPkts) +
5998 GET_NET_STATS(stats_blk->stat_IfHCInMulticastPkts) +
5999 GET_NET_STATS(stats_blk->stat_IfHCInBroadcastPkts);
6001 net_stats->tx_packets =
6002 GET_NET_STATS(stats_blk->stat_IfHCOutUcastPkts) +
6003 GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts) +
6004 GET_NET_STATS(stats_blk->stat_IfHCOutBroadcastPkts);
6006 net_stats->rx_bytes =
6007 GET_NET_STATS(stats_blk->stat_IfHCInOctets);
6009 net_stats->tx_bytes =
6010 GET_NET_STATS(stats_blk->stat_IfHCOutOctets);
6012 net_stats->multicast =
6013 GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts);
6015 net_stats->collisions =
6016 (unsigned long) stats_blk->stat_EtherStatsCollisions;
6018 net_stats->rx_length_errors =
6019 (unsigned long) (stats_blk->stat_EtherStatsUndersizePkts +
6020 stats_blk->stat_EtherStatsOverrsizePkts);
6022 net_stats->rx_over_errors =
6023 (unsigned long) stats_blk->stat_IfInMBUFDiscards;
6025 net_stats->rx_frame_errors =
6026 (unsigned long) stats_blk->stat_Dot3StatsAlignmentErrors;
6028 net_stats->rx_crc_errors =
6029 (unsigned long) stats_blk->stat_Dot3StatsFCSErrors;
6031 net_stats->rx_errors = net_stats->rx_length_errors +
6032 net_stats->rx_over_errors + net_stats->rx_frame_errors +
6033 net_stats->rx_crc_errors;
6035 net_stats->tx_aborted_errors =
6036 (unsigned long) (stats_blk->stat_Dot3StatsExcessiveCollisions +
6037 stats_blk->stat_Dot3StatsLateCollisions);
6039 if ((CHIP_NUM(bp) == CHIP_NUM_5706) ||
6040 (CHIP_ID(bp) == CHIP_ID_5708_A0))
6041 net_stats->tx_carrier_errors = 0;
6042 else {
6043 net_stats->tx_carrier_errors =
6044 (unsigned long)
6045 stats_blk->stat_Dot3StatsCarrierSenseErrors;
6048 net_stats->tx_errors =
6049 (unsigned long)
6050 stats_blk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors +
6052 net_stats->tx_aborted_errors +
6053 net_stats->tx_carrier_errors;
6055 net_stats->rx_missed_errors =
6056 (unsigned long) (stats_blk->stat_IfInMBUFDiscards +
6057 stats_blk->stat_FwRxDrop);
6059 return net_stats;
6062 /* All ethtool functions called with rtnl_lock */
6064 static int
6065 bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
6067 struct bnx2 *bp = netdev_priv(dev);
6068 int support_serdes = 0, support_copper = 0;
6070 cmd->supported = SUPPORTED_Autoneg;
6071 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
6072 support_serdes = 1;
6073 support_copper = 1;
6074 } else if (bp->phy_port == PORT_FIBRE)
6075 support_serdes = 1;
6076 else
6077 support_copper = 1;
6079 if (support_serdes) {
6080 cmd->supported |= SUPPORTED_1000baseT_Full |
6081 SUPPORTED_FIBRE;
6082 if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
6083 cmd->supported |= SUPPORTED_2500baseX_Full;
6086 if (support_copper) {
6087 cmd->supported |= SUPPORTED_10baseT_Half |
6088 SUPPORTED_10baseT_Full |
6089 SUPPORTED_100baseT_Half |
6090 SUPPORTED_100baseT_Full |
6091 SUPPORTED_1000baseT_Full |
6092 SUPPORTED_TP;
6096 spin_lock_bh(&bp->phy_lock);
6097 cmd->port = bp->phy_port;
6098 cmd->advertising = bp->advertising;
6100 if (bp->autoneg & AUTONEG_SPEED) {
6101 cmd->autoneg = AUTONEG_ENABLE;
6103 else {
6104 cmd->autoneg = AUTONEG_DISABLE;
6107 if (netif_carrier_ok(dev)) {
6108 cmd->speed = bp->line_speed;
6109 cmd->duplex = bp->duplex;
6111 else {
6112 cmd->speed = -1;
6113 cmd->duplex = -1;
6115 spin_unlock_bh(&bp->phy_lock);
6117 cmd->transceiver = XCVR_INTERNAL;
6118 cmd->phy_address = bp->phy_addr;
6120 return 0;
6123 static int
6124 bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
6126 struct bnx2 *bp = netdev_priv(dev);
6127 u8 autoneg = bp->autoneg;
6128 u8 req_duplex = bp->req_duplex;
6129 u16 req_line_speed = bp->req_line_speed;
6130 u32 advertising = bp->advertising;
6131 int err = -EINVAL;
6133 spin_lock_bh(&bp->phy_lock);
6135 if (cmd->port != PORT_TP && cmd->port != PORT_FIBRE)
6136 goto err_out_unlock;
6138 if (cmd->port != bp->phy_port &&
6139 !(bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP))
6140 goto err_out_unlock;
6142 if (cmd->autoneg == AUTONEG_ENABLE) {
6143 autoneg |= AUTONEG_SPEED;
6145 cmd->advertising &= ETHTOOL_ALL_COPPER_SPEED;
6147 /* allow advertising 1 speed */
6148 if ((cmd->advertising == ADVERTISED_10baseT_Half) ||
6149 (cmd->advertising == ADVERTISED_10baseT_Full) ||
6150 (cmd->advertising == ADVERTISED_100baseT_Half) ||
6151 (cmd->advertising == ADVERTISED_100baseT_Full)) {
6153 if (cmd->port == PORT_FIBRE)
6154 goto err_out_unlock;
6156 advertising = cmd->advertising;
6158 } else if (cmd->advertising == ADVERTISED_2500baseX_Full) {
6159 if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) ||
6160 (cmd->port == PORT_TP))
6161 goto err_out_unlock;
6162 } else if (cmd->advertising == ADVERTISED_1000baseT_Full)
6163 advertising = cmd->advertising;
6164 else if (cmd->advertising == ADVERTISED_1000baseT_Half)
6165 goto err_out_unlock;
6166 else {
6167 if (cmd->port == PORT_FIBRE)
6168 advertising = ETHTOOL_ALL_FIBRE_SPEED;
6169 else
6170 advertising = ETHTOOL_ALL_COPPER_SPEED;
6172 advertising |= ADVERTISED_Autoneg;
6174 else {
6175 if (cmd->port == PORT_FIBRE) {
6176 if ((cmd->speed != SPEED_1000 &&
6177 cmd->speed != SPEED_2500) ||
6178 (cmd->duplex != DUPLEX_FULL))
6179 goto err_out_unlock;
6181 if (cmd->speed == SPEED_2500 &&
6182 !(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
6183 goto err_out_unlock;
6185 else if (cmd->speed == SPEED_1000 || cmd->speed == SPEED_2500)
6186 goto err_out_unlock;
6188 autoneg &= ~AUTONEG_SPEED;
6189 req_line_speed = cmd->speed;
6190 req_duplex = cmd->duplex;
6191 advertising = 0;
6194 bp->autoneg = autoneg;
6195 bp->advertising = advertising;
6196 bp->req_line_speed = req_line_speed;
6197 bp->req_duplex = req_duplex;
6199 err = bnx2_setup_phy(bp, cmd->port);
6201 err_out_unlock:
6202 spin_unlock_bh(&bp->phy_lock);
6204 return err;
6207 static void
6208 bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
6210 struct bnx2 *bp = netdev_priv(dev);
6212 strcpy(info->driver, DRV_MODULE_NAME);
6213 strcpy(info->version, DRV_MODULE_VERSION);
6214 strcpy(info->bus_info, pci_name(bp->pdev));
6215 strcpy(info->fw_version, bp->fw_version);
6218 #define BNX2_REGDUMP_LEN (32 * 1024)
6220 static int
6221 bnx2_get_regs_len(struct net_device *dev)
6223 return BNX2_REGDUMP_LEN;
6226 static void
6227 bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
6229 u32 *p = _p, i, offset;
6230 u8 *orig_p = _p;
6231 struct bnx2 *bp = netdev_priv(dev);
6232 u32 reg_boundaries[] = { 0x0000, 0x0098, 0x0400, 0x045c,
6233 0x0800, 0x0880, 0x0c00, 0x0c10,
6234 0x0c30, 0x0d08, 0x1000, 0x101c,
6235 0x1040, 0x1048, 0x1080, 0x10a4,
6236 0x1400, 0x1490, 0x1498, 0x14f0,
6237 0x1500, 0x155c, 0x1580, 0x15dc,
6238 0x1600, 0x1658, 0x1680, 0x16d8,
6239 0x1800, 0x1820, 0x1840, 0x1854,
6240 0x1880, 0x1894, 0x1900, 0x1984,
6241 0x1c00, 0x1c0c, 0x1c40, 0x1c54,
6242 0x1c80, 0x1c94, 0x1d00, 0x1d84,
6243 0x2000, 0x2030, 0x23c0, 0x2400,
6244 0x2800, 0x2820, 0x2830, 0x2850,
6245 0x2b40, 0x2c10, 0x2fc0, 0x3058,
6246 0x3c00, 0x3c94, 0x4000, 0x4010,
6247 0x4080, 0x4090, 0x43c0, 0x4458,
6248 0x4c00, 0x4c18, 0x4c40, 0x4c54,
6249 0x4fc0, 0x5010, 0x53c0, 0x5444,
6250 0x5c00, 0x5c18, 0x5c80, 0x5c90,
6251 0x5fc0, 0x6000, 0x6400, 0x6428,
6252 0x6800, 0x6848, 0x684c, 0x6860,
6253 0x6888, 0x6910, 0x8000 };
6255 regs->version = 0;
6257 memset(p, 0, BNX2_REGDUMP_LEN);
6259 if (!netif_running(bp->dev))
6260 return;
6262 i = 0;
6263 offset = reg_boundaries[0];
6264 p += offset;
6265 while (offset < BNX2_REGDUMP_LEN) {
6266 *p++ = REG_RD(bp, offset);
6267 offset += 4;
6268 if (offset == reg_boundaries[i + 1]) {
6269 offset = reg_boundaries[i + 2];
6270 p = (u32 *) (orig_p + offset);
6271 i += 2;
6276 static void
6277 bnx2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
6279 struct bnx2 *bp = netdev_priv(dev);
6281 if (bp->flags & BNX2_FLAG_NO_WOL) {
6282 wol->supported = 0;
6283 wol->wolopts = 0;
6285 else {
6286 wol->supported = WAKE_MAGIC;
6287 if (bp->wol)
6288 wol->wolopts = WAKE_MAGIC;
6289 else
6290 wol->wolopts = 0;
6292 memset(&wol->sopass, 0, sizeof(wol->sopass));
6295 static int
6296 bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
6298 struct bnx2 *bp = netdev_priv(dev);
6300 if (wol->wolopts & ~WAKE_MAGIC)
6301 return -EINVAL;
6303 if (wol->wolopts & WAKE_MAGIC) {
6304 if (bp->flags & BNX2_FLAG_NO_WOL)
6305 return -EINVAL;
6307 bp->wol = 1;
6309 else {
6310 bp->wol = 0;
6312 return 0;
6315 static int
6316 bnx2_nway_reset(struct net_device *dev)
6318 struct bnx2 *bp = netdev_priv(dev);
6319 u32 bmcr;
6321 if (!(bp->autoneg & AUTONEG_SPEED)) {
6322 return -EINVAL;
6325 spin_lock_bh(&bp->phy_lock);
6327 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
6328 int rc;
6330 rc = bnx2_setup_remote_phy(bp, bp->phy_port);
6331 spin_unlock_bh(&bp->phy_lock);
6332 return rc;
6335 /* Force a link down visible on the other side */
6336 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
6337 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
6338 spin_unlock_bh(&bp->phy_lock);
6340 msleep(20);
6342 spin_lock_bh(&bp->phy_lock);
6344 bp->current_interval = SERDES_AN_TIMEOUT;
6345 bp->serdes_an_pending = 1;
6346 mod_timer(&bp->timer, jiffies + bp->current_interval);
6349 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
6350 bmcr &= ~BMCR_LOOPBACK;
6351 bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART | BMCR_ANENABLE);
6353 spin_unlock_bh(&bp->phy_lock);
6355 return 0;
6358 static int
6359 bnx2_get_eeprom_len(struct net_device *dev)
6361 struct bnx2 *bp = netdev_priv(dev);
6363 if (bp->flash_info == NULL)
6364 return 0;
6366 return (int) bp->flash_size;
6369 static int
6370 bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
6371 u8 *eebuf)
6373 struct bnx2 *bp = netdev_priv(dev);
6374 int rc;
6376 /* parameters already validated in ethtool_get_eeprom */
6378 rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
6380 return rc;
6383 static int
6384 bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
6385 u8 *eebuf)
6387 struct bnx2 *bp = netdev_priv(dev);
6388 int rc;
6390 /* parameters already validated in ethtool_set_eeprom */
6392 rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
6394 return rc;
6397 static int
6398 bnx2_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
6400 struct bnx2 *bp = netdev_priv(dev);
6402 memset(coal, 0, sizeof(struct ethtool_coalesce));
6404 coal->rx_coalesce_usecs = bp->rx_ticks;
6405 coal->rx_max_coalesced_frames = bp->rx_quick_cons_trip;
6406 coal->rx_coalesce_usecs_irq = bp->rx_ticks_int;
6407 coal->rx_max_coalesced_frames_irq = bp->rx_quick_cons_trip_int;
6409 coal->tx_coalesce_usecs = bp->tx_ticks;
6410 coal->tx_max_coalesced_frames = bp->tx_quick_cons_trip;
6411 coal->tx_coalesce_usecs_irq = bp->tx_ticks_int;
6412 coal->tx_max_coalesced_frames_irq = bp->tx_quick_cons_trip_int;
6414 coal->stats_block_coalesce_usecs = bp->stats_ticks;
6416 return 0;
6419 static int
6420 bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
6422 struct bnx2 *bp = netdev_priv(dev);
6424 bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
6425 if (bp->rx_ticks > 0x3ff) bp->rx_ticks = 0x3ff;
6427 bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames;
6428 if (bp->rx_quick_cons_trip > 0xff) bp->rx_quick_cons_trip = 0xff;
6430 bp->rx_ticks_int = (u16) coal->rx_coalesce_usecs_irq;
6431 if (bp->rx_ticks_int > 0x3ff) bp->rx_ticks_int = 0x3ff;
6433 bp->rx_quick_cons_trip_int = (u16) coal->rx_max_coalesced_frames_irq;
6434 if (bp->rx_quick_cons_trip_int > 0xff)
6435 bp->rx_quick_cons_trip_int = 0xff;
6437 bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
6438 if (bp->tx_ticks > 0x3ff) bp->tx_ticks = 0x3ff;
6440 bp->tx_quick_cons_trip = (u16) coal->tx_max_coalesced_frames;
6441 if (bp->tx_quick_cons_trip > 0xff) bp->tx_quick_cons_trip = 0xff;
6443 bp->tx_ticks_int = (u16) coal->tx_coalesce_usecs_irq;
6444 if (bp->tx_ticks_int > 0x3ff) bp->tx_ticks_int = 0x3ff;
6446 bp->tx_quick_cons_trip_int = (u16) coal->tx_max_coalesced_frames_irq;
6447 if (bp->tx_quick_cons_trip_int > 0xff) bp->tx_quick_cons_trip_int =
6448 0xff;
6450 bp->stats_ticks = coal->stats_block_coalesce_usecs;
6451 if (CHIP_NUM(bp) == CHIP_NUM_5708) {
6452 if (bp->stats_ticks != 0 && bp->stats_ticks != USEC_PER_SEC)
6453 bp->stats_ticks = USEC_PER_SEC;
6455 if (bp->stats_ticks > BNX2_HC_STATS_TICKS_HC_STAT_TICKS)
6456 bp->stats_ticks = BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
6457 bp->stats_ticks &= BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
6459 if (netif_running(bp->dev)) {
6460 bnx2_netif_stop(bp);
6461 bnx2_init_nic(bp);
6462 bnx2_netif_start(bp);
6465 return 0;
6468 static void
6469 bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
6471 struct bnx2 *bp = netdev_priv(dev);
6473 ering->rx_max_pending = MAX_TOTAL_RX_DESC_CNT;
6474 ering->rx_mini_max_pending = 0;
6475 ering->rx_jumbo_max_pending = MAX_TOTAL_RX_PG_DESC_CNT;
6477 ering->rx_pending = bp->rx_ring_size;
6478 ering->rx_mini_pending = 0;
6479 ering->rx_jumbo_pending = bp->rx_pg_ring_size;
6481 ering->tx_max_pending = MAX_TX_DESC_CNT;
6482 ering->tx_pending = bp->tx_ring_size;
6483 }
6485 static int
6486 bnx2_change_ring_size(struct bnx2 *bp, u32 rx, u32 tx)
6487 {
6488 if (netif_running(bp->dev)) {
6489 bnx2_netif_stop(bp);
6490 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
6491 bnx2_free_skbs(bp);
6492 bnx2_free_mem(bp);
6493 }
6495 bnx2_set_rx_ring_size(bp, rx);
6496 bp->tx_ring_size = tx;
6498 if (netif_running(bp->dev)) {
6499 int rc;
6501 rc = bnx2_alloc_mem(bp);
6502 if (rc)
6503 return rc;
6504 bnx2_init_nic(bp);
6505 bnx2_netif_start(bp);
6506 }
6507 return 0;
6508 }
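/* Ring sizing for "ethtool -G". The TX ring must be larger than
 * MAX_SKB_FRAGS, presumably so a maximally fragmented skb (one
 * descriptor per fragment plus one for the linear data) always fits.
 * Resizing goes through bnx2_change_ring_size() above, which tears
 * the rings down and rebuilds them.
 */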
6510 static int
6511 bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
6512 {
6513 struct bnx2 *bp = netdev_priv(dev);
6514 int rc;
6516 if ((ering->rx_pending > MAX_TOTAL_RX_DESC_CNT) ||
6517 (ering->tx_pending > MAX_TX_DESC_CNT) ||
6518 (ering->tx_pending <= MAX_SKB_FRAGS)) {
6520 return -EINVAL;
6521 }
6522 rc = bnx2_change_ring_size(bp, ering->rx_pending, ering->tx_pending);
6523 return rc;
6524 }
6526 static void
6527 bnx2_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
6528 {
6529 struct bnx2 *bp = netdev_priv(dev);
6531 epause->autoneg = ((bp->autoneg & AUTONEG_FLOW_CTRL) != 0);
6532 epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) != 0);
6533 epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) != 0);
6534 }
6536 static int
6537 bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
6538 {
6539 struct bnx2 *bp = netdev_priv(dev);
6541 bp->req_flow_ctrl = 0;
6542 if (epause->rx_pause)
6543 bp->req_flow_ctrl |= FLOW_CTRL_RX;
6544 if (epause->tx_pause)
6545 bp->req_flow_ctrl |= FLOW_CTRL_TX;
6547 if (epause->autoneg) {
6548 bp->autoneg |= AUTONEG_FLOW_CTRL;
6549 }
6550 else {
6551 bp->autoneg &= ~AUTONEG_FLOW_CTRL;
6552 }
6554 spin_lock_bh(&bp->phy_lock);
6556 bnx2_setup_phy(bp, bp->phy_port);
6558 spin_unlock_bh(&bp->phy_lock);
6560 return 0;
6561 }
6563 static u32
6564 bnx2_get_rx_csum(struct net_device *dev)
6565 {
6566 struct bnx2 *bp = netdev_priv(dev);
6568 return bp->rx_csum;
6569 }
6571 static int
6572 bnx2_set_rx_csum(struct net_device *dev, u32 data)
6573 {
6574 struct bnx2 *bp = netdev_priv(dev);
6576 bp->rx_csum = data;
6577 return 0;
6578 }
6580 static int
6581 bnx2_set_tso(struct net_device *dev, u32 data)
6582 {
6583 struct bnx2 *bp = netdev_priv(dev);
6585 if (data) {
6586 dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
6587 if (CHIP_NUM(bp) == CHIP_NUM_5709)
6588 dev->features |= NETIF_F_TSO6;
6589 } else
6590 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6 |
6591 NETIF_F_TSO_ECN);
6592 return 0;
6593 }
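/* The tables below are parallel arrays indexed 0..BNX2_NUM_STATS-1:
 * bnx2_stats_str_arr names each counter for "ethtool -S",
 * bnx2_stats_offset_arr locates it in the hardware statistics block,
 * and the per-chip *_stats_len_arr entries give its width in bytes
 * (0 means the counter is not reported on that chip).
 */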
6595 #define BNX2_NUM_STATS 46
6597 static struct {
6598 char string[ETH_GSTRING_LEN];
6599 } bnx2_stats_str_arr[BNX2_NUM_STATS] = {
6600 { "rx_bytes" },
6601 { "rx_error_bytes" },
6602 { "tx_bytes" },
6603 { "tx_error_bytes" },
6604 { "rx_ucast_packets" },
6605 { "rx_mcast_packets" },
6606 { "rx_bcast_packets" },
6607 { "tx_ucast_packets" },
6608 { "tx_mcast_packets" },
6609 { "tx_bcast_packets" },
6610 { "tx_mac_errors" },
6611 { "tx_carrier_errors" },
6612 { "rx_crc_errors" },
6613 { "rx_align_errors" },
6614 { "tx_single_collisions" },
6615 { "tx_multi_collisions" },
6616 { "tx_deferred" },
6617 { "tx_excess_collisions" },
6618 { "tx_late_collisions" },
6619 { "tx_total_collisions" },
6620 { "rx_fragments" },
6621 { "rx_jabbers" },
6622 { "rx_undersize_packets" },
6623 { "rx_oversize_packets" },
6624 { "rx_64_byte_packets" },
6625 { "rx_65_to_127_byte_packets" },
6626 { "rx_128_to_255_byte_packets" },
6627 { "rx_256_to_511_byte_packets" },
6628 { "rx_512_to_1023_byte_packets" },
6629 { "rx_1024_to_1522_byte_packets" },
6630 { "rx_1523_to_9022_byte_packets" },
6631 { "tx_64_byte_packets" },
6632 { "tx_65_to_127_byte_packets" },
6633 { "tx_128_to_255_byte_packets" },
6634 { "tx_256_to_511_byte_packets" },
6635 { "tx_512_to_1023_byte_packets" },
6636 { "tx_1024_to_1522_byte_packets" },
6637 { "tx_1523_to_9022_byte_packets" },
6638 { "rx_xon_frames" },
6639 { "rx_xoff_frames" },
6640 { "tx_xon_frames" },
6641 { "tx_xoff_frames" },
6642 { "rx_mac_ctrl_frames" },
6643 { "rx_filtered_packets" },
6644 { "rx_discards" },
6645 { "rx_fw_discards" },
6648 #define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4)
6650 static const unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
6651 STATS_OFFSET32(stat_IfHCInOctets_hi),
6652 STATS_OFFSET32(stat_IfHCInBadOctets_hi),
6653 STATS_OFFSET32(stat_IfHCOutOctets_hi),
6654 STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
6655 STATS_OFFSET32(stat_IfHCInUcastPkts_hi),
6656 STATS_OFFSET32(stat_IfHCInMulticastPkts_hi),
6657 STATS_OFFSET32(stat_IfHCInBroadcastPkts_hi),
6658 STATS_OFFSET32(stat_IfHCOutUcastPkts_hi),
6659 STATS_OFFSET32(stat_IfHCOutMulticastPkts_hi),
6660 STATS_OFFSET32(stat_IfHCOutBroadcastPkts_hi),
6661 STATS_OFFSET32(stat_emac_tx_stat_dot3statsinternalmactransmiterrors),
6662 STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
6663 STATS_OFFSET32(stat_Dot3StatsFCSErrors),
6664 STATS_OFFSET32(stat_Dot3StatsAlignmentErrors),
6665 STATS_OFFSET32(stat_Dot3StatsSingleCollisionFrames),
6666 STATS_OFFSET32(stat_Dot3StatsMultipleCollisionFrames),
6667 STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
6668 STATS_OFFSET32(stat_Dot3StatsExcessiveCollisions),
6669 STATS_OFFSET32(stat_Dot3StatsLateCollisions),
6670 STATS_OFFSET32(stat_EtherStatsCollisions),
6671 STATS_OFFSET32(stat_EtherStatsFragments),
6672 STATS_OFFSET32(stat_EtherStatsJabbers),
6673 STATS_OFFSET32(stat_EtherStatsUndersizePkts),
6674 STATS_OFFSET32(stat_EtherStatsOverrsizePkts),
6675 STATS_OFFSET32(stat_EtherStatsPktsRx64Octets),
6676 STATS_OFFSET32(stat_EtherStatsPktsRx65Octetsto127Octets),
6677 STATS_OFFSET32(stat_EtherStatsPktsRx128Octetsto255Octets),
6678 STATS_OFFSET32(stat_EtherStatsPktsRx256Octetsto511Octets),
6679 STATS_OFFSET32(stat_EtherStatsPktsRx512Octetsto1023Octets),
6680 STATS_OFFSET32(stat_EtherStatsPktsRx1024Octetsto1522Octets),
6681 STATS_OFFSET32(stat_EtherStatsPktsRx1523Octetsto9022Octets),
6682 STATS_OFFSET32(stat_EtherStatsPktsTx64Octets),
6683 STATS_OFFSET32(stat_EtherStatsPktsTx65Octetsto127Octets),
6684 STATS_OFFSET32(stat_EtherStatsPktsTx128Octetsto255Octets),
6685 STATS_OFFSET32(stat_EtherStatsPktsTx256Octetsto511Octets),
6686 STATS_OFFSET32(stat_EtherStatsPktsTx512Octetsto1023Octets),
6687 STATS_OFFSET32(stat_EtherStatsPktsTx1024Octetsto1522Octets),
6688 STATS_OFFSET32(stat_EtherStatsPktsTx1523Octetsto9022Octets),
6689 STATS_OFFSET32(stat_XonPauseFramesReceived),
6690 STATS_OFFSET32(stat_XoffPauseFramesReceived),
6691 STATS_OFFSET32(stat_OutXonSent),
6692 STATS_OFFSET32(stat_OutXoffSent),
6693 STATS_OFFSET32(stat_MacControlFramesReceived),
6694 STATS_OFFSET32(stat_IfInFramesL2FilterDiscards),
6695 STATS_OFFSET32(stat_IfInMBUFDiscards),
6696 STATS_OFFSET32(stat_FwRxDrop),
6697 };
6699 /* stat_IfHCInBadOctets and stat_Dot3StatsCarrierSenseErrors are
6700 * skipped because of errata.
6701 */
6702 static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
6703 8,0,8,8,8,8,8,8,8,8,
6704 4,0,4,4,4,4,4,4,4,4,
6705 4,4,4,4,4,4,4,4,4,4,
6706 4,4,4,4,4,4,4,4,4,4,
6707 4,4,4,4,4,4,
6708 };
6710 static u8 bnx2_5708_stats_len_arr[BNX2_NUM_STATS] = {
6711 8,0,8,8,8,8,8,8,8,8,
6712 4,4,4,4,4,4,4,4,4,4,
6713 4,4,4,4,4,4,4,4,4,4,
6714 4,4,4,4,4,4,4,4,4,4,
6715 4,4,4,4,4,4,
6716 };
6718 #define BNX2_NUM_TESTS 6
6720 static struct {
6721 char string[ETH_GSTRING_LEN];
6722 } bnx2_tests_str_arr[BNX2_NUM_TESTS] = {
6723 { "register_test (offline)" },
6724 { "memory_test (offline)" },
6725 { "loopback_test (offline)" },
6726 { "nvram_test (online)" },
6727 { "interrupt_test (online)" },
6728 { "link_test (online)" },
6731 static int
6732 bnx2_get_sset_count(struct net_device *dev, int sset)
6734 switch (sset) {
6735 case ETH_SS_TEST:
6736 return BNX2_NUM_TESTS;
6737 case ETH_SS_STATS:
6738 return BNX2_NUM_STATS;
6739 default:
6740 return -EOPNOTSUPP;
6741 }
6742 }
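/* Self-test ("ethtool -t"): results are written to buf[0..5] in the
 * order of bnx2_tests_str_arr. The three offline tests reset the chip
 * into diagnostic mode and therefore take the device down; the online
 * tests run against the live interface.
 */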
6744 static void
6745 bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
6746 {
6747 struct bnx2 *bp = netdev_priv(dev);
6749 memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS);
6750 if (etest->flags & ETH_TEST_FL_OFFLINE) {
6751 int i;
6753 bnx2_netif_stop(bp);
6754 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
6755 bnx2_free_skbs(bp);
6757 if (bnx2_test_registers(bp) != 0) {
6758 buf[0] = 1;
6759 etest->flags |= ETH_TEST_FL_FAILED;
6760 }
6761 if (bnx2_test_memory(bp) != 0) {
6762 buf[1] = 1;
6763 etest->flags |= ETH_TEST_FL_FAILED;
6764 }
6765 if ((buf[2] = bnx2_test_loopback(bp)) != 0)
6766 etest->flags |= ETH_TEST_FL_FAILED;
6768 if (!netif_running(bp->dev)) {
6769 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
6770 }
6771 else {
6772 bnx2_init_nic(bp);
6773 bnx2_netif_start(bp);
6774 }
6776 /* wait for link up */
6777 for (i = 0; i < 7; i++) {
6778 if (bp->link_up)
6779 break;
6780 msleep_interruptible(1000);
6781 }
6782 }
6784 if (bnx2_test_nvram(bp) != 0) {
6785 buf[3] = 1;
6786 etest->flags |= ETH_TEST_FL_FAILED;
6787 }
6788 if (bnx2_test_intr(bp) != 0) {
6789 buf[4] = 1;
6790 etest->flags |= ETH_TEST_FL_FAILED;
6791 }
6793 if (bnx2_test_link(bp) != 0) {
6794 buf[5] = 1;
6795 etest->flags |= ETH_TEST_FL_FAILED;
6796 }
6797 }
6800 static void
6801 bnx2_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
6802 {
6803 switch (stringset) {
6804 case ETH_SS_STATS:
6805 memcpy(buf, bnx2_stats_str_arr,
6806 sizeof(bnx2_stats_str_arr));
6807 break;
6808 case ETH_SS_TEST:
6809 memcpy(buf, bnx2_tests_str_arr,
6810 sizeof(bnx2_tests_str_arr));
6811 break;
6812 }
6813 }
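/* Counters are copied out according to the per-chip width table:
 * 0 = unsupported (reported as zero), 4 = a single 32-bit word,
 * 8 = a 64-bit value stored by the hardware as a hi/lo pair of
 * 32-bit words.
 */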
6815 static void
6816 bnx2_get_ethtool_stats(struct net_device *dev,
6817 struct ethtool_stats *stats, u64 *buf)
6818 {
6819 struct bnx2 *bp = netdev_priv(dev);
6820 int i;
6821 u32 *hw_stats = (u32 *) bp->stats_blk;
6822 u8 *stats_len_arr = NULL;
6824 if (hw_stats == NULL) {
6825 memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS);
6826 return;
6827 }
6829 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
6830 (CHIP_ID(bp) == CHIP_ID_5706_A1) ||
6831 (CHIP_ID(bp) == CHIP_ID_5706_A2) ||
6832 (CHIP_ID(bp) == CHIP_ID_5708_A0))
6833 stats_len_arr = bnx2_5706_stats_len_arr;
6834 else
6835 stats_len_arr = bnx2_5708_stats_len_arr;
6837 for (i = 0; i < BNX2_NUM_STATS; i++) {
6838 if (stats_len_arr[i] == 0) {
6839 /* skip this counter */
6840 buf[i] = 0;
6841 continue;
6842 }
6843 if (stats_len_arr[i] == 4) {
6844 /* 4-byte counter */
6845 buf[i] = (u64)
6846 *(hw_stats + bnx2_stats_offset_arr[i]);
6847 continue;
6848 }
6849 /* 8-byte counter */
6850 buf[i] = (((u64) *(hw_stats +
6851 bnx2_stats_offset_arr[i])) << 32) +
6852 *(hw_stats + bnx2_stats_offset_arr[i] + 1);
6853 }
6854 }
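/* "ethtool -p": blink the port LED for roughly 'data' seconds by
 * toggling the EMAC LED override every 500 ms, then restore the saved
 * LED mode. An argument of 0 is treated as a 2-second blink.
 */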
6856 static int
6857 bnx2_phys_id(struct net_device *dev, u32 data)
6858 {
6859 struct bnx2 *bp = netdev_priv(dev);
6860 int i;
6861 u32 save;
6863 if (data == 0)
6864 data = 2;
6866 save = REG_RD(bp, BNX2_MISC_CFG);
6867 REG_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);
6869 for (i = 0; i < (data * 2); i++) {
6870 if ((i % 2) == 0) {
6871 REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE);
6872 }
6873 else {
6874 REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE |
6875 BNX2_EMAC_LED_1000MB_OVERRIDE |
6876 BNX2_EMAC_LED_100MB_OVERRIDE |
6877 BNX2_EMAC_LED_10MB_OVERRIDE |
6878 BNX2_EMAC_LED_TRAFFIC_OVERRIDE |
6879 BNX2_EMAC_LED_TRAFFIC);
6880 }
6881 msleep_interruptible(500);
6882 if (signal_pending(current))
6883 break;
6884 }
6885 REG_WR(bp, BNX2_EMAC_LED, 0);
6886 REG_WR(bp, BNX2_MISC_CFG, save);
6887 return 0;
6888 }
6890 static int
6891 bnx2_set_tx_csum(struct net_device *dev, u32 data)
6892 {
6893 struct bnx2 *bp = netdev_priv(dev);
6895 if (CHIP_NUM(bp) == CHIP_NUM_5709)
6896 return (ethtool_op_set_tx_ipv6_csum(dev, data));
6897 else
6898 return (ethtool_op_set_tx_csum(dev, data));
6899 }
6901 static const struct ethtool_ops bnx2_ethtool_ops = {
6902 .get_settings = bnx2_get_settings,
6903 .set_settings = bnx2_set_settings,
6904 .get_drvinfo = bnx2_get_drvinfo,
6905 .get_regs_len = bnx2_get_regs_len,
6906 .get_regs = bnx2_get_regs,
6907 .get_wol = bnx2_get_wol,
6908 .set_wol = bnx2_set_wol,
6909 .nway_reset = bnx2_nway_reset,
6910 .get_link = ethtool_op_get_link,
6911 .get_eeprom_len = bnx2_get_eeprom_len,
6912 .get_eeprom = bnx2_get_eeprom,
6913 .set_eeprom = bnx2_set_eeprom,
6914 .get_coalesce = bnx2_get_coalesce,
6915 .set_coalesce = bnx2_set_coalesce,
6916 .get_ringparam = bnx2_get_ringparam,
6917 .set_ringparam = bnx2_set_ringparam,
6918 .get_pauseparam = bnx2_get_pauseparam,
6919 .set_pauseparam = bnx2_set_pauseparam,
6920 .get_rx_csum = bnx2_get_rx_csum,
6921 .set_rx_csum = bnx2_set_rx_csum,
6922 .set_tx_csum = bnx2_set_tx_csum,
6923 .set_sg = ethtool_op_set_sg,
6924 .set_tso = bnx2_set_tso,
6925 .self_test = bnx2_self_test,
6926 .get_strings = bnx2_get_strings,
6927 .phys_id = bnx2_phys_id,
6928 .get_ethtool_stats = bnx2_get_ethtool_stats,
6929 .get_sset_count = bnx2_get_sset_count,
6930 };
6932 /* Called with rtnl_lock */
6933 static int
6934 bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
6935 {
6936 struct mii_ioctl_data *data = if_mii(ifr);
6937 struct bnx2 *bp = netdev_priv(dev);
6938 int err;
6940 switch(cmd) {
6941 case SIOCGMIIPHY:
6942 data->phy_id = bp->phy_addr;
6944 /* fallthru */
6945 case SIOCGMIIREG: {
6946 u32 mii_regval;
6948 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
6949 return -EOPNOTSUPP;
6951 if (!netif_running(dev))
6952 return -EAGAIN;
6954 spin_lock_bh(&bp->phy_lock);
6955 err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval);
6956 spin_unlock_bh(&bp->phy_lock);
6958 data->val_out = mii_regval;
6960 return err;
6961 }
6963 case SIOCSMIIREG:
6964 if (!capable(CAP_NET_ADMIN))
6965 return -EPERM;
6967 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
6968 return -EOPNOTSUPP;
6970 if (!netif_running(dev))
6971 return -EAGAIN;
6973 spin_lock_bh(&bp->phy_lock);
6974 err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in);
6975 spin_unlock_bh(&bp->phy_lock);
6977 return err;
6979 default:
6980 /* do nothing */
6981 break;
6982 }
6983 return -EOPNOTSUPP;
6984 }
6986 /* Called with rtnl_lock */
6987 static int
6988 bnx2_change_mac_addr(struct net_device *dev, void *p)
6989 {
6990 struct sockaddr *addr = p;
6991 struct bnx2 *bp = netdev_priv(dev);
6993 if (!is_valid_ether_addr(addr->sa_data))
6994 return -EINVAL;
6996 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
6997 if (netif_running(dev))
6998 bnx2_set_mac_addr(bp);
7000 return 0;
7001 }
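/* MTU changes must leave room for the Ethernet header within the
 * chip's jumbo frame limit; they take effect by rebuilding the rings
 * at the current ring sizes.
 */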
7003 /* Called with rtnl_lock */
7004 static int
7005 bnx2_change_mtu(struct net_device *dev, int new_mtu)
7006 {
7007 struct bnx2 *bp = netdev_priv(dev);
7009 if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) ||
7010 ((new_mtu + ETH_HLEN) < MIN_ETHERNET_PACKET_SIZE))
7011 return -EINVAL;
7013 dev->mtu = new_mtu;
7014 return (bnx2_change_ring_size(bp, bp->rx_ring_size, bp->tx_ring_size));
7015 }
7017 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
7018 static void
7019 poll_bnx2(struct net_device *dev)
7020 {
7021 struct bnx2 *bp = netdev_priv(dev);
7023 disable_irq(bp->pdev->irq);
7024 bnx2_interrupt(bp->pdev->irq, dev);
7025 enable_irq(bp->pdev->irq);
7026 }
7027 #endif
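/* The 5709 is a dual-media part: whether a given PCI function is
 * copper or SerDes is decoded from the bond id and strap bits in the
 * dual-media control register, with separate strap tables for
 * function 0 and function 1.
 */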
7029 static void __devinit
7030 bnx2_get_5709_media(struct bnx2 *bp)
7031 {
7032 u32 val = REG_RD(bp, BNX2_MISC_DUAL_MEDIA_CTRL);
7033 u32 bond_id = val & BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID;
7034 u32 strap;
7036 if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_C)
7037 return;
7038 else if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_S) {
7039 bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
7040 return;
7041 }
7043 if (val & BNX2_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE)
7044 strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL) >> 21;
7045 else
7046 strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP) >> 8;
7048 if (PCI_FUNC(bp->pdev->devfn) == 0) {
7049 switch (strap) {
7050 case 0x4:
7051 case 0x5:
7052 case 0x6:
7053 bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
7054 return;
7055 }
7056 } else {
7057 switch (strap) {
7058 case 0x1:
7059 case 0x2:
7060 case 0x4:
7061 bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
7062 return;
7063 }
7064 }
7065 }
7067 static void __devinit
7068 bnx2_get_pci_speed(struct bnx2 *bp)
7069 {
7070 u32 reg;
7072 reg = REG_RD(bp, BNX2_PCICFG_MISC_STATUS);
7073 if (reg & BNX2_PCICFG_MISC_STATUS_PCIX_DET) {
7074 u32 clkreg;
7076 bp->flags |= BNX2_FLAG_PCIX;
7078 clkreg = REG_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS);
7080 clkreg &= BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
7081 switch (clkreg) {
7082 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
7083 bp->bus_speed_mhz = 133;
7084 break;
7086 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
7087 bp->bus_speed_mhz = 100;
7088 break;
7090 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
7091 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
7092 bp->bus_speed_mhz = 66;
7093 break;
7095 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
7096 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
7097 bp->bus_speed_mhz = 50;
7098 break;
7100 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
7101 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
7102 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
7103 bp->bus_speed_mhz = 33;
7104 break;
7105 }
7106 }
7107 else {
7108 if (reg & BNX2_PCICFG_MISC_STATUS_M66EN)
7109 bp->bus_speed_mhz = 66;
7110 else
7111 bp->bus_speed_mhz = 33;
7112 }
7114 if (reg & BNX2_PCICFG_MISC_STATUS_32BIT_DET)
7115 bp->flags |= BNX2_FLAG_PCI_32BIT;
7116 }
7119 static int __devinit
7120 bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
7121 {
7122 struct bnx2 *bp;
7123 unsigned long mem_len;
7124 int rc, i, j;
7125 u32 reg;
7126 u64 dma_mask, persist_dma_mask;
7128 SET_NETDEV_DEV(dev, &pdev->dev);
7129 bp = netdev_priv(dev);
7131 bp->flags = 0;
7132 bp->phy_flags = 0;
7134 /* enable device (incl. PCI PM wakeup), and bus-mastering */
7135 rc = pci_enable_device(pdev);
7136 if (rc) {
7137 dev_err(&pdev->dev, "Cannot enable PCI device, aborting.\n");
7138 goto err_out;
7139 }
7141 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
7142 dev_err(&pdev->dev,
7143 "Cannot find PCI device base address, aborting.\n");
7144 rc = -ENODEV;
7145 goto err_out_disable;
7146 }
7148 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
7149 if (rc) {
7150 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting.\n");
7151 goto err_out_disable;
7152 }
7154 pci_set_master(pdev);
7156 bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
7157 if (bp->pm_cap == 0) {
7158 dev_err(&pdev->dev,
7159 "Cannot find power management capability, aborting.\n");
7160 rc = -EIO;
7161 goto err_out_release;
7162 }
7164 bp->dev = dev;
7165 bp->pdev = pdev;
7167 spin_lock_init(&bp->phy_lock);
7168 spin_lock_init(&bp->indirect_lock);
7169 INIT_WORK(&bp->reset_task, bnx2_reset_task);
7171 dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0);
7172 mem_len = MB_GET_CID_ADDR(TX_TSS_CID + 1);
7173 dev->mem_end = dev->mem_start + mem_len;
7174 dev->irq = pdev->irq;
7176 bp->regview = ioremap_nocache(dev->base_addr, mem_len);
7178 if (!bp->regview) {
7179 dev_err(&pdev->dev, "Cannot map register space, aborting.\n");
7180 rc = -ENOMEM;
7181 goto err_out_release;
7182 }
7184 /* Configure byte swap and enable write to the reg_window registers.
7185 * Rely on CPU to do target byte swapping on big endian systems
7186 * The chip's target access swapping will not swap all accesses
7187 */
7188 pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG,
7189 BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
7190 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);
7192 bnx2_set_power_state(bp, PCI_D0);
7194 bp->chip_id = REG_RD(bp, BNX2_MISC_ID);
7196 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
7197 if (pci_find_capability(pdev, PCI_CAP_ID_EXP) == 0) {
7198 dev_err(&pdev->dev,
7199 "Cannot find PCIE capability, aborting.\n");
7200 rc = -EIO;
7201 goto err_out_unmap;
7202 }
7203 bp->flags |= BNX2_FLAG_PCIE;
7204 if (CHIP_REV(bp) == CHIP_REV_Ax)
7205 bp->flags |= BNX2_FLAG_JUMBO_BROKEN;
7206 } else {
7207 bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
7208 if (bp->pcix_cap == 0) {
7209 dev_err(&pdev->dev,
7210 "Cannot find PCIX capability, aborting.\n");
7211 rc = -EIO;
7212 goto err_out_unmap;
7213 }
7214 }
7216 if (CHIP_NUM(bp) == CHIP_NUM_5709 && CHIP_REV(bp) != CHIP_REV_Ax) {
7217 if (pci_find_capability(pdev, PCI_CAP_ID_MSIX))
7218 bp->flags |= BNX2_FLAG_MSIX_CAP;
7219 }
7221 if (CHIP_ID(bp) != CHIP_ID_5706_A0 && CHIP_ID(bp) != CHIP_ID_5706_A1) {
7222 if (pci_find_capability(pdev, PCI_CAP_ID_MSI))
7223 bp->flags |= BNX2_FLAG_MSI_CAP;
7224 }
7226 /* 5708 cannot support DMA addresses > 40-bit. */
7227 if (CHIP_NUM(bp) == CHIP_NUM_5708)
7228 persist_dma_mask = dma_mask = DMA_40BIT_MASK;
7229 else
7230 persist_dma_mask = dma_mask = DMA_64BIT_MASK;
7232 /* Configure DMA attributes. */
7233 if (pci_set_dma_mask(pdev, dma_mask) == 0) {
7234 dev->features |= NETIF_F_HIGHDMA;
7235 rc = pci_set_consistent_dma_mask(pdev, persist_dma_mask);
7236 if (rc) {
7237 dev_err(&pdev->dev,
7238 "pci_set_consistent_dma_mask failed, aborting.\n");
7239 goto err_out_unmap;
7240 }
7241 } else if ((rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) != 0) {
7242 dev_err(&pdev->dev, "System does not support DMA, aborting.\n");
7243 goto err_out_unmap;
7244 }
7246 if (!(bp->flags & BNX2_FLAG_PCIE))
7247 bnx2_get_pci_speed(bp);
7249 /* 5706A0 may falsely detect SERR and PERR. */
7250 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
7251 reg = REG_RD(bp, PCI_COMMAND);
7252 reg &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
7253 REG_WR(bp, PCI_COMMAND, reg);
7254 }
7255 else if ((CHIP_ID(bp) == CHIP_ID_5706_A1) &&
7256 !(bp->flags & BNX2_FLAG_PCIX)) {
7258 dev_err(&pdev->dev,
7259 "5706 A1 can only be used in a PCIX bus, aborting.\n");
7260 goto err_out_unmap;
7261 }
7263 bnx2_init_nvram(bp);
7265 reg = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_SIGNATURE);
7267 if ((reg & BNX2_SHM_HDR_SIGNATURE_SIG_MASK) ==
7268 BNX2_SHM_HDR_SIGNATURE_SIG) {
7269 u32 off = PCI_FUNC(pdev->devfn) << 2;
7271 bp->shmem_base = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_ADDR_0 + off);
7272 } else
7273 bp->shmem_base = HOST_VIEW_SHMEM_BASE;
7275 /* Get the permanent MAC address. First we need to make sure the
7276 * firmware is actually running.
7277 */
7278 reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_SIGNATURE);
7280 if ((reg & BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
7281 BNX2_DEV_INFO_SIGNATURE_MAGIC) {
7282 dev_err(&pdev->dev, "Firmware not running, aborting.\n");
7283 rc = -ENODEV;
7284 goto err_out_unmap;
7285 }
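/* Format the bootcode version as "x.y.z": each of the three high
 * bytes of BNX2_DEV_INFO_BC_REV is printed in decimal with leading
 * zeros suppressed (the k = 100, 10, 1 divisor loop below).
 */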
7287 reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_BC_REV);
7288 for (i = 0, j = 0; i < 3; i++) {
7289 u8 num, k, skip0;
7291 num = (u8) (reg >> (24 - (i * 8)));
7292 for (k = 100, skip0 = 1; k >= 1; num %= k, k /= 10) {
7293 if (num >= k || !skip0 || k == 1) {
7294 bp->fw_version[j++] = (num / k) + '0';
7295 skip0 = 0;
7296 }
7297 }
7298 if (i != 2)
7299 bp->fw_version[j++] = '.';
7300 }
7301 reg = bnx2_shmem_rd(bp, BNX2_PORT_FEATURE);
7302 if (reg & BNX2_PORT_FEATURE_WOL_ENABLED)
7303 bp->wol = 1;
7305 if (reg & BNX2_PORT_FEATURE_ASF_ENABLED) {
7306 bp->flags |= BNX2_FLAG_ASF_ENABLE;
7308 for (i = 0; i < 30; i++) {
7309 reg = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
7310 if (reg & BNX2_CONDITION_MFW_RUN_MASK)
7311 break;
7312 msleep(10);
7313 }
7315 reg = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
7316 reg &= BNX2_CONDITION_MFW_RUN_MASK;
7317 if (reg != BNX2_CONDITION_MFW_RUN_UNKNOWN &&
7318 reg != BNX2_CONDITION_MFW_RUN_NONE) {
7319 int i;
7320 u32 addr = bnx2_shmem_rd(bp, BNX2_MFW_VER_PTR);
7322 bp->fw_version[j++] = ' ';
7323 for (i = 0; i < 3; i++) {
7324 reg = bnx2_reg_rd_ind(bp, addr + i * 4);
7325 reg = swab32(reg);
7326 memcpy(&bp->fw_version[j], &reg, 4);
7327 j += 4;
7328 }
7329 }
7330 }
7331 reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_MAC_UPPER);
7332 bp->mac_addr[0] = (u8) (reg >> 8);
7333 bp->mac_addr[1] = (u8) reg;
7335 reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_MAC_LOWER);
7336 bp->mac_addr[2] = (u8) (reg >> 24);
7337 bp->mac_addr[3] = (u8) (reg >> 16);
7338 bp->mac_addr[4] = (u8) (reg >> 8);
7339 bp->mac_addr[5] = (u8) reg;
7341 bp->rx_offset = sizeof(struct l2_fhdr) + 2;
7343 bp->tx_ring_size = MAX_TX_DESC_CNT;
7344 bnx2_set_rx_ring_size(bp, 255);
7346 bp->rx_csum = 1;
7348 bp->tx_quick_cons_trip_int = 20;
7349 bp->tx_quick_cons_trip = 20;
7350 bp->tx_ticks_int = 80;
7351 bp->tx_ticks = 80;
7353 bp->rx_quick_cons_trip_int = 6;
7354 bp->rx_quick_cons_trip = 6;
7355 bp->rx_ticks_int = 18;
7356 bp->rx_ticks = 18;
7358 bp->stats_ticks = USEC_PER_SEC & BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
7360 bp->timer_interval = HZ;
7361 bp->current_interval = HZ;
7363 bp->phy_addr = 1;
7365 /* Disable WOL support if we are running on a SERDES chip. */
7366 if (CHIP_NUM(bp) == CHIP_NUM_5709)
7367 bnx2_get_5709_media(bp);
7368 else if (CHIP_BOND_ID(bp) & CHIP_BOND_ID_SERDES_BIT)
7369 bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
7371 bp->phy_port = PORT_TP;
7372 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
7373 bp->phy_port = PORT_FIBRE;
7374 reg = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
7375 if (!(reg & BNX2_SHARED_HW_CFG_GIG_LINK_ON_VAUX)) {
7376 bp->flags |= BNX2_FLAG_NO_WOL;
7377 bp->wol = 0;
7378 }
7379 <<<<<<< HEAD:drivers/net/bnx2.c
7380 if (CHIP_NUM(bp) != CHIP_NUM_5706) {
7381 =======
7382 if (CHIP_NUM(bp) == CHIP_NUM_5706) {
7383 /* Don't do parallel detect on this board because of
7384 * some board problems. The link will not go down
7385 * if we do parallel detect.
7387 if (pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
7388 pdev->subsystem_device == 0x310c)
7389 bp->phy_flags |= BNX2_PHY_FLAG_NO_PARALLEL;
7390 } else {
7391 >>>>>>> 264e3e889d86e552b4191d69bb60f4f3b383135a:drivers/net/bnx2.c
7392 bp->phy_addr = 2;
7393 if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G)
7394 bp->phy_flags |= BNX2_PHY_FLAG_2_5G_CAPABLE;
7395 }
7396 bnx2_init_remote_phy(bp);
7398 } else if (CHIP_NUM(bp) == CHIP_NUM_5706 ||
7399 CHIP_NUM(bp) == CHIP_NUM_5708)
7400 bp->phy_flags |= BNX2_PHY_FLAG_CRC_FIX;
7401 else if (CHIP_NUM(bp) == CHIP_NUM_5709 &&
7402 (CHIP_REV(bp) == CHIP_REV_Ax ||
7403 CHIP_REV(bp) == CHIP_REV_Bx))
7404 bp->phy_flags |= BNX2_PHY_FLAG_DIS_EARLY_DAC;
7406 if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
7407 (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
7408 (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
7409 bp->flags |= BNX2_FLAG_NO_WOL;
7410 bp->wol = 0;
7411 }
7413 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
7414 bp->tx_quick_cons_trip_int =
7415 bp->tx_quick_cons_trip;
7416 bp->tx_ticks_int = bp->tx_ticks;
7417 bp->rx_quick_cons_trip_int =
7418 bp->rx_quick_cons_trip;
7419 bp->rx_ticks_int = bp->rx_ticks;
7420 bp->comp_prod_trip_int = bp->comp_prod_trip;
7421 bp->com_ticks_int = bp->com_ticks;
7422 bp->cmd_ticks_int = bp->cmd_ticks;
7423 }
7425 /* Disable MSI on 5706 if AMD 8132 bridge is found.
7427 * MSI is defined to be 32-bit write. The 5706 does 64-bit MSI writes
7428 * with byte enables disabled on the unused 32-bit word. This is legal
7429 * but causes problems on the AMD 8132 which will eventually stop
7430 * responding after a while.
7432 * AMD believes this incompatibility is unique to the 5706, and
7433 * prefers to locally disable MSI rather than globally disabling it.
7434 */
7435 if (CHIP_NUM(bp) == CHIP_NUM_5706 && disable_msi == 0) {
7436 struct pci_dev *amd_8132 = NULL;
7438 while ((amd_8132 = pci_get_device(PCI_VENDOR_ID_AMD,
7439 PCI_DEVICE_ID_AMD_8132_BRIDGE,
7440 amd_8132))) {
7442 if (amd_8132->revision >= 0x10 &&
7443 amd_8132->revision <= 0x13) {
7444 disable_msi = 1;
7445 pci_dev_put(amd_8132);
7446 break;
7447 }
7448 }
7449 }
7451 bnx2_set_default_link(bp);
7452 bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
7454 init_timer(&bp->timer);
7455 bp->timer.expires = RUN_AT(bp->timer_interval);
7456 bp->timer.data = (unsigned long) bp;
7457 bp->timer.function = bnx2_timer;
7459 return 0;
7461 err_out_unmap:
7462 if (bp->regview) {
7463 iounmap(bp->regview);
7464 bp->regview = NULL;
7465 }
7467 err_out_release:
7468 pci_release_regions(pdev);
7470 err_out_disable:
7471 pci_disable_device(pdev);
7472 pci_set_drvdata(pdev, NULL);
7474 err_out:
7475 return rc;
7476 }
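/* Build the human-readable bus description used in the probe banner,
 * e.g. "PCI Express" or "PCI-X 64-bit 133MHz".
 */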
7478 static char * __devinit
7479 bnx2_bus_string(struct bnx2 *bp, char *str)
7480 {
7481 char *s = str;
7483 if (bp->flags & BNX2_FLAG_PCIE) {
7484 s += sprintf(s, "PCI Express");
7485 } else {
7486 s += sprintf(s, "PCI");
7487 if (bp->flags & BNX2_FLAG_PCIX)
7488 s += sprintf(s, "-X");
7489 if (bp->flags & BNX2_FLAG_PCI_32BIT)
7490 s += sprintf(s, " 32-bit");
7491 else
7492 s += sprintf(s, " 64-bit");
7493 s += sprintf(s, " %dMHz", bp->bus_speed_mhz);
7494 }
7495 return str;
7496 }
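/* One NAPI context is set up per potential MSI-X vector; vector 0
 * polls RX via bnx2_poll and BNX2_TX_VEC polls TX completions via
 * bnx2_tx_poll, each with a NAPI weight of 64.
 */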
7498 static void __devinit
7499 bnx2_init_napi(struct bnx2 *bp)
7500 {
7501 int i;
7502 struct bnx2_napi *bnapi;
7504 for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
7505 bnapi = &bp->bnx2_napi[i];
7506 bnapi->bp = bp;
7507 }
7508 netif_napi_add(bp->dev, &bp->bnx2_napi[0].napi, bnx2_poll, 64);
7509 netif_napi_add(bp->dev, &bp->bnx2_napi[BNX2_TX_VEC].napi, bnx2_tx_poll,
7510 64);
7511 }
7513 static int __devinit
7514 bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
7515 {
7516 static int version_printed = 0;
7517 struct net_device *dev = NULL;
7518 struct bnx2 *bp;
7519 int rc;
7520 char str[40];
7521 DECLARE_MAC_BUF(mac);
7523 if (version_printed++ == 0)
7524 printk(KERN_INFO "%s", version);
7526 /* dev zeroed in init_etherdev */
7527 dev = alloc_etherdev(sizeof(*bp));
7529 if (!dev)
7530 return -ENOMEM;
7532 rc = bnx2_init_board(pdev, dev);
7533 if (rc < 0) {
7534 free_netdev(dev);
7535 return rc;
7536 }
7538 dev->open = bnx2_open;
7539 dev->hard_start_xmit = bnx2_start_xmit;
7540 dev->stop = bnx2_close;
7541 dev->get_stats = bnx2_get_stats;
7542 dev->set_multicast_list = bnx2_set_rx_mode;
7543 dev->do_ioctl = bnx2_ioctl;
7544 dev->set_mac_address = bnx2_change_mac_addr;
7545 dev->change_mtu = bnx2_change_mtu;
7546 dev->tx_timeout = bnx2_tx_timeout;
7547 dev->watchdog_timeo = TX_TIMEOUT;
7548 #ifdef BCM_VLAN
7549 dev->vlan_rx_register = bnx2_vlan_rx_register;
7550 #endif
7551 dev->ethtool_ops = &bnx2_ethtool_ops;
7553 bp = netdev_priv(dev);
7554 bnx2_init_napi(bp);
7556 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
7557 dev->poll_controller = poll_bnx2;
7558 #endif
7560 pci_set_drvdata(pdev, dev);
7562 memcpy(dev->dev_addr, bp->mac_addr, 6);
7563 memcpy(dev->perm_addr, bp->mac_addr, 6);
7564 bp->name = board_info[ent->driver_data].name;
7566 dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
7567 if (CHIP_NUM(bp) == CHIP_NUM_5709)
7568 dev->features |= NETIF_F_IPV6_CSUM;
7570 #ifdef BCM_VLAN
7571 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
7572 #endif
7573 dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
7574 if (CHIP_NUM(bp) == CHIP_NUM_5709)
7575 dev->features |= NETIF_F_TSO6;
7577 if ((rc = register_netdev(dev))) {
7578 dev_err(&pdev->dev, "Cannot register net device\n");
7579 if (bp->regview)
7580 iounmap(bp->regview);
7581 pci_release_regions(pdev);
7582 pci_disable_device(pdev);
7583 pci_set_drvdata(pdev, NULL);
7584 free_netdev(dev);
7585 return rc;
7586 }
7588 printk(KERN_INFO "%s: %s (%c%d) %s found at mem %lx, "
7589 "IRQ %d, node addr %s\n",
7590 dev->name,
7591 bp->name,
7592 ((CHIP_ID(bp) & 0xf000) >> 12) + 'A',
7593 ((CHIP_ID(bp) & 0x0ff0) >> 4),
7594 bnx2_bus_string(bp, str),
7595 dev->base_addr,
7596 bp->pdev->irq, print_mac(mac, dev->dev_addr));
7598 return 0;
7599 }
7601 static void __devexit
7602 bnx2_remove_one(struct pci_dev *pdev)
7603 {
7604 struct net_device *dev = pci_get_drvdata(pdev);
7605 struct bnx2 *bp = netdev_priv(dev);
7607 flush_scheduled_work();
7609 unregister_netdev(dev);
7611 if (bp->regview)
7612 iounmap(bp->regview);
7614 free_netdev(dev);
7615 pci_release_regions(pdev);
7616 pci_disable_device(pdev);
7617 pci_set_drvdata(pdev, NULL);
7618 }
7620 static int
7621 bnx2_suspend(struct pci_dev *pdev, pm_message_t state)
7622 {
7623 struct net_device *dev = pci_get_drvdata(pdev);
7624 struct bnx2 *bp = netdev_priv(dev);
7625 u32 reset_code;
7627 /* PCI register 4 needs to be saved whether netif_running() or not.
7628 * MSI address and data need to be saved if using MSI and
7629 * netif_running().
7630 */
7631 pci_save_state(pdev);
7632 if (!netif_running(dev))
7633 return 0;
7635 flush_scheduled_work();
7636 bnx2_netif_stop(bp);
7637 netif_device_detach(dev);
7638 del_timer_sync(&bp->timer);
7639 if (bp->flags & BNX2_FLAG_NO_WOL)
7640 reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
7641 else if (bp->wol)
7642 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
7643 else
7644 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
7645 bnx2_reset_chip(bp, reset_code);
7646 bnx2_free_skbs(bp);
7647 bnx2_set_power_state(bp, pci_choose_state(pdev, state));
7648 return 0;
7649 }
7651 static int
7652 bnx2_resume(struct pci_dev *pdev)
7653 {
7654 struct net_device *dev = pci_get_drvdata(pdev);
7655 struct bnx2 *bp = netdev_priv(dev);
7657 pci_restore_state(pdev);
7658 if (!netif_running(dev))
7659 return 0;
7661 bnx2_set_power_state(bp, PCI_D0);
7662 netif_device_attach(dev);
7663 bnx2_init_nic(bp);
7664 bnx2_netif_start(bp);
7665 return 0;
7666 }
7668 static struct pci_driver bnx2_pci_driver = {
7669 .name = DRV_MODULE_NAME,
7670 .id_table = bnx2_pci_tbl,
7671 .probe = bnx2_init_one,
7672 .remove = __devexit_p(bnx2_remove_one),
7673 .suspend = bnx2_suspend,
7674 .resume = bnx2_resume,
7675 };
7677 static int __init bnx2_init(void)
7678 {
7679 return pci_register_driver(&bnx2_pci_driver);
7680 }
7682 static void __exit bnx2_cleanup(void)
7683 {
7684 pci_unregister_driver(&bnx2_pci_driver);
7685 }
7687 module_init(bnx2_init);
7688 module_exit(bnx2_cleanup);