/* bnx2.c: Broadcom NX2 network driver.
 *
 * Copyright (c) 2004-2009 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Written by: Michael Chan  (mchan@broadcom.com)
 */
#include <linux/module.h>
#include <linux/moduleparam.h>

#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <asm/page.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
#define BCM_VLAN 1
#endif
#include <net/ip.h>
#include <net/tcp.h>
#include <net/checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/prefetch.h>
#include <linux/cache.h>
#include <linux/firmware.h>
#include <linux/log2.h>

#include "bnx2.h"
#include "bnx2_fw.h"
#define DRV_MODULE_NAME		"bnx2"
#define PFX DRV_MODULE_NAME	": "
#define DRV_MODULE_VERSION	"2.0.1"
#define DRV_MODULE_RELDATE	"May 6, 2009"
#define FW_MIPS_FILE_06		"bnx2/bnx2-mips-06-4.6.16.fw"
#define FW_RV2P_FILE_06		"bnx2/bnx2-rv2p-06-4.6.16.fw"
#define FW_MIPS_FILE_09		"bnx2/bnx2-mips-09-4.6.17.fw"
#define FW_RV2P_FILE_09		"bnx2/bnx2-rv2p-09-4.6.15.fw"

#define RUN_AT(x) (jiffies + (x))

/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT  (5*HZ)
static char version[] __devinitdata =
	"Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708/5709/5716 Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FW_MIPS_FILE_06);
MODULE_FIRMWARE(FW_RV2P_FILE_06);
MODULE_FIRMWARE(FW_MIPS_FILE_09);
MODULE_FIRMWARE(FW_RV2P_FILE_09);

static int disable_msi = 0;

module_param(disable_msi, int, 0);
MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
typedef enum {
	BCM5706 = 0,
	NC370T,
	NC370I,
	BCM5706S,
	NC370F,
	BCM5708,
	BCM5708S,
	BCM5709,
	BCM5709S,
	BCM5716,
	BCM5716S,
} board_t;
/* indexed by board_t, above */
static struct {
	char *name;
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM5706 1000Base-T" },
	{ "HP NC370T Multifunction Gigabit Server Adapter" },
	{ "HP NC370i Multifunction Gigabit Server Adapter" },
	{ "Broadcom NetXtreme II BCM5706 1000Base-SX" },
	{ "HP NC370F Multifunction Gigabit Server Adapter" },
	{ "Broadcom NetXtreme II BCM5708 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5708 1000Base-SX" },
	{ "Broadcom NetXtreme II BCM5709 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5709 1000Base-SX" },
	{ "Broadcom NetXtreme II BCM5716 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5716 1000Base-SX" },
	};
static DEFINE_PCI_DEVICE_TABLE(bnx2_pci_tbl) = {
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
	  PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709S },
	{ PCI_VENDOR_ID_BROADCOM, 0x163b,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5716 },
	{ PCI_VENDOR_ID_BROADCOM, 0x163c,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5716S },
	{ 0, }
};
static struct flash_spec flash_table[] =
{
#define BUFFERED_FLAGS		(BNX2_NV_BUFFERED | BNX2_NV_TRANSLATE)
#define NONBUFFERED_FLAGS	(BNX2_NV_WREN)
	/* Slow EEPROM */
	{0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
	 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - slow"},
	/* Expansion entry 0001 */
	{0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0001"},
	/* Saifun SA25F010 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
	 "Non-buffered flash (128kB)"},
	/* Saifun SA25F020 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
	 "Non-buffered flash (256kB)"},
	/* Expansion entry 0100 */
	{0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0100"},
	/* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
	{0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
	 "Entry 0101: ST M45PE10 (128kB non-buffered)"},
	/* Entry 0110: ST M45PE20 (non-buffered flash)*/
	{0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
	 "Entry 0110: ST M45PE20 (256kB non-buffered)"},
	/* Saifun SA25F005 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
	 "Non-buffered flash (64kB)"},
	/* Fast EEPROM */
	{0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
	 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - fast"},
	/* Expansion entry 1001 */
	{0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1001"},
	/* Expansion entry 1010 */
	{0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1010"},
	/* ATMEL AT45DB011B (buffered flash) */
	{0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
	 "Buffered flash (128kB)"},
	/* Expansion entry 1100 */
	{0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1100"},
	/* Expansion entry 1101 */
	{0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1101"},
	/* Atmel Expansion entry 1110 */
	{0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1110 (Atmel)"},
	/* ATMEL AT45DB021B (buffered flash) */
	{0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
	 "Buffered flash (256kB)"},
};
static struct flash_spec flash_5709 = {
	.flags		= BNX2_NV_BUFFERED,
	.page_bits	= BCM5709_FLASH_PAGE_BITS,
	.page_size	= BCM5709_FLASH_PAGE_SIZE,
	.addr_mask	= BCM5709_FLASH_BYTE_ADDR_MASK,
	.total_size	= BUFFERED_FLASH_TOTAL_SIZE*2,
	.name		= "5709 Buffered flash (256kB)",
};

MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
static inline u32 bnx2_tx_avail(struct bnx2 *bp, struct bnx2_tx_ring_info *txr)
{
	u32 diff;

	smp_mb();

	/* The ring uses 256 indices for 255 entries, one of them
	 * needs to be skipped.
	 */
	diff = txr->tx_prod - txr->tx_cons;
	if (unlikely(diff >= TX_DESC_CNT)) {
		diff &= 0xffff;
		if (diff == TX_DESC_CNT)
			diff = MAX_TX_DESC_CNT;
	}
	return (bp->tx_ring_size - diff);
}
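/* Indirect register access: internal chip registers are reached through a
 * PCI config window (BNX2_PCICFG_REG_WINDOW_ADDRESS / BNX2_PCICFG_REG_WINDOW).
 * indirect_lock serializes the address-write/data-access pair so concurrent
 * callers cannot interleave and read or write through a stale window address.
 */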
static u32
bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
{
	u32 val;

	spin_lock_bh(&bp->indirect_lock);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
	val = REG_RD(bp, BNX2_PCICFG_REG_WINDOW);
	spin_unlock_bh(&bp->indirect_lock);
	return val;
}

static void
bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
{
	spin_lock_bh(&bp->indirect_lock);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
	spin_unlock_bh(&bp->indirect_lock);
}
static void
bnx2_shmem_wr(struct bnx2 *bp, u32 offset, u32 val)
{
	bnx2_reg_wr_ind(bp, bp->shmem_base + offset, val);
}

static u32
bnx2_shmem_rd(struct bnx2 *bp, u32 offset)
{
	return (bnx2_reg_rd_ind(bp, bp->shmem_base + offset));
}
static void
bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
{
	offset += cid_addr;
	spin_lock_bh(&bp->indirect_lock);
	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		int i;

		REG_WR(bp, BNX2_CTX_CTX_DATA, val);
		REG_WR(bp, BNX2_CTX_CTX_CTRL,
		       offset | BNX2_CTX_CTX_CTRL_WRITE_REQ);
		for (i = 0; i < 5; i++) {
			val = REG_RD(bp, BNX2_CTX_CTX_CTRL);
			if ((val & BNX2_CTX_CTX_CTRL_WRITE_REQ) == 0)
				break;
			udelay(5);
		}
	} else {
		REG_WR(bp, BNX2_CTX_DATA_ADR, offset);
		REG_WR(bp, BNX2_CTX_DATA, val);
	}
	spin_unlock_bh(&bp->indirect_lock);
}
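/* MDIO access to the PHY. When the EMAC is auto-polling the PHY, auto-poll
 * must be switched off around a manual MDIO transaction and restored after,
 * with a settling delay; completion is detected by polling the START_BUSY
 * bit in the MDIO_COMM register.
 */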
static int
bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
{
	u32 val1;
	int i, ret;

	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	val1 = (bp->phy_addr << 21) | (reg << 16) |
		BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
		BNX2_EMAC_MDIO_COMM_START_BUSY;
	REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

	for (i = 0; i < 50; i++) {
		udelay(10);

		val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
			udelay(5);

			val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
			val1 &= BNX2_EMAC_MDIO_COMM_DATA;

			break;
		}
	}

	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
		*val = 0x0;
		ret = -EBUSY;
	}
	else {
		*val = val1;
		ret = 0;
	}

	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	return ret;
}
static int
bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
{
	u32 val1;
	int i, ret;

	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	val1 = (bp->phy_addr << 21) | (reg << 16) | val |
		BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
		BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
	REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

	for (i = 0; i < 50; i++) {
		udelay(10);

		val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
			udelay(5);
			break;
		}
	}

	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
		ret = -EBUSY;
	else
		ret = 0;

	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	return ret;
}
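/* Interrupt control: interrupts are masked/unmasked per vector through the
 * PCICFG_INT_ACK_CMD register; writing back the last seen status index also
 * acknowledges all events up to that point.
 */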
static void
bnx2_disable_int(struct bnx2 *bp)
{
	int i;
	struct bnx2_napi *bnapi;

	for (i = 0; i < bp->irq_nvecs; i++) {
		bnapi = &bp->bnx2_napi[i];
		REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
		       BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
	}
	REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
}
static void
bnx2_enable_int(struct bnx2 *bp)
{
	int i;
	struct bnx2_napi *bnapi;

	for (i = 0; i < bp->irq_nvecs; i++) {
		bnapi = &bp->bnx2_napi[i];

		REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
		       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
		       BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
		       bnapi->last_status_idx);

		REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
		       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
		       bnapi->last_status_idx);
	}
	REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
}
static void
bnx2_disable_int_sync(struct bnx2 *bp)
{
	int i;

	atomic_inc(&bp->intr_sem);
	bnx2_disable_int(bp);
	for (i = 0; i < bp->irq_nvecs; i++)
		synchronize_irq(bp->irq_tbl[i].vector);
}

static void
bnx2_napi_disable(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->irq_nvecs; i++)
		napi_disable(&bp->bnx2_napi[i].napi);
}

static void
bnx2_napi_enable(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->irq_nvecs; i++)
		napi_enable(&bp->bnx2_napi[i].napi);
}
static void
bnx2_netif_stop(struct bnx2 *bp)
{
	bnx2_disable_int_sync(bp);
	if (netif_running(bp->dev)) {
		bnx2_napi_disable(bp);
		netif_tx_disable(bp->dev);
		bp->dev->trans_start = jiffies;	/* prevent tx timeout */
	}
}

static void
bnx2_netif_start(struct bnx2 *bp)
{
	if (atomic_dec_and_test(&bp->intr_sem)) {
		if (netif_running(bp->dev)) {
			netif_tx_wake_all_queues(bp->dev);
			bnx2_napi_enable(bp);
			bnx2_enable_int(bp);
		}
	}
}
static void
bnx2_free_tx_mem(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->num_tx_rings; i++) {
		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
		struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;

		if (txr->tx_desc_ring) {
			pci_free_consistent(bp->pdev, TXBD_RING_SIZE,
					    txr->tx_desc_ring,
					    txr->tx_desc_mapping);
			txr->tx_desc_ring = NULL;
		}
		kfree(txr->tx_buf_ring);
		txr->tx_buf_ring = NULL;
	}
}
static void
bnx2_free_rx_mem(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->num_rx_rings; i++) {
		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
		struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
		int j;

		for (j = 0; j < bp->rx_max_ring; j++) {
			if (rxr->rx_desc_ring[j])
				pci_free_consistent(bp->pdev, RXBD_RING_SIZE,
						    rxr->rx_desc_ring[j],
						    rxr->rx_desc_mapping[j]);
			rxr->rx_desc_ring[j] = NULL;
		}
		if (rxr->rx_buf_ring)
			vfree(rxr->rx_buf_ring);
		rxr->rx_buf_ring = NULL;

		for (j = 0; j < bp->rx_max_pg_ring; j++) {
			if (rxr->rx_pg_desc_ring[j])
				pci_free_consistent(bp->pdev, RXBD_RING_SIZE,
						    rxr->rx_pg_desc_ring[j],
						    rxr->rx_pg_desc_mapping[j]);
			rxr->rx_pg_desc_ring[j] = NULL;
		}
		if (rxr->rx_pg_ring)
			vfree(rxr->rx_pg_ring);
		rxr->rx_pg_ring = NULL;
	}
}
static int
bnx2_alloc_tx_mem(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->num_tx_rings; i++) {
		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
		struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;

		txr->tx_buf_ring = kzalloc(SW_TXBD_RING_SIZE, GFP_KERNEL);
		if (txr->tx_buf_ring == NULL)
			return -ENOMEM;

		txr->tx_desc_ring =
			pci_alloc_consistent(bp->pdev, TXBD_RING_SIZE,
					     &txr->tx_desc_mapping);
		if (txr->tx_desc_ring == NULL)
			return -ENOMEM;
	}
	return 0;
}
static int
bnx2_alloc_rx_mem(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->num_rx_rings; i++) {
		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
		struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
		int j;

		rxr->rx_buf_ring =
			vmalloc(SW_RXBD_RING_SIZE * bp->rx_max_ring);
		if (rxr->rx_buf_ring == NULL)
			return -ENOMEM;

		memset(rxr->rx_buf_ring, 0,
		       SW_RXBD_RING_SIZE * bp->rx_max_ring);

		for (j = 0; j < bp->rx_max_ring; j++) {
			rxr->rx_desc_ring[j] =
				pci_alloc_consistent(bp->pdev, RXBD_RING_SIZE,
						     &rxr->rx_desc_mapping[j]);
			if (rxr->rx_desc_ring[j] == NULL)
				return -ENOMEM;
		}

		if (bp->rx_pg_ring_size) {
			rxr->rx_pg_ring = vmalloc(SW_RXPG_RING_SIZE *
						  bp->rx_max_pg_ring);
			if (rxr->rx_pg_ring == NULL)
				return -ENOMEM;

			memset(rxr->rx_pg_ring, 0, SW_RXPG_RING_SIZE *
			       bp->rx_max_pg_ring);
		}

		for (j = 0; j < bp->rx_max_pg_ring; j++) {
			rxr->rx_pg_desc_ring[j] =
				pci_alloc_consistent(bp->pdev, RXBD_RING_SIZE,
						&rxr->rx_pg_desc_mapping[j]);
			if (rxr->rx_pg_desc_ring[j] == NULL)
				return -ENOMEM;
		}
	}
	return 0;
}
static void
bnx2_free_mem(struct bnx2 *bp)
{
	int i;
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];

	bnx2_free_tx_mem(bp);
	bnx2_free_rx_mem(bp);

	for (i = 0; i < bp->ctx_pages; i++) {
		if (bp->ctx_blk[i]) {
			pci_free_consistent(bp->pdev, BCM_PAGE_SIZE,
					    bp->ctx_blk[i],
					    bp->ctx_blk_mapping[i]);
			bp->ctx_blk[i] = NULL;
		}
	}
	if (bnapi->status_blk.msi) {
		pci_free_consistent(bp->pdev, bp->status_stats_size,
				    bnapi->status_blk.msi,
				    bp->status_blk_mapping);
		bnapi->status_blk.msi = NULL;
		bp->stats_blk = NULL;
	}
}
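/* The status block(s) and the statistics block are carved out of a single
 * coherent DMA allocation; with MSI-X, each vector gets its own cache-line
 * aligned status block slice within that allocation.
 */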
static int
bnx2_alloc_mem(struct bnx2 *bp)
{
	int i, status_blk_size, err;
	struct bnx2_napi *bnapi;
	void *status_blk;

	/* Combine status and statistics blocks into one allocation. */
	status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
	if (bp->flags & BNX2_FLAG_MSIX_CAP)
		status_blk_size = L1_CACHE_ALIGN(BNX2_MAX_MSIX_HW_VEC *
						 BNX2_SBLK_MSIX_ALIGN_SIZE);
	bp->status_stats_size = status_blk_size +
				sizeof(struct statistics_block);

	status_blk = pci_alloc_consistent(bp->pdev, bp->status_stats_size,
					  &bp->status_blk_mapping);
	if (status_blk == NULL)
		goto alloc_mem_err;

	memset(status_blk, 0, bp->status_stats_size);

	bnapi = &bp->bnx2_napi[0];
	bnapi->status_blk.msi = status_blk;
	bnapi->hw_tx_cons_ptr =
		&bnapi->status_blk.msi->status_tx_quick_consumer_index0;
	bnapi->hw_rx_cons_ptr =
		&bnapi->status_blk.msi->status_rx_quick_consumer_index0;
	if (bp->flags & BNX2_FLAG_MSIX_CAP) {
		for (i = 1; i < BNX2_MAX_MSIX_VEC; i++) {
			struct status_block_msix *sblk;

			bnapi = &bp->bnx2_napi[i];

			sblk = (void *) (status_blk +
					 BNX2_SBLK_MSIX_ALIGN_SIZE * i);
			bnapi->status_blk.msix = sblk;
			bnapi->hw_tx_cons_ptr =
				&sblk->status_tx_quick_consumer_index;
			bnapi->hw_rx_cons_ptr =
				&sblk->status_rx_quick_consumer_index;
			bnapi->int_num = i << 24;
		}
	}

	bp->stats_blk = status_blk + status_blk_size;

	bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		bp->ctx_pages = 0x2000 / BCM_PAGE_SIZE;
		if (bp->ctx_pages == 0)
			bp->ctx_pages = 1;
		for (i = 0; i < bp->ctx_pages; i++) {
			bp->ctx_blk[i] = pci_alloc_consistent(bp->pdev,
						BCM_PAGE_SIZE,
						&bp->ctx_blk_mapping[i]);
			if (bp->ctx_blk[i] == NULL)
				goto alloc_mem_err;
		}
	}

	err = bnx2_alloc_rx_mem(bp);
	if (err)
		goto alloc_mem_err;

	err = bnx2_alloc_tx_mem(bp);
	if (err)
		goto alloc_mem_err;

	return 0;

alloc_mem_err:
	bnx2_free_mem(bp);
	return -ENOMEM;
}
static void
bnx2_report_fw_link(struct bnx2 *bp)
{
	u32 fw_link_status = 0;

	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return;

	if (bp->link_up) {
		u32 bmsr;

		switch (bp->line_speed) {
		case SPEED_10:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_10HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_10FULL;
			break;
		case SPEED_100:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_100HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_100FULL;
			break;
		case SPEED_1000:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_1000HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_1000FULL;
			break;
		case SPEED_2500:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_2500HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_2500FULL;
			break;
		}

		fw_link_status |= BNX2_LINK_STATUS_LINK_UP;

		if (bp->autoneg) {
			fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;

			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);

			if (!(bmsr & BMSR_ANEGCOMPLETE) ||
			    bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)
				fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
			else
				fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
		}
	}
	else
		fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;

	bnx2_shmem_wr(bp, BNX2_LINK_STATUS, fw_link_status);
}
static char *
bnx2_xceiver_str(struct bnx2 *bp)
{
	return ((bp->phy_port == PORT_FIBRE) ? "SerDes" :
		((bp->phy_flags & BNX2_PHY_FLAG_SERDES) ? "Remote Copper" :
		 "Copper"));
}

static void
bnx2_report_link(struct bnx2 *bp)
{
	if (bp->link_up) {
		netif_carrier_on(bp->dev);
		printk(KERN_INFO PFX "%s NIC %s Link is Up, ", bp->dev->name,
		       bnx2_xceiver_str(bp));

		printk("%d Mbps ", bp->line_speed);

		if (bp->duplex == DUPLEX_FULL)
			printk("full duplex");
		else
			printk("half duplex");

		if (bp->flow_ctrl) {
			if (bp->flow_ctrl & FLOW_CTRL_RX) {
				printk(", receive ");
				if (bp->flow_ctrl & FLOW_CTRL_TX)
					printk("& transmit ");
			}
			else {
				printk(", transmit ");
			}
			printk("flow control ON");
		}
		printk("\n");
	}
	else {
		netif_carrier_off(bp->dev);
		printk(KERN_ERR PFX "%s NIC %s Link is Down\n", bp->dev->name,
		       bnx2_xceiver_str(bp));
	}

	bnx2_report_fw_link(bp);
}
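/* Flow-control resolution: when both speed and flow-control autoneg are
 * enabled and the link is full duplex, the pause outcome is derived from
 * the local and link-partner advertisements (1000Base-X pause bits are
 * first mapped onto their copper PAUSE_CAP/PAUSE_ASYM equivalents).
 */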
static void
bnx2_resolve_flow_ctrl(struct bnx2 *bp)
{
	u32 local_adv, remote_adv;

	bp->flow_ctrl = 0;
	if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
	    (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {

		if (bp->duplex == DUPLEX_FULL) {
			bp->flow_ctrl = bp->req_flow_ctrl;
		}
		return;
	}

	if (bp->duplex != DUPLEX_FULL) {
		return;
	}

	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5708)) {
		u32 val;

		bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
		if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_TX;
		if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_RX;
		return;
	}

	bnx2_read_phy(bp, bp->mii_adv, &local_adv);
	bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		u32 new_local_adv = 0;
		u32 new_remote_adv = 0;

		if (local_adv & ADVERTISE_1000XPAUSE)
			new_local_adv |= ADVERTISE_PAUSE_CAP;
		if (local_adv & ADVERTISE_1000XPSE_ASYM)
			new_local_adv |= ADVERTISE_PAUSE_ASYM;
		if (remote_adv & ADVERTISE_1000XPAUSE)
			new_remote_adv |= ADVERTISE_PAUSE_CAP;
		if (remote_adv & ADVERTISE_1000XPSE_ASYM)
			new_remote_adv |= ADVERTISE_PAUSE_ASYM;

		local_adv = new_local_adv;
		remote_adv = new_remote_adv;
	}

	/* See Table 28B-3 of 802.3ab-1999 spec. */
	if (local_adv & ADVERTISE_PAUSE_CAP) {
		if (local_adv & ADVERTISE_PAUSE_ASYM) {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
			else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
				bp->flow_ctrl = FLOW_CTRL_RX;
			}
		}
		else {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
		}
	}
	else if (local_adv & ADVERTISE_PAUSE_ASYM) {
		if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
		    (remote_adv & ADVERTISE_PAUSE_ASYM)) {

			bp->flow_ctrl = FLOW_CTRL_TX;
		}
	}
}
static int
bnx2_5709s_linkup(struct bnx2 *bp)
{
	u32 val, speed;

	bp->link_up = 1;

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_GP_STATUS);
	bnx2_read_phy(bp, MII_BNX2_GP_TOP_AN_STATUS1, &val);
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	if ((bp->autoneg & AUTONEG_SPEED) == 0) {
		bp->line_speed = bp->req_line_speed;
		bp->duplex = bp->req_duplex;
		return 0;
	}
	speed = val & MII_BNX2_GP_TOP_AN_SPEED_MSK;
	switch (speed) {
		case MII_BNX2_GP_TOP_AN_SPEED_10:
			bp->line_speed = SPEED_10;
			break;
		case MII_BNX2_GP_TOP_AN_SPEED_100:
			bp->line_speed = SPEED_100;
			break;
		case MII_BNX2_GP_TOP_AN_SPEED_1G:
		case MII_BNX2_GP_TOP_AN_SPEED_1GKV:
			bp->line_speed = SPEED_1000;
			break;
		case MII_BNX2_GP_TOP_AN_SPEED_2_5G:
			bp->line_speed = SPEED_2500;
			break;
	}
	if (val & MII_BNX2_GP_TOP_AN_FD)
		bp->duplex = DUPLEX_FULL;
	else
		bp->duplex = DUPLEX_HALF;
	return 0;
}
static int
bnx2_5708s_linkup(struct bnx2 *bp)
{
	u32 val;

	bp->link_up = 1;
	bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
	switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
		case BCM5708S_1000X_STAT1_SPEED_10:
			bp->line_speed = SPEED_10;
			break;
		case BCM5708S_1000X_STAT1_SPEED_100:
			bp->line_speed = SPEED_100;
			break;
		case BCM5708S_1000X_STAT1_SPEED_1G:
			bp->line_speed = SPEED_1000;
			break;
		case BCM5708S_1000X_STAT1_SPEED_2G5:
			bp->line_speed = SPEED_2500;
			break;
	}
	if (val & BCM5708S_1000X_STAT1_FD)
		bp->duplex = DUPLEX_FULL;
	else
		bp->duplex = DUPLEX_HALF;

	return 0;
}
static int
bnx2_5706s_linkup(struct bnx2 *bp)
{
	u32 bmcr, local_adv, remote_adv, common;

	bp->link_up = 1;
	bp->line_speed = SPEED_1000;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
	if (bmcr & BMCR_FULLDPLX) {
		bp->duplex = DUPLEX_FULL;
	}
	else {
		bp->duplex = DUPLEX_HALF;
	}

	if (!(bmcr & BMCR_ANENABLE)) {
		return 0;
	}

	bnx2_read_phy(bp, bp->mii_adv, &local_adv);
	bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

	common = local_adv & remote_adv;
	if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {

		if (common & ADVERTISE_1000XFULL) {
			bp->duplex = DUPLEX_FULL;
		}
		else {
			bp->duplex = DUPLEX_HALF;
		}
	}

	return 0;
}
static int
bnx2_copper_linkup(struct bnx2 *bp)
{
	u32 bmcr;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
	if (bmcr & BMCR_ANENABLE) {
		u32 local_adv, remote_adv, common;

		bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
		bnx2_read_phy(bp, MII_STAT1000, &remote_adv);

		common = local_adv & (remote_adv >> 2);
		if (common & ADVERTISE_1000FULL) {
			bp->line_speed = SPEED_1000;
			bp->duplex = DUPLEX_FULL;
		}
		else if (common & ADVERTISE_1000HALF) {
			bp->line_speed = SPEED_1000;
			bp->duplex = DUPLEX_HALF;
		}
		else {
			bnx2_read_phy(bp, bp->mii_adv, &local_adv);
			bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

			common = local_adv & remote_adv;
			if (common & ADVERTISE_100FULL) {
				bp->line_speed = SPEED_100;
				bp->duplex = DUPLEX_FULL;
			}
			else if (common & ADVERTISE_100HALF) {
				bp->line_speed = SPEED_100;
				bp->duplex = DUPLEX_HALF;
			}
			else if (common & ADVERTISE_10FULL) {
				bp->line_speed = SPEED_10;
				bp->duplex = DUPLEX_FULL;
			}
			else if (common & ADVERTISE_10HALF) {
				bp->line_speed = SPEED_10;
				bp->duplex = DUPLEX_HALF;
			}
			else {
				bp->line_speed = 0;
				bp->link_up = 0;
			}
		}
	}
	else {
		if (bmcr & BMCR_SPEED100) {
			bp->line_speed = SPEED_100;
		}
		else {
			bp->line_speed = SPEED_10;
		}
		if (bmcr & BMCR_FULLDPLX) {
			bp->duplex = DUPLEX_FULL;
		}
		else {
			bp->duplex = DUPLEX_HALF;
		}
	}

	return 0;
}
static void
bnx2_init_rx_context(struct bnx2 *bp, u32 cid)
{
	u32 val, rx_cid_addr = GET_CID_ADDR(cid);

	val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
	val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
	val |= 0x02 << 8;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		u32 lo_water, hi_water;

		if (bp->flow_ctrl & FLOW_CTRL_TX)
			lo_water = BNX2_L2CTX_LO_WATER_MARK_DEFAULT;
		else
			lo_water = BNX2_L2CTX_LO_WATER_MARK_DIS;
		if (lo_water >= bp->rx_ring_size)
			lo_water = 0;

		hi_water = bp->rx_ring_size / 4;

		if (hi_water <= lo_water)
			lo_water = 0;

		hi_water /= BNX2_L2CTX_HI_WATER_MARK_SCALE;
		lo_water /= BNX2_L2CTX_LO_WATER_MARK_SCALE;

		if (hi_water > 0xf)
			hi_water = 0xf;
		else if (hi_water == 0)
			lo_water = 0;
		val |= lo_water | (hi_water << BNX2_L2CTX_HI_WATER_MARK_SHIFT);
	}
	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_CTX_TYPE, val);
}
static void
bnx2_init_all_rx_contexts(struct bnx2 *bp)
{
	int i;
	u32 cid;

	for (i = 0, cid = RX_CID; i < bp->num_rx_rings; i++, cid++) {
		if (i == 1)
			cid = RX_RSS_CID;
		bnx2_init_rx_context(bp, cid);
	}
}
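/* Program the EMAC according to the resolved link: port mode (MII/GMII/2.5G),
 * duplex, and rx/tx PAUSE enables, then acknowledge the link-change attention.
 */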
static void
bnx2_set_mac_link(struct bnx2 *bp)
{
	u32 val;

	REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
	if (bp->link_up && (bp->line_speed == SPEED_1000) &&
	    (bp->duplex == DUPLEX_HALF)) {
		REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
	}

	/* Configure the EMAC mode register. */
	val = REG_RD(bp, BNX2_EMAC_MODE);

	val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
		BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
		BNX2_EMAC_MODE_25G_MODE);

	if (bp->link_up) {
		switch (bp->line_speed) {
			case SPEED_10:
				if (CHIP_NUM(bp) != CHIP_NUM_5706) {
					val |= BNX2_EMAC_MODE_PORT_MII_10M;
					break;
				}
				/* fall through */
			case SPEED_100:
				val |= BNX2_EMAC_MODE_PORT_MII;
				break;
			case SPEED_2500:
				val |= BNX2_EMAC_MODE_25G_MODE;
				/* fall through */
			case SPEED_1000:
				val |= BNX2_EMAC_MODE_PORT_GMII;
				break;
		}
	}
	else {
		val |= BNX2_EMAC_MODE_PORT_GMII;
	}

	/* Set the MAC to operate in the appropriate duplex mode. */
	if (bp->duplex == DUPLEX_HALF)
		val |= BNX2_EMAC_MODE_HALF_DUPLEX;
	REG_WR(bp, BNX2_EMAC_MODE, val);

	/* Enable/disable rx PAUSE. */
	bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_RX)
		bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);

	/* Enable/disable tx PAUSE. */
	val = REG_RD(bp, BNX2_EMAC_TX_MODE);
	val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_TX)
		val |= BNX2_EMAC_TX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_TX_MODE, val);

	/* Acknowledge the interrupt. */
	REG_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_init_all_rx_contexts(bp);
}
static void
bnx2_enable_bmsr1(struct bnx2 *bp)
{
	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5709))
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_GP_STATUS);
}

static void
bnx2_disable_bmsr1(struct bnx2 *bp)
{
	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5709))
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
}
static int
bnx2_test_and_enable_2g5(struct bnx2 *bp)
{
	u32 up1;
	int ret = 1;

	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
		return 0;

	if (bp->autoneg & AUTONEG_SPEED)
		bp->advertising |= ADVERTISED_2500baseX_Full;

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);

	bnx2_read_phy(bp, bp->mii_up1, &up1);
	if (!(up1 & BCM5708S_UP1_2G5)) {
		up1 |= BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, bp->mii_up1, up1);
		ret = 0;
	}

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	return ret;
}

static int
bnx2_test_and_disable_2g5(struct bnx2 *bp)
{
	u32 up1;
	int ret = 0;

	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
		return 0;

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);

	bnx2_read_phy(bp, bp->mii_up1, &up1);
	if (up1 & BCM5708S_UP1_2G5) {
		up1 &= ~BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, bp->mii_up1, up1);
		ret = 1;
	}

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	return ret;
}
static void
bnx2_enable_forced_2g5(struct bnx2 *bp)
{
	u32 bmcr;

	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
		return;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		u32 val;

		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_SERDES_DIG);
		bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
		val &= ~MII_BNX2_SD_MISC1_FORCE_MSK;
		val |= MII_BNX2_SD_MISC1_FORCE | MII_BNX2_SD_MISC1_FORCE_2_5G;
		bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);

		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		bmcr |= BCM5708S_BMCR_FORCE_2500;
	}

	if (bp->autoneg & AUTONEG_SPEED) {
		bmcr &= ~BMCR_ANENABLE;
		if (bp->req_duplex == DUPLEX_FULL)
			bmcr |= BMCR_FULLDPLX;
	}
	bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
}

static void
bnx2_disable_forced_2g5(struct bnx2 *bp)
{
	u32 bmcr;

	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
		return;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		u32 val;

		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_SERDES_DIG);
		bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
		val &= ~MII_BNX2_SD_MISC1_FORCE;
		bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);

		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		bmcr &= ~BCM5708S_BMCR_FORCE_2500;
	}

	if (bp->autoneg & AUTONEG_SPEED)
		bmcr |= BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_ANRESTART;
	bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
}
static void
bnx2_5706s_force_link_dn(struct bnx2 *bp, int start)
{
	u32 val;

	bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_SERDES_CTL);
	bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
	if (start)
		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val & 0xff0f);
	else
		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val | 0xc0);
}
static int
bnx2_set_link(struct bnx2 *bp)
{
	u32 bmsr;
	u8 link_up;

	if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) {
		bp->link_up = 1;
		return 0;
	}

	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return 0;

	link_up = bp->link_up;

	bnx2_enable_bmsr1(bp);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_disable_bmsr1(bp);

	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5706)) {
		u32 val, an_dbg;

		if (bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN) {
			bnx2_5706s_force_link_dn(bp, 0);
			bp->phy_flags &= ~BNX2_PHY_FLAG_FORCED_DOWN;
		}
		val = REG_RD(bp, BNX2_EMAC_STATUS);

		bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);

		if ((val & BNX2_EMAC_STATUS_LINK) &&
		    !(an_dbg & MISC_SHDW_AN_DBG_NOSYNC))
			bmsr |= BMSR_LSTATUS;
		else
			bmsr &= ~BMSR_LSTATUS;
	}

	if (bmsr & BMSR_LSTATUS) {
		bp->link_up = 1;

		if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
			if (CHIP_NUM(bp) == CHIP_NUM_5706)
				bnx2_5706s_linkup(bp);
			else if (CHIP_NUM(bp) == CHIP_NUM_5708)
				bnx2_5708s_linkup(bp);
			else if (CHIP_NUM(bp) == CHIP_NUM_5709)
				bnx2_5709s_linkup(bp);
		}
		else {
			bnx2_copper_linkup(bp);
		}
		bnx2_resolve_flow_ctrl(bp);
	}
	else {
		if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
		    (bp->autoneg & AUTONEG_SPEED))
			bnx2_disable_forced_2g5(bp);

		if (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT) {
			u32 bmcr;

			bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
			bmcr |= BMCR_ANENABLE;
			bnx2_write_phy(bp, bp->mii_bmcr, bmcr);

			bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
		}
		bp->link_up = 0;
	}

	if (bp->link_up != link_up) {
		bnx2_report_link(bp);
	}

	bnx2_set_mac_link(bp);

	return 0;
}
static int
bnx2_reset_phy(struct bnx2 *bp)
{
	int i;
	u32 reg;

	bnx2_write_phy(bp, bp->mii_bmcr, BMCR_RESET);

#define PHY_RESET_MAX_WAIT 100
	for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
		udelay(10);

		bnx2_read_phy(bp, bp->mii_bmcr, &reg);
		if (!(reg & BMCR_RESET)) {
			udelay(20);
			break;
		}
	}
	if (i == PHY_RESET_MAX_WAIT) {
		return -EBUSY;
	}
	return 0;
}
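/* Map the requested flow-control settings onto the PAUSE bits to advertise;
 * SerDes and copper PHYs use different advertisement bit definitions.
 */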
static u32
bnx2_phy_get_pause_adv(struct bnx2 *bp)
{
	u32 adv = 0;

	if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
	    (FLOW_CTRL_RX | FLOW_CTRL_TX)) {

		if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
			adv = ADVERTISE_1000XPAUSE;
		}
		else {
			adv = ADVERTISE_PAUSE_CAP;
		}
	}
	else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
		if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
			adv = ADVERTISE_1000XPSE_ASYM;
		}
		else {
			adv = ADVERTISE_PAUSE_ASYM;
		}
	}
	else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
		if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
			adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
		}
		else {
			adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
		}
	}
	return adv;
}

static int bnx2_fw_sync(struct bnx2 *, u32, int, int);
static int
bnx2_setup_remote_phy(struct bnx2 *bp, u8 port)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
{
	u32 speed_arg = 0, pause_adv;

	pause_adv = bnx2_phy_get_pause_adv(bp);

	if (bp->autoneg & AUTONEG_SPEED) {
		speed_arg |= BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG;
		if (bp->advertising & ADVERTISED_10baseT_Half)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10HALF;
		if (bp->advertising & ADVERTISED_10baseT_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10FULL;
		if (bp->advertising & ADVERTISED_100baseT_Half)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100HALF;
		if (bp->advertising & ADVERTISED_100baseT_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100FULL;
		if (bp->advertising & ADVERTISED_1000baseT_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
		if (bp->advertising & ADVERTISED_2500baseX_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
	} else {
		if (bp->req_line_speed == SPEED_2500)
			speed_arg = BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
		else if (bp->req_line_speed == SPEED_1000)
			speed_arg = BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
		else if (bp->req_line_speed == SPEED_100) {
			if (bp->req_duplex == DUPLEX_FULL)
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100FULL;
			else
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100HALF;
		} else if (bp->req_line_speed == SPEED_10) {
			if (bp->req_duplex == DUPLEX_FULL)
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10FULL;
			else
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10HALF;
		}
	}

	if (pause_adv & (ADVERTISE_1000XPAUSE | ADVERTISE_PAUSE_CAP))
		speed_arg |= BNX2_NETLINK_SET_LINK_FC_SYM_PAUSE;
	if (pause_adv & (ADVERTISE_1000XPSE_ASYM | ADVERTISE_PAUSE_ASYM))
		speed_arg |= BNX2_NETLINK_SET_LINK_FC_ASYM_PAUSE;

	if (port == PORT_TP)
		speed_arg |= BNX2_NETLINK_SET_LINK_PHY_APP_REMOTE |
			     BNX2_NETLINK_SET_LINK_ETH_AT_WIRESPEED;

	bnx2_shmem_wr(bp, BNX2_DRV_MB_ARG0, speed_arg);

	spin_unlock_bh(&bp->phy_lock);
	bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_CMD_SET_LINK, 1, 0);
	spin_lock_bh(&bp->phy_lock);

	return 0;
}
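/* SerDes link setup: with a forced speed, autoneg is cleared and the 2.5G
 * force bits are set or cleared per chip; with autoneg, the advertisement
 * is rewritten and autoneg restarted, and a shortened timer is armed to
 * detect link partners that do not autonegotiate.
 */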
static int
bnx2_setup_serdes_phy(struct bnx2 *bp, u8 port)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
{
	u32 adv, bmcr;
	u32 new_adv = 0;

	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return (bnx2_setup_remote_phy(bp, port));

	if (!(bp->autoneg & AUTONEG_SPEED)) {
		u32 new_bmcr;
		int force_link_down = 0;

		if (bp->req_line_speed == SPEED_2500) {
			if (!bnx2_test_and_enable_2g5(bp))
				force_link_down = 1;
		} else if (bp->req_line_speed == SPEED_1000) {
			if (bnx2_test_and_disable_2g5(bp))
				force_link_down = 1;
		}
		bnx2_read_phy(bp, bp->mii_adv, &adv);
		adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		new_bmcr = bmcr & ~BMCR_ANENABLE;
		new_bmcr |= BMCR_SPEED1000;

		if (CHIP_NUM(bp) == CHIP_NUM_5709) {
			if (bp->req_line_speed == SPEED_2500)
				bnx2_enable_forced_2g5(bp);
			else if (bp->req_line_speed == SPEED_1000) {
				bnx2_disable_forced_2g5(bp);
				new_bmcr &= ~0x2000;
			}

		} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
			if (bp->req_line_speed == SPEED_2500)
				new_bmcr |= BCM5708S_BMCR_FORCE_2500;
			else
				new_bmcr = bmcr & ~BCM5708S_BMCR_FORCE_2500;
		}

		if (bp->req_duplex == DUPLEX_FULL) {
			adv |= ADVERTISE_1000XFULL;
			new_bmcr |= BMCR_FULLDPLX;
		}
		else {
			adv |= ADVERTISE_1000XHALF;
			new_bmcr &= ~BMCR_FULLDPLX;
		}
		if ((new_bmcr != bmcr) || (force_link_down)) {
			/* Force a link down visible on the other side */
			if (bp->link_up) {
				bnx2_write_phy(bp, bp->mii_adv, adv &
					       ~(ADVERTISE_1000XFULL |
						 ADVERTISE_1000XHALF));
				bnx2_write_phy(bp, bp->mii_bmcr, bmcr |
					BMCR_ANRESTART | BMCR_ANENABLE);

				bp->link_up = 0;
				netif_carrier_off(bp->dev);
				bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
				bnx2_report_link(bp);
			}
			bnx2_write_phy(bp, bp->mii_adv, adv);
			bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
		} else {
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	bnx2_test_and_enable_2g5(bp);

	if (bp->advertising & ADVERTISED_1000baseT_Full)
		new_adv |= ADVERTISE_1000XFULL;

	new_adv |= bnx2_phy_get_pause_adv(bp);

	bnx2_read_phy(bp, bp->mii_adv, &adv);
	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	bp->serdes_an_pending = 0;
	if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
		/* Force a link down visible on the other side */
		if (bp->link_up) {
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
			spin_unlock_bh(&bp->phy_lock);
			msleep(20);
			spin_lock_bh(&bp->phy_lock);
		}

		bnx2_write_phy(bp, bp->mii_adv, new_adv);
		bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART |
			BMCR_ANENABLE);
		/* Speed up link-up time when the link partner
		 * does not autonegotiate which is very common
		 * in blade servers. Some blade servers use
		 * IPMI for keyboard input and it's important
		 * to minimize link disruptions. Autoneg. involves
		 * exchanging base pages plus 3 next pages and
		 * normally completes in about 120 msec.
		 */
		bp->current_interval = BNX2_SERDES_AN_TIMEOUT;
		bp->serdes_an_pending = 1;
		mod_timer(&bp->timer, jiffies + bp->current_interval);
	} else {
		bnx2_resolve_flow_ctrl(bp);
		bnx2_set_mac_link(bp);
	}

	return 0;
}
#define ETHTOOL_ALL_FIBRE_SPEED						\
	(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) ?			\
		(ADVERTISED_2500baseX_Full | ADVERTISED_1000baseT_Full) :\
		(ADVERTISED_1000baseT_Full)

#define ETHTOOL_ALL_COPPER_SPEED					\
	(ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |		\
	ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |		\
	ADVERTISED_1000baseT_Full)

#define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
	ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)

#define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)
static void
bnx2_set_default_remote_link(struct bnx2 *bp)
{
	u32 link;

	if (bp->phy_port == PORT_TP)
		link = bnx2_shmem_rd(bp, BNX2_RPHY_COPPER_LINK);
	else
		link = bnx2_shmem_rd(bp, BNX2_RPHY_SERDES_LINK);

	if (link & BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG) {
		bp->req_line_speed = 0;
		bp->autoneg |= AUTONEG_SPEED;
		bp->advertising = ADVERTISED_Autoneg;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
			bp->advertising |= ADVERTISED_10baseT_Half;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_10FULL)
			bp->advertising |= ADVERTISED_10baseT_Full;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
			bp->advertising |= ADVERTISED_100baseT_Half;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_100FULL)
			bp->advertising |= ADVERTISED_100baseT_Full;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
			bp->advertising |= ADVERTISED_1000baseT_Full;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
			bp->advertising |= ADVERTISED_2500baseX_Full;
	} else {
		bp->autoneg = 0;
		bp->advertising = 0;
		bp->req_duplex = DUPLEX_FULL;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_10) {
			bp->req_line_speed = SPEED_10;
			if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
				bp->req_duplex = DUPLEX_HALF;
		}
		if (link & BNX2_NETLINK_SET_LINK_SPEED_100) {
			bp->req_line_speed = SPEED_100;
			if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
				bp->req_duplex = DUPLEX_HALF;
		}
		if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
			bp->req_line_speed = SPEED_1000;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
			bp->req_line_speed = SPEED_2500;
	}
}
static void
bnx2_set_default_link(struct bnx2 *bp)
{
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
		bnx2_set_default_remote_link(bp);
		return;
	}

	bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
	bp->req_line_speed = 0;
	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		u32 reg;

		bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;

		reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG);
		reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
		if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
			bp->autoneg = 0;
			bp->req_line_speed = bp->line_speed = SPEED_1000;
			bp->req_duplex = DUPLEX_FULL;
		}
	} else
		bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
}
static void
bnx2_send_heart_beat(struct bnx2 *bp)
{
	u32 msg;
	u32 addr;

	spin_lock(&bp->indirect_lock);
	msg = (u32) (++bp->fw_drv_pulse_wr_seq & BNX2_DRV_PULSE_SEQ_MASK);
	addr = bp->shmem_base + BNX2_DRV_PULSE_MB;
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, addr);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW, msg);
	spin_unlock(&bp->indirect_lock);
}
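/* Remote-PHY (firmware controlled) link events are reported through the
 * shared memory link status word; decode it into link state, speed, duplex,
 * flow control and port type.
 */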
static void
bnx2_remote_phy_event(struct bnx2 *bp)
{
	u32 msg;
	u8 link_up = bp->link_up;
	u8 old_port;

	msg = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);

	if (msg & BNX2_LINK_STATUS_HEART_BEAT_EXPIRED)
		bnx2_send_heart_beat(bp);

	msg &= ~BNX2_LINK_STATUS_HEART_BEAT_EXPIRED;

	if ((msg & BNX2_LINK_STATUS_LINK_UP) == BNX2_LINK_STATUS_LINK_DOWN)
		bp->link_up = 0;
	else {
		u32 speed;

		bp->link_up = 1;
		speed = msg & BNX2_LINK_STATUS_SPEED_MASK;
		bp->duplex = DUPLEX_FULL;
		switch (speed) {
			case BNX2_LINK_STATUS_10HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_10FULL:
				bp->line_speed = SPEED_10;
				break;
			case BNX2_LINK_STATUS_100HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_100BASE_T4:
			case BNX2_LINK_STATUS_100FULL:
				bp->line_speed = SPEED_100;
				break;
			case BNX2_LINK_STATUS_1000HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_1000FULL:
				bp->line_speed = SPEED_1000;
				break;
			case BNX2_LINK_STATUS_2500HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_2500FULL:
				bp->line_speed = SPEED_2500;
				break;
			default:
				bp->line_speed = 0;
				break;
		}

		bp->flow_ctrl = 0;
		if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
		    (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
			if (bp->duplex == DUPLEX_FULL)
				bp->flow_ctrl = bp->req_flow_ctrl;
		} else {
			if (msg & BNX2_LINK_STATUS_TX_FC_ENABLED)
				bp->flow_ctrl |= FLOW_CTRL_TX;
			if (msg & BNX2_LINK_STATUS_RX_FC_ENABLED)
				bp->flow_ctrl |= FLOW_CTRL_RX;
		}

		old_port = bp->phy_port;
		if (msg & BNX2_LINK_STATUS_SERDES_LINK)
			bp->phy_port = PORT_FIBRE;
		else
			bp->phy_port = PORT_TP;

		if (old_port != bp->phy_port)
			bnx2_set_default_link(bp);
	}

	if (bp->link_up != link_up)
		bnx2_report_link(bp);

	bnx2_set_mac_link(bp);
}
static int
bnx2_set_remote_link(struct bnx2 *bp)
{
	u32 evt_code;

	evt_code = bnx2_shmem_rd(bp, BNX2_FW_EVT_CODE_MB);
	switch (evt_code) {
		case BNX2_FW_EVT_CODE_LINK_EVENT:
			bnx2_remote_phy_event(bp);
			break;
		case BNX2_FW_EVT_CODE_SW_TIMER_EXPIRATION_EVENT:
		default:
			bnx2_send_heart_beat(bp);
			break;
	}
	return 0;
}
static int
bnx2_setup_copper_phy(struct bnx2 *bp)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
{
	u32 bmcr;
	u32 new_bmcr;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	if (bp->autoneg & AUTONEG_SPEED) {
		u32 adv_reg, adv1000_reg;
		u32 new_adv_reg = 0;
		u32 new_adv1000_reg = 0;

		bnx2_read_phy(bp, bp->mii_adv, &adv_reg);
		adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
			ADVERTISE_PAUSE_ASYM);

		bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
		adv1000_reg &= PHY_ALL_1000_SPEED;

		if (bp->advertising & ADVERTISED_10baseT_Half)
			new_adv_reg |= ADVERTISE_10HALF;
		if (bp->advertising & ADVERTISED_10baseT_Full)
			new_adv_reg |= ADVERTISE_10FULL;
		if (bp->advertising & ADVERTISED_100baseT_Half)
			new_adv_reg |= ADVERTISE_100HALF;
		if (bp->advertising & ADVERTISED_100baseT_Full)
			new_adv_reg |= ADVERTISE_100FULL;
		if (bp->advertising & ADVERTISED_1000baseT_Full)
			new_adv1000_reg |= ADVERTISE_1000FULL;

		new_adv_reg |= ADVERTISE_CSMA;

		new_adv_reg |= bnx2_phy_get_pause_adv(bp);

		if ((adv1000_reg != new_adv1000_reg) ||
			(adv_reg != new_adv_reg) ||
			((bmcr & BMCR_ANENABLE) == 0)) {

			bnx2_write_phy(bp, bp->mii_adv, new_adv_reg);
			bnx2_write_phy(bp, MII_CTRL1000, new_adv1000_reg);
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_ANRESTART |
				BMCR_ANENABLE);
		}
		else if (bp->link_up) {
			/* Flow ctrl may have changed from auto to forced */
			/* or vice-versa. */

			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	new_bmcr = 0;
	if (bp->req_line_speed == SPEED_100) {
		new_bmcr |= BMCR_SPEED100;
	}
	if (bp->req_duplex == DUPLEX_FULL) {
		new_bmcr |= BMCR_FULLDPLX;
	}
	if (new_bmcr != bmcr) {
		u32 bmsr;

		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);

		if (bmsr & BMSR_LSTATUS) {
			/* Force link down */
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
			spin_unlock_bh(&bp->phy_lock);
			msleep(50);
			spin_lock_bh(&bp->phy_lock);

			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
		}

		bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);

		/* Normally, the new speed is setup after the link has
		 * gone down and up again. In some cases, link will not go
		 * down so we need to set up the new speed here.
		 */
		if (bmsr & BMSR_LSTATUS) {
			bp->line_speed = bp->req_line_speed;
			bp->duplex = bp->req_duplex;
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
	} else {
		bnx2_resolve_flow_ctrl(bp);
		bnx2_set_mac_link(bp);
	}
	return 0;
}
static int
bnx2_setup_phy(struct bnx2 *bp, u8 port)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
{
	if (bp->loopback == MAC_LOOPBACK)
		return 0;

	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		return (bnx2_setup_serdes_phy(bp, port));
	}
	else {
		return (bnx2_setup_copper_phy(bp));
	}
}
static int
bnx2_init_5709s_phy(struct bnx2 *bp, int reset_phy)
{
	u32 val;

	bp->mii_bmcr = MII_BMCR + 0x10;
	bp->mii_bmsr = MII_BMSR + 0x10;
	bp->mii_bmsr1 = MII_BNX2_GP_TOP_AN_STATUS1;
	bp->mii_adv = MII_ADVERTISE + 0x10;
	bp->mii_lpa = MII_LPA + 0x10;
	bp->mii_up1 = MII_BNX2_OVER1G_UP1;

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_AER);
	bnx2_write_phy(bp, MII_BNX2_AER_AER, MII_BNX2_AER_AER_AN_MMD);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
	if (reset_phy)
		bnx2_reset_phy(bp);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_SERDES_DIG);

	bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, &val);
	val &= ~MII_BNX2_SD_1000XCTL1_AUTODET;
	val |= MII_BNX2_SD_1000XCTL1_FIBER;
	bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, val);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
	bnx2_read_phy(bp, MII_BNX2_OVER1G_UP1, &val);
	if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
		val |= BCM5708S_UP1_2G5;
	else
		val &= ~BCM5708S_UP1_2G5;
	bnx2_write_phy(bp, MII_BNX2_OVER1G_UP1, val);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_BAM_NXTPG);
	bnx2_read_phy(bp, MII_BNX2_BAM_NXTPG_CTL, &val);
	val |= MII_BNX2_NXTPG_CTL_T2 | MII_BNX2_NXTPG_CTL_BAM;
	bnx2_write_phy(bp, MII_BNX2_BAM_NXTPG_CTL, val);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_CL73_USERB0);

	val = MII_BNX2_CL73_BAM_EN | MII_BNX2_CL73_BAM_STA_MGR_EN |
	      MII_BNX2_CL73_BAM_NP_AFT_BP_EN;
	bnx2_write_phy(bp, MII_BNX2_CL73_BAM_CTL1, val);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	return 0;
}
static int
bnx2_init_5708s_phy(struct bnx2 *bp, int reset_phy)
{
	u32 val;

	if (reset_phy)
		bnx2_reset_phy(bp);

	bp->mii_up1 = BCM5708S_UP1;

	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
	bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);

	bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
	val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);

	bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
	val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);

	if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) {
		bnx2_read_phy(bp, BCM5708S_UP1, &val);
		val |= BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, BCM5708S_UP1, val);
	}

	if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
		/* increase tx signal amplitude */
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
			       BCM5708S_BLK_ADDR_TX_MISC);
		bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
		val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
		bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
	}

	val = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG) &
	      BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;

	if (val) {
		u32 is_backplane;

		is_backplane = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
		if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_TX_MISC);
			bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_DIG);
		}
	}
	return 0;
}
2099 static int
2100 bnx2_init_5706s_phy(struct bnx2 *bp, int reset_phy)
2102 if (reset_phy)
2103 bnx2_reset_phy(bp);
2105 bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
2107 if (CHIP_NUM(bp) == CHIP_NUM_5706)
2108 REG_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300);
2110 if (bp->dev->mtu > 1500) {
2111 u32 val;
2113 /* Set extended packet length bit */
2114 bnx2_write_phy(bp, 0x18, 0x7);
2115 bnx2_read_phy(bp, 0x18, &val);
2116 bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);
2118 bnx2_write_phy(bp, 0x1c, 0x6c00);
2119 bnx2_read_phy(bp, 0x1c, &val);
2120 bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
2122 else {
2123 u32 val;
2125 bnx2_write_phy(bp, 0x18, 0x7);
2126 bnx2_read_phy(bp, 0x18, &val);
2127 bnx2_write_phy(bp, 0x18, val & ~0x4007);
2129 bnx2_write_phy(bp, 0x1c, 0x6c00);
2130 bnx2_read_phy(bp, 0x1c, &val);
2131 bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
2134 return 0;
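/* Bring up the copper PHY: apply the CRC and early-DAC workarounds
 * when flagged, program the extended packet length bit to match the
 * MTU, and enable ethernet@wirespeed.
 */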
2137 static int
2138 bnx2_init_copper_phy(struct bnx2 *bp, int reset_phy)
2140 u32 val;
2142 if (reset_phy)
2143 bnx2_reset_phy(bp);
2145 if (bp->phy_flags & BNX2_PHY_FLAG_CRC_FIX) {
2146 bnx2_write_phy(bp, 0x18, 0x0c00);
2147 bnx2_write_phy(bp, 0x17, 0x000a);
2148 bnx2_write_phy(bp, 0x15, 0x310b);
2149 bnx2_write_phy(bp, 0x17, 0x201f);
2150 bnx2_write_phy(bp, 0x15, 0x9506);
2151 bnx2_write_phy(bp, 0x17, 0x401f);
2152 bnx2_write_phy(bp, 0x15, 0x14e2);
2153 bnx2_write_phy(bp, 0x18, 0x0400);
2156 if (bp->phy_flags & BNX2_PHY_FLAG_DIS_EARLY_DAC) {
2157 bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS,
2158 MII_BNX2_DSP_EXPAND_REG | 0x8);
2159 bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
2160 val &= ~(1 << 8);
2161 bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val);
2164 if (bp->dev->mtu > 1500) {
2165 /* Set extended packet length bit */
2166 bnx2_write_phy(bp, 0x18, 0x7);
2167 bnx2_read_phy(bp, 0x18, &val);
2168 bnx2_write_phy(bp, 0x18, val | 0x4000);
2170 bnx2_read_phy(bp, 0x10, &val);
2171 bnx2_write_phy(bp, 0x10, val | 0x1);
2173 else {
2174 bnx2_write_phy(bp, 0x18, 0x7);
2175 bnx2_read_phy(bp, 0x18, &val);
2176 bnx2_write_phy(bp, 0x18, val & ~0x4007);
2178 bnx2_read_phy(bp, 0x10, &val);
2179 bnx2_write_phy(bp, 0x10, val & ~0x1);
2182 /* ethernet@wirespeed */
2183 bnx2_write_phy(bp, 0x18, 0x7007);
2184 bnx2_read_phy(bp, 0x18, &val);
2185 bnx2_write_phy(bp, 0x18, val | (1 << 15) | (1 << 4));
2186 return 0;
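/* Top-level PHY init.  Selects link-ready interrupt mode, reads the
 * PHY ID, and dispatches to the chip-specific SerDes or copper init
 * routine before calling bnx2_setup_phy().  All of that is skipped
 * when the PHY is managed remotely by the firmware.
 */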
2190 static int
2191 bnx2_init_phy(struct bnx2 *bp, int reset_phy)
2192 __releases(&bp->phy_lock)
2193 __acquires(&bp->phy_lock)
2195 u32 val;
2196 int rc = 0;
2198 bp->phy_flags &= ~BNX2_PHY_FLAG_INT_MODE_MASK;
2199 bp->phy_flags |= BNX2_PHY_FLAG_INT_MODE_LINK_READY;
2201 bp->mii_bmcr = MII_BMCR;
2202 bp->mii_bmsr = MII_BMSR;
2203 bp->mii_bmsr1 = MII_BMSR;
2204 bp->mii_adv = MII_ADVERTISE;
2205 bp->mii_lpa = MII_LPA;
2207 REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
2209 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
2210 goto setup_phy;
2212 bnx2_read_phy(bp, MII_PHYSID1, &val);
2213 bp->phy_id = val << 16;
2214 bnx2_read_phy(bp, MII_PHYSID2, &val);
2215 bp->phy_id |= val & 0xffff;
2217 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
2218 if (CHIP_NUM(bp) == CHIP_NUM_5706)
2219 rc = bnx2_init_5706s_phy(bp, reset_phy);
2220 else if (CHIP_NUM(bp) == CHIP_NUM_5708)
2221 rc = bnx2_init_5708s_phy(bp, reset_phy);
2222 else if (CHIP_NUM(bp) == CHIP_NUM_5709)
2223 rc = bnx2_init_5709s_phy(bp, reset_phy);
2225 else {
2226 rc = bnx2_init_copper_phy(bp, reset_phy);
2229 setup_phy:
2230 if (!rc)
2231 rc = bnx2_setup_phy(bp, bp->phy_port);
2233 return rc;
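/* Force the MAC into internal loopback with the link forced up,
 * presumably for the ethtool loopback self-test.
 */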
2236 static int
2237 bnx2_set_mac_loopback(struct bnx2 *bp)
2239 u32 mac_mode;
2241 mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
2242 mac_mode &= ~BNX2_EMAC_MODE_PORT;
2243 mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
2244 REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
2245 bp->link_up = 1;
2246 return 0;
2249 static int bnx2_test_link(struct bnx2 *);
2251 static int
2252 bnx2_set_phy_loopback(struct bnx2 *bp)
2254 u32 mac_mode;
2255 int rc, i;
2257 spin_lock_bh(&bp->phy_lock);
2258 rc = bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK | BMCR_FULLDPLX |
2259 BMCR_SPEED1000);
2260 spin_unlock_bh(&bp->phy_lock);
2261 if (rc)
2262 return rc;
2264 for (i = 0; i < 10; i++) {
2265 if (bnx2_test_link(bp) == 0)
2266 break;
2267 msleep(100);
2270 mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
2271 mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
2272 BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
2273 BNX2_EMAC_MODE_25G_MODE);
2275 mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
2276 REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
2277 bp->link_up = 1;
2278 return 0;
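/* Post a message to the firmware mailbox, tagged with an
 * incrementing sequence number.  If an ack is requested, poll
 * BNX2_FW_MB until the firmware echoes the sequence back; on timeout
 * the firmware is notified with a FW_TIMEOUT code and -EBUSY is
 * returned.
 */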
2281 static int
2282 bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int ack, int silent)
2284 int i;
2285 u32 val;
2287 bp->fw_wr_seq++;
2288 msg_data |= bp->fw_wr_seq;
2290 bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);
2292 if (!ack)
2293 return 0;
2295 /* Wait for an acknowledgement. */
2296 for (i = 0; i < (BNX2_FW_ACK_TIME_OUT_MS / 10); i++) {
2297 msleep(10);
2299 val = bnx2_shmem_rd(bp, BNX2_FW_MB);
2301 if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
2302 break;
2304 if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
2305 return 0;
2307 /* If we timed out, inform the firmware that this is the case. */
2308 if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
2309 if (!silent)
2310 printk(KERN_ERR PFX "fw sync timeout, reset code = "
2311 "%x\n", msg_data);
2313 msg_data &= ~BNX2_DRV_MSG_CODE;
2314 msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;
2316 bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);
2318 return -EBUSY;
2321 if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
2322 return -EIO;
2324 return 0;
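/* The 5709 keeps its context memory in host pages.  Kick off the
 * on-chip MEM_INIT, then write each context page's DMA address into
 * the host page table, polling for every WRITE_REQ to clear.
 */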
2327 static int
2328 bnx2_init_5709_context(struct bnx2 *bp)
2330 int i, ret = 0;
2331 u32 val;
2333 val = BNX2_CTX_COMMAND_ENABLED | BNX2_CTX_COMMAND_MEM_INIT | (1 << 12);
2334 val |= (BCM_PAGE_BITS - 8) << 16;
2335 REG_WR(bp, BNX2_CTX_COMMAND, val);
2336 for (i = 0; i < 10; i++) {
2337 val = REG_RD(bp, BNX2_CTX_COMMAND);
2338 if (!(val & BNX2_CTX_COMMAND_MEM_INIT))
2339 break;
2340 udelay(2);
2342 if (val & BNX2_CTX_COMMAND_MEM_INIT)
2343 return -EBUSY;
2345 for (i = 0; i < bp->ctx_pages; i++) {
2346 int j;
2348 if (bp->ctx_blk[i])
2349 memset(bp->ctx_blk[i], 0, BCM_PAGE_SIZE);
2350 else
2351 return -ENOMEM;
2353 REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0,
2354 (bp->ctx_blk_mapping[i] & 0xffffffff) |
2355 BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID);
2356 REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA1,
2357 (u64) bp->ctx_blk_mapping[i] >> 32);
2358 REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL, i |
2359 BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
2360 for (j = 0; j < 10; j++) {
2362 val = REG_RD(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL);
2363 if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
2364 break;
2365 udelay(5);
2367 if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
2368 ret = -EBUSY;
2369 break;
2372 return ret;
2375 static void
2376 bnx2_init_context(struct bnx2 *bp)
2378 u32 vcid;
2380 vcid = 96;
2381 while (vcid) {
2382 u32 vcid_addr, pcid_addr, offset;
2383 int i;
2385 vcid--;
2387 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
2388 u32 new_vcid;
2390 vcid_addr = GET_PCID_ADDR(vcid);
2391 if (vcid & 0x8) {
2392 new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
2394 else {
2395 new_vcid = vcid;
2397 pcid_addr = GET_PCID_ADDR(new_vcid);
2399 else {
2400 vcid_addr = GET_CID_ADDR(vcid);
2401 pcid_addr = vcid_addr;
2404 for (i = 0; i < (CTX_SIZE / PHY_CTX_SIZE); i++) {
2405 vcid_addr += (i << PHY_CTX_SHIFT);
2406 pcid_addr += (i << PHY_CTX_SHIFT);
2408 REG_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
2409 REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
2411 /* Zero out the context. */
2412 for (offset = 0; offset < PHY_CTX_SIZE; offset += 4)
2413 bnx2_ctx_wr(bp, vcid_addr, offset, 0);
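/* Quarantine bad RX buffer memory: allocate every mbuf in the chip's
 * pool, remember the ones whose addresses have bit 9 clear, and free
 * only those back so the bad blocks never re-enter circulation.
 */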
2418 static int
2419 bnx2_alloc_bad_rbuf(struct bnx2 *bp)
2421 u16 *good_mbuf;
2422 u32 good_mbuf_cnt;
2423 u32 val;
2425 good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
2426 if (good_mbuf == NULL) {
2427 printk(KERN_ERR PFX "Failed to allocate memory in "
2428 "bnx2_alloc_bad_rbuf\n");
2429 return -ENOMEM;
2432 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
2433 BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);
2435 good_mbuf_cnt = 0;
2437 /* Allocate a bunch of mbufs and save the good ones in an array. */
2438 val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
2439 while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
2440 bnx2_reg_wr_ind(bp, BNX2_RBUF_COMMAND,
2441 BNX2_RBUF_COMMAND_ALLOC_REQ);
2443 val = bnx2_reg_rd_ind(bp, BNX2_RBUF_FW_BUF_ALLOC);
2445 val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;
2447 /* The addresses with Bit 9 set are bad memory blocks. */
2448 if (!(val & (1 << 9))) {
2449 good_mbuf[good_mbuf_cnt] = (u16) val;
2450 good_mbuf_cnt++;
2453 val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
2456 /* Free the good ones back to the mbuf pool, thus discarding
2457 * all the bad ones. */
2458 while (good_mbuf_cnt) {
2459 good_mbuf_cnt--;
2461 val = good_mbuf[good_mbuf_cnt];
2462 val = (val << 9) | val | 1;
2464 bnx2_reg_wr_ind(bp, BNX2_RBUF_FW_BUF_FREE, val);
2466 kfree(good_mbuf);
2467 return 0;
2470 static void
2471 bnx2_set_mac_addr(struct bnx2 *bp, u8 *mac_addr, u32 pos)
2473 u32 val;
2475 val = (mac_addr[0] << 8) | mac_addr[1];
2477 REG_WR(bp, BNX2_EMAC_MAC_MATCH0 + (pos * 8), val);
2479 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
2480 (mac_addr[4] << 8) | mac_addr[5];
2482 REG_WR(bp, BNX2_EMAC_MAC_MATCH1 + (pos * 8), val);
2485 static inline int
2486 bnx2_alloc_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
2488 dma_addr_t mapping;
2489 struct sw_pg *rx_pg = &rxr->rx_pg_ring[index];
2490 struct rx_bd *rxbd =
2491 &rxr->rx_pg_desc_ring[RX_RING(index)][RX_IDX(index)];
2492 struct page *page = alloc_page(GFP_ATOMIC);
2494 if (!page)
2495 return -ENOMEM;
2496 mapping = pci_map_page(bp->pdev, page, 0, PAGE_SIZE,
2497 PCI_DMA_FROMDEVICE);
2498 if (pci_dma_mapping_error(bp->pdev, mapping)) {
2499 __free_page(page);
2500 return -EIO;
2503 rx_pg->page = page;
2504 pci_unmap_addr_set(rx_pg, mapping, mapping);
2505 rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
2506 rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
2507 return 0;
2510 static void
2511 bnx2_free_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
2513 struct sw_pg *rx_pg = &rxr->rx_pg_ring[index];
2514 struct page *page = rx_pg->page;
2516 if (!page)
2517 return;
2519 pci_unmap_page(bp->pdev, pci_unmap_addr(rx_pg, mapping), PAGE_SIZE,
2520 PCI_DMA_FROMDEVICE);
2522 __free_page(page);
2523 rx_pg->page = NULL;
2526 static inline int
2527 bnx2_alloc_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
2529 struct sk_buff *skb;
2530 struct sw_bd *rx_buf = &rxr->rx_buf_ring[index];
2531 dma_addr_t mapping;
2532 struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
2533 unsigned long align;
2535 skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
2536 if (skb == NULL) {
2537 return -ENOMEM;
2540 if (unlikely((align = (unsigned long) skb->data & (BNX2_RX_ALIGN - 1))))
2541 skb_reserve(skb, BNX2_RX_ALIGN - align);
2543 mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
2544 PCI_DMA_FROMDEVICE);
2545 if (pci_dma_mapping_error(bp->pdev, mapping)) {
2546 dev_kfree_skb(skb);
2547 return -EIO;
2550 rx_buf->skb = skb;
2551 pci_unmap_addr_set(rx_buf, mapping, mapping);
2553 rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
2554 rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
2556 rxr->rx_prod_bseq += bp->rx_buf_use_size;
2558 return 0;
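/* Compare an attention bit against its ack bit in the status block.
 * If they differ, latch the new state through the SET/CLEAR command
 * registers and report the event as pending.
 */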
2561 static int
2562 bnx2_phy_event_is_set(struct bnx2 *bp, struct bnx2_napi *bnapi, u32 event)
2564 struct status_block *sblk = bnapi->status_blk.msi;
2565 u32 new_link_state, old_link_state;
2566 int is_set = 1;
2568 new_link_state = sblk->status_attn_bits & event;
2569 old_link_state = sblk->status_attn_bits_ack & event;
2570 if (new_link_state != old_link_state) {
2571 if (new_link_state)
2572 REG_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD, event);
2573 else
2574 REG_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD, event);
2575 } else
2576 is_set = 0;
2578 return is_set;
2581 static void
2582 bnx2_phy_int(struct bnx2 *bp, struct bnx2_napi *bnapi)
2584 spin_lock(&bp->phy_lock);
2586 if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_LINK_STATE))
2587 bnx2_set_link(bp);
2588 if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_TIMER_ABORT))
2589 bnx2_set_remote_link(bp);
2591 spin_unlock(&bp->phy_lock);
2595 static inline u16
2596 bnx2_get_hw_tx_cons(struct bnx2_napi *bnapi)
2598 u16 cons;
2600 /* Tell compiler that status block fields can change. */
2601 barrier();
2602 cons = *bnapi->hw_tx_cons_ptr;
2603 barrier();
2604 if (unlikely((cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT))
2605 cons++;
2606 return cons;
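/* Reclaim completed TX descriptors up to the hardware consumer
 * index, unmapping and freeing each skb, then wake the TX queue if
 * it was stopped and enough descriptors have become free.
 */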
2609 static int
2610 bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
2612 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
2613 u16 hw_cons, sw_cons, sw_ring_cons;
2614 int tx_pkt = 0, index;
2615 struct netdev_queue *txq;
2617 index = (bnapi - bp->bnx2_napi);
2618 txq = netdev_get_tx_queue(bp->dev, index);
2620 hw_cons = bnx2_get_hw_tx_cons(bnapi);
2621 sw_cons = txr->tx_cons;
2623 while (sw_cons != hw_cons) {
2624 struct sw_tx_bd *tx_buf;
2625 struct sk_buff *skb;
2626 int i, last;
2628 sw_ring_cons = TX_RING_IDX(sw_cons);
2630 tx_buf = &txr->tx_buf_ring[sw_ring_cons];
2631 skb = tx_buf->skb;
2633 /* partial BD completions possible with TSO packets */
2634 if (skb_is_gso(skb)) {
2635 u16 last_idx, last_ring_idx;
2637 last_idx = sw_cons +
2638 skb_shinfo(skb)->nr_frags + 1;
2639 last_ring_idx = sw_ring_cons +
2640 skb_shinfo(skb)->nr_frags + 1;
2641 if (unlikely(last_ring_idx >= MAX_TX_DESC_CNT)) {
2642 last_idx++;
2644 if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
2645 break;
2649 skb_dma_unmap(&bp->pdev->dev, skb, DMA_TO_DEVICE);
2651 tx_buf->skb = NULL;
2652 last = skb_shinfo(skb)->nr_frags;
2654 for (i = 0; i < last; i++) {
2655 sw_cons = NEXT_TX_BD(sw_cons);
2658 sw_cons = NEXT_TX_BD(sw_cons);
2660 dev_kfree_skb(skb);
2661 tx_pkt++;
2662 if (tx_pkt == budget)
2663 break;
2665 hw_cons = bnx2_get_hw_tx_cons(bnapi);
2668 txr->hw_tx_cons = hw_cons;
2669 txr->tx_cons = sw_cons;
2671 /* Need to make the tx_cons update visible to bnx2_start_xmit()
2672 * before checking for netif_tx_queue_stopped(). Without the
2673 * memory barrier, there is a small possibility that bnx2_start_xmit()
2674 * will miss it and cause the queue to be stopped forever.
2676 smp_mb();
2678 if (unlikely(netif_tx_queue_stopped(txq)) &&
2679 (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)) {
2680 __netif_tx_lock(txq, smp_processor_id());
2681 if ((netif_tx_queue_stopped(txq)) &&
2682 (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh))
2683 netif_tx_wake_queue(txq);
2684 __netif_tx_unlock(txq);
2687 return tx_pkt;
2690 static void
2691 bnx2_reuse_rx_skb_pages(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
2692 struct sk_buff *skb, int count)
2694 struct sw_pg *cons_rx_pg, *prod_rx_pg;
2695 struct rx_bd *cons_bd, *prod_bd;
2696 int i;
2697 u16 hw_prod, prod;
2698 u16 cons = rxr->rx_pg_cons;
2700 cons_rx_pg = &rxr->rx_pg_ring[cons];
2702 /* The caller was unable to allocate a new page to replace the
2703 * last one in the frags array, so we need to recycle that page
2704 * and then free the skb.
2706 if (skb) {
2707 struct page *page;
2708 struct skb_shared_info *shinfo;
2710 shinfo = skb_shinfo(skb);
2711 shinfo->nr_frags--;
2712 page = shinfo->frags[shinfo->nr_frags].page;
2713 shinfo->frags[shinfo->nr_frags].page = NULL;
2715 cons_rx_pg->page = page;
2716 dev_kfree_skb(skb);
2719 hw_prod = rxr->rx_pg_prod;
2721 for (i = 0; i < count; i++) {
2722 prod = RX_PG_RING_IDX(hw_prod);
2724 prod_rx_pg = &rxr->rx_pg_ring[prod];
2725 cons_rx_pg = &rxr->rx_pg_ring[cons];
2726 cons_bd = &rxr->rx_pg_desc_ring[RX_RING(cons)][RX_IDX(cons)];
2727 prod_bd = &rxr->rx_pg_desc_ring[RX_RING(prod)][RX_IDX(prod)];
2729 if (prod != cons) {
2730 prod_rx_pg->page = cons_rx_pg->page;
2731 cons_rx_pg->page = NULL;
2732 pci_unmap_addr_set(prod_rx_pg, mapping,
2733 pci_unmap_addr(cons_rx_pg, mapping));
2735 prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
2736 prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
2739 cons = RX_PG_RING_IDX(NEXT_RX_BD(cons));
2740 hw_prod = NEXT_RX_BD(hw_prod);
2742 rxr->rx_pg_prod = hw_prod;
2743 rxr->rx_pg_cons = cons;
2746 static inline void
2747 bnx2_reuse_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
2748 struct sk_buff *skb, u16 cons, u16 prod)
2750 struct sw_bd *cons_rx_buf, *prod_rx_buf;
2751 struct rx_bd *cons_bd, *prod_bd;
2753 cons_rx_buf = &rxr->rx_buf_ring[cons];
2754 prod_rx_buf = &rxr->rx_buf_ring[prod];
2756 pci_dma_sync_single_for_device(bp->pdev,
2757 pci_unmap_addr(cons_rx_buf, mapping),
2758 BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
2760 rxr->rx_prod_bseq += bp->rx_buf_use_size;
2762 prod_rx_buf->skb = skb;
2764 if (cons == prod)
2765 return;
2767 pci_unmap_addr_set(prod_rx_buf, mapping,
2768 pci_unmap_addr(cons_rx_buf, mapping));
2770 cons_bd = &rxr->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
2771 prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
2772 prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
2773 prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
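/* Finish assembling one received packet.  The linear buffer is
 * unmapped and replaced; for split or jumbo frames the remainder is
 * attached from the page ring as skb fragments, replenishing each
 * page as it is consumed.  On allocation failure everything is
 * recycled back onto the rings.
 */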
2776 static int
2777 bnx2_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, struct sk_buff *skb,
2778 unsigned int len, unsigned int hdr_len, dma_addr_t dma_addr,
2779 u32 ring_idx)
2781 int err;
2782 u16 prod = ring_idx & 0xffff;
2784 err = bnx2_alloc_rx_skb(bp, rxr, prod);
2785 if (unlikely(err)) {
2786 bnx2_reuse_rx_skb(bp, rxr, skb, (u16) (ring_idx >> 16), prod);
2787 if (hdr_len) {
2788 unsigned int raw_len = len + 4;
2789 int pages = PAGE_ALIGN(raw_len - hdr_len) >> PAGE_SHIFT;
2791 bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages);
2793 return err;
2796 skb_reserve(skb, BNX2_RX_OFFSET);
2797 pci_unmap_single(bp->pdev, dma_addr, bp->rx_buf_use_size,
2798 PCI_DMA_FROMDEVICE);
2800 if (hdr_len == 0) {
2801 skb_put(skb, len);
2802 return 0;
2803 } else {
2804 unsigned int i, frag_len, frag_size, pages;
2805 struct sw_pg *rx_pg;
2806 u16 pg_cons = rxr->rx_pg_cons;
2807 u16 pg_prod = rxr->rx_pg_prod;
2809 frag_size = len + 4 - hdr_len;
2810 pages = PAGE_ALIGN(frag_size) >> PAGE_SHIFT;
2811 skb_put(skb, hdr_len);
2813 for (i = 0; i < pages; i++) {
2814 dma_addr_t mapping_old;
2816 frag_len = min(frag_size, (unsigned int) PAGE_SIZE);
2817 if (unlikely(frag_len <= 4)) {
2818 unsigned int tail = 4 - frag_len;
2820 rxr->rx_pg_cons = pg_cons;
2821 rxr->rx_pg_prod = pg_prod;
2822 bnx2_reuse_rx_skb_pages(bp, rxr, NULL,
2823 pages - i);
2824 skb->len -= tail;
2825 if (i == 0) {
2826 skb->tail -= tail;
2827 } else {
2828 skb_frag_t *frag =
2829 &skb_shinfo(skb)->frags[i - 1];
2830 frag->size -= tail;
2831 skb->data_len -= tail;
2832 skb->truesize -= tail;
2834 return 0;
2836 rx_pg = &rxr->rx_pg_ring[pg_cons];
2838 /* Don't unmap yet. If we're unable to allocate a new
2839 * page, we need to recycle the page and the DMA addr.
2841 mapping_old = pci_unmap_addr(rx_pg, mapping);
2842 if (i == pages - 1)
2843 frag_len -= 4;
2845 skb_fill_page_desc(skb, i, rx_pg->page, 0, frag_len);
2846 rx_pg->page = NULL;
2848 err = bnx2_alloc_rx_page(bp, rxr,
2849 RX_PG_RING_IDX(pg_prod));
2850 if (unlikely(err)) {
2851 rxr->rx_pg_cons = pg_cons;
2852 rxr->rx_pg_prod = pg_prod;
2853 bnx2_reuse_rx_skb_pages(bp, rxr, skb,
2854 pages - i);
2855 return err;
2858 pci_unmap_page(bp->pdev, mapping_old,
2859 PAGE_SIZE, PCI_DMA_FROMDEVICE);
2861 frag_size -= frag_len;
2862 skb->data_len += frag_len;
2863 skb->truesize += frag_len;
2864 skb->len += frag_len;
2866 pg_prod = NEXT_RX_BD(pg_prod);
2867 pg_cons = RX_PG_RING_IDX(NEXT_RX_BD(pg_cons));
2869 rxr->rx_pg_prod = pg_prod;
2870 rxr->rx_pg_cons = pg_cons;
2872 return 0;
2875 static inline u16
2876 bnx2_get_hw_rx_cons(struct bnx2_napi *bnapi)
2878 u16 cons;
2880 /* Tell compiler that status block fields can change. */
2881 barrier();
2882 cons = *bnapi->hw_rx_cons_ptr;
2883 barrier();
2884 if (unlikely((cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT))
2885 cons++;
2886 return cons;
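/* Main RX loop: walk the ring up to the hardware consumer index,
 * drop frames with header errors, copy small packets into fresh
 * skbs, hand large ones to bnx2_rx_skb(), then apply VLAN tag and
 * checksum status before passing each skb up the stack.
 */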
2889 static int
2890 bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
2892 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
2893 u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
2894 struct l2_fhdr *rx_hdr;
2895 int rx_pkt = 0, pg_ring_used = 0;
2897 hw_cons = bnx2_get_hw_rx_cons(bnapi);
2898 sw_cons = rxr->rx_cons;
2899 sw_prod = rxr->rx_prod;
2901 /* A memory barrier is necessary as speculative reads of the rx
2902 * buffer can run ahead of the index in the status block.
2904 rmb();
2905 while (sw_cons != hw_cons) {
2906 unsigned int len, hdr_len;
2907 u32 status;
2908 struct sw_bd *rx_buf;
2909 struct sk_buff *skb;
2910 dma_addr_t dma_addr;
2911 u16 vtag = 0;
2912 int hw_vlan __maybe_unused = 0;
2914 sw_ring_cons = RX_RING_IDX(sw_cons);
2915 sw_ring_prod = RX_RING_IDX(sw_prod);
2917 rx_buf = &rxr->rx_buf_ring[sw_ring_cons];
2918 skb = rx_buf->skb;
2920 rx_buf->skb = NULL;
2922 dma_addr = pci_unmap_addr(rx_buf, mapping);
2924 pci_dma_sync_single_for_cpu(bp->pdev, dma_addr,
2925 BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH,
2926 PCI_DMA_FROMDEVICE);
2928 rx_hdr = (struct l2_fhdr *) skb->data;
2929 len = rx_hdr->l2_fhdr_pkt_len;
2930 status = rx_hdr->l2_fhdr_status;
2932 hdr_len = 0;
2933 if (status & L2_FHDR_STATUS_SPLIT) {
2934 hdr_len = rx_hdr->l2_fhdr_ip_xsum;
2935 pg_ring_used = 1;
2936 } else if (len > bp->rx_jumbo_thresh) {
2937 hdr_len = bp->rx_jumbo_thresh;
2938 pg_ring_used = 1;
2941 if (unlikely(status & (L2_FHDR_ERRORS_BAD_CRC |
2942 L2_FHDR_ERRORS_PHY_DECODE |
2943 L2_FHDR_ERRORS_ALIGNMENT |
2944 L2_FHDR_ERRORS_TOO_SHORT |
2945 L2_FHDR_ERRORS_GIANT_FRAME))) {
2947 bnx2_reuse_rx_skb(bp, rxr, skb, sw_ring_cons,
2948 sw_ring_prod);
2949 if (pg_ring_used) {
2950 int pages;
2952 pages = PAGE_ALIGN(len - hdr_len) >> PAGE_SHIFT;
2954 bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages);
2956 goto next_rx;
2959 len -= 4;
2961 if (len <= bp->rx_copy_thresh) {
2962 struct sk_buff *new_skb;
2964 new_skb = netdev_alloc_skb(bp->dev, len + 6);
2965 if (new_skb == NULL) {
2966 bnx2_reuse_rx_skb(bp, rxr, skb, sw_ring_cons,
2967 sw_ring_prod);
2968 goto next_rx;
2971 /* aligned copy */
2972 skb_copy_from_linear_data_offset(skb,
2973 BNX2_RX_OFFSET - 6,
2974 new_skb->data, len + 6);
2975 skb_reserve(new_skb, 6);
2976 skb_put(new_skb, len);
2978 bnx2_reuse_rx_skb(bp, rxr, skb,
2979 sw_ring_cons, sw_ring_prod);
2981 skb = new_skb;
2982 } else if (unlikely(bnx2_rx_skb(bp, rxr, skb, len, hdr_len,
2983 dma_addr, (sw_ring_cons << 16) | sw_ring_prod)))
2984 goto next_rx;
2986 if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) &&
2987 !(bp->rx_mode & BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG)) {
2988 vtag = rx_hdr->l2_fhdr_vlan_tag;
2989 #ifdef BCM_VLAN
2990 if (bp->vlgrp)
2991 hw_vlan = 1;
2992 else
2993 #endif
2995 struct vlan_ethhdr *ve = (struct vlan_ethhdr *)
2996 __skb_push(skb, 4);
2998 memmove(ve, skb->data + 4, ETH_ALEN * 2);
2999 ve->h_vlan_proto = htons(ETH_P_8021Q);
3000 ve->h_vlan_TCI = htons(vtag);
3001 len += 4;
3005 skb->protocol = eth_type_trans(skb, bp->dev);
3007 if ((len > (bp->dev->mtu + ETH_HLEN)) &&
3008 (ntohs(skb->protocol) != 0x8100)) {
3010 dev_kfree_skb(skb);
3011 goto next_rx;
3015 skb->ip_summed = CHECKSUM_NONE;
3016 if (bp->rx_csum &&
3017 (status & (L2_FHDR_STATUS_TCP_SEGMENT |
3018 L2_FHDR_STATUS_UDP_DATAGRAM))) {
3020 if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
3021 L2_FHDR_ERRORS_UDP_XSUM)) == 0))
3022 skb->ip_summed = CHECKSUM_UNNECESSARY;
3025 skb_record_rx_queue(skb, bnapi - &bp->bnx2_napi[0]);
3027 #ifdef BCM_VLAN
3028 if (hw_vlan)
3029 vlan_hwaccel_receive_skb(skb, bp->vlgrp, vtag);
3030 else
3031 #endif
3032 netif_receive_skb(skb);
3034 rx_pkt++;
3036 next_rx:
3037 sw_cons = NEXT_RX_BD(sw_cons);
3038 sw_prod = NEXT_RX_BD(sw_prod);
3040 if (rx_pkt == budget)
3041 break;
3043 /* Refresh hw_cons to see if there is new work */
3044 if (sw_cons == hw_cons) {
3045 hw_cons = bnx2_get_hw_rx_cons(bnapi);
3046 rmb();
3049 rxr->rx_cons = sw_cons;
3050 rxr->rx_prod = sw_prod;
3052 if (pg_ring_used)
3053 REG_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);
3055 REG_WR16(bp, rxr->rx_bidx_addr, sw_prod);
3057 REG_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);
3059 mmiowb();
3061 return rx_pkt;
3065 /* MSI ISR - The only difference between this and the INTx ISR
3066 * is that the MSI interrupt is always serviced.
3068 static irqreturn_t
3069 bnx2_msi(int irq, void *dev_instance)
3071 struct bnx2_napi *bnapi = dev_instance;
3072 struct bnx2 *bp = bnapi->bp;
3074 prefetch(bnapi->status_blk.msi);
3075 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3076 BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
3077 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
3079 /* Return here if interrupt is disabled. */
3080 if (unlikely(atomic_read(&bp->intr_sem) != 0))
3081 return IRQ_HANDLED;
3083 napi_schedule(&bnapi->napi);
3085 return IRQ_HANDLED;
3088 static irqreturn_t
3089 bnx2_msi_1shot(int irq, void *dev_instance)
3091 struct bnx2_napi *bnapi = dev_instance;
3092 struct bnx2 *bp = bnapi->bp;
3094 prefetch(bnapi->status_blk.msi);
3096 /* Return here if interrupt is disabled. */
3097 if (unlikely(atomic_read(&bp->intr_sem) != 0))
3098 return IRQ_HANDLED;
3100 napi_schedule(&bnapi->napi);
3102 return IRQ_HANDLED;
3105 static irqreturn_t
3106 bnx2_interrupt(int irq, void *dev_instance)
3108 struct bnx2_napi *bnapi = dev_instance;
3109 struct bnx2 *bp = bnapi->bp;
3110 struct status_block *sblk = bnapi->status_blk.msi;
3112 /* When using INTx, it is possible for the interrupt to arrive
3113 * at the CPU before the status block posted prior to the
3114 * interrupt. Reading a register will flush the status block.
3115 * When using MSI, the MSI message will always complete after
3116 * the status block write.
3118 if ((sblk->status_idx == bnapi->last_status_idx) &&
3119 (REG_RD(bp, BNX2_PCICFG_MISC_STATUS) &
3120 BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
3121 return IRQ_NONE;
3123 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3124 BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
3125 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
3127 /* Read back to deassert IRQ immediately to avoid too many
3128 * spurious interrupts.
3130 REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
3132 /* Return here if interrupt is shared and is disabled. */
3133 if (unlikely(atomic_read(&bp->intr_sem) != 0))
3134 return IRQ_HANDLED;
3136 if (napi_schedule_prep(&bnapi->napi)) {
3137 bnapi->last_status_idx = sblk->status_idx;
3138 __napi_schedule(&bnapi->napi);
3141 return IRQ_HANDLED;
3144 static inline int
3145 bnx2_has_fast_work(struct bnx2_napi *bnapi)
3147 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
3148 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
3150 if ((bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons) ||
3151 (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons))
3152 return 1;
3153 return 0;
3156 #define STATUS_ATTN_EVENTS (STATUS_ATTN_BITS_LINK_STATE | \
3157 STATUS_ATTN_BITS_TIMER_ABORT)
3159 static inline int
3160 bnx2_has_work(struct bnx2_napi *bnapi)
3162 struct status_block *sblk = bnapi->status_blk.msi;
3164 if (bnx2_has_fast_work(bnapi))
3165 return 1;
3167 if ((sblk->status_attn_bits & STATUS_ATTN_EVENTS) !=
3168 (sblk->status_attn_bits_ack & STATUS_ATTN_EVENTS))
3169 return 1;
3171 return 0;
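/* Watchdog for a lost MSI edge: if work is pending but the status
 * index has not moved since the previous idle check, bounce the MSI
 * enable bit and invoke the handler by hand.
 */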
3174 static void
3175 bnx2_chk_missed_msi(struct bnx2 *bp)
3177 struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
3178 u32 msi_ctrl;
3180 if (bnx2_has_work(bnapi)) {
3181 msi_ctrl = REG_RD(bp, BNX2_PCICFG_MSI_CONTROL);
3182 if (!(msi_ctrl & BNX2_PCICFG_MSI_CONTROL_ENABLE))
3183 return;
3185 if (bnapi->last_status_idx == bp->idle_chk_status_idx) {
3186 REG_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl &
3187 ~BNX2_PCICFG_MSI_CONTROL_ENABLE);
3188 REG_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl);
3189 bnx2_msi(bp->irq_tbl[0].vector, bnapi);
3193 bp->idle_chk_status_idx = bnapi->last_status_idx;
3196 static void bnx2_poll_link(struct bnx2 *bp, struct bnx2_napi *bnapi)
3198 struct status_block *sblk = bnapi->status_blk.msi;
3199 u32 status_attn_bits = sblk->status_attn_bits;
3200 u32 status_attn_bits_ack = sblk->status_attn_bits_ack;
3202 if ((status_attn_bits & STATUS_ATTN_EVENTS) !=
3203 (status_attn_bits_ack & STATUS_ATTN_EVENTS)) {
3205 bnx2_phy_int(bp, bnapi);
3207 /* This is needed to take care of transient status
3208 * during link changes.
3210 REG_WR(bp, BNX2_HC_COMMAND,
3211 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
3212 REG_RD(bp, BNX2_HC_COMMAND);
3216 static int bnx2_poll_work(struct bnx2 *bp, struct bnx2_napi *bnapi,
3217 int work_done, int budget)
3219 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
3220 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
3222 if (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons)
3223 bnx2_tx_int(bp, bnapi, 0);
3225 if (bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons)
3226 work_done += bnx2_rx_int(bp, bnapi, budget - work_done);
3228 return work_done;
3231 static int bnx2_poll_msix(struct napi_struct *napi, int budget)
3233 struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
3234 struct bnx2 *bp = bnapi->bp;
3235 int work_done = 0;
3236 struct status_block_msix *sblk = bnapi->status_blk.msix;
3238 while (1) {
3239 work_done = bnx2_poll_work(bp, bnapi, work_done, budget);
3240 if (unlikely(work_done >= budget))
3241 break;
3243 bnapi->last_status_idx = sblk->status_idx;
3244 /* status idx must be read before checking for more work. */
3245 rmb();
3246 if (likely(!bnx2_has_fast_work(bnapi))) {
3248 napi_complete(napi);
3249 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
3250 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
3251 bnapi->last_status_idx);
3252 break;
3255 return work_done;
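/* NAPI poll for the INTx/MSI path; also services slow-path link
 * events.  Note the two-write interrupt ack when not using MSI or
 * MSI-X: the index is first written with MASK_INT set and then
 * rewritten without it, presumably so the index update lands before
 * the line is re-armed.
 */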
3258 static int bnx2_poll(struct napi_struct *napi, int budget)
3260 struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
3261 struct bnx2 *bp = bnapi->bp;
3262 int work_done = 0;
3263 struct status_block *sblk = bnapi->status_blk.msi;
3265 while (1) {
3266 bnx2_poll_link(bp, bnapi);
3268 work_done = bnx2_poll_work(bp, bnapi, work_done, budget);
3270 /* bnapi->last_status_idx is used below to tell the hw how
3271 * much work has been processed, so we must read it before
3272 * checking for more work.
3274 bnapi->last_status_idx = sblk->status_idx;
3276 if (unlikely(work_done >= budget))
3277 break;
3279 rmb();
3280 if (likely(!bnx2_has_work(bnapi))) {
3281 napi_complete(napi);
3282 if (likely(bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)) {
3283 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3284 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
3285 bnapi->last_status_idx);
3286 break;
3288 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3289 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
3290 BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
3291 bnapi->last_status_idx);
3293 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3294 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
3295 bnapi->last_status_idx);
3296 break;
3300 return work_done;
3303 /* Called with rtnl_lock from vlan functions and also netif_tx_lock
3304 * from set_multicast.
3306 static void
3307 bnx2_set_rx_mode(struct net_device *dev)
3309 struct bnx2 *bp = netdev_priv(dev);
3310 u32 rx_mode, sort_mode;
3311 struct dev_addr_list *uc_ptr;
3312 int i;
3314 if (!netif_running(dev))
3315 return;
3317 spin_lock_bh(&bp->phy_lock);
3319 rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
3320 BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
3321 sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
3322 #ifdef BCM_VLAN
3323 if (!bp->vlgrp && (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN))
3324 rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
3325 #else
3326 if (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN)
3327 rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
3328 #endif
3329 if (dev->flags & IFF_PROMISC) {
3330 /* Promiscuous mode. */
3331 rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
3332 sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
3333 BNX2_RPM_SORT_USER0_PROM_VLAN;
3335 else if (dev->flags & IFF_ALLMULTI) {
3336 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
3337 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
3338 0xffffffff);
3340 sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
3342 else {
3343 /* Accept one or more multicast(s). */
3344 struct dev_mc_list *mclist;
3345 u32 mc_filter[NUM_MC_HASH_REGISTERS];
3346 u32 regidx;
3347 u32 bit;
3348 u32 crc;
3350 memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);
3352 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
3353 i++, mclist = mclist->next) {
3355 crc = ether_crc_le(ETH_ALEN, mclist->dmi_addr);
3356 bit = crc & 0xff;
3357 regidx = (bit & 0xe0) >> 5;
3358 bit &= 0x1f;
3359 mc_filter[regidx] |= (1 << bit);
3362 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
3363 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
3364 mc_filter[i]);
3367 sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
3370 uc_ptr = NULL;
3371 if (dev->uc_count > BNX2_MAX_UNICAST_ADDRESSES) {
3372 rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
3373 sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
3374 BNX2_RPM_SORT_USER0_PROM_VLAN;
3375 } else if (!(dev->flags & IFF_PROMISC)) {
3376 uc_ptr = dev->uc_list;
3378 /* Add all entries to the match filter list */
3379 for (i = 0; i < dev->uc_count; i++) {
3380 bnx2_set_mac_addr(bp, uc_ptr->da_addr,
3381 i + BNX2_START_UNICAST_ADDRESS_INDEX);
3382 sort_mode |= (1 <<
3383 (i + BNX2_START_UNICAST_ADDRESS_INDEX));
3384 uc_ptr = uc_ptr->next;
3389 if (rx_mode != bp->rx_mode) {
3390 bp->rx_mode = rx_mode;
3391 REG_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
3394 REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
3395 REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
3396 REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);
3398 spin_unlock_bh(&bp->phy_lock);
3401 static int __devinit
3402 check_fw_section(const struct firmware *fw,
3403 const struct bnx2_fw_file_section *section,
3404 u32 alignment, bool non_empty)
3406 u32 offset = be32_to_cpu(section->offset);
3407 u32 len = be32_to_cpu(section->len);
3409 if ((offset == 0 && len != 0) || offset >= fw->size || offset & 3)
3410 return -EINVAL;
3411 if ((non_empty && len == 0) || len > fw->size - offset ||
3412 len & (alignment - 1))
3413 return -EINVAL;
3414 return 0;
3417 static int __devinit
3418 check_mips_fw_entry(const struct firmware *fw,
3419 const struct bnx2_mips_fw_file_entry *entry)
3421 if (check_fw_section(fw, &entry->text, 4, true) ||
3422 check_fw_section(fw, &entry->data, 4, false) ||
3423 check_fw_section(fw, &entry->rodata, 4, false))
3424 return -EINVAL;
3425 return 0;
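/* Request the MIPS and RV2P firmware images appropriate for this
 * chip and sanity-check every section header (offset, length,
 * alignment) before anything is loaded into the processors.
 */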
3428 static int __devinit
3429 bnx2_request_firmware(struct bnx2 *bp)
3431 const char *mips_fw_file, *rv2p_fw_file;
3432 const struct bnx2_mips_fw_file *mips_fw;
3433 const struct bnx2_rv2p_fw_file *rv2p_fw;
3434 int rc;
3436 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3437 mips_fw_file = FW_MIPS_FILE_09;
3438 rv2p_fw_file = FW_RV2P_FILE_09;
3439 } else {
3440 mips_fw_file = FW_MIPS_FILE_06;
3441 rv2p_fw_file = FW_RV2P_FILE_06;
3444 rc = request_firmware(&bp->mips_firmware, mips_fw_file, &bp->pdev->dev);
3445 if (rc) {
3446 printk(KERN_ERR PFX "Can't load firmware file \"%s\"\n",
3447 mips_fw_file);
3448 return rc;
3451 rc = request_firmware(&bp->rv2p_firmware, rv2p_fw_file, &bp->pdev->dev);
3452 if (rc) {
3453 printk(KERN_ERR PFX "Can't load firmware file \"%s\"\n",
3454 rv2p_fw_file);
3455 return rc;
3457 mips_fw = (const struct bnx2_mips_fw_file *) bp->mips_firmware->data;
3458 rv2p_fw = (const struct bnx2_rv2p_fw_file *) bp->rv2p_firmware->data;
3459 if (bp->mips_firmware->size < sizeof(*mips_fw) ||
3460 check_mips_fw_entry(bp->mips_firmware, &mips_fw->com) ||
3461 check_mips_fw_entry(bp->mips_firmware, &mips_fw->cp) ||
3462 check_mips_fw_entry(bp->mips_firmware, &mips_fw->rxp) ||
3463 check_mips_fw_entry(bp->mips_firmware, &mips_fw->tpat) ||
3464 check_mips_fw_entry(bp->mips_firmware, &mips_fw->txp)) {
3465 printk(KERN_ERR PFX "Firmware file \"%s\" is invalid\n",
3466 mips_fw_file);
3467 return -EINVAL;
3469 if (bp->rv2p_firmware->size < sizeof(*rv2p_fw) ||
3470 check_fw_section(bp->rv2p_firmware, &rv2p_fw->proc1.rv2p, 8, true) ||
3471 check_fw_section(bp->rv2p_firmware, &rv2p_fw->proc2.rv2p, 8, true)) {
3472 printk(KERN_ERR PFX "Firmware file \"%s\" is invalid\n",
3473 rv2p_fw_file);
3474 return -EINVAL;
3477 return 0;
3480 static u32
3481 rv2p_fw_fixup(u32 rv2p_proc, int idx, u32 loc, u32 rv2p_code)
3483 switch (idx) {
3484 case RV2P_P1_FIXUP_PAGE_SIZE_IDX:
3485 rv2p_code &= ~RV2P_BD_PAGE_SIZE_MSK;
3486 rv2p_code |= RV2P_BD_PAGE_SIZE;
3487 break;
3489 return rv2p_code;
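/* Load RV2P firmware: each 64-bit instruction is written through the
 * INSTR_HIGH/INSTR_LOW pair, then up to eight fixup locations are
 * patched (e.g. the BD page size) and the processor is reset; the
 * un-stall happens later in the init sequence.
 */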
3492 static int
3493 load_rv2p_fw(struct bnx2 *bp, u32 rv2p_proc,
3494 const struct bnx2_rv2p_fw_file_entry *fw_entry)
3496 u32 rv2p_code_len, file_offset;
3497 __be32 *rv2p_code;
3498 int i;
3499 u32 val, cmd, addr;
3501 rv2p_code_len = be32_to_cpu(fw_entry->rv2p.len);
3502 file_offset = be32_to_cpu(fw_entry->rv2p.offset);
3504 rv2p_code = (__be32 *)(bp->rv2p_firmware->data + file_offset);
3506 if (rv2p_proc == RV2P_PROC1) {
3507 cmd = BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
3508 addr = BNX2_RV2P_PROC1_ADDR_CMD;
3509 } else {
3510 cmd = BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
3511 addr = BNX2_RV2P_PROC2_ADDR_CMD;
3514 for (i = 0; i < rv2p_code_len; i += 8) {
3515 REG_WR(bp, BNX2_RV2P_INSTR_HIGH, be32_to_cpu(*rv2p_code));
3516 rv2p_code++;
3517 REG_WR(bp, BNX2_RV2P_INSTR_LOW, be32_to_cpu(*rv2p_code));
3518 rv2p_code++;
3520 val = (i / 8) | cmd;
3521 REG_WR(bp, addr, val);
3524 rv2p_code = (__be32 *)(bp->rv2p_firmware->data + file_offset);
3525 for (i = 0; i < 8; i++) {
3526 u32 loc, code;
3528 loc = be32_to_cpu(fw_entry->fixup[i]);
3529 if (loc && ((loc * 4) < rv2p_code_len)) {
3530 code = be32_to_cpu(*(rv2p_code + loc - 1));
3531 REG_WR(bp, BNX2_RV2P_INSTR_HIGH, code);
3532 code = be32_to_cpu(*(rv2p_code + loc));
3533 code = rv2p_fw_fixup(rv2p_proc, i, loc, code);
3534 REG_WR(bp, BNX2_RV2P_INSTR_LOW, code);
3536 val = (loc / 2) | cmd;
3537 REG_WR(bp, addr, val);
3541 /* Reset the processor; the un-stall is done later. */
3542 if (rv2p_proc == RV2P_PROC1) {
3543 REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
3545 else {
3546 REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
3549 return 0;
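/* Load one MIPS processor: halt it, copy the text, data and rodata
 * sections into its scratchpad window, clear the prefetch
 * instruction, set the PC to the entry point, and un-halt.
 */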
3552 static int
3553 load_cpu_fw(struct bnx2 *bp, const struct cpu_reg *cpu_reg,
3554 const struct bnx2_mips_fw_file_entry *fw_entry)
3556 u32 addr, len, file_offset;
3557 __be32 *data;
3558 u32 offset;
3559 u32 val;
3561 /* Halt the CPU. */
3562 val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
3563 val |= cpu_reg->mode_value_halt;
3564 bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
3565 bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
3567 /* Load the Text area. */
3568 addr = be32_to_cpu(fw_entry->text.addr);
3569 len = be32_to_cpu(fw_entry->text.len);
3570 file_offset = be32_to_cpu(fw_entry->text.offset);
3571 data = (__be32 *)(bp->mips_firmware->data + file_offset);
3573 offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3574 if (len) {
3575 int j;
3577 for (j = 0; j < (len / 4); j++, offset += 4)
3578 bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3581 /* Load the Data area. */
3582 addr = be32_to_cpu(fw_entry->data.addr);
3583 len = be32_to_cpu(fw_entry->data.len);
3584 file_offset = be32_to_cpu(fw_entry->data.offset);
3585 data = (__be32 *)(bp->mips_firmware->data + file_offset);
3587 offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3588 if (len) {
3589 int j;
3591 for (j = 0; j < (len / 4); j++, offset += 4)
3592 bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3595 /* Load the Read-Only area. */
3596 addr = be32_to_cpu(fw_entry->rodata.addr);
3597 len = be32_to_cpu(fw_entry->rodata.len);
3598 file_offset = be32_to_cpu(fw_entry->rodata.offset);
3599 data = (__be32 *)(bp->mips_firmware->data + file_offset);
3601 offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3602 if (len) {
3603 int j;
3605 for (j = 0; j < (len / 4); j++, offset += 4)
3606 bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3609 /* Clear the pre-fetch instruction. */
3610 bnx2_reg_wr_ind(bp, cpu_reg->inst, 0);
3612 val = be32_to_cpu(fw_entry->start_addr);
3613 bnx2_reg_wr_ind(bp, cpu_reg->pc, val);
3615 /* Start the CPU. */
3616 val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
3617 val &= ~cpu_reg->mode_value_halt;
3618 bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
3619 bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
3621 return 0;
3624 static int
3625 bnx2_init_cpus(struct bnx2 *bp)
3627 const struct bnx2_mips_fw_file *mips_fw =
3628 (const struct bnx2_mips_fw_file *) bp->mips_firmware->data;
3629 const struct bnx2_rv2p_fw_file *rv2p_fw =
3630 (const struct bnx2_rv2p_fw_file *) bp->rv2p_firmware->data;
3631 int rc;
3633 /* Initialize the RV2P processor. */
3634 load_rv2p_fw(bp, RV2P_PROC1, &rv2p_fw->proc1);
3635 load_rv2p_fw(bp, RV2P_PROC2, &rv2p_fw->proc2);
3637 /* Initialize the RX Processor. */
3638 rc = load_cpu_fw(bp, &cpu_reg_rxp, &mips_fw->rxp);
3639 if (rc)
3640 goto init_cpu_err;
3642 /* Initialize the TX Processor. */
3643 rc = load_cpu_fw(bp, &cpu_reg_txp, &mips_fw->txp);
3644 if (rc)
3645 goto init_cpu_err;
3647 /* Initialize the TX Patch-up Processor. */
3648 rc = load_cpu_fw(bp, &cpu_reg_tpat, &mips_fw->tpat);
3649 if (rc)
3650 goto init_cpu_err;
3652 /* Initialize the Completion Processor. */
3653 rc = load_cpu_fw(bp, &cpu_reg_com, &mips_fw->com);
3654 if (rc)
3655 goto init_cpu_err;
3657 /* Initialize the Command Processor. */
3658 rc = load_cpu_fw(bp, &cpu_reg_cp, &mips_fw->cp);
3660 init_cpu_err:
3661 return rc;
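/* Move the chip between D0 and D3hot.  When suspending with WOL, the
 * MAC is set up to receive magic/ACPI packets, the multicast filters
 * are opened, and the firmware is told which suspend mode applies
 * before the PMCSR write cuts off register access.
 */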
3664 static int
3665 bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
3667 u16 pmcsr;
3669 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
3671 switch (state) {
3672 case PCI_D0: {
3673 u32 val;
3675 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
3676 (pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
3677 PCI_PM_CTRL_PME_STATUS);
3679 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
3680 /* delay required during transition out of D3hot */
3681 msleep(20);
3683 val = REG_RD(bp, BNX2_EMAC_MODE);
3684 val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
3685 val &= ~BNX2_EMAC_MODE_MPKT;
3686 REG_WR(bp, BNX2_EMAC_MODE, val);
3688 val = REG_RD(bp, BNX2_RPM_CONFIG);
3689 val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
3690 REG_WR(bp, BNX2_RPM_CONFIG, val);
3691 break;
3693 case PCI_D3hot: {
3694 int i;
3695 u32 val, wol_msg;
3697 if (bp->wol) {
3698 u32 advertising;
3699 u8 autoneg;
3701 autoneg = bp->autoneg;
3702 advertising = bp->advertising;
3704 if (bp->phy_port == PORT_TP) {
3705 bp->autoneg = AUTONEG_SPEED;
3706 bp->advertising = ADVERTISED_10baseT_Half |
3707 ADVERTISED_10baseT_Full |
3708 ADVERTISED_100baseT_Half |
3709 ADVERTISED_100baseT_Full |
3710 ADVERTISED_Autoneg;
3713 spin_lock_bh(&bp->phy_lock);
3714 bnx2_setup_phy(bp, bp->phy_port);
3715 spin_unlock_bh(&bp->phy_lock);
3717 bp->autoneg = autoneg;
3718 bp->advertising = advertising;
3720 bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
3722 val = REG_RD(bp, BNX2_EMAC_MODE);
3724 /* Enable port mode. */
3725 val &= ~BNX2_EMAC_MODE_PORT;
3726 val |= BNX2_EMAC_MODE_MPKT_RCVD |
3727 BNX2_EMAC_MODE_ACPI_RCVD |
3728 BNX2_EMAC_MODE_MPKT;
3729 if (bp->phy_port == PORT_TP)
3730 val |= BNX2_EMAC_MODE_PORT_MII;
3731 else {
3732 val |= BNX2_EMAC_MODE_PORT_GMII;
3733 if (bp->line_speed == SPEED_2500)
3734 val |= BNX2_EMAC_MODE_25G_MODE;
3737 REG_WR(bp, BNX2_EMAC_MODE, val);
3739 /* Receive all multicasts. */
3740 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
3741 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
3742 0xffffffff);
3744 REG_WR(bp, BNX2_EMAC_RX_MODE,
3745 BNX2_EMAC_RX_MODE_SORT_MODE);
3747 val = 1 | BNX2_RPM_SORT_USER0_BC_EN |
3748 BNX2_RPM_SORT_USER0_MC_EN;
3749 REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
3750 REG_WR(bp, BNX2_RPM_SORT_USER0, val);
3751 REG_WR(bp, BNX2_RPM_SORT_USER0, val |
3752 BNX2_RPM_SORT_USER0_ENA);
3754 /* Need to enable EMAC and RPM for WOL. */
3755 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
3756 BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
3757 BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
3758 BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);
3760 val = REG_RD(bp, BNX2_RPM_CONFIG);
3761 val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
3762 REG_WR(bp, BNX2_RPM_CONFIG, val);
3764 wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
3766 else {
3767 wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
3770 if (!(bp->flags & BNX2_FLAG_NO_WOL))
3771 bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg,
3772 1, 0);
3774 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
3775 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
3776 (CHIP_ID(bp) == CHIP_ID_5706_A1)) {
3778 if (bp->wol)
3779 pmcsr |= 3;
3781 else {
3782 pmcsr |= 3;
3784 if (bp->wol) {
3785 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
3787 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
3788 pmcsr);
3790 /* No more memory access after this point until
3791 * device is brought back to D0.
3793 udelay(50);
3794 break;
3796 default:
3797 return -EINVAL;
3799 return 0;
3802 static int
3803 bnx2_acquire_nvram_lock(struct bnx2 *bp)
3805 u32 val;
3806 int j;
3808 /* Request access to the flash interface. */
3809 REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
3810 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3811 val = REG_RD(bp, BNX2_NVM_SW_ARB);
3812 if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
3813 break;
3815 udelay(5);
3818 if (j >= NVRAM_TIMEOUT_COUNT)
3819 return -EBUSY;
3821 return 0;
3824 static int
3825 bnx2_release_nvram_lock(struct bnx2 *bp)
3827 int j;
3828 u32 val;
3830 /* Relinquish nvram interface. */
3831 REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);
3833 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3834 val = REG_RD(bp, BNX2_NVM_SW_ARB);
3835 if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
3836 break;
3838 udelay(5);
3841 if (j >= NVRAM_TIMEOUT_COUNT)
3842 return -EBUSY;
3844 return 0;
3848 static int
3849 bnx2_enable_nvram_write(struct bnx2 *bp)
3851 u32 val;
3853 val = REG_RD(bp, BNX2_MISC_CFG);
3854 REG_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);
3856 if (bp->flash_info->flags & BNX2_NV_WREN) {
3857 int j;
3859 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3860 REG_WR(bp, BNX2_NVM_COMMAND,
3861 BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);
3863 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3864 udelay(5);
3866 val = REG_RD(bp, BNX2_NVM_COMMAND);
3867 if (val & BNX2_NVM_COMMAND_DONE)
3868 break;
3871 if (j >= NVRAM_TIMEOUT_COUNT)
3872 return -EBUSY;
3874 return 0;
3877 static void
3878 bnx2_disable_nvram_write(struct bnx2 *bp)
3880 u32 val;
3882 val = REG_RD(bp, BNX2_MISC_CFG);
3883 REG_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
3887 static void
3888 bnx2_enable_nvram_access(struct bnx2 *bp)
3890 u32 val;
3892 val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
3893 /* Enable both bits, even on read. */
3894 REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
3895 val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
3898 static void
3899 bnx2_disable_nvram_access(struct bnx2 *bp)
3901 u32 val;
3903 val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
3904 /* Disable both bits, even after read. */
3905 REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
3906 val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
3907 BNX2_NVM_ACCESS_ENABLE_WR_EN));
3910 static int
3911 bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
3913 u32 cmd;
3914 int j;
3916 if (bp->flash_info->flags & BNX2_NV_BUFFERED)
3917 /* Buffered flash, no erase needed */
3918 return 0;
3920 /* Build an erase command */
3921 cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
3922 BNX2_NVM_COMMAND_DOIT;
3924 /* Need to clear DONE bit separately. */
3925 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3927 /* Address of the NVRAM page to erase. */
3928 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3930 /* Issue an erase command. */
3931 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3933 /* Wait for completion. */
3934 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3935 u32 val;
3937 udelay(5);
3939 val = REG_RD(bp, BNX2_NVM_COMMAND);
3940 if (val & BNX2_NVM_COMMAND_DONE)
3941 break;
3944 if (j >= NVRAM_TIMEOUT_COUNT)
3945 return -EBUSY;
3947 return 0;
3950 static int
3951 bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
3953 u32 cmd;
3954 int j;
3956 /* Build the command word. */
3957 cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;
3959 /* Calculate the offset within a buffered flash; not needed for the 5709. */
3960 if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
3961 offset = ((offset / bp->flash_info->page_size) <<
3962 bp->flash_info->page_bits) +
3963 (offset % bp->flash_info->page_size);
3966 /* Need to clear DONE bit separately. */
3967 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3969 /* Address of the NVRAM to read from. */
3970 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3972 /* Issue a read command. */
3973 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3975 /* Wait for completion. */
3976 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3977 u32 val;
3979 udelay(5);
3981 val = REG_RD(bp, BNX2_NVM_COMMAND);
3982 if (val & BNX2_NVM_COMMAND_DONE) {
3983 __be32 v = cpu_to_be32(REG_RD(bp, BNX2_NVM_READ));
3984 memcpy(ret_val, &v, 4);
3985 break;
3988 if (j >= NVRAM_TIMEOUT_COUNT)
3989 return -EBUSY;
3991 return 0;
3995 static int
3996 bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
3998 u32 cmd;
3999 __be32 val32;
4000 int j;
4002 /* Build the command word. */
4003 cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;
4005 /* Calculate the offset within a buffered flash; not needed for the 5709. */
4006 if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
4007 offset = ((offset / bp->flash_info->page_size) <<
4008 bp->flash_info->page_bits) +
4009 (offset % bp->flash_info->page_size);
4012 /* Need to clear DONE bit separately. */
4013 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4015 memcpy(&val32, val, 4);
4017 /* Write the data. */
4018 REG_WR(bp, BNX2_NVM_WRITE, be32_to_cpu(val32));
4020 /* Address of the NVRAM to write to. */
4021 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
4023 /* Issue the write command. */
4024 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
4026 /* Wait for completion. */
4027 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4028 udelay(5);
4030 if (REG_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
4031 break;
4033 if (j >= NVRAM_TIMEOUT_COUNT)
4034 return -EBUSY;
4036 return 0;
4039 static int
4040 bnx2_init_nvram(struct bnx2 *bp)
4042 u32 val;
4043 int j, entry_count, rc = 0;
4044 struct flash_spec *flash;
4046 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4047 bp->flash_info = &flash_5709;
4048 goto get_flash_size;
4051 /* Determine the selected interface. */
4052 val = REG_RD(bp, BNX2_NVM_CFG1);
4054 entry_count = ARRAY_SIZE(flash_table);
4056 if (val & 0x40000000) {
4058 /* Flash interface has been reconfigured */
4059 for (j = 0, flash = &flash_table[0]; j < entry_count;
4060 j++, flash++) {
4061 if ((val & FLASH_BACKUP_STRAP_MASK) ==
4062 (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
4063 bp->flash_info = flash;
4064 break;
4068 else {
4069 u32 mask;
4070 /* Not yet reconfigured */
4072 if (val & (1 << 23))
4073 mask = FLASH_BACKUP_STRAP_MASK;
4074 else
4075 mask = FLASH_STRAP_MASK;
4077 for (j = 0, flash = &flash_table[0]; j < entry_count;
4078 j++, flash++) {
4080 if ((val & mask) == (flash->strapping & mask)) {
4081 bp->flash_info = flash;
4083 /* Request access to the flash interface. */
4084 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
4085 return rc;
4087 /* Enable access to flash interface */
4088 bnx2_enable_nvram_access(bp);
4090 /* Reconfigure the flash interface */
4091 REG_WR(bp, BNX2_NVM_CFG1, flash->config1);
4092 REG_WR(bp, BNX2_NVM_CFG2, flash->config2);
4093 REG_WR(bp, BNX2_NVM_CFG3, flash->config3);
4094 REG_WR(bp, BNX2_NVM_WRITE1, flash->write1);
4096 /* Disable access to flash interface */
4097 bnx2_disable_nvram_access(bp);
4098 bnx2_release_nvram_lock(bp);
4100 break;
4103 } /* if (val & 0x40000000) */
4105 if (j == entry_count) {
4106 bp->flash_info = NULL;
4107 printk(KERN_ALERT PFX "Unknown flash/EEPROM type.\n");
4108 return -ENODEV;
4111 get_flash_size:
4112 val = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG2);
4113 val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
4114 if (val)
4115 bp->flash_size = val;
4116 else
4117 bp->flash_size = bp->flash_info->total_size;
4119 return rc;
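/* Read an arbitrary byte range from NVRAM.  The flash only moves
 * aligned dwords, so leading and trailing partial dwords are staged
 * through a small scratch buffer.
 */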
4122 static int
4123 bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
4124 int buf_size)
4126 int rc = 0;
4127 u32 cmd_flags, offset32, len32, extra;
4129 if (buf_size == 0)
4130 return 0;
4132 /* Request access to the flash interface. */
4133 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
4134 return rc;
4136 /* Enable access to flash interface */
4137 bnx2_enable_nvram_access(bp);
4139 len32 = buf_size;
4140 offset32 = offset;
4141 extra = 0;
4143 cmd_flags = 0;
4145 if (offset32 & 3) {
4146 u8 buf[4];
4147 u32 pre_len;
4149 offset32 &= ~3;
4150 pre_len = 4 - (offset & 3);
4152 if (pre_len >= len32) {
4153 pre_len = len32;
4154 cmd_flags = BNX2_NVM_COMMAND_FIRST |
4155 BNX2_NVM_COMMAND_LAST;
4157 else {
4158 cmd_flags = BNX2_NVM_COMMAND_FIRST;
4161 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4163 if (rc)
4164 return rc;
4166 memcpy(ret_buf, buf + (offset & 3), pre_len);
4168 offset32 += 4;
4169 ret_buf += pre_len;
4170 len32 -= pre_len;
4172 if (len32 & 3) {
4173 extra = 4 - (len32 & 3);
4174 len32 = (len32 + 4) & ~3;
4177 if (len32 == 4) {
4178 u8 buf[4];
4180 if (cmd_flags)
4181 cmd_flags = BNX2_NVM_COMMAND_LAST;
4182 else
4183 cmd_flags = BNX2_NVM_COMMAND_FIRST |
4184 BNX2_NVM_COMMAND_LAST;
4186 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4188 memcpy(ret_buf, buf, 4 - extra);
4190 else if (len32 > 0) {
4191 u8 buf[4];
4193 /* Read the first word. */
4194 if (cmd_flags)
4195 cmd_flags = 0;
4196 else
4197 cmd_flags = BNX2_NVM_COMMAND_FIRST;
4199 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);
4201 /* Advance to the next dword. */
4202 offset32 += 4;
4203 ret_buf += 4;
4204 len32 -= 4;
4206 while (len32 > 4 && rc == 0) {
4207 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);
4209 /* Advance to the next dword. */
4210 offset32 += 4;
4211 ret_buf += 4;
4212 len32 -= 4;
4215 if (rc)
4216 return rc;
4218 cmd_flags = BNX2_NVM_COMMAND_LAST;
4219 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4221 memcpy(ret_buf, buf, 4 - extra);
4224 /* Disable access to flash interface */
4225 bnx2_disable_nvram_access(bp);
4227 bnx2_release_nvram_lock(bp);
4229 return rc;
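/* Write an arbitrary byte range to NVRAM using read-modify-write:
 * unaligned edges are merged into an aligned copy of the data, and
 * for non-buffered flash each affected page is read out in full,
 * erased, and rewritten.
 */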
4232 static int
4233 bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
4234 int buf_size)
4236 u32 written, offset32, len32;
4237 u8 *buf, start[4], end[4], *align_buf = NULL, *flash_buffer = NULL;
4238 int rc = 0;
4239 int align_start, align_end;
4241 buf = data_buf;
4242 offset32 = offset;
4243 len32 = buf_size;
4244 align_start = align_end = 0;
4246 if ((align_start = (offset32 & 3))) {
4247 offset32 &= ~3;
4248 len32 += align_start;
4249 if (len32 < 4)
4250 len32 = 4;
4251 if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
4252 return rc;
4255 if (len32 & 3) {
4256 align_end = 4 - (len32 & 3);
4257 len32 += align_end;
4258 if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4, end, 4)))
4259 return rc;
4262 if (align_start || align_end) {
4263 align_buf = kmalloc(len32, GFP_KERNEL);
4264 if (align_buf == NULL)
4265 return -ENOMEM;
4266 if (align_start) {
4267 memcpy(align_buf, start, 4);
4269 if (align_end) {
4270 memcpy(align_buf + len32 - 4, end, 4);
4272 memcpy(align_buf + align_start, data_buf, buf_size);
4273 buf = align_buf;
4276 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4277 flash_buffer = kmalloc(264, GFP_KERNEL);
4278 if (flash_buffer == NULL) {
4279 rc = -ENOMEM;
4280 goto nvram_write_end;
4284 written = 0;
4285 while ((written < len32) && (rc == 0)) {
4286 u32 page_start, page_end, data_start, data_end;
4287 u32 addr, cmd_flags;
4288 int i;
4290 /* Find the page_start addr */
4291 page_start = offset32 + written;
4292 page_start -= (page_start % bp->flash_info->page_size);
4293 /* Find the page_end addr */
4294 page_end = page_start + bp->flash_info->page_size;
4295 /* Find the data_start addr */
4296 data_start = (written == 0) ? offset32 : page_start;
4297 /* Find the data_end addr */
4298 data_end = (page_end > offset32 + len32) ?
4299 (offset32 + len32) : page_end;
4301 /* Request access to the flash interface. */
4302 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
4303 goto nvram_write_end;
4305 /* Enable access to flash interface */
4306 bnx2_enable_nvram_access(bp);
4308 cmd_flags = BNX2_NVM_COMMAND_FIRST;
4309 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4310 int j;
4312 /* Read the whole page into the buffer
4313 * (non-buffered flash only) */
4314 for (j = 0; j < bp->flash_info->page_size; j += 4) {
4315 if (j == (bp->flash_info->page_size - 4)) {
4316 cmd_flags |= BNX2_NVM_COMMAND_LAST;
4318 rc = bnx2_nvram_read_dword(bp,
4319 page_start + j,
4320 &flash_buffer[j],
4321 cmd_flags);
4323 if (rc)
4324 goto nvram_write_end;
4326 cmd_flags = 0;
4330 /* Enable writes to flash interface (unlock write-protect) */
4331 if ((rc = bnx2_enable_nvram_write(bp)) != 0)
4332 goto nvram_write_end;
4334 /* Loop to write back the buffer data from page_start to
4335 * data_start */
4336 i = 0;
4337 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4338 /* Erase the page */
4339 if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
4340 goto nvram_write_end;
4342 /* Re-enable writes for the actual write */
4343 bnx2_enable_nvram_write(bp);
4345 for (addr = page_start; addr < data_start;
4346 addr += 4, i += 4) {
4348 rc = bnx2_nvram_write_dword(bp, addr,
4349 &flash_buffer[i], cmd_flags);
4351 if (rc != 0)
4352 goto nvram_write_end;
4354 cmd_flags = 0;
4358 /* Loop to write the new data from data_start to data_end */
4359 for (addr = data_start; addr < data_end; addr += 4, i += 4) {
4360 if ((addr == page_end - 4) ||
4361 ((bp->flash_info->flags & BNX2_NV_BUFFERED) &&
4362 (addr == data_end - 4))) {
4364 cmd_flags |= BNX2_NVM_COMMAND_LAST;
4366 rc = bnx2_nvram_write_dword(bp, addr, buf,
4367 cmd_flags);
4369 if (rc != 0)
4370 goto nvram_write_end;
4372 cmd_flags = 0;
4373 buf += 4;
4376 /* Loop to write back the buffer data from data_end
4377 * to page_end */
4378 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4379 for (addr = data_end; addr < page_end;
4380 addr += 4, i += 4) {
4382 if (addr == page_end-4) {
4383 cmd_flags = BNX2_NVM_COMMAND_LAST;
4385 rc = bnx2_nvram_write_dword(bp, addr,
4386 &flash_buffer[i], cmd_flags);
4388 if (rc != 0)
4389 goto nvram_write_end;
4391 cmd_flags = 0;
4395 /* Disable writes to flash interface (lock write-protect) */
4396 bnx2_disable_nvram_write(bp);
4398 /* Disable access to flash interface */
4399 bnx2_disable_nvram_access(bp);
4400 bnx2_release_nvram_lock(bp);
4402 /* Increment written */
4403 written += data_end - data_start;
4406 nvram_write_end:
4407 kfree(flash_buffer);
4408 kfree(align_buf);
4409 return rc;
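/* Read the firmware capability mailbox and record what the driver
 * may rely on: keeping VLAN tags while ASF is enabled, and remote
 * PHY management on SerDes ports.  Capabilities the driver accepts
 * are acknowledged back through BNX2_DRV_ACK_CAP_MB.
 */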
4412 static void
4413 bnx2_init_fw_cap(struct bnx2 *bp)
4415 u32 val, sig = 0;
4417 bp->phy_flags &= ~BNX2_PHY_FLAG_REMOTE_PHY_CAP;
4418 bp->flags &= ~BNX2_FLAG_CAN_KEEP_VLAN;
4420 if (!(bp->flags & BNX2_FLAG_ASF_ENABLE))
4421 bp->flags |= BNX2_FLAG_CAN_KEEP_VLAN;
4423 val = bnx2_shmem_rd(bp, BNX2_FW_CAP_MB);
4424 if ((val & BNX2_FW_CAP_SIGNATURE_MASK) != BNX2_FW_CAP_SIGNATURE)
4425 return;
4427 if ((val & BNX2_FW_CAP_CAN_KEEP_VLAN) == BNX2_FW_CAP_CAN_KEEP_VLAN) {
4428 bp->flags |= BNX2_FLAG_CAN_KEEP_VLAN;
4429 sig |= BNX2_DRV_ACK_CAP_SIGNATURE | BNX2_FW_CAP_CAN_KEEP_VLAN;
4432 if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
4433 (val & BNX2_FW_CAP_REMOTE_PHY_CAPABLE)) {
4434 u32 link;
4436 bp->phy_flags |= BNX2_PHY_FLAG_REMOTE_PHY_CAP;
4438 link = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);
4439 if (link & BNX2_LINK_STATUS_SERDES_LINK)
4440 bp->phy_port = PORT_FIBRE;
4441 else
4442 bp->phy_port = PORT_TP;
4444 sig |= BNX2_DRV_ACK_CAP_SIGNATURE |
4445 BNX2_FW_CAP_REMOTE_PHY_CAPABLE;
4448 if (netif_running(bp->dev) && sig)
4449 bnx2_shmem_wr(bp, BNX2_DRV_ACK_CAP_MB, sig);
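/* Point two GRC windows at the MSI-X vector table and pending-bit
 * array so both are reachable through the regular register view.
 */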
4452 static void
4453 bnx2_setup_msix_tbl(struct bnx2 *bp)
4455 REG_WR(bp, BNX2_PCI_GRC_WINDOW_ADDR, BNX2_PCI_GRC_WINDOW_ADDR_SEP_WIN);
4457 REG_WR(bp, BNX2_PCI_GRC_WINDOW2_ADDR, BNX2_MSIX_TABLE_ADDR);
4458 REG_WR(bp, BNX2_PCI_GRC_WINDOW3_ADDR, BNX2_MSIX_PBA_ADDR);
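/* Quiesce DMA, wait for the firmware's permission, then soft-reset
 * the chip.  The 5709 is reset through BNX2_MISC_COMMAND; earlier
 * chips are reset through the PCICFG misc config register, with a
 * 5706 A0/A1 erratum requiring a delay before any read-back.
 */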
4461 static int
4462 bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
4464 u32 val;
4465 int i, rc = 0;
4466 u8 old_port;
4468 /* Wait for the current PCI transaction to complete before
4469 * issuing a reset. */
4470 REG_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
4471 BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
4472 BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
4473 BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
4474 BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
4475 val = REG_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
4476 udelay(5);
4478 /* Wait for the firmware to tell us it is ok to issue a reset. */
4479 bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1, 1);
4481 /* Deposit a driver reset signature so the firmware knows that
4482 * this is a soft reset. */
4483 bnx2_shmem_wr(bp, BNX2_DRV_RESET_SIGNATURE,
4484 BNX2_DRV_RESET_SIGNATURE_MAGIC);
4486 /* Do a dummy read to force the chip to complete all current transactions
4487 * before we issue a reset. */
4488 val = REG_RD(bp, BNX2_MISC_ID);
4490 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4491 REG_WR(bp, BNX2_MISC_COMMAND, BNX2_MISC_COMMAND_SW_RESET);
4492 REG_RD(bp, BNX2_MISC_COMMAND);
4493 udelay(5);
4495 val = BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
4496 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
4498 pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, val);
4500 } else {
4501 val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
4502 BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
4503 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
4505 /* Chip reset. */
4506 REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);
4508 /* Reading back any register after chip reset will hang the
4509 * bus on 5706 A0 and A1. The msleep below provides plenty
4510 * of margin for write posting. */
4512 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
4513 (CHIP_ID(bp) == CHIP_ID_5706_A1))
4514 msleep(20);
4516 /* Reset takes approximately 30 usec */
4517 for (i = 0; i < 10; i++) {
4518 val = REG_RD(bp, BNX2_PCICFG_MISC_CONFIG);
4519 if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
4520 BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0)
4521 break;
4522 udelay(10);
4525 if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
4526 BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
4527 printk(KERN_ERR PFX "Chip reset did not complete\n");
4528 return -EBUSY;
4532 /* Make sure byte swapping is properly configured. */
4533 val = REG_RD(bp, BNX2_PCI_SWAP_DIAG0);
4534 if (val != 0x01020304) {
4535 printk(KERN_ERR PFX "Chip not in correct endian mode\n");
4536 return -ENODEV;
4539 /* Wait for the firmware to finish its initialization. */
4540 rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 1, 0);
4541 if (rc)
4542 return rc;
4544 spin_lock_bh(&bp->phy_lock);
4545 old_port = bp->phy_port;
4546 bnx2_init_fw_cap(bp);
4547 if ((bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) &&
4548 old_port != bp->phy_port)
4549 bnx2_set_default_remote_link(bp);
4550 spin_unlock_bh(&bp->phy_lock);
4552 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
4553 /* Adjust the voltage regulator to two steps lower. The default
4554 * of this register is 0x0000000e. */
4555 REG_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);
4557 /* Remove bad rbuf memory from the free pool. */
4558 rc = bnx2_alloc_bad_rbuf(bp);
4561 if (bp->flags & BNX2_FLAG_USING_MSIX)
4562 bnx2_setup_msix_tbl(bp);
4564 return rc;
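/* Bring the chip from reset to an operational state: configure DMA
 * byte swapping, load the on-chip CPUs, program the MAC address and
 * MTU, and set up host coalescing for every status block in use.
 */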
4567 static int
4568 bnx2_init_chip(struct bnx2 *bp)
4570 u32 val, mtu;
4571 int rc, i;
4573 /* Make sure the interrupt is not active. */
4574 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
4576 val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
4577 BNX2_DMA_CONFIG_DATA_WORD_SWAP |
4578 #ifdef __BIG_ENDIAN
4579 BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
4580 #endif
4581 BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
4582 DMA_READ_CHANS << 12 |
4583 DMA_WRITE_CHANS << 16;
4585 val |= (0x2 << 20) | (1 << 11);
4587 if ((bp->flags & BNX2_FLAG_PCIX) && (bp->bus_speed_mhz == 133))
4588 val |= (1 << 23);
4590 if ((CHIP_NUM(bp) == CHIP_NUM_5706) &&
4591 (CHIP_ID(bp) != CHIP_ID_5706_A0) && !(bp->flags & BNX2_FLAG_PCIX))
4592 val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;
4594 REG_WR(bp, BNX2_DMA_CONFIG, val);
4596 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
4597 val = REG_RD(bp, BNX2_TDMA_CONFIG);
4598 val |= BNX2_TDMA_CONFIG_ONE_DMA;
4599 REG_WR(bp, BNX2_TDMA_CONFIG, val);
4602 if (bp->flags & BNX2_FLAG_PCIX) {
4603 u16 val16;
4605 pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
4606 &val16);
4607 pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
4608 val16 & ~PCI_X_CMD_ERO);
4611 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
4612 BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
4613 BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
4614 BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);
4616 /* Initialize context mapping and zero out the quick contexts. The
4617 * context block must have already been enabled. */
4618 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4619 rc = bnx2_init_5709_context(bp);
4620 if (rc)
4621 return rc;
4622 } else
4623 bnx2_init_context(bp);
4625 if ((rc = bnx2_init_cpus(bp)) != 0)
4626 return rc;
4628 bnx2_init_nvram(bp);
4630 bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
4632 val = REG_RD(bp, BNX2_MQ_CONFIG);
4633 val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
4634 val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
4635 if (CHIP_ID(bp) == CHIP_ID_5709_A0 || CHIP_ID(bp) == CHIP_ID_5709_A1)
4636 val |= BNX2_MQ_CONFIG_HALT_DIS;
4638 REG_WR(bp, BNX2_MQ_CONFIG, val);
4640 val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
4641 REG_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
4642 REG_WR(bp, BNX2_MQ_KNL_WIND_END, val);
4644 val = (BCM_PAGE_BITS - 8) << 24;
4645 REG_WR(bp, BNX2_RV2P_CONFIG, val);
4647 /* Configure page size. */
4648 val = REG_RD(bp, BNX2_TBDR_CONFIG);
4649 val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
4650 val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
4651 REG_WR(bp, BNX2_TBDR_CONFIG, val);
4653 val = bp->mac_addr[0] +
4654 (bp->mac_addr[1] << 8) +
4655 (bp->mac_addr[2] << 16) +
4656 bp->mac_addr[3] +
4657 (bp->mac_addr[4] << 8) +
4658 (bp->mac_addr[5] << 16);
4659 REG_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);
4661 /* Program the MTU. Also include 4 bytes for CRC32. */
4662 mtu = bp->dev->mtu;
4663 val = mtu + ETH_HLEN + ETH_FCS_LEN;
4664 if (val > (MAX_ETHERNET_PACKET_SIZE + 4))
4665 val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
4666 REG_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);
4668 if (mtu < 1500)
4669 mtu = 1500;
4671 bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG, BNX2_RBUF_CONFIG_VAL(mtu));
4672 bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG2, BNX2_RBUF_CONFIG2_VAL(mtu));
4673 bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG3, BNX2_RBUF_CONFIG3_VAL(mtu));
4675 for (i = 0; i < BNX2_MAX_MSIX_VEC; i++)
4676 bp->bnx2_napi[i].last_status_idx = 0;
4678 bp->idle_chk_status_idx = 0xffff;
4680 bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE;
4682 /* Set up how to generate a link change interrupt. */
4683 REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
4685 REG_WR(bp, BNX2_HC_STATUS_ADDR_L,
4686 (u64) bp->status_blk_mapping & 0xffffffff);
4687 REG_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);
4689 REG_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
4690 (u64) bp->stats_blk_mapping & 0xffffffff);
4691 REG_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
4692 (u64) bp->stats_blk_mapping >> 32);
4694 REG_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
4695 (bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);
4697 REG_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
4698 (bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);
4700 REG_WR(bp, BNX2_HC_COMP_PROD_TRIP,
4701 (bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);
4703 REG_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);
4705 REG_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);
4707 REG_WR(bp, BNX2_HC_COM_TICKS,
4708 (bp->com_ticks_int << 16) | bp->com_ticks);
4710 REG_WR(bp, BNX2_HC_CMD_TICKS,
4711 (bp->cmd_ticks_int << 16) | bp->cmd_ticks);
4713 if (CHIP_NUM(bp) == CHIP_NUM_5708)
4714 REG_WR(bp, BNX2_HC_STATS_TICKS, 0);
4715 else
4716 REG_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks);
4717 REG_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8); /* 3ms */
4719 if (CHIP_ID(bp) == CHIP_ID_5706_A1)
4720 val = BNX2_HC_CONFIG_COLLECT_STATS;
4721 else {
4722 val = BNX2_HC_CONFIG_RX_TMR_MODE | BNX2_HC_CONFIG_TX_TMR_MODE |
4723 BNX2_HC_CONFIG_COLLECT_STATS;
4726 if (bp->irq_nvecs > 1) {
4727 REG_WR(bp, BNX2_HC_MSIX_BIT_VECTOR,
4728 BNX2_HC_MSIX_BIT_VECTOR_VAL);
4730 val |= BNX2_HC_CONFIG_SB_ADDR_INC_128B;
4733 if (bp->flags & BNX2_FLAG_ONE_SHOT_MSI)
4734 val |= BNX2_HC_CONFIG_ONE_SHOT;
4736 REG_WR(bp, BNX2_HC_CONFIG, val);
4738 for (i = 1; i < bp->irq_nvecs; i++) {
4739 u32 base = ((i - 1) * BNX2_HC_SB_CONFIG_SIZE) +
4740 BNX2_HC_SB_CONFIG_1;
4742 REG_WR(bp, base,
4743 BNX2_HC_SB_CONFIG_1_TX_TMR_MODE |
4744 BNX2_HC_SB_CONFIG_1_RX_TMR_MODE |
4745 BNX2_HC_SB_CONFIG_1_ONE_SHOT);
4747 REG_WR(bp, base + BNX2_HC_TX_QUICK_CONS_TRIP_OFF,
4748 (bp->tx_quick_cons_trip_int << 16) |
4749 bp->tx_quick_cons_trip);
4751 REG_WR(bp, base + BNX2_HC_TX_TICKS_OFF,
4752 (bp->tx_ticks_int << 16) | bp->tx_ticks);
4754 REG_WR(bp, base + BNX2_HC_RX_QUICK_CONS_TRIP_OFF,
4755 (bp->rx_quick_cons_trip_int << 16) |
4756 bp->rx_quick_cons_trip);
4758 REG_WR(bp, base + BNX2_HC_RX_TICKS_OFF,
4759 (bp->rx_ticks_int << 16) | bp->rx_ticks);
4762 /* Clear internal stats counters. */
4763 REG_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);
4765 REG_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_EVENTS);
4767 /* Initialize the receive filter. */
4768 bnx2_set_rx_mode(bp->dev);
4770 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4771 val = REG_RD(bp, BNX2_MISC_NEW_CORE_CTL);
4772 val |= BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
4773 REG_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
4775 rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
4776 1, 0);
4778 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, BNX2_MISC_ENABLE_DEFAULT);
4779 REG_RD(bp, BNX2_MISC_ENABLE_SET_BITS);
4781 udelay(20);
4783 bp->hc_cmd = REG_RD(bp, BNX2_HC_COMMAND);
4785 return rc;
4788 static void
4789 bnx2_clear_ring_states(struct bnx2 *bp)
4791 struct bnx2_napi *bnapi;
4792 struct bnx2_tx_ring_info *txr;
4793 struct bnx2_rx_ring_info *rxr;
4794 int i;
4796 for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
4797 bnapi = &bp->bnx2_napi[i];
4798 txr = &bnapi->tx_ring;
4799 rxr = &bnapi->rx_ring;
4801 txr->tx_cons = 0;
4802 txr->hw_tx_cons = 0;
4803 rxr->rx_prod_bseq = 0;
4804 rxr->rx_prod = 0;
4805 rxr->rx_cons = 0;
4806 rxr->rx_pg_prod = 0;
4807 rxr->rx_pg_cons = 0;
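/* Write the TX descriptor ring address and type into the hardware
 * context.  The 5709 uses a different set of context offsets (the
 * _XI variants) than earlier chips.
 */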
4811 static void
4812 bnx2_init_tx_context(struct bnx2 *bp, u32 cid, struct bnx2_tx_ring_info *txr)
4814 u32 val, offset0, offset1, offset2, offset3;
4815 u32 cid_addr = GET_CID_ADDR(cid);
4817 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4818 offset0 = BNX2_L2CTX_TYPE_XI;
4819 offset1 = BNX2_L2CTX_CMD_TYPE_XI;
4820 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
4821 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
4822 } else {
4823 offset0 = BNX2_L2CTX_TYPE;
4824 offset1 = BNX2_L2CTX_CMD_TYPE;
4825 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
4826 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
4828 val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
4829 bnx2_ctx_wr(bp, cid_addr, offset0, val);
4831 val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
4832 bnx2_ctx_wr(bp, cid_addr, offset1, val);
4834 val = (u64) txr->tx_desc_mapping >> 32;
4835 bnx2_ctx_wr(bp, cid_addr, offset2, val);
4837 val = (u64) txr->tx_desc_mapping & 0xffffffff;
4838 bnx2_ctx_wr(bp, cid_addr, offset3, val);
4841 static void
4842 bnx2_init_tx_ring(struct bnx2 *bp, int ring_num)
4844 struct tx_bd *txbd;
4845 u32 cid = TX_CID;
4846 struct bnx2_napi *bnapi;
4847 struct bnx2_tx_ring_info *txr;
4849 bnapi = &bp->bnx2_napi[ring_num];
4850 txr = &bnapi->tx_ring;
4852 if (ring_num == 0)
4853 cid = TX_CID;
4854 else
4855 cid = TX_TSS_CID + ring_num - 1;
4857 bp->tx_wake_thresh = bp->tx_ring_size / 2;
4859 txbd = &txr->tx_desc_ring[MAX_TX_DESC_CNT];
4861 txbd->tx_bd_haddr_hi = (u64) txr->tx_desc_mapping >> 32;
4862 txbd->tx_bd_haddr_lo = (u64) txr->tx_desc_mapping & 0xffffffff;
4864 txr->tx_prod = 0;
4865 txr->tx_prod_bseq = 0;
4867 txr->tx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BIDX;
4868 txr->tx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BSEQ;
4870 bnx2_init_tx_context(bp, cid, txr);
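/* Chain the RX buffer descriptor pages into a ring: the last BD of
 * each page points to the next page, and the last page points back
 * to the first.
 */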
4873 static void
4874 bnx2_init_rxbd_rings(struct rx_bd *rx_ring[], dma_addr_t dma[], u32 buf_size,
4875 int num_rings)
4877 int i;
4878 struct rx_bd *rxbd;
4880 for (i = 0; i < num_rings; i++) {
4881 int j;
4883 rxbd = &rx_ring[i][0];
4884 for (j = 0; j < MAX_RX_DESC_CNT; j++, rxbd++) {
4885 rxbd->rx_bd_len = buf_size;
4886 rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
4888 if (i == (num_rings - 1))
4889 j = 0;
4890 else
4891 j = i + 1;
4892 rxbd->rx_bd_haddr_hi = (u64) dma[j] >> 32;
4893 rxbd->rx_bd_haddr_lo = (u64) dma[j] & 0xffffffff;
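/* Set up one RX ring: program the ring context, optionally enable
 * the page ring used for jumbo frames, and pre-fill both rings with
 * buffers before publishing the producer indices to the hardware.
 */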
4897 static void
4898 bnx2_init_rx_ring(struct bnx2 *bp, int ring_num)
4900 int i;
4901 u16 prod, ring_prod;
4902 u32 cid, rx_cid_addr, val;
4903 struct bnx2_napi *bnapi = &bp->bnx2_napi[ring_num];
4904 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
4906 if (ring_num == 0)
4907 cid = RX_CID;
4908 else
4909 cid = RX_RSS_CID + ring_num - 1;
4911 rx_cid_addr = GET_CID_ADDR(cid);
4913 bnx2_init_rxbd_rings(rxr->rx_desc_ring, rxr->rx_desc_mapping,
4914 bp->rx_buf_use_size, bp->rx_max_ring);
4916 bnx2_init_rx_context(bp, cid);
4918 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4919 val = REG_RD(bp, BNX2_MQ_MAP_L2_5);
4920 REG_WR(bp, BNX2_MQ_MAP_L2_5, val | BNX2_MQ_MAP_L2_5_ARM);
4923 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, 0);
4924 if (bp->rx_pg_ring_size) {
4925 bnx2_init_rxbd_rings(rxr->rx_pg_desc_ring,
4926 rxr->rx_pg_desc_mapping,
4927 PAGE_SIZE, bp->rx_max_pg_ring);
4928 val = (bp->rx_buf_use_size << 16) | PAGE_SIZE;
4929 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, val);
4930 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_RBDC_KEY,
4931 BNX2_L2CTX_RBDC_JUMBO_KEY - ring_num);
4933 val = (u64) rxr->rx_pg_desc_mapping[0] >> 32;
4934 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_HI, val);
4936 val = (u64) rxr->rx_pg_desc_mapping[0] & 0xffffffff;
4937 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_LO, val);
4939 if (CHIP_NUM(bp) == CHIP_NUM_5709)
4940 REG_WR(bp, BNX2_MQ_MAP_L2_3, BNX2_MQ_MAP_L2_3_DEFAULT);
4943 val = (u64) rxr->rx_desc_mapping[0] >> 32;
4944 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val);
4946 val = (u64) rxr->rx_desc_mapping[0] & 0xffffffff;
4947 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val);
4949 ring_prod = prod = rxr->rx_pg_prod;
4950 for (i = 0; i < bp->rx_pg_ring_size; i++) {
4951 if (bnx2_alloc_rx_page(bp, rxr, ring_prod) < 0)
4952 break;
4953 prod = NEXT_RX_BD(prod);
4954 ring_prod = RX_PG_RING_IDX(prod);
4956 rxr->rx_pg_prod = prod;
4958 ring_prod = prod = rxr->rx_prod;
4959 for (i = 0; i < bp->rx_ring_size; i++) {
4960 if (bnx2_alloc_rx_skb(bp, rxr, ring_prod) < 0)
4961 break;
4962 prod = NEXT_RX_BD(prod);
4963 ring_prod = RX_RING_IDX(prod);
4965 rxr->rx_prod = prod;
4967 rxr->rx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BDIDX;
4968 rxr->rx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BSEQ;
4969 rxr->rx_pg_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_PG_BDIDX;
4971 REG_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);
4972 REG_WR16(bp, rxr->rx_bidx_addr, prod);
4974 REG_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);
4977 static void
4978 bnx2_init_all_rings(struct bnx2 *bp)
4980 int i;
4981 u32 val;
4983 bnx2_clear_ring_states(bp);
4985 REG_WR(bp, BNX2_TSCH_TSS_CFG, 0);
4986 for (i = 0; i < bp->num_tx_rings; i++)
4987 bnx2_init_tx_ring(bp, i);
4989 if (bp->num_tx_rings > 1)
4990 REG_WR(bp, BNX2_TSCH_TSS_CFG, ((bp->num_tx_rings - 1) << 24) |
4991 (TX_TSS_CID << 7));
4993 REG_WR(bp, BNX2_RLUP_RSS_CONFIG, 0);
4994 bnx2_reg_wr_ind(bp, BNX2_RXP_SCRATCH_RSS_TBL_SZ, 0);
4996 for (i = 0; i < bp->num_rx_rings; i++)
4997 bnx2_init_rx_ring(bp, i);
4999 if (bp->num_rx_rings > 1) {
5000 u32 tbl_32;
5001 u8 *tbl = (u8 *) &tbl_32;
5003 bnx2_reg_wr_ind(bp, BNX2_RXP_SCRATCH_RSS_TBL_SZ,
5004 BNX2_RXP_SCRATCH_RSS_TBL_MAX_ENTRIES);
5006 for (i = 0; i < BNX2_RXP_SCRATCH_RSS_TBL_MAX_ENTRIES; i++) {
5007 tbl[i % 4] = i % (bp->num_rx_rings - 1);
5008 if ((i % 4) == 3)
5009 bnx2_reg_wr_ind(bp,
5010 BNX2_RXP_SCRATCH_RSS_TBL + i,
5011 cpu_to_be32(tbl_32));
5014 val = BNX2_RLUP_RSS_CONFIG_IPV4_RSS_TYPE_ALL_XI |
5015 BNX2_RLUP_RSS_CONFIG_IPV6_RSS_TYPE_ALL_XI;
5017 REG_WR(bp, BNX2_RLUP_RSS_CONFIG, val);
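/* Return how many descriptor pages are needed to hold ring_size
 * entries, rounded up to a power of 2 and capped at max_size.
 */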
5022 static u32 bnx2_find_max_ring(u32 ring_size, u32 max_size)
5024 u32 max, num_rings = 1;
5026 while (ring_size > MAX_RX_DESC_CNT) {
5027 ring_size -= MAX_RX_DESC_CNT;
5028 num_rings++;
5030 /* round to next power of 2 */
5031 max = max_size;
5032 while ((max & num_rings) == 0)
5033 max >>= 1;
5035 if (num_rings != max)
5036 max <<= 1;
5038 return max;
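/* Size the RX buffers for the current MTU.  When a full frame no
 * longer fits in a single allocation (and jumbo pages work on this
 * chip), receive buffers are split: a small header buffer plus a
 * page ring holding the rest of the frame.
 */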
5041 static void
5042 bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
5044 u32 rx_size, rx_space, jumbo_size;
5046 /* 8 for CRC and VLAN */
5047 rx_size = bp->dev->mtu + ETH_HLEN + BNX2_RX_OFFSET + 8;
5049 rx_space = SKB_DATA_ALIGN(rx_size + BNX2_RX_ALIGN) + NET_SKB_PAD +
5050 sizeof(struct skb_shared_info);
5052 bp->rx_copy_thresh = BNX2_RX_COPY_THRESH;
5053 bp->rx_pg_ring_size = 0;
5054 bp->rx_max_pg_ring = 0;
5055 bp->rx_max_pg_ring_idx = 0;
5056 if ((rx_space > PAGE_SIZE) && !(bp->flags & BNX2_FLAG_JUMBO_BROKEN)) {
5057 int pages = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;
5059 jumbo_size = size * pages;
5060 if (jumbo_size > MAX_TOTAL_RX_PG_DESC_CNT)
5061 jumbo_size = MAX_TOTAL_RX_PG_DESC_CNT;
5063 bp->rx_pg_ring_size = jumbo_size;
5064 bp->rx_max_pg_ring = bnx2_find_max_ring(jumbo_size,
5065 MAX_RX_PG_RINGS);
5066 bp->rx_max_pg_ring_idx = (bp->rx_max_pg_ring * RX_DESC_CNT) - 1;
5067 rx_size = BNX2_RX_COPY_THRESH + BNX2_RX_OFFSET;
5068 bp->rx_copy_thresh = 0;
5071 bp->rx_buf_use_size = rx_size;
5072 /* hw alignment */
5073 bp->rx_buf_size = bp->rx_buf_use_size + BNX2_RX_ALIGN;
5074 bp->rx_jumbo_thresh = rx_size - BNX2_RX_OFFSET;
5075 bp->rx_ring_size = size;
5076 bp->rx_max_ring = bnx2_find_max_ring(size, MAX_RX_RINGS);
5077 bp->rx_max_ring_idx = (bp->rx_max_ring * RX_DESC_CNT) - 1;
5080 static void
5081 bnx2_free_tx_skbs(struct bnx2 *bp)
5083 int i;
5085 for (i = 0; i < bp->num_tx_rings; i++) {
5086 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
5087 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
5088 int j;
5090 if (txr->tx_buf_ring == NULL)
5091 continue;
5093 for (j = 0; j < TX_DESC_CNT; ) {
5094 struct sw_tx_bd *tx_buf = &txr->tx_buf_ring[j];
5095 struct sk_buff *skb = tx_buf->skb;
5097 if (skb == NULL) {
5098 j++;
5099 continue;
5102 skb_dma_unmap(&bp->pdev->dev, skb, DMA_TO_DEVICE);
5104 tx_buf->skb = NULL;
5106 j += skb_shinfo(skb)->nr_frags + 1;
5107 dev_kfree_skb(skb);
5112 static void
5113 bnx2_free_rx_skbs(struct bnx2 *bp)
5115 int i;
5117 for (i = 0; i < bp->num_rx_rings; i++) {
5118 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
5119 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
5120 int j;
5122 if (rxr->rx_buf_ring == NULL)
5123 return;
5125 for (j = 0; j < bp->rx_max_ring_idx; j++) {
5126 struct sw_bd *rx_buf = &rxr->rx_buf_ring[j];
5127 struct sk_buff *skb = rx_buf->skb;
5129 if (skb == NULL)
5130 continue;
5132 pci_unmap_single(bp->pdev,
5133 pci_unmap_addr(rx_buf, mapping),
5134 bp->rx_buf_use_size,
5135 PCI_DMA_FROMDEVICE);
5137 rx_buf->skb = NULL;
5139 dev_kfree_skb(skb);
5141 for (j = 0; j < bp->rx_max_pg_ring_idx; j++)
5142 bnx2_free_rx_page(bp, rxr, j);
5146 static void
5147 bnx2_free_skbs(struct bnx2 *bp)
5149 bnx2_free_tx_skbs(bp);
5150 bnx2_free_rx_skbs(bp);
5153 static int
5154 bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
5156 int rc;
5158 rc = bnx2_reset_chip(bp, reset_code);
5159 bnx2_free_skbs(bp);
5160 if (rc)
5161 return rc;
5163 if ((rc = bnx2_init_chip(bp)) != 0)
5164 return rc;
5166 bnx2_init_all_rings(bp);
5167 return 0;
5170 static int
5171 bnx2_init_nic(struct bnx2 *bp, int reset_phy)
5173 int rc;
5175 if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
5176 return rc;
5178 spin_lock_bh(&bp->phy_lock);
5179 bnx2_init_phy(bp, reset_phy);
5180 bnx2_set_link(bp);
5181 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
5182 bnx2_remote_phy_event(bp);
5183 spin_unlock_bh(&bp->phy_lock);
5184 return 0;
5187 static int
5188 bnx2_shutdown_chip(struct bnx2 *bp)
5190 u32 reset_code;
5192 if (bp->flags & BNX2_FLAG_NO_WOL)
5193 reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
5194 else if (bp->wol)
5195 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
5196 else
5197 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
5199 return bnx2_reset_chip(bp, reset_code);
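/* Register self-test: for each table entry, check that read/write
 * bits can be cleared and set per rw_mask while read-only bits per
 * ro_mask keep their value, then restore the original contents.
 */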
5202 static int
5203 bnx2_test_registers(struct bnx2 *bp)
5205 int ret;
5206 int i, is_5709;
5207 static const struct {
5208 u16 offset;
5209 u16 flags;
5210 #define BNX2_FL_NOT_5709 1
5211 u32 rw_mask;
5212 u32 ro_mask;
5213 } reg_tbl[] = {
5214 { 0x006c, 0, 0x00000000, 0x0000003f },
5215 { 0x0090, 0, 0xffffffff, 0x00000000 },
5216 { 0x0094, 0, 0x00000000, 0x00000000 },
5218 { 0x0404, BNX2_FL_NOT_5709, 0x00003f00, 0x00000000 },
5219 { 0x0418, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5220 { 0x041c, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5221 { 0x0420, BNX2_FL_NOT_5709, 0x00000000, 0x80ffffff },
5222 { 0x0424, BNX2_FL_NOT_5709, 0x00000000, 0x00000000 },
5223 { 0x0428, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
5224 { 0x0450, BNX2_FL_NOT_5709, 0x00000000, 0x0000ffff },
5225 { 0x0454, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5226 { 0x0458, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5228 { 0x0808, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5229 { 0x0854, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5230 { 0x0868, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
5231 { 0x086c, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
5232 { 0x0870, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
5233 { 0x0874, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
5235 { 0x0c00, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
5236 { 0x0c04, BNX2_FL_NOT_5709, 0x00000000, 0x03ff0001 },
5237 { 0x0c08, BNX2_FL_NOT_5709, 0x0f0ff073, 0x00000000 },
5239 { 0x1000, 0, 0x00000000, 0x00000001 },
5240 { 0x1004, BNX2_FL_NOT_5709, 0x00000000, 0x000f0001 },
5242 { 0x1408, 0, 0x01c00800, 0x00000000 },
5243 { 0x149c, 0, 0x8000ffff, 0x00000000 },
5244 { 0x14a8, 0, 0x00000000, 0x000001ff },
5245 { 0x14ac, 0, 0x0fffffff, 0x10000000 },
5246 { 0x14b0, 0, 0x00000002, 0x00000001 },
5247 { 0x14b8, 0, 0x00000000, 0x00000000 },
5248 { 0x14c0, 0, 0x00000000, 0x00000009 },
5249 { 0x14c4, 0, 0x00003fff, 0x00000000 },
5250 { 0x14cc, 0, 0x00000000, 0x00000001 },
5251 { 0x14d0, 0, 0xffffffff, 0x00000000 },
5253 { 0x1800, 0, 0x00000000, 0x00000001 },
5254 { 0x1804, 0, 0x00000000, 0x00000003 },
5256 { 0x2800, 0, 0x00000000, 0x00000001 },
5257 { 0x2804, 0, 0x00000000, 0x00003f01 },
5258 { 0x2808, 0, 0x0f3f3f03, 0x00000000 },
5259 { 0x2810, 0, 0xffff0000, 0x00000000 },
5260 { 0x2814, 0, 0xffff0000, 0x00000000 },
5261 { 0x2818, 0, 0xffff0000, 0x00000000 },
5262 { 0x281c, 0, 0xffff0000, 0x00000000 },
5263 { 0x2834, 0, 0xffffffff, 0x00000000 },
5264 { 0x2840, 0, 0x00000000, 0xffffffff },
5265 { 0x2844, 0, 0x00000000, 0xffffffff },
5266 { 0x2848, 0, 0xffffffff, 0x00000000 },
5267 { 0x284c, 0, 0xf800f800, 0x07ff07ff },
5269 { 0x2c00, 0, 0x00000000, 0x00000011 },
5270 { 0x2c04, 0, 0x00000000, 0x00030007 },
5272 { 0x3c00, 0, 0x00000000, 0x00000001 },
5273 { 0x3c04, 0, 0x00000000, 0x00070000 },
5274 { 0x3c08, 0, 0x00007f71, 0x07f00000 },
5275 { 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
5276 { 0x3c10, 0, 0xffffffff, 0x00000000 },
5277 { 0x3c14, 0, 0x00000000, 0xffffffff },
5278 { 0x3c18, 0, 0x00000000, 0xffffffff },
5279 { 0x3c1c, 0, 0xfffff000, 0x00000000 },
5280 { 0x3c20, 0, 0xffffff00, 0x00000000 },
5282 { 0x5004, 0, 0x00000000, 0x0000007f },
5283 { 0x5008, 0, 0x0f0007ff, 0x00000000 },
5285 { 0x5c00, 0, 0x00000000, 0x00000001 },
5286 { 0x5c04, 0, 0x00000000, 0x0003000f },
5287 { 0x5c08, 0, 0x00000003, 0x00000000 },
5288 { 0x5c0c, 0, 0x0000fff8, 0x00000000 },
5289 { 0x5c10, 0, 0x00000000, 0xffffffff },
5290 { 0x5c80, 0, 0x00000000, 0x0f7113f1 },
5291 { 0x5c84, 0, 0x00000000, 0x0000f333 },
5292 { 0x5c88, 0, 0x00000000, 0x00077373 },
5293 { 0x5c8c, 0, 0x00000000, 0x0007f737 },
5295 { 0x6808, 0, 0x0000ff7f, 0x00000000 },
5296 { 0x680c, 0, 0xffffffff, 0x00000000 },
5297 { 0x6810, 0, 0xffffffff, 0x00000000 },
5298 { 0x6814, 0, 0xffffffff, 0x00000000 },
5299 { 0x6818, 0, 0xffffffff, 0x00000000 },
5300 { 0x681c, 0, 0xffffffff, 0x00000000 },
5301 { 0x6820, 0, 0x00ff00ff, 0x00000000 },
5302 { 0x6824, 0, 0x00ff00ff, 0x00000000 },
5303 { 0x6828, 0, 0x00ff00ff, 0x00000000 },
5304 { 0x682c, 0, 0x03ff03ff, 0x00000000 },
5305 { 0x6830, 0, 0x03ff03ff, 0x00000000 },
5306 { 0x6834, 0, 0x03ff03ff, 0x00000000 },
5307 { 0x6838, 0, 0x03ff03ff, 0x00000000 },
5308 { 0x683c, 0, 0x0000ffff, 0x00000000 },
5309 { 0x6840, 0, 0x00000ff0, 0x00000000 },
5310 { 0x6844, 0, 0x00ffff00, 0x00000000 },
5311 { 0x684c, 0, 0xffffffff, 0x00000000 },
5312 { 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
5313 { 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
5314 { 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
5315 { 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
5316 { 0x6908, 0, 0x00000000, 0x0001ff0f },
5317 { 0x690c, 0, 0x00000000, 0x0ffe00f0 },
5319 { 0xffff, 0, 0x00000000, 0x00000000 },
5322 ret = 0;
5323 is_5709 = 0;
5324 if (CHIP_NUM(bp) == CHIP_NUM_5709)
5325 is_5709 = 1;
5327 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
5328 u32 offset, rw_mask, ro_mask, save_val, val;
5329 u16 flags = reg_tbl[i].flags;
5331 if (is_5709 && (flags & BNX2_FL_NOT_5709))
5332 continue;
5334 offset = (u32) reg_tbl[i].offset;
5335 rw_mask = reg_tbl[i].rw_mask;
5336 ro_mask = reg_tbl[i].ro_mask;
5338 save_val = readl(bp->regview + offset);
5340 writel(0, bp->regview + offset);
5342 val = readl(bp->regview + offset);
5343 if ((val & rw_mask) != 0) {
5344 goto reg_test_err;
5347 if ((val & ro_mask) != (save_val & ro_mask)) {
5348 goto reg_test_err;
5351 writel(0xffffffff, bp->regview + offset);
5353 val = readl(bp->regview + offset);
5354 if ((val & rw_mask) != rw_mask) {
5355 goto reg_test_err;
5358 if ((val & ro_mask) != (save_val & ro_mask)) {
5359 goto reg_test_err;
5362 writel(save_val, bp->regview + offset);
5363 continue;
5365 reg_test_err:
5366 writel(save_val, bp->regview + offset);
5367 ret = -ENODEV;
5368 break;
5370 return ret;
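/* Walk a memory window with a set of test patterns using indirect
 * register access, failing on the first read-back mismatch.
 */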
5373 static int
5374 bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
5376 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
5377 0xaaaaaaaa , 0xaa55aa55, 0x55aa55aa };
5378 int i;
5380 for (i = 0; i < sizeof(test_pattern) / 4; i++) {
5381 u32 offset;
5383 for (offset = 0; offset < size; offset += 4) {
5385 bnx2_reg_wr_ind(bp, start + offset, test_pattern[i]);
5387 if (bnx2_reg_rd_ind(bp, start + offset) !=
5388 test_pattern[i]) {
5389 return -ENODEV;
5393 return 0;
5396 static int
5397 bnx2_test_memory(struct bnx2 *bp)
5399 int ret = 0;
5400 int i;
5401 static struct mem_entry {
5402 u32 offset;
5403 u32 len;
5404 } mem_tbl_5706[] = {
5405 { 0x60000, 0x4000 },
5406 { 0xa0000, 0x3000 },
5407 { 0xe0000, 0x4000 },
5408 { 0x120000, 0x4000 },
5409 { 0x1a0000, 0x4000 },
5410 { 0x160000, 0x4000 },
5411 { 0xffffffff, 0 },
5413 mem_tbl_5709[] = {
5414 { 0x60000, 0x4000 },
5415 { 0xa0000, 0x3000 },
5416 { 0xe0000, 0x4000 },
5417 { 0x120000, 0x4000 },
5418 { 0x1a0000, 0x4000 },
5419 { 0xffffffff, 0 },
5421 struct mem_entry *mem_tbl;
5423 if (CHIP_NUM(bp) == CHIP_NUM_5709)
5424 mem_tbl = mem_tbl_5709;
5425 else
5426 mem_tbl = mem_tbl_5706;
5428 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
5429 if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
5430 mem_tbl[i].len)) != 0) {
5431 return ret;
5435 return ret;
5438 #define BNX2_MAC_LOOPBACK 0
5439 #define BNX2_PHY_LOOPBACK 1
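/* Send one self-addressed test frame in MAC or PHY loopback mode and
 * verify it returns on the RX ring intact: consumer indices advance,
 * no frame errors, correct length, and an unmodified payload.
 */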
5441 static int
5442 bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
5444 unsigned int pkt_size, num_pkts, i;
5445 struct sk_buff *skb, *rx_skb;
5446 unsigned char *packet;
5447 u16 rx_start_idx, rx_idx;
5448 dma_addr_t map;
5449 struct tx_bd *txbd;
5450 struct sw_bd *rx_buf;
5451 struct l2_fhdr *rx_hdr;
5452 int ret = -ENODEV;
5453 struct bnx2_napi *bnapi = &bp->bnx2_napi[0], *tx_napi;
5454 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
5455 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
5457 tx_napi = bnapi;
5459 txr = &tx_napi->tx_ring;
5460 rxr = &bnapi->rx_ring;
5461 if (loopback_mode == BNX2_MAC_LOOPBACK) {
5462 bp->loopback = MAC_LOOPBACK;
5463 bnx2_set_mac_loopback(bp);
5465 else if (loopback_mode == BNX2_PHY_LOOPBACK) {
5466 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
5467 return 0;
5469 bp->loopback = PHY_LOOPBACK;
5470 bnx2_set_phy_loopback(bp);
5472 else
5473 return -EINVAL;
5475 pkt_size = min(bp->dev->mtu + ETH_HLEN, bp->rx_jumbo_thresh - 4);
5476 skb = netdev_alloc_skb(bp->dev, pkt_size);
5477 if (!skb)
5478 return -ENOMEM;
5479 packet = skb_put(skb, pkt_size);
5480 memcpy(packet, bp->dev->dev_addr, 6);
5481 memset(packet + 6, 0x0, 8);
5482 for (i = 14; i < pkt_size; i++)
5483 packet[i] = (unsigned char) (i & 0xff);
5485 if (skb_dma_map(&bp->pdev->dev, skb, DMA_TO_DEVICE)) {
5486 dev_kfree_skb(skb);
5487 return -EIO;
5489 map = skb_shinfo(skb)->dma_maps[0];
5491 REG_WR(bp, BNX2_HC_COMMAND,
5492 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
5494 REG_RD(bp, BNX2_HC_COMMAND);
5496 udelay(5);
5497 rx_start_idx = bnx2_get_hw_rx_cons(bnapi);
5499 num_pkts = 0;
5501 txbd = &txr->tx_desc_ring[TX_RING_IDX(txr->tx_prod)];
5503 txbd->tx_bd_haddr_hi = (u64) map >> 32;
5504 txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
5505 txbd->tx_bd_mss_nbytes = pkt_size;
5506 txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;
5508 num_pkts++;
5509 txr->tx_prod = NEXT_TX_BD(txr->tx_prod);
5510 txr->tx_prod_bseq += pkt_size;
5512 REG_WR16(bp, txr->tx_bidx_addr, txr->tx_prod);
5513 REG_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);
5515 udelay(100);
5517 REG_WR(bp, BNX2_HC_COMMAND,
5518 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
5520 REG_RD(bp, BNX2_HC_COMMAND);
5522 udelay(5);
5524 skb_dma_unmap(&bp->pdev->dev, skb, DMA_TO_DEVICE);
5525 dev_kfree_skb(skb);
5527 if (bnx2_get_hw_tx_cons(tx_napi) != txr->tx_prod)
5528 goto loopback_test_done;
5530 rx_idx = bnx2_get_hw_rx_cons(bnapi);
5531 if (rx_idx != rx_start_idx + num_pkts) {
5532 goto loopback_test_done;
5535 rx_buf = &rxr->rx_buf_ring[rx_start_idx];
5536 rx_skb = rx_buf->skb;
5538 rx_hdr = (struct l2_fhdr *) rx_skb->data;
5539 skb_reserve(rx_skb, BNX2_RX_OFFSET);
5541 pci_dma_sync_single_for_cpu(bp->pdev,
5542 pci_unmap_addr(rx_buf, mapping),
5543 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
5545 if (rx_hdr->l2_fhdr_status &
5546 (L2_FHDR_ERRORS_BAD_CRC |
5547 L2_FHDR_ERRORS_PHY_DECODE |
5548 L2_FHDR_ERRORS_ALIGNMENT |
5549 L2_FHDR_ERRORS_TOO_SHORT |
5550 L2_FHDR_ERRORS_GIANT_FRAME)) {
5552 goto loopback_test_done;
5555 if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) {
5556 goto loopback_test_done;
5559 for (i = 14; i < pkt_size; i++) {
5560 if (*(rx_skb->data + i) != (unsigned char) (i & 0xff)) {
5561 goto loopback_test_done;
5565 ret = 0;
5567 loopback_test_done:
5568 bp->loopback = 0;
5569 return ret;
5572 #define BNX2_MAC_LOOPBACK_FAILED 1
5573 #define BNX2_PHY_LOOPBACK_FAILED 2
5574 #define BNX2_LOOPBACK_FAILED (BNX2_MAC_LOOPBACK_FAILED | \
5575 BNX2_PHY_LOOPBACK_FAILED)
5577 static int
5578 bnx2_test_loopback(struct bnx2 *bp)
5580 int rc = 0;
5582 if (!netif_running(bp->dev))
5583 return BNX2_LOOPBACK_FAILED;
5585 bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
5586 spin_lock_bh(&bp->phy_lock);
5587 bnx2_init_phy(bp, 1);
5588 spin_unlock_bh(&bp->phy_lock);
5589 if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
5590 rc |= BNX2_MAC_LOOPBACK_FAILED;
5591 if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
5592 rc |= BNX2_PHY_LOOPBACK_FAILED;
5593 return rc;
5596 #define NVRAM_SIZE 0x200
5597 #define CRC32_RESIDUAL 0xdebb20e3
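/* Validate NVRAM: check the 0x669955aa magic in the first dword,
 * then CRC each 0x100-byte half of the information block.  A CRC32
 * computed over data that includes its own checksum leaves the
 * constant residual 0xdebb20e3 when the data is intact.
 */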
5599 static int
5600 bnx2_test_nvram(struct bnx2 *bp)
5602 __be32 buf[NVRAM_SIZE / 4];
5603 u8 *data = (u8 *) buf;
5604 int rc = 0;
5605 u32 magic, csum;
5607 if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
5608 goto test_nvram_done;
5610 magic = be32_to_cpu(buf[0]);
5611 if (magic != 0x669955aa) {
5612 rc = -ENODEV;
5613 goto test_nvram_done;
5616 if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0)
5617 goto test_nvram_done;
5619 csum = ether_crc_le(0x100, data);
5620 if (csum != CRC32_RESIDUAL) {
5621 rc = -ENODEV;
5622 goto test_nvram_done;
5625 csum = ether_crc_le(0x100, data + 0x100);
5626 if (csum != CRC32_RESIDUAL) {
5627 rc = -ENODEV;
5630 test_nvram_done:
5631 return rc;
5634 static int
5635 bnx2_test_link(struct bnx2 *bp)
5637 u32 bmsr;
5639 if (!netif_running(bp->dev))
5640 return -ENODEV;
5642 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
5643 if (bp->link_up)
5644 return 0;
5645 return -ENODEV;
5647 spin_lock_bh(&bp->phy_lock);
5648 bnx2_enable_bmsr1(bp);
5649 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
5650 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
5651 bnx2_disable_bmsr1(bp);
5652 spin_unlock_bh(&bp->phy_lock);
5654 if (bmsr & BMSR_LSTATUS) {
5655 return 0;
5657 return -ENODEV;
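/* Check that interrupts are being delivered: force a coalesce-now
 * event and poll briefly for the status block index to advance.
 */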
5660 static int
5661 bnx2_test_intr(struct bnx2 *bp)
5663 int i;
5664 u16 status_idx;
5666 if (!netif_running(bp->dev))
5667 return -ENODEV;
5669 status_idx = REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;
5671 /* This register is not touched during run-time. */
5672 REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
5673 REG_RD(bp, BNX2_HC_COMMAND);
5675 for (i = 0; i < 10; i++) {
5676 if ((REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
5677 status_idx) {
5679 break;
5682 msleep_interruptible(10);
5684 if (i < 10)
5685 return 0;
5687 return -ENODEV;
5690 /* Determine link for parallel detection. */
5691 static int
5692 bnx2_5706_serdes_has_link(struct bnx2 *bp)
5694 u32 mode_ctl, an_dbg, exp;
5696 if (bp->phy_flags & BNX2_PHY_FLAG_NO_PARALLEL)
5697 return 0;
5699 bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_MODE_CTL);
5700 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &mode_ctl);
5702 if (!(mode_ctl & MISC_SHDW_MODE_CTL_SIG_DET))
5703 return 0;
5705 bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
5706 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
5707 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
5709 if (an_dbg & (MISC_SHDW_AN_DBG_NOSYNC | MISC_SHDW_AN_DBG_RUDI_INVALID))
5710 return 0;
5712 bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_REG1);
5713 bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);
5714 bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);
5716 if (exp & MII_EXPAND_REG1_RUDI_C) /* receiving CONFIG */
5717 return 0;
5719 return 1;
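/* Periodic SerDes handling for the 5706: when autoneg gets no
 * response but parallel detection sees a link, force 1G full duplex;
 * once the partner starts autonegotiating, re-enable autoneg.
 */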
5722 static void
5723 bnx2_5706_serdes_timer(struct bnx2 *bp)
5725 int check_link = 1;
5727 spin_lock(&bp->phy_lock);
5728 if (bp->serdes_an_pending) {
5729 bp->serdes_an_pending--;
5730 check_link = 0;
5731 } else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
5732 u32 bmcr;
5734 bp->current_interval = BNX2_TIMER_INTERVAL;
5736 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
5738 if (bmcr & BMCR_ANENABLE) {
5739 if (bnx2_5706_serdes_has_link(bp)) {
5740 bmcr &= ~BMCR_ANENABLE;
5741 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
5742 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
5743 bp->phy_flags |= BNX2_PHY_FLAG_PARALLEL_DETECT;
5747 else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
5748 (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)) {
5749 u32 phy2;
5751 bnx2_write_phy(bp, 0x17, 0x0f01);
5752 bnx2_read_phy(bp, 0x15, &phy2);
5753 if (phy2 & 0x20) {
5754 u32 bmcr;
5756 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
5757 bmcr |= BMCR_ANENABLE;
5758 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
5760 bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
5762 } else
5763 bp->current_interval = BNX2_TIMER_INTERVAL;
5765 if (check_link) {
5766 u32 val;
5768 bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
5769 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);
5770 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);
5772 if (bp->link_up && (val & MISC_SHDW_AN_DBG_NOSYNC)) {
5773 if (!(bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN)) {
5774 bnx2_5706s_force_link_dn(bp, 1);
5775 bp->phy_flags |= BNX2_PHY_FLAG_FORCED_DOWN;
5776 } else
5777 bnx2_set_link(bp);
5778 } else if (!bp->link_up && !(val & MISC_SHDW_AN_DBG_NOSYNC))
5779 bnx2_set_link(bp);
5781 spin_unlock(&bp->phy_lock);
5784 static void
5785 bnx2_5708_serdes_timer(struct bnx2 *bp)
5787 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
5788 return;
5790 if ((bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) == 0) {
5791 bp->serdes_an_pending = 0;
5792 return;
5795 spin_lock(&bp->phy_lock);
5796 if (bp->serdes_an_pending)
5797 bp->serdes_an_pending--;
5798 else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
5799 u32 bmcr;
5801 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
5802 if (bmcr & BMCR_ANENABLE) {
5803 bnx2_enable_forced_2g5(bp);
5804 bp->current_interval = BNX2_SERDES_FORCED_TIMEOUT;
5805 } else {
5806 bnx2_disable_forced_2g5(bp);
5807 bp->serdes_an_pending = 2;
5808 bp->current_interval = BNX2_TIMER_INTERVAL;
5811 } else
5812 bp->current_interval = BNX2_TIMER_INTERVAL;
5814 spin_unlock(&bp->phy_lock);
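/* Periodic driver timer: check for missed MSIs, send the firmware
 * heartbeat, pick up the firmware RX drop count, and run the SerDes
 * state machines on 5706/5708 parts.
 */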
5817 static void
5818 bnx2_timer(unsigned long data)
5820 struct bnx2 *bp = (struct bnx2 *) data;
5822 if (!netif_running(bp->dev))
5823 return;
5825 if (atomic_read(&bp->intr_sem) != 0)
5826 goto bnx2_restart_timer;
5828 if ((bp->flags & (BNX2_FLAG_USING_MSI | BNX2_FLAG_ONE_SHOT_MSI)) ==
5829 BNX2_FLAG_USING_MSI)
5830 bnx2_chk_missed_msi(bp);
5832 bnx2_send_heart_beat(bp);
5834 bp->stats_blk->stat_FwRxDrop =
5835 bnx2_reg_rd_ind(bp, BNX2_FW_RX_DROP_COUNT);
5837 /* Work around occasionally corrupted counters. */
5838 if (CHIP_NUM(bp) == CHIP_NUM_5708 && bp->stats_ticks)
5839 REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd |
5840 BNX2_HC_COMMAND_STATS_NOW);
5842 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
5843 if (CHIP_NUM(bp) == CHIP_NUM_5706)
5844 bnx2_5706_serdes_timer(bp);
5845 else
5846 bnx2_5708_serdes_timer(bp);
5849 bnx2_restart_timer:
5850 mod_timer(&bp->timer, jiffies + bp->current_interval);
5853 static int
5854 bnx2_request_irq(struct bnx2 *bp)
5856 unsigned long flags;
5857 struct bnx2_irq *irq;
5858 int rc = 0, i;
5860 if (bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)
5861 flags = 0;
5862 else
5863 flags = IRQF_SHARED;
5865 for (i = 0; i < bp->irq_nvecs; i++) {
5866 irq = &bp->irq_tbl[i];
5867 rc = request_irq(irq->vector, irq->handler, flags, irq->name,
5868 &bp->bnx2_napi[i]);
5869 if (rc)
5870 break;
5871 irq->requested = 1;
5873 return rc;
5876 static void
5877 bnx2_free_irq(struct bnx2 *bp)
5879 struct bnx2_irq *irq;
5880 int i;
5882 for (i = 0; i < bp->irq_nvecs; i++) {
5883 irq = &bp->irq_tbl[i];
5884 if (irq->requested)
5885 free_irq(irq->vector, &bp->bnx2_napi[i]);
5886 irq->requested = 0;
5888 if (bp->flags & BNX2_FLAG_USING_MSI)
5889 pci_disable_msi(bp->pdev);
5890 else if (bp->flags & BNX2_FLAG_USING_MSIX)
5891 pci_disable_msix(bp->pdev);
5893 bp->flags &= ~(BNX2_FLAG_USING_MSI_OR_MSIX | BNX2_FLAG_ONE_SHOT_MSI);
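/* Attempt to switch to MSI-X: set up the vector table windows,
 * request the full vector set from the PCI layer, and record each
 * assigned vector with a per-vector IRQ name.
 */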
5896 static void
5897 bnx2_enable_msix(struct bnx2 *bp, int msix_vecs)
5899 int i, rc;
5900 struct msix_entry msix_ent[BNX2_MAX_MSIX_VEC];
5901 struct net_device *dev = bp->dev;
5902 const int len = sizeof(bp->irq_tbl[0].name);
5904 bnx2_setup_msix_tbl(bp);
5905 REG_WR(bp, BNX2_PCI_MSIX_CONTROL, BNX2_MAX_MSIX_HW_VEC - 1);
5906 REG_WR(bp, BNX2_PCI_MSIX_TBL_OFF_BIR, BNX2_PCI_GRC_WINDOW2_BASE);
5907 REG_WR(bp, BNX2_PCI_MSIX_PBA_OFF_BIT, BNX2_PCI_GRC_WINDOW3_BASE);
5909 for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
5910 msix_ent[i].entry = i;
5911 msix_ent[i].vector = 0;
5914 rc = pci_enable_msix(bp->pdev, msix_ent, BNX2_MAX_MSIX_VEC);
5915 if (rc != 0)
5916 return;
5918 bp->irq_nvecs = msix_vecs;
5919 bp->flags |= BNX2_FLAG_USING_MSIX | BNX2_FLAG_ONE_SHOT_MSI;
5920 for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
5921 bp->irq_tbl[i].vector = msix_ent[i].vector;
5922 snprintf(bp->irq_tbl[i].name, len, "%s-%d", dev->name, i);
5923 bp->irq_tbl[i].handler = bnx2_msi_1shot;
5927 static void
5928 bnx2_setup_int_mode(struct bnx2 *bp, int dis_msi)
5930 int cpus = num_online_cpus();
5931 int msix_vecs = min(cpus + 1, RX_MAX_RINGS);
5933 bp->irq_tbl[0].handler = bnx2_interrupt;
5934 strcpy(bp->irq_tbl[0].name, bp->dev->name);
5935 bp->irq_nvecs = 1;
5936 bp->irq_tbl[0].vector = bp->pdev->irq;
5938 if ((bp->flags & BNX2_FLAG_MSIX_CAP) && !dis_msi && cpus > 1)
5939 bnx2_enable_msix(bp, msix_vecs);
5941 if ((bp->flags & BNX2_FLAG_MSI_CAP) && !dis_msi &&
5942 !(bp->flags & BNX2_FLAG_USING_MSIX)) {
5943 if (pci_enable_msi(bp->pdev) == 0) {
5944 bp->flags |= BNX2_FLAG_USING_MSI;
5945 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
5946 bp->flags |= BNX2_FLAG_ONE_SHOT_MSI;
5947 bp->irq_tbl[0].handler = bnx2_msi_1shot;
5948 } else
5949 bp->irq_tbl[0].handler = bnx2_msi;
5951 bp->irq_tbl[0].vector = bp->pdev->irq;
5955 bp->num_tx_rings = rounddown_pow_of_two(bp->irq_nvecs);
5956 bp->dev->real_num_tx_queues = bp->num_tx_rings;
5958 bp->num_rx_rings = bp->irq_nvecs;
5961 /* Called with rtnl_lock */
5962 static int
5963 bnx2_open(struct net_device *dev)
5965 struct bnx2 *bp = netdev_priv(dev);
5966 int rc;
5968 netif_carrier_off(dev);
5970 bnx2_set_power_state(bp, PCI_D0);
5971 bnx2_disable_int(bp);
5973 bnx2_setup_int_mode(bp, disable_msi);
5974 bnx2_napi_enable(bp);
5975 rc = bnx2_alloc_mem(bp);
5976 if (rc)
5977 goto open_err;
5979 rc = bnx2_request_irq(bp);
5980 if (rc)
5981 goto open_err;
5983 rc = bnx2_init_nic(bp, 1);
5984 if (rc)
5985 goto open_err;
5987 mod_timer(&bp->timer, jiffies + bp->current_interval);
5989 atomic_set(&bp->intr_sem, 0);
5991 bnx2_enable_int(bp);
5993 if (bp->flags & BNX2_FLAG_USING_MSI) {
5994 /* Test MSI to make sure it is working
5995 * If the MSI test fails, go back to INTx mode. */
5997 if (bnx2_test_intr(bp) != 0) {
5998 printk(KERN_WARNING PFX "%s: No interrupt was generated"
5999 " using MSI, switching to INTx mode. Please"
6000 " report this failure to the PCI maintainer"
6001 " and include system chipset information.\n",
6002 bp->dev->name);
6004 bnx2_disable_int(bp);
6005 bnx2_free_irq(bp);
6007 bnx2_setup_int_mode(bp, 1);
6009 rc = bnx2_init_nic(bp, 0);
6011 if (!rc)
6012 rc = bnx2_request_irq(bp);
6014 if (rc) {
6015 del_timer_sync(&bp->timer);
6016 goto open_err;
6018 bnx2_enable_int(bp);
6021 if (bp->flags & BNX2_FLAG_USING_MSI)
6022 printk(KERN_INFO PFX "%s: using MSI\n", dev->name);
6023 else if (bp->flags & BNX2_FLAG_USING_MSIX)
6024 printk(KERN_INFO PFX "%s: using MSIX\n", dev->name);
6026 netif_tx_start_all_queues(dev);
6028 return 0;
6030 open_err:
6031 bnx2_napi_disable(bp);
6032 bnx2_free_skbs(bp);
6033 bnx2_free_irq(bp);
6034 bnx2_free_mem(bp);
6035 return rc;
6038 static void
6039 bnx2_reset_task(struct work_struct *work)
6041 struct bnx2 *bp = container_of(work, struct bnx2, reset_task);
6043 if (!netif_running(bp->dev))
6044 return;
6046 bnx2_netif_stop(bp);
6048 bnx2_init_nic(bp, 1);
6050 atomic_set(&bp->intr_sem, 1);
6051 bnx2_netif_start(bp);
6054 static void
6055 bnx2_tx_timeout(struct net_device *dev)
6057 struct bnx2 *bp = netdev_priv(dev);
6059 /* This allows the netif to be shut down gracefully before resetting */
6060 schedule_work(&bp->reset_task);
6063 #ifdef BCM_VLAN
6064 /* Called with rtnl_lock */
6065 static void
6066 bnx2_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp)
6068 struct bnx2 *bp = netdev_priv(dev);
6070 bnx2_netif_stop(bp);
6072 bp->vlgrp = vlgrp;
6073 bnx2_set_rx_mode(dev);
6074 if (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN)
6075 bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_KEEP_VLAN_UPDATE, 0, 1);
6077 bnx2_netif_start(bp);
6079 #endif
6081 /* Called with netif_tx_lock.
6082 * bnx2_tx_int() runs without netif_tx_lock unless it needs to call
6083 * netif_wake_queue(). */
6085 static int
6086 bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
6088 struct bnx2 *bp = netdev_priv(dev);
6089 dma_addr_t mapping;
6090 struct tx_bd *txbd;
6091 struct sw_tx_bd *tx_buf;
6092 u32 len, vlan_tag_flags, last_frag, mss;
6093 u16 prod, ring_prod;
6094 int i;
6095 struct bnx2_napi *bnapi;
6096 struct bnx2_tx_ring_info *txr;
6097 struct netdev_queue *txq;
6098 struct skb_shared_info *sp;
6100 /* Determine which tx ring we will be placed on */
6101 i = skb_get_queue_mapping(skb);
6102 bnapi = &bp->bnx2_napi[i];
6103 txr = &bnapi->tx_ring;
6104 txq = netdev_get_tx_queue(dev, i);
6106 if (unlikely(bnx2_tx_avail(bp, txr) <
6107 (skb_shinfo(skb)->nr_frags + 1))) {
6108 netif_tx_stop_queue(txq);
6109 printk(KERN_ERR PFX "%s: BUG! Tx ring full when queue awake!\n",
6110 dev->name);
6112 return NETDEV_TX_BUSY;
6114 len = skb_headlen(skb);
6115 prod = txr->tx_prod;
6116 ring_prod = TX_RING_IDX(prod);
6118 vlan_tag_flags = 0;
6119 if (skb->ip_summed == CHECKSUM_PARTIAL) {
6120 vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
6123 #ifdef BCM_VLAN
6124 if (bp->vlgrp && vlan_tx_tag_present(skb)) {
6125 vlan_tag_flags |=
6126 (TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
6128 #endif
6129 if ((mss = skb_shinfo(skb)->gso_size)) {
6130 u32 tcp_opt_len;
6131 struct iphdr *iph;
6133 vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;
6135 tcp_opt_len = tcp_optlen(skb);
6137 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
6138 u32 tcp_off = skb_transport_offset(skb) -
6139 sizeof(struct ipv6hdr) - ETH_HLEN;
6141 vlan_tag_flags |= ((tcp_opt_len >> 2) << 8) |
6142 TX_BD_FLAGS_SW_FLAGS;
6143 if (likely(tcp_off == 0))
6144 vlan_tag_flags &= ~TX_BD_FLAGS_TCP6_OFF0_MSK;
6145 else {
6146 tcp_off >>= 3;
6147 vlan_tag_flags |= ((tcp_off & 0x3) <<
6148 TX_BD_FLAGS_TCP6_OFF0_SHL) |
6149 ((tcp_off & 0x10) <<
6150 TX_BD_FLAGS_TCP6_OFF4_SHL);
6151 mss |= (tcp_off & 0xc) << TX_BD_TCP6_OFF2_SHL;
6153 } else {
6154 iph = ip_hdr(skb);
6155 if (tcp_opt_len || (iph->ihl > 5)) {
6156 vlan_tag_flags |= ((iph->ihl - 5) +
6157 (tcp_opt_len >> 2)) << 8;
6160 } else
6161 mss = 0;
6163 if (skb_dma_map(&bp->pdev->dev, skb, DMA_TO_DEVICE)) {
6164 dev_kfree_skb(skb);
6165 return NETDEV_TX_OK;
6168 sp = skb_shinfo(skb);
6169 mapping = sp->dma_maps[0];
6171 tx_buf = &txr->tx_buf_ring[ring_prod];
6172 tx_buf->skb = skb;
6174 txbd = &txr->tx_desc_ring[ring_prod];
6176 txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
6177 txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
6178 txbd->tx_bd_mss_nbytes = len | (mss << 16);
6179 txbd->tx_bd_vlan_tag_flags = vlan_tag_flags | TX_BD_FLAGS_START;
6181 last_frag = skb_shinfo(skb)->nr_frags;
6183 for (i = 0; i < last_frag; i++) {
6184 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
6186 prod = NEXT_TX_BD(prod);
6187 ring_prod = TX_RING_IDX(prod);
6188 txbd = &txr->tx_desc_ring[ring_prod];
6190 len = frag->size;
6191 mapping = sp->dma_maps[i + 1];
6193 txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
6194 txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
6195 txbd->tx_bd_mss_nbytes = len | (mss << 16);
6196 txbd->tx_bd_vlan_tag_flags = vlan_tag_flags;
6199 txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;
6201 prod = NEXT_TX_BD(prod);
6202 txr->tx_prod_bseq += skb->len;
6204 REG_WR16(bp, txr->tx_bidx_addr, prod);
6205 REG_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);
6207 mmiowb();
6209 txr->tx_prod = prod;
6210 dev->trans_start = jiffies;
6212 if (unlikely(bnx2_tx_avail(bp, txr) <= MAX_SKB_FRAGS)) {
6213 netif_tx_stop_queue(txq);
6214 if (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)
6215 netif_tx_wake_queue(txq);
6218 return NETDEV_TX_OK;
6221 /* Called with rtnl_lock */
6222 static int
6223 bnx2_close(struct net_device *dev)
6225 struct bnx2 *bp = netdev_priv(dev);
6227 cancel_work_sync(&bp->reset_task);
6229 bnx2_disable_int_sync(bp);
6230 bnx2_napi_disable(bp);
6231 del_timer_sync(&bp->timer);
6232 bnx2_shutdown_chip(bp);
6233 bnx2_free_irq(bp);
6234 bnx2_free_skbs(bp);
6235 bnx2_free_mem(bp);
6236 bp->link_up = 0;
6237 netif_carrier_off(bp->dev);
6238 bnx2_set_power_state(bp, PCI_D3hot);
6239 return 0;
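/* Hardware counters are kept as 64-bit hi/lo pairs.  On 64-bit hosts
 * both halves are combined; 32-bit hosts report only the low half so
 * the value still fits in an unsigned long.
 */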
6242 #define GET_NET_STATS64(ctr) \
6243 (unsigned long) ((unsigned long) (ctr##_hi) << 32) + \
6244 (unsigned long) (ctr##_lo)
6246 #define GET_NET_STATS32(ctr) \
6247 (ctr##_lo)
6249 #if (BITS_PER_LONG == 64)
6250 #define GET_NET_STATS GET_NET_STATS64
6251 #else
6252 #define GET_NET_STATS GET_NET_STATS32
6253 #endif
6255 static struct net_device_stats *
6256 bnx2_get_stats(struct net_device *dev)
6258 struct bnx2 *bp = netdev_priv(dev);
6259 struct statistics_block *stats_blk = bp->stats_blk;
6260 struct net_device_stats *net_stats = &dev->stats;
6262 if (bp->stats_blk == NULL) {
6263 return net_stats;
6265 net_stats->rx_packets =
6266 GET_NET_STATS(stats_blk->stat_IfHCInUcastPkts) +
6267 GET_NET_STATS(stats_blk->stat_IfHCInMulticastPkts) +
6268 GET_NET_STATS(stats_blk->stat_IfHCInBroadcastPkts);
6270 net_stats->tx_packets =
6271 GET_NET_STATS(stats_blk->stat_IfHCOutUcastPkts) +
6272 GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts) +
6273 GET_NET_STATS(stats_blk->stat_IfHCOutBroadcastPkts);
6275 net_stats->rx_bytes =
6276 GET_NET_STATS(stats_blk->stat_IfHCInOctets);
6278 net_stats->tx_bytes =
6279 GET_NET_STATS(stats_blk->stat_IfHCOutOctets);
6281 net_stats->multicast =
6282 GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts);
6284 net_stats->collisions =
6285 (unsigned long) stats_blk->stat_EtherStatsCollisions;
6287 net_stats->rx_length_errors =
6288 (unsigned long) (stats_blk->stat_EtherStatsUndersizePkts +
6289 stats_blk->stat_EtherStatsOverrsizePkts);
6291 net_stats->rx_over_errors =
6292 (unsigned long) stats_blk->stat_IfInMBUFDiscards;
6294 net_stats->rx_frame_errors =
6295 (unsigned long) stats_blk->stat_Dot3StatsAlignmentErrors;
6297 net_stats->rx_crc_errors =
6298 (unsigned long) stats_blk->stat_Dot3StatsFCSErrors;
6300 net_stats->rx_errors = net_stats->rx_length_errors +
6301 net_stats->rx_over_errors + net_stats->rx_frame_errors +
6302 net_stats->rx_crc_errors;
6304 net_stats->tx_aborted_errors =
6305 (unsigned long) (stats_blk->stat_Dot3StatsExcessiveCollisions +
6306 stats_blk->stat_Dot3StatsLateCollisions);
6308 if ((CHIP_NUM(bp) == CHIP_NUM_5706) ||
6309 (CHIP_ID(bp) == CHIP_ID_5708_A0))
6310 net_stats->tx_carrier_errors = 0;
6311 else {
6312 net_stats->tx_carrier_errors =
6313 (unsigned long)
6314 stats_blk->stat_Dot3StatsCarrierSenseErrors;
6317 net_stats->tx_errors =
6318 (unsigned long)
6319 stats_blk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors +
6321 net_stats->tx_aborted_errors +
6322 net_stats->tx_carrier_errors;
6324 net_stats->rx_missed_errors =
6325 (unsigned long) (stats_blk->stat_IfInMBUFDiscards +
6326 stats_blk->stat_FwRxDrop);
6328 return net_stats;
6331 /* All ethtool functions called with rtnl_lock */
6333 static int
6334 bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
6336 struct bnx2 *bp = netdev_priv(dev);
6337 int support_serdes = 0, support_copper = 0;
6339 cmd->supported = SUPPORTED_Autoneg;
6340 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
6341 support_serdes = 1;
6342 support_copper = 1;
6343 } else if (bp->phy_port == PORT_FIBRE)
6344 support_serdes = 1;
6345 else
6346 support_copper = 1;
6348 if (support_serdes) {
6349 cmd->supported |= SUPPORTED_1000baseT_Full |
6350 SUPPORTED_FIBRE;
6351 if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
6352 cmd->supported |= SUPPORTED_2500baseX_Full;
6355 if (support_copper) {
6356 cmd->supported |= SUPPORTED_10baseT_Half |
6357 SUPPORTED_10baseT_Full |
6358 SUPPORTED_100baseT_Half |
6359 SUPPORTED_100baseT_Full |
6360 SUPPORTED_1000baseT_Full |
6361 SUPPORTED_TP;
6365 spin_lock_bh(&bp->phy_lock);
6366 cmd->port = bp->phy_port;
6367 cmd->advertising = bp->advertising;
6369 if (bp->autoneg & AUTONEG_SPEED) {
6370 cmd->autoneg = AUTONEG_ENABLE;
6372 else {
6373 cmd->autoneg = AUTONEG_DISABLE;
6376 if (netif_carrier_ok(dev)) {
6377 cmd->speed = bp->line_speed;
6378 cmd->duplex = bp->duplex;
6380 else {
6381 cmd->speed = -1;
6382 cmd->duplex = -1;
6384 spin_unlock_bh(&bp->phy_lock);
6386 cmd->transceiver = XCVR_INTERNAL;
6387 cmd->phy_address = bp->phy_addr;
6389 return 0;
6392 static int
6393 bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
6395 struct bnx2 *bp = netdev_priv(dev);
6396 u8 autoneg = bp->autoneg;
6397 u8 req_duplex = bp->req_duplex;
6398 u16 req_line_speed = bp->req_line_speed;
6399 u32 advertising = bp->advertising;
6400 int err = -EINVAL;
6402 spin_lock_bh(&bp->phy_lock);
6404 if (cmd->port != PORT_TP && cmd->port != PORT_FIBRE)
6405 goto err_out_unlock;
6407 if (cmd->port != bp->phy_port &&
6408 !(bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP))
6409 goto err_out_unlock;
6411 /* If device is down, we can store the settings only if the user
6412 * is setting the currently active port. */
6414 if (!netif_running(dev) && cmd->port != bp->phy_port)
6415 goto err_out_unlock;
6417 if (cmd->autoneg == AUTONEG_ENABLE) {
6418 autoneg |= AUTONEG_SPEED;
6420 cmd->advertising &= ETHTOOL_ALL_COPPER_SPEED;
6422 /* allow advertising 1 speed */
6423 if ((cmd->advertising == ADVERTISED_10baseT_Half) ||
6424 (cmd->advertising == ADVERTISED_10baseT_Full) ||
6425 (cmd->advertising == ADVERTISED_100baseT_Half) ||
6426 (cmd->advertising == ADVERTISED_100baseT_Full)) {
6428 if (cmd->port == PORT_FIBRE)
6429 goto err_out_unlock;
6431 advertising = cmd->advertising;
6433 } else if (cmd->advertising == ADVERTISED_2500baseX_Full) {
6434 if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) ||
6435 (cmd->port == PORT_TP))
6436 goto err_out_unlock;
6437 } else if (cmd->advertising == ADVERTISED_1000baseT_Full)
6438 advertising = cmd->advertising;
6439 else if (cmd->advertising == ADVERTISED_1000baseT_Half)
6440 goto err_out_unlock;
6441 else {
6442 if (cmd->port == PORT_FIBRE)
6443 advertising = ETHTOOL_ALL_FIBRE_SPEED;
6444 else
6445 advertising = ETHTOOL_ALL_COPPER_SPEED;
6447 advertising |= ADVERTISED_Autoneg;
6449 else {
6450 if (cmd->port == PORT_FIBRE) {
6451 if ((cmd->speed != SPEED_1000 &&
6452 cmd->speed != SPEED_2500) ||
6453 (cmd->duplex != DUPLEX_FULL))
6454 goto err_out_unlock;
6456 if (cmd->speed == SPEED_2500 &&
6457 !(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
6458 goto err_out_unlock;
6460 else if (cmd->speed == SPEED_1000 || cmd->speed == SPEED_2500)
6461 goto err_out_unlock;
6463 autoneg &= ~AUTONEG_SPEED;
6464 req_line_speed = cmd->speed;
6465 req_duplex = cmd->duplex;
6466 advertising = 0;
6469 bp->autoneg = autoneg;
6470 bp->advertising = advertising;
6471 bp->req_line_speed = req_line_speed;
6472 bp->req_duplex = req_duplex;
6474 err = 0;
6475 /* If device is down, the new settings will be picked up when it is
6476 * brought up. */
6478 if (netif_running(dev))
6479 err = bnx2_setup_phy(bp, cmd->port);
6481 err_out_unlock:
6482 spin_unlock_bh(&bp->phy_lock);
6484 return err;
6487 static void
6488 bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
6490 struct bnx2 *bp = netdev_priv(dev);
6492 strcpy(info->driver, DRV_MODULE_NAME);
6493 strcpy(info->version, DRV_MODULE_VERSION);
6494 strcpy(info->bus_info, pci_name(bp->pdev));
6495 strcpy(info->fw_version, bp->fw_version);
6498 #define BNX2_REGDUMP_LEN (32 * 1024)
6500 static int
6501 bnx2_get_regs_len(struct net_device *dev)
6503 return BNX2_REGDUMP_LEN;
6506 static void
6507 bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
6509 u32 *p = _p, i, offset;
6510 u8 *orig_p = _p;
6511 struct bnx2 *bp = netdev_priv(dev);
6512 u32 reg_boundaries[] = { 0x0000, 0x0098, 0x0400, 0x045c,
6513 0x0800, 0x0880, 0x0c00, 0x0c10,
6514 0x0c30, 0x0d08, 0x1000, 0x101c,
6515 0x1040, 0x1048, 0x1080, 0x10a4,
6516 0x1400, 0x1490, 0x1498, 0x14f0,
6517 0x1500, 0x155c, 0x1580, 0x15dc,
6518 0x1600, 0x1658, 0x1680, 0x16d8,
6519 0x1800, 0x1820, 0x1840, 0x1854,
6520 0x1880, 0x1894, 0x1900, 0x1984,
6521 0x1c00, 0x1c0c, 0x1c40, 0x1c54,
6522 0x1c80, 0x1c94, 0x1d00, 0x1d84,
6523 0x2000, 0x2030, 0x23c0, 0x2400,
6524 0x2800, 0x2820, 0x2830, 0x2850,
6525 0x2b40, 0x2c10, 0x2fc0, 0x3058,
6526 0x3c00, 0x3c94, 0x4000, 0x4010,
6527 0x4080, 0x4090, 0x43c0, 0x4458,
6528 0x4c00, 0x4c18, 0x4c40, 0x4c54,
6529 0x4fc0, 0x5010, 0x53c0, 0x5444,
6530 0x5c00, 0x5c18, 0x5c80, 0x5c90,
6531 0x5fc0, 0x6000, 0x6400, 0x6428,
6532 0x6800, 0x6848, 0x684c, 0x6860,
6533 0x6888, 0x6910, 0x8000 };
6535 regs->version = 0;
6537 memset(p, 0, BNX2_REGDUMP_LEN);
6539 if (!netif_running(bp->dev))
6540 return;
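/* reg_boundaries[] lists byte-offset pairs: each pair is the
 * [start, end) of a readable register window.  The loop below dumps
 * each window and skips the holes in between, which stay at the
 * zeros written by the memset above.
 */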
6542 i = 0;
6543 offset = reg_boundaries[0];
6544 p += offset;
6545 while (offset < BNX2_REGDUMP_LEN) {
6546 *p++ = REG_RD(bp, offset);
6547 offset += 4;
6548 if (offset == reg_boundaries[i + 1]) {
6549 offset = reg_boundaries[i + 2];
6550 p = (u32 *) (orig_p + offset);
6551 i += 2;
6556 static void
6557 bnx2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
6559 struct bnx2 *bp = netdev_priv(dev);
6561 if (bp->flags & BNX2_FLAG_NO_WOL) {
6562 wol->supported = 0;
6563 wol->wolopts = 0;
6565 else {
6566 wol->supported = WAKE_MAGIC;
6567 if (bp->wol)
6568 wol->wolopts = WAKE_MAGIC;
6569 else
6570 wol->wolopts = 0;
6572 memset(&wol->sopass, 0, sizeof(wol->sopass));
6575 static int
6576 bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
6578 struct bnx2 *bp = netdev_priv(dev);
6580 if (wol->wolopts & ~WAKE_MAGIC)
6581 return -EINVAL;
6583 if (wol->wolopts & WAKE_MAGIC) {
6584 if (bp->flags & BNX2_FLAG_NO_WOL)
6585 return -EINVAL;
6587 bp->wol = 1;
6589 else {
6590 bp->wol = 0;
6592 return 0;
6595 static int
6596 bnx2_nway_reset(struct net_device *dev)
6598 struct bnx2 *bp = netdev_priv(dev);
6599 u32 bmcr;
6601 if (!netif_running(dev))
6602 return -EAGAIN;
6604 if (!(bp->autoneg & AUTONEG_SPEED)) {
6605 return -EINVAL;
6608 spin_lock_bh(&bp->phy_lock);
6610 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
6611 int rc;
6613 rc = bnx2_setup_remote_phy(bp, bp->phy_port);
6614 spin_unlock_bh(&bp->phy_lock);
6615 return rc;
6618 /* Force a link-down event visible to the link partner */
6619 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
6620 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
6621 spin_unlock_bh(&bp->phy_lock);
6623 msleep(20);
6625 spin_lock_bh(&bp->phy_lock);
6627 bp->current_interval = BNX2_SERDES_AN_TIMEOUT;
6628 bp->serdes_an_pending = 1;
6629 mod_timer(&bp->timer, jiffies + bp->current_interval);
6632 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
6633 bmcr &= ~BMCR_LOOPBACK;
6634 bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART | BMCR_ANENABLE);
6636 spin_unlock_bh(&bp->phy_lock);
6638 return 0;
6641 static int
6642 bnx2_get_eeprom_len(struct net_device *dev)
6644 struct bnx2 *bp = netdev_priv(dev);
6646 if (bp->flash_info == NULL)
6647 return 0;
6649 return (int) bp->flash_size;
6652 static int
6653 bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
6654 u8 *eebuf)
6656 struct bnx2 *bp = netdev_priv(dev);
6657 int rc;
6659 if (!netif_running(dev))
6660 return -EAGAIN;
6662 /* parameters already validated in ethtool_get_eeprom */
6664 rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
6666 return rc;
6669 static int
6670 bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
6671 u8 *eebuf)
6673 struct bnx2 *bp = netdev_priv(dev);
6674 int rc;
6676 if (!netif_running(dev))
6677 return -EAGAIN;
6679 /* parameters already validated in ethtool_set_eeprom */
6681 rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
6683 return rc;
6686 static int
6687 bnx2_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
6689 struct bnx2 *bp = netdev_priv(dev);
6691 memset(coal, 0, sizeof(struct ethtool_coalesce));
6693 coal->rx_coalesce_usecs = bp->rx_ticks;
6694 coal->rx_max_coalesced_frames = bp->rx_quick_cons_trip;
6695 coal->rx_coalesce_usecs_irq = bp->rx_ticks_int;
6696 coal->rx_max_coalesced_frames_irq = bp->rx_quick_cons_trip_int;
6698 coal->tx_coalesce_usecs = bp->tx_ticks;
6699 coal->tx_max_coalesced_frames = bp->tx_quick_cons_trip;
6700 coal->tx_coalesce_usecs_irq = bp->tx_ticks_int;
6701 coal->tx_max_coalesced_frames_irq = bp->tx_quick_cons_trip_int;
6703 coal->stats_block_coalesce_usecs = bp->stats_ticks;
6705 return 0;
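/* Clamp each request to what the host coalescing block can hold:
 * tick counts are 10-bit (max 0x3ff) and frame counts 8-bit (max
 * 0xff).  The 5708 supports only a disabled or one-second
 * statistics tick.
 */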
6708 static int
6709 bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
6711 struct bnx2 *bp = netdev_priv(dev);
6713 bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
6714 if (bp->rx_ticks > 0x3ff) bp->rx_ticks = 0x3ff;
6716 bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames;
6717 if (bp->rx_quick_cons_trip > 0xff) bp->rx_quick_cons_trip = 0xff;
6719 bp->rx_ticks_int = (u16) coal->rx_coalesce_usecs_irq;
6720 if (bp->rx_ticks_int > 0x3ff) bp->rx_ticks_int = 0x3ff;
6722 bp->rx_quick_cons_trip_int = (u16) coal->rx_max_coalesced_frames_irq;
6723 if (bp->rx_quick_cons_trip_int > 0xff)
6724 bp->rx_quick_cons_trip_int = 0xff;
6726 bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
6727 if (bp->tx_ticks > 0x3ff) bp->tx_ticks = 0x3ff;
6729 bp->tx_quick_cons_trip = (u16) coal->tx_max_coalesced_frames;
6730 if (bp->tx_quick_cons_trip > 0xff) bp->tx_quick_cons_trip = 0xff;
6732 bp->tx_ticks_int = (u16) coal->tx_coalesce_usecs_irq;
6733 if (bp->tx_ticks_int > 0x3ff) bp->tx_ticks_int = 0x3ff;
6735 bp->tx_quick_cons_trip_int = (u16) coal->tx_max_coalesced_frames_irq;
6736 if (bp->tx_quick_cons_trip_int > 0xff)
6737 bp->tx_quick_cons_trip_int = 0xff;
6739 bp->stats_ticks = coal->stats_block_coalesce_usecs;
6740 if (CHIP_NUM(bp) == CHIP_NUM_5708) {
6741 if (bp->stats_ticks != 0 && bp->stats_ticks != USEC_PER_SEC)
6742 bp->stats_ticks = USEC_PER_SEC;
6744 if (bp->stats_ticks > BNX2_HC_STATS_TICKS_HC_STAT_TICKS)
6745 bp->stats_ticks = BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
6746 bp->stats_ticks &= BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
6748 if (netif_running(bp->dev)) {
6749 bnx2_netif_stop(bp);
6750 bnx2_init_nic(bp, 0);
6751 bnx2_netif_start(bp);
6754 return 0;
6757 static void
6758 bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
6760 struct bnx2 *bp = netdev_priv(dev);
6762 ering->rx_max_pending = MAX_TOTAL_RX_DESC_CNT;
6763 ering->rx_mini_max_pending = 0;
6764 ering->rx_jumbo_max_pending = MAX_TOTAL_RX_PG_DESC_CNT;
6766 ering->rx_pending = bp->rx_ring_size;
6767 ering->rx_mini_pending = 0;
6768 ering->rx_jumbo_pending = bp->rx_pg_ring_size;
6770 ering->tx_max_pending = MAX_TX_DESC_CNT;
6771 ering->tx_pending = bp->tx_ring_size;
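/* Changing ring sizes requires a full reinit: quiesce the interface,
 * reset the chip, free all buffers and descriptor memory, then
 * reallocate and restart with the new geometry.
 */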
6774 static int
6775 bnx2_change_ring_size(struct bnx2 *bp, u32 rx, u32 tx)
6777 if (netif_running(bp->dev)) {
6778 bnx2_netif_stop(bp);
6779 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
6780 bnx2_free_skbs(bp);
6781 bnx2_free_mem(bp);
6784 bnx2_set_rx_ring_size(bp, rx);
6785 bp->tx_ring_size = tx;
6787 if (netif_running(bp->dev)) {
6788 int rc;
6790 rc = bnx2_alloc_mem(bp);
6791 if (rc)
6792 return rc;
6793 bnx2_init_nic(bp, 0);
6794 bnx2_netif_start(bp);
6796 return 0;
6799 static int
6800 bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
6802 struct bnx2 *bp = netdev_priv(dev);
6803 int rc;
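/* The TX ring must be able to hold at least one maximally
 * fragmented skb, hence the MAX_SKB_FRAGS lower bound below.
 */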
6805 if ((ering->rx_pending > MAX_TOTAL_RX_DESC_CNT) ||
6806 (ering->tx_pending > MAX_TX_DESC_CNT) ||
6807 (ering->tx_pending <= MAX_SKB_FRAGS)) {
6809 return -EINVAL;
6811 rc = bnx2_change_ring_size(bp, ering->rx_pending, ering->tx_pending);
6812 return rc;
6815 static void
6816 bnx2_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
6818 struct bnx2 *bp = netdev_priv(dev);
6820 epause->autoneg = ((bp->autoneg & AUTONEG_FLOW_CTRL) != 0);
6821 epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) != 0);
6822 epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) != 0);
6825 static int
6826 bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
6828 struct bnx2 *bp = netdev_priv(dev);
6830 bp->req_flow_ctrl = 0;
6831 if (epause->rx_pause)
6832 bp->req_flow_ctrl |= FLOW_CTRL_RX;
6833 if (epause->tx_pause)
6834 bp->req_flow_ctrl |= FLOW_CTRL_TX;
6836 if (epause->autoneg) {
6837 bp->autoneg |= AUTONEG_FLOW_CTRL;
6839 else {
6840 bp->autoneg &= ~AUTONEG_FLOW_CTRL;
6843 if (netif_running(dev)) {
6844 spin_lock_bh(&bp->phy_lock);
6845 bnx2_setup_phy(bp, bp->phy_port);
6846 spin_unlock_bh(&bp->phy_lock);
6849 return 0;
6852 static u32
6853 bnx2_get_rx_csum(struct net_device *dev)
6855 struct bnx2 *bp = netdev_priv(dev);
6857 return bp->rx_csum;
6860 static int
6861 bnx2_set_rx_csum(struct net_device *dev, u32 data)
6863 struct bnx2 *bp = netdev_priv(dev);
6865 bp->rx_csum = data;
6866 return 0;
6869 static int
6870 bnx2_set_tso(struct net_device *dev, u32 data)
6872 struct bnx2 *bp = netdev_priv(dev);
6874 if (data) {
6875 dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
6876 if (CHIP_NUM(bp) == CHIP_NUM_5709)
6877 dev->features |= NETIF_F_TSO6;
6878 } else
6879 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6 |
6880 NETIF_F_TSO_ECN);
6881 return 0;
6884 #define BNX2_NUM_STATS 46
6886 static struct {
6887 char string[ETH_GSTRING_LEN];
6888 } bnx2_stats_str_arr[BNX2_NUM_STATS] = {
6889 { "rx_bytes" },
6890 { "rx_error_bytes" },
6891 { "tx_bytes" },
6892 { "tx_error_bytes" },
6893 { "rx_ucast_packets" },
6894 { "rx_mcast_packets" },
6895 { "rx_bcast_packets" },
6896 { "tx_ucast_packets" },
6897 { "tx_mcast_packets" },
6898 { "tx_bcast_packets" },
6899 { "tx_mac_errors" },
6900 { "tx_carrier_errors" },
6901 { "rx_crc_errors" },
6902 { "rx_align_errors" },
6903 { "tx_single_collisions" },
6904 { "tx_multi_collisions" },
6905 { "tx_deferred" },
6906 { "tx_excess_collisions" },
6907 { "tx_late_collisions" },
6908 { "tx_total_collisions" },
6909 { "rx_fragments" },
6910 { "rx_jabbers" },
6911 { "rx_undersize_packets" },
6912 { "rx_oversize_packets" },
6913 { "rx_64_byte_packets" },
6914 { "rx_65_to_127_byte_packets" },
6915 { "rx_128_to_255_byte_packets" },
6916 { "rx_256_to_511_byte_packets" },
6917 { "rx_512_to_1023_byte_packets" },
6918 { "rx_1024_to_1522_byte_packets" },
6919 { "rx_1523_to_9022_byte_packets" },
6920 { "tx_64_byte_packets" },
6921 { "tx_65_to_127_byte_packets" },
6922 { "tx_128_to_255_byte_packets" },
6923 { "tx_256_to_511_byte_packets" },
6924 { "tx_512_to_1023_byte_packets" },
6925 { "tx_1024_to_1522_byte_packets" },
6926 { "tx_1523_to_9022_byte_packets" },
6927 { "rx_xon_frames" },
6928 { "rx_xoff_frames" },
6929 { "tx_xon_frames" },
6930 { "tx_xoff_frames" },
6931 { "rx_mac_ctrl_frames" },
6932 { "rx_filtered_packets" },
6933 { "rx_discards" },
6934 { "rx_fw_discards" },
6937 #define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4)
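/* These offsets index 32-bit words in the statistics block.  64-bit
 * counters are stored as _hi/_lo word pairs; the per-chip
 * stats_len_arr tables below record whether each counter is read as
 * 8, 4, or 0 (skipped) bytes.
 */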
6939 static const unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
6940 STATS_OFFSET32(stat_IfHCInOctets_hi),
6941 STATS_OFFSET32(stat_IfHCInBadOctets_hi),
6942 STATS_OFFSET32(stat_IfHCOutOctets_hi),
6943 STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
6944 STATS_OFFSET32(stat_IfHCInUcastPkts_hi),
6945 STATS_OFFSET32(stat_IfHCInMulticastPkts_hi),
6946 STATS_OFFSET32(stat_IfHCInBroadcastPkts_hi),
6947 STATS_OFFSET32(stat_IfHCOutUcastPkts_hi),
6948 STATS_OFFSET32(stat_IfHCOutMulticastPkts_hi),
6949 STATS_OFFSET32(stat_IfHCOutBroadcastPkts_hi),
6950 STATS_OFFSET32(stat_emac_tx_stat_dot3statsinternalmactransmiterrors),
6951 STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
6952 STATS_OFFSET32(stat_Dot3StatsFCSErrors),
6953 STATS_OFFSET32(stat_Dot3StatsAlignmentErrors),
6954 STATS_OFFSET32(stat_Dot3StatsSingleCollisionFrames),
6955 STATS_OFFSET32(stat_Dot3StatsMultipleCollisionFrames),
6956 STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
6957 STATS_OFFSET32(stat_Dot3StatsExcessiveCollisions),
6958 STATS_OFFSET32(stat_Dot3StatsLateCollisions),
6959 STATS_OFFSET32(stat_EtherStatsCollisions),
6960 STATS_OFFSET32(stat_EtherStatsFragments),
6961 STATS_OFFSET32(stat_EtherStatsJabbers),
6962 STATS_OFFSET32(stat_EtherStatsUndersizePkts),
6963 STATS_OFFSET32(stat_EtherStatsOverrsizePkts),
6964 STATS_OFFSET32(stat_EtherStatsPktsRx64Octets),
6965 STATS_OFFSET32(stat_EtherStatsPktsRx65Octetsto127Octets),
6966 STATS_OFFSET32(stat_EtherStatsPktsRx128Octetsto255Octets),
6967 STATS_OFFSET32(stat_EtherStatsPktsRx256Octetsto511Octets),
6968 STATS_OFFSET32(stat_EtherStatsPktsRx512Octetsto1023Octets),
6969 STATS_OFFSET32(stat_EtherStatsPktsRx1024Octetsto1522Octets),
6970 STATS_OFFSET32(stat_EtherStatsPktsRx1523Octetsto9022Octets),
6971 STATS_OFFSET32(stat_EtherStatsPktsTx64Octets),
6972 STATS_OFFSET32(stat_EtherStatsPktsTx65Octetsto127Octets),
6973 STATS_OFFSET32(stat_EtherStatsPktsTx128Octetsto255Octets),
6974 STATS_OFFSET32(stat_EtherStatsPktsTx256Octetsto511Octets),
6975 STATS_OFFSET32(stat_EtherStatsPktsTx512Octetsto1023Octets),
6976 STATS_OFFSET32(stat_EtherStatsPktsTx1024Octetsto1522Octets),
6977 STATS_OFFSET32(stat_EtherStatsPktsTx1523Octetsto9022Octets),
6978 STATS_OFFSET32(stat_XonPauseFramesReceived),
6979 STATS_OFFSET32(stat_XoffPauseFramesReceived),
6980 STATS_OFFSET32(stat_OutXonSent),
6981 STATS_OFFSET32(stat_OutXoffSent),
6982 STATS_OFFSET32(stat_MacControlFramesReceived),
6983 STATS_OFFSET32(stat_IfInFramesL2FilterDiscards),
6984 STATS_OFFSET32(stat_IfInMBUFDiscards),
6985 STATS_OFFSET32(stat_FwRxDrop),
6988 /* stat_IfHCInBadOctets and stat_Dot3StatsCarrierSenseErrors are
6989 * skipped because of errata.
6991 static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
6992 8,0,8,8,8,8,8,8,8,8,
6993 4,0,4,4,4,4,4,4,4,4,
6994 4,4,4,4,4,4,4,4,4,4,
6995 4,4,4,4,4,4,4,4,4,4,
6996 4,4,4,4,4,4,
6999 static u8 bnx2_5708_stats_len_arr[BNX2_NUM_STATS] = {
7000 8,0,8,8,8,8,8,8,8,8,
7001 4,4,4,4,4,4,4,4,4,4,
7002 4,4,4,4,4,4,4,4,4,4,
7003 4,4,4,4,4,4,4,4,4,4,
7004 4,4,4,4,4,4,
7007 #define BNX2_NUM_TESTS 6
7009 static struct {
7010 char string[ETH_GSTRING_LEN];
7011 } bnx2_tests_str_arr[BNX2_NUM_TESTS] = {
7012 { "register_test (offline)" },
7013 { "memory_test (offline)" },
7014 { "loopback_test (offline)" },
7015 { "nvram_test (online)" },
7016 { "interrupt_test (online)" },
7017 { "link_test (online)" },
7020 static int
7021 bnx2_get_sset_count(struct net_device *dev, int sset)
7023 switch (sset) {
7024 case ETH_SS_TEST:
7025 return BNX2_NUM_TESTS;
7026 case ETH_SS_STATS:
7027 return BNX2_NUM_STATS;
7028 default:
7029 return -EOPNOTSUPP;
7033 static void
7034 bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
7036 struct bnx2 *bp = netdev_priv(dev);
7038 bnx2_set_power_state(bp, PCI_D0);
7040 memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS);
7041 if (etest->flags & ETH_TEST_FL_OFFLINE) {
7042 int i;
7044 bnx2_netif_stop(bp);
7045 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
7046 bnx2_free_skbs(bp);
7048 if (bnx2_test_registers(bp) != 0) {
7049 buf[0] = 1;
7050 etest->flags |= ETH_TEST_FL_FAILED;
7052 if (bnx2_test_memory(bp) != 0) {
7053 buf[1] = 1;
7054 etest->flags |= ETH_TEST_FL_FAILED;
7056 if ((buf[2] = bnx2_test_loopback(bp)) != 0)
7057 etest->flags |= ETH_TEST_FL_FAILED;
7059 if (!netif_running(bp->dev))
7060 bnx2_shutdown_chip(bp);
7061 else {
7062 bnx2_init_nic(bp, 1);
7063 bnx2_netif_start(bp);
7066 /* wait for link up */
7067 for (i = 0; i < 7; i++) {
7068 if (bp->link_up)
7069 break;
7070 msleep_interruptible(1000);
7074 if (bnx2_test_nvram(bp) != 0) {
7075 buf[3] = 1;
7076 etest->flags |= ETH_TEST_FL_FAILED;
7078 if (bnx2_test_intr(bp) != 0) {
7079 buf[4] = 1;
7080 etest->flags |= ETH_TEST_FL_FAILED;
7083 if (bnx2_test_link(bp) != 0) {
7084 buf[5] = 1;
7085 etest->flags |= ETH_TEST_FL_FAILED;
7088 if (!netif_running(bp->dev))
7089 bnx2_set_power_state(bp, PCI_D3hot);
7092 static void
7093 bnx2_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
7095 switch (stringset) {
7096 case ETH_SS_STATS:
7097 memcpy(buf, bnx2_stats_str_arr,
7098 sizeof(bnx2_stats_str_arr));
7099 break;
7100 case ETH_SS_TEST:
7101 memcpy(buf, bnx2_tests_str_arr,
7102 sizeof(bnx2_tests_str_arr));
7103 break;
7107 static void
7108 bnx2_get_ethtool_stats(struct net_device *dev,
7109 struct ethtool_stats *stats, u64 *buf)
7111 struct bnx2 *bp = netdev_priv(dev);
7112 int i;
7113 u32 *hw_stats = (u32 *) bp->stats_blk;
7114 u8 *stats_len_arr = NULL;
7116 if (hw_stats == NULL) {
7117 memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS);
7118 return;
7121 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
7122 (CHIP_ID(bp) == CHIP_ID_5706_A1) ||
7123 (CHIP_ID(bp) == CHIP_ID_5706_A2) ||
7124 (CHIP_ID(bp) == CHIP_ID_5708_A0))
7125 stats_len_arr = bnx2_5706_stats_len_arr;
7126 else
7127 stats_len_arr = bnx2_5708_stats_len_arr;
7129 for (i = 0; i < BNX2_NUM_STATS; i++) {
7130 if (stats_len_arr[i] == 0) {
7131 /* skip this counter */
7132 buf[i] = 0;
7133 continue;
7135 if (stats_len_arr[i] == 4) {
7136 /* 4-byte counter */
7137 buf[i] = (u64)
7138 *(hw_stats + bnx2_stats_offset_arr[i]);
7139 continue;
7141 /* 8-byte counter */
7142 buf[i] = (((u64) *(hw_stats +
7143 bnx2_stats_offset_arr[i])) << 32) +
7144 *(hw_stats + bnx2_stats_offset_arr[i] + 1);
7148 static int
7149 bnx2_phys_id(struct net_device *dev, u32 data)
7151 struct bnx2 *bp = netdev_priv(dev);
7152 int i;
7153 u32 save;
7155 bnx2_set_power_state(bp, PCI_D0);
7157 if (data == 0)
7158 data = 2;
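/* Blink for the requested number of seconds (default 2) by
 * alternating all-LEDs-forced-off and all-LEDs-forced-on every
 * 500 ms, then restore the saved LED mode.
 */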
7160 save = REG_RD(bp, BNX2_MISC_CFG);
7161 REG_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);
7163 for (i = 0; i < (data * 2); i++) {
7164 if ((i % 2) == 0) {
7165 REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE);
7167 else {
7168 REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE |
7169 BNX2_EMAC_LED_1000MB_OVERRIDE |
7170 BNX2_EMAC_LED_100MB_OVERRIDE |
7171 BNX2_EMAC_LED_10MB_OVERRIDE |
7172 BNX2_EMAC_LED_TRAFFIC_OVERRIDE |
7173 BNX2_EMAC_LED_TRAFFIC);
7175 msleep_interruptible(500);
7176 if (signal_pending(current))
7177 break;
7179 REG_WR(bp, BNX2_EMAC_LED, 0);
7180 REG_WR(bp, BNX2_MISC_CFG, save);
7182 if (!netif_running(dev))
7183 bnx2_set_power_state(bp, PCI_D3hot);
7185 return 0;
7188 static int
7189 bnx2_set_tx_csum(struct net_device *dev, u32 data)
7191 struct bnx2 *bp = netdev_priv(dev);
7193 if (CHIP_NUM(bp) == CHIP_NUM_5709)
7194 return (ethtool_op_set_tx_ipv6_csum(dev, data));
7195 else
7196 return (ethtool_op_set_tx_csum(dev, data));
7199 static const struct ethtool_ops bnx2_ethtool_ops = {
7200 .get_settings = bnx2_get_settings,
7201 .set_settings = bnx2_set_settings,
7202 .get_drvinfo = bnx2_get_drvinfo,
7203 .get_regs_len = bnx2_get_regs_len,
7204 .get_regs = bnx2_get_regs,
7205 .get_wol = bnx2_get_wol,
7206 .set_wol = bnx2_set_wol,
7207 .nway_reset = bnx2_nway_reset,
7208 .get_link = ethtool_op_get_link,
7209 .get_eeprom_len = bnx2_get_eeprom_len,
7210 .get_eeprom = bnx2_get_eeprom,
7211 .set_eeprom = bnx2_set_eeprom,
7212 .get_coalesce = bnx2_get_coalesce,
7213 .set_coalesce = bnx2_set_coalesce,
7214 .get_ringparam = bnx2_get_ringparam,
7215 .set_ringparam = bnx2_set_ringparam,
7216 .get_pauseparam = bnx2_get_pauseparam,
7217 .set_pauseparam = bnx2_set_pauseparam,
7218 .get_rx_csum = bnx2_get_rx_csum,
7219 .set_rx_csum = bnx2_set_rx_csum,
7220 .set_tx_csum = bnx2_set_tx_csum,
7221 .set_sg = ethtool_op_set_sg,
7222 .set_tso = bnx2_set_tso,
7223 .self_test = bnx2_self_test,
7224 .get_strings = bnx2_get_strings,
7225 .phys_id = bnx2_phys_id,
7226 .get_ethtool_stats = bnx2_get_ethtool_stats,
7227 .get_sset_count = bnx2_get_sset_count,
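/* These ops back the standard ethtool(8) interface, e.g. "ethtool -C"
 * for interrupt coalescing, "ethtool -g"/"-G" for ring sizes,
 * "ethtool -t" for self-test and "ethtool -p" for LED identification.
 */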
7230 /* Called with rtnl_lock */
7231 static int
7232 bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
7234 struct mii_ioctl_data *data = if_mii(ifr);
7235 struct bnx2 *bp = netdev_priv(dev);
7236 int err;
7238 switch(cmd) {
7239 case SIOCGMIIPHY:
7240 data->phy_id = bp->phy_addr;
7242 /* fallthru */
7243 case SIOCGMIIREG: {
7244 u32 mii_regval;
7246 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
7247 return -EOPNOTSUPP;
7249 if (!netif_running(dev))
7250 return -EAGAIN;
7252 spin_lock_bh(&bp->phy_lock);
7253 err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval);
7254 spin_unlock_bh(&bp->phy_lock);
7256 data->val_out = mii_regval;
7258 return err;
7261 case SIOCSMIIREG:
7262 if (!capable(CAP_NET_ADMIN))
7263 return -EPERM;
7265 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
7266 return -EOPNOTSUPP;
7268 if (!netif_running(dev))
7269 return -EAGAIN;
7271 spin_lock_bh(&bp->phy_lock);
7272 err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in);
7273 spin_unlock_bh(&bp->phy_lock);
7275 return err;
7277 default:
7278 /* do nothing */
7279 break;
7281 return -EOPNOTSUPP;
7284 /* Called with rtnl_lock */
7285 static int
7286 bnx2_change_mac_addr(struct net_device *dev, void *p)
7288 struct sockaddr *addr = p;
7289 struct bnx2 *bp = netdev_priv(dev);
7291 if (!is_valid_ether_addr(addr->sa_data))
7292 return -EINVAL;
7294 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
7295 if (netif_running(dev))
7296 bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
7298 return 0;
7301 /* Called with rtnl_lock */
7302 static int
7303 bnx2_change_mtu(struct net_device *dev, int new_mtu)
7305 struct bnx2 *bp = netdev_priv(dev);
7307 if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) ||
7308 ((new_mtu + ETH_HLEN) < MIN_ETHERNET_PACKET_SIZE))
7309 return -EINVAL;
7311 dev->mtu = new_mtu;
7312 return (bnx2_change_ring_size(bp, bp->rx_ring_size, bp->tx_ring_size));
7315 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
7316 static void
7317 poll_bnx2(struct net_device *dev)
7319 struct bnx2 *bp = netdev_priv(dev);
7320 int i;
7322 for (i = 0; i < bp->irq_nvecs; i++) {
7323 disable_irq(bp->irq_tbl[i].vector);
7324 bnx2_interrupt(bp->irq_tbl[i].vector, &bp->bnx2_napi[i]);
7325 enable_irq(bp->irq_tbl[i].vector);
7328 #endif
7330 static void __devinit
7331 bnx2_get_5709_media(struct bnx2 *bp)
7333 u32 val = REG_RD(bp, BNX2_MISC_DUAL_MEDIA_CTRL);
7334 u32 bond_id = val & BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID;
7335 u32 strap;
7337 if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_C)
7338 return;
7339 else if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_S) {
7340 bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
7341 return;
7344 if (val & BNX2_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE)
7345 strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL) >> 21;
7346 else
7347 strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP) >> 8;
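/* For dual-media bond IDs, the media type is strapped: map the
 * strap value to SerDes or copper based on which PCI function
 * this is, since the two functions use different encodings.
 */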
7349 if (PCI_FUNC(bp->pdev->devfn) == 0) {
7350 switch (strap) {
7351 case 0x4:
7352 case 0x5:
7353 case 0x6:
7354 bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
7355 return;
7357 } else {
7358 switch (strap) {
7359 case 0x1:
7360 case 0x2:
7361 case 0x4:
7362 bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
7363 return;
7368 static void __devinit
7369 bnx2_get_pci_speed(struct bnx2 *bp)
7371 u32 reg;
7373 reg = REG_RD(bp, BNX2_PCICFG_MISC_STATUS);
7374 if (reg & BNX2_PCICFG_MISC_STATUS_PCIX_DET) {
7375 u32 clkreg;
7377 bp->flags |= BNX2_FLAG_PCIX;
7379 clkreg = REG_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS);
7381 clkreg &= BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
7382 switch (clkreg) {
7383 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
7384 bp->bus_speed_mhz = 133;
7385 break;
7387 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
7388 bp->bus_speed_mhz = 100;
7389 break;
7391 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
7392 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
7393 bp->bus_speed_mhz = 66;
7394 break;
7396 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
7397 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
7398 bp->bus_speed_mhz = 50;
7399 break;
7401 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
7402 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
7403 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
7404 bp->bus_speed_mhz = 33;
7405 break;
7408 else {
7409 if (reg & BNX2_PCICFG_MISC_STATUS_M66EN)
7410 bp->bus_speed_mhz = 66;
7411 else
7412 bp->bus_speed_mhz = 33;
7415 if (reg & BNX2_PCICFG_MISC_STATUS_32BIT_DET)
7416 bp->flags |= BNX2_FLAG_PCI_32BIT;
7420 static int __devinit
7421 bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
7423 struct bnx2 *bp;
7424 unsigned long mem_len;
7425 int rc, i, j;
7426 u32 reg;
7427 u64 dma_mask, persist_dma_mask;
7429 SET_NETDEV_DEV(dev, &pdev->dev);
7430 bp = netdev_priv(dev);
7432 bp->flags = 0;
7433 bp->phy_flags = 0;
7435 /* enable device (incl. PCI PM wakeup), and bus-mastering */
7436 rc = pci_enable_device(pdev);
7437 if (rc) {
7438 dev_err(&pdev->dev, "Cannot enable PCI device, aborting.\n");
7439 goto err_out;
7442 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
7443 dev_err(&pdev->dev,
7444 "Cannot find PCI device base address, aborting.\n");
7445 rc = -ENODEV;
7446 goto err_out_disable;
7449 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
7450 if (rc) {
7451 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting.\n");
7452 goto err_out_disable;
7455 pci_set_master(pdev);
7456 pci_save_state(pdev);
7458 bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
7459 if (bp->pm_cap == 0) {
7460 dev_err(&pdev->dev,
7461 "Cannot find power management capability, aborting.\n");
7462 rc = -EIO;
7463 goto err_out_release;
7466 bp->dev = dev;
7467 bp->pdev = pdev;
7469 spin_lock_init(&bp->phy_lock);
7470 spin_lock_init(&bp->indirect_lock);
7471 INIT_WORK(&bp->reset_task, bnx2_reset_task);
7473 dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0);
7474 mem_len = MB_GET_CID_ADDR(TX_TSS_CID + TX_MAX_TSS_RINGS);
7475 dev->mem_end = dev->mem_start + mem_len;
7476 dev->irq = pdev->irq;
7478 bp->regview = ioremap_nocache(dev->base_addr, mem_len);
7480 if (!bp->regview) {
7481 dev_err(&pdev->dev, "Cannot map register space, aborting.\n");
7482 rc = -ENOMEM;
7483 goto err_out_release;
7486 /* Configure byte swap and enable writes to the reg_window registers.
7487 * Rely on the CPU to do target byte swapping on big-endian systems;
7488 * the chip's target access swapping will not swap all accesses.
7490 pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG,
7491 BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
7492 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);
7494 bnx2_set_power_state(bp, PCI_D0);
7496 bp->chip_id = REG_RD(bp, BNX2_MISC_ID);
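/* The CHIP_NUM()/CHIP_ID()/CHIP_REV() macros used below decode
 * fields of this BNX2_MISC_ID value (see bnx2.h).
 */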
7498 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
7499 if (pci_find_capability(pdev, PCI_CAP_ID_EXP) == 0) {
7500 dev_err(&pdev->dev,
7501 "Cannot find PCIE capability, aborting.\n");
7502 rc = -EIO;
7503 goto err_out_unmap;
7505 bp->flags |= BNX2_FLAG_PCIE;
7506 if (CHIP_REV(bp) == CHIP_REV_Ax)
7507 bp->flags |= BNX2_FLAG_JUMBO_BROKEN;
7508 } else {
7509 bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
7510 if (bp->pcix_cap == 0) {
7511 dev_err(&pdev->dev,
7512 "Cannot find PCIX capability, aborting.\n");
7513 rc = -EIO;
7514 goto err_out_unmap;
7518 if (CHIP_NUM(bp) == CHIP_NUM_5709 && CHIP_REV(bp) != CHIP_REV_Ax) {
7519 if (pci_find_capability(pdev, PCI_CAP_ID_MSIX))
7520 bp->flags |= BNX2_FLAG_MSIX_CAP;
7523 if (CHIP_ID(bp) != CHIP_ID_5706_A0 && CHIP_ID(bp) != CHIP_ID_5706_A1) {
7524 if (pci_find_capability(pdev, PCI_CAP_ID_MSI))
7525 bp->flags |= BNX2_FLAG_MSI_CAP;
7528 /* 5708 cannot support DMA addresses > 40-bit. */
7529 if (CHIP_NUM(bp) == CHIP_NUM_5708)
7530 persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
7531 else
7532 persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
7534 /* Configure DMA attributes. */
7535 if (pci_set_dma_mask(pdev, dma_mask) == 0) {
7536 dev->features |= NETIF_F_HIGHDMA;
7537 rc = pci_set_consistent_dma_mask(pdev, persist_dma_mask);
7538 if (rc) {
7539 dev_err(&pdev->dev,
7540 "pci_set_consistent_dma_mask failed, aborting.\n");
7541 goto err_out_unmap;
7543 } else if ((rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) != 0) {
7544 dev_err(&pdev->dev, "System does not support DMA, aborting.\n");
7545 goto err_out_unmap;
7548 if (!(bp->flags & BNX2_FLAG_PCIE))
7549 bnx2_get_pci_speed(bp);
7551 /* 5706A0 may falsely detect SERR and PERR. */
7552 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
7553 reg = REG_RD(bp, PCI_COMMAND);
7554 reg &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
7555 REG_WR(bp, PCI_COMMAND, reg);
7557 else if ((CHIP_ID(bp) == CHIP_ID_5706_A1) &&
7558 !(bp->flags & BNX2_FLAG_PCIX)) {
7560 dev_err(&pdev->dev,
7561 "5706 A1 can only be used in a PCIX bus, aborting.\n");
7562 goto err_out_unmap;
7565 bnx2_init_nvram(bp);
7567 reg = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_SIGNATURE);
7569 if ((reg & BNX2_SHM_HDR_SIGNATURE_SIG_MASK) ==
7570 BNX2_SHM_HDR_SIGNATURE_SIG) {
7571 u32 off = PCI_FUNC(pdev->devfn) << 2;
7573 bp->shmem_base = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_ADDR_0 + off);
7574 } else
7575 bp->shmem_base = HOST_VIEW_SHMEM_BASE;
7577 /* Get the permanent MAC address. First we need to make sure the
7578 * firmware is actually running.
7580 reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_SIGNATURE);
7582 if ((reg & BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
7583 BNX2_DEV_INFO_SIGNATURE_MAGIC) {
7584 dev_err(&pdev->dev, "Firmware not running, aborting.\n");
7585 rc = -ENODEV;
7586 goto err_out_unmap;
7589 reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_BC_REV);
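/* The bootcode revision is packed one decimal number per byte in
 * the top three bytes of this word; convert each to ASCII with
 * leading zeros suppressed, separated by dots.
 */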
7590 for (i = 0, j = 0; i < 3; i++) {
7591 u8 num, k, skip0;
7593 num = (u8) (reg >> (24 - (i * 8)));
7594 for (k = 100, skip0 = 1; k >= 1; num %= k, k /= 10) {
7595 if (num >= k || !skip0 || k == 1) {
7596 bp->fw_version[j++] = (num / k) + '0';
7597 skip0 = 0;
7600 if (i != 2)
7601 bp->fw_version[j++] = '.';
7603 reg = bnx2_shmem_rd(bp, BNX2_PORT_FEATURE);
7604 if (reg & BNX2_PORT_FEATURE_WOL_ENABLED)
7605 bp->wol = 1;
7607 if (reg & BNX2_PORT_FEATURE_ASF_ENABLED) {
7608 bp->flags |= BNX2_FLAG_ASF_ENABLE;
7610 for (i = 0; i < 30; i++) {
7611 reg = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
7612 if (reg & BNX2_CONDITION_MFW_RUN_MASK)
7613 break;
7614 msleep(10);
7617 reg = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
7618 reg &= BNX2_CONDITION_MFW_RUN_MASK;
7619 if (reg != BNX2_CONDITION_MFW_RUN_UNKNOWN &&
7620 reg != BNX2_CONDITION_MFW_RUN_NONE) {
7621 u32 addr = bnx2_shmem_rd(bp, BNX2_MFW_VER_PTR);
7623 bp->fw_version[j++] = ' ';
7624 for (i = 0; i < 3; i++) {
7625 reg = bnx2_reg_rd_ind(bp, addr + i * 4);
7626 reg = swab32(reg);
7627 memcpy(&bp->fw_version[j], &reg, 4);
7628 j += 4;
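/* The permanent MAC address lives in two shared-memory words: the
 * low 16 bits of MAC_UPPER hold bytes 0-1 and MAC_LOWER holds
 * bytes 2-5.
 */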
7632 reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_MAC_UPPER);
7633 bp->mac_addr[0] = (u8) (reg >> 8);
7634 bp->mac_addr[1] = (u8) reg;
7636 reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_MAC_LOWER);
7637 bp->mac_addr[2] = (u8) (reg >> 24);
7638 bp->mac_addr[3] = (u8) (reg >> 16);
7639 bp->mac_addr[4] = (u8) (reg >> 8);
7640 bp->mac_addr[5] = (u8) reg;
7642 bp->tx_ring_size = MAX_TX_DESC_CNT;
7643 bnx2_set_rx_ring_size(bp, 255);
7645 bp->rx_csum = 1;
7647 bp->tx_quick_cons_trip_int = 20;
7648 bp->tx_quick_cons_trip = 20;
7649 bp->tx_ticks_int = 80;
7650 bp->tx_ticks = 80;
7652 bp->rx_quick_cons_trip_int = 6;
7653 bp->rx_quick_cons_trip = 6;
7654 bp->rx_ticks_int = 18;
7655 bp->rx_ticks = 18;
7657 bp->stats_ticks = USEC_PER_SEC & BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
7659 bp->current_interval = BNX2_TIMER_INTERVAL;
7661 bp->phy_addr = 1;
7663 /* Disable WOL support if we are running on a SERDES chip. */
7664 if (CHIP_NUM(bp) == CHIP_NUM_5709)
7665 bnx2_get_5709_media(bp);
7666 else if (CHIP_BOND_ID(bp) & CHIP_BOND_ID_SERDES_BIT)
7667 bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
7669 bp->phy_port = PORT_TP;
7670 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
7671 bp->phy_port = PORT_FIBRE;
7672 reg = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
7673 if (!(reg & BNX2_SHARED_HW_CFG_GIG_LINK_ON_VAUX)) {
7674 bp->flags |= BNX2_FLAG_NO_WOL;
7675 bp->wol = 0;
7677 if (CHIP_NUM(bp) == CHIP_NUM_5706) {
7678 /* Don't do parallel detect on this board because of
7679 * some board problems. The link will not go down
7680 * if we do parallel detect.
7682 if (pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
7683 pdev->subsystem_device == 0x310c)
7684 bp->phy_flags |= BNX2_PHY_FLAG_NO_PARALLEL;
7685 } else {
7686 bp->phy_addr = 2;
7687 if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G)
7688 bp->phy_flags |= BNX2_PHY_FLAG_2_5G_CAPABLE;
7690 } else if (CHIP_NUM(bp) == CHIP_NUM_5706 ||
7691 CHIP_NUM(bp) == CHIP_NUM_5708)
7692 bp->phy_flags |= BNX2_PHY_FLAG_CRC_FIX;
7693 else if (CHIP_NUM(bp) == CHIP_NUM_5709 &&
7694 (CHIP_REV(bp) == CHIP_REV_Ax ||
7695 CHIP_REV(bp) == CHIP_REV_Bx))
7696 bp->phy_flags |= BNX2_PHY_FLAG_DIS_EARLY_DAC;
7698 bnx2_init_fw_cap(bp);
7700 if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
7701 (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
7702 (CHIP_ID(bp) == CHIP_ID_5708_B1) ||
7703 !(REG_RD(bp, BNX2_PCI_CONFIG_3) & BNX2_PCI_CONFIG_3_VAUX_PRESET)) {
7704 bp->flags |= BNX2_FLAG_NO_WOL;
7705 bp->wol = 0;
7708 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
7709 bp->tx_quick_cons_trip_int =
7710 bp->tx_quick_cons_trip;
7711 bp->tx_ticks_int = bp->tx_ticks;
7712 bp->rx_quick_cons_trip_int =
7713 bp->rx_quick_cons_trip;
7714 bp->rx_ticks_int = bp->rx_ticks;
7715 bp->comp_prod_trip_int = bp->comp_prod_trip;
7716 bp->com_ticks_int = bp->com_ticks;
7717 bp->cmd_ticks_int = bp->cmd_ticks;
7720 /* Disable MSI on 5706 if AMD 8132 bridge is found.
7722 * MSI is defined to be a 32-bit write. The 5706 does 64-bit MSI writes
7723 * with byte enables disabled on the unused 32-bit word. This is legal
7724 * but causes problems on the AMD 8132, which will eventually stop
7725 * responding.
7727 * AMD believes this incompatibility is unique to the 5706, and
7728 * prefers to locally disable MSI rather than globally disabling it.
7730 if (CHIP_NUM(bp) == CHIP_NUM_5706 && disable_msi == 0) {
7731 struct pci_dev *amd_8132 = NULL;
7733 while ((amd_8132 = pci_get_device(PCI_VENDOR_ID_AMD,
7734 PCI_DEVICE_ID_AMD_8132_BRIDGE,
7735 amd_8132))) {
7737 if (amd_8132->revision >= 0x10 &&
7738 amd_8132->revision <= 0x13) {
7739 disable_msi = 1;
7740 pci_dev_put(amd_8132);
7741 break;
7746 bnx2_set_default_link(bp);
7747 bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
7749 init_timer(&bp->timer);
7750 bp->timer.expires = RUN_AT(BNX2_TIMER_INTERVAL);
7751 bp->timer.data = (unsigned long) bp;
7752 bp->timer.function = bnx2_timer;
7754 return 0;
7756 err_out_unmap:
7757 if (bp->regview) {
7758 iounmap(bp->regview);
7759 bp->regview = NULL;
7762 err_out_release:
7763 pci_release_regions(pdev);
7765 err_out_disable:
7766 pci_disable_device(pdev);
7767 pci_set_drvdata(pdev, NULL);
7769 err_out:
7770 return rc;
7773 static char * __devinit
7774 bnx2_bus_string(struct bnx2 *bp, char *str)
7776 char *s = str;
7778 if (bp->flags & BNX2_FLAG_PCIE) {
7779 s += sprintf(s, "PCI Express");
7780 } else {
7781 s += sprintf(s, "PCI");
7782 if (bp->flags & BNX2_FLAG_PCIX)
7783 s += sprintf(s, "-X");
7784 if (bp->flags & BNX2_FLAG_PCI_32BIT)
7785 s += sprintf(s, " 32-bit");
7786 else
7787 s += sprintf(s, " 64-bit");
7788 s += sprintf(s, " %dMHz", bp->bus_speed_mhz);
7790 return str;
7793 static void __devinit
7794 bnx2_init_napi(struct bnx2 *bp)
7796 int i;
7798 for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
7799 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
7800 int (*poll)(struct napi_struct *, int);
7802 if (i == 0)
7803 poll = bnx2_poll;
7804 else
7805 poll = bnx2_poll_msix;
7807 netif_napi_add(bp->dev, &bp->bnx2_napi[i].napi, poll, 64);
7808 bnapi->bp = bp;
7812 static const struct net_device_ops bnx2_netdev_ops = {
7813 .ndo_open = bnx2_open,
7814 .ndo_start_xmit = bnx2_start_xmit,
7815 .ndo_stop = bnx2_close,
7816 .ndo_get_stats = bnx2_get_stats,
7817 .ndo_set_rx_mode = bnx2_set_rx_mode,
7818 .ndo_do_ioctl = bnx2_ioctl,
7819 .ndo_validate_addr = eth_validate_addr,
7820 .ndo_set_mac_address = bnx2_change_mac_addr,
7821 .ndo_change_mtu = bnx2_change_mtu,
7822 .ndo_tx_timeout = bnx2_tx_timeout,
7823 #ifdef BCM_VLAN
7824 .ndo_vlan_rx_register = bnx2_vlan_rx_register,
7825 #endif
7826 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
7827 .ndo_poll_controller = poll_bnx2,
7828 #endif
7831 static int __devinit
7832 bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
7834 static int version_printed = 0;
7835 struct net_device *dev = NULL;
7836 struct bnx2 *bp;
7837 int rc;
7838 char str[40];
7840 if (version_printed++ == 0)
7841 printk(KERN_INFO "%s", version);
7843 /* dev zeroed in alloc_etherdev_mq */
7844 dev = alloc_etherdev_mq(sizeof(*bp), TX_MAX_RINGS);
7846 if (!dev)
7847 return -ENOMEM;
7849 rc = bnx2_init_board(pdev, dev);
7850 if (rc < 0) {
7851 free_netdev(dev);
7852 return rc;
7855 dev->netdev_ops = &bnx2_netdev_ops;
7856 dev->watchdog_timeo = TX_TIMEOUT;
7857 dev->ethtool_ops = &bnx2_ethtool_ops;
7859 bp = netdev_priv(dev);
7860 bnx2_init_napi(bp);
7862 pci_set_drvdata(pdev, dev);
7864 rc = bnx2_request_firmware(bp);
7865 if (rc)
7866 goto error;
7868 memcpy(dev->dev_addr, bp->mac_addr, 6);
7869 memcpy(dev->perm_addr, bp->mac_addr, 6);
7871 dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
7872 if (CHIP_NUM(bp) == CHIP_NUM_5709)
7873 dev->features |= NETIF_F_IPV6_CSUM;
7875 #ifdef BCM_VLAN
7876 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
7877 #endif
7878 dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
7879 if (CHIP_NUM(bp) == CHIP_NUM_5709)
7880 dev->features |= NETIF_F_TSO6;
7882 if ((rc = register_netdev(dev))) {
7883 dev_err(&pdev->dev, "Cannot register net device\n");
7884 goto error;
7887 printk(KERN_INFO "%s: %s (%c%d) %s found at mem %lx, "
7888 "IRQ %d, node addr %pM\n",
7889 dev->name,
7890 board_info[ent->driver_data].name,
7891 ((CHIP_ID(bp) & 0xf000) >> 12) + 'A',
7892 ((CHIP_ID(bp) & 0x0ff0) >> 4),
7893 bnx2_bus_string(bp, str),
7894 dev->base_addr,
7895 bp->pdev->irq, dev->dev_addr);
7897 return 0;
7899 error:
7900 if (bp->mips_firmware)
7901 release_firmware(bp->mips_firmware);
7902 if (bp->rv2p_firmware)
7903 release_firmware(bp->rv2p_firmware);
7905 if (bp->regview)
7906 iounmap(bp->regview);
7907 pci_release_regions(pdev);
7908 pci_disable_device(pdev);
7909 pci_set_drvdata(pdev, NULL);
7910 free_netdev(dev);
7911 return rc;
7914 static void __devexit
7915 bnx2_remove_one(struct pci_dev *pdev)
7917 struct net_device *dev = pci_get_drvdata(pdev);
7918 struct bnx2 *bp = netdev_priv(dev);
7920 flush_scheduled_work();
7922 unregister_netdev(dev);
7924 if (bp->mips_firmware)
7925 release_firmware(bp->mips_firmware);
7926 if (bp->rv2p_firmware)
7927 release_firmware(bp->rv2p_firmware);
7929 if (bp->regview)
7930 iounmap(bp->regview);
7932 free_netdev(dev);
7933 pci_release_regions(pdev);
7934 pci_disable_device(pdev);
7935 pci_set_drvdata(pdev, NULL);
7938 static int
7939 bnx2_suspend(struct pci_dev *pdev, pm_message_t state)
7941 struct net_device *dev = pci_get_drvdata(pdev);
7942 struct bnx2 *bp = netdev_priv(dev);
7944 /* PCI register 4 needs to be saved whether netif_running() or not.
7945 * MSI address and data need to be saved if using MSI and
7946 * netif_running().
7948 pci_save_state(pdev);
7949 if (!netif_running(dev))
7950 return 0;
7952 flush_scheduled_work();
7953 bnx2_netif_stop(bp);
7954 netif_device_detach(dev);
7955 del_timer_sync(&bp->timer);
7956 bnx2_shutdown_chip(bp);
7957 bnx2_free_skbs(bp);
7958 bnx2_set_power_state(bp, pci_choose_state(pdev, state));
7959 return 0;
7962 static int
7963 bnx2_resume(struct pci_dev *pdev)
7965 struct net_device *dev = pci_get_drvdata(pdev);
7966 struct bnx2 *bp = netdev_priv(dev);
7968 pci_restore_state(pdev);
7969 if (!netif_running(dev))
7970 return 0;
7972 bnx2_set_power_state(bp, PCI_D0);
7973 netif_device_attach(dev);
7974 bnx2_init_nic(bp, 1);
7975 bnx2_netif_start(bp);
7976 return 0;
7980 * bnx2_io_error_detected - called when PCI error is detected
7981 * @pdev: Pointer to PCI device
7982 * @state: The current pci connection state
7984 * This function is called after a PCI bus error affecting
7985 * this device has been detected.
7987 static pci_ers_result_t bnx2_io_error_detected(struct pci_dev *pdev,
7988 pci_channel_state_t state)
7990 struct net_device *dev = pci_get_drvdata(pdev);
7991 struct bnx2 *bp = netdev_priv(dev);
7993 rtnl_lock();
7994 netif_device_detach(dev);
7996 if (netif_running(dev)) {
7997 bnx2_netif_stop(bp);
7998 del_timer_sync(&bp->timer);
7999 bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
8002 pci_disable_device(pdev);
8003 rtnl_unlock();
8005 /* Request a slot reset. */
8006 return PCI_ERS_RESULT_NEED_RESET;
8010 * bnx2_io_slot_reset - called after the pci bus has been reset.
8011 * @pdev: Pointer to PCI device
8013 * Restart the card from scratch, as if from a cold boot.
8015 static pci_ers_result_t bnx2_io_slot_reset(struct pci_dev *pdev)
8017 struct net_device *dev = pci_get_drvdata(pdev);
8018 struct bnx2 *bp = netdev_priv(dev);
8020 rtnl_lock();
8021 if (pci_enable_device(pdev)) {
8022 dev_err(&pdev->dev,
8023 "Cannot re-enable PCI device after reset.\n");
8024 rtnl_unlock();
8025 return PCI_ERS_RESULT_DISCONNECT;
8027 pci_set_master(pdev);
8028 pci_restore_state(pdev);
8030 if (netif_running(dev)) {
8031 bnx2_set_power_state(bp, PCI_D0);
8032 bnx2_init_nic(bp, 1);
8035 rtnl_unlock();
8036 return PCI_ERS_RESULT_RECOVERED;
8040 * bnx2_io_resume - called when traffic can start flowing again.
8041 * @pdev: Pointer to PCI device
8043 * This callback is called when the error recovery driver tells us that
8044 * it's OK to resume normal operation.
8046 static void bnx2_io_resume(struct pci_dev *pdev)
8048 struct net_device *dev = pci_get_drvdata(pdev);
8049 struct bnx2 *bp = netdev_priv(dev);
8051 rtnl_lock();
8052 if (netif_running(dev))
8053 bnx2_netif_start(bp);
8055 netif_device_attach(dev);
8056 rtnl_unlock();
8059 static struct pci_error_handlers bnx2_err_handler = {
8060 .error_detected = bnx2_io_error_detected,
8061 .slot_reset = bnx2_io_slot_reset,
8062 .resume = bnx2_io_resume,
8065 static struct pci_driver bnx2_pci_driver = {
8066 .name = DRV_MODULE_NAME,
8067 .id_table = bnx2_pci_tbl,
8068 .probe = bnx2_init_one,
8069 .remove = __devexit_p(bnx2_remove_one),
8070 .suspend = bnx2_suspend,
8071 .resume = bnx2_resume,
8072 .err_handler = &bnx2_err_handler,
8075 static int __init bnx2_init(void)
8077 return pci_register_driver(&bnx2_pci_driver);
8080 static void __exit bnx2_cleanup(void)
8082 pci_unregister_driver(&bnx2_pci_driver);
8085 module_init(bnx2_init);
8086 module_exit(bnx2_cleanup);