drivers/net/bnx2.c: Use (pr|netdev|netif)_<level> macro helpers
/* bnx2.c: Broadcom NX2 network driver.
 *
 * Copyright (c) 2004-2010 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Written by: Michael Chan (mchan@broadcom.com)
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/moduleparam.h>

#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <asm/page.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
#define BCM_VLAN 1
#endif
#include <net/ip.h>
#include <net/tcp.h>
#include <net/checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/prefetch.h>
#include <linux/cache.h>
#include <linux/firmware.h>
#include <linux/log2.h>

#if defined(CONFIG_CNIC) || defined(CONFIG_CNIC_MODULE)
#define BCM_CNIC 1
#include "cnic_if.h"
#endif
#include "bnx2.h"
#include "bnx2_fw.h"
#define DRV_MODULE_NAME		"bnx2"
#define DRV_MODULE_VERSION	"2.0.8"
#define DRV_MODULE_RELDATE	"Feb 15, 2010"
#define FW_MIPS_FILE_06		"bnx2/bnx2-mips-06-5.0.0.j6.fw"
#define FW_RV2P_FILE_06		"bnx2/bnx2-rv2p-06-5.0.0.j3.fw"
#define FW_MIPS_FILE_09		"bnx2/bnx2-mips-09-5.0.0.j9.fw"
#define FW_RV2P_FILE_09_Ax	"bnx2/bnx2-rv2p-09ax-5.0.0.j10.fw"
#define FW_RV2P_FILE_09		"bnx2/bnx2-rv2p-09-5.0.0.j10.fw"

#define RUN_AT(x) (jiffies + (x))

/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT  (5*HZ)

static char version[] __devinitdata =
	"Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708/5709/5716 Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FW_MIPS_FILE_06);
MODULE_FIRMWARE(FW_RV2P_FILE_06);
MODULE_FIRMWARE(FW_MIPS_FILE_09);
MODULE_FIRMWARE(FW_RV2P_FILE_09);
MODULE_FIRMWARE(FW_RV2P_FILE_09_Ax);

static int disable_msi = 0;

module_param(disable_msi, int, 0);
MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
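
/* Usage: "modprobe bnx2 disable_msi=1" forces legacy INTx interrupts,
 * e.g. on platforms where MSI delivery is unreliable.
 */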
typedef enum {
	BCM5706 = 0,
	NC370T,
	NC370I,
	BCM5706S,
	NC370F,
	BCM5708,
	BCM5708S,
	BCM5709,
	BCM5709S,
	BCM5716,
	BCM5716S,
} board_t;

/* indexed by board_t, above */
static struct {
	char *name;
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM5706 1000Base-T" },
	{ "HP NC370T Multifunction Gigabit Server Adapter" },
	{ "HP NC370i Multifunction Gigabit Server Adapter" },
	{ "Broadcom NetXtreme II BCM5706 1000Base-SX" },
	{ "HP NC370F Multifunction Gigabit Server Adapter" },
	{ "Broadcom NetXtreme II BCM5708 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5708 1000Base-SX" },
	{ "Broadcom NetXtreme II BCM5709 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5709 1000Base-SX" },
	{ "Broadcom NetXtreme II BCM5716 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5716 1000Base-SX" },
	};
static DEFINE_PCI_DEVICE_TABLE(bnx2_pci_tbl) = {
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
	  PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709S },
	{ PCI_VENDOR_ID_BROADCOM, 0x163b,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5716 },
	{ PCI_VENDOR_ID_BROADCOM, 0x163c,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5716S },
	{ 0, }
};
static const struct flash_spec flash_table[] =
{
#define BUFFERED_FLAGS		(BNX2_NV_BUFFERED | BNX2_NV_TRANSLATE)
#define NONBUFFERED_FLAGS	(BNX2_NV_WREN)
	/* Slow EEPROM */
	{0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
	 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - slow"},
	/* Expansion entry 0001 */
	{0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0001"},
	/* Saifun SA25F010 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
	 "Non-buffered flash (128kB)"},
	/* Saifun SA25F020 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
	 "Non-buffered flash (256kB)"},
	/* Expansion entry 0100 */
	{0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0100"},
	/* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
	{0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
	 "Entry 0101: ST M45PE10 (128kB non-buffered)"},
	/* Entry 0110: ST M45PE20 (non-buffered flash)*/
	{0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
	 "Entry 0110: ST M45PE20 (256kB non-buffered)"},
	/* Saifun SA25F005 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
	 "Non-buffered flash (64kB)"},
	/* Fast EEPROM */
	{0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
	 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - fast"},
	/* Expansion entry 1001 */
	{0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1001"},
	/* Expansion entry 1010 */
	{0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1010"},
	/* ATMEL AT45DB011B (buffered flash) */
	{0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
	 "Buffered flash (128kB)"},
	/* Expansion entry 1100 */
	{0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1100"},
	/* Expansion entry 1101 */
	{0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1101"},
	/* Atmel Expansion entry 1110 */
	{0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1110 (Atmel)"},
	/* ATMEL AT45DB021B (buffered flash) */
	{0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
	 "Buffered flash (256kB)"},
};

static const struct flash_spec flash_5709 = {
	.flags		= BNX2_NV_BUFFERED,
	.page_bits	= BCM5709_FLASH_PAGE_BITS,
	.page_size	= BCM5709_FLASH_PAGE_SIZE,
	.addr_mask	= BCM5709_FLASH_BYTE_ADDR_MASK,
	.total_size	= BUFFERED_FLASH_TOTAL_SIZE*2,
	.name		= "5709 Buffered flash (256kB)",
};
MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);

static inline u32 bnx2_tx_avail(struct bnx2 *bp, struct bnx2_tx_ring_info *txr)
{
	u32 diff;

	smp_mb();

	/* The ring uses 256 indices for 255 entries, one of them
	 * needs to be skipped.
	 */
	diff = txr->tx_prod - txr->tx_cons;
	if (unlikely(diff >= TX_DESC_CNT)) {
		diff &= 0xffff;
		if (diff == TX_DESC_CNT)
			diff = MAX_TX_DESC_CNT;
	}
	return (bp->tx_ring_size - diff);
}
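
/* Indirect register access: the PCICFG window is a shared address/data
 * register pair, so every indirect read and write below holds
 * indirect_lock to keep the window address and data access atomic.
 */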
static u32
bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
{
	u32 val;

	spin_lock_bh(&bp->indirect_lock);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
	val = REG_RD(bp, BNX2_PCICFG_REG_WINDOW);
	spin_unlock_bh(&bp->indirect_lock);
	return val;
}

static void
bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
{
	spin_lock_bh(&bp->indirect_lock);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
	spin_unlock_bh(&bp->indirect_lock);
}

static void
bnx2_shmem_wr(struct bnx2 *bp, u32 offset, u32 val)
{
	bnx2_reg_wr_ind(bp, bp->shmem_base + offset, val);
}

static u32
bnx2_shmem_rd(struct bnx2 *bp, u32 offset)
{
	return (bnx2_reg_rd_ind(bp, bp->shmem_base + offset));
}
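
/* Context memory writes: the 5709 posts the data, requests the write, and
 * then polls (up to 5 iterations, 5 us apart) for the hardware to clear
 * WRITE_REQ; older chips take a simple address/data register pair.
 */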
static void
bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
{
	offset += cid_addr;
	spin_lock_bh(&bp->indirect_lock);
	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		int i;

		REG_WR(bp, BNX2_CTX_CTX_DATA, val);
		REG_WR(bp, BNX2_CTX_CTX_CTRL,
		       offset | BNX2_CTX_CTX_CTRL_WRITE_REQ);
		for (i = 0; i < 5; i++) {
			val = REG_RD(bp, BNX2_CTX_CTX_CTRL);
			if ((val & BNX2_CTX_CTX_CTRL_WRITE_REQ) == 0)
				break;
			udelay(5);
		}
	} else {
		REG_WR(bp, BNX2_CTX_DATA_ADR, offset);
		REG_WR(bp, BNX2_CTX_DATA, val);
	}
	spin_unlock_bh(&bp->indirect_lock);
}
#ifdef BCM_CNIC
static int
bnx2_drv_ctl(struct net_device *dev, struct drv_ctl_info *info)
{
	struct bnx2 *bp = netdev_priv(dev);
	struct drv_ctl_io *io = &info->data.io;

	switch (info->cmd) {
	case DRV_CTL_IO_WR_CMD:
		bnx2_reg_wr_ind(bp, io->offset, io->data);
		break;
	case DRV_CTL_IO_RD_CMD:
		io->data = bnx2_reg_rd_ind(bp, io->offset);
		break;
	case DRV_CTL_CTX_WR_CMD:
		bnx2_ctx_wr(bp, io->cid_addr, io->offset, io->data);
		break;
	default:
		return -EINVAL;
	}
	return 0;
}
static void bnx2_setup_cnic_irq_info(struct bnx2 *bp)
{
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
	int sb_id;

	if (bp->flags & BNX2_FLAG_USING_MSIX) {
		cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
		bnapi->cnic_present = 0;
		sb_id = bp->irq_nvecs;
		cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
	} else {
		cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
		bnapi->cnic_tag = bnapi->last_status_idx;
		bnapi->cnic_present = 1;
		sb_id = 0;
		cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
	}

	cp->irq_arr[0].vector = bp->irq_tbl[sb_id].vector;
	cp->irq_arr[0].status_blk = (void *)
		((unsigned long) bnapi->status_blk.msi +
		(BNX2_SBLK_MSIX_ALIGN_SIZE * sb_id));
	cp->irq_arr[0].status_blk_num = sb_id;
	cp->num_irq = 1;
}
static int bnx2_register_cnic(struct net_device *dev, struct cnic_ops *ops,
			      void *data)
{
	struct bnx2 *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	if (ops == NULL)
		return -EINVAL;

	if (cp->drv_state & CNIC_DRV_STATE_REGD)
		return -EBUSY;

	bp->cnic_data = data;
	rcu_assign_pointer(bp->cnic_ops, ops);

	cp->num_irq = 0;
	cp->drv_state = CNIC_DRV_STATE_REGD;

	bnx2_setup_cnic_irq_info(bp);

	return 0;
}

static int bnx2_unregister_cnic(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	mutex_lock(&bp->cnic_lock);
	cp->drv_state = 0;
	bnapi->cnic_present = 0;
	rcu_assign_pointer(bp->cnic_ops, NULL);
	mutex_unlock(&bp->cnic_lock);
	synchronize_rcu();
	return 0;
}

struct cnic_eth_dev *bnx2_cnic_probe(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	cp->drv_owner = THIS_MODULE;
	cp->chip_id = bp->chip_id;
	cp->pdev = bp->pdev;
	cp->io_base = bp->regview;
	cp->drv_ctl = bnx2_drv_ctl;
	cp->drv_register_cnic = bnx2_register_cnic;
	cp->drv_unregister_cnic = bnx2_unregister_cnic;

	return cp;
}
EXPORT_SYMBOL(bnx2_cnic_probe);
static void
bnx2_cnic_stop(struct bnx2 *bp)
{
	struct cnic_ops *c_ops;
	struct cnic_ctl_info info;

	mutex_lock(&bp->cnic_lock);
	c_ops = bp->cnic_ops;
	if (c_ops) {
		info.cmd = CNIC_CTL_STOP_CMD;
		c_ops->cnic_ctl(bp->cnic_data, &info);
	}
	mutex_unlock(&bp->cnic_lock);
}

static void
bnx2_cnic_start(struct bnx2 *bp)
{
	struct cnic_ops *c_ops;
	struct cnic_ctl_info info;

	mutex_lock(&bp->cnic_lock);
	c_ops = bp->cnic_ops;
	if (c_ops) {
		if (!(bp->flags & BNX2_FLAG_USING_MSIX)) {
			struct bnx2_napi *bnapi = &bp->bnx2_napi[0];

			bnapi->cnic_tag = bnapi->last_status_idx;
		}
		info.cmd = CNIC_CTL_START_CMD;
		c_ops->cnic_ctl(bp->cnic_data, &info);
	}
	mutex_unlock(&bp->cnic_lock);
}

#else

static void
bnx2_cnic_stop(struct bnx2 *bp)
{
}

static void
bnx2_cnic_start(struct bnx2 *bp)
{
}

#endif
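
/* MDIO access: hardware auto-polling of the PHY is paused around manual
 * reads/writes, and the COMM register is polled (50 iterations, 10 us
 * apart) for START_BUSY to clear before the result is trusted.
 */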
static int
bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
{
	u32 val1;
	int i, ret;

	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	val1 = (bp->phy_addr << 21) | (reg << 16) |
		BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
		BNX2_EMAC_MDIO_COMM_START_BUSY;
	REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

	for (i = 0; i < 50; i++) {
		udelay(10);

		val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
			udelay(5);

			val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
			val1 &= BNX2_EMAC_MDIO_COMM_DATA;

			break;
		}
	}

	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
		*val = 0x0;
		ret = -EBUSY;
	}
	else {
		*val = val1;
		ret = 0;
	}

	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	return ret;
}
static int
bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
{
	u32 val1;
	int i, ret;

	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	val1 = (bp->phy_addr << 21) | (reg << 16) | val |
		BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
		BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
	REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

	for (i = 0; i < 50; i++) {
		udelay(10);

		val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
			udelay(5);
			break;
		}
	}

	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
		ret = -EBUSY;
	else
		ret = 0;

	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	return ret;
}
static void
bnx2_disable_int(struct bnx2 *bp)
{
	int i;
	struct bnx2_napi *bnapi;

	for (i = 0; i < bp->irq_nvecs; i++) {
		bnapi = &bp->bnx2_napi[i];
		REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
		       BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
	}
	REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
}

static void
bnx2_enable_int(struct bnx2 *bp)
{
	int i;
	struct bnx2_napi *bnapi;

	for (i = 0; i < bp->irq_nvecs; i++) {
		bnapi = &bp->bnx2_napi[i];

		REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
		       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
		       BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
		       bnapi->last_status_idx);

		REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
		       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
		       bnapi->last_status_idx);
	}
	REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
}
static void
bnx2_disable_int_sync(struct bnx2 *bp)
{
	int i;

	atomic_inc(&bp->intr_sem);
	if (!netif_running(bp->dev))
		return;

	bnx2_disable_int(bp);
	for (i = 0; i < bp->irq_nvecs; i++)
		synchronize_irq(bp->irq_tbl[i].vector);
}

static void
bnx2_napi_disable(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->irq_nvecs; i++)
		napi_disable(&bp->bnx2_napi[i].napi);
}

static void
bnx2_napi_enable(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->irq_nvecs; i++)
		napi_enable(&bp->bnx2_napi[i].napi);
}
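
/* The stop/start helpers below bracket hardware reconfiguration: cnic is
 * quiesced first, then NAPI and the tx queues, and finally interrupts are
 * masked and synchronized so no handler can still be running.
 */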
static void
bnx2_netif_stop(struct bnx2 *bp)
{
	bnx2_cnic_stop(bp);
	if (netif_running(bp->dev)) {
		int i;

		bnx2_napi_disable(bp);
		netif_tx_disable(bp->dev);
		/* prevent tx timeout */
		for (i = 0; i < bp->dev->num_tx_queues; i++) {
			struct netdev_queue *txq;

			txq = netdev_get_tx_queue(bp->dev, i);
			txq->trans_start = jiffies;
		}
	}
	bnx2_disable_int_sync(bp);
}

static void
bnx2_netif_start(struct bnx2 *bp)
{
	if (atomic_dec_and_test(&bp->intr_sem)) {
		if (netif_running(bp->dev)) {
			netif_tx_wake_all_queues(bp->dev);
			bnx2_napi_enable(bp);
			bnx2_enable_int(bp);
			bnx2_cnic_start(bp);
		}
	}
}
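
/* Ring memory: descriptor rings live in coherent DMA memory
 * (pci_alloc_consistent), while the driver-side buffer-tracking rings are
 * ordinary kernel memory (kzalloc/vmalloc); the free helpers release both.
 */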
static void
bnx2_free_tx_mem(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->num_tx_rings; i++) {
		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
		struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;

		if (txr->tx_desc_ring) {
			pci_free_consistent(bp->pdev, TXBD_RING_SIZE,
					    txr->tx_desc_ring,
					    txr->tx_desc_mapping);
			txr->tx_desc_ring = NULL;
		}
		kfree(txr->tx_buf_ring);
		txr->tx_buf_ring = NULL;
	}
}

static void
bnx2_free_rx_mem(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->num_rx_rings; i++) {
		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
		struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
		int j;

		for (j = 0; j < bp->rx_max_ring; j++) {
			if (rxr->rx_desc_ring[j])
				pci_free_consistent(bp->pdev, RXBD_RING_SIZE,
						    rxr->rx_desc_ring[j],
						    rxr->rx_desc_mapping[j]);
			rxr->rx_desc_ring[j] = NULL;
		}
		vfree(rxr->rx_buf_ring);
		rxr->rx_buf_ring = NULL;

		for (j = 0; j < bp->rx_max_pg_ring; j++) {
			if (rxr->rx_pg_desc_ring[j])
				pci_free_consistent(bp->pdev, RXBD_RING_SIZE,
						    rxr->rx_pg_desc_ring[j],
						    rxr->rx_pg_desc_mapping[j]);
			rxr->rx_pg_desc_ring[j] = NULL;
		}
		vfree(rxr->rx_pg_ring);
		rxr->rx_pg_ring = NULL;
	}
}
static int
bnx2_alloc_tx_mem(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->num_tx_rings; i++) {
		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
		struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;

		txr->tx_buf_ring = kzalloc(SW_TXBD_RING_SIZE, GFP_KERNEL);
		if (txr->tx_buf_ring == NULL)
			return -ENOMEM;

		txr->tx_desc_ring =
			pci_alloc_consistent(bp->pdev, TXBD_RING_SIZE,
					     &txr->tx_desc_mapping);
		if (txr->tx_desc_ring == NULL)
			return -ENOMEM;
	}
	return 0;
}

static int
bnx2_alloc_rx_mem(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->num_rx_rings; i++) {
		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
		struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
		int j;

		rxr->rx_buf_ring =
			vmalloc(SW_RXBD_RING_SIZE * bp->rx_max_ring);
		if (rxr->rx_buf_ring == NULL)
			return -ENOMEM;

		memset(rxr->rx_buf_ring, 0,
		       SW_RXBD_RING_SIZE * bp->rx_max_ring);

		for (j = 0; j < bp->rx_max_ring; j++) {
			rxr->rx_desc_ring[j] =
				pci_alloc_consistent(bp->pdev, RXBD_RING_SIZE,
						     &rxr->rx_desc_mapping[j]);
			if (rxr->rx_desc_ring[j] == NULL)
				return -ENOMEM;

		}

		if (bp->rx_pg_ring_size) {
			rxr->rx_pg_ring = vmalloc(SW_RXPG_RING_SIZE *
						  bp->rx_max_pg_ring);
			if (rxr->rx_pg_ring == NULL)
				return -ENOMEM;

			memset(rxr->rx_pg_ring, 0, SW_RXPG_RING_SIZE *
			       bp->rx_max_pg_ring);
		}

		for (j = 0; j < bp->rx_max_pg_ring; j++) {
			rxr->rx_pg_desc_ring[j] =
				pci_alloc_consistent(bp->pdev, RXBD_RING_SIZE,
						&rxr->rx_pg_desc_mapping[j]);
			if (rxr->rx_pg_desc_ring[j] == NULL)
				return -ENOMEM;

		}
	}
	return 0;
}
static void
bnx2_free_mem(struct bnx2 *bp)
{
	int i;
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];

	bnx2_free_tx_mem(bp);
	bnx2_free_rx_mem(bp);

	for (i = 0; i < bp->ctx_pages; i++) {
		if (bp->ctx_blk[i]) {
			pci_free_consistent(bp->pdev, BCM_PAGE_SIZE,
					    bp->ctx_blk[i],
					    bp->ctx_blk_mapping[i]);
			bp->ctx_blk[i] = NULL;
		}
	}
	if (bnapi->status_blk.msi) {
		pci_free_consistent(bp->pdev, bp->status_stats_size,
				    bnapi->status_blk.msi,
				    bp->status_blk_mapping);
		bnapi->status_blk.msi = NULL;
		bp->stats_blk = NULL;
	}
}
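
/* The base status block, the per-vector MSI-X status blocks (spaced
 * BNX2_SBLK_MSIX_ALIGN_SIZE apart), and the statistics block all share a
 * single coherent allocation; bnx2_free_mem() above releases it in one call.
 */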
static int
bnx2_alloc_mem(struct bnx2 *bp)
{
	int i, status_blk_size, err;
	struct bnx2_napi *bnapi;
	void *status_blk;

	/* Combine status and statistics blocks into one allocation. */
	status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
	if (bp->flags & BNX2_FLAG_MSIX_CAP)
		status_blk_size = L1_CACHE_ALIGN(BNX2_MAX_MSIX_HW_VEC *
						 BNX2_SBLK_MSIX_ALIGN_SIZE);
	bp->status_stats_size = status_blk_size +
				sizeof(struct statistics_block);

	status_blk = pci_alloc_consistent(bp->pdev, bp->status_stats_size,
					  &bp->status_blk_mapping);
	if (status_blk == NULL)
		goto alloc_mem_err;

	memset(status_blk, 0, bp->status_stats_size);

	bnapi = &bp->bnx2_napi[0];
	bnapi->status_blk.msi = status_blk;
	bnapi->hw_tx_cons_ptr =
		&bnapi->status_blk.msi->status_tx_quick_consumer_index0;
	bnapi->hw_rx_cons_ptr =
		&bnapi->status_blk.msi->status_rx_quick_consumer_index0;
	if (bp->flags & BNX2_FLAG_MSIX_CAP) {
		for (i = 1; i < BNX2_MAX_MSIX_VEC; i++) {
			struct status_block_msix *sblk;

			bnapi = &bp->bnx2_napi[i];

			sblk = (void *) (status_blk +
					 BNX2_SBLK_MSIX_ALIGN_SIZE * i);
			bnapi->status_blk.msix = sblk;
			bnapi->hw_tx_cons_ptr =
				&sblk->status_tx_quick_consumer_index;
			bnapi->hw_rx_cons_ptr =
				&sblk->status_rx_quick_consumer_index;
			bnapi->int_num = i << 24;
		}
	}

	bp->stats_blk = status_blk + status_blk_size;

	bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		bp->ctx_pages = 0x2000 / BCM_PAGE_SIZE;
		if (bp->ctx_pages == 0)
			bp->ctx_pages = 1;
		for (i = 0; i < bp->ctx_pages; i++) {
			bp->ctx_blk[i] = pci_alloc_consistent(bp->pdev,
						BCM_PAGE_SIZE,
						&bp->ctx_blk_mapping[i]);
			if (bp->ctx_blk[i] == NULL)
				goto alloc_mem_err;
		}
	}

	err = bnx2_alloc_rx_mem(bp);
	if (err)
		goto alloc_mem_err;

	err = bnx2_alloc_tx_mem(bp);
	if (err)
		goto alloc_mem_err;

	return 0;

alloc_mem_err:
	bnx2_free_mem(bp);
	return -ENOMEM;
}
static void
bnx2_report_fw_link(struct bnx2 *bp)
{
	u32 fw_link_status = 0;

	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return;

	if (bp->link_up) {
		u32 bmsr;

		switch (bp->line_speed) {
		case SPEED_10:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_10HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_10FULL;
			break;
		case SPEED_100:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_100HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_100FULL;
			break;
		case SPEED_1000:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_1000HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_1000FULL;
			break;
		case SPEED_2500:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_2500HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_2500FULL;
			break;
		}

		fw_link_status |= BNX2_LINK_STATUS_LINK_UP;

		if (bp->autoneg) {
			fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;

			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);

			if (!(bmsr & BMSR_ANEGCOMPLETE) ||
			    bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)
				fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
			else
				fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
		}
	}
	else
		fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;

	bnx2_shmem_wr(bp, BNX2_LINK_STATUS, fw_link_status);
}
static char *
bnx2_xceiver_str(struct bnx2 *bp)
{
	return ((bp->phy_port == PORT_FIBRE) ? "SerDes" :
		((bp->phy_flags & BNX2_PHY_FLAG_SERDES) ? "Remote Copper" :
		 "Copper"));
}
static void
bnx2_report_link(struct bnx2 *bp)
{
	if (bp->link_up) {
		netif_carrier_on(bp->dev);
		netdev_info(bp->dev, "NIC %s Link is Up, %d Mbps %s duplex",
			    bnx2_xceiver_str(bp),
			    bp->line_speed,
			    bp->duplex == DUPLEX_FULL ? "full" : "half");

		if (bp->flow_ctrl) {
			if (bp->flow_ctrl & FLOW_CTRL_RX) {
				pr_cont(", receive ");
				if (bp->flow_ctrl & FLOW_CTRL_TX)
					pr_cont("& transmit ");
			}
			else {
				pr_cont(", transmit ");
			}
			pr_cont("flow control ON");
		}
		pr_cont("\n");
	} else {
		netif_carrier_off(bp->dev);
		netdev_err(bp->dev, "NIC %s Link is Down\n",
			   bnx2_xceiver_str(bp));
	}

	bnx2_report_fw_link(bp);
}
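
/* Resolve tx/rx pause from the local and remote advertisements.  For
 * SerDes links, the 1000Base-X pause bits are first remapped onto the
 * copper ADVERTISE_PAUSE_* bits so one resolution table covers both media.
 */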
static void
bnx2_resolve_flow_ctrl(struct bnx2 *bp)
{
	u32 local_adv, remote_adv;

	bp->flow_ctrl = 0;
	if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
	    (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {

		if (bp->duplex == DUPLEX_FULL) {
			bp->flow_ctrl = bp->req_flow_ctrl;
		}
		return;
	}

	if (bp->duplex != DUPLEX_FULL) {
		return;
	}

	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5708)) {
		u32 val;

		bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
		if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_TX;
		if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_RX;
		return;
	}

	bnx2_read_phy(bp, bp->mii_adv, &local_adv);
	bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		u32 new_local_adv = 0;
		u32 new_remote_adv = 0;

		if (local_adv & ADVERTISE_1000XPAUSE)
			new_local_adv |= ADVERTISE_PAUSE_CAP;
		if (local_adv & ADVERTISE_1000XPSE_ASYM)
			new_local_adv |= ADVERTISE_PAUSE_ASYM;
		if (remote_adv & ADVERTISE_1000XPAUSE)
			new_remote_adv |= ADVERTISE_PAUSE_CAP;
		if (remote_adv & ADVERTISE_1000XPSE_ASYM)
			new_remote_adv |= ADVERTISE_PAUSE_ASYM;

		local_adv = new_local_adv;
		remote_adv = new_remote_adv;
	}

	/* See Table 28B-3 of 802.3ab-1999 spec. */
	if (local_adv & ADVERTISE_PAUSE_CAP) {
		if (local_adv & ADVERTISE_PAUSE_ASYM) {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
			else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
				bp->flow_ctrl = FLOW_CTRL_RX;
			}
		}
		else {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
		}
	}
	else if (local_adv & ADVERTISE_PAUSE_ASYM) {
		if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
		    (remote_adv & ADVERTISE_PAUSE_ASYM)) {

			bp->flow_ctrl = FLOW_CTRL_TX;
		}
	}
}
static int
bnx2_5709s_linkup(struct bnx2 *bp)
{
	u32 val, speed;

	bp->link_up = 1;

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_GP_STATUS);
	bnx2_read_phy(bp, MII_BNX2_GP_TOP_AN_STATUS1, &val);
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	if ((bp->autoneg & AUTONEG_SPEED) == 0) {
		bp->line_speed = bp->req_line_speed;
		bp->duplex = bp->req_duplex;
		return 0;
	}
	speed = val & MII_BNX2_GP_TOP_AN_SPEED_MSK;
	switch (speed) {
		case MII_BNX2_GP_TOP_AN_SPEED_10:
			bp->line_speed = SPEED_10;
			break;
		case MII_BNX2_GP_TOP_AN_SPEED_100:
			bp->line_speed = SPEED_100;
			break;
		case MII_BNX2_GP_TOP_AN_SPEED_1G:
		case MII_BNX2_GP_TOP_AN_SPEED_1GKV:
			bp->line_speed = SPEED_1000;
			break;
		case MII_BNX2_GP_TOP_AN_SPEED_2_5G:
			bp->line_speed = SPEED_2500;
			break;
	}
	if (val & MII_BNX2_GP_TOP_AN_FD)
		bp->duplex = DUPLEX_FULL;
	else
		bp->duplex = DUPLEX_HALF;
	return 0;
}
static int
bnx2_5708s_linkup(struct bnx2 *bp)
{
	u32 val;

	bp->link_up = 1;
	bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
	switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
		case BCM5708S_1000X_STAT1_SPEED_10:
			bp->line_speed = SPEED_10;
			break;
		case BCM5708S_1000X_STAT1_SPEED_100:
			bp->line_speed = SPEED_100;
			break;
		case BCM5708S_1000X_STAT1_SPEED_1G:
			bp->line_speed = SPEED_1000;
			break;
		case BCM5708S_1000X_STAT1_SPEED_2G5:
			bp->line_speed = SPEED_2500;
			break;
	}
	if (val & BCM5708S_1000X_STAT1_FD)
		bp->duplex = DUPLEX_FULL;
	else
		bp->duplex = DUPLEX_HALF;

	return 0;
}
static int
bnx2_5706s_linkup(struct bnx2 *bp)
{
	u32 bmcr, local_adv, remote_adv, common;

	bp->link_up = 1;
	bp->line_speed = SPEED_1000;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
	if (bmcr & BMCR_FULLDPLX) {
		bp->duplex = DUPLEX_FULL;
	}
	else {
		bp->duplex = DUPLEX_HALF;
	}

	if (!(bmcr & BMCR_ANENABLE)) {
		return 0;
	}

	bnx2_read_phy(bp, bp->mii_adv, &local_adv);
	bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

	common = local_adv & remote_adv;
	if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {

		if (common & ADVERTISE_1000XFULL) {
			bp->duplex = DUPLEX_FULL;
		}
		else {
			bp->duplex = DUPLEX_HALF;
		}
	}

	return 0;
}
static int
bnx2_copper_linkup(struct bnx2 *bp)
{
	u32 bmcr;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
	if (bmcr & BMCR_ANENABLE) {
		u32 local_adv, remote_adv, common;

		bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
		bnx2_read_phy(bp, MII_STAT1000, &remote_adv);

		common = local_adv & (remote_adv >> 2);
		if (common & ADVERTISE_1000FULL) {
			bp->line_speed = SPEED_1000;
			bp->duplex = DUPLEX_FULL;
		}
		else if (common & ADVERTISE_1000HALF) {
			bp->line_speed = SPEED_1000;
			bp->duplex = DUPLEX_HALF;
		}
		else {
			bnx2_read_phy(bp, bp->mii_adv, &local_adv);
			bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

			common = local_adv & remote_adv;
			if (common & ADVERTISE_100FULL) {
				bp->line_speed = SPEED_100;
				bp->duplex = DUPLEX_FULL;
			}
			else if (common & ADVERTISE_100HALF) {
				bp->line_speed = SPEED_100;
				bp->duplex = DUPLEX_HALF;
			}
			else if (common & ADVERTISE_10FULL) {
				bp->line_speed = SPEED_10;
				bp->duplex = DUPLEX_FULL;
			}
			else if (common & ADVERTISE_10HALF) {
				bp->line_speed = SPEED_10;
				bp->duplex = DUPLEX_HALF;
			}
			else {
				bp->line_speed = 0;
				bp->link_up = 0;
			}
		}
	}
	else {
		if (bmcr & BMCR_SPEED100) {
			bp->line_speed = SPEED_100;
		}
		else {
			bp->line_speed = SPEED_10;
		}
		if (bmcr & BMCR_FULLDPLX) {
			bp->duplex = DUPLEX_FULL;
		}
		else {
			bp->duplex = DUPLEX_HALF;
		}
	}

	return 0;
}
static void
bnx2_init_rx_context(struct bnx2 *bp, u32 cid)
{
	u32 val, rx_cid_addr = GET_CID_ADDR(cid);

	val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
	val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
	val |= 0x02 << 8;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		u32 lo_water, hi_water;

		if (bp->flow_ctrl & FLOW_CTRL_TX)
			lo_water = BNX2_L2CTX_LO_WATER_MARK_DEFAULT;
		else
			lo_water = BNX2_L2CTX_LO_WATER_MARK_DIS;
		if (lo_water >= bp->rx_ring_size)
			lo_water = 0;

		hi_water = min_t(int, bp->rx_ring_size / 4, lo_water + 16);

		if (hi_water <= lo_water)
			lo_water = 0;

		hi_water /= BNX2_L2CTX_HI_WATER_MARK_SCALE;
		lo_water /= BNX2_L2CTX_LO_WATER_MARK_SCALE;

		if (hi_water > 0xf)
			hi_water = 0xf;
		else if (hi_water == 0)
			lo_water = 0;
		val |= lo_water | (hi_water << BNX2_L2CTX_HI_WATER_MARK_SHIFT);
	}
	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_CTX_TYPE, val);
}
static void
bnx2_init_all_rx_contexts(struct bnx2 *bp)
{
	int i;
	u32 cid;

	for (i = 0, cid = RX_CID; i < bp->num_rx_rings; i++, cid++) {
		if (i == 1)
			cid = RX_RSS_CID;
		bnx2_init_rx_context(bp, cid);
	}
}
static void
bnx2_set_mac_link(struct bnx2 *bp)
{
	u32 val;

	REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
	if (bp->link_up && (bp->line_speed == SPEED_1000) &&
	    (bp->duplex == DUPLEX_HALF)) {
		REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
	}

	/* Configure the EMAC mode register. */
	val = REG_RD(bp, BNX2_EMAC_MODE);

	val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
		BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
		BNX2_EMAC_MODE_25G_MODE);

	if (bp->link_up) {
		switch (bp->line_speed) {
			case SPEED_10:
				if (CHIP_NUM(bp) != CHIP_NUM_5706) {
					val |= BNX2_EMAC_MODE_PORT_MII_10M;
					break;
				}
				/* fall through */
			case SPEED_100:
				val |= BNX2_EMAC_MODE_PORT_MII;
				break;
			case SPEED_2500:
				val |= BNX2_EMAC_MODE_25G_MODE;
				/* fall through */
			case SPEED_1000:
				val |= BNX2_EMAC_MODE_PORT_GMII;
				break;
		}
	}
	else {
		val |= BNX2_EMAC_MODE_PORT_GMII;
	}

	/* Set the MAC to operate in the appropriate duplex mode. */
	if (bp->duplex == DUPLEX_HALF)
		val |= BNX2_EMAC_MODE_HALF_DUPLEX;
	REG_WR(bp, BNX2_EMAC_MODE, val);

	/* Enable/disable rx PAUSE. */
	bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_RX)
		bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);

	/* Enable/disable tx PAUSE. */
	val = REG_RD(bp, BNX2_EMAC_TX_MODE);
	val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_TX)
		val |= BNX2_EMAC_TX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_TX_MODE, val);

	/* Acknowledge the interrupt. */
	REG_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_init_all_rx_contexts(bp);
}
static void
bnx2_enable_bmsr1(struct bnx2 *bp)
{
	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5709))
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_GP_STATUS);
}

static void
bnx2_disable_bmsr1(struct bnx2 *bp)
{
	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5709))
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
}
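
/* bnx2_test_and_enable_2g5() returns nonzero if 2.5G was already enabled
 * and zero if the setting had to be changed; callers treat a zero return
 * as a reason to force the link down so the new speed takes effect.
 */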
static int
bnx2_test_and_enable_2g5(struct bnx2 *bp)
{
	u32 up1;
	int ret = 1;

	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
		return 0;

	if (bp->autoneg & AUTONEG_SPEED)
		bp->advertising |= ADVERTISED_2500baseX_Full;

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);

	bnx2_read_phy(bp, bp->mii_up1, &up1);
	if (!(up1 & BCM5708S_UP1_2G5)) {
		up1 |= BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, bp->mii_up1, up1);
		ret = 0;
	}

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	return ret;
}

static int
bnx2_test_and_disable_2g5(struct bnx2 *bp)
{
	u32 up1;
	int ret = 0;

	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
		return 0;

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);

	bnx2_read_phy(bp, bp->mii_up1, &up1);
	if (up1 & BCM5708S_UP1_2G5) {
		up1 &= ~BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, bp->mii_up1, up1);
		ret = 1;
	}

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	return ret;
}
static void
bnx2_enable_forced_2g5(struct bnx2 *bp)
{
	u32 bmcr;

	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
		return;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		u32 val;

		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_SERDES_DIG);
		bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
		val &= ~MII_BNX2_SD_MISC1_FORCE_MSK;
		val |= MII_BNX2_SD_MISC1_FORCE | MII_BNX2_SD_MISC1_FORCE_2_5G;
		bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);

		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		bmcr |= BCM5708S_BMCR_FORCE_2500;
	} else {
		return;
	}

	if (bp->autoneg & AUTONEG_SPEED) {
		bmcr &= ~BMCR_ANENABLE;
		if (bp->req_duplex == DUPLEX_FULL)
			bmcr |= BMCR_FULLDPLX;
	}
	bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
}

static void
bnx2_disable_forced_2g5(struct bnx2 *bp)
{
	u32 bmcr;

	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
		return;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		u32 val;

		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_SERDES_DIG);
		bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
		val &= ~MII_BNX2_SD_MISC1_FORCE;
		bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);

		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		bmcr &= ~BCM5708S_BMCR_FORCE_2500;
	} else {
		return;
	}

	if (bp->autoneg & AUTONEG_SPEED)
		bmcr |= BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_ANRESTART;
	bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
}
static void
bnx2_5706s_force_link_dn(struct bnx2 *bp, int start)
{
	u32 val;

	bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_SERDES_CTL);
	bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
	if (start)
		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val & 0xff0f);
	else
		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val | 0xc0);
}
static int
bnx2_set_link(struct bnx2 *bp)
{
	u32 bmsr;
	u8 link_up;

	if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) {
		bp->link_up = 1;
		return 0;
	}

	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return 0;

	link_up = bp->link_up;

	bnx2_enable_bmsr1(bp);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_disable_bmsr1(bp);

	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5706)) {
		u32 val, an_dbg;

		if (bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN) {
			bnx2_5706s_force_link_dn(bp, 0);
			bp->phy_flags &= ~BNX2_PHY_FLAG_FORCED_DOWN;
		}
		val = REG_RD(bp, BNX2_EMAC_STATUS);

		bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);

		if ((val & BNX2_EMAC_STATUS_LINK) &&
		    !(an_dbg & MISC_SHDW_AN_DBG_NOSYNC))
			bmsr |= BMSR_LSTATUS;
		else
			bmsr &= ~BMSR_LSTATUS;
	}

	if (bmsr & BMSR_LSTATUS) {
		bp->link_up = 1;

		if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
			if (CHIP_NUM(bp) == CHIP_NUM_5706)
				bnx2_5706s_linkup(bp);
			else if (CHIP_NUM(bp) == CHIP_NUM_5708)
				bnx2_5708s_linkup(bp);
			else if (CHIP_NUM(bp) == CHIP_NUM_5709)
				bnx2_5709s_linkup(bp);
		}
		else {
			bnx2_copper_linkup(bp);
		}
		bnx2_resolve_flow_ctrl(bp);
	}
	else {
		if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
		    (bp->autoneg & AUTONEG_SPEED))
			bnx2_disable_forced_2g5(bp);

		if (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT) {
			u32 bmcr;

			bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
			bmcr |= BMCR_ANENABLE;
			bnx2_write_phy(bp, bp->mii_bmcr, bmcr);

			bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
		}
		bp->link_up = 0;
	}

	if (bp->link_up != link_up) {
		bnx2_report_link(bp);
	}

	bnx2_set_mac_link(bp);

	return 0;
}
static int
bnx2_reset_phy(struct bnx2 *bp)
{
	int i;
	u32 reg;

	bnx2_write_phy(bp, bp->mii_bmcr, BMCR_RESET);

#define PHY_RESET_MAX_WAIT 100
	for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
		udelay(10);

		bnx2_read_phy(bp, bp->mii_bmcr, &reg);
		if (!(reg & BMCR_RESET)) {
			udelay(20);
			break;
		}
	}
	if (i == PHY_RESET_MAX_WAIT) {
		return -EBUSY;
	}
	return 0;
}
static u32
bnx2_phy_get_pause_adv(struct bnx2 *bp)
{
	u32 adv = 0;

	if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
	    (FLOW_CTRL_RX | FLOW_CTRL_TX)) {

		if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
			adv = ADVERTISE_1000XPAUSE;
		}
		else {
			adv = ADVERTISE_PAUSE_CAP;
		}
	}
	else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
		if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
			adv = ADVERTISE_1000XPSE_ASYM;
		}
		else {
			adv = ADVERTISE_PAUSE_ASYM;
		}
	}
	else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
		if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
			adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
		}
		else {
			adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
		}
	}
	return adv;
}

static int bnx2_fw_sync(struct bnx2 *, u32, int, int);
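
/* The __releases/__acquires annotations on the setup functions below are
 * sparse lock-checking hints: these functions temporarily drop phy_lock
 * (e.g. around bnx2_fw_sync() or msleep()) and re-take it before returning.
 */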
static int
bnx2_setup_remote_phy(struct bnx2 *bp, u8 port)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
{
	u32 speed_arg = 0, pause_adv;

	pause_adv = bnx2_phy_get_pause_adv(bp);

	if (bp->autoneg & AUTONEG_SPEED) {
		speed_arg |= BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG;
		if (bp->advertising & ADVERTISED_10baseT_Half)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10HALF;
		if (bp->advertising & ADVERTISED_10baseT_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10FULL;
		if (bp->advertising & ADVERTISED_100baseT_Half)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100HALF;
		if (bp->advertising & ADVERTISED_100baseT_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100FULL;
		if (bp->advertising & ADVERTISED_1000baseT_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
		if (bp->advertising & ADVERTISED_2500baseX_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
	} else {
		if (bp->req_line_speed == SPEED_2500)
			speed_arg = BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
		else if (bp->req_line_speed == SPEED_1000)
			speed_arg = BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
		else if (bp->req_line_speed == SPEED_100) {
			if (bp->req_duplex == DUPLEX_FULL)
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100FULL;
			else
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100HALF;
		} else if (bp->req_line_speed == SPEED_10) {
			if (bp->req_duplex == DUPLEX_FULL)
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10FULL;
			else
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10HALF;
		}
	}

	if (pause_adv & (ADVERTISE_1000XPAUSE | ADVERTISE_PAUSE_CAP))
		speed_arg |= BNX2_NETLINK_SET_LINK_FC_SYM_PAUSE;
	if (pause_adv & (ADVERTISE_1000XPSE_ASYM | ADVERTISE_PAUSE_ASYM))
		speed_arg |= BNX2_NETLINK_SET_LINK_FC_ASYM_PAUSE;

	if (port == PORT_TP)
		speed_arg |= BNX2_NETLINK_SET_LINK_PHY_APP_REMOTE |
			     BNX2_NETLINK_SET_LINK_ETH_AT_WIRESPEED;

	bnx2_shmem_wr(bp, BNX2_DRV_MB_ARG0, speed_arg);

	spin_unlock_bh(&bp->phy_lock);
	bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_CMD_SET_LINK, 1, 0);
	spin_lock_bh(&bp->phy_lock);

	return 0;
}
static int
bnx2_setup_serdes_phy(struct bnx2 *bp, u8 port)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
{
	u32 adv, bmcr;
	u32 new_adv = 0;

	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return (bnx2_setup_remote_phy(bp, port));

	if (!(bp->autoneg & AUTONEG_SPEED)) {
		u32 new_bmcr;
		int force_link_down = 0;

		if (bp->req_line_speed == SPEED_2500) {
			if (!bnx2_test_and_enable_2g5(bp))
				force_link_down = 1;
		} else if (bp->req_line_speed == SPEED_1000) {
			if (bnx2_test_and_disable_2g5(bp))
				force_link_down = 1;
		}
		bnx2_read_phy(bp, bp->mii_adv, &adv);
		adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		new_bmcr = bmcr & ~BMCR_ANENABLE;
		new_bmcr |= BMCR_SPEED1000;

		if (CHIP_NUM(bp) == CHIP_NUM_5709) {
			if (bp->req_line_speed == SPEED_2500)
				bnx2_enable_forced_2g5(bp);
			else if (bp->req_line_speed == SPEED_1000) {
				bnx2_disable_forced_2g5(bp);
				new_bmcr &= ~0x2000;
			}

		} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
			if (bp->req_line_speed == SPEED_2500)
				new_bmcr |= BCM5708S_BMCR_FORCE_2500;
			else
				new_bmcr = bmcr & ~BCM5708S_BMCR_FORCE_2500;
		}

		if (bp->req_duplex == DUPLEX_FULL) {
			adv |= ADVERTISE_1000XFULL;
			new_bmcr |= BMCR_FULLDPLX;
		}
		else {
			adv |= ADVERTISE_1000XHALF;
			new_bmcr &= ~BMCR_FULLDPLX;
		}
		if ((new_bmcr != bmcr) || (force_link_down)) {
			/* Force a link down visible on the other side */
			if (bp->link_up) {
				bnx2_write_phy(bp, bp->mii_adv, adv &
					       ~(ADVERTISE_1000XFULL |
						 ADVERTISE_1000XHALF));
				bnx2_write_phy(bp, bp->mii_bmcr, bmcr |
					BMCR_ANRESTART | BMCR_ANENABLE);

				bp->link_up = 0;
				netif_carrier_off(bp->dev);
				bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
				bnx2_report_link(bp);
			}
			bnx2_write_phy(bp, bp->mii_adv, adv);
			bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
		} else {
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	bnx2_test_and_enable_2g5(bp);

	if (bp->advertising & ADVERTISED_1000baseT_Full)
		new_adv |= ADVERTISE_1000XFULL;

	new_adv |= bnx2_phy_get_pause_adv(bp);

	bnx2_read_phy(bp, bp->mii_adv, &adv);
	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	bp->serdes_an_pending = 0;
	if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
		/* Force a link down visible on the other side */
		if (bp->link_up) {
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
			spin_unlock_bh(&bp->phy_lock);
			msleep(20);
			spin_lock_bh(&bp->phy_lock);
		}

		bnx2_write_phy(bp, bp->mii_adv, new_adv);
		bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART |
			BMCR_ANENABLE);
		/* Speed up link-up time when the link partner
		 * does not autonegotiate which is very common
		 * in blade servers. Some blade servers use
		 * IPMI for keyboard input and it's important
		 * to minimize link disruptions. Autoneg. involves
		 * exchanging base pages plus 3 next pages and
		 * normally completes in about 120 msec.
		 */
		bp->current_interval = BNX2_SERDES_AN_TIMEOUT;
		bp->serdes_an_pending = 1;
		mod_timer(&bp->timer, jiffies + bp->current_interval);
	} else {
		bnx2_resolve_flow_ctrl(bp);
		bnx2_set_mac_link(bp);
	}

	return 0;
}
#define ETHTOOL_ALL_FIBRE_SPEED						\
	(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) ?			\
		(ADVERTISED_2500baseX_Full | ADVERTISED_1000baseT_Full) :\
		(ADVERTISED_1000baseT_Full)

#define ETHTOOL_ALL_COPPER_SPEED					\
	(ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |		\
	ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |		\
	ADVERTISED_1000baseT_Full)

#define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
	ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)

#define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)
static void
bnx2_set_default_remote_link(struct bnx2 *bp)
{
	u32 link;

	if (bp->phy_port == PORT_TP)
		link = bnx2_shmem_rd(bp, BNX2_RPHY_COPPER_LINK);
	else
		link = bnx2_shmem_rd(bp, BNX2_RPHY_SERDES_LINK);

	if (link & BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG) {
		bp->req_line_speed = 0;
		bp->autoneg |= AUTONEG_SPEED;
		bp->advertising = ADVERTISED_Autoneg;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
			bp->advertising |= ADVERTISED_10baseT_Half;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_10FULL)
			bp->advertising |= ADVERTISED_10baseT_Full;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
			bp->advertising |= ADVERTISED_100baseT_Half;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_100FULL)
			bp->advertising |= ADVERTISED_100baseT_Full;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
			bp->advertising |= ADVERTISED_1000baseT_Full;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
			bp->advertising |= ADVERTISED_2500baseX_Full;
	} else {
		bp->autoneg = 0;
		bp->advertising = 0;
		bp->req_duplex = DUPLEX_FULL;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_10) {
			bp->req_line_speed = SPEED_10;
			if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
				bp->req_duplex = DUPLEX_HALF;
		}
		if (link & BNX2_NETLINK_SET_LINK_SPEED_100) {
			bp->req_line_speed = SPEED_100;
			if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
				bp->req_duplex = DUPLEX_HALF;
		}
		if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
			bp->req_line_speed = SPEED_1000;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
			bp->req_line_speed = SPEED_2500;
	}
}
static void
bnx2_set_default_link(struct bnx2 *bp)
{
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
		bnx2_set_default_remote_link(bp);
		return;
	}

	bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
	bp->req_line_speed = 0;
	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		u32 reg;

		bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;

		reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG);
		reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
		if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
			bp->autoneg = 0;
			bp->req_line_speed = bp->line_speed = SPEED_1000;
			bp->req_duplex = DUPLEX_FULL;
		}
	} else
		bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
}
static void
bnx2_send_heart_beat(struct bnx2 *bp)
{
	u32 msg;
	u32 addr;

	spin_lock(&bp->indirect_lock);
	msg = (u32) (++bp->fw_drv_pulse_wr_seq & BNX2_DRV_PULSE_SEQ_MASK);
	addr = bp->shmem_base + BNX2_DRV_PULSE_MB;
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, addr);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW, msg);
	spin_unlock(&bp->indirect_lock);
}
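
/* Note the deliberate fallthrough in the speed switch below: each _HALF
 * case sets half duplex and then falls into the matching _FULL case to
 * set the line speed.
 */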
static void
bnx2_remote_phy_event(struct bnx2 *bp)
{
	u32 msg;
	u8 link_up = bp->link_up;
	u8 old_port;

	msg = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);

	if (msg & BNX2_LINK_STATUS_HEART_BEAT_EXPIRED)
		bnx2_send_heart_beat(bp);

	msg &= ~BNX2_LINK_STATUS_HEART_BEAT_EXPIRED;

	if ((msg & BNX2_LINK_STATUS_LINK_UP) == BNX2_LINK_STATUS_LINK_DOWN)
		bp->link_up = 0;
	else {
		u32 speed;

		bp->link_up = 1;
		speed = msg & BNX2_LINK_STATUS_SPEED_MASK;
		bp->duplex = DUPLEX_FULL;
		switch (speed) {
			case BNX2_LINK_STATUS_10HALF:
				bp->duplex = DUPLEX_HALF;
			case BNX2_LINK_STATUS_10FULL:
				bp->line_speed = SPEED_10;
				break;
			case BNX2_LINK_STATUS_100HALF:
				bp->duplex = DUPLEX_HALF;
			case BNX2_LINK_STATUS_100BASE_T4:
			case BNX2_LINK_STATUS_100FULL:
				bp->line_speed = SPEED_100;
				break;
			case BNX2_LINK_STATUS_1000HALF:
				bp->duplex = DUPLEX_HALF;
			case BNX2_LINK_STATUS_1000FULL:
				bp->line_speed = SPEED_1000;
				break;
			case BNX2_LINK_STATUS_2500HALF:
				bp->duplex = DUPLEX_HALF;
			case BNX2_LINK_STATUS_2500FULL:
				bp->line_speed = SPEED_2500;
				break;
			default:
				bp->line_speed = 0;
				break;
		}

		bp->flow_ctrl = 0;
		if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
		    (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
			if (bp->duplex == DUPLEX_FULL)
				bp->flow_ctrl = bp->req_flow_ctrl;
		} else {
			if (msg & BNX2_LINK_STATUS_TX_FC_ENABLED)
				bp->flow_ctrl |= FLOW_CTRL_TX;
			if (msg & BNX2_LINK_STATUS_RX_FC_ENABLED)
				bp->flow_ctrl |= FLOW_CTRL_RX;
		}

		old_port = bp->phy_port;
		if (msg & BNX2_LINK_STATUS_SERDES_LINK)
			bp->phy_port = PORT_FIBRE;
		else
			bp->phy_port = PORT_TP;

		if (old_port != bp->phy_port)
			bnx2_set_default_link(bp);
	}

	if (bp->link_up != link_up)
		bnx2_report_link(bp);

	bnx2_set_mac_link(bp);
}
static int
bnx2_set_remote_link(struct bnx2 *bp)
{
	u32 evt_code;

	evt_code = bnx2_shmem_rd(bp, BNX2_FW_EVT_CODE_MB);
	switch (evt_code) {
		case BNX2_FW_EVT_CODE_LINK_EVENT:
			bnx2_remote_phy_event(bp);
			break;
		case BNX2_FW_EVT_CODE_SW_TIMER_EXPIRATION_EVENT:
		default:
			bnx2_send_heart_beat(bp);
			break;
	}
	return 0;
}
static int
bnx2_setup_copper_phy(struct bnx2 *bp)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
{
	u32 bmcr;
	u32 new_bmcr;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	if (bp->autoneg & AUTONEG_SPEED) {
		u32 adv_reg, adv1000_reg;
		u32 new_adv_reg = 0;
		u32 new_adv1000_reg = 0;

		bnx2_read_phy(bp, bp->mii_adv, &adv_reg);
		adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
			ADVERTISE_PAUSE_ASYM);

		bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
		adv1000_reg &= PHY_ALL_1000_SPEED;

		if (bp->advertising & ADVERTISED_10baseT_Half)
			new_adv_reg |= ADVERTISE_10HALF;
		if (bp->advertising & ADVERTISED_10baseT_Full)
			new_adv_reg |= ADVERTISE_10FULL;
		if (bp->advertising & ADVERTISED_100baseT_Half)
			new_adv_reg |= ADVERTISE_100HALF;
		if (bp->advertising & ADVERTISED_100baseT_Full)
			new_adv_reg |= ADVERTISE_100FULL;
		if (bp->advertising & ADVERTISED_1000baseT_Full)
			new_adv1000_reg |= ADVERTISE_1000FULL;

		new_adv_reg |= ADVERTISE_CSMA;

		new_adv_reg |= bnx2_phy_get_pause_adv(bp);

		if ((adv1000_reg != new_adv1000_reg) ||
			(adv_reg != new_adv_reg) ||
			((bmcr & BMCR_ANENABLE) == 0)) {

			bnx2_write_phy(bp, bp->mii_adv, new_adv_reg);
			bnx2_write_phy(bp, MII_CTRL1000, new_adv1000_reg);
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_ANRESTART |
				BMCR_ANENABLE);
		}
		else if (bp->link_up) {
			/* Flow ctrl may have changed from auto to forced */
			/* or vice-versa. */

			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	new_bmcr = 0;
	if (bp->req_line_speed == SPEED_100) {
		new_bmcr |= BMCR_SPEED100;
	}
	if (bp->req_duplex == DUPLEX_FULL) {
		new_bmcr |= BMCR_FULLDPLX;
	}
	if (new_bmcr != bmcr) {
		u32 bmsr;

		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);

		if (bmsr & BMSR_LSTATUS) {
			/* Force link down */
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
			spin_unlock_bh(&bp->phy_lock);
			msleep(50);
			spin_lock_bh(&bp->phy_lock);

			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
		}

		bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);

		/* Normally, the new speed is setup after the link has
		 * gone down and up again. In some cases, link will not go
		 * down so we need to set up the new speed here.
		 */
		if (bmsr & BMSR_LSTATUS) {
			bp->line_speed = bp->req_line_speed;
			bp->duplex = bp->req_duplex;
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
	} else {
		bnx2_resolve_flow_ctrl(bp);
		bnx2_set_mac_link(bp);
	}
	return 0;
}
static int
bnx2_setup_phy(struct bnx2 *bp, u8 port)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
{
	if (bp->loopback == MAC_LOOPBACK)
		return 0;

	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		return (bnx2_setup_serdes_phy(bp, port));
	}
	else {
		return (bnx2_setup_copper_phy(bp));
	}
}
static int
bnx2_init_5709s_phy(struct bnx2 *bp, int reset_phy)
{
	u32 val;

	bp->mii_bmcr = MII_BMCR + 0x10;
	bp->mii_bmsr = MII_BMSR + 0x10;
	bp->mii_bmsr1 = MII_BNX2_GP_TOP_AN_STATUS1;
	bp->mii_adv = MII_ADVERTISE + 0x10;
	bp->mii_lpa = MII_LPA + 0x10;
	bp->mii_up1 = MII_BNX2_OVER1G_UP1;

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_AER);
	bnx2_write_phy(bp, MII_BNX2_AER_AER, MII_BNX2_AER_AER_AN_MMD);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
	if (reset_phy)
		bnx2_reset_phy(bp);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_SERDES_DIG);

	bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, &val);
	val &= ~MII_BNX2_SD_1000XCTL1_AUTODET;
	val |= MII_BNX2_SD_1000XCTL1_FIBER;
	bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, val);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
	bnx2_read_phy(bp, MII_BNX2_OVER1G_UP1, &val);
	if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
		val |= BCM5708S_UP1_2G5;
	else
		val &= ~BCM5708S_UP1_2G5;
	bnx2_write_phy(bp, MII_BNX2_OVER1G_UP1, val);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_BAM_NXTPG);
	bnx2_read_phy(bp, MII_BNX2_BAM_NXTPG_CTL, &val);
	val |= MII_BNX2_NXTPG_CTL_T2 | MII_BNX2_NXTPG_CTL_BAM;
	bnx2_write_phy(bp, MII_BNX2_BAM_NXTPG_CTL, val);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_CL73_USERB0);

	val = MII_BNX2_CL73_BAM_EN | MII_BNX2_CL73_BAM_STA_MGR_EN |
	      MII_BNX2_CL73_BAM_NP_AFT_BP_EN;
	bnx2_write_phy(bp, MII_BNX2_CL73_BAM_CTL1, val);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	return 0;
}
2210 static int
2211 bnx2_init_5708s_phy(struct bnx2 *bp, int reset_phy)
2213 u32 val;
2215 if (reset_phy)
2216 bnx2_reset_phy(bp);
2218 bp->mii_up1 = BCM5708S_UP1;
2220 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
2221 bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
2222 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
2224 bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
2225 val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
2226 bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);
2228 bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
2229 val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
2230 bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);
2232 if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) {
2233 bnx2_read_phy(bp, BCM5708S_UP1, &val);
2234 val |= BCM5708S_UP1_2G5;
2235 bnx2_write_phy(bp, BCM5708S_UP1, val);
2238 if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
2239 (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
2240 (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
2241 /* increase tx signal amplitude */
2242 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
2243 BCM5708S_BLK_ADDR_TX_MISC);
2244 bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
2245 val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
2246 bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
2247 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
2250 val = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG) &
2251 BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;
2253 if (val) {
2254 u32 is_backplane;
2256 is_backplane = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
2257 if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
2258 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
2259 BCM5708S_BLK_ADDR_TX_MISC);
2260 bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
2261 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
2262 BCM5708S_BLK_ADDR_DIG);
2265 return 0;
2268 static int
2269 bnx2_init_5706s_phy(struct bnx2 *bp, int reset_phy)
2271 if (reset_phy)
2272 bnx2_reset_phy(bp);
2274 bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
2276 if (CHIP_NUM(bp) == CHIP_NUM_5706)
2277 REG_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300);
2279 if (bp->dev->mtu > 1500) {
2280 u32 val;
2282 /* Set extended packet length bit */
2283 bnx2_write_phy(bp, 0x18, 0x7);
2284 bnx2_read_phy(bp, 0x18, &val);
2285 bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);
2287 bnx2_write_phy(bp, 0x1c, 0x6c00);
2288 bnx2_read_phy(bp, 0x1c, &val);
2289 bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
2291 else {
2292 u32 val;
2294 bnx2_write_phy(bp, 0x18, 0x7);
2295 bnx2_read_phy(bp, 0x18, &val);
2296 bnx2_write_phy(bp, 0x18, val & ~0x4007);
2298 bnx2_write_phy(bp, 0x1c, 0x6c00);
2299 bnx2_read_phy(bp, 0x1c, &val);
2300 bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
2303 return 0;
2306 static int
2307 bnx2_init_copper_phy(struct bnx2 *bp, int reset_phy)
2309 u32 val;
2311 if (reset_phy)
2312 bnx2_reset_phy(bp);
2314 if (bp->phy_flags & BNX2_PHY_FLAG_CRC_FIX) {
2315 bnx2_write_phy(bp, 0x18, 0x0c00);
2316 bnx2_write_phy(bp, 0x17, 0x000a);
2317 bnx2_write_phy(bp, 0x15, 0x310b);
2318 bnx2_write_phy(bp, 0x17, 0x201f);
2319 bnx2_write_phy(bp, 0x15, 0x9506);
2320 bnx2_write_phy(bp, 0x17, 0x401f);
2321 bnx2_write_phy(bp, 0x15, 0x14e2);
2322 bnx2_write_phy(bp, 0x18, 0x0400);
2325 if (bp->phy_flags & BNX2_PHY_FLAG_DIS_EARLY_DAC) {
2326 bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS,
2327 MII_BNX2_DSP_EXPAND_REG | 0x8);
2328 bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
2329 val &= ~(1 << 8);
2330 bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val);
2333 if (bp->dev->mtu > 1500) {
2334 /* Set extended packet length bit */
2335 bnx2_write_phy(bp, 0x18, 0x7);
2336 bnx2_read_phy(bp, 0x18, &val);
2337 bnx2_write_phy(bp, 0x18, val | 0x4000);
2339 bnx2_read_phy(bp, 0x10, &val);
2340 bnx2_write_phy(bp, 0x10, val | 0x1);
2342 else {
2343 bnx2_write_phy(bp, 0x18, 0x7);
2344 bnx2_read_phy(bp, 0x18, &val);
2345 bnx2_write_phy(bp, 0x18, val & ~0x4007);
2347 bnx2_read_phy(bp, 0x10, &val);
2348 bnx2_write_phy(bp, 0x10, val & ~0x1);
2351 /* ethernet@wirespeed */
2352 bnx2_write_phy(bp, 0x18, 0x7007);
2353 bnx2_read_phy(bp, 0x18, &val);
2354 bnx2_write_phy(bp, 0x18, val | (1 << 15) | (1 << 4));
2355 return 0;
2359 static int
2360 bnx2_init_phy(struct bnx2 *bp, int reset_phy)
2361 __releases(&bp->phy_lock)
2362 __acquires(&bp->phy_lock)
2364 u32 val;
2365 int rc = 0;
2367 bp->phy_flags &= ~BNX2_PHY_FLAG_INT_MODE_MASK;
2368 bp->phy_flags |= BNX2_PHY_FLAG_INT_MODE_LINK_READY;
2370 bp->mii_bmcr = MII_BMCR;
2371 bp->mii_bmsr = MII_BMSR;
2372 bp->mii_bmsr1 = MII_BMSR;
2373 bp->mii_adv = MII_ADVERTISE;
2374 bp->mii_lpa = MII_LPA;
2376 REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
2378 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
2379 goto setup_phy;
2381 bnx2_read_phy(bp, MII_PHYSID1, &val);
2382 bp->phy_id = val << 16;
2383 bnx2_read_phy(bp, MII_PHYSID2, &val);
2384 bp->phy_id |= val & 0xffff;
2386 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
2387 if (CHIP_NUM(bp) == CHIP_NUM_5706)
2388 rc = bnx2_init_5706s_phy(bp, reset_phy);
2389 else if (CHIP_NUM(bp) == CHIP_NUM_5708)
2390 rc = bnx2_init_5708s_phy(bp, reset_phy);
2391 else if (CHIP_NUM(bp) == CHIP_NUM_5709)
2392 rc = bnx2_init_5709s_phy(bp, reset_phy);
2394 else {
2395 rc = bnx2_init_copper_phy(bp, reset_phy);
2398 setup_phy:
2399 if (!rc)
2400 rc = bnx2_setup_phy(bp, bp->phy_port);
2402 return rc;
2405 static int
2406 bnx2_set_mac_loopback(struct bnx2 *bp)
2408 u32 mac_mode;
2410 mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
2411 mac_mode &= ~BNX2_EMAC_MODE_PORT;
2412 mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
2413 REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
2414 bp->link_up = 1;
2415 return 0;
2418 static int bnx2_test_link(struct bnx2 *);
2420 static int
2421 bnx2_set_phy_loopback(struct bnx2 *bp)
2423 u32 mac_mode;
2424 int rc, i;
2426 spin_lock_bh(&bp->phy_lock);
2427 rc = bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK | BMCR_FULLDPLX |
2428 BMCR_SPEED1000);
2429 spin_unlock_bh(&bp->phy_lock);
2430 if (rc)
2431 return rc;
2433 for (i = 0; i < 10; i++) {
2434 if (bnx2_test_link(bp) == 0)
2435 break;
2436 msleep(100);
2439 mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
2440 mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
2441 BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
2442 BNX2_EMAC_MODE_25G_MODE);
2444 mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
2445 REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
2446 bp->link_up = 1;
2447 return 0;
2450 static int
2451 bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int ack, int silent)
2453 int i;
2454 u32 val;
2456 bp->fw_wr_seq++;
2457 msg_data |= bp->fw_wr_seq;
2459 bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);
2461 if (!ack)
2462 return 0;
2464 /* Wait for an acknowledgement. */
2465 for (i = 0; i < (BNX2_FW_ACK_TIME_OUT_MS / 10); i++) {
2466 msleep(10);
2468 val = bnx2_shmem_rd(bp, BNX2_FW_MB);
2470 if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
2471 break;
2473 if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
2474 return 0;
2476 /* If we timed out, inform the firmware that this is the case. */
2477 if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
2478 if (!silent)
2479 pr_err("fw sync timeout, reset code = %x\n", msg_data);
2481 msg_data &= ~BNX2_DRV_MSG_CODE;
2482 msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;
2484 bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);
2486 return -EBUSY;
2489 if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
2490 return -EIO;
2492 return 0;
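/* Illustrative sketch of the mailbox handshake above, not driver code:
 * the driver tags each command with an incrementing sequence number and
 * the firmware echoes that sequence back in its ack field.
 *
 *   msg_data |= ++bp->fw_wr_seq;
 *   bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);
 *   ... poll up to BNX2_FW_ACK_TIME_OUT_MS, 10 ms at a time ...
 *   val = bnx2_shmem_rd(bp, BNX2_FW_MB);
 *   acked = (val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ);
 *
 * On timeout the driver posts BNX2_DRV_MSG_CODE_FW_TIMEOUT so the
 * firmware knows the handshake was abandoned.
 */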
2495 static int
2496 bnx2_init_5709_context(struct bnx2 *bp)
2498 int i, ret = 0;
2499 u32 val;
2501 val = BNX2_CTX_COMMAND_ENABLED | BNX2_CTX_COMMAND_MEM_INIT | (1 << 12);
2502 val |= (BCM_PAGE_BITS - 8) << 16;
2503 REG_WR(bp, BNX2_CTX_COMMAND, val);
2504 for (i = 0; i < 10; i++) {
2505 val = REG_RD(bp, BNX2_CTX_COMMAND);
2506 if (!(val & BNX2_CTX_COMMAND_MEM_INIT))
2507 break;
2508 udelay(2);
2510 if (val & BNX2_CTX_COMMAND_MEM_INIT)
2511 return -EBUSY;
2513 for (i = 0; i < bp->ctx_pages; i++) {
2514 int j;
2516 if (bp->ctx_blk[i])
2517 memset(bp->ctx_blk[i], 0, BCM_PAGE_SIZE);
2518 else
2519 return -ENOMEM;
2521 REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0,
2522 (bp->ctx_blk_mapping[i] & 0xffffffff) |
2523 BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID);
2524 REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA1,
2525 (u64) bp->ctx_blk_mapping[i] >> 32);
2526 REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL, i |
2527 BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
2528 for (j = 0; j < 10; j++) {
2530 val = REG_RD(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL);
2531 if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
2532 break;
2533 udelay(5);
2535 if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
2536 ret = -EBUSY;
2537 break;
2540 return ret;
2543 static void
2544 bnx2_init_context(struct bnx2 *bp)
2546 u32 vcid;
2548 vcid = 96;
2549 while (vcid) {
2550 u32 vcid_addr, pcid_addr, offset;
2551 int i;
2553 vcid--;
2555 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
2556 u32 new_vcid;
2558 vcid_addr = GET_PCID_ADDR(vcid);
2559 if (vcid & 0x8) {
2560 new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
2562 else {
2563 new_vcid = vcid;
2565 pcid_addr = GET_PCID_ADDR(new_vcid);
2567 else {
2568 vcid_addr = GET_CID_ADDR(vcid);
2569 pcid_addr = vcid_addr;
2572 for (i = 0; i < (CTX_SIZE / PHY_CTX_SIZE); i++) {
2573 vcid_addr += (i << PHY_CTX_SHIFT);
2574 pcid_addr += (i << PHY_CTX_SHIFT);
2576 REG_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
2577 REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
2579 /* Zero out the context. */
2580 for (offset = 0; offset < PHY_CTX_SIZE; offset += 4)
2581 bnx2_ctx_wr(bp, vcid_addr, offset, 0);
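/* Worked example of the 5706 A0 context remap above (illustrative
 * arithmetic only): when bit 3 of the vcid is set, the page is moved
 * into the 0x60 region:
 *
 *   vcid = 0x0b:  new_vcid = 0x60 + (0x0b & 0xf0) + (0x0b & 0x7)
 *                          = 0x60 + 0x00 + 0x03 = 0x63
 *   vcid = 0x07:  bit 3 clear, so new_vcid = vcid = 0x07
 */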
2586 static int
2587 bnx2_alloc_bad_rbuf(struct bnx2 *bp)
2589 u16 *good_mbuf;
2590 u32 good_mbuf_cnt;
2591 u32 val;
2593 good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
2594 if (good_mbuf == NULL) {
2595 pr_err("Failed to allocate memory in %s\n", __func__);
2596 return -ENOMEM;
2599 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
2600 BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);
2602 good_mbuf_cnt = 0;
2604 /* Allocate a bunch of mbufs and save the good ones in an array. */
2605 val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
2606 while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
2607 bnx2_reg_wr_ind(bp, BNX2_RBUF_COMMAND,
2608 BNX2_RBUF_COMMAND_ALLOC_REQ);
2610 val = bnx2_reg_rd_ind(bp, BNX2_RBUF_FW_BUF_ALLOC);
2612 val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;
2614 /* The addresses with Bit 9 set are bad memory blocks. */
2615 if (!(val & (1 << 9))) {
2616 good_mbuf[good_mbuf_cnt] = (u16) val;
2617 good_mbuf_cnt++;
2620 val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
2623 /* Free the good ones back to the mbuf pool, thus discarding
2624 * all the bad ones. */
2625 while (good_mbuf_cnt) {
2626 good_mbuf_cnt--;
2628 val = good_mbuf[good_mbuf_cnt];
2629 val = (val << 9) | val | 1;
2631 bnx2_reg_wr_ind(bp, BNX2_RBUF_FW_BUF_FREE, val);
2633 kfree(good_mbuf);
2634 return 0;
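/* Worked example of the free encoding above (illustrative only; the
 * field layout is hardware-defined): for a good mbuf value of 0x12,
 *
 *   val = (0x12 << 9) | 0x12 | 1 = 0x2400 | 0x13 = 0x2413
 *
 * which is the value written to BNX2_RBUF_FW_BUF_FREE.
 */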
2637 static void
2638 bnx2_set_mac_addr(struct bnx2 *bp, u8 *mac_addr, u32 pos)
2640 u32 val;
2642 val = (mac_addr[0] << 8) | mac_addr[1];
2644 REG_WR(bp, BNX2_EMAC_MAC_MATCH0 + (pos * 8), val);
2646 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
2647 (mac_addr[4] << 8) | mac_addr[5];
2649 REG_WR(bp, BNX2_EMAC_MAC_MATCH1 + (pos * 8), val);
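/* Worked example of the register split above, using an arbitrary
 * address 00:10:18:2e:5f:42 (illustrative only):
 *
 *   BNX2_EMAC_MAC_MATCH0 + pos * 8  <-  0x00000010  (bytes 0-1)
 *   BNX2_EMAC_MAC_MATCH1 + pos * 8  <-  0x182e5f42  (bytes 2-5)
 */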
2652 static inline int
2653 bnx2_alloc_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
2655 dma_addr_t mapping;
2656 struct sw_pg *rx_pg = &rxr->rx_pg_ring[index];
2657 struct rx_bd *rxbd =
2658 &rxr->rx_pg_desc_ring[RX_RING(index)][RX_IDX(index)];
2659 struct page *page = alloc_page(GFP_ATOMIC);
2661 if (!page)
2662 return -ENOMEM;
2663 mapping = pci_map_page(bp->pdev, page, 0, PAGE_SIZE,
2664 PCI_DMA_FROMDEVICE);
2665 if (pci_dma_mapping_error(bp->pdev, mapping)) {
2666 __free_page(page);
2667 return -EIO;
2670 rx_pg->page = page;
2671 pci_unmap_addr_set(rx_pg, mapping, mapping);
2672 rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
2673 rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
2674 return 0;
2677 static void
2678 bnx2_free_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
2680 struct sw_pg *rx_pg = &rxr->rx_pg_ring[index];
2681 struct page *page = rx_pg->page;
2683 if (!page)
2684 return;
2686 pci_unmap_page(bp->pdev, pci_unmap_addr(rx_pg, mapping), PAGE_SIZE,
2687 PCI_DMA_FROMDEVICE);
2689 __free_page(page);
2690 rx_pg->page = NULL;
2693 static inline int
2694 bnx2_alloc_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
2696 struct sk_buff *skb;
2697 struct sw_bd *rx_buf = &rxr->rx_buf_ring[index];
2698 dma_addr_t mapping;
2699 struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
2700 unsigned long align;
2702 skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
2703 if (skb == NULL) {
2704 return -ENOMEM;
2707 if (unlikely((align = (unsigned long) skb->data & (BNX2_RX_ALIGN - 1))))
2708 skb_reserve(skb, BNX2_RX_ALIGN - align);
2710 mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
2711 PCI_DMA_FROMDEVICE);
2712 if (pci_dma_mapping_error(bp->pdev, mapping)) {
2713 dev_kfree_skb(skb);
2714 return -EIO;
2717 rx_buf->skb = skb;
2718 pci_unmap_addr_set(rx_buf, mapping, mapping);
2720 rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
2721 rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
2723 rxr->rx_prod_bseq += bp->rx_buf_use_size;
2725 return 0;
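/* Illustrative note on the alignment fixup above: BNX2_RX_ALIGN is a
 * power of two, so (data & (BNX2_RX_ALIGN - 1)) is the misalignment.
 * E.g. with a hypothetical BNX2_RX_ALIGN of 16 and skb->data ending
 * in ...6, align = 6 and skb_reserve() advances data by 10 bytes so
 * the buffer starts on a 16-byte boundary.
 */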
2728 static int
2729 bnx2_phy_event_is_set(struct bnx2 *bp, struct bnx2_napi *bnapi, u32 event)
2731 struct status_block *sblk = bnapi->status_blk.msi;
2732 u32 new_link_state, old_link_state;
2733 int is_set = 1;
2735 new_link_state = sblk->status_attn_bits & event;
2736 old_link_state = sblk->status_attn_bits_ack & event;
2737 if (new_link_state != old_link_state) {
2738 if (new_link_state)
2739 REG_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD, event);
2740 else
2741 REG_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD, event);
2742 } else
2743 is_set = 0;
2745 return is_set;
2748 static void
2749 bnx2_phy_int(struct bnx2 *bp, struct bnx2_napi *bnapi)
2751 spin_lock(&bp->phy_lock);
2753 if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_LINK_STATE))
2754 bnx2_set_link(bp);
2755 if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_TIMER_ABORT))
2756 bnx2_set_remote_link(bp);
2758 spin_unlock(&bp->phy_lock);
2762 static inline u16
2763 bnx2_get_hw_tx_cons(struct bnx2_napi *bnapi)
2765 u16 cons;
2767 /* Tell compiler that status block fields can change. */
2768 barrier();
2769 cons = *bnapi->hw_tx_cons_ptr;
2770 barrier();
2771 if (unlikely((cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT))
2772 cons++;
2773 return cons;
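/* Sketch of why the low bits are skipped above (assuming, as in this
 * driver family, that the last BD of each ring page is a chain pointer
 * to the next page rather than a real descriptor):
 *
 *   MAX_TX_DESC_CNT = 255 (for 4K pages, say)
 *   hw cons = 254  ->  returned as 254   (normal BD)
 *   hw cons = 255  ->  returned as 256   (skip the chain BD)
 */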
2776 static int
2777 bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
2779 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
2780 u16 hw_cons, sw_cons, sw_ring_cons;
2781 int tx_pkt = 0, index;
2782 struct netdev_queue *txq;
2784 index = (bnapi - bp->bnx2_napi);
2785 txq = netdev_get_tx_queue(bp->dev, index);
2787 hw_cons = bnx2_get_hw_tx_cons(bnapi);
2788 sw_cons = txr->tx_cons;
2790 while (sw_cons != hw_cons) {
2791 struct sw_tx_bd *tx_buf;
2792 struct sk_buff *skb;
2793 int i, last;
2795 sw_ring_cons = TX_RING_IDX(sw_cons);
2797 tx_buf = &txr->tx_buf_ring[sw_ring_cons];
2798 skb = tx_buf->skb;
2800 /* prefetch skb_end_pointer() to speed up skb_shinfo(skb) */
2801 prefetch(&skb->end);
2803 /* partial BD completions possible with TSO packets */
2804 if (tx_buf->is_gso) {
2805 u16 last_idx, last_ring_idx;
2807 last_idx = sw_cons + tx_buf->nr_frags + 1;
2808 last_ring_idx = sw_ring_cons + tx_buf->nr_frags + 1;
2809 if (unlikely(last_ring_idx >= MAX_TX_DESC_CNT)) {
2810 last_idx++;
2812 if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
2813 break;
2817 pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
2818 skb_headlen(skb), PCI_DMA_TODEVICE);
2820 tx_buf->skb = NULL;
2821 last = tx_buf->nr_frags;
2823 for (i = 0; i < last; i++) {
2824 sw_cons = NEXT_TX_BD(sw_cons);
2826 pci_unmap_page(bp->pdev,
2827 pci_unmap_addr(
2828 &txr->tx_buf_ring[TX_RING_IDX(sw_cons)],
2829 mapping),
2830 skb_shinfo(skb)->frags[i].size,
2831 PCI_DMA_TODEVICE);
2834 sw_cons = NEXT_TX_BD(sw_cons);
2836 dev_kfree_skb(skb);
2837 tx_pkt++;
2838 if (tx_pkt == budget)
2839 break;
2841 if (hw_cons == sw_cons)
2842 hw_cons = bnx2_get_hw_tx_cons(bnapi);
2845 txr->hw_tx_cons = hw_cons;
2846 txr->tx_cons = sw_cons;
2848 /* Need to make the tx_cons update visible to bnx2_start_xmit()
2849 * before checking for netif_tx_queue_stopped(). Without the
2850 * memory barrier, there is a small possibility that bnx2_start_xmit()
2851 * will miss it and cause the queue to be stopped forever.
2853 smp_mb();
2855 if (unlikely(netif_tx_queue_stopped(txq)) &&
2856 (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)) {
2857 __netif_tx_lock(txq, smp_processor_id());
2858 if ((netif_tx_queue_stopped(txq)) &&
2859 (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh))
2860 netif_tx_wake_queue(txq);
2861 __netif_tx_unlock(txq);
2864 return tx_pkt;
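/* Illustrative ordering sketch for the smp_mb() above (not driver
 * code). The producer side in bnx2_start_xmit() is assumed to stop
 * the queue and then re-check the ring, so one of the two paths
 * always observes the other's update:
 *
 *   CPU0 (bnx2_tx_int)           CPU1 (bnx2_start_xmit)
 *   txr->tx_cons = sw_cons;      netif_tx_stop_queue(txq);
 *   smp_mb();                    smp_mb();
 *   if (queue stopped &&         if (bnx2_tx_avail() > thresh)
 *       avail > thresh) wake;        wake/continue;
 */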
2867 static void
2868 bnx2_reuse_rx_skb_pages(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
2869 struct sk_buff *skb, int count)
2871 struct sw_pg *cons_rx_pg, *prod_rx_pg;
2872 struct rx_bd *cons_bd, *prod_bd;
2873 int i;
2874 u16 hw_prod, prod;
2875 u16 cons = rxr->rx_pg_cons;
2877 cons_rx_pg = &rxr->rx_pg_ring[cons];
2879 /* The caller was unable to allocate a new page to replace the
2880 * last one in the frags array, so we need to recycle that page
2881 * and then free the skb.
2883 if (skb) {
2884 struct page *page;
2885 struct skb_shared_info *shinfo;
2887 shinfo = skb_shinfo(skb);
2888 shinfo->nr_frags--;
2889 page = shinfo->frags[shinfo->nr_frags].page;
2890 shinfo->frags[shinfo->nr_frags].page = NULL;
2892 cons_rx_pg->page = page;
2893 dev_kfree_skb(skb);
2896 hw_prod = rxr->rx_pg_prod;
2898 for (i = 0; i < count; i++) {
2899 prod = RX_PG_RING_IDX(hw_prod);
2901 prod_rx_pg = &rxr->rx_pg_ring[prod];
2902 cons_rx_pg = &rxr->rx_pg_ring[cons];
2903 cons_bd = &rxr->rx_pg_desc_ring[RX_RING(cons)][RX_IDX(cons)];
2904 prod_bd = &rxr->rx_pg_desc_ring[RX_RING(prod)][RX_IDX(prod)];
2906 if (prod != cons) {
2907 prod_rx_pg->page = cons_rx_pg->page;
2908 cons_rx_pg->page = NULL;
2909 pci_unmap_addr_set(prod_rx_pg, mapping,
2910 pci_unmap_addr(cons_rx_pg, mapping));
2912 prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
2913 prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
2916 cons = RX_PG_RING_IDX(NEXT_RX_BD(cons));
2917 hw_prod = NEXT_RX_BD(hw_prod);
2919 rxr->rx_pg_prod = hw_prod;
2920 rxr->rx_pg_cons = cons;
2923 static inline void
2924 bnx2_reuse_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
2925 struct sk_buff *skb, u16 cons, u16 prod)
2927 struct sw_bd *cons_rx_buf, *prod_rx_buf;
2928 struct rx_bd *cons_bd, *prod_bd;
2930 cons_rx_buf = &rxr->rx_buf_ring[cons];
2931 prod_rx_buf = &rxr->rx_buf_ring[prod];
2933 pci_dma_sync_single_for_device(bp->pdev,
2934 pci_unmap_addr(cons_rx_buf, mapping),
2935 BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
2937 rxr->rx_prod_bseq += bp->rx_buf_use_size;
2939 prod_rx_buf->skb = skb;
2941 if (cons == prod)
2942 return;
2944 pci_unmap_addr_set(prod_rx_buf, mapping,
2945 pci_unmap_addr(cons_rx_buf, mapping));
2947 cons_bd = &rxr->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
2948 prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
2949 prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
2950 prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
2953 static int
2954 bnx2_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, struct sk_buff *skb,
2955 unsigned int len, unsigned int hdr_len, dma_addr_t dma_addr,
2956 u32 ring_idx)
2958 int err;
2959 u16 prod = ring_idx & 0xffff;
2961 err = bnx2_alloc_rx_skb(bp, rxr, prod);
2962 if (unlikely(err)) {
2963 bnx2_reuse_rx_skb(bp, rxr, skb, (u16) (ring_idx >> 16), prod);
2964 if (hdr_len) {
2965 unsigned int raw_len = len + 4;
2966 int pages = PAGE_ALIGN(raw_len - hdr_len) >> PAGE_SHIFT;
2968 bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages);
2970 return err;
2973 skb_reserve(skb, BNX2_RX_OFFSET);
2974 pci_unmap_single(bp->pdev, dma_addr, bp->rx_buf_use_size,
2975 PCI_DMA_FROMDEVICE);
2977 if (hdr_len == 0) {
2978 skb_put(skb, len);
2979 return 0;
2980 } else {
2981 unsigned int i, frag_len, frag_size, pages;
2982 struct sw_pg *rx_pg;
2983 u16 pg_cons = rxr->rx_pg_cons;
2984 u16 pg_prod = rxr->rx_pg_prod;
2986 frag_size = len + 4 - hdr_len;
2987 pages = PAGE_ALIGN(frag_size) >> PAGE_SHIFT;
2988 skb_put(skb, hdr_len);
2990 for (i = 0; i < pages; i++) {
2991 dma_addr_t mapping_old;
2993 frag_len = min(frag_size, (unsigned int) PAGE_SIZE);
2994 if (unlikely(frag_len <= 4)) {
2995 unsigned int tail = 4 - frag_len;
2997 rxr->rx_pg_cons = pg_cons;
2998 rxr->rx_pg_prod = pg_prod;
2999 bnx2_reuse_rx_skb_pages(bp, rxr, NULL,
3000 pages - i);
3001 skb->len -= tail;
3002 if (i == 0) {
3003 skb->tail -= tail;
3004 } else {
3005 skb_frag_t *frag =
3006 &skb_shinfo(skb)->frags[i - 1];
3007 frag->size -= tail;
3008 skb->data_len -= tail;
3009 skb->truesize -= tail;
3011 return 0;
3013 rx_pg = &rxr->rx_pg_ring[pg_cons];
3015 /* Don't unmap yet. If we're unable to allocate a new
3016 * page, we need to recycle the page and the DMA addr.
3018 mapping_old = pci_unmap_addr(rx_pg, mapping);
3019 if (i == pages - 1)
3020 frag_len -= 4;
3022 skb_fill_page_desc(skb, i, rx_pg->page, 0, frag_len);
3023 rx_pg->page = NULL;
3025 err = bnx2_alloc_rx_page(bp, rxr,
3026 RX_PG_RING_IDX(pg_prod));
3027 if (unlikely(err)) {
3028 rxr->rx_pg_cons = pg_cons;
3029 rxr->rx_pg_prod = pg_prod;
3030 bnx2_reuse_rx_skb_pages(bp, rxr, skb,
3031 pages - i);
3032 return err;
3035 pci_unmap_page(bp->pdev, mapping_old,
3036 PAGE_SIZE, PCI_DMA_FROMDEVICE);
3038 frag_size -= frag_len;
3039 skb->data_len += frag_len;
3040 skb->truesize += frag_len;
3041 skb->len += frag_len;
3043 pg_prod = NEXT_RX_BD(pg_prod);
3044 pg_cons = RX_PG_RING_IDX(NEXT_RX_BD(pg_cons));
3046 rxr->rx_pg_prod = pg_prod;
3047 rxr->rx_pg_cons = pg_cons;
3049 return 0;
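/* Worked example of the split-header page math above (illustrative,
 * 4K pages): suppose the chip reports a 9004-byte frame including the
 * 4-byte CRC, so len = 9000 after the CRC adjustment in bnx2_rx_int(),
 * and hdr_len = 256:
 *
 *   frag_size = len + 4 - hdr_len = 8748
 *   pages     = PAGE_ALIGN(8748) >> PAGE_SHIFT = 3
 *   frags     = 4096 + 4096 + (556 - 4)   (CRC dropped from last frag)
 *   total     = 256 + 8744 = 9000 bytes of packet data
 */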
3052 static inline u16
3053 bnx2_get_hw_rx_cons(struct bnx2_napi *bnapi)
3055 u16 cons;
3057 /* Tell compiler that status block fields can change. */
3058 barrier();
3059 cons = *bnapi->hw_rx_cons_ptr;
3060 barrier();
3061 if (unlikely((cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT))
3062 cons++;
3063 return cons;
3066 static int
3067 bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
3069 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
3070 u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
3071 struct l2_fhdr *rx_hdr;
3072 int rx_pkt = 0, pg_ring_used = 0;
3074 hw_cons = bnx2_get_hw_rx_cons(bnapi);
3075 sw_cons = rxr->rx_cons;
3076 sw_prod = rxr->rx_prod;
3078 /* Memory barrier necessary as speculative reads of the rx
3079 * buffer can be ahead of the index in the status block.
3081 rmb();
3082 while (sw_cons != hw_cons) {
3083 unsigned int len, hdr_len;
3084 u32 status;
3085 struct sw_bd *rx_buf;
3086 struct sk_buff *skb;
3087 dma_addr_t dma_addr;
3088 u16 vtag = 0;
3089 int hw_vlan __maybe_unused = 0;
3091 sw_ring_cons = RX_RING_IDX(sw_cons);
3092 sw_ring_prod = RX_RING_IDX(sw_prod);
3094 rx_buf = &rxr->rx_buf_ring[sw_ring_cons];
3095 skb = rx_buf->skb;
3097 rx_buf->skb = NULL;
3099 dma_addr = pci_unmap_addr(rx_buf, mapping);
3101 pci_dma_sync_single_for_cpu(bp->pdev, dma_addr,
3102 BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH,
3103 PCI_DMA_FROMDEVICE);
3105 rx_hdr = (struct l2_fhdr *) skb->data;
3106 len = rx_hdr->l2_fhdr_pkt_len;
3107 status = rx_hdr->l2_fhdr_status;
3109 hdr_len = 0;
3110 if (status & L2_FHDR_STATUS_SPLIT) {
3111 hdr_len = rx_hdr->l2_fhdr_ip_xsum;
3112 pg_ring_used = 1;
3113 } else if (len > bp->rx_jumbo_thresh) {
3114 hdr_len = bp->rx_jumbo_thresh;
3115 pg_ring_used = 1;
3118 if (unlikely(status & (L2_FHDR_ERRORS_BAD_CRC |
3119 L2_FHDR_ERRORS_PHY_DECODE |
3120 L2_FHDR_ERRORS_ALIGNMENT |
3121 L2_FHDR_ERRORS_TOO_SHORT |
3122 L2_FHDR_ERRORS_GIANT_FRAME))) {
3124 bnx2_reuse_rx_skb(bp, rxr, skb, sw_ring_cons,
3125 sw_ring_prod);
3126 if (pg_ring_used) {
3127 int pages;
3129 pages = PAGE_ALIGN(len - hdr_len) >> PAGE_SHIFT;
3131 bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages);
3133 goto next_rx;
3136 len -= 4;
3138 if (len <= bp->rx_copy_thresh) {
3139 struct sk_buff *new_skb;
3141 new_skb = netdev_alloc_skb(bp->dev, len + 6);
3142 if (new_skb == NULL) {
3143 bnx2_reuse_rx_skb(bp, rxr, skb, sw_ring_cons,
3144 sw_ring_prod);
3145 goto next_rx;
3148 /* aligned copy */
3149 skb_copy_from_linear_data_offset(skb,
3150 BNX2_RX_OFFSET - 6,
3151 new_skb->data, len + 6);
3152 skb_reserve(new_skb, 6);
3153 skb_put(new_skb, len);
3155 bnx2_reuse_rx_skb(bp, rxr, skb,
3156 sw_ring_cons, sw_ring_prod);
3158 skb = new_skb;
3159 } else if (unlikely(bnx2_rx_skb(bp, rxr, skb, len, hdr_len,
3160 dma_addr, (sw_ring_cons << 16) | sw_ring_prod)))
3161 goto next_rx;
3163 if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) &&
3164 !(bp->rx_mode & BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG)) {
3165 vtag = rx_hdr->l2_fhdr_vlan_tag;
3166 #ifdef BCM_VLAN
3167 if (bp->vlgrp)
3168 hw_vlan = 1;
3169 else
3170 #endif
3172 struct vlan_ethhdr *ve = (struct vlan_ethhdr *)
3173 __skb_push(skb, 4);
3175 memmove(ve, skb->data + 4, ETH_ALEN * 2);
3176 ve->h_vlan_proto = htons(ETH_P_8021Q);
3177 ve->h_vlan_TCI = htons(vtag);
3178 len += 4;
3182 skb->protocol = eth_type_trans(skb, bp->dev);
3184 if ((len > (bp->dev->mtu + ETH_HLEN)) &&
3185 (ntohs(skb->protocol) != 0x8100)) {
3187 dev_kfree_skb(skb);
3188 goto next_rx;
3192 skb->ip_summed = CHECKSUM_NONE;
3193 if (bp->rx_csum &&
3194 (status & (L2_FHDR_STATUS_TCP_SEGMENT |
3195 L2_FHDR_STATUS_UDP_DATAGRAM))) {
3197 if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
3198 L2_FHDR_ERRORS_UDP_XSUM)) == 0))
3199 skb->ip_summed = CHECKSUM_UNNECESSARY;
3202 skb_record_rx_queue(skb, bnapi - &bp->bnx2_napi[0]);
3204 #ifdef BCM_VLAN
3205 if (hw_vlan)
3206 vlan_hwaccel_receive_skb(skb, bp->vlgrp, vtag);
3207 else
3208 #endif
3209 netif_receive_skb(skb);
3211 rx_pkt++;
3213 next_rx:
3214 sw_cons = NEXT_RX_BD(sw_cons);
3215 sw_prod = NEXT_RX_BD(sw_prod);
3217 if (rx_pkt == budget)
3218 break;
3220 /* Refresh hw_cons to see if there is new work */
3221 if (sw_cons == hw_cons) {
3222 hw_cons = bnx2_get_hw_rx_cons(bnapi);
3223 rmb();
3226 rxr->rx_cons = sw_cons;
3227 rxr->rx_prod = sw_prod;
3229 if (pg_ring_used)
3230 REG_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);
3232 REG_WR16(bp, rxr->rx_bidx_addr, sw_prod);
3234 REG_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);
3236 mmiowb();
3238 return rx_pkt;
3242 /* MSI ISR - The only difference between this and the INTx ISR
3243 * is that the MSI interrupt is always serviced.
3245 static irqreturn_t
3246 bnx2_msi(int irq, void *dev_instance)
3248 struct bnx2_napi *bnapi = dev_instance;
3249 struct bnx2 *bp = bnapi->bp;
3251 prefetch(bnapi->status_blk.msi);
3252 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3253 BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
3254 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
3256 /* Return here if interrupt is disabled. */
3257 if (unlikely(atomic_read(&bp->intr_sem) != 0))
3258 return IRQ_HANDLED;
3260 napi_schedule(&bnapi->napi);
3262 return IRQ_HANDLED;
3265 static irqreturn_t
3266 bnx2_msi_1shot(int irq, void *dev_instance)
3268 struct bnx2_napi *bnapi = dev_instance;
3269 struct bnx2 *bp = bnapi->bp;
3271 prefetch(bnapi->status_blk.msi);
3273 /* Return here if interrupt is disabled. */
3274 if (unlikely(atomic_read(&bp->intr_sem) != 0))
3275 return IRQ_HANDLED;
3277 napi_schedule(&bnapi->napi);
3279 return IRQ_HANDLED;
3282 static irqreturn_t
3283 bnx2_interrupt(int irq, void *dev_instance)
3285 struct bnx2_napi *bnapi = dev_instance;
3286 struct bnx2 *bp = bnapi->bp;
3287 struct status_block *sblk = bnapi->status_blk.msi;
3289 /* When using INTx, it is possible for the interrupt to arrive
3290 * at the CPU before the status block posted prior to the
3291 * interrupt. Reading a register will flush the status block.
3292 * When using MSI, the MSI message will always complete after
3293 * the status block write.
3295 if ((sblk->status_idx == bnapi->last_status_idx) &&
3296 (REG_RD(bp, BNX2_PCICFG_MISC_STATUS) &
3297 BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
3298 return IRQ_NONE;
3300 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3301 BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
3302 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
3304 /* Read back to deassert IRQ immediately to avoid too many
3305 * spurious interrupts.
3307 REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
3309 /* Return here if interrupt is shared and is disabled. */
3310 if (unlikely(atomic_read(&bp->intr_sem) != 0))
3311 return IRQ_HANDLED;
3313 if (napi_schedule_prep(&bnapi->napi)) {
3314 bnapi->last_status_idx = sblk->status_idx;
3315 __napi_schedule(&bnapi->napi);
3318 return IRQ_HANDLED;
3321 static inline int
3322 bnx2_has_fast_work(struct bnx2_napi *bnapi)
3324 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
3325 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
3327 if ((bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons) ||
3328 (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons))
3329 return 1;
3330 return 0;
3333 #define STATUS_ATTN_EVENTS (STATUS_ATTN_BITS_LINK_STATE | \
3334 STATUS_ATTN_BITS_TIMER_ABORT)
3336 static inline int
3337 bnx2_has_work(struct bnx2_napi *bnapi)
3339 struct status_block *sblk = bnapi->status_blk.msi;
3341 if (bnx2_has_fast_work(bnapi))
3342 return 1;
3344 #ifdef BCM_CNIC
3345 if (bnapi->cnic_present && (bnapi->cnic_tag != sblk->status_idx))
3346 return 1;
3347 #endif
3349 if ((sblk->status_attn_bits & STATUS_ATTN_EVENTS) !=
3350 (sblk->status_attn_bits_ack & STATUS_ATTN_EVENTS))
3351 return 1;
3353 return 0;
3356 static void
3357 bnx2_chk_missed_msi(struct bnx2 *bp)
3359 struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
3360 u32 msi_ctrl;
3362 if (bnx2_has_work(bnapi)) {
3363 msi_ctrl = REG_RD(bp, BNX2_PCICFG_MSI_CONTROL);
3364 if (!(msi_ctrl & BNX2_PCICFG_MSI_CONTROL_ENABLE))
3365 return;
3367 if (bnapi->last_status_idx == bp->idle_chk_status_idx) {
3368 REG_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl &
3369 ~BNX2_PCICFG_MSI_CONTROL_ENABLE);
3370 REG_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl);
3371 bnx2_msi(bp->irq_tbl[0].vector, bnapi);
3375 bp->idle_chk_status_idx = bnapi->last_status_idx;
3378 #ifdef BCM_CNIC
3379 static void bnx2_poll_cnic(struct bnx2 *bp, struct bnx2_napi *bnapi)
3381 struct cnic_ops *c_ops;
3383 if (!bnapi->cnic_present)
3384 return;
3386 rcu_read_lock();
3387 c_ops = rcu_dereference(bp->cnic_ops);
3388 if (c_ops)
3389 bnapi->cnic_tag = c_ops->cnic_handler(bp->cnic_data,
3390 bnapi->status_blk.msi);
3391 rcu_read_unlock();
3393 #endif
3395 static void bnx2_poll_link(struct bnx2 *bp, struct bnx2_napi *bnapi)
3397 struct status_block *sblk = bnapi->status_blk.msi;
3398 u32 status_attn_bits = sblk->status_attn_bits;
3399 u32 status_attn_bits_ack = sblk->status_attn_bits_ack;
3401 if ((status_attn_bits & STATUS_ATTN_EVENTS) !=
3402 (status_attn_bits_ack & STATUS_ATTN_EVENTS)) {
3404 bnx2_phy_int(bp, bnapi);
3406 /* This is needed to take care of transient status
3407 * during link changes.
3409 REG_WR(bp, BNX2_HC_COMMAND,
3410 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
3411 REG_RD(bp, BNX2_HC_COMMAND);
3415 static int bnx2_poll_work(struct bnx2 *bp, struct bnx2_napi *bnapi,
3416 int work_done, int budget)
3418 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
3419 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
3421 if (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons)
3422 bnx2_tx_int(bp, bnapi, 0);
3424 if (bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons)
3425 work_done += bnx2_rx_int(bp, bnapi, budget - work_done);
3427 return work_done;
3430 static int bnx2_poll_msix(struct napi_struct *napi, int budget)
3432 struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
3433 struct bnx2 *bp = bnapi->bp;
3434 int work_done = 0;
3435 struct status_block_msix *sblk = bnapi->status_blk.msix;
3437 while (1) {
3438 work_done = bnx2_poll_work(bp, bnapi, work_done, budget);
3439 if (unlikely(work_done >= budget))
3440 break;
3442 bnapi->last_status_idx = sblk->status_idx;
3443 /* status idx must be read before checking for more work. */
3444 rmb();
3445 if (likely(!bnx2_has_fast_work(bnapi))) {
3447 napi_complete(napi);
3448 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
3449 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
3450 bnapi->last_status_idx);
3451 break;
3454 return work_done;
3457 static int bnx2_poll(struct napi_struct *napi, int budget)
3459 struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
3460 struct bnx2 *bp = bnapi->bp;
3461 int work_done = 0;
3462 struct status_block *sblk = bnapi->status_blk.msi;
3464 while (1) {
3465 bnx2_poll_link(bp, bnapi);
3467 work_done = bnx2_poll_work(bp, bnapi, work_done, budget);
3469 #ifdef BCM_CNIC
3470 bnx2_poll_cnic(bp, bnapi);
3471 #endif
3473 /* bnapi->last_status_idx is used below to tell the hw how
3474 * much work has been processed, so we must read it before
3475 * checking for more work.
3477 bnapi->last_status_idx = sblk->status_idx;
3479 if (unlikely(work_done >= budget))
3480 break;
3482 rmb();
3483 if (likely(!bnx2_has_work(bnapi))) {
3484 napi_complete(napi);
3485 if (likely(bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)) {
3486 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3487 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
3488 bnapi->last_status_idx);
3489 break;
3491 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3492 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
3493 BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
3494 bnapi->last_status_idx);
3496 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3497 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
3498 bnapi->last_status_idx);
3499 break;
3503 return work_done;
3506 /* Called with rtnl_lock from vlan functions and also netif_tx_lock
3507 * from set_multicast.
3509 static void
3510 bnx2_set_rx_mode(struct net_device *dev)
3512 struct bnx2 *bp = netdev_priv(dev);
3513 u32 rx_mode, sort_mode;
3514 struct netdev_hw_addr *ha;
3515 int i;
3517 if (!netif_running(dev))
3518 return;
3520 spin_lock_bh(&bp->phy_lock);
3522 rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
3523 BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
3524 sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
3525 #ifdef BCM_VLAN
3526 if (!bp->vlgrp && (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN))
3527 rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
3528 #else
3529 if (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN)
3530 rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
3531 #endif
3532 if (dev->flags & IFF_PROMISC) {
3533 /* Promiscuous mode. */
3534 rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
3535 sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
3536 BNX2_RPM_SORT_USER0_PROM_VLAN;
3538 else if (dev->flags & IFF_ALLMULTI) {
3539 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
3540 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
3541 0xffffffff);
3543 sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
3545 else {
3546 /* Accept one or more multicast addresses. */
3547 struct dev_mc_list *mclist;
3548 u32 mc_filter[NUM_MC_HASH_REGISTERS];
3549 u32 regidx;
3550 u32 bit;
3551 u32 crc;
3553 memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);
3555 for (i = 0, mclist = dev->mc_list; mclist && i < netdev_mc_count(dev);
3556 i++, mclist = mclist->next) {
3558 crc = ether_crc_le(ETH_ALEN, mclist->dmi_addr);
3559 bit = crc & 0xff;
3560 regidx = (bit & 0xe0) >> 5;
3561 bit &= 0x1f;
3562 mc_filter[regidx] |= (1 << bit);
3565 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
3566 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
3567 mc_filter[i]);
3570 sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
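/* Worked example of the hash mapping above (illustrative): if the
 * low byte of the little-endian CRC of an address is 0xb3,
 *
 *   bit    = 0xb3
 *   regidx = (0xb3 & 0xe0) >> 5 = 5
 *   bit   &= 0x1f               = 0x13 (19)
 *
 * so bit 19 of BNX2_EMAC_MULTICAST_HASH5 is set for that address.
 */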
3573 if (netdev_uc_count(dev) > BNX2_MAX_UNICAST_ADDRESSES) {
3574 rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
3575 sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
3576 BNX2_RPM_SORT_USER0_PROM_VLAN;
3577 } else if (!(dev->flags & IFF_PROMISC)) {
3578 /* Add all entries into the match filter list */
3579 i = 0;
3580 netdev_for_each_uc_addr(ha, dev) {
3581 bnx2_set_mac_addr(bp, ha->addr,
3582 i + BNX2_START_UNICAST_ADDRESS_INDEX);
3583 sort_mode |= (1 <<
3584 (i + BNX2_START_UNICAST_ADDRESS_INDEX));
3585 i++;
3590 if (rx_mode != bp->rx_mode) {
3591 bp->rx_mode = rx_mode;
3592 REG_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
3595 REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
3596 REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
3597 REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);
3599 spin_unlock_bh(&bp->phy_lock);
3602 static int __devinit
3603 check_fw_section(const struct firmware *fw,
3604 const struct bnx2_fw_file_section *section,
3605 u32 alignment, bool non_empty)
3607 u32 offset = be32_to_cpu(section->offset);
3608 u32 len = be32_to_cpu(section->len);
3610 if ((offset == 0 && len != 0) || offset >= fw->size || offset & 3)
3611 return -EINVAL;
3612 if ((non_empty && len == 0) || len > fw->size - offset ||
3613 len & (alignment - 1))
3614 return -EINVAL;
3615 return 0;
3618 static int __devinit
3619 check_mips_fw_entry(const struct firmware *fw,
3620 const struct bnx2_mips_fw_file_entry *entry)
3622 if (check_fw_section(fw, &entry->text, 4, true) ||
3623 check_fw_section(fw, &entry->data, 4, false) ||
3624 check_fw_section(fw, &entry->rodata, 4, false))
3625 return -EINVAL;
3626 return 0;
3629 static int __devinit
3630 bnx2_request_firmware(struct bnx2 *bp)
3632 const char *mips_fw_file, *rv2p_fw_file;
3633 const struct bnx2_mips_fw_file *mips_fw;
3634 const struct bnx2_rv2p_fw_file *rv2p_fw;
3635 int rc;
3637 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3638 mips_fw_file = FW_MIPS_FILE_09;
3639 if ((CHIP_ID(bp) == CHIP_ID_5709_A0) ||
3640 (CHIP_ID(bp) == CHIP_ID_5709_A1))
3641 rv2p_fw_file = FW_RV2P_FILE_09_Ax;
3642 else
3643 rv2p_fw_file = FW_RV2P_FILE_09;
3644 } else {
3645 mips_fw_file = FW_MIPS_FILE_06;
3646 rv2p_fw_file = FW_RV2P_FILE_06;
3649 rc = request_firmware(&bp->mips_firmware, mips_fw_file, &bp->pdev->dev);
3650 if (rc) {
3651 pr_err("Can't load firmware file \"%s\"\n", mips_fw_file);
3652 return rc;
3655 rc = request_firmware(&bp->rv2p_firmware, rv2p_fw_file, &bp->pdev->dev);
3656 if (rc) {
3657 pr_err("Can't load firmware file \"%s\"\n", rv2p_fw_file);
3658 return rc;
3660 mips_fw = (const struct bnx2_mips_fw_file *) bp->mips_firmware->data;
3661 rv2p_fw = (const struct bnx2_rv2p_fw_file *) bp->rv2p_firmware->data;
3662 if (bp->mips_firmware->size < sizeof(*mips_fw) ||
3663 check_mips_fw_entry(bp->mips_firmware, &mips_fw->com) ||
3664 check_mips_fw_entry(bp->mips_firmware, &mips_fw->cp) ||
3665 check_mips_fw_entry(bp->mips_firmware, &mips_fw->rxp) ||
3666 check_mips_fw_entry(bp->mips_firmware, &mips_fw->tpat) ||
3667 check_mips_fw_entry(bp->mips_firmware, &mips_fw->txp)) {
3668 pr_err("Firmware file \"%s\" is invalid\n", mips_fw_file);
3669 return -EINVAL;
3671 if (bp->rv2p_firmware->size < sizeof(*rv2p_fw) ||
3672 check_fw_section(bp->rv2p_firmware, &rv2p_fw->proc1.rv2p, 8, true) ||
3673 check_fw_section(bp->rv2p_firmware, &rv2p_fw->proc2.rv2p, 8, true)) {
3674 pr_err("Firmware file \"%s\" is invalid\n", rv2p_fw_file);
3675 return -EINVAL;
3678 return 0;
3681 static u32
3682 rv2p_fw_fixup(u32 rv2p_proc, int idx, u32 loc, u32 rv2p_code)
3684 switch (idx) {
3685 case RV2P_P1_FIXUP_PAGE_SIZE_IDX:
3686 rv2p_code &= ~RV2P_BD_PAGE_SIZE_MSK;
3687 rv2p_code |= RV2P_BD_PAGE_SIZE;
3688 break;
3690 return rv2p_code;
3693 static int
3694 load_rv2p_fw(struct bnx2 *bp, u32 rv2p_proc,
3695 const struct bnx2_rv2p_fw_file_entry *fw_entry)
3697 u32 rv2p_code_len, file_offset;
3698 __be32 *rv2p_code;
3699 int i;
3700 u32 val, cmd, addr;
3702 rv2p_code_len = be32_to_cpu(fw_entry->rv2p.len);
3703 file_offset = be32_to_cpu(fw_entry->rv2p.offset);
3705 rv2p_code = (__be32 *)(bp->rv2p_firmware->data + file_offset);
3707 if (rv2p_proc == RV2P_PROC1) {
3708 cmd = BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
3709 addr = BNX2_RV2P_PROC1_ADDR_CMD;
3710 } else {
3711 cmd = BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
3712 addr = BNX2_RV2P_PROC2_ADDR_CMD;
3715 for (i = 0; i < rv2p_code_len; i += 8) {
3716 REG_WR(bp, BNX2_RV2P_INSTR_HIGH, be32_to_cpu(*rv2p_code));
3717 rv2p_code++;
3718 REG_WR(bp, BNX2_RV2P_INSTR_LOW, be32_to_cpu(*rv2p_code));
3719 rv2p_code++;
3721 val = (i / 8) | cmd;
3722 REG_WR(bp, addr, val);
3725 rv2p_code = (__be32 *)(bp->rv2p_firmware->data + file_offset);
3726 for (i = 0; i < 8; i++) {
3727 u32 loc, code;
3729 loc = be32_to_cpu(fw_entry->fixup[i]);
3730 if (loc && ((loc * 4) < rv2p_code_len)) {
3731 code = be32_to_cpu(*(rv2p_code + loc - 1));
3732 REG_WR(bp, BNX2_RV2P_INSTR_HIGH, code);
3733 code = be32_to_cpu(*(rv2p_code + loc));
3734 code = rv2p_fw_fixup(rv2p_proc, i, loc, code);
3735 REG_WR(bp, BNX2_RV2P_INSTR_LOW, code);
3737 val = (loc / 2) | cmd;
3738 REG_WR(bp, addr, val);
3742 /* Reset the processor; it is un-stalled later. */
3743 if (rv2p_proc == RV2P_PROC1) {
3744 REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
3746 else {
3747 REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
3750 return 0;
3753 static int
3754 load_cpu_fw(struct bnx2 *bp, const struct cpu_reg *cpu_reg,
3755 const struct bnx2_mips_fw_file_entry *fw_entry)
3757 u32 addr, len, file_offset;
3758 __be32 *data;
3759 u32 offset;
3760 u32 val;
3762 /* Halt the CPU. */
3763 val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
3764 val |= cpu_reg->mode_value_halt;
3765 bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
3766 bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
3768 /* Load the Text area. */
3769 addr = be32_to_cpu(fw_entry->text.addr);
3770 len = be32_to_cpu(fw_entry->text.len);
3771 file_offset = be32_to_cpu(fw_entry->text.offset);
3772 data = (__be32 *)(bp->mips_firmware->data + file_offset);
3774 offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3775 if (len) {
3776 int j;
3778 for (j = 0; j < (len / 4); j++, offset += 4)
3779 bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3782 /* Load the Data area. */
3783 addr = be32_to_cpu(fw_entry->data.addr);
3784 len = be32_to_cpu(fw_entry->data.len);
3785 file_offset = be32_to_cpu(fw_entry->data.offset);
3786 data = (__be32 *)(bp->mips_firmware->data + file_offset);
3788 offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3789 if (len) {
3790 int j;
3792 for (j = 0; j < (len / 4); j++, offset += 4)
3793 bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3796 /* Load the Read-Only area. */
3797 addr = be32_to_cpu(fw_entry->rodata.addr);
3798 len = be32_to_cpu(fw_entry->rodata.len);
3799 file_offset = be32_to_cpu(fw_entry->rodata.offset);
3800 data = (__be32 *)(bp->mips_firmware->data + file_offset);
3802 offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3803 if (len) {
3804 int j;
3806 for (j = 0; j < (len / 4); j++, offset += 4)
3807 bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3810 /* Clear the pre-fetch instruction. */
3811 bnx2_reg_wr_ind(bp, cpu_reg->inst, 0);
3813 val = be32_to_cpu(fw_entry->start_addr);
3814 bnx2_reg_wr_ind(bp, cpu_reg->pc, val);
3816 /* Start the CPU. */
3817 val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
3818 val &= ~cpu_reg->mode_value_halt;
3819 bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
3820 bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
3822 return 0;
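/* Illustrative address translation for the section loads above (the
 * numbers are hypothetical, not taken from the driver): each section's
 * link address in the CPU's MIPS view is rebased into the scratchpad
 * window before the dword-by-dword writes, e.g.
 *
 *   mips_view_base = 0x08000000, spad_base = 0x000a0000 (hypothetical)
 *   text addr 0x08000104 -> offset 0x000a0104
 */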
3825 static int
3826 bnx2_init_cpus(struct bnx2 *bp)
3828 const struct bnx2_mips_fw_file *mips_fw =
3829 (const struct bnx2_mips_fw_file *) bp->mips_firmware->data;
3830 const struct bnx2_rv2p_fw_file *rv2p_fw =
3831 (const struct bnx2_rv2p_fw_file *) bp->rv2p_firmware->data;
3832 int rc;
3834 /* Initialize the RV2P processor. */
3835 load_rv2p_fw(bp, RV2P_PROC1, &rv2p_fw->proc1);
3836 load_rv2p_fw(bp, RV2P_PROC2, &rv2p_fw->proc2);
3838 /* Initialize the RX Processor. */
3839 rc = load_cpu_fw(bp, &cpu_reg_rxp, &mips_fw->rxp);
3840 if (rc)
3841 goto init_cpu_err;
3843 /* Initialize the TX Processor. */
3844 rc = load_cpu_fw(bp, &cpu_reg_txp, &mips_fw->txp);
3845 if (rc)
3846 goto init_cpu_err;
3848 /* Initialize the TX Patch-up Processor. */
3849 rc = load_cpu_fw(bp, &cpu_reg_tpat, &mips_fw->tpat);
3850 if (rc)
3851 goto init_cpu_err;
3853 /* Initialize the Completion Processor. */
3854 rc = load_cpu_fw(bp, &cpu_reg_com, &mips_fw->com);
3855 if (rc)
3856 goto init_cpu_err;
3858 /* Initialize the Command Processor. */
3859 rc = load_cpu_fw(bp, &cpu_reg_cp, &mips_fw->cp);
3861 init_cpu_err:
3862 return rc;
3865 static int
3866 bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
3868 u16 pmcsr;
3870 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
3872 switch (state) {
3873 case PCI_D0: {
3874 u32 val;
3876 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
3877 (pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
3878 PCI_PM_CTRL_PME_STATUS);
3880 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
3881 /* delay required during transition out of D3hot */
3882 msleep(20);
3884 val = REG_RD(bp, BNX2_EMAC_MODE);
3885 val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
3886 val &= ~BNX2_EMAC_MODE_MPKT;
3887 REG_WR(bp, BNX2_EMAC_MODE, val);
3889 val = REG_RD(bp, BNX2_RPM_CONFIG);
3890 val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
3891 REG_WR(bp, BNX2_RPM_CONFIG, val);
3892 break;
3894 case PCI_D3hot: {
3895 int i;
3896 u32 val, wol_msg;
3898 if (bp->wol) {
3899 u32 advertising;
3900 u8 autoneg;
3902 autoneg = bp->autoneg;
3903 advertising = bp->advertising;
3905 if (bp->phy_port == PORT_TP) {
3906 bp->autoneg = AUTONEG_SPEED;
3907 bp->advertising = ADVERTISED_10baseT_Half |
3908 ADVERTISED_10baseT_Full |
3909 ADVERTISED_100baseT_Half |
3910 ADVERTISED_100baseT_Full |
3911 ADVERTISED_Autoneg;
3914 spin_lock_bh(&bp->phy_lock);
3915 bnx2_setup_phy(bp, bp->phy_port);
3916 spin_unlock_bh(&bp->phy_lock);
3918 bp->autoneg = autoneg;
3919 bp->advertising = advertising;
3921 bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
3923 val = REG_RD(bp, BNX2_EMAC_MODE);
3925 /* Enable port mode. */
3926 val &= ~BNX2_EMAC_MODE_PORT;
3927 val |= BNX2_EMAC_MODE_MPKT_RCVD |
3928 BNX2_EMAC_MODE_ACPI_RCVD |
3929 BNX2_EMAC_MODE_MPKT;
3930 if (bp->phy_port == PORT_TP)
3931 val |= BNX2_EMAC_MODE_PORT_MII;
3932 else {
3933 val |= BNX2_EMAC_MODE_PORT_GMII;
3934 if (bp->line_speed == SPEED_2500)
3935 val |= BNX2_EMAC_MODE_25G_MODE;
3938 REG_WR(bp, BNX2_EMAC_MODE, val);
3940 /* Receive all multicasts. */
3941 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
3942 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
3943 0xffffffff);
3945 REG_WR(bp, BNX2_EMAC_RX_MODE,
3946 BNX2_EMAC_RX_MODE_SORT_MODE);
3948 val = 1 | BNX2_RPM_SORT_USER0_BC_EN |
3949 BNX2_RPM_SORT_USER0_MC_EN;
3950 REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
3951 REG_WR(bp, BNX2_RPM_SORT_USER0, val);
3952 REG_WR(bp, BNX2_RPM_SORT_USER0, val |
3953 BNX2_RPM_SORT_USER0_ENA);
3955 /* Need to enable EMAC and RPM for WOL. */
3956 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
3957 BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
3958 BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
3959 BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);
3961 val = REG_RD(bp, BNX2_RPM_CONFIG);
3962 val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
3963 REG_WR(bp, BNX2_RPM_CONFIG, val);
3965 wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
3967 else {
3968 wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
3971 if (!(bp->flags & BNX2_FLAG_NO_WOL))
3972 bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg,
3973 1, 0);
3975 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
3976 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
3977 (CHIP_ID(bp) == CHIP_ID_5706_A1)) {
3979 if (bp->wol)
3980 pmcsr |= 3;
3982 else {
3983 pmcsr |= 3;
3985 if (bp->wol) {
3986 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
3988 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
3989 pmcsr);
3991 /* No more memory access after this point until the
3992 * device is brought back to D0.
3994 udelay(50);
3995 break;
3997 default:
3998 return -EINVAL;
4000 return 0;
4003 static int
4004 bnx2_acquire_nvram_lock(struct bnx2 *bp)
4006 u32 val;
4007 int j;
4009 /* Request access to the flash interface. */
4010 REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
4011 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4012 val = REG_RD(bp, BNX2_NVM_SW_ARB);
4013 if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
4014 break;
4016 udelay(5);
4019 if (j >= NVRAM_TIMEOUT_COUNT)
4020 return -EBUSY;
4022 return 0;
4025 static int
4026 bnx2_release_nvram_lock(struct bnx2 *bp)
4028 int j;
4029 u32 val;
4031 /* Relinquish nvram interface. */
4032 REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);
4034 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4035 val = REG_RD(bp, BNX2_NVM_SW_ARB);
4036 if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
4037 break;
4039 udelay(5);
4042 if (j >= NVRAM_TIMEOUT_COUNT)
4043 return -EBUSY;
4045 return 0;
4049 static int
4050 bnx2_enable_nvram_write(struct bnx2 *bp)
4052 u32 val;
4054 val = REG_RD(bp, BNX2_MISC_CFG);
4055 REG_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);
4057 if (bp->flash_info->flags & BNX2_NV_WREN) {
4058 int j;
4060 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4061 REG_WR(bp, BNX2_NVM_COMMAND,
4062 BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);
4064 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4065 udelay(5);
4067 val = REG_RD(bp, BNX2_NVM_COMMAND);
4068 if (val & BNX2_NVM_COMMAND_DONE)
4069 break;
4072 if (j >= NVRAM_TIMEOUT_COUNT)
4073 return -EBUSY;
4075 return 0;
4078 static void
4079 bnx2_disable_nvram_write(struct bnx2 *bp)
4081 u32 val;
4083 val = REG_RD(bp, BNX2_MISC_CFG);
4084 REG_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
4088 static void
4089 bnx2_enable_nvram_access(struct bnx2 *bp)
4091 u32 val;
4093 val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
4094 /* Enable both bits, even on read. */
4095 REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
4096 val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
4099 static void
4100 bnx2_disable_nvram_access(struct bnx2 *bp)
4102 u32 val;
4104 val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
4105 /* Disable both bits, even after read. */
4106 REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
4107 val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
4108 BNX2_NVM_ACCESS_ENABLE_WR_EN));
4111 static int
4112 bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
4114 u32 cmd;
4115 int j;
4117 if (bp->flash_info->flags & BNX2_NV_BUFFERED)
4118 /* Buffered flash, no erase needed */
4119 return 0;
4121 /* Build an erase command */
4122 cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
4123 BNX2_NVM_COMMAND_DOIT;
4125 /* Need to clear DONE bit separately. */
4126 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4128 /* Address of the NVRAM to erase. */
4129 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
4131 /* Issue an erase command. */
4132 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
4134 /* Wait for completion. */
4135 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4136 u32 val;
4138 udelay(5);
4140 val = REG_RD(bp, BNX2_NVM_COMMAND);
4141 if (val & BNX2_NVM_COMMAND_DONE)
4142 break;
4145 if (j >= NVRAM_TIMEOUT_COUNT)
4146 return -EBUSY;
4148 return 0;
4151 static int
4152 bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
4154 u32 cmd;
4155 int j;
4157 /* Build the command word. */
4158 cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;
4160 /* Calculate the offset within a buffered flash; not needed for 5709. */
4161 if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
4162 offset = ((offset / bp->flash_info->page_size) <<
4163 bp->flash_info->page_bits) +
4164 (offset % bp->flash_info->page_size);
4167 /* Need to clear DONE bit separately. */
4168 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4170 /* Address of the NVRAM to read from. */
4171 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
4173 /* Issue a read command. */
4174 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
4176 /* Wait for completion. */
4177 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4178 u32 val;
4180 udelay(5);
4182 val = REG_RD(bp, BNX2_NVM_COMMAND);
4183 if (val & BNX2_NVM_COMMAND_DONE) {
4184 __be32 v = cpu_to_be32(REG_RD(bp, BNX2_NVM_READ));
4185 memcpy(ret_val, &v, 4);
4186 break;
4189 if (j >= NVRAM_TIMEOUT_COUNT)
4190 return -EBUSY;
4192 return 0;
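/* Worked example of the buffered-flash translation above, with a
 * hypothetical geometry of page_size = 264 and page_bits = 9:
 *
 *   offset 1000 -> page 1000 / 264 = 3, byte 1000 % 264 = 208
 *               -> translated offset (3 << 9) + 208 = 1744
 *
 * i.e. the linear offset is rebased so each page starts on a
 * (1 << page_bits) boundary in the command address space.
 */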
4196 static int
4197 bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
4199 u32 cmd;
4200 __be32 val32;
4201 int j;
4203 /* Build the command word. */
4204 cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;
4206 /* Calculate the offset within a buffered flash; not needed for 5709. */
4207 if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
4208 offset = ((offset / bp->flash_info->page_size) <<
4209 bp->flash_info->page_bits) +
4210 (offset % bp->flash_info->page_size);
4213 /* Need to clear DONE bit separately. */
4214 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4216 memcpy(&val32, val, 4);
4218 /* Write the data. */
4219 REG_WR(bp, BNX2_NVM_WRITE, be32_to_cpu(val32));
4221 /* Address of the NVRAM to write to. */
4222 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
4224 /* Issue the write command. */
4225 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
4227 /* Wait for completion. */
4228 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4229 udelay(5);
4231 if (REG_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
4232 break;
4234 if (j >= NVRAM_TIMEOUT_COUNT)
4235 return -EBUSY;
4237 return 0;
4240 static int
4241 bnx2_init_nvram(struct bnx2 *bp)
4243 u32 val;
4244 int j, entry_count, rc = 0;
4245 const struct flash_spec *flash;
4247 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4248 bp->flash_info = &flash_5709;
4249 goto get_flash_size;
4252 /* Determine the selected interface. */
4253 val = REG_RD(bp, BNX2_NVM_CFG1);
4255 entry_count = ARRAY_SIZE(flash_table);
4257 if (val & 0x40000000) {
4259 /* Flash interface has been reconfigured */
4260 for (j = 0, flash = &flash_table[0]; j < entry_count;
4261 j++, flash++) {
4262 if ((val & FLASH_BACKUP_STRAP_MASK) ==
4263 (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
4264 bp->flash_info = flash;
4265 break;
4269 else {
4270 u32 mask;
4271 /* Not yet reconfigured */
4273 if (val & (1 << 23))
4274 mask = FLASH_BACKUP_STRAP_MASK;
4275 else
4276 mask = FLASH_STRAP_MASK;
4278 for (j = 0, flash = &flash_table[0]; j < entry_count;
4279 j++, flash++) {
4281 if ((val & mask) == (flash->strapping & mask)) {
4282 bp->flash_info = flash;
4284 /* Request access to the flash interface. */
4285 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
4286 return rc;
4288 /* Enable access to flash interface */
4289 bnx2_enable_nvram_access(bp);
4291 /* Reconfigure the flash interface */
4292 REG_WR(bp, BNX2_NVM_CFG1, flash->config1);
4293 REG_WR(bp, BNX2_NVM_CFG2, flash->config2);
4294 REG_WR(bp, BNX2_NVM_CFG3, flash->config3);
4295 REG_WR(bp, BNX2_NVM_WRITE1, flash->write1);
4297 /* Disable access to flash interface */
4298 bnx2_disable_nvram_access(bp);
4299 bnx2_release_nvram_lock(bp);
4301 break;
4304 } /* if (val & 0x40000000) */
4306 if (j == entry_count) {
4307 bp->flash_info = NULL;
4308 pr_alert("Unknown flash/EEPROM type\n");
4309 return -ENODEV;
4312 get_flash_size:
4313 val = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG2);
4314 val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
4315 if (val)
4316 bp->flash_size = val;
4317 else
4318 bp->flash_size = bp->flash_info->total_size;
4320 return rc;
4323 static int
4324 bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
4325 int buf_size)
4327 int rc = 0;
4328 u32 cmd_flags, offset32, len32, extra;
4330 if (buf_size == 0)
4331 return 0;
4333 /* Request access to the flash interface. */
4334 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
4335 return rc;
4337 /* Enable access to flash interface */
4338 bnx2_enable_nvram_access(bp);
4340 len32 = buf_size;
4341 offset32 = offset;
4342 extra = 0;
4344 cmd_flags = 0;
4346 if (offset32 & 3) {
4347 u8 buf[4];
4348 u32 pre_len;
4350 offset32 &= ~3;
4351 pre_len = 4 - (offset & 3);
4353 if (pre_len >= len32) {
4354 pre_len = len32;
4355 cmd_flags = BNX2_NVM_COMMAND_FIRST |
4356 BNX2_NVM_COMMAND_LAST;
4358 else {
4359 cmd_flags = BNX2_NVM_COMMAND_FIRST;
4362 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4364 if (rc)
4365 return rc;
4367 memcpy(ret_buf, buf + (offset & 3), pre_len);
4369 offset32 += 4;
4370 ret_buf += pre_len;
4371 len32 -= pre_len;
4373 if (len32 & 3) {
4374 extra = 4 - (len32 & 3);
4375 len32 = (len32 + 4) & ~3;
4378 if (len32 == 4) {
4379 u8 buf[4];
4381 if (cmd_flags)
4382 cmd_flags = BNX2_NVM_COMMAND_LAST;
4383 else
4384 cmd_flags = BNX2_NVM_COMMAND_FIRST |
4385 BNX2_NVM_COMMAND_LAST;
4387 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4389 memcpy(ret_buf, buf, 4 - extra);
4391 else if (len32 > 0) {
4392 u8 buf[4];
4394 /* Read the first word. */
4395 if (cmd_flags)
4396 cmd_flags = 0;
4397 else
4398 cmd_flags = BNX2_NVM_COMMAND_FIRST;
4400 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);
4402 /* Advance to the next dword. */
4403 offset32 += 4;
4404 ret_buf += 4;
4405 len32 -= 4;
4407 while (len32 > 4 && rc == 0) {
4408 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);
4410 /* Advance to the next dword. */
4411 offset32 += 4;
4412 ret_buf += 4;
4413 len32 -= 4;
4416 if (rc)
4417 return rc;
4419 cmd_flags = BNX2_NVM_COMMAND_LAST;
4420 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4422 memcpy(ret_buf, buf, 4 - extra);
4425 /* Disable access to flash interface */
4426 bnx2_disable_nvram_access(bp);
4428 bnx2_release_nvram_lock(bp);
4430 return rc;
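/*
 * Editorial sketch, not part of bnx2.c: the unaligned-read decomposition
 * used by bnx2_nvram_read() above, reduced to plain C.  read_dword() is a
 * hypothetical callback standing in for bnx2_nvram_read_dword(); the real
 * routine additionally frames the burst with the FIRST/LAST command flags.
 */
#include <stdint.h>
#include <string.h>

typedef int (*read_dword_fn)(uint32_t aligned_off, uint8_t dst[4]);

static int read_unaligned(read_dword_fn read_dword, uint32_t off,
			  uint8_t *buf, uint32_t len)
{
	uint8_t tmp[4];
	int rc;

	while (len) {
		uint32_t aligned = off & ~3u;	/* containing dword */
		uint32_t skip = off - aligned;	/* leading bytes to drop */
		uint32_t take = 4 - skip;	/* usable bytes in this dword */

		if (take > len)
			take = len;		/* partial trailing dword */

		rc = read_dword(aligned, tmp);
		if (rc)
			return rc;

		memcpy(buf, tmp + skip, take);
		buf += take;
		off += take;
		len -= take;
	}
	return 0;
}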
4433 static int
4434 bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
4435 int buf_size)
4437 u32 written, offset32, len32;
4438 u8 *buf, start[4], end[4], *align_buf = NULL, *flash_buffer = NULL;
4439 int rc = 0;
4440 int align_start, align_end;
4442 buf = data_buf;
4443 offset32 = offset;
4444 len32 = buf_size;
4445 align_start = align_end = 0;
4447 if ((align_start = (offset32 & 3))) {
4448 offset32 &= ~3;
4449 len32 += align_start;
4450 if (len32 < 4)
4451 len32 = 4;
4452 if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
4453 return rc;
4456 if (len32 & 3) {
4457 align_end = 4 - (len32 & 3);
4458 len32 += align_end;
4459 if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4, end, 4)))
4460 return rc;
4463 if (align_start || align_end) {
4464 align_buf = kmalloc(len32, GFP_KERNEL);
4465 if (align_buf == NULL)
4466 return -ENOMEM;
4467 if (align_start) {
4468 memcpy(align_buf, start, 4);
4470 if (align_end) {
4471 memcpy(align_buf + len32 - 4, end, 4);
4473 memcpy(align_buf + align_start, data_buf, buf_size);
4474 buf = align_buf;
4477 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4478 flash_buffer = kmalloc(264, GFP_KERNEL);
4479 if (flash_buffer == NULL) {
4480 rc = -ENOMEM;
4481 goto nvram_write_end;
4485 written = 0;
4486 while ((written < len32) && (rc == 0)) {
4487 u32 page_start, page_end, data_start, data_end;
4488 u32 addr, cmd_flags;
4489 int i;
4491 /* Find the page_start addr */
4492 page_start = offset32 + written;
4493 page_start -= (page_start % bp->flash_info->page_size);
4494 /* Find the page_end addr */
4495 page_end = page_start + bp->flash_info->page_size;
4496 /* Find the data_start addr */
4497 data_start = (written == 0) ? offset32 : page_start;
4498 /* Find the data_end addr */
4499 data_end = (page_end > offset32 + len32) ?
4500 (offset32 + len32) : page_end;
4502 /* Request access to the flash interface. */
4503 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
4504 goto nvram_write_end;
4506 /* Enable access to flash interface */
4507 bnx2_enable_nvram_access(bp);
4509 cmd_flags = BNX2_NVM_COMMAND_FIRST;
4510 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4511 int j;
4513 /* Read the whole page into the buffer
4514			 * (non-buffered flash only) */
4515 for (j = 0; j < bp->flash_info->page_size; j += 4) {
4516 if (j == (bp->flash_info->page_size - 4)) {
4517 cmd_flags |= BNX2_NVM_COMMAND_LAST;
4519 rc = bnx2_nvram_read_dword(bp,
4520 page_start + j,
4521 &flash_buffer[j],
4522 cmd_flags);
4524 if (rc)
4525 goto nvram_write_end;
4527 cmd_flags = 0;
4531 /* Enable writes to flash interface (unlock write-protect) */
4532 if ((rc = bnx2_enable_nvram_write(bp)) != 0)
4533 goto nvram_write_end;
4535 /* Loop to write back the buffer data from page_start to
4536 * data_start */
4537 i = 0;
4538 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4539 /* Erase the page */
4540 if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
4541 goto nvram_write_end;
4543			/* Re-enable writes for the actual write */
4544 bnx2_enable_nvram_write(bp);
4546 for (addr = page_start; addr < data_start;
4547 addr += 4, i += 4) {
4549 rc = bnx2_nvram_write_dword(bp, addr,
4550 &flash_buffer[i], cmd_flags);
4552 if (rc != 0)
4553 goto nvram_write_end;
4555 cmd_flags = 0;
4559 /* Loop to write the new data from data_start to data_end */
4560 for (addr = data_start; addr < data_end; addr += 4, i += 4) {
4561 if ((addr == page_end - 4) ||
4562 ((bp->flash_info->flags & BNX2_NV_BUFFERED) &&
4563 (addr == data_end - 4))) {
4565 cmd_flags |= BNX2_NVM_COMMAND_LAST;
4567 rc = bnx2_nvram_write_dword(bp, addr, buf,
4568 cmd_flags);
4570 if (rc != 0)
4571 goto nvram_write_end;
4573 cmd_flags = 0;
4574 buf += 4;
4577 /* Loop to write back the buffer data from data_end
4578 * to page_end */
4579 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4580 for (addr = data_end; addr < page_end;
4581 addr += 4, i += 4) {
4583 if (addr == page_end-4) {
4584 cmd_flags = BNX2_NVM_COMMAND_LAST;
4586 rc = bnx2_nvram_write_dword(bp, addr,
4587 &flash_buffer[i], cmd_flags);
4589 if (rc != 0)
4590 goto nvram_write_end;
4592 cmd_flags = 0;
4596 /* Disable writes to flash interface (lock write-protect) */
4597 bnx2_disable_nvram_write(bp);
4599 /* Disable access to flash interface */
4600 bnx2_disable_nvram_access(bp);
4601 bnx2_release_nvram_lock(bp);
4603 /* Increment written */
4604 written += data_end - data_start;
4607 nvram_write_end:
4608 kfree(flash_buffer);
4609 kfree(align_buf);
4610 return rc;
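/*
 * Editorial sketch, not part of bnx2.c: the per-iteration boundary
 * arithmetic from the write loop in bnx2_nvram_write() above.  Each pass
 * rewrites one flash page [page_start, page_end); the new bytes land in
 * [data_start, data_end) and, for non-buffered flash, the rest of the page
 * is read back and rewritten around them.
 */
#include <stdint.h>

struct page_span {
	uint32_t page_start, page_end;	/* page containing this chunk */
	uint32_t data_start, data_end;	/* new bytes inside that page */
};

static struct page_span next_span(uint32_t offset, uint32_t len,
				  uint32_t written, uint32_t page_size)
{
	struct page_span s;

	s.page_start = offset + written;
	s.page_start -= s.page_start % page_size;
	s.page_end = s.page_start + page_size;
	s.data_start = written ? s.page_start : offset;
	s.data_end = (s.page_end > offset + len) ? offset + len : s.page_end;
	return s;
}
/* e.g. offset=0x105, len=0x300, page_size=0x100: the first span writes
 * 0x105..0x200 inside page 0x100..0x200; later spans cover whole pages. */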
4613 static void
4614 bnx2_init_fw_cap(struct bnx2 *bp)
4616 u32 val, sig = 0;
4618 bp->phy_flags &= ~BNX2_PHY_FLAG_REMOTE_PHY_CAP;
4619 bp->flags &= ~BNX2_FLAG_CAN_KEEP_VLAN;
4621 if (!(bp->flags & BNX2_FLAG_ASF_ENABLE))
4622 bp->flags |= BNX2_FLAG_CAN_KEEP_VLAN;
4624 val = bnx2_shmem_rd(bp, BNX2_FW_CAP_MB);
4625 if ((val & BNX2_FW_CAP_SIGNATURE_MASK) != BNX2_FW_CAP_SIGNATURE)
4626 return;
4628 if ((val & BNX2_FW_CAP_CAN_KEEP_VLAN) == BNX2_FW_CAP_CAN_KEEP_VLAN) {
4629 bp->flags |= BNX2_FLAG_CAN_KEEP_VLAN;
4630 sig |= BNX2_DRV_ACK_CAP_SIGNATURE | BNX2_FW_CAP_CAN_KEEP_VLAN;
4633 if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
4634 (val & BNX2_FW_CAP_REMOTE_PHY_CAPABLE)) {
4635 u32 link;
4637 bp->phy_flags |= BNX2_PHY_FLAG_REMOTE_PHY_CAP;
4639 link = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);
4640 if (link & BNX2_LINK_STATUS_SERDES_LINK)
4641 bp->phy_port = PORT_FIBRE;
4642 else
4643 bp->phy_port = PORT_TP;
4645 sig |= BNX2_DRV_ACK_CAP_SIGNATURE |
4646 BNX2_FW_CAP_REMOTE_PHY_CAPABLE;
4649 if (netif_running(bp->dev) && sig)
4650 bnx2_shmem_wr(bp, BNX2_DRV_ACK_CAP_MB, sig);
4653 static void
4654 bnx2_setup_msix_tbl(struct bnx2 *bp)
4656 REG_WR(bp, BNX2_PCI_GRC_WINDOW_ADDR, BNX2_PCI_GRC_WINDOW_ADDR_SEP_WIN);
4658 REG_WR(bp, BNX2_PCI_GRC_WINDOW2_ADDR, BNX2_MSIX_TABLE_ADDR);
4659 REG_WR(bp, BNX2_PCI_GRC_WINDOW3_ADDR, BNX2_MSIX_PBA_ADDR);
4662 static int
4663 bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
4665 u32 val;
4666 int i, rc = 0;
4667 u8 old_port;
4669 /* Wait for the current PCI transaction to complete before
4670 * issuing a reset. */
4671 REG_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
4672 BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
4673 BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
4674 BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
4675 BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
4676 val = REG_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
4677 udelay(5);
4679 /* Wait for the firmware to tell us it is ok to issue a reset. */
4680 bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1, 1);
4682 /* Deposit a driver reset signature so the firmware knows that
4683 * this is a soft reset. */
4684 bnx2_shmem_wr(bp, BNX2_DRV_RESET_SIGNATURE,
4685 BNX2_DRV_RESET_SIGNATURE_MAGIC);
4687	/* Do a dummy read to force the chip to complete all current transactions
4688 * before we issue a reset. */
4689 val = REG_RD(bp, BNX2_MISC_ID);
4691 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4692 REG_WR(bp, BNX2_MISC_COMMAND, BNX2_MISC_COMMAND_SW_RESET);
4693 REG_RD(bp, BNX2_MISC_COMMAND);
4694 udelay(5);
4696 val = BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
4697 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
4699 pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, val);
4701 } else {
4702 val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
4703 BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
4704 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
4706 /* Chip reset. */
4707 REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);
4709 /* Reading back any register after chip reset will hang the
4710 * bus on 5706 A0 and A1. The msleep below provides plenty
4711		 * of margin for write posting. */
4713 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
4714 (CHIP_ID(bp) == CHIP_ID_5706_A1))
4715 msleep(20);
4717		/* Reset takes approximately 30 usec */
4718 for (i = 0; i < 10; i++) {
4719 val = REG_RD(bp, BNX2_PCICFG_MISC_CONFIG);
4720 if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
4721 BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0)
4722 break;
4723 udelay(10);
4726 if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
4727 BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
4728 pr_err("Chip reset did not complete\n");
4729 return -EBUSY;
4733 /* Make sure byte swapping is properly configured. */
4734 val = REG_RD(bp, BNX2_PCI_SWAP_DIAG0);
4735 if (val != 0x01020304) {
4736 pr_err("Chip not in correct endian mode\n");
4737 return -ENODEV;
4740 /* Wait for the firmware to finish its initialization. */
4741 rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 1, 0);
4742 if (rc)
4743 return rc;
4745 spin_lock_bh(&bp->phy_lock);
4746 old_port = bp->phy_port;
4747 bnx2_init_fw_cap(bp);
4748 if ((bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) &&
4749 old_port != bp->phy_port)
4750 bnx2_set_default_remote_link(bp);
4751 spin_unlock_bh(&bp->phy_lock);
4753 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
4754		/* Adjust the voltage regulator to two steps lower. The default
4755 * of this register is 0x0000000e. */
4756 REG_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);
4758 /* Remove bad rbuf memory from the free pool. */
4759 rc = bnx2_alloc_bad_rbuf(bp);
4762 if (bp->flags & BNX2_FLAG_USING_MSIX)
4763 bnx2_setup_msix_tbl(bp);
4765 return rc;
4768 static int
4769 bnx2_init_chip(struct bnx2 *bp)
4771 u32 val, mtu;
4772 int rc, i;
4774 /* Make sure the interrupt is not active. */
4775 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
4777 val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
4778 BNX2_DMA_CONFIG_DATA_WORD_SWAP |
4779 #ifdef __BIG_ENDIAN
4780 BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
4781 #endif
4782 BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
4783 DMA_READ_CHANS << 12 |
4784 DMA_WRITE_CHANS << 16;
4786 val |= (0x2 << 20) | (1 << 11);
4788 if ((bp->flags & BNX2_FLAG_PCIX) && (bp->bus_speed_mhz == 133))
4789 val |= (1 << 23);
4791 if ((CHIP_NUM(bp) == CHIP_NUM_5706) &&
4792 (CHIP_ID(bp) != CHIP_ID_5706_A0) && !(bp->flags & BNX2_FLAG_PCIX))
4793 val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;
4795 REG_WR(bp, BNX2_DMA_CONFIG, val);
4797 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
4798 val = REG_RD(bp, BNX2_TDMA_CONFIG);
4799 val |= BNX2_TDMA_CONFIG_ONE_DMA;
4800 REG_WR(bp, BNX2_TDMA_CONFIG, val);
4803 if (bp->flags & BNX2_FLAG_PCIX) {
4804 u16 val16;
4806 pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
4807 &val16);
4808 pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
4809 val16 & ~PCI_X_CMD_ERO);
4812 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
4813 BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
4814 BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
4815 BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);
4817 /* Initialize context mapping and zero out the quick contexts. The
4818 * context block must have already been enabled. */
4819 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4820 rc = bnx2_init_5709_context(bp);
4821 if (rc)
4822 return rc;
4823 } else
4824 bnx2_init_context(bp);
4826 if ((rc = bnx2_init_cpus(bp)) != 0)
4827 return rc;
4829 bnx2_init_nvram(bp);
4831 bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
4833 val = REG_RD(bp, BNX2_MQ_CONFIG);
4834 val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
4835 val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
4836 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4837 val |= BNX2_MQ_CONFIG_BIN_MQ_MODE;
4838 if (CHIP_REV(bp) == CHIP_REV_Ax)
4839 val |= BNX2_MQ_CONFIG_HALT_DIS;
4842 REG_WR(bp, BNX2_MQ_CONFIG, val);
4844 val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
4845 REG_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
4846 REG_WR(bp, BNX2_MQ_KNL_WIND_END, val);
4848 val = (BCM_PAGE_BITS - 8) << 24;
4849 REG_WR(bp, BNX2_RV2P_CONFIG, val);
4851 /* Configure page size. */
4852 val = REG_RD(bp, BNX2_TBDR_CONFIG);
4853 val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
4854 val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
4855 REG_WR(bp, BNX2_TBDR_CONFIG, val);
4857 val = bp->mac_addr[0] +
4858 (bp->mac_addr[1] << 8) +
4859 (bp->mac_addr[2] << 16) +
4860 bp->mac_addr[3] +
4861 (bp->mac_addr[4] << 8) +
4862 (bp->mac_addr[5] << 16);
4863 REG_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);
4865 /* Program the MTU. Also include 4 bytes for CRC32. */
4866 mtu = bp->dev->mtu;
4867 val = mtu + ETH_HLEN + ETH_FCS_LEN;
4868 if (val > (MAX_ETHERNET_PACKET_SIZE + 4))
4869 val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
4870 REG_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);
4872 if (mtu < 1500)
4873 mtu = 1500;
4875 bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG, BNX2_RBUF_CONFIG_VAL(mtu));
4876 bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG2, BNX2_RBUF_CONFIG2_VAL(mtu));
4877 bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG3, BNX2_RBUF_CONFIG3_VAL(mtu));
4879 memset(bp->bnx2_napi[0].status_blk.msi, 0, bp->status_stats_size);
4880 for (i = 0; i < BNX2_MAX_MSIX_VEC; i++)
4881 bp->bnx2_napi[i].last_status_idx = 0;
4883 bp->idle_chk_status_idx = 0xffff;
4885 bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE;
4887 /* Set up how to generate a link change interrupt. */
4888 REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
4890 REG_WR(bp, BNX2_HC_STATUS_ADDR_L,
4891 (u64) bp->status_blk_mapping & 0xffffffff);
4892 REG_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);
4894 REG_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
4895 (u64) bp->stats_blk_mapping & 0xffffffff);
4896 REG_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
4897 (u64) bp->stats_blk_mapping >> 32);
4899 REG_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
4900 (bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);
4902 REG_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
4903 (bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);
4905 REG_WR(bp, BNX2_HC_COMP_PROD_TRIP,
4906 (bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);
4908 REG_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);
4910 REG_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);
4912 REG_WR(bp, BNX2_HC_COM_TICKS,
4913 (bp->com_ticks_int << 16) | bp->com_ticks);
4915 REG_WR(bp, BNX2_HC_CMD_TICKS,
4916 (bp->cmd_ticks_int << 16) | bp->cmd_ticks);
4918 if (bp->flags & BNX2_FLAG_BROKEN_STATS)
4919 REG_WR(bp, BNX2_HC_STATS_TICKS, 0);
4920 else
4921 REG_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks);
4922 REG_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8); /* 3ms */
4924 if (CHIP_ID(bp) == CHIP_ID_5706_A1)
4925 val = BNX2_HC_CONFIG_COLLECT_STATS;
4926 else {
4927 val = BNX2_HC_CONFIG_RX_TMR_MODE | BNX2_HC_CONFIG_TX_TMR_MODE |
4928 BNX2_HC_CONFIG_COLLECT_STATS;
4931 if (bp->flags & BNX2_FLAG_USING_MSIX) {
4932 REG_WR(bp, BNX2_HC_MSIX_BIT_VECTOR,
4933 BNX2_HC_MSIX_BIT_VECTOR_VAL);
4935 val |= BNX2_HC_CONFIG_SB_ADDR_INC_128B;
4938 if (bp->flags & BNX2_FLAG_ONE_SHOT_MSI)
4939 val |= BNX2_HC_CONFIG_ONE_SHOT | BNX2_HC_CONFIG_USE_INT_PARAM;
4941 REG_WR(bp, BNX2_HC_CONFIG, val);
4943 for (i = 1; i < bp->irq_nvecs; i++) {
4944 u32 base = ((i - 1) * BNX2_HC_SB_CONFIG_SIZE) +
4945 BNX2_HC_SB_CONFIG_1;
4947 REG_WR(bp, base,
4948 BNX2_HC_SB_CONFIG_1_TX_TMR_MODE |
4949 BNX2_HC_SB_CONFIG_1_RX_TMR_MODE |
4950 BNX2_HC_SB_CONFIG_1_ONE_SHOT);
4952 REG_WR(bp, base + BNX2_HC_TX_QUICK_CONS_TRIP_OFF,
4953 (bp->tx_quick_cons_trip_int << 16) |
4954 bp->tx_quick_cons_trip);
4956 REG_WR(bp, base + BNX2_HC_TX_TICKS_OFF,
4957 (bp->tx_ticks_int << 16) | bp->tx_ticks);
4959 REG_WR(bp, base + BNX2_HC_RX_QUICK_CONS_TRIP_OFF,
4960 (bp->rx_quick_cons_trip_int << 16) |
4961 bp->rx_quick_cons_trip);
4963 REG_WR(bp, base + BNX2_HC_RX_TICKS_OFF,
4964 (bp->rx_ticks_int << 16) | bp->rx_ticks);
4967 /* Clear internal stats counters. */
4968 REG_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);
4970 REG_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_EVENTS);
4972 /* Initialize the receive filter. */
4973 bnx2_set_rx_mode(bp->dev);
4975 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4976 val = REG_RD(bp, BNX2_MISC_NEW_CORE_CTL);
4977 val |= BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
4978 REG_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
4980 rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
4981 1, 0);
4983 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, BNX2_MISC_ENABLE_DEFAULT);
4984 REG_RD(bp, BNX2_MISC_ENABLE_SET_BITS);
4986 udelay(20);
4988 bp->hc_cmd = REG_RD(bp, BNX2_HC_COMMAND);
4990 return rc;
4993 static void
4994 bnx2_clear_ring_states(struct bnx2 *bp)
4996 struct bnx2_napi *bnapi;
4997 struct bnx2_tx_ring_info *txr;
4998 struct bnx2_rx_ring_info *rxr;
4999 int i;
5001 for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
5002 bnapi = &bp->bnx2_napi[i];
5003 txr = &bnapi->tx_ring;
5004 rxr = &bnapi->rx_ring;
5006 txr->tx_cons = 0;
5007 txr->hw_tx_cons = 0;
5008 rxr->rx_prod_bseq = 0;
5009 rxr->rx_prod = 0;
5010 rxr->rx_cons = 0;
5011 rxr->rx_pg_prod = 0;
5012 rxr->rx_pg_cons = 0;
5016 static void
5017 bnx2_init_tx_context(struct bnx2 *bp, u32 cid, struct bnx2_tx_ring_info *txr)
5019 u32 val, offset0, offset1, offset2, offset3;
5020 u32 cid_addr = GET_CID_ADDR(cid);
5022 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
5023 offset0 = BNX2_L2CTX_TYPE_XI;
5024 offset1 = BNX2_L2CTX_CMD_TYPE_XI;
5025 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
5026 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
5027 } else {
5028 offset0 = BNX2_L2CTX_TYPE;
5029 offset1 = BNX2_L2CTX_CMD_TYPE;
5030 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
5031 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
5033 val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
5034 bnx2_ctx_wr(bp, cid_addr, offset0, val);
5036 val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
5037 bnx2_ctx_wr(bp, cid_addr, offset1, val);
5039 val = (u64) txr->tx_desc_mapping >> 32;
5040 bnx2_ctx_wr(bp, cid_addr, offset2, val);
5042 val = (u64) txr->tx_desc_mapping & 0xffffffff;
5043 bnx2_ctx_wr(bp, cid_addr, offset3, val);
5046 static void
5047 bnx2_init_tx_ring(struct bnx2 *bp, int ring_num)
5049 struct tx_bd *txbd;
5050 u32 cid = TX_CID;
5051 struct bnx2_napi *bnapi;
5052 struct bnx2_tx_ring_info *txr;
5054 bnapi = &bp->bnx2_napi[ring_num];
5055 txr = &bnapi->tx_ring;
5057 if (ring_num == 0)
5058 cid = TX_CID;
5059 else
5060 cid = TX_TSS_CID + ring_num - 1;
5062 bp->tx_wake_thresh = bp->tx_ring_size / 2;
5064 txbd = &txr->tx_desc_ring[MAX_TX_DESC_CNT];
5066 txbd->tx_bd_haddr_hi = (u64) txr->tx_desc_mapping >> 32;
5067 txbd->tx_bd_haddr_lo = (u64) txr->tx_desc_mapping & 0xffffffff;
5069 txr->tx_prod = 0;
5070 txr->tx_prod_bseq = 0;
5072 txr->tx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BIDX;
5073 txr->tx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BSEQ;
5075 bnx2_init_tx_context(bp, cid, txr);
5078 static void
5079 bnx2_init_rxbd_rings(struct rx_bd *rx_ring[], dma_addr_t dma[], u32 buf_size,
5080 int num_rings)
5082 int i;
5083 struct rx_bd *rxbd;
5085 for (i = 0; i < num_rings; i++) {
5086 int j;
5088 rxbd = &rx_ring[i][0];
5089 for (j = 0; j < MAX_RX_DESC_CNT; j++, rxbd++) {
5090 rxbd->rx_bd_len = buf_size;
5091 rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
5093 if (i == (num_rings - 1))
5094 j = 0;
5095 else
5096 j = i + 1;
5097 rxbd->rx_bd_haddr_hi = (u64) dma[j] >> 32;
5098 rxbd->rx_bd_haddr_lo = (u64) dma[j] & 0xffffffff;
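/*
 * Editorial sketch, not part of bnx2.c: two idioms from
 * bnx2_init_rxbd_rings() above.  A 64-bit DMA address is split into the
 * hi/lo 32-bit halves the descriptor fields expect, and the last BD of
 * each ring page links to the next page so the chip walks the pages as
 * one circular ring without CPU help.
 */
#include <stdint.h>

struct bd_addr { uint32_t hi, lo; };

static struct bd_addr split_dma(uint64_t dma)
{
	struct bd_addr a = { (uint32_t)(dma >> 32), (uint32_t)dma };
	return a;
}

static uint32_t link_target(uint32_t page, uint32_t num_pages)
{
	/* page 0 -> 1 -> ... -> num_pages-1 -> back to 0 */
	return (page == num_pages - 1) ? 0 : page + 1;
}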
5102 static void
5103 bnx2_init_rx_ring(struct bnx2 *bp, int ring_num)
5105 int i;
5106 u16 prod, ring_prod;
5107 u32 cid, rx_cid_addr, val;
5108 struct bnx2_napi *bnapi = &bp->bnx2_napi[ring_num];
5109 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
5111 if (ring_num == 0)
5112 cid = RX_CID;
5113 else
5114 cid = RX_RSS_CID + ring_num - 1;
5116 rx_cid_addr = GET_CID_ADDR(cid);
5118 bnx2_init_rxbd_rings(rxr->rx_desc_ring, rxr->rx_desc_mapping,
5119 bp->rx_buf_use_size, bp->rx_max_ring);
5121 bnx2_init_rx_context(bp, cid);
5123 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
5124 val = REG_RD(bp, BNX2_MQ_MAP_L2_5);
5125 REG_WR(bp, BNX2_MQ_MAP_L2_5, val | BNX2_MQ_MAP_L2_5_ARM);
5128 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, 0);
5129 if (bp->rx_pg_ring_size) {
5130 bnx2_init_rxbd_rings(rxr->rx_pg_desc_ring,
5131 rxr->rx_pg_desc_mapping,
5132 PAGE_SIZE, bp->rx_max_pg_ring);
5133 val = (bp->rx_buf_use_size << 16) | PAGE_SIZE;
5134 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, val);
5135 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_RBDC_KEY,
5136 BNX2_L2CTX_RBDC_JUMBO_KEY - ring_num);
5138 val = (u64) rxr->rx_pg_desc_mapping[0] >> 32;
5139 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_HI, val);
5141 val = (u64) rxr->rx_pg_desc_mapping[0] & 0xffffffff;
5142 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_LO, val);
5144 if (CHIP_NUM(bp) == CHIP_NUM_5709)
5145 REG_WR(bp, BNX2_MQ_MAP_L2_3, BNX2_MQ_MAP_L2_3_DEFAULT);
5148 val = (u64) rxr->rx_desc_mapping[0] >> 32;
5149 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val);
5151 val = (u64) rxr->rx_desc_mapping[0] & 0xffffffff;
5152 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val);
5154 ring_prod = prod = rxr->rx_pg_prod;
5155 for (i = 0; i < bp->rx_pg_ring_size; i++) {
5156 if (bnx2_alloc_rx_page(bp, rxr, ring_prod) < 0) {
5157 netdev_warn(bp->dev, "init'ed rx page ring %d with %d/%d pages only\n",
5158 ring_num, i, bp->rx_pg_ring_size);
5159 break;
5161 prod = NEXT_RX_BD(prod);
5162 ring_prod = RX_PG_RING_IDX(prod);
5164 rxr->rx_pg_prod = prod;
5166 ring_prod = prod = rxr->rx_prod;
5167 for (i = 0; i < bp->rx_ring_size; i++) {
5168 if (bnx2_alloc_rx_skb(bp, rxr, ring_prod) < 0) {
5169 netdev_warn(bp->dev, "init'ed rx ring %d with %d/%d skbs only\n",
5170 ring_num, i, bp->rx_ring_size);
5171 break;
5173 prod = NEXT_RX_BD(prod);
5174 ring_prod = RX_RING_IDX(prod);
5176 rxr->rx_prod = prod;
5178 rxr->rx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BDIDX;
5179 rxr->rx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BSEQ;
5180 rxr->rx_pg_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_PG_BDIDX;
5182 REG_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);
5183 REG_WR16(bp, rxr->rx_bidx_addr, prod);
5185 REG_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);
5188 static void
5189 bnx2_init_all_rings(struct bnx2 *bp)
5191 int i;
5192 u32 val;
5194 bnx2_clear_ring_states(bp);
5196 REG_WR(bp, BNX2_TSCH_TSS_CFG, 0);
5197 for (i = 0; i < bp->num_tx_rings; i++)
5198 bnx2_init_tx_ring(bp, i);
5200 if (bp->num_tx_rings > 1)
5201 REG_WR(bp, BNX2_TSCH_TSS_CFG, ((bp->num_tx_rings - 1) << 24) |
5202 (TX_TSS_CID << 7));
5204 REG_WR(bp, BNX2_RLUP_RSS_CONFIG, 0);
5205 bnx2_reg_wr_ind(bp, BNX2_RXP_SCRATCH_RSS_TBL_SZ, 0);
5207 for (i = 0; i < bp->num_rx_rings; i++)
5208 bnx2_init_rx_ring(bp, i);
5210 if (bp->num_rx_rings > 1) {
5211 u32 tbl_32;
5212 u8 *tbl = (u8 *) &tbl_32;
5214 bnx2_reg_wr_ind(bp, BNX2_RXP_SCRATCH_RSS_TBL_SZ,
5215 BNX2_RXP_SCRATCH_RSS_TBL_MAX_ENTRIES);
5217 for (i = 0; i < BNX2_RXP_SCRATCH_RSS_TBL_MAX_ENTRIES; i++) {
5218 tbl[i % 4] = i % (bp->num_rx_rings - 1);
5219 if ((i % 4) == 3)
5220 bnx2_reg_wr_ind(bp,
5221 BNX2_RXP_SCRATCH_RSS_TBL + i,
5222 cpu_to_be32(tbl_32));
5225 val = BNX2_RLUP_RSS_CONFIG_IPV4_RSS_TYPE_ALL_XI |
5226 BNX2_RLUP_RSS_CONFIG_IPV6_RSS_TYPE_ALL_XI;
5228 REG_WR(bp, BNX2_RLUP_RSS_CONFIG, val);
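/*
 * Editorial sketch, not part of bnx2.c: packing the RSS indirection table
 * as done above.  write_word() is a hypothetical stand-in for
 * bnx2_reg_wr_ind(); each byte selects a secondary rx ring (ring 0 is
 * kept for non-RSS traffic, hence the modulo over num_rx_rings - 1), and
 * every fourth byte flushes one packed 32-bit word.  The driver also
 * byte-swaps the word with cpu_to_be32() before writing; omitted here.
 */
#include <stdint.h>

#define RSS_TBL_ENTRIES 128	/* assumed table size for the example */

static void fill_rss_table(void (*write_word)(uint32_t idx, uint32_t val),
			   int num_rx_rings)
{
	uint32_t word = 0;
	uint8_t *b = (uint8_t *)&word;
	int i;

	for (i = 0; i < RSS_TBL_ENTRIES; i++) {
		b[i % 4] = i % (num_rx_rings - 1);
		if ((i % 4) == 3) {
			write_word(i, word);	/* one packed 32-bit write */
			word = 0;
		}
	}
}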
5233 static u32 bnx2_find_max_ring(u32 ring_size, u32 max_size)
5235 u32 max, num_rings = 1;
5237 while (ring_size > MAX_RX_DESC_CNT) {
5238 ring_size -= MAX_RX_DESC_CNT;
5239 num_rings++;
5241 /* round to next power of 2 */
5242 max = max_size;
5243 while ((max & num_rings) == 0)
5244 max >>= 1;
5246 if (num_rings != max)
5247 max <<= 1;
5249 return max;
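/*
 * Editorial sketch, not part of bnx2.c: bnx2_find_max_ring() above rounds
 * the page count up to the next power of two by sliding the cap down to
 * the top set bit of num_rings, then doubling if lower bits remain.
 * Equivalent plain-C form with worked cases (assumes n <= cap).
 */
#include <assert.h>
#include <stdint.h>

static uint32_t round_up_pow2(uint32_t n, uint32_t cap /* power of two */)
{
	uint32_t max = cap;

	while ((max & n) == 0)
		max >>= 1;		/* now max == top set bit of n */
	if (n != max)
		max <<= 1;		/* n was not itself a power of two */
	return max;
}

int main(void)
{
	assert(round_up_pow2(1, 16) == 1);
	assert(round_up_pow2(3, 16) == 4);	/* 3 pages need 4 ring pages */
	assert(round_up_pow2(4, 16) == 4);
	return 0;
}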
5252 static void
5253 bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
5255 u32 rx_size, rx_space, jumbo_size;
5257 /* 8 for CRC and VLAN */
5258 rx_size = bp->dev->mtu + ETH_HLEN + BNX2_RX_OFFSET + 8;
5260 rx_space = SKB_DATA_ALIGN(rx_size + BNX2_RX_ALIGN) + NET_SKB_PAD +
5261 sizeof(struct skb_shared_info);
5263 bp->rx_copy_thresh = BNX2_RX_COPY_THRESH;
5264 bp->rx_pg_ring_size = 0;
5265 bp->rx_max_pg_ring = 0;
5266 bp->rx_max_pg_ring_idx = 0;
5267 if ((rx_space > PAGE_SIZE) && !(bp->flags & BNX2_FLAG_JUMBO_BROKEN)) {
5268 int pages = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;
5270 jumbo_size = size * pages;
5271 if (jumbo_size > MAX_TOTAL_RX_PG_DESC_CNT)
5272 jumbo_size = MAX_TOTAL_RX_PG_DESC_CNT;
5274 bp->rx_pg_ring_size = jumbo_size;
5275 bp->rx_max_pg_ring = bnx2_find_max_ring(jumbo_size,
5276 MAX_RX_PG_RINGS);
5277 bp->rx_max_pg_ring_idx = (bp->rx_max_pg_ring * RX_DESC_CNT) - 1;
5278 rx_size = BNX2_RX_COPY_THRESH + BNX2_RX_OFFSET;
5279 bp->rx_copy_thresh = 0;
5282 bp->rx_buf_use_size = rx_size;
5283 /* hw alignment */
5284 bp->rx_buf_size = bp->rx_buf_use_size + BNX2_RX_ALIGN;
5285 bp->rx_jumbo_thresh = rx_size - BNX2_RX_OFFSET;
5286 bp->rx_ring_size = size;
5287 bp->rx_max_ring = bnx2_find_max_ring(size, MAX_RX_RINGS);
5288 bp->rx_max_ring_idx = (bp->rx_max_ring * RX_DESC_CNT) - 1;
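/*
 * Editorial sketch, not part of bnx2.c: the jumbo page-count math above,
 * assuming a 4096-byte page for the worked case.  The driver subtracts 40
 * bytes (roughly the headers kept in the linear rx buffer) before rounding
 * the remaining payload up to whole rx-page-ring pages.
 */
#include <stdint.h>

#define PG_SIZE 4096u
#define PG_ALIGN(x) (((x) + PG_SIZE - 1) & ~(PG_SIZE - 1))

static uint32_t pages_per_pkt(uint32_t mtu)
{
	return PG_ALIGN(mtu - 40) / PG_SIZE;
}
/* mtu 9000 -> PG_ALIGN(8960) = 12288 -> 3 pages per jumbo packet */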
5291 static void
5292 bnx2_free_tx_skbs(struct bnx2 *bp)
5294 int i;
5296 for (i = 0; i < bp->num_tx_rings; i++) {
5297 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
5298 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
5299 int j;
5301 if (txr->tx_buf_ring == NULL)
5302 continue;
5304 for (j = 0; j < TX_DESC_CNT; ) {
5305 struct sw_tx_bd *tx_buf = &txr->tx_buf_ring[j];
5306 struct sk_buff *skb = tx_buf->skb;
5307 int k, last;
5309 if (skb == NULL) {
5310 j++;
5311 continue;
5314 pci_unmap_single(bp->pdev,
5315 pci_unmap_addr(tx_buf, mapping),
5316 skb_headlen(skb),
5317 PCI_DMA_TODEVICE);
5319 tx_buf->skb = NULL;
5321 last = tx_buf->nr_frags;
5322 j++;
5323 for (k = 0; k < last; k++, j++) {
5324 tx_buf = &txr->tx_buf_ring[TX_RING_IDX(j)];
5325 pci_unmap_page(bp->pdev,
5326 pci_unmap_addr(tx_buf, mapping),
5327 skb_shinfo(skb)->frags[k].size,
5328 PCI_DMA_TODEVICE);
5330 dev_kfree_skb(skb);
5335 static void
5336 bnx2_free_rx_skbs(struct bnx2 *bp)
5338 int i;
5340 for (i = 0; i < bp->num_rx_rings; i++) {
5341 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
5342 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
5343 int j;
5345 if (rxr->rx_buf_ring == NULL)
5346 return;
5348 for (j = 0; j < bp->rx_max_ring_idx; j++) {
5349 struct sw_bd *rx_buf = &rxr->rx_buf_ring[j];
5350 struct sk_buff *skb = rx_buf->skb;
5352 if (skb == NULL)
5353 continue;
5355 pci_unmap_single(bp->pdev,
5356 pci_unmap_addr(rx_buf, mapping),
5357 bp->rx_buf_use_size,
5358 PCI_DMA_FROMDEVICE);
5360 rx_buf->skb = NULL;
5362 dev_kfree_skb(skb);
5364 for (j = 0; j < bp->rx_max_pg_ring_idx; j++)
5365 bnx2_free_rx_page(bp, rxr, j);
5369 static void
5370 bnx2_free_skbs(struct bnx2 *bp)
5372 bnx2_free_tx_skbs(bp);
5373 bnx2_free_rx_skbs(bp);
5376 static int
5377 bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
5379 int rc;
5381 rc = bnx2_reset_chip(bp, reset_code);
5382 bnx2_free_skbs(bp);
5383 if (rc)
5384 return rc;
5386 if ((rc = bnx2_init_chip(bp)) != 0)
5387 return rc;
5389 bnx2_init_all_rings(bp);
5390 return 0;
5393 static int
5394 bnx2_init_nic(struct bnx2 *bp, int reset_phy)
5396 int rc;
5398 if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
5399 return rc;
5401 spin_lock_bh(&bp->phy_lock);
5402 bnx2_init_phy(bp, reset_phy);
5403 bnx2_set_link(bp);
5404 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
5405 bnx2_remote_phy_event(bp);
5406 spin_unlock_bh(&bp->phy_lock);
5407 return 0;
5410 static int
5411 bnx2_shutdown_chip(struct bnx2 *bp)
5413 u32 reset_code;
5415 if (bp->flags & BNX2_FLAG_NO_WOL)
5416 reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
5417 else if (bp->wol)
5418 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
5419 else
5420 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
5422 return bnx2_reset_chip(bp, reset_code);
5425 static int
5426 bnx2_test_registers(struct bnx2 *bp)
5428 int ret;
5429 int i, is_5709;
5430 static const struct {
5431 u16 offset;
5432 u16 flags;
5433 #define BNX2_FL_NOT_5709 1
5434 u32 rw_mask;
5435 u32 ro_mask;
5436 } reg_tbl[] = {
5437 { 0x006c, 0, 0x00000000, 0x0000003f },
5438 { 0x0090, 0, 0xffffffff, 0x00000000 },
5439 { 0x0094, 0, 0x00000000, 0x00000000 },
5441 { 0x0404, BNX2_FL_NOT_5709, 0x00003f00, 0x00000000 },
5442 { 0x0418, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5443 { 0x041c, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5444 { 0x0420, BNX2_FL_NOT_5709, 0x00000000, 0x80ffffff },
5445 { 0x0424, BNX2_FL_NOT_5709, 0x00000000, 0x00000000 },
5446 { 0x0428, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
5447 { 0x0450, BNX2_FL_NOT_5709, 0x00000000, 0x0000ffff },
5448 { 0x0454, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5449 { 0x0458, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5451 { 0x0808, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5452 { 0x0854, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5453 { 0x0868, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
5454 { 0x086c, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
5455 { 0x0870, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
5456 { 0x0874, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
5458 { 0x0c00, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
5459 { 0x0c04, BNX2_FL_NOT_5709, 0x00000000, 0x03ff0001 },
5460 { 0x0c08, BNX2_FL_NOT_5709, 0x0f0ff073, 0x00000000 },
5462 { 0x1000, 0, 0x00000000, 0x00000001 },
5463 { 0x1004, BNX2_FL_NOT_5709, 0x00000000, 0x000f0001 },
5465 { 0x1408, 0, 0x01c00800, 0x00000000 },
5466 { 0x149c, 0, 0x8000ffff, 0x00000000 },
5467 { 0x14a8, 0, 0x00000000, 0x000001ff },
5468 { 0x14ac, 0, 0x0fffffff, 0x10000000 },
5469 { 0x14b0, 0, 0x00000002, 0x00000001 },
5470 { 0x14b8, 0, 0x00000000, 0x00000000 },
5471 { 0x14c0, 0, 0x00000000, 0x00000009 },
5472 { 0x14c4, 0, 0x00003fff, 0x00000000 },
5473 { 0x14cc, 0, 0x00000000, 0x00000001 },
5474 { 0x14d0, 0, 0xffffffff, 0x00000000 },
5476 { 0x1800, 0, 0x00000000, 0x00000001 },
5477 { 0x1804, 0, 0x00000000, 0x00000003 },
5479 { 0x2800, 0, 0x00000000, 0x00000001 },
5480 { 0x2804, 0, 0x00000000, 0x00003f01 },
5481 { 0x2808, 0, 0x0f3f3f03, 0x00000000 },
5482 { 0x2810, 0, 0xffff0000, 0x00000000 },
5483 { 0x2814, 0, 0xffff0000, 0x00000000 },
5484 { 0x2818, 0, 0xffff0000, 0x00000000 },
5485 { 0x281c, 0, 0xffff0000, 0x00000000 },
5486 { 0x2834, 0, 0xffffffff, 0x00000000 },
5487 { 0x2840, 0, 0x00000000, 0xffffffff },
5488 { 0x2844, 0, 0x00000000, 0xffffffff },
5489 { 0x2848, 0, 0xffffffff, 0x00000000 },
5490 { 0x284c, 0, 0xf800f800, 0x07ff07ff },
5492 { 0x2c00, 0, 0x00000000, 0x00000011 },
5493 { 0x2c04, 0, 0x00000000, 0x00030007 },
5495 { 0x3c00, 0, 0x00000000, 0x00000001 },
5496 { 0x3c04, 0, 0x00000000, 0x00070000 },
5497 { 0x3c08, 0, 0x00007f71, 0x07f00000 },
5498 { 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
5499 { 0x3c10, 0, 0xffffffff, 0x00000000 },
5500 { 0x3c14, 0, 0x00000000, 0xffffffff },
5501 { 0x3c18, 0, 0x00000000, 0xffffffff },
5502 { 0x3c1c, 0, 0xfffff000, 0x00000000 },
5503 { 0x3c20, 0, 0xffffff00, 0x00000000 },
5505 { 0x5004, 0, 0x00000000, 0x0000007f },
5506 { 0x5008, 0, 0x0f0007ff, 0x00000000 },
5508 { 0x5c00, 0, 0x00000000, 0x00000001 },
5509 { 0x5c04, 0, 0x00000000, 0x0003000f },
5510 { 0x5c08, 0, 0x00000003, 0x00000000 },
5511 { 0x5c0c, 0, 0x0000fff8, 0x00000000 },
5512 { 0x5c10, 0, 0x00000000, 0xffffffff },
5513 { 0x5c80, 0, 0x00000000, 0x0f7113f1 },
5514 { 0x5c84, 0, 0x00000000, 0x0000f333 },
5515 { 0x5c88, 0, 0x00000000, 0x00077373 },
5516 { 0x5c8c, 0, 0x00000000, 0x0007f737 },
5518 { 0x6808, 0, 0x0000ff7f, 0x00000000 },
5519 { 0x680c, 0, 0xffffffff, 0x00000000 },
5520 { 0x6810, 0, 0xffffffff, 0x00000000 },
5521 { 0x6814, 0, 0xffffffff, 0x00000000 },
5522 { 0x6818, 0, 0xffffffff, 0x00000000 },
5523 { 0x681c, 0, 0xffffffff, 0x00000000 },
5524 { 0x6820, 0, 0x00ff00ff, 0x00000000 },
5525 { 0x6824, 0, 0x00ff00ff, 0x00000000 },
5526 { 0x6828, 0, 0x00ff00ff, 0x00000000 },
5527 { 0x682c, 0, 0x03ff03ff, 0x00000000 },
5528 { 0x6830, 0, 0x03ff03ff, 0x00000000 },
5529 { 0x6834, 0, 0x03ff03ff, 0x00000000 },
5530 { 0x6838, 0, 0x03ff03ff, 0x00000000 },
5531 { 0x683c, 0, 0x0000ffff, 0x00000000 },
5532 { 0x6840, 0, 0x00000ff0, 0x00000000 },
5533 { 0x6844, 0, 0x00ffff00, 0x00000000 },
5534 { 0x684c, 0, 0xffffffff, 0x00000000 },
5535 { 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
5536 { 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
5537 { 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
5538 { 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
5539 { 0x6908, 0, 0x00000000, 0x0001ff0f },
5540 { 0x690c, 0, 0x00000000, 0x0ffe00f0 },
5542 { 0xffff, 0, 0x00000000, 0x00000000 },
5545 ret = 0;
5546 is_5709 = 0;
5547 if (CHIP_NUM(bp) == CHIP_NUM_5709)
5548 is_5709 = 1;
5550 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
5551 u32 offset, rw_mask, ro_mask, save_val, val;
5552 u16 flags = reg_tbl[i].flags;
5554 if (is_5709 && (flags & BNX2_FL_NOT_5709))
5555 continue;
5557 offset = (u32) reg_tbl[i].offset;
5558 rw_mask = reg_tbl[i].rw_mask;
5559 ro_mask = reg_tbl[i].ro_mask;
5561 save_val = readl(bp->regview + offset);
5563 writel(0, bp->regview + offset);
5565 val = readl(bp->regview + offset);
5566 if ((val & rw_mask) != 0) {
5567 goto reg_test_err;
5570 if ((val & ro_mask) != (save_val & ro_mask)) {
5571 goto reg_test_err;
5574 writel(0xffffffff, bp->regview + offset);
5576 val = readl(bp->regview + offset);
5577 if ((val & rw_mask) != rw_mask) {
5578 goto reg_test_err;
5581 if ((val & ro_mask) != (save_val & ro_mask)) {
5582 goto reg_test_err;
5585 writel(save_val, bp->regview + offset);
5586 continue;
5588 reg_test_err:
5589 writel(save_val, bp->regview + offset);
5590 ret = -ENODEV;
5591 break;
5593 return ret;
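/*
 * Editorial sketch, not part of bnx2.c: the pass/fail predicate applied
 * per register in bnx2_test_registers() above.  Bits under rw_mask must
 * follow what was written; bits under ro_mask must still read back the
 * saved value.  The loop applies this twice per register, once after
 * writing 0 and once after writing 0xffffffff, then restores the register.
 */
#include <stdbool.h>
#include <stdint.h>

static bool reg_ok(uint32_t readback, uint32_t written,
		   uint32_t saved, uint32_t rw_mask, uint32_t ro_mask)
{
	if ((readback & rw_mask) != (written & rw_mask))
		return false;		/* writable bits did not latch */
	if ((readback & ro_mask) != (saved & ro_mask))
		return false;		/* read-only bits were disturbed */
	return true;
}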
5596 static int
5597 bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
5599 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
5600 0xaaaaaaaa , 0xaa55aa55, 0x55aa55aa };
5601 int i;
5603 for (i = 0; i < sizeof(test_pattern) / 4; i++) {
5604 u32 offset;
5606 for (offset = 0; offset < size; offset += 4) {
5608 bnx2_reg_wr_ind(bp, start + offset, test_pattern[i]);
5610 if (bnx2_reg_rd_ind(bp, start + offset) !=
5611 test_pattern[i]) {
5612 return -ENODEV;
5616 return 0;
5619 static int
5620 bnx2_test_memory(struct bnx2 *bp)
5622 int ret = 0;
5623 int i;
5624 static struct mem_entry {
5625 u32 offset;
5626 u32 len;
5627 } mem_tbl_5706[] = {
5628 { 0x60000, 0x4000 },
5629 { 0xa0000, 0x3000 },
5630 { 0xe0000, 0x4000 },
5631 { 0x120000, 0x4000 },
5632 { 0x1a0000, 0x4000 },
5633 { 0x160000, 0x4000 },
5634 { 0xffffffff, 0 },
5636 mem_tbl_5709[] = {
5637 { 0x60000, 0x4000 },
5638 { 0xa0000, 0x3000 },
5639 { 0xe0000, 0x4000 },
5640 { 0x120000, 0x4000 },
5641 { 0x1a0000, 0x4000 },
5642 { 0xffffffff, 0 },
5644 struct mem_entry *mem_tbl;
5646 if (CHIP_NUM(bp) == CHIP_NUM_5709)
5647 mem_tbl = mem_tbl_5709;
5648 else
5649 mem_tbl = mem_tbl_5706;
5651 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
5652 if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
5653 mem_tbl[i].len)) != 0) {
5654 return ret;
5658 return ret;
5661 #define BNX2_MAC_LOOPBACK 0
5662 #define BNX2_PHY_LOOPBACK 1
5664 static int
5665 bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
5667 unsigned int pkt_size, num_pkts, i;
5668 struct sk_buff *skb, *rx_skb;
5669 unsigned char *packet;
5670 u16 rx_start_idx, rx_idx;
5671 dma_addr_t map;
5672 struct tx_bd *txbd;
5673 struct sw_bd *rx_buf;
5674 struct l2_fhdr *rx_hdr;
5675 int ret = -ENODEV;
5676 struct bnx2_napi *bnapi = &bp->bnx2_napi[0], *tx_napi;
5677 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
5678 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
5680 tx_napi = bnapi;
5682 txr = &tx_napi->tx_ring;
5683 rxr = &bnapi->rx_ring;
5684 if (loopback_mode == BNX2_MAC_LOOPBACK) {
5685 bp->loopback = MAC_LOOPBACK;
5686 bnx2_set_mac_loopback(bp);
5688 else if (loopback_mode == BNX2_PHY_LOOPBACK) {
5689 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
5690 return 0;
5692 bp->loopback = PHY_LOOPBACK;
5693 bnx2_set_phy_loopback(bp);
5695 else
5696 return -EINVAL;
5698 pkt_size = min(bp->dev->mtu + ETH_HLEN, bp->rx_jumbo_thresh - 4);
5699 skb = netdev_alloc_skb(bp->dev, pkt_size);
5700 if (!skb)
5701 return -ENOMEM;
5702 packet = skb_put(skb, pkt_size);
5703 memcpy(packet, bp->dev->dev_addr, 6);
5704 memset(packet + 6, 0x0, 8);
5705 for (i = 14; i < pkt_size; i++)
5706 packet[i] = (unsigned char) (i & 0xff);
5708 map = pci_map_single(bp->pdev, skb->data, pkt_size,
5709 PCI_DMA_TODEVICE);
5710 if (pci_dma_mapping_error(bp->pdev, map)) {
5711 dev_kfree_skb(skb);
5712 return -EIO;
5715 REG_WR(bp, BNX2_HC_COMMAND,
5716 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
5718 REG_RD(bp, BNX2_HC_COMMAND);
5720 udelay(5);
5721 rx_start_idx = bnx2_get_hw_rx_cons(bnapi);
5723 num_pkts = 0;
5725 txbd = &txr->tx_desc_ring[TX_RING_IDX(txr->tx_prod)];
5727 txbd->tx_bd_haddr_hi = (u64) map >> 32;
5728 txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
5729 txbd->tx_bd_mss_nbytes = pkt_size;
5730 txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;
5732 num_pkts++;
5733 txr->tx_prod = NEXT_TX_BD(txr->tx_prod);
5734 txr->tx_prod_bseq += pkt_size;
5736 REG_WR16(bp, txr->tx_bidx_addr, txr->tx_prod);
5737 REG_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);
5739 udelay(100);
5741 REG_WR(bp, BNX2_HC_COMMAND,
5742 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
5744 REG_RD(bp, BNX2_HC_COMMAND);
5746 udelay(5);
5748 pci_unmap_single(bp->pdev, map, pkt_size, PCI_DMA_TODEVICE);
5749 dev_kfree_skb(skb);
5751 if (bnx2_get_hw_tx_cons(tx_napi) != txr->tx_prod)
5752 goto loopback_test_done;
5754 rx_idx = bnx2_get_hw_rx_cons(bnapi);
5755 if (rx_idx != rx_start_idx + num_pkts) {
5756 goto loopback_test_done;
5759 rx_buf = &rxr->rx_buf_ring[rx_start_idx];
5760 rx_skb = rx_buf->skb;
5762 rx_hdr = (struct l2_fhdr *) rx_skb->data;
5763 skb_reserve(rx_skb, BNX2_RX_OFFSET);
5765 pci_dma_sync_single_for_cpu(bp->pdev,
5766 pci_unmap_addr(rx_buf, mapping),
5767 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
5769 if (rx_hdr->l2_fhdr_status &
5770 (L2_FHDR_ERRORS_BAD_CRC |
5771 L2_FHDR_ERRORS_PHY_DECODE |
5772 L2_FHDR_ERRORS_ALIGNMENT |
5773 L2_FHDR_ERRORS_TOO_SHORT |
5774 L2_FHDR_ERRORS_GIANT_FRAME)) {
5776 goto loopback_test_done;
5779 if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) {
5780 goto loopback_test_done;
5783 for (i = 14; i < pkt_size; i++) {
5784 if (*(rx_skb->data + i) != (unsigned char) (i & 0xff)) {
5785 goto loopback_test_done;
5789 ret = 0;
5791 loopback_test_done:
5792 bp->loopback = 0;
5793 return ret;
5796 #define BNX2_MAC_LOOPBACK_FAILED 1
5797 #define BNX2_PHY_LOOPBACK_FAILED 2
5798 #define BNX2_LOOPBACK_FAILED (BNX2_MAC_LOOPBACK_FAILED | \
5799 BNX2_PHY_LOOPBACK_FAILED)
5801 static int
5802 bnx2_test_loopback(struct bnx2 *bp)
5804 int rc = 0;
5806 if (!netif_running(bp->dev))
5807 return BNX2_LOOPBACK_FAILED;
5809 bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
5810 spin_lock_bh(&bp->phy_lock);
5811 bnx2_init_phy(bp, 1);
5812 spin_unlock_bh(&bp->phy_lock);
5813 if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
5814 rc |= BNX2_MAC_LOOPBACK_FAILED;
5815 if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
5816 rc |= BNX2_PHY_LOOPBACK_FAILED;
5817 return rc;
5820 #define NVRAM_SIZE 0x200
5821 #define CRC32_RESIDUAL 0xdebb20e3
5823 static int
5824 bnx2_test_nvram(struct bnx2 *bp)
5826 __be32 buf[NVRAM_SIZE / 4];
5827 u8 *data = (u8 *) buf;
5828 int rc = 0;
5829 u32 magic, csum;
5831 if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
5832 goto test_nvram_done;
5834 magic = be32_to_cpu(buf[0]);
5835 if (magic != 0x669955aa) {
5836 rc = -ENODEV;
5837 goto test_nvram_done;
5840 if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0)
5841 goto test_nvram_done;
5843 csum = ether_crc_le(0x100, data);
5844 if (csum != CRC32_RESIDUAL) {
5845 rc = -ENODEV;
5846 goto test_nvram_done;
5849 csum = ether_crc_le(0x100, data + 0x100);
5850 if (csum != CRC32_RESIDUAL) {
5851 rc = -ENODEV;
5854 test_nvram_done:
5855 return rc;
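/*
 * Editorial sketch, not part of bnx2.c: why bnx2_test_nvram() above can
 * compare against the fixed CRC32_RESIDUAL 0xdebb20e3.  For the reflected
 * CRC-32 used by ether_crc_le() (init 0xffffffff, no final inversion),
 * running the CRC over a block that ends with its own complemented CRC
 * stored little-endian always leaves that residual, so the test never has
 * to locate the checksum field.  Standalone demonstration:
 */
#include <stdint.h>
#include <stdio.h>

#define RESIDUAL 0xdebb20e3u

static uint32_t crc32_le_update(uint32_t crc, const uint8_t *p, size_t n)
{
	size_t k;

	while (n--) {
		crc ^= *p++;
		for (k = 0; k < 8; k++)
			crc = (crc >> 1) ^ (0xedb88320u & -(crc & 1u));
	}
	return crc;
}

int main(void)
{
	uint8_t blk[12] = { 'n', 'v', 'r', 'a', 'm', 'd', 'a', 't' };
	uint32_t crc = ~crc32_le_update(0xffffffffu, blk, 8);

	blk[8]  = crc & 0xff;		/* stored CRC, little-endian */
	blk[9]  = (crc >> 8) & 0xff;
	blk[10] = (crc >> 16) & 0xff;
	blk[11] = (crc >> 24) & 0xff;

	printf("%s\n", crc32_le_update(0xffffffffu, blk, 12) == RESIDUAL ?
	       "ok" : "mismatch");
	return 0;
}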
5858 static int
5859 bnx2_test_link(struct bnx2 *bp)
5861 u32 bmsr;
5863 if (!netif_running(bp->dev))
5864 return -ENODEV;
5866 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
5867 if (bp->link_up)
5868 return 0;
5869 return -ENODEV;
5871 spin_lock_bh(&bp->phy_lock);
5872 bnx2_enable_bmsr1(bp);
5873 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
5874 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
5875 bnx2_disable_bmsr1(bp);
5876 spin_unlock_bh(&bp->phy_lock);
5878 if (bmsr & BMSR_LSTATUS) {
5879 return 0;
5881 return -ENODEV;
5884 static int
5885 bnx2_test_intr(struct bnx2 *bp)
5887 int i;
5888 u16 status_idx;
5890 if (!netif_running(bp->dev))
5891 return -ENODEV;
5893 status_idx = REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;
5895 /* This register is not touched during run-time. */
5896 REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
5897 REG_RD(bp, BNX2_HC_COMMAND);
5899 for (i = 0; i < 10; i++) {
5900 if ((REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
5901 status_idx) {
5903 break;
5906 msleep_interruptible(10);
5908 if (i < 10)
5909 return 0;
5911 return -ENODEV;
5914 /* Determining link for parallel detection. */
5915 static int
5916 bnx2_5706_serdes_has_link(struct bnx2 *bp)
5918 u32 mode_ctl, an_dbg, exp;
5920 if (bp->phy_flags & BNX2_PHY_FLAG_NO_PARALLEL)
5921 return 0;
5923 bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_MODE_CTL);
5924 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &mode_ctl);
5926 if (!(mode_ctl & MISC_SHDW_MODE_CTL_SIG_DET))
5927 return 0;
5929 bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
5930 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
5931 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
5933 if (an_dbg & (MISC_SHDW_AN_DBG_NOSYNC | MISC_SHDW_AN_DBG_RUDI_INVALID))
5934 return 0;
5936 bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_REG1);
5937 bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);
5938 bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);
5940 if (exp & MII_EXPAND_REG1_RUDI_C) /* receiving CONFIG */
5941 return 0;
5943 return 1;
5946 static void
5947 bnx2_5706_serdes_timer(struct bnx2 *bp)
5949 int check_link = 1;
5951 spin_lock(&bp->phy_lock);
5952 if (bp->serdes_an_pending) {
5953 bp->serdes_an_pending--;
5954 check_link = 0;
5955 } else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
5956 u32 bmcr;
5958 bp->current_interval = BNX2_TIMER_INTERVAL;
5960 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
5962 if (bmcr & BMCR_ANENABLE) {
5963 if (bnx2_5706_serdes_has_link(bp)) {
5964 bmcr &= ~BMCR_ANENABLE;
5965 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
5966 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
5967 bp->phy_flags |= BNX2_PHY_FLAG_PARALLEL_DETECT;
5971 else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
5972 (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)) {
5973 u32 phy2;
5975 bnx2_write_phy(bp, 0x17, 0x0f01);
5976 bnx2_read_phy(bp, 0x15, &phy2);
5977 if (phy2 & 0x20) {
5978 u32 bmcr;
5980 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
5981 bmcr |= BMCR_ANENABLE;
5982 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
5984 bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
5986 } else
5987 bp->current_interval = BNX2_TIMER_INTERVAL;
5989 if (check_link) {
5990 u32 val;
5992 bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
5993 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);
5994 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);
5996 if (bp->link_up && (val & MISC_SHDW_AN_DBG_NOSYNC)) {
5997 if (!(bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN)) {
5998 bnx2_5706s_force_link_dn(bp, 1);
5999 bp->phy_flags |= BNX2_PHY_FLAG_FORCED_DOWN;
6000 } else
6001 bnx2_set_link(bp);
6002 } else if (!bp->link_up && !(val & MISC_SHDW_AN_DBG_NOSYNC))
6003 bnx2_set_link(bp);
6005 spin_unlock(&bp->phy_lock);
6008 static void
6009 bnx2_5708_serdes_timer(struct bnx2 *bp)
6011 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
6012 return;
6014 if ((bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) == 0) {
6015 bp->serdes_an_pending = 0;
6016 return;
6019 spin_lock(&bp->phy_lock);
6020 if (bp->serdes_an_pending)
6021 bp->serdes_an_pending--;
6022 else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
6023 u32 bmcr;
6025 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
6026 if (bmcr & BMCR_ANENABLE) {
6027 bnx2_enable_forced_2g5(bp);
6028 bp->current_interval = BNX2_SERDES_FORCED_TIMEOUT;
6029 } else {
6030 bnx2_disable_forced_2g5(bp);
6031 bp->serdes_an_pending = 2;
6032 bp->current_interval = BNX2_TIMER_INTERVAL;
6035 } else
6036 bp->current_interval = BNX2_TIMER_INTERVAL;
6038 spin_unlock(&bp->phy_lock);
6041 static void
6042 bnx2_timer(unsigned long data)
6044 struct bnx2 *bp = (struct bnx2 *) data;
6046 if (!netif_running(bp->dev))
6047 return;
6049 if (atomic_read(&bp->intr_sem) != 0)
6050 goto bnx2_restart_timer;
6052 if ((bp->flags & (BNX2_FLAG_USING_MSI | BNX2_FLAG_ONE_SHOT_MSI)) ==
6053 BNX2_FLAG_USING_MSI)
6054 bnx2_chk_missed_msi(bp);
6056 bnx2_send_heart_beat(bp);
6058 bp->stats_blk->stat_FwRxDrop =
6059 bnx2_reg_rd_ind(bp, BNX2_FW_RX_DROP_COUNT);
6061	/* work around occasionally corrupted counters */
6062 if ((bp->flags & BNX2_FLAG_BROKEN_STATS) && bp->stats_ticks)
6063 REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd |
6064 BNX2_HC_COMMAND_STATS_NOW);
6066 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
6067 if (CHIP_NUM(bp) == CHIP_NUM_5706)
6068 bnx2_5706_serdes_timer(bp);
6069 else
6070 bnx2_5708_serdes_timer(bp);
6073 bnx2_restart_timer:
6074 mod_timer(&bp->timer, jiffies + bp->current_interval);
6077 static int
6078 bnx2_request_irq(struct bnx2 *bp)
6080 unsigned long flags;
6081 struct bnx2_irq *irq;
6082 int rc = 0, i;
6084 if (bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)
6085 flags = 0;
6086 else
6087 flags = IRQF_SHARED;
6089 for (i = 0; i < bp->irq_nvecs; i++) {
6090 irq = &bp->irq_tbl[i];
6091 rc = request_irq(irq->vector, irq->handler, flags, irq->name,
6092 &bp->bnx2_napi[i]);
6093 if (rc)
6094 break;
6095 irq->requested = 1;
6097 return rc;
6100 static void
6101 bnx2_free_irq(struct bnx2 *bp)
6103 struct bnx2_irq *irq;
6104 int i;
6106 for (i = 0; i < bp->irq_nvecs; i++) {
6107 irq = &bp->irq_tbl[i];
6108 if (irq->requested)
6109 free_irq(irq->vector, &bp->bnx2_napi[i]);
6110 irq->requested = 0;
6112 if (bp->flags & BNX2_FLAG_USING_MSI)
6113 pci_disable_msi(bp->pdev);
6114 else if (bp->flags & BNX2_FLAG_USING_MSIX)
6115 pci_disable_msix(bp->pdev);
6117 bp->flags &= ~(BNX2_FLAG_USING_MSI_OR_MSIX | BNX2_FLAG_ONE_SHOT_MSI);
6120 static void
6121 bnx2_enable_msix(struct bnx2 *bp, int msix_vecs)
6123 int i, rc;
6124 struct msix_entry msix_ent[BNX2_MAX_MSIX_VEC];
6125 struct net_device *dev = bp->dev;
6126 const int len = sizeof(bp->irq_tbl[0].name);
6128 bnx2_setup_msix_tbl(bp);
6129 REG_WR(bp, BNX2_PCI_MSIX_CONTROL, BNX2_MAX_MSIX_HW_VEC - 1);
6130 REG_WR(bp, BNX2_PCI_MSIX_TBL_OFF_BIR, BNX2_PCI_GRC_WINDOW2_BASE);
6131 REG_WR(bp, BNX2_PCI_MSIX_PBA_OFF_BIT, BNX2_PCI_GRC_WINDOW3_BASE);
6133 /* Need to flush the previous three writes to ensure MSI-X
6134	 * is set up properly */
6135 REG_RD(bp, BNX2_PCI_MSIX_CONTROL);
6137 for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
6138 msix_ent[i].entry = i;
6139 msix_ent[i].vector = 0;
6142 rc = pci_enable_msix(bp->pdev, msix_ent, BNX2_MAX_MSIX_VEC);
6143 if (rc != 0)
6144 return;
6146 bp->irq_nvecs = msix_vecs;
6147 bp->flags |= BNX2_FLAG_USING_MSIX | BNX2_FLAG_ONE_SHOT_MSI;
6148 for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
6149 bp->irq_tbl[i].vector = msix_ent[i].vector;
6150 snprintf(bp->irq_tbl[i].name, len, "%s-%d", dev->name, i);
6151 bp->irq_tbl[i].handler = bnx2_msi_1shot;
6155 static void
6156 bnx2_setup_int_mode(struct bnx2 *bp, int dis_msi)
6158 int cpus = num_online_cpus();
6159 int msix_vecs = min(cpus + 1, RX_MAX_RINGS);
6161 bp->irq_tbl[0].handler = bnx2_interrupt;
6162 strcpy(bp->irq_tbl[0].name, bp->dev->name);
6163 bp->irq_nvecs = 1;
6164 bp->irq_tbl[0].vector = bp->pdev->irq;
6166 if ((bp->flags & BNX2_FLAG_MSIX_CAP) && !dis_msi && cpus > 1)
6167 bnx2_enable_msix(bp, msix_vecs);
6169 if ((bp->flags & BNX2_FLAG_MSI_CAP) && !dis_msi &&
6170 !(bp->flags & BNX2_FLAG_USING_MSIX)) {
6171 if (pci_enable_msi(bp->pdev) == 0) {
6172 bp->flags |= BNX2_FLAG_USING_MSI;
6173 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
6174 bp->flags |= BNX2_FLAG_ONE_SHOT_MSI;
6175 bp->irq_tbl[0].handler = bnx2_msi_1shot;
6176 } else
6177 bp->irq_tbl[0].handler = bnx2_msi;
6179 bp->irq_tbl[0].vector = bp->pdev->irq;
6183 bp->num_tx_rings = rounddown_pow_of_two(bp->irq_nvecs);
6184 bp->dev->real_num_tx_queues = bp->num_tx_rings;
6186 bp->num_rx_rings = bp->irq_nvecs;
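/*
 * Editorial sketch, not part of bnx2.c: the vector/ring sizing above as
 * plain arithmetic.  With MSI-X, one rx ring is used per vector and the
 * tx ring count is rounded down to a power of two.  RX_MAX_RINGS_EX is an
 * assumed cap for the example (the driver uses RX_MAX_RINGS).
 */
#include <stdio.h>

#define RX_MAX_RINGS_EX 8

static unsigned rounddown_pow2(unsigned n)
{
	unsigned p = 1;

	while ((p << 1) <= n)
		p <<= 1;
	return p;
}

int main(void)
{
	unsigned cpus = 6;
	unsigned vecs = (cpus + 1 < RX_MAX_RINGS_EX) ? cpus + 1
						     : RX_MAX_RINGS_EX;

	printf("vecs=%u rx=%u tx=%u\n", vecs, vecs,
	       rounddown_pow2(vecs));	/* prints: vecs=7 rx=7 tx=4 */
	return 0;
}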
6189 /* Called with rtnl_lock */
6190 static int
6191 bnx2_open(struct net_device *dev)
6193 struct bnx2 *bp = netdev_priv(dev);
6194 int rc;
6196 netif_carrier_off(dev);
6198 bnx2_set_power_state(bp, PCI_D0);
6199 bnx2_disable_int(bp);
6201 bnx2_setup_int_mode(bp, disable_msi);
6202 bnx2_napi_enable(bp);
6203 rc = bnx2_alloc_mem(bp);
6204 if (rc)
6205 goto open_err;
6207 rc = bnx2_request_irq(bp);
6208 if (rc)
6209 goto open_err;
6211 rc = bnx2_init_nic(bp, 1);
6212 if (rc)
6213 goto open_err;
6215 mod_timer(&bp->timer, jiffies + bp->current_interval);
6217 atomic_set(&bp->intr_sem, 0);
6219 memset(bp->temp_stats_blk, 0, sizeof(struct statistics_block));
6221 bnx2_enable_int(bp);
6223 if (bp->flags & BNX2_FLAG_USING_MSI) {
6224		/* Test MSI to make sure it is working.
6225		 * If the MSI test fails, go back to INTx mode. */
6227 if (bnx2_test_intr(bp) != 0) {
6228 netdev_warn(bp->dev, "No interrupt was generated using MSI, switching to INTx mode. Please report this failure to the PCI maintainer and include system chipset information.\n");
6230 bnx2_disable_int(bp);
6231 bnx2_free_irq(bp);
6233 bnx2_setup_int_mode(bp, 1);
6235 rc = bnx2_init_nic(bp, 0);
6237 if (!rc)
6238 rc = bnx2_request_irq(bp);
6240 if (rc) {
6241 del_timer_sync(&bp->timer);
6242 goto open_err;
6244 bnx2_enable_int(bp);
6247 if (bp->flags & BNX2_FLAG_USING_MSI)
6248 netdev_info(dev, "using MSI\n");
6249 else if (bp->flags & BNX2_FLAG_USING_MSIX)
6250 netdev_info(dev, "using MSIX\n");
6252 netif_tx_start_all_queues(dev);
6254 return 0;
6256 open_err:
6257 bnx2_napi_disable(bp);
6258 bnx2_free_skbs(bp);
6259 bnx2_free_irq(bp);
6260 bnx2_free_mem(bp);
6261 return rc;
6264 static void
6265 bnx2_reset_task(struct work_struct *work)
6267 struct bnx2 *bp = container_of(work, struct bnx2, reset_task);
6269 rtnl_lock();
6270 if (!netif_running(bp->dev)) {
6271 rtnl_unlock();
6272 return;
6275 bnx2_netif_stop(bp);
6277 bnx2_init_nic(bp, 1);
6279 atomic_set(&bp->intr_sem, 1);
6280 bnx2_netif_start(bp);
6281 rtnl_unlock();
6284 static void
6285 bnx2_dump_state(struct bnx2 *bp)
6287 struct net_device *dev = bp->dev;
6289 netdev_err(dev, "DEBUG: intr_sem[%x]\n", atomic_read(&bp->intr_sem));
6290 netdev_err(dev, "DEBUG: EMAC_TX_STATUS[%08x] RPM_MGMT_PKT_CTRL[%08x]\n",
6291 REG_RD(bp, BNX2_EMAC_TX_STATUS),
6292 REG_RD(bp, BNX2_RPM_MGMT_PKT_CTRL));
6293 netdev_err(dev, "DEBUG: MCP_STATE_P0[%08x] MCP_STATE_P1[%08x]\n",
6294 bnx2_reg_rd_ind(bp, BNX2_MCP_STATE_P0),
6295 bnx2_reg_rd_ind(bp, BNX2_MCP_STATE_P1));
6296 netdev_err(dev, "DEBUG: HC_STATS_INTERRUPT_STATUS[%08x]\n",
6297 REG_RD(bp, BNX2_HC_STATS_INTERRUPT_STATUS));
6298 if (bp->flags & BNX2_FLAG_USING_MSIX)
6299 netdev_err(dev, "DEBUG: PBA[%08x]\n",
6300 REG_RD(bp, BNX2_PCI_GRC_WINDOW3_BASE));
6303 static void
6304 bnx2_tx_timeout(struct net_device *dev)
6306 struct bnx2 *bp = netdev_priv(dev);
6308 bnx2_dump_state(bp);
6310	/* This allows the netif to be shut down gracefully before resetting */
6311 schedule_work(&bp->reset_task);
6314 #ifdef BCM_VLAN
6315 /* Called with rtnl_lock */
6316 static void
6317 bnx2_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp)
6319 struct bnx2 *bp = netdev_priv(dev);
6321 if (netif_running(dev))
6322 bnx2_netif_stop(bp);
6324 bp->vlgrp = vlgrp;
6326 if (!netif_running(dev))
6327 return;
6329 bnx2_set_rx_mode(dev);
6330 if (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN)
6331 bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_KEEP_VLAN_UPDATE, 0, 1);
6333 bnx2_netif_start(bp);
6335 #endif
6337 /* Called with netif_tx_lock.
6338 * bnx2_tx_int() runs without netif_tx_lock unless it needs to call
6339 * netif_wake_queue(). */
6341 static netdev_tx_t
6342 bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
6344 struct bnx2 *bp = netdev_priv(dev);
6345 dma_addr_t mapping;
6346 struct tx_bd *txbd;
6347 struct sw_tx_bd *tx_buf;
6348 u32 len, vlan_tag_flags, last_frag, mss;
6349 u16 prod, ring_prod;
6350 int i;
6351 struct bnx2_napi *bnapi;
6352 struct bnx2_tx_ring_info *txr;
6353 struct netdev_queue *txq;
6355 /* Determine which tx ring we will be placed on */
6356 i = skb_get_queue_mapping(skb);
6357 bnapi = &bp->bnx2_napi[i];
6358 txr = &bnapi->tx_ring;
6359 txq = netdev_get_tx_queue(dev, i);
6361 if (unlikely(bnx2_tx_avail(bp, txr) <
6362 (skb_shinfo(skb)->nr_frags + 1))) {
6363 netif_tx_stop_queue(txq);
6364 netdev_err(dev, "BUG! Tx ring full when queue awake!\n");
6366 return NETDEV_TX_BUSY;
6368 len = skb_headlen(skb);
6369 prod = txr->tx_prod;
6370 ring_prod = TX_RING_IDX(prod);
6372 vlan_tag_flags = 0;
6373 if (skb->ip_summed == CHECKSUM_PARTIAL) {
6374 vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
6377 #ifdef BCM_VLAN
6378 if (bp->vlgrp && vlan_tx_tag_present(skb)) {
6379 vlan_tag_flags |=
6380 (TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
6382 #endif
6383 if ((mss = skb_shinfo(skb)->gso_size)) {
6384 u32 tcp_opt_len;
6385 struct iphdr *iph;
6387 vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;
6389 tcp_opt_len = tcp_optlen(skb);
6391 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
6392 u32 tcp_off = skb_transport_offset(skb) -
6393 sizeof(struct ipv6hdr) - ETH_HLEN;
6395 vlan_tag_flags |= ((tcp_opt_len >> 2) << 8) |
6396 TX_BD_FLAGS_SW_FLAGS;
6397 if (likely(tcp_off == 0))
6398 vlan_tag_flags &= ~TX_BD_FLAGS_TCP6_OFF0_MSK;
6399 else {
6400 tcp_off >>= 3;
6401 vlan_tag_flags |= ((tcp_off & 0x3) <<
6402 TX_BD_FLAGS_TCP6_OFF0_SHL) |
6403 ((tcp_off & 0x10) <<
6404 TX_BD_FLAGS_TCP6_OFF4_SHL);
6405 mss |= (tcp_off & 0xc) << TX_BD_TCP6_OFF2_SHL;
6407 } else {
6408 iph = ip_hdr(skb);
6409 if (tcp_opt_len || (iph->ihl > 5)) {
6410 vlan_tag_flags |= ((iph->ihl - 5) +
6411 (tcp_opt_len >> 2)) << 8;
6414 } else
6415 mss = 0;
6417 mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE);
6418 if (pci_dma_mapping_error(bp->pdev, mapping)) {
6419 dev_kfree_skb(skb);
6420 return NETDEV_TX_OK;
6423 tx_buf = &txr->tx_buf_ring[ring_prod];
6424 tx_buf->skb = skb;
6425 pci_unmap_addr_set(tx_buf, mapping, mapping);
6427 txbd = &txr->tx_desc_ring[ring_prod];
6429 txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
6430 txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
6431 txbd->tx_bd_mss_nbytes = len | (mss << 16);
6432 txbd->tx_bd_vlan_tag_flags = vlan_tag_flags | TX_BD_FLAGS_START;
6434 last_frag = skb_shinfo(skb)->nr_frags;
6435 tx_buf->nr_frags = last_frag;
6436 tx_buf->is_gso = skb_is_gso(skb);
6438 for (i = 0; i < last_frag; i++) {
6439 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
6441 prod = NEXT_TX_BD(prod);
6442 ring_prod = TX_RING_IDX(prod);
6443 txbd = &txr->tx_desc_ring[ring_prod];
6445 len = frag->size;
6446 mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
6447 len, PCI_DMA_TODEVICE);
6448 if (pci_dma_mapping_error(bp->pdev, mapping))
6449 goto dma_error;
6450 pci_unmap_addr_set(&txr->tx_buf_ring[ring_prod], mapping,
6451 mapping);
6453 txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
6454 txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
6455 txbd->tx_bd_mss_nbytes = len | (mss << 16);
6456 txbd->tx_bd_vlan_tag_flags = vlan_tag_flags;
6459 txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;
6461 prod = NEXT_TX_BD(prod);
6462 txr->tx_prod_bseq += skb->len;
6464 REG_WR16(bp, txr->tx_bidx_addr, prod);
6465 REG_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);
6467 mmiowb();
6469 txr->tx_prod = prod;
6471 if (unlikely(bnx2_tx_avail(bp, txr) <= MAX_SKB_FRAGS)) {
6472 netif_tx_stop_queue(txq);
6473 if (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)
6474 netif_tx_wake_queue(txq);
6477 return NETDEV_TX_OK;
6478 dma_error:
6479 /* save value of frag that failed */
6480 last_frag = i;
6482 /* start back at beginning and unmap skb */
6483 prod = txr->tx_prod;
6484 ring_prod = TX_RING_IDX(prod);
6485 tx_buf = &txr->tx_buf_ring[ring_prod];
6486 tx_buf->skb = NULL;
6487 pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
6488 skb_headlen(skb), PCI_DMA_TODEVICE);
6490 /* unmap remaining mapped pages */
6491 for (i = 0; i < last_frag; i++) {
6492 prod = NEXT_TX_BD(prod);
6493 ring_prod = TX_RING_IDX(prod);
6494 tx_buf = &txr->tx_buf_ring[ring_prod];
6495 pci_unmap_page(bp->pdev, pci_unmap_addr(tx_buf, mapping),
6496 skb_shinfo(skb)->frags[i].size,
6497 PCI_DMA_TODEVICE);
6500 dev_kfree_skb(skb);
6501 return NETDEV_TX_OK;
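/*
 * Editorial sketch, not part of bnx2.c: the dma_error unwind above,
 * genericized.  On a failed fragment mapping, the driver walks forward
 * from the saved producer index, unmapping the head buffer and only the
 * fragments mapped so far, then frees the skb and returns NETDEV_TX_OK so
 * the stack does not retry.  unmap() is a hypothetical callback standing
 * in for pci_unmap_single()/pci_unmap_page().
 */
#include <stddef.h>
#include <stdint.h>

static void unwind_tx_mappings(void (*unmap)(uint64_t addr, size_t len),
			       const uint64_t *map, const size_t *len,
			       int mapped_frags)
{
	int i;

	unmap(map[0], len[0]);			/* linear (head) buffer */
	for (i = 0; i < mapped_frags; i++)	/* frags before the failure */
		unmap(map[1 + i], len[1 + i]);
}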
6504 /* Called with rtnl_lock */
6505 static int
6506 bnx2_close(struct net_device *dev)
6508 struct bnx2 *bp = netdev_priv(dev);
6510 cancel_work_sync(&bp->reset_task);
6512 bnx2_disable_int_sync(bp);
6513 bnx2_napi_disable(bp);
6514 del_timer_sync(&bp->timer);
6515 bnx2_shutdown_chip(bp);
6516 bnx2_free_irq(bp);
6517 bnx2_free_skbs(bp);
6518 bnx2_free_mem(bp);
6519 bp->link_up = 0;
6520 netif_carrier_off(bp->dev);
6521 bnx2_set_power_state(bp, PCI_D3hot);
6522 return 0;
6525 static void
6526 bnx2_save_stats(struct bnx2 *bp)
6528 u32 *hw_stats = (u32 *) bp->stats_blk;
6529 u32 *temp_stats = (u32 *) bp->temp_stats_blk;
6530 int i;
6532 /* The 1st 10 counters are 64-bit counters */
6533 for (i = 0; i < 20; i += 2) {
6534 u32 hi;
6535 u64 lo;
6537 hi = temp_stats[i] + hw_stats[i];
6538 lo = (u64) temp_stats[i + 1] + (u64) hw_stats[i + 1];
6539 if (lo > 0xffffffff)
6540 hi++;
6541 temp_stats[i] = hi;
6542 temp_stats[i + 1] = lo & 0xffffffff;
6545 for ( ; i < sizeof(struct statistics_block) / 4; i++)
6546 temp_stats[i] += hw_stats[i];
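/* Worked example of the carry fold above (hypothetical values): with
 * temp_stats = {0x1, 0xffffffff} and hw_stats = {0x0, 0x2} for one
 * 64-bit counter:
 *
 *	lo = 0xffffffff + 0x2 = 0x100000001 > 0xffffffff, so hi++
 *	temp_stats becomes {0x2, 0x00000001}
 *
 * i.e. the 64-bit sum carries correctly.  Counters past the first
 * ten 64-bit ones are plain 32-bit additions.
 */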
6549 #define GET_64BIT_NET_STATS64(ctr) \
6550 (unsigned long) ((unsigned long) (ctr##_hi) << 32) + \
6551 (unsigned long) (ctr##_lo)
6553 #define GET_64BIT_NET_STATS32(ctr) \
6554 (ctr##_lo)
6556 #if (BITS_PER_LONG == 64)
6557 #define GET_64BIT_NET_STATS(ctr) \
6558 GET_64BIT_NET_STATS64(bp->stats_blk->ctr) + \
6559 GET_64BIT_NET_STATS64(bp->temp_stats_blk->ctr)
6560 #else
6561 #define GET_64BIT_NET_STATS(ctr) \
6562 GET_64BIT_NET_STATS32(bp->stats_blk->ctr) + \
6563 GET_64BIT_NET_STATS32(bp->temp_stats_blk->ctr)
6564 #endif
6566 #define GET_32BIT_NET_STATS(ctr) \
6567 (unsigned long) (bp->stats_blk->ctr + \
6568 bp->temp_stats_blk->ctr)
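/* How the macros above expand for a counter stat_X split into
 * stat_X_hi/stat_X_lo halves (conceptually):
 *
 *	64-bit kernel:	((unsigned long) stat_X_hi << 32) + stat_X_lo,
 *			summed over stats_blk and temp_stats_blk;
 *	32-bit kernel:	stat_X_lo from both blocks only, since the
 *			unsigned long fields of struct net_device_stats
 *			would truncate the high word anyway.
 */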
6570 static struct net_device_stats *
6571 bnx2_get_stats(struct net_device *dev)
6573 struct bnx2 *bp = netdev_priv(dev);
6574 struct net_device_stats *net_stats = &dev->stats;
6576 if (bp->stats_blk == NULL) {
6577 return net_stats;
6579 net_stats->rx_packets =
6580 GET_64BIT_NET_STATS(stat_IfHCInUcastPkts) +
6581 GET_64BIT_NET_STATS(stat_IfHCInMulticastPkts) +
6582 GET_64BIT_NET_STATS(stat_IfHCInBroadcastPkts);
6584 net_stats->tx_packets =
6585 GET_64BIT_NET_STATS(stat_IfHCOutUcastPkts) +
6586 GET_64BIT_NET_STATS(stat_IfHCOutMulticastPkts) +
6587 GET_64BIT_NET_STATS(stat_IfHCOutBroadcastPkts);
6589 net_stats->rx_bytes =
6590 GET_64BIT_NET_STATS(stat_IfHCInOctets);
6592 net_stats->tx_bytes =
6593 GET_64BIT_NET_STATS(stat_IfHCOutOctets);
6595 net_stats->multicast =
6596 GET_64BIT_NET_STATS(stat_IfHCOutMulticastPkts);
6598 net_stats->collisions =
6599 GET_32BIT_NET_STATS(stat_EtherStatsCollisions);
6601 net_stats->rx_length_errors =
6602 GET_32BIT_NET_STATS(stat_EtherStatsUndersizePkts) +
6603 GET_32BIT_NET_STATS(stat_EtherStatsOverrsizePkts);
6605 net_stats->rx_over_errors =
6606 GET_32BIT_NET_STATS(stat_IfInFTQDiscards) +
6607 GET_32BIT_NET_STATS(stat_IfInMBUFDiscards);
6609 net_stats->rx_frame_errors =
6610 GET_32BIT_NET_STATS(stat_Dot3StatsAlignmentErrors);
6612 net_stats->rx_crc_errors =
6613 GET_32BIT_NET_STATS(stat_Dot3StatsFCSErrors);
6615 net_stats->rx_errors = net_stats->rx_length_errors +
6616 net_stats->rx_over_errors + net_stats->rx_frame_errors +
6617 net_stats->rx_crc_errors;
6619 net_stats->tx_aborted_errors =
6620 GET_32BIT_NET_STATS(stat_Dot3StatsExcessiveCollisions) +
6621 GET_32BIT_NET_STATS(stat_Dot3StatsLateCollisions);
6623 if ((CHIP_NUM(bp) == CHIP_NUM_5706) ||
6624 (CHIP_ID(bp) == CHIP_ID_5708_A0))
6625 net_stats->tx_carrier_errors = 0;
6626 else {
6627 net_stats->tx_carrier_errors =
6628 GET_32BIT_NET_STATS(stat_Dot3StatsCarrierSenseErrors);
6631 net_stats->tx_errors =
6632 GET_32BIT_NET_STATS(stat_emac_tx_stat_dot3statsinternalmactransmiterrors) +
6633 net_stats->tx_aborted_errors +
6634 net_stats->tx_carrier_errors;
6636 net_stats->rx_missed_errors =
6637 GET_32BIT_NET_STATS(stat_IfInFTQDiscards) +
6638 GET_32BIT_NET_STATS(stat_IfInMBUFDiscards) +
6639 GET_32BIT_NET_STATS(stat_FwRxDrop);
6641 return net_stats;
6644 /* All ethtool functions called with rtnl_lock */
6646 static int
6647 bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
6649 struct bnx2 *bp = netdev_priv(dev);
6650 int support_serdes = 0, support_copper = 0;
6652 cmd->supported = SUPPORTED_Autoneg;
6653 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
6654 support_serdes = 1;
6655 support_copper = 1;
6656 } else if (bp->phy_port == PORT_FIBRE)
6657 support_serdes = 1;
6658 else
6659 support_copper = 1;
6661 if (support_serdes) {
6662 cmd->supported |= SUPPORTED_1000baseT_Full |
6663 SUPPORTED_FIBRE;
6664 if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
6665 cmd->supported |= SUPPORTED_2500baseX_Full;
6668 if (support_copper) {
6669 cmd->supported |= SUPPORTED_10baseT_Half |
6670 SUPPORTED_10baseT_Full |
6671 SUPPORTED_100baseT_Half |
6672 SUPPORTED_100baseT_Full |
6673 SUPPORTED_1000baseT_Full |
6674 SUPPORTED_TP;
6678 spin_lock_bh(&bp->phy_lock);
6679 cmd->port = bp->phy_port;
6680 cmd->advertising = bp->advertising;
6682 if (bp->autoneg & AUTONEG_SPEED) {
6683 cmd->autoneg = AUTONEG_ENABLE;
6685 else {
6686 cmd->autoneg = AUTONEG_DISABLE;
6689 if (netif_carrier_ok(dev)) {
6690 cmd->speed = bp->line_speed;
6691 cmd->duplex = bp->duplex;
6693 else {
6694 cmd->speed = -1;
6695 cmd->duplex = -1;
6697 spin_unlock_bh(&bp->phy_lock);
6699 cmd->transceiver = XCVR_INTERNAL;
6700 cmd->phy_address = bp->phy_addr;
6702 return 0;
6705 static int
6706 bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
6708 struct bnx2 *bp = netdev_priv(dev);
6709 u8 autoneg = bp->autoneg;
6710 u8 req_duplex = bp->req_duplex;
6711 u16 req_line_speed = bp->req_line_speed;
6712 u32 advertising = bp->advertising;
6713 int err = -EINVAL;
6715 spin_lock_bh(&bp->phy_lock);
6717 if (cmd->port != PORT_TP && cmd->port != PORT_FIBRE)
6718 goto err_out_unlock;
6720 if (cmd->port != bp->phy_port &&
6721 !(bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP))
6722 goto err_out_unlock;
6724 /* If device is down, we can store the settings only if the user
6725 * is setting the currently active port.
6726 */
6727 if (!netif_running(dev) && cmd->port != bp->phy_port)
6728 goto err_out_unlock;
6730 if (cmd->autoneg == AUTONEG_ENABLE) {
6731 autoneg |= AUTONEG_SPEED;
6733 advertising = cmd->advertising;
6734 if (cmd->port == PORT_TP) {
6735 advertising &= ETHTOOL_ALL_COPPER_SPEED;
6736 if (!advertising)
6737 advertising = ETHTOOL_ALL_COPPER_SPEED;
6738 } else {
6739 advertising &= ETHTOOL_ALL_FIBRE_SPEED;
6740 if (!advertising)
6741 advertising = ETHTOOL_ALL_FIBRE_SPEED;
6743 advertising |= ADVERTISED_Autoneg;
6745 else {
6746 if (cmd->port == PORT_FIBRE) {
6747 if ((cmd->speed != SPEED_1000 &&
6748 cmd->speed != SPEED_2500) ||
6749 (cmd->duplex != DUPLEX_FULL))
6750 goto err_out_unlock;
6752 if (cmd->speed == SPEED_2500 &&
6753 !(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
6754 goto err_out_unlock;
6756 else if (cmd->speed == SPEED_1000 || cmd->speed == SPEED_2500)
6757 goto err_out_unlock;
6759 autoneg &= ~AUTONEG_SPEED;
6760 req_line_speed = cmd->speed;
6761 req_duplex = cmd->duplex;
6762 advertising = 0;
6765 bp->autoneg = autoneg;
6766 bp->advertising = advertising;
6767 bp->req_line_speed = req_line_speed;
6768 bp->req_duplex = req_duplex;
6770 err = 0;
6771 /* If device is down, the new settings will be picked up when it is
6772 * brought up.
6773 */
6774 if (netif_running(dev))
6775 err = bnx2_setup_phy(bp, cmd->port);
6777 err_out_unlock:
6778 spin_unlock_bh(&bp->phy_lock);
6780 return err;
6783 static void
6784 bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
6786 struct bnx2 *bp = netdev_priv(dev);
6788 strcpy(info->driver, DRV_MODULE_NAME);
6789 strcpy(info->version, DRV_MODULE_VERSION);
6790 strcpy(info->bus_info, pci_name(bp->pdev));
6791 strcpy(info->fw_version, bp->fw_version);
6794 #define BNX2_REGDUMP_LEN (32 * 1024)
6796 static int
6797 bnx2_get_regs_len(struct net_device *dev)
6799 return BNX2_REGDUMP_LEN;
6802 static void
6803 bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
6805 u32 *p = _p, i, offset;
6806 u8 *orig_p = _p;
6807 struct bnx2 *bp = netdev_priv(dev);
6808 u32 reg_boundaries[] = { 0x0000, 0x0098, 0x0400, 0x045c,
6809 0x0800, 0x0880, 0x0c00, 0x0c10,
6810 0x0c30, 0x0d08, 0x1000, 0x101c,
6811 0x1040, 0x1048, 0x1080, 0x10a4,
6812 0x1400, 0x1490, 0x1498, 0x14f0,
6813 0x1500, 0x155c, 0x1580, 0x15dc,
6814 0x1600, 0x1658, 0x1680, 0x16d8,
6815 0x1800, 0x1820, 0x1840, 0x1854,
6816 0x1880, 0x1894, 0x1900, 0x1984,
6817 0x1c00, 0x1c0c, 0x1c40, 0x1c54,
6818 0x1c80, 0x1c94, 0x1d00, 0x1d84,
6819 0x2000, 0x2030, 0x23c0, 0x2400,
6820 0x2800, 0x2820, 0x2830, 0x2850,
6821 0x2b40, 0x2c10, 0x2fc0, 0x3058,
6822 0x3c00, 0x3c94, 0x4000, 0x4010,
6823 0x4080, 0x4090, 0x43c0, 0x4458,
6824 0x4c00, 0x4c18, 0x4c40, 0x4c54,
6825 0x4fc0, 0x5010, 0x53c0, 0x5444,
6826 0x5c00, 0x5c18, 0x5c80, 0x5c90,
6827 0x5fc0, 0x6000, 0x6400, 0x6428,
6828 0x6800, 0x6848, 0x684c, 0x6860,
6829 0x6888, 0x6910, 0x8000 };
6831 regs->version = 0;
6833 memset(p, 0, BNX2_REGDUMP_LEN);
6835 if (!netif_running(bp->dev))
6836 return;
6838 i = 0;
6839 offset = reg_boundaries[0];
6840 p += offset;
6841 while (offset < BNX2_REGDUMP_LEN) {
6842 *p++ = REG_RD(bp, offset);
6843 offset += 4;
6844 if (offset == reg_boundaries[i + 1]) {
6845 offset = reg_boundaries[i + 2];
6846 p = (u32 *) (orig_p + offset);
6847 i += 2;
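/* The register dump above is sparse: reg_boundaries holds
 * {start, end} pairs, the loop reads 32-bit registers from one start
 * offset until it hits the matching end, then jumps p to the next
 * start so unreadable holes stay zero (the buffer was memset above).
 * E.g. after reading 0x0000..0x0094 it continues at offset 0x0400.
 */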
6852 static void
6853 bnx2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
6855 struct bnx2 *bp = netdev_priv(dev);
6857 if (bp->flags & BNX2_FLAG_NO_WOL) {
6858 wol->supported = 0;
6859 wol->wolopts = 0;
6861 else {
6862 wol->supported = WAKE_MAGIC;
6863 if (bp->wol)
6864 wol->wolopts = WAKE_MAGIC;
6865 else
6866 wol->wolopts = 0;
6868 memset(&wol->sopass, 0, sizeof(wol->sopass));
6871 static int
6872 bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
6874 struct bnx2 *bp = netdev_priv(dev);
6876 if (wol->wolopts & ~WAKE_MAGIC)
6877 return -EINVAL;
6879 if (wol->wolopts & WAKE_MAGIC) {
6880 if (bp->flags & BNX2_FLAG_NO_WOL)
6881 return -EINVAL;
6883 bp->wol = 1;
6885 else {
6886 bp->wol = 0;
6888 return 0;
6891 static int
6892 bnx2_nway_reset(struct net_device *dev)
6894 struct bnx2 *bp = netdev_priv(dev);
6895 u32 bmcr;
6897 if (!netif_running(dev))
6898 return -EAGAIN;
6900 if (!(bp->autoneg & AUTONEG_SPEED)) {
6901 return -EINVAL;
6904 spin_lock_bh(&bp->phy_lock);
6906 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
6907 int rc;
6909 rc = bnx2_setup_remote_phy(bp, bp->phy_port);
6910 spin_unlock_bh(&bp->phy_lock);
6911 return rc;
6914 /* Force a link-down event that is visible to the other side */
6915 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
6916 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
6917 spin_unlock_bh(&bp->phy_lock);
6919 msleep(20);
6921 spin_lock_bh(&bp->phy_lock);
6923 bp->current_interval = BNX2_SERDES_AN_TIMEOUT;
6924 bp->serdes_an_pending = 1;
6925 mod_timer(&bp->timer, jiffies + bp->current_interval);
6928 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
6929 bmcr &= ~BMCR_LOOPBACK;
6930 bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART | BMCR_ANENABLE);
6932 spin_unlock_bh(&bp->phy_lock);
6934 return 0;
6937 static u32
6938 bnx2_get_link(struct net_device *dev)
6940 struct bnx2 *bp = netdev_priv(dev);
6942 return bp->link_up;
6945 static int
6946 bnx2_get_eeprom_len(struct net_device *dev)
6948 struct bnx2 *bp = netdev_priv(dev);
6950 if (bp->flash_info == NULL)
6951 return 0;
6953 return (int) bp->flash_size;
6956 static int
6957 bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
6958 u8 *eebuf)
6960 struct bnx2 *bp = netdev_priv(dev);
6961 int rc;
6963 if (!netif_running(dev))
6964 return -EAGAIN;
6966 /* parameters already validated in ethtool_get_eeprom */
6968 rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
6970 return rc;
6973 static int
6974 bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
6975 u8 *eebuf)
6977 struct bnx2 *bp = netdev_priv(dev);
6978 int rc;
6980 if (!netif_running(dev))
6981 return -EAGAIN;
6983 /* parameters already validated in ethtool_set_eeprom */
6985 rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
6987 return rc;
6990 static int
6991 bnx2_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
6993 struct bnx2 *bp = netdev_priv(dev);
6995 memset(coal, 0, sizeof(struct ethtool_coalesce));
6997 coal->rx_coalesce_usecs = bp->rx_ticks;
6998 coal->rx_max_coalesced_frames = bp->rx_quick_cons_trip;
6999 coal->rx_coalesce_usecs_irq = bp->rx_ticks_int;
7000 coal->rx_max_coalesced_frames_irq = bp->rx_quick_cons_trip_int;
7002 coal->tx_coalesce_usecs = bp->tx_ticks;
7003 coal->tx_max_coalesced_frames = bp->tx_quick_cons_trip;
7004 coal->tx_coalesce_usecs_irq = bp->tx_ticks_int;
7005 coal->tx_max_coalesced_frames_irq = bp->tx_quick_cons_trip_int;
7007 coal->stats_block_coalesce_usecs = bp->stats_ticks;
7009 return 0;
7012 static int
7013 bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
7015 struct bnx2 *bp = netdev_priv(dev);
7017 bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
7018 if (bp->rx_ticks > 0x3ff) bp->rx_ticks = 0x3ff;
7020 bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames;
7021 if (bp->rx_quick_cons_trip > 0xff) bp->rx_quick_cons_trip = 0xff;
7023 bp->rx_ticks_int = (u16) coal->rx_coalesce_usecs_irq;
7024 if (bp->rx_ticks_int > 0x3ff) bp->rx_ticks_int = 0x3ff;
7026 bp->rx_quick_cons_trip_int = (u16) coal->rx_max_coalesced_frames_irq;
7027 if (bp->rx_quick_cons_trip_int > 0xff)
7028 bp->rx_quick_cons_trip_int = 0xff;
7030 bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
7031 if (bp->tx_ticks > 0x3ff) bp->tx_ticks = 0x3ff;
7033 bp->tx_quick_cons_trip = (u16) coal->tx_max_coalesced_frames;
7034 if (bp->tx_quick_cons_trip > 0xff) bp->tx_quick_cons_trip = 0xff;
7036 bp->tx_ticks_int = (u16) coal->tx_coalesce_usecs_irq;
7037 if (bp->tx_ticks_int > 0x3ff) bp->tx_ticks_int = 0x3ff;
7039 bp->tx_quick_cons_trip_int = (u16) coal->tx_max_coalesced_frames_irq;
7040 if (bp->tx_quick_cons_trip_int > 0xff) bp->tx_quick_cons_trip_int =
7041 0xff;
7043 bp->stats_ticks = coal->stats_block_coalesce_usecs;
7044 if (bp->flags & BNX2_FLAG_BROKEN_STATS) {
7045 if (bp->stats_ticks != 0 && bp->stats_ticks != USEC_PER_SEC)
7046 bp->stats_ticks = USEC_PER_SEC;
7048 if (bp->stats_ticks > BNX2_HC_STATS_TICKS_HC_STAT_TICKS)
7049 bp->stats_ticks = BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
7050 bp->stats_ticks &= BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
7052 if (netif_running(bp->dev)) {
7053 bnx2_netif_stop(bp);
7054 bnx2_init_nic(bp, 0);
7055 bnx2_netif_start(bp);
7058 return 0;
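/* The clamps above evidently mirror hardware field widths: tick
 * values are 10-bit (max 0x3ff usec) and frame-count trip points are
 * 8-bit (max 0xff).  On chips flagged BNX2_FLAG_BROKEN_STATS the
 * statistics block may only be coalesced at exactly one-second
 * intervals (or disabled with 0).
 */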
7061 static void
7062 bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
7064 struct bnx2 *bp = netdev_priv(dev);
7066 ering->rx_max_pending = MAX_TOTAL_RX_DESC_CNT;
7067 ering->rx_mini_max_pending = 0;
7068 ering->rx_jumbo_max_pending = MAX_TOTAL_RX_PG_DESC_CNT;
7070 ering->rx_pending = bp->rx_ring_size;
7071 ering->rx_mini_pending = 0;
7072 ering->rx_jumbo_pending = bp->rx_pg_ring_size;
7074 ering->tx_max_pending = MAX_TX_DESC_CNT;
7075 ering->tx_pending = bp->tx_ring_size;
7078 static int
7079 bnx2_change_ring_size(struct bnx2 *bp, u32 rx, u32 tx)
7081 if (netif_running(bp->dev)) {
7082 /* Reset will erase chipset stats; save them */
7083 bnx2_save_stats(bp);
7085 bnx2_netif_stop(bp);
7086 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
7087 bnx2_free_skbs(bp);
7088 bnx2_free_mem(bp);
7091 bnx2_set_rx_ring_size(bp, rx);
7092 bp->tx_ring_size = tx;
7094 if (netif_running(bp->dev)) {
7095 int rc;
7097 rc = bnx2_alloc_mem(bp);
7098 if (!rc)
7099 rc = bnx2_init_nic(bp, 0);
7101 if (rc) {
7102 bnx2_napi_enable(bp);
7103 dev_close(bp->dev);
7104 return rc;
7106 #ifdef BCM_CNIC
7107 mutex_lock(&bp->cnic_lock);
7108 /* Let cnic know about the new status block. */
7109 if (bp->cnic_eth_dev.drv_state & CNIC_DRV_STATE_REGD)
7110 bnx2_setup_cnic_irq_info(bp);
7111 mutex_unlock(&bp->cnic_lock);
7112 #endif
7113 bnx2_netif_start(bp);
7115 return 0;
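/* Resizing a running interface above is a full reinit: statistics
 * are saved first (the chip reset clears them), traffic is stopped,
 * buffers and descriptor memory are freed, the new ring sizes are
 * latched, and memory and NIC state are rebuilt.  On failure the
 * device is closed rather than left half-initialized.
 */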
7118 static int
7119 bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
7121 struct bnx2 *bp = netdev_priv(dev);
7122 int rc;
7124 if ((ering->rx_pending > MAX_TOTAL_RX_DESC_CNT) ||
7125 (ering->tx_pending > MAX_TX_DESC_CNT) ||
7126 (ering->tx_pending <= MAX_SKB_FRAGS)) {
7128 return -EINVAL;
7130 rc = bnx2_change_ring_size(bp, ering->rx_pending, ering->tx_pending);
7131 return rc;
7134 static void
7135 bnx2_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
7137 struct bnx2 *bp = netdev_priv(dev);
7139 epause->autoneg = ((bp->autoneg & AUTONEG_FLOW_CTRL) != 0);
7140 epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) != 0);
7141 epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) != 0);
7144 static int
7145 bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
7147 struct bnx2 *bp = netdev_priv(dev);
7149 bp->req_flow_ctrl = 0;
7150 if (epause->rx_pause)
7151 bp->req_flow_ctrl |= FLOW_CTRL_RX;
7152 if (epause->tx_pause)
7153 bp->req_flow_ctrl |= FLOW_CTRL_TX;
7155 if (epause->autoneg) {
7156 bp->autoneg |= AUTONEG_FLOW_CTRL;
7158 else {
7159 bp->autoneg &= ~AUTONEG_FLOW_CTRL;
7162 if (netif_running(dev)) {
7163 spin_lock_bh(&bp->phy_lock);
7164 bnx2_setup_phy(bp, bp->phy_port);
7165 spin_unlock_bh(&bp->phy_lock);
7168 return 0;
7171 static u32
7172 bnx2_get_rx_csum(struct net_device *dev)
7174 struct bnx2 *bp = netdev_priv(dev);
7176 return bp->rx_csum;
7179 static int
7180 bnx2_set_rx_csum(struct net_device *dev, u32 data)
7182 struct bnx2 *bp = netdev_priv(dev);
7184 bp->rx_csum = data;
7185 return 0;
7188 static int
7189 bnx2_set_tso(struct net_device *dev, u32 data)
7191 struct bnx2 *bp = netdev_priv(dev);
7193 if (data) {
7194 dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
7195 if (CHIP_NUM(bp) == CHIP_NUM_5709)
7196 dev->features |= NETIF_F_TSO6;
7197 } else
7198 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6 |
7199 NETIF_F_TSO_ECN);
7200 return 0;
7203 static struct {
7204 char string[ETH_GSTRING_LEN];
7205 } bnx2_stats_str_arr[] = {
7206 { "rx_bytes" },
7207 { "rx_error_bytes" },
7208 { "tx_bytes" },
7209 { "tx_error_bytes" },
7210 { "rx_ucast_packets" },
7211 { "rx_mcast_packets" },
7212 { "rx_bcast_packets" },
7213 { "tx_ucast_packets" },
7214 { "tx_mcast_packets" },
7215 { "tx_bcast_packets" },
7216 { "tx_mac_errors" },
7217 { "tx_carrier_errors" },
7218 { "rx_crc_errors" },
7219 { "rx_align_errors" },
7220 { "tx_single_collisions" },
7221 { "tx_multi_collisions" },
7222 { "tx_deferred" },
7223 { "tx_excess_collisions" },
7224 { "tx_late_collisions" },
7225 { "tx_total_collisions" },
7226 { "rx_fragments" },
7227 { "rx_jabbers" },
7228 { "rx_undersize_packets" },
7229 { "rx_oversize_packets" },
7230 { "rx_64_byte_packets" },
7231 { "rx_65_to_127_byte_packets" },
7232 { "rx_128_to_255_byte_packets" },
7233 { "rx_256_to_511_byte_packets" },
7234 { "rx_512_to_1023_byte_packets" },
7235 { "rx_1024_to_1522_byte_packets" },
7236 { "rx_1523_to_9022_byte_packets" },
7237 { "tx_64_byte_packets" },
7238 { "tx_65_to_127_byte_packets" },
7239 { "tx_128_to_255_byte_packets" },
7240 { "tx_256_to_511_byte_packets" },
7241 { "tx_512_to_1023_byte_packets" },
7242 { "tx_1024_to_1522_byte_packets" },
7243 { "tx_1523_to_9022_byte_packets" },
7244 { "rx_xon_frames" },
7245 { "rx_xoff_frames" },
7246 { "tx_xon_frames" },
7247 { "tx_xoff_frames" },
7248 { "rx_mac_ctrl_frames" },
7249 { "rx_filtered_packets" },
7250 { "rx_ftq_discards" },
7251 { "rx_discards" },
7252 { "rx_fw_discards" },
7255 #define BNX2_NUM_STATS ARRAY_SIZE(bnx2_stats_str_arr)
7258 #define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4)
7260 static const unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
7261 STATS_OFFSET32(stat_IfHCInOctets_hi),
7262 STATS_OFFSET32(stat_IfHCInBadOctets_hi),
7263 STATS_OFFSET32(stat_IfHCOutOctets_hi),
7264 STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
7265 STATS_OFFSET32(stat_IfHCInUcastPkts_hi),
7266 STATS_OFFSET32(stat_IfHCInMulticastPkts_hi),
7267 STATS_OFFSET32(stat_IfHCInBroadcastPkts_hi),
7268 STATS_OFFSET32(stat_IfHCOutUcastPkts_hi),
7269 STATS_OFFSET32(stat_IfHCOutMulticastPkts_hi),
7270 STATS_OFFSET32(stat_IfHCOutBroadcastPkts_hi),
7271 STATS_OFFSET32(stat_emac_tx_stat_dot3statsinternalmactransmiterrors),
7272 STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
7273 STATS_OFFSET32(stat_Dot3StatsFCSErrors),
7274 STATS_OFFSET32(stat_Dot3StatsAlignmentErrors),
7275 STATS_OFFSET32(stat_Dot3StatsSingleCollisionFrames),
7276 STATS_OFFSET32(stat_Dot3StatsMultipleCollisionFrames),
7277 STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
7278 STATS_OFFSET32(stat_Dot3StatsExcessiveCollisions),
7279 STATS_OFFSET32(stat_Dot3StatsLateCollisions),
7280 STATS_OFFSET32(stat_EtherStatsCollisions),
7281 STATS_OFFSET32(stat_EtherStatsFragments),
7282 STATS_OFFSET32(stat_EtherStatsJabbers),
7283 STATS_OFFSET32(stat_EtherStatsUndersizePkts),
7284 STATS_OFFSET32(stat_EtherStatsOverrsizePkts),
7285 STATS_OFFSET32(stat_EtherStatsPktsRx64Octets),
7286 STATS_OFFSET32(stat_EtherStatsPktsRx65Octetsto127Octets),
7287 STATS_OFFSET32(stat_EtherStatsPktsRx128Octetsto255Octets),
7288 STATS_OFFSET32(stat_EtherStatsPktsRx256Octetsto511Octets),
7289 STATS_OFFSET32(stat_EtherStatsPktsRx512Octetsto1023Octets),
7290 STATS_OFFSET32(stat_EtherStatsPktsRx1024Octetsto1522Octets),
7291 STATS_OFFSET32(stat_EtherStatsPktsRx1523Octetsto9022Octets),
7292 STATS_OFFSET32(stat_EtherStatsPktsTx64Octets),
7293 STATS_OFFSET32(stat_EtherStatsPktsTx65Octetsto127Octets),
7294 STATS_OFFSET32(stat_EtherStatsPktsTx128Octetsto255Octets),
7295 STATS_OFFSET32(stat_EtherStatsPktsTx256Octetsto511Octets),
7296 STATS_OFFSET32(stat_EtherStatsPktsTx512Octetsto1023Octets),
7297 STATS_OFFSET32(stat_EtherStatsPktsTx1024Octetsto1522Octets),
7298 STATS_OFFSET32(stat_EtherStatsPktsTx1523Octetsto9022Octets),
7299 STATS_OFFSET32(stat_XonPauseFramesReceived),
7300 STATS_OFFSET32(stat_XoffPauseFramesReceived),
7301 STATS_OFFSET32(stat_OutXonSent),
7302 STATS_OFFSET32(stat_OutXoffSent),
7303 STATS_OFFSET32(stat_MacControlFramesReceived),
7304 STATS_OFFSET32(stat_IfInFramesL2FilterDiscards),
7305 STATS_OFFSET32(stat_IfInFTQDiscards),
7306 STATS_OFFSET32(stat_IfInMBUFDiscards),
7307 STATS_OFFSET32(stat_FwRxDrop),
7310 /* stat_IfHCInBadOctets and stat_Dot3StatsCarrierSenseErrors are
7311 * skipped because of errata.
7312 */
7313 static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
7314 8,0,8,8,8,8,8,8,8,8,
7315 4,0,4,4,4,4,4,4,4,4,
7316 4,4,4,4,4,4,4,4,4,4,
7317 4,4,4,4,4,4,4,4,4,4,
7318 4,4,4,4,4,4,4,
7321 static u8 bnx2_5708_stats_len_arr[BNX2_NUM_STATS] = {
7322 8,0,8,8,8,8,8,8,8,8,
7323 4,4,4,4,4,4,4,4,4,4,
7324 4,4,4,4,4,4,4,4,4,4,
7325 4,4,4,4,4,4,4,4,4,4,
7326 4,4,4,4,4,4,4,
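/* Each entry in the two tables above gives the width in bytes of the
 * counter at the same index in bnx2_stats_offset_arr: 8 = 64-bit,
 * 4 = 32-bit, 0 = skipped on that chip.  Both tables zero
 * stat_IfHCInBadOctets; the 5706-class table also zeroes
 * stat_Dot3StatsCarrierSenseErrors, per the errata note above.
 */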
7329 #define BNX2_NUM_TESTS 6
7331 static struct {
7332 char string[ETH_GSTRING_LEN];
7333 } bnx2_tests_str_arr[BNX2_NUM_TESTS] = {
7334 { "register_test (offline)" },
7335 { "memory_test (offline)" },
7336 { "loopback_test (offline)" },
7337 { "nvram_test (online)" },
7338 { "interrupt_test (online)" },
7339 { "link_test (online)" },
7342 static int
7343 bnx2_get_sset_count(struct net_device *dev, int sset)
7345 switch (sset) {
7346 case ETH_SS_TEST:
7347 return BNX2_NUM_TESTS;
7348 case ETH_SS_STATS:
7349 return BNX2_NUM_STATS;
7350 default:
7351 return -EOPNOTSUPP;
7355 static void
7356 bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
7358 struct bnx2 *bp = netdev_priv(dev);
7360 bnx2_set_power_state(bp, PCI_D0);
7362 memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS);
7363 if (etest->flags & ETH_TEST_FL_OFFLINE) {
7364 int i;
7366 bnx2_netif_stop(bp);
7367 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
7368 bnx2_free_skbs(bp);
7370 if (bnx2_test_registers(bp) != 0) {
7371 buf[0] = 1;
7372 etest->flags |= ETH_TEST_FL_FAILED;
7374 if (bnx2_test_memory(bp) != 0) {
7375 buf[1] = 1;
7376 etest->flags |= ETH_TEST_FL_FAILED;
7378 if ((buf[2] = bnx2_test_loopback(bp)) != 0)
7379 etest->flags |= ETH_TEST_FL_FAILED;
7381 if (!netif_running(bp->dev))
7382 bnx2_shutdown_chip(bp);
7383 else {
7384 bnx2_init_nic(bp, 1);
7385 bnx2_netif_start(bp);
7388 /* wait for link up */
7389 for (i = 0; i < 7; i++) {
7390 if (bp->link_up)
7391 break;
7392 msleep_interruptible(1000);
7396 if (bnx2_test_nvram(bp) != 0) {
7397 buf[3] = 1;
7398 etest->flags |= ETH_TEST_FL_FAILED;
7400 if (bnx2_test_intr(bp) != 0) {
7401 buf[4] = 1;
7402 etest->flags |= ETH_TEST_FL_FAILED;
7405 if (bnx2_test_link(bp) != 0) {
7406 buf[5] = 1;
7407 etest->flags |= ETH_TEST_FL_FAILED;
7410 if (!netif_running(bp->dev))
7411 bnx2_set_power_state(bp, PCI_D3hot);
7414 static void
7415 bnx2_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
7417 switch (stringset) {
7418 case ETH_SS_STATS:
7419 memcpy(buf, bnx2_stats_str_arr,
7420 sizeof(bnx2_stats_str_arr));
7421 break;
7422 case ETH_SS_TEST:
7423 memcpy(buf, bnx2_tests_str_arr,
7424 sizeof(bnx2_tests_str_arr));
7425 break;
7429 static void
7430 bnx2_get_ethtool_stats(struct net_device *dev,
7431 struct ethtool_stats *stats, u64 *buf)
7433 struct bnx2 *bp = netdev_priv(dev);
7434 int i;
7435 u32 *hw_stats = (u32 *) bp->stats_blk;
7436 u32 *temp_stats = (u32 *) bp->temp_stats_blk;
7437 u8 *stats_len_arr = NULL;
7439 if (hw_stats == NULL) {
7440 memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS);
7441 return;
7444 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
7445 (CHIP_ID(bp) == CHIP_ID_5706_A1) ||
7446 (CHIP_ID(bp) == CHIP_ID_5706_A2) ||
7447 (CHIP_ID(bp) == CHIP_ID_5708_A0))
7448 stats_len_arr = bnx2_5706_stats_len_arr;
7449 else
7450 stats_len_arr = bnx2_5708_stats_len_arr;
7452 for (i = 0; i < BNX2_NUM_STATS; i++) {
7453 unsigned long offset;
7455 if (stats_len_arr[i] == 0) {
7456 /* skip this counter */
7457 buf[i] = 0;
7458 continue;
7461 offset = bnx2_stats_offset_arr[i];
7462 if (stats_len_arr[i] == 4) {
7463 /* 4-byte counter */
7464 buf[i] = (u64) *(hw_stats + offset) +
7465 *(temp_stats + offset);
7466 continue;
7468 /* 8-byte counter */
7469 buf[i] = (((u64) *(hw_stats + offset)) << 32) +
7470 *(hw_stats + offset + 1) +
7471 (((u64) *(temp_stats + offset)) << 32) +
7472 *(temp_stats + offset + 1);
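/* Example of the 8-byte combine above (hypothetical values): a
 * counter stored as hw_stats[offset] = 0x1 (high word) and
 * hw_stats[offset + 1] = 0x5 (low word) contributes 0x100000005, and
 * the temp_stats copy saved across chip resets is folded in the same
 * way.
 */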
7476 static int
7477 bnx2_phys_id(struct net_device *dev, u32 data)
7479 struct bnx2 *bp = netdev_priv(dev);
7480 int i;
7481 u32 save;
7483 bnx2_set_power_state(bp, PCI_D0);
7485 if (data == 0)
7486 data = 2;
7488 save = REG_RD(bp, BNX2_MISC_CFG);
7489 REG_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);
7491 for (i = 0; i < (data * 2); i++) {
7492 if ((i % 2) == 0) {
7493 REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE);
7495 else {
7496 REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE |
7497 BNX2_EMAC_LED_1000MB_OVERRIDE |
7498 BNX2_EMAC_LED_100MB_OVERRIDE |
7499 BNX2_EMAC_LED_10MB_OVERRIDE |
7500 BNX2_EMAC_LED_TRAFFIC_OVERRIDE |
7501 BNX2_EMAC_LED_TRAFFIC);
7503 msleep_interruptible(500);
7504 if (signal_pending(current))
7505 break;
7507 REG_WR(bp, BNX2_EMAC_LED, 0);
7508 REG_WR(bp, BNX2_MISC_CFG, save);
7510 if (!netif_running(dev))
7511 bnx2_set_power_state(bp, PCI_D3hot);
7513 return 0;
7516 static int
7517 bnx2_set_tx_csum(struct net_device *dev, u32 data)
7519 struct bnx2 *bp = netdev_priv(dev);
7521 if (CHIP_NUM(bp) == CHIP_NUM_5709)
7522 return (ethtool_op_set_tx_ipv6_csum(dev, data));
7523 else
7524 return (ethtool_op_set_tx_csum(dev, data));
7527 static const struct ethtool_ops bnx2_ethtool_ops = {
7528 .get_settings = bnx2_get_settings,
7529 .set_settings = bnx2_set_settings,
7530 .get_drvinfo = bnx2_get_drvinfo,
7531 .get_regs_len = bnx2_get_regs_len,
7532 .get_regs = bnx2_get_regs,
7533 .get_wol = bnx2_get_wol,
7534 .set_wol = bnx2_set_wol,
7535 .nway_reset = bnx2_nway_reset,
7536 .get_link = bnx2_get_link,
7537 .get_eeprom_len = bnx2_get_eeprom_len,
7538 .get_eeprom = bnx2_get_eeprom,
7539 .set_eeprom = bnx2_set_eeprom,
7540 .get_coalesce = bnx2_get_coalesce,
7541 .set_coalesce = bnx2_set_coalesce,
7542 .get_ringparam = bnx2_get_ringparam,
7543 .set_ringparam = bnx2_set_ringparam,
7544 .get_pauseparam = bnx2_get_pauseparam,
7545 .set_pauseparam = bnx2_set_pauseparam,
7546 .get_rx_csum = bnx2_get_rx_csum,
7547 .set_rx_csum = bnx2_set_rx_csum,
7548 .set_tx_csum = bnx2_set_tx_csum,
7549 .set_sg = ethtool_op_set_sg,
7550 .set_tso = bnx2_set_tso,
7551 .self_test = bnx2_self_test,
7552 .get_strings = bnx2_get_strings,
7553 .phys_id = bnx2_phys_id,
7554 .get_ethtool_stats = bnx2_get_ethtool_stats,
7555 .get_sset_count = bnx2_get_sset_count,
7558 /* Called with rtnl_lock */
7559 static int
7560 bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
7562 struct mii_ioctl_data *data = if_mii(ifr);
7563 struct bnx2 *bp = netdev_priv(dev);
7564 int err;
7566 switch(cmd) {
7567 case SIOCGMIIPHY:
7568 data->phy_id = bp->phy_addr;
7570 /* fallthru */
7571 case SIOCGMIIREG: {
7572 u32 mii_regval;
7574 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
7575 return -EOPNOTSUPP;
7577 if (!netif_running(dev))
7578 return -EAGAIN;
7580 spin_lock_bh(&bp->phy_lock);
7581 err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval);
7582 spin_unlock_bh(&bp->phy_lock);
7584 data->val_out = mii_regval;
7586 return err;
7589 case SIOCSMIIREG:
7590 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
7591 return -EOPNOTSUPP;
7593 if (!netif_running(dev))
7594 return -EAGAIN;
7596 spin_lock_bh(&bp->phy_lock);
7597 err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in);
7598 spin_unlock_bh(&bp->phy_lock);
7600 return err;
7602 default:
7603 /* do nothing */
7604 break;
7606 return -EOPNOTSUPP;
7609 /* Called with rtnl_lock */
7610 static int
7611 bnx2_change_mac_addr(struct net_device *dev, void *p)
7613 struct sockaddr *addr = p;
7614 struct bnx2 *bp = netdev_priv(dev);
7616 if (!is_valid_ether_addr(addr->sa_data))
7617 return -EINVAL;
7619 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
7620 if (netif_running(dev))
7621 bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
7623 return 0;
7626 /* Called with rtnl_lock */
7627 static int
7628 bnx2_change_mtu(struct net_device *dev, int new_mtu)
7630 struct bnx2 *bp = netdev_priv(dev);
7632 if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) ||
7633 ((new_mtu + ETH_HLEN) < MIN_ETHERNET_PACKET_SIZE))
7634 return -EINVAL;
7636 dev->mtu = new_mtu;
7637 return (bnx2_change_ring_size(bp, bp->rx_ring_size, bp->tx_ring_size));
7640 #ifdef CONFIG_NET_POLL_CONTROLLER
7641 static void
7642 poll_bnx2(struct net_device *dev)
7644 struct bnx2 *bp = netdev_priv(dev);
7645 int i;
7647 for (i = 0; i < bp->irq_nvecs; i++) {
7648 disable_irq(bp->irq_tbl[i].vector);
7649 bnx2_interrupt(bp->irq_tbl[i].vector, &bp->bnx2_napi[i]);
7650 enable_irq(bp->irq_tbl[i].vector);
7653 #endif
7655 static void __devinit
7656 bnx2_get_5709_media(struct bnx2 *bp)
7658 u32 val = REG_RD(bp, BNX2_MISC_DUAL_MEDIA_CTRL);
7659 u32 bond_id = val & BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID;
7660 u32 strap;
7662 if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_C)
7663 return;
7664 else if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_S) {
7665 bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
7666 return;
7669 if (val & BNX2_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE)
7670 strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL) >> 21;
7671 else
7672 strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP) >> 8;
7674 if (PCI_FUNC(bp->pdev->devfn) == 0) {
7675 switch (strap) {
7676 case 0x4:
7677 case 0x5:
7678 case 0x6:
7679 bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
7680 return;
7682 } else {
7683 switch (strap) {
7684 case 0x1:
7685 case 0x2:
7686 case 0x4:
7687 bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
7688 return;
7693 static void __devinit
7694 bnx2_get_pci_speed(struct bnx2 *bp)
7696 u32 reg;
7698 reg = REG_RD(bp, BNX2_PCICFG_MISC_STATUS);
7699 if (reg & BNX2_PCICFG_MISC_STATUS_PCIX_DET) {
7700 u32 clkreg;
7702 bp->flags |= BNX2_FLAG_PCIX;
7704 clkreg = REG_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS);
7706 clkreg &= BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
7707 switch (clkreg) {
7708 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
7709 bp->bus_speed_mhz = 133;
7710 break;
7712 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
7713 bp->bus_speed_mhz = 100;
7714 break;
7716 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
7717 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
7718 bp->bus_speed_mhz = 66;
7719 break;
7721 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
7722 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
7723 bp->bus_speed_mhz = 50;
7724 break;
7726 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
7727 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
7728 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
7729 bp->bus_speed_mhz = 33;
7730 break;
7733 else {
7734 if (reg & BNX2_PCICFG_MISC_STATUS_M66EN)
7735 bp->bus_speed_mhz = 66;
7736 else
7737 bp->bus_speed_mhz = 33;
7740 if (reg & BNX2_PCICFG_MISC_STATUS_32BIT_DET)
7741 bp->flags |= BNX2_FLAG_PCI_32BIT;
7745 static void __devinit
7746 bnx2_read_vpd_fw_ver(struct bnx2 *bp)
7748 int rc, i, v0_len = 0;
7749 u8 *data;
7750 u8 *v0_str = NULL;
7751 bool mn_match = false;
7753 #define BNX2_VPD_NVRAM_OFFSET 0x300
7754 #define BNX2_VPD_LEN 128
7755 #define BNX2_MAX_VER_SLEN 30
7757 data = kmalloc(256, GFP_KERNEL);
7758 if (!data)
7759 return;
7761 rc = bnx2_nvram_read(bp, BNX2_VPD_NVRAM_OFFSET, data + BNX2_VPD_LEN,
7762 BNX2_VPD_LEN);
7763 if (rc)
7764 goto vpd_done;
7766 for (i = 0; i < BNX2_VPD_LEN; i += 4) {
7767 data[i] = data[i + BNX2_VPD_LEN + 3];
7768 data[i + 1] = data[i + BNX2_VPD_LEN + 2];
7769 data[i + 2] = data[i + BNX2_VPD_LEN + 1];
7770 data[i + 3] = data[i + BNX2_VPD_LEN];
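/* The loop above reverses each 4-byte group: data[i..i+3] receives
 * data[i + BNX2_VPD_LEN + 3] down to data[i + BNX2_VPD_LEN], undoing
 * the word-swapped byte order the VPD image evidently has in NVRAM
 * and leaving the first BNX2_VPD_LEN bytes in normal VPD byte order.
 */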
7773 for (i = 0; i <= BNX2_VPD_LEN - 3; ) {
7774 unsigned char val = data[i];
7775 unsigned int block_end;
7777 if (val == 0x82 || val == 0x91) {
7778 i = (i + 3 + (data[i + 1] + (data[i + 2] << 8)));
7779 continue;
7782 if (val != 0x90)
7783 goto vpd_done;
7785 block_end = (i + 3 + (data[i + 1] + (data[i + 2] << 8)));
7786 i += 3;
7788 if (block_end > BNX2_VPD_LEN)
7789 goto vpd_done;
7791 while (i < (block_end - 2)) {
7792 int len = data[i + 2];
7794 if (i + 3 + len > block_end)
7795 goto vpd_done;
7797 if (data[i] == 'M' && data[i + 1] == 'N') {
7798 if (len != 4 ||
7799 memcmp(&data[i + 3], "1028", 4))
7800 goto vpd_done;
7801 mn_match = true;
7803 } else if (data[i] == 'V' && data[i + 1] == '0') {
7804 if (len > BNX2_MAX_VER_SLEN)
7805 goto vpd_done;
7807 v0_len = len;
7808 v0_str = &data[i + 3];
7810 i += 3 + len;
7812 if (mn_match && v0_str) {
7813 memcpy(bp->fw_version, v0_str, v0_len);
7814 bp->fw_version[v0_len] = ' ';
7815 goto vpd_done;
7818 goto vpd_done;
7821 vpd_done:
7822 kfree(data);
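/* Sketch of the PCI VPD layout walked above (illustrative bytes):
 *
 *	0x82 len_lo len_hi "product string"	identifier tag, skipped
 *	0x90 len_lo len_hi			read-only (VPD-R) block
 *	     'M' 'N' 0x04 "1028"		manufacturer-id keyword
 *	     'V' '0' len  "version string"	vendor version keyword
 *
 * The V0 string is copied into bp->fw_version only when the MN
 * keyword matches "1028"; any malformed structure bails out through
 * vpd_done.
 */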
7825 static int __devinit
7826 bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
7828 struct bnx2 *bp;
7829 unsigned long mem_len;
7830 int rc, i, j;
7831 u32 reg;
7832 u64 dma_mask, persist_dma_mask;
7834 SET_NETDEV_DEV(dev, &pdev->dev);
7835 bp = netdev_priv(dev);
7837 bp->flags = 0;
7838 bp->phy_flags = 0;
7840 bp->temp_stats_blk =
7841 kzalloc(sizeof(struct statistics_block), GFP_KERNEL);
7843 if (bp->temp_stats_blk == NULL) {
7844 rc = -ENOMEM;
7845 goto err_out;
7848 /* enable device (incl. PCI PM wakeup), and bus-mastering */
7849 rc = pci_enable_device(pdev);
7850 if (rc) {
7851 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
7852 goto err_out;
7855 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
7856 dev_err(&pdev->dev,
7857 "Cannot find PCI device base address, aborting\n");
7858 rc = -ENODEV;
7859 goto err_out_disable;
7862 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
7863 if (rc) {
7864 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
7865 goto err_out_disable;
7868 pci_set_master(pdev);
7869 pci_save_state(pdev);
7871 bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
7872 if (bp->pm_cap == 0) {
7873 dev_err(&pdev->dev,
7874 "Cannot find power management capability, aborting\n");
7875 rc = -EIO;
7876 goto err_out_release;
7879 bp->dev = dev;
7880 bp->pdev = pdev;
7882 spin_lock_init(&bp->phy_lock);
7883 spin_lock_init(&bp->indirect_lock);
7884 #ifdef BCM_CNIC
7885 mutex_init(&bp->cnic_lock);
7886 #endif
7887 INIT_WORK(&bp->reset_task, bnx2_reset_task);
7889 dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0);
7890 mem_len = MB_GET_CID_ADDR(TX_TSS_CID + TX_MAX_TSS_RINGS + 1);
7891 dev->mem_end = dev->mem_start + mem_len;
7892 dev->irq = pdev->irq;
7894 bp->regview = ioremap_nocache(dev->base_addr, mem_len);
7896 if (!bp->regview) {
7897 dev_err(&pdev->dev, "Cannot map register space, aborting\n");
7898 rc = -ENOMEM;
7899 goto err_out_release;
7902 /* Configure byte swap and enable write to the reg_window registers.
7903 * Rely on CPU to do target byte swapping on big endian systems
7904 * The chip's target access swapping will not swap all accesses
7905 */
7906 pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG,
7907 BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
7908 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);
7910 bnx2_set_power_state(bp, PCI_D0);
7912 bp->chip_id = REG_RD(bp, BNX2_MISC_ID);
7914 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
7915 if (pci_find_capability(pdev, PCI_CAP_ID_EXP) == 0) {
7916 dev_err(&pdev->dev,
7917 "Cannot find PCIE capability, aborting\n");
7918 rc = -EIO;
7919 goto err_out_unmap;
7921 bp->flags |= BNX2_FLAG_PCIE;
7922 if (CHIP_REV(bp) == CHIP_REV_Ax)
7923 bp->flags |= BNX2_FLAG_JUMBO_BROKEN;
7924 } else {
7925 bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
7926 if (bp->pcix_cap == 0) {
7927 dev_err(&pdev->dev,
7928 "Cannot find PCIX capability, aborting\n");
7929 rc = -EIO;
7930 goto err_out_unmap;
7932 bp->flags |= BNX2_FLAG_BROKEN_STATS;
7935 if (CHIP_NUM(bp) == CHIP_NUM_5709 && CHIP_REV(bp) != CHIP_REV_Ax) {
7936 if (pci_find_capability(pdev, PCI_CAP_ID_MSIX))
7937 bp->flags |= BNX2_FLAG_MSIX_CAP;
7940 if (CHIP_ID(bp) != CHIP_ID_5706_A0 && CHIP_ID(bp) != CHIP_ID_5706_A1) {
7941 if (pci_find_capability(pdev, PCI_CAP_ID_MSI))
7942 bp->flags |= BNX2_FLAG_MSI_CAP;
7945 /* 5708 cannot support DMA addresses > 40-bit. */
7946 if (CHIP_NUM(bp) == CHIP_NUM_5708)
7947 persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
7948 else
7949 persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
7951 /* Configure DMA attributes. */
7952 if (pci_set_dma_mask(pdev, dma_mask) == 0) {
7953 dev->features |= NETIF_F_HIGHDMA;
7954 rc = pci_set_consistent_dma_mask(pdev, persist_dma_mask);
7955 if (rc) {
7956 dev_err(&pdev->dev,
7957 "pci_set_consistent_dma_mask failed, aborting\n");
7958 goto err_out_unmap;
7960 } else if ((rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) != 0) {
7961 dev_err(&pdev->dev, "System does not support DMA, aborting\n");
7962 goto err_out_unmap;
7965 if (!(bp->flags & BNX2_FLAG_PCIE))
7966 bnx2_get_pci_speed(bp);
7968 /* 5706A0 may falsely detect SERR and PERR. */
7969 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
7970 reg = REG_RD(bp, PCI_COMMAND);
7971 reg &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
7972 REG_WR(bp, PCI_COMMAND, reg);
7974 else if ((CHIP_ID(bp) == CHIP_ID_5706_A1) &&
7975 !(bp->flags & BNX2_FLAG_PCIX)) {
7977 dev_err(&pdev->dev,
7978 "5706 A1 can only be used in a PCIX bus, aborting\n");
7979 goto err_out_unmap;
7982 bnx2_init_nvram(bp);
7984 reg = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_SIGNATURE);
7986 if ((reg & BNX2_SHM_HDR_SIGNATURE_SIG_MASK) ==
7987 BNX2_SHM_HDR_SIGNATURE_SIG) {
7988 u32 off = PCI_FUNC(pdev->devfn) << 2;
7990 bp->shmem_base = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_ADDR_0 + off);
7991 } else
7992 bp->shmem_base = HOST_VIEW_SHMEM_BASE;
7994 /* Get the permanent MAC address. First we need to make sure the
7995 * firmware is actually running.
7996 */
7997 reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_SIGNATURE);
7999 if ((reg & BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
8000 BNX2_DEV_INFO_SIGNATURE_MAGIC) {
8001 dev_err(&pdev->dev, "Firmware not running, aborting\n");
8002 rc = -ENODEV;
8003 goto err_out_unmap;
8006 bnx2_read_vpd_fw_ver(bp);
8008 j = strlen(bp->fw_version);
8009 reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_BC_REV);
8010 for (i = 0; i < 3 && j < 24; i++) {
8011 u8 num, k, skip0;
8013 if (i == 0) {
8014 bp->fw_version[j++] = 'b';
8015 bp->fw_version[j++] = 'c';
8016 bp->fw_version[j++] = ' ';
8018 num = (u8) (reg >> (24 - (i * 8)));
8019 for (k = 100, skip0 = 1; k >= 1; num %= k, k /= 10) {
8020 if (num >= k || !skip0 || k == 1) {
8021 bp->fw_version[j++] = (num / k) + '0';
8022 skip0 = 0;
8025 if (i != 2)
8026 bp->fw_version[j++] = '.';
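/* Digit formatting above, by example: if BNX2_DEV_INFO_BC_REV reads
 * 0x050003xx, the three version bytes 5, 0 and 3 are printed with k
 * stepping 100 -> 10 -> 1 and leading zeros suppressed via skip0,
 * appending "bc 5.0.3" to bp->fw_version.
 */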
8028 reg = bnx2_shmem_rd(bp, BNX2_PORT_FEATURE);
8029 if (reg & BNX2_PORT_FEATURE_WOL_ENABLED)
8030 bp->wol = 1;
8032 if (reg & BNX2_PORT_FEATURE_ASF_ENABLED) {
8033 bp->flags |= BNX2_FLAG_ASF_ENABLE;
8035 for (i = 0; i < 30; i++) {
8036 reg = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
8037 if (reg & BNX2_CONDITION_MFW_RUN_MASK)
8038 break;
8039 msleep(10);
8042 reg = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
8043 reg &= BNX2_CONDITION_MFW_RUN_MASK;
8044 if (reg != BNX2_CONDITION_MFW_RUN_UNKNOWN &&
8045 reg != BNX2_CONDITION_MFW_RUN_NONE) {
8046 u32 addr = bnx2_shmem_rd(bp, BNX2_MFW_VER_PTR);
8048 if (j < 32)
8049 bp->fw_version[j++] = ' ';
8050 for (i = 0; i < 3 && j < 28; i++) {
8051 reg = bnx2_reg_rd_ind(bp, addr + i * 4);
8052 reg = swab32(reg);
8053 memcpy(&bp->fw_version[j], &reg, 4);
8054 j += 4;
8058 reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_MAC_UPPER);
8059 bp->mac_addr[0] = (u8) (reg >> 8);
8060 bp->mac_addr[1] = (u8) reg;
8062 reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_MAC_LOWER);
8063 bp->mac_addr[2] = (u8) (reg >> 24);
8064 bp->mac_addr[3] = (u8) (reg >> 16);
8065 bp->mac_addr[4] = (u8) (reg >> 8);
8066 bp->mac_addr[5] = (u8) reg;
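/* Example of the MAC recovery above (hypothetical values): the two
 * shared-memory words pack the station address big-endian, so
 * MAC_UPPER = 0x00000010 and MAC_LOWER = 0x18aabbcc yield
 * 00:10:18:aa:bb:cc (00:10:18 being a Broadcom OUI).
 */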
8068 bp->tx_ring_size = MAX_TX_DESC_CNT;
8069 bnx2_set_rx_ring_size(bp, 255);
8071 bp->rx_csum = 1;
8073 bp->tx_quick_cons_trip_int = 2;
8074 bp->tx_quick_cons_trip = 20;
8075 bp->tx_ticks_int = 18;
8076 bp->tx_ticks = 80;
8078 bp->rx_quick_cons_trip_int = 2;
8079 bp->rx_quick_cons_trip = 12;
8080 bp->rx_ticks_int = 18;
8081 bp->rx_ticks = 18;
8083 bp->stats_ticks = USEC_PER_SEC & BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
8085 bp->current_interval = BNX2_TIMER_INTERVAL;
8087 bp->phy_addr = 1;
8089 /* Disable WOL support if we are running on a SERDES chip. */
8090 if (CHIP_NUM(bp) == CHIP_NUM_5709)
8091 bnx2_get_5709_media(bp);
8092 else if (CHIP_BOND_ID(bp) & CHIP_BOND_ID_SERDES_BIT)
8093 bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
8095 bp->phy_port = PORT_TP;
8096 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
8097 bp->phy_port = PORT_FIBRE;
8098 reg = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
8099 if (!(reg & BNX2_SHARED_HW_CFG_GIG_LINK_ON_VAUX)) {
8100 bp->flags |= BNX2_FLAG_NO_WOL;
8101 bp->wol = 0;
8103 if (CHIP_NUM(bp) == CHIP_NUM_5706) {
8104 /* Don't do parallel detect on this board because of
8105 * some board problems. The link will not go down
8106 * if we do parallel detect.
8107 */
8108 if (pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
8109 pdev->subsystem_device == 0x310c)
8110 bp->phy_flags |= BNX2_PHY_FLAG_NO_PARALLEL;
8111 } else {
8112 bp->phy_addr = 2;
8113 if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G)
8114 bp->phy_flags |= BNX2_PHY_FLAG_2_5G_CAPABLE;
8116 } else if (CHIP_NUM(bp) == CHIP_NUM_5706 ||
8117 CHIP_NUM(bp) == CHIP_NUM_5708)
8118 bp->phy_flags |= BNX2_PHY_FLAG_CRC_FIX;
8119 else if (CHIP_NUM(bp) == CHIP_NUM_5709 &&
8120 (CHIP_REV(bp) == CHIP_REV_Ax ||
8121 CHIP_REV(bp) == CHIP_REV_Bx))
8122 bp->phy_flags |= BNX2_PHY_FLAG_DIS_EARLY_DAC;
8124 bnx2_init_fw_cap(bp);
8126 if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
8127 (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
8128 (CHIP_ID(bp) == CHIP_ID_5708_B1) ||
8129 !(REG_RD(bp, BNX2_PCI_CONFIG_3) & BNX2_PCI_CONFIG_3_VAUX_PRESET)) {
8130 bp->flags |= BNX2_FLAG_NO_WOL;
8131 bp->wol = 0;
8134 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
8135 bp->tx_quick_cons_trip_int =
8136 bp->tx_quick_cons_trip;
8137 bp->tx_ticks_int = bp->tx_ticks;
8138 bp->rx_quick_cons_trip_int =
8139 bp->rx_quick_cons_trip;
8140 bp->rx_ticks_int = bp->rx_ticks;
8141 bp->comp_prod_trip_int = bp->comp_prod_trip;
8142 bp->com_ticks_int = bp->com_ticks;
8143 bp->cmd_ticks_int = bp->cmd_ticks;
8146 /* Disable MSI on 5706 if AMD 8132 bridge is found.
8148 * MSI is defined to be a 32-bit write.  The 5706 does 64-bit MSI writes
8149 * with byte enables disabled on the unused 32-bit word. This is legal
8150 * but causes problems on the AMD 8132 which will eventually stop
8151 * responding after a while.
8153 * AMD believes this incompatibility is unique to the 5706, and
8154 * prefers to locally disable MSI rather than globally disabling it.
8155 */
8156 if (CHIP_NUM(bp) == CHIP_NUM_5706 && disable_msi == 0) {
8157 struct pci_dev *amd_8132 = NULL;
8159 while ((amd_8132 = pci_get_device(PCI_VENDOR_ID_AMD,
8160 PCI_DEVICE_ID_AMD_8132_BRIDGE,
8161 amd_8132))) {
8163 if (amd_8132->revision >= 0x10 &&
8164 amd_8132->revision <= 0x13) {
8165 disable_msi = 1;
8166 pci_dev_put(amd_8132);
8167 break;
8172 bnx2_set_default_link(bp);
8173 bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
8175 init_timer(&bp->timer);
8176 bp->timer.expires = RUN_AT(BNX2_TIMER_INTERVAL);
8177 bp->timer.data = (unsigned long) bp;
8178 bp->timer.function = bnx2_timer;
8180 return 0;
8182 err_out_unmap:
8183 if (bp->regview) {
8184 iounmap(bp->regview);
8185 bp->regview = NULL;
8188 err_out_release:
8189 pci_release_regions(pdev);
8191 err_out_disable:
8192 pci_disable_device(pdev);
8193 pci_set_drvdata(pdev, NULL);
8195 err_out:
8196 return rc;
8199 static char * __devinit
8200 bnx2_bus_string(struct bnx2 *bp, char *str)
8202 char *s = str;
8204 if (bp->flags & BNX2_FLAG_PCIE) {
8205 s += sprintf(s, "PCI Express");
8206 } else {
8207 s += sprintf(s, "PCI");
8208 if (bp->flags & BNX2_FLAG_PCIX)
8209 s += sprintf(s, "-X");
8210 if (bp->flags & BNX2_FLAG_PCI_32BIT)
8211 s += sprintf(s, " 32-bit");
8212 else
8213 s += sprintf(s, " 64-bit");
8214 s += sprintf(s, " %dMHz", bp->bus_speed_mhz);
8216 return str;
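/* Example strings produced above: "PCI Express", or for parallel
 * buses "PCI-X 64-bit 133MHz" and "PCI 32-bit 33MHz", depending on
 * the BNX2_FLAG_PCIX / BNX2_FLAG_PCI_32BIT flags and detected clock.
 */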
8219 static void __devinit
8220 bnx2_init_napi(struct bnx2 *bp)
8222 int i;
8224 for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
8225 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
8226 int (*poll)(struct napi_struct *, int);
8228 if (i == 0)
8229 poll = bnx2_poll;
8230 else
8231 poll = bnx2_poll_msix;
8233 netif_napi_add(bp->dev, &bp->bnx2_napi[i].napi, poll, 64);
8234 bnapi->bp = bp;
8238 static const struct net_device_ops bnx2_netdev_ops = {
8239 .ndo_open = bnx2_open,
8240 .ndo_start_xmit = bnx2_start_xmit,
8241 .ndo_stop = bnx2_close,
8242 .ndo_get_stats = bnx2_get_stats,
8243 .ndo_set_rx_mode = bnx2_set_rx_mode,
8244 .ndo_do_ioctl = bnx2_ioctl,
8245 .ndo_validate_addr = eth_validate_addr,
8246 .ndo_set_mac_address = bnx2_change_mac_addr,
8247 .ndo_change_mtu = bnx2_change_mtu,
8248 .ndo_tx_timeout = bnx2_tx_timeout,
8249 #ifdef BCM_VLAN
8250 .ndo_vlan_rx_register = bnx2_vlan_rx_register,
8251 #endif
8252 #ifdef CONFIG_NET_POLL_CONTROLLER
8253 .ndo_poll_controller = poll_bnx2,
8254 #endif
8257 static inline void vlan_features_add(struct net_device *dev, unsigned long flags)
8259 #ifdef BCM_VLAN
8260 dev->vlan_features |= flags;
8261 #endif
8264 static int __devinit
8265 bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
8267 static int version_printed = 0;
8268 struct net_device *dev = NULL;
8269 struct bnx2 *bp;
8270 int rc;
8271 char str[40];
8273 if (version_printed++ == 0)
8274 pr_info("%s", version);
8276 /* dev zeroed in init_etherdev */
8277 dev = alloc_etherdev_mq(sizeof(*bp), TX_MAX_RINGS);
8279 if (!dev)
8280 return -ENOMEM;
8282 rc = bnx2_init_board(pdev, dev);
8283 if (rc < 0) {
8284 free_netdev(dev);
8285 return rc;
8288 dev->netdev_ops = &bnx2_netdev_ops;
8289 dev->watchdog_timeo = TX_TIMEOUT;
8290 dev->ethtool_ops = &bnx2_ethtool_ops;
8292 bp = netdev_priv(dev);
8293 bnx2_init_napi(bp);
8295 pci_set_drvdata(pdev, dev);
8297 rc = bnx2_request_firmware(bp);
8298 if (rc)
8299 goto error;
8301 memcpy(dev->dev_addr, bp->mac_addr, 6);
8302 memcpy(dev->perm_addr, bp->mac_addr, 6);
8304 dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
8305 vlan_features_add(dev, NETIF_F_IP_CSUM | NETIF_F_SG);
8306 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
8307 dev->features |= NETIF_F_IPV6_CSUM;
8308 vlan_features_add(dev, NETIF_F_IPV6_CSUM);
8310 #ifdef BCM_VLAN
8311 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
8312 #endif
8313 dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
8314 vlan_features_add(dev, NETIF_F_TSO | NETIF_F_TSO_ECN);
8315 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
8316 dev->features |= NETIF_F_TSO6;
8317 vlan_features_add(dev, NETIF_F_TSO6);
8319 if ((rc = register_netdev(dev))) {
8320 dev_err(&pdev->dev, "Cannot register net device\n");
8321 goto error;
8324 netdev_info(dev, "%s (%c%d) %s found at mem %lx, IRQ %d, node addr %pM\n",
8325 board_info[ent->driver_data].name,
8326 ((CHIP_ID(bp) & 0xf000) >> 12) + 'A',
8327 ((CHIP_ID(bp) & 0x0ff0) >> 4),
8328 bnx2_bus_string(bp, str),
8329 dev->base_addr,
8330 bp->pdev->irq, dev->dev_addr);
8332 return 0;
8334 error:
8335 if (bp->mips_firmware)
8336 release_firmware(bp->mips_firmware);
8337 if (bp->rv2p_firmware)
8338 release_firmware(bp->rv2p_firmware);
8340 if (bp->regview)
8341 iounmap(bp->regview);
8342 pci_release_regions(pdev);
8343 pci_disable_device(pdev);
8344 pci_set_drvdata(pdev, NULL);
8345 free_netdev(dev);
8346 return rc;
8349 static void __devexit
8350 bnx2_remove_one(struct pci_dev *pdev)
8352 struct net_device *dev = pci_get_drvdata(pdev);
8353 struct bnx2 *bp = netdev_priv(dev);
8355 flush_scheduled_work();
8357 unregister_netdev(dev);
8359 if (bp->mips_firmware)
8360 release_firmware(bp->mips_firmware);
8361 if (bp->rv2p_firmware)
8362 release_firmware(bp->rv2p_firmware);
8364 if (bp->regview)
8365 iounmap(bp->regview);
8367 kfree(bp->temp_stats_blk);
8369 free_netdev(dev);
8370 pci_release_regions(pdev);
8371 pci_disable_device(pdev);
8372 pci_set_drvdata(pdev, NULL);
8375 static int
8376 bnx2_suspend(struct pci_dev *pdev, pm_message_t state)
8378 struct net_device *dev = pci_get_drvdata(pdev);
8379 struct bnx2 *bp = netdev_priv(dev);
8381 /* PCI register 4 needs to be saved whether netif_running() or not.
8382 * MSI address and data need to be saved if using MSI and
8383 * netif_running().
8384 */
8385 pci_save_state(pdev);
8386 if (!netif_running(dev))
8387 return 0;
8389 flush_scheduled_work();
8390 bnx2_netif_stop(bp);
8391 netif_device_detach(dev);
8392 del_timer_sync(&bp->timer);
8393 bnx2_shutdown_chip(bp);
8394 bnx2_free_skbs(bp);
8395 bnx2_set_power_state(bp, pci_choose_state(pdev, state));
8396 return 0;
8399 static int
8400 bnx2_resume(struct pci_dev *pdev)
8402 struct net_device *dev = pci_get_drvdata(pdev);
8403 struct bnx2 *bp = netdev_priv(dev);
8405 pci_restore_state(pdev);
8406 if (!netif_running(dev))
8407 return 0;
8409 bnx2_set_power_state(bp, PCI_D0);
8410 netif_device_attach(dev);
8411 bnx2_init_nic(bp, 1);
8412 bnx2_netif_start(bp);
8413 return 0;
8416 /**
8417 * bnx2_io_error_detected - called when PCI error is detected
8418 * @pdev: Pointer to PCI device
8419 * @state: The current pci connection state
8421 * This function is called after a PCI bus error affecting
8422 * this device has been detected.
8423 */
8424 static pci_ers_result_t bnx2_io_error_detected(struct pci_dev *pdev,
8425 pci_channel_state_t state)
8427 struct net_device *dev = pci_get_drvdata(pdev);
8428 struct bnx2 *bp = netdev_priv(dev);
8430 rtnl_lock();
8431 netif_device_detach(dev);
8433 if (state == pci_channel_io_perm_failure) {
8434 rtnl_unlock();
8435 return PCI_ERS_RESULT_DISCONNECT;
8438 if (netif_running(dev)) {
8439 bnx2_netif_stop(bp);
8440 del_timer_sync(&bp->timer);
8441 bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
8444 pci_disable_device(pdev);
8445 rtnl_unlock();
8447 /* Request a slot reset. */
8448 return PCI_ERS_RESULT_NEED_RESET;
8451 /**
8452 * bnx2_io_slot_reset - called after the pci bus has been reset.
8453 * @pdev: Pointer to PCI device
8455 * Restart the card from scratch, as if from a cold-boot.
8456 */
8457 static pci_ers_result_t bnx2_io_slot_reset(struct pci_dev *pdev)
8459 struct net_device *dev = pci_get_drvdata(pdev);
8460 struct bnx2 *bp = netdev_priv(dev);
8462 rtnl_lock();
8463 if (pci_enable_device(pdev)) {
8464 dev_err(&pdev->dev,
8465 "Cannot re-enable PCI device after reset\n");
8466 rtnl_unlock();
8467 return PCI_ERS_RESULT_DISCONNECT;
8469 pci_set_master(pdev);
8470 pci_restore_state(pdev);
8471 pci_save_state(pdev);
8473 if (netif_running(dev)) {
8474 bnx2_set_power_state(bp, PCI_D0);
8475 bnx2_init_nic(bp, 1);
8478 rtnl_unlock();
8479 return PCI_ERS_RESULT_RECOVERED;
8482 /**
8483 * bnx2_io_resume - called when traffic can start flowing again.
8484 * @pdev: Pointer to PCI device
8486 * This callback is called when the error recovery driver tells us that
8487 * it's OK to resume normal operation.
8488 */
8489 static void bnx2_io_resume(struct pci_dev *pdev)
8491 struct net_device *dev = pci_get_drvdata(pdev);
8492 struct bnx2 *bp = netdev_priv(dev);
8494 rtnl_lock();
8495 if (netif_running(dev))
8496 bnx2_netif_start(bp);
8498 netif_device_attach(dev);
8499 rtnl_unlock();
8502 static struct pci_error_handlers bnx2_err_handler = {
8503 .error_detected = bnx2_io_error_detected,
8504 .slot_reset = bnx2_io_slot_reset,
8505 .resume = bnx2_io_resume,
8508 static struct pci_driver bnx2_pci_driver = {
8509 .name = DRV_MODULE_NAME,
8510 .id_table = bnx2_pci_tbl,
8511 .probe = bnx2_init_one,
8512 .remove = __devexit_p(bnx2_remove_one),
8513 .suspend = bnx2_suspend,
8514 .resume = bnx2_resume,
8515 .err_handler = &bnx2_err_handler,
8518 static int __init bnx2_init(void)
8520 return pci_register_driver(&bnx2_pci_driver);
8523 static void __exit bnx2_cleanup(void)
8525 pci_unregister_driver(&bnx2_pci_driver);
8528 module_init(bnx2_init);
8529 module_exit(bnx2_cleanup);