/* bnx2.c: Broadcom NX2 network driver.
 *
 * Copyright (c) 2004-2009 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Written by: Michael Chan  (mchan@broadcom.com)
 */

#include <linux/module.h>
#include <linux/moduleparam.h>

#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <asm/page.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
#define BCM_VLAN 1
#endif
#include <net/ip.h>
#include <net/tcp.h>
#include <net/checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/prefetch.h>
#include <linux/cache.h>
#include <linux/firmware.h>
#include <linux/log2.h>
#include <linux/list.h>

#if defined(CONFIG_CNIC) || defined(CONFIG_CNIC_MODULE)
#define BCM_CNIC 1
#include "cnic_if.h"
#endif
#include "bnx2.h"
#include "bnx2_fw.h"

#define DRV_MODULE_NAME		"bnx2"
#define PFX DRV_MODULE_NAME	": "
#define DRV_MODULE_VERSION	"2.0.3"
#define DRV_MODULE_RELDATE	"Dec 03, 2009"
#define FW_MIPS_FILE_06		"bnx2/bnx2-mips-06-5.0.0.j3.fw"
#define FW_RV2P_FILE_06		"bnx2/bnx2-rv2p-06-5.0.0.j3.fw"
#define FW_MIPS_FILE_09		"bnx2/bnx2-mips-09-5.0.0.j3.fw"
#define FW_RV2P_FILE_09_Ax	"bnx2/bnx2-rv2p-09ax-5.0.0.j3.fw"
#define FW_RV2P_FILE_09		"bnx2/bnx2-rv2p-09-5.0.0.j3.fw"

#define RUN_AT(x) (jiffies + (x))

/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT  (5*HZ)

static char version[] __devinitdata =
	"Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708/5709/5716 Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FW_MIPS_FILE_06);
MODULE_FIRMWARE(FW_RV2P_FILE_06);
MODULE_FIRMWARE(FW_MIPS_FILE_09);
MODULE_FIRMWARE(FW_RV2P_FILE_09);
MODULE_FIRMWARE(FW_RV2P_FILE_09_Ax);

static int disable_msi = 0;

module_param(disable_msi, int, 0);
MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");

typedef enum {
	BCM5706 = 0,
	NC370T,
	NC370I,
	BCM5706S,
	NC370F,
	BCM5708,
	BCM5708S,
	BCM5709,
	BCM5709S,
	BCM5716,
	BCM5716S,
} board_t;

/* indexed by board_t, above */
static struct {
	char *name;
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM5706 1000Base-T" },
	{ "HP NC370T Multifunction Gigabit Server Adapter" },
	{ "HP NC370i Multifunction Gigabit Server Adapter" },
	{ "Broadcom NetXtreme II BCM5706 1000Base-SX" },
	{ "HP NC370F Multifunction Gigabit Server Adapter" },
	{ "Broadcom NetXtreme II BCM5708 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5708 1000Base-SX" },
	{ "Broadcom NetXtreme II BCM5709 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5709 1000Base-SX" },
	{ "Broadcom NetXtreme II BCM5716 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5716 1000Base-SX" },
	};

static DEFINE_PCI_DEVICE_TABLE(bnx2_pci_tbl) = {
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
	  PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709S },
	{ PCI_VENDOR_ID_BROADCOM, 0x163b,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5716 },
	{ PCI_VENDOR_ID_BROADCOM, 0x163c,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5716S },
	{ 0, }
};

static const struct flash_spec flash_table[] =
{
#define BUFFERED_FLAGS		(BNX2_NV_BUFFERED | BNX2_NV_TRANSLATE)
#define NONBUFFERED_FLAGS	(BNX2_NV_WREN)
	/* Slow EEPROM */
	{0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
	 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - slow"},
	/* Expansion entry 0001 */
	{0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0001"},
	/* Saifun SA25F010 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
	 "Non-buffered flash (128kB)"},
	/* Saifun SA25F020 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
	 "Non-buffered flash (256kB)"},
	/* Expansion entry 0100 */
	{0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0100"},
	/* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
	{0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
	 "Entry 0101: ST M45PE10 (128kB non-buffered)"},
	/* Entry 0110: ST M45PE20 (non-buffered flash)*/
	{0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
	 "Entry 0110: ST M45PE20 (256kB non-buffered)"},
	/* Saifun SA25F005 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
	 "Non-buffered flash (64kB)"},
	/* Fast EEPROM */
	{0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
	 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - fast"},
	/* Expansion entry 1001 */
	{0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1001"},
	/* Expansion entry 1010 */
	{0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1010"},
	/* ATMEL AT45DB011B (buffered flash) */
	{0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
	 "Buffered flash (128kB)"},
	/* Expansion entry 1100 */
	{0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1100"},
	/* Expansion entry 1101 */
	{0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1101"},
	/* Atmel Expansion entry 1110 */
	{0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1110 (Atmel)"},
	/* ATMEL AT45DB021B (buffered flash) */
	{0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
	 "Buffered flash (256kB)"},
};

static const struct flash_spec flash_5709 = {
	.flags		= BNX2_NV_BUFFERED,
	.page_bits	= BCM5709_FLASH_PAGE_BITS,
	.page_size	= BCM5709_FLASH_PAGE_SIZE,
	.addr_mask	= BCM5709_FLASH_BYTE_ADDR_MASK,
	.total_size	= BUFFERED_FLASH_TOTAL_SIZE*2,
	.name		= "5709 Buffered flash (256kB)",
};

MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);

static void bnx2_init_napi(struct bnx2 *bp);
static void bnx2_del_napi(struct bnx2 *bp);

static inline u32 bnx2_tx_avail(struct bnx2 *bp, struct bnx2_tx_ring_info *txr)
{
	u32 diff;

	smp_mb();

	/* The ring uses 256 indices for 255 entries, one of them
	 * needs to be skipped.
	 */
	diff = txr->tx_prod - txr->tx_cons;
	if (unlikely(diff >= TX_DESC_CNT)) {
		diff &= 0xffff;
		if (diff == TX_DESC_CNT)
			diff = MAX_TX_DESC_CNT;
	}
	return (bp->tx_ring_size - diff);
}
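
/* Worked example of the wrap-around handling above (illustrative only;
 * assumes the default bp->tx_ring_size of MAX_TX_DESC_CNT == 255, so
 * TX_DESC_CNT == 256): tx_prod and tx_cons are free-running 16-bit
 * indices.  With tx_prod == 0x0005 and tx_cons == 0xfff0 the unsigned
 * subtraction yields 0xffff0015; the 0xffff mask reduces it to 0x15
 * (21 descriptors in flight) and bnx2_tx_avail() returns 255 - 21 = 234.
 */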

static u32
bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
{
	u32 val;

	spin_lock_bh(&bp->indirect_lock);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
	val = REG_RD(bp, BNX2_PCICFG_REG_WINDOW);
	spin_unlock_bh(&bp->indirect_lock);
	return val;
}

static void
bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
{
	spin_lock_bh(&bp->indirect_lock);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
	spin_unlock_bh(&bp->indirect_lock);
}
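
/* The two helpers above implement the indirect register window: the
 * target offset is latched in BNX2_PCICFG_REG_WINDOW_ADDRESS and the
 * data then moves through BNX2_PCICFG_REG_WINDOW.  Because the address
 * and data phases are separate register accesses, every user must hold
 * indirect_lock across the whole pair, as done here.
 */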

static void
bnx2_shmem_wr(struct bnx2 *bp, u32 offset, u32 val)
{
	bnx2_reg_wr_ind(bp, bp->shmem_base + offset, val);
}

static u32
bnx2_shmem_rd(struct bnx2 *bp, u32 offset)
{
	return (bnx2_reg_rd_ind(bp, bp->shmem_base + offset));
}

static void
bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
{
	offset += cid_addr;
	spin_lock_bh(&bp->indirect_lock);
	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		int i;

		REG_WR(bp, BNX2_CTX_CTX_DATA, val);
		REG_WR(bp, BNX2_CTX_CTX_CTRL,
		       offset | BNX2_CTX_CTX_CTRL_WRITE_REQ);
		for (i = 0; i < 5; i++) {
			val = REG_RD(bp, BNX2_CTX_CTX_CTRL);
			if ((val & BNX2_CTX_CTX_CTRL_WRITE_REQ) == 0)
				break;
			udelay(5);
		}
	} else {
		REG_WR(bp, BNX2_CTX_DATA_ADR, offset);
		REG_WR(bp, BNX2_CTX_DATA, val);
	}
	spin_unlock_bh(&bp->indirect_lock);
}
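
/* On the 5709 a context write is posted: the hardware clears
 * BNX2_CTX_CTX_CTRL_WRITE_REQ once it has committed the data, so the
 * loop above polls for completion with a budget of 5 reads spaced 5 us
 * apart (roughly 25 us worst case) and gives up silently after that.
 * Older chips take the write directly through CTX_DATA_ADR/CTX_DATA.
 */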

#ifdef BCM_CNIC
static int
bnx2_drv_ctl(struct net_device *dev, struct drv_ctl_info *info)
{
	struct bnx2 *bp = netdev_priv(dev);
	struct drv_ctl_io *io = &info->data.io;

	switch (info->cmd) {
	case DRV_CTL_IO_WR_CMD:
		bnx2_reg_wr_ind(bp, io->offset, io->data);
		break;
	case DRV_CTL_IO_RD_CMD:
		io->data = bnx2_reg_rd_ind(bp, io->offset);
		break;
	case DRV_CTL_CTX_WR_CMD:
		bnx2_ctx_wr(bp, io->cid_addr, io->offset, io->data);
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

static void bnx2_setup_cnic_irq_info(struct bnx2 *bp)
{
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
	int sb_id;

	if (bp->flags & BNX2_FLAG_USING_MSIX) {
		cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
		bnapi->cnic_present = 0;
		sb_id = bp->irq_nvecs;
		cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
	} else {
		cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
		bnapi->cnic_tag = bnapi->last_status_idx;
		bnapi->cnic_present = 1;
		sb_id = 0;
		cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
	}

	cp->irq_arr[0].vector = bp->irq_tbl[sb_id].vector;
	cp->irq_arr[0].status_blk = (void *)
		((unsigned long) bnapi->status_blk.msi +
		(BNX2_SBLK_MSIX_ALIGN_SIZE * sb_id));
	cp->irq_arr[0].status_blk_num = sb_id;
	cp->num_irq = 1;
}
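
/* With MSI-X the cnic driver gets its own vector and status block,
 * placed right after the ones used by the ethernet queues (sb_id ==
 * bp->irq_nvecs); in INTx/MSI mode it shares vector 0 and the default
 * status block, so cnic_present is set and cnic_tag is seeded from the
 * last seen status index to support interrupt sharing.
 */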

static int bnx2_register_cnic(struct net_device *dev, struct cnic_ops *ops,
			      void *data)
{
	struct bnx2 *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	if (ops == NULL)
		return -EINVAL;

	if (cp->drv_state & CNIC_DRV_STATE_REGD)
		return -EBUSY;

	bp->cnic_data = data;
	rcu_assign_pointer(bp->cnic_ops, ops);

	cp->num_irq = 0;
	cp->drv_state = CNIC_DRV_STATE_REGD;

	bnx2_setup_cnic_irq_info(bp);

	return 0;
}

static int bnx2_unregister_cnic(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	mutex_lock(&bp->cnic_lock);
	cp->drv_state = 0;
	bnapi->cnic_present = 0;
	rcu_assign_pointer(bp->cnic_ops, NULL);
	mutex_unlock(&bp->cnic_lock);
	synchronize_rcu();
	return 0;
}

struct cnic_eth_dev *bnx2_cnic_probe(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	cp->drv_owner = THIS_MODULE;
	cp->chip_id = bp->chip_id;
	cp->pdev = bp->pdev;
	cp->io_base = bp->regview;
	cp->drv_ctl = bnx2_drv_ctl;
	cp->drv_register_cnic = bnx2_register_cnic;
	cp->drv_unregister_cnic = bnx2_unregister_cnic;

	return cp;
}
EXPORT_SYMBOL(bnx2_cnic_probe);

static void
bnx2_cnic_stop(struct bnx2 *bp)
{
	struct cnic_ops *c_ops;
	struct cnic_ctl_info info;

	mutex_lock(&bp->cnic_lock);
	c_ops = bp->cnic_ops;
	if (c_ops) {
		info.cmd = CNIC_CTL_STOP_CMD;
		c_ops->cnic_ctl(bp->cnic_data, &info);
	}
	mutex_unlock(&bp->cnic_lock);
}

static void
bnx2_cnic_start(struct bnx2 *bp)
{
	struct cnic_ops *c_ops;
	struct cnic_ctl_info info;

	mutex_lock(&bp->cnic_lock);
	c_ops = bp->cnic_ops;
	if (c_ops) {
		if (!(bp->flags & BNX2_FLAG_USING_MSIX)) {
			struct bnx2_napi *bnapi = &bp->bnx2_napi[0];

			bnapi->cnic_tag = bnapi->last_status_idx;
		}
		info.cmd = CNIC_CTL_START_CMD;
		c_ops->cnic_ctl(bp->cnic_data, &info);
	}
	mutex_unlock(&bp->cnic_lock);
}

#else

static void
bnx2_cnic_stop(struct bnx2 *bp)
{
}

static void
bnx2_cnic_start(struct bnx2 *bp)
{
}

#endif

static int
bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
{
	u32 val1;
	int i, ret;

	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	val1 = (bp->phy_addr << 21) | (reg << 16) |
		BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
		BNX2_EMAC_MDIO_COMM_START_BUSY;
	REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

	for (i = 0; i < 50; i++) {
		udelay(10);

		val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
			udelay(5);

			val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
			val1 &= BNX2_EMAC_MDIO_COMM_DATA;

			break;
		}
	}

	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
		*val = 0x0;
		ret = -EBUSY;
	}
	else {
		*val = val1;
		ret = 0;
	}

	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	return ret;
}

static int
bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
{
	u32 val1;
	int i, ret;

	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	val1 = (bp->phy_addr << 21) | (reg << 16) | val |
		BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
		BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
	REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

	for (i = 0; i < 50; i++) {
		udelay(10);

		val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
			udelay(5);
			break;
		}
	}

	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
		ret = -EBUSY;
	else
		ret = 0;

	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	return ret;
}
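
/* Both MDIO accessors follow the same pattern: if the EMAC is
 * auto-polling the PHY for link status, polling is paused first (the
 * read-back of BNX2_EMAC_MDIO_MODE flushes the posted write ahead of
 * the 40 us settling delay), a clause-22 frame is built with the PHY
 * address in bits 25:21 and the register number in bits 20:16, and
 * completion is detected by START_BUSY clearing within a 50 x 10 us
 * polling budget.
 */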

static void
bnx2_disable_int(struct bnx2 *bp)
{
	int i;
	struct bnx2_napi *bnapi;

	for (i = 0; i < bp->irq_nvecs; i++) {
		bnapi = &bp->bnx2_napi[i];
		REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
		       BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
	}
	REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
}

static void
bnx2_enable_int(struct bnx2 *bp)
{
	int i;
	struct bnx2_napi *bnapi;

	for (i = 0; i < bp->irq_nvecs; i++) {
		bnapi = &bp->bnx2_napi[i];

		REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
		       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
		       BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
		       bnapi->last_status_idx);

		REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
		       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
		       bnapi->last_status_idx);
	}
	REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
}
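
/* Note the double write per vector in bnx2_enable_int(): the first ACK
 * acknowledges events up to last_status_idx while keeping the interrupt
 * masked, and the second unmasks it.  The trailing COAL_NOW kick asks
 * the host coalescing block for an immediate pass so that anything
 * already pending generates an interrupt right away.
 */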

static void
bnx2_disable_int_sync(struct bnx2 *bp)
{
	int i;

	atomic_inc(&bp->intr_sem);
	if (!netif_running(bp->dev))
		return;

	bnx2_disable_int(bp);
	for (i = 0; i < bp->irq_nvecs; i++)
		synchronize_irq(bp->irq_tbl[i].vector);
}
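
/* intr_sem acts as a disable counter: bnx2_disable_int_sync() bumps it
 * and waits out any handler still running, while bnx2_netif_start()
 * below only re-enables interrupts when its atomic_dec_and_test() sees
 * the count fall back to zero, so nested stop/start pairs balance.
 */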

static void
bnx2_napi_disable(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->irq_nvecs; i++)
		napi_disable(&bp->bnx2_napi[i].napi);
}

static void
bnx2_napi_enable(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->irq_nvecs; i++)
		napi_enable(&bp->bnx2_napi[i].napi);
}

static void
bnx2_netif_stop(struct bnx2 *bp)
{
	bnx2_cnic_stop(bp);
	if (netif_running(bp->dev)) {
		int i;

		bnx2_napi_disable(bp);
		netif_tx_disable(bp->dev);
		/* prevent tx timeout */
		for (i = 0; i < bp->dev->num_tx_queues; i++) {
			struct netdev_queue *txq;

			txq = netdev_get_tx_queue(bp->dev, i);
			txq->trans_start = jiffies;
		}
	}
	bnx2_disable_int_sync(bp);
}

static void
bnx2_netif_start(struct bnx2 *bp)
{
	if (atomic_dec_and_test(&bp->intr_sem)) {
		if (netif_running(bp->dev)) {
			netif_tx_wake_all_queues(bp->dev);
			bnx2_napi_enable(bp);
			bnx2_enable_int(bp);
			bnx2_cnic_start(bp);
		}
	}
}

static void
bnx2_free_tx_mem(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->num_tx_rings; i++) {
		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
		struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;

		if (txr->tx_desc_ring) {
			pci_free_consistent(bp->pdev, TXBD_RING_SIZE,
					    txr->tx_desc_ring,
					    txr->tx_desc_mapping);
			txr->tx_desc_ring = NULL;
		}
		kfree(txr->tx_buf_ring);
		txr->tx_buf_ring = NULL;
	}
}

static void
bnx2_free_rx_mem(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->num_rx_rings; i++) {
		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
		struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
		int j;

		for (j = 0; j < bp->rx_max_ring; j++) {
			if (rxr->rx_desc_ring[j])
				pci_free_consistent(bp->pdev, RXBD_RING_SIZE,
						    rxr->rx_desc_ring[j],
						    rxr->rx_desc_mapping[j]);
			rxr->rx_desc_ring[j] = NULL;
		}
		vfree(rxr->rx_buf_ring);
		rxr->rx_buf_ring = NULL;

		for (j = 0; j < bp->rx_max_pg_ring; j++) {
			if (rxr->rx_pg_desc_ring[j])
				pci_free_consistent(bp->pdev, RXBD_RING_SIZE,
						    rxr->rx_pg_desc_ring[j],
						    rxr->rx_pg_desc_mapping[j]);
			rxr->rx_pg_desc_ring[j] = NULL;
		}
		vfree(rxr->rx_pg_ring);
		rxr->rx_pg_ring = NULL;
	}
}

static int
bnx2_alloc_tx_mem(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->num_tx_rings; i++) {
		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
		struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;

		txr->tx_buf_ring = kzalloc(SW_TXBD_RING_SIZE, GFP_KERNEL);
		if (txr->tx_buf_ring == NULL)
			return -ENOMEM;

		txr->tx_desc_ring =
			pci_alloc_consistent(bp->pdev, TXBD_RING_SIZE,
					     &txr->tx_desc_mapping);
		if (txr->tx_desc_ring == NULL)
			return -ENOMEM;
	}
	return 0;
}

static int
bnx2_alloc_rx_mem(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->num_rx_rings; i++) {
		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
		struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
		int j;

		rxr->rx_buf_ring =
			vmalloc(SW_RXBD_RING_SIZE * bp->rx_max_ring);
		if (rxr->rx_buf_ring == NULL)
			return -ENOMEM;

		memset(rxr->rx_buf_ring, 0,
		       SW_RXBD_RING_SIZE * bp->rx_max_ring);

		for (j = 0; j < bp->rx_max_ring; j++) {
			rxr->rx_desc_ring[j] =
				pci_alloc_consistent(bp->pdev, RXBD_RING_SIZE,
						     &rxr->rx_desc_mapping[j]);
			if (rxr->rx_desc_ring[j] == NULL)
				return -ENOMEM;
		}

		if (bp->rx_pg_ring_size) {
			rxr->rx_pg_ring = vmalloc(SW_RXPG_RING_SIZE *
						  bp->rx_max_pg_ring);
			if (rxr->rx_pg_ring == NULL)
				return -ENOMEM;

			memset(rxr->rx_pg_ring, 0, SW_RXPG_RING_SIZE *
			       bp->rx_max_pg_ring);
		}

		for (j = 0; j < bp->rx_max_pg_ring; j++) {
			rxr->rx_pg_desc_ring[j] =
				pci_alloc_consistent(bp->pdev, RXBD_RING_SIZE,
						     &rxr->rx_pg_desc_mapping[j]);
			if (rxr->rx_pg_desc_ring[j] == NULL)
				return -ENOMEM;
		}
	}
	return 0;
}
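
/* Allocation strategy: the software shadow rings (rx_buf_ring,
 * rx_pg_ring) only hold driver bookkeeping, so plain vmalloc() is
 * sufficient, while each descriptor page the chip DMAs from comes from
 * pci_alloc_consistent().  The early -ENOMEM returns deliberately leave
 * partially built rings behind; bnx2_alloc_mem() below unwinds
 * everything through bnx2_free_mem() on any failure.
 */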

static void
bnx2_free_mem(struct bnx2 *bp)
{
	int i;
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];

	bnx2_free_tx_mem(bp);
	bnx2_free_rx_mem(bp);

	for (i = 0; i < bp->ctx_pages; i++) {
		if (bp->ctx_blk[i]) {
			pci_free_consistent(bp->pdev, BCM_PAGE_SIZE,
					    bp->ctx_blk[i],
					    bp->ctx_blk_mapping[i]);
			bp->ctx_blk[i] = NULL;
		}
	}
	if (bnapi->status_blk.msi) {
		pci_free_consistent(bp->pdev, bp->status_stats_size,
				    bnapi->status_blk.msi,
				    bp->status_blk_mapping);
		bnapi->status_blk.msi = NULL;
		bp->stats_blk = NULL;
	}
}

static int
bnx2_alloc_mem(struct bnx2 *bp)
{
	int i, status_blk_size, err;
	struct bnx2_napi *bnapi;
	void *status_blk;

	/* Combine status and statistics blocks into one allocation. */
	status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
	if (bp->flags & BNX2_FLAG_MSIX_CAP)
		status_blk_size = L1_CACHE_ALIGN(BNX2_MAX_MSIX_HW_VEC *
						 BNX2_SBLK_MSIX_ALIGN_SIZE);
	bp->status_stats_size = status_blk_size +
				sizeof(struct statistics_block);

	status_blk = pci_alloc_consistent(bp->pdev, bp->status_stats_size,
					  &bp->status_blk_mapping);
	if (status_blk == NULL)
		goto alloc_mem_err;

	memset(status_blk, 0, bp->status_stats_size);

	bnapi = &bp->bnx2_napi[0];
	bnapi->status_blk.msi = status_blk;
	bnapi->hw_tx_cons_ptr =
		&bnapi->status_blk.msi->status_tx_quick_consumer_index0;
	bnapi->hw_rx_cons_ptr =
		&bnapi->status_blk.msi->status_rx_quick_consumer_index0;
	if (bp->flags & BNX2_FLAG_MSIX_CAP) {
		for (i = 1; i < BNX2_MAX_MSIX_VEC; i++) {
			struct status_block_msix *sblk;

			bnapi = &bp->bnx2_napi[i];

			sblk = (void *) (status_blk +
					 BNX2_SBLK_MSIX_ALIGN_SIZE * i);
			bnapi->status_blk.msix = sblk;
			bnapi->hw_tx_cons_ptr =
				&sblk->status_tx_quick_consumer_index;
			bnapi->hw_rx_cons_ptr =
				&sblk->status_rx_quick_consumer_index;
			bnapi->int_num = i << 24;
		}
	}

	bp->stats_blk = status_blk + status_blk_size;

	bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		bp->ctx_pages = 0x2000 / BCM_PAGE_SIZE;
		if (bp->ctx_pages == 0)
			bp->ctx_pages = 1;
		for (i = 0; i < bp->ctx_pages; i++) {
			bp->ctx_blk[i] = pci_alloc_consistent(bp->pdev,
						BCM_PAGE_SIZE,
						&bp->ctx_blk_mapping[i]);
			if (bp->ctx_blk[i] == NULL)
				goto alloc_mem_err;
		}
	}

	err = bnx2_alloc_rx_mem(bp);
	if (err)
		goto alloc_mem_err;

	err = bnx2_alloc_tx_mem(bp);
	if (err)
		goto alloc_mem_err;

	return 0;

alloc_mem_err:
	bnx2_free_mem(bp);
	return -ENOMEM;
}
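
/* Layout of the combined DMA allocation made above:
 *
 *   status_blk                       status_blk + status_blk_size
 *   |                                |
 *   [status block(s), MSI-X aligned] [statistics block]
 *
 * With MSI-X capability, room is reserved for BNX2_MAX_MSIX_HW_VEC
 * per-vector status blocks at BNX2_SBLK_MSIX_ALIGN_SIZE strides, each
 * bnapi's consumer pointers are aimed into its own slice, and vector i
 * is tagged in int_num as i << 24 for the INT_ACK_CMD writes.
 */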

static void
bnx2_report_fw_link(struct bnx2 *bp)
{
	u32 fw_link_status = 0;

	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return;

	if (bp->link_up) {
		u32 bmsr;

		switch (bp->line_speed) {
		case SPEED_10:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_10HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_10FULL;
			break;
		case SPEED_100:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_100HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_100FULL;
			break;
		case SPEED_1000:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_1000HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_1000FULL;
			break;
		case SPEED_2500:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_2500HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_2500FULL;
			break;
		}

		fw_link_status |= BNX2_LINK_STATUS_LINK_UP;

		if (bp->autoneg) {
			fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;

			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);

			if (!(bmsr & BMSR_ANEGCOMPLETE) ||
			    bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)
				fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
			else
				fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
		}
	}
	else
		fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;

	bnx2_shmem_wr(bp, BNX2_LINK_STATUS, fw_link_status);
}

static char *
bnx2_xceiver_str(struct bnx2 *bp)
{
	return ((bp->phy_port == PORT_FIBRE) ? "SerDes" :
		((bp->phy_flags & BNX2_PHY_FLAG_SERDES) ? "Remote Copper" :
		 "Copper"));
}

static void
bnx2_report_link(struct bnx2 *bp)
{
	if (bp->link_up) {
		netif_carrier_on(bp->dev);
		printk(KERN_INFO PFX "%s NIC %s Link is Up, ", bp->dev->name,
		       bnx2_xceiver_str(bp));

		printk("%d Mbps ", bp->line_speed);

		if (bp->duplex == DUPLEX_FULL)
			printk("full duplex");
		else
			printk("half duplex");

		if (bp->flow_ctrl) {
			if (bp->flow_ctrl & FLOW_CTRL_RX) {
				printk(", receive ");
				if (bp->flow_ctrl & FLOW_CTRL_TX)
					printk("& transmit ");
			}
			else {
				printk(", transmit ");
			}
			printk("flow control ON");
		}
		printk("\n");
	}
	else {
		netif_carrier_off(bp->dev);
		printk(KERN_ERR PFX "%s NIC %s Link is Down\n", bp->dev->name,
		       bnx2_xceiver_str(bp));
	}

	bnx2_report_fw_link(bp);
}

static void
bnx2_resolve_flow_ctrl(struct bnx2 *bp)
{
	u32 local_adv, remote_adv;

	bp->flow_ctrl = 0;
	if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
	    (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {

		if (bp->duplex == DUPLEX_FULL) {
			bp->flow_ctrl = bp->req_flow_ctrl;
		}
		return;
	}

	if (bp->duplex != DUPLEX_FULL) {
		return;
	}

	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5708)) {
		u32 val;

		bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
		if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_TX;
		if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_RX;
		return;
	}

	bnx2_read_phy(bp, bp->mii_adv, &local_adv);
	bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		u32 new_local_adv = 0;
		u32 new_remote_adv = 0;

		if (local_adv & ADVERTISE_1000XPAUSE)
			new_local_adv |= ADVERTISE_PAUSE_CAP;
		if (local_adv & ADVERTISE_1000XPSE_ASYM)
			new_local_adv |= ADVERTISE_PAUSE_ASYM;
		if (remote_adv & ADVERTISE_1000XPAUSE)
			new_remote_adv |= ADVERTISE_PAUSE_CAP;
		if (remote_adv & ADVERTISE_1000XPSE_ASYM)
			new_remote_adv |= ADVERTISE_PAUSE_ASYM;

		local_adv = new_local_adv;
		remote_adv = new_remote_adv;
	}

	/* See Table 28B-3 of 802.3ab-1999 spec. */
	if (local_adv & ADVERTISE_PAUSE_CAP) {
		if (local_adv & ADVERTISE_PAUSE_ASYM) {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
			else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
				bp->flow_ctrl = FLOW_CTRL_RX;
			}
		}
		else {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
		}
	}
	else if (local_adv & ADVERTISE_PAUSE_ASYM) {
		if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
		    (remote_adv & ADVERTISE_PAUSE_ASYM)) {

			bp->flow_ctrl = FLOW_CTRL_TX;
		}
	}
}
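
/* The resolution implemented above, written out as the usual
 * pause-negotiation truth table (CAP = symmetric pause bit, ASYM =
 * asymmetric pause bit, per Table 28B-3):
 *
 *   local CAP+ASYM, remote CAP        -> TX and RX pause
 *   local CAP+ASYM, remote ASYM only  -> RX pause only
 *   local CAP only, remote CAP        -> TX and RX pause
 *   local ASYM only, remote CAP+ASYM  -> TX pause only
 *   anything else                     -> no pause
 */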

static int
bnx2_5709s_linkup(struct bnx2 *bp)
{
	u32 val, speed;

	bp->link_up = 1;

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_GP_STATUS);
	bnx2_read_phy(bp, MII_BNX2_GP_TOP_AN_STATUS1, &val);
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	if ((bp->autoneg & AUTONEG_SPEED) == 0) {
		bp->line_speed = bp->req_line_speed;
		bp->duplex = bp->req_duplex;
		return 0;
	}
	speed = val & MII_BNX2_GP_TOP_AN_SPEED_MSK;
	switch (speed) {
	case MII_BNX2_GP_TOP_AN_SPEED_10:
		bp->line_speed = SPEED_10;
		break;
	case MII_BNX2_GP_TOP_AN_SPEED_100:
		bp->line_speed = SPEED_100;
		break;
	case MII_BNX2_GP_TOP_AN_SPEED_1G:
	case MII_BNX2_GP_TOP_AN_SPEED_1GKV:
		bp->line_speed = SPEED_1000;
		break;
	case MII_BNX2_GP_TOP_AN_SPEED_2_5G:
		bp->line_speed = SPEED_2500;
		break;
	}
	if (val & MII_BNX2_GP_TOP_AN_FD)
		bp->duplex = DUPLEX_FULL;
	else
		bp->duplex = DUPLEX_HALF;
	return 0;
}

static int
bnx2_5708s_linkup(struct bnx2 *bp)
{
	u32 val;

	bp->link_up = 1;
	bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
	switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
	case BCM5708S_1000X_STAT1_SPEED_10:
		bp->line_speed = SPEED_10;
		break;
	case BCM5708S_1000X_STAT1_SPEED_100:
		bp->line_speed = SPEED_100;
		break;
	case BCM5708S_1000X_STAT1_SPEED_1G:
		bp->line_speed = SPEED_1000;
		break;
	case BCM5708S_1000X_STAT1_SPEED_2G5:
		bp->line_speed = SPEED_2500;
		break;
	}
	if (val & BCM5708S_1000X_STAT1_FD)
		bp->duplex = DUPLEX_FULL;
	else
		bp->duplex = DUPLEX_HALF;

	return 0;
}

static int
bnx2_5706s_linkup(struct bnx2 *bp)
{
	u32 bmcr, local_adv, remote_adv, common;

	bp->link_up = 1;
	bp->line_speed = SPEED_1000;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
	if (bmcr & BMCR_FULLDPLX) {
		bp->duplex = DUPLEX_FULL;
	}
	else {
		bp->duplex = DUPLEX_HALF;
	}

	if (!(bmcr & BMCR_ANENABLE)) {
		return 0;
	}

	bnx2_read_phy(bp, bp->mii_adv, &local_adv);
	bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

	common = local_adv & remote_adv;
	if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {

		if (common & ADVERTISE_1000XFULL) {
			bp->duplex = DUPLEX_FULL;
		}
		else {
			bp->duplex = DUPLEX_HALF;
		}
	}

	return 0;
}

static int
bnx2_copper_linkup(struct bnx2 *bp)
{
	u32 bmcr;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
	if (bmcr & BMCR_ANENABLE) {
		u32 local_adv, remote_adv, common;

		bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
		bnx2_read_phy(bp, MII_STAT1000, &remote_adv);

		common = local_adv & (remote_adv >> 2);
		if (common & ADVERTISE_1000FULL) {
			bp->line_speed = SPEED_1000;
			bp->duplex = DUPLEX_FULL;
		}
		else if (common & ADVERTISE_1000HALF) {
			bp->line_speed = SPEED_1000;
			bp->duplex = DUPLEX_HALF;
		}
		else {
			bnx2_read_phy(bp, bp->mii_adv, &local_adv);
			bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

			common = local_adv & remote_adv;
			if (common & ADVERTISE_100FULL) {
				bp->line_speed = SPEED_100;
				bp->duplex = DUPLEX_FULL;
			}
			else if (common & ADVERTISE_100HALF) {
				bp->line_speed = SPEED_100;
				bp->duplex = DUPLEX_HALF;
			}
			else if (common & ADVERTISE_10FULL) {
				bp->line_speed = SPEED_10;
				bp->duplex = DUPLEX_FULL;
			}
			else if (common & ADVERTISE_10HALF) {
				bp->line_speed = SPEED_10;
				bp->duplex = DUPLEX_HALF;
			}
			else {
				bp->line_speed = 0;
				bp->link_up = 0;
			}
		}
	}
	else {
		if (bmcr & BMCR_SPEED100) {
			bp->line_speed = SPEED_100;
		}
		else {
			bp->line_speed = SPEED_10;
		}
		if (bmcr & BMCR_FULLDPLX) {
			bp->duplex = DUPLEX_FULL;
		}
		else {
			bp->duplex = DUPLEX_HALF;
		}
	}

	return 0;
}
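
/* The "remote_adv >> 2" in bnx2_copper_linkup() lines up the link
 * partner's 1000BASE-T ability bits from MII_STAT1000 (LPA_1000FULL is
 * bit 11, LPA_1000HALF bit 10) with the local advertisement bits in
 * MII_CTRL1000 (ADVERTISE_1000FULL is bit 9, ADVERTISE_1000HALF bit 8),
 * so the AND yields the usual highest-common-denominator result.
 */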

static void
bnx2_init_rx_context(struct bnx2 *bp, u32 cid)
{
	u32 val, rx_cid_addr = GET_CID_ADDR(cid);

	val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
	val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
	val |= 0x02 << 8;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		u32 lo_water, hi_water;

		if (bp->flow_ctrl & FLOW_CTRL_TX)
			lo_water = BNX2_L2CTX_LO_WATER_MARK_DEFAULT;
		else
			lo_water = BNX2_L2CTX_LO_WATER_MARK_DIS;
		if (lo_water >= bp->rx_ring_size)
			lo_water = 0;

		hi_water = bp->rx_ring_size / 4;

		if (hi_water <= lo_water)
			lo_water = 0;

		hi_water /= BNX2_L2CTX_HI_WATER_MARK_SCALE;
		lo_water /= BNX2_L2CTX_LO_WATER_MARK_SCALE;

		if (hi_water > 0xf)
			hi_water = 0xf;
		else if (hi_water == 0)
			lo_water = 0;
		val |= lo_water | (hi_water << BNX2_L2CTX_HI_WATER_MARK_SHIFT);
	}
	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_CTX_TYPE, val);
}

static void
bnx2_init_all_rx_contexts(struct bnx2 *bp)
{
	int i;
	u32 cid;

	for (i = 0, cid = RX_CID; i < bp->num_rx_rings; i++, cid++) {
		if (i == 1)
			cid = RX_RSS_CID;
		bnx2_init_rx_context(bp, cid);
	}
}

static void
bnx2_set_mac_link(struct bnx2 *bp)
{
	u32 val;

	REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
	if (bp->link_up && (bp->line_speed == SPEED_1000) &&
	    (bp->duplex == DUPLEX_HALF)) {
		REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
	}

	/* Configure the EMAC mode register. */
	val = REG_RD(bp, BNX2_EMAC_MODE);

	val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
		BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
		BNX2_EMAC_MODE_25G_MODE);

	if (bp->link_up) {
		switch (bp->line_speed) {
		case SPEED_10:
			if (CHIP_NUM(bp) != CHIP_NUM_5706) {
				val |= BNX2_EMAC_MODE_PORT_MII_10M;
				break;
			}
			/* fall through */
		case SPEED_100:
			val |= BNX2_EMAC_MODE_PORT_MII;
			break;
		case SPEED_2500:
			val |= BNX2_EMAC_MODE_25G_MODE;
			/* fall through */
		case SPEED_1000:
			val |= BNX2_EMAC_MODE_PORT_GMII;
			break;
		}
	}
	else {
		val |= BNX2_EMAC_MODE_PORT_GMII;
	}

	/* Set the MAC to operate in the appropriate duplex mode. */
	if (bp->duplex == DUPLEX_HALF)
		val |= BNX2_EMAC_MODE_HALF_DUPLEX;
	REG_WR(bp, BNX2_EMAC_MODE, val);

	/* Enable/disable rx PAUSE. */
	bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_RX)
		bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);

	/* Enable/disable tx PAUSE. */
	val = REG_RD(bp, BNX2_EMAC_TX_MODE);
	val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_TX)
		val |= BNX2_EMAC_TX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_TX_MODE, val);

	/* Acknowledge the interrupt. */
	REG_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_init_all_rx_contexts(bp);
}

static void
bnx2_enable_bmsr1(struct bnx2 *bp)
{
	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5709))
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_GP_STATUS);
}

static void
bnx2_disable_bmsr1(struct bnx2 *bp)
{
	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5709))
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
}

static int
bnx2_test_and_enable_2g5(struct bnx2 *bp)
{
	u32 up1;
	int ret = 1;

	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
		return 0;

	if (bp->autoneg & AUTONEG_SPEED)
		bp->advertising |= ADVERTISED_2500baseX_Full;

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);

	bnx2_read_phy(bp, bp->mii_up1, &up1);
	if (!(up1 & BCM5708S_UP1_2G5)) {
		up1 |= BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, bp->mii_up1, up1);
		ret = 0;
	}

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	return ret;
}

static int
bnx2_test_and_disable_2g5(struct bnx2 *bp)
{
	u32 up1;
	int ret = 0;

	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
		return 0;

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);

	bnx2_read_phy(bp, bp->mii_up1, &up1);
	if (up1 & BCM5708S_UP1_2G5) {
		up1 &= ~BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, bp->mii_up1, up1);
		ret = 1;
	}

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	return ret;
}

static void
bnx2_enable_forced_2g5(struct bnx2 *bp)
{
	u32 bmcr;

	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
		return;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		u32 val;

		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_SERDES_DIG);
		bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
		val &= ~MII_BNX2_SD_MISC1_FORCE_MSK;
		val |= MII_BNX2_SD_MISC1_FORCE | MII_BNX2_SD_MISC1_FORCE_2_5G;
		bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);

		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		bmcr |= BCM5708S_BMCR_FORCE_2500;
	} else {
		return;
	}

	if (bp->autoneg & AUTONEG_SPEED) {
		bmcr &= ~BMCR_ANENABLE;
		if (bp->req_duplex == DUPLEX_FULL)
			bmcr |= BMCR_FULLDPLX;
	}
	bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
}

static void
bnx2_disable_forced_2g5(struct bnx2 *bp)
{
	u32 bmcr;

	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
		return;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		u32 val;

		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_SERDES_DIG);
		bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
		val &= ~MII_BNX2_SD_MISC1_FORCE;
		bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);

		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		bmcr &= ~BCM5708S_BMCR_FORCE_2500;
	} else {
		return;
	}

	if (bp->autoneg & AUTONEG_SPEED)
		bmcr |= BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_ANRESTART;
	bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
}

static void
bnx2_5706s_force_link_dn(struct bnx2 *bp, int start)
{
	u32 val;

	bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_SERDES_CTL);
	bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
	if (start)
		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val & 0xff0f);
	else
		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val | 0xc0);
}

static int
bnx2_set_link(struct bnx2 *bp)
{
	u32 bmsr;
	u8 link_up;

	if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) {
		bp->link_up = 1;
		return 0;
	}

	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return 0;

	link_up = bp->link_up;

	bnx2_enable_bmsr1(bp);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_disable_bmsr1(bp);

	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5706)) {
		u32 val, an_dbg;

		if (bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN) {
			bnx2_5706s_force_link_dn(bp, 0);
			bp->phy_flags &= ~BNX2_PHY_FLAG_FORCED_DOWN;
		}
		val = REG_RD(bp, BNX2_EMAC_STATUS);

		bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);

		if ((val & BNX2_EMAC_STATUS_LINK) &&
		    !(an_dbg & MISC_SHDW_AN_DBG_NOSYNC))
			bmsr |= BMSR_LSTATUS;
		else
			bmsr &= ~BMSR_LSTATUS;
	}

	if (bmsr & BMSR_LSTATUS) {
		bp->link_up = 1;

		if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
			if (CHIP_NUM(bp) == CHIP_NUM_5706)
				bnx2_5706s_linkup(bp);
			else if (CHIP_NUM(bp) == CHIP_NUM_5708)
				bnx2_5708s_linkup(bp);
			else if (CHIP_NUM(bp) == CHIP_NUM_5709)
				bnx2_5709s_linkup(bp);
		}
		else {
			bnx2_copper_linkup(bp);
		}
		bnx2_resolve_flow_ctrl(bp);
	}
	else {
		if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
		    (bp->autoneg & AUTONEG_SPEED))
			bnx2_disable_forced_2g5(bp);

		if (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT) {
			u32 bmcr;

			bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
			bmcr |= BMCR_ANENABLE;
			bnx2_write_phy(bp, bp->mii_bmcr, bmcr);

			bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
		}
		bp->link_up = 0;
	}

	if (bp->link_up != link_up) {
		bnx2_report_link(bp);
	}

	bnx2_set_mac_link(bp);

	return 0;
}

static int
bnx2_reset_phy(struct bnx2 *bp)
{
	int i;
	u32 reg;

	bnx2_write_phy(bp, bp->mii_bmcr, BMCR_RESET);

#define PHY_RESET_MAX_WAIT 100
	for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
		udelay(10);

		bnx2_read_phy(bp, bp->mii_bmcr, &reg);
		if (!(reg & BMCR_RESET)) {
			udelay(20);
			break;
		}
	}
	if (i == PHY_RESET_MAX_WAIT) {
		return -EBUSY;
	}
	return 0;
}

static u32
bnx2_phy_get_pause_adv(struct bnx2 *bp)
{
	u32 adv = 0;

	if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
	    (FLOW_CTRL_RX | FLOW_CTRL_TX)) {

		if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
			adv = ADVERTISE_1000XPAUSE;
		}
		else {
			adv = ADVERTISE_PAUSE_CAP;
		}
	}
	else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
		if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
			adv = ADVERTISE_1000XPSE_ASYM;
		}
		else {
			adv = ADVERTISE_PAUSE_ASYM;
		}
	}
	else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
		if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
			adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
		}
		else {
			adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
		}
	}
	return adv;
}

static int bnx2_fw_sync(struct bnx2 *, u32, int, int);

static int
bnx2_setup_remote_phy(struct bnx2 *bp, u8 port)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
{
	u32 speed_arg = 0, pause_adv;

	pause_adv = bnx2_phy_get_pause_adv(bp);

	if (bp->autoneg & AUTONEG_SPEED) {
		speed_arg |= BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG;
		if (bp->advertising & ADVERTISED_10baseT_Half)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10HALF;
		if (bp->advertising & ADVERTISED_10baseT_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10FULL;
		if (bp->advertising & ADVERTISED_100baseT_Half)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100HALF;
		if (bp->advertising & ADVERTISED_100baseT_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100FULL;
		if (bp->advertising & ADVERTISED_1000baseT_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
		if (bp->advertising & ADVERTISED_2500baseX_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
	} else {
		if (bp->req_line_speed == SPEED_2500)
			speed_arg = BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
		else if (bp->req_line_speed == SPEED_1000)
			speed_arg = BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
		else if (bp->req_line_speed == SPEED_100) {
			if (bp->req_duplex == DUPLEX_FULL)
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100FULL;
			else
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100HALF;
		} else if (bp->req_line_speed == SPEED_10) {
			if (bp->req_duplex == DUPLEX_FULL)
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10FULL;
			else
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10HALF;
		}
	}

	if (pause_adv & (ADVERTISE_1000XPAUSE | ADVERTISE_PAUSE_CAP))
		speed_arg |= BNX2_NETLINK_SET_LINK_FC_SYM_PAUSE;
	if (pause_adv & (ADVERTISE_1000XPSE_ASYM | ADVERTISE_PAUSE_ASYM))
		speed_arg |= BNX2_NETLINK_SET_LINK_FC_ASYM_PAUSE;

	if (port == PORT_TP)
		speed_arg |= BNX2_NETLINK_SET_LINK_PHY_APP_REMOTE |
			     BNX2_NETLINK_SET_LINK_ETH_AT_WIRESPEED;

	bnx2_shmem_wr(bp, BNX2_DRV_MB_ARG0, speed_arg);

	spin_unlock_bh(&bp->phy_lock);
	bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_CMD_SET_LINK, 1, 0);
	spin_lock_bh(&bp->phy_lock);

	return 0;
}

static int
bnx2_setup_serdes_phy(struct bnx2 *bp, u8 port)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
{
	u32 adv, bmcr;
	u32 new_adv = 0;

	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return (bnx2_setup_remote_phy(bp, port));

	if (!(bp->autoneg & AUTONEG_SPEED)) {
		u32 new_bmcr;
		int force_link_down = 0;

		if (bp->req_line_speed == SPEED_2500) {
			if (!bnx2_test_and_enable_2g5(bp))
				force_link_down = 1;
		} else if (bp->req_line_speed == SPEED_1000) {
			if (bnx2_test_and_disable_2g5(bp))
				force_link_down = 1;
		}
		bnx2_read_phy(bp, bp->mii_adv, &adv);
		adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		new_bmcr = bmcr & ~BMCR_ANENABLE;
		new_bmcr |= BMCR_SPEED1000;

		if (CHIP_NUM(bp) == CHIP_NUM_5709) {
			if (bp->req_line_speed == SPEED_2500)
				bnx2_enable_forced_2g5(bp);
			else if (bp->req_line_speed == SPEED_1000) {
				bnx2_disable_forced_2g5(bp);
				new_bmcr &= ~0x2000;
			}

		} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
			if (bp->req_line_speed == SPEED_2500)
				new_bmcr |= BCM5708S_BMCR_FORCE_2500;
			else
				new_bmcr = bmcr & ~BCM5708S_BMCR_FORCE_2500;
		}

		if (bp->req_duplex == DUPLEX_FULL) {
			adv |= ADVERTISE_1000XFULL;
			new_bmcr |= BMCR_FULLDPLX;
		}
		else {
			adv |= ADVERTISE_1000XHALF;
			new_bmcr &= ~BMCR_FULLDPLX;
		}
		if ((new_bmcr != bmcr) || (force_link_down)) {
			/* Force a link down visible on the other side */
			if (bp->link_up) {
				bnx2_write_phy(bp, bp->mii_adv, adv &
					       ~(ADVERTISE_1000XFULL |
						 ADVERTISE_1000XHALF));
				bnx2_write_phy(bp, bp->mii_bmcr, bmcr |
					BMCR_ANRESTART | BMCR_ANENABLE);

				bp->link_up = 0;
				netif_carrier_off(bp->dev);
				bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
				bnx2_report_link(bp);
			}
			bnx2_write_phy(bp, bp->mii_adv, adv);
			bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
		} else {
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	bnx2_test_and_enable_2g5(bp);

	if (bp->advertising & ADVERTISED_1000baseT_Full)
		new_adv |= ADVERTISE_1000XFULL;

	new_adv |= bnx2_phy_get_pause_adv(bp);

	bnx2_read_phy(bp, bp->mii_adv, &adv);
	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	bp->serdes_an_pending = 0;
	if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
		/* Force a link down visible on the other side */
		if (bp->link_up) {
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
			spin_unlock_bh(&bp->phy_lock);
			msleep(20);
			spin_lock_bh(&bp->phy_lock);
		}

		bnx2_write_phy(bp, bp->mii_adv, new_adv);
		bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART |
			BMCR_ANENABLE);
		/* Speed up link-up time when the link partner
		 * does not autonegotiate which is very common
		 * in blade servers. Some blade servers use
		 * IPMI for keyboard input and it's important
		 * to minimize link disruptions. Autoneg. involves
		 * exchanging base pages plus 3 next pages and
		 * normally completes in about 120 msec.
		 */
		bp->current_interval = BNX2_SERDES_AN_TIMEOUT;
		bp->serdes_an_pending = 1;
		mod_timer(&bp->timer, jiffies + bp->current_interval);
	} else {
		bnx2_resolve_flow_ctrl(bp);
		bnx2_set_mac_link(bp);
	}

	return 0;
}
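
/* When autoneg parameters change while the link is up, the sequence
 * above deliberately bounces the link: BMCR_LOOPBACK drops it so the
 * partner notices, the new advertisement is written, and autoneg is
 * restarted.  Arming bp->timer with BNX2_SERDES_AN_TIMEOUT and setting
 * serdes_an_pending lets the driver's periodic timer path fall back to
 * parallel detection if the partner never autonegotiates.
 */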

#define ETHTOOL_ALL_FIBRE_SPEED						\
	(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) ?			\
		(ADVERTISED_2500baseX_Full | ADVERTISED_1000baseT_Full) :\
		(ADVERTISED_1000baseT_Full)

#define ETHTOOL_ALL_COPPER_SPEED					\
	(ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |		\
	ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |		\
	ADVERTISED_1000baseT_Full)

#define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
	ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)

#define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)

static void
bnx2_set_default_remote_link(struct bnx2 *bp)
{
	u32 link;

	if (bp->phy_port == PORT_TP)
		link = bnx2_shmem_rd(bp, BNX2_RPHY_COPPER_LINK);
	else
		link = bnx2_shmem_rd(bp, BNX2_RPHY_SERDES_LINK);

	if (link & BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG) {
		bp->req_line_speed = 0;
		bp->autoneg |= AUTONEG_SPEED;
		bp->advertising = ADVERTISED_Autoneg;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
			bp->advertising |= ADVERTISED_10baseT_Half;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_10FULL)
			bp->advertising |= ADVERTISED_10baseT_Full;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
			bp->advertising |= ADVERTISED_100baseT_Half;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_100FULL)
			bp->advertising |= ADVERTISED_100baseT_Full;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
			bp->advertising |= ADVERTISED_1000baseT_Full;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
			bp->advertising |= ADVERTISED_2500baseX_Full;
	} else {
		bp->autoneg = 0;
		bp->advertising = 0;
		bp->req_duplex = DUPLEX_FULL;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_10) {
			bp->req_line_speed = SPEED_10;
			if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
				bp->req_duplex = DUPLEX_HALF;
		}
		if (link & BNX2_NETLINK_SET_LINK_SPEED_100) {
			bp->req_line_speed = SPEED_100;
			if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
				bp->req_duplex = DUPLEX_HALF;
		}
		if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
			bp->req_line_speed = SPEED_1000;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
			bp->req_line_speed = SPEED_2500;
	}
}

static void
bnx2_set_default_link(struct bnx2 *bp)
{
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
		bnx2_set_default_remote_link(bp);
		return;
	}

	bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
	bp->req_line_speed = 0;
	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		u32 reg;

		bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;

		reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG);
		reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
		if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
			bp->autoneg = 0;
			bp->req_line_speed = bp->line_speed = SPEED_1000;
			bp->req_duplex = DUPLEX_FULL;
		}
	} else
		bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
}

static void
bnx2_send_heart_beat(struct bnx2 *bp)
{
	u32 msg;
	u32 addr;

	spin_lock(&bp->indirect_lock);
	msg = (u32) (++bp->fw_drv_pulse_wr_seq & BNX2_DRV_PULSE_SEQ_MASK);
	addr = bp->shmem_base + BNX2_DRV_PULSE_MB;
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, addr);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW, msg);
	spin_unlock(&bp->indirect_lock);
}

static void
bnx2_remote_phy_event(struct bnx2 *bp)
{
	u32 msg;
	u8 link_up = bp->link_up;
	u8 old_port;

	msg = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);

	if (msg & BNX2_LINK_STATUS_HEART_BEAT_EXPIRED)
		bnx2_send_heart_beat(bp);

	msg &= ~BNX2_LINK_STATUS_HEART_BEAT_EXPIRED;

	if ((msg & BNX2_LINK_STATUS_LINK_UP) == BNX2_LINK_STATUS_LINK_DOWN)
		bp->link_up = 0;
	else {
		u32 speed;

		bp->link_up = 1;
		speed = msg & BNX2_LINK_STATUS_SPEED_MASK;
		bp->duplex = DUPLEX_FULL;
		switch (speed) {
		case BNX2_LINK_STATUS_10HALF:
			bp->duplex = DUPLEX_HALF;
			/* fall through */
		case BNX2_LINK_STATUS_10FULL:
			bp->line_speed = SPEED_10;
			break;
		case BNX2_LINK_STATUS_100HALF:
			bp->duplex = DUPLEX_HALF;
			/* fall through */
		case BNX2_LINK_STATUS_100BASE_T4:
		case BNX2_LINK_STATUS_100FULL:
			bp->line_speed = SPEED_100;
			break;
		case BNX2_LINK_STATUS_1000HALF:
			bp->duplex = DUPLEX_HALF;
			/* fall through */
		case BNX2_LINK_STATUS_1000FULL:
			bp->line_speed = SPEED_1000;
			break;
		case BNX2_LINK_STATUS_2500HALF:
			bp->duplex = DUPLEX_HALF;
			/* fall through */
		case BNX2_LINK_STATUS_2500FULL:
			bp->line_speed = SPEED_2500;
			break;
		default:
			bp->line_speed = 0;
			break;
		}

		bp->flow_ctrl = 0;
		if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
		    (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
			if (bp->duplex == DUPLEX_FULL)
				bp->flow_ctrl = bp->req_flow_ctrl;
		} else {
			if (msg & BNX2_LINK_STATUS_TX_FC_ENABLED)
				bp->flow_ctrl |= FLOW_CTRL_TX;
			if (msg & BNX2_LINK_STATUS_RX_FC_ENABLED)
				bp->flow_ctrl |= FLOW_CTRL_RX;
		}

		old_port = bp->phy_port;
		if (msg & BNX2_LINK_STATUS_SERDES_LINK)
			bp->phy_port = PORT_FIBRE;
		else
			bp->phy_port = PORT_TP;

		if (old_port != bp->phy_port)
			bnx2_set_default_link(bp);
	}

	if (bp->link_up != link_up)
		bnx2_report_link(bp);

	bnx2_set_mac_link(bp);
}
2037 static int
2038 bnx2_set_remote_link(struct bnx2 *bp)
2040 u32 evt_code;
2042 evt_code = bnx2_shmem_rd(bp, BNX2_FW_EVT_CODE_MB);
2043 switch (evt_code) {
2044 case BNX2_FW_EVT_CODE_LINK_EVENT:
2045 bnx2_remote_phy_event(bp);
2046 break;
2047 case BNX2_FW_EVT_CODE_SW_TIMER_EXPIRATION_EVENT:
2048 default:
2049 bnx2_send_heart_beat(bp);
2050 break;
2052 return 0;
2055 static int
2056 bnx2_setup_copper_phy(struct bnx2 *bp)
2057 __releases(&bp->phy_lock)
2058 __acquires(&bp->phy_lock)
2060 u32 bmcr;
2061 u32 new_bmcr;
2063 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
2065 if (bp->autoneg & AUTONEG_SPEED) {
2066 u32 adv_reg, adv1000_reg;
2067 u32 new_adv_reg = 0;
2068 u32 new_adv1000_reg = 0;
2070 bnx2_read_phy(bp, bp->mii_adv, &adv_reg);
2071 adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
2072 ADVERTISE_PAUSE_ASYM);
2074 bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
2075 adv1000_reg &= PHY_ALL_1000_SPEED;
2077 if (bp->advertising & ADVERTISED_10baseT_Half)
2078 new_adv_reg |= ADVERTISE_10HALF;
2079 if (bp->advertising & ADVERTISED_10baseT_Full)
2080 new_adv_reg |= ADVERTISE_10FULL;
2081 if (bp->advertising & ADVERTISED_100baseT_Half)
2082 new_adv_reg |= ADVERTISE_100HALF;
2083 if (bp->advertising & ADVERTISED_100baseT_Full)
2084 new_adv_reg |= ADVERTISE_100FULL;
2085 if (bp->advertising & ADVERTISED_1000baseT_Full)
2086 new_adv1000_reg |= ADVERTISE_1000FULL;
2088 new_adv_reg |= ADVERTISE_CSMA;
2090 new_adv_reg |= bnx2_phy_get_pause_adv(bp);
2092 if ((adv1000_reg != new_adv1000_reg) ||
2093 (adv_reg != new_adv_reg) ||
2094 ((bmcr & BMCR_ANENABLE) == 0)) {
2096 bnx2_write_phy(bp, bp->mii_adv, new_adv_reg);
2097 bnx2_write_phy(bp, MII_CTRL1000, new_adv1000_reg);
2098 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_ANRESTART |
2099 BMCR_ANENABLE);
2101 else if (bp->link_up) {
2102 /* Flow ctrl may have changed from auto to forced */
2103 /* or vice-versa. */
2105 bnx2_resolve_flow_ctrl(bp);
2106 bnx2_set_mac_link(bp);
2108 return 0;
2111 new_bmcr = 0;
2112 if (bp->req_line_speed == SPEED_100) {
2113 new_bmcr |= BMCR_SPEED100;
2115 if (bp->req_duplex == DUPLEX_FULL) {
2116 new_bmcr |= BMCR_FULLDPLX;
2118 if (new_bmcr != bmcr) {
2119 u32 bmsr;
2121 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
2122 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
2124 if (bmsr & BMSR_LSTATUS) {
2125 /* Force link down */
2126 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
2127 spin_unlock_bh(&bp->phy_lock);
2128 msleep(50);
2129 spin_lock_bh(&bp->phy_lock);
2131 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
2132 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
2135 bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
2137 /* Normally, the new speed is setup after the link has
2138 * gone down and up again. In some cases, link will not go
2139 * down so we need to set up the new speed here.
2141 if (bmsr & BMSR_LSTATUS) {
2142 bp->line_speed = bp->req_line_speed;
2143 bp->duplex = bp->req_duplex;
2144 bnx2_resolve_flow_ctrl(bp);
2145 bnx2_set_mac_link(bp);
2147 } else {
2148 bnx2_resolve_flow_ctrl(bp);
2149 bnx2_set_mac_link(bp);
2151 return 0;
2154 static int
2155 bnx2_setup_phy(struct bnx2 *bp, u8 port)
2156 __releases(&bp->phy_lock)
2157 __acquires(&bp->phy_lock)
2159 if (bp->loopback == MAC_LOOPBACK)
2160 return 0;
2162 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
2163 return (bnx2_setup_serdes_phy(bp, port));
2164 }
2165 else {
2166 return (bnx2_setup_copper_phy(bp));
2167 }
2168 }
2170 static int
2171 bnx2_init_5709s_phy(struct bnx2 *bp, int reset_phy)
2173 u32 val;
2175 bp->mii_bmcr = MII_BMCR + 0x10;
2176 bp->mii_bmsr = MII_BMSR + 0x10;
2177 bp->mii_bmsr1 = MII_BNX2_GP_TOP_AN_STATUS1;
2178 bp->mii_adv = MII_ADVERTISE + 0x10;
2179 bp->mii_lpa = MII_LPA + 0x10;
2180 bp->mii_up1 = MII_BNX2_OVER1G_UP1;
2182 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_AER);
2183 bnx2_write_phy(bp, MII_BNX2_AER_AER, MII_BNX2_AER_AER_AN_MMD);
2185 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
2186 if (reset_phy)
2187 bnx2_reset_phy(bp);
2189 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_SERDES_DIG);
2191 bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, &val);
2192 val &= ~MII_BNX2_SD_1000XCTL1_AUTODET;
2193 val |= MII_BNX2_SD_1000XCTL1_FIBER;
2194 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, val);
2196 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
2197 bnx2_read_phy(bp, MII_BNX2_OVER1G_UP1, &val);
2198 if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
2199 val |= BCM5708S_UP1_2G5;
2200 else
2201 val &= ~BCM5708S_UP1_2G5;
2202 bnx2_write_phy(bp, MII_BNX2_OVER1G_UP1, val);
2204 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_BAM_NXTPG);
2205 bnx2_read_phy(bp, MII_BNX2_BAM_NXTPG_CTL, &val);
2206 val |= MII_BNX2_NXTPG_CTL_T2 | MII_BNX2_NXTPG_CTL_BAM;
2207 bnx2_write_phy(bp, MII_BNX2_BAM_NXTPG_CTL, val);
2209 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_CL73_USERB0);
2211 val = MII_BNX2_CL73_BAM_EN | MII_BNX2_CL73_BAM_STA_MGR_EN |
2212 MII_BNX2_CL73_BAM_NP_AFT_BP_EN;
2213 bnx2_write_phy(bp, MII_BNX2_CL73_BAM_CTL1, val);
2215 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
2217 return 0;
2220 static int
2221 bnx2_init_5708s_phy(struct bnx2 *bp, int reset_phy)
2223 u32 val;
2225 if (reset_phy)
2226 bnx2_reset_phy(bp);
2228 bp->mii_up1 = BCM5708S_UP1;
2230 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
2231 bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
2232 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
2234 bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
2235 val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
2236 bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);
2238 bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
2239 val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
2240 bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);
2242 if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) {
2243 bnx2_read_phy(bp, BCM5708S_UP1, &val);
2244 val |= BCM5708S_UP1_2G5;
2245 bnx2_write_phy(bp, BCM5708S_UP1, val);
2246 }
2248 if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
2249 (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
2250 (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
2251 /* increase tx signal amplitude */
2252 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
2253 BCM5708S_BLK_ADDR_TX_MISC);
2254 bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
2255 val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
2256 bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
2257 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
2258 }
2260 val = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG) &
2261 BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;
2263 if (val) {
2264 u32 is_backplane;
2266 is_backplane = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
2267 if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
2268 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
2269 BCM5708S_BLK_ADDR_TX_MISC);
2270 bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
2271 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
2272 BCM5708S_BLK_ADDR_DIG);
2273 }
2274 }
2275 return 0;
2278 static int
2279 bnx2_init_5706s_phy(struct bnx2 *bp, int reset_phy)
2281 if (reset_phy)
2282 bnx2_reset_phy(bp);
2284 bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
2286 if (CHIP_NUM(bp) == CHIP_NUM_5706)
2287 REG_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300);
2289 if (bp->dev->mtu > 1500) {
2290 u32 val;
2292 /* Set extended packet length bit */
2293 bnx2_write_phy(bp, 0x18, 0x7);
2294 bnx2_read_phy(bp, 0x18, &val);
2295 bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);
2297 bnx2_write_phy(bp, 0x1c, 0x6c00);
2298 bnx2_read_phy(bp, 0x1c, &val);
2299 bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
2300 }
2301 else {
2302 u32 val;
2304 bnx2_write_phy(bp, 0x18, 0x7);
2305 bnx2_read_phy(bp, 0x18, &val);
2306 bnx2_write_phy(bp, 0x18, val & ~0x4007);
2308 bnx2_write_phy(bp, 0x1c, 0x6c00);
2309 bnx2_read_phy(bp, 0x1c, &val);
2310 bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
2311 }
2313 return 0;
2316 static int
2317 bnx2_init_copper_phy(struct bnx2 *bp, int reset_phy)
2319 u32 val;
2321 if (reset_phy)
2322 bnx2_reset_phy(bp);
2324 if (bp->phy_flags & BNX2_PHY_FLAG_CRC_FIX) {
2325 bnx2_write_phy(bp, 0x18, 0x0c00);
2326 bnx2_write_phy(bp, 0x17, 0x000a);
2327 bnx2_write_phy(bp, 0x15, 0x310b);
2328 bnx2_write_phy(bp, 0x17, 0x201f);
2329 bnx2_write_phy(bp, 0x15, 0x9506);
2330 bnx2_write_phy(bp, 0x17, 0x401f);
2331 bnx2_write_phy(bp, 0x15, 0x14e2);
2332 bnx2_write_phy(bp, 0x18, 0x0400);
2333 }
2335 if (bp->phy_flags & BNX2_PHY_FLAG_DIS_EARLY_DAC) {
2336 bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS,
2337 MII_BNX2_DSP_EXPAND_REG | 0x8);
2338 bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
2339 val &= ~(1 << 8);
2340 bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val);
2341 }
2343 if (bp->dev->mtu > 1500) {
2344 /* Set extended packet length bit */
2345 bnx2_write_phy(bp, 0x18, 0x7);
2346 bnx2_read_phy(bp, 0x18, &val);
2347 bnx2_write_phy(bp, 0x18, val | 0x4000);
2349 bnx2_read_phy(bp, 0x10, &val);
2350 bnx2_write_phy(bp, 0x10, val | 0x1);
2351 }
2352 else {
2353 bnx2_write_phy(bp, 0x18, 0x7);
2354 bnx2_read_phy(bp, 0x18, &val);
2355 bnx2_write_phy(bp, 0x18, val & ~0x4007);
2357 bnx2_read_phy(bp, 0x10, &val);
2358 bnx2_write_phy(bp, 0x10, val & ~0x1);
2359 }
2361 /* ethernet@wirespeed */
2362 bnx2_write_phy(bp, 0x18, 0x7007);
2363 bnx2_read_phy(bp, 0x18, &val);
2364 bnx2_write_phy(bp, 0x18, val | (1 << 15) | (1 << 4));
2365 return 0;
2369 static int
2370 bnx2_init_phy(struct bnx2 *bp, int reset_phy)
2371 __releases(&bp->phy_lock)
2372 __acquires(&bp->phy_lock)
2374 u32 val;
2375 int rc = 0;
2377 bp->phy_flags &= ~BNX2_PHY_FLAG_INT_MODE_MASK;
2378 bp->phy_flags |= BNX2_PHY_FLAG_INT_MODE_LINK_READY;
2380 bp->mii_bmcr = MII_BMCR;
2381 bp->mii_bmsr = MII_BMSR;
2382 bp->mii_bmsr1 = MII_BMSR;
2383 bp->mii_adv = MII_ADVERTISE;
2384 bp->mii_lpa = MII_LPA;
2386 REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
2388 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
2389 goto setup_phy;
2391 bnx2_read_phy(bp, MII_PHYSID1, &val);
2392 bp->phy_id = val << 16;
2393 bnx2_read_phy(bp, MII_PHYSID2, &val);
2394 bp->phy_id |= val & 0xffff;
2396 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
2397 if (CHIP_NUM(bp) == CHIP_NUM_5706)
2398 rc = bnx2_init_5706s_phy(bp, reset_phy);
2399 else if (CHIP_NUM(bp) == CHIP_NUM_5708)
2400 rc = bnx2_init_5708s_phy(bp, reset_phy);
2401 else if (CHIP_NUM(bp) == CHIP_NUM_5709)
2402 rc = bnx2_init_5709s_phy(bp, reset_phy);
2403 }
2404 else {
2405 rc = bnx2_init_copper_phy(bp, reset_phy);
2406 }
2408 setup_phy:
2409 if (!rc)
2410 rc = bnx2_setup_phy(bp, bp->phy_port);
2412 return rc;
2415 static int
2416 bnx2_set_mac_loopback(struct bnx2 *bp)
2418 u32 mac_mode;
2420 mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
2421 mac_mode &= ~BNX2_EMAC_MODE_PORT;
2422 mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
2423 REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
2424 bp->link_up = 1;
2425 return 0;
2428 static int bnx2_test_link(struct bnx2 *);
2430 static int
2431 bnx2_set_phy_loopback(struct bnx2 *bp)
2433 u32 mac_mode;
2434 int rc, i;
2436 spin_lock_bh(&bp->phy_lock);
2437 rc = bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK | BMCR_FULLDPLX |
2438 BMCR_SPEED1000);
2439 spin_unlock_bh(&bp->phy_lock);
2440 if (rc)
2441 return rc;
2443 for (i = 0; i < 10; i++) {
2444 if (bnx2_test_link(bp) == 0)
2445 break;
2446 msleep(100);
2447 }
2449 mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
2450 mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
2451 BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
2452 BNX2_EMAC_MODE_25G_MODE);
2454 mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
2455 REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
2456 bp->link_up = 1;
2457 return 0;
2460 static int
2461 bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int ack, int silent)
2463 int i;
2464 u32 val;
2466 bp->fw_wr_seq++;
2467 msg_data |= bp->fw_wr_seq;
2469 bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);
2471 if (!ack)
2472 return 0;
2474 /* wait for an acknowledgement. */
2475 for (i = 0; i < (BNX2_FW_ACK_TIME_OUT_MS / 10); i++) {
2476 msleep(10);
2478 val = bnx2_shmem_rd(bp, BNX2_FW_MB);
2480 if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
2481 break;
2482 }
2483 if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
2484 return 0;
2486 /* If we timed out, inform the firmware that this is the case. */
2487 if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
2488 if (!silent)
2489 printk(KERN_ERR PFX "fw sync timeout, reset code = "
2490 "%x\n", msg_data);
2492 msg_data &= ~BNX2_DRV_MSG_CODE;
2493 msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;
2495 bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);
2497 return -EBUSY;
2498 }
2500 if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
2501 return -EIO;
2503 return 0;
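/* Annotation (not part of the original source): bnx2_fw_sync() is the
 * driver<->bootcode mailbox handshake. Each request is stamped with an
 * incrementing sequence number (bp->fw_wr_seq) and written to the
 * BNX2_DRV_MB shared-memory word; the bootcode acknowledges by echoing
 * the sequence number in BNX2_FW_MB. On timeout the driver posts
 * BNX2_DRV_MSG_CODE_FW_TIMEOUT so the firmware knows the wait was
 * abandoned. A typical caller is the D3hot path further below, which
 * posts BNX2_DRV_MSG_DATA_WAIT3 | wol_msg with ack set.
 */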
2506 static int
2507 bnx2_init_5709_context(struct bnx2 *bp)
2509 int i, ret = 0;
2510 u32 val;
2512 val = BNX2_CTX_COMMAND_ENABLED | BNX2_CTX_COMMAND_MEM_INIT | (1 << 12);
2513 val |= (BCM_PAGE_BITS - 8) << 16;
2514 REG_WR(bp, BNX2_CTX_COMMAND, val);
2515 for (i = 0; i < 10; i++) {
2516 val = REG_RD(bp, BNX2_CTX_COMMAND);
2517 if (!(val & BNX2_CTX_COMMAND_MEM_INIT))
2518 break;
2519 udelay(2);
2520 }
2521 if (val & BNX2_CTX_COMMAND_MEM_INIT)
2522 return -EBUSY;
2524 for (i = 0; i < bp->ctx_pages; i++) {
2525 int j;
2527 if (bp->ctx_blk[i])
2528 memset(bp->ctx_blk[i], 0, BCM_PAGE_SIZE);
2529 else
2530 return -ENOMEM;
2532 REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0,
2533 (bp->ctx_blk_mapping[i] & 0xffffffff) |
2534 BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID);
2535 REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA1,
2536 (u64) bp->ctx_blk_mapping[i] >> 32);
2537 REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL, i |
2538 BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
2539 for (j = 0; j < 10; j++) {
2541 val = REG_RD(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL);
2542 if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
2543 break;
2544 udelay(5);
2545 }
2546 if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
2547 ret = -EBUSY;
2548 break;
2549 }
2550 }
2551 return ret;
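/* Annotation (not part of the original source): the 5709 keeps its
 * connection-context memory in host DRAM. Each host page is installed
 * in the chip's page table by writing the low 32 DMA bits (plus the
 * VALID flag) to ..._HOST_PAGE_TBL_DATA0, the high 32 bits to
 * ..._DATA1, and committing the entry with a WRITE_REQ bit that the
 * hardware clears on completion; the udelay() poll above bounds that
 * wait at roughly 50us per entry before giving up with -EBUSY.
 */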
2554 static void
2555 bnx2_init_context(struct bnx2 *bp)
2557 u32 vcid;
2559 vcid = 96;
2560 while (vcid) {
2561 u32 vcid_addr, pcid_addr, offset;
2562 int i;
2564 vcid--;
2566 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
2567 u32 new_vcid;
2569 vcid_addr = GET_PCID_ADDR(vcid);
2570 if (vcid & 0x8) {
2571 new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
2572 }
2573 else {
2574 new_vcid = vcid;
2576 pcid_addr = GET_PCID_ADDR(new_vcid);
2577 }
2578 else {
2579 vcid_addr = GET_CID_ADDR(vcid);
2580 pcid_addr = vcid_addr;
2583 for (i = 0; i < (CTX_SIZE / PHY_CTX_SIZE); i++) {
2584 vcid_addr += (i << PHY_CTX_SHIFT);
2585 pcid_addr += (i << PHY_CTX_SHIFT);
2587 REG_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
2588 REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
2590 /* Zero out the context. */
2591 for (offset = 0; offset < PHY_CTX_SIZE; offset += 4)
2592 bnx2_ctx_wr(bp, vcid_addr, offset, 0);
2593 }
2594 }
2595 }
2597 static int
2598 bnx2_alloc_bad_rbuf(struct bnx2 *bp)
2600 u16 *good_mbuf;
2601 u32 good_mbuf_cnt;
2602 u32 val;
2604 good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
2605 if (good_mbuf == NULL) {
2606 printk(KERN_ERR PFX "Failed to allocate memory in "
2607 "bnx2_alloc_bad_rbuf\n");
2608 return -ENOMEM;
2611 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
2612 BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);
2614 good_mbuf_cnt = 0;
2616 /* Allocate a bunch of mbufs and save the good ones in an array. */
2617 val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
2618 while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
2619 bnx2_reg_wr_ind(bp, BNX2_RBUF_COMMAND,
2620 BNX2_RBUF_COMMAND_ALLOC_REQ);
2622 val = bnx2_reg_rd_ind(bp, BNX2_RBUF_FW_BUF_ALLOC);
2624 val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;
2626 /* The addresses with Bit 9 set are bad memory blocks. */
2627 if (!(val & (1 << 9))) {
2628 good_mbuf[good_mbuf_cnt] = (u16) val;
2629 good_mbuf_cnt++;
2632 val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
2633 }
2635 /* Free the good ones back to the mbuf pool thus discarding
2636 * all the bad ones. */
2637 while (good_mbuf_cnt) {
2638 good_mbuf_cnt--;
2640 val = good_mbuf[good_mbuf_cnt];
2641 val = (val << 9) | val | 1;
2643 bnx2_reg_wr_ind(bp, BNX2_RBUF_FW_BUF_FREE, val);
2644 }
2645 kfree(good_mbuf);
2646 return 0;
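/* Annotation (not part of the original source): the loop above drains
 * the firmware's free-buffer pool, keeping only the handles whose bit 9
 * is clear. Returning just the good handles leaves the bad memory
 * blocks permanently allocated, so the hardware can never hand them out
 * for real traffic. The (val << 9) | val | 1 write appears to replicate
 * the handle into both fields of the free command; the exact encoding
 * is firmware-defined and only inferred here.
 */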
2649 static void
2650 bnx2_set_mac_addr(struct bnx2 *bp, u8 *mac_addr, u32 pos)
2652 u32 val;
2654 val = (mac_addr[0] << 8) | mac_addr[1];
2656 REG_WR(bp, BNX2_EMAC_MAC_MATCH0 + (pos * 8), val);
2658 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
2659 (mac_addr[4] << 8) | mac_addr[5];
2661 REG_WR(bp, BNX2_EMAC_MAC_MATCH1 + (pos * 8), val);
2664 static inline int
2665 bnx2_alloc_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
2667 dma_addr_t mapping;
2668 struct sw_pg *rx_pg = &rxr->rx_pg_ring[index];
2669 struct rx_bd *rxbd =
2670 &rxr->rx_pg_desc_ring[RX_RING(index)][RX_IDX(index)];
2671 struct page *page = alloc_page(GFP_ATOMIC);
2673 if (!page)
2674 return -ENOMEM;
2675 mapping = pci_map_page(bp->pdev, page, 0, PAGE_SIZE,
2676 PCI_DMA_FROMDEVICE);
2677 if (pci_dma_mapping_error(bp->pdev, mapping)) {
2678 __free_page(page);
2679 return -EIO;
2680 }
2682 rx_pg->page = page;
2683 pci_unmap_addr_set(rx_pg, mapping, mapping);
2684 rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
2685 rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
2686 return 0;
2689 static void
2690 bnx2_free_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
2692 struct sw_pg *rx_pg = &rxr->rx_pg_ring[index];
2693 struct page *page = rx_pg->page;
2695 if (!page)
2696 return;
2698 pci_unmap_page(bp->pdev, pci_unmap_addr(rx_pg, mapping), PAGE_SIZE,
2699 PCI_DMA_FROMDEVICE);
2701 __free_page(page);
2702 rx_pg->page = NULL;
2705 static inline int
2706 bnx2_alloc_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
2708 struct sk_buff *skb;
2709 struct sw_bd *rx_buf = &rxr->rx_buf_ring[index];
2710 dma_addr_t mapping;
2711 struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
2712 unsigned long align;
2714 skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
2715 if (skb == NULL) {
2716 return -ENOMEM;
2717 }
2719 if (unlikely((align = (unsigned long) skb->data & (BNX2_RX_ALIGN - 1))))
2720 skb_reserve(skb, BNX2_RX_ALIGN - align);
2722 mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
2723 PCI_DMA_FROMDEVICE);
2724 if (pci_dma_mapping_error(bp->pdev, mapping)) {
2725 dev_kfree_skb(skb);
2726 return -EIO;
2727 }
2729 rx_buf->skb = skb;
2730 pci_unmap_addr_set(rx_buf, mapping, mapping);
2732 rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
2733 rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
2735 rxr->rx_prod_bseq += bp->rx_buf_use_size;
2737 return 0;
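/* Annotation (not part of the original source): netdev_alloc_skb()
 * makes no alignment promise beyond the platform default, so the
 * skb_reserve() above rounds skb->data up to the next BNX2_RX_ALIGN
 * boundary before the buffer is DMA-mapped. Worked example (assuming
 * BNX2_RX_ALIGN is a power of two, as bnx2.h defines it): with a
 * 16-byte alignment and skb->data ending in 0xa, align = 0xa and
 * 6 bytes are reserved. The 64-bit bus address is then split across
 * the hi/lo halves of the rx_bd, which has only 32-bit fields.
 */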
2740 static int
2741 bnx2_phy_event_is_set(struct bnx2 *bp, struct bnx2_napi *bnapi, u32 event)
2743 struct status_block *sblk = bnapi->status_blk.msi;
2744 u32 new_link_state, old_link_state;
2745 int is_set = 1;
2747 new_link_state = sblk->status_attn_bits & event;
2748 old_link_state = sblk->status_attn_bits_ack & event;
2749 if (new_link_state != old_link_state) {
2750 if (new_link_state)
2751 REG_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD, event);
2752 else
2753 REG_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD, event);
2754 } else
2755 is_set = 0;
2757 return is_set;
2760 static void
2761 bnx2_phy_int(struct bnx2 *bp, struct bnx2_napi *bnapi)
2763 spin_lock(&bp->phy_lock);
2765 if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_LINK_STATE))
2766 bnx2_set_link(bp);
2767 if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_TIMER_ABORT))
2768 bnx2_set_remote_link(bp);
2770 spin_unlock(&bp->phy_lock);
2774 static inline u16
2775 bnx2_get_hw_tx_cons(struct bnx2_napi *bnapi)
2777 u16 cons;
2779 /* Tell compiler that status block fields can change. */
2780 barrier();
2781 cons = *bnapi->hw_tx_cons_ptr;
2782 barrier();
2783 if (unlikely((cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT))
2784 cons++;
2785 return cons;
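/* Annotation (not part of the original source): the status block lives
 * in coherent DMA memory that the chip updates behind the compiler's
 * back, so barrier() -- a compiler barrier only, no fence instruction --
 * forces a fresh load of *hw_tx_cons_ptr instead of a cached value.
 * The final adjustment skips indices whose low bits equal
 * MAX_TX_DESC_CNT: that slot in each ring page holds the next-page
 * link descriptor and never carries a completion.
 */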
2788 static int
2789 bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
2791 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
2792 u16 hw_cons, sw_cons, sw_ring_cons;
2793 int tx_pkt = 0, index;
2794 struct netdev_queue *txq;
2796 index = (bnapi - bp->bnx2_napi);
2797 txq = netdev_get_tx_queue(bp->dev, index);
2799 hw_cons = bnx2_get_hw_tx_cons(bnapi);
2800 sw_cons = txr->tx_cons;
2802 while (sw_cons != hw_cons) {
2803 struct sw_tx_bd *tx_buf;
2804 struct sk_buff *skb;
2805 int i, last;
2807 sw_ring_cons = TX_RING_IDX(sw_cons);
2809 tx_buf = &txr->tx_buf_ring[sw_ring_cons];
2810 skb = tx_buf->skb;
2812 /* prefetch skb_end_pointer() to speedup skb_shinfo(skb) */
2813 prefetch(&skb->end);
2815 /* partial BD completions possible with TSO packets */
2816 if (tx_buf->is_gso) {
2817 u16 last_idx, last_ring_idx;
2819 last_idx = sw_cons + tx_buf->nr_frags + 1;
2820 last_ring_idx = sw_ring_cons + tx_buf->nr_frags + 1;
2821 if (unlikely(last_ring_idx >= MAX_TX_DESC_CNT)) {
2822 last_idx++;
2823 }
2824 if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
2825 break;
2826 }
2827 }
2829 pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
2830 skb_headlen(skb), PCI_DMA_TODEVICE);
2832 tx_buf->skb = NULL;
2833 last = tx_buf->nr_frags;
2835 for (i = 0; i < last; i++) {
2836 sw_cons = NEXT_TX_BD(sw_cons);
2838 pci_unmap_page(bp->pdev,
2839 pci_unmap_addr(
2840 &txr->tx_buf_ring[TX_RING_IDX(sw_cons)],
2841 mapping),
2842 skb_shinfo(skb)->frags[i].size,
2843 PCI_DMA_TODEVICE);
2844 }
2846 sw_cons = NEXT_TX_BD(sw_cons);
2848 dev_kfree_skb(skb);
2849 tx_pkt++;
2850 if (tx_pkt == budget)
2851 break;
2853 if (hw_cons == sw_cons)
2854 hw_cons = bnx2_get_hw_tx_cons(bnapi);
2855 }
2857 txr->hw_tx_cons = hw_cons;
2858 txr->tx_cons = sw_cons;
2860 /* Need to make the tx_cons update visible to bnx2_start_xmit()
2861 * before checking for netif_tx_queue_stopped(). Without the
2862 * memory barrier, there is a small possibility that bnx2_start_xmit()
2863 * will miss it and cause the queue to be stopped forever.
2864 */
2865 smp_mb();
2867 if (unlikely(netif_tx_queue_stopped(txq)) &&
2868 (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)) {
2869 __netif_tx_lock(txq, smp_processor_id());
2870 if ((netif_tx_queue_stopped(txq)) &&
2871 (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh))
2872 netif_tx_wake_queue(txq);
2873 __netif_tx_unlock(txq);
2876 return tx_pkt;
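/* Annotation (not part of the original source): the smp_mb() above
 * pairs with a matching barrier in bnx2_start_xmit(). The producer
 * stops the queue and then re-checks ring space; the consumer updates
 * tx_cons and then re-checks the stopped flag. With both orderings
 * enforced, at least one side must observe the other's update, and the
 * second check under __netif_tx_lock() resolves the remaining race
 * without ever leaving the queue stopped forever.
 */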
2879 static void
2880 bnx2_reuse_rx_skb_pages(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
2881 struct sk_buff *skb, int count)
2883 struct sw_pg *cons_rx_pg, *prod_rx_pg;
2884 struct rx_bd *cons_bd, *prod_bd;
2885 int i;
2886 u16 hw_prod, prod;
2887 u16 cons = rxr->rx_pg_cons;
2889 cons_rx_pg = &rxr->rx_pg_ring[cons];
2891 /* The caller was unable to allocate a new page to replace the
2892 * last one in the frags array, so we need to recycle that page
2893 * and then free the skb.
2894 */
2895 if (skb) {
2896 struct page *page;
2897 struct skb_shared_info *shinfo;
2899 shinfo = skb_shinfo(skb);
2900 shinfo->nr_frags--;
2901 page = shinfo->frags[shinfo->nr_frags].page;
2902 shinfo->frags[shinfo->nr_frags].page = NULL;
2904 cons_rx_pg->page = page;
2905 dev_kfree_skb(skb);
2906 }
2908 hw_prod = rxr->rx_pg_prod;
2910 for (i = 0; i < count; i++) {
2911 prod = RX_PG_RING_IDX(hw_prod);
2913 prod_rx_pg = &rxr->rx_pg_ring[prod];
2914 cons_rx_pg = &rxr->rx_pg_ring[cons];
2915 cons_bd = &rxr->rx_pg_desc_ring[RX_RING(cons)][RX_IDX(cons)];
2916 prod_bd = &rxr->rx_pg_desc_ring[RX_RING(prod)][RX_IDX(prod)];
2918 if (prod != cons) {
2919 prod_rx_pg->page = cons_rx_pg->page;
2920 cons_rx_pg->page = NULL;
2921 pci_unmap_addr_set(prod_rx_pg, mapping,
2922 pci_unmap_addr(cons_rx_pg, mapping));
2924 prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
2925 prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
2926 }
2928 cons = RX_PG_RING_IDX(NEXT_RX_BD(cons));
2929 hw_prod = NEXT_RX_BD(hw_prod);
2930 }
2931 rxr->rx_pg_prod = hw_prod;
2932 rxr->rx_pg_cons = cons;
2935 static inline void
2936 bnx2_reuse_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
2937 struct sk_buff *skb, u16 cons, u16 prod)
2939 struct sw_bd *cons_rx_buf, *prod_rx_buf;
2940 struct rx_bd *cons_bd, *prod_bd;
2942 cons_rx_buf = &rxr->rx_buf_ring[cons];
2943 prod_rx_buf = &rxr->rx_buf_ring[prod];
2945 pci_dma_sync_single_for_device(bp->pdev,
2946 pci_unmap_addr(cons_rx_buf, mapping),
2947 BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
2949 rxr->rx_prod_bseq += bp->rx_buf_use_size;
2951 prod_rx_buf->skb = skb;
2953 if (cons == prod)
2954 return;
2956 pci_unmap_addr_set(prod_rx_buf, mapping,
2957 pci_unmap_addr(cons_rx_buf, mapping));
2959 cons_bd = &rxr->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
2960 prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
2961 prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
2962 prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
2965 static int
2966 bnx2_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, struct sk_buff *skb,
2967 unsigned int len, unsigned int hdr_len, dma_addr_t dma_addr,
2968 u32 ring_idx)
2970 int err;
2971 u16 prod = ring_idx & 0xffff;
2973 err = bnx2_alloc_rx_skb(bp, rxr, prod);
2974 if (unlikely(err)) {
2975 bnx2_reuse_rx_skb(bp, rxr, skb, (u16) (ring_idx >> 16), prod);
2976 if (hdr_len) {
2977 unsigned int raw_len = len + 4;
2978 int pages = PAGE_ALIGN(raw_len - hdr_len) >> PAGE_SHIFT;
2980 bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages);
2981 }
2982 return err;
2983 }
2985 skb_reserve(skb, BNX2_RX_OFFSET);
2986 pci_unmap_single(bp->pdev, dma_addr, bp->rx_buf_use_size,
2987 PCI_DMA_FROMDEVICE);
2989 if (hdr_len == 0) {
2990 skb_put(skb, len);
2991 return 0;
2992 } else {
2993 unsigned int i, frag_len, frag_size, pages;
2994 struct sw_pg *rx_pg;
2995 u16 pg_cons = rxr->rx_pg_cons;
2996 u16 pg_prod = rxr->rx_pg_prod;
2998 frag_size = len + 4 - hdr_len;
2999 pages = PAGE_ALIGN(frag_size) >> PAGE_SHIFT;
3000 skb_put(skb, hdr_len);
3002 for (i = 0; i < pages; i++) {
3003 dma_addr_t mapping_old;
3005 frag_len = min(frag_size, (unsigned int) PAGE_SIZE);
3006 if (unlikely(frag_len <= 4)) {
3007 unsigned int tail = 4 - frag_len;
3009 rxr->rx_pg_cons = pg_cons;
3010 rxr->rx_pg_prod = pg_prod;
3011 bnx2_reuse_rx_skb_pages(bp, rxr, NULL,
3012 pages - i);
3013 skb->len -= tail;
3014 if (i == 0) {
3015 skb->tail -= tail;
3016 } else {
3017 skb_frag_t *frag =
3018 &skb_shinfo(skb)->frags[i - 1];
3019 frag->size -= tail;
3020 skb->data_len -= tail;
3021 skb->truesize -= tail;
3022 }
3023 return 0;
3024 }
3025 rx_pg = &rxr->rx_pg_ring[pg_cons];
3027 /* Don't unmap yet. If we're unable to allocate a new
3028 * page, we need to recycle the page and the DMA addr.
3029 */
3030 mapping_old = pci_unmap_addr(rx_pg, mapping);
3031 if (i == pages - 1)
3032 frag_len -= 4;
3034 skb_fill_page_desc(skb, i, rx_pg->page, 0, frag_len);
3035 rx_pg->page = NULL;
3037 err = bnx2_alloc_rx_page(bp, rxr,
3038 RX_PG_RING_IDX(pg_prod));
3039 if (unlikely(err)) {
3040 rxr->rx_pg_cons = pg_cons;
3041 rxr->rx_pg_prod = pg_prod;
3042 bnx2_reuse_rx_skb_pages(bp, rxr, skb,
3043 pages - i);
3044 return err;
3045 }
3047 pci_unmap_page(bp->pdev, mapping_old,
3048 PAGE_SIZE, PCI_DMA_FROMDEVICE);
3050 frag_size -= frag_len;
3051 skb->data_len += frag_len;
3052 skb->truesize += frag_len;
3053 skb->len += frag_len;
3055 pg_prod = NEXT_RX_BD(pg_prod);
3056 pg_cons = RX_PG_RING_IDX(NEXT_RX_BD(pg_cons));
3057 }
3058 rxr->rx_pg_prod = pg_prod;
3059 rxr->rx_pg_cons = pg_cons;
3060 }
3061 return 0;
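/* Annotation (not part of the original source): for split-header and
 * jumbo frames the first hdr_len bytes stay in the skb's linear area
 * and the remainder arrives in page-ring pages attached as frags.
 * The extra 4 bytes carried in frag_size are the trailing CRC: they
 * are trimmed either by the frag_len -= 4 on the last page or, when
 * the tail spills fewer than 4 bytes into the final page, by shrinking
 * the previous frag instead (the frag_len <= 4 branch above).
 */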
3064 static inline u16
3065 bnx2_get_hw_rx_cons(struct bnx2_napi *bnapi)
3067 u16 cons;
3069 /* Tell compiler that status block fields can change. */
3070 barrier();
3071 cons = *bnapi->hw_rx_cons_ptr;
3072 barrier();
3073 if (unlikely((cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT))
3074 cons++;
3075 return cons;
3078 static int
3079 bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
3081 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
3082 u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
3083 struct l2_fhdr *rx_hdr;
3084 int rx_pkt = 0, pg_ring_used = 0;
3086 hw_cons = bnx2_get_hw_rx_cons(bnapi);
3087 sw_cons = rxr->rx_cons;
3088 sw_prod = rxr->rx_prod;
3090 /* Memory barrier necessary as speculative reads of the rx
3091 * buffer can be ahead of the index in the status block
3092 */
3093 rmb();
3094 while (sw_cons != hw_cons) {
3095 unsigned int len, hdr_len;
3096 u32 status;
3097 struct sw_bd *rx_buf;
3098 struct sk_buff *skb;
3099 dma_addr_t dma_addr;
3100 u16 vtag = 0;
3101 int hw_vlan __maybe_unused = 0;
3103 sw_ring_cons = RX_RING_IDX(sw_cons);
3104 sw_ring_prod = RX_RING_IDX(sw_prod);
3106 rx_buf = &rxr->rx_buf_ring[sw_ring_cons];
3107 skb = rx_buf->skb;
3109 rx_buf->skb = NULL;
3111 dma_addr = pci_unmap_addr(rx_buf, mapping);
3113 pci_dma_sync_single_for_cpu(bp->pdev, dma_addr,
3114 BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH,
3115 PCI_DMA_FROMDEVICE);
3117 rx_hdr = (struct l2_fhdr *) skb->data;
3118 len = rx_hdr->l2_fhdr_pkt_len;
3119 status = rx_hdr->l2_fhdr_status;
3121 hdr_len = 0;
3122 if (status & L2_FHDR_STATUS_SPLIT) {
3123 hdr_len = rx_hdr->l2_fhdr_ip_xsum;
3124 pg_ring_used = 1;
3125 } else if (len > bp->rx_jumbo_thresh) {
3126 hdr_len = bp->rx_jumbo_thresh;
3127 pg_ring_used = 1;
3130 if (unlikely(status & (L2_FHDR_ERRORS_BAD_CRC |
3131 L2_FHDR_ERRORS_PHY_DECODE |
3132 L2_FHDR_ERRORS_ALIGNMENT |
3133 L2_FHDR_ERRORS_TOO_SHORT |
3134 L2_FHDR_ERRORS_GIANT_FRAME))) {
3136 bnx2_reuse_rx_skb(bp, rxr, skb, sw_ring_cons,
3137 sw_ring_prod);
3138 if (pg_ring_used) {
3139 int pages;
3141 pages = PAGE_ALIGN(len - hdr_len) >> PAGE_SHIFT;
3143 bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages);
3144 }
3145 goto next_rx;
3146 }
3148 len -= 4;
3150 if (len <= bp->rx_copy_thresh) {
3151 struct sk_buff *new_skb;
3153 new_skb = netdev_alloc_skb(bp->dev, len + 6);
3154 if (new_skb == NULL) {
3155 bnx2_reuse_rx_skb(bp, rxr, skb, sw_ring_cons,
3156 sw_ring_prod);
3157 goto next_rx;
3158 }
3160 /* aligned copy */
3161 skb_copy_from_linear_data_offset(skb,
3162 BNX2_RX_OFFSET - 6,
3163 new_skb->data, len + 6);
3164 skb_reserve(new_skb, 6);
3165 skb_put(new_skb, len);
3167 bnx2_reuse_rx_skb(bp, rxr, skb,
3168 sw_ring_cons, sw_ring_prod);
3170 skb = new_skb;
3171 } else if (unlikely(bnx2_rx_skb(bp, rxr, skb, len, hdr_len,
3172 dma_addr, (sw_ring_cons << 16) | sw_ring_prod)))
3173 goto next_rx;
3175 if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) &&
3176 !(bp->rx_mode & BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG)) {
3177 vtag = rx_hdr->l2_fhdr_vlan_tag;
3178 #ifdef BCM_VLAN
3179 if (bp->vlgrp)
3180 hw_vlan = 1;
3181 else
3182 #endif
3183 {
3184 struct vlan_ethhdr *ve = (struct vlan_ethhdr *)
3185 __skb_push(skb, 4);
3187 memmove(ve, skb->data + 4, ETH_ALEN * 2);
3188 ve->h_vlan_proto = htons(ETH_P_8021Q);
3189 ve->h_vlan_TCI = htons(vtag);
3190 len += 4;
3191 }
3192 }
3194 skb->protocol = eth_type_trans(skb, bp->dev);
3196 if ((len > (bp->dev->mtu + ETH_HLEN)) &&
3197 (ntohs(skb->protocol) != 0x8100)) {
3199 dev_kfree_skb(skb);
3200 goto next_rx;
3202 }
3204 skb->ip_summed = CHECKSUM_NONE;
3205 if (bp->rx_csum &&
3206 (status & (L2_FHDR_STATUS_TCP_SEGMENT |
3207 L2_FHDR_STATUS_UDP_DATAGRAM))) {
3209 if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
3210 L2_FHDR_ERRORS_UDP_XSUM)) == 0))
3211 skb->ip_summed = CHECKSUM_UNNECESSARY;
3212 }
3214 skb_record_rx_queue(skb, bnapi - &bp->bnx2_napi[0]);
3216 #ifdef BCM_VLAN
3217 if (hw_vlan)
3218 vlan_hwaccel_receive_skb(skb, bp->vlgrp, vtag);
3219 else
3220 #endif
3221 netif_receive_skb(skb);
3223 rx_pkt++;
3225 next_rx:
3226 sw_cons = NEXT_RX_BD(sw_cons);
3227 sw_prod = NEXT_RX_BD(sw_prod);
3229 if ((rx_pkt == budget))
3230 break;
3232 /* Refresh hw_cons to see if there is new work */
3233 if (sw_cons == hw_cons) {
3234 hw_cons = bnx2_get_hw_rx_cons(bnapi);
3235 rmb();
3236 }
3237 }
3238 rxr->rx_cons = sw_cons;
3239 rxr->rx_prod = sw_prod;
3241 if (pg_ring_used)
3242 REG_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);
3244 REG_WR16(bp, rxr->rx_bidx_addr, sw_prod);
3246 REG_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);
3248 mmiowb();
3250 return rx_pkt;
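/* Annotation (not part of the original source): the writes above are
 * the rx doorbells -- 16-bit producer indices for the buffer and page
 * rings plus the running byte sequence (rx_prod_bseq) that tells the
 * chip how much buffer space has been posted. mmiowb() is the
 * conventional fence on architectures where MMIO writes from different
 * CPUs can be reordered by the I/O fabric, keeping these doorbells
 * ordered ahead of any subsequent lock release.
 */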
3254 /* MSI ISR - The only difference between this and the INTx ISR
3255 * is that the MSI interrupt is always serviced.
3256 */
3257 static irqreturn_t
3258 bnx2_msi(int irq, void *dev_instance)
3260 struct bnx2_napi *bnapi = dev_instance;
3261 struct bnx2 *bp = bnapi->bp;
3263 prefetch(bnapi->status_blk.msi);
3264 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3265 BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
3266 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
3268 /* Return here if interrupt is disabled. */
3269 if (unlikely(atomic_read(&bp->intr_sem) != 0))
3270 return IRQ_HANDLED;
3272 napi_schedule(&bnapi->napi);
3274 return IRQ_HANDLED;
3277 static irqreturn_t
3278 bnx2_msi_1shot(int irq, void *dev_instance)
3280 struct bnx2_napi *bnapi = dev_instance;
3281 struct bnx2 *bp = bnapi->bp;
3283 prefetch(bnapi->status_blk.msi);
3285 /* Return here if interrupt is disabled. */
3286 if (unlikely(atomic_read(&bp->intr_sem) != 0))
3287 return IRQ_HANDLED;
3289 napi_schedule(&bnapi->napi);
3291 return IRQ_HANDLED;
3294 static irqreturn_t
3295 bnx2_interrupt(int irq, void *dev_instance)
3297 struct bnx2_napi *bnapi = dev_instance;
3298 struct bnx2 *bp = bnapi->bp;
3299 struct status_block *sblk = bnapi->status_blk.msi;
3301 /* When using INTx, it is possible for the interrupt to arrive
3302 * at the CPU before the status block posted prior to the
3303 * interrupt. Reading a register will flush the status block.
3304 * When using MSI, the MSI message will always complete after
3305 * the status block write.
3306 */
3307 if ((sblk->status_idx == bnapi->last_status_idx) &&
3308 (REG_RD(bp, BNX2_PCICFG_MISC_STATUS) &
3309 BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
3310 return IRQ_NONE;
3312 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3313 BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
3314 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
3316 /* Read back to deassert IRQ immediately to avoid too many
3317 * spurious interrupts.
3318 */
3319 REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
3321 /* Return here if interrupt is shared and is disabled. */
3322 if (unlikely(atomic_read(&bp->intr_sem) != 0))
3323 return IRQ_HANDLED;
3325 if (napi_schedule_prep(&bnapi->napi)) {
3326 bnapi->last_status_idx = sblk->status_idx;
3327 __napi_schedule(&bnapi->napi);
3328 }
3330 return IRQ_HANDLED;
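/* Annotation (not part of the original source): the read-back of
 * BNX2_PCICFG_INT_ACK_CMD above is a standard posted-write flush; the
 * write that masks INTA could otherwise sit in a PCI bridge and let
 * the still-asserted line retrigger the handler. The earlier
 * MISC_STATUS read serves double duty: it flushes any in-flight status
 * block DMA and samples the chip's INTA state, so an interrupt that
 * belongs to another device sharing the line can be reported as
 * IRQ_NONE.
 */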
3333 static inline int
3334 bnx2_has_fast_work(struct bnx2_napi *bnapi)
3336 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
3337 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
3339 if ((bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons) ||
3340 (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons))
3341 return 1;
3342 return 0;
3345 #define STATUS_ATTN_EVENTS (STATUS_ATTN_BITS_LINK_STATE | \
3346 STATUS_ATTN_BITS_TIMER_ABORT)
3348 static inline int
3349 bnx2_has_work(struct bnx2_napi *bnapi)
3351 struct status_block *sblk = bnapi->status_blk.msi;
3353 if (bnx2_has_fast_work(bnapi))
3354 return 1;
3356 #ifdef BCM_CNIC
3357 if (bnapi->cnic_present && (bnapi->cnic_tag != sblk->status_idx))
3358 return 1;
3359 #endif
3361 if ((sblk->status_attn_bits & STATUS_ATTN_EVENTS) !=
3362 (sblk->status_attn_bits_ack & STATUS_ATTN_EVENTS))
3363 return 1;
3365 return 0;
3368 static void
3369 bnx2_chk_missed_msi(struct bnx2 *bp)
3371 struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
3372 u32 msi_ctrl;
3374 if (bnx2_has_work(bnapi)) {
3375 msi_ctrl = REG_RD(bp, BNX2_PCICFG_MSI_CONTROL);
3376 if (!(msi_ctrl & BNX2_PCICFG_MSI_CONTROL_ENABLE))
3377 return;
3379 if (bnapi->last_status_idx == bp->idle_chk_status_idx) {
3380 REG_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl &
3381 ~BNX2_PCICFG_MSI_CONTROL_ENABLE);
3382 REG_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl);
3383 bnx2_msi(bp->irq_tbl[0].vector, bnapi);
3384 }
3385 }
3387 bp->idle_chk_status_idx = bnapi->last_status_idx;
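/* Annotation (not part of the original source): this is the missed-MSI
 * workaround run from the driver's periodic timer. If work is pending
 * but the status index has not advanced since the previous tick, the
 * MSI is assumed lost: the MSI enable bit is pulsed off and back on
 * and the ISR is invoked by hand to restart NAPI polling.
 */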
3390 #ifdef BCM_CNIC
3391 static void bnx2_poll_cnic(struct bnx2 *bp, struct bnx2_napi *bnapi)
3393 struct cnic_ops *c_ops;
3395 if (!bnapi->cnic_present)
3396 return;
3398 rcu_read_lock();
3399 c_ops = rcu_dereference(bp->cnic_ops);
3400 if (c_ops)
3401 bnapi->cnic_tag = c_ops->cnic_handler(bp->cnic_data,
3402 bnapi->status_blk.msi);
3403 rcu_read_unlock();
3405 #endif
3407 static void bnx2_poll_link(struct bnx2 *bp, struct bnx2_napi *bnapi)
3409 struct status_block *sblk = bnapi->status_blk.msi;
3410 u32 status_attn_bits = sblk->status_attn_bits;
3411 u32 status_attn_bits_ack = sblk->status_attn_bits_ack;
3413 if ((status_attn_bits & STATUS_ATTN_EVENTS) !=
3414 (status_attn_bits_ack & STATUS_ATTN_EVENTS)) {
3416 bnx2_phy_int(bp, bnapi);
3418 /* This is needed to take care of transient status
3419 * during link changes.
3420 */
3421 REG_WR(bp, BNX2_HC_COMMAND,
3422 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
3423 REG_RD(bp, BNX2_HC_COMMAND);
3424 }
3427 static int bnx2_poll_work(struct bnx2 *bp, struct bnx2_napi *bnapi,
3428 int work_done, int budget)
3430 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
3431 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
3433 if (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons)
3434 bnx2_tx_int(bp, bnapi, 0);
3436 if (bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons)
3437 work_done += bnx2_rx_int(bp, bnapi, budget - work_done);
3439 return work_done;
3442 static int bnx2_poll_msix(struct napi_struct *napi, int budget)
3444 struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
3445 struct bnx2 *bp = bnapi->bp;
3446 int work_done = 0;
3447 struct status_block_msix *sblk = bnapi->status_blk.msix;
3449 while (1) {
3450 work_done = bnx2_poll_work(bp, bnapi, work_done, budget);
3451 if (unlikely(work_done >= budget))
3452 break;
3454 bnapi->last_status_idx = sblk->status_idx;
3455 /* status idx must be read before checking for more work. */
3456 rmb();
3457 if (likely(!bnx2_has_fast_work(bnapi))) {
3459 napi_complete(napi);
3460 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
3461 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
3462 bnapi->last_status_idx);
3463 break;
3464 }
3465 }
3466 return work_done;
3469 static int bnx2_poll(struct napi_struct *napi, int budget)
3471 struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
3472 struct bnx2 *bp = bnapi->bp;
3473 int work_done = 0;
3474 struct status_block *sblk = bnapi->status_blk.msi;
3476 while (1) {
3477 bnx2_poll_link(bp, bnapi);
3479 work_done = bnx2_poll_work(bp, bnapi, work_done, budget);
3481 #ifdef BCM_CNIC
3482 bnx2_poll_cnic(bp, bnapi);
3483 #endif
3485 /* bnapi->last_status_idx is used below to tell the hw how
3486 * much work has been processed, so we must read it before
3487 * checking for more work.
3488 */
3489 bnapi->last_status_idx = sblk->status_idx;
3491 if (unlikely(work_done >= budget))
3492 break;
3494 rmb();
3495 if (likely(!bnx2_has_work(bnapi))) {
3496 napi_complete(napi);
3497 if (likely(bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)) {
3498 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3499 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
3500 bnapi->last_status_idx);
3501 break;
3502 }
3503 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3504 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
3505 BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
3506 bnapi->last_status_idx);
3508 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3509 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
3510 bnapi->last_status_idx);
3511 break;
3512 }
3513 }
3515 return work_done;
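/* Annotation (not part of the original source): napi_complete() must
 * run before interrupts are re-enabled, and last_status_idx is carried
 * in the INT_ACK command so the chip knows how far the driver has
 * processed. Note the INTx fallback issues two writes -- the first
 * keeps BNX2_PCICFG_INT_ACK_CMD_MASK_INT set -- and only the final
 * write without the mask bit actually re-arms the interrupt line.
 */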
3518 /* Called with rtnl_lock from vlan functions and also netif_tx_lock
3519 * from set_multicast.
3520 */
3521 static void
3522 bnx2_set_rx_mode(struct net_device *dev)
3524 struct bnx2 *bp = netdev_priv(dev);
3525 u32 rx_mode, sort_mode;
3526 struct netdev_hw_addr *ha;
3527 int i;
3529 if (!netif_running(dev))
3530 return;
3532 spin_lock_bh(&bp->phy_lock);
3534 rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
3535 BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
3536 sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
3537 #ifdef BCM_VLAN
3538 if (!bp->vlgrp && (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN))
3539 rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
3540 #else
3541 if (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN)
3542 rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
3543 #endif
3544 if (dev->flags & IFF_PROMISC) {
3545 /* Promiscuous mode. */
3546 rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
3547 sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
3548 BNX2_RPM_SORT_USER0_PROM_VLAN;
3549 }
3550 else if (dev->flags & IFF_ALLMULTI) {
3551 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
3552 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
3553 0xffffffff);
3554 }
3555 sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
3556 }
3557 else {
3558 /* Accept one or more multicast(s). */
3559 struct dev_mc_list *mclist;
3560 u32 mc_filter[NUM_MC_HASH_REGISTERS];
3561 u32 regidx;
3562 u32 bit;
3563 u32 crc;
3565 memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);
3567 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
3568 i++, mclist = mclist->next) {
3570 crc = ether_crc_le(ETH_ALEN, mclist->dmi_addr);
3571 bit = crc & 0xff;
3572 regidx = (bit & 0xe0) >> 5;
3573 bit &= 0x1f;
3574 mc_filter[regidx] |= (1 << bit);
3575 }
3577 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
3578 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
3579 mc_filter[i]);
3580 }
3582 sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
3583 }
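/* Annotation (not part of the original source): sketch of the hash
 * computation above, with made-up variable names for illustration:
 *
 *	u32 bucket = ether_crc_le(ETH_ALEN, addr) & 0xff;  // 256 buckets
 *	regidx = (bucket & 0xe0) >> 5;	// which of the 8 hash registers
 *	bit    = bucket & 0x1f;		// which of its 32 bits
 *
 * i.e. the little-endian CRC32 of the 6-byte MAC address picks one of
 * 256 bits spread across BNX2_EMAC_MULTICAST_HASH0..7.
 */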
3585 if (dev->uc.count > BNX2_MAX_UNICAST_ADDRESSES) {
3586 rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
3587 sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
3588 BNX2_RPM_SORT_USER0_PROM_VLAN;
3589 } else if (!(dev->flags & IFF_PROMISC)) {
3590 /* Add all entries into the match filter list */
3591 i = 0;
3592 list_for_each_entry(ha, &dev->uc.list, list) {
3593 bnx2_set_mac_addr(bp, ha->addr,
3594 i + BNX2_START_UNICAST_ADDRESS_INDEX);
3595 sort_mode |= (1 <<
3596 (i + BNX2_START_UNICAST_ADDRESS_INDEX));
3597 i++;
3598 }
3600 }
3602 if (rx_mode != bp->rx_mode) {
3603 bp->rx_mode = rx_mode;
3604 REG_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
3605 }
3607 REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
3608 REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
3609 REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);
3611 spin_unlock_bh(&bp->phy_lock);
3614 static int __devinit
3615 check_fw_section(const struct firmware *fw,
3616 const struct bnx2_fw_file_section *section,
3617 u32 alignment, bool non_empty)
3619 u32 offset = be32_to_cpu(section->offset);
3620 u32 len = be32_to_cpu(section->len);
3622 if ((offset == 0 && len != 0) || offset >= fw->size || offset & 3)
3623 return -EINVAL;
3624 if ((non_empty && len == 0) || len > fw->size - offset ||
3625 len & (alignment - 1))
3626 return -EINVAL;
3627 return 0;
3630 static int __devinit
3631 check_mips_fw_entry(const struct firmware *fw,
3632 const struct bnx2_mips_fw_file_entry *entry)
3634 if (check_fw_section(fw, &entry->text, 4, true) ||
3635 check_fw_section(fw, &entry->data, 4, false) ||
3636 check_fw_section(fw, &entry->rodata, 4, false))
3637 return -EINVAL;
3638 return 0;
3641 static int __devinit
3642 bnx2_request_firmware(struct bnx2 *bp)
3644 const char *mips_fw_file, *rv2p_fw_file;
3645 const struct bnx2_mips_fw_file *mips_fw;
3646 const struct bnx2_rv2p_fw_file *rv2p_fw;
3647 int rc;
3649 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3650 mips_fw_file = FW_MIPS_FILE_09;
3651 if ((CHIP_ID(bp) == CHIP_ID_5709_A0) ||
3652 (CHIP_ID(bp) == CHIP_ID_5709_A1))
3653 rv2p_fw_file = FW_RV2P_FILE_09_Ax;
3654 else
3655 rv2p_fw_file = FW_RV2P_FILE_09;
3656 } else {
3657 mips_fw_file = FW_MIPS_FILE_06;
3658 rv2p_fw_file = FW_RV2P_FILE_06;
3661 rc = request_firmware(&bp->mips_firmware, mips_fw_file, &bp->pdev->dev);
3662 if (rc) {
3663 printk(KERN_ERR PFX "Can't load firmware file \"%s\"\n",
3664 mips_fw_file);
3665 return rc;
3666 }
3668 rc = request_firmware(&bp->rv2p_firmware, rv2p_fw_file, &bp->pdev->dev);
3669 if (rc) {
3670 printk(KERN_ERR PFX "Can't load firmware file \"%s\"\n",
3671 rv2p_fw_file);
3672 return rc;
3673 }
3674 mips_fw = (const struct bnx2_mips_fw_file *) bp->mips_firmware->data;
3675 rv2p_fw = (const struct bnx2_rv2p_fw_file *) bp->rv2p_firmware->data;
3676 if (bp->mips_firmware->size < sizeof(*mips_fw) ||
3677 check_mips_fw_entry(bp->mips_firmware, &mips_fw->com) ||
3678 check_mips_fw_entry(bp->mips_firmware, &mips_fw->cp) ||
3679 check_mips_fw_entry(bp->mips_firmware, &mips_fw->rxp) ||
3680 check_mips_fw_entry(bp->mips_firmware, &mips_fw->tpat) ||
3681 check_mips_fw_entry(bp->mips_firmware, &mips_fw->txp)) {
3682 printk(KERN_ERR PFX "Firmware file \"%s\" is invalid\n",
3683 mips_fw_file);
3684 return -EINVAL;
3685 }
3686 if (bp->rv2p_firmware->size < sizeof(*rv2p_fw) ||
3687 check_fw_section(bp->rv2p_firmware, &rv2p_fw->proc1.rv2p, 8, true) ||
3688 check_fw_section(bp->rv2p_firmware, &rv2p_fw->proc2.rv2p, 8, true)) {
3689 printk(KERN_ERR PFX "Firmware file \"%s\" is invalid\n",
3690 rv2p_fw_file);
3691 return -EINVAL;
3692 }
3694 return 0;
3697 static u32
3698 rv2p_fw_fixup(u32 rv2p_proc, int idx, u32 loc, u32 rv2p_code)
3700 switch (idx) {
3701 case RV2P_P1_FIXUP_PAGE_SIZE_IDX:
3702 rv2p_code &= ~RV2P_BD_PAGE_SIZE_MSK;
3703 rv2p_code |= RV2P_BD_PAGE_SIZE;
3704 break;
3705 }
3706 return rv2p_code;
3709 static int
3710 load_rv2p_fw(struct bnx2 *bp, u32 rv2p_proc,
3711 const struct bnx2_rv2p_fw_file_entry *fw_entry)
3713 u32 rv2p_code_len, file_offset;
3714 __be32 *rv2p_code;
3715 int i;
3716 u32 val, cmd, addr;
3718 rv2p_code_len = be32_to_cpu(fw_entry->rv2p.len);
3719 file_offset = be32_to_cpu(fw_entry->rv2p.offset);
3721 rv2p_code = (__be32 *)(bp->rv2p_firmware->data + file_offset);
3723 if (rv2p_proc == RV2P_PROC1) {
3724 cmd = BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
3725 addr = BNX2_RV2P_PROC1_ADDR_CMD;
3726 } else {
3727 cmd = BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
3728 addr = BNX2_RV2P_PROC2_ADDR_CMD;
3731 for (i = 0; i < rv2p_code_len; i += 8) {
3732 REG_WR(bp, BNX2_RV2P_INSTR_HIGH, be32_to_cpu(*rv2p_code));
3733 rv2p_code++;
3734 REG_WR(bp, BNX2_RV2P_INSTR_LOW, be32_to_cpu(*rv2p_code));
3735 rv2p_code++;
3737 val = (i / 8) | cmd;
3738 REG_WR(bp, addr, val);
3739 }
3741 rv2p_code = (__be32 *)(bp->rv2p_firmware->data + file_offset);
3742 for (i = 0; i < 8; i++) {
3743 u32 loc, code;
3745 loc = be32_to_cpu(fw_entry->fixup[i]);
3746 if (loc && ((loc * 4) < rv2p_code_len)) {
3747 code = be32_to_cpu(*(rv2p_code + loc - 1));
3748 REG_WR(bp, BNX2_RV2P_INSTR_HIGH, code);
3749 code = be32_to_cpu(*(rv2p_code + loc));
3750 code = rv2p_fw_fixup(rv2p_proc, i, loc, code);
3751 REG_WR(bp, BNX2_RV2P_INSTR_LOW, code);
3753 val = (loc / 2) | cmd;
3754 REG_WR(bp, addr, val);
3755 }
3756 }
3758 /* Reset the processor, un-stall is done later. */
3759 if (rv2p_proc == RV2P_PROC1) {
3760 REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
3761 }
3762 else {
3763 REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
3766 return 0;
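/* Annotation (not part of the original source): RV2P instructions are
 * 64 bits wide, loaded as high/low register pairs and committed with a
 * write at instruction index i/8. The second pass applies up to eight
 * fixups: each non-zero fixup word names a location to patch, and
 * RV2P_P1_FIXUP_PAGE_SIZE_IDX rewrites the BD page-size field so the
 * firmware agrees with the host's PAGE_SIZE.
 */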
3769 static int
3770 load_cpu_fw(struct bnx2 *bp, const struct cpu_reg *cpu_reg,
3771 const struct bnx2_mips_fw_file_entry *fw_entry)
3773 u32 addr, len, file_offset;
3774 __be32 *data;
3775 u32 offset;
3776 u32 val;
3778 /* Halt the CPU. */
3779 val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
3780 val |= cpu_reg->mode_value_halt;
3781 bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
3782 bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
3784 /* Load the Text area. */
3785 addr = be32_to_cpu(fw_entry->text.addr);
3786 len = be32_to_cpu(fw_entry->text.len);
3787 file_offset = be32_to_cpu(fw_entry->text.offset);
3788 data = (__be32 *)(bp->mips_firmware->data + file_offset);
3790 offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3791 if (len) {
3792 int j;
3794 for (j = 0; j < (len / 4); j++, offset += 4)
3795 bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3796 }
3798 /* Load the Data area. */
3799 addr = be32_to_cpu(fw_entry->data.addr);
3800 len = be32_to_cpu(fw_entry->data.len);
3801 file_offset = be32_to_cpu(fw_entry->data.offset);
3802 data = (__be32 *)(bp->mips_firmware->data + file_offset);
3804 offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3805 if (len) {
3806 int j;
3808 for (j = 0; j < (len / 4); j++, offset += 4)
3809 bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3810 }
3812 /* Load the Read-Only area. */
3813 addr = be32_to_cpu(fw_entry->rodata.addr);
3814 len = be32_to_cpu(fw_entry->rodata.len);
3815 file_offset = be32_to_cpu(fw_entry->rodata.offset);
3816 data = (__be32 *)(bp->mips_firmware->data + file_offset);
3818 offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3819 if (len) {
3820 int j;
3822 for (j = 0; j < (len / 4); j++, offset += 4)
3823 bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3824 }
3826 /* Clear the pre-fetch instruction. */
3827 bnx2_reg_wr_ind(bp, cpu_reg->inst, 0);
3829 val = be32_to_cpu(fw_entry->start_addr);
3830 bnx2_reg_wr_ind(bp, cpu_reg->pc, val);
3832 /* Start the CPU. */
3833 val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
3834 val &= ~cpu_reg->mode_value_halt;
3835 bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
3836 bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
3838 return 0;
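/* Annotation (not part of the original source): the load sequence for
 * each on-chip MIPS CPU is: halt it via its mode register, copy the
 * text/data/rodata sections word-by-word into its scratchpad (the
 * MIPS-view address is translated to a register offset relative to
 * spad_base), clear the pre-fetch instruction register, point the PC
 * at the firmware entry address, and finally clear the halt bit.
 */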
3841 static int
3842 bnx2_init_cpus(struct bnx2 *bp)
3844 const struct bnx2_mips_fw_file *mips_fw =
3845 (const struct bnx2_mips_fw_file *) bp->mips_firmware->data;
3846 const struct bnx2_rv2p_fw_file *rv2p_fw =
3847 (const struct bnx2_rv2p_fw_file *) bp->rv2p_firmware->data;
3848 int rc;
3850 /* Initialize the RV2P processor. */
3851 load_rv2p_fw(bp, RV2P_PROC1, &rv2p_fw->proc1);
3852 load_rv2p_fw(bp, RV2P_PROC2, &rv2p_fw->proc2);
3854 /* Initialize the RX Processor. */
3855 rc = load_cpu_fw(bp, &cpu_reg_rxp, &mips_fw->rxp);
3856 if (rc)
3857 goto init_cpu_err;
3859 /* Initialize the TX Processor. */
3860 rc = load_cpu_fw(bp, &cpu_reg_txp, &mips_fw->txp);
3861 if (rc)
3862 goto init_cpu_err;
3864 /* Initialize the TX Patch-up Processor. */
3865 rc = load_cpu_fw(bp, &cpu_reg_tpat, &mips_fw->tpat);
3866 if (rc)
3867 goto init_cpu_err;
3869 /* Initialize the Completion Processor. */
3870 rc = load_cpu_fw(bp, &cpu_reg_com, &mips_fw->com);
3871 if (rc)
3872 goto init_cpu_err;
3874 /* Initialize the Command Processor. */
3875 rc = load_cpu_fw(bp, &cpu_reg_cp, &mips_fw->cp);
3877 init_cpu_err:
3878 return rc;
3881 static int
3882 bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
3884 u16 pmcsr;
3886 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
3888 switch (state) {
3889 case PCI_D0: {
3890 u32 val;
3892 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
3893 (pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
3894 PCI_PM_CTRL_PME_STATUS);
3896 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
3897 /* delay required during transition out of D3hot */
3898 msleep(20);
3900 val = REG_RD(bp, BNX2_EMAC_MODE);
3901 val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
3902 val &= ~BNX2_EMAC_MODE_MPKT;
3903 REG_WR(bp, BNX2_EMAC_MODE, val);
3905 val = REG_RD(bp, BNX2_RPM_CONFIG);
3906 val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
3907 REG_WR(bp, BNX2_RPM_CONFIG, val);
3908 break;
3909 }
3910 case PCI_D3hot: {
3911 int i;
3912 u32 val, wol_msg;
3914 if (bp->wol) {
3915 u32 advertising;
3916 u8 autoneg;
3918 autoneg = bp->autoneg;
3919 advertising = bp->advertising;
3921 if (bp->phy_port == PORT_TP) {
3922 bp->autoneg = AUTONEG_SPEED;
3923 bp->advertising = ADVERTISED_10baseT_Half |
3924 ADVERTISED_10baseT_Full |
3925 ADVERTISED_100baseT_Half |
3926 ADVERTISED_100baseT_Full |
3927 ADVERTISED_Autoneg;
3928 }
3930 spin_lock_bh(&bp->phy_lock);
3931 bnx2_setup_phy(bp, bp->phy_port);
3932 spin_unlock_bh(&bp->phy_lock);
3934 bp->autoneg = autoneg;
3935 bp->advertising = advertising;
3937 bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
3939 val = REG_RD(bp, BNX2_EMAC_MODE);
3941 /* Enable port mode. */
3942 val &= ~BNX2_EMAC_MODE_PORT;
3943 val |= BNX2_EMAC_MODE_MPKT_RCVD |
3944 BNX2_EMAC_MODE_ACPI_RCVD |
3945 BNX2_EMAC_MODE_MPKT;
3946 if (bp->phy_port == PORT_TP)
3947 val |= BNX2_EMAC_MODE_PORT_MII;
3948 else {
3949 val |= BNX2_EMAC_MODE_PORT_GMII;
3950 if (bp->line_speed == SPEED_2500)
3951 val |= BNX2_EMAC_MODE_25G_MODE;
3952 }
3954 REG_WR(bp, BNX2_EMAC_MODE, val);
3956 /* receive all multicast */
3957 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
3958 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
3959 0xffffffff);
3960 }
3961 REG_WR(bp, BNX2_EMAC_RX_MODE,
3962 BNX2_EMAC_RX_MODE_SORT_MODE);
3964 val = 1 | BNX2_RPM_SORT_USER0_BC_EN |
3965 BNX2_RPM_SORT_USER0_MC_EN;
3966 REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
3967 REG_WR(bp, BNX2_RPM_SORT_USER0, val);
3968 REG_WR(bp, BNX2_RPM_SORT_USER0, val |
3969 BNX2_RPM_SORT_USER0_ENA);
3971 /* Need to enable EMAC and RPM for WOL. */
3972 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
3973 BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
3974 BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
3975 BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);
3977 val = REG_RD(bp, BNX2_RPM_CONFIG);
3978 val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
3979 REG_WR(bp, BNX2_RPM_CONFIG, val);
3981 wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
3982 }
3983 else {
3984 wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
3987 if (!(bp->flags & BNX2_FLAG_NO_WOL))
3988 bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg,
3989 1, 0);
3991 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
3992 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
3993 (CHIP_ID(bp) == CHIP_ID_5706_A1)) {
3995 if (bp->wol)
3996 pmcsr |= 3;
3997 }
3998 else {
3999 pmcsr |= 3;
4000 }
4001 if (bp->wol) {
4002 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
4003 }
4004 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
4005 pmcsr);
4007 /* No more memory access after this point until
4008 * device is brought back to D0.
4009 */
4010 udelay(50);
4011 break;
4012 }
4013 default:
4014 return -EINVAL;
4015 }
4016 return 0;
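/* Annotation (not part of the original source): the D3hot path above
 * arms wake-on-LAN before cutting power: the copper PHY is
 * renegotiated down to 10/100 (a WOL link need not run at GMII speed),
 * magic/ACPI packet detection is enabled in the EMAC, the multicast
 * filters are opened wide, the bootcode is told via bnx2_fw_sync()
 * whether WOL is armed, and PMCSR is finally written with the D3hot
 * state bits (pmcsr |= 3) plus PME_ENABLE when waking is requested.
 */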
4019 static int
4020 bnx2_acquire_nvram_lock(struct bnx2 *bp)
4022 u32 val;
4023 int j;
4025 /* Request access to the flash interface. */
4026 REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
4027 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4028 val = REG_RD(bp, BNX2_NVM_SW_ARB);
4029 if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
4030 break;
4032 udelay(5);
4033 }
4035 if (j >= NVRAM_TIMEOUT_COUNT)
4036 return -EBUSY;
4038 return 0;
4041 static int
4042 bnx2_release_nvram_lock(struct bnx2 *bp)
4044 int j;
4045 u32 val;
4047 /* Relinquish nvram interface. */
4048 REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);
4050 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4051 val = REG_RD(bp, BNX2_NVM_SW_ARB);
4052 if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
4053 break;
4055 udelay(5);
4056 }
4058 if (j >= NVRAM_TIMEOUT_COUNT)
4059 return -EBUSY;
4061 return 0;
4065 static int
4066 bnx2_enable_nvram_write(struct bnx2 *bp)
4068 u32 val;
4070 val = REG_RD(bp, BNX2_MISC_CFG);
4071 REG_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);
4073 if (bp->flash_info->flags & BNX2_NV_WREN) {
4074 int j;
4076 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4077 REG_WR(bp, BNX2_NVM_COMMAND,
4078 BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);
4080 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4081 udelay(5);
4083 val = REG_RD(bp, BNX2_NVM_COMMAND);
4084 if (val & BNX2_NVM_COMMAND_DONE)
4085 break;
4086 }
4088 if (j >= NVRAM_TIMEOUT_COUNT)
4089 return -EBUSY;
4090 }
4091 return 0;
4094 static void
4095 bnx2_disable_nvram_write(struct bnx2 *bp)
4097 u32 val;
4099 val = REG_RD(bp, BNX2_MISC_CFG);
4100 REG_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
4104 static void
4105 bnx2_enable_nvram_access(struct bnx2 *bp)
4107 u32 val;
4109 val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
4110 /* Enable both bits, even on read. */
4111 REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
4112 val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
4115 static void
4116 bnx2_disable_nvram_access(struct bnx2 *bp)
4118 u32 val;
4120 val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
4121 /* Disable both bits, even after read. */
4122 REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
4123 val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
4124 BNX2_NVM_ACCESS_ENABLE_WR_EN));
4127 static int
4128 bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
4130 u32 cmd;
4131 int j;
4133 if (bp->flash_info->flags & BNX2_NV_BUFFERED)
4134 /* Buffered flash, no erase needed */
4135 return 0;
4137 /* Build an erase command */
4138 cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
4139 BNX2_NVM_COMMAND_DOIT;
4141 /* Need to clear DONE bit separately. */
4142 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4144 /* Address of the NVRAM to erase. */
4145 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
4147 /* Issue an erase command. */
4148 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
4150 /* Wait for completion. */
4151 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4152 u32 val;
4154 udelay(5);
4156 val = REG_RD(bp, BNX2_NVM_COMMAND);
4157 if (val & BNX2_NVM_COMMAND_DONE)
4158 break;
4159 }
4161 if (j >= NVRAM_TIMEOUT_COUNT)
4162 return -EBUSY;
4164 return 0;
4167 static int
4168 bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
4170 u32 cmd;
4171 int j;
4173 /* Build the command word. */
4174 cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;
4176 /* Calculate the offset within a buffered flash; not needed for 5709. */
4177 if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
4178 offset = ((offset / bp->flash_info->page_size) <<
4179 bp->flash_info->page_bits) +
4180 (offset % bp->flash_info->page_size);
4183 /* Need to clear DONE bit separately. */
4184 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4186 /* Address of the NVRAM to read from. */
4187 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
4189 /* Issue a read command. */
4190 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
4192 /* Wait for completion. */
4193 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4194 u32 val;
4196 udelay(5);
4198 val = REG_RD(bp, BNX2_NVM_COMMAND);
4199 if (val & BNX2_NVM_COMMAND_DONE) {
4200 __be32 v = cpu_to_be32(REG_RD(bp, BNX2_NVM_READ));
4201 memcpy(ret_val, &v, 4);
4202 break;
4203 }
4204 }
4205 if (j >= NVRAM_TIMEOUT_COUNT)
4206 return -EBUSY;
4208 return 0;
4212 static int
4213 bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
4215 u32 cmd;
4216 __be32 val32;
4217 int j;
4219 /* Build the command word. */
4220 cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;
4222 /* Calculate the offset within a buffered flash; not needed for 5709. */
4223 if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
4224 offset = ((offset / bp->flash_info->page_size) <<
4225 bp->flash_info->page_bits) +
4226 (offset % bp->flash_info->page_size);
4229 /* Need to clear DONE bit separately. */
4230 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4232 memcpy(&val32, val, 4);
4234 /* Write the data. */
4235 REG_WR(bp, BNX2_NVM_WRITE, be32_to_cpu(val32));
4237 /* Address of the NVRAM to write to. */
4238 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
4240 /* Issue the write command. */
4241 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
4243 /* Wait for completion. */
4244 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4245 udelay(5);
4247 if (REG_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
4248 break;
4249 }
4250 if (j >= NVRAM_TIMEOUT_COUNT)
4251 return -EBUSY;
4253 return 0;
4256 static int
4257 bnx2_init_nvram(struct bnx2 *bp)
4259 u32 val;
4260 int j, entry_count, rc = 0;
4261 const struct flash_spec *flash;
4263 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4264 bp->flash_info = &flash_5709;
4265 goto get_flash_size;
4266 }
4268 /* Determine the selected interface. */
4269 val = REG_RD(bp, BNX2_NVM_CFG1);
4271 entry_count = ARRAY_SIZE(flash_table);
4273 if (val & 0x40000000) {
4275 /* Flash interface has been reconfigured */
4276 for (j = 0, flash = &flash_table[0]; j < entry_count;
4277 j++, flash++) {
4278 if ((val & FLASH_BACKUP_STRAP_MASK) ==
4279 (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
4280 bp->flash_info = flash;
4281 break;
4282 }
4283 }
4284 }
4285 else {
4286 u32 mask;
4287 /* Not yet been reconfigured */
4289 if (val & (1 << 23))
4290 mask = FLASH_BACKUP_STRAP_MASK;
4291 else
4292 mask = FLASH_STRAP_MASK;
4294 for (j = 0, flash = &flash_table[0]; j < entry_count;
4295 j++, flash++) {
4297 if ((val & mask) == (flash->strapping & mask)) {
4298 bp->flash_info = flash;
4300 /* Request access to the flash interface. */
4301 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
4302 return rc;
4304 /* Enable access to flash interface */
4305 bnx2_enable_nvram_access(bp);
4307 /* Reconfigure the flash interface */
4308 REG_WR(bp, BNX2_NVM_CFG1, flash->config1);
4309 REG_WR(bp, BNX2_NVM_CFG2, flash->config2);
4310 REG_WR(bp, BNX2_NVM_CFG3, flash->config3);
4311 REG_WR(bp, BNX2_NVM_WRITE1, flash->write1);
4313 /* Disable access to flash interface */
4314 bnx2_disable_nvram_access(bp);
4315 bnx2_release_nvram_lock(bp);
4317 break;
4318 }
4319 }
4320 } /* if (val & 0x40000000) */
4322 if (j == entry_count) {
4323 bp->flash_info = NULL;
4324 printk(KERN_ALERT PFX "Unknown flash/EEPROM type.\n");
4325 return -ENODEV;
4326 }
4328 get_flash_size:
4329 val = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG2);
4330 val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
4331 if (val)
4332 bp->flash_size = val;
4333 else
4334 bp->flash_size = bp->flash_info->total_size;
4336 return rc;
4339 static int
4340 bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
4341 int buf_size)
4343 int rc = 0;
4344 u32 cmd_flags, offset32, len32, extra;
4346 if (buf_size == 0)
4347 return 0;
4349 /* Request access to the flash interface. */
4350 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
4351 return rc;
4353 /* Enable access to flash interface */
4354 bnx2_enable_nvram_access(bp);
4356 len32 = buf_size;
4357 offset32 = offset;
4358 extra = 0;
4360 cmd_flags = 0;
4362 if (offset32 & 3) {
4363 u8 buf[4];
4364 u32 pre_len;
4366 offset32 &= ~3;
4367 pre_len = 4 - (offset & 3);
4369 if (pre_len >= len32) {
4370 pre_len = len32;
4371 cmd_flags = BNX2_NVM_COMMAND_FIRST |
4372 BNX2_NVM_COMMAND_LAST;
4374 else {
4375 cmd_flags = BNX2_NVM_COMMAND_FIRST;
4378 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4380 if (rc)
4381 return rc;
4383 memcpy(ret_buf, buf + (offset & 3), pre_len);
4385 offset32 += 4;
4386 ret_buf += pre_len;
4387 len32 -= pre_len;
4389 if (len32 & 3) {
4390 extra = 4 - (len32 & 3);
4391 len32 = (len32 + 4) & ~3;
4394 if (len32 == 4) {
4395 u8 buf[4];
4397 if (cmd_flags)
4398 cmd_flags = BNX2_NVM_COMMAND_LAST;
4399 else
4400 cmd_flags = BNX2_NVM_COMMAND_FIRST |
4401 BNX2_NVM_COMMAND_LAST;
4403 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4405 memcpy(ret_buf, buf, 4 - extra);
4407 else if (len32 > 0) {
4408 u8 buf[4];
4410 /* Read the first word. */
4411 if (cmd_flags)
4412 cmd_flags = 0;
4413 else
4414 cmd_flags = BNX2_NVM_COMMAND_FIRST;
4416 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);
4418 /* Advance to the next dword. */
4419 offset32 += 4;
4420 ret_buf += 4;
4421 len32 -= 4;
4423 while (len32 > 4 && rc == 0) {
4424 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);
4426 /* Advance to the next dword. */
4427 offset32 += 4;
4428 ret_buf += 4;
4429 len32 -= 4;
4432 if (rc)
4433 return rc;
4435 cmd_flags = BNX2_NVM_COMMAND_LAST;
4436 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4438 memcpy(ret_buf, buf, 4 - extra);
4441 /* Disable access to flash interface */
4442 bnx2_disable_nvram_access(bp);
4444 bnx2_release_nvram_lock(bp);
4446 return rc;
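/* Worked example of the unaligned handling above: a 6-byte read at
 * offset 5 is widened to whole dwords roughly as follows:
 *
 *	offset32 = 4, pre_len = 3:  the first dword supplies bytes 5..7
 *	len32 = 3  ->  extra = 1, len32 = 4:  the last dword supplies the
 *	remaining 3 bytes and its final byte is discarded
 *
 * so the NVRAM interface only ever sees aligned, whole-dword reads.
 */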
4449 static int
4450 bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
4451 int buf_size)
4453 u32 written, offset32, len32;
4454 u8 *buf, start[4], end[4], *align_buf = NULL, *flash_buffer = NULL;
4455 int rc = 0;
4456 int align_start, align_end;
4458 buf = data_buf;
4459 offset32 = offset;
4460 len32 = buf_size;
4461 align_start = align_end = 0;
4463 if ((align_start = (offset32 & 3))) {
4464 offset32 &= ~3;
4465 len32 += align_start;
4466 if (len32 < 4)
4467 len32 = 4;
4468 if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
4469 return rc;
4472 if (len32 & 3) {
4473 align_end = 4 - (len32 & 3);
4474 len32 += align_end;
4475 if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4, end, 4)))
4476 return rc;
4479 if (align_start || align_end) {
4480 align_buf = kmalloc(len32, GFP_KERNEL);
4481 if (align_buf == NULL)
4482 return -ENOMEM;
4483 if (align_start) {
4484 memcpy(align_buf, start, 4);
4486 if (align_end) {
4487 memcpy(align_buf + len32 - 4, end, 4);
4489 memcpy(align_buf + align_start, data_buf, buf_size);
4490 buf = align_buf;
4493 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4494 flash_buffer = kmalloc(264, GFP_KERNEL);
4495 if (flash_buffer == NULL) {
4496 rc = -ENOMEM;
4497 goto nvram_write_end;
4501 written = 0;
4502 while ((written < len32) && (rc == 0)) {
4503 u32 page_start, page_end, data_start, data_end;
4504 u32 addr, cmd_flags;
4505 int i;
4507 /* Find the page_start addr */
4508 page_start = offset32 + written;
4509 page_start -= (page_start % bp->flash_info->page_size);
4510 /* Find the page_end addr */
4511 page_end = page_start + bp->flash_info->page_size;
4512 /* Find the data_start addr */
4513 data_start = (written == 0) ? offset32 : page_start;
4514 /* Find the data_end addr */
4515 data_end = (page_end > offset32 + len32) ?
4516 (offset32 + len32) : page_end;
4518 /* Request access to the flash interface. */
4519 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
4520 goto nvram_write_end;
4522 /* Enable access to flash interface */
4523 bnx2_enable_nvram_access(bp);
4525 cmd_flags = BNX2_NVM_COMMAND_FIRST;
4526 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4527 int j;
4529 /* Read the whole page into the buffer
4530 			 * (non-buffered flash only) */
4531 for (j = 0; j < bp->flash_info->page_size; j += 4) {
4532 if (j == (bp->flash_info->page_size - 4)) {
4533 cmd_flags |= BNX2_NVM_COMMAND_LAST;
4535 rc = bnx2_nvram_read_dword(bp,
4536 page_start + j,
4537 &flash_buffer[j],
4538 cmd_flags);
4540 if (rc)
4541 goto nvram_write_end;
4543 cmd_flags = 0;
4547 /* Enable writes to flash interface (unlock write-protect) */
4548 if ((rc = bnx2_enable_nvram_write(bp)) != 0)
4549 goto nvram_write_end;
4551 /* Loop to write back the buffer data from page_start to
4552 * data_start */
4553 i = 0;
4554 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4555 /* Erase the page */
4556 if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
4557 goto nvram_write_end;
4559 			/* Re-enable writes for the actual programming pass */
4560 bnx2_enable_nvram_write(bp);
4562 for (addr = page_start; addr < data_start;
4563 addr += 4, i += 4) {
4565 rc = bnx2_nvram_write_dword(bp, addr,
4566 &flash_buffer[i], cmd_flags);
4568 if (rc != 0)
4569 goto nvram_write_end;
4571 cmd_flags = 0;
4575 /* Loop to write the new data from data_start to data_end */
4576 for (addr = data_start; addr < data_end; addr += 4, i += 4) {
4577 if ((addr == page_end - 4) ||
4578 ((bp->flash_info->flags & BNX2_NV_BUFFERED) &&
4579 (addr == data_end - 4))) {
4581 cmd_flags |= BNX2_NVM_COMMAND_LAST;
4583 rc = bnx2_nvram_write_dword(bp, addr, buf,
4584 cmd_flags);
4586 if (rc != 0)
4587 goto nvram_write_end;
4589 cmd_flags = 0;
4590 buf += 4;
4593 /* Loop to write back the buffer data from data_end
4594 * to page_end */
4595 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4596 for (addr = data_end; addr < page_end;
4597 addr += 4, i += 4) {
4599 if (addr == page_end-4) {
4600 cmd_flags = BNX2_NVM_COMMAND_LAST;
4602 rc = bnx2_nvram_write_dword(bp, addr,
4603 &flash_buffer[i], cmd_flags);
4605 if (rc != 0)
4606 goto nvram_write_end;
4608 cmd_flags = 0;
4612 /* Disable writes to flash interface (lock write-protect) */
4613 bnx2_disable_nvram_write(bp);
4615 /* Disable access to flash interface */
4616 bnx2_disable_nvram_access(bp);
4617 bnx2_release_nvram_lock(bp);
4619 /* Increment written */
4620 written += data_end - data_start;
4623 nvram_write_end:
4624 kfree(flash_buffer);
4625 kfree(align_buf);
4626 return rc;
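/* Note on the flow above: unaligned writes become a read-modify-write --
 * the partial dwords at the start and end of the range are read back
 * into start[]/end[], merged with the caller's data in align_buf, and
 * the whole aligned region is rewritten page by page.  Non-buffered
 * flash additionally requires each page to be read out in full, erased,
 * and reprogrammed, which is what the 264-byte flash_buffer is for.
 */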
4629 static void
4630 bnx2_init_fw_cap(struct bnx2 *bp)
4632 u32 val, sig = 0;
4634 bp->phy_flags &= ~BNX2_PHY_FLAG_REMOTE_PHY_CAP;
4635 bp->flags &= ~BNX2_FLAG_CAN_KEEP_VLAN;
4637 if (!(bp->flags & BNX2_FLAG_ASF_ENABLE))
4638 bp->flags |= BNX2_FLAG_CAN_KEEP_VLAN;
4640 val = bnx2_shmem_rd(bp, BNX2_FW_CAP_MB);
4641 if ((val & BNX2_FW_CAP_SIGNATURE_MASK) != BNX2_FW_CAP_SIGNATURE)
4642 return;
4644 if ((val & BNX2_FW_CAP_CAN_KEEP_VLAN) == BNX2_FW_CAP_CAN_KEEP_VLAN) {
4645 bp->flags |= BNX2_FLAG_CAN_KEEP_VLAN;
4646 sig |= BNX2_DRV_ACK_CAP_SIGNATURE | BNX2_FW_CAP_CAN_KEEP_VLAN;
4649 if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
4650 (val & BNX2_FW_CAP_REMOTE_PHY_CAPABLE)) {
4651 u32 link;
4653 bp->phy_flags |= BNX2_PHY_FLAG_REMOTE_PHY_CAP;
4655 link = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);
4656 if (link & BNX2_LINK_STATUS_SERDES_LINK)
4657 bp->phy_port = PORT_FIBRE;
4658 else
4659 bp->phy_port = PORT_TP;
4661 sig |= BNX2_DRV_ACK_CAP_SIGNATURE |
4662 BNX2_FW_CAP_REMOTE_PHY_CAPABLE;
4665 if (netif_running(bp->dev) && sig)
4666 bnx2_shmem_wr(bp, BNX2_DRV_ACK_CAP_MB, sig);
4669 static void
4670 bnx2_setup_msix_tbl(struct bnx2 *bp)
4672 REG_WR(bp, BNX2_PCI_GRC_WINDOW_ADDR, BNX2_PCI_GRC_WINDOW_ADDR_SEP_WIN);
4674 REG_WR(bp, BNX2_PCI_GRC_WINDOW2_ADDR, BNX2_MSIX_TABLE_ADDR);
4675 REG_WR(bp, BNX2_PCI_GRC_WINDOW3_ADDR, BNX2_MSIX_PBA_ADDR);
4678 static int
4679 bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
4681 u32 val;
4682 int i, rc = 0;
4683 u8 old_port;
4685 /* Wait for the current PCI transaction to complete before
4686 * issuing a reset. */
4687 REG_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
4688 BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
4689 BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
4690 BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
4691 BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
4692 val = REG_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
4693 udelay(5);
4695 /* Wait for the firmware to tell us it is ok to issue a reset. */
4696 bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1, 1);
4698 /* Deposit a driver reset signature so the firmware knows that
4699 * this is a soft reset. */
4700 bnx2_shmem_wr(bp, BNX2_DRV_RESET_SIGNATURE,
4701 BNX2_DRV_RESET_SIGNATURE_MAGIC);
4703 	/* Do a dummy read to force the chip to complete all current transactions
4704 * before we issue a reset. */
4705 val = REG_RD(bp, BNX2_MISC_ID);
4707 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4708 REG_WR(bp, BNX2_MISC_COMMAND, BNX2_MISC_COMMAND_SW_RESET);
4709 REG_RD(bp, BNX2_MISC_COMMAND);
4710 udelay(5);
4712 val = BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
4713 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
4715 pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, val);
4717 } else {
4718 val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
4719 BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
4720 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
4722 /* Chip reset. */
4723 REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);
4725 /* Reading back any register after chip reset will hang the
4726 * bus on 5706 A0 and A1. The msleep below provides plenty
4727 		 * of margin for write posting. */
4729 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
4730 (CHIP_ID(bp) == CHIP_ID_5706_A1))
4731 msleep(20);
4733 		/* Reset takes approximately 30 usec */
4734 for (i = 0; i < 10; i++) {
4735 val = REG_RD(bp, BNX2_PCICFG_MISC_CONFIG);
4736 if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
4737 BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0)
4738 break;
4739 udelay(10);
4742 if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
4743 BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
4744 printk(KERN_ERR PFX "Chip reset did not complete\n");
4745 return -EBUSY;
4749 /* Make sure byte swapping is properly configured. */
4750 val = REG_RD(bp, BNX2_PCI_SWAP_DIAG0);
4751 if (val != 0x01020304) {
4752 printk(KERN_ERR PFX "Chip not in correct endian mode\n");
4753 return -ENODEV;
4756 /* Wait for the firmware to finish its initialization. */
4757 rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 1, 0);
4758 if (rc)
4759 return rc;
4761 spin_lock_bh(&bp->phy_lock);
4762 old_port = bp->phy_port;
4763 bnx2_init_fw_cap(bp);
4764 if ((bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) &&
4765 old_port != bp->phy_port)
4766 bnx2_set_default_remote_link(bp);
4767 spin_unlock_bh(&bp->phy_lock);
4769 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
4770 		/* Adjust the voltage regulator two steps lower.  The default
4771 * of this register is 0x0000000e. */
4772 REG_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);
4774 /* Remove bad rbuf memory from the free pool. */
4775 rc = bnx2_alloc_bad_rbuf(bp);
4778 if (bp->flags & BNX2_FLAG_USING_MSIX) {
4779 bnx2_setup_msix_tbl(bp);
4780 		/* Prevent MSIX table reads and writes from timing out */
4781 REG_WR(bp, BNX2_MISC_ECO_HW_CTL,
4782 BNX2_MISC_ECO_HW_CTL_LARGE_GRC_TMOUT_EN);
4785 return rc;
4788 static int
4789 bnx2_init_chip(struct bnx2 *bp)
4791 u32 val, mtu;
4792 int rc, i;
4794 /* Make sure the interrupt is not active. */
4795 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
4797 val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
4798 BNX2_DMA_CONFIG_DATA_WORD_SWAP |
4799 #ifdef __BIG_ENDIAN
4800 BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
4801 #endif
4802 BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
4803 DMA_READ_CHANS << 12 |
4804 DMA_WRITE_CHANS << 16;
4806 val |= (0x2 << 20) | (1 << 11);
4808 if ((bp->flags & BNX2_FLAG_PCIX) && (bp->bus_speed_mhz == 133))
4809 val |= (1 << 23);
4811 if ((CHIP_NUM(bp) == CHIP_NUM_5706) &&
4812 (CHIP_ID(bp) != CHIP_ID_5706_A0) && !(bp->flags & BNX2_FLAG_PCIX))
4813 val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;
4815 REG_WR(bp, BNX2_DMA_CONFIG, val);
4817 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
4818 val = REG_RD(bp, BNX2_TDMA_CONFIG);
4819 val |= BNX2_TDMA_CONFIG_ONE_DMA;
4820 REG_WR(bp, BNX2_TDMA_CONFIG, val);
4823 if (bp->flags & BNX2_FLAG_PCIX) {
4824 u16 val16;
4826 pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
4827 &val16);
4828 pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
4829 val16 & ~PCI_X_CMD_ERO);
4832 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
4833 BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
4834 BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
4835 BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);
4837 /* Initialize context mapping and zero out the quick contexts. The
4838 * context block must have already been enabled. */
4839 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4840 rc = bnx2_init_5709_context(bp);
4841 if (rc)
4842 return rc;
4843 } else
4844 bnx2_init_context(bp);
4846 if ((rc = bnx2_init_cpus(bp)) != 0)
4847 return rc;
4849 bnx2_init_nvram(bp);
4851 bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
4853 val = REG_RD(bp, BNX2_MQ_CONFIG);
4854 val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
4855 val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
4856 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4857 val |= BNX2_MQ_CONFIG_BIN_MQ_MODE;
4858 if (CHIP_REV(bp) == CHIP_REV_Ax)
4859 val |= BNX2_MQ_CONFIG_HALT_DIS;
4862 REG_WR(bp, BNX2_MQ_CONFIG, val);
4864 val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
4865 REG_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
4866 REG_WR(bp, BNX2_MQ_KNL_WIND_END, val);
4868 val = (BCM_PAGE_BITS - 8) << 24;
4869 REG_WR(bp, BNX2_RV2P_CONFIG, val);
4871 /* Configure page size. */
4872 val = REG_RD(bp, BNX2_TBDR_CONFIG);
4873 val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
4874 val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
4875 REG_WR(bp, BNX2_TBDR_CONFIG, val);
4877 val = bp->mac_addr[0] +
4878 (bp->mac_addr[1] << 8) +
4879 (bp->mac_addr[2] << 16) +
4880 bp->mac_addr[3] +
4881 (bp->mac_addr[4] << 8) +
4882 (bp->mac_addr[5] << 16);
4883 REG_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);
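	/* Example with a made-up address 00:10:18:01:02:03: the seed folds
	 * the MAC address into two 24-bit halves,
	 *
	 *	(0x00 | 0x10 << 8 | 0x18 << 16) +
	 *	(0x01 | 0x02 << 8 | 0x03 << 16)  ==  0x181000 + 0x030201
	 *					 ==  0x1b1201
	 *
	 * so each port seeds the backoff PRNG differently.
	 */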
4885 /* Program the MTU. Also include 4 bytes for CRC32. */
4886 mtu = bp->dev->mtu;
4887 val = mtu + ETH_HLEN + ETH_FCS_LEN;
4888 if (val > (MAX_ETHERNET_PACKET_SIZE + 4))
4889 val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
4890 REG_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);
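	/* Illustrative arithmetic: with the default 1500-byte MTU this
	 * writes 1500 + 14 (ETH_HLEN) + 4 (ETH_FCS_LEN) = 1518, which does
	 * not exceed MAX_ETHERNET_PACKET_SIZE + 4, so the jumbo bit stays
	 * clear; a 9000-byte MTU would also set
	 * BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA.
	 */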
4892 if (mtu < 1500)
4893 mtu = 1500;
4895 bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG, BNX2_RBUF_CONFIG_VAL(mtu));
4896 bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG2, BNX2_RBUF_CONFIG2_VAL(mtu));
4897 bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG3, BNX2_RBUF_CONFIG3_VAL(mtu));
4899 memset(bp->bnx2_napi[0].status_blk.msi, 0, bp->status_stats_size);
4900 for (i = 0; i < BNX2_MAX_MSIX_VEC; i++)
4901 bp->bnx2_napi[i].last_status_idx = 0;
4903 bp->idle_chk_status_idx = 0xffff;
4905 bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE;
4907 /* Set up how to generate a link change interrupt. */
4908 REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
4910 REG_WR(bp, BNX2_HC_STATUS_ADDR_L,
4911 (u64) bp->status_blk_mapping & 0xffffffff);
4912 REG_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);
4914 REG_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
4915 (u64) bp->stats_blk_mapping & 0xffffffff);
4916 REG_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
4917 (u64) bp->stats_blk_mapping >> 32);
4919 REG_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
4920 (bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);
4922 REG_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
4923 (bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);
4925 REG_WR(bp, BNX2_HC_COMP_PROD_TRIP,
4926 (bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);
4928 REG_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);
4930 REG_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);
4932 REG_WR(bp, BNX2_HC_COM_TICKS,
4933 (bp->com_ticks_int << 16) | bp->com_ticks);
4935 REG_WR(bp, BNX2_HC_CMD_TICKS,
4936 (bp->cmd_ticks_int << 16) | bp->cmd_ticks);
4938 if (bp->flags & BNX2_FLAG_BROKEN_STATS)
4939 REG_WR(bp, BNX2_HC_STATS_TICKS, 0);
4940 else
4941 REG_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks);
4942 REG_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8); /* 3ms */
4944 if (CHIP_ID(bp) == CHIP_ID_5706_A1)
4945 val = BNX2_HC_CONFIG_COLLECT_STATS;
4946 else {
4947 val = BNX2_HC_CONFIG_RX_TMR_MODE | BNX2_HC_CONFIG_TX_TMR_MODE |
4948 BNX2_HC_CONFIG_COLLECT_STATS;
4951 if (bp->irq_nvecs > 1) {
4952 REG_WR(bp, BNX2_HC_MSIX_BIT_VECTOR,
4953 BNX2_HC_MSIX_BIT_VECTOR_VAL);
4955 val |= BNX2_HC_CONFIG_SB_ADDR_INC_128B;
4958 if (bp->flags & BNX2_FLAG_ONE_SHOT_MSI)
4959 val |= BNX2_HC_CONFIG_ONE_SHOT | BNX2_HC_CONFIG_USE_INT_PARAM;
4961 REG_WR(bp, BNX2_HC_CONFIG, val);
4963 for (i = 1; i < bp->irq_nvecs; i++) {
4964 u32 base = ((i - 1) * BNX2_HC_SB_CONFIG_SIZE) +
4965 BNX2_HC_SB_CONFIG_1;
4967 REG_WR(bp, base,
4968 BNX2_HC_SB_CONFIG_1_TX_TMR_MODE |
4969 BNX2_HC_SB_CONFIG_1_RX_TMR_MODE |
4970 BNX2_HC_SB_CONFIG_1_ONE_SHOT);
4972 REG_WR(bp, base + BNX2_HC_TX_QUICK_CONS_TRIP_OFF,
4973 (bp->tx_quick_cons_trip_int << 16) |
4974 bp->tx_quick_cons_trip);
4976 REG_WR(bp, base + BNX2_HC_TX_TICKS_OFF,
4977 (bp->tx_ticks_int << 16) | bp->tx_ticks);
4979 REG_WR(bp, base + BNX2_HC_RX_QUICK_CONS_TRIP_OFF,
4980 (bp->rx_quick_cons_trip_int << 16) |
4981 bp->rx_quick_cons_trip);
4983 REG_WR(bp, base + BNX2_HC_RX_TICKS_OFF,
4984 (bp->rx_ticks_int << 16) | bp->rx_ticks);
4987 /* Clear internal stats counters. */
4988 REG_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);
4990 REG_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_EVENTS);
4992 /* Initialize the receive filter. */
4993 bnx2_set_rx_mode(bp->dev);
4995 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4996 val = REG_RD(bp, BNX2_MISC_NEW_CORE_CTL);
4997 val |= BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
4998 REG_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
5000 rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
5001 1, 0);
5003 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, BNX2_MISC_ENABLE_DEFAULT);
5004 REG_RD(bp, BNX2_MISC_ENABLE_SET_BITS);
5006 udelay(20);
5008 bp->hc_cmd = REG_RD(bp, BNX2_HC_COMMAND);
5010 return rc;
5013 static void
5014 bnx2_clear_ring_states(struct bnx2 *bp)
5016 struct bnx2_napi *bnapi;
5017 struct bnx2_tx_ring_info *txr;
5018 struct bnx2_rx_ring_info *rxr;
5019 int i;
5021 for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
5022 bnapi = &bp->bnx2_napi[i];
5023 txr = &bnapi->tx_ring;
5024 rxr = &bnapi->rx_ring;
5026 txr->tx_cons = 0;
5027 txr->hw_tx_cons = 0;
5028 rxr->rx_prod_bseq = 0;
5029 rxr->rx_prod = 0;
5030 rxr->rx_cons = 0;
5031 rxr->rx_pg_prod = 0;
5032 rxr->rx_pg_cons = 0;
5036 static void
5037 bnx2_init_tx_context(struct bnx2 *bp, u32 cid, struct bnx2_tx_ring_info *txr)
5039 u32 val, offset0, offset1, offset2, offset3;
5040 u32 cid_addr = GET_CID_ADDR(cid);
5042 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
5043 offset0 = BNX2_L2CTX_TYPE_XI;
5044 offset1 = BNX2_L2CTX_CMD_TYPE_XI;
5045 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
5046 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
5047 } else {
5048 offset0 = BNX2_L2CTX_TYPE;
5049 offset1 = BNX2_L2CTX_CMD_TYPE;
5050 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
5051 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
5053 val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
5054 bnx2_ctx_wr(bp, cid_addr, offset0, val);
5056 val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
5057 bnx2_ctx_wr(bp, cid_addr, offset1, val);
5059 val = (u64) txr->tx_desc_mapping >> 32;
5060 bnx2_ctx_wr(bp, cid_addr, offset2, val);
5062 val = (u64) txr->tx_desc_mapping & 0xffffffff;
5063 bnx2_ctx_wr(bp, cid_addr, offset3, val);
5066 static void
5067 bnx2_init_tx_ring(struct bnx2 *bp, int ring_num)
5069 struct tx_bd *txbd;
5070 u32 cid = TX_CID;
5071 struct bnx2_napi *bnapi;
5072 struct bnx2_tx_ring_info *txr;
5074 bnapi = &bp->bnx2_napi[ring_num];
5075 txr = &bnapi->tx_ring;
5077 if (ring_num == 0)
5078 cid = TX_CID;
5079 else
5080 cid = TX_TSS_CID + ring_num - 1;
5082 bp->tx_wake_thresh = bp->tx_ring_size / 2;
5084 txbd = &txr->tx_desc_ring[MAX_TX_DESC_CNT];
5086 txbd->tx_bd_haddr_hi = (u64) txr->tx_desc_mapping >> 32;
5087 txbd->tx_bd_haddr_lo = (u64) txr->tx_desc_mapping & 0xffffffff;
5089 txr->tx_prod = 0;
5090 txr->tx_prod_bseq = 0;
5092 txr->tx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BIDX;
5093 txr->tx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BSEQ;
5095 bnx2_init_tx_context(bp, cid, txr);
5098 static void
5099 bnx2_init_rxbd_rings(struct rx_bd *rx_ring[], dma_addr_t dma[], u32 buf_size,
5100 int num_rings)
5102 int i;
5103 struct rx_bd *rxbd;
5105 for (i = 0; i < num_rings; i++) {
5106 int j;
5108 rxbd = &rx_ring[i][0];
5109 for (j = 0; j < MAX_RX_DESC_CNT; j++, rxbd++) {
5110 rxbd->rx_bd_len = buf_size;
5111 rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
5113 if (i == (num_rings - 1))
5114 j = 0;
5115 else
5116 j = i + 1;
5117 rxbd->rx_bd_haddr_hi = (u64) dma[j] >> 32;
5118 rxbd->rx_bd_haddr_lo = (u64) dma[j] & 0xffffffff;
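	/* The slot this leaves at index MAX_RX_DESC_CNT of each page is not
	 * a data buffer but a link descriptor -- it holds the DMA address
	 * of the next page, and the last page points back at page 0,
	 * chaining the pages into one circular ring.
	 */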
5122 static void
5123 bnx2_init_rx_ring(struct bnx2 *bp, int ring_num)
5125 int i;
5126 u16 prod, ring_prod;
5127 u32 cid, rx_cid_addr, val;
5128 struct bnx2_napi *bnapi = &bp->bnx2_napi[ring_num];
5129 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
5131 if (ring_num == 0)
5132 cid = RX_CID;
5133 else
5134 cid = RX_RSS_CID + ring_num - 1;
5136 rx_cid_addr = GET_CID_ADDR(cid);
5138 bnx2_init_rxbd_rings(rxr->rx_desc_ring, rxr->rx_desc_mapping,
5139 bp->rx_buf_use_size, bp->rx_max_ring);
5141 bnx2_init_rx_context(bp, cid);
5143 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
5144 val = REG_RD(bp, BNX2_MQ_MAP_L2_5);
5145 REG_WR(bp, BNX2_MQ_MAP_L2_5, val | BNX2_MQ_MAP_L2_5_ARM);
5148 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, 0);
5149 if (bp->rx_pg_ring_size) {
5150 bnx2_init_rxbd_rings(rxr->rx_pg_desc_ring,
5151 rxr->rx_pg_desc_mapping,
5152 PAGE_SIZE, bp->rx_max_pg_ring);
5153 val = (bp->rx_buf_use_size << 16) | PAGE_SIZE;
5154 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, val);
5155 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_RBDC_KEY,
5156 BNX2_L2CTX_RBDC_JUMBO_KEY - ring_num);
5158 val = (u64) rxr->rx_pg_desc_mapping[0] >> 32;
5159 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_HI, val);
5161 val = (u64) rxr->rx_pg_desc_mapping[0] & 0xffffffff;
5162 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_LO, val);
5164 if (CHIP_NUM(bp) == CHIP_NUM_5709)
5165 REG_WR(bp, BNX2_MQ_MAP_L2_3, BNX2_MQ_MAP_L2_3_DEFAULT);
5168 val = (u64) rxr->rx_desc_mapping[0] >> 32;
5169 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val);
5171 val = (u64) rxr->rx_desc_mapping[0] & 0xffffffff;
5172 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val);
5174 ring_prod = prod = rxr->rx_pg_prod;
5175 for (i = 0; i < bp->rx_pg_ring_size; i++) {
5176 if (bnx2_alloc_rx_page(bp, rxr, ring_prod) < 0) {
5177 printk(KERN_WARNING PFX "%s: init'ed rx page ring %d "
5178 "with %d/%d pages only\n",
5179 bp->dev->name, ring_num, i, bp->rx_pg_ring_size);
5180 break;
5182 prod = NEXT_RX_BD(prod);
5183 ring_prod = RX_PG_RING_IDX(prod);
5185 rxr->rx_pg_prod = prod;
5187 ring_prod = prod = rxr->rx_prod;
5188 for (i = 0; i < bp->rx_ring_size; i++) {
5189 if (bnx2_alloc_rx_skb(bp, rxr, ring_prod) < 0) {
5190 printk(KERN_WARNING PFX "%s: init'ed rx ring %d with "
5191 "%d/%d skbs only\n",
5192 bp->dev->name, ring_num, i, bp->rx_ring_size);
5193 break;
5195 prod = NEXT_RX_BD(prod);
5196 ring_prod = RX_RING_IDX(prod);
5198 rxr->rx_prod = prod;
5200 rxr->rx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BDIDX;
5201 rxr->rx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BSEQ;
5202 rxr->rx_pg_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_PG_BDIDX;
5204 REG_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);
5205 REG_WR16(bp, rxr->rx_bidx_addr, prod);
5207 REG_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);
5210 static void
5211 bnx2_init_all_rings(struct bnx2 *bp)
5213 int i;
5214 u32 val;
5216 bnx2_clear_ring_states(bp);
5218 REG_WR(bp, BNX2_TSCH_TSS_CFG, 0);
5219 for (i = 0; i < bp->num_tx_rings; i++)
5220 bnx2_init_tx_ring(bp, i);
5222 if (bp->num_tx_rings > 1)
5223 REG_WR(bp, BNX2_TSCH_TSS_CFG, ((bp->num_tx_rings - 1) << 24) |
5224 (TX_TSS_CID << 7));
5226 REG_WR(bp, BNX2_RLUP_RSS_CONFIG, 0);
5227 bnx2_reg_wr_ind(bp, BNX2_RXP_SCRATCH_RSS_TBL_SZ, 0);
5229 for (i = 0; i < bp->num_rx_rings; i++)
5230 bnx2_init_rx_ring(bp, i);
5232 if (bp->num_rx_rings > 1) {
5233 u32 tbl_32;
5234 u8 *tbl = (u8 *) &tbl_32;
5236 bnx2_reg_wr_ind(bp, BNX2_RXP_SCRATCH_RSS_TBL_SZ,
5237 BNX2_RXP_SCRATCH_RSS_TBL_MAX_ENTRIES);
5239 for (i = 0; i < BNX2_RXP_SCRATCH_RSS_TBL_MAX_ENTRIES; i++) {
5240 tbl[i % 4] = i % (bp->num_rx_rings - 1);
5241 if ((i % 4) == 3)
5242 bnx2_reg_wr_ind(bp,
5243 BNX2_RXP_SCRATCH_RSS_TBL + i,
5244 cpu_to_be32(tbl_32));
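		/* Sketch of the packing above: four one-byte indirection
		 * entries accumulate in tbl_32 and are flushed as one
		 * 32-bit word on every 4th entry.  The entry values simply
		 * cycle over the RSS rings, e.g. 0,1,2,0,1,2,... when
		 * num_rx_rings == 4 (one default ring plus three RSS rings).
		 */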
5247 val = BNX2_RLUP_RSS_CONFIG_IPV4_RSS_TYPE_ALL_XI |
5248 BNX2_RLUP_RSS_CONFIG_IPV6_RSS_TYPE_ALL_XI;
5250 REG_WR(bp, BNX2_RLUP_RSS_CONFIG, val);
5255 static u32 bnx2_find_max_ring(u32 ring_size, u32 max_size)
5257 u32 max, num_rings = 1;
5259 while (ring_size > MAX_RX_DESC_CNT) {
5260 ring_size -= MAX_RX_DESC_CNT;
5261 num_rings++;
5263 /* round to next power of 2 */
5264 max = max_size;
5265 while ((max & num_rings) == 0)
5266 max >>= 1;
5268 if (num_rings != max)
5269 max <<= 1;
5271 return max;
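/* Worked example (assuming 4K pages, so MAX_RX_DESC_CNT == 255): a
 * requested ring_size of 600 needs num_rings = 3 pages; max_size (a
 * power of 2) is then shifted down until it overlaps num_rings and
 * doubled once if they differ, yielding 4 -- the next power of two
 * >= 3.
 */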
5274 static void
5275 bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
5277 u32 rx_size, rx_space, jumbo_size;
5279 /* 8 for CRC and VLAN */
5280 rx_size = bp->dev->mtu + ETH_HLEN + BNX2_RX_OFFSET + 8;
5282 rx_space = SKB_DATA_ALIGN(rx_size + BNX2_RX_ALIGN) + NET_SKB_PAD +
5283 sizeof(struct skb_shared_info);
5285 bp->rx_copy_thresh = BNX2_RX_COPY_THRESH;
5286 bp->rx_pg_ring_size = 0;
5287 bp->rx_max_pg_ring = 0;
5288 bp->rx_max_pg_ring_idx = 0;
5289 if ((rx_space > PAGE_SIZE) && !(bp->flags & BNX2_FLAG_JUMBO_BROKEN)) {
5290 int pages = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;
5292 jumbo_size = size * pages;
5293 if (jumbo_size > MAX_TOTAL_RX_PG_DESC_CNT)
5294 jumbo_size = MAX_TOTAL_RX_PG_DESC_CNT;
5296 bp->rx_pg_ring_size = jumbo_size;
5297 bp->rx_max_pg_ring = bnx2_find_max_ring(jumbo_size,
5298 MAX_RX_PG_RINGS);
5299 bp->rx_max_pg_ring_idx = (bp->rx_max_pg_ring * RX_DESC_CNT) - 1;
5300 rx_size = BNX2_RX_COPY_THRESH + BNX2_RX_OFFSET;
5301 bp->rx_copy_thresh = 0;
5304 bp->rx_buf_use_size = rx_size;
5305 /* hw alignment */
5306 bp->rx_buf_size = bp->rx_buf_use_size + BNX2_RX_ALIGN;
5307 bp->rx_jumbo_thresh = rx_size - BNX2_RX_OFFSET;
5308 bp->rx_ring_size = size;
5309 bp->rx_max_ring = bnx2_find_max_ring(size, MAX_RX_RINGS);
5310 bp->rx_max_ring_idx = (bp->rx_max_ring * RX_DESC_CNT) - 1;
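/* Note: rx_space above is the true per-buffer footprint -- payload plus
 * struct skb_shared_info plus alignment padding.  Only when that
 * footprint spills past one page (and the chip is not jumbo-broken) is
 * the packet split across the page ring; the main rx buffer then
 * shrinks to just the copy-threshold header bytes.
 */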
5313 static void
5314 bnx2_free_tx_skbs(struct bnx2 *bp)
5316 int i;
5318 for (i = 0; i < bp->num_tx_rings; i++) {
5319 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
5320 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
5321 int j;
5323 if (txr->tx_buf_ring == NULL)
5324 continue;
5326 for (j = 0; j < TX_DESC_CNT; ) {
5327 struct sw_tx_bd *tx_buf = &txr->tx_buf_ring[j];
5328 struct sk_buff *skb = tx_buf->skb;
5329 int k, last;
5331 if (skb == NULL) {
5332 j++;
5333 continue;
5336 pci_unmap_single(bp->pdev,
5337 pci_unmap_addr(tx_buf, mapping),
5338 skb_headlen(skb),
5339 PCI_DMA_TODEVICE);
5341 tx_buf->skb = NULL;
5343 last = tx_buf->nr_frags;
5344 j++;
5345 for (k = 0; k < last; k++, j++) {
5346 tx_buf = &txr->tx_buf_ring[TX_RING_IDX(j)];
5347 pci_unmap_page(bp->pdev,
5348 pci_unmap_addr(tx_buf, mapping),
5349 skb_shinfo(skb)->frags[k].size,
5350 PCI_DMA_TODEVICE);
5352 dev_kfree_skb(skb);
5357 static void
5358 bnx2_free_rx_skbs(struct bnx2 *bp)
5360 int i;
5362 for (i = 0; i < bp->num_rx_rings; i++) {
5363 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
5364 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
5365 int j;
5367 if (rxr->rx_buf_ring == NULL)
5368 return;
5370 for (j = 0; j < bp->rx_max_ring_idx; j++) {
5371 struct sw_bd *rx_buf = &rxr->rx_buf_ring[j];
5372 struct sk_buff *skb = rx_buf->skb;
5374 if (skb == NULL)
5375 continue;
5377 pci_unmap_single(bp->pdev,
5378 pci_unmap_addr(rx_buf, mapping),
5379 bp->rx_buf_use_size,
5380 PCI_DMA_FROMDEVICE);
5382 rx_buf->skb = NULL;
5384 dev_kfree_skb(skb);
5386 for (j = 0; j < bp->rx_max_pg_ring_idx; j++)
5387 bnx2_free_rx_page(bp, rxr, j);
5391 static void
5392 bnx2_free_skbs(struct bnx2 *bp)
5394 bnx2_free_tx_skbs(bp);
5395 bnx2_free_rx_skbs(bp);
5398 static int
5399 bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
5401 int rc;
5403 rc = bnx2_reset_chip(bp, reset_code);
5404 bnx2_free_skbs(bp);
5405 if (rc)
5406 return rc;
5408 if ((rc = bnx2_init_chip(bp)) != 0)
5409 return rc;
5411 bnx2_init_all_rings(bp);
5412 return 0;
5415 static int
5416 bnx2_init_nic(struct bnx2 *bp, int reset_phy)
5418 int rc;
5420 if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
5421 return rc;
5423 spin_lock_bh(&bp->phy_lock);
5424 bnx2_init_phy(bp, reset_phy);
5425 bnx2_set_link(bp);
5426 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
5427 bnx2_remote_phy_event(bp);
5428 spin_unlock_bh(&bp->phy_lock);
5429 return 0;
5432 static int
5433 bnx2_shutdown_chip(struct bnx2 *bp)
5435 u32 reset_code;
5437 if (bp->flags & BNX2_FLAG_NO_WOL)
5438 reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
5439 else if (bp->wol)
5440 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
5441 else
5442 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
5444 return bnx2_reset_chip(bp, reset_code);
5447 static int
5448 bnx2_test_registers(struct bnx2 *bp)
5450 int ret;
5451 int i, is_5709;
5452 static const struct {
5453 u16 offset;
5454 u16 flags;
5455 #define BNX2_FL_NOT_5709 1
5456 u32 rw_mask;
5457 u32 ro_mask;
5458 } reg_tbl[] = {
5459 { 0x006c, 0, 0x00000000, 0x0000003f },
5460 { 0x0090, 0, 0xffffffff, 0x00000000 },
5461 { 0x0094, 0, 0x00000000, 0x00000000 },
5463 { 0x0404, BNX2_FL_NOT_5709, 0x00003f00, 0x00000000 },
5464 { 0x0418, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5465 { 0x041c, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5466 { 0x0420, BNX2_FL_NOT_5709, 0x00000000, 0x80ffffff },
5467 { 0x0424, BNX2_FL_NOT_5709, 0x00000000, 0x00000000 },
5468 { 0x0428, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
5469 { 0x0450, BNX2_FL_NOT_5709, 0x00000000, 0x0000ffff },
5470 { 0x0454, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5471 { 0x0458, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5473 { 0x0808, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5474 { 0x0854, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5475 { 0x0868, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
5476 { 0x086c, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
5477 { 0x0870, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
5478 { 0x0874, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
5480 { 0x0c00, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
5481 { 0x0c04, BNX2_FL_NOT_5709, 0x00000000, 0x03ff0001 },
5482 { 0x0c08, BNX2_FL_NOT_5709, 0x0f0ff073, 0x00000000 },
5484 { 0x1000, 0, 0x00000000, 0x00000001 },
5485 { 0x1004, BNX2_FL_NOT_5709, 0x00000000, 0x000f0001 },
5487 { 0x1408, 0, 0x01c00800, 0x00000000 },
5488 { 0x149c, 0, 0x8000ffff, 0x00000000 },
5489 { 0x14a8, 0, 0x00000000, 0x000001ff },
5490 { 0x14ac, 0, 0x0fffffff, 0x10000000 },
5491 { 0x14b0, 0, 0x00000002, 0x00000001 },
5492 { 0x14b8, 0, 0x00000000, 0x00000000 },
5493 { 0x14c0, 0, 0x00000000, 0x00000009 },
5494 { 0x14c4, 0, 0x00003fff, 0x00000000 },
5495 { 0x14cc, 0, 0x00000000, 0x00000001 },
5496 { 0x14d0, 0, 0xffffffff, 0x00000000 },
5498 { 0x1800, 0, 0x00000000, 0x00000001 },
5499 { 0x1804, 0, 0x00000000, 0x00000003 },
5501 { 0x2800, 0, 0x00000000, 0x00000001 },
5502 { 0x2804, 0, 0x00000000, 0x00003f01 },
5503 { 0x2808, 0, 0x0f3f3f03, 0x00000000 },
5504 { 0x2810, 0, 0xffff0000, 0x00000000 },
5505 { 0x2814, 0, 0xffff0000, 0x00000000 },
5506 { 0x2818, 0, 0xffff0000, 0x00000000 },
5507 { 0x281c, 0, 0xffff0000, 0x00000000 },
5508 { 0x2834, 0, 0xffffffff, 0x00000000 },
5509 { 0x2840, 0, 0x00000000, 0xffffffff },
5510 { 0x2844, 0, 0x00000000, 0xffffffff },
5511 { 0x2848, 0, 0xffffffff, 0x00000000 },
5512 { 0x284c, 0, 0xf800f800, 0x07ff07ff },
5514 { 0x2c00, 0, 0x00000000, 0x00000011 },
5515 { 0x2c04, 0, 0x00000000, 0x00030007 },
5517 { 0x3c00, 0, 0x00000000, 0x00000001 },
5518 { 0x3c04, 0, 0x00000000, 0x00070000 },
5519 { 0x3c08, 0, 0x00007f71, 0x07f00000 },
5520 { 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
5521 { 0x3c10, 0, 0xffffffff, 0x00000000 },
5522 { 0x3c14, 0, 0x00000000, 0xffffffff },
5523 { 0x3c18, 0, 0x00000000, 0xffffffff },
5524 { 0x3c1c, 0, 0xfffff000, 0x00000000 },
5525 { 0x3c20, 0, 0xffffff00, 0x00000000 },
5527 { 0x5004, 0, 0x00000000, 0x0000007f },
5528 { 0x5008, 0, 0x0f0007ff, 0x00000000 },
5530 { 0x5c00, 0, 0x00000000, 0x00000001 },
5531 { 0x5c04, 0, 0x00000000, 0x0003000f },
5532 { 0x5c08, 0, 0x00000003, 0x00000000 },
5533 { 0x5c0c, 0, 0x0000fff8, 0x00000000 },
5534 { 0x5c10, 0, 0x00000000, 0xffffffff },
5535 { 0x5c80, 0, 0x00000000, 0x0f7113f1 },
5536 { 0x5c84, 0, 0x00000000, 0x0000f333 },
5537 { 0x5c88, 0, 0x00000000, 0x00077373 },
5538 { 0x5c8c, 0, 0x00000000, 0x0007f737 },
5540 { 0x6808, 0, 0x0000ff7f, 0x00000000 },
5541 { 0x680c, 0, 0xffffffff, 0x00000000 },
5542 { 0x6810, 0, 0xffffffff, 0x00000000 },
5543 { 0x6814, 0, 0xffffffff, 0x00000000 },
5544 { 0x6818, 0, 0xffffffff, 0x00000000 },
5545 { 0x681c, 0, 0xffffffff, 0x00000000 },
5546 { 0x6820, 0, 0x00ff00ff, 0x00000000 },
5547 { 0x6824, 0, 0x00ff00ff, 0x00000000 },
5548 { 0x6828, 0, 0x00ff00ff, 0x00000000 },
5549 { 0x682c, 0, 0x03ff03ff, 0x00000000 },
5550 { 0x6830, 0, 0x03ff03ff, 0x00000000 },
5551 { 0x6834, 0, 0x03ff03ff, 0x00000000 },
5552 { 0x6838, 0, 0x03ff03ff, 0x00000000 },
5553 { 0x683c, 0, 0x0000ffff, 0x00000000 },
5554 { 0x6840, 0, 0x00000ff0, 0x00000000 },
5555 { 0x6844, 0, 0x00ffff00, 0x00000000 },
5556 { 0x684c, 0, 0xffffffff, 0x00000000 },
5557 { 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
5558 { 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
5559 { 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
5560 { 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
5561 { 0x6908, 0, 0x00000000, 0x0001ff0f },
5562 { 0x690c, 0, 0x00000000, 0x0ffe00f0 },
5564 { 0xffff, 0, 0x00000000, 0x00000000 },
5567 ret = 0;
5568 is_5709 = 0;
5569 if (CHIP_NUM(bp) == CHIP_NUM_5709)
5570 is_5709 = 1;
5572 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
5573 u32 offset, rw_mask, ro_mask, save_val, val;
5574 u16 flags = reg_tbl[i].flags;
5576 if (is_5709 && (flags & BNX2_FL_NOT_5709))
5577 continue;
5579 offset = (u32) reg_tbl[i].offset;
5580 rw_mask = reg_tbl[i].rw_mask;
5581 ro_mask = reg_tbl[i].ro_mask;
5583 save_val = readl(bp->regview + offset);
5585 writel(0, bp->regview + offset);
5587 val = readl(bp->regview + offset);
5588 if ((val & rw_mask) != 0) {
5589 goto reg_test_err;
5592 if ((val & ro_mask) != (save_val & ro_mask)) {
5593 goto reg_test_err;
5596 writel(0xffffffff, bp->regview + offset);
5598 val = readl(bp->regview + offset);
5599 if ((val & rw_mask) != rw_mask) {
5600 goto reg_test_err;
5603 if ((val & ro_mask) != (save_val & ro_mask)) {
5604 goto reg_test_err;
5607 writel(save_val, bp->regview + offset);
5608 continue;
5610 reg_test_err:
5611 writel(save_val, bp->regview + offset);
5612 ret = -ENODEV;
5613 break;
5615 return ret;
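/* Summary of the loop above: for each entry, writing 0 must leave every
 * read-write bit clear and every read-only bit untouched; writing
 * 0xffffffff must set every read-write bit, again without disturbing
 * the read-only mask.  Any deviation restores the saved value and fails
 * the whole test with -ENODEV.
 */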
5618 static int
5619 bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
5621 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
5622 		0xaaaaaaaa, 0xaa55aa55, 0x55aa55aa };
5623 int i;
5625 for (i = 0; i < sizeof(test_pattern) / 4; i++) {
5626 u32 offset;
5628 for (offset = 0; offset < size; offset += 4) {
5630 bnx2_reg_wr_ind(bp, start + offset, test_pattern[i]);
5632 if (bnx2_reg_rd_ind(bp, start + offset) !=
5633 test_pattern[i]) {
5634 return -ENODEV;
5638 return 0;
5641 static int
5642 bnx2_test_memory(struct bnx2 *bp)
5644 int ret = 0;
5645 int i;
5646 static struct mem_entry {
5647 u32 offset;
5648 u32 len;
5649 } mem_tbl_5706[] = {
5650 { 0x60000, 0x4000 },
5651 { 0xa0000, 0x3000 },
5652 { 0xe0000, 0x4000 },
5653 { 0x120000, 0x4000 },
5654 { 0x1a0000, 0x4000 },
5655 { 0x160000, 0x4000 },
5656 { 0xffffffff, 0 },
5658 mem_tbl_5709[] = {
5659 { 0x60000, 0x4000 },
5660 { 0xa0000, 0x3000 },
5661 { 0xe0000, 0x4000 },
5662 { 0x120000, 0x4000 },
5663 { 0x1a0000, 0x4000 },
5664 { 0xffffffff, 0 },
5666 struct mem_entry *mem_tbl;
5668 if (CHIP_NUM(bp) == CHIP_NUM_5709)
5669 mem_tbl = mem_tbl_5709;
5670 else
5671 mem_tbl = mem_tbl_5706;
5673 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
5674 if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
5675 mem_tbl[i].len)) != 0) {
5676 return ret;
5680 return ret;
5683 #define BNX2_MAC_LOOPBACK 0
5684 #define BNX2_PHY_LOOPBACK 1
5686 static int
5687 bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
5689 unsigned int pkt_size, num_pkts, i;
5690 struct sk_buff *skb, *rx_skb;
5691 unsigned char *packet;
5692 u16 rx_start_idx, rx_idx;
5693 dma_addr_t map;
5694 struct tx_bd *txbd;
5695 struct sw_bd *rx_buf;
5696 struct l2_fhdr *rx_hdr;
5697 int ret = -ENODEV;
5698 struct bnx2_napi *bnapi = &bp->bnx2_napi[0], *tx_napi;
5699 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
5700 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
5702 tx_napi = bnapi;
5704 txr = &tx_napi->tx_ring;
5705 rxr = &bnapi->rx_ring;
5706 if (loopback_mode == BNX2_MAC_LOOPBACK) {
5707 bp->loopback = MAC_LOOPBACK;
5708 bnx2_set_mac_loopback(bp);
5710 else if (loopback_mode == BNX2_PHY_LOOPBACK) {
5711 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
5712 return 0;
5714 bp->loopback = PHY_LOOPBACK;
5715 bnx2_set_phy_loopback(bp);
5717 else
5718 return -EINVAL;
5720 pkt_size = min(bp->dev->mtu + ETH_HLEN, bp->rx_jumbo_thresh - 4);
5721 skb = netdev_alloc_skb(bp->dev, pkt_size);
5722 if (!skb)
5723 return -ENOMEM;
5724 packet = skb_put(skb, pkt_size);
5725 memcpy(packet, bp->dev->dev_addr, 6);
5726 memset(packet + 6, 0x0, 8);
5727 for (i = 14; i < pkt_size; i++)
5728 packet[i] = (unsigned char) (i & 0xff);
5730 map = pci_map_single(bp->pdev, skb->data, pkt_size,
5731 PCI_DMA_TODEVICE);
5732 if (pci_dma_mapping_error(bp->pdev, map)) {
5733 dev_kfree_skb(skb);
5734 return -EIO;
5737 REG_WR(bp, BNX2_HC_COMMAND,
5738 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
5740 REG_RD(bp, BNX2_HC_COMMAND);
5742 udelay(5);
5743 rx_start_idx = bnx2_get_hw_rx_cons(bnapi);
5745 num_pkts = 0;
5747 txbd = &txr->tx_desc_ring[TX_RING_IDX(txr->tx_prod)];
5749 txbd->tx_bd_haddr_hi = (u64) map >> 32;
5750 txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
5751 txbd->tx_bd_mss_nbytes = pkt_size;
5752 txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;
5754 num_pkts++;
5755 txr->tx_prod = NEXT_TX_BD(txr->tx_prod);
5756 txr->tx_prod_bseq += pkt_size;
5758 REG_WR16(bp, txr->tx_bidx_addr, txr->tx_prod);
5759 REG_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);
5761 udelay(100);
5763 REG_WR(bp, BNX2_HC_COMMAND,
5764 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
5766 REG_RD(bp, BNX2_HC_COMMAND);
5768 udelay(5);
5770 pci_unmap_single(bp->pdev, map, pkt_size, PCI_DMA_TODEVICE);
5771 dev_kfree_skb(skb);
5773 if (bnx2_get_hw_tx_cons(tx_napi) != txr->tx_prod)
5774 goto loopback_test_done;
5776 rx_idx = bnx2_get_hw_rx_cons(bnapi);
5777 if (rx_idx != rx_start_idx + num_pkts) {
5778 goto loopback_test_done;
5781 rx_buf = &rxr->rx_buf_ring[rx_start_idx];
5782 rx_skb = rx_buf->skb;
5784 rx_hdr = (struct l2_fhdr *) rx_skb->data;
5785 skb_reserve(rx_skb, BNX2_RX_OFFSET);
5787 pci_dma_sync_single_for_cpu(bp->pdev,
5788 pci_unmap_addr(rx_buf, mapping),
5789 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
5791 if (rx_hdr->l2_fhdr_status &
5792 (L2_FHDR_ERRORS_BAD_CRC |
5793 L2_FHDR_ERRORS_PHY_DECODE |
5794 L2_FHDR_ERRORS_ALIGNMENT |
5795 L2_FHDR_ERRORS_TOO_SHORT |
5796 L2_FHDR_ERRORS_GIANT_FRAME)) {
5798 goto loopback_test_done;
5801 if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) {
5802 goto loopback_test_done;
5805 for (i = 14; i < pkt_size; i++) {
5806 if (*(rx_skb->data + i) != (unsigned char) (i & 0xff)) {
5807 goto loopback_test_done;
5811 ret = 0;
5813 loopback_test_done:
5814 bp->loopback = 0;
5815 return ret;
5818 #define BNX2_MAC_LOOPBACK_FAILED 1
5819 #define BNX2_PHY_LOOPBACK_FAILED 2
5820 #define BNX2_LOOPBACK_FAILED (BNX2_MAC_LOOPBACK_FAILED | \
5821 BNX2_PHY_LOOPBACK_FAILED)
5823 static int
5824 bnx2_test_loopback(struct bnx2 *bp)
5826 int rc = 0;
5828 if (!netif_running(bp->dev))
5829 return BNX2_LOOPBACK_FAILED;
5831 bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
5832 spin_lock_bh(&bp->phy_lock);
5833 bnx2_init_phy(bp, 1);
5834 spin_unlock_bh(&bp->phy_lock);
5835 if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
5836 rc |= BNX2_MAC_LOOPBACK_FAILED;
5837 if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
5838 rc |= BNX2_PHY_LOOPBACK_FAILED;
5839 return rc;
5842 #define NVRAM_SIZE 0x200
5843 #define CRC32_RESIDUAL 0xdebb20e3
5845 static int
5846 bnx2_test_nvram(struct bnx2 *bp)
5848 __be32 buf[NVRAM_SIZE / 4];
5849 u8 *data = (u8 *) buf;
5850 int rc = 0;
5851 u32 magic, csum;
5853 if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
5854 goto test_nvram_done;
5856 magic = be32_to_cpu(buf[0]);
5857 if (magic != 0x669955aa) {
5858 rc = -ENODEV;
5859 goto test_nvram_done;
5862 if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0)
5863 goto test_nvram_done;
5865 csum = ether_crc_le(0x100, data);
5866 if (csum != CRC32_RESIDUAL) {
5867 rc = -ENODEV;
5868 goto test_nvram_done;
5871 csum = ether_crc_le(0x100, data + 0x100);
5872 if (csum != CRC32_RESIDUAL) {
5873 rc = -ENODEV;
5876 test_nvram_done:
5877 return rc;
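/* Note: running ether_crc_le() across a block that ends with its own
 * little-endian CRC-32 always yields the fixed residual 0xdebb20e3, so
 * each 0x100-byte region above is validated without locating or parsing
 * its embedded checksum field.
 */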
5880 static int
5881 bnx2_test_link(struct bnx2 *bp)
5883 u32 bmsr;
5885 if (!netif_running(bp->dev))
5886 return -ENODEV;
5888 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
5889 if (bp->link_up)
5890 return 0;
5891 return -ENODEV;
5893 spin_lock_bh(&bp->phy_lock);
5894 bnx2_enable_bmsr1(bp);
5895 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
5896 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
5897 bnx2_disable_bmsr1(bp);
5898 spin_unlock_bh(&bp->phy_lock);
5900 if (bmsr & BMSR_LSTATUS) {
5901 return 0;
5903 return -ENODEV;
5906 static int
5907 bnx2_test_intr(struct bnx2 *bp)
5909 int i;
5910 u16 status_idx;
5912 if (!netif_running(bp->dev))
5913 return -ENODEV;
5915 status_idx = REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;
5917 /* This register is not touched during run-time. */
5918 REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
5919 REG_RD(bp, BNX2_HC_COMMAND);
5921 for (i = 0; i < 10; i++) {
5922 if ((REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
5923 status_idx) {
5925 break;
5928 msleep_interruptible(10);
5930 if (i < 10)
5931 return 0;
5933 return -ENODEV;
5936 /* Determine link for parallel detection. */
5937 static int
5938 bnx2_5706_serdes_has_link(struct bnx2 *bp)
5940 u32 mode_ctl, an_dbg, exp;
5942 if (bp->phy_flags & BNX2_PHY_FLAG_NO_PARALLEL)
5943 return 0;
5945 bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_MODE_CTL);
5946 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &mode_ctl);
5948 if (!(mode_ctl & MISC_SHDW_MODE_CTL_SIG_DET))
5949 return 0;
5951 bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
5952 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
5953 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
5955 if (an_dbg & (MISC_SHDW_AN_DBG_NOSYNC | MISC_SHDW_AN_DBG_RUDI_INVALID))
5956 return 0;
5958 bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_REG1);
5959 bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);
5960 bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);
5962 if (exp & MII_EXPAND_REG1_RUDI_C) /* receiving CONFIG */
5963 return 0;
5965 return 1;
5968 static void
5969 bnx2_5706_serdes_timer(struct bnx2 *bp)
5971 int check_link = 1;
5973 spin_lock(&bp->phy_lock);
5974 if (bp->serdes_an_pending) {
5975 bp->serdes_an_pending--;
5976 check_link = 0;
5977 } else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
5978 u32 bmcr;
5980 bp->current_interval = BNX2_TIMER_INTERVAL;
5982 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
5984 if (bmcr & BMCR_ANENABLE) {
5985 if (bnx2_5706_serdes_has_link(bp)) {
5986 bmcr &= ~BMCR_ANENABLE;
5987 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
5988 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
5989 bp->phy_flags |= BNX2_PHY_FLAG_PARALLEL_DETECT;
5993 else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
5994 (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)) {
5995 u32 phy2;
5997 bnx2_write_phy(bp, 0x17, 0x0f01);
5998 bnx2_read_phy(bp, 0x15, &phy2);
5999 if (phy2 & 0x20) {
6000 u32 bmcr;
6002 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
6003 bmcr |= BMCR_ANENABLE;
6004 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
6006 bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
6008 } else
6009 bp->current_interval = BNX2_TIMER_INTERVAL;
6011 if (check_link) {
6012 u32 val;
6014 bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
6015 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);
6016 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);
6018 if (bp->link_up && (val & MISC_SHDW_AN_DBG_NOSYNC)) {
6019 if (!(bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN)) {
6020 bnx2_5706s_force_link_dn(bp, 1);
6021 bp->phy_flags |= BNX2_PHY_FLAG_FORCED_DOWN;
6022 } else
6023 bnx2_set_link(bp);
6024 } else if (!bp->link_up && !(val & MISC_SHDW_AN_DBG_NOSYNC))
6025 bnx2_set_link(bp);
6027 spin_unlock(&bp->phy_lock);
6030 static void
6031 bnx2_5708_serdes_timer(struct bnx2 *bp)
6033 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
6034 return;
6036 if ((bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) == 0) {
6037 bp->serdes_an_pending = 0;
6038 return;
6041 spin_lock(&bp->phy_lock);
6042 if (bp->serdes_an_pending)
6043 bp->serdes_an_pending--;
6044 else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
6045 u32 bmcr;
6047 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
6048 if (bmcr & BMCR_ANENABLE) {
6049 bnx2_enable_forced_2g5(bp);
6050 bp->current_interval = BNX2_SERDES_FORCED_TIMEOUT;
6051 } else {
6052 bnx2_disable_forced_2g5(bp);
6053 bp->serdes_an_pending = 2;
6054 bp->current_interval = BNX2_TIMER_INTERVAL;
6057 } else
6058 bp->current_interval = BNX2_TIMER_INTERVAL;
6060 spin_unlock(&bp->phy_lock);
6063 static void
6064 bnx2_timer(unsigned long data)
6066 struct bnx2 *bp = (struct bnx2 *) data;
6068 if (!netif_running(bp->dev))
6069 return;
6071 if (atomic_read(&bp->intr_sem) != 0)
6072 goto bnx2_restart_timer;
6074 if ((bp->flags & (BNX2_FLAG_USING_MSI | BNX2_FLAG_ONE_SHOT_MSI)) ==
6075 BNX2_FLAG_USING_MSI)
6076 bnx2_chk_missed_msi(bp);
6078 bnx2_send_heart_beat(bp);
6080 bp->stats_blk->stat_FwRxDrop =
6081 bnx2_reg_rd_ind(bp, BNX2_FW_RX_DROP_COUNT);
6083 	/* Work around occasionally corrupted counters. */
6084 if ((bp->flags & BNX2_FLAG_BROKEN_STATS) && bp->stats_ticks)
6085 REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd |
6086 BNX2_HC_COMMAND_STATS_NOW);
6088 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
6089 if (CHIP_NUM(bp) == CHIP_NUM_5706)
6090 bnx2_5706_serdes_timer(bp);
6091 else
6092 bnx2_5708_serdes_timer(bp);
6095 bnx2_restart_timer:
6096 mod_timer(&bp->timer, jiffies + bp->current_interval);
6099 static int
6100 bnx2_request_irq(struct bnx2 *bp)
6102 unsigned long flags;
6103 struct bnx2_irq *irq;
6104 int rc = 0, i;
6106 if (bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)
6107 flags = 0;
6108 else
6109 flags = IRQF_SHARED;
6111 for (i = 0; i < bp->irq_nvecs; i++) {
6112 irq = &bp->irq_tbl[i];
6113 rc = request_irq(irq->vector, irq->handler, flags, irq->name,
6114 &bp->bnx2_napi[i]);
6115 if (rc)
6116 break;
6117 irq->requested = 1;
6119 return rc;
6122 static void
6123 bnx2_free_irq(struct bnx2 *bp)
6125 struct bnx2_irq *irq;
6126 int i;
6128 for (i = 0; i < bp->irq_nvecs; i++) {
6129 irq = &bp->irq_tbl[i];
6130 if (irq->requested)
6131 free_irq(irq->vector, &bp->bnx2_napi[i]);
6132 irq->requested = 0;
6134 if (bp->flags & BNX2_FLAG_USING_MSI)
6135 pci_disable_msi(bp->pdev);
6136 else if (bp->flags & BNX2_FLAG_USING_MSIX)
6137 pci_disable_msix(bp->pdev);
6139 bp->flags &= ~(BNX2_FLAG_USING_MSI_OR_MSIX | BNX2_FLAG_ONE_SHOT_MSI);
6142 static void
6143 bnx2_enable_msix(struct bnx2 *bp, int msix_vecs)
6145 int i, rc;
6146 struct msix_entry msix_ent[BNX2_MAX_MSIX_VEC];
6147 struct net_device *dev = bp->dev;
6148 const int len = sizeof(bp->irq_tbl[0].name);
6150 bnx2_setup_msix_tbl(bp);
6151 REG_WR(bp, BNX2_PCI_MSIX_CONTROL, BNX2_MAX_MSIX_HW_VEC - 1);
6152 REG_WR(bp, BNX2_PCI_MSIX_TBL_OFF_BIR, BNX2_PCI_GRC_WINDOW2_BASE);
6153 REG_WR(bp, BNX2_PCI_MSIX_PBA_OFF_BIT, BNX2_PCI_GRC_WINDOW3_BASE);
6155 for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
6156 msix_ent[i].entry = i;
6157 msix_ent[i].vector = 0;
6160 rc = pci_enable_msix(bp->pdev, msix_ent, BNX2_MAX_MSIX_VEC);
6161 if (rc != 0)
6162 return;
6164 bp->irq_nvecs = msix_vecs;
6165 bp->flags |= BNX2_FLAG_USING_MSIX | BNX2_FLAG_ONE_SHOT_MSI;
6166 for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
6167 bp->irq_tbl[i].vector = msix_ent[i].vector;
6168 snprintf(bp->irq_tbl[i].name, len, "%s-%d", dev->name, i);
6169 bp->irq_tbl[i].handler = bnx2_msi_1shot;
6173 static void
6174 bnx2_setup_int_mode(struct bnx2 *bp, int dis_msi)
6176 int cpus = num_online_cpus();
6177 int msix_vecs = min(cpus + 1, RX_MAX_RINGS);
6179 bp->irq_tbl[0].handler = bnx2_interrupt;
6180 strcpy(bp->irq_tbl[0].name, bp->dev->name);
6181 bp->irq_nvecs = 1;
6182 bp->irq_tbl[0].vector = bp->pdev->irq;
6184 if ((bp->flags & BNX2_FLAG_MSIX_CAP) && !dis_msi && cpus > 1)
6185 bnx2_enable_msix(bp, msix_vecs);
6187 if ((bp->flags & BNX2_FLAG_MSI_CAP) && !dis_msi &&
6188 !(bp->flags & BNX2_FLAG_USING_MSIX)) {
6189 if (pci_enable_msi(bp->pdev) == 0) {
6190 bp->flags |= BNX2_FLAG_USING_MSI;
6191 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
6192 bp->flags |= BNX2_FLAG_ONE_SHOT_MSI;
6193 bp->irq_tbl[0].handler = bnx2_msi_1shot;
6194 } else
6195 bp->irq_tbl[0].handler = bnx2_msi;
6197 bp->irq_tbl[0].vector = bp->pdev->irq;
6201 bp->num_tx_rings = rounddown_pow_of_two(bp->irq_nvecs);
6202 bp->dev->real_num_tx_queues = bp->num_tx_rings;
6204 bp->num_rx_rings = bp->irq_nvecs;
6207 /* Called with rtnl_lock */
6208 static int
6209 bnx2_open(struct net_device *dev)
6211 struct bnx2 *bp = netdev_priv(dev);
6212 int rc;
6214 netif_carrier_off(dev);
6216 bnx2_set_power_state(bp, PCI_D0);
6217 bnx2_disable_int(bp);
6219 bnx2_setup_int_mode(bp, disable_msi);
6220 bnx2_init_napi(bp);
6221 bnx2_napi_enable(bp);
6222 rc = bnx2_alloc_mem(bp);
6223 if (rc)
6224 goto open_err;
6226 rc = bnx2_request_irq(bp);
6227 if (rc)
6228 goto open_err;
6230 rc = bnx2_init_nic(bp, 1);
6231 if (rc)
6232 goto open_err;
6234 mod_timer(&bp->timer, jiffies + bp->current_interval);
6236 atomic_set(&bp->intr_sem, 0);
6238 bnx2_enable_int(bp);
6240 if (bp->flags & BNX2_FLAG_USING_MSI) {
6241 /* Test MSI to make sure it is working
6242 		 * If the MSI test fails, go back to INTx mode. */
6244 if (bnx2_test_intr(bp) != 0) {
6245 printk(KERN_WARNING PFX "%s: No interrupt was generated"
6246 " using MSI, switching to INTx mode. Please"
6247 " report this failure to the PCI maintainer"
6248 " and include system chipset information.\n",
6249 bp->dev->name);
6251 bnx2_disable_int(bp);
6252 bnx2_free_irq(bp);
6254 bnx2_setup_int_mode(bp, 1);
6256 rc = bnx2_init_nic(bp, 0);
6258 if (!rc)
6259 rc = bnx2_request_irq(bp);
6261 if (rc) {
6262 del_timer_sync(&bp->timer);
6263 goto open_err;
6265 bnx2_enable_int(bp);
6268 if (bp->flags & BNX2_FLAG_USING_MSI)
6269 printk(KERN_INFO PFX "%s: using MSI\n", dev->name);
6270 else if (bp->flags & BNX2_FLAG_USING_MSIX)
6271 printk(KERN_INFO PFX "%s: using MSIX\n", dev->name);
6273 netif_tx_start_all_queues(dev);
6275 return 0;
6277 open_err:
6278 bnx2_napi_disable(bp);
6279 bnx2_free_skbs(bp);
6280 bnx2_free_irq(bp);
6281 bnx2_free_mem(bp);
6282 bnx2_del_napi(bp);
6283 return rc;
6286 static void
6287 bnx2_reset_task(struct work_struct *work)
6289 struct bnx2 *bp = container_of(work, struct bnx2, reset_task);
6291 rtnl_lock();
6292 if (!netif_running(bp->dev)) {
6293 rtnl_unlock();
6294 return;
6297 bnx2_netif_stop(bp);
6299 bnx2_init_nic(bp, 1);
6301 atomic_set(&bp->intr_sem, 1);
6302 bnx2_netif_start(bp);
6303 rtnl_unlock();
6306 static void
6307 bnx2_dump_state(struct bnx2 *bp)
6309 struct net_device *dev = bp->dev;
6311 printk(KERN_ERR PFX "%s DEBUG: intr_sem[%x]\n", dev->name,
6312 atomic_read(&bp->intr_sem));
6313 printk(KERN_ERR PFX "%s DEBUG: EMAC_TX_STATUS[%08x] "
6314 "RPM_MGMT_PKT_CTRL[%08x]\n", dev->name,
6315 REG_RD(bp, BNX2_EMAC_TX_STATUS),
6316 REG_RD(bp, BNX2_RPM_MGMT_PKT_CTRL));
6317 printk(KERN_ERR PFX "%s DEBUG: MCP_STATE_P0[%08x] MCP_STATE_P1[%08x]\n",
6318 dev->name, bnx2_reg_rd_ind(bp, BNX2_MCP_STATE_P0),
6319 bnx2_reg_rd_ind(bp, BNX2_MCP_STATE_P1));
6320 printk(KERN_ERR PFX "%s DEBUG: HC_STATS_INTERRUPT_STATUS[%08x]\n",
6321 dev->name, REG_RD(bp, BNX2_HC_STATS_INTERRUPT_STATUS));
6322 if (bp->flags & BNX2_FLAG_USING_MSIX)
6323 printk(KERN_ERR PFX "%s DEBUG: PBA[%08x]\n", dev->name,
6324 REG_RD(bp, BNX2_PCI_GRC_WINDOW3_BASE));
6327 static void
6328 bnx2_tx_timeout(struct net_device *dev)
6330 struct bnx2 *bp = netdev_priv(dev);
6332 bnx2_dump_state(bp);
6334 	/* This allows the netif to be shut down gracefully before resetting */
6335 schedule_work(&bp->reset_task);
6338 #ifdef BCM_VLAN
6339 /* Called with rtnl_lock */
6340 static void
6341 bnx2_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp)
6343 struct bnx2 *bp = netdev_priv(dev);
6345 if (netif_running(dev))
6346 bnx2_netif_stop(bp);
6348 bp->vlgrp = vlgrp;
6350 if (!netif_running(dev))
6351 return;
6353 bnx2_set_rx_mode(dev);
6354 if (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN)
6355 bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_KEEP_VLAN_UPDATE, 0, 1);
6357 bnx2_netif_start(bp);
6359 #endif
6361 /* Called with netif_tx_lock.
6362 * bnx2_tx_int() runs without netif_tx_lock unless it needs to call
6363  * netif_wake_queue(). */
6365 static netdev_tx_t
6366 bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
6368 struct bnx2 *bp = netdev_priv(dev);
6369 dma_addr_t mapping;
6370 struct tx_bd *txbd;
6371 struct sw_tx_bd *tx_buf;
6372 u32 len, vlan_tag_flags, last_frag, mss;
6373 u16 prod, ring_prod;
6374 int i;
6375 struct bnx2_napi *bnapi;
6376 struct bnx2_tx_ring_info *txr;
6377 struct netdev_queue *txq;
6379 /* Determine which tx ring we will be placed on */
6380 i = skb_get_queue_mapping(skb);
6381 bnapi = &bp->bnx2_napi[i];
6382 txr = &bnapi->tx_ring;
6383 txq = netdev_get_tx_queue(dev, i);
6385 if (unlikely(bnx2_tx_avail(bp, txr) <
6386 (skb_shinfo(skb)->nr_frags + 1))) {
6387 netif_tx_stop_queue(txq);
6388 printk(KERN_ERR PFX "%s: BUG! Tx ring full when queue awake!\n",
6389 dev->name);
6391 return NETDEV_TX_BUSY;
6393 len = skb_headlen(skb);
6394 prod = txr->tx_prod;
6395 ring_prod = TX_RING_IDX(prod);
6397 vlan_tag_flags = 0;
6398 if (skb->ip_summed == CHECKSUM_PARTIAL) {
6399 vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
6402 #ifdef BCM_VLAN
6403 if (bp->vlgrp && vlan_tx_tag_present(skb)) {
6404 vlan_tag_flags |=
6405 (TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
6407 #endif
6408 if ((mss = skb_shinfo(skb)->gso_size)) {
6409 u32 tcp_opt_len;
6410 struct iphdr *iph;
6412 vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;
6414 tcp_opt_len = tcp_optlen(skb);
6416 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
6417 u32 tcp_off = skb_transport_offset(skb) -
6418 sizeof(struct ipv6hdr) - ETH_HLEN;
6420 vlan_tag_flags |= ((tcp_opt_len >> 2) << 8) |
6421 TX_BD_FLAGS_SW_FLAGS;
6422 if (likely(tcp_off == 0))
6423 vlan_tag_flags &= ~TX_BD_FLAGS_TCP6_OFF0_MSK;
6424 else {
6425 tcp_off >>= 3;
6426 vlan_tag_flags |= ((tcp_off & 0x3) <<
6427 TX_BD_FLAGS_TCP6_OFF0_SHL) |
6428 ((tcp_off & 0x10) <<
6429 TX_BD_FLAGS_TCP6_OFF4_SHL);
6430 mss |= (tcp_off & 0xc) << TX_BD_TCP6_OFF2_SHL;
6432 } else {
6433 iph = ip_hdr(skb);
6434 if (tcp_opt_len || (iph->ihl > 5)) {
6435 vlan_tag_flags |= ((iph->ihl - 5) +
6436 (tcp_opt_len >> 2)) << 8;
6439 } else
6440 mss = 0;
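	/* Sketch of the IPv6 LSO encoding above: tcp_off is the distance of
	 * the TCP header beyond a bare (extension-header-free) IPv6 packet.
	 * After the >> 3, its bits are scattered into the hardware-defined
	 * fields: bits 0-1 and bit 4 into vlan_tag_flags, bits 2-3 into the
	 * mss word; the common no-extension-header case keeps tcp_off == 0
	 * and only clears the offset mask.
	 */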
6442 mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE);
6443 if (pci_dma_mapping_error(bp->pdev, mapping)) {
6444 dev_kfree_skb(skb);
6445 return NETDEV_TX_OK;
6448 tx_buf = &txr->tx_buf_ring[ring_prod];
6449 tx_buf->skb = skb;
6450 pci_unmap_addr_set(tx_buf, mapping, mapping);
6452 txbd = &txr->tx_desc_ring[ring_prod];
6454 txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
6455 txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
6456 txbd->tx_bd_mss_nbytes = len | (mss << 16);
6457 txbd->tx_bd_vlan_tag_flags = vlan_tag_flags | TX_BD_FLAGS_START;
6459 last_frag = skb_shinfo(skb)->nr_frags;
6460 tx_buf->nr_frags = last_frag;
6461 tx_buf->is_gso = skb_is_gso(skb);
6463 for (i = 0; i < last_frag; i++) {
6464 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
6466 prod = NEXT_TX_BD(prod);
6467 ring_prod = TX_RING_IDX(prod);
6468 txbd = &txr->tx_desc_ring[ring_prod];
6470 len = frag->size;
6471 mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
6472 len, PCI_DMA_TODEVICE);
6473 if (pci_dma_mapping_error(bp->pdev, mapping))
6474 goto dma_error;
6475 pci_unmap_addr_set(&txr->tx_buf_ring[ring_prod], mapping,
6476 mapping);
6478 txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
6479 txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
6480 txbd->tx_bd_mss_nbytes = len | (mss << 16);
6481 txbd->tx_bd_vlan_tag_flags = vlan_tag_flags;
6484 txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;
6486 prod = NEXT_TX_BD(prod);
6487 txr->tx_prod_bseq += skb->len;
6489 REG_WR16(bp, txr->tx_bidx_addr, prod);
6490 REG_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);
6492 mmiowb();
6494 txr->tx_prod = prod;
6496 if (unlikely(bnx2_tx_avail(bp, txr) <= MAX_SKB_FRAGS)) {
6497 netif_tx_stop_queue(txq);
6498 if (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)
6499 netif_tx_wake_queue(txq);
6502 return NETDEV_TX_OK;
6503 dma_error:
6504 /* save value of frag that failed */
6505 last_frag = i;
6507 /* start back at beginning and unmap skb */
6508 prod = txr->tx_prod;
6509 ring_prod = TX_RING_IDX(prod);
6510 tx_buf = &txr->tx_buf_ring[ring_prod];
6511 tx_buf->skb = NULL;
6512 pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
6513 skb_headlen(skb), PCI_DMA_TODEVICE);
6515 /* unmap remaining mapped pages */
6516 for (i = 0; i < last_frag; i++) {
6517 prod = NEXT_TX_BD(prod);
6518 ring_prod = TX_RING_IDX(prod);
6519 tx_buf = &txr->tx_buf_ring[ring_prod];
6520 pci_unmap_page(bp->pdev, pci_unmap_addr(tx_buf, mapping),
6521 skb_shinfo(skb)->frags[i].size,
6522 PCI_DMA_TODEVICE);
6525 dev_kfree_skb(skb);
6526 return NETDEV_TX_OK;
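/*
 * Editor's note: returning NETDEV_TX_OK after a DMA mapping failure is
 * intentional; the skb has already been freed here, so the stack must not
 * requeue it. NETDEV_TX_BUSY is reserved for the ring-full case above,
 * where the caller still owns the skb.
 */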
6529 /* Called with rtnl_lock */
6530 static int
6531 bnx2_close(struct net_device *dev)
6533 struct bnx2 *bp = netdev_priv(dev);
6535 cancel_work_sync(&bp->reset_task);
6537 bnx2_disable_int_sync(bp);
6538 bnx2_napi_disable(bp);
6539 del_timer_sync(&bp->timer);
6540 bnx2_shutdown_chip(bp);
6541 bnx2_free_irq(bp);
6542 bnx2_free_skbs(bp);
6543 bnx2_free_mem(bp);
6544 bnx2_del_napi(bp);
6545 bp->link_up = 0;
6546 netif_carrier_off(bp->dev);
6547 bnx2_set_power_state(bp, PCI_D3hot);
6548 return 0;
6551 #define GET_NET_STATS64(ctr) \
6552 (unsigned long) ((unsigned long) (ctr##_hi) << 32) + \
6553 (unsigned long) (ctr##_lo)
6555 #define GET_NET_STATS32(ctr) \
6556 (ctr##_lo)
6558 #if (BITS_PER_LONG == 64)
6559 #define GET_NET_STATS GET_NET_STATS64
6560 #else
6561 #define GET_NET_STATS GET_NET_STATS32
6562 #endif
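/*
 * Editor's illustration (not part of the original driver): each 64-bit
 * hardware counter is exposed as a _hi/_lo register pair, so on a
 * 64-bit kernel
 *
 *   GET_NET_STATS(stats_blk->stat_IfHCInOctets)
 *
 * expands to
 *
 *   (unsigned long) ((unsigned long) (stats_blk->stat_IfHCInOctets_hi) << 32) +
 *   (unsigned long) (stats_blk->stat_IfHCInOctets_lo)
 *
 * while a 32-bit kernel reads only the _lo word, so those counters wrap
 * at 2^32.
 */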
6564 static struct net_device_stats *
6565 bnx2_get_stats(struct net_device *dev)
6567 struct bnx2 *bp = netdev_priv(dev);
6568 struct statistics_block *stats_blk = bp->stats_blk;
6569 struct net_device_stats *net_stats = &dev->stats;
6571 if (bp->stats_blk == NULL) {
6572 return net_stats;
6574 net_stats->rx_packets =
6575 GET_NET_STATS(stats_blk->stat_IfHCInUcastPkts) +
6576 GET_NET_STATS(stats_blk->stat_IfHCInMulticastPkts) +
6577 GET_NET_STATS(stats_blk->stat_IfHCInBroadcastPkts);
6579 net_stats->tx_packets =
6580 GET_NET_STATS(stats_blk->stat_IfHCOutUcastPkts) +
6581 GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts) +
6582 GET_NET_STATS(stats_blk->stat_IfHCOutBroadcastPkts);
6584 net_stats->rx_bytes =
6585 GET_NET_STATS(stats_blk->stat_IfHCInOctets);
6587 net_stats->tx_bytes =
6588 GET_NET_STATS(stats_blk->stat_IfHCOutOctets);
6590 net_stats->multicast =
6591 GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts);
6593 net_stats->collisions =
6594 (unsigned long) stats_blk->stat_EtherStatsCollisions;
6596 net_stats->rx_length_errors =
6597 (unsigned long) (stats_blk->stat_EtherStatsUndersizePkts +
6598 stats_blk->stat_EtherStatsOverrsizePkts);
6600 net_stats->rx_over_errors =
6601 (unsigned long) (stats_blk->stat_IfInFTQDiscards +
6602 stats_blk->stat_IfInMBUFDiscards);
6604 net_stats->rx_frame_errors =
6605 (unsigned long) stats_blk->stat_Dot3StatsAlignmentErrors;
6607 net_stats->rx_crc_errors =
6608 (unsigned long) stats_blk->stat_Dot3StatsFCSErrors;
6610 net_stats->rx_errors = net_stats->rx_length_errors +
6611 net_stats->rx_over_errors + net_stats->rx_frame_errors +
6612 net_stats->rx_crc_errors;
6614 net_stats->tx_aborted_errors =
6615 (unsigned long) (stats_blk->stat_Dot3StatsExcessiveCollisions +
6616 stats_blk->stat_Dot3StatsLateCollisions);
6618 if ((CHIP_NUM(bp) == CHIP_NUM_5706) ||
6619 (CHIP_ID(bp) == CHIP_ID_5708_A0))
6620 net_stats->tx_carrier_errors = 0;
6621 else {
6622 net_stats->tx_carrier_errors =
6623 (unsigned long)
6624 stats_blk->stat_Dot3StatsCarrierSenseErrors;
6627 net_stats->tx_errors =
6628 (unsigned long)
6629 stats_blk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors +
6631 net_stats->tx_aborted_errors +
6632 net_stats->tx_carrier_errors;
6634 net_stats->rx_missed_errors =
6635 (unsigned long) (stats_blk->stat_IfInFTQDiscards +
6636 stats_blk->stat_IfInMBUFDiscards + stats_blk->stat_FwRxDrop);
6638 return net_stats;
6641 /* All ethtool functions called with rtnl_lock */
6643 static int
6644 bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
6646 struct bnx2 *bp = netdev_priv(dev);
6647 int support_serdes = 0, support_copper = 0;
6649 cmd->supported = SUPPORTED_Autoneg;
6650 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
6651 support_serdes = 1;
6652 support_copper = 1;
6653 } else if (bp->phy_port == PORT_FIBRE)
6654 support_serdes = 1;
6655 else
6656 support_copper = 1;
6658 if (support_serdes) {
6659 cmd->supported |= SUPPORTED_1000baseT_Full |
6660 SUPPORTED_FIBRE;
6661 if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
6662 cmd->supported |= SUPPORTED_2500baseX_Full;
6665 if (support_copper) {
6666 cmd->supported |= SUPPORTED_10baseT_Half |
6667 SUPPORTED_10baseT_Full |
6668 SUPPORTED_100baseT_Half |
6669 SUPPORTED_100baseT_Full |
6670 SUPPORTED_1000baseT_Full |
6671 SUPPORTED_TP;
6675 spin_lock_bh(&bp->phy_lock);
6676 cmd->port = bp->phy_port;
6677 cmd->advertising = bp->advertising;
6679 if (bp->autoneg & AUTONEG_SPEED) {
6680 cmd->autoneg = AUTONEG_ENABLE;
6682 else {
6683 cmd->autoneg = AUTONEG_DISABLE;
6686 if (netif_carrier_ok(dev)) {
6687 cmd->speed = bp->line_speed;
6688 cmd->duplex = bp->duplex;
6690 else {
6691 cmd->speed = -1;
6692 cmd->duplex = -1;
6694 spin_unlock_bh(&bp->phy_lock);
6696 cmd->transceiver = XCVR_INTERNAL;
6697 cmd->phy_address = bp->phy_addr;
6699 return 0;
6702 static int
6703 bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
6705 struct bnx2 *bp = netdev_priv(dev);
6706 u8 autoneg = bp->autoneg;
6707 u8 req_duplex = bp->req_duplex;
6708 u16 req_line_speed = bp->req_line_speed;
6709 u32 advertising = bp->advertising;
6710 int err = -EINVAL;
6712 spin_lock_bh(&bp->phy_lock);
6714 if (cmd->port != PORT_TP && cmd->port != PORT_FIBRE)
6715 goto err_out_unlock;
6717 if (cmd->port != bp->phy_port &&
6718 !(bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP))
6719 goto err_out_unlock;
6721 /* If device is down, we can store the settings only if the user
6722 * is setting the currently active port.
6723 */
6724 if (!netif_running(dev) && cmd->port != bp->phy_port)
6725 goto err_out_unlock;
6727 if (cmd->autoneg == AUTONEG_ENABLE) {
6728 autoneg |= AUTONEG_SPEED;
6730 cmd->advertising &= ETHTOOL_ALL_COPPER_SPEED;
6732 /* allow advertising 1 speed */
6733 if ((cmd->advertising == ADVERTISED_10baseT_Half) ||
6734 (cmd->advertising == ADVERTISED_10baseT_Full) ||
6735 (cmd->advertising == ADVERTISED_100baseT_Half) ||
6736 (cmd->advertising == ADVERTISED_100baseT_Full)) {
6738 if (cmd->port == PORT_FIBRE)
6739 goto err_out_unlock;
6741 advertising = cmd->advertising;
6743 } else if (cmd->advertising == ADVERTISED_2500baseX_Full) {
6744 if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) ||
6745 (cmd->port == PORT_TP))
6746 goto err_out_unlock;
6747 } else if (cmd->advertising == ADVERTISED_1000baseT_Full)
6748 advertising = cmd->advertising;
6749 else if (cmd->advertising == ADVERTISED_1000baseT_Half)
6750 goto err_out_unlock;
6751 else {
6752 if (cmd->port == PORT_FIBRE)
6753 advertising = ETHTOOL_ALL_FIBRE_SPEED;
6754 else
6755 advertising = ETHTOOL_ALL_COPPER_SPEED;
6757 advertising |= ADVERTISED_Autoneg;
6759 else {
6760 if (cmd->port == PORT_FIBRE) {
6761 if ((cmd->speed != SPEED_1000 &&
6762 cmd->speed != SPEED_2500) ||
6763 (cmd->duplex != DUPLEX_FULL))
6764 goto err_out_unlock;
6766 if (cmd->speed == SPEED_2500 &&
6767 !(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
6768 goto err_out_unlock;
6770 else if (cmd->speed == SPEED_1000 || cmd->speed == SPEED_2500)
6771 goto err_out_unlock;
6773 autoneg &= ~AUTONEG_SPEED;
6774 req_line_speed = cmd->speed;
6775 req_duplex = cmd->duplex;
6776 advertising = 0;
6779 bp->autoneg = autoneg;
6780 bp->advertising = advertising;
6781 bp->req_line_speed = req_line_speed;
6782 bp->req_duplex = req_duplex;
6784 err = 0;
6785 /* If device is down, the new settings will be picked up when it is
6786 * brought up.
6787 */
6788 if (netif_running(dev))
6789 err = bnx2_setup_phy(bp, cmd->port);
6791 err_out_unlock:
6792 spin_unlock_bh(&bp->phy_lock);
6794 return err;
6797 static void
6798 bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
6800 struct bnx2 *bp = netdev_priv(dev);
6802 strcpy(info->driver, DRV_MODULE_NAME);
6803 strcpy(info->version, DRV_MODULE_VERSION);
6804 strcpy(info->bus_info, pci_name(bp->pdev));
6805 strcpy(info->fw_version, bp->fw_version);
6808 #define BNX2_REGDUMP_LEN (32 * 1024)
6810 static int
6811 bnx2_get_regs_len(struct net_device *dev)
6813 return BNX2_REGDUMP_LEN;
6816 static void
6817 bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
6819 u32 *p = _p, i, offset;
6820 u8 *orig_p = _p;
6821 struct bnx2 *bp = netdev_priv(dev);
6822 u32 reg_boundaries[] = { 0x0000, 0x0098, 0x0400, 0x045c,
6823 0x0800, 0x0880, 0x0c00, 0x0c10,
6824 0x0c30, 0x0d08, 0x1000, 0x101c,
6825 0x1040, 0x1048, 0x1080, 0x10a4,
6826 0x1400, 0x1490, 0x1498, 0x14f0,
6827 0x1500, 0x155c, 0x1580, 0x15dc,
6828 0x1600, 0x1658, 0x1680, 0x16d8,
6829 0x1800, 0x1820, 0x1840, 0x1854,
6830 0x1880, 0x1894, 0x1900, 0x1984,
6831 0x1c00, 0x1c0c, 0x1c40, 0x1c54,
6832 0x1c80, 0x1c94, 0x1d00, 0x1d84,
6833 0x2000, 0x2030, 0x23c0, 0x2400,
6834 0x2800, 0x2820, 0x2830, 0x2850,
6835 0x2b40, 0x2c10, 0x2fc0, 0x3058,
6836 0x3c00, 0x3c94, 0x4000, 0x4010,
6837 0x4080, 0x4090, 0x43c0, 0x4458,
6838 0x4c00, 0x4c18, 0x4c40, 0x4c54,
6839 0x4fc0, 0x5010, 0x53c0, 0x5444,
6840 0x5c00, 0x5c18, 0x5c80, 0x5c90,
6841 0x5fc0, 0x6000, 0x6400, 0x6428,
6842 0x6800, 0x6848, 0x684c, 0x6860,
6843 0x6888, 0x6910, 0x8000 };
6845 regs->version = 0;
6847 memset(p, 0, BNX2_REGDUMP_LEN);
6849 if (!netif_running(bp->dev))
6850 return;
6852 i = 0;
6853 offset = reg_boundaries[0];
6854 p += offset;
6855 while (offset < BNX2_REGDUMP_LEN) {
6856 *p++ = REG_RD(bp, offset);
6857 offset += 4;
6858 if (offset == reg_boundaries[i + 1]) {
6859 offset = reg_boundaries[i + 2];
6860 p = (u32 *) (orig_p + offset);
6861 i += 2;
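/*
 * Editor's note: reg_boundaries[] is a flat list of (start, end) offset
 * pairs describing the register windows that are safe to read. The loop
 * above dumps 32-bit words within each window and, on hitting an end
 * boundary, advances both offset and p to the next start, leaving the
 * gaps between windows zero-filled in the 32 KB dump buffer.
 */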
6866 static void
6867 bnx2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
6869 struct bnx2 *bp = netdev_priv(dev);
6871 if (bp->flags & BNX2_FLAG_NO_WOL) {
6872 wol->supported = 0;
6873 wol->wolopts = 0;
6875 else {
6876 wol->supported = WAKE_MAGIC;
6877 if (bp->wol)
6878 wol->wolopts = WAKE_MAGIC;
6879 else
6880 wol->wolopts = 0;
6882 memset(&wol->sopass, 0, sizeof(wol->sopass));
6885 static int
6886 bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
6888 struct bnx2 *bp = netdev_priv(dev);
6890 if (wol->wolopts & ~WAKE_MAGIC)
6891 return -EINVAL;
6893 if (wol->wolopts & WAKE_MAGIC) {
6894 if (bp->flags & BNX2_FLAG_NO_WOL)
6895 return -EINVAL;
6897 bp->wol = 1;
6899 else {
6900 bp->wol = 0;
6902 return 0;
6905 static int
6906 bnx2_nway_reset(struct net_device *dev)
6908 struct bnx2 *bp = netdev_priv(dev);
6909 u32 bmcr;
6911 if (!netif_running(dev))
6912 return -EAGAIN;
6914 if (!(bp->autoneg & AUTONEG_SPEED)) {
6915 return -EINVAL;
6918 spin_lock_bh(&bp->phy_lock);
6920 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
6921 int rc;
6923 rc = bnx2_setup_remote_phy(bp, bp->phy_port);
6924 spin_unlock_bh(&bp->phy_lock);
6925 return rc;
6928 /* Force a link down visible on the other side */
6929 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
6930 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
6931 spin_unlock_bh(&bp->phy_lock);
6933 msleep(20);
6935 spin_lock_bh(&bp->phy_lock);
6937 bp->current_interval = BNX2_SERDES_AN_TIMEOUT;
6938 bp->serdes_an_pending = 1;
6939 mod_timer(&bp->timer, jiffies + bp->current_interval);
6942 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
6943 bmcr &= ~BMCR_LOOPBACK;
6944 bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART | BMCR_ANENABLE);
6946 spin_unlock_bh(&bp->phy_lock);
6948 return 0;
6951 static u32
6952 bnx2_get_link(struct net_device *dev)
6954 struct bnx2 *bp = netdev_priv(dev);
6956 return bp->link_up;
6959 static int
6960 bnx2_get_eeprom_len(struct net_device *dev)
6962 struct bnx2 *bp = netdev_priv(dev);
6964 if (bp->flash_info == NULL)
6965 return 0;
6967 return (int) bp->flash_size;
6970 static int
6971 bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
6972 u8 *eebuf)
6974 struct bnx2 *bp = netdev_priv(dev);
6975 int rc;
6977 if (!netif_running(dev))
6978 return -EAGAIN;
6980 /* parameters already validated in ethtool_get_eeprom */
6982 rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
6984 return rc;
6987 static int
6988 bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
6989 u8 *eebuf)
6991 struct bnx2 *bp = netdev_priv(dev);
6992 int rc;
6994 if (!netif_running(dev))
6995 return -EAGAIN;
6997 /* parameters already validated in ethtool_set_eeprom */
6999 rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
7001 return rc;
7004 static int
7005 bnx2_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
7007 struct bnx2 *bp = netdev_priv(dev);
7009 memset(coal, 0, sizeof(struct ethtool_coalesce));
7011 coal->rx_coalesce_usecs = bp->rx_ticks;
7012 coal->rx_max_coalesced_frames = bp->rx_quick_cons_trip;
7013 coal->rx_coalesce_usecs_irq = bp->rx_ticks_int;
7014 coal->rx_max_coalesced_frames_irq = bp->rx_quick_cons_trip_int;
7016 coal->tx_coalesce_usecs = bp->tx_ticks;
7017 coal->tx_max_coalesced_frames = bp->tx_quick_cons_trip;
7018 coal->tx_coalesce_usecs_irq = bp->tx_ticks_int;
7019 coal->tx_max_coalesced_frames_irq = bp->tx_quick_cons_trip_int;
7021 coal->stats_block_coalesce_usecs = bp->stats_ticks;
7023 return 0;
7026 static int
7027 bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
7029 struct bnx2 *bp = netdev_priv(dev);
7031 bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
7032 if (bp->rx_ticks > 0x3ff) bp->rx_ticks = 0x3ff;
7034 bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames;
7035 if (bp->rx_quick_cons_trip > 0xff) bp->rx_quick_cons_trip = 0xff;
7037 bp->rx_ticks_int = (u16) coal->rx_coalesce_usecs_irq;
7038 if (bp->rx_ticks_int > 0x3ff) bp->rx_ticks_int = 0x3ff;
7040 bp->rx_quick_cons_trip_int = (u16) coal->rx_max_coalesced_frames_irq;
7041 if (bp->rx_quick_cons_trip_int > 0xff)
7042 bp->rx_quick_cons_trip_int = 0xff;
7044 bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
7045 if (bp->tx_ticks > 0x3ff) bp->tx_ticks = 0x3ff;
7047 bp->tx_quick_cons_trip = (u16) coal->tx_max_coalesced_frames;
7048 if (bp->tx_quick_cons_trip > 0xff) bp->tx_quick_cons_trip = 0xff;
7050 bp->tx_ticks_int = (u16) coal->tx_coalesce_usecs_irq;
7051 if (bp->tx_ticks_int > 0x3ff) bp->tx_ticks_int = 0x3ff;
7053 bp->tx_quick_cons_trip_int = (u16) coal->tx_max_coalesced_frames_irq;
7054 if (bp->tx_quick_cons_trip_int > 0xff) bp->tx_quick_cons_trip_int =
7055 0xff;
7057 bp->stats_ticks = coal->stats_block_coalesce_usecs;
7058 if (bp->flags & BNX2_FLAG_BROKEN_STATS) {
7059 if (bp->stats_ticks != 0 && bp->stats_ticks != USEC_PER_SEC)
7060 bp->stats_ticks = USEC_PER_SEC;
7062 if (bp->stats_ticks > BNX2_HC_STATS_TICKS_HC_STAT_TICKS)
7063 bp->stats_ticks = BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
7064 bp->stats_ticks &= BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
7066 if (netif_running(bp->dev)) {
7067 bnx2_netif_stop(bp);
7068 bnx2_init_nic(bp, 0);
7069 bnx2_netif_start(bp);
7072 return 0;
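/*
 * Editor's note: the clamps above reflect the hardware field widths: the
 * host-coalescing tick values are 10-bit (capped at 0x3ff microseconds)
 * and the quick-consumer-trip frame counts are 8-bit (capped at 0xff
 * frames); out-of-range ethtool -C requests are silently truncated.
 */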
7075 static void
7076 bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
7078 struct bnx2 *bp = netdev_priv(dev);
7080 ering->rx_max_pending = MAX_TOTAL_RX_DESC_CNT;
7081 ering->rx_mini_max_pending = 0;
7082 ering->rx_jumbo_max_pending = MAX_TOTAL_RX_PG_DESC_CNT;
7084 ering->rx_pending = bp->rx_ring_size;
7085 ering->rx_mini_pending = 0;
7086 ering->rx_jumbo_pending = bp->rx_pg_ring_size;
7088 ering->tx_max_pending = MAX_TX_DESC_CNT;
7089 ering->tx_pending = bp->tx_ring_size;
7092 static int
7093 bnx2_change_ring_size(struct bnx2 *bp, u32 rx, u32 tx)
7095 if (netif_running(bp->dev)) {
7096 bnx2_netif_stop(bp);
7097 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
7098 bnx2_free_skbs(bp);
7099 bnx2_free_mem(bp);
7102 bnx2_set_rx_ring_size(bp, rx);
7103 bp->tx_ring_size = tx;
7105 if (netif_running(bp->dev)) {
7106 int rc;
7108 rc = bnx2_alloc_mem(bp);
7109 if (!rc)
7110 rc = bnx2_init_nic(bp, 0);
7112 if (rc) {
7113 bnx2_napi_enable(bp);
7114 dev_close(bp->dev);
7115 return rc;
7117 bnx2_netif_start(bp);
7119 return 0;
7122 static int
7123 bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
7125 struct bnx2 *bp = netdev_priv(dev);
7126 int rc;
7128 if ((ering->rx_pending > MAX_TOTAL_RX_DESC_CNT) ||
7129 (ering->tx_pending > MAX_TX_DESC_CNT) ||
7130 (ering->tx_pending <= MAX_SKB_FRAGS)) {
7132 return -EINVAL;
7134 rc = bnx2_change_ring_size(bp, ering->rx_pending, ering->tx_pending);
7135 return rc;
7138 static void
7139 bnx2_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
7141 struct bnx2 *bp = netdev_priv(dev);
7143 epause->autoneg = ((bp->autoneg & AUTONEG_FLOW_CTRL) != 0);
7144 epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) != 0);
7145 epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) != 0);
7148 static int
7149 bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
7151 struct bnx2 *bp = netdev_priv(dev);
7153 bp->req_flow_ctrl = 0;
7154 if (epause->rx_pause)
7155 bp->req_flow_ctrl |= FLOW_CTRL_RX;
7156 if (epause->tx_pause)
7157 bp->req_flow_ctrl |= FLOW_CTRL_TX;
7159 if (epause->autoneg) {
7160 bp->autoneg |= AUTONEG_FLOW_CTRL;
7162 else {
7163 bp->autoneg &= ~AUTONEG_FLOW_CTRL;
7166 if (netif_running(dev)) {
7167 spin_lock_bh(&bp->phy_lock);
7168 bnx2_setup_phy(bp, bp->phy_port);
7169 spin_unlock_bh(&bp->phy_lock);
7172 return 0;
7175 static u32
7176 bnx2_get_rx_csum(struct net_device *dev)
7178 struct bnx2 *bp = netdev_priv(dev);
7180 return bp->rx_csum;
7183 static int
7184 bnx2_set_rx_csum(struct net_device *dev, u32 data)
7186 struct bnx2 *bp = netdev_priv(dev);
7188 bp->rx_csum = data;
7189 return 0;
7192 static int
7193 bnx2_set_tso(struct net_device *dev, u32 data)
7195 struct bnx2 *bp = netdev_priv(dev);
7197 if (data) {
7198 dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
7199 if (CHIP_NUM(bp) == CHIP_NUM_5709)
7200 dev->features |= NETIF_F_TSO6;
7201 } else
7202 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6 |
7203 NETIF_F_TSO_ECN);
7204 return 0;
7207 static struct {
7208 char string[ETH_GSTRING_LEN];
7209 } bnx2_stats_str_arr[] = {
7210 { "rx_bytes" },
7211 { "rx_error_bytes" },
7212 { "tx_bytes" },
7213 { "tx_error_bytes" },
7214 { "rx_ucast_packets" },
7215 { "rx_mcast_packets" },
7216 { "rx_bcast_packets" },
7217 { "tx_ucast_packets" },
7218 { "tx_mcast_packets" },
7219 { "tx_bcast_packets" },
7220 { "tx_mac_errors" },
7221 { "tx_carrier_errors" },
7222 { "rx_crc_errors" },
7223 { "rx_align_errors" },
7224 { "tx_single_collisions" },
7225 { "tx_multi_collisions" },
7226 { "tx_deferred" },
7227 { "tx_excess_collisions" },
7228 { "tx_late_collisions" },
7229 { "tx_total_collisions" },
7230 { "rx_fragments" },
7231 { "rx_jabbers" },
7232 { "rx_undersize_packets" },
7233 { "rx_oversize_packets" },
7234 { "rx_64_byte_packets" },
7235 { "rx_65_to_127_byte_packets" },
7236 { "rx_128_to_255_byte_packets" },
7237 { "rx_256_to_511_byte_packets" },
7238 { "rx_512_to_1023_byte_packets" },
7239 { "rx_1024_to_1522_byte_packets" },
7240 { "rx_1523_to_9022_byte_packets" },
7241 { "tx_64_byte_packets" },
7242 { "tx_65_to_127_byte_packets" },
7243 { "tx_128_to_255_byte_packets" },
7244 { "tx_256_to_511_byte_packets" },
7245 { "tx_512_to_1023_byte_packets" },
7246 { "tx_1024_to_1522_byte_packets" },
7247 { "tx_1523_to_9022_byte_packets" },
7248 { "rx_xon_frames" },
7249 { "rx_xoff_frames" },
7250 { "tx_xon_frames" },
7251 { "tx_xoff_frames" },
7252 { "rx_mac_ctrl_frames" },
7253 { "rx_filtered_packets" },
7254 { "rx_ftq_discards" },
7255 { "rx_discards" },
7256 { "rx_fw_discards" },
7259 #define BNX2_NUM_STATS (sizeof(bnx2_stats_str_arr)/\
7260 sizeof(bnx2_stats_str_arr[0]))
7262 #define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4)
7264 static const unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
7265 STATS_OFFSET32(stat_IfHCInOctets_hi),
7266 STATS_OFFSET32(stat_IfHCInBadOctets_hi),
7267 STATS_OFFSET32(stat_IfHCOutOctets_hi),
7268 STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
7269 STATS_OFFSET32(stat_IfHCInUcastPkts_hi),
7270 STATS_OFFSET32(stat_IfHCInMulticastPkts_hi),
7271 STATS_OFFSET32(stat_IfHCInBroadcastPkts_hi),
7272 STATS_OFFSET32(stat_IfHCOutUcastPkts_hi),
7273 STATS_OFFSET32(stat_IfHCOutMulticastPkts_hi),
7274 STATS_OFFSET32(stat_IfHCOutBroadcastPkts_hi),
7275 STATS_OFFSET32(stat_emac_tx_stat_dot3statsinternalmactransmiterrors),
7276 STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
7277 STATS_OFFSET32(stat_Dot3StatsFCSErrors),
7278 STATS_OFFSET32(stat_Dot3StatsAlignmentErrors),
7279 STATS_OFFSET32(stat_Dot3StatsSingleCollisionFrames),
7280 STATS_OFFSET32(stat_Dot3StatsMultipleCollisionFrames),
7281 STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
7282 STATS_OFFSET32(stat_Dot3StatsExcessiveCollisions),
7283 STATS_OFFSET32(stat_Dot3StatsLateCollisions),
7284 STATS_OFFSET32(stat_EtherStatsCollisions),
7285 STATS_OFFSET32(stat_EtherStatsFragments),
7286 STATS_OFFSET32(stat_EtherStatsJabbers),
7287 STATS_OFFSET32(stat_EtherStatsUndersizePkts),
7288 STATS_OFFSET32(stat_EtherStatsOverrsizePkts),
7289 STATS_OFFSET32(stat_EtherStatsPktsRx64Octets),
7290 STATS_OFFSET32(stat_EtherStatsPktsRx65Octetsto127Octets),
7291 STATS_OFFSET32(stat_EtherStatsPktsRx128Octetsto255Octets),
7292 STATS_OFFSET32(stat_EtherStatsPktsRx256Octetsto511Octets),
7293 STATS_OFFSET32(stat_EtherStatsPktsRx512Octetsto1023Octets),
7294 STATS_OFFSET32(stat_EtherStatsPktsRx1024Octetsto1522Octets),
7295 STATS_OFFSET32(stat_EtherStatsPktsRx1523Octetsto9022Octets),
7296 STATS_OFFSET32(stat_EtherStatsPktsTx64Octets),
7297 STATS_OFFSET32(stat_EtherStatsPktsTx65Octetsto127Octets),
7298 STATS_OFFSET32(stat_EtherStatsPktsTx128Octetsto255Octets),
7299 STATS_OFFSET32(stat_EtherStatsPktsTx256Octetsto511Octets),
7300 STATS_OFFSET32(stat_EtherStatsPktsTx512Octetsto1023Octets),
7301 STATS_OFFSET32(stat_EtherStatsPktsTx1024Octetsto1522Octets),
7302 STATS_OFFSET32(stat_EtherStatsPktsTx1523Octetsto9022Octets),
7303 STATS_OFFSET32(stat_XonPauseFramesReceived),
7304 STATS_OFFSET32(stat_XoffPauseFramesReceived),
7305 STATS_OFFSET32(stat_OutXonSent),
7306 STATS_OFFSET32(stat_OutXoffSent),
7307 STATS_OFFSET32(stat_MacControlFramesReceived),
7308 STATS_OFFSET32(stat_IfInFramesL2FilterDiscards),
7309 STATS_OFFSET32(stat_IfInFTQDiscards),
7310 STATS_OFFSET32(stat_IfInMBUFDiscards),
7311 STATS_OFFSET32(stat_FwRxDrop),
7314 /* stat_IfHCInBadOctets and stat_Dot3StatsCarrierSenseErrors are
7315 * skipped because of errata.
7316 */
7317 static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
7318 8,0,8,8,8,8,8,8,8,8,
7319 4,0,4,4,4,4,4,4,4,4,
7320 4,4,4,4,4,4,4,4,4,4,
7321 4,4,4,4,4,4,4,4,4,4,
7322 4,4,4,4,4,4,4,
7325 static u8 bnx2_5708_stats_len_arr[BNX2_NUM_STATS] = {
7326 8,0,8,8,8,8,8,8,8,8,
7327 4,4,4,4,4,4,4,4,4,4,
7328 4,4,4,4,4,4,4,4,4,4,
7329 4,4,4,4,4,4,4,4,4,4,
7330 4,4,4,4,4,4,4,
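/*
 * Editor's illustration: each entry in the two tables above gives the
 * width in bytes of the corresponding hardware counter: 8 means a
 * _hi/_lo 64-bit pair, 4 a single 32-bit register, and 0 a counter that
 * is skipped (reported as 0) because of chip errata.
 */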
7333 #define BNX2_NUM_TESTS 6
7335 static struct {
7336 char string[ETH_GSTRING_LEN];
7337 } bnx2_tests_str_arr[BNX2_NUM_TESTS] = {
7338 { "register_test (offline)" },
7339 { "memory_test (offline)" },
7340 { "loopback_test (offline)" },
7341 { "nvram_test (online)" },
7342 { "interrupt_test (online)" },
7343 { "link_test (online)" },
7346 static int
7347 bnx2_get_sset_count(struct net_device *dev, int sset)
7349 switch (sset) {
7350 case ETH_SS_TEST:
7351 return BNX2_NUM_TESTS;
7352 case ETH_SS_STATS:
7353 return BNX2_NUM_STATS;
7354 default:
7355 return -EOPNOTSUPP;
7359 static void
7360 bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
7362 struct bnx2 *bp = netdev_priv(dev);
7364 bnx2_set_power_state(bp, PCI_D0);
7366 memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS);
7367 if (etest->flags & ETH_TEST_FL_OFFLINE) {
7368 int i;
7370 bnx2_netif_stop(bp);
7371 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
7372 bnx2_free_skbs(bp);
7374 if (bnx2_test_registers(bp) != 0) {
7375 buf[0] = 1;
7376 etest->flags |= ETH_TEST_FL_FAILED;
7378 if (bnx2_test_memory(bp) != 0) {
7379 buf[1] = 1;
7380 etest->flags |= ETH_TEST_FL_FAILED;
7382 if ((buf[2] = bnx2_test_loopback(bp)) != 0)
7383 etest->flags |= ETH_TEST_FL_FAILED;
7385 if (!netif_running(bp->dev))
7386 bnx2_shutdown_chip(bp);
7387 else {
7388 bnx2_init_nic(bp, 1);
7389 bnx2_netif_start(bp);
7392 /* wait for link up */
7393 for (i = 0; i < 7; i++) {
7394 if (bp->link_up)
7395 break;
7396 msleep_interruptible(1000);
7400 if (bnx2_test_nvram(bp) != 0) {
7401 buf[3] = 1;
7402 etest->flags |= ETH_TEST_FL_FAILED;
7404 if (bnx2_test_intr(bp) != 0) {
7405 buf[4] = 1;
7406 etest->flags |= ETH_TEST_FL_FAILED;
7409 if (bnx2_test_link(bp) != 0) {
7410 buf[5] = 1;
7411 etest->flags |= ETH_TEST_FL_FAILED;
7414 if (!netif_running(bp->dev))
7415 bnx2_set_power_state(bp, PCI_D3hot);
7418 static void
7419 bnx2_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
7421 switch (stringset) {
7422 case ETH_SS_STATS:
7423 memcpy(buf, bnx2_stats_str_arr,
7424 sizeof(bnx2_stats_str_arr));
7425 break;
7426 case ETH_SS_TEST:
7427 memcpy(buf, bnx2_tests_str_arr,
7428 sizeof(bnx2_tests_str_arr));
7429 break;
7433 static void
7434 bnx2_get_ethtool_stats(struct net_device *dev,
7435 struct ethtool_stats *stats, u64 *buf)
7437 struct bnx2 *bp = netdev_priv(dev);
7438 int i;
7439 u32 *hw_stats = (u32 *) bp->stats_blk;
7440 u8 *stats_len_arr = NULL;
7442 if (hw_stats == NULL) {
7443 memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS);
7444 return;
7447 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
7448 (CHIP_ID(bp) == CHIP_ID_5706_A1) ||
7449 (CHIP_ID(bp) == CHIP_ID_5706_A2) ||
7450 (CHIP_ID(bp) == CHIP_ID_5708_A0))
7451 stats_len_arr = bnx2_5706_stats_len_arr;
7452 else
7453 stats_len_arr = bnx2_5708_stats_len_arr;
7455 for (i = 0; i < BNX2_NUM_STATS; i++) {
7456 if (stats_len_arr[i] == 0) {
7457 /* skip this counter */
7458 buf[i] = 0;
7459 continue;
7461 if (stats_len_arr[i] == 4) {
7462 /* 4-byte counter */
7463 buf[i] = (u64)
7464 *(hw_stats + bnx2_stats_offset_arr[i]);
7465 continue;
7467 /* 8-byte counter */
7468 buf[i] = (((u64) *(hw_stats +
7469 bnx2_stats_offset_arr[i])) << 32) +
7470 *(hw_stats + bnx2_stats_offset_arr[i] + 1);
7474 static int
7475 bnx2_phys_id(struct net_device *dev, u32 data)
7477 struct bnx2 *bp = netdev_priv(dev);
7478 int i;
7479 u32 save;
7481 bnx2_set_power_state(bp, PCI_D0);
7483 if (data == 0)
7484 data = 2;
7486 save = REG_RD(bp, BNX2_MISC_CFG);
7487 REG_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);
7489 for (i = 0; i < (data * 2); i++) {
7490 if ((i % 2) == 0) {
7491 REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE);
7493 else {
7494 REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE |
7495 BNX2_EMAC_LED_1000MB_OVERRIDE |
7496 BNX2_EMAC_LED_100MB_OVERRIDE |
7497 BNX2_EMAC_LED_10MB_OVERRIDE |
7498 BNX2_EMAC_LED_TRAFFIC_OVERRIDE |
7499 BNX2_EMAC_LED_TRAFFIC);
7501 msleep_interruptible(500);
7502 if (signal_pending(current))
7503 break;
7505 REG_WR(bp, BNX2_EMAC_LED, 0);
7506 REG_WR(bp, BNX2_MISC_CFG, save);
7508 if (!netif_running(dev))
7509 bnx2_set_power_state(bp, PCI_D3hot);
7511 return 0;
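/*
 * Editor's note: the loop above toggles the port LED every 500 ms for
 * 2 * data iterations, so "ethtool -p ethX N" blinks the LED at roughly
 * 1 Hz for N seconds (2 seconds when N is 0) and then restores the saved
 * BNX2_MISC_CFG LED mode.
 */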
7514 static int
7515 bnx2_set_tx_csum(struct net_device *dev, u32 data)
7517 struct bnx2 *bp = netdev_priv(dev);
7519 if (CHIP_NUM(bp) == CHIP_NUM_5709)
7520 return (ethtool_op_set_tx_ipv6_csum(dev, data));
7521 else
7522 return (ethtool_op_set_tx_csum(dev, data));
7525 static const struct ethtool_ops bnx2_ethtool_ops = {
7526 .get_settings = bnx2_get_settings,
7527 .set_settings = bnx2_set_settings,
7528 .get_drvinfo = bnx2_get_drvinfo,
7529 .get_regs_len = bnx2_get_regs_len,
7530 .get_regs = bnx2_get_regs,
7531 .get_wol = bnx2_get_wol,
7532 .set_wol = bnx2_set_wol,
7533 .nway_reset = bnx2_nway_reset,
7534 .get_link = bnx2_get_link,
7535 .get_eeprom_len = bnx2_get_eeprom_len,
7536 .get_eeprom = bnx2_get_eeprom,
7537 .set_eeprom = bnx2_set_eeprom,
7538 .get_coalesce = bnx2_get_coalesce,
7539 .set_coalesce = bnx2_set_coalesce,
7540 .get_ringparam = bnx2_get_ringparam,
7541 .set_ringparam = bnx2_set_ringparam,
7542 .get_pauseparam = bnx2_get_pauseparam,
7543 .set_pauseparam = bnx2_set_pauseparam,
7544 .get_rx_csum = bnx2_get_rx_csum,
7545 .set_rx_csum = bnx2_set_rx_csum,
7546 .set_tx_csum = bnx2_set_tx_csum,
7547 .set_sg = ethtool_op_set_sg,
7548 .set_tso = bnx2_set_tso,
7549 .self_test = bnx2_self_test,
7550 .get_strings = bnx2_get_strings,
7551 .phys_id = bnx2_phys_id,
7552 .get_ethtool_stats = bnx2_get_ethtool_stats,
7553 .get_sset_count = bnx2_get_sset_count,
7556 /* Called with rtnl_lock */
7557 static int
7558 bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
7560 struct mii_ioctl_data *data = if_mii(ifr);
7561 struct bnx2 *bp = netdev_priv(dev);
7562 int err;
7564 switch(cmd) {
7565 case SIOCGMIIPHY:
7566 data->phy_id = bp->phy_addr;
7568 /* fallthru */
7569 case SIOCGMIIREG: {
7570 u32 mii_regval;
7572 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
7573 return -EOPNOTSUPP;
7575 if (!netif_running(dev))
7576 return -EAGAIN;
7578 spin_lock_bh(&bp->phy_lock);
7579 err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval);
7580 spin_unlock_bh(&bp->phy_lock);
7582 data->val_out = mii_regval;
7584 return err;
7587 case SIOCSMIIREG:
7588 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
7589 return -EOPNOTSUPP;
7591 if (!netif_running(dev))
7592 return -EAGAIN;
7594 spin_lock_bh(&bp->phy_lock);
7595 err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in);
7596 spin_unlock_bh(&bp->phy_lock);
7598 return err;
7600 default:
7601 /* do nothing */
7602 break;
7604 return -EOPNOTSUPP;
7607 /* Called with rtnl_lock */
7608 static int
7609 bnx2_change_mac_addr(struct net_device *dev, void *p)
7611 struct sockaddr *addr = p;
7612 struct bnx2 *bp = netdev_priv(dev);
7614 if (!is_valid_ether_addr(addr->sa_data))
7615 return -EINVAL;
7617 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
7618 if (netif_running(dev))
7619 bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
7621 return 0;
7624 /* Called with rtnl_lock */
7625 static int
7626 bnx2_change_mtu(struct net_device *dev, int new_mtu)
7628 struct bnx2 *bp = netdev_priv(dev);
7630 if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) ||
7631 ((new_mtu + ETH_HLEN) < MIN_ETHERNET_PACKET_SIZE))
7632 return -EINVAL;
7634 dev->mtu = new_mtu;
7635 return (bnx2_change_ring_size(bp, bp->rx_ring_size, bp->tx_ring_size));
7638 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
7639 static void
7640 poll_bnx2(struct net_device *dev)
7642 struct bnx2 *bp = netdev_priv(dev);
7643 int i;
7645 for (i = 0; i < bp->irq_nvecs; i++) {
7646 disable_irq(bp->irq_tbl[i].vector);
7647 bnx2_interrupt(bp->irq_tbl[i].vector, &bp->bnx2_napi[i]);
7648 enable_irq(bp->irq_tbl[i].vector);
7651 #endif
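/*
 * Editor's note: poll_bnx2() backs netpoll/netconsole. With the vector's
 * interrupt masked it calls the regular interrupt handler once per
 * allocated vector so pending completions are still serviced when normal
 * interrupt delivery is unavailable.
 */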
7653 static void __devinit
7654 bnx2_get_5709_media(struct bnx2 *bp)
7656 u32 val = REG_RD(bp, BNX2_MISC_DUAL_MEDIA_CTRL);
7657 u32 bond_id = val & BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID;
7658 u32 strap;
7660 if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_C)
7661 return;
7662 else if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_S) {
7663 bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
7664 return;
7667 if (val & BNX2_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE)
7668 strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL) >> 21;
7669 else
7670 strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP) >> 8;
7672 if (PCI_FUNC(bp->pdev->devfn) == 0) {
7673 switch (strap) {
7674 case 0x4:
7675 case 0x5:
7676 case 0x6:
7677 bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
7678 return;
7680 } else {
7681 switch (strap) {
7682 case 0x1:
7683 case 0x2:
7684 case 0x4:
7685 bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
7686 return;
7691 static void __devinit
7692 bnx2_get_pci_speed(struct bnx2 *bp)
7694 u32 reg;
7696 reg = REG_RD(bp, BNX2_PCICFG_MISC_STATUS);
7697 if (reg & BNX2_PCICFG_MISC_STATUS_PCIX_DET) {
7698 u32 clkreg;
7700 bp->flags |= BNX2_FLAG_PCIX;
7702 clkreg = REG_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS);
7704 clkreg &= BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
7705 switch (clkreg) {
7706 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
7707 bp->bus_speed_mhz = 133;
7708 break;
7710 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
7711 bp->bus_speed_mhz = 100;
7712 break;
7714 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
7715 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
7716 bp->bus_speed_mhz = 66;
7717 break;
7719 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
7720 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
7721 bp->bus_speed_mhz = 50;
7722 break;
7724 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
7725 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
7726 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
7727 bp->bus_speed_mhz = 33;
7728 break;
7731 else {
7732 if (reg & BNX2_PCICFG_MISC_STATUS_M66EN)
7733 bp->bus_speed_mhz = 66;
7734 else
7735 bp->bus_speed_mhz = 33;
7738 if (reg & BNX2_PCICFG_MISC_STATUS_32BIT_DET)
7739 bp->flags |= BNX2_FLAG_PCI_32BIT;
7743 static void __devinit
7744 bnx2_read_vpd_fw_ver(struct bnx2 *bp)
7746 int rc, i, v0_len = 0;
7747 u8 *data;
7748 u8 *v0_str = NULL;
7749 bool mn_match = false;
7751 #define BNX2_VPD_NVRAM_OFFSET 0x300
7752 #define BNX2_VPD_LEN 128
7753 #define BNX2_MAX_VER_SLEN 30
7755 data = kmalloc(256, GFP_KERNEL);
7756 if (!data)
7757 return;
7759 rc = bnx2_nvram_read(bp, BNX2_VPD_NVRAM_OFFSET, data + BNX2_VPD_LEN,
7760 BNX2_VPD_LEN);
7761 if (rc)
7762 goto vpd_done;
7764 for (i = 0; i < BNX2_VPD_LEN; i += 4) {
7765 data[i] = data[i + BNX2_VPD_LEN + 3];
7766 data[i + 1] = data[i + BNX2_VPD_LEN + 2];
7767 data[i + 2] = data[i + BNX2_VPD_LEN + 1];
7768 data[i + 3] = data[i + BNX2_VPD_LEN];
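/*
 * Editor's note: NVRAM is read as big-endian 32-bit words into the upper
 * half of the 256-byte scratch buffer; the loop above byte-swaps each
 * 4-byte group down into data[0..BNX2_VPD_LEN-1] so that the 0x82/0x90/
 * 0x91 VPD resource tags below can be parsed in natural byte order.
 */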
7771 for (i = 0; i <= BNX2_VPD_LEN - 3; ) {
7772 unsigned char val = data[i];
7773 unsigned int block_end;
7775 if (val == 0x82 || val == 0x91) {
7776 i = (i + 3 + (data[i + 1] + (data[i + 2] << 8)));
7777 continue;
7780 if (val != 0x90)
7781 goto vpd_done;
7783 block_end = (i + 3 + (data[i + 1] + (data[i + 2] << 8)));
7784 i += 3;
7786 if (block_end > BNX2_VPD_LEN)
7787 goto vpd_done;
7789 while (i < (block_end - 2)) {
7790 int len = data[i + 2];
7792 if (i + 3 + len > block_end)
7793 goto vpd_done;
7795 if (data[i] == 'M' && data[i + 1] == 'N') {
7796 if (len != 4 ||
7797 memcmp(&data[i + 3], "1028", 4))
7798 goto vpd_done;
7799 mn_match = true;
7801 } else if (data[i] == 'V' && data[i + 1] == '0') {
7802 if (len > BNX2_MAX_VER_SLEN)
7803 goto vpd_done;
7805 v0_len = len;
7806 v0_str = &data[i + 3];
7808 i += 3 + len;
7810 if (mn_match && v0_str) {
7811 memcpy(bp->fw_version, v0_str, v0_len);
7812 bp->fw_version[v0_len] = ' ';
7813 goto vpd_done;
7816 goto vpd_done;
7819 vpd_done:
7820 kfree(data);
7823 static int __devinit
7824 bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
7826 struct bnx2 *bp;
7827 unsigned long mem_len;
7828 int rc, i, j;
7829 u32 reg;
7830 u64 dma_mask, persist_dma_mask;
7832 SET_NETDEV_DEV(dev, &pdev->dev);
7833 bp = netdev_priv(dev);
7835 bp->flags = 0;
7836 bp->phy_flags = 0;
7838 /* enable device (incl. PCI PM wakeup), and bus-mastering */
7839 rc = pci_enable_device(pdev);
7840 if (rc) {
7841 dev_err(&pdev->dev, "Cannot enable PCI device, aborting.\n");
7842 goto err_out;
7845 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
7846 dev_err(&pdev->dev,
7847 "Cannot find PCI device base address, aborting.\n");
7848 rc = -ENODEV;
7849 goto err_out_disable;
7852 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
7853 if (rc) {
7854 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting.\n");
7855 goto err_out_disable;
7858 pci_set_master(pdev);
7859 pci_save_state(pdev);
7861 bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
7862 if (bp->pm_cap == 0) {
7863 dev_err(&pdev->dev,
7864 "Cannot find power management capability, aborting.\n");
7865 rc = -EIO;
7866 goto err_out_release;
7869 bp->dev = dev;
7870 bp->pdev = pdev;
7872 spin_lock_init(&bp->phy_lock);
7873 spin_lock_init(&bp->indirect_lock);
7874 #ifdef BCM_CNIC
7875 mutex_init(&bp->cnic_lock);
7876 #endif
7877 INIT_WORK(&bp->reset_task, bnx2_reset_task);
7879 dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0);
7880 mem_len = MB_GET_CID_ADDR(TX_TSS_CID + TX_MAX_TSS_RINGS + 1);
7881 dev->mem_end = dev->mem_start + mem_len;
7882 dev->irq = pdev->irq;
7884 bp->regview = ioremap_nocache(dev->base_addr, mem_len);
7886 if (!bp->regview) {
7887 dev_err(&pdev->dev, "Cannot map register space, aborting.\n");
7888 rc = -ENOMEM;
7889 goto err_out_release;
7892 /* Configure byte swap and enable write to the reg_window registers.
7893 * Rely on the CPU to do target byte swapping on big endian systems;
7894 * the chip's target access swapping will not swap all accesses.
7895 */
7896 pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG,
7897 BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
7898 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);
7900 bnx2_set_power_state(bp, PCI_D0);
7902 bp->chip_id = REG_RD(bp, BNX2_MISC_ID);
7904 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
7905 if (pci_find_capability(pdev, PCI_CAP_ID_EXP) == 0) {
7906 dev_err(&pdev->dev,
7907 "Cannot find PCIE capability, aborting.\n");
7908 rc = -EIO;
7909 goto err_out_unmap;
7911 bp->flags |= BNX2_FLAG_PCIE;
7912 if (CHIP_REV(bp) == CHIP_REV_Ax)
7913 bp->flags |= BNX2_FLAG_JUMBO_BROKEN;
7914 } else {
7915 bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
7916 if (bp->pcix_cap == 0) {
7917 dev_err(&pdev->dev,
7918 "Cannot find PCIX capability, aborting.\n");
7919 rc = -EIO;
7920 goto err_out_unmap;
7922 bp->flags |= BNX2_FLAG_BROKEN_STATS;
7925 if (CHIP_NUM(bp) == CHIP_NUM_5709 && CHIP_REV(bp) != CHIP_REV_Ax) {
7926 if (pci_find_capability(pdev, PCI_CAP_ID_MSIX))
7927 bp->flags |= BNX2_FLAG_MSIX_CAP;
7930 if (CHIP_ID(bp) != CHIP_ID_5706_A0 && CHIP_ID(bp) != CHIP_ID_5706_A1) {
7931 if (pci_find_capability(pdev, PCI_CAP_ID_MSI))
7932 bp->flags |= BNX2_FLAG_MSI_CAP;
7935 /* 5708 cannot support DMA addresses > 40-bit. */
7936 if (CHIP_NUM(bp) == CHIP_NUM_5708)
7937 persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
7938 else
7939 persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
7941 /* Configure DMA attributes. */
7942 if (pci_set_dma_mask(pdev, dma_mask) == 0) {
7943 dev->features |= NETIF_F_HIGHDMA;
7944 rc = pci_set_consistent_dma_mask(pdev, persist_dma_mask);
7945 if (rc) {
7946 dev_err(&pdev->dev,
7947 "pci_set_consistent_dma_mask failed, aborting.\n");
7948 goto err_out_unmap;
7950 } else if ((rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) != 0) {
7951 dev_err(&pdev->dev, "System does not support DMA, aborting.\n");
7952 goto err_out_unmap;
7955 if (!(bp->flags & BNX2_FLAG_PCIE))
7956 bnx2_get_pci_speed(bp);
7958 /* 5706A0 may falsely detect SERR and PERR. */
7959 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
7960 reg = REG_RD(bp, PCI_COMMAND);
7961 reg &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
7962 REG_WR(bp, PCI_COMMAND, reg);
7964 else if ((CHIP_ID(bp) == CHIP_ID_5706_A1) &&
7965 !(bp->flags & BNX2_FLAG_PCIX)) {
7967 dev_err(&pdev->dev,
7968 "5706 A1 can only be used in a PCIX bus, aborting.\n");
7969 goto err_out_unmap;
7972 bnx2_init_nvram(bp);
7974 reg = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_SIGNATURE);
7976 if ((reg & BNX2_SHM_HDR_SIGNATURE_SIG_MASK) ==
7977 BNX2_SHM_HDR_SIGNATURE_SIG) {
7978 u32 off = PCI_FUNC(pdev->devfn) << 2;
7980 bp->shmem_base = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_ADDR_0 + off);
7981 } else
7982 bp->shmem_base = HOST_VIEW_SHMEM_BASE;
7984 /* Get the permanent MAC address. First we need to make sure the
7985 * firmware is actually running.
7986 */
7987 reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_SIGNATURE);
7989 if ((reg & BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
7990 BNX2_DEV_INFO_SIGNATURE_MAGIC) {
7991 dev_err(&pdev->dev, "Firmware not running, aborting.\n");
7992 rc = -ENODEV;
7993 goto err_out_unmap;
7996 bnx2_read_vpd_fw_ver(bp);
7998 j = strlen(bp->fw_version);
7999 reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_BC_REV);
8000 for (i = 0; i < 3 && j < 24; i++) {
8001 u8 num, k, skip0;
8003 if (i == 0) {
8004 bp->fw_version[j++] = 'b';
8005 bp->fw_version[j++] = 'c';
8006 bp->fw_version[j++] = ' ';
8008 num = (u8) (reg >> (24 - (i * 8)));
8009 for (k = 100, skip0 = 1; k >= 1; num %= k, k /= 10) {
8010 if (num >= k || !skip0 || k == 1) {
8011 bp->fw_version[j++] = (num / k) + '0';
8012 skip0 = 0;
8015 if (i != 2)
8016 bp->fw_version[j++] = '.';
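/*
 * Editor's illustration: the k = 100, 10, 1 loop above formats one byte
 * of BNX2_DEV_INFO_BC_REV as decimal with leading zeros suppressed, so a
 * BC_REV of 0x05000b00 yields the bootcode string "bc 5.0.11".
 */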
8018 reg = bnx2_shmem_rd(bp, BNX2_PORT_FEATURE);
8019 if (reg & BNX2_PORT_FEATURE_WOL_ENABLED)
8020 bp->wol = 1;
8022 if (reg & BNX2_PORT_FEATURE_ASF_ENABLED) {
8023 bp->flags |= BNX2_FLAG_ASF_ENABLE;
8025 for (i = 0; i < 30; i++) {
8026 reg = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
8027 if (reg & BNX2_CONDITION_MFW_RUN_MASK)
8028 break;
8029 msleep(10);
8032 reg = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
8033 reg &= BNX2_CONDITION_MFW_RUN_MASK;
8034 if (reg != BNX2_CONDITION_MFW_RUN_UNKNOWN &&
8035 reg != BNX2_CONDITION_MFW_RUN_NONE) {
8036 u32 addr = bnx2_shmem_rd(bp, BNX2_MFW_VER_PTR);
8038 if (j < 32)
8039 bp->fw_version[j++] = ' ';
8040 for (i = 0; i < 3 && j < 28; i++) {
8041 reg = bnx2_reg_rd_ind(bp, addr + i * 4);
8042 reg = swab32(reg);
8043 memcpy(&bp->fw_version[j], &reg, 4);
8044 j += 4;
8048 reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_MAC_UPPER);
8049 bp->mac_addr[0] = (u8) (reg >> 8);
8050 bp->mac_addr[1] = (u8) reg;
8052 reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_MAC_LOWER);
8053 bp->mac_addr[2] = (u8) (reg >> 24);
8054 bp->mac_addr[3] = (u8) (reg >> 16);
8055 bp->mac_addr[4] = (u8) (reg >> 8);
8056 bp->mac_addr[5] = (u8) reg;
8058 bp->tx_ring_size = MAX_TX_DESC_CNT;
8059 bnx2_set_rx_ring_size(bp, 255);
8061 bp->rx_csum = 1;
8063 bp->tx_quick_cons_trip_int = 2;
8064 bp->tx_quick_cons_trip = 20;
8065 bp->tx_ticks_int = 18;
8066 bp->tx_ticks = 80;
8068 bp->rx_quick_cons_trip_int = 2;
8069 bp->rx_quick_cons_trip = 12;
8070 bp->rx_ticks_int = 18;
8071 bp->rx_ticks = 18;
8073 bp->stats_ticks = USEC_PER_SEC & BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
8075 bp->current_interval = BNX2_TIMER_INTERVAL;
8077 bp->phy_addr = 1;
8079 /* Disable WOL support if we are running on a SERDES chip. */
8080 if (CHIP_NUM(bp) == CHIP_NUM_5709)
8081 bnx2_get_5709_media(bp);
8082 else if (CHIP_BOND_ID(bp) & CHIP_BOND_ID_SERDES_BIT)
8083 bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
8085 bp->phy_port = PORT_TP;
8086 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
8087 bp->phy_port = PORT_FIBRE;
8088 reg = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
8089 if (!(reg & BNX2_SHARED_HW_CFG_GIG_LINK_ON_VAUX)) {
8090 bp->flags |= BNX2_FLAG_NO_WOL;
8091 bp->wol = 0;
8093 if (CHIP_NUM(bp) == CHIP_NUM_5706) {
8094 /* Don't do parallel detect on this board because of
8095 * some board problems. The link will not go down
8096 * if we do parallel detect.
8097 */
8098 if (pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
8099 pdev->subsystem_device == 0x310c)
8100 bp->phy_flags |= BNX2_PHY_FLAG_NO_PARALLEL;
8101 } else {
8102 bp->phy_addr = 2;
8103 if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G)
8104 bp->phy_flags |= BNX2_PHY_FLAG_2_5G_CAPABLE;
8106 } else if (CHIP_NUM(bp) == CHIP_NUM_5706 ||
8107 CHIP_NUM(bp) == CHIP_NUM_5708)
8108 bp->phy_flags |= BNX2_PHY_FLAG_CRC_FIX;
8109 else if (CHIP_NUM(bp) == CHIP_NUM_5709 &&
8110 (CHIP_REV(bp) == CHIP_REV_Ax ||
8111 CHIP_REV(bp) == CHIP_REV_Bx))
8112 bp->phy_flags |= BNX2_PHY_FLAG_DIS_EARLY_DAC;
8114 bnx2_init_fw_cap(bp);
8116 if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
8117 (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
8118 (CHIP_ID(bp) == CHIP_ID_5708_B1) ||
8119 !(REG_RD(bp, BNX2_PCI_CONFIG_3) & BNX2_PCI_CONFIG_3_VAUX_PRESET)) {
8120 bp->flags |= BNX2_FLAG_NO_WOL;
8121 bp->wol = 0;
8124 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
8125 bp->tx_quick_cons_trip_int =
8126 bp->tx_quick_cons_trip;
8127 bp->tx_ticks_int = bp->tx_ticks;
8128 bp->rx_quick_cons_trip_int =
8129 bp->rx_quick_cons_trip;
8130 bp->rx_ticks_int = bp->rx_ticks;
8131 bp->comp_prod_trip_int = bp->comp_prod_trip;
8132 bp->com_ticks_int = bp->com_ticks;
8133 bp->cmd_ticks_int = bp->cmd_ticks;
8136 /* Disable MSI on 5706 if AMD 8132 bridge is found.
8138 * MSI is defined to be a 32-bit write. The 5706 does 64-bit MSI writes
8139 * with byte enables disabled on the unused 32-bit word. This is legal
8140 * but causes problems on the AMD 8132 which will eventually stop
8141 * responding after a while.
8143 * AMD believes this incompatibility is unique to the 5706, and
8144 * prefers to locally disable MSI rather than globally disabling it.
8145 */
8146 if (CHIP_NUM(bp) == CHIP_NUM_5706 && disable_msi == 0) {
8147 struct pci_dev *amd_8132 = NULL;
8149 while ((amd_8132 = pci_get_device(PCI_VENDOR_ID_AMD,
8150 PCI_DEVICE_ID_AMD_8132_BRIDGE,
8151 amd_8132))) {
8153 if (amd_8132->revision >= 0x10 &&
8154 amd_8132->revision <= 0x13) {
8155 disable_msi = 1;
8156 pci_dev_put(amd_8132);
8157 break;
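/*
 * Editor's note: pci_get_device() takes a reference on each device it
 * returns and drops the reference on the one passed in, so the search
 * loop above only needs the explicit pci_dev_put() when it breaks out
 * early after matching an affected 8132 revision.
 */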
8162 bnx2_set_default_link(bp);
8163 bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
8165 init_timer(&bp->timer);
8166 bp->timer.expires = RUN_AT(BNX2_TIMER_INTERVAL);
8167 bp->timer.data = (unsigned long) bp;
8168 bp->timer.function = bnx2_timer;
8170 return 0;
8172 err_out_unmap:
8173 if (bp->regview) {
8174 iounmap(bp->regview);
8175 bp->regview = NULL;
8178 err_out_release:
8179 pci_release_regions(pdev);
8181 err_out_disable:
8182 pci_disable_device(pdev);
8183 pci_set_drvdata(pdev, NULL);
8185 err_out:
8186 return rc;
8189 static char * __devinit
8190 bnx2_bus_string(struct bnx2 *bp, char *str)
8192 char *s = str;
8194 if (bp->flags & BNX2_FLAG_PCIE) {
8195 s += sprintf(s, "PCI Express");
8196 } else {
8197 s += sprintf(s, "PCI");
8198 if (bp->flags & BNX2_FLAG_PCIX)
8199 s += sprintf(s, "-X");
8200 if (bp->flags & BNX2_FLAG_PCI_32BIT)
8201 s += sprintf(s, " 32-bit");
8202 else
8203 s += sprintf(s, " 64-bit");
8204 s += sprintf(s, " %dMHz", bp->bus_speed_mhz);
8206 return str;
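/*
 * Editor's illustration: typical outputs of bnx2_bus_string() are
 * "PCI Express" for a 5709/5716 and, say, "PCI-X 64-bit 133MHz" for a
 * 5706 in a PCI-X slot; the caller below passes a 40-byte buffer, which
 * comfortably holds the longest such string.
 */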
8209 static void
8210 bnx2_del_napi(struct bnx2 *bp)
8212 int i;
8214 for (i = 0; i < bp->irq_nvecs; i++)
8215 netif_napi_del(&bp->bnx2_napi[i].napi);
8218 static void
8219 bnx2_init_napi(struct bnx2 *bp)
8221 int i;
8223 for (i = 0; i < bp->irq_nvecs; i++) {
8224 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
8225 int (*poll)(struct napi_struct *, int);
8227 if (i == 0)
8228 poll = bnx2_poll;
8229 else
8230 poll = bnx2_poll_msix;
8232 netif_napi_add(bp->dev, &bp->bnx2_napi[i].napi, poll, 64);
8233 bnapi->bp = bp;
8237 static const struct net_device_ops bnx2_netdev_ops = {
8238 .ndo_open = bnx2_open,
8239 .ndo_start_xmit = bnx2_start_xmit,
8240 .ndo_stop = bnx2_close,
8241 .ndo_get_stats = bnx2_get_stats,
8242 .ndo_set_rx_mode = bnx2_set_rx_mode,
8243 .ndo_do_ioctl = bnx2_ioctl,
8244 .ndo_validate_addr = eth_validate_addr,
8245 .ndo_set_mac_address = bnx2_change_mac_addr,
8246 .ndo_change_mtu = bnx2_change_mtu,
8247 .ndo_tx_timeout = bnx2_tx_timeout,
8248 #ifdef BCM_VLAN
8249 .ndo_vlan_rx_register = bnx2_vlan_rx_register,
8250 #endif
8251 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
8252 .ndo_poll_controller = poll_bnx2,
8253 #endif
8256 static inline void vlan_features_add(struct net_device *dev, unsigned long flags)
8258 #ifdef BCM_VLAN
8259 dev->vlan_features |= flags;
8260 #endif
8263 static int __devinit
8264 bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
8266 static int version_printed = 0;
8267 struct net_device *dev = NULL;
8268 struct bnx2 *bp;
8269 int rc;
8270 char str[40];
8272 if (version_printed++ == 0)
8273 printk(KERN_INFO "%s", version);
8275 /* dev zeroed in init_etherdev */
8276 dev = alloc_etherdev_mq(sizeof(*bp), TX_MAX_RINGS);
8278 if (!dev)
8279 return -ENOMEM;
8281 rc = bnx2_init_board(pdev, dev);
8282 if (rc < 0) {
8283 free_netdev(dev);
8284 return rc;
8287 dev->netdev_ops = &bnx2_netdev_ops;
8288 dev->watchdog_timeo = TX_TIMEOUT;
8289 dev->ethtool_ops = &bnx2_ethtool_ops;
8291 bp = netdev_priv(dev);
8293 pci_set_drvdata(pdev, dev);
8295 rc = bnx2_request_firmware(bp);
8296 if (rc)
8297 goto error;
8299 memcpy(dev->dev_addr, bp->mac_addr, 6);
8300 memcpy(dev->perm_addr, bp->mac_addr, 6);
8302 dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
8303 vlan_features_add(dev, NETIF_F_IP_CSUM | NETIF_F_SG);
8304 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
8305 dev->features |= NETIF_F_IPV6_CSUM;
8306 vlan_features_add(dev, NETIF_F_IPV6_CSUM);
8308 #ifdef BCM_VLAN
8309 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
8310 #endif
8311 dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
8312 vlan_features_add(dev, NETIF_F_TSO | NETIF_F_TSO_ECN);
8313 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
8314 dev->features |= NETIF_F_TSO6;
8315 vlan_features_add(dev, NETIF_F_TSO6);
8317 if ((rc = register_netdev(dev))) {
8318 dev_err(&pdev->dev, "Cannot register net device\n");
8319 goto error;
8322 printk(KERN_INFO "%s: %s (%c%d) %s found at mem %lx, "
8323 "IRQ %d, node addr %pM\n",
8324 dev->name,
8325 board_info[ent->driver_data].name,
8326 ((CHIP_ID(bp) & 0xf000) >> 12) + 'A',
8327 ((CHIP_ID(bp) & 0x0ff0) >> 4),
8328 bnx2_bus_string(bp, str),
8329 dev->base_addr,
8330 bp->pdev->irq, dev->dev_addr);
8332 return 0;
8334 error:
8335 if (bp->mips_firmware)
8336 release_firmware(bp->mips_firmware);
8337 if (bp->rv2p_firmware)
8338 release_firmware(bp->rv2p_firmware);
8340 if (bp->regview)
8341 iounmap(bp->regview);
8342 pci_release_regions(pdev);
8343 pci_disable_device(pdev);
8344 pci_set_drvdata(pdev, NULL);
8345 free_netdev(dev);
8346 return rc;
8349 static void __devexit
8350 bnx2_remove_one(struct pci_dev *pdev)
8352 struct net_device *dev = pci_get_drvdata(pdev);
8353 struct bnx2 *bp = netdev_priv(dev);
8355 flush_scheduled_work();
8357 unregister_netdev(dev);
8359 if (bp->mips_firmware)
8360 release_firmware(bp->mips_firmware);
8361 if (bp->rv2p_firmware)
8362 release_firmware(bp->rv2p_firmware);
8364 if (bp->regview)
8365 iounmap(bp->regview);
8367 free_netdev(dev);
8368 pci_release_regions(pdev);
8369 pci_disable_device(pdev);
8370 pci_set_drvdata(pdev, NULL);
8373 static int
8374 bnx2_suspend(struct pci_dev *pdev, pm_message_t state)
8376 struct net_device *dev = pci_get_drvdata(pdev);
8377 struct bnx2 *bp = netdev_priv(dev);
8379 /* PCI register 4 needs to be saved whether netif_running() or not.
8380 * MSI address and data need to be saved if using MSI and
8381 * netif_running().
8382 */
8383 pci_save_state(pdev);
8384 if (!netif_running(dev))
8385 return 0;
8387 flush_scheduled_work();
8388 bnx2_netif_stop(bp);
8389 netif_device_detach(dev);
8390 del_timer_sync(&bp->timer);
8391 bnx2_shutdown_chip(bp);
8392 bnx2_free_skbs(bp);
8393 bnx2_set_power_state(bp, pci_choose_state(pdev, state));
8394 return 0;
8397 static int
8398 bnx2_resume(struct pci_dev *pdev)
8400 struct net_device *dev = pci_get_drvdata(pdev);
8401 struct bnx2 *bp = netdev_priv(dev);
8403 pci_restore_state(pdev);
8404 if (!netif_running(dev))
8405 return 0;
8407 bnx2_set_power_state(bp, PCI_D0);
8408 netif_device_attach(dev);
8409 bnx2_init_nic(bp, 1);
8410 bnx2_netif_start(bp);
8411 return 0;
8414 /**
8415 * bnx2_io_error_detected - called when PCI error is detected
8416 * @pdev: Pointer to PCI device
8417 * @state: The current pci connection state
8419 * This function is called after a PCI bus error affecting
8420 * this device has been detected.
8421 */
8422 static pci_ers_result_t bnx2_io_error_detected(struct pci_dev *pdev,
8423 pci_channel_state_t state)
8425 struct net_device *dev = pci_get_drvdata(pdev);
8426 struct bnx2 *bp = netdev_priv(dev);
8428 rtnl_lock();
8429 netif_device_detach(dev);
8431 if (state == pci_channel_io_perm_failure) {
8432 rtnl_unlock();
8433 return PCI_ERS_RESULT_DISCONNECT;
8436 if (netif_running(dev)) {
8437 bnx2_netif_stop(bp);
8438 del_timer_sync(&bp->timer);
8439 bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
8442 pci_disable_device(pdev);
8443 rtnl_unlock();
8445 /* Request a slot reset. */
8446 return PCI_ERS_RESULT_NEED_RESET;
8449 /**
8450 * bnx2_io_slot_reset - called after the pci bus has been reset.
8451 * @pdev: Pointer to PCI device
8453 * Restart the card from scratch, as if from a cold-boot.
8454 */
8455 static pci_ers_result_t bnx2_io_slot_reset(struct pci_dev *pdev)
8457 struct net_device *dev = pci_get_drvdata(pdev);
8458 struct bnx2 *bp = netdev_priv(dev);
8460 rtnl_lock();
8461 if (pci_enable_device(pdev)) {
8462 dev_err(&pdev->dev,
8463 "Cannot re-enable PCI device after reset.\n");
8464 rtnl_unlock();
8465 return PCI_ERS_RESULT_DISCONNECT;
8467 pci_set_master(pdev);
8468 pci_restore_state(pdev);
8469 pci_save_state(pdev);
8471 if (netif_running(dev)) {
8472 bnx2_set_power_state(bp, PCI_D0);
8473 bnx2_init_nic(bp, 1);
8476 rtnl_unlock();
8477 return PCI_ERS_RESULT_RECOVERED;
8480 /**
8481 * bnx2_io_resume - called when traffic can start flowing again.
8482 * @pdev: Pointer to PCI device
8484 * This callback is called when the error recovery driver tells us that
8485 * it's OK to resume normal operation.
8486 */
8487 static void bnx2_io_resume(struct pci_dev *pdev)
8489 struct net_device *dev = pci_get_drvdata(pdev);
8490 struct bnx2 *bp = netdev_priv(dev);
8492 rtnl_lock();
8493 if (netif_running(dev))
8494 bnx2_netif_start(bp);
8496 netif_device_attach(dev);
8497 rtnl_unlock();
8500 static struct pci_error_handlers bnx2_err_handler = {
8501 .error_detected = bnx2_io_error_detected,
8502 .slot_reset = bnx2_io_slot_reset,
8503 .resume = bnx2_io_resume,
8506 static struct pci_driver bnx2_pci_driver = {
8507 .name = DRV_MODULE_NAME,
8508 .id_table = bnx2_pci_tbl,
8509 .probe = bnx2_init_one,
8510 .remove = __devexit_p(bnx2_remove_one),
8511 .suspend = bnx2_suspend,
8512 .resume = bnx2_resume,
8513 .err_handler = &bnx2_err_handler,
8516 static int __init bnx2_init(void)
8518 return pci_register_driver(&bnx2_pci_driver);
8521 static void __exit bnx2_cleanup(void)
8523 pci_unregister_driver(&bnx2_pci_driver);
8526 module_init(bnx2_init);
8527 module_exit(bnx2_cleanup);