bnx2: use the dma state API instead of the pci equivalents
drivers/net/bnx2.c
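The "dma state API" named in the subject refers to the dma_unmap_addr()/dma_unmap_len() helpers from <linux/dma-mapping.h>, which supersede the older pci_unmap_addr()/pci_unmap_len() macros. A minimal sketch of the conversion pattern, for illustration only (the struct and variable names here are hypothetical, not taken from this file):

        /* Before: pci wrappers */
        struct sw_tx_bd {
                struct sk_buff  *skb;
                DECLARE_PCI_UNMAP_ADDR(mapping)
        };
        pci_unmap_addr_set(tx_buf, mapping, mapping);
        dma = pci_unmap_addr(tx_buf, mapping);

        /* After: generic dma state API */
        struct sw_tx_bd {
                struct sk_buff  *skb;
                DEFINE_DMA_UNMAP_ADDR(mapping);
        };
        dma_unmap_addr_set(tx_buf, mapping, mapping);
        dma = dma_unmap_addr(tx_buf, mapping);

Both variants compile away to nothing when the platform does not need unmap state, so the conversion is purely mechanical.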
/* bnx2.c: Broadcom NX2 network driver.
 *
 * Copyright (c) 2004-2010 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Written by: Michael Chan  (mchan@broadcom.com)
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/moduleparam.h>

#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <asm/page.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
#define BCM_VLAN 1
#endif
#include <net/ip.h>
#include <net/tcp.h>
#include <net/checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/prefetch.h>
#include <linux/cache.h>
#include <linux/firmware.h>
#include <linux/log2.h>

#if defined(CONFIG_CNIC) || defined(CONFIG_CNIC_MODULE)
#define BCM_CNIC 1
#include "cnic_if.h"
#endif
#include "bnx2.h"
#include "bnx2_fw.h"
#define DRV_MODULE_NAME		"bnx2"
#define DRV_MODULE_VERSION	"2.0.8"
#define DRV_MODULE_RELDATE	"Feb 15, 2010"
#define FW_MIPS_FILE_06		"bnx2/bnx2-mips-06-5.0.0.j6.fw"
#define FW_RV2P_FILE_06		"bnx2/bnx2-rv2p-06-5.0.0.j3.fw"
#define FW_MIPS_FILE_09		"bnx2/bnx2-mips-09-5.0.0.j9.fw"
#define FW_RV2P_FILE_09_Ax	"bnx2/bnx2-rv2p-09ax-5.0.0.j10.fw"
#define FW_RV2P_FILE_09		"bnx2/bnx2-rv2p-09-5.0.0.j10.fw"

#define RUN_AT(x) (jiffies + (x))

/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT  (5*HZ)
static char version[] __devinitdata =
	"Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708/5709/5716 Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FW_MIPS_FILE_06);
MODULE_FIRMWARE(FW_RV2P_FILE_06);
MODULE_FIRMWARE(FW_MIPS_FILE_09);
MODULE_FIRMWARE(FW_RV2P_FILE_09);
MODULE_FIRMWARE(FW_RV2P_FILE_09_Ax);

static int disable_msi = 0;

module_param(disable_msi, int, 0);
MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
typedef enum {
	BCM5706 = 0,
	NC370T,
	NC370I,
	BCM5706S,
	NC370F,
	BCM5708,
	BCM5708S,
	BCM5709,
	BCM5709S,
	BCM5716,
	BCM5716S,
} board_t;
/* indexed by board_t, above */
static struct {
	char *name;
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM5706 1000Base-T" },
	{ "HP NC370T Multifunction Gigabit Server Adapter" },
	{ "HP NC370i Multifunction Gigabit Server Adapter" },
	{ "Broadcom NetXtreme II BCM5706 1000Base-SX" },
	{ "HP NC370F Multifunction Gigabit Server Adapter" },
	{ "Broadcom NetXtreme II BCM5708 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5708 1000Base-SX" },
	{ "Broadcom NetXtreme II BCM5709 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5709 1000Base-SX" },
	{ "Broadcom NetXtreme II BCM5716 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5716 1000Base-SX" },
	};
static DEFINE_PCI_DEVICE_TABLE(bnx2_pci_tbl) = {
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
	  PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709S },
	{ PCI_VENDOR_ID_BROADCOM, 0x163b,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5716 },
	{ PCI_VENDOR_ID_BROADCOM, 0x163c,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5716S },
	{ 0, }
};
static const struct flash_spec flash_table[] =
{
#define BUFFERED_FLAGS		(BNX2_NV_BUFFERED | BNX2_NV_TRANSLATE)
#define NONBUFFERED_FLAGS	(BNX2_NV_WREN)
	/* Slow EEPROM */
	{0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
	 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - slow"},
	/* Expansion entry 0001 */
	{0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0001"},
	/* Saifun SA25F010 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
	 "Non-buffered flash (128kB)"},
	/* Saifun SA25F020 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
	 "Non-buffered flash (256kB)"},
	/* Expansion entry 0100 */
	{0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0100"},
	/* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
	{0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
	 "Entry 0101: ST M45PE10 (128kB non-bufferred)"},
	/* Entry 0110: ST M45PE20 (non-buffered flash)*/
	{0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
	 "Entry 0110: ST M45PE20 (256kB non-bufferred)"},
	/* Saifun SA25F005 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
	 "Non-buffered flash (64kB)"},
	/* Fast EEPROM */
	{0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
	 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - fast"},
	/* Expansion entry 1001 */
	{0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1001"},
	/* Expansion entry 1010 */
	{0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1010"},
	/* ATMEL AT45DB011B (buffered flash) */
	{0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
	 "Buffered flash (128kB)"},
	/* Expansion entry 1100 */
	{0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1100"},
	/* Expansion entry 1101 */
	{0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1101"},
	/* Atmel Expansion entry 1110 */
	{0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1110 (Atmel)"},
	/* ATMEL AT45DB021B (buffered flash) */
	{0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
	 "Buffered flash (256kB)"},
};
static const struct flash_spec flash_5709 = {
	.flags		= BNX2_NV_BUFFERED,
	.page_bits	= BCM5709_FLASH_PAGE_BITS,
	.page_size	= BCM5709_FLASH_PAGE_SIZE,
	.addr_mask	= BCM5709_FLASH_BYTE_ADDR_MASK,
	.total_size	= BUFFERED_FLASH_TOTAL_SIZE*2,
	.name		= "5709 Buffered flash (256kB)",
};

MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);

static void bnx2_init_napi(struct bnx2 *bp);
static inline u32 bnx2_tx_avail(struct bnx2 *bp, struct bnx2_tx_ring_info *txr)
{
	u32 diff;

	smp_mb();

	/* The ring uses 256 indices for 255 entries, one of them
	 * needs to be skipped.
	 */
	diff = txr->tx_prod - txr->tx_cons;
	if (unlikely(diff >= TX_DESC_CNT)) {
		diff &= 0xffff;
		if (diff == TX_DESC_CNT)
			diff = MAX_TX_DESC_CNT;
	}
	return (bp->tx_ring_size - diff);
}
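
/* The chip exposes its full register space through a small window in PCI
 * config space: software writes the target offset to
 * BNX2_PCICFG_REG_WINDOW_ADDRESS and then reads or writes the register
 * through BNX2_PCICFG_REG_WINDOW.  indirect_lock serializes users of the
 * window; the shared-memory and context helpers below build on the same
 * mechanism.
 */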
static u32
bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
{
	u32 val;

	spin_lock_bh(&bp->indirect_lock);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
	val = REG_RD(bp, BNX2_PCICFG_REG_WINDOW);
	spin_unlock_bh(&bp->indirect_lock);
	return val;
}

static void
bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
{
	spin_lock_bh(&bp->indirect_lock);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
	spin_unlock_bh(&bp->indirect_lock);
}

static void
bnx2_shmem_wr(struct bnx2 *bp, u32 offset, u32 val)
{
	bnx2_reg_wr_ind(bp, bp->shmem_base + offset, val);
}

static u32
bnx2_shmem_rd(struct bnx2 *bp, u32 offset)
{
	return (bnx2_reg_rd_ind(bp, bp->shmem_base + offset));
}

static void
bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
{
	offset += cid_addr;
	spin_lock_bh(&bp->indirect_lock);
	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		int i;

		REG_WR(bp, BNX2_CTX_CTX_DATA, val);
		REG_WR(bp, BNX2_CTX_CTX_CTRL,
		       offset | BNX2_CTX_CTX_CTRL_WRITE_REQ);
		for (i = 0; i < 5; i++) {
			val = REG_RD(bp, BNX2_CTX_CTX_CTRL);
			if ((val & BNX2_CTX_CTX_CTRL_WRITE_REQ) == 0)
				break;
			udelay(5);
		}
	} else {
		REG_WR(bp, BNX2_CTX_DATA_ADR, offset);
		REG_WR(bp, BNX2_CTX_DATA, val);
	}
	spin_unlock_bh(&bp->indirect_lock);
}
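
/* When CONFIG_CNIC is enabled, the cnic offload driver attaches to bnx2
 * through the hooks below.  The cnic_ops pointer is published with
 * rcu_assign_pointer(), so fast-path readers elsewhere in this file can
 * dereference it under rcu_read_lock() rather than taking cnic_lock;
 * registration and teardown are serialized by cnic_lock plus
 * synchronize_rcu().
 */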
#ifdef BCM_CNIC
static int
bnx2_drv_ctl(struct net_device *dev, struct drv_ctl_info *info)
{
	struct bnx2 *bp = netdev_priv(dev);
	struct drv_ctl_io *io = &info->data.io;

	switch (info->cmd) {
	case DRV_CTL_IO_WR_CMD:
		bnx2_reg_wr_ind(bp, io->offset, io->data);
		break;
	case DRV_CTL_IO_RD_CMD:
		io->data = bnx2_reg_rd_ind(bp, io->offset);
		break;
	case DRV_CTL_CTX_WR_CMD:
		bnx2_ctx_wr(bp, io->cid_addr, io->offset, io->data);
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

static void bnx2_setup_cnic_irq_info(struct bnx2 *bp)
{
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
	int sb_id;

	if (bp->flags & BNX2_FLAG_USING_MSIX) {
		cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
		bnapi->cnic_present = 0;
		sb_id = bp->irq_nvecs;
		cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
	} else {
		cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
		bnapi->cnic_tag = bnapi->last_status_idx;
		bnapi->cnic_present = 1;
		sb_id = 0;
		cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
	}

	cp->irq_arr[0].vector = bp->irq_tbl[sb_id].vector;
	cp->irq_arr[0].status_blk = (void *)
		((unsigned long) bnapi->status_blk.msi +
		(BNX2_SBLK_MSIX_ALIGN_SIZE * sb_id));
	cp->irq_arr[0].status_blk_num = sb_id;
	cp->num_irq = 1;
}

static int bnx2_register_cnic(struct net_device *dev, struct cnic_ops *ops,
			      void *data)
{
	struct bnx2 *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	if (ops == NULL)
		return -EINVAL;

	if (cp->drv_state & CNIC_DRV_STATE_REGD)
		return -EBUSY;

	bp->cnic_data = data;
	rcu_assign_pointer(bp->cnic_ops, ops);

	cp->num_irq = 0;
	cp->drv_state = CNIC_DRV_STATE_REGD;

	bnx2_setup_cnic_irq_info(bp);

	return 0;
}

static int bnx2_unregister_cnic(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	mutex_lock(&bp->cnic_lock);
	cp->drv_state = 0;
	bnapi->cnic_present = 0;
	rcu_assign_pointer(bp->cnic_ops, NULL);
	mutex_unlock(&bp->cnic_lock);
	synchronize_rcu();
	return 0;
}

struct cnic_eth_dev *bnx2_cnic_probe(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	cp->drv_owner = THIS_MODULE;
	cp->chip_id = bp->chip_id;
	cp->pdev = bp->pdev;
	cp->io_base = bp->regview;
	cp->drv_ctl = bnx2_drv_ctl;
	cp->drv_register_cnic = bnx2_register_cnic;
	cp->drv_unregister_cnic = bnx2_unregister_cnic;

	return cp;
}
EXPORT_SYMBOL(bnx2_cnic_probe);
static void
bnx2_cnic_stop(struct bnx2 *bp)
{
	struct cnic_ops *c_ops;
	struct cnic_ctl_info info;

	mutex_lock(&bp->cnic_lock);
	c_ops = bp->cnic_ops;
	if (c_ops) {
		info.cmd = CNIC_CTL_STOP_CMD;
		c_ops->cnic_ctl(bp->cnic_data, &info);
	}
	mutex_unlock(&bp->cnic_lock);
}

static void
bnx2_cnic_start(struct bnx2 *bp)
{
	struct cnic_ops *c_ops;
	struct cnic_ctl_info info;

	mutex_lock(&bp->cnic_lock);
	c_ops = bp->cnic_ops;
	if (c_ops) {
		if (!(bp->flags & BNX2_FLAG_USING_MSIX)) {
			struct bnx2_napi *bnapi = &bp->bnx2_napi[0];

			bnapi->cnic_tag = bnapi->last_status_idx;
		}
		info.cmd = CNIC_CTL_START_CMD;
		c_ops->cnic_ctl(bp->cnic_data, &info);
	}
	mutex_unlock(&bp->cnic_lock);
}

#else

static void
bnx2_cnic_stop(struct bnx2 *bp)
{
}

static void
bnx2_cnic_start(struct bnx2 *bp)
{
}

#endif
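
/* MDIO access to the PHY goes through the EMAC_MDIO_COMM register.  If the
 * chip is auto-polling the PHY, polling must be switched off (and restored
 * afterwards) before software can own the MDIO bus.  Each transaction is
 * kicked off by setting START_BUSY and is complete when the hardware
 * clears that bit; the loops below poll for this for up to 50 x 10 usec.
 */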
static int
bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
{
	u32 val1;
	int i, ret;

	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	val1 = (bp->phy_addr << 21) | (reg << 16) |
		BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
		BNX2_EMAC_MDIO_COMM_START_BUSY;
	REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

	for (i = 0; i < 50; i++) {
		udelay(10);

		val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
			udelay(5);

			val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
			val1 &= BNX2_EMAC_MDIO_COMM_DATA;

			break;
		}
	}

	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
		*val = 0x0;
		ret = -EBUSY;
	}
	else {
		*val = val1;
		ret = 0;
	}

	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	return ret;
}

static int
bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
{
	u32 val1;
	int i, ret;

	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	val1 = (bp->phy_addr << 21) | (reg << 16) | val |
		BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
		BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
	REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

	for (i = 0; i < 50; i++) {
		udelay(10);

		val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
			udelay(5);
			break;
		}
	}

	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
		ret = -EBUSY;
	else
		ret = 0;

	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	return ret;
}
static void
bnx2_disable_int(struct bnx2 *bp)
{
	int i;
	struct bnx2_napi *bnapi;

	for (i = 0; i < bp->irq_nvecs; i++) {
		bnapi = &bp->bnx2_napi[i];
		REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
		       BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
	}
	REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
}

static void
bnx2_enable_int(struct bnx2 *bp)
{
	int i;
	struct bnx2_napi *bnapi;

	for (i = 0; i < bp->irq_nvecs; i++) {
		bnapi = &bp->bnx2_napi[i];

		REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
		       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
		       BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
		       bnapi->last_status_idx);

		REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
		       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
		       bnapi->last_status_idx);
	}
	REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
}

static void
bnx2_disable_int_sync(struct bnx2 *bp)
{
	int i;

	atomic_inc(&bp->intr_sem);
	if (!netif_running(bp->dev))
		return;

	bnx2_disable_int(bp);
	for (i = 0; i < bp->irq_nvecs; i++)
		synchronize_irq(bp->irq_tbl[i].vector);
}

static void
bnx2_napi_disable(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->irq_nvecs; i++)
		napi_disable(&bp->bnx2_napi[i].napi);
}

static void
bnx2_napi_enable(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->irq_nvecs; i++)
		napi_enable(&bp->bnx2_napi[i].napi);
}
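
/* bnx2_netif_stop()/bnx2_netif_start() must be used in pairs: intr_sem is
 * incremented on stop and decremented on start, and interrupts, NAPI and
 * the tx queues are only re-enabled when the count drops back to zero, so
 * nested stop/start sequences behave correctly.
 */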
static void
bnx2_netif_stop(struct bnx2 *bp)
{
	bnx2_cnic_stop(bp);
	if (netif_running(bp->dev)) {
		int i;

		bnx2_napi_disable(bp);
		netif_tx_disable(bp->dev);
		/* prevent tx timeout */
		for (i = 0; i < bp->dev->num_tx_queues; i++) {
			struct netdev_queue *txq;

			txq = netdev_get_tx_queue(bp->dev, i);
			txq->trans_start = jiffies;
		}
	}
	bnx2_disable_int_sync(bp);
}

static void
bnx2_netif_start(struct bnx2 *bp)
{
	if (atomic_dec_and_test(&bp->intr_sem)) {
		if (netif_running(bp->dev)) {
			netif_tx_wake_all_queues(bp->dev);
			bnx2_napi_enable(bp);
			bnx2_enable_int(bp);
			bnx2_cnic_start(bp);
		}
	}
}

static void
bnx2_free_tx_mem(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->num_tx_rings; i++) {
		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
		struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;

		if (txr->tx_desc_ring) {
			pci_free_consistent(bp->pdev, TXBD_RING_SIZE,
					    txr->tx_desc_ring,
					    txr->tx_desc_mapping);
			txr->tx_desc_ring = NULL;
		}
		kfree(txr->tx_buf_ring);
		txr->tx_buf_ring = NULL;
	}
}

static void
bnx2_free_rx_mem(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->num_rx_rings; i++) {
		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
		struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
		int j;

		for (j = 0; j < bp->rx_max_ring; j++) {
			if (rxr->rx_desc_ring[j])
				pci_free_consistent(bp->pdev, RXBD_RING_SIZE,
						    rxr->rx_desc_ring[j],
						    rxr->rx_desc_mapping[j]);
			rxr->rx_desc_ring[j] = NULL;
		}
		vfree(rxr->rx_buf_ring);
		rxr->rx_buf_ring = NULL;

		for (j = 0; j < bp->rx_max_pg_ring; j++) {
			if (rxr->rx_pg_desc_ring[j])
				pci_free_consistent(bp->pdev, RXBD_RING_SIZE,
						    rxr->rx_pg_desc_ring[j],
						    rxr->rx_pg_desc_mapping[j]);
			rxr->rx_pg_desc_ring[j] = NULL;
		}
		vfree(rxr->rx_pg_ring);
		rxr->rx_pg_ring = NULL;
	}
}
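
/* Hardware descriptor rings must be reachable by the NIC's DMA engine, so
 * they come from pci_alloc_consistent() (coherent DMA memory).  The
 * software shadow rings are only touched by the CPU and can live in
 * ordinary kzalloc()/vmalloc() memory.
 */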
static int
bnx2_alloc_tx_mem(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->num_tx_rings; i++) {
		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
		struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;

		txr->tx_buf_ring = kzalloc(SW_TXBD_RING_SIZE, GFP_KERNEL);
		if (txr->tx_buf_ring == NULL)
			return -ENOMEM;

		txr->tx_desc_ring =
			pci_alloc_consistent(bp->pdev, TXBD_RING_SIZE,
					     &txr->tx_desc_mapping);
		if (txr->tx_desc_ring == NULL)
			return -ENOMEM;
	}
	return 0;
}

static int
bnx2_alloc_rx_mem(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->num_rx_rings; i++) {
		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
		struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
		int j;

		rxr->rx_buf_ring =
			vmalloc(SW_RXBD_RING_SIZE * bp->rx_max_ring);
		if (rxr->rx_buf_ring == NULL)
			return -ENOMEM;

		memset(rxr->rx_buf_ring, 0,
		       SW_RXBD_RING_SIZE * bp->rx_max_ring);

		for (j = 0; j < bp->rx_max_ring; j++) {
			rxr->rx_desc_ring[j] =
				pci_alloc_consistent(bp->pdev, RXBD_RING_SIZE,
						     &rxr->rx_desc_mapping[j]);
			if (rxr->rx_desc_ring[j] == NULL)
				return -ENOMEM;
		}

		if (bp->rx_pg_ring_size) {
			rxr->rx_pg_ring = vmalloc(SW_RXPG_RING_SIZE *
						  bp->rx_max_pg_ring);
			if (rxr->rx_pg_ring == NULL)
				return -ENOMEM;

			memset(rxr->rx_pg_ring, 0, SW_RXPG_RING_SIZE *
			       bp->rx_max_pg_ring);
		}

		for (j = 0; j < bp->rx_max_pg_ring; j++) {
			rxr->rx_pg_desc_ring[j] =
				pci_alloc_consistent(bp->pdev, RXBD_RING_SIZE,
						&rxr->rx_pg_desc_mapping[j]);
			if (rxr->rx_pg_desc_ring[j] == NULL)
				return -ENOMEM;
		}
	}
	return 0;
}
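
/* A side note on the allocation calls above: the legacy pci_*_consistent
 * wrappers are thin shims over the generic DMA API (per the
 * pci-dma-compat layer).  A minimal sketch of the equivalence, for
 * illustration only:
 *
 *	void *cpu_addr;
 *	dma_addr_t mapping;
 *
 *	cpu_addr = pci_alloc_consistent(bp->pdev, size, &mapping);
 *	// ...behaves the same as...
 *	cpu_addr = dma_alloc_coherent(&bp->pdev->dev, size, &mapping,
 *				      GFP_ATOMIC);
 */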
static void
bnx2_free_mem(struct bnx2 *bp)
{
	int i;
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];

	bnx2_free_tx_mem(bp);
	bnx2_free_rx_mem(bp);

	for (i = 0; i < bp->ctx_pages; i++) {
		if (bp->ctx_blk[i]) {
			pci_free_consistent(bp->pdev, BCM_PAGE_SIZE,
					    bp->ctx_blk[i],
					    bp->ctx_blk_mapping[i]);
			bp->ctx_blk[i] = NULL;
		}
	}
	if (bnapi->status_blk.msi) {
		pci_free_consistent(bp->pdev, bp->status_stats_size,
				    bnapi->status_blk.msi,
				    bp->status_blk_mapping);
		bnapi->status_blk.msi = NULL;
		bp->stats_blk = NULL;
	}
}
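
/* The status block and the statistics block share a single coherent
 * allocation: the status block(s) come first -- one aligned slice per
 * MSI-X vector when MSIX_CAP is set -- and the statistics block follows
 * at status_blk_size bytes in.
 */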
static int
bnx2_alloc_mem(struct bnx2 *bp)
{
	int i, status_blk_size, err;
	struct bnx2_napi *bnapi;
	void *status_blk;

	/* Combine status and statistics blocks into one allocation. */
	status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
	if (bp->flags & BNX2_FLAG_MSIX_CAP)
		status_blk_size = L1_CACHE_ALIGN(BNX2_MAX_MSIX_HW_VEC *
						 BNX2_SBLK_MSIX_ALIGN_SIZE);
	bp->status_stats_size = status_blk_size +
				sizeof(struct statistics_block);

	status_blk = pci_alloc_consistent(bp->pdev, bp->status_stats_size,
					  &bp->status_blk_mapping);
	if (status_blk == NULL)
		goto alloc_mem_err;

	memset(status_blk, 0, bp->status_stats_size);

	bnapi = &bp->bnx2_napi[0];
	bnapi->status_blk.msi = status_blk;
	bnapi->hw_tx_cons_ptr =
		&bnapi->status_blk.msi->status_tx_quick_consumer_index0;
	bnapi->hw_rx_cons_ptr =
		&bnapi->status_blk.msi->status_rx_quick_consumer_index0;
	if (bp->flags & BNX2_FLAG_MSIX_CAP) {
		for (i = 1; i < BNX2_MAX_MSIX_VEC; i++) {
			struct status_block_msix *sblk;

			bnapi = &bp->bnx2_napi[i];

			sblk = (void *) (status_blk +
					 BNX2_SBLK_MSIX_ALIGN_SIZE * i);
			bnapi->status_blk.msix = sblk;
			bnapi->hw_tx_cons_ptr =
				&sblk->status_tx_quick_consumer_index;
			bnapi->hw_rx_cons_ptr =
				&sblk->status_rx_quick_consumer_index;
			bnapi->int_num = i << 24;
		}
	}

	bp->stats_blk = status_blk + status_blk_size;

	bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		bp->ctx_pages = 0x2000 / BCM_PAGE_SIZE;
		if (bp->ctx_pages == 0)
			bp->ctx_pages = 1;
		for (i = 0; i < bp->ctx_pages; i++) {
			bp->ctx_blk[i] = pci_alloc_consistent(bp->pdev,
						BCM_PAGE_SIZE,
						&bp->ctx_blk_mapping[i]);
			if (bp->ctx_blk[i] == NULL)
				goto alloc_mem_err;
		}
	}

	err = bnx2_alloc_rx_mem(bp);
	if (err)
		goto alloc_mem_err;

	err = bnx2_alloc_tx_mem(bp);
	if (err)
		goto alloc_mem_err;

	return 0;

alloc_mem_err:
	bnx2_free_mem(bp);
	return -ENOMEM;
}
static void
bnx2_report_fw_link(struct bnx2 *bp)
{
	u32 fw_link_status = 0;

	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return;

	if (bp->link_up) {
		u32 bmsr;

		switch (bp->line_speed) {
		case SPEED_10:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_10HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_10FULL;
			break;
		case SPEED_100:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_100HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_100FULL;
			break;
		case SPEED_1000:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_1000HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_1000FULL;
			break;
		case SPEED_2500:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_2500HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_2500FULL;
			break;
		}

		fw_link_status |= BNX2_LINK_STATUS_LINK_UP;

		if (bp->autoneg) {
			fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;

			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);

			if (!(bmsr & BMSR_ANEGCOMPLETE) ||
			    bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)
				fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
			else
				fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
		}
	}
	else
		fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;

	bnx2_shmem_wr(bp, BNX2_LINK_STATUS, fw_link_status);
}

static char *
bnx2_xceiver_str(struct bnx2 *bp)
{
	return ((bp->phy_port == PORT_FIBRE) ? "SerDes" :
		((bp->phy_flags & BNX2_PHY_FLAG_SERDES) ? "Remote Copper" :
		 "Copper"));
}

static void
bnx2_report_link(struct bnx2 *bp)
{
	if (bp->link_up) {
		netif_carrier_on(bp->dev);
		netdev_info(bp->dev, "NIC %s Link is Up, %d Mbps %s duplex",
			    bnx2_xceiver_str(bp),
			    bp->line_speed,
			    bp->duplex == DUPLEX_FULL ? "full" : "half");

		if (bp->flow_ctrl) {
			if (bp->flow_ctrl & FLOW_CTRL_RX) {
				pr_cont(", receive ");
				if (bp->flow_ctrl & FLOW_CTRL_TX)
					pr_cont("& transmit ");
			}
			else {
				pr_cont(", transmit ");
			}
			pr_cont("flow control ON");
		}
		pr_cont("\n");
	} else {
		netif_carrier_off(bp->dev);
		netdev_err(bp->dev, "NIC %s Link is Down\n",
			   bnx2_xceiver_str(bp));
	}

	bnx2_report_fw_link(bp);
}
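
/* Resolve the negotiated flow control settings: with symmetric PAUSE both
 * directions are enabled, while with asymmetric PAUSE the direction is
 * decided by which side advertised what, following Table 28B-3 of the
 * 802.3ab-1999 spec cited below.  1000X pause bits from SerDes links are
 * first translated into the equivalent copper advertisement bits so one
 * resolution table serves both media.
 */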
static void
bnx2_resolve_flow_ctrl(struct bnx2 *bp)
{
	u32 local_adv, remote_adv;

	bp->flow_ctrl = 0;
	if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
	    (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {

		if (bp->duplex == DUPLEX_FULL) {
			bp->flow_ctrl = bp->req_flow_ctrl;
		}
		return;
	}

	if (bp->duplex != DUPLEX_FULL) {
		return;
	}

	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5708)) {
		u32 val;

		bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
		if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_TX;
		if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_RX;
		return;
	}

	bnx2_read_phy(bp, bp->mii_adv, &local_adv);
	bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		u32 new_local_adv = 0;
		u32 new_remote_adv = 0;

		if (local_adv & ADVERTISE_1000XPAUSE)
			new_local_adv |= ADVERTISE_PAUSE_CAP;
		if (local_adv & ADVERTISE_1000XPSE_ASYM)
			new_local_adv |= ADVERTISE_PAUSE_ASYM;
		if (remote_adv & ADVERTISE_1000XPAUSE)
			new_remote_adv |= ADVERTISE_PAUSE_CAP;
		if (remote_adv & ADVERTISE_1000XPSE_ASYM)
			new_remote_adv |= ADVERTISE_PAUSE_ASYM;

		local_adv = new_local_adv;
		remote_adv = new_remote_adv;
	}

	/* See Table 28B-3 of 802.3ab-1999 spec. */
	if (local_adv & ADVERTISE_PAUSE_CAP) {
		if (local_adv & ADVERTISE_PAUSE_ASYM) {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
			else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
				bp->flow_ctrl = FLOW_CTRL_RX;
			}
		}
		else {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
		}
	}
	else if (local_adv & ADVERTISE_PAUSE_ASYM) {
		if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
		    (remote_adv & ADVERTISE_PAUSE_ASYM)) {

			bp->flow_ctrl = FLOW_CTRL_TX;
		}
	}
}
static int
bnx2_5709s_linkup(struct bnx2 *bp)
{
	u32 val, speed;

	bp->link_up = 1;

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_GP_STATUS);
	bnx2_read_phy(bp, MII_BNX2_GP_TOP_AN_STATUS1, &val);
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	if ((bp->autoneg & AUTONEG_SPEED) == 0) {
		bp->line_speed = bp->req_line_speed;
		bp->duplex = bp->req_duplex;
		return 0;
	}
	speed = val & MII_BNX2_GP_TOP_AN_SPEED_MSK;
	switch (speed) {
		case MII_BNX2_GP_TOP_AN_SPEED_10:
			bp->line_speed = SPEED_10;
			break;
		case MII_BNX2_GP_TOP_AN_SPEED_100:
			bp->line_speed = SPEED_100;
			break;
		case MII_BNX2_GP_TOP_AN_SPEED_1G:
		case MII_BNX2_GP_TOP_AN_SPEED_1GKV:
			bp->line_speed = SPEED_1000;
			break;
		case MII_BNX2_GP_TOP_AN_SPEED_2_5G:
			bp->line_speed = SPEED_2500;
			break;
	}
	if (val & MII_BNX2_GP_TOP_AN_FD)
		bp->duplex = DUPLEX_FULL;
	else
		bp->duplex = DUPLEX_HALF;
	return 0;
}

static int
bnx2_5708s_linkup(struct bnx2 *bp)
{
	u32 val;

	bp->link_up = 1;
	bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
	switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
		case BCM5708S_1000X_STAT1_SPEED_10:
			bp->line_speed = SPEED_10;
			break;
		case BCM5708S_1000X_STAT1_SPEED_100:
			bp->line_speed = SPEED_100;
			break;
		case BCM5708S_1000X_STAT1_SPEED_1G:
			bp->line_speed = SPEED_1000;
			break;
		case BCM5708S_1000X_STAT1_SPEED_2G5:
			bp->line_speed = SPEED_2500;
			break;
	}
	if (val & BCM5708S_1000X_STAT1_FD)
		bp->duplex = DUPLEX_FULL;
	else
		bp->duplex = DUPLEX_HALF;

	return 0;
}

static int
bnx2_5706s_linkup(struct bnx2 *bp)
{
	u32 bmcr, local_adv, remote_adv, common;

	bp->link_up = 1;
	bp->line_speed = SPEED_1000;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
	if (bmcr & BMCR_FULLDPLX) {
		bp->duplex = DUPLEX_FULL;
	}
	else {
		bp->duplex = DUPLEX_HALF;
	}

	if (!(bmcr & BMCR_ANENABLE)) {
		return 0;
	}

	bnx2_read_phy(bp, bp->mii_adv, &local_adv);
	bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

	common = local_adv & remote_adv;
	if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {

		if (common & ADVERTISE_1000XFULL) {
			bp->duplex = DUPLEX_FULL;
		}
		else {
			bp->duplex = DUPLEX_HALF;
		}
	}

	return 0;
}
static int
bnx2_copper_linkup(struct bnx2 *bp)
{
	u32 bmcr;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
	if (bmcr & BMCR_ANENABLE) {
		u32 local_adv, remote_adv, common;

		bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
		bnx2_read_phy(bp, MII_STAT1000, &remote_adv);

		common = local_adv & (remote_adv >> 2);
		if (common & ADVERTISE_1000FULL) {
			bp->line_speed = SPEED_1000;
			bp->duplex = DUPLEX_FULL;
		}
		else if (common & ADVERTISE_1000HALF) {
			bp->line_speed = SPEED_1000;
			bp->duplex = DUPLEX_HALF;
		}
		else {
			bnx2_read_phy(bp, bp->mii_adv, &local_adv);
			bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

			common = local_adv & remote_adv;
			if (common & ADVERTISE_100FULL) {
				bp->line_speed = SPEED_100;
				bp->duplex = DUPLEX_FULL;
			}
			else if (common & ADVERTISE_100HALF) {
				bp->line_speed = SPEED_100;
				bp->duplex = DUPLEX_HALF;
			}
			else if (common & ADVERTISE_10FULL) {
				bp->line_speed = SPEED_10;
				bp->duplex = DUPLEX_FULL;
			}
			else if (common & ADVERTISE_10HALF) {
				bp->line_speed = SPEED_10;
				bp->duplex = DUPLEX_HALF;
			}
			else {
				bp->line_speed = 0;
				bp->link_up = 0;
			}
		}
	}
	else {
		if (bmcr & BMCR_SPEED100) {
			bp->line_speed = SPEED_100;
		}
		else {
			bp->line_speed = SPEED_10;
		}
		if (bmcr & BMCR_FULLDPLX) {
			bp->duplex = DUPLEX_FULL;
		}
		else {
			bp->duplex = DUPLEX_HALF;
		}
	}

	return 0;
}

static void
bnx2_init_rx_context(struct bnx2 *bp, u32 cid)
{
	u32 val, rx_cid_addr = GET_CID_ADDR(cid);

	val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
	val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
	val |= 0x02 << 8;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		u32 lo_water, hi_water;

		if (bp->flow_ctrl & FLOW_CTRL_TX)
			lo_water = BNX2_L2CTX_LO_WATER_MARK_DEFAULT;
		else
			lo_water = BNX2_L2CTX_LO_WATER_MARK_DIS;
		if (lo_water >= bp->rx_ring_size)
			lo_water = 0;

		hi_water = min_t(int, bp->rx_ring_size / 4, lo_water + 16);

		if (hi_water <= lo_water)
			lo_water = 0;

		hi_water /= BNX2_L2CTX_HI_WATER_MARK_SCALE;
		lo_water /= BNX2_L2CTX_LO_WATER_MARK_SCALE;

		if (hi_water > 0xf)
			hi_water = 0xf;
		else if (hi_water == 0)
			lo_water = 0;
		val |= lo_water | (hi_water << BNX2_L2CTX_HI_WATER_MARK_SHIFT);
	}
	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_CTX_TYPE, val);
}
static void
bnx2_init_all_rx_contexts(struct bnx2 *bp)
{
	int i;
	u32 cid;

	for (i = 0, cid = RX_CID; i < bp->num_rx_rings; i++, cid++) {
		if (i == 1)
			cid = RX_RSS_CID;
		bnx2_init_rx_context(bp, cid);
	}
}

static void
bnx2_set_mac_link(struct bnx2 *bp)
{
	u32 val;

	REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
	if (bp->link_up && (bp->line_speed == SPEED_1000) &&
	    (bp->duplex == DUPLEX_HALF)) {
		REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
	}

	/* Configure the EMAC mode register. */
	val = REG_RD(bp, BNX2_EMAC_MODE);

	val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
		 BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
		 BNX2_EMAC_MODE_25G_MODE);

	if (bp->link_up) {
		switch (bp->line_speed) {
		case SPEED_10:
			if (CHIP_NUM(bp) != CHIP_NUM_5706) {
				val |= BNX2_EMAC_MODE_PORT_MII_10M;
				break;
			}
			/* fall through */
		case SPEED_100:
			val |= BNX2_EMAC_MODE_PORT_MII;
			break;
		case SPEED_2500:
			val |= BNX2_EMAC_MODE_25G_MODE;
			/* fall through */
		case SPEED_1000:
			val |= BNX2_EMAC_MODE_PORT_GMII;
			break;
		}
	}
	else {
		val |= BNX2_EMAC_MODE_PORT_GMII;
	}

	/* Set the MAC to operate in the appropriate duplex mode. */
	if (bp->duplex == DUPLEX_HALF)
		val |= BNX2_EMAC_MODE_HALF_DUPLEX;
	REG_WR(bp, BNX2_EMAC_MODE, val);

	/* Enable/disable rx PAUSE. */
	bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_RX)
		bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);

	/* Enable/disable tx PAUSE. */
	val = REG_RD(bp, BNX2_EMAC_TX_MODE);
	val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_TX)
		val |= BNX2_EMAC_TX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_TX_MODE, val);

	/* Acknowledge the interrupt. */
	REG_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_init_all_rx_contexts(bp);
}
static void
bnx2_enable_bmsr1(struct bnx2 *bp)
{
	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5709))
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_GP_STATUS);
}

static void
bnx2_disable_bmsr1(struct bnx2 *bp)
{
	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5709))
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
}

static int
bnx2_test_and_enable_2g5(struct bnx2 *bp)
{
	u32 up1;
	int ret = 1;

	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
		return 0;

	if (bp->autoneg & AUTONEG_SPEED)
		bp->advertising |= ADVERTISED_2500baseX_Full;

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);

	bnx2_read_phy(bp, bp->mii_up1, &up1);
	if (!(up1 & BCM5708S_UP1_2G5)) {
		up1 |= BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, bp->mii_up1, up1);
		ret = 0;
	}

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	return ret;
}

static int
bnx2_test_and_disable_2g5(struct bnx2 *bp)
{
	u32 up1;
	int ret = 0;

	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
		return 0;

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);

	bnx2_read_phy(bp, bp->mii_up1, &up1);
	if (up1 & BCM5708S_UP1_2G5) {
		up1 &= ~BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, bp->mii_up1, up1);
		ret = 1;
	}

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	return ret;
}
static void
bnx2_enable_forced_2g5(struct bnx2 *bp)
{
	u32 bmcr;

	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
		return;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		u32 val;

		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_SERDES_DIG);
		bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
		val &= ~MII_BNX2_SD_MISC1_FORCE_MSK;
		val |= MII_BNX2_SD_MISC1_FORCE | MII_BNX2_SD_MISC1_FORCE_2_5G;
		bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);

		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		bmcr |= BCM5708S_BMCR_FORCE_2500;
	} else {
		return;
	}

	if (bp->autoneg & AUTONEG_SPEED) {
		bmcr &= ~BMCR_ANENABLE;
		if (bp->req_duplex == DUPLEX_FULL)
			bmcr |= BMCR_FULLDPLX;
	}
	bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
}

static void
bnx2_disable_forced_2g5(struct bnx2 *bp)
{
	u32 bmcr;

	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
		return;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		u32 val;

		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_SERDES_DIG);
		bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
		val &= ~MII_BNX2_SD_MISC1_FORCE;
		bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);

		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		bmcr &= ~BCM5708S_BMCR_FORCE_2500;
	} else {
		return;
	}

	if (bp->autoneg & AUTONEG_SPEED)
		bmcr |= BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_ANRESTART;
	bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
}

static void
bnx2_5706s_force_link_dn(struct bnx2 *bp, int start)
{
	u32 val;

	bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_SERDES_CTL);
	bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
	if (start)
		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val & 0xff0f);
	else
		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val | 0xc0);
}
static int
bnx2_set_link(struct bnx2 *bp)
{
	u32 bmsr;
	u8 link_up;

	if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) {
		bp->link_up = 1;
		return 0;
	}

	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return 0;

	link_up = bp->link_up;

	bnx2_enable_bmsr1(bp);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_disable_bmsr1(bp);

	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5706)) {
		u32 val, an_dbg;

		if (bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN) {
			bnx2_5706s_force_link_dn(bp, 0);
			bp->phy_flags &= ~BNX2_PHY_FLAG_FORCED_DOWN;
		}
		val = REG_RD(bp, BNX2_EMAC_STATUS);

		bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);

		if ((val & BNX2_EMAC_STATUS_LINK) &&
		    !(an_dbg & MISC_SHDW_AN_DBG_NOSYNC))
			bmsr |= BMSR_LSTATUS;
		else
			bmsr &= ~BMSR_LSTATUS;
	}

	if (bmsr & BMSR_LSTATUS) {
		bp->link_up = 1;

		if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
			if (CHIP_NUM(bp) == CHIP_NUM_5706)
				bnx2_5706s_linkup(bp);
			else if (CHIP_NUM(bp) == CHIP_NUM_5708)
				bnx2_5708s_linkup(bp);
			else if (CHIP_NUM(bp) == CHIP_NUM_5709)
				bnx2_5709s_linkup(bp);
		}
		else {
			bnx2_copper_linkup(bp);
		}
		bnx2_resolve_flow_ctrl(bp);
	}
	else {
		if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
		    (bp->autoneg & AUTONEG_SPEED))
			bnx2_disable_forced_2g5(bp);

		if (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT) {
			u32 bmcr;

			bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
			bmcr |= BMCR_ANENABLE;
			bnx2_write_phy(bp, bp->mii_bmcr, bmcr);

			bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
		}
		bp->link_up = 0;
	}

	if (bp->link_up != link_up) {
		bnx2_report_link(bp);
	}

	bnx2_set_mac_link(bp);

	return 0;
}
static int
bnx2_reset_phy(struct bnx2 *bp)
{
	int i;
	u32 reg;

	bnx2_write_phy(bp, bp->mii_bmcr, BMCR_RESET);

#define PHY_RESET_MAX_WAIT 100
	for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
		udelay(10);

		bnx2_read_phy(bp, bp->mii_bmcr, &reg);
		if (!(reg & BMCR_RESET)) {
			udelay(20);
			break;
		}
	}
	if (i == PHY_RESET_MAX_WAIT) {
		return -EBUSY;
	}
	return 0;
}

static u32
bnx2_phy_get_pause_adv(struct bnx2 *bp)
{
	u32 adv = 0;

	if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
	    (FLOW_CTRL_RX | FLOW_CTRL_TX)) {

		if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
			adv = ADVERTISE_1000XPAUSE;
		}
		else {
			adv = ADVERTISE_PAUSE_CAP;
		}
	}
	else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
		if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
			adv = ADVERTISE_1000XPSE_ASYM;
		}
		else {
			adv = ADVERTISE_PAUSE_ASYM;
		}
	}
	else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
		if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
			adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
		}
		else {
			adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
		}
	}
	return adv;
}

static int bnx2_fw_sync(struct bnx2 *, u32, int, int);
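
/* With a remote PHY (managed by the bootcode), link settings are not
 * programmed over MDIO.  Instead the desired speed/duplex/pause bits are
 * packed into a shared-memory mailbox word and handed to the firmware via
 * bnx2_fw_sync(), which is why phy_lock is dropped around that call below.
 */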
static int
bnx2_setup_remote_phy(struct bnx2 *bp, u8 port)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
{
	u32 speed_arg = 0, pause_adv;

	pause_adv = bnx2_phy_get_pause_adv(bp);

	if (bp->autoneg & AUTONEG_SPEED) {
		speed_arg |= BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG;
		if (bp->advertising & ADVERTISED_10baseT_Half)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10HALF;
		if (bp->advertising & ADVERTISED_10baseT_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10FULL;
		if (bp->advertising & ADVERTISED_100baseT_Half)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100HALF;
		if (bp->advertising & ADVERTISED_100baseT_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100FULL;
		if (bp->advertising & ADVERTISED_1000baseT_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
		if (bp->advertising & ADVERTISED_2500baseX_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
	} else {
		if (bp->req_line_speed == SPEED_2500)
			speed_arg = BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
		else if (bp->req_line_speed == SPEED_1000)
			speed_arg = BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
		else if (bp->req_line_speed == SPEED_100) {
			if (bp->req_duplex == DUPLEX_FULL)
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100FULL;
			else
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100HALF;
		} else if (bp->req_line_speed == SPEED_10) {
			if (bp->req_duplex == DUPLEX_FULL)
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10FULL;
			else
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10HALF;
		}
	}

	if (pause_adv & (ADVERTISE_1000XPAUSE | ADVERTISE_PAUSE_CAP))
		speed_arg |= BNX2_NETLINK_SET_LINK_FC_SYM_PAUSE;
	if (pause_adv & (ADVERTISE_1000XPSE_ASYM | ADVERTISE_PAUSE_ASYM))
		speed_arg |= BNX2_NETLINK_SET_LINK_FC_ASYM_PAUSE;

	if (port == PORT_TP)
		speed_arg |= BNX2_NETLINK_SET_LINK_PHY_APP_REMOTE |
			     BNX2_NETLINK_SET_LINK_ETH_AT_WIRESPEED;

	bnx2_shmem_wr(bp, BNX2_DRV_MB_ARG0, speed_arg);

	spin_unlock_bh(&bp->phy_lock);
	bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_CMD_SET_LINK, 1, 0);
	spin_lock_bh(&bp->phy_lock);

	return 0;
}
static int
bnx2_setup_serdes_phy(struct bnx2 *bp, u8 port)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
{
	u32 adv, bmcr;
	u32 new_adv = 0;

	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return (bnx2_setup_remote_phy(bp, port));

	if (!(bp->autoneg & AUTONEG_SPEED)) {
		u32 new_bmcr;
		int force_link_down = 0;

		if (bp->req_line_speed == SPEED_2500) {
			if (!bnx2_test_and_enable_2g5(bp))
				force_link_down = 1;
		} else if (bp->req_line_speed == SPEED_1000) {
			if (bnx2_test_and_disable_2g5(bp))
				force_link_down = 1;
		}
		bnx2_read_phy(bp, bp->mii_adv, &adv);
		adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		new_bmcr = bmcr & ~BMCR_ANENABLE;
		new_bmcr |= BMCR_SPEED1000;

		if (CHIP_NUM(bp) == CHIP_NUM_5709) {
			if (bp->req_line_speed == SPEED_2500)
				bnx2_enable_forced_2g5(bp);
			else if (bp->req_line_speed == SPEED_1000) {
				bnx2_disable_forced_2g5(bp);
				new_bmcr &= ~0x2000;
			}

		} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
			if (bp->req_line_speed == SPEED_2500)
				new_bmcr |= BCM5708S_BMCR_FORCE_2500;
			else
				new_bmcr = bmcr & ~BCM5708S_BMCR_FORCE_2500;
		}

		if (bp->req_duplex == DUPLEX_FULL) {
			adv |= ADVERTISE_1000XFULL;
			new_bmcr |= BMCR_FULLDPLX;
		}
		else {
			adv |= ADVERTISE_1000XHALF;
			new_bmcr &= ~BMCR_FULLDPLX;
		}
		if ((new_bmcr != bmcr) || (force_link_down)) {
			/* Force a link down visible on the other side */
			if (bp->link_up) {
				bnx2_write_phy(bp, bp->mii_adv, adv &
					       ~(ADVERTISE_1000XFULL |
						 ADVERTISE_1000XHALF));
				bnx2_write_phy(bp, bp->mii_bmcr, bmcr |
					BMCR_ANRESTART | BMCR_ANENABLE);

				bp->link_up = 0;
				netif_carrier_off(bp->dev);
				bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
				bnx2_report_link(bp);
			}
			bnx2_write_phy(bp, bp->mii_adv, adv);
			bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
		} else {
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	bnx2_test_and_enable_2g5(bp);

	if (bp->advertising & ADVERTISED_1000baseT_Full)
		new_adv |= ADVERTISE_1000XFULL;

	new_adv |= bnx2_phy_get_pause_adv(bp);

	bnx2_read_phy(bp, bp->mii_adv, &adv);
	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	bp->serdes_an_pending = 0;
	if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
		/* Force a link down visible on the other side */
		if (bp->link_up) {
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
			spin_unlock_bh(&bp->phy_lock);
			msleep(20);
			spin_lock_bh(&bp->phy_lock);
		}

		bnx2_write_phy(bp, bp->mii_adv, new_adv);
		bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART |
			BMCR_ANENABLE);
		/* Speed up link-up time when the link partner
		 * does not autonegotiate which is very common
		 * in blade servers. Some blade servers use
		 * IPMI for keyboard input and it's important
		 * to minimize link disruptions. Autoneg. involves
		 * exchanging base pages plus 3 next pages and
		 * normally completes in about 120 msec.
		 */
		bp->current_interval = BNX2_SERDES_AN_TIMEOUT;
		bp->serdes_an_pending = 1;
		mod_timer(&bp->timer, jiffies + bp->current_interval);
	} else {
		bnx2_resolve_flow_ctrl(bp);
		bnx2_set_mac_link(bp);
	}

	return 0;
}
#define ETHTOOL_ALL_FIBRE_SPEED						\
	(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) ?			\
		(ADVERTISED_2500baseX_Full | ADVERTISED_1000baseT_Full) :\
		(ADVERTISED_1000baseT_Full)

#define ETHTOOL_ALL_COPPER_SPEED					\
	(ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |		\
	ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |		\
	ADVERTISED_1000baseT_Full)

#define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
	ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)

#define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)
static void
bnx2_set_default_remote_link(struct bnx2 *bp)
{
	u32 link;

	if (bp->phy_port == PORT_TP)
		link = bnx2_shmem_rd(bp, BNX2_RPHY_COPPER_LINK);
	else
		link = bnx2_shmem_rd(bp, BNX2_RPHY_SERDES_LINK);

	if (link & BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG) {
		bp->req_line_speed = 0;
		bp->autoneg |= AUTONEG_SPEED;
		bp->advertising = ADVERTISED_Autoneg;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
			bp->advertising |= ADVERTISED_10baseT_Half;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_10FULL)
			bp->advertising |= ADVERTISED_10baseT_Full;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
			bp->advertising |= ADVERTISED_100baseT_Half;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_100FULL)
			bp->advertising |= ADVERTISED_100baseT_Full;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
			bp->advertising |= ADVERTISED_1000baseT_Full;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
			bp->advertising |= ADVERTISED_2500baseX_Full;
	} else {
		bp->autoneg = 0;
		bp->advertising = 0;
		bp->req_duplex = DUPLEX_FULL;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_10) {
			bp->req_line_speed = SPEED_10;
			if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
				bp->req_duplex = DUPLEX_HALF;
		}
		if (link & BNX2_NETLINK_SET_LINK_SPEED_100) {
			bp->req_line_speed = SPEED_100;
			if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
				bp->req_duplex = DUPLEX_HALF;
		}
		if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
			bp->req_line_speed = SPEED_1000;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
			bp->req_line_speed = SPEED_2500;
	}
}

static void
bnx2_set_default_link(struct bnx2 *bp)
{
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
		bnx2_set_default_remote_link(bp);
		return;
	}

	bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
	bp->req_line_speed = 0;
	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		u32 reg;

		bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;

		reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG);
		reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
		if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
			bp->autoneg = 0;
			bp->req_line_speed = bp->line_speed = SPEED_1000;
			bp->req_duplex = DUPLEX_FULL;
		}
	} else
		bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
}

static void
bnx2_send_heart_beat(struct bnx2 *bp)
{
	u32 msg;
	u32 addr;

	spin_lock(&bp->indirect_lock);
	msg = (u32) (++bp->fw_drv_pulse_wr_seq & BNX2_DRV_PULSE_SEQ_MASK);
	addr = bp->shmem_base + BNX2_DRV_PULSE_MB;
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, addr);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW, msg);
	spin_unlock(&bp->indirect_lock);
}
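
/* Note that the HALF-duplex cases in the speed switch below intentionally
 * fall through to the matching FULL case: each xxHALF label only overrides
 * the duplex and then shares the line_speed assignment with xxFULL.
 */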
static void
bnx2_remote_phy_event(struct bnx2 *bp)
{
	u32 msg;
	u8 link_up = bp->link_up;
	u8 old_port;

	msg = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);

	if (msg & BNX2_LINK_STATUS_HEART_BEAT_EXPIRED)
		bnx2_send_heart_beat(bp);

	msg &= ~BNX2_LINK_STATUS_HEART_BEAT_EXPIRED;

	if ((msg & BNX2_LINK_STATUS_LINK_UP) == BNX2_LINK_STATUS_LINK_DOWN)
		bp->link_up = 0;
	else {
		u32 speed;

		bp->link_up = 1;
		speed = msg & BNX2_LINK_STATUS_SPEED_MASK;
		bp->duplex = DUPLEX_FULL;
		switch (speed) {
			case BNX2_LINK_STATUS_10HALF:
				bp->duplex = DUPLEX_HALF;
			case BNX2_LINK_STATUS_10FULL:
				bp->line_speed = SPEED_10;
				break;
			case BNX2_LINK_STATUS_100HALF:
				bp->duplex = DUPLEX_HALF;
			case BNX2_LINK_STATUS_100BASE_T4:
			case BNX2_LINK_STATUS_100FULL:
				bp->line_speed = SPEED_100;
				break;
			case BNX2_LINK_STATUS_1000HALF:
				bp->duplex = DUPLEX_HALF;
			case BNX2_LINK_STATUS_1000FULL:
				bp->line_speed = SPEED_1000;
				break;
			case BNX2_LINK_STATUS_2500HALF:
				bp->duplex = DUPLEX_HALF;
			case BNX2_LINK_STATUS_2500FULL:
				bp->line_speed = SPEED_2500;
				break;
			default:
				bp->line_speed = 0;
				break;
		}

		bp->flow_ctrl = 0;
		if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
		    (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
			if (bp->duplex == DUPLEX_FULL)
				bp->flow_ctrl = bp->req_flow_ctrl;
		} else {
			if (msg & BNX2_LINK_STATUS_TX_FC_ENABLED)
				bp->flow_ctrl |= FLOW_CTRL_TX;
			if (msg & BNX2_LINK_STATUS_RX_FC_ENABLED)
				bp->flow_ctrl |= FLOW_CTRL_RX;
		}

		old_port = bp->phy_port;
		if (msg & BNX2_LINK_STATUS_SERDES_LINK)
			bp->phy_port = PORT_FIBRE;
		else
			bp->phy_port = PORT_TP;

		if (old_port != bp->phy_port)
			bnx2_set_default_link(bp);
	}

	if (bp->link_up != link_up)
		bnx2_report_link(bp);

	bnx2_set_mac_link(bp);
}

static int
bnx2_set_remote_link(struct bnx2 *bp)
{
	u32 evt_code;

	evt_code = bnx2_shmem_rd(bp, BNX2_FW_EVT_CODE_MB);
	switch (evt_code) {
		case BNX2_FW_EVT_CODE_LINK_EVENT:
			bnx2_remote_phy_event(bp);
			break;
		case BNX2_FW_EVT_CODE_SW_TIMER_EXPIRATION_EVENT:
		default:
			bnx2_send_heart_beat(bp);
			break;
	}
	return 0;
}
static int
bnx2_setup_copper_phy(struct bnx2 *bp)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
{
	u32 bmcr;
	u32 new_bmcr;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	if (bp->autoneg & AUTONEG_SPEED) {
		u32 adv_reg, adv1000_reg;
		u32 new_adv_reg = 0;
		u32 new_adv1000_reg = 0;

		bnx2_read_phy(bp, bp->mii_adv, &adv_reg);
		adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
			    ADVERTISE_PAUSE_ASYM);

		bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
		adv1000_reg &= PHY_ALL_1000_SPEED;

		if (bp->advertising & ADVERTISED_10baseT_Half)
			new_adv_reg |= ADVERTISE_10HALF;
		if (bp->advertising & ADVERTISED_10baseT_Full)
			new_adv_reg |= ADVERTISE_10FULL;
		if (bp->advertising & ADVERTISED_100baseT_Half)
			new_adv_reg |= ADVERTISE_100HALF;
		if (bp->advertising & ADVERTISED_100baseT_Full)
			new_adv_reg |= ADVERTISE_100FULL;
		if (bp->advertising & ADVERTISED_1000baseT_Full)
			new_adv1000_reg |= ADVERTISE_1000FULL;

		new_adv_reg |= ADVERTISE_CSMA;

		new_adv_reg |= bnx2_phy_get_pause_adv(bp);

		if ((adv1000_reg != new_adv1000_reg) ||
			(adv_reg != new_adv_reg) ||
			((bmcr & BMCR_ANENABLE) == 0)) {

			bnx2_write_phy(bp, bp->mii_adv, new_adv_reg);
			bnx2_write_phy(bp, MII_CTRL1000, new_adv1000_reg);
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_ANRESTART |
				BMCR_ANENABLE);
		}
		else if (bp->link_up) {
			/* Flow ctrl may have changed from auto to forced */
			/* or vice-versa. */

			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	new_bmcr = 0;
	if (bp->req_line_speed == SPEED_100) {
		new_bmcr |= BMCR_SPEED100;
	}
	if (bp->req_duplex == DUPLEX_FULL) {
		new_bmcr |= BMCR_FULLDPLX;
	}
	if (new_bmcr != bmcr) {
		u32 bmsr;

		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);

		if (bmsr & BMSR_LSTATUS) {
			/* Force link down */
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
			spin_unlock_bh(&bp->phy_lock);
			msleep(50);
			spin_lock_bh(&bp->phy_lock);

			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
		}

		bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);

		/* Normally, the new speed is setup after the link has
		 * gone down and up again. In some cases, link will not go
		 * down so we need to set up the new speed here.
		 */
		if (bmsr & BMSR_LSTATUS) {
			bp->line_speed = bp->req_line_speed;
			bp->duplex = bp->req_duplex;
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
	} else {
		bnx2_resolve_flow_ctrl(bp);
		bnx2_set_mac_link(bp);
	}
	return 0;
}

static int
bnx2_setup_phy(struct bnx2 *bp, u8 port)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
{
	if (bp->loopback == MAC_LOOPBACK)
		return 0;

	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		return (bnx2_setup_serdes_phy(bp, port));
	}
	else {
		return (bnx2_setup_copper_phy(bp));
	}
}
2162 static int
2163 bnx2_init_5709s_phy(struct bnx2 *bp, int reset_phy)
2165 u32 val;
2167 bp->mii_bmcr = MII_BMCR + 0x10;
2168 bp->mii_bmsr = MII_BMSR + 0x10;
2169 bp->mii_bmsr1 = MII_BNX2_GP_TOP_AN_STATUS1;
2170 bp->mii_adv = MII_ADVERTISE + 0x10;
2171 bp->mii_lpa = MII_LPA + 0x10;
2172 bp->mii_up1 = MII_BNX2_OVER1G_UP1;
2174 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_AER);
2175 bnx2_write_phy(bp, MII_BNX2_AER_AER, MII_BNX2_AER_AER_AN_MMD);
2177 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
2178 if (reset_phy)
2179 bnx2_reset_phy(bp);
2181 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_SERDES_DIG);
2183 bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, &val);
2184 val &= ~MII_BNX2_SD_1000XCTL1_AUTODET;
2185 val |= MII_BNX2_SD_1000XCTL1_FIBER;
2186 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, val);
2188 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
2189 bnx2_read_phy(bp, MII_BNX2_OVER1G_UP1, &val);
2190 if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
2191 val |= BCM5708S_UP1_2G5;
2192 else
2193 val &= ~BCM5708S_UP1_2G5;
2194 bnx2_write_phy(bp, MII_BNX2_OVER1G_UP1, val);
2196 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_BAM_NXTPG);
2197 bnx2_read_phy(bp, MII_BNX2_BAM_NXTPG_CTL, &val);
2198 val |= MII_BNX2_NXTPG_CTL_T2 | MII_BNX2_NXTPG_CTL_BAM;
2199 bnx2_write_phy(bp, MII_BNX2_BAM_NXTPG_CTL, val);
2201 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_CL73_USERB0);
2203 val = MII_BNX2_CL73_BAM_EN | MII_BNX2_CL73_BAM_STA_MGR_EN |
2204 MII_BNX2_CL73_BAM_NP_AFT_BP_EN;
2205 bnx2_write_phy(bp, MII_BNX2_CL73_BAM_CTL1, val);
2207 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
2209 return 0;
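/* The 5709S SerDes PHY uses banked registers: a write to
 * MII_BNX2_BLK_ADDR selects a block (AER, SERDES_DIG, OVER1G, BAM,
 * CL73_USERB0, ...) and subsequent accesses address registers inside
 * that block. That is also why the standard MII offsets are remapped
 * (+0x10) for this device at the top of the function.
 */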
2212 static int
2213 bnx2_init_5708s_phy(struct bnx2 *bp, int reset_phy)
2215 u32 val;
2217 if (reset_phy)
2218 bnx2_reset_phy(bp);
2220 bp->mii_up1 = BCM5708S_UP1;
2222 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
2223 bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
2224 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
2226 bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
2227 val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
2228 bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);
2230 bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
2231 val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
2232 bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);
2234 if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) {
2235 bnx2_read_phy(bp, BCM5708S_UP1, &val);
2236 val |= BCM5708S_UP1_2G5;
2237 bnx2_write_phy(bp, BCM5708S_UP1, val);
2240 if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
2241 (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
2242 (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
2243 /* increase tx signal amplitude */
2244 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
2245 BCM5708S_BLK_ADDR_TX_MISC);
2246 bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
2247 val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
2248 bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
2249 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
2252 val = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG) &
2253 BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;
2255 if (val) {
2256 u32 is_backplane;
2258 is_backplane = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
2259 if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
2260 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
2261 BCM5708S_BLK_ADDR_TX_MISC);
2262 bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
2263 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
2264 BCM5708S_BLK_ADDR_DIG);
2267 return 0;
2270 static int
2271 bnx2_init_5706s_phy(struct bnx2 *bp, int reset_phy)
2273 if (reset_phy)
2274 bnx2_reset_phy(bp);
2276 bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
2278 if (CHIP_NUM(bp) == CHIP_NUM_5706)
2279 REG_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300);
2281 if (bp->dev->mtu > 1500) {
2282 u32 val;
2284 /* Set extended packet length bit */
2285 bnx2_write_phy(bp, 0x18, 0x7);
2286 bnx2_read_phy(bp, 0x18, &val);
2287 bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);
2289 bnx2_write_phy(bp, 0x1c, 0x6c00);
2290 bnx2_read_phy(bp, 0x1c, &val);
2291 bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
2293 else {
2294 u32 val;
2296 bnx2_write_phy(bp, 0x18, 0x7);
2297 bnx2_read_phy(bp, 0x18, &val);
2298 bnx2_write_phy(bp, 0x18, val & ~0x4007);
2300 bnx2_write_phy(bp, 0x1c, 0x6c00);
2301 bnx2_read_phy(bp, 0x1c, &val);
2302 bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
2305 return 0;
2308 static int
2309 bnx2_init_copper_phy(struct bnx2 *bp, int reset_phy)
2311 u32 val;
2313 if (reset_phy)
2314 bnx2_reset_phy(bp);
2316 if (bp->phy_flags & BNX2_PHY_FLAG_CRC_FIX) {
2317 bnx2_write_phy(bp, 0x18, 0x0c00);
2318 bnx2_write_phy(bp, 0x17, 0x000a);
2319 bnx2_write_phy(bp, 0x15, 0x310b);
2320 bnx2_write_phy(bp, 0x17, 0x201f);
2321 bnx2_write_phy(bp, 0x15, 0x9506);
2322 bnx2_write_phy(bp, 0x17, 0x401f);
2323 bnx2_write_phy(bp, 0x15, 0x14e2);
2324 bnx2_write_phy(bp, 0x18, 0x0400);
2327 if (bp->phy_flags & BNX2_PHY_FLAG_DIS_EARLY_DAC) {
2328 bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS,
2329 MII_BNX2_DSP_EXPAND_REG | 0x8);
2330 bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
2331 val &= ~(1 << 8);
2332 bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val);
2335 if (bp->dev->mtu > 1500) {
2336 /* Set extended packet length bit */
2337 bnx2_write_phy(bp, 0x18, 0x7);
2338 bnx2_read_phy(bp, 0x18, &val);
2339 bnx2_write_phy(bp, 0x18, val | 0x4000);
2341 bnx2_read_phy(bp, 0x10, &val);
2342 bnx2_write_phy(bp, 0x10, val | 0x1);
2344 else {
2345 bnx2_write_phy(bp, 0x18, 0x7);
2346 bnx2_read_phy(bp, 0x18, &val);
2347 bnx2_write_phy(bp, 0x18, val & ~0x4007);
2349 bnx2_read_phy(bp, 0x10, &val);
2350 bnx2_write_phy(bp, 0x10, val & ~0x1);
2353 /* ethernet@wirespeed */
2354 bnx2_write_phy(bp, 0x18, 0x7007);
2355 bnx2_read_phy(bp, 0x18, &val);
2356 bnx2_write_phy(bp, 0x18, val | (1 << 15) | (1 << 4));
2357 return 0;
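/* Registers 0x18 and 0x1c used above are Broadcom shadow registers:
 * a write selects a shadow bank, the following read returns that
 * bank, and the final write updates it, hence the write/read/write
 * triplets. The last sequence enables ethernet@wirespeed, which is
 * understood to let the PHY downshift to a workable speed on
 * marginal cabling.
 */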
2361 static int
2362 bnx2_init_phy(struct bnx2 *bp, int reset_phy)
2363 __releases(&bp->phy_lock)
2364 __acquires(&bp->phy_lock)
2366 u32 val;
2367 int rc = 0;
2369 bp->phy_flags &= ~BNX2_PHY_FLAG_INT_MODE_MASK;
2370 bp->phy_flags |= BNX2_PHY_FLAG_INT_MODE_LINK_READY;
2372 bp->mii_bmcr = MII_BMCR;
2373 bp->mii_bmsr = MII_BMSR;
2374 bp->mii_bmsr1 = MII_BMSR;
2375 bp->mii_adv = MII_ADVERTISE;
2376 bp->mii_lpa = MII_LPA;
2378 REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
2380 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
2381 goto setup_phy;
2383 bnx2_read_phy(bp, MII_PHYSID1, &val);
2384 bp->phy_id = val << 16;
2385 bnx2_read_phy(bp, MII_PHYSID2, &val);
2386 bp->phy_id |= val & 0xffff;
2388 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
2389 if (CHIP_NUM(bp) == CHIP_NUM_5706)
2390 rc = bnx2_init_5706s_phy(bp, reset_phy);
2391 else if (CHIP_NUM(bp) == CHIP_NUM_5708)
2392 rc = bnx2_init_5708s_phy(bp, reset_phy);
2393 else if (CHIP_NUM(bp) == CHIP_NUM_5709)
2394 rc = bnx2_init_5709s_phy(bp, reset_phy);
2396 else {
2397 rc = bnx2_init_copper_phy(bp, reset_phy);
2400 setup_phy:
2401 if (!rc)
2402 rc = bnx2_setup_phy(bp, bp->phy_port);
2404 return rc;
2407 static int
2408 bnx2_set_mac_loopback(struct bnx2 *bp)
2410 u32 mac_mode;
2412 mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
2413 mac_mode &= ~BNX2_EMAC_MODE_PORT;
2414 mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
2415 REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
2416 bp->link_up = 1;
2417 return 0;
2420 static int bnx2_test_link(struct bnx2 *);
2422 static int
2423 bnx2_set_phy_loopback(struct bnx2 *bp)
2425 u32 mac_mode;
2426 int rc, i;
2428 spin_lock_bh(&bp->phy_lock);
2429 rc = bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK | BMCR_FULLDPLX |
2430 BMCR_SPEED1000);
2431 spin_unlock_bh(&bp->phy_lock);
2432 if (rc)
2433 return rc;
2435 for (i = 0; i < 10; i++) {
2436 if (bnx2_test_link(bp) == 0)
2437 break;
2438 msleep(100);
2441 mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
2442 mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
2443 BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
2444 BNX2_EMAC_MODE_25G_MODE);
2446 mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
2447 REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
2448 bp->link_up = 1;
2449 return 0;
2452 static int
2453 bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int ack, int silent)
2455 int i;
2456 u32 val;
2458 bp->fw_wr_seq++;
2459 msg_data |= bp->fw_wr_seq;
2461 bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);
2463 if (!ack)
2464 return 0;
2466 /* wait for an acknowledgement. */
2467 for (i = 0; i < (BNX2_FW_ACK_TIME_OUT_MS / 10); i++) {
2468 msleep(10);
2470 val = bnx2_shmem_rd(bp, BNX2_FW_MB);
2472 if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
2473 break;
2475 if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
2476 return 0;
2478 /* If we timed out, inform the firmware that this is the case. */
2479 if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
2480 if (!silent)
2481 pr_err("fw sync timeout, reset code = %x\n", msg_data);
2483 msg_data &= ~BNX2_DRV_MSG_CODE;
2484 msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;
2486 bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);
2488 return -EBUSY;
2491 if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
2492 return -EIO;
2494 return 0;
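/* Minimal usage sketch for the handshake above (illustrative only):
 *
 *	rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 |
 *			  BNX2_DRV_MSG_CODE_RESET, 1, 0);
 *
 * The rolling fw_wr_seq in the low bits lets the bootcode tell a new
 * request from a stale mailbox value, and a timed-out request is
 * rewritten with the FW_TIMEOUT code so the firmware can recover.
 */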
2497 static int
2498 bnx2_init_5709_context(struct bnx2 *bp)
2500 int i, ret = 0;
2501 u32 val;
2503 val = BNX2_CTX_COMMAND_ENABLED | BNX2_CTX_COMMAND_MEM_INIT | (1 << 12);
2504 val |= (BCM_PAGE_BITS - 8) << 16;
2505 REG_WR(bp, BNX2_CTX_COMMAND, val);
2506 for (i = 0; i < 10; i++) {
2507 val = REG_RD(bp, BNX2_CTX_COMMAND);
2508 if (!(val & BNX2_CTX_COMMAND_MEM_INIT))
2509 break;
2510 udelay(2);
2512 if (val & BNX2_CTX_COMMAND_MEM_INIT)
2513 return -EBUSY;
2515 for (i = 0; i < bp->ctx_pages; i++) {
2516 int j;
2518 if (bp->ctx_blk[i])
2519 memset(bp->ctx_blk[i], 0, BCM_PAGE_SIZE);
2520 else
2521 return -ENOMEM;
2523 REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0,
2524 (bp->ctx_blk_mapping[i] & 0xffffffff) |
2525 BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID);
2526 REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA1,
2527 (u64) bp->ctx_blk_mapping[i] >> 32);
2528 REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL, i |
2529 BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
2530 for (j = 0; j < 10; j++) {
2532 val = REG_RD(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL);
2533 if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
2534 break;
2535 udelay(5);
2537 if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
2538 ret = -EBUSY;
2539 break;
2542 return ret;
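/* Each host context page is published to the chip by writing the low
 * and high halves of its DMA address to the PAGE_TBL_DATA registers
 * and latching them with a WRITE_REQ at table index i; the inner poll
 * just waits for the hardware to clear the request bit, mirroring the
 * MEM_INIT poll at the top of the function.
 */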
2545 static void
2546 bnx2_init_context(struct bnx2 *bp)
2548 u32 vcid;
2550 vcid = 96;
2551 while (vcid) {
2552 u32 vcid_addr, pcid_addr, offset;
2553 int i;
2555 vcid--;
2557 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
2558 u32 new_vcid;
2560 vcid_addr = GET_PCID_ADDR(vcid);
2561 if (vcid & 0x8) {
2562 new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
2564 else {
2565 new_vcid = vcid;
2567 pcid_addr = GET_PCID_ADDR(new_vcid);
2569 else {
2570 vcid_addr = GET_CID_ADDR(vcid);
2571 pcid_addr = vcid_addr;
2574 for (i = 0; i < (CTX_SIZE / PHY_CTX_SIZE); i++) {
2575 vcid_addr += (i << PHY_CTX_SHIFT);
2576 pcid_addr += (i << PHY_CTX_SHIFT);
2578 REG_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
2579 REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
2581 /* Zero out the context. */
2582 for (offset = 0; offset < PHY_CTX_SIZE; offset += 4)
2583 bnx2_ctx_wr(bp, vcid_addr, offset, 0);
2588 static int
2589 bnx2_alloc_bad_rbuf(struct bnx2 *bp)
2591 u16 *good_mbuf;
2592 u32 good_mbuf_cnt;
2593 u32 val;
2595 good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
2596 if (good_mbuf == NULL) {
2597 pr_err("Failed to allocate memory in %s\n", __func__);
2598 return -ENOMEM;
2601 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
2602 BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);
2604 good_mbuf_cnt = 0;
2606 /* Allocate a bunch of mbufs and save the good ones in an array. */
2607 val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
2608 while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
2609 bnx2_reg_wr_ind(bp, BNX2_RBUF_COMMAND,
2610 BNX2_RBUF_COMMAND_ALLOC_REQ);
2612 val = bnx2_reg_rd_ind(bp, BNX2_RBUF_FW_BUF_ALLOC);
2614 val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;
2616 /* The addresses with Bit 9 set are bad memory blocks. */
2617 if (!(val & (1 << 9))) {
2618 good_mbuf[good_mbuf_cnt] = (u16) val;
2619 good_mbuf_cnt++;
2622 val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
2625 /* Free the good ones back to the mbuf pool, thus discarding
2626 * all the bad ones. */
2627 while (good_mbuf_cnt) {
2628 good_mbuf_cnt--;
2630 val = good_mbuf[good_mbuf_cnt];
2631 val = (val << 9) | val | 1;
2633 bnx2_reg_wr_ind(bp, BNX2_RBUF_FW_BUF_FREE, val);
2635 kfree(good_mbuf);
2636 return 0;
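/* The free command above packs the buffer handle twice plus a valid
 * bit, (val << 9) | val | 1, evidently the layout the RBUF free
 * register expects. Only handles that passed the bit-9 test are
 * returned, so the bad blocks stay permanently allocated and out of
 * circulation.
 */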
2639 static void
2640 bnx2_set_mac_addr(struct bnx2 *bp, u8 *mac_addr, u32 pos)
2642 u32 val;
2644 val = (mac_addr[0] << 8) | mac_addr[1];
2646 REG_WR(bp, BNX2_EMAC_MAC_MATCH0 + (pos * 8), val);
2648 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
2649 (mac_addr[4] << 8) | mac_addr[5];
2651 REG_WR(bp, BNX2_EMAC_MAC_MATCH1 + (pos * 8), val);
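/* Layout example (illustrative MAC 00:10:18:2a:bc:de): MATCH0
 * receives 0x00000010 (the two high bytes) and MATCH1 receives
 * 0x182abcde; each match entry is a register pair, hence the
 * (pos * 8) stride.
 */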
2654 static inline int
2655 bnx2_alloc_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
2657 dma_addr_t mapping;
2658 struct sw_pg *rx_pg = &rxr->rx_pg_ring[index];
2659 struct rx_bd *rxbd =
2660 &rxr->rx_pg_desc_ring[RX_RING(index)][RX_IDX(index)];
2661 struct page *page = alloc_page(GFP_ATOMIC);
2663 if (!page)
2664 return -ENOMEM;
2665 mapping = pci_map_page(bp->pdev, page, 0, PAGE_SIZE,
2666 PCI_DMA_FROMDEVICE);
2667 if (pci_dma_mapping_error(bp->pdev, mapping)) {
2668 __free_page(page);
2669 return -EIO;
2672 rx_pg->page = page;
2673 dma_unmap_addr_set(rx_pg, mapping, mapping);
2674 rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
2675 rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
2676 return 0;
2679 static void
2680 bnx2_free_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
2682 struct sw_pg *rx_pg = &rxr->rx_pg_ring[index];
2683 struct page *page = rx_pg->page;
2685 if (!page)
2686 return;
2688 pci_unmap_page(bp->pdev, dma_unmap_addr(rx_pg, mapping), PAGE_SIZE,
2689 PCI_DMA_FROMDEVICE);
2691 __free_page(page);
2692 rx_pg->page = NULL;
2695 static inline int
2696 bnx2_alloc_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
2698 struct sk_buff *skb;
2699 struct sw_bd *rx_buf = &rxr->rx_buf_ring[index];
2700 dma_addr_t mapping;
2701 struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
2702 unsigned long align;
2704 skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
2705 if (skb == NULL) {
2706 return -ENOMEM;
2709 if (unlikely((align = (unsigned long) skb->data & (BNX2_RX_ALIGN - 1))))
2710 skb_reserve(skb, BNX2_RX_ALIGN - align);
2712 mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
2713 PCI_DMA_FROMDEVICE);
2714 if (pci_dma_mapping_error(bp->pdev, mapping)) {
2715 dev_kfree_skb(skb);
2716 return -EIO;
2719 rx_buf->skb = skb;
2720 dma_unmap_addr_set(rx_buf, mapping, mapping);
2722 rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
2723 rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
2725 rxr->rx_prod_bseq += bp->rx_buf_use_size;
2727 return 0;
2730 static int
2731 bnx2_phy_event_is_set(struct bnx2 *bp, struct bnx2_napi *bnapi, u32 event)
2733 struct status_block *sblk = bnapi->status_blk.msi;
2734 u32 new_link_state, old_link_state;
2735 int is_set = 1;
2737 new_link_state = sblk->status_attn_bits & event;
2738 old_link_state = sblk->status_attn_bits_ack & event;
2739 if (new_link_state != old_link_state) {
2740 if (new_link_state)
2741 REG_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD, event);
2742 else
2743 REG_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD, event);
2744 } else
2745 is_set = 0;
2747 return is_set;
2750 static void
2751 bnx2_phy_int(struct bnx2 *bp, struct bnx2_napi *bnapi)
2753 spin_lock(&bp->phy_lock);
2755 if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_LINK_STATE))
2756 bnx2_set_link(bp);
2757 if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_TIMER_ABORT))
2758 bnx2_set_remote_link(bp);
2760 spin_unlock(&bp->phy_lock);
2764 static inline u16
2765 bnx2_get_hw_tx_cons(struct bnx2_napi *bnapi)
2767 u16 cons;
2769 /* Tell compiler that status block fields can change. */
2770 barrier();
2771 cons = *bnapi->hw_tx_cons_ptr;
2772 barrier();
2773 if (unlikely((cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT))
2774 cons++;
2775 return cons;
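/* The last descriptor in each ring page is a link to the next page
 * rather than a real buffer, so a hardware index landing on
 * MAX_TX_DESC_CNT is bumped past it; bnx2_get_hw_rx_cons() below
 * applies the same rule to the RX ring.
 */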
2778 static int
2779 bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
2781 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
2782 u16 hw_cons, sw_cons, sw_ring_cons;
2783 int tx_pkt = 0, index;
2784 struct netdev_queue *txq;
2786 index = (bnapi - bp->bnx2_napi);
2787 txq = netdev_get_tx_queue(bp->dev, index);
2789 hw_cons = bnx2_get_hw_tx_cons(bnapi);
2790 sw_cons = txr->tx_cons;
2792 while (sw_cons != hw_cons) {
2793 struct sw_tx_bd *tx_buf;
2794 struct sk_buff *skb;
2795 int i, last;
2797 sw_ring_cons = TX_RING_IDX(sw_cons);
2799 tx_buf = &txr->tx_buf_ring[sw_ring_cons];
2800 skb = tx_buf->skb;
2802 /* prefetch skb_end_pointer() to speed up skb_shinfo(skb) */
2803 prefetch(&skb->end);
2805 /* partial BD completions possible with TSO packets */
2806 if (tx_buf->is_gso) {
2807 u16 last_idx, last_ring_idx;
2809 last_idx = sw_cons + tx_buf->nr_frags + 1;
2810 last_ring_idx = sw_ring_cons + tx_buf->nr_frags + 1;
2811 if (unlikely(last_ring_idx >= MAX_TX_DESC_CNT)) {
2812 last_idx++;
2814 if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
2815 break;
2819 pci_unmap_single(bp->pdev, dma_unmap_addr(tx_buf, mapping),
2820 skb_headlen(skb), PCI_DMA_TODEVICE);
2822 tx_buf->skb = NULL;
2823 last = tx_buf->nr_frags;
2825 for (i = 0; i < last; i++) {
2826 sw_cons = NEXT_TX_BD(sw_cons);
2828 pci_unmap_page(bp->pdev,
2829 dma_unmap_addr(
2830 &txr->tx_buf_ring[TX_RING_IDX(sw_cons)],
2831 mapping),
2832 skb_shinfo(skb)->frags[i].size,
2833 PCI_DMA_TODEVICE);
2836 sw_cons = NEXT_TX_BD(sw_cons);
2838 dev_kfree_skb(skb);
2839 tx_pkt++;
2840 if (tx_pkt == budget)
2841 break;
2843 if (hw_cons == sw_cons)
2844 hw_cons = bnx2_get_hw_tx_cons(bnapi);
2847 txr->hw_tx_cons = hw_cons;
2848 txr->tx_cons = sw_cons;
2850 /* Need to make the tx_cons update visible to bnx2_start_xmit()
2851 * before checking for netif_tx_queue_stopped(). Without the
2852 * memory barrier, there is a small possibility that bnx2_start_xmit()
2853 * will miss it and cause the queue to be stopped forever.
2854 */
2855 smp_mb();
2857 if (unlikely(netif_tx_queue_stopped(txq)) &&
2858 (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)) {
2859 __netif_tx_lock(txq, smp_processor_id());
2860 if ((netif_tx_queue_stopped(txq)) &&
2861 (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh))
2862 netif_tx_wake_queue(txq);
2863 __netif_tx_unlock(txq);
2866 return tx_pkt;
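/* The smp_mb() above is one half of a classic producer/consumer
 * handshake: this consumer updates tx_cons and then checks the
 * stopped flag, while the producer (bnx2_start_xmit()) stops the
 * queue and then re-checks tx_avail behind its own barrier, so at
 * least one side always observes the other's update and a wakeup is
 * never lost.
 */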
2869 static void
2870 bnx2_reuse_rx_skb_pages(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
2871 struct sk_buff *skb, int count)
2873 struct sw_pg *cons_rx_pg, *prod_rx_pg;
2874 struct rx_bd *cons_bd, *prod_bd;
2875 int i;
2876 u16 hw_prod, prod;
2877 u16 cons = rxr->rx_pg_cons;
2879 cons_rx_pg = &rxr->rx_pg_ring[cons];
2881 /* The caller was unable to allocate a new page to replace the
2882 * last one in the frags array, so we need to recycle that page
2883 * and then free the skb.
2884 */
2885 if (skb) {
2886 struct page *page;
2887 struct skb_shared_info *shinfo;
2889 shinfo = skb_shinfo(skb);
2890 shinfo->nr_frags--;
2891 page = shinfo->frags[shinfo->nr_frags].page;
2892 shinfo->frags[shinfo->nr_frags].page = NULL;
2894 cons_rx_pg->page = page;
2895 dev_kfree_skb(skb);
2898 hw_prod = rxr->rx_pg_prod;
2900 for (i = 0; i < count; i++) {
2901 prod = RX_PG_RING_IDX(hw_prod);
2903 prod_rx_pg = &rxr->rx_pg_ring[prod];
2904 cons_rx_pg = &rxr->rx_pg_ring[cons];
2905 cons_bd = &rxr->rx_pg_desc_ring[RX_RING(cons)][RX_IDX(cons)];
2906 prod_bd = &rxr->rx_pg_desc_ring[RX_RING(prod)][RX_IDX(prod)];
2908 if (prod != cons) {
2909 prod_rx_pg->page = cons_rx_pg->page;
2910 cons_rx_pg->page = NULL;
2911 dma_unmap_addr_set(prod_rx_pg, mapping,
2912 dma_unmap_addr(cons_rx_pg, mapping));
2914 prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
2915 prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
2918 cons = RX_PG_RING_IDX(NEXT_RX_BD(cons));
2919 hw_prod = NEXT_RX_BD(hw_prod);
2921 rxr->rx_pg_prod = hw_prod;
2922 rxr->rx_pg_cons = cons;
2925 static inline void
2926 bnx2_reuse_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
2927 struct sk_buff *skb, u16 cons, u16 prod)
2929 struct sw_bd *cons_rx_buf, *prod_rx_buf;
2930 struct rx_bd *cons_bd, *prod_bd;
2932 cons_rx_buf = &rxr->rx_buf_ring[cons];
2933 prod_rx_buf = &rxr->rx_buf_ring[prod];
2935 pci_dma_sync_single_for_device(bp->pdev,
2936 dma_unmap_addr(cons_rx_buf, mapping),
2937 BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
2939 rxr->rx_prod_bseq += bp->rx_buf_use_size;
2941 prod_rx_buf->skb = skb;
2943 if (cons == prod)
2944 return;
2946 dma_unmap_addr_set(prod_rx_buf, mapping,
2947 dma_unmap_addr(cons_rx_buf, mapping));
2949 cons_bd = &rxr->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
2950 prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
2951 prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
2952 prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
2955 static int
2956 bnx2_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, struct sk_buff *skb,
2957 unsigned int len, unsigned int hdr_len, dma_addr_t dma_addr,
2958 u32 ring_idx)
2960 int err;
2961 u16 prod = ring_idx & 0xffff;
2963 err = bnx2_alloc_rx_skb(bp, rxr, prod);
2964 if (unlikely(err)) {
2965 bnx2_reuse_rx_skb(bp, rxr, skb, (u16) (ring_idx >> 16), prod);
2966 if (hdr_len) {
2967 unsigned int raw_len = len + 4;
2968 int pages = PAGE_ALIGN(raw_len - hdr_len) >> PAGE_SHIFT;
2970 bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages);
2972 return err;
2975 skb_reserve(skb, BNX2_RX_OFFSET);
2976 pci_unmap_single(bp->pdev, dma_addr, bp->rx_buf_use_size,
2977 PCI_DMA_FROMDEVICE);
2979 if (hdr_len == 0) {
2980 skb_put(skb, len);
2981 return 0;
2982 } else {
2983 unsigned int i, frag_len, frag_size, pages;
2984 struct sw_pg *rx_pg;
2985 u16 pg_cons = rxr->rx_pg_cons;
2986 u16 pg_prod = rxr->rx_pg_prod;
2988 frag_size = len + 4 - hdr_len;
2989 pages = PAGE_ALIGN(frag_size) >> PAGE_SHIFT;
2990 skb_put(skb, hdr_len);
2992 for (i = 0; i < pages; i++) {
2993 dma_addr_t mapping_old;
2995 frag_len = min(frag_size, (unsigned int) PAGE_SIZE);
2996 if (unlikely(frag_len <= 4)) {
2997 unsigned int tail = 4 - frag_len;
2999 rxr->rx_pg_cons = pg_cons;
3000 rxr->rx_pg_prod = pg_prod;
3001 bnx2_reuse_rx_skb_pages(bp, rxr, NULL,
3002 pages - i);
3003 skb->len -= tail;
3004 if (i == 0) {
3005 skb->tail -= tail;
3006 } else {
3007 skb_frag_t *frag =
3008 &skb_shinfo(skb)->frags[i - 1];
3009 frag->size -= tail;
3010 skb->data_len -= tail;
3011 skb->truesize -= tail;
3013 return 0;
3015 rx_pg = &rxr->rx_pg_ring[pg_cons];
3017 /* Don't unmap yet. If we're unable to allocate a new
3018 * page, we need to recycle the page and the DMA addr.
3019 */
3020 mapping_old = dma_unmap_addr(rx_pg, mapping);
3021 if (i == pages - 1)
3022 frag_len -= 4;
3024 skb_fill_page_desc(skb, i, rx_pg->page, 0, frag_len);
3025 rx_pg->page = NULL;
3027 err = bnx2_alloc_rx_page(bp, rxr,
3028 RX_PG_RING_IDX(pg_prod));
3029 if (unlikely(err)) {
3030 rxr->rx_pg_cons = pg_cons;
3031 rxr->rx_pg_prod = pg_prod;
3032 bnx2_reuse_rx_skb_pages(bp, rxr, skb,
3033 pages - i);
3034 return err;
3037 pci_unmap_page(bp->pdev, mapping_old,
3038 PAGE_SIZE, PCI_DMA_FROMDEVICE);
3040 frag_size -= frag_len;
3041 skb->data_len += frag_len;
3042 skb->truesize += frag_len;
3043 skb->len += frag_len;
3045 pg_prod = NEXT_RX_BD(pg_prod);
3046 pg_cons = RX_PG_RING_IDX(NEXT_RX_BD(pg_cons));
3048 rxr->rx_pg_prod = pg_prod;
3049 rxr->rx_pg_cons = pg_cons;
3051 return 0;
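/* The frag_len <= 4 case above covers a frame whose final page would
 * contain nothing but (part of) the 4-byte CRC: rather than attach a
 * nearly empty page, the CRC remnant is trimmed from the skb head or
 * last fragment and the remaining pages are recycled wholesale.
 */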
3054 static inline u16
3055 bnx2_get_hw_rx_cons(struct bnx2_napi *bnapi)
3057 u16 cons;
3059 /* Tell compiler that status block fields can change. */
3060 barrier();
3061 cons = *bnapi->hw_rx_cons_ptr;
3062 barrier();
3063 if (unlikely((cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT))
3064 cons++;
3065 return cons;
3068 static int
3069 bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
3071 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
3072 u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
3073 struct l2_fhdr *rx_hdr;
3074 int rx_pkt = 0, pg_ring_used = 0;
3076 hw_cons = bnx2_get_hw_rx_cons(bnapi);
3077 sw_cons = rxr->rx_cons;
3078 sw_prod = rxr->rx_prod;
3080 /* Memory barrier necessary as speculative reads of the rx
3081 * buffer can be ahead of the index in the status block
3082 */
3083 rmb();
3084 while (sw_cons != hw_cons) {
3085 unsigned int len, hdr_len;
3086 u32 status;
3087 struct sw_bd *rx_buf;
3088 struct sk_buff *skb;
3089 dma_addr_t dma_addr;
3090 u16 vtag = 0;
3091 int hw_vlan __maybe_unused = 0;
3093 sw_ring_cons = RX_RING_IDX(sw_cons);
3094 sw_ring_prod = RX_RING_IDX(sw_prod);
3096 rx_buf = &rxr->rx_buf_ring[sw_ring_cons];
3097 skb = rx_buf->skb;
3099 rx_buf->skb = NULL;
3101 dma_addr = dma_unmap_addr(rx_buf, mapping);
3103 pci_dma_sync_single_for_cpu(bp->pdev, dma_addr,
3104 BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH,
3105 PCI_DMA_FROMDEVICE);
3107 rx_hdr = (struct l2_fhdr *) skb->data;
3108 len = rx_hdr->l2_fhdr_pkt_len;
3109 status = rx_hdr->l2_fhdr_status;
3111 hdr_len = 0;
3112 if (status & L2_FHDR_STATUS_SPLIT) {
3113 hdr_len = rx_hdr->l2_fhdr_ip_xsum;
3114 pg_ring_used = 1;
3115 } else if (len > bp->rx_jumbo_thresh) {
3116 hdr_len = bp->rx_jumbo_thresh;
3117 pg_ring_used = 1;
3120 if (unlikely(status & (L2_FHDR_ERRORS_BAD_CRC |
3121 L2_FHDR_ERRORS_PHY_DECODE |
3122 L2_FHDR_ERRORS_ALIGNMENT |
3123 L2_FHDR_ERRORS_TOO_SHORT |
3124 L2_FHDR_ERRORS_GIANT_FRAME))) {
3126 bnx2_reuse_rx_skb(bp, rxr, skb, sw_ring_cons,
3127 sw_ring_prod);
3128 if (pg_ring_used) {
3129 int pages;
3131 pages = PAGE_ALIGN(len - hdr_len) >> PAGE_SHIFT;
3133 bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages);
3135 goto next_rx;
3138 len -= 4;
3140 if (len <= bp->rx_copy_thresh) {
3141 struct sk_buff *new_skb;
3143 new_skb = netdev_alloc_skb(bp->dev, len + 6);
3144 if (new_skb == NULL) {
3145 bnx2_reuse_rx_skb(bp, rxr, skb, sw_ring_cons,
3146 sw_ring_prod);
3147 goto next_rx;
3150 /* aligned copy */
3151 skb_copy_from_linear_data_offset(skb,
3152 BNX2_RX_OFFSET - 6,
3153 new_skb->data, len + 6);
3154 skb_reserve(new_skb, 6);
3155 skb_put(new_skb, len);
3157 bnx2_reuse_rx_skb(bp, rxr, skb,
3158 sw_ring_cons, sw_ring_prod);
3160 skb = new_skb;
3161 } else if (unlikely(bnx2_rx_skb(bp, rxr, skb, len, hdr_len,
3162 dma_addr, (sw_ring_cons << 16) | sw_ring_prod)))
3163 goto next_rx;
3165 if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) &&
3166 !(bp->rx_mode & BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG)) {
3167 vtag = rx_hdr->l2_fhdr_vlan_tag;
3168 #ifdef BCM_VLAN
3169 if (bp->vlgrp)
3170 hw_vlan = 1;
3171 else
3172 #endif
3174 struct vlan_ethhdr *ve = (struct vlan_ethhdr *)
3175 __skb_push(skb, 4);
3177 memmove(ve, skb->data + 4, ETH_ALEN * 2);
3178 ve->h_vlan_proto = htons(ETH_P_8021Q);
3179 ve->h_vlan_TCI = htons(vtag);
3180 len += 4;
3184 skb->protocol = eth_type_trans(skb, bp->dev);
3186 if ((len > (bp->dev->mtu + ETH_HLEN)) &&
3187 (ntohs(skb->protocol) != 0x8100)) {
3189 dev_kfree_skb(skb);
3190 goto next_rx;
3194 skb->ip_summed = CHECKSUM_NONE;
3195 if (bp->rx_csum &&
3196 (status & (L2_FHDR_STATUS_TCP_SEGMENT |
3197 L2_FHDR_STATUS_UDP_DATAGRAM))) {
3199 if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
3200 L2_FHDR_ERRORS_UDP_XSUM)) == 0))
3201 skb->ip_summed = CHECKSUM_UNNECESSARY;
3204 skb_record_rx_queue(skb, bnapi - &bp->bnx2_napi[0]);
3206 #ifdef BCM_VLAN
3207 if (hw_vlan)
3208 vlan_hwaccel_receive_skb(skb, bp->vlgrp, vtag);
3209 else
3210 #endif
3211 netif_receive_skb(skb);
3213 rx_pkt++;
3215 next_rx:
3216 sw_cons = NEXT_RX_BD(sw_cons);
3217 sw_prod = NEXT_RX_BD(sw_prod);
3219 if ((rx_pkt == budget))
3220 break;
3222 /* Refresh hw_cons to see if there is new work */
3223 if (sw_cons == hw_cons) {
3224 hw_cons = bnx2_get_hw_rx_cons(bnapi);
3225 rmb();
3228 rxr->rx_cons = sw_cons;
3229 rxr->rx_prod = sw_prod;
3231 if (pg_ring_used)
3232 REG_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);
3234 REG_WR16(bp, rxr->rx_bidx_addr, sw_prod);
3236 REG_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);
3238 mmiowb();
3240 return rx_pkt;
3244 /* MSI ISR - The only difference between this and the INTx ISR
3245 * is that the MSI interrupt is always serviced.
3246 */
3247 static irqreturn_t
3248 bnx2_msi(int irq, void *dev_instance)
3250 struct bnx2_napi *bnapi = dev_instance;
3251 struct bnx2 *bp = bnapi->bp;
3253 prefetch(bnapi->status_blk.msi);
3254 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3255 BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
3256 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
3258 /* Return here if interrupt is disabled. */
3259 if (unlikely(atomic_read(&bp->intr_sem) != 0))
3260 return IRQ_HANDLED;
3262 napi_schedule(&bnapi->napi);
3264 return IRQ_HANDLED;
3267 static irqreturn_t
3268 bnx2_msi_1shot(int irq, void *dev_instance)
3270 struct bnx2_napi *bnapi = dev_instance;
3271 struct bnx2 *bp = bnapi->bp;
3273 prefetch(bnapi->status_blk.msi);
3275 /* Return here if interrupt is disabled. */
3276 if (unlikely(atomic_read(&bp->intr_sem) != 0))
3277 return IRQ_HANDLED;
3279 napi_schedule(&bnapi->napi);
3281 return IRQ_HANDLED;
3284 static irqreturn_t
3285 bnx2_interrupt(int irq, void *dev_instance)
3287 struct bnx2_napi *bnapi = dev_instance;
3288 struct bnx2 *bp = bnapi->bp;
3289 struct status_block *sblk = bnapi->status_blk.msi;
3291 /* When using INTx, it is possible for the interrupt to arrive
3292 * at the CPU before the status block posted prior to the
3293 * interrupt. Reading a register will flush the status block.
3294 * When using MSI, the MSI message will always complete after
3295 * the status block write.
3296 */
3297 if ((sblk->status_idx == bnapi->last_status_idx) &&
3298 (REG_RD(bp, BNX2_PCICFG_MISC_STATUS) &
3299 BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
3300 return IRQ_NONE;
3302 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3303 BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
3304 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
3306 /* Read back to deassert IRQ immediately to avoid too many
3307 * spurious interrupts.
3308 */
3309 REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
3311 /* Return here if interrupt is shared and is disabled. */
3312 if (unlikely(atomic_read(&bp->intr_sem) != 0))
3313 return IRQ_HANDLED;
3315 if (napi_schedule_prep(&bnapi->napi)) {
3316 bnapi->last_status_idx = sblk->status_idx;
3317 __napi_schedule(&bnapi->napi);
3320 return IRQ_HANDLED;
3323 static inline int
3324 bnx2_has_fast_work(struct bnx2_napi *bnapi)
3326 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
3327 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
3329 if ((bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons) ||
3330 (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons))
3331 return 1;
3332 return 0;
3335 #define STATUS_ATTN_EVENTS (STATUS_ATTN_BITS_LINK_STATE | \
3336 STATUS_ATTN_BITS_TIMER_ABORT)
3338 static inline int
3339 bnx2_has_work(struct bnx2_napi *bnapi)
3341 struct status_block *sblk = bnapi->status_blk.msi;
3343 if (bnx2_has_fast_work(bnapi))
3344 return 1;
3346 #ifdef BCM_CNIC
3347 if (bnapi->cnic_present && (bnapi->cnic_tag != sblk->status_idx))
3348 return 1;
3349 #endif
3351 if ((sblk->status_attn_bits & STATUS_ATTN_EVENTS) !=
3352 (sblk->status_attn_bits_ack & STATUS_ATTN_EVENTS))
3353 return 1;
3355 return 0;
3358 static void
3359 bnx2_chk_missed_msi(struct bnx2 *bp)
3361 struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
3362 u32 msi_ctrl;
3364 if (bnx2_has_work(bnapi)) {
3365 msi_ctrl = REG_RD(bp, BNX2_PCICFG_MSI_CONTROL);
3366 if (!(msi_ctrl & BNX2_PCICFG_MSI_CONTROL_ENABLE))
3367 return;
3369 if (bnapi->last_status_idx == bp->idle_chk_status_idx) {
3370 REG_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl &
3371 ~BNX2_PCICFG_MSI_CONTROL_ENABLE);
3372 REG_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl);
3373 bnx2_msi(bp->irq_tbl[0].vector, bnapi);
3377 bp->idle_chk_status_idx = bnapi->last_status_idx;
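/* Lost-MSI workaround: if work is pending but the status index has
 * not advanced since the previous idle check, the MSI was most
 * likely dropped; briefly toggling MSI_CONTROL_ENABLE and invoking
 * the ISR by hand replays it. This is presumably driven from the
 * driver's periodic timer.
 */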
3380 #ifdef BCM_CNIC
3381 static void bnx2_poll_cnic(struct bnx2 *bp, struct bnx2_napi *bnapi)
3383 struct cnic_ops *c_ops;
3385 if (!bnapi->cnic_present)
3386 return;
3388 rcu_read_lock();
3389 c_ops = rcu_dereference(bp->cnic_ops);
3390 if (c_ops)
3391 bnapi->cnic_tag = c_ops->cnic_handler(bp->cnic_data,
3392 bnapi->status_blk.msi);
3393 rcu_read_unlock();
3395 #endif
3397 static void bnx2_poll_link(struct bnx2 *bp, struct bnx2_napi *bnapi)
3399 struct status_block *sblk = bnapi->status_blk.msi;
3400 u32 status_attn_bits = sblk->status_attn_bits;
3401 u32 status_attn_bits_ack = sblk->status_attn_bits_ack;
3403 if ((status_attn_bits & STATUS_ATTN_EVENTS) !=
3404 (status_attn_bits_ack & STATUS_ATTN_EVENTS)) {
3406 bnx2_phy_int(bp, bnapi);
3408 /* This is needed to take care of transient status
3409 * during link changes.
3410 */
3411 REG_WR(bp, BNX2_HC_COMMAND,
3412 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
3413 REG_RD(bp, BNX2_HC_COMMAND);
3417 static int bnx2_poll_work(struct bnx2 *bp, struct bnx2_napi *bnapi,
3418 int work_done, int budget)
3420 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
3421 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
3423 if (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons)
3424 bnx2_tx_int(bp, bnapi, 0);
3426 if (bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons)
3427 work_done += bnx2_rx_int(bp, bnapi, budget - work_done);
3429 return work_done;
3432 static int bnx2_poll_msix(struct napi_struct *napi, int budget)
3434 struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
3435 struct bnx2 *bp = bnapi->bp;
3436 int work_done = 0;
3437 struct status_block_msix *sblk = bnapi->status_blk.msix;
3439 while (1) {
3440 work_done = bnx2_poll_work(bp, bnapi, work_done, budget);
3441 if (unlikely(work_done >= budget))
3442 break;
3444 bnapi->last_status_idx = sblk->status_idx;
3445 /* status idx must be read before checking for more work. */
3446 rmb();
3447 if (likely(!bnx2_has_fast_work(bnapi))) {
3449 napi_complete(napi);
3450 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
3451 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
3452 bnapi->last_status_idx);
3453 break;
3456 return work_done;
3459 static int bnx2_poll(struct napi_struct *napi, int budget)
3461 struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
3462 struct bnx2 *bp = bnapi->bp;
3463 int work_done = 0;
3464 struct status_block *sblk = bnapi->status_blk.msi;
3466 while (1) {
3467 bnx2_poll_link(bp, bnapi);
3469 work_done = bnx2_poll_work(bp, bnapi, work_done, budget);
3471 #ifdef BCM_CNIC
3472 bnx2_poll_cnic(bp, bnapi);
3473 #endif
3475 /* bnapi->last_status_idx is used below to tell the hw how
3476 * much work has been processed, so we must read it before
3477 * checking for more work.
3478 */
3479 bnapi->last_status_idx = sblk->status_idx;
3481 if (unlikely(work_done >= budget))
3482 break;
3484 rmb();
3485 if (likely(!bnx2_has_work(bnapi))) {
3486 napi_complete(napi);
3487 if (likely(bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)) {
3488 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3489 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
3490 bnapi->last_status_idx);
3491 break;
3493 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3494 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
3495 BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
3496 bnapi->last_status_idx);
3498 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3499 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
3500 bnapi->last_status_idx);
3501 break;
3505 return work_done;
3508 /* Called with rtnl_lock from vlan functions and also netif_tx_lock
3509 * from set_multicast.
3510 */
3511 static void
3512 bnx2_set_rx_mode(struct net_device *dev)
3514 struct bnx2 *bp = netdev_priv(dev);
3515 u32 rx_mode, sort_mode;
3516 struct netdev_hw_addr *ha;
3517 int i;
3519 if (!netif_running(dev))
3520 return;
3522 spin_lock_bh(&bp->phy_lock);
3524 rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
3525 BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
3526 sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
3527 #ifdef BCM_VLAN
3528 if (!bp->vlgrp && (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN))
3529 rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
3530 #else
3531 if (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN)
3532 rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
3533 #endif
3534 if (dev->flags & IFF_PROMISC) {
3535 /* Promiscuous mode. */
3536 rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
3537 sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
3538 BNX2_RPM_SORT_USER0_PROM_VLAN;
3540 else if (dev->flags & IFF_ALLMULTI) {
3541 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
3542 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
3543 0xffffffff);
3545 sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
3547 else {
3548 /* Accept one or more multicast addresses. */
3549 u32 mc_filter[NUM_MC_HASH_REGISTERS];
3550 u32 regidx;
3551 u32 bit;
3552 u32 crc;
3554 memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);
3556 netdev_for_each_mc_addr(ha, dev) {
3557 crc = ether_crc_le(ETH_ALEN, ha->addr);
3558 bit = crc & 0xff;
3559 regidx = (bit & 0xe0) >> 5;
3560 bit &= 0x1f;
3561 mc_filter[regidx] |= (1 << bit);
3564 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
3565 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
3566 mc_filter[i]);
3569 sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
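/* Hash walk, spelled out: ether_crc_le() of the 6-byte address gives
 * a 32-bit CRC whose low 8 bits index one of 256 filter bits; bits
 * [7:5] pick one of the eight 32-bit hash registers and bits [4:0]
 * the bit inside it.
 */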
3572 if (netdev_uc_count(dev) > BNX2_MAX_UNICAST_ADDRESSES) {
3573 rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
3574 sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
3575 BNX2_RPM_SORT_USER0_PROM_VLAN;
3576 } else if (!(dev->flags & IFF_PROMISC)) {
3577 /* Add all entries into the match filter list */
3578 i = 0;
3579 netdev_for_each_uc_addr(ha, dev) {
3580 bnx2_set_mac_addr(bp, ha->addr,
3581 i + BNX2_START_UNICAST_ADDRESS_INDEX);
3582 sort_mode |= (1 <<
3583 (i + BNX2_START_UNICAST_ADDRESS_INDEX));
3584 i++;
3589 if (rx_mode != bp->rx_mode) {
3590 bp->rx_mode = rx_mode;
3591 REG_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
3594 REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
3595 REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
3596 REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);
3598 spin_unlock_bh(&bp->phy_lock);
3601 static int __devinit
3602 check_fw_section(const struct firmware *fw,
3603 const struct bnx2_fw_file_section *section,
3604 u32 alignment, bool non_empty)
3606 u32 offset = be32_to_cpu(section->offset);
3607 u32 len = be32_to_cpu(section->len);
3609 if ((offset == 0 && len != 0) || offset >= fw->size || offset & 3)
3610 return -EINVAL;
3611 if ((non_empty && len == 0) || len > fw->size - offset ||
3612 len & (alignment - 1))
3613 return -EINVAL;
3614 return 0;
3617 static int __devinit
3618 check_mips_fw_entry(const struct firmware *fw,
3619 const struct bnx2_mips_fw_file_entry *entry)
3621 if (check_fw_section(fw, &entry->text, 4, true) ||
3622 check_fw_section(fw, &entry->data, 4, false) ||
3623 check_fw_section(fw, &entry->rodata, 4, false))
3624 return -EINVAL;
3625 return 0;
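/* Together these checks guarantee that every referenced firmware
 * section lies entirely inside the file, is dword-aligned, and meets
 * the caller's alignment requirement, so a truncated or corrupt image
 * is rejected before a single word reaches the chip.
 */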
3628 static int __devinit
3629 bnx2_request_firmware(struct bnx2 *bp)
3631 const char *mips_fw_file, *rv2p_fw_file;
3632 const struct bnx2_mips_fw_file *mips_fw;
3633 const struct bnx2_rv2p_fw_file *rv2p_fw;
3634 int rc;
3636 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3637 mips_fw_file = FW_MIPS_FILE_09;
3638 if ((CHIP_ID(bp) == CHIP_ID_5709_A0) ||
3639 (CHIP_ID(bp) == CHIP_ID_5709_A1))
3640 rv2p_fw_file = FW_RV2P_FILE_09_Ax;
3641 else
3642 rv2p_fw_file = FW_RV2P_FILE_09;
3643 } else {
3644 mips_fw_file = FW_MIPS_FILE_06;
3645 rv2p_fw_file = FW_RV2P_FILE_06;
3648 rc = request_firmware(&bp->mips_firmware, mips_fw_file, &bp->pdev->dev);
3649 if (rc) {
3650 pr_err("Can't load firmware file \"%s\"\n", mips_fw_file);
3651 return rc;
3654 rc = request_firmware(&bp->rv2p_firmware, rv2p_fw_file, &bp->pdev->dev);
3655 if (rc) {
3656 pr_err("Can't load firmware file \"%s\"\n", rv2p_fw_file);
3657 return rc;
3659 mips_fw = (const struct bnx2_mips_fw_file *) bp->mips_firmware->data;
3660 rv2p_fw = (const struct bnx2_rv2p_fw_file *) bp->rv2p_firmware->data;
3661 if (bp->mips_firmware->size < sizeof(*mips_fw) ||
3662 check_mips_fw_entry(bp->mips_firmware, &mips_fw->com) ||
3663 check_mips_fw_entry(bp->mips_firmware, &mips_fw->cp) ||
3664 check_mips_fw_entry(bp->mips_firmware, &mips_fw->rxp) ||
3665 check_mips_fw_entry(bp->mips_firmware, &mips_fw->tpat) ||
3666 check_mips_fw_entry(bp->mips_firmware, &mips_fw->txp)) {
3667 pr_err("Firmware file \"%s\" is invalid\n", mips_fw_file);
3668 return -EINVAL;
3670 if (bp->rv2p_firmware->size < sizeof(*rv2p_fw) ||
3671 check_fw_section(bp->rv2p_firmware, &rv2p_fw->proc1.rv2p, 8, true) ||
3672 check_fw_section(bp->rv2p_firmware, &rv2p_fw->proc2.rv2p, 8, true)) {
3673 pr_err("Firmware file \"%s\" is invalid\n", rv2p_fw_file);
3674 return -EINVAL;
3677 return 0;
3680 static u32
3681 rv2p_fw_fixup(u32 rv2p_proc, int idx, u32 loc, u32 rv2p_code)
3683 switch (idx) {
3684 case RV2P_P1_FIXUP_PAGE_SIZE_IDX:
3685 rv2p_code &= ~RV2P_BD_PAGE_SIZE_MSK;
3686 rv2p_code |= RV2P_BD_PAGE_SIZE;
3687 break;
3689 return rv2p_code;
3692 static int
3693 load_rv2p_fw(struct bnx2 *bp, u32 rv2p_proc,
3694 const struct bnx2_rv2p_fw_file_entry *fw_entry)
3696 u32 rv2p_code_len, file_offset;
3697 __be32 *rv2p_code;
3698 int i;
3699 u32 val, cmd, addr;
3701 rv2p_code_len = be32_to_cpu(fw_entry->rv2p.len);
3702 file_offset = be32_to_cpu(fw_entry->rv2p.offset);
3704 rv2p_code = (__be32 *)(bp->rv2p_firmware->data + file_offset);
3706 if (rv2p_proc == RV2P_PROC1) {
3707 cmd = BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
3708 addr = BNX2_RV2P_PROC1_ADDR_CMD;
3709 } else {
3710 cmd = BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
3711 addr = BNX2_RV2P_PROC2_ADDR_CMD;
3714 for (i = 0; i < rv2p_code_len; i += 8) {
3715 REG_WR(bp, BNX2_RV2P_INSTR_HIGH, be32_to_cpu(*rv2p_code));
3716 rv2p_code++;
3717 REG_WR(bp, BNX2_RV2P_INSTR_LOW, be32_to_cpu(*rv2p_code));
3718 rv2p_code++;
3720 val = (i / 8) | cmd;
3721 REG_WR(bp, addr, val);
3724 rv2p_code = (__be32 *)(bp->rv2p_firmware->data + file_offset);
3725 for (i = 0; i < 8; i++) {
3726 u32 loc, code;
3728 loc = be32_to_cpu(fw_entry->fixup[i]);
3729 if (loc && ((loc * 4) < rv2p_code_len)) {
3730 code = be32_to_cpu(*(rv2p_code + loc - 1));
3731 REG_WR(bp, BNX2_RV2P_INSTR_HIGH, code);
3732 code = be32_to_cpu(*(rv2p_code + loc));
3733 code = rv2p_fw_fixup(rv2p_proc, i, loc, code);
3734 REG_WR(bp, BNX2_RV2P_INSTR_LOW, code);
3736 val = (loc / 2) | cmd;
3737 REG_WR(bp, addr, val);
3741 /* Reset the processor, un-stall is done later. */
3742 if (rv2p_proc == RV2P_PROC1) {
3743 REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
3745 else {
3746 REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
3749 return 0;
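/* RV2P instructions are 64-bit pairs staged through INSTR_HIGH and
 * INSTR_LOW and committed by writing the instruction index plus the
 * RDWR command. The fixup table lists up to eight word locations to
 * patch after the bulk download; the only current fixup rewrites the
 * BD page size field to match this kernel's page size (see
 * rv2p_fw_fixup() above).
 */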
3752 static int
3753 load_cpu_fw(struct bnx2 *bp, const struct cpu_reg *cpu_reg,
3754 const struct bnx2_mips_fw_file_entry *fw_entry)
3756 u32 addr, len, file_offset;
3757 __be32 *data;
3758 u32 offset;
3759 u32 val;
3761 /* Halt the CPU. */
3762 val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
3763 val |= cpu_reg->mode_value_halt;
3764 bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
3765 bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
3767 /* Load the Text area. */
3768 addr = be32_to_cpu(fw_entry->text.addr);
3769 len = be32_to_cpu(fw_entry->text.len);
3770 file_offset = be32_to_cpu(fw_entry->text.offset);
3771 data = (__be32 *)(bp->mips_firmware->data + file_offset);
3773 offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3774 if (len) {
3775 int j;
3777 for (j = 0; j < (len / 4); j++, offset += 4)
3778 bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3781 /* Load the Data area. */
3782 addr = be32_to_cpu(fw_entry->data.addr);
3783 len = be32_to_cpu(fw_entry->data.len);
3784 file_offset = be32_to_cpu(fw_entry->data.offset);
3785 data = (__be32 *)(bp->mips_firmware->data + file_offset);
3787 offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3788 if (len) {
3789 int j;
3791 for (j = 0; j < (len / 4); j++, offset += 4)
3792 bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3795 /* Load the Read-Only area. */
3796 addr = be32_to_cpu(fw_entry->rodata.addr);
3797 len = be32_to_cpu(fw_entry->rodata.len);
3798 file_offset = be32_to_cpu(fw_entry->rodata.offset);
3799 data = (__be32 *)(bp->mips_firmware->data + file_offset);
3801 offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3802 if (len) {
3803 int j;
3805 for (j = 0; j < (len / 4); j++, offset += 4)
3806 bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3809 /* Clear the pre-fetch instruction. */
3810 bnx2_reg_wr_ind(bp, cpu_reg->inst, 0);
3812 val = be32_to_cpu(fw_entry->start_addr);
3813 bnx2_reg_wr_ind(bp, cpu_reg->pc, val);
3815 /* Start the CPU. */
3816 val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
3817 val &= ~cpu_reg->mode_value_halt;
3818 bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
3819 bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
3821 return 0;
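/* The section addresses in the firmware file are MIPS virtual
 * addresses; they are rebased into the processor's scratchpad as
 *
 *	offset = spad_base + (addr - mips_view_base)
 *
 * and written word-by-word through the indirect register interface.
 * The PC is then pointed at start_addr and the halt bit cleared to
 * let the CPU run.
 */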
3824 static int
3825 bnx2_init_cpus(struct bnx2 *bp)
3827 const struct bnx2_mips_fw_file *mips_fw =
3828 (const struct bnx2_mips_fw_file *) bp->mips_firmware->data;
3829 const struct bnx2_rv2p_fw_file *rv2p_fw =
3830 (const struct bnx2_rv2p_fw_file *) bp->rv2p_firmware->data;
3831 int rc;
3833 /* Initialize the RV2P processor. */
3834 load_rv2p_fw(bp, RV2P_PROC1, &rv2p_fw->proc1);
3835 load_rv2p_fw(bp, RV2P_PROC2, &rv2p_fw->proc2);
3837 /* Initialize the RX Processor. */
3838 rc = load_cpu_fw(bp, &cpu_reg_rxp, &mips_fw->rxp);
3839 if (rc)
3840 goto init_cpu_err;
3842 /* Initialize the TX Processor. */
3843 rc = load_cpu_fw(bp, &cpu_reg_txp, &mips_fw->txp);
3844 if (rc)
3845 goto init_cpu_err;
3847 /* Initialize the TX Patch-up Processor. */
3848 rc = load_cpu_fw(bp, &cpu_reg_tpat, &mips_fw->tpat);
3849 if (rc)
3850 goto init_cpu_err;
3852 /* Initialize the Completion Processor. */
3853 rc = load_cpu_fw(bp, &cpu_reg_com, &mips_fw->com);
3854 if (rc)
3855 goto init_cpu_err;
3857 /* Initialize the Command Processor. */
3858 rc = load_cpu_fw(bp, &cpu_reg_cp, &mips_fw->cp);
3860 init_cpu_err:
3861 return rc;
3864 static int
3865 bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
3867 u16 pmcsr;
3869 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
3871 switch (state) {
3872 case PCI_D0: {
3873 u32 val;
3875 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
3876 (pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
3877 PCI_PM_CTRL_PME_STATUS);
3879 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
3880 /* delay required during transition out of D3hot */
3881 msleep(20);
3883 val = REG_RD(bp, BNX2_EMAC_MODE);
3884 val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
3885 val &= ~BNX2_EMAC_MODE_MPKT;
3886 REG_WR(bp, BNX2_EMAC_MODE, val);
3888 val = REG_RD(bp, BNX2_RPM_CONFIG);
3889 val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
3890 REG_WR(bp, BNX2_RPM_CONFIG, val);
3891 break;
3893 case PCI_D3hot: {
3894 int i;
3895 u32 val, wol_msg;
3897 if (bp->wol) {
3898 u32 advertising;
3899 u8 autoneg;
3901 autoneg = bp->autoneg;
3902 advertising = bp->advertising;
3904 if (bp->phy_port == PORT_TP) {
3905 bp->autoneg = AUTONEG_SPEED;
3906 bp->advertising = ADVERTISED_10baseT_Half |
3907 ADVERTISED_10baseT_Full |
3908 ADVERTISED_100baseT_Half |
3909 ADVERTISED_100baseT_Full |
3910 ADVERTISED_Autoneg;
3913 spin_lock_bh(&bp->phy_lock);
3914 bnx2_setup_phy(bp, bp->phy_port);
3915 spin_unlock_bh(&bp->phy_lock);
3917 bp->autoneg = autoneg;
3918 bp->advertising = advertising;
3920 bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
3922 val = REG_RD(bp, BNX2_EMAC_MODE);
3924 /* Enable port mode. */
3925 val &= ~BNX2_EMAC_MODE_PORT;
3926 val |= BNX2_EMAC_MODE_MPKT_RCVD |
3927 BNX2_EMAC_MODE_ACPI_RCVD |
3928 BNX2_EMAC_MODE_MPKT;
3929 if (bp->phy_port == PORT_TP)
3930 val |= BNX2_EMAC_MODE_PORT_MII;
3931 else {
3932 val |= BNX2_EMAC_MODE_PORT_GMII;
3933 if (bp->line_speed == SPEED_2500)
3934 val |= BNX2_EMAC_MODE_25G_MODE;
3937 REG_WR(bp, BNX2_EMAC_MODE, val);
3939 /* receive all multicast */
3940 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
3941 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
3942 0xffffffff);
3944 REG_WR(bp, BNX2_EMAC_RX_MODE,
3945 BNX2_EMAC_RX_MODE_SORT_MODE);
3947 val = 1 | BNX2_RPM_SORT_USER0_BC_EN |
3948 BNX2_RPM_SORT_USER0_MC_EN;
3949 REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
3950 REG_WR(bp, BNX2_RPM_SORT_USER0, val);
3951 REG_WR(bp, BNX2_RPM_SORT_USER0, val |
3952 BNX2_RPM_SORT_USER0_ENA);
3954 /* Need to enable EMAC and RPM for WOL. */
3955 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
3956 BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
3957 BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
3958 BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);
3960 val = REG_RD(bp, BNX2_RPM_CONFIG);
3961 val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
3962 REG_WR(bp, BNX2_RPM_CONFIG, val);
3964 wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
3966 else {
3967 wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
3970 if (!(bp->flags & BNX2_FLAG_NO_WOL))
3971 bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg,
3972 1, 0);
3974 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
3975 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
3976 (CHIP_ID(bp) == CHIP_ID_5706_A1)) {
3978 if (bp->wol)
3979 pmcsr |= 3;
3981 else {
3982 pmcsr |= 3;
3984 if (bp->wol) {
3985 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
3987 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
3988 pmcsr);
3990 /* No more memory access after this point until
3991 * device is brought back to D0.
3992 */
3993 udelay(50);
3994 break;
3996 default:
3997 return -EINVAL;
3999 return 0;
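/* Summary of the D3hot path above: with WOL enabled, the MAC keeps
 * the EMAC and RPM blocks powered, accepts broadcast and all
 * multicast plus the station address, and arms PME via PCI_PM_CTRL
 * before the chip sleeps; without WOL only the power-state bits are
 * written. The 5706 A0/A1 special case leaves the chip in D0 unless
 * WOL is armed.
 */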
4002 static int
4003 bnx2_acquire_nvram_lock(struct bnx2 *bp)
4005 u32 val;
4006 int j;
4008 /* Request access to the flash interface. */
4009 REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
4010 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4011 val = REG_RD(bp, BNX2_NVM_SW_ARB);
4012 if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
4013 break;
4015 udelay(5);
4018 if (j >= NVRAM_TIMEOUT_COUNT)
4019 return -EBUSY;
4021 return 0;
4024 static int
4025 bnx2_release_nvram_lock(struct bnx2 *bp)
4027 int j;
4028 u32 val;
4030 /* Relinquish nvram interface. */
4031 REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);
4033 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4034 val = REG_RD(bp, BNX2_NVM_SW_ARB);
4035 if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
4036 break;
4038 udelay(5);
4041 if (j >= NVRAM_TIMEOUT_COUNT)
4042 return -EBUSY;
4044 return 0;
4048 static int
4049 bnx2_enable_nvram_write(struct bnx2 *bp)
4051 u32 val;
4053 val = REG_RD(bp, BNX2_MISC_CFG);
4054 REG_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);
4056 if (bp->flash_info->flags & BNX2_NV_WREN) {
4057 int j;
4059 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4060 REG_WR(bp, BNX2_NVM_COMMAND,
4061 BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);
4063 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4064 udelay(5);
4066 val = REG_RD(bp, BNX2_NVM_COMMAND);
4067 if (val & BNX2_NVM_COMMAND_DONE)
4068 break;
4071 if (j >= NVRAM_TIMEOUT_COUNT)
4072 return -EBUSY;
4074 return 0;
4077 static void
4078 bnx2_disable_nvram_write(struct bnx2 *bp)
4080 u32 val;
4082 val = REG_RD(bp, BNX2_MISC_CFG);
4083 REG_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
4087 static void
4088 bnx2_enable_nvram_access(struct bnx2 *bp)
4090 u32 val;
4092 val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
4093 /* Enable both bits, even on read. */
4094 REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
4095 val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
4098 static void
4099 bnx2_disable_nvram_access(struct bnx2 *bp)
4101 u32 val;
4103 val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
4104 /* Disable both bits, even after read. */
4105 REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
4106 val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
4107 BNX2_NVM_ACCESS_ENABLE_WR_EN));
4110 static int
4111 bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
4113 u32 cmd;
4114 int j;
4116 if (bp->flash_info->flags & BNX2_NV_BUFFERED)
4117 /* Buffered flash, no erase needed */
4118 return 0;
4120 /* Build an erase command */
4121 cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
4122 BNX2_NVM_COMMAND_DOIT;
4124 /* Need to clear DONE bit separately. */
4125 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4127 /* Address of the NVRAM sector to erase. */
4128 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
4130 /* Issue an erase command. */
4131 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
4133 /* Wait for completion. */
4134 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4135 u32 val;
4137 udelay(5);
4139 val = REG_RD(bp, BNX2_NVM_COMMAND);
4140 if (val & BNX2_NVM_COMMAND_DONE)
4141 break;
4144 if (j >= NVRAM_TIMEOUT_COUNT)
4145 return -EBUSY;
4147 return 0;
4150 static int
4151 bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
4153 u32 cmd;
4154 int j;
4156 /* Build the command word. */
4157 cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;
4159 /* Calculate the page-translated offset for buffered flash; not needed on the 5709. */
4160 if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
4161 offset = ((offset / bp->flash_info->page_size) <<
4162 bp->flash_info->page_bits) +
4163 (offset % bp->flash_info->page_size);
4166 /* Need to clear DONE bit separately. */
4167 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4169 /* Address of the NVRAM to read from. */
4170 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
4172 /* Issue a read command. */
4173 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
4175 /* Wait for completion. */
4176 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4177 u32 val;
4179 udelay(5);
4181 val = REG_RD(bp, BNX2_NVM_COMMAND);
4182 if (val & BNX2_NVM_COMMAND_DONE) {
4183 __be32 v = cpu_to_be32(REG_RD(bp, BNX2_NVM_READ));
4184 memcpy(ret_val, &v, 4);
4185 break;
4188 if (j >= NVRAM_TIMEOUT_COUNT)
4189 return -EBUSY;
4191 return 0;
4195 static int
4196 bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
4198 u32 cmd;
4199 __be32 val32;
4200 int j;
4202 /* Build the command word. */
4203 cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;
4205 /* Calculate the page-translated offset for buffered flash; not needed on the 5709. */
4206 if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
4207 offset = ((offset / bp->flash_info->page_size) <<
4208 bp->flash_info->page_bits) +
4209 (offset % bp->flash_info->page_size);
4212 /* Need to clear DONE bit separately. */
4213 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4215 memcpy(&val32, val, 4);
4217 /* Write the data. */
4218 REG_WR(bp, BNX2_NVM_WRITE, be32_to_cpu(val32));
4220 /* Address of the NVRAM to write to. */
4221 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
4223 /* Issue the write command. */
4224 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
4226 /* Wait for completion. */
4227 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4228 udelay(5);
4230 if (REG_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
4231 break;
4233 if (j >= NVRAM_TIMEOUT_COUNT)
4234 return -EBUSY;
4236 return 0;
4239 static int
4240 bnx2_init_nvram(struct bnx2 *bp)
4242 u32 val;
4243 int j, entry_count, rc = 0;
4244 const struct flash_spec *flash;
4246 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4247 bp->flash_info = &flash_5709;
4248 goto get_flash_size;
4251 /* Determine the selected interface. */
4252 val = REG_RD(bp, BNX2_NVM_CFG1);
4254 entry_count = ARRAY_SIZE(flash_table);
4256 if (val & 0x40000000) {
4258 /* Flash interface has been reconfigured */
4259 for (j = 0, flash = &flash_table[0]; j < entry_count;
4260 j++, flash++) {
4261 if ((val & FLASH_BACKUP_STRAP_MASK) ==
4262 (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
4263 bp->flash_info = flash;
4264 break;
4268 else {
4269 u32 mask;
4270 /* Not yet reconfigured */
4272 if (val & (1 << 23))
4273 mask = FLASH_BACKUP_STRAP_MASK;
4274 else
4275 mask = FLASH_STRAP_MASK;
4277 for (j = 0, flash = &flash_table[0]; j < entry_count;
4278 j++, flash++) {
4280 if ((val & mask) == (flash->strapping & mask)) {
4281 bp->flash_info = flash;
4283 /* Request access to the flash interface. */
4284 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
4285 return rc;
4287 /* Enable access to flash interface */
4288 bnx2_enable_nvram_access(bp);
4290 /* Reconfigure the flash interface */
4291 REG_WR(bp, BNX2_NVM_CFG1, flash->config1);
4292 REG_WR(bp, BNX2_NVM_CFG2, flash->config2);
4293 REG_WR(bp, BNX2_NVM_CFG3, flash->config3);
4294 REG_WR(bp, BNX2_NVM_WRITE1, flash->write1);
4296 /* Disable access to flash interface */
4297 bnx2_disable_nvram_access(bp);
4298 bnx2_release_nvram_lock(bp);
4300 break;
4303 } /* if (val & 0x40000000) */
4305 if (j == entry_count) {
4306 bp->flash_info = NULL;
4307 pr_alert("Unknown flash/EEPROM type\n");
4308 return -ENODEV;
4311 get_flash_size:
4312 val = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG2);
4313 val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
4314 if (val)
4315 bp->flash_size = val;
4316 else
4317 bp->flash_size = bp->flash_info->total_size;
4319 return rc;
4322 static int
4323 bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
4324 int buf_size)
4326 int rc = 0;
4327 u32 cmd_flags, offset32, len32, extra;
4329 if (buf_size == 0)
4330 return 0;
4332 /* Request access to the flash interface. */
4333 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
4334 return rc;
4336 /* Enable access to flash interface */
4337 bnx2_enable_nvram_access(bp);
4339 len32 = buf_size;
4340 offset32 = offset;
4341 extra = 0;
4343 cmd_flags = 0;
4345 if (offset32 & 3) {
4346 u8 buf[4];
4347 u32 pre_len;
4349 offset32 &= ~3;
4350 pre_len = 4 - (offset & 3);
4352 if (pre_len >= len32) {
4353 pre_len = len32;
4354 cmd_flags = BNX2_NVM_COMMAND_FIRST |
4355 BNX2_NVM_COMMAND_LAST;
4357 else {
4358 cmd_flags = BNX2_NVM_COMMAND_FIRST;
4361 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4363 if (rc)
4364 return rc;
4366 memcpy(ret_buf, buf + (offset & 3), pre_len);
4368 offset32 += 4;
4369 ret_buf += pre_len;
4370 len32 -= pre_len;
4372 if (len32 & 3) {
4373 extra = 4 - (len32 & 3);
4374 len32 = (len32 + 4) & ~3;
4377 if (len32 == 4) {
4378 u8 buf[4];
4380 if (cmd_flags)
4381 cmd_flags = BNX2_NVM_COMMAND_LAST;
4382 else
4383 cmd_flags = BNX2_NVM_COMMAND_FIRST |
4384 BNX2_NVM_COMMAND_LAST;
4386 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4388 memcpy(ret_buf, buf, 4 - extra);
4390 else if (len32 > 0) {
4391 u8 buf[4];
4393 /* Read the first dword. */
4394 if (cmd_flags)
4395 cmd_flags = 0;
4396 else
4397 cmd_flags = BNX2_NVM_COMMAND_FIRST;
4399 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);
4401 /* Advance to the next dword. */
4402 offset32 += 4;
4403 ret_buf += 4;
4404 len32 -= 4;
4406 while (len32 > 4 && rc == 0) {
4407 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);
4409 /* Advance to the next dword. */
4410 offset32 += 4;
4411 ret_buf += 4;
4412 len32 -= 4;
4415 if (rc)
4416 return rc;
4418 cmd_flags = BNX2_NVM_COMMAND_LAST;
4419 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4421 memcpy(ret_buf, buf, 4 - extra);
4424 /* Disable access to flash interface */
4425 bnx2_disable_nvram_access(bp);
4427 bnx2_release_nvram_lock(bp);
4429 return rc;
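/* The write path below is read-modify-write at two granularities:
 * unaligned head/tail bytes are merged with dwords read back from
 * flash into a kmalloc'ed bounce buffer, and on non-buffered parts
 * each affected page is read out in full, erased, and rewritten so
 * that bytes outside the caller's range are preserved. */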
4432 static int
4433 bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
4434 int buf_size)
4436 u32 written, offset32, len32;
4437 u8 *buf, start[4], end[4], *align_buf = NULL, *flash_buffer = NULL;
4438 int rc = 0;
4439 int align_start, align_end;
4441 buf = data_buf;
4442 offset32 = offset;
4443 len32 = buf_size;
4444 align_start = align_end = 0;
4446 if ((align_start = (offset32 & 3))) {
4447 offset32 &= ~3;
4448 len32 += align_start;
4449 if (len32 < 4)
4450 len32 = 4;
4451 if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
4452 return rc;
4455 if (len32 & 3) {
4456 align_end = 4 - (len32 & 3);
4457 len32 += align_end;
4458 if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4, end, 4)))
4459 return rc;
4462 if (align_start || align_end) {
4463 align_buf = kmalloc(len32, GFP_KERNEL);
4464 if (align_buf == NULL)
4465 return -ENOMEM;
4466 if (align_start) {
4467 memcpy(align_buf, start, 4);
4469 if (align_end) {
4470 memcpy(align_buf + len32 - 4, end, 4);
4472 memcpy(align_buf + align_start, data_buf, buf_size);
4473 buf = align_buf;
4476 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4477 flash_buffer = kmalloc(264, GFP_KERNEL);
4478 if (flash_buffer == NULL) {
4479 rc = -ENOMEM;
4480 goto nvram_write_end;
4484 written = 0;
4485 while ((written < len32) && (rc == 0)) {
4486 u32 page_start, page_end, data_start, data_end;
4487 u32 addr, cmd_flags;
4488 int i;
4490 /* Find the page_start addr */
4491 page_start = offset32 + written;
4492 page_start -= (page_start % bp->flash_info->page_size);
4493 /* Find the page_end addr */
4494 page_end = page_start + bp->flash_info->page_size;
4495 /* Find the data_start addr */
4496 data_start = (written == 0) ? offset32 : page_start;
4497 /* Find the data_end addr */
4498 data_end = (page_end > offset32 + len32) ?
4499 (offset32 + len32) : page_end;
4501 /* Request access to the flash interface. */
4502 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
4503 goto nvram_write_end;
4505 /* Enable access to flash interface */
4506 bnx2_enable_nvram_access(bp);
4508 cmd_flags = BNX2_NVM_COMMAND_FIRST;
4509 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4510 int j;
4512 /* Read the whole page into the buffer
4513 * (non-buffered flash only) */
4514 for (j = 0; j < bp->flash_info->page_size; j += 4) {
4515 if (j == (bp->flash_info->page_size - 4)) {
4516 cmd_flags |= BNX2_NVM_COMMAND_LAST;
4518 rc = bnx2_nvram_read_dword(bp,
4519 page_start + j,
4520 &flash_buffer[j],
4521 cmd_flags);
4523 if (rc)
4524 goto nvram_write_end;
4526 cmd_flags = 0;
4530 /* Enable writes to flash interface (unlock write-protect) */
4531 if ((rc = bnx2_enable_nvram_write(bp)) != 0)
4532 goto nvram_write_end;
4534 /* Loop to write back the buffer data from page_start to
4535 * data_start */
4536 i = 0;
4537 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4538 /* Erase the page */
4539 if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
4540 goto nvram_write_end;
4542 /* Re-enable writes for the actual write */
4543 bnx2_enable_nvram_write(bp);
4545 for (addr = page_start; addr < data_start;
4546 addr += 4, i += 4) {
4548 rc = bnx2_nvram_write_dword(bp, addr,
4549 &flash_buffer[i], cmd_flags);
4551 if (rc != 0)
4552 goto nvram_write_end;
4554 cmd_flags = 0;
4558 /* Loop to write the new data from data_start to data_end */
4559 for (addr = data_start; addr < data_end; addr += 4, i += 4) {
4560 if ((addr == page_end - 4) ||
4561 ((bp->flash_info->flags & BNX2_NV_BUFFERED) &&
4562 (addr == data_end - 4))) {
4564 cmd_flags |= BNX2_NVM_COMMAND_LAST;
4566 rc = bnx2_nvram_write_dword(bp, addr, buf,
4567 cmd_flags);
4569 if (rc != 0)
4570 goto nvram_write_end;
4572 cmd_flags = 0;
4573 buf += 4;
4576 /* Loop to write back the buffer data from data_end
4577 * to page_end */
4578 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4579 for (addr = data_end; addr < page_end;
4580 addr += 4, i += 4) {
4582 if (addr == page_end-4) {
4583 cmd_flags = BNX2_NVM_COMMAND_LAST;
4585 rc = bnx2_nvram_write_dword(bp, addr,
4586 &flash_buffer[i], cmd_flags);
4588 if (rc != 0)
4589 goto nvram_write_end;
4591 cmd_flags = 0;
4595 /* Disable writes to flash interface (lock write-protect) */
4596 bnx2_disable_nvram_write(bp);
4598 /* Disable access to flash interface */
4599 bnx2_disable_nvram_access(bp);
4600 bnx2_release_nvram_lock(bp);
4602 /* Increment written */
4603 written += data_end - data_start;
4606 nvram_write_end:
4607 kfree(flash_buffer);
4608 kfree(align_buf);
4609 return rc;
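/* Firmware capability handshake: read BNX2_FW_CAP_MB from shared
 * memory, bail out unless the signature matches, then acknowledge
 * each capability the driver will use (VLAN keep, remote PHY) by
 * writing the ack signature back to BNX2_DRV_ACK_CAP_MB. */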
4612 static void
4613 bnx2_init_fw_cap(struct bnx2 *bp)
4615 u32 val, sig = 0;
4617 bp->phy_flags &= ~BNX2_PHY_FLAG_REMOTE_PHY_CAP;
4618 bp->flags &= ~BNX2_FLAG_CAN_KEEP_VLAN;
4620 if (!(bp->flags & BNX2_FLAG_ASF_ENABLE))
4621 bp->flags |= BNX2_FLAG_CAN_KEEP_VLAN;
4623 val = bnx2_shmem_rd(bp, BNX2_FW_CAP_MB);
4624 if ((val & BNX2_FW_CAP_SIGNATURE_MASK) != BNX2_FW_CAP_SIGNATURE)
4625 return;
4627 if ((val & BNX2_FW_CAP_CAN_KEEP_VLAN) == BNX2_FW_CAP_CAN_KEEP_VLAN) {
4628 bp->flags |= BNX2_FLAG_CAN_KEEP_VLAN;
4629 sig |= BNX2_DRV_ACK_CAP_SIGNATURE | BNX2_FW_CAP_CAN_KEEP_VLAN;
4632 if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
4633 (val & BNX2_FW_CAP_REMOTE_PHY_CAPABLE)) {
4634 u32 link;
4636 bp->phy_flags |= BNX2_PHY_FLAG_REMOTE_PHY_CAP;
4638 link = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);
4639 if (link & BNX2_LINK_STATUS_SERDES_LINK)
4640 bp->phy_port = PORT_FIBRE;
4641 else
4642 bp->phy_port = PORT_TP;
4644 sig |= BNX2_DRV_ACK_CAP_SIGNATURE |
4645 BNX2_FW_CAP_REMOTE_PHY_CAPABLE;
4648 if (netif_running(bp->dev) && sig)
4649 bnx2_shmem_wr(bp, BNX2_DRV_ACK_CAP_MB, sig);
4652 static void
4653 bnx2_setup_msix_tbl(struct bnx2 *bp)
4655 REG_WR(bp, BNX2_PCI_GRC_WINDOW_ADDR, BNX2_PCI_GRC_WINDOW_ADDR_SEP_WIN);
4657 REG_WR(bp, BNX2_PCI_GRC_WINDOW2_ADDR, BNX2_MSIX_TABLE_ADDR);
4658 REG_WR(bp, BNX2_PCI_GRC_WINDOW3_ADDR, BNX2_MSIX_PBA_ADDR);
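/* The MSI-X table and PBA are reached through the chip's GRC
 * windows: after selecting separate-window mode, the writes above
 * point window 2 at the table and window 3 at the PBA. */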
4661 static int
4662 bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
4664 u32 val;
4665 int i, rc = 0;
4666 u8 old_port;
4668 /* Wait for the current PCI transaction to complete before
4669 * issuing a reset. */
4670 REG_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
4671 BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
4672 BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
4673 BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
4674 BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
4675 val = REG_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
4676 udelay(5);
4678 /* Wait for the firmware to tell us it is ok to issue a reset. */
4679 bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1, 1);
4681 /* Deposit a driver reset signature so the firmware knows that
4682 * this is a soft reset. */
4683 bnx2_shmem_wr(bp, BNX2_DRV_RESET_SIGNATURE,
4684 BNX2_DRV_RESET_SIGNATURE_MAGIC);
4686 /* Do a dummy read to force the chip to complete all current transactions
4687 * before we issue a reset. */
4688 val = REG_RD(bp, BNX2_MISC_ID);
4690 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4691 REG_WR(bp, BNX2_MISC_COMMAND, BNX2_MISC_COMMAND_SW_RESET);
4692 REG_RD(bp, BNX2_MISC_COMMAND);
4693 udelay(5);
4695 val = BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
4696 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
4698 pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, val);
4700 } else {
4701 val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
4702 BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
4703 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
4705 /* Chip reset. */
4706 REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);
4708 /* Reading back any register after chip reset will hang the
4709 * bus on 5706 A0 and A1. The msleep below provides plenty
4710 * of margin for write posting. */
4712 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
4713 (CHIP_ID(bp) == CHIP_ID_5706_A1))
4714 msleep(20);
4716 /* Reset takes approximately 30 usec */
4717 for (i = 0; i < 10; i++) {
4718 val = REG_RD(bp, BNX2_PCICFG_MISC_CONFIG);
4719 if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
4720 BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0)
4721 break;
4722 udelay(10);
4725 if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
4726 BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
4727 pr_err("Chip reset did not complete\n");
4728 return -EBUSY;
4732 /* Make sure byte swapping is properly configured. */
4733 val = REG_RD(bp, BNX2_PCI_SWAP_DIAG0);
4734 if (val != 0x01020304) {
4735 pr_err("Chip not in correct endian mode\n");
4736 return -ENODEV;
4739 /* Wait for the firmware to finish its initialization. */
4740 rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 1, 0);
4741 if (rc)
4742 return rc;
4744 spin_lock_bh(&bp->phy_lock);
4745 old_port = bp->phy_port;
4746 bnx2_init_fw_cap(bp);
4747 if ((bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) &&
4748 old_port != bp->phy_port)
4749 bnx2_set_default_remote_link(bp);
4750 spin_unlock_bh(&bp->phy_lock);
4752 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
4753 /* Adjust the voltage regulator to two steps lower. The default
4754 * of this register is 0x0000000e. */
4755 REG_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);
4757 /* Remove bad rbuf memory from the free pool. */
4758 rc = bnx2_alloc_bad_rbuf(bp);
4761 if (bp->flags & BNX2_FLAG_USING_MSIX)
4762 bnx2_setup_msix_tbl(bp);
4764 return rc;
4767 static int
4768 bnx2_init_chip(struct bnx2 *bp)
4770 u32 val, mtu;
4771 int rc, i;
4773 /* Make sure the interrupt is not active. */
4774 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
4776 val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
4777 BNX2_DMA_CONFIG_DATA_WORD_SWAP |
4778 #ifdef __BIG_ENDIAN
4779 BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
4780 #endif
4781 BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
4782 DMA_READ_CHANS << 12 |
4783 DMA_WRITE_CHANS << 16;
4785 val |= (0x2 << 20) | (1 << 11);
4787 if ((bp->flags & BNX2_FLAG_PCIX) && (bp->bus_speed_mhz == 133))
4788 val |= (1 << 23);
4790 if ((CHIP_NUM(bp) == CHIP_NUM_5706) &&
4791 (CHIP_ID(bp) != CHIP_ID_5706_A0) && !(bp->flags & BNX2_FLAG_PCIX))
4792 val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;
4794 REG_WR(bp, BNX2_DMA_CONFIG, val);
4796 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
4797 val = REG_RD(bp, BNX2_TDMA_CONFIG);
4798 val |= BNX2_TDMA_CONFIG_ONE_DMA;
4799 REG_WR(bp, BNX2_TDMA_CONFIG, val);
4802 if (bp->flags & BNX2_FLAG_PCIX) {
4803 u16 val16;
4805 pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
4806 &val16);
4807 pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
4808 val16 & ~PCI_X_CMD_ERO);
4811 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
4812 BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
4813 BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
4814 BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);
4816 /* Initialize context mapping and zero out the quick contexts. The
4817 * context block must have already been enabled. */
4818 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4819 rc = bnx2_init_5709_context(bp);
4820 if (rc)
4821 return rc;
4822 } else
4823 bnx2_init_context(bp);
4825 if ((rc = bnx2_init_cpus(bp)) != 0)
4826 return rc;
4828 bnx2_init_nvram(bp);
4830 bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
4832 val = REG_RD(bp, BNX2_MQ_CONFIG);
4833 val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
4834 val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
4835 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4836 val |= BNX2_MQ_CONFIG_BIN_MQ_MODE;
4837 if (CHIP_REV(bp) == CHIP_REV_Ax)
4838 val |= BNX2_MQ_CONFIG_HALT_DIS;
4841 REG_WR(bp, BNX2_MQ_CONFIG, val);
4843 val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
4844 REG_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
4845 REG_WR(bp, BNX2_MQ_KNL_WIND_END, val);
4847 val = (BCM_PAGE_BITS - 8) << 24;
4848 REG_WR(bp, BNX2_RV2P_CONFIG, val);
4850 /* Configure page size. */
4851 val = REG_RD(bp, BNX2_TBDR_CONFIG);
4852 val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
4853 val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
4854 REG_WR(bp, BNX2_TBDR_CONFIG, val);
4856 val = bp->mac_addr[0] +
4857 (bp->mac_addr[1] << 8) +
4858 (bp->mac_addr[2] << 16) +
4859 bp->mac_addr[3] +
4860 (bp->mac_addr[4] << 8) +
4861 (bp->mac_addr[5] << 16);
4862 REG_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);
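/* The backoff seed folds the 6-byte MAC address into 24 bits: bytes
 * 0-2 and 3-5 are each packed LSB-first and summed. For example, MAC
 * 00:10:18:01:02:03 gives 0x181000 + 0x030201 = 0x1b1201. */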
4864 /* Program the MTU. Also include 4 bytes for CRC32. */
4865 mtu = bp->dev->mtu;
4866 val = mtu + ETH_HLEN + ETH_FCS_LEN;
4867 if (val > (MAX_ETHERNET_PACKET_SIZE + 4))
4868 val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
4869 REG_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);
4871 if (mtu < 1500)
4872 mtu = 1500;
4874 bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG, BNX2_RBUF_CONFIG_VAL(mtu));
4875 bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG2, BNX2_RBUF_CONFIG2_VAL(mtu));
4876 bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG3, BNX2_RBUF_CONFIG3_VAL(mtu));
4878 memset(bp->bnx2_napi[0].status_blk.msi, 0, bp->status_stats_size);
4879 for (i = 0; i < BNX2_MAX_MSIX_VEC; i++)
4880 bp->bnx2_napi[i].last_status_idx = 0;
4882 bp->idle_chk_status_idx = 0xffff;
4884 bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE;
4886 /* Set up how to generate a link change interrupt. */
4887 REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
4889 REG_WR(bp, BNX2_HC_STATUS_ADDR_L,
4890 (u64) bp->status_blk_mapping & 0xffffffff);
4891 REG_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);
4893 REG_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
4894 (u64) bp->stats_blk_mapping & 0xffffffff);
4895 REG_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
4896 (u64) bp->stats_blk_mapping >> 32);
4898 REG_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
4899 (bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);
4901 REG_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
4902 (bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);
4904 REG_WR(bp, BNX2_HC_COMP_PROD_TRIP,
4905 (bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);
4907 REG_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);
4909 REG_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);
4911 REG_WR(bp, BNX2_HC_COM_TICKS,
4912 (bp->com_ticks_int << 16) | bp->com_ticks);
4914 REG_WR(bp, BNX2_HC_CMD_TICKS,
4915 (bp->cmd_ticks_int << 16) | bp->cmd_ticks);
4917 if (bp->flags & BNX2_FLAG_BROKEN_STATS)
4918 REG_WR(bp, BNX2_HC_STATS_TICKS, 0);
4919 else
4920 REG_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks);
4921 REG_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8); /* 3ms */
4923 if (CHIP_ID(bp) == CHIP_ID_5706_A1)
4924 val = BNX2_HC_CONFIG_COLLECT_STATS;
4925 else {
4926 val = BNX2_HC_CONFIG_RX_TMR_MODE | BNX2_HC_CONFIG_TX_TMR_MODE |
4927 BNX2_HC_CONFIG_COLLECT_STATS;
4930 if (bp->flags & BNX2_FLAG_USING_MSIX) {
4931 REG_WR(bp, BNX2_HC_MSIX_BIT_VECTOR,
4932 BNX2_HC_MSIX_BIT_VECTOR_VAL);
4934 val |= BNX2_HC_CONFIG_SB_ADDR_INC_128B;
4937 if (bp->flags & BNX2_FLAG_ONE_SHOT_MSI)
4938 val |= BNX2_HC_CONFIG_ONE_SHOT | BNX2_HC_CONFIG_USE_INT_PARAM;
4940 REG_WR(bp, BNX2_HC_CONFIG, val);
4942 for (i = 1; i < bp->irq_nvecs; i++) {
4943 u32 base = ((i - 1) * BNX2_HC_SB_CONFIG_SIZE) +
4944 BNX2_HC_SB_CONFIG_1;
4946 REG_WR(bp, base,
4947 BNX2_HC_SB_CONFIG_1_TX_TMR_MODE |
4948 BNX2_HC_SB_CONFIG_1_RX_TMR_MODE |
4949 BNX2_HC_SB_CONFIG_1_ONE_SHOT);
4951 REG_WR(bp, base + BNX2_HC_TX_QUICK_CONS_TRIP_OFF,
4952 (bp->tx_quick_cons_trip_int << 16) |
4953 bp->tx_quick_cons_trip);
4955 REG_WR(bp, base + BNX2_HC_TX_TICKS_OFF,
4956 (bp->tx_ticks_int << 16) | bp->tx_ticks);
4958 REG_WR(bp, base + BNX2_HC_RX_QUICK_CONS_TRIP_OFF,
4959 (bp->rx_quick_cons_trip_int << 16) |
4960 bp->rx_quick_cons_trip);
4962 REG_WR(bp, base + BNX2_HC_RX_TICKS_OFF,
4963 (bp->rx_ticks_int << 16) | bp->rx_ticks);
4966 /* Clear internal stats counters. */
4967 REG_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);
4969 REG_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_EVENTS);
4971 /* Initialize the receive filter. */
4972 bnx2_set_rx_mode(bp->dev);
4974 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4975 val = REG_RD(bp, BNX2_MISC_NEW_CORE_CTL);
4976 val |= BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
4977 REG_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
4979 rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
4980 1, 0);
4982 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, BNX2_MISC_ENABLE_DEFAULT);
4983 REG_RD(bp, BNX2_MISC_ENABLE_SET_BITS);
4985 udelay(20);
4987 bp->hc_cmd = REG_RD(bp, BNX2_HC_COMMAND);
4989 return rc;
4992 static void
4993 bnx2_clear_ring_states(struct bnx2 *bp)
4995 struct bnx2_napi *bnapi;
4996 struct bnx2_tx_ring_info *txr;
4997 struct bnx2_rx_ring_info *rxr;
4998 int i;
5000 for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
5001 bnapi = &bp->bnx2_napi[i];
5002 txr = &bnapi->tx_ring;
5003 rxr = &bnapi->rx_ring;
5005 txr->tx_cons = 0;
5006 txr->hw_tx_cons = 0;
5007 rxr->rx_prod_bseq = 0;
5008 rxr->rx_prod = 0;
5009 rxr->rx_cons = 0;
5010 rxr->rx_pg_prod = 0;
5011 rxr->rx_pg_cons = 0;
5015 static void
5016 bnx2_init_tx_context(struct bnx2 *bp, u32 cid, struct bnx2_tx_ring_info *txr)
5018 u32 val, offset0, offset1, offset2, offset3;
5019 u32 cid_addr = GET_CID_ADDR(cid);
5021 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
5022 offset0 = BNX2_L2CTX_TYPE_XI;
5023 offset1 = BNX2_L2CTX_CMD_TYPE_XI;
5024 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
5025 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
5026 } else {
5027 offset0 = BNX2_L2CTX_TYPE;
5028 offset1 = BNX2_L2CTX_CMD_TYPE;
5029 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
5030 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
5032 val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
5033 bnx2_ctx_wr(bp, cid_addr, offset0, val);
5035 val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
5036 bnx2_ctx_wr(bp, cid_addr, offset1, val);
5038 val = (u64) txr->tx_desc_mapping >> 32;
5039 bnx2_ctx_wr(bp, cid_addr, offset2, val);
5041 val = (u64) txr->tx_desc_mapping & 0xffffffff;
5042 bnx2_ctx_wr(bp, cid_addr, offset3, val);
5045 static void
5046 bnx2_init_tx_ring(struct bnx2 *bp, int ring_num)
5048 struct tx_bd *txbd;
5049 u32 cid = TX_CID;
5050 struct bnx2_napi *bnapi;
5051 struct bnx2_tx_ring_info *txr;
5053 bnapi = &bp->bnx2_napi[ring_num];
5054 txr = &bnapi->tx_ring;
5056 if (ring_num == 0)
5057 cid = TX_CID;
5058 else
5059 cid = TX_TSS_CID + ring_num - 1;
5061 bp->tx_wake_thresh = bp->tx_ring_size / 2;
5063 txbd = &txr->tx_desc_ring[MAX_TX_DESC_CNT];
5065 txbd->tx_bd_haddr_hi = (u64) txr->tx_desc_mapping >> 32;
5066 txbd->tx_bd_haddr_lo = (u64) txr->tx_desc_mapping & 0xffffffff;
5068 txr->tx_prod = 0;
5069 txr->tx_prod_bseq = 0;
5071 txr->tx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BIDX;
5072 txr->tx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BSEQ;
5074 bnx2_init_tx_context(bp, cid, txr);
5077 static void
5078 bnx2_init_rxbd_rings(struct rx_bd *rx_ring[], dma_addr_t dma[], u32 buf_size,
5079 int num_rings)
5081 int i;
5082 struct rx_bd *rxbd;
5084 for (i = 0; i < num_rings; i++) {
5085 int j;
5087 rxbd = &rx_ring[i][0];
5088 for (j = 0; j < MAX_RX_DESC_CNT; j++, rxbd++) {
5089 rxbd->rx_bd_len = buf_size;
5090 rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
5092 if (i == (num_rings - 1))
5093 j = 0;
5094 else
5095 j = i + 1;
5096 rxbd->rx_bd_haddr_hi = (u64) dma[j] >> 32;
5097 rxbd->rx_bd_haddr_lo = (u64) dma[j] & 0xffffffff;
5101 static void
5102 bnx2_init_rx_ring(struct bnx2 *bp, int ring_num)
5104 int i;
5105 u16 prod, ring_prod;
5106 u32 cid, rx_cid_addr, val;
5107 struct bnx2_napi *bnapi = &bp->bnx2_napi[ring_num];
5108 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
5110 if (ring_num == 0)
5111 cid = RX_CID;
5112 else
5113 cid = RX_RSS_CID + ring_num - 1;
5115 rx_cid_addr = GET_CID_ADDR(cid);
5117 bnx2_init_rxbd_rings(rxr->rx_desc_ring, rxr->rx_desc_mapping,
5118 bp->rx_buf_use_size, bp->rx_max_ring);
5120 bnx2_init_rx_context(bp, cid);
5122 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
5123 val = REG_RD(bp, BNX2_MQ_MAP_L2_5);
5124 REG_WR(bp, BNX2_MQ_MAP_L2_5, val | BNX2_MQ_MAP_L2_5_ARM);
5127 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, 0);
5128 if (bp->rx_pg_ring_size) {
5129 bnx2_init_rxbd_rings(rxr->rx_pg_desc_ring,
5130 rxr->rx_pg_desc_mapping,
5131 PAGE_SIZE, bp->rx_max_pg_ring);
5132 val = (bp->rx_buf_use_size << 16) | PAGE_SIZE;
5133 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, val);
5134 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_RBDC_KEY,
5135 BNX2_L2CTX_RBDC_JUMBO_KEY - ring_num);
5137 val = (u64) rxr->rx_pg_desc_mapping[0] >> 32;
5138 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_HI, val);
5140 val = (u64) rxr->rx_pg_desc_mapping[0] & 0xffffffff;
5141 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_LO, val);
5143 if (CHIP_NUM(bp) == CHIP_NUM_5709)
5144 REG_WR(bp, BNX2_MQ_MAP_L2_3, BNX2_MQ_MAP_L2_3_DEFAULT);
5147 val = (u64) rxr->rx_desc_mapping[0] >> 32;
5148 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val);
5150 val = (u64) rxr->rx_desc_mapping[0] & 0xffffffff;
5151 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val);
5153 ring_prod = prod = rxr->rx_pg_prod;
5154 for (i = 0; i < bp->rx_pg_ring_size; i++) {
5155 if (bnx2_alloc_rx_page(bp, rxr, ring_prod) < 0) {
5156 netdev_warn(bp->dev, "init'ed rx page ring %d with %d/%d pages only\n",
5157 ring_num, i, bp->rx_pg_ring_size);
5158 break;
5160 prod = NEXT_RX_BD(prod);
5161 ring_prod = RX_PG_RING_IDX(prod);
5163 rxr->rx_pg_prod = prod;
5165 ring_prod = prod = rxr->rx_prod;
5166 for (i = 0; i < bp->rx_ring_size; i++) {
5167 if (bnx2_alloc_rx_skb(bp, rxr, ring_prod) < 0) {
5168 netdev_warn(bp->dev, "init'ed rx ring %d with %d/%d skbs only\n",
5169 ring_num, i, bp->rx_ring_size);
5170 break;
5172 prod = NEXT_RX_BD(prod);
5173 ring_prod = RX_RING_IDX(prod);
5175 rxr->rx_prod = prod;
5177 rxr->rx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BDIDX;
5178 rxr->rx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BSEQ;
5179 rxr->rx_pg_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_PG_BDIDX;
5181 REG_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);
5182 REG_WR16(bp, rxr->rx_bidx_addr, prod);
5184 REG_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);
5187 static void
5188 bnx2_init_all_rings(struct bnx2 *bp)
5190 int i;
5191 u32 val;
5193 bnx2_clear_ring_states(bp);
5195 REG_WR(bp, BNX2_TSCH_TSS_CFG, 0);
5196 for (i = 0; i < bp->num_tx_rings; i++)
5197 bnx2_init_tx_ring(bp, i);
5199 if (bp->num_tx_rings > 1)
5200 REG_WR(bp, BNX2_TSCH_TSS_CFG, ((bp->num_tx_rings - 1) << 24) |
5201 (TX_TSS_CID << 7));
5203 REG_WR(bp, BNX2_RLUP_RSS_CONFIG, 0);
5204 bnx2_reg_wr_ind(bp, BNX2_RXP_SCRATCH_RSS_TBL_SZ, 0);
5206 for (i = 0; i < bp->num_rx_rings; i++)
5207 bnx2_init_rx_ring(bp, i);
5209 if (bp->num_rx_rings > 1) {
5210 u32 tbl_32;
5211 u8 *tbl = (u8 *) &tbl_32;
5213 bnx2_reg_wr_ind(bp, BNX2_RXP_SCRATCH_RSS_TBL_SZ,
5214 BNX2_RXP_SCRATCH_RSS_TBL_MAX_ENTRIES);
5216 for (i = 0; i < BNX2_RXP_SCRATCH_RSS_TBL_MAX_ENTRIES; i++) {
5217 tbl[i % 4] = i % (bp->num_rx_rings - 1);
5218 if ((i % 4) == 3)
5219 bnx2_reg_wr_ind(bp,
5220 BNX2_RXP_SCRATCH_RSS_TBL + i,
5221 cpu_to_be32(tbl_32));
5224 val = BNX2_RLUP_RSS_CONFIG_IPV4_RSS_TYPE_ALL_XI |
5225 BNX2_RLUP_RSS_CONFIG_IPV6_RSS_TYPE_ALL_XI;
5227 REG_WR(bp, BNX2_RLUP_RSS_CONFIG, val);
5232 static u32 bnx2_find_max_ring(u32 ring_size, u32 max_size)
5234 u32 max, num_rings = 1;
5236 while (ring_size > MAX_RX_DESC_CNT) {
5237 ring_size -= MAX_RX_DESC_CNT;
5238 num_rings++;
5240 /* round num_rings up to the next power of 2, capped at max_size */
5241 max = max_size;
5242 while ((max & num_rings) == 0)
5243 max >>= 1;
5245 if (num_rings != max)
5246 max <<= 1;
5248 return max;
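/* For illustration: a requested ring_size of 600 with a 255-entry
 * MAX_RX_DESC_CNT needs num_rings = 3, which the loop above rounds
 * up to the next power of 2 (4), clamped to max_size. These numbers
 * are illustrative; the real constants come from bnx2.h. */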
5251 static void
5252 bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
5254 u32 rx_size, rx_space, jumbo_size;
5256 /* 8 for CRC and VLAN */
5257 rx_size = bp->dev->mtu + ETH_HLEN + BNX2_RX_OFFSET + 8;
5259 rx_space = SKB_DATA_ALIGN(rx_size + BNX2_RX_ALIGN) + NET_SKB_PAD +
5260 sizeof(struct skb_shared_info);
5262 bp->rx_copy_thresh = BNX2_RX_COPY_THRESH;
5263 bp->rx_pg_ring_size = 0;
5264 bp->rx_max_pg_ring = 0;
5265 bp->rx_max_pg_ring_idx = 0;
5266 if ((rx_space > PAGE_SIZE) && !(bp->flags & BNX2_FLAG_JUMBO_BROKEN)) {
5267 int pages = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;
5269 jumbo_size = size * pages;
5270 if (jumbo_size > MAX_TOTAL_RX_PG_DESC_CNT)
5271 jumbo_size = MAX_TOTAL_RX_PG_DESC_CNT;
5273 bp->rx_pg_ring_size = jumbo_size;
5274 bp->rx_max_pg_ring = bnx2_find_max_ring(jumbo_size,
5275 MAX_RX_PG_RINGS);
5276 bp->rx_max_pg_ring_idx = (bp->rx_max_pg_ring * RX_DESC_CNT) - 1;
5277 rx_size = BNX2_RX_COPY_THRESH + BNX2_RX_OFFSET;
5278 bp->rx_copy_thresh = 0;
5281 bp->rx_buf_use_size = rx_size;
5282 /* hw alignment */
5283 bp->rx_buf_size = bp->rx_buf_use_size + BNX2_RX_ALIGN;
5284 bp->rx_jumbo_thresh = rx_size - BNX2_RX_OFFSET;
5285 bp->rx_ring_size = size;
5286 bp->rx_max_ring = bnx2_find_max_ring(size, MAX_RX_RINGS);
5287 bp->rx_max_ring_idx = (bp->rx_max_ring * RX_DESC_CNT) - 1;
5290 static void
5291 bnx2_free_tx_skbs(struct bnx2 *bp)
5293 int i;
5295 for (i = 0; i < bp->num_tx_rings; i++) {
5296 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
5297 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
5298 int j;
5300 if (txr->tx_buf_ring == NULL)
5301 continue;
5303 for (j = 0; j < TX_DESC_CNT; ) {
5304 struct sw_tx_bd *tx_buf = &txr->tx_buf_ring[j];
5305 struct sk_buff *skb = tx_buf->skb;
5306 int k, last;
5308 if (skb == NULL) {
5309 j++;
5310 continue;
5313 pci_unmap_single(bp->pdev,
5314 dma_unmap_addr(tx_buf, mapping),
5315 skb_headlen(skb),
5316 PCI_DMA_TODEVICE);
5318 tx_buf->skb = NULL;
5320 last = tx_buf->nr_frags;
5321 j++;
5322 for (k = 0; k < last; k++, j++) {
5323 tx_buf = &txr->tx_buf_ring[TX_RING_IDX(j)];
5324 pci_unmap_page(bp->pdev,
5325 dma_unmap_addr(tx_buf, mapping),
5326 skb_shinfo(skb)->frags[k].size,
5327 PCI_DMA_TODEVICE);
5329 dev_kfree_skb(skb);
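/* Note the split introduced by the dma state API conversion: the
 * mappings above are still created and torn down with the
 * pci_(un)map_* calls, but the saved DMA address now comes from the
 * generic dma_unmap_addr() accessor rather than pci_unmap_addr(). */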
5334 static void
5335 bnx2_free_rx_skbs(struct bnx2 *bp)
5337 int i;
5339 for (i = 0; i < bp->num_rx_rings; i++) {
5340 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
5341 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
5342 int j;
5344 if (rxr->rx_buf_ring == NULL)
5345 return;
5347 for (j = 0; j < bp->rx_max_ring_idx; j++) {
5348 struct sw_bd *rx_buf = &rxr->rx_buf_ring[j];
5349 struct sk_buff *skb = rx_buf->skb;
5351 if (skb == NULL)
5352 continue;
5354 pci_unmap_single(bp->pdev,
5355 dma_unmap_addr(rx_buf, mapping),
5356 bp->rx_buf_use_size,
5357 PCI_DMA_FROMDEVICE);
5359 rx_buf->skb = NULL;
5361 dev_kfree_skb(skb);
5363 for (j = 0; j < bp->rx_max_pg_ring_idx; j++)
5364 bnx2_free_rx_page(bp, rxr, j);
5368 static void
5369 bnx2_free_skbs(struct bnx2 *bp)
5371 bnx2_free_tx_skbs(bp);
5372 bnx2_free_rx_skbs(bp);
5375 static int
5376 bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
5378 int rc;
5380 rc = bnx2_reset_chip(bp, reset_code);
5381 bnx2_free_skbs(bp);
5382 if (rc)
5383 return rc;
5385 if ((rc = bnx2_init_chip(bp)) != 0)
5386 return rc;
5388 bnx2_init_all_rings(bp);
5389 return 0;
5392 static int
5393 bnx2_init_nic(struct bnx2 *bp, int reset_phy)
5395 int rc;
5397 if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
5398 return rc;
5400 spin_lock_bh(&bp->phy_lock);
5401 bnx2_init_phy(bp, reset_phy);
5402 bnx2_set_link(bp);
5403 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
5404 bnx2_remote_phy_event(bp);
5405 spin_unlock_bh(&bp->phy_lock);
5406 return 0;
5409 static int
5410 bnx2_shutdown_chip(struct bnx2 *bp)
5412 u32 reset_code;
5414 if (bp->flags & BNX2_FLAG_NO_WOL)
5415 reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
5416 else if (bp->wol)
5417 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
5418 else
5419 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
5421 return bnx2_reset_chip(bp, reset_code);
5424 static int
5425 bnx2_test_registers(struct bnx2 *bp)
5427 int ret;
5428 int i, is_5709;
5429 static const struct {
5430 u16 offset;
5431 u16 flags;
5432 #define BNX2_FL_NOT_5709 1
5433 u32 rw_mask;
5434 u32 ro_mask;
5435 } reg_tbl[] = {
5436 { 0x006c, 0, 0x00000000, 0x0000003f },
5437 { 0x0090, 0, 0xffffffff, 0x00000000 },
5438 { 0x0094, 0, 0x00000000, 0x00000000 },
5440 { 0x0404, BNX2_FL_NOT_5709, 0x00003f00, 0x00000000 },
5441 { 0x0418, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5442 { 0x041c, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5443 { 0x0420, BNX2_FL_NOT_5709, 0x00000000, 0x80ffffff },
5444 { 0x0424, BNX2_FL_NOT_5709, 0x00000000, 0x00000000 },
5445 { 0x0428, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
5446 { 0x0450, BNX2_FL_NOT_5709, 0x00000000, 0x0000ffff },
5447 { 0x0454, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5448 { 0x0458, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5450 { 0x0808, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5451 { 0x0854, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5452 { 0x0868, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
5453 { 0x086c, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
5454 { 0x0870, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
5455 { 0x0874, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
5457 { 0x0c00, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
5458 { 0x0c04, BNX2_FL_NOT_5709, 0x00000000, 0x03ff0001 },
5459 { 0x0c08, BNX2_FL_NOT_5709, 0x0f0ff073, 0x00000000 },
5461 { 0x1000, 0, 0x00000000, 0x00000001 },
5462 { 0x1004, BNX2_FL_NOT_5709, 0x00000000, 0x000f0001 },
5464 { 0x1408, 0, 0x01c00800, 0x00000000 },
5465 { 0x149c, 0, 0x8000ffff, 0x00000000 },
5466 { 0x14a8, 0, 0x00000000, 0x000001ff },
5467 { 0x14ac, 0, 0x0fffffff, 0x10000000 },
5468 { 0x14b0, 0, 0x00000002, 0x00000001 },
5469 { 0x14b8, 0, 0x00000000, 0x00000000 },
5470 { 0x14c0, 0, 0x00000000, 0x00000009 },
5471 { 0x14c4, 0, 0x00003fff, 0x00000000 },
5472 { 0x14cc, 0, 0x00000000, 0x00000001 },
5473 { 0x14d0, 0, 0xffffffff, 0x00000000 },
5475 { 0x1800, 0, 0x00000000, 0x00000001 },
5476 { 0x1804, 0, 0x00000000, 0x00000003 },
5478 { 0x2800, 0, 0x00000000, 0x00000001 },
5479 { 0x2804, 0, 0x00000000, 0x00003f01 },
5480 { 0x2808, 0, 0x0f3f3f03, 0x00000000 },
5481 { 0x2810, 0, 0xffff0000, 0x00000000 },
5482 { 0x2814, 0, 0xffff0000, 0x00000000 },
5483 { 0x2818, 0, 0xffff0000, 0x00000000 },
5484 { 0x281c, 0, 0xffff0000, 0x00000000 },
5485 { 0x2834, 0, 0xffffffff, 0x00000000 },
5486 { 0x2840, 0, 0x00000000, 0xffffffff },
5487 { 0x2844, 0, 0x00000000, 0xffffffff },
5488 { 0x2848, 0, 0xffffffff, 0x00000000 },
5489 { 0x284c, 0, 0xf800f800, 0x07ff07ff },
5491 { 0x2c00, 0, 0x00000000, 0x00000011 },
5492 { 0x2c04, 0, 0x00000000, 0x00030007 },
5494 { 0x3c00, 0, 0x00000000, 0x00000001 },
5495 { 0x3c04, 0, 0x00000000, 0x00070000 },
5496 { 0x3c08, 0, 0x00007f71, 0x07f00000 },
5497 { 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
5498 { 0x3c10, 0, 0xffffffff, 0x00000000 },
5499 { 0x3c14, 0, 0x00000000, 0xffffffff },
5500 { 0x3c18, 0, 0x00000000, 0xffffffff },
5501 { 0x3c1c, 0, 0xfffff000, 0x00000000 },
5502 { 0x3c20, 0, 0xffffff00, 0x00000000 },
5504 { 0x5004, 0, 0x00000000, 0x0000007f },
5505 { 0x5008, 0, 0x0f0007ff, 0x00000000 },
5507 { 0x5c00, 0, 0x00000000, 0x00000001 },
5508 { 0x5c04, 0, 0x00000000, 0x0003000f },
5509 { 0x5c08, 0, 0x00000003, 0x00000000 },
5510 { 0x5c0c, 0, 0x0000fff8, 0x00000000 },
5511 { 0x5c10, 0, 0x00000000, 0xffffffff },
5512 { 0x5c80, 0, 0x00000000, 0x0f7113f1 },
5513 { 0x5c84, 0, 0x00000000, 0x0000f333 },
5514 { 0x5c88, 0, 0x00000000, 0x00077373 },
5515 { 0x5c8c, 0, 0x00000000, 0x0007f737 },
5517 { 0x6808, 0, 0x0000ff7f, 0x00000000 },
5518 { 0x680c, 0, 0xffffffff, 0x00000000 },
5519 { 0x6810, 0, 0xffffffff, 0x00000000 },
5520 { 0x6814, 0, 0xffffffff, 0x00000000 },
5521 { 0x6818, 0, 0xffffffff, 0x00000000 },
5522 { 0x681c, 0, 0xffffffff, 0x00000000 },
5523 { 0x6820, 0, 0x00ff00ff, 0x00000000 },
5524 { 0x6824, 0, 0x00ff00ff, 0x00000000 },
5525 { 0x6828, 0, 0x00ff00ff, 0x00000000 },
5526 { 0x682c, 0, 0x03ff03ff, 0x00000000 },
5527 { 0x6830, 0, 0x03ff03ff, 0x00000000 },
5528 { 0x6834, 0, 0x03ff03ff, 0x00000000 },
5529 { 0x6838, 0, 0x03ff03ff, 0x00000000 },
5530 { 0x683c, 0, 0x0000ffff, 0x00000000 },
5531 { 0x6840, 0, 0x00000ff0, 0x00000000 },
5532 { 0x6844, 0, 0x00ffff00, 0x00000000 },
5533 { 0x684c, 0, 0xffffffff, 0x00000000 },
5534 { 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
5535 { 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
5536 { 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
5537 { 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
5538 { 0x6908, 0, 0x00000000, 0x0001ff0f },
5539 { 0x690c, 0, 0x00000000, 0x0ffe00f0 },
5541 { 0xffff, 0, 0x00000000, 0x00000000 },
5544 ret = 0;
5545 is_5709 = 0;
5546 if (CHIP_NUM(bp) == CHIP_NUM_5709)
5547 is_5709 = 1;
5549 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
5550 u32 offset, rw_mask, ro_mask, save_val, val;
5551 u16 flags = reg_tbl[i].flags;
5553 if (is_5709 && (flags & BNX2_FL_NOT_5709))
5554 continue;
5556 offset = (u32) reg_tbl[i].offset;
5557 rw_mask = reg_tbl[i].rw_mask;
5558 ro_mask = reg_tbl[i].ro_mask;
5560 save_val = readl(bp->regview + offset);
5562 writel(0, bp->regview + offset);
5564 val = readl(bp->regview + offset);
5565 if ((val & rw_mask) != 0) {
5566 goto reg_test_err;
5569 if ((val & ro_mask) != (save_val & ro_mask)) {
5570 goto reg_test_err;
5573 writel(0xffffffff, bp->regview + offset);
5575 val = readl(bp->regview + offset);
5576 if ((val & rw_mask) != rw_mask) {
5577 goto reg_test_err;
5580 if ((val & ro_mask) != (save_val & ro_mask)) {
5581 goto reg_test_err;
5584 writel(save_val, bp->regview + offset);
5585 continue;
5587 reg_test_err:
5588 writel(save_val, bp->regview + offset);
5589 ret = -ENODEV;
5590 break;
5592 return ret;
5595 static int
5596 bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
5598 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
5599 0xaaaaaaaa , 0xaa55aa55, 0x55aa55aa };
5600 int i;
5602 for (i = 0; i < sizeof(test_pattern) / 4; i++) {
5603 u32 offset;
5605 for (offset = 0; offset < size; offset += 4) {
5607 bnx2_reg_wr_ind(bp, start + offset, test_pattern[i]);
5609 if (bnx2_reg_rd_ind(bp, start + offset) !=
5610 test_pattern[i]) {
5611 return -ENODEV;
5615 return 0;
5618 static int
5619 bnx2_test_memory(struct bnx2 *bp)
5621 int ret = 0;
5622 int i;
5623 static struct mem_entry {
5624 u32 offset;
5625 u32 len;
5626 } mem_tbl_5706[] = {
5627 { 0x60000, 0x4000 },
5628 { 0xa0000, 0x3000 },
5629 { 0xe0000, 0x4000 },
5630 { 0x120000, 0x4000 },
5631 { 0x1a0000, 0x4000 },
5632 { 0x160000, 0x4000 },
5633 { 0xffffffff, 0 },
5635 mem_tbl_5709[] = {
5636 { 0x60000, 0x4000 },
5637 { 0xa0000, 0x3000 },
5638 { 0xe0000, 0x4000 },
5639 { 0x120000, 0x4000 },
5640 { 0x1a0000, 0x4000 },
5641 { 0xffffffff, 0 },
5643 struct mem_entry *mem_tbl;
5645 if (CHIP_NUM(bp) == CHIP_NUM_5709)
5646 mem_tbl = mem_tbl_5709;
5647 else
5648 mem_tbl = mem_tbl_5706;
5650 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
5651 if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
5652 mem_tbl[i].len)) != 0) {
5653 return ret;
5657 return ret;
5660 #define BNX2_MAC_LOOPBACK 0
5661 #define BNX2_PHY_LOOPBACK 1
5663 static int
5664 bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
5666 unsigned int pkt_size, num_pkts, i;
5667 struct sk_buff *skb, *rx_skb;
5668 unsigned char *packet;
5669 u16 rx_start_idx, rx_idx;
5670 dma_addr_t map;
5671 struct tx_bd *txbd;
5672 struct sw_bd *rx_buf;
5673 struct l2_fhdr *rx_hdr;
5674 int ret = -ENODEV;
5675 struct bnx2_napi *bnapi = &bp->bnx2_napi[0], *tx_napi;
5676 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
5677 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
5679 tx_napi = bnapi;
5681 txr = &tx_napi->tx_ring;
5682 rxr = &bnapi->rx_ring;
5683 if (loopback_mode == BNX2_MAC_LOOPBACK) {
5684 bp->loopback = MAC_LOOPBACK;
5685 bnx2_set_mac_loopback(bp);
5687 else if (loopback_mode == BNX2_PHY_LOOPBACK) {
5688 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
5689 return 0;
5691 bp->loopback = PHY_LOOPBACK;
5692 bnx2_set_phy_loopback(bp);
5694 else
5695 return -EINVAL;
5697 pkt_size = min(bp->dev->mtu + ETH_HLEN, bp->rx_jumbo_thresh - 4);
5698 skb = netdev_alloc_skb(bp->dev, pkt_size);
5699 if (!skb)
5700 return -ENOMEM;
5701 packet = skb_put(skb, pkt_size);
5702 memcpy(packet, bp->dev->dev_addr, 6);
5703 memset(packet + 6, 0x0, 8);
5704 for (i = 14; i < pkt_size; i++)
5705 packet[i] = (unsigned char) (i & 0xff);
5707 map = pci_map_single(bp->pdev, skb->data, pkt_size,
5708 PCI_DMA_TODEVICE);
5709 if (pci_dma_mapping_error(bp->pdev, map)) {
5710 dev_kfree_skb(skb);
5711 return -EIO;
5714 REG_WR(bp, BNX2_HC_COMMAND,
5715 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
5717 REG_RD(bp, BNX2_HC_COMMAND);
5719 udelay(5);
5720 rx_start_idx = bnx2_get_hw_rx_cons(bnapi);
5722 num_pkts = 0;
5724 txbd = &txr->tx_desc_ring[TX_RING_IDX(txr->tx_prod)];
5726 txbd->tx_bd_haddr_hi = (u64) map >> 32;
5727 txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
5728 txbd->tx_bd_mss_nbytes = pkt_size;
5729 txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;
5731 num_pkts++;
5732 txr->tx_prod = NEXT_TX_BD(txr->tx_prod);
5733 txr->tx_prod_bseq += pkt_size;
5735 REG_WR16(bp, txr->tx_bidx_addr, txr->tx_prod);
5736 REG_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);
5738 udelay(100);
5740 REG_WR(bp, BNX2_HC_COMMAND,
5741 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
5743 REG_RD(bp, BNX2_HC_COMMAND);
5745 udelay(5);
5747 pci_unmap_single(bp->pdev, map, pkt_size, PCI_DMA_TODEVICE);
5748 dev_kfree_skb(skb);
5750 if (bnx2_get_hw_tx_cons(tx_napi) != txr->tx_prod)
5751 goto loopback_test_done;
5753 rx_idx = bnx2_get_hw_rx_cons(bnapi);
5754 if (rx_idx != rx_start_idx + num_pkts) {
5755 goto loopback_test_done;
5758 rx_buf = &rxr->rx_buf_ring[rx_start_idx];
5759 rx_skb = rx_buf->skb;
5761 rx_hdr = (struct l2_fhdr *) rx_skb->data;
5762 skb_reserve(rx_skb, BNX2_RX_OFFSET);
5764 pci_dma_sync_single_for_cpu(bp->pdev,
5765 dma_unmap_addr(rx_buf, mapping),
5766 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
5768 if (rx_hdr->l2_fhdr_status &
5769 (L2_FHDR_ERRORS_BAD_CRC |
5770 L2_FHDR_ERRORS_PHY_DECODE |
5771 L2_FHDR_ERRORS_ALIGNMENT |
5772 L2_FHDR_ERRORS_TOO_SHORT |
5773 L2_FHDR_ERRORS_GIANT_FRAME)) {
5775 goto loopback_test_done;
5778 if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) {
5779 goto loopback_test_done;
5782 for (i = 14; i < pkt_size; i++) {
5783 if (*(rx_skb->data + i) != (unsigned char) (i & 0xff)) {
5784 goto loopback_test_done;
5788 ret = 0;
5790 loopback_test_done:
5791 bp->loopback = 0;
5792 return ret;
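/* The loopback self-test above is self-contained: build a test frame
 * addressed to ourselves, post a single tx_bd, force an immediate
 * coalesce with COAL_NOW_WO_INT, then verify that exactly one packet
 * arrived with a clean l2_fhdr status and an intact payload. */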
5795 #define BNX2_MAC_LOOPBACK_FAILED 1
5796 #define BNX2_PHY_LOOPBACK_FAILED 2
5797 #define BNX2_LOOPBACK_FAILED (BNX2_MAC_LOOPBACK_FAILED | \
5798 BNX2_PHY_LOOPBACK_FAILED)
5800 static int
5801 bnx2_test_loopback(struct bnx2 *bp)
5803 int rc = 0;
5805 if (!netif_running(bp->dev))
5806 return BNX2_LOOPBACK_FAILED;
5808 bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
5809 spin_lock_bh(&bp->phy_lock);
5810 bnx2_init_phy(bp, 1);
5811 spin_unlock_bh(&bp->phy_lock);
5812 if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
5813 rc |= BNX2_MAC_LOOPBACK_FAILED;
5814 if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
5815 rc |= BNX2_PHY_LOOPBACK_FAILED;
5816 return rc;
5819 #define NVRAM_SIZE 0x200
5820 #define CRC32_RESIDUAL 0xdebb20e3
5822 static int
5823 bnx2_test_nvram(struct bnx2 *bp)
5825 __be32 buf[NVRAM_SIZE / 4];
5826 u8 *data = (u8 *) buf;
5827 int rc = 0;
5828 u32 magic, csum;
5830 if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
5831 goto test_nvram_done;
5833 magic = be32_to_cpu(buf[0]);
5834 if (magic != 0x669955aa) {
5835 rc = -ENODEV;
5836 goto test_nvram_done;
5839 if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0)
5840 goto test_nvram_done;
5842 csum = ether_crc_le(0x100, data);
5843 if (csum != CRC32_RESIDUAL) {
5844 rc = -ENODEV;
5845 goto test_nvram_done;
5848 csum = ether_crc_le(0x100, data + 0x100);
5849 if (csum != CRC32_RESIDUAL) {
5850 rc = -ENODEV;
5853 test_nvram_done:
5854 return rc;
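/* Both checksum tests above rely on the standard CRC32 property that
 * running the CRC over a block together with its stored CRC yields
 * the fixed residual 0xdebb20e3 (CRC32_RESIDUAL), so no expected
 * value needs to be parsed out of NVRAM. */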
5857 static int
5858 bnx2_test_link(struct bnx2 *bp)
5860 u32 bmsr;
5862 if (!netif_running(bp->dev))
5863 return -ENODEV;
5865 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
5866 if (bp->link_up)
5867 return 0;
5868 return -ENODEV;
5870 spin_lock_bh(&bp->phy_lock);
5871 bnx2_enable_bmsr1(bp);
5872 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
5873 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
5874 bnx2_disable_bmsr1(bp);
5875 spin_unlock_bh(&bp->phy_lock);
5877 if (bmsr & BMSR_LSTATUS) {
5878 return 0;
5880 return -ENODEV;
5883 static int
5884 bnx2_test_intr(struct bnx2 *bp)
5886 int i;
5887 u16 status_idx;
5889 if (!netif_running(bp->dev))
5890 return -ENODEV;
5892 status_idx = REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;
5894 /* This register is not touched during run-time. */
5895 REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
5896 REG_RD(bp, BNX2_HC_COMMAND);
5898 for (i = 0; i < 10; i++) {
5899 if ((REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
5900 status_idx) {
5902 break;
5905 msleep_interruptible(10);
5907 if (i < 10)
5908 return 0;
5910 return -ENODEV;
5913 /* Determining link for parallel detection. */
5914 static int
5915 bnx2_5706_serdes_has_link(struct bnx2 *bp)
5917 u32 mode_ctl, an_dbg, exp;
5919 if (bp->phy_flags & BNX2_PHY_FLAG_NO_PARALLEL)
5920 return 0;
5922 bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_MODE_CTL);
5923 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &mode_ctl);
5925 if (!(mode_ctl & MISC_SHDW_MODE_CTL_SIG_DET))
5926 return 0;
5928 bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
5929 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
5930 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
5932 if (an_dbg & (MISC_SHDW_AN_DBG_NOSYNC | MISC_SHDW_AN_DBG_RUDI_INVALID))
5933 return 0;
5935 bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_REG1);
5936 bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);
5937 bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);
5939 if (exp & MII_EXPAND_REG1_RUDI_C) /* receiving CONFIG */
5940 return 0;
5942 return 1;
5945 static void
5946 bnx2_5706_serdes_timer(struct bnx2 *bp)
5948 int check_link = 1;
5950 spin_lock(&bp->phy_lock);
5951 if (bp->serdes_an_pending) {
5952 bp->serdes_an_pending--;
5953 check_link = 0;
5954 } else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
5955 u32 bmcr;
5957 bp->current_interval = BNX2_TIMER_INTERVAL;
5959 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
5961 if (bmcr & BMCR_ANENABLE) {
5962 if (bnx2_5706_serdes_has_link(bp)) {
5963 bmcr &= ~BMCR_ANENABLE;
5964 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
5965 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
5966 bp->phy_flags |= BNX2_PHY_FLAG_PARALLEL_DETECT;
5970 else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
5971 (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)) {
5972 u32 phy2;
5974 bnx2_write_phy(bp, 0x17, 0x0f01);
5975 bnx2_read_phy(bp, 0x15, &phy2);
5976 if (phy2 & 0x20) {
5977 u32 bmcr;
5979 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
5980 bmcr |= BMCR_ANENABLE;
5981 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
5983 bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
5985 } else
5986 bp->current_interval = BNX2_TIMER_INTERVAL;
5988 if (check_link) {
5989 u32 val;
5991 bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
5992 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);
5993 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);
5995 if (bp->link_up && (val & MISC_SHDW_AN_DBG_NOSYNC)) {
5996 if (!(bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN)) {
5997 bnx2_5706s_force_link_dn(bp, 1);
5998 bp->phy_flags |= BNX2_PHY_FLAG_FORCED_DOWN;
5999 } else
6000 bnx2_set_link(bp);
6001 } else if (!bp->link_up && !(val & MISC_SHDW_AN_DBG_NOSYNC))
6002 bnx2_set_link(bp);
6004 spin_unlock(&bp->phy_lock);
6007 static void
6008 bnx2_5708_serdes_timer(struct bnx2 *bp)
6010 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
6011 return;
6013 if ((bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) == 0) {
6014 bp->serdes_an_pending = 0;
6015 return;
6018 spin_lock(&bp->phy_lock);
6019 if (bp->serdes_an_pending)
6020 bp->serdes_an_pending--;
6021 else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
6022 u32 bmcr;
6024 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
6025 if (bmcr & BMCR_ANENABLE) {
6026 bnx2_enable_forced_2g5(bp);
6027 bp->current_interval = BNX2_SERDES_FORCED_TIMEOUT;
6028 } else {
6029 bnx2_disable_forced_2g5(bp);
6030 bp->serdes_an_pending = 2;
6031 bp->current_interval = BNX2_TIMER_INTERVAL;
6034 } else
6035 bp->current_interval = BNX2_TIMER_INTERVAL;
6037 spin_unlock(&bp->phy_lock);
6040 static void
6041 bnx2_timer(unsigned long data)
6043 struct bnx2 *bp = (struct bnx2 *) data;
6045 if (!netif_running(bp->dev))
6046 return;
6048 if (atomic_read(&bp->intr_sem) != 0)
6049 goto bnx2_restart_timer;
6051 if ((bp->flags & (BNX2_FLAG_USING_MSI | BNX2_FLAG_ONE_SHOT_MSI)) ==
6052 BNX2_FLAG_USING_MSI)
6053 bnx2_chk_missed_msi(bp);
6055 bnx2_send_heart_beat(bp);
6057 bp->stats_blk->stat_FwRxDrop =
6058 bnx2_reg_rd_ind(bp, BNX2_FW_RX_DROP_COUNT);
6060 /* work around occasionally corrupted counters */
6061 if ((bp->flags & BNX2_FLAG_BROKEN_STATS) && bp->stats_ticks)
6062 REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd |
6063 BNX2_HC_COMMAND_STATS_NOW);
6065 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
6066 if (CHIP_NUM(bp) == CHIP_NUM_5706)
6067 bnx2_5706_serdes_timer(bp);
6068 else
6069 bnx2_5708_serdes_timer(bp);
6072 bnx2_restart_timer:
6073 mod_timer(&bp->timer, jiffies + bp->current_interval);
6076 static int
6077 bnx2_request_irq(struct bnx2 *bp)
6079 unsigned long flags;
6080 struct bnx2_irq *irq;
6081 int rc = 0, i;
6083 if (bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)
6084 flags = 0;
6085 else
6086 flags = IRQF_SHARED;
6088 for (i = 0; i < bp->irq_nvecs; i++) {
6089 irq = &bp->irq_tbl[i];
6090 rc = request_irq(irq->vector, irq->handler, flags, irq->name,
6091 &bp->bnx2_napi[i]);
6092 if (rc)
6093 break;
6094 irq->requested = 1;
6096 return rc;
6099 static void
6100 bnx2_free_irq(struct bnx2 *bp)
6102 struct bnx2_irq *irq;
6103 int i;
6105 for (i = 0; i < bp->irq_nvecs; i++) {
6106 irq = &bp->irq_tbl[i];
6107 if (irq->requested)
6108 free_irq(irq->vector, &bp->bnx2_napi[i]);
6109 irq->requested = 0;
6111 if (bp->flags & BNX2_FLAG_USING_MSI)
6112 pci_disable_msi(bp->pdev);
6113 else if (bp->flags & BNX2_FLAG_USING_MSIX)
6114 pci_disable_msix(bp->pdev);
6116 bp->flags &= ~(BNX2_FLAG_USING_MSI_OR_MSIX | BNX2_FLAG_ONE_SHOT_MSI);
6119 static void
6120 bnx2_enable_msix(struct bnx2 *bp, int msix_vecs)
6122 int i, rc;
6123 struct msix_entry msix_ent[BNX2_MAX_MSIX_VEC];
6124 struct net_device *dev = bp->dev;
6125 const int len = sizeof(bp->irq_tbl[0].name);
6127 bnx2_setup_msix_tbl(bp);
6128 REG_WR(bp, BNX2_PCI_MSIX_CONTROL, BNX2_MAX_MSIX_HW_VEC - 1);
6129 REG_WR(bp, BNX2_PCI_MSIX_TBL_OFF_BIR, BNX2_PCI_GRC_WINDOW2_BASE);
6130 REG_WR(bp, BNX2_PCI_MSIX_PBA_OFF_BIT, BNX2_PCI_GRC_WINDOW3_BASE);
6132 /* Need to flush the previous three writes to ensure MSI-X
6133 * is set up properly */
6134 REG_RD(bp, BNX2_PCI_MSIX_CONTROL);
6136 for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
6137 msix_ent[i].entry = i;
6138 msix_ent[i].vector = 0;
6141 rc = pci_enable_msix(bp->pdev, msix_ent, BNX2_MAX_MSIX_VEC);
6142 if (rc != 0)
6143 return;
6145 bp->irq_nvecs = msix_vecs;
6146 bp->flags |= BNX2_FLAG_USING_MSIX | BNX2_FLAG_ONE_SHOT_MSI;
6147 for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
6148 bp->irq_tbl[i].vector = msix_ent[i].vector;
6149 snprintf(bp->irq_tbl[i].name, len, "%s-%d", dev->name, i);
6150 bp->irq_tbl[i].handler = bnx2_msi_1shot;
6154 static void
6155 bnx2_setup_int_mode(struct bnx2 *bp, int dis_msi)
6157 int cpus = num_online_cpus();
6158 int msix_vecs = min(cpus + 1, RX_MAX_RINGS);
6160 bp->irq_tbl[0].handler = bnx2_interrupt;
6161 strcpy(bp->irq_tbl[0].name, bp->dev->name);
6162 bp->irq_nvecs = 1;
6163 bp->irq_tbl[0].vector = bp->pdev->irq;
6165 if ((bp->flags & BNX2_FLAG_MSIX_CAP) && !dis_msi && cpus > 1)
6166 bnx2_enable_msix(bp, msix_vecs);
6168 if ((bp->flags & BNX2_FLAG_MSI_CAP) && !dis_msi &&
6169 !(bp->flags & BNX2_FLAG_USING_MSIX)) {
6170 if (pci_enable_msi(bp->pdev) == 0) {
6171 bp->flags |= BNX2_FLAG_USING_MSI;
6172 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
6173 bp->flags |= BNX2_FLAG_ONE_SHOT_MSI;
6174 bp->irq_tbl[0].handler = bnx2_msi_1shot;
6175 } else
6176 bp->irq_tbl[0].handler = bnx2_msi;
6178 bp->irq_tbl[0].vector = bp->pdev->irq;
6182 bp->num_tx_rings = rounddown_pow_of_two(bp->irq_nvecs);
6183 bp->dev->real_num_tx_queues = bp->num_tx_rings;
6185 bp->num_rx_rings = bp->irq_nvecs;
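/* Interrupt setup is a fallback ladder: try MSI-X (roughly one
 * vector per CPU, capped at the ring limit) when the chip is
 * MSIX-capable, else a single MSI, else the shared INTx handler
 * installed by default at the top of this function. The tx/rx ring
 * counts then follow irq_nvecs. */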
6188 /* Called with rtnl_lock */
6189 static int
6190 bnx2_open(struct net_device *dev)
6192 struct bnx2 *bp = netdev_priv(dev);
6193 int rc;
6195 netif_carrier_off(dev);
6197 bnx2_set_power_state(bp, PCI_D0);
6198 bnx2_disable_int(bp);
6200 bnx2_setup_int_mode(bp, disable_msi);
6201 bnx2_init_napi(bp);
6202 bnx2_napi_enable(bp);
6203 rc = bnx2_alloc_mem(bp);
6204 if (rc)
6205 goto open_err;
6207 rc = bnx2_request_irq(bp);
6208 if (rc)
6209 goto open_err;
6211 rc = bnx2_init_nic(bp, 1);
6212 if (rc)
6213 goto open_err;
6215 mod_timer(&bp->timer, jiffies + bp->current_interval);
6217 atomic_set(&bp->intr_sem, 0);
6219 memset(bp->temp_stats_blk, 0, sizeof(struct statistics_block));
6221 bnx2_enable_int(bp);
6223 if (bp->flags & BNX2_FLAG_USING_MSI) {
6224 /* Test MSI to make sure it is working
6225 * If the MSI test fails, go back to INTx mode. */
6227 if (bnx2_test_intr(bp) != 0) {
6228 netdev_warn(bp->dev, "No interrupt was generated using MSI, switching to INTx mode. Please report this failure to the PCI maintainer and include system chipset information.\n");
6230 bnx2_disable_int(bp);
6231 bnx2_free_irq(bp);
6233 bnx2_setup_int_mode(bp, 1);
6235 rc = bnx2_init_nic(bp, 0);
6237 if (!rc)
6238 rc = bnx2_request_irq(bp);
6240 if (rc) {
6241 del_timer_sync(&bp->timer);
6242 goto open_err;
6244 bnx2_enable_int(bp);
6247 if (bp->flags & BNX2_FLAG_USING_MSI)
6248 netdev_info(dev, "using MSI\n");
6249 else if (bp->flags & BNX2_FLAG_USING_MSIX)
6250 netdev_info(dev, "using MSIX\n");
6252 netif_tx_start_all_queues(dev);
6254 return 0;
6256 open_err:
6257 bnx2_napi_disable(bp);
6258 bnx2_free_skbs(bp);
6259 bnx2_free_irq(bp);
6260 bnx2_free_mem(bp);
6261 return rc;
6264 static void
6265 bnx2_reset_task(struct work_struct *work)
6267 struct bnx2 *bp = container_of(work, struct bnx2, reset_task);
6269 rtnl_lock();
6270 if (!netif_running(bp->dev)) {
6271 rtnl_unlock();
6272 return;
6275 bnx2_netif_stop(bp);
6277 bnx2_init_nic(bp, 1);
6279 atomic_set(&bp->intr_sem, 1);
6280 bnx2_netif_start(bp);
6281 rtnl_unlock();
6284 static void
6285 bnx2_dump_state(struct bnx2 *bp)
6287 struct net_device *dev = bp->dev;
6289 netdev_err(dev, "DEBUG: intr_sem[%x]\n", atomic_read(&bp->intr_sem));
6290 netdev_err(dev, "DEBUG: EMAC_TX_STATUS[%08x] RPM_MGMT_PKT_CTRL[%08x]\n",
6291 REG_RD(bp, BNX2_EMAC_TX_STATUS),
6292 REG_RD(bp, BNX2_RPM_MGMT_PKT_CTRL));
6293 netdev_err(dev, "DEBUG: MCP_STATE_P0[%08x] MCP_STATE_P1[%08x]\n",
6294 bnx2_reg_rd_ind(bp, BNX2_MCP_STATE_P0),
6295 bnx2_reg_rd_ind(bp, BNX2_MCP_STATE_P1));
6296 netdev_err(dev, "DEBUG: HC_STATS_INTERRUPT_STATUS[%08x]\n",
6297 REG_RD(bp, BNX2_HC_STATS_INTERRUPT_STATUS));
6298 if (bp->flags & BNX2_FLAG_USING_MSIX)
6299 netdev_err(dev, "DEBUG: PBA[%08x]\n",
6300 REG_RD(bp, BNX2_PCI_GRC_WINDOW3_BASE));
6303 static void
6304 bnx2_tx_timeout(struct net_device *dev)
6306 struct bnx2 *bp = netdev_priv(dev);
6308 bnx2_dump_state(bp);
6310 /* This allows the netif to be shut down gracefully before resetting */
6311 schedule_work(&bp->reset_task);
6314 #ifdef BCM_VLAN
6315 /* Called with rtnl_lock */
6316 static void
6317 bnx2_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp)
6319 struct bnx2 *bp = netdev_priv(dev);
6321 if (netif_running(dev))
6322 bnx2_netif_stop(bp);
6324 bp->vlgrp = vlgrp;
6326 if (!netif_running(dev))
6327 return;
6329 bnx2_set_rx_mode(dev);
6330 if (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN)
6331 bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_KEEP_VLAN_UPDATE, 0, 1);
6333 bnx2_netif_start(bp);
6335 #endif
6337 /* Called with netif_tx_lock.
6338 * bnx2_tx_int() runs without netif_tx_lock unless it needs to call
6339 * netif_wake_queue(). */
6341 static netdev_tx_t
6342 bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
6344 struct bnx2 *bp = netdev_priv(dev);
6345 dma_addr_t mapping;
6346 struct tx_bd *txbd;
6347 struct sw_tx_bd *tx_buf;
6348 u32 len, vlan_tag_flags, last_frag, mss;
6349 u16 prod, ring_prod;
6350 int i;
6351 struct bnx2_napi *bnapi;
6352 struct bnx2_tx_ring_info *txr;
6353 struct netdev_queue *txq;
6355 /* Determine which tx ring we will be placed on */
6356 i = skb_get_queue_mapping(skb);
6357 bnapi = &bp->bnx2_napi[i];
6358 txr = &bnapi->tx_ring;
6359 txq = netdev_get_tx_queue(dev, i);
6361 if (unlikely(bnx2_tx_avail(bp, txr) <
6362 (skb_shinfo(skb)->nr_frags + 1))) {
6363 netif_tx_stop_queue(txq);
6364 netdev_err(dev, "BUG! Tx ring full when queue awake!\n");
6366 return NETDEV_TX_BUSY;
6368 len = skb_headlen(skb);
6369 prod = txr->tx_prod;
6370 ring_prod = TX_RING_IDX(prod);
6372 vlan_tag_flags = 0;
6373 if (skb->ip_summed == CHECKSUM_PARTIAL) {
6374 vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
6377 #ifdef BCM_VLAN
6378 if (bp->vlgrp && vlan_tx_tag_present(skb)) {
6379 vlan_tag_flags |=
6380 (TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
6382 #endif
6383 if ((mss = skb_shinfo(skb)->gso_size)) {
6384 u32 tcp_opt_len;
6385 struct iphdr *iph;
6387 vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;
6389 tcp_opt_len = tcp_optlen(skb);
6391 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
6392 u32 tcp_off = skb_transport_offset(skb) -
6393 sizeof(struct ipv6hdr) - ETH_HLEN;
6395 vlan_tag_flags |= ((tcp_opt_len >> 2) << 8) |
6396 TX_BD_FLAGS_SW_FLAGS;
6397 if (likely(tcp_off == 0))
6398 vlan_tag_flags &= ~TX_BD_FLAGS_TCP6_OFF0_MSK;
6399 else {
6400 tcp_off >>= 3;
6401 vlan_tag_flags |= ((tcp_off & 0x3) <<
6402 TX_BD_FLAGS_TCP6_OFF0_SHL) |
6403 ((tcp_off & 0x10) <<
6404 TX_BD_FLAGS_TCP6_OFF4_SHL);
6405 mss |= (tcp_off & 0xc) << TX_BD_TCP6_OFF2_SHL;
6407 } else {
6408 iph = ip_hdr(skb);
6409 if (tcp_opt_len || (iph->ihl > 5)) {
6410 vlan_tag_flags |= ((iph->ihl - 5) +
6411 (tcp_opt_len >> 2)) << 8;
6414 } else
6415 mss = 0;
6417 mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE);
6418 if (pci_dma_mapping_error(bp->pdev, mapping)) {
6419 dev_kfree_skb(skb);
6420 return NETDEV_TX_OK;
6423 tx_buf = &txr->tx_buf_ring[ring_prod];
6424 tx_buf->skb = skb;
6425 dma_unmap_addr_set(tx_buf, mapping, mapping);
6427 txbd = &txr->tx_desc_ring[ring_prod];
6429 txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
6430 txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
6431 txbd->tx_bd_mss_nbytes = len | (mss << 16);
6432 txbd->tx_bd_vlan_tag_flags = vlan_tag_flags | TX_BD_FLAGS_START;
6434 last_frag = skb_shinfo(skb)->nr_frags;
6435 tx_buf->nr_frags = last_frag;
6436 tx_buf->is_gso = skb_is_gso(skb);
6438 for (i = 0; i < last_frag; i++) {
6439 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
6441 prod = NEXT_TX_BD(prod);
6442 ring_prod = TX_RING_IDX(prod);
6443 txbd = &txr->tx_desc_ring[ring_prod];
6445 len = frag->size;
6446 mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
6447 len, PCI_DMA_TODEVICE);
6448 if (pci_dma_mapping_error(bp->pdev, mapping))
6449 goto dma_error;
6450 dma_unmap_addr_set(&txr->tx_buf_ring[ring_prod], mapping,
6451 mapping);
6453 txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
6454 txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
6455 txbd->tx_bd_mss_nbytes = len | (mss << 16);
6456 txbd->tx_bd_vlan_tag_flags = vlan_tag_flags;
6459 txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;
6461 prod = NEXT_TX_BD(prod);
6462 txr->tx_prod_bseq += skb->len;
6464 REG_WR16(bp, txr->tx_bidx_addr, prod);
6465 REG_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);
6467 mmiowb();
6469 txr->tx_prod = prod;
6471 if (unlikely(bnx2_tx_avail(bp, txr) <= MAX_SKB_FRAGS)) {
6472 netif_tx_stop_queue(txq);
6473 if (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)
6474 netif_tx_wake_queue(txq);
6477 return NETDEV_TX_OK;
6478 dma_error:
6479 /* save the index of the frag that failed */
6480 last_frag = i;
6482 /* start back at beginning and unmap skb */
6483 prod = txr->tx_prod;
6484 ring_prod = TX_RING_IDX(prod);
6485 tx_buf = &txr->tx_buf_ring[ring_prod];
6486 tx_buf->skb = NULL;
6487 pci_unmap_single(bp->pdev, dma_unmap_addr(tx_buf, mapping),
6488 skb_headlen(skb), PCI_DMA_TODEVICE);
6490 /* unmap remaining mapped pages */
6491 for (i = 0; i < last_frag; i++) {
6492 prod = NEXT_TX_BD(prod);
6493 ring_prod = TX_RING_IDX(prod);
6494 tx_buf = &txr->tx_buf_ring[ring_prod];
6495 pci_unmap_page(bp->pdev, dma_unmap_addr(tx_buf, mapping),
6496 skb_shinfo(skb)->frags[i].size,
6497 PCI_DMA_TODEVICE);
6500 dev_kfree_skb(skb);
6501 return NETDEV_TX_OK;
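/* On a mapping failure mid-frame, the dma_error path above rewinds
 * from the ring head and unmaps the headlen plus every frag that did
 * map, using addresses recorded via dma_unmap_addr_set(), i.e. the
 * dma state API that replaces the old pci_unmap_addr_set() here. */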
6504 /* Called with rtnl_lock */
6505 static int
6506 bnx2_close(struct net_device *dev)
6508 struct bnx2 *bp = netdev_priv(dev);
6510 cancel_work_sync(&bp->reset_task);
6512 bnx2_disable_int_sync(bp);
6513 bnx2_napi_disable(bp);
6514 del_timer_sync(&bp->timer);
6515 bnx2_shutdown_chip(bp);
6516 bnx2_free_irq(bp);
6517 bnx2_free_skbs(bp);
6518 bnx2_free_mem(bp);
6519 bp->link_up = 0;
6520 netif_carrier_off(bp->dev);
6521 bnx2_set_power_state(bp, PCI_D3hot);
6522 return 0;
6525 static void
6526 bnx2_save_stats(struct bnx2 *bp)
6528 u32 *hw_stats = (u32 *) bp->stats_blk;
6529 u32 *temp_stats = (u32 *) bp->temp_stats_blk;
6530 int i;
6532 /* The first 10 counters are 64-bit, stored as hi/lo 32-bit pairs (20 words) */
6533 for (i = 0; i < 20; i += 2) {
6534 u32 hi;
6535 u64 lo;
6537 hi = temp_stats[i] + hw_stats[i];
6538 lo = (u64) temp_stats[i + 1] + (u64) hw_stats[i + 1];
6539 if (lo > 0xffffffff)
6540 hi++;
6541 temp_stats[i] = hi;
6542 temp_stats[i + 1] = lo & 0xffffffff;
6545 for ( ; i < sizeof(struct statistics_block) / 4; i++)
6546 temp_stats[i] += hw_stats[i];
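Each 64-bit hardware counter is laid out as a hi word followed by a lo word, so the accumulation must propagate the carry from the lo sum by hand. The same logic as a self-contained sketch (function and variable names are illustrative):

#include <stdint.h>
#include <stdio.h>

/* Accumulate one hi/lo counter pair, propagating the 32-bit carry. */
static void add_64bit_pair(uint32_t *acc_hi, uint32_t *acc_lo,
                           uint32_t hw_hi, uint32_t hw_lo)
{
        uint64_t lo = (uint64_t)*acc_lo + hw_lo;

        *acc_hi += hw_hi + (lo > 0xffffffffULL ? 1 : 0);
        *acc_lo = (uint32_t)lo;
}

int main(void)
{
        uint32_t hi = 0, lo = 0xfffffff0u;

        add_64bit_pair(&hi, &lo, 0, 0x20);              /* forces a carry */
        printf("%08x%08x\n", (unsigned)hi, (unsigned)lo); /* 0000000100000010 */
        return 0;
}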
6549 #define GET_64BIT_NET_STATS64(ctr) \
6550 (unsigned long) ((unsigned long) (ctr##_hi) << 32) + \
6551 (unsigned long) (ctr##_lo)
6553 #define GET_64BIT_NET_STATS32(ctr) \
6554 (ctr##_lo)
6556 #if (BITS_PER_LONG == 64)
6557 #define GET_64BIT_NET_STATS(ctr) \
6558 GET_64BIT_NET_STATS64(bp->stats_blk->ctr) + \
6559 GET_64BIT_NET_STATS64(bp->temp_stats_blk->ctr)
6560 #else
6561 #define GET_64BIT_NET_STATS(ctr) \
6562 GET_64BIT_NET_STATS32(bp->stats_blk->ctr) + \
6563 GET_64BIT_NET_STATS32(bp->temp_stats_blk->ctr)
6564 #endif
6566 #define GET_32BIT_NET_STATS(ctr) \
6567 (unsigned long) (bp->stats_blk->ctr + \
6568 bp->temp_stats_blk->ctr)
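The *_NET_STATS macros pick the widest read that fits in an unsigned long: on 64-bit builds both words of a 64-bit counter are combined, while 32-bit builds return only the lo word since the full value would not fit. A standalone sketch of that selection, keying off ULONG_MAX rather than the kernel's BITS_PER_LONG:

#include <stdint.h>
#include <stdio.h>
#include <limits.h>

/* On 64-bit longs, combine hi and lo; on 32-bit longs, return lo only. */
static unsigned long get_stat(uint32_t hi, uint32_t lo)
{
#if ULONG_MAX > 0xffffffffUL
        return ((unsigned long)hi << 32) + lo;
#else
        return lo;
#endif
}

int main(void)
{
        printf("%lu\n", get_stat(1, 2));        /* 4294967298 on LP64, 2 on 32-bit */
        return 0;
}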
6570 static struct net_device_stats *
6571 bnx2_get_stats(struct net_device *dev)
6573 struct bnx2 *bp = netdev_priv(dev);
6574 struct net_device_stats *net_stats = &dev->stats;
6576 if (bp->stats_blk == NULL) {
6577 return net_stats;
6579 net_stats->rx_packets =
6580 GET_64BIT_NET_STATS(stat_IfHCInUcastPkts) +
6581 GET_64BIT_NET_STATS(stat_IfHCInMulticastPkts) +
6582 GET_64BIT_NET_STATS(stat_IfHCInBroadcastPkts);
6584 net_stats->tx_packets =
6585 GET_64BIT_NET_STATS(stat_IfHCOutUcastPkts) +
6586 GET_64BIT_NET_STATS(stat_IfHCOutMulticastPkts) +
6587 GET_64BIT_NET_STATS(stat_IfHCOutBroadcastPkts);
6589 net_stats->rx_bytes =
6590 GET_64BIT_NET_STATS(stat_IfHCInOctets);
6592 net_stats->tx_bytes =
6593 GET_64BIT_NET_STATS(stat_IfHCOutOctets);
6595 net_stats->multicast =
6596 GET_64BIT_NET_STATS(stat_IfHCOutMulticastPkts);
6598 net_stats->collisions =
6599 GET_32BIT_NET_STATS(stat_EtherStatsCollisions);
6601 net_stats->rx_length_errors =
6602 GET_32BIT_NET_STATS(stat_EtherStatsUndersizePkts) +
6603 GET_32BIT_NET_STATS(stat_EtherStatsOverrsizePkts);
6605 net_stats->rx_over_errors =
6606 GET_32BIT_NET_STATS(stat_IfInFTQDiscards) +
6607 GET_32BIT_NET_STATS(stat_IfInMBUFDiscards);
6609 net_stats->rx_frame_errors =
6610 GET_32BIT_NET_STATS(stat_Dot3StatsAlignmentErrors);
6612 net_stats->rx_crc_errors =
6613 GET_32BIT_NET_STATS(stat_Dot3StatsFCSErrors);
6615 net_stats->rx_errors = net_stats->rx_length_errors +
6616 net_stats->rx_over_errors + net_stats->rx_frame_errors +
6617 net_stats->rx_crc_errors;
6619 net_stats->tx_aborted_errors =
6620 GET_32BIT_NET_STATS(stat_Dot3StatsExcessiveCollisions) +
6621 GET_32BIT_NET_STATS(stat_Dot3StatsLateCollisions);
6623 if ((CHIP_NUM(bp) == CHIP_NUM_5706) ||
6624 (CHIP_ID(bp) == CHIP_ID_5708_A0))
6625 net_stats->tx_carrier_errors = 0;
6626 else {
6627 net_stats->tx_carrier_errors =
6628 GET_32BIT_NET_STATS(stat_Dot3StatsCarrierSenseErrors);
6631 net_stats->tx_errors =
6632 GET_32BIT_NET_STATS(stat_emac_tx_stat_dot3statsinternalmactransmiterrors) +
6633 net_stats->tx_aborted_errors +
6634 net_stats->tx_carrier_errors;
6636 net_stats->rx_missed_errors =
6637 GET_32BIT_NET_STATS(stat_IfInFTQDiscards) +
6638 GET_32BIT_NET_STATS(stat_IfInMBUFDiscards) +
6639 GET_32BIT_NET_STATS(stat_FwRxDrop);
6641 return net_stats;
6644 /* All ethtool functions called with rtnl_lock */
6646 static int
6647 bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
6649 struct bnx2 *bp = netdev_priv(dev);
6650 int support_serdes = 0, support_copper = 0;
6652 cmd->supported = SUPPORTED_Autoneg;
6653 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
6654 support_serdes = 1;
6655 support_copper = 1;
6656 } else if (bp->phy_port == PORT_FIBRE)
6657 support_serdes = 1;
6658 else
6659 support_copper = 1;
6661 if (support_serdes) {
6662 cmd->supported |= SUPPORTED_1000baseT_Full |
6663 SUPPORTED_FIBRE;
6664 if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
6665 cmd->supported |= SUPPORTED_2500baseX_Full;
6668 if (support_copper) {
6669 cmd->supported |= SUPPORTED_10baseT_Half |
6670 SUPPORTED_10baseT_Full |
6671 SUPPORTED_100baseT_Half |
6672 SUPPORTED_100baseT_Full |
6673 SUPPORTED_1000baseT_Full |
6674 SUPPORTED_TP;
6678 spin_lock_bh(&bp->phy_lock);
6679 cmd->port = bp->phy_port;
6680 cmd->advertising = bp->advertising;
6682 if (bp->autoneg & AUTONEG_SPEED) {
6683 cmd->autoneg = AUTONEG_ENABLE;
6685 else {
6686 cmd->autoneg = AUTONEG_DISABLE;
6689 if (netif_carrier_ok(dev)) {
6690 cmd->speed = bp->line_speed;
6691 cmd->duplex = bp->duplex;
6693 else {
6694 cmd->speed = -1;
6695 cmd->duplex = -1;
6697 spin_unlock_bh(&bp->phy_lock);
6699 cmd->transceiver = XCVR_INTERNAL;
6700 cmd->phy_address = bp->phy_addr;
6702 return 0;
6705 static int
6706 bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
6708 struct bnx2 *bp = netdev_priv(dev);
6709 u8 autoneg = bp->autoneg;
6710 u8 req_duplex = bp->req_duplex;
6711 u16 req_line_speed = bp->req_line_speed;
6712 u32 advertising = bp->advertising;
6713 int err = -EINVAL;
6715 spin_lock_bh(&bp->phy_lock);
6717 if (cmd->port != PORT_TP && cmd->port != PORT_FIBRE)
6718 goto err_out_unlock;
6720 if (cmd->port != bp->phy_port &&
6721 !(bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP))
6722 goto err_out_unlock;
6724 /* If device is down, we can store the settings only if the user
6725 * is setting the currently active port.
6727 if (!netif_running(dev) && cmd->port != bp->phy_port)
6728 goto err_out_unlock;
6730 if (cmd->autoneg == AUTONEG_ENABLE) {
6731 autoneg |= AUTONEG_SPEED;
6733 advertising = cmd->advertising;
6734 if (cmd->port == PORT_TP) {
6735 advertising &= ETHTOOL_ALL_COPPER_SPEED;
6736 if (!advertising)
6737 advertising = ETHTOOL_ALL_COPPER_SPEED;
6738 } else {
6739 advertising &= ETHTOOL_ALL_FIBRE_SPEED;
6740 if (!advertising)
6741 advertising = ETHTOOL_ALL_FIBRE_SPEED;
6743 advertising |= ADVERTISED_Autoneg;
6745 else {
6746 if (cmd->port == PORT_FIBRE) {
6747 if ((cmd->speed != SPEED_1000 &&
6748 cmd->speed != SPEED_2500) ||
6749 (cmd->duplex != DUPLEX_FULL))
6750 goto err_out_unlock;
6752 if (cmd->speed == SPEED_2500 &&
6753 !(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
6754 goto err_out_unlock;
6756 else if (cmd->speed == SPEED_1000 || cmd->speed == SPEED_2500)
6757 goto err_out_unlock;
6759 autoneg &= ~AUTONEG_SPEED;
6760 req_line_speed = cmd->speed;
6761 req_duplex = cmd->duplex;
6762 advertising = 0;
6765 bp->autoneg = autoneg;
6766 bp->advertising = advertising;
6767 bp->req_line_speed = req_line_speed;
6768 bp->req_duplex = req_duplex;
6770 err = 0;
6771 /* If device is down, the new settings will be picked up when it is
6772 * brought up.
6774 if (netif_running(dev))
6775 err = bnx2_setup_phy(bp, cmd->port);
6777 err_out_unlock:
6778 spin_unlock_bh(&bp->phy_lock);
6780 return err;
6783 static void
6784 bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
6786 struct bnx2 *bp = netdev_priv(dev);
6788 strcpy(info->driver, DRV_MODULE_NAME);
6789 strcpy(info->version, DRV_MODULE_VERSION);
6790 strcpy(info->bus_info, pci_name(bp->pdev));
6791 strcpy(info->fw_version, bp->fw_version);
6794 #define BNX2_REGDUMP_LEN (32 * 1024)
6796 static int
6797 bnx2_get_regs_len(struct net_device *dev)
6799 return BNX2_REGDUMP_LEN;
6802 static void
6803 bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
6805 u32 *p = _p, i, offset;
6806 u8 *orig_p = _p;
6807 struct bnx2 *bp = netdev_priv(dev);
6808 u32 reg_boundaries[] = { 0x0000, 0x0098, 0x0400, 0x045c,
6809 0x0800, 0x0880, 0x0c00, 0x0c10,
6810 0x0c30, 0x0d08, 0x1000, 0x101c,
6811 0x1040, 0x1048, 0x1080, 0x10a4,
6812 0x1400, 0x1490, 0x1498, 0x14f0,
6813 0x1500, 0x155c, 0x1580, 0x15dc,
6814 0x1600, 0x1658, 0x1680, 0x16d8,
6815 0x1800, 0x1820, 0x1840, 0x1854,
6816 0x1880, 0x1894, 0x1900, 0x1984,
6817 0x1c00, 0x1c0c, 0x1c40, 0x1c54,
6818 0x1c80, 0x1c94, 0x1d00, 0x1d84,
6819 0x2000, 0x2030, 0x23c0, 0x2400,
6820 0x2800, 0x2820, 0x2830, 0x2850,
6821 0x2b40, 0x2c10, 0x2fc0, 0x3058,
6822 0x3c00, 0x3c94, 0x4000, 0x4010,
6823 0x4080, 0x4090, 0x43c0, 0x4458,
6824 0x4c00, 0x4c18, 0x4c40, 0x4c54,
6825 0x4fc0, 0x5010, 0x53c0, 0x5444,
6826 0x5c00, 0x5c18, 0x5c80, 0x5c90,
6827 0x5fc0, 0x6000, 0x6400, 0x6428,
6828 0x6800, 0x6848, 0x684c, 0x6860,
6829 0x6888, 0x6910, 0x8000 };
6831 regs->version = 0;
6833 memset(p, 0, BNX2_REGDUMP_LEN);
6835 if (!netif_running(bp->dev))
6836 return;
6838 i = 0;
6839 offset = reg_boundaries[0];
6840 p += offset;
6841 while (offset < BNX2_REGDUMP_LEN) {
6842 *p++ = REG_RD(bp, offset);
6843 offset += 4;
6844 if (offset == reg_boundaries[i + 1]) {
6845 offset = reg_boundaries[i + 2];
6846 p = (u32 *) (orig_p + offset);
6847 i += 2;
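reg_boundaries[] lists alternating [start, end) offsets of readable register windows, ending in a terminator equal to BNX2_REGDUMP_LEN; the loop reads 32-bit words and jumps the output pointer across the holes so register offsets and dump-buffer offsets stay in sync. A small runnable model of the walk (the array contents here are made up, not the chip's):

#include <stdio.h>

int main(void)
{
        /* alternating window [start, end) offsets, plus a final terminator */
        unsigned int bounds[] = { 0x00, 0x08, 0x10, 0x18, 0x20 };
        unsigned int i = 0, offset = bounds[0], dump_len = 0x20;

        while (offset < dump_len) {
                printf("read reg 0x%02x\n", offset);
                offset += 4;
                if (offset == bounds[i + 1]) {  /* end of window: jump the hole */
                        offset = bounds[i + 2];
                        i += 2;
                }
        }
        return 0;
}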
6852 static void
6853 bnx2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
6855 struct bnx2 *bp = netdev_priv(dev);
6857 if (bp->flags & BNX2_FLAG_NO_WOL) {
6858 wol->supported = 0;
6859 wol->wolopts = 0;
6861 else {
6862 wol->supported = WAKE_MAGIC;
6863 if (bp->wol)
6864 wol->wolopts = WAKE_MAGIC;
6865 else
6866 wol->wolopts = 0;
6868 memset(&wol->sopass, 0, sizeof(wol->sopass));
6871 static int
6872 bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
6874 struct bnx2 *bp = netdev_priv(dev);
6876 if (wol->wolopts & ~WAKE_MAGIC)
6877 return -EINVAL;
6879 if (wol->wolopts & WAKE_MAGIC) {
6880 if (bp->flags & BNX2_FLAG_NO_WOL)
6881 return -EINVAL;
6883 bp->wol = 1;
6885 else {
6886 bp->wol = 0;
6888 return 0;
6891 static int
6892 bnx2_nway_reset(struct net_device *dev)
6894 struct bnx2 *bp = netdev_priv(dev);
6895 u32 bmcr;
6897 if (!netif_running(dev))
6898 return -EAGAIN;
6900 if (!(bp->autoneg & AUTONEG_SPEED)) {
6901 return -EINVAL;
6904 spin_lock_bh(&bp->phy_lock);
6906 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
6907 int rc;
6909 rc = bnx2_setup_remote_phy(bp, bp->phy_port);
6910 spin_unlock_bh(&bp->phy_lock);
6911 return rc;
6914 /* Force a link down visible on the other side */
6915 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
6916 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
6917 spin_unlock_bh(&bp->phy_lock);
6919 msleep(20);
6921 spin_lock_bh(&bp->phy_lock);
6923 bp->current_interval = BNX2_SERDES_AN_TIMEOUT;
6924 bp->serdes_an_pending = 1;
6925 mod_timer(&bp->timer, jiffies + bp->current_interval);
6928 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
6929 bmcr &= ~BMCR_LOOPBACK;
6930 bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART | BMCR_ANENABLE);
6932 spin_unlock_bh(&bp->phy_lock);
6934 return 0;
6937 static u32
6938 bnx2_get_link(struct net_device *dev)
6940 struct bnx2 *bp = netdev_priv(dev);
6942 return bp->link_up;
6945 static int
6946 bnx2_get_eeprom_len(struct net_device *dev)
6948 struct bnx2 *bp = netdev_priv(dev);
6950 if (bp->flash_info == NULL)
6951 return 0;
6953 return (int) bp->flash_size;
6956 static int
6957 bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
6958 u8 *eebuf)
6960 struct bnx2 *bp = netdev_priv(dev);
6961 int rc;
6963 if (!netif_running(dev))
6964 return -EAGAIN;
6966 /* parameters already validated in ethtool_get_eeprom */
6968 rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
6970 return rc;
6973 static int
6974 bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
6975 u8 *eebuf)
6977 struct bnx2 *bp = netdev_priv(dev);
6978 int rc;
6980 if (!netif_running(dev))
6981 return -EAGAIN;
6983 /* parameters already validated in ethtool_set_eeprom */
6985 rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
6987 return rc;
6990 static int
6991 bnx2_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
6993 struct bnx2 *bp = netdev_priv(dev);
6995 memset(coal, 0, sizeof(struct ethtool_coalesce));
6997 coal->rx_coalesce_usecs = bp->rx_ticks;
6998 coal->rx_max_coalesced_frames = bp->rx_quick_cons_trip;
6999 coal->rx_coalesce_usecs_irq = bp->rx_ticks_int;
7000 coal->rx_max_coalesced_frames_irq = bp->rx_quick_cons_trip_int;
7002 coal->tx_coalesce_usecs = bp->tx_ticks;
7003 coal->tx_max_coalesced_frames = bp->tx_quick_cons_trip;
7004 coal->tx_coalesce_usecs_irq = bp->tx_ticks_int;
7005 coal->tx_max_coalesced_frames_irq = bp->tx_quick_cons_trip_int;
7007 coal->stats_block_coalesce_usecs = bp->stats_ticks;
7009 return 0;
7012 static int
7013 bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
7015 struct bnx2 *bp = netdev_priv(dev);
7017 bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
7018 if (bp->rx_ticks > 0x3ff) bp->rx_ticks = 0x3ff;
7020 bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames;
7021 if (bp->rx_quick_cons_trip > 0xff) bp->rx_quick_cons_trip = 0xff;
7023 bp->rx_ticks_int = (u16) coal->rx_coalesce_usecs_irq;
7024 if (bp->rx_ticks_int > 0x3ff) bp->rx_ticks_int = 0x3ff;
7026 bp->rx_quick_cons_trip_int = (u16) coal->rx_max_coalesced_frames_irq;
7027 if (bp->rx_quick_cons_trip_int > 0xff)
7028 bp->rx_quick_cons_trip_int = 0xff;
7030 bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
7031 if (bp->tx_ticks > 0x3ff) bp->tx_ticks = 0x3ff;
7033 bp->tx_quick_cons_trip = (u16) coal->tx_max_coalesced_frames;
7034 if (bp->tx_quick_cons_trip > 0xff) bp->tx_quick_cons_trip = 0xff;
7036 bp->tx_ticks_int = (u16) coal->tx_coalesce_usecs_irq;
7037 if (bp->tx_ticks_int > 0x3ff) bp->tx_ticks_int = 0x3ff;
7039 bp->tx_quick_cons_trip_int = (u16) coal->tx_max_coalesced_frames_irq;
7040 if (bp->tx_quick_cons_trip_int > 0xff) bp->tx_quick_cons_trip_int =
7041 0xff;
7043 bp->stats_ticks = coal->stats_block_coalesce_usecs;
7044 if (bp->flags & BNX2_FLAG_BROKEN_STATS) {
7045 if (bp->stats_ticks != 0 && bp->stats_ticks != USEC_PER_SEC)
7046 bp->stats_ticks = USEC_PER_SEC;
7048 if (bp->stats_ticks > BNX2_HC_STATS_TICKS_HC_STAT_TICKS)
7049 bp->stats_ticks = BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
7050 bp->stats_ticks &= BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
7052 if (netif_running(bp->dev)) {
7053 bnx2_netif_stop(bp);
7054 bnx2_init_nic(bp, 0);
7055 bnx2_netif_start(bp);
7058 return 0;
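Every coalescing input above is clamped to the width of the corresponding hardware field: tick counters are 10 bits (0x3ff) and frame-count trips are 8 bits (0xff). A trivial standalone version of the clamp, with the limits copied from the code above:

#include <stdint.h>
#include <stdio.h>

/* Clamp a requested value to an n-bit hardware field. */
static uint16_t clamp_field(uint32_t req, uint16_t max)
{
        return req > max ? max : (uint16_t)req;
}

int main(void)
{
        printf("%u\n", clamp_field(5000, 0x3ff));       /* 1023 */
        printf("%u\n", clamp_field(20, 0xff));          /* 20 */
        return 0;
}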
7061 static void
7062 bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
7064 struct bnx2 *bp = netdev_priv(dev);
7066 ering->rx_max_pending = MAX_TOTAL_RX_DESC_CNT;
7067 ering->rx_mini_max_pending = 0;
7068 ering->rx_jumbo_max_pending = MAX_TOTAL_RX_PG_DESC_CNT;
7070 ering->rx_pending = bp->rx_ring_size;
7071 ering->rx_mini_pending = 0;
7072 ering->rx_jumbo_pending = bp->rx_pg_ring_size;
7074 ering->tx_max_pending = MAX_TX_DESC_CNT;
7075 ering->tx_pending = bp->tx_ring_size;
7078 static int
7079 bnx2_change_ring_size(struct bnx2 *bp, u32 rx, u32 tx)
7081 if (netif_running(bp->dev)) {
7082 /* Reset will erase chipset stats; save them */
7083 bnx2_save_stats(bp);
7085 bnx2_netif_stop(bp);
7086 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
7087 bnx2_free_skbs(bp);
7088 bnx2_free_mem(bp);
7091 bnx2_set_rx_ring_size(bp, rx);
7092 bp->tx_ring_size = tx;
7094 if (netif_running(bp->dev)) {
7095 int rc;
7097 rc = bnx2_alloc_mem(bp);
7098 if (!rc)
7099 rc = bnx2_init_nic(bp, 0);
7101 if (rc) {
7102 bnx2_napi_enable(bp);
7103 dev_close(bp->dev);
7104 return rc;
7106 #ifdef BCM_CNIC
7107 mutex_lock(&bp->cnic_lock);
7108 /* Let cnic know about the new status block. */
7109 if (bp->cnic_eth_dev.drv_state & CNIC_DRV_STATE_REGD)
7110 bnx2_setup_cnic_irq_info(bp);
7111 mutex_unlock(&bp->cnic_lock);
7112 #endif
7113 bnx2_netif_start(bp);
7115 return 0;
7118 static int
7119 bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
7121 struct bnx2 *bp = netdev_priv(dev);
7122 int rc;
7124 if ((ering->rx_pending > MAX_TOTAL_RX_DESC_CNT) ||
7125 (ering->tx_pending > MAX_TX_DESC_CNT) ||
7126 (ering->tx_pending <= MAX_SKB_FRAGS)) {
7128 return -EINVAL;
7130 rc = bnx2_change_ring_size(bp, ering->rx_pending, ering->tx_pending);
7131 return rc;
7134 static void
7135 bnx2_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
7137 struct bnx2 *bp = netdev_priv(dev);
7139 epause->autoneg = ((bp->autoneg & AUTONEG_FLOW_CTRL) != 0);
7140 epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) != 0);
7141 epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) != 0);
7144 static int
7145 bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
7147 struct bnx2 *bp = netdev_priv(dev);
7149 bp->req_flow_ctrl = 0;
7150 if (epause->rx_pause)
7151 bp->req_flow_ctrl |= FLOW_CTRL_RX;
7152 if (epause->tx_pause)
7153 bp->req_flow_ctrl |= FLOW_CTRL_TX;
7155 if (epause->autoneg) {
7156 bp->autoneg |= AUTONEG_FLOW_CTRL;
7158 else {
7159 bp->autoneg &= ~AUTONEG_FLOW_CTRL;
7162 if (netif_running(dev)) {
7163 spin_lock_bh(&bp->phy_lock);
7164 bnx2_setup_phy(bp, bp->phy_port);
7165 spin_unlock_bh(&bp->phy_lock);
7168 return 0;
7171 static u32
7172 bnx2_get_rx_csum(struct net_device *dev)
7174 struct bnx2 *bp = netdev_priv(dev);
7176 return bp->rx_csum;
7179 static int
7180 bnx2_set_rx_csum(struct net_device *dev, u32 data)
7182 struct bnx2 *bp = netdev_priv(dev);
7184 bp->rx_csum = data;
7185 return 0;
7188 static int
7189 bnx2_set_tso(struct net_device *dev, u32 data)
7191 struct bnx2 *bp = netdev_priv(dev);
7193 if (data) {
7194 dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
7195 if (CHIP_NUM(bp) == CHIP_NUM_5709)
7196 dev->features |= NETIF_F_TSO6;
7197 } else
7198 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6 |
7199 NETIF_F_TSO_ECN);
7200 return 0;
7203 static struct {
7204 char string[ETH_GSTRING_LEN];
7205 } bnx2_stats_str_arr[] = {
7206 { "rx_bytes" },
7207 { "rx_error_bytes" },
7208 { "tx_bytes" },
7209 { "tx_error_bytes" },
7210 { "rx_ucast_packets" },
7211 { "rx_mcast_packets" },
7212 { "rx_bcast_packets" },
7213 { "tx_ucast_packets" },
7214 { "tx_mcast_packets" },
7215 { "tx_bcast_packets" },
7216 { "tx_mac_errors" },
7217 { "tx_carrier_errors" },
7218 { "rx_crc_errors" },
7219 { "rx_align_errors" },
7220 { "tx_single_collisions" },
7221 { "tx_multi_collisions" },
7222 { "tx_deferred" },
7223 { "tx_excess_collisions" },
7224 { "tx_late_collisions" },
7225 { "tx_total_collisions" },
7226 { "rx_fragments" },
7227 { "rx_jabbers" },
7228 { "rx_undersize_packets" },
7229 { "rx_oversize_packets" },
7230 { "rx_64_byte_packets" },
7231 { "rx_65_to_127_byte_packets" },
7232 { "rx_128_to_255_byte_packets" },
7233 { "rx_256_to_511_byte_packets" },
7234 { "rx_512_to_1023_byte_packets" },
7235 { "rx_1024_to_1522_byte_packets" },
7236 { "rx_1523_to_9022_byte_packets" },
7237 { "tx_64_byte_packets" },
7238 { "tx_65_to_127_byte_packets" },
7239 { "tx_128_to_255_byte_packets" },
7240 { "tx_256_to_511_byte_packets" },
7241 { "tx_512_to_1023_byte_packets" },
7242 { "tx_1024_to_1522_byte_packets" },
7243 { "tx_1523_to_9022_byte_packets" },
7244 { "rx_xon_frames" },
7245 { "rx_xoff_frames" },
7246 { "tx_xon_frames" },
7247 { "tx_xoff_frames" },
7248 { "rx_mac_ctrl_frames" },
7249 { "rx_filtered_packets" },
7250 { "rx_ftq_discards" },
7251 { "rx_discards" },
7252 { "rx_fw_discards" },
7255 #define BNX2_NUM_STATS ARRAY_SIZE(bnx2_stats_str_arr)
7258 #define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4)
7260 static const unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
7261 STATS_OFFSET32(stat_IfHCInOctets_hi),
7262 STATS_OFFSET32(stat_IfHCInBadOctets_hi),
7263 STATS_OFFSET32(stat_IfHCOutOctets_hi),
7264 STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
7265 STATS_OFFSET32(stat_IfHCInUcastPkts_hi),
7266 STATS_OFFSET32(stat_IfHCInMulticastPkts_hi),
7267 STATS_OFFSET32(stat_IfHCInBroadcastPkts_hi),
7268 STATS_OFFSET32(stat_IfHCOutUcastPkts_hi),
7269 STATS_OFFSET32(stat_IfHCOutMulticastPkts_hi),
7270 STATS_OFFSET32(stat_IfHCOutBroadcastPkts_hi),
7271 STATS_OFFSET32(stat_emac_tx_stat_dot3statsinternalmactransmiterrors),
7272 STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
7273 STATS_OFFSET32(stat_Dot3StatsFCSErrors),
7274 STATS_OFFSET32(stat_Dot3StatsAlignmentErrors),
7275 STATS_OFFSET32(stat_Dot3StatsSingleCollisionFrames),
7276 STATS_OFFSET32(stat_Dot3StatsMultipleCollisionFrames),
7277 STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
7278 STATS_OFFSET32(stat_Dot3StatsExcessiveCollisions),
7279 STATS_OFFSET32(stat_Dot3StatsLateCollisions),
7280 STATS_OFFSET32(stat_EtherStatsCollisions),
7281 STATS_OFFSET32(stat_EtherStatsFragments),
7282 STATS_OFFSET32(stat_EtherStatsJabbers),
7283 STATS_OFFSET32(stat_EtherStatsUndersizePkts),
7284 STATS_OFFSET32(stat_EtherStatsOverrsizePkts),
7285 STATS_OFFSET32(stat_EtherStatsPktsRx64Octets),
7286 STATS_OFFSET32(stat_EtherStatsPktsRx65Octetsto127Octets),
7287 STATS_OFFSET32(stat_EtherStatsPktsRx128Octetsto255Octets),
7288 STATS_OFFSET32(stat_EtherStatsPktsRx256Octetsto511Octets),
7289 STATS_OFFSET32(stat_EtherStatsPktsRx512Octetsto1023Octets),
7290 STATS_OFFSET32(stat_EtherStatsPktsRx1024Octetsto1522Octets),
7291 STATS_OFFSET32(stat_EtherStatsPktsRx1523Octetsto9022Octets),
7292 STATS_OFFSET32(stat_EtherStatsPktsTx64Octets),
7293 STATS_OFFSET32(stat_EtherStatsPktsTx65Octetsto127Octets),
7294 STATS_OFFSET32(stat_EtherStatsPktsTx128Octetsto255Octets),
7295 STATS_OFFSET32(stat_EtherStatsPktsTx256Octetsto511Octets),
7296 STATS_OFFSET32(stat_EtherStatsPktsTx512Octetsto1023Octets),
7297 STATS_OFFSET32(stat_EtherStatsPktsTx1024Octetsto1522Octets),
7298 STATS_OFFSET32(stat_EtherStatsPktsTx1523Octetsto9022Octets),
7299 STATS_OFFSET32(stat_XonPauseFramesReceived),
7300 STATS_OFFSET32(stat_XoffPauseFramesReceived),
7301 STATS_OFFSET32(stat_OutXonSent),
7302 STATS_OFFSET32(stat_OutXoffSent),
7303 STATS_OFFSET32(stat_MacControlFramesReceived),
7304 STATS_OFFSET32(stat_IfInFramesL2FilterDiscards),
7305 STATS_OFFSET32(stat_IfInFTQDiscards),
7306 STATS_OFFSET32(stat_IfInMBUFDiscards),
7307 STATS_OFFSET32(stat_FwRxDrop),
7310 /* stat_IfHCInBadOctets and stat_Dot3StatsCarrierSenseErrors are
7311 * skipped because of errata.
7313 static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
7314 8,0,8,8,8,8,8,8,8,8,
7315 4,0,4,4,4,4,4,4,4,4,
7316 4,4,4,4,4,4,4,4,4,4,
7317 4,4,4,4,4,4,4,4,4,4,
7318 4,4,4,4,4,4,4,
7321 static u8 bnx2_5708_stats_len_arr[BNX2_NUM_STATS] = {
7322 8,0,8,8,8,8,8,8,8,8,
7323 4,4,4,4,4,4,4,4,4,4,
7324 4,4,4,4,4,4,4,4,4,4,
7325 4,4,4,4,4,4,4,4,4,4,
7326 4,4,4,4,4,4,4,
7329 #define BNX2_NUM_TESTS 6
7331 static struct {
7332 char string[ETH_GSTRING_LEN];
7333 } bnx2_tests_str_arr[BNX2_NUM_TESTS] = {
7334 { "register_test (offline)" },
7335 { "memory_test (offline)" },
7336 { "loopback_test (offline)" },
7337 { "nvram_test (online)" },
7338 { "interrupt_test (online)" },
7339 { "link_test (online)" },
7342 static int
7343 bnx2_get_sset_count(struct net_device *dev, int sset)
7345 switch (sset) {
7346 case ETH_SS_TEST:
7347 return BNX2_NUM_TESTS;
7348 case ETH_SS_STATS:
7349 return BNX2_NUM_STATS;
7350 default:
7351 return -EOPNOTSUPP;
7355 static void
7356 bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
7358 struct bnx2 *bp = netdev_priv(dev);
7360 bnx2_set_power_state(bp, PCI_D0);
7362 memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS);
7363 if (etest->flags & ETH_TEST_FL_OFFLINE) {
7364 int i;
7366 bnx2_netif_stop(bp);
7367 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
7368 bnx2_free_skbs(bp);
7370 if (bnx2_test_registers(bp) != 0) {
7371 buf[0] = 1;
7372 etest->flags |= ETH_TEST_FL_FAILED;
7374 if (bnx2_test_memory(bp) != 0) {
7375 buf[1] = 1;
7376 etest->flags |= ETH_TEST_FL_FAILED;
7378 if ((buf[2] = bnx2_test_loopback(bp)) != 0)
7379 etest->flags |= ETH_TEST_FL_FAILED;
7381 if (!netif_running(bp->dev))
7382 bnx2_shutdown_chip(bp);
7383 else {
7384 bnx2_init_nic(bp, 1);
7385 bnx2_netif_start(bp);
7388 /* wait for link up */
7389 for (i = 0; i < 7; i++) {
7390 if (bp->link_up)
7391 break;
7392 msleep_interruptible(1000);
7396 if (bnx2_test_nvram(bp) != 0) {
7397 buf[3] = 1;
7398 etest->flags |= ETH_TEST_FL_FAILED;
7400 if (bnx2_test_intr(bp) != 0) {
7401 buf[4] = 1;
7402 etest->flags |= ETH_TEST_FL_FAILED;
7405 if (bnx2_test_link(bp) != 0) {
7406 buf[5] = 1;
7407 etest->flags |= ETH_TEST_FL_FAILED;
7410 if (!netif_running(bp->dev))
7411 bnx2_set_power_state(bp, PCI_D3hot);
7414 static void
7415 bnx2_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
7417 switch (stringset) {
7418 case ETH_SS_STATS:
7419 memcpy(buf, bnx2_stats_str_arr,
7420 sizeof(bnx2_stats_str_arr));
7421 break;
7422 case ETH_SS_TEST:
7423 memcpy(buf, bnx2_tests_str_arr,
7424 sizeof(bnx2_tests_str_arr));
7425 break;
7429 static void
7430 bnx2_get_ethtool_stats(struct net_device *dev,
7431 struct ethtool_stats *stats, u64 *buf)
7433 struct bnx2 *bp = netdev_priv(dev);
7434 int i;
7435 u32 *hw_stats = (u32 *) bp->stats_blk;
7436 u32 *temp_stats = (u32 *) bp->temp_stats_blk;
7437 u8 *stats_len_arr = NULL;
7439 if (hw_stats == NULL) {
7440 memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS);
7441 return;
7444 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
7445 (CHIP_ID(bp) == CHIP_ID_5706_A1) ||
7446 (CHIP_ID(bp) == CHIP_ID_5706_A2) ||
7447 (CHIP_ID(bp) == CHIP_ID_5708_A0))
7448 stats_len_arr = bnx2_5706_stats_len_arr;
7449 else
7450 stats_len_arr = bnx2_5708_stats_len_arr;
7452 for (i = 0; i < BNX2_NUM_STATS; i++) {
7453 unsigned long offset;
7455 if (stats_len_arr[i] == 0) {
7456 /* skip this counter */
7457 buf[i] = 0;
7458 continue;
7461 offset = bnx2_stats_offset_arr[i];
7462 if (stats_len_arr[i] == 4) {
7463 /* 4-byte counter */
7464 buf[i] = (u64) *(hw_stats + offset) +
7465 *(temp_stats + offset);
7466 continue;
7468 /* 8-byte counter */
7469 buf[i] = (((u64) *(hw_stats + offset)) << 32) +
7470 *(hw_stats + offset + 1) +
7471 (((u64) *(temp_stats + offset)) << 32) +
7472 *(temp_stats + offset + 1);
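For the 8-byte counters, the hi word sits at the stats-block offset and the lo word immediately after it, and the live hardware copy is added to the saved temp_stats copy. A runnable sketch of the hi/lo reassembly (buffer contents are made up):

#include <stdint.h>
#include <stdio.h>

/* Rebuild a 64-bit counter from consecutive hi/lo 32-bit words. */
static uint64_t read_stat64(const uint32_t *blk, unsigned long off)
{
        return ((uint64_t)blk[off] << 32) + blk[off + 1];
}

int main(void)
{
        uint32_t blk[2] = { 0x1, 0x2 };         /* hi word first, then lo */

        printf("%llu\n", (unsigned long long)read_stat64(blk, 0)); /* 4294967298 */
        return 0;
}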
7476 static int
7477 bnx2_phys_id(struct net_device *dev, u32 data)
7479 struct bnx2 *bp = netdev_priv(dev);
7480 int i;
7481 u32 save;
7483 bnx2_set_power_state(bp, PCI_D0);
7485 if (data == 0)
7486 data = 2;
7488 save = REG_RD(bp, BNX2_MISC_CFG);
7489 REG_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);
7491 for (i = 0; i < (data * 2); i++) {
7492 if ((i % 2) == 0) {
7493 REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE);
7495 else {
7496 REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE |
7497 BNX2_EMAC_LED_1000MB_OVERRIDE |
7498 BNX2_EMAC_LED_100MB_OVERRIDE |
7499 BNX2_EMAC_LED_10MB_OVERRIDE |
7500 BNX2_EMAC_LED_TRAFFIC_OVERRIDE |
7501 BNX2_EMAC_LED_TRAFFIC);
7503 msleep_interruptible(500);
7504 if (signal_pending(current))
7505 break;
7507 REG_WR(bp, BNX2_EMAC_LED, 0);
7508 REG_WR(bp, BNX2_MISC_CFG, save);
7510 if (!netif_running(dev))
7511 bnx2_set_power_state(bp, PCI_D3hot);
7513 return 0;
7516 static int
7517 bnx2_set_tx_csum(struct net_device *dev, u32 data)
7519 struct bnx2 *bp = netdev_priv(dev);
7521 if (CHIP_NUM(bp) == CHIP_NUM_5709)
7522 return (ethtool_op_set_tx_ipv6_csum(dev, data));
7523 else
7524 return (ethtool_op_set_tx_csum(dev, data));
7527 static const struct ethtool_ops bnx2_ethtool_ops = {
7528 .get_settings = bnx2_get_settings,
7529 .set_settings = bnx2_set_settings,
7530 .get_drvinfo = bnx2_get_drvinfo,
7531 .get_regs_len = bnx2_get_regs_len,
7532 .get_regs = bnx2_get_regs,
7533 .get_wol = bnx2_get_wol,
7534 .set_wol = bnx2_set_wol,
7535 .nway_reset = bnx2_nway_reset,
7536 .get_link = bnx2_get_link,
7537 .get_eeprom_len = bnx2_get_eeprom_len,
7538 .get_eeprom = bnx2_get_eeprom,
7539 .set_eeprom = bnx2_set_eeprom,
7540 .get_coalesce = bnx2_get_coalesce,
7541 .set_coalesce = bnx2_set_coalesce,
7542 .get_ringparam = bnx2_get_ringparam,
7543 .set_ringparam = bnx2_set_ringparam,
7544 .get_pauseparam = bnx2_get_pauseparam,
7545 .set_pauseparam = bnx2_set_pauseparam,
7546 .get_rx_csum = bnx2_get_rx_csum,
7547 .set_rx_csum = bnx2_set_rx_csum,
7548 .set_tx_csum = bnx2_set_tx_csum,
7549 .set_sg = ethtool_op_set_sg,
7550 .set_tso = bnx2_set_tso,
7551 .self_test = bnx2_self_test,
7552 .get_strings = bnx2_get_strings,
7553 .phys_id = bnx2_phys_id,
7554 .get_ethtool_stats = bnx2_get_ethtool_stats,
7555 .get_sset_count = bnx2_get_sset_count,
7558 /* Called with rtnl_lock */
7559 static int
7560 bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
7562 struct mii_ioctl_data *data = if_mii(ifr);
7563 struct bnx2 *bp = netdev_priv(dev);
7564 int err;
7566 switch(cmd) {
7567 case SIOCGMIIPHY:
7568 data->phy_id = bp->phy_addr;
7570 /* fallthru */
7571 case SIOCGMIIREG: {
7572 u32 mii_regval;
7574 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
7575 return -EOPNOTSUPP;
7577 if (!netif_running(dev))
7578 return -EAGAIN;
7580 spin_lock_bh(&bp->phy_lock);
7581 err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval);
7582 spin_unlock_bh(&bp->phy_lock);
7584 data->val_out = mii_regval;
7586 return err;
7589 case SIOCSMIIREG:
7590 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
7591 return -EOPNOTSUPP;
7593 if (!netif_running(dev))
7594 return -EAGAIN;
7596 spin_lock_bh(&bp->phy_lock);
7597 err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in);
7598 spin_unlock_bh(&bp->phy_lock);
7600 return err;
7602 default:
7603 /* do nothing */
7604 break;
7606 return -EOPNOTSUPP;
7609 /* Called with rtnl_lock */
7610 static int
7611 bnx2_change_mac_addr(struct net_device *dev, void *p)
7613 struct sockaddr *addr = p;
7614 struct bnx2 *bp = netdev_priv(dev);
7616 if (!is_valid_ether_addr(addr->sa_data))
7617 return -EINVAL;
7619 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
7620 if (netif_running(dev))
7621 bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
7623 return 0;
7626 /* Called with rtnl_lock */
7627 static int
7628 bnx2_change_mtu(struct net_device *dev, int new_mtu)
7630 struct bnx2 *bp = netdev_priv(dev);
7632 if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) ||
7633 ((new_mtu + ETH_HLEN) < MIN_ETHERNET_PACKET_SIZE))
7634 return -EINVAL;
7636 dev->mtu = new_mtu;
7637 return (bnx2_change_ring_size(bp, bp->rx_ring_size, bp->tx_ring_size));
7640 #ifdef CONFIG_NET_POLL_CONTROLLER
7641 static void
7642 poll_bnx2(struct net_device *dev)
7644 struct bnx2 *bp = netdev_priv(dev);
7645 int i;
7647 for (i = 0; i < bp->irq_nvecs; i++) {
7648 struct bnx2_irq *irq = &bp->irq_tbl[i];
7650 disable_irq(irq->vector);
7651 irq->handler(irq->vector, &bp->bnx2_napi[i]);
7652 enable_irq(irq->vector);
7655 #endif
7657 static void __devinit
7658 bnx2_get_5709_media(struct bnx2 *bp)
7660 u32 val = REG_RD(bp, BNX2_MISC_DUAL_MEDIA_CTRL);
7661 u32 bond_id = val & BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID;
7662 u32 strap;
7664 if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_C)
7665 return;
7666 else if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_S) {
7667 bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
7668 return;
7671 if (val & BNX2_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE)
7672 strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL) >> 21;
7673 else
7674 strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP) >> 8;
7676 if (PCI_FUNC(bp->pdev->devfn) == 0) {
7677 switch (strap) {
7678 case 0x4:
7679 case 0x5:
7680 case 0x6:
7681 bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
7682 return;
7684 } else {
7685 switch (strap) {
7686 case 0x1:
7687 case 0x2:
7688 case 0x4:
7689 bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
7690 return;
7695 static void __devinit
7696 bnx2_get_pci_speed(struct bnx2 *bp)
7698 u32 reg;
7700 reg = REG_RD(bp, BNX2_PCICFG_MISC_STATUS);
7701 if (reg & BNX2_PCICFG_MISC_STATUS_PCIX_DET) {
7702 u32 clkreg;
7704 bp->flags |= BNX2_FLAG_PCIX;
7706 clkreg = REG_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS);
7708 clkreg &= BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
7709 switch (clkreg) {
7710 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
7711 bp->bus_speed_mhz = 133;
7712 break;
7714 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
7715 bp->bus_speed_mhz = 100;
7716 break;
7718 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
7719 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
7720 bp->bus_speed_mhz = 66;
7721 break;
7723 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
7724 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
7725 bp->bus_speed_mhz = 50;
7726 break;
7728 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
7729 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
7730 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
7731 bp->bus_speed_mhz = 33;
7732 break;
7735 else {
7736 if (reg & BNX2_PCICFG_MISC_STATUS_M66EN)
7737 bp->bus_speed_mhz = 66;
7738 else
7739 bp->bus_speed_mhz = 33;
7742 if (reg & BNX2_PCICFG_MISC_STATUS_32BIT_DET)
7743 bp->flags |= BNX2_FLAG_PCI_32BIT;
7747 static void __devinit
7748 bnx2_read_vpd_fw_ver(struct bnx2 *bp)
7750 int rc, i, j;
7751 u8 *data;
7752 unsigned int block_end, rosize, len;
7754 #define BNX2_VPD_NVRAM_OFFSET 0x300
7755 #define BNX2_VPD_LEN 128
7756 #define BNX2_MAX_VER_SLEN 30
7758 data = kmalloc(256, GFP_KERNEL);
7759 if (!data)
7760 return;
7762 rc = bnx2_nvram_read(bp, BNX2_VPD_NVRAM_OFFSET, data + BNX2_VPD_LEN,
7763 BNX2_VPD_LEN);
7764 if (rc)
7765 goto vpd_done;
7767 for (i = 0; i < BNX2_VPD_LEN; i += 4) {
7768 data[i] = data[i + BNX2_VPD_LEN + 3];
7769 data[i + 1] = data[i + BNX2_VPD_LEN + 2];
7770 data[i + 2] = data[i + BNX2_VPD_LEN + 1];
7771 data[i + 3] = data[i + BNX2_VPD_LEN];
7774 i = pci_vpd_find_tag(data, 0, BNX2_VPD_LEN, PCI_VPD_LRDT_RO_DATA);
7775 if (i < 0)
7776 goto vpd_done;
7778 rosize = pci_vpd_lrdt_size(&data[i]);
7779 i += PCI_VPD_LRDT_TAG_SIZE;
7780 block_end = i + rosize;
7782 if (block_end > BNX2_VPD_LEN)
7783 goto vpd_done;
7785 j = pci_vpd_find_info_keyword(data, i, rosize,
7786 PCI_VPD_RO_KEYWORD_MFR_ID);
7787 if (j < 0)
7788 goto vpd_done;
7790 len = pci_vpd_info_field_size(&data[j]);
7792 j += PCI_VPD_INFO_FLD_HDR_SIZE;
7793 if (j + len > block_end || len != 4 ||
7794 memcmp(&data[j], "1028", 4))
7795 goto vpd_done;
7797 j = pci_vpd_find_info_keyword(data, i, rosize,
7798 PCI_VPD_RO_KEYWORD_VENDOR0);
7799 if (j < 0)
7800 goto vpd_done;
7802 len = pci_vpd_info_field_size(&data[j]);
7804 j += PCI_VPD_INFO_FLD_HDR_SIZE;
7805 if (j + len > block_end || len > BNX2_MAX_VER_SLEN)
7806 goto vpd_done;
7808 memcpy(bp->fw_version, &data[j], len);
7809 bp->fw_version[len] = ' ';
7811 vpd_done:
7812 kfree(data);
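The loop near the top of this function reverses every 4-byte group because bnx2_nvram_read() returns big-endian 32-bit words; the reversal restores the byte order the VPD parser expects. The same transform as a standalone sketch:

#include <stdio.h>

/* Reverse each 4-byte group, undoing the NVRAM word byte order. */
static void swab32_buf(unsigned char *dst, const unsigned char *src, int len)
{
        int i;

        for (i = 0; i < len; i += 4) {
                dst[i]     = src[i + 3];
                dst[i + 1] = src[i + 2];
                dst[i + 2] = src[i + 1];
                dst[i + 3] = src[i];
        }
}

int main(void)
{
        unsigned char in[4] = { 'A', 'B', 'C', 'D' }, out[4];

        swab32_buf(out, in, 4);
        printf("%.4s\n", (char *)out);  /* DCBA */
        return 0;
}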
7815 static int __devinit
7816 bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
7818 struct bnx2 *bp;
7819 unsigned long mem_len;
7820 int rc, i, j;
7821 u32 reg;
7822 u64 dma_mask, persist_dma_mask;
7824 SET_NETDEV_DEV(dev, &pdev->dev);
7825 bp = netdev_priv(dev);
7827 bp->flags = 0;
7828 bp->phy_flags = 0;
7830 bp->temp_stats_blk =
7831 kzalloc(sizeof(struct statistics_block), GFP_KERNEL);
7833 if (bp->temp_stats_blk == NULL) {
7834 rc = -ENOMEM;
7835 goto err_out;
7838 /* enable device (incl. PCI PM wakeup), and bus-mastering */
7839 rc = pci_enable_device(pdev);
7840 if (rc) {
7841 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
7842 goto err_out;
7845 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
7846 dev_err(&pdev->dev,
7847 "Cannot find PCI device base address, aborting\n");
7848 rc = -ENODEV;
7849 goto err_out_disable;
7852 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
7853 if (rc) {
7854 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
7855 goto err_out_disable;
7858 pci_set_master(pdev);
7859 pci_save_state(pdev);
7861 bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
7862 if (bp->pm_cap == 0) {
7863 dev_err(&pdev->dev,
7864 "Cannot find power management capability, aborting\n");
7865 rc = -EIO;
7866 goto err_out_release;
7869 bp->dev = dev;
7870 bp->pdev = pdev;
7872 spin_lock_init(&bp->phy_lock);
7873 spin_lock_init(&bp->indirect_lock);
7874 #ifdef BCM_CNIC
7875 mutex_init(&bp->cnic_lock);
7876 #endif
7877 INIT_WORK(&bp->reset_task, bnx2_reset_task);
7879 dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0);
7880 mem_len = MB_GET_CID_ADDR(TX_TSS_CID + TX_MAX_TSS_RINGS + 1);
7881 dev->mem_end = dev->mem_start + mem_len;
7882 dev->irq = pdev->irq;
7884 bp->regview = ioremap_nocache(dev->base_addr, mem_len);
7886 if (!bp->regview) {
7887 dev_err(&pdev->dev, "Cannot map register space, aborting\n");
7888 rc = -ENOMEM;
7889 goto err_out_release;
7892 /* Configure byte swap and enable write to the reg_window registers.
7893 * Rely on the CPU to do target byte swapping on big endian systems;
7894 * the chip's target access swapping will not swap all accesses.
7896 pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG,
7897 BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
7898 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);
7900 bnx2_set_power_state(bp, PCI_D0);
7902 bp->chip_id = REG_RD(bp, BNX2_MISC_ID);
7904 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
7905 if (pci_find_capability(pdev, PCI_CAP_ID_EXP) == 0) {
7906 dev_err(&pdev->dev,
7907 "Cannot find PCIE capability, aborting\n");
7908 rc = -EIO;
7909 goto err_out_unmap;
7911 bp->flags |= BNX2_FLAG_PCIE;
7912 if (CHIP_REV(bp) == CHIP_REV_Ax)
7913 bp->flags |= BNX2_FLAG_JUMBO_BROKEN;
7914 } else {
7915 bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
7916 if (bp->pcix_cap == 0) {
7917 dev_err(&pdev->dev,
7918 "Cannot find PCIX capability, aborting\n");
7919 rc = -EIO;
7920 goto err_out_unmap;
7922 bp->flags |= BNX2_FLAG_BROKEN_STATS;
7925 if (CHIP_NUM(bp) == CHIP_NUM_5709 && CHIP_REV(bp) != CHIP_REV_Ax) {
7926 if (pci_find_capability(pdev, PCI_CAP_ID_MSIX))
7927 bp->flags |= BNX2_FLAG_MSIX_CAP;
7930 if (CHIP_ID(bp) != CHIP_ID_5706_A0 && CHIP_ID(bp) != CHIP_ID_5706_A1) {
7931 if (pci_find_capability(pdev, PCI_CAP_ID_MSI))
7932 bp->flags |= BNX2_FLAG_MSI_CAP;
7935 /* 5708 cannot support DMA addresses > 40-bit. */
7936 if (CHIP_NUM(bp) == CHIP_NUM_5708)
7937 persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
7938 else
7939 persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
7941 /* Configure DMA attributes. */
7942 if (pci_set_dma_mask(pdev, dma_mask) == 0) {
7943 dev->features |= NETIF_F_HIGHDMA;
7944 rc = pci_set_consistent_dma_mask(pdev, persist_dma_mask);
7945 if (rc) {
7946 dev_err(&pdev->dev,
7947 "pci_set_consistent_dma_mask failed, aborting\n");
7948 goto err_out_unmap;
7950 } else if ((rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) != 0) {
7951 dev_err(&pdev->dev, "System does not support DMA, aborting\n");
7952 goto err_out_unmap;
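The probe picks a 40-bit mask for the 5708 (a device limitation) and 64-bit otherwise, then falls back to 32-bit if the platform rejects the wide mask; NETIF_F_HIGHDMA is only advertised when the wide mask sticks. A compact model of that negotiation, where try_set_mask() merely stands in for the pci_set_dma_mask()/dma_set_mask() calls this patch series is migrating between:

#include <stdio.h>
#include <stdint.h>

#define BIT_MASK_N(n)   ((n) == 64 ? ~0ULL : (1ULL << (n)) - 1)

/* Stand-in for the platform's mask check; real code asks the DMA API. */
static int try_set_mask(uint64_t platform_max, uint64_t mask)
{
        return mask <= platform_max ? 0 : -1;   /* 0 on success, like the kernel */
}

int main(void)
{
        uint64_t want = BIT_MASK_N(64);
        int highdma = 0;

        if (try_set_mask(BIT_MASK_N(32), want) == 0)
                highdma = 1;
        else if (try_set_mask(BIT_MASK_N(32), BIT_MASK_N(32)) != 0)
                return 1;       /* no usable DMA at all: abort probe */

        printf("highdma=%d\n", highdma);        /* 0 on this 32-bit-only platform */
        return 0;
}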
7955 if (!(bp->flags & BNX2_FLAG_PCIE))
7956 bnx2_get_pci_speed(bp);
7958 /* 5706A0 may falsely detect SERR and PERR. */
7959 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
7960 reg = REG_RD(bp, PCI_COMMAND);
7961 reg &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
7962 REG_WR(bp, PCI_COMMAND, reg);
7964 else if ((CHIP_ID(bp) == CHIP_ID_5706_A1) &&
7965 !(bp->flags & BNX2_FLAG_PCIX)) {
7967 dev_err(&pdev->dev,
7968 "5706 A1 can only be used in a PCIX bus, aborting\n");
7969 goto err_out_unmap;
7972 bnx2_init_nvram(bp);
7974 reg = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_SIGNATURE);
7976 if ((reg & BNX2_SHM_HDR_SIGNATURE_SIG_MASK) ==
7977 BNX2_SHM_HDR_SIGNATURE_SIG) {
7978 u32 off = PCI_FUNC(pdev->devfn) << 2;
7980 bp->shmem_base = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_ADDR_0 + off);
7981 } else
7982 bp->shmem_base = HOST_VIEW_SHMEM_BASE;
7984 /* Get the permanent MAC address. First we need to make sure the
7985 * firmware is actually running.
7987 reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_SIGNATURE);
7989 if ((reg & BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
7990 BNX2_DEV_INFO_SIGNATURE_MAGIC) {
7991 dev_err(&pdev->dev, "Firmware not running, aborting\n");
7992 rc = -ENODEV;
7993 goto err_out_unmap;
7996 bnx2_read_vpd_fw_ver(bp);
7998 j = strlen(bp->fw_version);
7999 reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_BC_REV);
8000 for (i = 0; i < 3 && j < 24; i++) {
8001 u8 num, k, skip0;
8003 if (i == 0) {
8004 bp->fw_version[j++] = 'b';
8005 bp->fw_version[j++] = 'c';
8006 bp->fw_version[j++] = ' ';
8008 num = (u8) (reg >> (24 - (i * 8)));
8009 for (k = 100, skip0 = 1; k >= 1; num %= k, k /= 10) {
8010 if (num >= k || !skip0 || k == 1) {
8011 bp->fw_version[j++] = (num / k) + '0';
8012 skip0 = 0;
8015 if (i != 2)
8016 bp->fw_version[j++] = '.';
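The digit loop above prints each byte of the bootcode revision in decimal without leading zeros: k walks 100, 10, 1, and skip0 suppresses zero digits until the first significant one (k == 1 guarantees a lone 0 still prints). The same logic as a standalone sketch:

#include <stdio.h>

/* Print an 8-bit value in decimal with no leading zeros. */
static void put_dec_u8(unsigned int num, char *out)
{
        unsigned int k, skip0 = 1;

        for (k = 100; k >= 1; num %= k, k /= 10) {
                if (num >= k || !skip0 || k == 1) {
                        *out++ = (char)(num / k + '0');
                        skip0 = 0;
                }
        }
        *out = '\0';
}

int main(void)
{
        char buf[4];

        put_dec_u8(7, buf);
        printf("%s\n", buf);    /* "7", not "007" */
        return 0;
}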
8018 reg = bnx2_shmem_rd(bp, BNX2_PORT_FEATURE);
8019 if (reg & BNX2_PORT_FEATURE_WOL_ENABLED)
8020 bp->wol = 1;
8022 if (reg & BNX2_PORT_FEATURE_ASF_ENABLED) {
8023 bp->flags |= BNX2_FLAG_ASF_ENABLE;
8025 for (i = 0; i < 30; i++) {
8026 reg = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
8027 if (reg & BNX2_CONDITION_MFW_RUN_MASK)
8028 break;
8029 msleep(10);
8032 reg = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
8033 reg &= BNX2_CONDITION_MFW_RUN_MASK;
8034 if (reg != BNX2_CONDITION_MFW_RUN_UNKNOWN &&
8035 reg != BNX2_CONDITION_MFW_RUN_NONE) {
8036 u32 addr = bnx2_shmem_rd(bp, BNX2_MFW_VER_PTR);
8038 if (j < 32)
8039 bp->fw_version[j++] = ' ';
8040 for (i = 0; i < 3 && j < 28; i++) {
8041 reg = bnx2_reg_rd_ind(bp, addr + i * 4);
8042 reg = swab32(reg);
8043 memcpy(&bp->fw_version[j], &reg, 4);
8044 j += 4;
8048 reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_MAC_UPPER);
8049 bp->mac_addr[0] = (u8) (reg >> 8);
8050 bp->mac_addr[1] = (u8) reg;
8052 reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_MAC_LOWER);
8053 bp->mac_addr[2] = (u8) (reg >> 24);
8054 bp->mac_addr[3] = (u8) (reg >> 16);
8055 bp->mac_addr[4] = (u8) (reg >> 8);
8056 bp->mac_addr[5] = (u8) reg;
8058 bp->tx_ring_size = MAX_TX_DESC_CNT;
8059 bnx2_set_rx_ring_size(bp, 255);
8061 bp->rx_csum = 1;
8063 bp->tx_quick_cons_trip_int = 2;
8064 bp->tx_quick_cons_trip = 20;
8065 bp->tx_ticks_int = 18;
8066 bp->tx_ticks = 80;
8068 bp->rx_quick_cons_trip_int = 2;
8069 bp->rx_quick_cons_trip = 12;
8070 bp->rx_ticks_int = 18;
8071 bp->rx_ticks = 18;
8073 bp->stats_ticks = USEC_PER_SEC & BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
8075 bp->current_interval = BNX2_TIMER_INTERVAL;
8077 bp->phy_addr = 1;
8079 /* Disable WOL support if we are running on a SERDES chip. */
8080 if (CHIP_NUM(bp) == CHIP_NUM_5709)
8081 bnx2_get_5709_media(bp);
8082 else if (CHIP_BOND_ID(bp) & CHIP_BOND_ID_SERDES_BIT)
8083 bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
8085 bp->phy_port = PORT_TP;
8086 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
8087 bp->phy_port = PORT_FIBRE;
8088 reg = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
8089 if (!(reg & BNX2_SHARED_HW_CFG_GIG_LINK_ON_VAUX)) {
8090 bp->flags |= BNX2_FLAG_NO_WOL;
8091 bp->wol = 0;
8093 if (CHIP_NUM(bp) == CHIP_NUM_5706) {
8094 /* Don't do parallel detect on this board because of
8095 * some board problems. The link will not go down
8096 * if we do parallel detect.
8098 if (pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
8099 pdev->subsystem_device == 0x310c)
8100 bp->phy_flags |= BNX2_PHY_FLAG_NO_PARALLEL;
8101 } else {
8102 bp->phy_addr = 2;
8103 if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G)
8104 bp->phy_flags |= BNX2_PHY_FLAG_2_5G_CAPABLE;
8106 } else if (CHIP_NUM(bp) == CHIP_NUM_5706 ||
8107 CHIP_NUM(bp) == CHIP_NUM_5708)
8108 bp->phy_flags |= BNX2_PHY_FLAG_CRC_FIX;
8109 else if (CHIP_NUM(bp) == CHIP_NUM_5709 &&
8110 (CHIP_REV(bp) == CHIP_REV_Ax ||
8111 CHIP_REV(bp) == CHIP_REV_Bx))
8112 bp->phy_flags |= BNX2_PHY_FLAG_DIS_EARLY_DAC;
8114 bnx2_init_fw_cap(bp);
8116 if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
8117 (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
8118 (CHIP_ID(bp) == CHIP_ID_5708_B1) ||
8119 !(REG_RD(bp, BNX2_PCI_CONFIG_3) & BNX2_PCI_CONFIG_3_VAUX_PRESET)) {
8120 bp->flags |= BNX2_FLAG_NO_WOL;
8121 bp->wol = 0;
8124 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
8125 bp->tx_quick_cons_trip_int =
8126 bp->tx_quick_cons_trip;
8127 bp->tx_ticks_int = bp->tx_ticks;
8128 bp->rx_quick_cons_trip_int =
8129 bp->rx_quick_cons_trip;
8130 bp->rx_ticks_int = bp->rx_ticks;
8131 bp->comp_prod_trip_int = bp->comp_prod_trip;
8132 bp->com_ticks_int = bp->com_ticks;
8133 bp->cmd_ticks_int = bp->cmd_ticks;
8136 /* Disable MSI on 5706 if AMD 8132 bridge is found.
8138 * MSI is defined to be a 32-bit write. The 5706 does 64-bit MSI writes
8139 * with byte enables disabled on the unused 32-bit word. This is legal
8140 * but causes problems on the AMD 8132 which will eventually stop
8141 * responding after a while.
8143 * AMD believes this incompatibility is unique to the 5706, and
8144 * prefers to locally disable MSI rather than globally disabling it.
8146 if (CHIP_NUM(bp) == CHIP_NUM_5706 && disable_msi == 0) {
8147 struct pci_dev *amd_8132 = NULL;
8149 while ((amd_8132 = pci_get_device(PCI_VENDOR_ID_AMD,
8150 PCI_DEVICE_ID_AMD_8132_BRIDGE,
8151 amd_8132))) {
8153 if (amd_8132->revision >= 0x10 &&
8154 amd_8132->revision <= 0x13) {
8155 disable_msi = 1;
8156 pci_dev_put(amd_8132);
8157 break;
8162 bnx2_set_default_link(bp);
8163 bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
8165 init_timer(&bp->timer);
8166 bp->timer.expires = RUN_AT(BNX2_TIMER_INTERVAL);
8167 bp->timer.data = (unsigned long) bp;
8168 bp->timer.function = bnx2_timer;
8170 return 0;
8172 err_out_unmap:
8173 if (bp->regview) {
8174 iounmap(bp->regview);
8175 bp->regview = NULL;
8178 err_out_release:
8179 pci_release_regions(pdev);
8181 err_out_disable:
8182 pci_disable_device(pdev);
8183 pci_set_drvdata(pdev, NULL);
8185 err_out:
8186 return rc;
8189 static char * __devinit
8190 bnx2_bus_string(struct bnx2 *bp, char *str)
8192 char *s = str;
8194 if (bp->flags & BNX2_FLAG_PCIE) {
8195 s += sprintf(s, "PCI Express");
8196 } else {
8197 s += sprintf(s, "PCI");
8198 if (bp->flags & BNX2_FLAG_PCIX)
8199 s += sprintf(s, "-X");
8200 if (bp->flags & BNX2_FLAG_PCI_32BIT)
8201 s += sprintf(s, " 32-bit");
8202 else
8203 s += sprintf(s, " 64-bit");
8204 s += sprintf(s, " %dMHz", bp->bus_speed_mhz);
8206 return str;
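bnx2_bus_string() builds the description by letting each sprintf() return the number of characters written and advancing the cursor. A minimal standalone version of the pattern (the 40-byte buffer matches the caller's, the flag values here are made up):

#include <stdio.h>

int main(void)
{
        char str[40], *s = str;
        int pcix = 1, bit32 = 0, mhz = 133;

        s += sprintf(s, "PCI");
        if (pcix)
                s += sprintf(s, "-X");
        s += sprintf(s, bit32 ? " 32-bit" : " 64-bit");
        s += sprintf(s, " %dMHz", mhz);
        printf("%s\n", str);    /* "PCI-X 64-bit 133MHz" */
        return 0;
}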
8209 static void __devinit
8210 bnx2_init_napi(struct bnx2 *bp)
8212 int i;
8214 for (i = 0; i < bp->irq_nvecs; i++) {
8215 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
8216 int (*poll)(struct napi_struct *, int);
8218 if (i == 0)
8219 poll = bnx2_poll;
8220 else
8221 poll = bnx2_poll_msix;
8223 netif_napi_add(bp->dev, &bp->bnx2_napi[i].napi, poll, 64);
8224 bnapi->bp = bp;
8228 static const struct net_device_ops bnx2_netdev_ops = {
8229 .ndo_open = bnx2_open,
8230 .ndo_start_xmit = bnx2_start_xmit,
8231 .ndo_stop = bnx2_close,
8232 .ndo_get_stats = bnx2_get_stats,
8233 .ndo_set_rx_mode = bnx2_set_rx_mode,
8234 .ndo_do_ioctl = bnx2_ioctl,
8235 .ndo_validate_addr = eth_validate_addr,
8236 .ndo_set_mac_address = bnx2_change_mac_addr,
8237 .ndo_change_mtu = bnx2_change_mtu,
8238 .ndo_tx_timeout = bnx2_tx_timeout,
8239 #ifdef BCM_VLAN
8240 .ndo_vlan_rx_register = bnx2_vlan_rx_register,
8241 #endif
8242 #ifdef CONFIG_NET_POLL_CONTROLLER
8243 .ndo_poll_controller = poll_bnx2,
8244 #endif
8247 static inline void vlan_features_add(struct net_device *dev, unsigned long flags)
8249 #ifdef BCM_VLAN
8250 dev->vlan_features |= flags;
8251 #endif
8254 static int __devinit
8255 bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
8257 static int version_printed = 0;
8258 struct net_device *dev = NULL;
8259 struct bnx2 *bp;
8260 int rc;
8261 char str[40];
8263 if (version_printed++ == 0)
8264 pr_info("%s", version);
8266 /* dev zeroed in alloc_etherdev_mq */
8267 dev = alloc_etherdev_mq(sizeof(*bp), TX_MAX_RINGS);
8269 if (!dev)
8270 return -ENOMEM;
8272 rc = bnx2_init_board(pdev, dev);
8273 if (rc < 0) {
8274 free_netdev(dev);
8275 return rc;
8278 dev->netdev_ops = &bnx2_netdev_ops;
8279 dev->watchdog_timeo = TX_TIMEOUT;
8280 dev->ethtool_ops = &bnx2_ethtool_ops;
8282 bp = netdev_priv(dev);
8284 pci_set_drvdata(pdev, dev);
8286 rc = bnx2_request_firmware(bp);
8287 if (rc)
8288 goto error;
8290 memcpy(dev->dev_addr, bp->mac_addr, 6);
8291 memcpy(dev->perm_addr, bp->mac_addr, 6);
8293 dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
8294 vlan_features_add(dev, NETIF_F_IP_CSUM | NETIF_F_SG);
8295 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
8296 dev->features |= NETIF_F_IPV6_CSUM;
8297 vlan_features_add(dev, NETIF_F_IPV6_CSUM);
8299 #ifdef BCM_VLAN
8300 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
8301 #endif
8302 dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
8303 vlan_features_add(dev, NETIF_F_TSO | NETIF_F_TSO_ECN);
8304 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
8305 dev->features |= NETIF_F_TSO6;
8306 vlan_features_add(dev, NETIF_F_TSO6);
8308 if ((rc = register_netdev(dev))) {
8309 dev_err(&pdev->dev, "Cannot register net device\n");
8310 goto error;
8313 netdev_info(dev, "%s (%c%d) %s found at mem %lx, IRQ %d, node addr %pM\n",
8314 board_info[ent->driver_data].name,
8315 ((CHIP_ID(bp) & 0xf000) >> 12) + 'A',
8316 ((CHIP_ID(bp) & 0x0ff0) >> 4),
8317 bnx2_bus_string(bp, str),
8318 dev->base_addr,
8319 bp->pdev->irq, dev->dev_addr);
8321 return 0;
8323 error:
8324 if (bp->mips_firmware)
8325 release_firmware(bp->mips_firmware);
8326 if (bp->rv2p_firmware)
8327 release_firmware(bp->rv2p_firmware);
8329 if (bp->regview)
8330 iounmap(bp->regview);
8331 pci_release_regions(pdev);
8332 pci_disable_device(pdev);
8333 pci_set_drvdata(pdev, NULL);
8334 free_netdev(dev);
8335 return rc;
8338 static void __devexit
8339 bnx2_remove_one(struct pci_dev *pdev)
8341 struct net_device *dev = pci_get_drvdata(pdev);
8342 struct bnx2 *bp = netdev_priv(dev);
8344 flush_scheduled_work();
8346 unregister_netdev(dev);
8348 if (bp->mips_firmware)
8349 release_firmware(bp->mips_firmware);
8350 if (bp->rv2p_firmware)
8351 release_firmware(bp->rv2p_firmware);
8353 if (bp->regview)
8354 iounmap(bp->regview);
8356 kfree(bp->temp_stats_blk);
8358 free_netdev(dev);
8359 pci_release_regions(pdev);
8360 pci_disable_device(pdev);
8361 pci_set_drvdata(pdev, NULL);
8364 static int
8365 bnx2_suspend(struct pci_dev *pdev, pm_message_t state)
8367 struct net_device *dev = pci_get_drvdata(pdev);
8368 struct bnx2 *bp = netdev_priv(dev);
8370 /* PCI register 4 needs to be saved whether netif_running() or not.
8371 * MSI address and data need to be saved if using MSI and
8372 * netif_running().
8374 pci_save_state(pdev);
8375 if (!netif_running(dev))
8376 return 0;
8378 flush_scheduled_work();
8379 bnx2_netif_stop(bp);
8380 netif_device_detach(dev);
8381 del_timer_sync(&bp->timer);
8382 bnx2_shutdown_chip(bp);
8383 bnx2_free_skbs(bp);
8384 bnx2_set_power_state(bp, pci_choose_state(pdev, state));
8385 return 0;
8388 static int
8389 bnx2_resume(struct pci_dev *pdev)
8391 struct net_device *dev = pci_get_drvdata(pdev);
8392 struct bnx2 *bp = netdev_priv(dev);
8394 pci_restore_state(pdev);
8395 if (!netif_running(dev))
8396 return 0;
8398 bnx2_set_power_state(bp, PCI_D0);
8399 netif_device_attach(dev);
8400 bnx2_init_nic(bp, 1);
8401 bnx2_netif_start(bp);
8402 return 0;
8406 * bnx2_io_error_detected - called when PCI error is detected
8407 * @pdev: Pointer to PCI device
8408 * @state: The current pci connection state
8410 * This function is called after a PCI bus error affecting
8411 * this device has been detected.
8413 static pci_ers_result_t bnx2_io_error_detected(struct pci_dev *pdev,
8414 pci_channel_state_t state)
8416 struct net_device *dev = pci_get_drvdata(pdev);
8417 struct bnx2 *bp = netdev_priv(dev);
8419 rtnl_lock();
8420 netif_device_detach(dev);
8422 if (state == pci_channel_io_perm_failure) {
8423 rtnl_unlock();
8424 return PCI_ERS_RESULT_DISCONNECT;
8427 if (netif_running(dev)) {
8428 bnx2_netif_stop(bp);
8429 del_timer_sync(&bp->timer);
8430 bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
8433 pci_disable_device(pdev);
8434 rtnl_unlock();
8436 /* Request a slot reset. */
8437 return PCI_ERS_RESULT_NEED_RESET;
8441 * bnx2_io_slot_reset - called after the pci bus has been reset.
8442 * @pdev: Pointer to PCI device
8444 * Restart the card from scratch, as if from a cold-boot.
8446 static pci_ers_result_t bnx2_io_slot_reset(struct pci_dev *pdev)
8448 struct net_device *dev = pci_get_drvdata(pdev);
8449 struct bnx2 *bp = netdev_priv(dev);
8451 rtnl_lock();
8452 if (pci_enable_device(pdev)) {
8453 dev_err(&pdev->dev,
8454 "Cannot re-enable PCI device after reset\n");
8455 rtnl_unlock();
8456 return PCI_ERS_RESULT_DISCONNECT;
8458 pci_set_master(pdev);
8459 pci_restore_state(pdev);
8460 pci_save_state(pdev);
8462 if (netif_running(dev)) {
8463 bnx2_set_power_state(bp, PCI_D0);
8464 bnx2_init_nic(bp, 1);
8467 rtnl_unlock();
8468 return PCI_ERS_RESULT_RECOVERED;
8472 * bnx2_io_resume - called when traffic can start flowing again.
8473 * @pdev: Pointer to PCI device
8475 * This callback is called when the error recovery driver tells us that
8476 * it's OK to resume normal operation.
8478 static void bnx2_io_resume(struct pci_dev *pdev)
8480 struct net_device *dev = pci_get_drvdata(pdev);
8481 struct bnx2 *bp = netdev_priv(dev);
8483 rtnl_lock();
8484 if (netif_running(dev))
8485 bnx2_netif_start(bp);
8487 netif_device_attach(dev);
8488 rtnl_unlock();
8491 static struct pci_error_handlers bnx2_err_handler = {
8492 .error_detected = bnx2_io_error_detected,
8493 .slot_reset = bnx2_io_slot_reset,
8494 .resume = bnx2_io_resume,
8497 static struct pci_driver bnx2_pci_driver = {
8498 .name = DRV_MODULE_NAME,
8499 .id_table = bnx2_pci_tbl,
8500 .probe = bnx2_init_one,
8501 .remove = __devexit_p(bnx2_remove_one),
8502 .suspend = bnx2_suspend,
8503 .resume = bnx2_resume,
8504 .err_handler = &bnx2_err_handler,
8507 static int __init bnx2_init(void)
8509 return pci_register_driver(&bnx2_pci_driver);
8512 static void __exit bnx2_cleanup(void)
8514 pci_unregister_driver(&bnx2_pci_driver);
8517 module_init(bnx2_init);
8518 module_exit(bnx2_cleanup);