/* bnx2.c: Broadcom NX2 network driver.
 *
 * Copyright (c) 2004-2009 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Written by: Michael Chan  (mchan@broadcom.com)
 */
#include <linux/module.h>
#include <linux/moduleparam.h>

#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <asm/page.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
#define BCM_VLAN 1
#endif
#include <net/ip.h>
#include <net/tcp.h>
#include <net/checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/prefetch.h>
#include <linux/cache.h>
#include <linux/firmware.h>
#include <linux/log2.h>
#include <linux/list.h>

#if defined(CONFIG_CNIC) || defined(CONFIG_CNIC_MODULE)
#define BCM_CNIC 1
#include "cnic_if.h"
#endif
#include "bnx2.h"
#include "bnx2_fw.h"
#define DRV_MODULE_NAME		"bnx2"
#define PFX DRV_MODULE_NAME	": "
#define DRV_MODULE_VERSION	"2.0.3"
#define DRV_MODULE_RELDATE	"Dec 03, 2009"
#define FW_MIPS_FILE_06		"bnx2/bnx2-mips-06-5.0.0.j3.fw"
#define FW_RV2P_FILE_06		"bnx2/bnx2-rv2p-06-5.0.0.j3.fw"
#define FW_MIPS_FILE_09		"bnx2/bnx2-mips-09-5.0.0.j3.fw"
#define FW_RV2P_FILE_09_Ax	"bnx2/bnx2-rv2p-09ax-5.0.0.j3.fw"
#define FW_RV2P_FILE_09		"bnx2/bnx2-rv2p-09-5.0.0.j3.fw"

#define RUN_AT(x) (jiffies + (x))

/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT  (5*HZ)
static char version[] __devinitdata =
	"Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708/5709/5716 Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FW_MIPS_FILE_06);
MODULE_FIRMWARE(FW_RV2P_FILE_06);
MODULE_FIRMWARE(FW_MIPS_FILE_09);
MODULE_FIRMWARE(FW_RV2P_FILE_09);
MODULE_FIRMWARE(FW_RV2P_FILE_09_Ax);

static int disable_msi = 0;

module_param(disable_msi, int, 0);
MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
typedef enum {
	BCM5706 = 0,
	NC370T,
	NC370I,
	BCM5706S,
	NC370F,
	BCM5708,
	BCM5708S,
	BCM5709,
	BCM5709S,
	BCM5716,
	BCM5716S,
} board_t;
/* indexed by board_t, above */
static struct {
	char *name;
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM5706 1000Base-T" },
	{ "HP NC370T Multifunction Gigabit Server Adapter" },
	{ "HP NC370i Multifunction Gigabit Server Adapter" },
	{ "Broadcom NetXtreme II BCM5706 1000Base-SX" },
	{ "HP NC370F Multifunction Gigabit Server Adapter" },
	{ "Broadcom NetXtreme II BCM5708 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5708 1000Base-SX" },
	{ "Broadcom NetXtreme II BCM5709 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5709 1000Base-SX" },
	{ "Broadcom NetXtreme II BCM5716 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5716 1000Base-SX" },
	};
static DEFINE_PCI_DEVICE_TABLE(bnx2_pci_tbl) = {
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
	  PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709S },
	{ PCI_VENDOR_ID_BROADCOM, 0x163b,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5716 },
	{ PCI_VENDOR_ID_BROADCOM, 0x163c,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5716S },
	{ 0, }
};
static const struct flash_spec flash_table[] =
{
#define BUFFERED_FLAGS		(BNX2_NV_BUFFERED | BNX2_NV_TRANSLATE)
#define NONBUFFERED_FLAGS	(BNX2_NV_WREN)
	/* Slow EEPROM */
	{0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
	 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - slow"},
	/* Expansion entry 0001 */
	{0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0001"},
	/* Saifun SA25F010 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
	 "Non-buffered flash (128kB)"},
	/* Saifun SA25F020 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
	 "Non-buffered flash (256kB)"},
	/* Expansion entry 0100 */
	{0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0100"},
	/* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
	{0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
	 "Entry 0101: ST M45PE10 (128kB non-buffered)"},
	/* Entry 0110: ST M45PE20 (non-buffered flash)*/
	{0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
	 "Entry 0110: ST M45PE20 (256kB non-buffered)"},
	/* Saifun SA25F005 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
	 "Non-buffered flash (64kB)"},
	/* Fast EEPROM */
	{0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
	 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - fast"},
	/* Expansion entry 1001 */
	{0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1001"},
	/* Expansion entry 1010 */
	{0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1010"},
	/* ATMEL AT45DB011B (buffered flash) */
	{0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
	 "Buffered flash (128kB)"},
	/* Expansion entry 1100 */
	{0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1100"},
	/* Expansion entry 1101 */
	{0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1101"},
	/* Atmel Expansion entry 1110 */
	{0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1110 (Atmel)"},
	/* ATMEL AT45DB021B (buffered flash) */
	{0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
	 "Buffered flash (256kB)"},
};
static const struct flash_spec flash_5709 = {
	.flags		= BNX2_NV_BUFFERED,
	.page_bits	= BCM5709_FLASH_PAGE_BITS,
	.page_size	= BCM5709_FLASH_PAGE_SIZE,
	.addr_mask	= BCM5709_FLASH_BYTE_ADDR_MASK,
	.total_size	= BUFFERED_FLASH_TOTAL_SIZE*2,
	.name		= "5709 Buffered flash (256kB)",
};

MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
static inline u32 bnx2_tx_avail(struct bnx2 *bp, struct bnx2_tx_ring_info *txr)
{
	u32 diff;

	smp_mb();

	/* The ring uses 256 indices for 255 entries, one of them
	 * needs to be skipped.
	 */
	diff = txr->tx_prod - txr->tx_cons;
	if (unlikely(diff >= TX_DESC_CNT)) {
		diff &= 0xffff;
		if (diff == TX_DESC_CNT)
			diff = MAX_TX_DESC_CNT;
	}
	return (bp->tx_ring_size - diff);
}
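/* Indirect register access: a write to BNX2_PCICFG_REG_WINDOW_ADDRESS
 * selects an arbitrary internal register offset, and the data is then
 * read or written through the fixed BNX2_PCICFG_REG_WINDOW location.
 * indirect_lock serializes the two-step address/data sequence.
 */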
static u32
bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
{
	u32 val;

	spin_lock_bh(&bp->indirect_lock);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
	val = REG_RD(bp, BNX2_PCICFG_REG_WINDOW);
	spin_unlock_bh(&bp->indirect_lock);
	return val;
}

static void
bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
{
	spin_lock_bh(&bp->indirect_lock);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
	spin_unlock_bh(&bp->indirect_lock);
}
static void
bnx2_shmem_wr(struct bnx2 *bp, u32 offset, u32 val)
{
	bnx2_reg_wr_ind(bp, bp->shmem_base + offset, val);
}

static u32
bnx2_shmem_rd(struct bnx2 *bp, u32 offset)
{
	return (bnx2_reg_rd_ind(bp, bp->shmem_base + offset));
}
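/* Context memory writes: on the 5709 the value is posted through the
 * CTX_CTX_DATA/CTX_CTX_CTRL register pair and the WRITE_REQ bit is
 * polled (up to 5 * 5us) for completion; older chips write directly
 * through CTX_DATA_ADR/CTX_DATA.
 */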
static void
bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
{
	offset += cid_addr;
	spin_lock_bh(&bp->indirect_lock);
	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		int i;

		REG_WR(bp, BNX2_CTX_CTX_DATA, val);
		REG_WR(bp, BNX2_CTX_CTX_CTRL,
		       offset | BNX2_CTX_CTX_CTRL_WRITE_REQ);
		for (i = 0; i < 5; i++) {
			val = REG_RD(bp, BNX2_CTX_CTX_CTRL);
			if ((val & BNX2_CTX_CTX_CTRL_WRITE_REQ) == 0)
				break;
			udelay(5);
		}
	} else {
		REG_WR(bp, BNX2_CTX_DATA_ADR, offset);
		REG_WR(bp, BNX2_CTX_DATA, val);
	}
	spin_unlock_bh(&bp->indirect_lock);
}
#ifdef BCM_CNIC
static int
bnx2_drv_ctl(struct net_device *dev, struct drv_ctl_info *info)
{
	struct bnx2 *bp = netdev_priv(dev);
	struct drv_ctl_io *io = &info->data.io;

	switch (info->cmd) {
	case DRV_CTL_IO_WR_CMD:
		bnx2_reg_wr_ind(bp, io->offset, io->data);
		break;
	case DRV_CTL_IO_RD_CMD:
		io->data = bnx2_reg_rd_ind(bp, io->offset);
		break;
	case DRV_CTL_CTX_WR_CMD:
		bnx2_ctx_wr(bp, io->cid_addr, io->offset, io->data);
		break;
	default:
		return -EINVAL;
	}
	return 0;
}
static void bnx2_setup_cnic_irq_info(struct bnx2 *bp)
{
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
	int sb_id;

	if (bp->flags & BNX2_FLAG_USING_MSIX) {
		cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
		bnapi->cnic_present = 0;
		sb_id = bp->irq_nvecs;
		cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
	} else {
		cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
		bnapi->cnic_tag = bnapi->last_status_idx;
		bnapi->cnic_present = 1;
		sb_id = 0;
		cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
	}

	cp->irq_arr[0].vector = bp->irq_tbl[sb_id].vector;
	cp->irq_arr[0].status_blk = (void *)
		((unsigned long) bnapi->status_blk.msi +
		(BNX2_SBLK_MSIX_ALIGN_SIZE * sb_id));
	cp->irq_arr[0].status_blk_num = sb_id;
	cp->num_irq = 1;
}
static int bnx2_register_cnic(struct net_device *dev, struct cnic_ops *ops,
			      void *data)
{
	struct bnx2 *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	if (ops == NULL)
		return -EINVAL;

	if (cp->drv_state & CNIC_DRV_STATE_REGD)
		return -EBUSY;

	bp->cnic_data = data;
	rcu_assign_pointer(bp->cnic_ops, ops);

	cp->num_irq = 0;
	cp->drv_state = CNIC_DRV_STATE_REGD;

	bnx2_setup_cnic_irq_info(bp);

	return 0;
}

static int bnx2_unregister_cnic(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	mutex_lock(&bp->cnic_lock);
	cp->drv_state = 0;
	bnapi->cnic_present = 0;
	rcu_assign_pointer(bp->cnic_ops, NULL);
	mutex_unlock(&bp->cnic_lock);
	synchronize_rcu();
	return 0;
}
struct cnic_eth_dev *bnx2_cnic_probe(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	cp->drv_owner = THIS_MODULE;
	cp->chip_id = bp->chip_id;
	cp->pdev = bp->pdev;
	cp->io_base = bp->regview;
	cp->drv_ctl = bnx2_drv_ctl;
	cp->drv_register_cnic = bnx2_register_cnic;
	cp->drv_unregister_cnic = bnx2_unregister_cnic;

	return cp;
}
EXPORT_SYMBOL(bnx2_cnic_probe);
static void
bnx2_cnic_stop(struct bnx2 *bp)
{
	struct cnic_ops *c_ops;
	struct cnic_ctl_info info;

	mutex_lock(&bp->cnic_lock);
	c_ops = bp->cnic_ops;
	if (c_ops) {
		info.cmd = CNIC_CTL_STOP_CMD;
		c_ops->cnic_ctl(bp->cnic_data, &info);
	}
	mutex_unlock(&bp->cnic_lock);
}

static void
bnx2_cnic_start(struct bnx2 *bp)
{
	struct cnic_ops *c_ops;
	struct cnic_ctl_info info;

	mutex_lock(&bp->cnic_lock);
	c_ops = bp->cnic_ops;
	if (c_ops) {
		if (!(bp->flags & BNX2_FLAG_USING_MSIX)) {
			struct bnx2_napi *bnapi = &bp->bnx2_napi[0];

			bnapi->cnic_tag = bnapi->last_status_idx;
		}
		info.cmd = CNIC_CTL_START_CMD;
		c_ops->cnic_ctl(bp->cnic_data, &info);
	}
	mutex_unlock(&bp->cnic_lock);
}

#else

static void
bnx2_cnic_stop(struct bnx2 *bp)
{
}

static void
bnx2_cnic_start(struct bnx2 *bp)
{
}

#endif
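/* MDIO access helpers.  When the EMAC is auto-polling the PHY, auto-poll
 * is turned off around the manual MDIO transaction and restored
 * afterwards.  Completion is detected by polling the COMM register's
 * START_BUSY bit for up to 50 * 10us.
 */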
static int
bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
{
	u32 val1;
	int i, ret;

	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	val1 = (bp->phy_addr << 21) | (reg << 16) |
		BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
		BNX2_EMAC_MDIO_COMM_START_BUSY;
	REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

	for (i = 0; i < 50; i++) {
		udelay(10);

		val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
			udelay(5);

			val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
			val1 &= BNX2_EMAC_MDIO_COMM_DATA;

			break;
		}
	}

	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
		*val = 0x0;
		ret = -EBUSY;
	}
	else {
		*val = val1;
		ret = 0;
	}

	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	return ret;
}
static int
bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
{
	u32 val1;
	int i, ret;

	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	val1 = (bp->phy_addr << 21) | (reg << 16) | val |
		BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
		BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
	REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

	for (i = 0; i < 50; i++) {
		udelay(10);

		val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
			udelay(5);
			break;
		}
	}

	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
		ret = -EBUSY;
	else
		ret = 0;

	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	return ret;
}
static void
bnx2_disable_int(struct bnx2 *bp)
{
	int i;
	struct bnx2_napi *bnapi;

	for (i = 0; i < bp->irq_nvecs; i++) {
		bnapi = &bp->bnx2_napi[i];
		REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
		       BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
	}
	REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
}

static void
bnx2_enable_int(struct bnx2 *bp)
{
	int i;
	struct bnx2_napi *bnapi;

	for (i = 0; i < bp->irq_nvecs; i++) {
		bnapi = &bp->bnx2_napi[i];

		REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
		       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
		       BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
		       bnapi->last_status_idx);

		REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
		       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
		       bnapi->last_status_idx);
	}
	REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
}
static void
bnx2_disable_int_sync(struct bnx2 *bp)
{
	int i;

	atomic_inc(&bp->intr_sem);
	if (!netif_running(bp->dev))
		return;

	bnx2_disable_int(bp);
	for (i = 0; i < bp->irq_nvecs; i++)
		synchronize_irq(bp->irq_tbl[i].vector);
}

static void
bnx2_napi_disable(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->irq_nvecs; i++)
		napi_disable(&bp->bnx2_napi[i].napi);
}

static void
bnx2_napi_enable(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->irq_nvecs; i++)
		napi_enable(&bp->bnx2_napi[i].napi);
}
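/* bnx2_netif_stop()/bnx2_netif_start() bracket operations that must run
 * with interrupts quiesced.  intr_sem is incremented on stop (via
 * bnx2_disable_int_sync()) and decremented on start, so with nested
 * stop/start pairs NAPI, interrupts and CNIC are only re-enabled when
 * the outermost start brings the count back to zero.
 */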
static void
bnx2_netif_stop(struct bnx2 *bp)
{
	bnx2_cnic_stop(bp);
	if (netif_running(bp->dev)) {
		int i;

		bnx2_napi_disable(bp);
		netif_tx_disable(bp->dev);
		/* prevent tx timeout */
		for (i = 0; i < bp->dev->num_tx_queues; i++) {
			struct netdev_queue *txq;

			txq = netdev_get_tx_queue(bp->dev, i);
			txq->trans_start = jiffies;
		}
	}
	bnx2_disable_int_sync(bp);
}

static void
bnx2_netif_start(struct bnx2 *bp)
{
	if (atomic_dec_and_test(&bp->intr_sem)) {
		if (netif_running(bp->dev)) {
			netif_tx_wake_all_queues(bp->dev);
			bnx2_napi_enable(bp);
			bnx2_enable_int(bp);
			bnx2_cnic_start(bp);
		}
	}
}
static void
bnx2_free_tx_mem(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->num_tx_rings; i++) {
		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
		struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;

		if (txr->tx_desc_ring) {
			pci_free_consistent(bp->pdev, TXBD_RING_SIZE,
					    txr->tx_desc_ring,
					    txr->tx_desc_mapping);
			txr->tx_desc_ring = NULL;
		}
		kfree(txr->tx_buf_ring);
		txr->tx_buf_ring = NULL;
	}
}

static void
bnx2_free_rx_mem(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->num_rx_rings; i++) {
		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
		struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
		int j;

		for (j = 0; j < bp->rx_max_ring; j++) {
			if (rxr->rx_desc_ring[j])
				pci_free_consistent(bp->pdev, RXBD_RING_SIZE,
						    rxr->rx_desc_ring[j],
						    rxr->rx_desc_mapping[j]);
			rxr->rx_desc_ring[j] = NULL;
		}
		vfree(rxr->rx_buf_ring);
		rxr->rx_buf_ring = NULL;

		for (j = 0; j < bp->rx_max_pg_ring; j++) {
			if (rxr->rx_pg_desc_ring[j])
				pci_free_consistent(bp->pdev, RXBD_RING_SIZE,
						    rxr->rx_pg_desc_ring[j],
						    rxr->rx_pg_desc_mapping[j]);
			rxr->rx_pg_desc_ring[j] = NULL;
		}
		vfree(rxr->rx_pg_ring);
		rxr->rx_pg_ring = NULL;
	}
}
static int
bnx2_alloc_tx_mem(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->num_tx_rings; i++) {
		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
		struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;

		txr->tx_buf_ring = kzalloc(SW_TXBD_RING_SIZE, GFP_KERNEL);
		if (txr->tx_buf_ring == NULL)
			return -ENOMEM;

		txr->tx_desc_ring =
			pci_alloc_consistent(bp->pdev, TXBD_RING_SIZE,
					     &txr->tx_desc_mapping);
		if (txr->tx_desc_ring == NULL)
			return -ENOMEM;
	}
	return 0;
}
static int
bnx2_alloc_rx_mem(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->num_rx_rings; i++) {
		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
		struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
		int j;

		rxr->rx_buf_ring =
			vmalloc(SW_RXBD_RING_SIZE * bp->rx_max_ring);
		if (rxr->rx_buf_ring == NULL)
			return -ENOMEM;

		memset(rxr->rx_buf_ring, 0,
		       SW_RXBD_RING_SIZE * bp->rx_max_ring);

		for (j = 0; j < bp->rx_max_ring; j++) {
			rxr->rx_desc_ring[j] =
				pci_alloc_consistent(bp->pdev, RXBD_RING_SIZE,
						     &rxr->rx_desc_mapping[j]);
			if (rxr->rx_desc_ring[j] == NULL)
				return -ENOMEM;
		}

		if (bp->rx_pg_ring_size) {
			rxr->rx_pg_ring = vmalloc(SW_RXPG_RING_SIZE *
						  bp->rx_max_pg_ring);
			if (rxr->rx_pg_ring == NULL)
				return -ENOMEM;

			memset(rxr->rx_pg_ring, 0, SW_RXPG_RING_SIZE *
			       bp->rx_max_pg_ring);
		}

		for (j = 0; j < bp->rx_max_pg_ring; j++) {
			rxr->rx_pg_desc_ring[j] =
				pci_alloc_consistent(bp->pdev, RXBD_RING_SIZE,
						&rxr->rx_pg_desc_mapping[j]);
			if (rxr->rx_pg_desc_ring[j] == NULL)
				return -ENOMEM;
		}
	}
	return 0;
}
static void
bnx2_free_mem(struct bnx2 *bp)
{
	int i;
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];

	bnx2_free_tx_mem(bp);
	bnx2_free_rx_mem(bp);

	for (i = 0; i < bp->ctx_pages; i++) {
		if (bp->ctx_blk[i]) {
			pci_free_consistent(bp->pdev, BCM_PAGE_SIZE,
					    bp->ctx_blk[i],
					    bp->ctx_blk_mapping[i]);
			bp->ctx_blk[i] = NULL;
		}
	}
	if (bnapi->status_blk.msi) {
		pci_free_consistent(bp->pdev, bp->status_stats_size,
				    bnapi->status_blk.msi,
				    bp->status_blk_mapping);
		bnapi->status_blk.msi = NULL;
		bp->stats_blk = NULL;
	}
}
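/* The status block and statistics block share one DMA-coherent
 * allocation: the status block(s) come first (one aligned slot per
 * hardware vector when MSIX_CAP is set), with the statistics block
 * immediately after at status_blk + status_blk_size.
 */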
static int
bnx2_alloc_mem(struct bnx2 *bp)
{
	int i, status_blk_size, err;
	struct bnx2_napi *bnapi;
	void *status_blk;

	/* Combine status and statistics blocks into one allocation. */
	status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
	if (bp->flags & BNX2_FLAG_MSIX_CAP)
		status_blk_size = L1_CACHE_ALIGN(BNX2_MAX_MSIX_HW_VEC *
						 BNX2_SBLK_MSIX_ALIGN_SIZE);
	bp->status_stats_size = status_blk_size +
				sizeof(struct statistics_block);

	status_blk = pci_alloc_consistent(bp->pdev, bp->status_stats_size,
					  &bp->status_blk_mapping);
	if (status_blk == NULL)
		goto alloc_mem_err;

	memset(status_blk, 0, bp->status_stats_size);

	bnapi = &bp->bnx2_napi[0];
	bnapi->status_blk.msi = status_blk;
	bnapi->hw_tx_cons_ptr =
		&bnapi->status_blk.msi->status_tx_quick_consumer_index0;
	bnapi->hw_rx_cons_ptr =
		&bnapi->status_blk.msi->status_rx_quick_consumer_index0;
	if (bp->flags & BNX2_FLAG_MSIX_CAP) {
		for (i = 1; i < BNX2_MAX_MSIX_VEC; i++) {
			struct status_block_msix *sblk;

			bnapi = &bp->bnx2_napi[i];

			sblk = (void *) (status_blk +
					 BNX2_SBLK_MSIX_ALIGN_SIZE * i);
			bnapi->status_blk.msix = sblk;
			bnapi->hw_tx_cons_ptr =
				&sblk->status_tx_quick_consumer_index;
			bnapi->hw_rx_cons_ptr =
				&sblk->status_rx_quick_consumer_index;
			bnapi->int_num = i << 24;
		}
	}

	bp->stats_blk = status_blk + status_blk_size;

	bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		bp->ctx_pages = 0x2000 / BCM_PAGE_SIZE;
		if (bp->ctx_pages == 0)
			bp->ctx_pages = 1;
		for (i = 0; i < bp->ctx_pages; i++) {
			bp->ctx_blk[i] = pci_alloc_consistent(bp->pdev,
						BCM_PAGE_SIZE,
						&bp->ctx_blk_mapping[i]);
			if (bp->ctx_blk[i] == NULL)
				goto alloc_mem_err;
		}
	}

	err = bnx2_alloc_rx_mem(bp);
	if (err)
		goto alloc_mem_err;

	err = bnx2_alloc_tx_mem(bp);
	if (err)
		goto alloc_mem_err;

	return 0;

alloc_mem_err:
	bnx2_free_mem(bp);
	return -ENOMEM;
}
static void
bnx2_report_fw_link(struct bnx2 *bp)
{
	u32 fw_link_status = 0;

	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return;

	if (bp->link_up) {
		u32 bmsr;

		switch (bp->line_speed) {
		case SPEED_10:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_10HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_10FULL;
			break;
		case SPEED_100:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_100HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_100FULL;
			break;
		case SPEED_1000:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_1000HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_1000FULL;
			break;
		case SPEED_2500:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_2500HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_2500FULL;
			break;
		}

		fw_link_status |= BNX2_LINK_STATUS_LINK_UP;

		if (bp->autoneg) {
			fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;

			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);

			if (!(bmsr & BMSR_ANEGCOMPLETE) ||
			    bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)
				fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
			else
				fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
		}
	}
	else
		fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;

	bnx2_shmem_wr(bp, BNX2_LINK_STATUS, fw_link_status);
}
static char *
bnx2_xceiver_str(struct bnx2 *bp)
{
	return ((bp->phy_port == PORT_FIBRE) ? "SerDes" :
		((bp->phy_flags & BNX2_PHY_FLAG_SERDES) ? "Remote Copper" :
		 "Copper"));
}

static void
bnx2_report_link(struct bnx2 *bp)
{
	if (bp->link_up) {
		netif_carrier_on(bp->dev);
		printk(KERN_INFO PFX "%s NIC %s Link is Up, ", bp->dev->name,
		       bnx2_xceiver_str(bp));

		printk("%d Mbps ", bp->line_speed);

		if (bp->duplex == DUPLEX_FULL)
			printk("full duplex");
		else
			printk("half duplex");

		if (bp->flow_ctrl) {
			if (bp->flow_ctrl & FLOW_CTRL_RX) {
				printk(", receive ");
				if (bp->flow_ctrl & FLOW_CTRL_TX)
					printk("& transmit ");
			}
			else {
				printk(", transmit ");
			}
			printk("flow control ON");
		}
		printk("\n");
	}
	else {
		netif_carrier_off(bp->dev);
		printk(KERN_ERR PFX "%s NIC %s Link is Down\n", bp->dev->name,
		       bnx2_xceiver_str(bp));
	}

	bnx2_report_fw_link(bp);
}
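/* Pause resolution follows IEEE 802.3 Table 28B-3, based on the PAUSE
 * and ASM_DIR (asymmetric) bits advertised by each side:
 *
 *	local PAUSE        + remote PAUSE            -> TX and RX pause
 *	local PAUSE+ASM_DIR + remote ASM_DIR only    -> RX pause only
 *	local ASM_DIR only + remote PAUSE+ASM_DIR    -> TX pause only
 *	anything else                                -> no pause
 */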
static void
bnx2_resolve_flow_ctrl(struct bnx2 *bp)
{
	u32 local_adv, remote_adv;

	bp->flow_ctrl = 0;
	if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
	    (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {

		if (bp->duplex == DUPLEX_FULL) {
			bp->flow_ctrl = bp->req_flow_ctrl;
		}
		return;
	}

	if (bp->duplex != DUPLEX_FULL) {
		return;
	}

	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5708)) {
		u32 val;

		bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
		if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_TX;
		if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_RX;
		return;
	}

	bnx2_read_phy(bp, bp->mii_adv, &local_adv);
	bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		u32 new_local_adv = 0;
		u32 new_remote_adv = 0;

		if (local_adv & ADVERTISE_1000XPAUSE)
			new_local_adv |= ADVERTISE_PAUSE_CAP;
		if (local_adv & ADVERTISE_1000XPSE_ASYM)
			new_local_adv |= ADVERTISE_PAUSE_ASYM;
		if (remote_adv & ADVERTISE_1000XPAUSE)
			new_remote_adv |= ADVERTISE_PAUSE_CAP;
		if (remote_adv & ADVERTISE_1000XPSE_ASYM)
			new_remote_adv |= ADVERTISE_PAUSE_ASYM;

		local_adv = new_local_adv;
		remote_adv = new_remote_adv;
	}

	/* See Table 28B-3 of 802.3ab-1999 spec. */
	if (local_adv & ADVERTISE_PAUSE_CAP) {
		if (local_adv & ADVERTISE_PAUSE_ASYM) {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
			else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
				bp->flow_ctrl = FLOW_CTRL_RX;
			}
		}
		else {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
		}
	}
	else if (local_adv & ADVERTISE_PAUSE_ASYM) {
		if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
		    (remote_adv & ADVERTISE_PAUSE_ASYM)) {

			bp->flow_ctrl = FLOW_CTRL_TX;
		}
	}
}
static int
bnx2_5709s_linkup(struct bnx2 *bp)
{
	u32 val, speed;

	bp->link_up = 1;

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_GP_STATUS);
	bnx2_read_phy(bp, MII_BNX2_GP_TOP_AN_STATUS1, &val);
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	if ((bp->autoneg & AUTONEG_SPEED) == 0) {
		bp->line_speed = bp->req_line_speed;
		bp->duplex = bp->req_duplex;
		return 0;
	}
	speed = val & MII_BNX2_GP_TOP_AN_SPEED_MSK;
	switch (speed) {
	case MII_BNX2_GP_TOP_AN_SPEED_10:
		bp->line_speed = SPEED_10;
		break;
	case MII_BNX2_GP_TOP_AN_SPEED_100:
		bp->line_speed = SPEED_100;
		break;
	case MII_BNX2_GP_TOP_AN_SPEED_1G:
	case MII_BNX2_GP_TOP_AN_SPEED_1GKV:
		bp->line_speed = SPEED_1000;
		break;
	case MII_BNX2_GP_TOP_AN_SPEED_2_5G:
		bp->line_speed = SPEED_2500;
		break;
	}
	if (val & MII_BNX2_GP_TOP_AN_FD)
		bp->duplex = DUPLEX_FULL;
	else
		bp->duplex = DUPLEX_HALF;
	return 0;
}
static int
bnx2_5708s_linkup(struct bnx2 *bp)
{
	u32 val;

	bp->link_up = 1;
	bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
	switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
	case BCM5708S_1000X_STAT1_SPEED_10:
		bp->line_speed = SPEED_10;
		break;
	case BCM5708S_1000X_STAT1_SPEED_100:
		bp->line_speed = SPEED_100;
		break;
	case BCM5708S_1000X_STAT1_SPEED_1G:
		bp->line_speed = SPEED_1000;
		break;
	case BCM5708S_1000X_STAT1_SPEED_2G5:
		bp->line_speed = SPEED_2500;
		break;
	}
	if (val & BCM5708S_1000X_STAT1_FD)
		bp->duplex = DUPLEX_FULL;
	else
		bp->duplex = DUPLEX_HALF;

	return 0;
}
static int
bnx2_5706s_linkup(struct bnx2 *bp)
{
	u32 bmcr, local_adv, remote_adv, common;

	bp->link_up = 1;
	bp->line_speed = SPEED_1000;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
	if (bmcr & BMCR_FULLDPLX) {
		bp->duplex = DUPLEX_FULL;
	}
	else {
		bp->duplex = DUPLEX_HALF;
	}

	if (!(bmcr & BMCR_ANENABLE)) {
		return 0;
	}

	bnx2_read_phy(bp, bp->mii_adv, &local_adv);
	bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

	common = local_adv & remote_adv;
	if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {

		if (common & ADVERTISE_1000XFULL) {
			bp->duplex = DUPLEX_FULL;
		}
		else {
			bp->duplex = DUPLEX_HALF;
		}
	}

	return 0;
}
static int
bnx2_copper_linkup(struct bnx2 *bp)
{
	u32 bmcr;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
	if (bmcr & BMCR_ANENABLE) {
		u32 local_adv, remote_adv, common;

		bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
		bnx2_read_phy(bp, MII_STAT1000, &remote_adv);

		common = local_adv & (remote_adv >> 2);
		if (common & ADVERTISE_1000FULL) {
			bp->line_speed = SPEED_1000;
			bp->duplex = DUPLEX_FULL;
		}
		else if (common & ADVERTISE_1000HALF) {
			bp->line_speed = SPEED_1000;
			bp->duplex = DUPLEX_HALF;
		}
		else {
			bnx2_read_phy(bp, bp->mii_adv, &local_adv);
			bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

			common = local_adv & remote_adv;
			if (common & ADVERTISE_100FULL) {
				bp->line_speed = SPEED_100;
				bp->duplex = DUPLEX_FULL;
			}
			else if (common & ADVERTISE_100HALF) {
				bp->line_speed = SPEED_100;
				bp->duplex = DUPLEX_HALF;
			}
			else if (common & ADVERTISE_10FULL) {
				bp->line_speed = SPEED_10;
				bp->duplex = DUPLEX_FULL;
			}
			else if (common & ADVERTISE_10HALF) {
				bp->line_speed = SPEED_10;
				bp->duplex = DUPLEX_HALF;
			}
			else {
				bp->line_speed = 0;
				bp->link_up = 0;
			}
		}
	}
	else {
		if (bmcr & BMCR_SPEED100) {
			bp->line_speed = SPEED_100;
		}
		else {
			bp->line_speed = SPEED_10;
		}
		if (bmcr & BMCR_FULLDPLX) {
			bp->duplex = DUPLEX_FULL;
		}
		else {
			bp->duplex = DUPLEX_HALF;
		}
	}

	return 0;
}
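/* On the 5709, the RX context carries flow-control watermarks derived
 * from the ring size: the high watermark is a quarter of the ring, the
 * low watermark defaults to BNX2_L2CTX_LO_WATER_MARK_DEFAULT (or is
 * disabled when TX pause is off), and both are scaled down into small
 * fields of the context type word (the high mark capped at 0xf).
 */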
static void
bnx2_init_rx_context(struct bnx2 *bp, u32 cid)
{
	u32 val, rx_cid_addr = GET_CID_ADDR(cid);

	val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
	val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
	val |= 0x02 << 8;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		u32 lo_water, hi_water;

		if (bp->flow_ctrl & FLOW_CTRL_TX)
			lo_water = BNX2_L2CTX_LO_WATER_MARK_DEFAULT;
		else
			lo_water = BNX2_L2CTX_LO_WATER_MARK_DIS;
		if (lo_water >= bp->rx_ring_size)
			lo_water = 0;

		hi_water = bp->rx_ring_size / 4;

		if (hi_water <= lo_water)
			lo_water = 0;

		hi_water /= BNX2_L2CTX_HI_WATER_MARK_SCALE;
		lo_water /= BNX2_L2CTX_LO_WATER_MARK_SCALE;

		if (hi_water > 0xf)
			hi_water = 0xf;
		else if (hi_water == 0)
			lo_water = 0;
		val |= lo_water | (hi_water << BNX2_L2CTX_HI_WATER_MARK_SHIFT);
	}
	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_CTX_TYPE, val);
}

static void
bnx2_init_all_rx_contexts(struct bnx2 *bp)
{
	int i;
	u32 cid;

	for (i = 0, cid = RX_CID; i < bp->num_rx_rings; i++, cid++) {
		if (i == 1)
			cid = RX_RSS_CID;
		bnx2_init_rx_context(bp, cid);
	}
}
static void
bnx2_set_mac_link(struct bnx2 *bp)
{
	u32 val;

	REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
	if (bp->link_up && (bp->line_speed == SPEED_1000) &&
	    (bp->duplex == DUPLEX_HALF)) {
		REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
	}

	/* Configure the EMAC mode register. */
	val = REG_RD(bp, BNX2_EMAC_MODE);

	val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
		 BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
		 BNX2_EMAC_MODE_25G_MODE);

	if (bp->link_up) {
		switch (bp->line_speed) {
		case SPEED_10:
			if (CHIP_NUM(bp) != CHIP_NUM_5706) {
				val |= BNX2_EMAC_MODE_PORT_MII_10M;
				break;
			}
			/* fall through */
		case SPEED_100:
			val |= BNX2_EMAC_MODE_PORT_MII;
			break;
		case SPEED_2500:
			val |= BNX2_EMAC_MODE_25G_MODE;
			/* fall through */
		case SPEED_1000:
			val |= BNX2_EMAC_MODE_PORT_GMII;
			break;
		}
	}
	else {
		val |= BNX2_EMAC_MODE_PORT_GMII;
	}

	/* Set the MAC to operate in the appropriate duplex mode. */
	if (bp->duplex == DUPLEX_HALF)
		val |= BNX2_EMAC_MODE_HALF_DUPLEX;
	REG_WR(bp, BNX2_EMAC_MODE, val);

	/* Enable/disable rx PAUSE. */
	bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_RX)
		bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);

	/* Enable/disable tx PAUSE. */
	val = REG_RD(bp, BNX2_EMAC_TX_MODE);
	val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_TX)
		val |= BNX2_EMAC_TX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_TX_MODE, val);

	/* Acknowledge the interrupt. */
	REG_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_init_all_rx_contexts(bp);
}
static void
bnx2_enable_bmsr1(struct bnx2 *bp)
{
	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5709))
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_GP_STATUS);
}

static void
bnx2_disable_bmsr1(struct bnx2 *bp)
{
	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5709))
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
}
static int
bnx2_test_and_enable_2g5(struct bnx2 *bp)
{
	u32 up1;
	int ret = 1;

	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
		return 0;

	if (bp->autoneg & AUTONEG_SPEED)
		bp->advertising |= ADVERTISED_2500baseX_Full;

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);

	bnx2_read_phy(bp, bp->mii_up1, &up1);
	if (!(up1 & BCM5708S_UP1_2G5)) {
		up1 |= BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, bp->mii_up1, up1);
		ret = 0;
	}

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	return ret;
}

static int
bnx2_test_and_disable_2g5(struct bnx2 *bp)
{
	u32 up1;
	int ret = 0;

	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
		return 0;

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);

	bnx2_read_phy(bp, bp->mii_up1, &up1);
	if (up1 & BCM5708S_UP1_2G5) {
		up1 &= ~BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, bp->mii_up1, up1);
		ret = 1;
	}

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	return ret;
}
static void
bnx2_enable_forced_2g5(struct bnx2 *bp)
{
	u32 bmcr;

	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
		return;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		u32 val;

		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_SERDES_DIG);
		bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
		val &= ~MII_BNX2_SD_MISC1_FORCE_MSK;
		val |= MII_BNX2_SD_MISC1_FORCE | MII_BNX2_SD_MISC1_FORCE_2_5G;
		bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);

		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		bmcr |= BCM5708S_BMCR_FORCE_2500;
	} else {
		return;
	}

	if (bp->autoneg & AUTONEG_SPEED) {
		bmcr &= ~BMCR_ANENABLE;
		if (bp->req_duplex == DUPLEX_FULL)
			bmcr |= BMCR_FULLDPLX;
	}
	bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
}

static void
bnx2_disable_forced_2g5(struct bnx2 *bp)
{
	u32 bmcr;

	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
		return;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		u32 val;

		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_SERDES_DIG);
		bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
		val &= ~MII_BNX2_SD_MISC1_FORCE;
		bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);

		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		bmcr &= ~BCM5708S_BMCR_FORCE_2500;
	} else {
		return;
	}

	if (bp->autoneg & AUTONEG_SPEED)
		bmcr |= BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_ANRESTART;
	bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
}
static void
bnx2_5706s_force_link_dn(struct bnx2 *bp, int start)
{
	u32 val;

	bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_SERDES_CTL);
	bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
	if (start)
		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val & 0xff0f);
	else
		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val | 0xc0);
}
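/* bnx2_set_link() re-evaluates the PHY link state.  BMSR is read twice
 * because the link-status bit is latched-low: the first read clears a
 * stale latched value and the second returns the current state.  Called
 * with phy_lock held.
 */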
static int
bnx2_set_link(struct bnx2 *bp)
{
	u32 bmsr;
	u8 link_up;

	if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) {
		bp->link_up = 1;
		return 0;
	}

	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return 0;

	link_up = bp->link_up;

	bnx2_enable_bmsr1(bp);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_disable_bmsr1(bp);

	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5706)) {
		u32 val, an_dbg;

		if (bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN) {
			bnx2_5706s_force_link_dn(bp, 0);
			bp->phy_flags &= ~BNX2_PHY_FLAG_FORCED_DOWN;
		}
		val = REG_RD(bp, BNX2_EMAC_STATUS);

		bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);

		if ((val & BNX2_EMAC_STATUS_LINK) &&
		    !(an_dbg & MISC_SHDW_AN_DBG_NOSYNC))
			bmsr |= BMSR_LSTATUS;
		else
			bmsr &= ~BMSR_LSTATUS;
	}

	if (bmsr & BMSR_LSTATUS) {
		bp->link_up = 1;

		if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
			if (CHIP_NUM(bp) == CHIP_NUM_5706)
				bnx2_5706s_linkup(bp);
			else if (CHIP_NUM(bp) == CHIP_NUM_5708)
				bnx2_5708s_linkup(bp);
			else if (CHIP_NUM(bp) == CHIP_NUM_5709)
				bnx2_5709s_linkup(bp);
		}
		else {
			bnx2_copper_linkup(bp);
		}
		bnx2_resolve_flow_ctrl(bp);
	}
	else {
		if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
		    (bp->autoneg & AUTONEG_SPEED))
			bnx2_disable_forced_2g5(bp);

		if (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT) {
			u32 bmcr;

			bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
			bmcr |= BMCR_ANENABLE;
			bnx2_write_phy(bp, bp->mii_bmcr, bmcr);

			bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
		}
		bp->link_up = 0;
	}

	if (bp->link_up != link_up) {
		bnx2_report_link(bp);
	}

	bnx2_set_mac_link(bp);

	return 0;
}
static int
bnx2_reset_phy(struct bnx2 *bp)
{
	int i;
	u32 reg;

	bnx2_write_phy(bp, bp->mii_bmcr, BMCR_RESET);

#define PHY_RESET_MAX_WAIT 100
	for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
		udelay(10);

		bnx2_read_phy(bp, bp->mii_bmcr, &reg);
		if (!(reg & BMCR_RESET)) {
			udelay(20);
			break;
		}
	}
	if (i == PHY_RESET_MAX_WAIT) {
		return -EBUSY;
	}
	return 0;
}
static u32
bnx2_phy_get_pause_adv(struct bnx2 *bp)
{
	u32 adv = 0;

	if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
	    (FLOW_CTRL_RX | FLOW_CTRL_TX)) {

		if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
			adv = ADVERTISE_1000XPAUSE;
		}
		else {
			adv = ADVERTISE_PAUSE_CAP;
		}
	}
	else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
		if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
			adv = ADVERTISE_1000XPSE_ASYM;
		}
		else {
			adv = ADVERTISE_PAUSE_ASYM;
		}
	}
	else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
		if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
			adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
		}
		else {
			adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
		}
	}
	return adv;
}
static int bnx2_fw_sync(struct bnx2 *, u32, int, int);

static int
bnx2_setup_remote_phy(struct bnx2 *bp, u8 port)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
{
	u32 speed_arg = 0, pause_adv;

	pause_adv = bnx2_phy_get_pause_adv(bp);

	if (bp->autoneg & AUTONEG_SPEED) {
		speed_arg |= BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG;
		if (bp->advertising & ADVERTISED_10baseT_Half)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10HALF;
		if (bp->advertising & ADVERTISED_10baseT_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10FULL;
		if (bp->advertising & ADVERTISED_100baseT_Half)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100HALF;
		if (bp->advertising & ADVERTISED_100baseT_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100FULL;
		if (bp->advertising & ADVERTISED_1000baseT_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
		if (bp->advertising & ADVERTISED_2500baseX_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
	} else {
		if (bp->req_line_speed == SPEED_2500)
			speed_arg = BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
		else if (bp->req_line_speed == SPEED_1000)
			speed_arg = BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
		else if (bp->req_line_speed == SPEED_100) {
			if (bp->req_duplex == DUPLEX_FULL)
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100FULL;
			else
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100HALF;
		} else if (bp->req_line_speed == SPEED_10) {
			if (bp->req_duplex == DUPLEX_FULL)
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10FULL;
			else
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10HALF;
		}
	}

	if (pause_adv & (ADVERTISE_1000XPAUSE | ADVERTISE_PAUSE_CAP))
		speed_arg |= BNX2_NETLINK_SET_LINK_FC_SYM_PAUSE;
	if (pause_adv & (ADVERTISE_1000XPSE_ASYM | ADVERTISE_PAUSE_ASYM))
		speed_arg |= BNX2_NETLINK_SET_LINK_FC_ASYM_PAUSE;

	if (port == PORT_TP)
		speed_arg |= BNX2_NETLINK_SET_LINK_PHY_APP_REMOTE |
			     BNX2_NETLINK_SET_LINK_ETH_AT_WIRESPEED;

	bnx2_shmem_wr(bp, BNX2_DRV_MB_ARG0, speed_arg);

	spin_unlock_bh(&bp->phy_lock);
	bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_CMD_SET_LINK, 1, 0);
	spin_lock_bh(&bp->phy_lock);

	return 0;
}
static int
bnx2_setup_serdes_phy(struct bnx2 *bp, u8 port)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
{
	u32 adv, bmcr;
	u32 new_adv = 0;

	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return (bnx2_setup_remote_phy(bp, port));

	if (!(bp->autoneg & AUTONEG_SPEED)) {
		u32 new_bmcr;
		int force_link_down = 0;

		if (bp->req_line_speed == SPEED_2500) {
			if (!bnx2_test_and_enable_2g5(bp))
				force_link_down = 1;
		} else if (bp->req_line_speed == SPEED_1000) {
			if (bnx2_test_and_disable_2g5(bp))
				force_link_down = 1;
		}
		bnx2_read_phy(bp, bp->mii_adv, &adv);
		adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		new_bmcr = bmcr & ~BMCR_ANENABLE;
		new_bmcr |= BMCR_SPEED1000;

		if (CHIP_NUM(bp) == CHIP_NUM_5709) {
			if (bp->req_line_speed == SPEED_2500)
				bnx2_enable_forced_2g5(bp);
			else if (bp->req_line_speed == SPEED_1000) {
				bnx2_disable_forced_2g5(bp);
				new_bmcr &= ~0x2000;
			}

		} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
			if (bp->req_line_speed == SPEED_2500)
				new_bmcr |= BCM5708S_BMCR_FORCE_2500;
			else
				new_bmcr = bmcr & ~BCM5708S_BMCR_FORCE_2500;
		}

		if (bp->req_duplex == DUPLEX_FULL) {
			adv |= ADVERTISE_1000XFULL;
			new_bmcr |= BMCR_FULLDPLX;
		}
		else {
			adv |= ADVERTISE_1000XHALF;
			new_bmcr &= ~BMCR_FULLDPLX;
		}
		if ((new_bmcr != bmcr) || (force_link_down)) {
			/* Force a link down visible on the other side */
			if (bp->link_up) {
				bnx2_write_phy(bp, bp->mii_adv, adv &
					       ~(ADVERTISE_1000XFULL |
						 ADVERTISE_1000XHALF));
				bnx2_write_phy(bp, bp->mii_bmcr, bmcr |
					BMCR_ANRESTART | BMCR_ANENABLE);

				bp->link_up = 0;
				netif_carrier_off(bp->dev);
				bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
				bnx2_report_link(bp);
			}
			bnx2_write_phy(bp, bp->mii_adv, adv);
			bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
		} else {
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	bnx2_test_and_enable_2g5(bp);

	if (bp->advertising & ADVERTISED_1000baseT_Full)
		new_adv |= ADVERTISE_1000XFULL;

	new_adv |= bnx2_phy_get_pause_adv(bp);

	bnx2_read_phy(bp, bp->mii_adv, &adv);
	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	bp->serdes_an_pending = 0;
	if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
		/* Force a link down visible on the other side */
		if (bp->link_up) {
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
			spin_unlock_bh(&bp->phy_lock);
			msleep(20);
			spin_lock_bh(&bp->phy_lock);
		}

		bnx2_write_phy(bp, bp->mii_adv, new_adv);
		bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART |
			BMCR_ANENABLE);
		/* Speed up link-up time when the link partner
		 * does not autonegotiate which is very common
		 * in blade servers. Some blade servers use
		 * IPMI for keyboard input and it's important
		 * to minimize link disruptions. Autoneg. involves
		 * exchanging base pages plus 3 next pages and
		 * normally completes in about 120 msec.
		 */
		bp->current_interval = BNX2_SERDES_AN_TIMEOUT;
		bp->serdes_an_pending = 1;
		mod_timer(&bp->timer, jiffies + bp->current_interval);
	} else {
		bnx2_resolve_flow_ctrl(bp);
		bnx2_set_mac_link(bp);
	}

	return 0;
}
#define ETHTOOL_ALL_FIBRE_SPEED						\
	(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) ?			\
		(ADVERTISED_2500baseX_Full | ADVERTISED_1000baseT_Full) :\
		(ADVERTISED_1000baseT_Full)

#define ETHTOOL_ALL_COPPER_SPEED					\
	(ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |		\
	ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |		\
	ADVERTISED_1000baseT_Full)

#define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
	ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)

#define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)
static void
bnx2_set_default_remote_link(struct bnx2 *bp)
{
	u32 link;

	if (bp->phy_port == PORT_TP)
		link = bnx2_shmem_rd(bp, BNX2_RPHY_COPPER_LINK);
	else
		link = bnx2_shmem_rd(bp, BNX2_RPHY_SERDES_LINK);

	if (link & BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG) {
		bp->req_line_speed = 0;
		bp->autoneg |= AUTONEG_SPEED;
		bp->advertising = ADVERTISED_Autoneg;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
			bp->advertising |= ADVERTISED_10baseT_Half;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_10FULL)
			bp->advertising |= ADVERTISED_10baseT_Full;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
			bp->advertising |= ADVERTISED_100baseT_Half;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_100FULL)
			bp->advertising |= ADVERTISED_100baseT_Full;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
			bp->advertising |= ADVERTISED_1000baseT_Full;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
			bp->advertising |= ADVERTISED_2500baseX_Full;
	} else {
		bp->autoneg = 0;
		bp->advertising = 0;
		bp->req_duplex = DUPLEX_FULL;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_10) {
			bp->req_line_speed = SPEED_10;
			if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
				bp->req_duplex = DUPLEX_HALF;
		}
		if (link & BNX2_NETLINK_SET_LINK_SPEED_100) {
			bp->req_line_speed = SPEED_100;
			if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
				bp->req_duplex = DUPLEX_HALF;
		}
		if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
			bp->req_line_speed = SPEED_1000;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
			bp->req_line_speed = SPEED_2500;
	}
}
static void
bnx2_set_default_link(struct bnx2 *bp)
{
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
		bnx2_set_default_remote_link(bp);
		return;
	}

	bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
	bp->req_line_speed = 0;
	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		u32 reg;

		bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;

		reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG);
		reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
		if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
			bp->autoneg = 0;
			bp->req_line_speed = bp->line_speed = SPEED_1000;
			bp->req_duplex = DUPLEX_FULL;
		}
	} else
		bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
}
static void
bnx2_send_heart_beat(struct bnx2 *bp)
{
	u32 msg;
	u32 addr;

	spin_lock(&bp->indirect_lock);
	msg = (u32) (++bp->fw_drv_pulse_wr_seq & BNX2_DRV_PULSE_SEQ_MASK);
	addr = bp->shmem_base + BNX2_DRV_PULSE_MB;
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, addr);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW, msg);
	spin_unlock(&bp->indirect_lock);
}
static void
bnx2_remote_phy_event(struct bnx2 *bp)
{
	u32 msg;
	u8 link_up = bp->link_up;
	u8 old_port;

	msg = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);

	if (msg & BNX2_LINK_STATUS_HEART_BEAT_EXPIRED)
		bnx2_send_heart_beat(bp);

	msg &= ~BNX2_LINK_STATUS_HEART_BEAT_EXPIRED;

	if ((msg & BNX2_LINK_STATUS_LINK_UP) == BNX2_LINK_STATUS_LINK_DOWN)
		bp->link_up = 0;
	else {
		u32 speed;

		bp->link_up = 1;
		speed = msg & BNX2_LINK_STATUS_SPEED_MASK;
		bp->duplex = DUPLEX_FULL;
		switch (speed) {
		case BNX2_LINK_STATUS_10HALF:
			bp->duplex = DUPLEX_HALF;
			/* fall through */
		case BNX2_LINK_STATUS_10FULL:
			bp->line_speed = SPEED_10;
			break;
		case BNX2_LINK_STATUS_100HALF:
			bp->duplex = DUPLEX_HALF;
			/* fall through */
		case BNX2_LINK_STATUS_100BASE_T4:
		case BNX2_LINK_STATUS_100FULL:
			bp->line_speed = SPEED_100;
			break;
		case BNX2_LINK_STATUS_1000HALF:
			bp->duplex = DUPLEX_HALF;
			/* fall through */
		case BNX2_LINK_STATUS_1000FULL:
			bp->line_speed = SPEED_1000;
			break;
		case BNX2_LINK_STATUS_2500HALF:
			bp->duplex = DUPLEX_HALF;
			/* fall through */
		case BNX2_LINK_STATUS_2500FULL:
			bp->line_speed = SPEED_2500;
			break;
		default:
			bp->line_speed = 0;
			break;
		}

		bp->flow_ctrl = 0;
		if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
		    (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
			if (bp->duplex == DUPLEX_FULL)
				bp->flow_ctrl = bp->req_flow_ctrl;
		} else {
			if (msg & BNX2_LINK_STATUS_TX_FC_ENABLED)
				bp->flow_ctrl |= FLOW_CTRL_TX;
			if (msg & BNX2_LINK_STATUS_RX_FC_ENABLED)
				bp->flow_ctrl |= FLOW_CTRL_RX;
		}

		old_port = bp->phy_port;
		if (msg & BNX2_LINK_STATUS_SERDES_LINK)
			bp->phy_port = PORT_FIBRE;
		else
			bp->phy_port = PORT_TP;

		if (old_port != bp->phy_port)
			bnx2_set_default_link(bp);
	}

	if (bp->link_up != link_up)
		bnx2_report_link(bp);

	bnx2_set_mac_link(bp);
}
static int
bnx2_set_remote_link(struct bnx2 *bp)
{
	u32 evt_code;

	evt_code = bnx2_shmem_rd(bp, BNX2_FW_EVT_CODE_MB);
	switch (evt_code) {
	case BNX2_FW_EVT_CODE_LINK_EVENT:
		bnx2_remote_phy_event(bp);
		break;
	case BNX2_FW_EVT_CODE_SW_TIMER_EXPIRATION_EVENT:
	default:
		bnx2_send_heart_beat(bp);
		break;
	}
	return 0;
}
static int
bnx2_setup_copper_phy(struct bnx2 *bp)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
{
	u32 bmcr;
	u32 new_bmcr;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	if (bp->autoneg & AUTONEG_SPEED) {
		u32 adv_reg, adv1000_reg;
		u32 new_adv_reg = 0;
		u32 new_adv1000_reg = 0;

		bnx2_read_phy(bp, bp->mii_adv, &adv_reg);
		adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
			    ADVERTISE_PAUSE_ASYM);

		bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
		adv1000_reg &= PHY_ALL_1000_SPEED;

		if (bp->advertising & ADVERTISED_10baseT_Half)
			new_adv_reg |= ADVERTISE_10HALF;
		if (bp->advertising & ADVERTISED_10baseT_Full)
			new_adv_reg |= ADVERTISE_10FULL;
		if (bp->advertising & ADVERTISED_100baseT_Half)
			new_adv_reg |= ADVERTISE_100HALF;
		if (bp->advertising & ADVERTISED_100baseT_Full)
			new_adv_reg |= ADVERTISE_100FULL;
		if (bp->advertising & ADVERTISED_1000baseT_Full)
			new_adv1000_reg |= ADVERTISE_1000FULL;

		new_adv_reg |= ADVERTISE_CSMA;

		new_adv_reg |= bnx2_phy_get_pause_adv(bp);

		if ((adv1000_reg != new_adv1000_reg) ||
			(adv_reg != new_adv_reg) ||
			((bmcr & BMCR_ANENABLE) == 0)) {

			bnx2_write_phy(bp, bp->mii_adv, new_adv_reg);
			bnx2_write_phy(bp, MII_CTRL1000, new_adv1000_reg);
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_ANRESTART |
				BMCR_ANENABLE);
		}
		else if (bp->link_up) {
			/* Flow ctrl may have changed from auto to forced
			 * or vice-versa.
			 */
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	new_bmcr = 0;
	if (bp->req_line_speed == SPEED_100) {
		new_bmcr |= BMCR_SPEED100;
	}
	if (bp->req_duplex == DUPLEX_FULL) {
		new_bmcr |= BMCR_FULLDPLX;
	}
	if (new_bmcr != bmcr) {
		u32 bmsr;

		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);

		if (bmsr & BMSR_LSTATUS) {
			/* Force link down */
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
			spin_unlock_bh(&bp->phy_lock);
			msleep(50);
			spin_lock_bh(&bp->phy_lock);

			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
		}

		bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);

		/* Normally, the new speed is setup after the link has
		 * gone down and up again. In some cases, link will not go
		 * down so we need to set up the new speed here.
		 */
		if (bmsr & BMSR_LSTATUS) {
			bp->line_speed = bp->req_line_speed;
			bp->duplex = bp->req_duplex;
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
	} else {
		bnx2_resolve_flow_ctrl(bp);
		bnx2_set_mac_link(bp);
	}
	return 0;
}
2151 static int
2152 bnx2_setup_phy(struct bnx2 *bp, u8 port)
2153 __releases(&bp->phy_lock)
2154 __acquires(&bp->phy_lock)
2156 if (bp->loopback == MAC_LOOPBACK)
2157 return 0;
2159 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
2160 return (bnx2_setup_serdes_phy(bp, port));
2161 }
2162 else {
2163 return (bnx2_setup_copper_phy(bp));
2167 static int
2168 bnx2_init_5709s_phy(struct bnx2 *bp, int reset_phy)
2170 u32 val;
2172 bp->mii_bmcr = MII_BMCR + 0x10;
2173 bp->mii_bmsr = MII_BMSR + 0x10;
2174 bp->mii_bmsr1 = MII_BNX2_GP_TOP_AN_STATUS1;
2175 bp->mii_adv = MII_ADVERTISE + 0x10;
2176 bp->mii_lpa = MII_LPA + 0x10;
2177 bp->mii_up1 = MII_BNX2_OVER1G_UP1;
2179 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_AER);
2180 bnx2_write_phy(bp, MII_BNX2_AER_AER, MII_BNX2_AER_AER_AN_MMD);
2182 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
2183 if (reset_phy)
2184 bnx2_reset_phy(bp);
2186 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_SERDES_DIG);
2188 bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, &val);
2189 val &= ~MII_BNX2_SD_1000XCTL1_AUTODET;
2190 val |= MII_BNX2_SD_1000XCTL1_FIBER;
2191 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, val);
2193 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
2194 bnx2_read_phy(bp, MII_BNX2_OVER1G_UP1, &val);
2195 if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
2196 val |= BCM5708S_UP1_2G5;
2197 else
2198 val &= ~BCM5708S_UP1_2G5;
2199 bnx2_write_phy(bp, MII_BNX2_OVER1G_UP1, val);
2201 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_BAM_NXTPG);
2202 bnx2_read_phy(bp, MII_BNX2_BAM_NXTPG_CTL, &val);
2203 val |= MII_BNX2_NXTPG_CTL_T2 | MII_BNX2_NXTPG_CTL_BAM;
2204 bnx2_write_phy(bp, MII_BNX2_BAM_NXTPG_CTL, val);
2206 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_CL73_USERB0);
2208 val = MII_BNX2_CL73_BAM_EN | MII_BNX2_CL73_BAM_STA_MGR_EN |
2209 MII_BNX2_CL73_BAM_NP_AFT_BP_EN;
2210 bnx2_write_phy(bp, MII_BNX2_CL73_BAM_CTL1, val);
2212 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
2214 return 0;
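/*
 * Illustrative sketch (hypothetical helper, not part of the driver): the
 * 5709S PHY registers touched above are banked -- a write to the
 * block-address register selects which bank subsequent MII accesses hit.
 * A read-modify-write under that convention, using the accessors and
 * constants already used in this file:
 */
#if 0	/* example only */
static void example_banked_rmw(struct bnx2 *bp, u32 blk, u32 reg,
			       u32 clr, u32 set)
{
	u32 val;

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, blk);	/* select bank */
	bnx2_read_phy(bp, reg, &val);
	val &= ~clr;
	val |= set;
	bnx2_write_phy(bp, reg, val);
	/* restore the default IEEE bank when done */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
}
#endif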
2217 static int
2218 bnx2_init_5708s_phy(struct bnx2 *bp, int reset_phy)
2220 u32 val;
2222 if (reset_phy)
2223 bnx2_reset_phy(bp);
2225 bp->mii_up1 = BCM5708S_UP1;
2227 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
2228 bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
2229 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
2231 bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
2232 val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
2233 bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);
2235 bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
2236 val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
2237 bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);
2239 if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) {
2240 bnx2_read_phy(bp, BCM5708S_UP1, &val);
2241 val |= BCM5708S_UP1_2G5;
2242 bnx2_write_phy(bp, BCM5708S_UP1, val);
2245 if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
2246 (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
2247 (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
2248 /* increase tx signal amplitude */
2249 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
2250 BCM5708S_BLK_ADDR_TX_MISC);
2251 bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
2252 val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
2253 bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
2254 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
2257 val = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG) &
2258 BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;
2260 if (val) {
2261 u32 is_backplane;
2263 is_backplane = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
2264 if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
2265 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
2266 BCM5708S_BLK_ADDR_TX_MISC);
2267 bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
2268 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
2269 BCM5708S_BLK_ADDR_DIG);
2272 return 0;
2275 static int
2276 bnx2_init_5706s_phy(struct bnx2 *bp, int reset_phy)
2278 if (reset_phy)
2279 bnx2_reset_phy(bp);
2281 bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
2283 if (CHIP_NUM(bp) == CHIP_NUM_5706)
2284 REG_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300);
2286 if (bp->dev->mtu > 1500) {
2287 u32 val;
2289 /* Set extended packet length bit */
2290 bnx2_write_phy(bp, 0x18, 0x7);
2291 bnx2_read_phy(bp, 0x18, &val);
2292 bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);
2294 bnx2_write_phy(bp, 0x1c, 0x6c00);
2295 bnx2_read_phy(bp, 0x1c, &val);
2296 bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
2297 }
2298 else {
2299 u32 val;
2301 bnx2_write_phy(bp, 0x18, 0x7);
2302 bnx2_read_phy(bp, 0x18, &val);
2303 bnx2_write_phy(bp, 0x18, val & ~0x4007);
2305 bnx2_write_phy(bp, 0x1c, 0x6c00);
2306 bnx2_read_phy(bp, 0x1c, &val);
2307 bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
2310 return 0;
2313 static int
2314 bnx2_init_copper_phy(struct bnx2 *bp, int reset_phy)
2316 u32 val;
2318 if (reset_phy)
2319 bnx2_reset_phy(bp);
2321 if (bp->phy_flags & BNX2_PHY_FLAG_CRC_FIX) {
2322 bnx2_write_phy(bp, 0x18, 0x0c00);
2323 bnx2_write_phy(bp, 0x17, 0x000a);
2324 bnx2_write_phy(bp, 0x15, 0x310b);
2325 bnx2_write_phy(bp, 0x17, 0x201f);
2326 bnx2_write_phy(bp, 0x15, 0x9506);
2327 bnx2_write_phy(bp, 0x17, 0x401f);
2328 bnx2_write_phy(bp, 0x15, 0x14e2);
2329 bnx2_write_phy(bp, 0x18, 0x0400);
2332 if (bp->phy_flags & BNX2_PHY_FLAG_DIS_EARLY_DAC) {
2333 bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS,
2334 MII_BNX2_DSP_EXPAND_REG | 0x8);
2335 bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
2336 val &= ~(1 << 8);
2337 bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val);
2340 if (bp->dev->mtu > 1500) {
2341 /* Set extended packet length bit */
2342 bnx2_write_phy(bp, 0x18, 0x7);
2343 bnx2_read_phy(bp, 0x18, &val);
2344 bnx2_write_phy(bp, 0x18, val | 0x4000);
2346 bnx2_read_phy(bp, 0x10, &val);
2347 bnx2_write_phy(bp, 0x10, val | 0x1);
2348 }
2349 else {
2350 bnx2_write_phy(bp, 0x18, 0x7);
2351 bnx2_read_phy(bp, 0x18, &val);
2352 bnx2_write_phy(bp, 0x18, val & ~0x4007);
2354 bnx2_read_phy(bp, 0x10, &val);
2355 bnx2_write_phy(bp, 0x10, val & ~0x1);
2358 /* ethernet@wirespeed */
2359 bnx2_write_phy(bp, 0x18, 0x7007);
2360 bnx2_read_phy(bp, 0x18, &val);
2361 bnx2_write_phy(bp, 0x18, val | (1 << 15) | (1 << 4));
2362 return 0;
2366 static int
2367 bnx2_init_phy(struct bnx2 *bp, int reset_phy)
2368 __releases(&bp->phy_lock)
2369 __acquires(&bp->phy_lock)
2371 u32 val;
2372 int rc = 0;
2374 bp->phy_flags &= ~BNX2_PHY_FLAG_INT_MODE_MASK;
2375 bp->phy_flags |= BNX2_PHY_FLAG_INT_MODE_LINK_READY;
2377 bp->mii_bmcr = MII_BMCR;
2378 bp->mii_bmsr = MII_BMSR;
2379 bp->mii_bmsr1 = MII_BMSR;
2380 bp->mii_adv = MII_ADVERTISE;
2381 bp->mii_lpa = MII_LPA;
2383 REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
2385 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
2386 goto setup_phy;
2388 bnx2_read_phy(bp, MII_PHYSID1, &val);
2389 bp->phy_id = val << 16;
2390 bnx2_read_phy(bp, MII_PHYSID2, &val);
2391 bp->phy_id |= val & 0xffff;
2393 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
2394 if (CHIP_NUM(bp) == CHIP_NUM_5706)
2395 rc = bnx2_init_5706s_phy(bp, reset_phy);
2396 else if (CHIP_NUM(bp) == CHIP_NUM_5708)
2397 rc = bnx2_init_5708s_phy(bp, reset_phy);
2398 else if (CHIP_NUM(bp) == CHIP_NUM_5709)
2399 rc = bnx2_init_5709s_phy(bp, reset_phy);
2400 }
2401 else {
2402 rc = bnx2_init_copper_phy(bp, reset_phy);
2405 setup_phy:
2406 if (!rc)
2407 rc = bnx2_setup_phy(bp, bp->phy_port);
2409 return rc;
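/*
 * Illustration (not part of the driver): the 32-bit PHY id assembled in
 * bnx2_init_phy() is simply MII_PHYSID1 in the upper half and
 * MII_PHYSID2 in the lower half:
 *
 *	phy_id = (physid1 << 16) | (physid2 & 0xffff)
 *
 * For instance (hypothetical values), physid1 = 0x0020 and
 * physid2 = 0x60b0 would give phy_id = 0x002060b0.
 */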
2412 static int
2413 bnx2_set_mac_loopback(struct bnx2 *bp)
2415 u32 mac_mode;
2417 mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
2418 mac_mode &= ~BNX2_EMAC_MODE_PORT;
2419 mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
2420 REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
2421 bp->link_up = 1;
2422 return 0;
2425 static int bnx2_test_link(struct bnx2 *);
2427 static int
2428 bnx2_set_phy_loopback(struct bnx2 *bp)
2430 u32 mac_mode;
2431 int rc, i;
2433 spin_lock_bh(&bp->phy_lock);
2434 rc = bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK | BMCR_FULLDPLX |
2435 BMCR_SPEED1000);
2436 spin_unlock_bh(&bp->phy_lock);
2437 if (rc)
2438 return rc;
2440 for (i = 0; i < 10; i++) {
2441 if (bnx2_test_link(bp) == 0)
2442 break;
2443 msleep(100);
2446 mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
2447 mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
2448 BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
2449 BNX2_EMAC_MODE_25G_MODE);
2451 mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
2452 REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
2453 bp->link_up = 1;
2454 return 0;
2457 static int
2458 bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int ack, int silent)
2460 int i;
2461 u32 val;
2463 bp->fw_wr_seq++;
2464 msg_data |= bp->fw_wr_seq;
2466 bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);
2468 if (!ack)
2469 return 0;
2471 /* wait for an acknowledgement. */
2472 for (i = 0; i < (BNX2_FW_ACK_TIME_OUT_MS / 10); i++) {
2473 msleep(10);
2475 val = bnx2_shmem_rd(bp, BNX2_FW_MB);
2477 if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
2478 break;
2480 if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
2481 return 0;
2483 /* If we timed out, inform the firmware that this is the case. */
2484 if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
2485 if (!silent)
2486 printk(KERN_ERR PFX "fw sync timeout, reset code = "
2487 "%x\n", msg_data);
2489 msg_data &= ~BNX2_DRV_MSG_CODE;
2490 msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;
2492 bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);
2494 return -EBUSY;
2497 if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
2498 return -EIO;
2500 return 0;
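/*
 * Illustration (not part of the driver): the handshake in bnx2_fw_sync()
 * tags each request with an incrementing sequence number in the low bits
 * of the message, writes it to the driver mailbox, then polls the
 * firmware mailbox until the echoed sequence matches:
 *
 *	(fw_mb & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ)
 *
 * Because the sequence advances on every call, a stale ack left over
 * from an earlier request can never satisfy the wait.
 */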
2503 static int
2504 bnx2_init_5709_context(struct bnx2 *bp)
2506 int i, ret = 0;
2507 u32 val;
2509 val = BNX2_CTX_COMMAND_ENABLED | BNX2_CTX_COMMAND_MEM_INIT | (1 << 12);
2510 val |= (BCM_PAGE_BITS - 8) << 16;
2511 REG_WR(bp, BNX2_CTX_COMMAND, val);
2512 for (i = 0; i < 10; i++) {
2513 val = REG_RD(bp, BNX2_CTX_COMMAND);
2514 if (!(val & BNX2_CTX_COMMAND_MEM_INIT))
2515 break;
2516 udelay(2);
2518 if (val & BNX2_CTX_COMMAND_MEM_INIT)
2519 return -EBUSY;
2521 for (i = 0; i < bp->ctx_pages; i++) {
2522 int j;
2524 if (bp->ctx_blk[i])
2525 memset(bp->ctx_blk[i], 0, BCM_PAGE_SIZE);
2526 else
2527 return -ENOMEM;
2529 REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0,
2530 (bp->ctx_blk_mapping[i] & 0xffffffff) |
2531 BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID);
2532 REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA1,
2533 (u64) bp->ctx_blk_mapping[i] >> 32);
2534 REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL, i |
2535 BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
2536 for (j = 0; j < 10; j++) {
2538 val = REG_RD(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL);
2539 if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
2540 break;
2541 udelay(5);
2543 if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
2544 ret = -EBUSY;
2545 break;
2548 return ret;
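/*
 * Illustrative sketch (hypothetical helper, not part of the driver):
 * both waits in bnx2_init_5709_context() follow the bounded-poll shape
 * used throughout this file -- kick the hardware, then spin a fixed
 * number of times until a self-clearing bit drops:
 */
#if 0	/* example only */
static int example_poll_clear(struct bnx2 *bp, u32 reg, u32 bit,
			      int tries, unsigned int delay_us)
{
	while (tries--) {
		if (!(REG_RD(bp, reg) & bit))
			return 0;	/* hardware finished */
		udelay(delay_us);
	}
	return -EBUSY;			/* timed out */
}
#endif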
2551 static void
2552 bnx2_init_context(struct bnx2 *bp)
2554 u32 vcid;
2556 vcid = 96;
2557 while (vcid) {
2558 u32 vcid_addr, pcid_addr, offset;
2559 int i;
2561 vcid--;
2563 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
2564 u32 new_vcid;
2566 vcid_addr = GET_PCID_ADDR(vcid);
2567 if (vcid & 0x8) {
2568 new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
2569 }
2570 else {
2571 new_vcid = vcid;
2573 pcid_addr = GET_PCID_ADDR(new_vcid);
2574 }
2575 else {
2576 vcid_addr = GET_CID_ADDR(vcid);
2577 pcid_addr = vcid_addr;
2580 for (i = 0; i < (CTX_SIZE / PHY_CTX_SIZE); i++) {
2581 vcid_addr += (i << PHY_CTX_SHIFT);
2582 pcid_addr += (i << PHY_CTX_SHIFT);
2584 REG_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
2585 REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
2587 /* Zero out the context. */
2588 for (offset = 0; offset < PHY_CTX_SIZE; offset += 4)
2589 bnx2_ctx_wr(bp, vcid_addr, offset, 0);
2594 static int
2595 bnx2_alloc_bad_rbuf(struct bnx2 *bp)
2597 u16 *good_mbuf;
2598 u32 good_mbuf_cnt;
2599 u32 val;
2601 good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
2602 if (good_mbuf == NULL) {
2603 printk(KERN_ERR PFX "Failed to allocate memory in "
2604 "bnx2_alloc_bad_rbuf\n");
2605 return -ENOMEM;
2608 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
2609 BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);
2611 good_mbuf_cnt = 0;
2613 /* Allocate a bunch of mbufs and save the good ones in an array. */
2614 val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
2615 while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
2616 bnx2_reg_wr_ind(bp, BNX2_RBUF_COMMAND,
2617 BNX2_RBUF_COMMAND_ALLOC_REQ);
2619 val = bnx2_reg_rd_ind(bp, BNX2_RBUF_FW_BUF_ALLOC);
2621 val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;
2623 /* The addresses with Bit 9 set are bad memory blocks. */
2624 if (!(val & (1 << 9))) {
2625 good_mbuf[good_mbuf_cnt] = (u16) val;
2626 good_mbuf_cnt++;
2629 val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
2632 /* Free the good ones back to the mbuf pool thus discarding
2633 * all the bad ones. */
2634 while (good_mbuf_cnt) {
2635 good_mbuf_cnt--;
2637 val = good_mbuf[good_mbuf_cnt];
2638 val = (val << 9) | val | 1;
2640 bnx2_reg_wr_ind(bp, BNX2_RBUF_FW_BUF_FREE, val);
2642 kfree(good_mbuf);
2643 return 0;
2646 static void
2647 bnx2_set_mac_addr(struct bnx2 *bp, u8 *mac_addr, u32 pos)
2649 u32 val;
2651 val = (mac_addr[0] << 8) | mac_addr[1];
2653 REG_WR(bp, BNX2_EMAC_MAC_MATCH0 + (pos * 8), val);
2655 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
2656 (mac_addr[4] << 8) | mac_addr[5];
2658 REG_WR(bp, BNX2_EMAC_MAC_MATCH1 + (pos * 8), val);
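/*
 * Illustration (not part of the driver): a MAC address aa:bb:cc:dd:ee:ff
 * lands in the match registers above as
 *
 *	MATCH0 = 0x0000aabb	(bytes 0-1)
 *	MATCH1 = 0xccddeeff	(bytes 2-5)
 *
 * with each filter slot occupying a pair of registers 8 bytes apart.
 */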
2661 static inline int
2662 bnx2_alloc_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
2664 dma_addr_t mapping;
2665 struct sw_pg *rx_pg = &rxr->rx_pg_ring[index];
2666 struct rx_bd *rxbd =
2667 &rxr->rx_pg_desc_ring[RX_RING(index)][RX_IDX(index)];
2668 struct page *page = alloc_page(GFP_ATOMIC);
2670 if (!page)
2671 return -ENOMEM;
2672 mapping = pci_map_page(bp->pdev, page, 0, PAGE_SIZE,
2673 PCI_DMA_FROMDEVICE);
2674 if (pci_dma_mapping_error(bp->pdev, mapping)) {
2675 __free_page(page);
2676 return -EIO;
2679 rx_pg->page = page;
2680 pci_unmap_addr_set(rx_pg, mapping, mapping);
2681 rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
2682 rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
2683 return 0;
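/*
 * Illustration (not part of the driver): rx buffer descriptors carry the
 * DMA address as two 32-bit halves, so a mapping such as
 * 0x0000000123456780 is stored as
 *
 *	rx_bd_haddr_hi = 0x00000001	((u64) mapping >> 32)
 *	rx_bd_haddr_lo = 0x23456780	(mapping & 0xffffffff)
 */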
2686 static void
2687 bnx2_free_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
2689 struct sw_pg *rx_pg = &rxr->rx_pg_ring[index];
2690 struct page *page = rx_pg->page;
2692 if (!page)
2693 return;
2695 pci_unmap_page(bp->pdev, pci_unmap_addr(rx_pg, mapping), PAGE_SIZE,
2696 PCI_DMA_FROMDEVICE);
2698 __free_page(page);
2699 rx_pg->page = NULL;
2702 static inline int
2703 bnx2_alloc_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
2705 struct sk_buff *skb;
2706 struct sw_bd *rx_buf = &rxr->rx_buf_ring[index];
2707 dma_addr_t mapping;
2708 struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
2709 unsigned long align;
2711 skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
2712 if (skb == NULL) {
2713 return -ENOMEM;
2716 if (unlikely((align = (unsigned long) skb->data & (BNX2_RX_ALIGN - 1))))
2717 skb_reserve(skb, BNX2_RX_ALIGN - align);
2719 mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
2720 PCI_DMA_FROMDEVICE);
2721 if (pci_dma_mapping_error(bp->pdev, mapping)) {
2722 dev_kfree_skb(skb);
2723 return -EIO;
2726 rx_buf->skb = skb;
2727 pci_unmap_addr_set(rx_buf, mapping, mapping);
2729 rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
2730 rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
2732 rxr->rx_prod_bseq += bp->rx_buf_use_size;
2734 return 0;
2737 static int
2738 bnx2_phy_event_is_set(struct bnx2 *bp, struct bnx2_napi *bnapi, u32 event)
2740 struct status_block *sblk = bnapi->status_blk.msi;
2741 u32 new_link_state, old_link_state;
2742 int is_set = 1;
2744 new_link_state = sblk->status_attn_bits & event;
2745 old_link_state = sblk->status_attn_bits_ack & event;
2746 if (new_link_state != old_link_state) {
2747 if (new_link_state)
2748 REG_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD, event);
2749 else
2750 REG_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD, event);
2751 } else
2752 is_set = 0;
2754 return is_set;
2757 static void
2758 bnx2_phy_int(struct bnx2 *bp, struct bnx2_napi *bnapi)
2760 spin_lock(&bp->phy_lock);
2762 if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_LINK_STATE))
2763 bnx2_set_link(bp);
2764 if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_TIMER_ABORT))
2765 bnx2_set_remote_link(bp);
2767 spin_unlock(&bp->phy_lock);
2771 static inline u16
2772 bnx2_get_hw_tx_cons(struct bnx2_napi *bnapi)
2774 u16 cons;
2776 /* Tell compiler that status block fields can change. */
2777 barrier();
2778 cons = *bnapi->hw_tx_cons_ptr;
2779 barrier();
2780 if (unlikely((cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT))
2781 cons++;
2782 return cons;
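/*
 * Illustration (not part of the driver): the last slot of each BD page
 * holds a chain pointer rather than a packet, so a raw consumer index
 * whose low bits equal MAX_TX_DESC_CNT is bumped past it.  With 256
 * descriptors per page (MAX_TX_DESC_CNT == 255), a hardware value of
 * 255 is treated as 256, 511 as 512, and so on.  The same trick appears
 * in bnx2_get_hw_rx_cons() below.
 */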
2785 static int
2786 bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
2788 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
2789 u16 hw_cons, sw_cons, sw_ring_cons;
2790 int tx_pkt = 0, index;
2791 struct netdev_queue *txq;
2793 index = (bnapi - bp->bnx2_napi);
2794 txq = netdev_get_tx_queue(bp->dev, index);
2796 hw_cons = bnx2_get_hw_tx_cons(bnapi);
2797 sw_cons = txr->tx_cons;
2799 while (sw_cons != hw_cons) {
2800 struct sw_tx_bd *tx_buf;
2801 struct sk_buff *skb;
2802 int i, last;
2804 sw_ring_cons = TX_RING_IDX(sw_cons);
2806 tx_buf = &txr->tx_buf_ring[sw_ring_cons];
2807 skb = tx_buf->skb;
2809 /* prefetch skb_end_pointer() to speed up skb_shinfo(skb) */
2810 prefetch(&skb->end);
2812 /* partial BD completions possible with TSO packets */
2813 if (tx_buf->is_gso) {
2814 u16 last_idx, last_ring_idx;
2816 last_idx = sw_cons + tx_buf->nr_frags + 1;
2817 last_ring_idx = sw_ring_cons + tx_buf->nr_frags + 1;
2818 if (unlikely(last_ring_idx >= MAX_TX_DESC_CNT)) {
2819 last_idx++;
2821 if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
2822 break;
2826 pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
2827 skb_headlen(skb), PCI_DMA_TODEVICE);
2829 tx_buf->skb = NULL;
2830 last = tx_buf->nr_frags;
2832 for (i = 0; i < last; i++) {
2833 sw_cons = NEXT_TX_BD(sw_cons);
2835 pci_unmap_page(bp->pdev,
2836 pci_unmap_addr(
2837 &txr->tx_buf_ring[TX_RING_IDX(sw_cons)],
2838 mapping),
2839 skb_shinfo(skb)->frags[i].size,
2840 PCI_DMA_TODEVICE);
2843 sw_cons = NEXT_TX_BD(sw_cons);
2845 dev_kfree_skb(skb);
2846 tx_pkt++;
2847 if (tx_pkt == budget)
2848 break;
2850 if (hw_cons == sw_cons)
2851 hw_cons = bnx2_get_hw_tx_cons(bnapi);
2854 txr->hw_tx_cons = hw_cons;
2855 txr->tx_cons = sw_cons;
2857 /* Need to make the tx_cons update visible to bnx2_start_xmit()
2858 * before checking for netif_tx_queue_stopped(). Without the
2859 * memory barrier, there is a small possibility that bnx2_start_xmit()
2860 * will miss it and cause the queue to be stopped forever.
2861 */
2862 smp_mb();
2864 if (unlikely(netif_tx_queue_stopped(txq)) &&
2865 (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)) {
2866 __netif_tx_lock(txq, smp_processor_id());
2867 if ((netif_tx_queue_stopped(txq)) &&
2868 (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh))
2869 netif_tx_wake_queue(txq);
2870 __netif_tx_unlock(txq);
2873 return tx_pkt;
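/*
 * Illustration (not part of the driver): the smp_mb() above pairs with a
 * barrier in the transmit path.  The lost-wakeup interleaving it
 * prevents looks roughly like:
 *
 *	xmit CPU			completion CPU
 *	--------			--------------
 *	sees ring full			txr->tx_cons = sw_cons;
 *	stops queue			smp_mb();
 *	smp_mb();			queue stopped? -> wake
 *	re-checks tx_avail, restarts
 *
 * Without the barriers each side could miss the other's update and the
 * queue would stay stopped forever.
 */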
2876 static void
2877 bnx2_reuse_rx_skb_pages(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
2878 struct sk_buff *skb, int count)
2880 struct sw_pg *cons_rx_pg, *prod_rx_pg;
2881 struct rx_bd *cons_bd, *prod_bd;
2882 int i;
2883 u16 hw_prod, prod;
2884 u16 cons = rxr->rx_pg_cons;
2886 cons_rx_pg = &rxr->rx_pg_ring[cons];
2888 /* The caller was unable to allocate a new page to replace the
2889 * last one in the frags array, so we need to recycle that page
2890 * and then free the skb.
2891 */
2892 if (skb) {
2893 struct page *page;
2894 struct skb_shared_info *shinfo;
2896 shinfo = skb_shinfo(skb);
2897 shinfo->nr_frags--;
2898 page = shinfo->frags[shinfo->nr_frags].page;
2899 shinfo->frags[shinfo->nr_frags].page = NULL;
2901 cons_rx_pg->page = page;
2902 dev_kfree_skb(skb);
2905 hw_prod = rxr->rx_pg_prod;
2907 for (i = 0; i < count; i++) {
2908 prod = RX_PG_RING_IDX(hw_prod);
2910 prod_rx_pg = &rxr->rx_pg_ring[prod];
2911 cons_rx_pg = &rxr->rx_pg_ring[cons];
2912 cons_bd = &rxr->rx_pg_desc_ring[RX_RING(cons)][RX_IDX(cons)];
2913 prod_bd = &rxr->rx_pg_desc_ring[RX_RING(prod)][RX_IDX(prod)];
2915 if (prod != cons) {
2916 prod_rx_pg->page = cons_rx_pg->page;
2917 cons_rx_pg->page = NULL;
2918 pci_unmap_addr_set(prod_rx_pg, mapping,
2919 pci_unmap_addr(cons_rx_pg, mapping));
2921 prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
2922 prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
2925 cons = RX_PG_RING_IDX(NEXT_RX_BD(cons));
2926 hw_prod = NEXT_RX_BD(hw_prod);
2928 rxr->rx_pg_prod = hw_prod;
2929 rxr->rx_pg_cons = cons;
2932 static inline void
2933 bnx2_reuse_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
2934 struct sk_buff *skb, u16 cons, u16 prod)
2936 struct sw_bd *cons_rx_buf, *prod_rx_buf;
2937 struct rx_bd *cons_bd, *prod_bd;
2939 cons_rx_buf = &rxr->rx_buf_ring[cons];
2940 prod_rx_buf = &rxr->rx_buf_ring[prod];
2942 pci_dma_sync_single_for_device(bp->pdev,
2943 pci_unmap_addr(cons_rx_buf, mapping),
2944 BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
2946 rxr->rx_prod_bseq += bp->rx_buf_use_size;
2948 prod_rx_buf->skb = skb;
2950 if (cons == prod)
2951 return;
2953 pci_unmap_addr_set(prod_rx_buf, mapping,
2954 pci_unmap_addr(cons_rx_buf, mapping));
2956 cons_bd = &rxr->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
2957 prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
2958 prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
2959 prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
2962 static int
2963 bnx2_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, struct sk_buff *skb,
2964 unsigned int len, unsigned int hdr_len, dma_addr_t dma_addr,
2965 u32 ring_idx)
2967 int err;
2968 u16 prod = ring_idx & 0xffff;
2970 err = bnx2_alloc_rx_skb(bp, rxr, prod);
2971 if (unlikely(err)) {
2972 bnx2_reuse_rx_skb(bp, rxr, skb, (u16) (ring_idx >> 16), prod);
2973 if (hdr_len) {
2974 unsigned int raw_len = len + 4;
2975 int pages = PAGE_ALIGN(raw_len - hdr_len) >> PAGE_SHIFT;
2977 bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages);
2979 return err;
2982 skb_reserve(skb, BNX2_RX_OFFSET);
2983 pci_unmap_single(bp->pdev, dma_addr, bp->rx_buf_use_size,
2984 PCI_DMA_FROMDEVICE);
2986 if (hdr_len == 0) {
2987 skb_put(skb, len);
2988 return 0;
2989 } else {
2990 unsigned int i, frag_len, frag_size, pages;
2991 struct sw_pg *rx_pg;
2992 u16 pg_cons = rxr->rx_pg_cons;
2993 u16 pg_prod = rxr->rx_pg_prod;
2995 frag_size = len + 4 - hdr_len;
2996 pages = PAGE_ALIGN(frag_size) >> PAGE_SHIFT;
2997 skb_put(skb, hdr_len);
2999 for (i = 0; i < pages; i++) {
3000 dma_addr_t mapping_old;
3002 frag_len = min(frag_size, (unsigned int) PAGE_SIZE);
3003 if (unlikely(frag_len <= 4)) {
3004 unsigned int tail = 4 - frag_len;
3006 rxr->rx_pg_cons = pg_cons;
3007 rxr->rx_pg_prod = pg_prod;
3008 bnx2_reuse_rx_skb_pages(bp, rxr, NULL,
3009 pages - i);
3010 skb->len -= tail;
3011 if (i == 0) {
3012 skb->tail -= tail;
3013 } else {
3014 skb_frag_t *frag =
3015 &skb_shinfo(skb)->frags[i - 1];
3016 frag->size -= tail;
3017 skb->data_len -= tail;
3018 skb->truesize -= tail;
3020 return 0;
3022 rx_pg = &rxr->rx_pg_ring[pg_cons];
3024 /* Don't unmap yet. If we're unable to allocate a new
3025 * page, we need to recycle the page and the DMA addr.
3026 */
3027 mapping_old = pci_unmap_addr(rx_pg, mapping);
3028 if (i == pages - 1)
3029 frag_len -= 4;
3031 skb_fill_page_desc(skb, i, rx_pg->page, 0, frag_len);
3032 rx_pg->page = NULL;
3034 err = bnx2_alloc_rx_page(bp, rxr,
3035 RX_PG_RING_IDX(pg_prod));
3036 if (unlikely(err)) {
3037 rxr->rx_pg_cons = pg_cons;
3038 rxr->rx_pg_prod = pg_prod;
3039 bnx2_reuse_rx_skb_pages(bp, rxr, skb,
3040 pages - i);
3041 return err;
3044 pci_unmap_page(bp->pdev, mapping_old,
3045 PAGE_SIZE, PCI_DMA_FROMDEVICE);
3047 frag_size -= frag_len;
3048 skb->data_len += frag_len;
3049 skb->truesize += frag_len;
3050 skb->len += frag_len;
3052 pg_prod = NEXT_RX_BD(pg_prod);
3053 pg_cons = RX_PG_RING_IDX(NEXT_RX_BD(pg_cons));
3055 rxr->rx_pg_prod = pg_prod;
3056 rxr->rx_pg_cons = pg_cons;
3058 return 0;
3061 static inline u16
3062 bnx2_get_hw_rx_cons(struct bnx2_napi *bnapi)
3064 u16 cons;
3066 /* Tell compiler that status block fields can change. */
3067 barrier();
3068 cons = *bnapi->hw_rx_cons_ptr;
3069 barrier();
3070 if (unlikely((cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT))
3071 cons++;
3072 return cons;
3075 static int
3076 bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
3078 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
3079 u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
3080 struct l2_fhdr *rx_hdr;
3081 int rx_pkt = 0, pg_ring_used = 0;
3083 hw_cons = bnx2_get_hw_rx_cons(bnapi);
3084 sw_cons = rxr->rx_cons;
3085 sw_prod = rxr->rx_prod;
3087 /* Memory barrier necessary as speculative reads of the rx
3088 * buffer can be ahead of the index in the status block.
3089 */
3090 rmb();
3091 while (sw_cons != hw_cons) {
3092 unsigned int len, hdr_len;
3093 u32 status;
3094 struct sw_bd *rx_buf;
3095 struct sk_buff *skb;
3096 dma_addr_t dma_addr;
3097 u16 vtag = 0;
3098 int hw_vlan __maybe_unused = 0;
3100 sw_ring_cons = RX_RING_IDX(sw_cons);
3101 sw_ring_prod = RX_RING_IDX(sw_prod);
3103 rx_buf = &rxr->rx_buf_ring[sw_ring_cons];
3104 skb = rx_buf->skb;
3106 rx_buf->skb = NULL;
3108 dma_addr = pci_unmap_addr(rx_buf, mapping);
3110 pci_dma_sync_single_for_cpu(bp->pdev, dma_addr,
3111 BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH,
3112 PCI_DMA_FROMDEVICE);
3114 rx_hdr = (struct l2_fhdr *) skb->data;
3115 len = rx_hdr->l2_fhdr_pkt_len;
3116 status = rx_hdr->l2_fhdr_status;
3118 hdr_len = 0;
3119 if (status & L2_FHDR_STATUS_SPLIT) {
3120 hdr_len = rx_hdr->l2_fhdr_ip_xsum;
3121 pg_ring_used = 1;
3122 } else if (len > bp->rx_jumbo_thresh) {
3123 hdr_len = bp->rx_jumbo_thresh;
3124 pg_ring_used = 1;
3127 if (unlikely(status & (L2_FHDR_ERRORS_BAD_CRC |
3128 L2_FHDR_ERRORS_PHY_DECODE |
3129 L2_FHDR_ERRORS_ALIGNMENT |
3130 L2_FHDR_ERRORS_TOO_SHORT |
3131 L2_FHDR_ERRORS_GIANT_FRAME))) {
3133 bnx2_reuse_rx_skb(bp, rxr, skb, sw_ring_cons,
3134 sw_ring_prod);
3135 if (pg_ring_used) {
3136 int pages;
3138 pages = PAGE_ALIGN(len - hdr_len) >> PAGE_SHIFT;
3140 bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages);
3142 goto next_rx;
3145 len -= 4;
3147 if (len <= bp->rx_copy_thresh) {
3148 struct sk_buff *new_skb;
3150 new_skb = netdev_alloc_skb(bp->dev, len + 6);
3151 if (new_skb == NULL) {
3152 bnx2_reuse_rx_skb(bp, rxr, skb, sw_ring_cons,
3153 sw_ring_prod);
3154 goto next_rx;
3157 /* aligned copy */
3158 skb_copy_from_linear_data_offset(skb,
3159 BNX2_RX_OFFSET - 6,
3160 new_skb->data, len + 6);
3161 skb_reserve(new_skb, 6);
3162 skb_put(new_skb, len);
3164 bnx2_reuse_rx_skb(bp, rxr, skb,
3165 sw_ring_cons, sw_ring_prod);
3167 skb = new_skb;
3168 } else if (unlikely(bnx2_rx_skb(bp, rxr, skb, len, hdr_len,
3169 dma_addr, (sw_ring_cons << 16) | sw_ring_prod)))
3170 goto next_rx;
3172 if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) &&
3173 !(bp->rx_mode & BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG)) {
3174 vtag = rx_hdr->l2_fhdr_vlan_tag;
3175 #ifdef BCM_VLAN
3176 if (bp->vlgrp)
3177 hw_vlan = 1;
3178 else
3179 #endif
3181 struct vlan_ethhdr *ve = (struct vlan_ethhdr *)
3182 __skb_push(skb, 4);
3184 memmove(ve, skb->data + 4, ETH_ALEN * 2);
3185 ve->h_vlan_proto = htons(ETH_P_8021Q);
3186 ve->h_vlan_TCI = htons(vtag);
3187 len += 4;
3191 skb->protocol = eth_type_trans(skb, bp->dev);
3193 if ((len > (bp->dev->mtu + ETH_HLEN)) &&
3194 (ntohs(skb->protocol) != 0x8100)) {
3196 dev_kfree_skb(skb);
3197 goto next_rx;
3201 skb->ip_summed = CHECKSUM_NONE;
3202 if (bp->rx_csum &&
3203 (status & (L2_FHDR_STATUS_TCP_SEGMENT |
3204 L2_FHDR_STATUS_UDP_DATAGRAM))) {
3206 if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
3207 L2_FHDR_ERRORS_UDP_XSUM)) == 0))
3208 skb->ip_summed = CHECKSUM_UNNECESSARY;
3211 skb_record_rx_queue(skb, bnapi - &bp->bnx2_napi[0]);
3213 #ifdef BCM_VLAN
3214 if (hw_vlan)
3215 vlan_hwaccel_receive_skb(skb, bp->vlgrp, vtag);
3216 else
3217 #endif
3218 netif_receive_skb(skb);
3220 rx_pkt++;
3222 next_rx:
3223 sw_cons = NEXT_RX_BD(sw_cons);
3224 sw_prod = NEXT_RX_BD(sw_prod);
3226 if (rx_pkt == budget)
3227 break;
3229 /* Refresh hw_cons to see if there is new work */
3230 if (sw_cons == hw_cons) {
3231 hw_cons = bnx2_get_hw_rx_cons(bnapi);
3232 rmb();
3235 rxr->rx_cons = sw_cons;
3236 rxr->rx_prod = sw_prod;
3238 if (pg_ring_used)
3239 REG_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);
3241 REG_WR16(bp, rxr->rx_bidx_addr, sw_prod);
3243 REG_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);
3245 mmiowb();
3247 return rx_pkt;
3251 /* MSI ISR - The only difference between this and the INTx ISR
3252 * is that the MSI interrupt is always serviced.
3253 */
3254 static irqreturn_t
3255 bnx2_msi(int irq, void *dev_instance)
3257 struct bnx2_napi *bnapi = dev_instance;
3258 struct bnx2 *bp = bnapi->bp;
3260 prefetch(bnapi->status_blk.msi);
3261 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3262 BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
3263 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
3265 /* Return here if interrupt is disabled. */
3266 if (unlikely(atomic_read(&bp->intr_sem) != 0))
3267 return IRQ_HANDLED;
3269 napi_schedule(&bnapi->napi);
3271 return IRQ_HANDLED;
3274 static irqreturn_t
3275 bnx2_msi_1shot(int irq, void *dev_instance)
3277 struct bnx2_napi *bnapi = dev_instance;
3278 struct bnx2 *bp = bnapi->bp;
3280 prefetch(bnapi->status_blk.msi);
3282 /* Return here if interrupt is disabled. */
3283 if (unlikely(atomic_read(&bp->intr_sem) != 0))
3284 return IRQ_HANDLED;
3286 napi_schedule(&bnapi->napi);
3288 return IRQ_HANDLED;
3291 static irqreturn_t
3292 bnx2_interrupt(int irq, void *dev_instance)
3294 struct bnx2_napi *bnapi = dev_instance;
3295 struct bnx2 *bp = bnapi->bp;
3296 struct status_block *sblk = bnapi->status_blk.msi;
3298 /* When using INTx, it is possible for the interrupt to arrive
3299 * at the CPU before the status block posted prior to the
3300 * interrupt. Reading a register will flush the status block.
3301 * When using MSI, the MSI message will always complete after
3302 * the status block write.
3303 */
3304 if ((sblk->status_idx == bnapi->last_status_idx) &&
3305 (REG_RD(bp, BNX2_PCICFG_MISC_STATUS) &
3306 BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
3307 return IRQ_NONE;
3309 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3310 BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
3311 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
3313 /* Read back to deassert IRQ immediately to avoid too many
3314 * spurious interrupts.
3315 */
3316 REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
3318 /* Return here if interrupt is shared and is disabled. */
3319 if (unlikely(atomic_read(&bp->intr_sem) != 0))
3320 return IRQ_HANDLED;
3322 if (napi_schedule_prep(&bnapi->napi)) {
3323 bnapi->last_status_idx = sblk->status_idx;
3324 __napi_schedule(&bnapi->napi);
3327 return IRQ_HANDLED;
3330 static inline int
3331 bnx2_has_fast_work(struct bnx2_napi *bnapi)
3333 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
3334 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
3336 if ((bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons) ||
3337 (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons))
3338 return 1;
3339 return 0;
3342 #define STATUS_ATTN_EVENTS (STATUS_ATTN_BITS_LINK_STATE | \
3343 STATUS_ATTN_BITS_TIMER_ABORT)
3345 static inline int
3346 bnx2_has_work(struct bnx2_napi *bnapi)
3348 struct status_block *sblk = bnapi->status_blk.msi;
3350 if (bnx2_has_fast_work(bnapi))
3351 return 1;
3353 #ifdef BCM_CNIC
3354 if (bnapi->cnic_present && (bnapi->cnic_tag != sblk->status_idx))
3355 return 1;
3356 #endif
3358 if ((sblk->status_attn_bits & STATUS_ATTN_EVENTS) !=
3359 (sblk->status_attn_bits_ack & STATUS_ATTN_EVENTS))
3360 return 1;
3362 return 0;
3365 static void
3366 bnx2_chk_missed_msi(struct bnx2 *bp)
3368 struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
3369 u32 msi_ctrl;
3371 if (bnx2_has_work(bnapi)) {
3372 msi_ctrl = REG_RD(bp, BNX2_PCICFG_MSI_CONTROL);
3373 if (!(msi_ctrl & BNX2_PCICFG_MSI_CONTROL_ENABLE))
3374 return;
3376 if (bnapi->last_status_idx == bp->idle_chk_status_idx) {
3377 REG_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl &
3378 ~BNX2_PCICFG_MSI_CONTROL_ENABLE);
3379 REG_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl);
3380 bnx2_msi(bp->irq_tbl[0].vector, bnapi);
3384 bp->idle_chk_status_idx = bnapi->last_status_idx;
3387 #ifdef BCM_CNIC
3388 static void bnx2_poll_cnic(struct bnx2 *bp, struct bnx2_napi *bnapi)
3390 struct cnic_ops *c_ops;
3392 if (!bnapi->cnic_present)
3393 return;
3395 rcu_read_lock();
3396 c_ops = rcu_dereference(bp->cnic_ops);
3397 if (c_ops)
3398 bnapi->cnic_tag = c_ops->cnic_handler(bp->cnic_data,
3399 bnapi->status_blk.msi);
3400 rcu_read_unlock();
3402 #endif
3404 static void bnx2_poll_link(struct bnx2 *bp, struct bnx2_napi *bnapi)
3406 struct status_block *sblk = bnapi->status_blk.msi;
3407 u32 status_attn_bits = sblk->status_attn_bits;
3408 u32 status_attn_bits_ack = sblk->status_attn_bits_ack;
3410 if ((status_attn_bits & STATUS_ATTN_EVENTS) !=
3411 (status_attn_bits_ack & STATUS_ATTN_EVENTS)) {
3413 bnx2_phy_int(bp, bnapi);
3415 /* This is needed to take care of transient status
3416 * during link changes.
3417 */
3418 REG_WR(bp, BNX2_HC_COMMAND,
3419 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
3420 REG_RD(bp, BNX2_HC_COMMAND);
3424 static int bnx2_poll_work(struct bnx2 *bp, struct bnx2_napi *bnapi,
3425 int work_done, int budget)
3427 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
3428 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
3430 if (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons)
3431 bnx2_tx_int(bp, bnapi, 0);
3433 if (bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons)
3434 work_done += bnx2_rx_int(bp, bnapi, budget - work_done);
3436 return work_done;
3439 static int bnx2_poll_msix(struct napi_struct *napi, int budget)
3441 struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
3442 struct bnx2 *bp = bnapi->bp;
3443 int work_done = 0;
3444 struct status_block_msix *sblk = bnapi->status_blk.msix;
3446 while (1) {
3447 work_done = bnx2_poll_work(bp, bnapi, work_done, budget);
3448 if (unlikely(work_done >= budget))
3449 break;
3451 bnapi->last_status_idx = sblk->status_idx;
3452 /* status idx must be read before checking for more work. */
3453 rmb();
3454 if (likely(!bnx2_has_fast_work(bnapi))) {
3456 napi_complete(napi);
3457 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
3458 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
3459 bnapi->last_status_idx);
3460 break;
3463 return work_done;
3466 static int bnx2_poll(struct napi_struct *napi, int budget)
3468 struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
3469 struct bnx2 *bp = bnapi->bp;
3470 int work_done = 0;
3471 struct status_block *sblk = bnapi->status_blk.msi;
3473 while (1) {
3474 bnx2_poll_link(bp, bnapi);
3476 work_done = bnx2_poll_work(bp, bnapi, work_done, budget);
3478 #ifdef BCM_CNIC
3479 bnx2_poll_cnic(bp, bnapi);
3480 #endif
3482 /* bnapi->last_status_idx is used below to tell the hw how
3483 * much work has been processed, so we must read it before
3484 * checking for more work.
3485 */
3486 bnapi->last_status_idx = sblk->status_idx;
3488 if (unlikely(work_done >= budget))
3489 break;
3491 rmb();
3492 if (likely(!bnx2_has_work(bnapi))) {
3493 napi_complete(napi);
3494 if (likely(bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)) {
3495 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3496 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
3497 bnapi->last_status_idx);
3498 break;
3500 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3501 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
3502 BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
3503 bnapi->last_status_idx);
3505 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3506 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
3507 bnapi->last_status_idx);
3508 break;
3512 return work_done;
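/*
 * Illustrative sketch (hypothetical, not part of the driver): both poll
 * routines above follow the standard NAPI contract -- consume up to
 * `budget` packets, and only re-enable the device interrupt after
 * napi_complete() when no work remains.  Reduced to a skeleton where
 * do_rx_tx_work(), no_more_work() and enable_device_irq() are
 * placeholders:
 */
#if 0	/* example only */
static int example_napi_poll(struct napi_struct *napi, int budget)
{
	int work_done = 0;

	while (1) {
		work_done = do_rx_tx_work(work_done, budget);
		if (work_done >= budget)
			return work_done;	/* stay in polling mode */
		if (no_more_work()) {
			napi_complete(napi);
			enable_device_irq();	/* e.g. ack with INDEX_VALID */
			return work_done;
		}
	}
}
#endif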
3515 /* Called with rtnl_lock from vlan functions and also netif_tx_lock
3516 * from set_multicast.
3517 */
3518 static void
3519 bnx2_set_rx_mode(struct net_device *dev)
3521 struct bnx2 *bp = netdev_priv(dev);
3522 u32 rx_mode, sort_mode;
3523 struct netdev_hw_addr *ha;
3524 int i;
3526 if (!netif_running(dev))
3527 return;
3529 spin_lock_bh(&bp->phy_lock);
3531 rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
3532 BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
3533 sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
3534 #ifdef BCM_VLAN
3535 if (!bp->vlgrp && (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN))
3536 rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
3537 #else
3538 if (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN)
3539 rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
3540 #endif
3541 if (dev->flags & IFF_PROMISC) {
3542 /* Promiscuous mode. */
3543 rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
3544 sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
3545 BNX2_RPM_SORT_USER0_PROM_VLAN;
3546 }
3547 else if (dev->flags & IFF_ALLMULTI) {
3548 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
3549 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
3550 0xffffffff);
3552 sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
3553 }
3554 else {
3555 /* Accept one or more multicast(s). */
3556 struct dev_mc_list *mclist;
3557 u32 mc_filter[NUM_MC_HASH_REGISTERS];
3558 u32 regidx;
3559 u32 bit;
3560 u32 crc;
3562 memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);
3564 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
3565 i++, mclist = mclist->next) {
3567 crc = ether_crc_le(ETH_ALEN, mclist->dmi_addr);
3568 bit = crc & 0xff;
3569 regidx = (bit & 0xe0) >> 5;
3570 bit &= 0x1f;
3571 mc_filter[regidx] |= (1 << bit);
3574 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
3575 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
3576 mc_filter[i]);
3579 sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
3582 if (dev->uc.count > BNX2_MAX_UNICAST_ADDRESSES) {
3583 rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
3584 sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
3585 BNX2_RPM_SORT_USER0_PROM_VLAN;
3586 } else if (!(dev->flags & IFF_PROMISC)) {
3587 /* Add all entries to the match filter list */
3588 i = 0;
3589 list_for_each_entry(ha, &dev->uc.list, list) {
3590 bnx2_set_mac_addr(bp, ha->addr,
3591 i + BNX2_START_UNICAST_ADDRESS_INDEX);
3592 sort_mode |= (1 <<
3593 (i + BNX2_START_UNICAST_ADDRESS_INDEX));
3594 i++;
3599 if (rx_mode != bp->rx_mode) {
3600 bp->rx_mode = rx_mode;
3601 REG_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
3604 REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
3605 REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
3606 REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);
3608 spin_unlock_bh(&bp->phy_lock);
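/*
 * Illustration (not part of the driver): the multicast filter above
 * hashes each address with ether_crc_le() and uses the low 8 CRC bits to
 * pick one of 256 filter bits spread across 8 registers.  For a CRC
 * ending in 0x4a:
 *
 *	bit    = crc & 0xff;		-> 0x4a
 *	regidx = (bit & 0xe0) >> 5;	-> register 2
 *	bit   &= 0x1f;			-> bit 10 in that register
 *
 * so mc_filter[2] |= 1 << 10 for that address.
 */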
3611 static int __devinit
3612 check_fw_section(const struct firmware *fw,
3613 const struct bnx2_fw_file_section *section,
3614 u32 alignment, bool non_empty)
3616 u32 offset = be32_to_cpu(section->offset);
3617 u32 len = be32_to_cpu(section->len);
3619 if ((offset == 0 && len != 0) || offset >= fw->size || offset & 3)
3620 return -EINVAL;
3621 if ((non_empty && len == 0) || len > fw->size - offset ||
3622 len & (alignment - 1))
3623 return -EINVAL;
3624 return 0;
3627 static int __devinit
3628 check_mips_fw_entry(const struct firmware *fw,
3629 const struct bnx2_mips_fw_file_entry *entry)
3631 if (check_fw_section(fw, &entry->text, 4, true) ||
3632 check_fw_section(fw, &entry->data, 4, false) ||
3633 check_fw_section(fw, &entry->rodata, 4, false))
3634 return -EINVAL;
3635 return 0;
3638 static int __devinit
3639 bnx2_request_firmware(struct bnx2 *bp)
3641 const char *mips_fw_file, *rv2p_fw_file;
3642 const struct bnx2_mips_fw_file *mips_fw;
3643 const struct bnx2_rv2p_fw_file *rv2p_fw;
3644 int rc;
3646 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3647 mips_fw_file = FW_MIPS_FILE_09;
3648 if ((CHIP_ID(bp) == CHIP_ID_5709_A0) ||
3649 (CHIP_ID(bp) == CHIP_ID_5709_A1))
3650 rv2p_fw_file = FW_RV2P_FILE_09_Ax;
3651 else
3652 rv2p_fw_file = FW_RV2P_FILE_09;
3653 } else {
3654 mips_fw_file = FW_MIPS_FILE_06;
3655 rv2p_fw_file = FW_RV2P_FILE_06;
3658 rc = request_firmware(&bp->mips_firmware, mips_fw_file, &bp->pdev->dev);
3659 if (rc) {
3660 printk(KERN_ERR PFX "Can't load firmware file \"%s\"\n",
3661 mips_fw_file);
3662 return rc;
3665 rc = request_firmware(&bp->rv2p_firmware, rv2p_fw_file, &bp->pdev->dev);
3666 if (rc) {
3667 printk(KERN_ERR PFX "Can't load firmware file \"%s\"\n",
3668 rv2p_fw_file);
3669 return rc;
3671 mips_fw = (const struct bnx2_mips_fw_file *) bp->mips_firmware->data;
3672 rv2p_fw = (const struct bnx2_rv2p_fw_file *) bp->rv2p_firmware->data;
3673 if (bp->mips_firmware->size < sizeof(*mips_fw) ||
3674 check_mips_fw_entry(bp->mips_firmware, &mips_fw->com) ||
3675 check_mips_fw_entry(bp->mips_firmware, &mips_fw->cp) ||
3676 check_mips_fw_entry(bp->mips_firmware, &mips_fw->rxp) ||
3677 check_mips_fw_entry(bp->mips_firmware, &mips_fw->tpat) ||
3678 check_mips_fw_entry(bp->mips_firmware, &mips_fw->txp)) {
3679 printk(KERN_ERR PFX "Firmware file \"%s\" is invalid\n",
3680 mips_fw_file);
3681 return -EINVAL;
3683 if (bp->rv2p_firmware->size < sizeof(*rv2p_fw) ||
3684 check_fw_section(bp->rv2p_firmware, &rv2p_fw->proc1.rv2p, 8, true) ||
3685 check_fw_section(bp->rv2p_firmware, &rv2p_fw->proc2.rv2p, 8, true)) {
3686 printk(KERN_ERR PFX "Firmware file \"%s\" is invalid\n",
3687 rv2p_fw_file);
3688 return -EINVAL;
3691 return 0;
3694 static u32
3695 rv2p_fw_fixup(u32 rv2p_proc, int idx, u32 loc, u32 rv2p_code)
3697 switch (idx) {
3698 case RV2P_P1_FIXUP_PAGE_SIZE_IDX:
3699 rv2p_code &= ~RV2P_BD_PAGE_SIZE_MSK;
3700 rv2p_code |= RV2P_BD_PAGE_SIZE;
3701 break;
3703 return rv2p_code;
3706 static int
3707 load_rv2p_fw(struct bnx2 *bp, u32 rv2p_proc,
3708 const struct bnx2_rv2p_fw_file_entry *fw_entry)
3710 u32 rv2p_code_len, file_offset;
3711 __be32 *rv2p_code;
3712 int i;
3713 u32 val, cmd, addr;
3715 rv2p_code_len = be32_to_cpu(fw_entry->rv2p.len);
3716 file_offset = be32_to_cpu(fw_entry->rv2p.offset);
3718 rv2p_code = (__be32 *)(bp->rv2p_firmware->data + file_offset);
3720 if (rv2p_proc == RV2P_PROC1) {
3721 cmd = BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
3722 addr = BNX2_RV2P_PROC1_ADDR_CMD;
3723 } else {
3724 cmd = BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
3725 addr = BNX2_RV2P_PROC2_ADDR_CMD;
3728 for (i = 0; i < rv2p_code_len; i += 8) {
3729 REG_WR(bp, BNX2_RV2P_INSTR_HIGH, be32_to_cpu(*rv2p_code));
3730 rv2p_code++;
3731 REG_WR(bp, BNX2_RV2P_INSTR_LOW, be32_to_cpu(*rv2p_code));
3732 rv2p_code++;
3734 val = (i / 8) | cmd;
3735 REG_WR(bp, addr, val);
3738 rv2p_code = (__be32 *)(bp->rv2p_firmware->data + file_offset);
3739 for (i = 0; i < 8; i++) {
3740 u32 loc, code;
3742 loc = be32_to_cpu(fw_entry->fixup[i]);
3743 if (loc && ((loc * 4) < rv2p_code_len)) {
3744 code = be32_to_cpu(*(rv2p_code + loc - 1));
3745 REG_WR(bp, BNX2_RV2P_INSTR_HIGH, code);
3746 code = be32_to_cpu(*(rv2p_code + loc));
3747 code = rv2p_fw_fixup(rv2p_proc, i, loc, code);
3748 REG_WR(bp, BNX2_RV2P_INSTR_LOW, code);
3750 val = (loc / 2) | cmd;
3751 REG_WR(bp, addr, val);
3755 /* Reset the processor, un-stall is done later. */
3756 if (rv2p_proc == RV2P_PROC1) {
3757 REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
3758 }
3759 else {
3760 REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
3763 return 0;
3766 static int
3767 load_cpu_fw(struct bnx2 *bp, const struct cpu_reg *cpu_reg,
3768 const struct bnx2_mips_fw_file_entry *fw_entry)
3770 u32 addr, len, file_offset;
3771 __be32 *data;
3772 u32 offset;
3773 u32 val;
3775 /* Halt the CPU. */
3776 val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
3777 val |= cpu_reg->mode_value_halt;
3778 bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
3779 bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
3781 /* Load the Text area. */
3782 addr = be32_to_cpu(fw_entry->text.addr);
3783 len = be32_to_cpu(fw_entry->text.len);
3784 file_offset = be32_to_cpu(fw_entry->text.offset);
3785 data = (__be32 *)(bp->mips_firmware->data + file_offset);
3787 offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3788 if (len) {
3789 int j;
3791 for (j = 0; j < (len / 4); j++, offset += 4)
3792 bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3795 /* Load the Data area. */
3796 addr = be32_to_cpu(fw_entry->data.addr);
3797 len = be32_to_cpu(fw_entry->data.len);
3798 file_offset = be32_to_cpu(fw_entry->data.offset);
3799 data = (__be32 *)(bp->mips_firmware->data + file_offset);
3801 offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3802 if (len) {
3803 int j;
3805 for (j = 0; j < (len / 4); j++, offset += 4)
3806 bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3809 /* Load the Read-Only area. */
3810 addr = be32_to_cpu(fw_entry->rodata.addr);
3811 len = be32_to_cpu(fw_entry->rodata.len);
3812 file_offset = be32_to_cpu(fw_entry->rodata.offset);
3813 data = (__be32 *)(bp->mips_firmware->data + file_offset);
3815 offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3816 if (len) {
3817 int j;
3819 for (j = 0; j < (len / 4); j++, offset += 4)
3820 bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3823 /* Clear the pre-fetch instruction. */
3824 bnx2_reg_wr_ind(bp, cpu_reg->inst, 0);
3826 val = be32_to_cpu(fw_entry->start_addr);
3827 bnx2_reg_wr_ind(bp, cpu_reg->pc, val);
3829 /* Start the CPU. */
3830 val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
3831 val &= ~cpu_reg->mode_value_halt;
3832 bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
3833 bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
3835 return 0;
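/*
 * Illustration (not part of the driver): load_cpu_fw() relocates each
 * firmware section from the MIPS view of memory into the processor's
 * scratchpad window:
 *
 *	spad_offset = spad_base + (addr - mips_view_base)
 *
 * so a text section linked at mips_view_base + 0x20 is written 0x20
 * bytes into the scratchpad before the PC is set to the entry point and
 * the halt bit is cleared.
 */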
3838 static int
3839 bnx2_init_cpus(struct bnx2 *bp)
3841 const struct bnx2_mips_fw_file *mips_fw =
3842 (const struct bnx2_mips_fw_file *) bp->mips_firmware->data;
3843 const struct bnx2_rv2p_fw_file *rv2p_fw =
3844 (const struct bnx2_rv2p_fw_file *) bp->rv2p_firmware->data;
3845 int rc;
3847 /* Initialize the RV2P processor. */
3848 load_rv2p_fw(bp, RV2P_PROC1, &rv2p_fw->proc1);
3849 load_rv2p_fw(bp, RV2P_PROC2, &rv2p_fw->proc2);
3851 /* Initialize the RX Processor. */
3852 rc = load_cpu_fw(bp, &cpu_reg_rxp, &mips_fw->rxp);
3853 if (rc)
3854 goto init_cpu_err;
3856 /* Initialize the TX Processor. */
3857 rc = load_cpu_fw(bp, &cpu_reg_txp, &mips_fw->txp);
3858 if (rc)
3859 goto init_cpu_err;
3861 /* Initialize the TX Patch-up Processor. */
3862 rc = load_cpu_fw(bp, &cpu_reg_tpat, &mips_fw->tpat);
3863 if (rc)
3864 goto init_cpu_err;
3866 /* Initialize the Completion Processor. */
3867 rc = load_cpu_fw(bp, &cpu_reg_com, &mips_fw->com);
3868 if (rc)
3869 goto init_cpu_err;
3871 /* Initialize the Command Processor. */
3872 rc = load_cpu_fw(bp, &cpu_reg_cp, &mips_fw->cp);
3874 init_cpu_err:
3875 return rc;
3878 static int
3879 bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
3881 u16 pmcsr;
3883 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
3885 switch (state) {
3886 case PCI_D0: {
3887 u32 val;
3889 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
3890 (pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
3891 PCI_PM_CTRL_PME_STATUS);
3893 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
3894 /* delay required during transition out of D3hot */
3895 msleep(20);
3897 val = REG_RD(bp, BNX2_EMAC_MODE);
3898 val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
3899 val &= ~BNX2_EMAC_MODE_MPKT;
3900 REG_WR(bp, BNX2_EMAC_MODE, val);
3902 val = REG_RD(bp, BNX2_RPM_CONFIG);
3903 val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
3904 REG_WR(bp, BNX2_RPM_CONFIG, val);
3905 break;
3907 case PCI_D3hot: {
3908 int i;
3909 u32 val, wol_msg;
3911 if (bp->wol) {
3912 u32 advertising;
3913 u8 autoneg;
3915 autoneg = bp->autoneg;
3916 advertising = bp->advertising;
3918 if (bp->phy_port == PORT_TP) {
3919 bp->autoneg = AUTONEG_SPEED;
3920 bp->advertising = ADVERTISED_10baseT_Half |
3921 ADVERTISED_10baseT_Full |
3922 ADVERTISED_100baseT_Half |
3923 ADVERTISED_100baseT_Full |
3924 ADVERTISED_Autoneg;
3927 spin_lock_bh(&bp->phy_lock);
3928 bnx2_setup_phy(bp, bp->phy_port);
3929 spin_unlock_bh(&bp->phy_lock);
3931 bp->autoneg = autoneg;
3932 bp->advertising = advertising;
3934 bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
3936 val = REG_RD(bp, BNX2_EMAC_MODE);
3938 /* Enable port mode. */
3939 val &= ~BNX2_EMAC_MODE_PORT;
3940 val |= BNX2_EMAC_MODE_MPKT_RCVD |
3941 BNX2_EMAC_MODE_ACPI_RCVD |
3942 BNX2_EMAC_MODE_MPKT;
3943 if (bp->phy_port == PORT_TP)
3944 val |= BNX2_EMAC_MODE_PORT_MII;
3945 else {
3946 val |= BNX2_EMAC_MODE_PORT_GMII;
3947 if (bp->line_speed == SPEED_2500)
3948 val |= BNX2_EMAC_MODE_25G_MODE;
3951 REG_WR(bp, BNX2_EMAC_MODE, val);
3953 /* receive all multicast */
3954 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
3955 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
3956 0xffffffff);
3958 REG_WR(bp, BNX2_EMAC_RX_MODE,
3959 BNX2_EMAC_RX_MODE_SORT_MODE);
3961 val = 1 | BNX2_RPM_SORT_USER0_BC_EN |
3962 BNX2_RPM_SORT_USER0_MC_EN;
3963 REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
3964 REG_WR(bp, BNX2_RPM_SORT_USER0, val);
3965 REG_WR(bp, BNX2_RPM_SORT_USER0, val |
3966 BNX2_RPM_SORT_USER0_ENA);
3968 /* Need to enable EMAC and RPM for WOL. */
3969 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
3970 BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
3971 BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
3972 BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);
3974 val = REG_RD(bp, BNX2_RPM_CONFIG);
3975 val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
3976 REG_WR(bp, BNX2_RPM_CONFIG, val);
3978 wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
3979 }
3980 else {
3981 wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
3984 if (!(bp->flags & BNX2_FLAG_NO_WOL))
3985 bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg,
3986 1, 0);
3988 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
3989 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
3990 (CHIP_ID(bp) == CHIP_ID_5706_A1)) {
3992 if (bp->wol)
3993 pmcsr |= 3;
3994 }
3995 else {
3996 pmcsr |= 3;
3998 if (bp->wol) {
3999 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
4001 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
4002 pmcsr);
4004 /* No more memory access after this point until
4005 * device is brought back to D0.
4006 */
4007 udelay(50);
4008 break;
4009 }
4010 default:
4011 return -EINVAL;
4013 return 0;
4016 static int
4017 bnx2_acquire_nvram_lock(struct bnx2 *bp)
4019 u32 val;
4020 int j;
4022 /* Request access to the flash interface. */
4023 REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
4024 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4025 val = REG_RD(bp, BNX2_NVM_SW_ARB);
4026 if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
4027 break;
4029 udelay(5);
4032 if (j >= NVRAM_TIMEOUT_COUNT)
4033 return -EBUSY;
4035 return 0;
4038 static int
4039 bnx2_release_nvram_lock(struct bnx2 *bp)
4041 int j;
4042 u32 val;
4044 /* Relinquish nvram interface. */
4045 REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);
4047 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4048 val = REG_RD(bp, BNX2_NVM_SW_ARB);
4049 if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
4050 break;
4052 udelay(5);
4055 if (j >= NVRAM_TIMEOUT_COUNT)
4056 return -EBUSY;
4058 return 0;
4062 static int
4063 bnx2_enable_nvram_write(struct bnx2 *bp)
4065 u32 val;
4067 val = REG_RD(bp, BNX2_MISC_CFG);
4068 REG_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);
4070 if (bp->flash_info->flags & BNX2_NV_WREN) {
4071 int j;
4073 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4074 REG_WR(bp, BNX2_NVM_COMMAND,
4075 BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);
4077 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4078 udelay(5);
4080 val = REG_RD(bp, BNX2_NVM_COMMAND);
4081 if (val & BNX2_NVM_COMMAND_DONE)
4082 break;
4085 if (j >= NVRAM_TIMEOUT_COUNT)
4086 return -EBUSY;
4088 return 0;
4091 static void
4092 bnx2_disable_nvram_write(struct bnx2 *bp)
4094 u32 val;
4096 val = REG_RD(bp, BNX2_MISC_CFG);
4097 REG_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
4101 static void
4102 bnx2_enable_nvram_access(struct bnx2 *bp)
4104 u32 val;
4106 val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
4107 /* Enable both bits, even on read. */
4108 REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
4109 val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
4112 static void
4113 bnx2_disable_nvram_access(struct bnx2 *bp)
4115 u32 val;
4117 val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
4118 /* Disable both bits, even after read. */
4119 REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
4120 val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
4121 BNX2_NVM_ACCESS_ENABLE_WR_EN));
4124 static int
4125 bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
4127 u32 cmd;
4128 int j;
4130 if (bp->flash_info->flags & BNX2_NV_BUFFERED)
4131 /* Buffered flash, no erase needed */
4132 return 0;
4134 /* Build an erase command */
4135 cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
4136 BNX2_NVM_COMMAND_DOIT;
4138 /* Need to clear DONE bit separately. */
4139 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4141 /* Address of the NVRAM page to erase. */
4142 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
4144 /* Issue an erase command. */
4145 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
4147 /* Wait for completion. */
4148 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4149 u32 val;
4151 udelay(5);
4153 val = REG_RD(bp, BNX2_NVM_COMMAND);
4154 if (val & BNX2_NVM_COMMAND_DONE)
4155 break;
4158 if (j >= NVRAM_TIMEOUT_COUNT)
4159 return -EBUSY;
4161 return 0;
4164 static int
4165 bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
4167 u32 cmd;
4168 int j;
4170 /* Build the command word. */
4171 cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;
4173 /* Calculate an offset of a buffered flash, not needed for 5709. */
4174 if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
4175 offset = ((offset / bp->flash_info->page_size) <<
4176 bp->flash_info->page_bits) +
4177 (offset % bp->flash_info->page_size);
4180 /* Need to clear DONE bit separately. */
4181 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4183 /* Address of the NVRAM to read from. */
4184 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
4186 /* Issue a read command. */
4187 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
4189 /* Wait for completion. */
4190 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4191 u32 val;
4193 udelay(5);
4195 val = REG_RD(bp, BNX2_NVM_COMMAND);
4196 if (val & BNX2_NVM_COMMAND_DONE) {
4197 __be32 v = cpu_to_be32(REG_RD(bp, BNX2_NVM_READ));
4198 memcpy(ret_val, &v, 4);
4199 break;
4202 if (j >= NVRAM_TIMEOUT_COUNT)
4203 return -EBUSY;
4205 return 0;
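/*
 * Illustration (not part of the driver): for buffered flash parts the
 * linear offset is split into a page number and an intra-page offset,
 * with the page number shifted up by page_bits rather than multiplied by
 * page_size.  E.g. with page_size = 264 and page_bits = 9:
 *
 *	offset 1000 -> page 3, byte 208 -> (3 << 9) + 208 = 0x6d0
 */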
4209 static int
4210 bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
4212 u32 cmd;
4213 __be32 val32;
4214 int j;
4216 /* Build the command word. */
4217 cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;
4219 /* Calculate an offset of a buffered flash, not needed for 5709. */
4220 if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
4221 offset = ((offset / bp->flash_info->page_size) <<
4222 bp->flash_info->page_bits) +
4223 (offset % bp->flash_info->page_size);
4226 /* Need to clear DONE bit separately. */
4227 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4229 memcpy(&val32, val, 4);
4231 /* Write the data. */
4232 REG_WR(bp, BNX2_NVM_WRITE, be32_to_cpu(val32));
4234 /* Address of the NVRAM to write to. */
4235 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
4237 /* Issue the write command. */
4238 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
4240 /* Wait for completion. */
4241 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4242 udelay(5);
4244 if (REG_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
4245 break;
4247 if (j >= NVRAM_TIMEOUT_COUNT)
4248 return -EBUSY;
4250 return 0;
4253 static int
4254 bnx2_init_nvram(struct bnx2 *bp)
4256 u32 val;
4257 int j, entry_count, rc = 0;
4258 const struct flash_spec *flash;
4260 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4261 bp->flash_info = &flash_5709;
4262 goto get_flash_size;
4265 /* Determine the selected interface. */
4266 val = REG_RD(bp, BNX2_NVM_CFG1);
4268 entry_count = ARRAY_SIZE(flash_table);
4270 if (val & 0x40000000) {
4272 /* Flash interface has been reconfigured */
4273 for (j = 0, flash = &flash_table[0]; j < entry_count;
4274 j++, flash++) {
4275 if ((val & FLASH_BACKUP_STRAP_MASK) ==
4276 (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
4277 bp->flash_info = flash;
4278 break;
4279 }
4280 }
4281 }
4282 else {
4283 u32 mask;
4284 /* Not yet been reconfigured */
4286 if (val & (1 << 23))
4287 mask = FLASH_BACKUP_STRAP_MASK;
4288 else
4289 mask = FLASH_STRAP_MASK;
4291 for (j = 0, flash = &flash_table[0]; j < entry_count;
4292 j++, flash++) {
4294 if ((val & mask) == (flash->strapping & mask)) {
4295 bp->flash_info = flash;
4297 /* Request access to the flash interface. */
4298 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
4299 return rc;
4301 /* Enable access to flash interface */
4302 bnx2_enable_nvram_access(bp);
4304 /* Reconfigure the flash interface */
4305 REG_WR(bp, BNX2_NVM_CFG1, flash->config1);
4306 REG_WR(bp, BNX2_NVM_CFG2, flash->config2);
4307 REG_WR(bp, BNX2_NVM_CFG3, flash->config3);
4308 REG_WR(bp, BNX2_NVM_WRITE1, flash->write1);
4310 /* Disable access to flash interface */
4311 bnx2_disable_nvram_access(bp);
4312 bnx2_release_nvram_lock(bp);
4314 break;
4317 } /* if (val & 0x40000000) */
4319 if (j == entry_count) {
4320 bp->flash_info = NULL;
4321 printk(KERN_ALERT PFX "Unknown flash/EEPROM type.\n");
4322 return -ENODEV;
4325 get_flash_size:
4326 val = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG2);
4327 val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
4328 if (val)
4329 bp->flash_size = val;
4330 else
4331 bp->flash_size = bp->flash_info->total_size;
4333 return rc;
4336 static int
4337 bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
4338 int buf_size)
4340 int rc = 0;
4341 u32 cmd_flags, offset32, len32, extra;
4343 if (buf_size == 0)
4344 return 0;
4346 /* Request access to the flash interface. */
4347 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
4348 return rc;
4350 /* Enable access to flash interface */
4351 bnx2_enable_nvram_access(bp);
4353 len32 = buf_size;
4354 offset32 = offset;
4355 extra = 0;
4357 cmd_flags = 0;
4359 if (offset32 & 3) {
4360 u8 buf[4];
4361 u32 pre_len;
4363 offset32 &= ~3;
4364 pre_len = 4 - (offset & 3);
4366 if (pre_len >= len32) {
4367 pre_len = len32;
4368 cmd_flags = BNX2_NVM_COMMAND_FIRST |
4369 BNX2_NVM_COMMAND_LAST;
4371 else {
4372 cmd_flags = BNX2_NVM_COMMAND_FIRST;
4375 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4377 if (rc)
4378 return rc;
4380 memcpy(ret_buf, buf + (offset & 3), pre_len);
4382 offset32 += 4;
4383 ret_buf += pre_len;
4384 len32 -= pre_len;
4386 if (len32 & 3) {
4387 extra = 4 - (len32 & 3);
4388 len32 = (len32 + 4) & ~3;
4391 if (len32 == 4) {
4392 u8 buf[4];
4394 if (cmd_flags)
4395 cmd_flags = BNX2_NVM_COMMAND_LAST;
4396 else
4397 cmd_flags = BNX2_NVM_COMMAND_FIRST |
4398 BNX2_NVM_COMMAND_LAST;
4400 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4402 memcpy(ret_buf, buf, 4 - extra);
4404 else if (len32 > 0) {
4405 u8 buf[4];
4407 /* Read the first word. */
4408 if (cmd_flags)
4409 cmd_flags = 0;
4410 else
4411 cmd_flags = BNX2_NVM_COMMAND_FIRST;
4413 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);
4415 /* Advance to the next dword. */
4416 offset32 += 4;
4417 ret_buf += 4;
4418 len32 -= 4;
4420 while (len32 > 4 && rc == 0) {
4421 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);
4423 /* Advance to the next dword. */
4424 offset32 += 4;
4425 ret_buf += 4;
4426 len32 -= 4;
4429 if (rc)
4430 return rc;
4432 cmd_flags = BNX2_NVM_COMMAND_LAST;
4433 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4435 memcpy(ret_buf, buf, 4 - extra);
4438 /* Disable access to flash interface */
4439 bnx2_disable_nvram_access(bp);
4441 bnx2_release_nvram_lock(bp);
4443 return rc;
4446 static int
4447 bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
4448 int buf_size)
4450 u32 written, offset32, len32;
4451 u8 *buf, start[4], end[4], *align_buf = NULL, *flash_buffer = NULL;
4452 int rc = 0;
4453 int align_start, align_end;
4455 buf = data_buf;
4456 offset32 = offset;
4457 len32 = buf_size;
4458 align_start = align_end = 0;
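/* Writes are done in whole dwords, so do a read-modify-write: fetch the
 * partial dwords at the head and tail, then merge the caller's data into
 * an aligned scratch buffer.
 */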
4460 if ((align_start = (offset32 & 3))) {
4461 offset32 &= ~3;
4462 len32 += align_start;
4463 if (len32 < 4)
4464 len32 = 4;
4465 if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
4466 return rc;
4469 if (len32 & 3) {
4470 align_end = 4 - (len32 & 3);
4471 len32 += align_end;
4472 if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4, end, 4)))
4473 return rc;
4476 if (align_start || align_end) {
4477 align_buf = kmalloc(len32, GFP_KERNEL);
4478 if (align_buf == NULL)
4479 return -ENOMEM;
4480 if (align_start) {
4481 memcpy(align_buf, start, 4);
4483 if (align_end) {
4484 memcpy(align_buf + len32 - 4, end, 4);
4486 memcpy(align_buf + align_start, data_buf, buf_size);
4487 buf = align_buf;
4490 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4491 flash_buffer = kmalloc(264, GFP_KERNEL);
4492 if (flash_buffer == NULL) {
4493 rc = -ENOMEM;
4494 goto nvram_write_end;
4498 written = 0;
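/* Walk the request one flash page at a time.  Unbuffered parts require
 * each page to be read out, erased, and rewritten with the merged data;
 * buffered parts can be written directly.
 */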
4499 while ((written < len32) && (rc == 0)) {
4500 u32 page_start, page_end, data_start, data_end;
4501 u32 addr, cmd_flags;
4502 int i;
4504 /* Find the page_start addr */
4505 page_start = offset32 + written;
4506 page_start -= (page_start % bp->flash_info->page_size);
4507 /* Find the page_end addr */
4508 page_end = page_start + bp->flash_info->page_size;
4509 /* Find the data_start addr */
4510 data_start = (written == 0) ? offset32 : page_start;
4511 /* Find the data_end addr */
4512 data_end = (page_end > offset32 + len32) ?
4513 (offset32 + len32) : page_end;
4515 /* Request access to the flash interface. */
4516 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
4517 goto nvram_write_end;
4519 /* Enable access to flash interface */
4520 bnx2_enable_nvram_access(bp);
4522 cmd_flags = BNX2_NVM_COMMAND_FIRST;
4523 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4524 int j;
4526 /* Read the whole page into the buffer
4527 * (non-buffered flash only) */
4528 for (j = 0; j < bp->flash_info->page_size; j += 4) {
4529 if (j == (bp->flash_info->page_size - 4)) {
4530 cmd_flags |= BNX2_NVM_COMMAND_LAST;
4532 rc = bnx2_nvram_read_dword(bp,
4533 page_start + j,
4534 &flash_buffer[j],
4535 cmd_flags);
4537 if (rc)
4538 goto nvram_write_end;
4540 cmd_flags = 0;
4544 /* Enable writes to flash interface (unlock write-protect) */
4545 if ((rc = bnx2_enable_nvram_write(bp)) != 0)
4546 goto nvram_write_end;
4548 /* Loop to write back the buffer data from page_start to
4549 * data_start */
4550 i = 0;
4551 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4552 /* Erase the page */
4553 if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
4554 goto nvram_write_end;
4556 /* Re-enable writes for the actual data write */
4557 bnx2_enable_nvram_write(bp);
4559 for (addr = page_start; addr < data_start;
4560 addr += 4, i += 4) {
4562 rc = bnx2_nvram_write_dword(bp, addr,
4563 &flash_buffer[i], cmd_flags);
4565 if (rc != 0)
4566 goto nvram_write_end;
4568 cmd_flags = 0;
4572 /* Loop to write the new data from data_start to data_end */
4573 for (addr = data_start; addr < data_end; addr += 4, i += 4) {
4574 if ((addr == page_end - 4) ||
4575 ((bp->flash_info->flags & BNX2_NV_BUFFERED) &&
4576 (addr == data_end - 4))) {
4578 cmd_flags |= BNX2_NVM_COMMAND_LAST;
4580 rc = bnx2_nvram_write_dword(bp, addr, buf,
4581 cmd_flags);
4583 if (rc != 0)
4584 goto nvram_write_end;
4586 cmd_flags = 0;
4587 buf += 4;
4590 /* Loop to write back the buffer data from data_end
4591 * to page_end */
4592 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4593 for (addr = data_end; addr < page_end;
4594 addr += 4, i += 4) {
4596 if (addr == page_end-4) {
4597 cmd_flags = BNX2_NVM_COMMAND_LAST;
4599 rc = bnx2_nvram_write_dword(bp, addr,
4600 &flash_buffer[i], cmd_flags);
4602 if (rc != 0)
4603 goto nvram_write_end;
4605 cmd_flags = 0;
4609 /* Disable writes to flash interface (lock write-protect) */
4610 bnx2_disable_nvram_write(bp);
4612 /* Disable access to flash interface */
4613 bnx2_disable_nvram_access(bp);
4614 bnx2_release_nvram_lock(bp);
4616 /* Account for the bytes written in this page */
4617 written += data_end - data_start;
4620 nvram_write_end:
4621 kfree(flash_buffer);
4622 kfree(align_buf);
4623 return rc;
4626 static void
4627 bnx2_init_fw_cap(struct bnx2 *bp)
4629 u32 val, sig = 0;
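/* 'sig' collects capability acknowledgements; if any were negotiated it
 * is written back to the BNX2_DRV_ACK_CAP_MB mailbox at the end.
 */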
4631 bp->phy_flags &= ~BNX2_PHY_FLAG_REMOTE_PHY_CAP;
4632 bp->flags &= ~BNX2_FLAG_CAN_KEEP_VLAN;
4634 if (!(bp->flags & BNX2_FLAG_ASF_ENABLE))
4635 bp->flags |= BNX2_FLAG_CAN_KEEP_VLAN;
4637 val = bnx2_shmem_rd(bp, BNX2_FW_CAP_MB);
4638 if ((val & BNX2_FW_CAP_SIGNATURE_MASK) != BNX2_FW_CAP_SIGNATURE)
4639 return;
4641 if ((val & BNX2_FW_CAP_CAN_KEEP_VLAN) == BNX2_FW_CAP_CAN_KEEP_VLAN) {
4642 bp->flags |= BNX2_FLAG_CAN_KEEP_VLAN;
4643 sig |= BNX2_DRV_ACK_CAP_SIGNATURE | BNX2_FW_CAP_CAN_KEEP_VLAN;
4646 if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
4647 (val & BNX2_FW_CAP_REMOTE_PHY_CAPABLE)) {
4648 u32 link;
4650 bp->phy_flags |= BNX2_PHY_FLAG_REMOTE_PHY_CAP;
4652 link = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);
4653 if (link & BNX2_LINK_STATUS_SERDES_LINK)
4654 bp->phy_port = PORT_FIBRE;
4655 else
4656 bp->phy_port = PORT_TP;
4658 sig |= BNX2_DRV_ACK_CAP_SIGNATURE |
4659 BNX2_FW_CAP_REMOTE_PHY_CAPABLE;
4662 if (netif_running(bp->dev) && sig)
4663 bnx2_shmem_wr(bp, BNX2_DRV_ACK_CAP_MB, sig);
4666 static void
4667 bnx2_setup_msix_tbl(struct bnx2 *bp)
4669 REG_WR(bp, BNX2_PCI_GRC_WINDOW_ADDR, BNX2_PCI_GRC_WINDOW_ADDR_SEP_WIN);
4671 REG_WR(bp, BNX2_PCI_GRC_WINDOW2_ADDR, BNX2_MSIX_TABLE_ADDR);
4672 REG_WR(bp, BNX2_PCI_GRC_WINDOW3_ADDR, BNX2_MSIX_PBA_ADDR);
4675 static int
4676 bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
4678 u32 val;
4679 int i, rc = 0;
4680 u8 old_port;
4682 /* Wait for the current PCI transaction to complete before
4683 * issuing a reset. */
4684 REG_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
4685 BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
4686 BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
4687 BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
4688 BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
4689 val = REG_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
4690 udelay(5);
4692 /* Wait for the firmware to tell us it is ok to issue a reset. */
4693 bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1, 1);
4695 /* Deposit a driver reset signature so the firmware knows that
4696 * this is a soft reset. */
4697 bnx2_shmem_wr(bp, BNX2_DRV_RESET_SIGNATURE,
4698 BNX2_DRV_RESET_SIGNATURE_MAGIC);
4700 /* Do a dummy read to force the chip to complete all current transactions
4701 * before we issue a reset. */
4702 val = REG_RD(bp, BNX2_MISC_ID);
4704 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4705 REG_WR(bp, BNX2_MISC_COMMAND, BNX2_MISC_COMMAND_SW_RESET);
4706 REG_RD(bp, BNX2_MISC_COMMAND);
4707 udelay(5);
4709 val = BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
4710 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
4712 pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, val);
4714 } else {
4715 val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
4716 BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
4717 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
4719 /* Chip reset. */
4720 REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);
4722 /* Reading back any register after chip reset will hang the
4723 * bus on 5706 A0 and A1. The msleep below provides plenty
4724 * of margin for write posting.
4725 */
4726 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
4727 (CHIP_ID(bp) == CHIP_ID_5706_A1))
4728 msleep(20);
4730 /* Reset takes approximately 30 usec */
4731 for (i = 0; i < 10; i++) {
4732 val = REG_RD(bp, BNX2_PCICFG_MISC_CONFIG);
4733 if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
4734 BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0)
4735 break;
4736 udelay(10);
4739 if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
4740 BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
4741 printk(KERN_ERR PFX "Chip reset did not complete\n");
4742 return -EBUSY;
4746 /* Make sure byte swapping is properly configured. */
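/* BNX2_PCI_SWAP_DIAG0 reads back as the fixed pattern 0x01020304 when
 * the byte/word swap settings programmed above took effect.
 */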
4747 val = REG_RD(bp, BNX2_PCI_SWAP_DIAG0);
4748 if (val != 0x01020304) {
4749 printk(KERN_ERR PFX "Chip not in correct endian mode\n");
4750 return -ENODEV;
4753 /* Wait for the firmware to finish its initialization. */
4754 rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 1, 0);
4755 if (rc)
4756 return rc;
4758 spin_lock_bh(&bp->phy_lock);
4759 old_port = bp->phy_port;
4760 bnx2_init_fw_cap(bp);
4761 if ((bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) &&
4762 old_port != bp->phy_port)
4763 bnx2_set_default_remote_link(bp);
4764 spin_unlock_bh(&bp->phy_lock);
4766 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
4767 /* Adjust the voltage regulator two steps lower. The default
4768 * of this register is 0x0000000e. */
4769 REG_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);
4771 /* Remove bad rbuf memory from the free pool. */
4772 rc = bnx2_alloc_bad_rbuf(bp);
4775 if (bp->flags & BNX2_FLAG_USING_MSIX)
4776 bnx2_setup_msix_tbl(bp);
4778 return rc;
4781 static int
4782 bnx2_init_chip(struct bnx2 *bp)
4784 u32 val, mtu;
4785 int rc, i;
4787 /* Make sure the interrupt is not active. */
4788 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
4790 val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
4791 BNX2_DMA_CONFIG_DATA_WORD_SWAP |
4792 #ifdef __BIG_ENDIAN
4793 BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
4794 #endif
4795 BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
4796 DMA_READ_CHANS << 12 |
4797 DMA_WRITE_CHANS << 16;
4799 val |= (0x2 << 20) | (1 << 11);
4801 if ((bp->flags & BNX2_FLAG_PCIX) && (bp->bus_speed_mhz == 133))
4802 val |= (1 << 23);
4804 if ((CHIP_NUM(bp) == CHIP_NUM_5706) &&
4805 (CHIP_ID(bp) != CHIP_ID_5706_A0) && !(bp->flags & BNX2_FLAG_PCIX))
4806 val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;
4808 REG_WR(bp, BNX2_DMA_CONFIG, val);
4810 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
4811 val = REG_RD(bp, BNX2_TDMA_CONFIG);
4812 val |= BNX2_TDMA_CONFIG_ONE_DMA;
4813 REG_WR(bp, BNX2_TDMA_CONFIG, val);
4816 if (bp->flags & BNX2_FLAG_PCIX) {
4817 u16 val16;
4819 pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
4820 &val16);
4821 pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
4822 val16 & ~PCI_X_CMD_ERO);
4825 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
4826 BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
4827 BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
4828 BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);
4830 /* Initialize context mapping and zero out the quick contexts. The
4831 * context block must have already been enabled. */
4832 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4833 rc = bnx2_init_5709_context(bp);
4834 if (rc)
4835 return rc;
4836 } else
4837 bnx2_init_context(bp);
4839 if ((rc = bnx2_init_cpus(bp)) != 0)
4840 return rc;
4842 bnx2_init_nvram(bp);
4844 bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
4846 val = REG_RD(bp, BNX2_MQ_CONFIG);
4847 val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
4848 val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
4849 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4850 val |= BNX2_MQ_CONFIG_BIN_MQ_MODE;
4851 if (CHIP_REV(bp) == CHIP_REV_Ax)
4852 val |= BNX2_MQ_CONFIG_HALT_DIS;
4855 REG_WR(bp, BNX2_MQ_CONFIG, val);
4857 val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
4858 REG_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
4859 REG_WR(bp, BNX2_MQ_KNL_WIND_END, val);
4861 val = (BCM_PAGE_BITS - 8) << 24;
4862 REG_WR(bp, BNX2_RV2P_CONFIG, val);
4864 /* Configure page size. */
4865 val = REG_RD(bp, BNX2_TBDR_CONFIG);
4866 val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
4867 val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
4868 REG_WR(bp, BNX2_TBDR_CONFIG, val);
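/* Seed the transmit backoff state from the MAC address so that
 * different ports tend to choose different collision backoff slots.
 */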
4870 val = bp->mac_addr[0] +
4871 (bp->mac_addr[1] << 8) +
4872 (bp->mac_addr[2] << 16) +
4873 bp->mac_addr[3] +
4874 (bp->mac_addr[4] << 8) +
4875 (bp->mac_addr[5] << 16);
4876 REG_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);
4878 /* Program the MTU. Also include 4 bytes for CRC32. */
4879 mtu = bp->dev->mtu;
4880 val = mtu + ETH_HLEN + ETH_FCS_LEN;
4881 if (val > (MAX_ETHERNET_PACKET_SIZE + 4))
4882 val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
4883 REG_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);
4885 if (mtu < 1500)
4886 mtu = 1500;
4888 bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG, BNX2_RBUF_CONFIG_VAL(mtu));
4889 bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG2, BNX2_RBUF_CONFIG2_VAL(mtu));
4890 bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG3, BNX2_RBUF_CONFIG3_VAL(mtu));
4892 memset(bp->bnx2_napi[0].status_blk.msi, 0, bp->status_stats_size);
4893 for (i = 0; i < BNX2_MAX_MSIX_VEC; i++)
4894 bp->bnx2_napi[i].last_status_idx = 0;
4896 bp->idle_chk_status_idx = 0xffff;
4898 bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE;
4900 /* Set up how to generate a link change interrupt. */
4901 REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
4903 REG_WR(bp, BNX2_HC_STATUS_ADDR_L,
4904 (u64) bp->status_blk_mapping & 0xffffffff);
4905 REG_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);
4907 REG_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
4908 (u64) bp->stats_blk_mapping & 0xffffffff);
4909 REG_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
4910 (u64) bp->stats_blk_mapping >> 32);
4912 REG_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
4913 (bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);
4915 REG_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
4916 (bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);
4918 REG_WR(bp, BNX2_HC_COMP_PROD_TRIP,
4919 (bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);
4921 REG_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);
4923 REG_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);
4925 REG_WR(bp, BNX2_HC_COM_TICKS,
4926 (bp->com_ticks_int << 16) | bp->com_ticks);
4928 REG_WR(bp, BNX2_HC_CMD_TICKS,
4929 (bp->cmd_ticks_int << 16) | bp->cmd_ticks);
4931 if (bp->flags & BNX2_FLAG_BROKEN_STATS)
4932 REG_WR(bp, BNX2_HC_STATS_TICKS, 0);
4933 else
4934 REG_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks);
4935 REG_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8); /* 3ms */
4937 if (CHIP_ID(bp) == CHIP_ID_5706_A1)
4938 val = BNX2_HC_CONFIG_COLLECT_STATS;
4939 else {
4940 val = BNX2_HC_CONFIG_RX_TMR_MODE | BNX2_HC_CONFIG_TX_TMR_MODE |
4941 BNX2_HC_CONFIG_COLLECT_STATS;
4944 if (bp->irq_nvecs > 1) {
4945 REG_WR(bp, BNX2_HC_MSIX_BIT_VECTOR,
4946 BNX2_HC_MSIX_BIT_VECTOR_VAL);
4948 val |= BNX2_HC_CONFIG_SB_ADDR_INC_128B;
4951 if (bp->flags & BNX2_FLAG_ONE_SHOT_MSI)
4952 val |= BNX2_HC_CONFIG_ONE_SHOT | BNX2_HC_CONFIG_USE_INT_PARAM;
4954 REG_WR(bp, BNX2_HC_CONFIG, val);
4956 for (i = 1; i < bp->irq_nvecs; i++) {
4957 u32 base = ((i - 1) * BNX2_HC_SB_CONFIG_SIZE) +
4958 BNX2_HC_SB_CONFIG_1;
4960 REG_WR(bp, base,
4961 BNX2_HC_SB_CONFIG_1_TX_TMR_MODE |
4962 BNX2_HC_SB_CONFIG_1_RX_TMR_MODE |
4963 BNX2_HC_SB_CONFIG_1_ONE_SHOT);
4965 REG_WR(bp, base + BNX2_HC_TX_QUICK_CONS_TRIP_OFF,
4966 (bp->tx_quick_cons_trip_int << 16) |
4967 bp->tx_quick_cons_trip);
4969 REG_WR(bp, base + BNX2_HC_TX_TICKS_OFF,
4970 (bp->tx_ticks_int << 16) | bp->tx_ticks);
4972 REG_WR(bp, base + BNX2_HC_RX_QUICK_CONS_TRIP_OFF,
4973 (bp->rx_quick_cons_trip_int << 16) |
4974 bp->rx_quick_cons_trip);
4976 REG_WR(bp, base + BNX2_HC_RX_TICKS_OFF,
4977 (bp->rx_ticks_int << 16) | bp->rx_ticks);
4980 /* Clear internal stats counters. */
4981 REG_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);
4983 REG_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_EVENTS);
4985 /* Initialize the receive filter. */
4986 bnx2_set_rx_mode(bp->dev);
4988 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4989 val = REG_RD(bp, BNX2_MISC_NEW_CORE_CTL);
4990 val |= BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
4991 REG_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
4993 rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
4994 1, 0);
4996 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, BNX2_MISC_ENABLE_DEFAULT);
4997 REG_RD(bp, BNX2_MISC_ENABLE_SET_BITS);
4999 udelay(20);
5001 bp->hc_cmd = REG_RD(bp, BNX2_HC_COMMAND);
5003 return rc;
5006 static void
5007 bnx2_clear_ring_states(struct bnx2 *bp)
5009 struct bnx2_napi *bnapi;
5010 struct bnx2_tx_ring_info *txr;
5011 struct bnx2_rx_ring_info *rxr;
5012 int i;
5014 for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
5015 bnapi = &bp->bnx2_napi[i];
5016 txr = &bnapi->tx_ring;
5017 rxr = &bnapi->rx_ring;
5019 txr->tx_cons = 0;
5020 txr->hw_tx_cons = 0;
5021 rxr->rx_prod_bseq = 0;
5022 rxr->rx_prod = 0;
5023 rxr->rx_cons = 0;
5024 rxr->rx_pg_prod = 0;
5025 rxr->rx_pg_cons = 0;
5029 static void
5030 bnx2_init_tx_context(struct bnx2 *bp, u32 cid, struct bnx2_tx_ring_info *txr)
5032 u32 val, offset0, offset1, offset2, offset3;
5033 u32 cid_addr = GET_CID_ADDR(cid);
5035 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
5036 offset0 = BNX2_L2CTX_TYPE_XI;
5037 offset1 = BNX2_L2CTX_CMD_TYPE_XI;
5038 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
5039 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
5040 } else {
5041 offset0 = BNX2_L2CTX_TYPE;
5042 offset1 = BNX2_L2CTX_CMD_TYPE;
5043 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
5044 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
5046 val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
5047 bnx2_ctx_wr(bp, cid_addr, offset0, val);
5049 val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
5050 bnx2_ctx_wr(bp, cid_addr, offset1, val);
5052 val = (u64) txr->tx_desc_mapping >> 32;
5053 bnx2_ctx_wr(bp, cid_addr, offset2, val);
5055 val = (u64) txr->tx_desc_mapping & 0xffffffff;
5056 bnx2_ctx_wr(bp, cid_addr, offset3, val);
5059 static void
5060 bnx2_init_tx_ring(struct bnx2 *bp, int ring_num)
5062 struct tx_bd *txbd;
5063 u32 cid = TX_CID;
5064 struct bnx2_napi *bnapi;
5065 struct bnx2_tx_ring_info *txr;
5067 bnapi = &bp->bnx2_napi[ring_num];
5068 txr = &bnapi->tx_ring;
5070 if (ring_num == 0)
5071 cid = TX_CID;
5072 else
5073 cid = TX_TSS_CID + ring_num - 1;
5075 bp->tx_wake_thresh = bp->tx_ring_size / 2;
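/* The entry at MAX_TX_DESC_CNT is a chain BD pointing back at the base
 * of the descriptor ring, making the TX ring circular in hardware.
 */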
5077 txbd = &txr->tx_desc_ring[MAX_TX_DESC_CNT];
5079 txbd->tx_bd_haddr_hi = (u64) txr->tx_desc_mapping >> 32;
5080 txbd->tx_bd_haddr_lo = (u64) txr->tx_desc_mapping & 0xffffffff;
5082 txr->tx_prod = 0;
5083 txr->tx_prod_bseq = 0;
5085 txr->tx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BIDX;
5086 txr->tx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BSEQ;
5088 bnx2_init_tx_context(bp, cid, txr);
5091 static void
5092 bnx2_init_rxbd_rings(struct rx_bd *rx_ring[], dma_addr_t dma[], u32 buf_size,
5093 int num_rings)
5095 int i;
5096 struct rx_bd *rxbd;
5098 for (i = 0; i < num_rings; i++) {
5099 int j;
5101 rxbd = &rx_ring[i][0];
5102 for (j = 0; j < MAX_RX_DESC_CNT; j++, rxbd++) {
5103 rxbd->rx_bd_len = buf_size;
5104 rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
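/* The loop leaves rxbd pointing at the last BD of the page; use it as
 * a chain BD to the next ring page, wrapping to page 0 from the last.
 */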
5106 if (i == (num_rings - 1))
5107 j = 0;
5108 else
5109 j = i + 1;
5110 rxbd->rx_bd_haddr_hi = (u64) dma[j] >> 32;
5111 rxbd->rx_bd_haddr_lo = (u64) dma[j] & 0xffffffff;
5115 static void
5116 bnx2_init_rx_ring(struct bnx2 *bp, int ring_num)
5118 int i;
5119 u16 prod, ring_prod;
5120 u32 cid, rx_cid_addr, val;
5121 struct bnx2_napi *bnapi = &bp->bnx2_napi[ring_num];
5122 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
5124 if (ring_num == 0)
5125 cid = RX_CID;
5126 else
5127 cid = RX_RSS_CID + ring_num - 1;
5129 rx_cid_addr = GET_CID_ADDR(cid);
5131 bnx2_init_rxbd_rings(rxr->rx_desc_ring, rxr->rx_desc_mapping,
5132 bp->rx_buf_use_size, bp->rx_max_ring);
5134 bnx2_init_rx_context(bp, cid);
5136 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
5137 val = REG_RD(bp, BNX2_MQ_MAP_L2_5);
5138 REG_WR(bp, BNX2_MQ_MAP_L2_5, val | BNX2_MQ_MAP_L2_5_ARM);
5141 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, 0);
5142 if (bp->rx_pg_ring_size) {
5143 bnx2_init_rxbd_rings(rxr->rx_pg_desc_ring,
5144 rxr->rx_pg_desc_mapping,
5145 PAGE_SIZE, bp->rx_max_pg_ring);
5146 val = (bp->rx_buf_use_size << 16) | PAGE_SIZE;
5147 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, val);
5148 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_RBDC_KEY,
5149 BNX2_L2CTX_RBDC_JUMBO_KEY - ring_num);
5151 val = (u64) rxr->rx_pg_desc_mapping[0] >> 32;
5152 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_HI, val);
5154 val = (u64) rxr->rx_pg_desc_mapping[0] & 0xffffffff;
5155 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_LO, val);
5157 if (CHIP_NUM(bp) == CHIP_NUM_5709)
5158 REG_WR(bp, BNX2_MQ_MAP_L2_3, BNX2_MQ_MAP_L2_3_DEFAULT);
5161 val = (u64) rxr->rx_desc_mapping[0] >> 32;
5162 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val);
5164 val = (u64) rxr->rx_desc_mapping[0] & 0xffffffff;
5165 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val);
5167 ring_prod = prod = rxr->rx_pg_prod;
5168 for (i = 0; i < bp->rx_pg_ring_size; i++) {
5169 if (bnx2_alloc_rx_page(bp, rxr, ring_prod) < 0) {
5170 printk(KERN_WARNING PFX "%s: init'ed rx page ring %d "
5171 "with %d/%d pages only\n",
5172 bp->dev->name, ring_num, i, bp->rx_pg_ring_size);
5173 break;
5175 prod = NEXT_RX_BD(prod);
5176 ring_prod = RX_PG_RING_IDX(prod);
5178 rxr->rx_pg_prod = prod;
5180 ring_prod = prod = rxr->rx_prod;
5181 for (i = 0; i < bp->rx_ring_size; i++) {
5182 if (bnx2_alloc_rx_skb(bp, rxr, ring_prod) < 0) {
5183 printk(KERN_WARNING PFX "%s: init'ed rx ring %d with "
5184 "%d/%d skbs only\n",
5185 bp->dev->name, ring_num, i, bp->rx_ring_size);
5186 break;
5188 prod = NEXT_RX_BD(prod);
5189 ring_prod = RX_RING_IDX(prod);
5191 rxr->rx_prod = prod;
5193 rxr->rx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BDIDX;
5194 rxr->rx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BSEQ;
5195 rxr->rx_pg_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_PG_BDIDX;
5197 REG_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);
5198 REG_WR16(bp, rxr->rx_bidx_addr, prod);
5200 REG_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);
5203 static void
5204 bnx2_init_all_rings(struct bnx2 *bp)
5206 int i;
5207 u32 val;
5209 bnx2_clear_ring_states(bp);
5211 REG_WR(bp, BNX2_TSCH_TSS_CFG, 0);
5212 for (i = 0; i < bp->num_tx_rings; i++)
5213 bnx2_init_tx_ring(bp, i);
5215 if (bp->num_tx_rings > 1)
5216 REG_WR(bp, BNX2_TSCH_TSS_CFG, ((bp->num_tx_rings - 1) << 24) |
5217 (TX_TSS_CID << 7));
5219 REG_WR(bp, BNX2_RLUP_RSS_CONFIG, 0);
5220 bnx2_reg_wr_ind(bp, BNX2_RXP_SCRATCH_RSS_TBL_SZ, 0);
5222 for (i = 0; i < bp->num_rx_rings; i++)
5223 bnx2_init_rx_ring(bp, i);
5225 if (bp->num_rx_rings > 1) {
5226 u32 tbl_32;
5227 u8 *tbl = (u8 *) &tbl_32;
5229 bnx2_reg_wr_ind(bp, BNX2_RXP_SCRATCH_RSS_TBL_SZ,
5230 BNX2_RXP_SCRATCH_RSS_TBL_MAX_ENTRIES);
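/* Build the RSS indirection table: entries are packed four per 32-bit
 * word and spread flows round-robin across the non-default RX rings.
 */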
5232 for (i = 0; i < BNX2_RXP_SCRATCH_RSS_TBL_MAX_ENTRIES; i++) {
5233 tbl[i % 4] = i % (bp->num_rx_rings - 1);
5234 if ((i % 4) == 3)
5235 bnx2_reg_wr_ind(bp,
5236 BNX2_RXP_SCRATCH_RSS_TBL + i,
5237 cpu_to_be32(tbl_32));
5240 val = BNX2_RLUP_RSS_CONFIG_IPV4_RSS_TYPE_ALL_XI |
5241 BNX2_RLUP_RSS_CONFIG_IPV6_RSS_TYPE_ALL_XI;
5243 REG_WR(bp, BNX2_RLUP_RSS_CONFIG, val);
5248 static u32 bnx2_find_max_ring(u32 ring_size, u32 max_size)
5250 u32 max, num_rings = 1;
5252 while (ring_size > MAX_RX_DESC_CNT) {
5253 ring_size -= MAX_RX_DESC_CNT;
5254 num_rings++;
5256 /* round num_rings up to the next power of 2 */
5257 max = max_size;
5258 while ((max & num_rings) == 0)
5259 max >>= 1;
5261 if (num_rings != max)
5262 max <<= 1;
5264 return max;
5267 static void
5268 bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
5270 u32 rx_size, rx_space, jumbo_size;
5272 /* 8 for CRC and VLAN */
5273 rx_size = bp->dev->mtu + ETH_HLEN + BNX2_RX_OFFSET + 8;
5275 rx_space = SKB_DATA_ALIGN(rx_size + BNX2_RX_ALIGN) + NET_SKB_PAD +
5276 sizeof(struct skb_shared_info);
5278 bp->rx_copy_thresh = BNX2_RX_COPY_THRESH;
5279 bp->rx_pg_ring_size = 0;
5280 bp->rx_max_pg_ring = 0;
5281 bp->rx_max_pg_ring_idx = 0;
5282 if ((rx_space > PAGE_SIZE) && !(bp->flags & BNX2_FLAG_JUMBO_BROKEN)) {
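/* Large frames are split between a small skb buffer and page-ring
 * buffers; compute how many pages each jumbo frame may consume.
 */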
5283 int pages = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;
5285 jumbo_size = size * pages;
5286 if (jumbo_size > MAX_TOTAL_RX_PG_DESC_CNT)
5287 jumbo_size = MAX_TOTAL_RX_PG_DESC_CNT;
5289 bp->rx_pg_ring_size = jumbo_size;
5290 bp->rx_max_pg_ring = bnx2_find_max_ring(jumbo_size,
5291 MAX_RX_PG_RINGS);
5292 bp->rx_max_pg_ring_idx = (bp->rx_max_pg_ring * RX_DESC_CNT) - 1;
5293 rx_size = BNX2_RX_COPY_THRESH + BNX2_RX_OFFSET;
5294 bp->rx_copy_thresh = 0;
5297 bp->rx_buf_use_size = rx_size;
5298 /* hw alignment */
5299 bp->rx_buf_size = bp->rx_buf_use_size + BNX2_RX_ALIGN;
5300 bp->rx_jumbo_thresh = rx_size - BNX2_RX_OFFSET;
5301 bp->rx_ring_size = size;
5302 bp->rx_max_ring = bnx2_find_max_ring(size, MAX_RX_RINGS);
5303 bp->rx_max_ring_idx = (bp->rx_max_ring * RX_DESC_CNT) - 1;
5306 static void
5307 bnx2_free_tx_skbs(struct bnx2 *bp)
5309 int i;
5311 for (i = 0; i < bp->num_tx_rings; i++) {
5312 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
5313 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
5314 int j;
5316 if (txr->tx_buf_ring == NULL)
5317 continue;
5319 for (j = 0; j < TX_DESC_CNT; ) {
5320 struct sw_tx_bd *tx_buf = &txr->tx_buf_ring[j];
5321 struct sk_buff *skb = tx_buf->skb;
5322 int k, last;
5324 if (skb == NULL) {
5325 j++;
5326 continue;
5329 pci_unmap_single(bp->pdev,
5330 pci_unmap_addr(tx_buf, mapping),
5331 skb_headlen(skb),
5332 PCI_DMA_TODEVICE);
5334 tx_buf->skb = NULL;
5336 last = tx_buf->nr_frags;
5337 j++;
5338 for (k = 0; k < last; k++, j++) {
5339 tx_buf = &txr->tx_buf_ring[TX_RING_IDX(j)];
5340 pci_unmap_page(bp->pdev,
5341 pci_unmap_addr(tx_buf, mapping),
5342 skb_shinfo(skb)->frags[k].size,
5343 PCI_DMA_TODEVICE);
5345 dev_kfree_skb(skb);
5350 static void
5351 bnx2_free_rx_skbs(struct bnx2 *bp)
5353 int i;
5355 for (i = 0; i < bp->num_rx_rings; i++) {
5356 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
5357 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
5358 int j;
5360 if (rxr->rx_buf_ring == NULL)
5361 return;
5363 for (j = 0; j < bp->rx_max_ring_idx; j++) {
5364 struct sw_bd *rx_buf = &rxr->rx_buf_ring[j];
5365 struct sk_buff *skb = rx_buf->skb;
5367 if (skb == NULL)
5368 continue;
5370 pci_unmap_single(bp->pdev,
5371 pci_unmap_addr(rx_buf, mapping),
5372 bp->rx_buf_use_size,
5373 PCI_DMA_FROMDEVICE);
5375 rx_buf->skb = NULL;
5377 dev_kfree_skb(skb);
5379 for (j = 0; j < bp->rx_max_pg_ring_idx; j++)
5380 bnx2_free_rx_page(bp, rxr, j);
5384 static void
5385 bnx2_free_skbs(struct bnx2 *bp)
5387 bnx2_free_tx_skbs(bp);
5388 bnx2_free_rx_skbs(bp);
5391 static int
5392 bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
5394 int rc;
5396 rc = bnx2_reset_chip(bp, reset_code);
5397 bnx2_free_skbs(bp);
5398 if (rc)
5399 return rc;
5401 if ((rc = bnx2_init_chip(bp)) != 0)
5402 return rc;
5404 bnx2_init_all_rings(bp);
5405 return 0;
5408 static int
5409 bnx2_init_nic(struct bnx2 *bp, int reset_phy)
5411 int rc;
5413 if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
5414 return rc;
5416 spin_lock_bh(&bp->phy_lock);
5417 bnx2_init_phy(bp, reset_phy);
5418 bnx2_set_link(bp);
5419 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
5420 bnx2_remote_phy_event(bp);
5421 spin_unlock_bh(&bp->phy_lock);
5422 return 0;
5425 static int
5426 bnx2_shutdown_chip(struct bnx2 *bp)
5428 u32 reset_code;
5430 if (bp->flags & BNX2_FLAG_NO_WOL)
5431 reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
5432 else if (bp->wol)
5433 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
5434 else
5435 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
5437 return bnx2_reset_chip(bp, reset_code);
5440 static int
5441 bnx2_test_registers(struct bnx2 *bp)
5443 int ret;
5444 int i, is_5709;
5445 static const struct {
5446 u16 offset;
5447 u16 flags;
5448 #define BNX2_FL_NOT_5709 1
5449 u32 rw_mask;
5450 u32 ro_mask;
5451 } reg_tbl[] = {
5452 { 0x006c, 0, 0x00000000, 0x0000003f },
5453 { 0x0090, 0, 0xffffffff, 0x00000000 },
5454 { 0x0094, 0, 0x00000000, 0x00000000 },
5456 { 0x0404, BNX2_FL_NOT_5709, 0x00003f00, 0x00000000 },
5457 { 0x0418, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5458 { 0x041c, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5459 { 0x0420, BNX2_FL_NOT_5709, 0x00000000, 0x80ffffff },
5460 { 0x0424, BNX2_FL_NOT_5709, 0x00000000, 0x00000000 },
5461 { 0x0428, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
5462 { 0x0450, BNX2_FL_NOT_5709, 0x00000000, 0x0000ffff },
5463 { 0x0454, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5464 { 0x0458, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5466 { 0x0808, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5467 { 0x0854, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5468 { 0x0868, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
5469 { 0x086c, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
5470 { 0x0870, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
5471 { 0x0874, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
5473 { 0x0c00, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
5474 { 0x0c04, BNX2_FL_NOT_5709, 0x00000000, 0x03ff0001 },
5475 { 0x0c08, BNX2_FL_NOT_5709, 0x0f0ff073, 0x00000000 },
5477 { 0x1000, 0, 0x00000000, 0x00000001 },
5478 { 0x1004, BNX2_FL_NOT_5709, 0x00000000, 0x000f0001 },
5480 { 0x1408, 0, 0x01c00800, 0x00000000 },
5481 { 0x149c, 0, 0x8000ffff, 0x00000000 },
5482 { 0x14a8, 0, 0x00000000, 0x000001ff },
5483 { 0x14ac, 0, 0x0fffffff, 0x10000000 },
5484 { 0x14b0, 0, 0x00000002, 0x00000001 },
5485 { 0x14b8, 0, 0x00000000, 0x00000000 },
5486 { 0x14c0, 0, 0x00000000, 0x00000009 },
5487 { 0x14c4, 0, 0x00003fff, 0x00000000 },
5488 { 0x14cc, 0, 0x00000000, 0x00000001 },
5489 { 0x14d0, 0, 0xffffffff, 0x00000000 },
5491 { 0x1800, 0, 0x00000000, 0x00000001 },
5492 { 0x1804, 0, 0x00000000, 0x00000003 },
5494 { 0x2800, 0, 0x00000000, 0x00000001 },
5495 { 0x2804, 0, 0x00000000, 0x00003f01 },
5496 { 0x2808, 0, 0x0f3f3f03, 0x00000000 },
5497 { 0x2810, 0, 0xffff0000, 0x00000000 },
5498 { 0x2814, 0, 0xffff0000, 0x00000000 },
5499 { 0x2818, 0, 0xffff0000, 0x00000000 },
5500 { 0x281c, 0, 0xffff0000, 0x00000000 },
5501 { 0x2834, 0, 0xffffffff, 0x00000000 },
5502 { 0x2840, 0, 0x00000000, 0xffffffff },
5503 { 0x2844, 0, 0x00000000, 0xffffffff },
5504 { 0x2848, 0, 0xffffffff, 0x00000000 },
5505 { 0x284c, 0, 0xf800f800, 0x07ff07ff },
5507 { 0x2c00, 0, 0x00000000, 0x00000011 },
5508 { 0x2c04, 0, 0x00000000, 0x00030007 },
5510 { 0x3c00, 0, 0x00000000, 0x00000001 },
5511 { 0x3c04, 0, 0x00000000, 0x00070000 },
5512 { 0x3c08, 0, 0x00007f71, 0x07f00000 },
5513 { 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
5514 { 0x3c10, 0, 0xffffffff, 0x00000000 },
5515 { 0x3c14, 0, 0x00000000, 0xffffffff },
5516 { 0x3c18, 0, 0x00000000, 0xffffffff },
5517 { 0x3c1c, 0, 0xfffff000, 0x00000000 },
5518 { 0x3c20, 0, 0xffffff00, 0x00000000 },
5520 { 0x5004, 0, 0x00000000, 0x0000007f },
5521 { 0x5008, 0, 0x0f0007ff, 0x00000000 },
5523 { 0x5c00, 0, 0x00000000, 0x00000001 },
5524 { 0x5c04, 0, 0x00000000, 0x0003000f },
5525 { 0x5c08, 0, 0x00000003, 0x00000000 },
5526 { 0x5c0c, 0, 0x0000fff8, 0x00000000 },
5527 { 0x5c10, 0, 0x00000000, 0xffffffff },
5528 { 0x5c80, 0, 0x00000000, 0x0f7113f1 },
5529 { 0x5c84, 0, 0x00000000, 0x0000f333 },
5530 { 0x5c88, 0, 0x00000000, 0x00077373 },
5531 { 0x5c8c, 0, 0x00000000, 0x0007f737 },
5533 { 0x6808, 0, 0x0000ff7f, 0x00000000 },
5534 { 0x680c, 0, 0xffffffff, 0x00000000 },
5535 { 0x6810, 0, 0xffffffff, 0x00000000 },
5536 { 0x6814, 0, 0xffffffff, 0x00000000 },
5537 { 0x6818, 0, 0xffffffff, 0x00000000 },
5538 { 0x681c, 0, 0xffffffff, 0x00000000 },
5539 { 0x6820, 0, 0x00ff00ff, 0x00000000 },
5540 { 0x6824, 0, 0x00ff00ff, 0x00000000 },
5541 { 0x6828, 0, 0x00ff00ff, 0x00000000 },
5542 { 0x682c, 0, 0x03ff03ff, 0x00000000 },
5543 { 0x6830, 0, 0x03ff03ff, 0x00000000 },
5544 { 0x6834, 0, 0x03ff03ff, 0x00000000 },
5545 { 0x6838, 0, 0x03ff03ff, 0x00000000 },
5546 { 0x683c, 0, 0x0000ffff, 0x00000000 },
5547 { 0x6840, 0, 0x00000ff0, 0x00000000 },
5548 { 0x6844, 0, 0x00ffff00, 0x00000000 },
5549 { 0x684c, 0, 0xffffffff, 0x00000000 },
5550 { 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
5551 { 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
5552 { 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
5553 { 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
5554 { 0x6908, 0, 0x00000000, 0x0001ff0f },
5555 { 0x690c, 0, 0x00000000, 0x0ffe00f0 },
5557 { 0xffff, 0, 0x00000000, 0x00000000 },
5560 ret = 0;
5561 is_5709 = 0;
5562 if (CHIP_NUM(bp) == CHIP_NUM_5709)
5563 is_5709 = 1;
5565 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
5566 u32 offset, rw_mask, ro_mask, save_val, val;
5567 u16 flags = reg_tbl[i].flags;
5569 if (is_5709 && (flags & BNX2_FL_NOT_5709))
5570 continue;
5572 offset = (u32) reg_tbl[i].offset;
5573 rw_mask = reg_tbl[i].rw_mask;
5574 ro_mask = reg_tbl[i].ro_mask;
5576 save_val = readl(bp->regview + offset);
5578 writel(0, bp->regview + offset);
5580 val = readl(bp->regview + offset);
5581 if ((val & rw_mask) != 0) {
5582 goto reg_test_err;
5585 if ((val & ro_mask) != (save_val & ro_mask)) {
5586 goto reg_test_err;
5589 writel(0xffffffff, bp->regview + offset);
5591 val = readl(bp->regview + offset);
5592 if ((val & rw_mask) != rw_mask) {
5593 goto reg_test_err;
5596 if ((val & ro_mask) != (save_val & ro_mask)) {
5597 goto reg_test_err;
5600 writel(save_val, bp->regview + offset);
5601 continue;
5603 reg_test_err:
5604 writel(save_val, bp->regview + offset);
5605 ret = -ENODEV;
5606 break;
5608 return ret;
5611 static int
5612 bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
5614 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
5615 0xaaaaaaaa, 0xaa55aa55, 0x55aa55aa };
5616 int i;
5618 for (i = 0; i < sizeof(test_pattern) / 4; i++) {
5619 u32 offset;
5621 for (offset = 0; offset < size; offset += 4) {
5623 bnx2_reg_wr_ind(bp, start + offset, test_pattern[i]);
5625 if (bnx2_reg_rd_ind(bp, start + offset) !=
5626 test_pattern[i]) {
5627 return -ENODEV;
5631 return 0;
5634 static int
5635 bnx2_test_memory(struct bnx2 *bp)
5637 int ret = 0;
5638 int i;
5639 static struct mem_entry {
5640 u32 offset;
5641 u32 len;
5642 } mem_tbl_5706[] = {
5643 { 0x60000, 0x4000 },
5644 { 0xa0000, 0x3000 },
5645 { 0xe0000, 0x4000 },
5646 { 0x120000, 0x4000 },
5647 { 0x1a0000, 0x4000 },
5648 { 0x160000, 0x4000 },
5649 { 0xffffffff, 0 },
5651 mem_tbl_5709[] = {
5652 { 0x60000, 0x4000 },
5653 { 0xa0000, 0x3000 },
5654 { 0xe0000, 0x4000 },
5655 { 0x120000, 0x4000 },
5656 { 0x1a0000, 0x4000 },
5657 { 0xffffffff, 0 },
5659 struct mem_entry *mem_tbl;
5661 if (CHIP_NUM(bp) == CHIP_NUM_5709)
5662 mem_tbl = mem_tbl_5709;
5663 else
5664 mem_tbl = mem_tbl_5706;
5666 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
5667 if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
5668 mem_tbl[i].len)) != 0) {
5669 return ret;
5673 return ret;
5676 #define BNX2_MAC_LOOPBACK 0
5677 #define BNX2_PHY_LOOPBACK 1
5679 static int
5680 bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
5682 unsigned int pkt_size, num_pkts, i;
5683 struct sk_buff *skb, *rx_skb;
5684 unsigned char *packet;
5685 u16 rx_start_idx, rx_idx;
5686 dma_addr_t map;
5687 struct tx_bd *txbd;
5688 struct sw_bd *rx_buf;
5689 struct l2_fhdr *rx_hdr;
5690 int ret = -ENODEV;
5691 struct bnx2_napi *bnapi = &bp->bnx2_napi[0], *tx_napi;
5692 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
5693 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
5695 tx_napi = bnapi;
5697 txr = &tx_napi->tx_ring;
5698 rxr = &bnapi->rx_ring;
5699 if (loopback_mode == BNX2_MAC_LOOPBACK) {
5700 bp->loopback = MAC_LOOPBACK;
5701 bnx2_set_mac_loopback(bp);
5703 else if (loopback_mode == BNX2_PHY_LOOPBACK) {
5704 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
5705 return 0;
5707 bp->loopback = PHY_LOOPBACK;
5708 bnx2_set_phy_loopback(bp);
5710 else
5711 return -EINVAL;
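/* Build a frame addressed to ourselves and fill it with a known byte
 * pattern so the payload can be verified after it loops back.
 */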
5713 pkt_size = min(bp->dev->mtu + ETH_HLEN, bp->rx_jumbo_thresh - 4);
5714 skb = netdev_alloc_skb(bp->dev, pkt_size);
5715 if (!skb)
5716 return -ENOMEM;
5717 packet = skb_put(skb, pkt_size);
5718 memcpy(packet, bp->dev->dev_addr, 6);
5719 memset(packet + 6, 0x0, 8);
5720 for (i = 14; i < pkt_size; i++)
5721 packet[i] = (unsigned char) (i & 0xff);
5723 map = pci_map_single(bp->pdev, skb->data, pkt_size,
5724 PCI_DMA_TODEVICE);
5725 if (pci_dma_mapping_error(bp->pdev, map)) {
5726 dev_kfree_skb(skb);
5727 return -EIO;
5730 REG_WR(bp, BNX2_HC_COMMAND,
5731 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
5733 REG_RD(bp, BNX2_HC_COMMAND);
5735 udelay(5);
5736 rx_start_idx = bnx2_get_hw_rx_cons(bnapi);
5738 num_pkts = 0;
5740 txbd = &txr->tx_desc_ring[TX_RING_IDX(txr->tx_prod)];
5742 txbd->tx_bd_haddr_hi = (u64) map >> 32;
5743 txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
5744 txbd->tx_bd_mss_nbytes = pkt_size;
5745 txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;
5747 num_pkts++;
5748 txr->tx_prod = NEXT_TX_BD(txr->tx_prod);
5749 txr->tx_prod_bseq += pkt_size;
5751 REG_WR16(bp, txr->tx_bidx_addr, txr->tx_prod);
5752 REG_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);
5754 udelay(100);
5756 REG_WR(bp, BNX2_HC_COMMAND,
5757 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
5759 REG_RD(bp, BNX2_HC_COMMAND);
5761 udelay(5);
5763 pci_unmap_single(bp->pdev, map, pkt_size, PCI_DMA_TODEVICE);
5764 dev_kfree_skb(skb);
5766 if (bnx2_get_hw_tx_cons(tx_napi) != txr->tx_prod)
5767 goto loopback_test_done;
5769 rx_idx = bnx2_get_hw_rx_cons(bnapi);
5770 if (rx_idx != rx_start_idx + num_pkts) {
5771 goto loopback_test_done;
5774 rx_buf = &rxr->rx_buf_ring[rx_start_idx];
5775 rx_skb = rx_buf->skb;
5777 rx_hdr = (struct l2_fhdr *) rx_skb->data;
5778 skb_reserve(rx_skb, BNX2_RX_OFFSET);
5780 pci_dma_sync_single_for_cpu(bp->pdev,
5781 pci_unmap_addr(rx_buf, mapping),
5782 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
5784 if (rx_hdr->l2_fhdr_status &
5785 (L2_FHDR_ERRORS_BAD_CRC |
5786 L2_FHDR_ERRORS_PHY_DECODE |
5787 L2_FHDR_ERRORS_ALIGNMENT |
5788 L2_FHDR_ERRORS_TOO_SHORT |
5789 L2_FHDR_ERRORS_GIANT_FRAME)) {
5791 goto loopback_test_done;
5794 if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) {
5795 goto loopback_test_done;
5798 for (i = 14; i < pkt_size; i++) {
5799 if (*(rx_skb->data + i) != (unsigned char) (i & 0xff)) {
5800 goto loopback_test_done;
5804 ret = 0;
5806 loopback_test_done:
5807 bp->loopback = 0;
5808 return ret;
5811 #define BNX2_MAC_LOOPBACK_FAILED 1
5812 #define BNX2_PHY_LOOPBACK_FAILED 2
5813 #define BNX2_LOOPBACK_FAILED (BNX2_MAC_LOOPBACK_FAILED | \
5814 BNX2_PHY_LOOPBACK_FAILED)
5816 static int
5817 bnx2_test_loopback(struct bnx2 *bp)
5819 int rc = 0;
5821 if (!netif_running(bp->dev))
5822 return BNX2_LOOPBACK_FAILED;
5824 bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
5825 spin_lock_bh(&bp->phy_lock);
5826 bnx2_init_phy(bp, 1);
5827 spin_unlock_bh(&bp->phy_lock);
5828 if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
5829 rc |= BNX2_MAC_LOOPBACK_FAILED;
5830 if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
5831 rc |= BNX2_PHY_LOOPBACK_FAILED;
5832 return rc;
5835 #define NVRAM_SIZE 0x200
5836 #define CRC32_RESIDUAL 0xdebb20e3
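/* Running CRC32 over a block that ends with its own little-endian CRC
 * always yields this residual constant, so the stored checksum does not
 * need to be extracted and compared separately.
 */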
5838 static int
5839 bnx2_test_nvram(struct bnx2 *bp)
5841 __be32 buf[NVRAM_SIZE / 4];
5842 u8 *data = (u8 *) buf;
5843 int rc = 0;
5844 u32 magic, csum;
5846 if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
5847 goto test_nvram_done;
5849 magic = be32_to_cpu(buf[0]);
5850 if (magic != 0x669955aa) {
5851 rc = -ENODEV;
5852 goto test_nvram_done;
5855 if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0)
5856 goto test_nvram_done;
5858 csum = ether_crc_le(0x100, data);
5859 if (csum != CRC32_RESIDUAL) {
5860 rc = -ENODEV;
5861 goto test_nvram_done;
5864 csum = ether_crc_le(0x100, data + 0x100);
5865 if (csum != CRC32_RESIDUAL) {
5866 rc = -ENODEV;
5869 test_nvram_done:
5870 return rc;
5873 static int
5874 bnx2_test_link(struct bnx2 *bp)
5876 u32 bmsr;
5878 if (!netif_running(bp->dev))
5879 return -ENODEV;
5881 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
5882 if (bp->link_up)
5883 return 0;
5884 return -ENODEV;
5886 spin_lock_bh(&bp->phy_lock);
5887 bnx2_enable_bmsr1(bp);
5888 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
5889 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
5890 bnx2_disable_bmsr1(bp);
5891 spin_unlock_bh(&bp->phy_lock);
5893 if (bmsr & BMSR_LSTATUS) {
5894 return 0;
5896 return -ENODEV;
5899 static int
5900 bnx2_test_intr(struct bnx2 *bp)
5902 int i;
5903 u16 status_idx;
5905 if (!netif_running(bp->dev))
5906 return -ENODEV;
5908 status_idx = REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;
5910 /* This register is not touched during run-time. */
5911 REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
5912 REG_RD(bp, BNX2_HC_COMMAND);
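/* Force an immediate coalescing event and poll for up to ~100ms; a
 * working interrupt path will advance the status block index.
 */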
5914 for (i = 0; i < 10; i++) {
5915 if ((REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
5916 status_idx) {
5918 break;
5921 msleep_interruptible(10);
5923 if (i < 10)
5924 return 0;
5926 return -ENODEV;
5929 /* Determine link for parallel detection. */
5930 static int
5931 bnx2_5706_serdes_has_link(struct bnx2 *bp)
5933 u32 mode_ctl, an_dbg, exp;
5935 if (bp->phy_flags & BNX2_PHY_FLAG_NO_PARALLEL)
5936 return 0;
5938 bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_MODE_CTL);
5939 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &mode_ctl);
5941 if (!(mode_ctl & MISC_SHDW_MODE_CTL_SIG_DET))
5942 return 0;
5944 bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
5945 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
5946 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
5948 if (an_dbg & (MISC_SHDW_AN_DBG_NOSYNC | MISC_SHDW_AN_DBG_RUDI_INVALID))
5949 return 0;
5951 bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_REG1);
5952 bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);
5953 bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);
5955 if (exp & MII_EXPAND_REG1_RUDI_C) /* receiving CONFIG */
5956 return 0;
5958 return 1;
5961 static void
5962 bnx2_5706_serdes_timer(struct bnx2 *bp)
5964 int check_link = 1;
5966 spin_lock(&bp->phy_lock);
5967 if (bp->serdes_an_pending) {
5968 bp->serdes_an_pending--;
5969 check_link = 0;
5970 } else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
5971 u32 bmcr;
5973 bp->current_interval = BNX2_TIMER_INTERVAL;
5975 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
5977 if (bmcr & BMCR_ANENABLE) {
5978 if (bnx2_5706_serdes_has_link(bp)) {
5979 bmcr &= ~BMCR_ANENABLE;
5980 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
5981 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
5982 bp->phy_flags |= BNX2_PHY_FLAG_PARALLEL_DETECT;
5986 else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
5987 (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)) {
5988 u32 phy2;
5990 bnx2_write_phy(bp, 0x17, 0x0f01);
5991 bnx2_read_phy(bp, 0x15, &phy2);
5992 if (phy2 & 0x20) {
5993 u32 bmcr;
5995 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
5996 bmcr |= BMCR_ANENABLE;
5997 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
5999 bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
6001 } else
6002 bp->current_interval = BNX2_TIMER_INTERVAL;
6004 if (check_link) {
6005 u32 val;
6007 bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
6008 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);
6009 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);
6011 if (bp->link_up && (val & MISC_SHDW_AN_DBG_NOSYNC)) {
6012 if (!(bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN)) {
6013 bnx2_5706s_force_link_dn(bp, 1);
6014 bp->phy_flags |= BNX2_PHY_FLAG_FORCED_DOWN;
6015 } else
6016 bnx2_set_link(bp);
6017 } else if (!bp->link_up && !(val & MISC_SHDW_AN_DBG_NOSYNC))
6018 bnx2_set_link(bp);
6020 spin_unlock(&bp->phy_lock);
6023 static void
6024 bnx2_5708_serdes_timer(struct bnx2 *bp)
6026 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
6027 return;
6029 if ((bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) == 0) {
6030 bp->serdes_an_pending = 0;
6031 return;
6034 spin_lock(&bp->phy_lock);
6035 if (bp->serdes_an_pending)
6036 bp->serdes_an_pending--;
6037 else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
6038 u32 bmcr;
6040 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
6041 if (bmcr & BMCR_ANENABLE) {
6042 bnx2_enable_forced_2g5(bp);
6043 bp->current_interval = BNX2_SERDES_FORCED_TIMEOUT;
6044 } else {
6045 bnx2_disable_forced_2g5(bp);
6046 bp->serdes_an_pending = 2;
6047 bp->current_interval = BNX2_TIMER_INTERVAL;
6050 } else
6051 bp->current_interval = BNX2_TIMER_INTERVAL;
6053 spin_unlock(&bp->phy_lock);
6056 static void
6057 bnx2_timer(unsigned long data)
6059 struct bnx2 *bp = (struct bnx2 *) data;
6061 if (!netif_running(bp->dev))
6062 return;
6064 if (atomic_read(&bp->intr_sem) != 0)
6065 goto bnx2_restart_timer;
6067 if ((bp->flags & (BNX2_FLAG_USING_MSI | BNX2_FLAG_ONE_SHOT_MSI)) ==
6068 BNX2_FLAG_USING_MSI)
6069 bnx2_chk_missed_msi(bp);
6071 bnx2_send_heart_beat(bp);
6073 bp->stats_blk->stat_FwRxDrop =
6074 bnx2_reg_rd_ind(bp, BNX2_FW_RX_DROP_COUNT);
6076 /* work around occasionally corrupted counters */
6077 if ((bp->flags & BNX2_FLAG_BROKEN_STATS) && bp->stats_ticks)
6078 REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd |
6079 BNX2_HC_COMMAND_STATS_NOW);
6081 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
6082 if (CHIP_NUM(bp) == CHIP_NUM_5706)
6083 bnx2_5706_serdes_timer(bp);
6084 else
6085 bnx2_5708_serdes_timer(bp);
6088 bnx2_restart_timer:
6089 mod_timer(&bp->timer, jiffies + bp->current_interval);
6092 static int
6093 bnx2_request_irq(struct bnx2 *bp)
6095 unsigned long flags;
6096 struct bnx2_irq *irq;
6097 int rc = 0, i;
6099 if (bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)
6100 flags = 0;
6101 else
6102 flags = IRQF_SHARED;
6104 for (i = 0; i < bp->irq_nvecs; i++) {
6105 irq = &bp->irq_tbl[i];
6106 rc = request_irq(irq->vector, irq->handler, flags, irq->name,
6107 &bp->bnx2_napi[i]);
6108 if (rc)
6109 break;
6110 irq->requested = 1;
6112 return rc;
6115 static void
6116 bnx2_free_irq(struct bnx2 *bp)
6118 struct bnx2_irq *irq;
6119 int i;
6121 for (i = 0; i < bp->irq_nvecs; i++) {
6122 irq = &bp->irq_tbl[i];
6123 if (irq->requested)
6124 free_irq(irq->vector, &bp->bnx2_napi[i]);
6125 irq->requested = 0;
6127 if (bp->flags & BNX2_FLAG_USING_MSI)
6128 pci_disable_msi(bp->pdev);
6129 else if (bp->flags & BNX2_FLAG_USING_MSIX)
6130 pci_disable_msix(bp->pdev);
6132 bp->flags &= ~(BNX2_FLAG_USING_MSI_OR_MSIX | BNX2_FLAG_ONE_SHOT_MSI);
6135 static void
6136 bnx2_enable_msix(struct bnx2 *bp, int msix_vecs)
6138 int i, rc;
6139 struct msix_entry msix_ent[BNX2_MAX_MSIX_VEC];
6140 struct net_device *dev = bp->dev;
6141 const int len = sizeof(bp->irq_tbl[0].name);
6143 bnx2_setup_msix_tbl(bp);
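/* Expose the MSI-X table and PBA through the GRC windows configured in
 * bnx2_setup_msix_tbl() above.
 */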
6144 REG_WR(bp, BNX2_PCI_MSIX_CONTROL, BNX2_MAX_MSIX_HW_VEC - 1);
6145 REG_WR(bp, BNX2_PCI_MSIX_TBL_OFF_BIR, BNX2_PCI_GRC_WINDOW2_BASE);
6146 REG_WR(bp, BNX2_PCI_MSIX_PBA_OFF_BIT, BNX2_PCI_GRC_WINDOW3_BASE);
6148 for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
6149 msix_ent[i].entry = i;
6150 msix_ent[i].vector = 0;
6153 rc = pci_enable_msix(bp->pdev, msix_ent, BNX2_MAX_MSIX_VEC);
6154 if (rc != 0)
6155 return;
6157 bp->irq_nvecs = msix_vecs;
6158 bp->flags |= BNX2_FLAG_USING_MSIX | BNX2_FLAG_ONE_SHOT_MSI;
6159 for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
6160 bp->irq_tbl[i].vector = msix_ent[i].vector;
6161 snprintf(bp->irq_tbl[i].name, len, "%s-%d", dev->name, i);
6162 bp->irq_tbl[i].handler = bnx2_msi_1shot;
6166 static void
6167 bnx2_setup_int_mode(struct bnx2 *bp, int dis_msi)
6169 int cpus = num_online_cpus();
6170 int msix_vecs = min(cpus + 1, RX_MAX_RINGS);
6172 bp->irq_tbl[0].handler = bnx2_interrupt;
6173 strcpy(bp->irq_tbl[0].name, bp->dev->name);
6174 bp->irq_nvecs = 1;
6175 bp->irq_tbl[0].vector = bp->pdev->irq;
6177 if ((bp->flags & BNX2_FLAG_MSIX_CAP) && !dis_msi && cpus > 1)
6178 bnx2_enable_msix(bp, msix_vecs);
6180 if ((bp->flags & BNX2_FLAG_MSI_CAP) && !dis_msi &&
6181 !(bp->flags & BNX2_FLAG_USING_MSIX)) {
6182 if (pci_enable_msi(bp->pdev) == 0) {
6183 bp->flags |= BNX2_FLAG_USING_MSI;
6184 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
6185 bp->flags |= BNX2_FLAG_ONE_SHOT_MSI;
6186 bp->irq_tbl[0].handler = bnx2_msi_1shot;
6187 } else
6188 bp->irq_tbl[0].handler = bnx2_msi;
6190 bp->irq_tbl[0].vector = bp->pdev->irq;
6194 bp->num_tx_rings = rounddown_pow_of_two(bp->irq_nvecs);
6195 bp->dev->real_num_tx_queues = bp->num_tx_rings;
6197 bp->num_rx_rings = bp->irq_nvecs;
6200 /* Called with rtnl_lock */
6201 static int
6202 bnx2_open(struct net_device *dev)
6204 struct bnx2 *bp = netdev_priv(dev);
6205 int rc;
6207 netif_carrier_off(dev);
6209 bnx2_set_power_state(bp, PCI_D0);
6210 bnx2_disable_int(bp);
6212 bnx2_setup_int_mode(bp, disable_msi);
6213 bnx2_napi_enable(bp);
6214 rc = bnx2_alloc_mem(bp);
6215 if (rc)
6216 goto open_err;
6218 rc = bnx2_request_irq(bp);
6219 if (rc)
6220 goto open_err;
6222 rc = bnx2_init_nic(bp, 1);
6223 if (rc)
6224 goto open_err;
6226 mod_timer(&bp->timer, jiffies + bp->current_interval);
6228 atomic_set(&bp->intr_sem, 0);
6230 bnx2_enable_int(bp);
6232 if (bp->flags & BNX2_FLAG_USING_MSI) {
6233 /* Test MSI to make sure it is working
6234 * If MSI test fails, go back to INTx mode
6235 */
6236 if (bnx2_test_intr(bp) != 0) {
6237 printk(KERN_WARNING PFX "%s: No interrupt was generated"
6238 " using MSI, switching to INTx mode. Please"
6239 " report this failure to the PCI maintainer"
6240 " and include system chipset information.\n",
6241 bp->dev->name);
6243 bnx2_disable_int(bp);
6244 bnx2_free_irq(bp);
6246 bnx2_setup_int_mode(bp, 1);
6248 rc = bnx2_init_nic(bp, 0);
6250 if (!rc)
6251 rc = bnx2_request_irq(bp);
6253 if (rc) {
6254 del_timer_sync(&bp->timer);
6255 goto open_err;
6257 bnx2_enable_int(bp);
6260 if (bp->flags & BNX2_FLAG_USING_MSI)
6261 printk(KERN_INFO PFX "%s: using MSI\n", dev->name);
6262 else if (bp->flags & BNX2_FLAG_USING_MSIX)
6263 printk(KERN_INFO PFX "%s: using MSIX\n", dev->name);
6265 netif_tx_start_all_queues(dev);
6267 return 0;
6269 open_err:
6270 bnx2_napi_disable(bp);
6271 bnx2_free_skbs(bp);
6272 bnx2_free_irq(bp);
6273 bnx2_free_mem(bp);
6274 return rc;
6277 static void
6278 bnx2_reset_task(struct work_struct *work)
6280 struct bnx2 *bp = container_of(work, struct bnx2, reset_task);
6282 rtnl_lock();
6283 if (!netif_running(bp->dev)) {
6284 rtnl_unlock();
6285 return;
6288 bnx2_netif_stop(bp);
6290 bnx2_init_nic(bp, 1);
6292 atomic_set(&bp->intr_sem, 1);
6293 bnx2_netif_start(bp);
6294 rtnl_unlock();
6297 static void
6298 bnx2_dump_state(struct bnx2 *bp)
6300 struct net_device *dev = bp->dev;
6302 printk(KERN_ERR PFX "%s DEBUG: intr_sem[%x]\n", dev->name,
6303 atomic_read(&bp->intr_sem));
6304 printk(KERN_ERR PFX "%s DEBUG: EMAC_TX_STATUS[%08x] "
6305 "RPM_MGMT_PKT_CTRL[%08x]\n", dev->name,
6306 REG_RD(bp, BNX2_EMAC_TX_STATUS),
6307 REG_RD(bp, BNX2_RPM_MGMT_PKT_CTRL));
6308 printk(KERN_ERR PFX "%s DEBUG: MCP_STATE_P0[%08x] MCP_STATE_P1[%08x]\n",
6309 dev->name, bnx2_reg_rd_ind(bp, BNX2_MCP_STATE_P0),
6310 bnx2_reg_rd_ind(bp, BNX2_MCP_STATE_P1));
6311 printk(KERN_ERR PFX "%s DEBUG: HC_STATS_INTERRUPT_STATUS[%08x]\n",
6312 dev->name, REG_RD(bp, BNX2_HC_STATS_INTERRUPT_STATUS));
6313 if (bp->flags & BNX2_FLAG_USING_MSIX)
6314 printk(KERN_ERR PFX "%s DEBUG: PBA[%08x]\n", dev->name,
6315 REG_RD(bp, BNX2_PCI_GRC_WINDOW3_BASE));
6318 static void
6319 bnx2_tx_timeout(struct net_device *dev)
6321 struct bnx2 *bp = netdev_priv(dev);
6323 bnx2_dump_state(bp);
6325 /* This allows the netif to be shut down gracefully before resetting */
6326 schedule_work(&bp->reset_task);
6329 #ifdef BCM_VLAN
6330 /* Called with rtnl_lock */
6331 static void
6332 bnx2_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp)
6334 struct bnx2 *bp = netdev_priv(dev);
6336 if (netif_running(dev))
6337 bnx2_netif_stop(bp);
6339 bp->vlgrp = vlgrp;
6341 if (!netif_running(dev))
6342 return;
6344 bnx2_set_rx_mode(dev);
6345 if (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN)
6346 bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_KEEP_VLAN_UPDATE, 0, 1);
6348 bnx2_netif_start(bp);
6350 #endif
6352 /* Called with netif_tx_lock.
6353 * bnx2_tx_int() runs without netif_tx_lock unless it needs to call
6354 * netif_wake_queue().
6355 */
6356 static netdev_tx_t
6357 bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
6359 struct bnx2 *bp = netdev_priv(dev);
6360 dma_addr_t mapping;
6361 struct tx_bd *txbd;
6362 struct sw_tx_bd *tx_buf;
6363 u32 len, vlan_tag_flags, last_frag, mss;
6364 u16 prod, ring_prod;
6365 int i;
6366 struct bnx2_napi *bnapi;
6367 struct bnx2_tx_ring_info *txr;
6368 struct netdev_queue *txq;
6370 /* Determine which tx ring we will be placed on */
6371 i = skb_get_queue_mapping(skb);
6372 bnapi = &bp->bnx2_napi[i];
6373 txr = &bnapi->tx_ring;
6374 txq = netdev_get_tx_queue(dev, i);
6376 if (unlikely(bnx2_tx_avail(bp, txr) <
6377 (skb_shinfo(skb)->nr_frags + 1))) {
6378 netif_tx_stop_queue(txq);
6379 printk(KERN_ERR PFX "%s: BUG! Tx ring full when queue awake!\n",
6380 dev->name);
6382 return NETDEV_TX_BUSY;
6384 len = skb_headlen(skb);
6385 prod = txr->tx_prod;
6386 ring_prod = TX_RING_IDX(prod);
6388 vlan_tag_flags = 0;
6389 if (skb->ip_summed == CHECKSUM_PARTIAL) {
6390 vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
6393 #ifdef BCM_VLAN
6394 if (bp->vlgrp && vlan_tx_tag_present(skb)) {
6395 vlan_tag_flags |=
6396 (TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
6398 #endif
6399 if ((mss = skb_shinfo(skb)->gso_size)) {
6400 u32 tcp_opt_len;
6401 struct iphdr *iph;
6403 vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;
6405 tcp_opt_len = tcp_optlen(skb);
6407 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
6408 u32 tcp_off = skb_transport_offset(skb) -
6409 sizeof(struct ipv6hdr) - ETH_HLEN;
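/* tcp_off is the length of any IPv6 extension headers; it is encoded
 * in pieces across the BD flags and mss fields below, with 0 (no
 * extension headers) as the fast path.
 */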
6411 vlan_tag_flags |= ((tcp_opt_len >> 2) << 8) |
6412 TX_BD_FLAGS_SW_FLAGS;
6413 if (likely(tcp_off == 0))
6414 vlan_tag_flags &= ~TX_BD_FLAGS_TCP6_OFF0_MSK;
6415 else {
6416 tcp_off >>= 3;
6417 vlan_tag_flags |= ((tcp_off & 0x3) <<
6418 TX_BD_FLAGS_TCP6_OFF0_SHL) |
6419 ((tcp_off & 0x10) <<
6420 TX_BD_FLAGS_TCP6_OFF4_SHL);
6421 mss |= (tcp_off & 0xc) << TX_BD_TCP6_OFF2_SHL;
6423 } else {
6424 iph = ip_hdr(skb);
6425 if (tcp_opt_len || (iph->ihl > 5)) {
6426 vlan_tag_flags |= ((iph->ihl - 5) +
6427 (tcp_opt_len >> 2)) << 8;
6430 } else
6431 mss = 0;
6433 mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE);
6434 if (pci_dma_mapping_error(bp->pdev, mapping)) {
6435 dev_kfree_skb(skb);
6436 return NETDEV_TX_OK;
6439 tx_buf = &txr->tx_buf_ring[ring_prod];
6440 tx_buf->skb = skb;
6441 pci_unmap_addr_set(tx_buf, mapping, mapping);
6443 txbd = &txr->tx_desc_ring[ring_prod];
6445 txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
6446 txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
6447 txbd->tx_bd_mss_nbytes = len | (mss << 16);
6448 txbd->tx_bd_vlan_tag_flags = vlan_tag_flags | TX_BD_FLAGS_START;
6450 last_frag = skb_shinfo(skb)->nr_frags;
6451 tx_buf->nr_frags = last_frag;
6452 tx_buf->is_gso = skb_is_gso(skb);
6454 for (i = 0; i < last_frag; i++) {
6455 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
6457 prod = NEXT_TX_BD(prod);
6458 ring_prod = TX_RING_IDX(prod);
6459 txbd = &txr->tx_desc_ring[ring_prod];
6461 len = frag->size;
6462 mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
6463 len, PCI_DMA_TODEVICE);
6464 if (pci_dma_mapping_error(bp->pdev, mapping))
6465 goto dma_error;
6466 pci_unmap_addr_set(&txr->tx_buf_ring[ring_prod], mapping,
6467 mapping);
6469 txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
6470 txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
6471 txbd->tx_bd_mss_nbytes = len | (mss << 16);
6472 txbd->tx_bd_vlan_tag_flags = vlan_tag_flags;
6475 txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;
6477 prod = NEXT_TX_BD(prod);
6478 txr->tx_prod_bseq += skb->len;
6480 REG_WR16(bp, txr->tx_bidx_addr, prod);
6481 REG_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);
6483 mmiowb();
6485 txr->tx_prod = prod;
6487 if (unlikely(bnx2_tx_avail(bp, txr) <= MAX_SKB_FRAGS)) {
6488 netif_tx_stop_queue(txq);
6489 if (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)
6490 netif_tx_wake_queue(txq);
6493 return NETDEV_TX_OK;
6494 dma_error:
6495 /* save value of frag that failed */
6496 last_frag = i;
6498 /* start back at beginning and unmap skb */
6499 prod = txr->tx_prod;
6500 ring_prod = TX_RING_IDX(prod);
6501 tx_buf = &txr->tx_buf_ring[ring_prod];
6502 tx_buf->skb = NULL;
6503 pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
6504 skb_headlen(skb), PCI_DMA_TODEVICE);
6506 /* unmap remaining mapped pages */
6507 for (i = 0; i < last_frag; i++) {
6508 prod = NEXT_TX_BD(prod);
6509 ring_prod = TX_RING_IDX(prod);
6510 tx_buf = &txr->tx_buf_ring[ring_prod];
6511 pci_unmap_page(bp->pdev, pci_unmap_addr(tx_buf, mapping),
6512 skb_shinfo(skb)->frags[i].size,
6513 PCI_DMA_TODEVICE);
6514 }
6516 dev_kfree_skb(skb);
6517 return NETDEV_TX_OK;
6518 }
6520 /* Called with rtnl_lock */
6521 static int
6522 bnx2_close(struct net_device *dev)
6523 {
6524 struct bnx2 *bp = netdev_priv(dev);
6526 cancel_work_sync(&bp->reset_task);
6528 bnx2_disable_int_sync(bp);
6529 bnx2_napi_disable(bp);
6530 del_timer_sync(&bp->timer);
6531 bnx2_shutdown_chip(bp);
6532 bnx2_free_irq(bp);
6533 bnx2_free_skbs(bp);
6534 bnx2_free_mem(bp);
6535 bp->link_up = 0;
6536 netif_carrier_off(bp->dev);
6537 bnx2_set_power_state(bp, PCI_D3hot);
6538 return 0;
6539 }
6541 #define GET_NET_STATS64(ctr) \
6542 (unsigned long) ((unsigned long) (ctr##_hi) << 32) + \
6543 (unsigned long) (ctr##_lo)
6545 #define GET_NET_STATS32(ctr) \
6546 (ctr##_lo)
6548 #if (BITS_PER_LONG == 64)
6549 #define GET_NET_STATS GET_NET_STATS64
6550 #else
6551 #define GET_NET_STATS GET_NET_STATS32
6552 #endif
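/* Illustration: on a 64-bit kernel,
 * GET_NET_STATS(stats_blk->stat_IfHCInOctets) expands to roughly
 * ((unsigned long) stat_IfHCInOctets_hi << 32) + stat_IfHCInOctets_lo,
 * while a 32-bit kernel keeps only the _lo word and lets the counter
 * wrap, as any unsigned long in net_device_stats eventually does.
 */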
6554 static struct net_device_stats *
6555 bnx2_get_stats(struct net_device *dev)
6556 {
6557 struct bnx2 *bp = netdev_priv(dev);
6558 struct statistics_block *stats_blk = bp->stats_blk;
6559 struct net_device_stats *net_stats = &dev->stats;
6561 if (bp->stats_blk == NULL) {
6562 return net_stats;
6563 }
6564 net_stats->rx_packets =
6565 GET_NET_STATS(stats_blk->stat_IfHCInUcastPkts) +
6566 GET_NET_STATS(stats_blk->stat_IfHCInMulticastPkts) +
6567 GET_NET_STATS(stats_blk->stat_IfHCInBroadcastPkts);
6569 net_stats->tx_packets =
6570 GET_NET_STATS(stats_blk->stat_IfHCOutUcastPkts) +
6571 GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts) +
6572 GET_NET_STATS(stats_blk->stat_IfHCOutBroadcastPkts);
6574 net_stats->rx_bytes =
6575 GET_NET_STATS(stats_blk->stat_IfHCInOctets);
6577 net_stats->tx_bytes =
6578 GET_NET_STATS(stats_blk->stat_IfHCOutOctets);
6580 net_stats->multicast =
6581 GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts);
6583 net_stats->collisions =
6584 (unsigned long) stats_blk->stat_EtherStatsCollisions;
6586 net_stats->rx_length_errors =
6587 (unsigned long) (stats_blk->stat_EtherStatsUndersizePkts +
6588 stats_blk->stat_EtherStatsOverrsizePkts);
6590 net_stats->rx_over_errors =
6591 (unsigned long) (stats_blk->stat_IfInFTQDiscards +
6592 stats_blk->stat_IfInMBUFDiscards);
6594 net_stats->rx_frame_errors =
6595 (unsigned long) stats_blk->stat_Dot3StatsAlignmentErrors;
6597 net_stats->rx_crc_errors =
6598 (unsigned long) stats_blk->stat_Dot3StatsFCSErrors;
6600 net_stats->rx_errors = net_stats->rx_length_errors +
6601 net_stats->rx_over_errors + net_stats->rx_frame_errors +
6602 net_stats->rx_crc_errors;
6604 net_stats->tx_aborted_errors =
6605 (unsigned long) (stats_blk->stat_Dot3StatsExcessiveCollisions +
6606 stats_blk->stat_Dot3StatsLateCollisions);
6608 if ((CHIP_NUM(bp) == CHIP_NUM_5706) ||
6609 (CHIP_ID(bp) == CHIP_ID_5708_A0))
6610 net_stats->tx_carrier_errors = 0;
6611 else {
6612 net_stats->tx_carrier_errors =
6613 (unsigned long)
6614 stats_blk->stat_Dot3StatsCarrierSenseErrors;
6615 }
6617 net_stats->tx_errors =
6618 (unsigned long)
6619 stats_blk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors
6620 +
6621 net_stats->tx_aborted_errors +
6622 net_stats->tx_carrier_errors;
6624 net_stats->rx_missed_errors =
6625 (unsigned long) (stats_blk->stat_IfInFTQDiscards +
6626 stats_blk->stat_IfInMBUFDiscards + stats_blk->stat_FwRxDrop);
6628 return net_stats;
6629 }
6631 /* All ethtool functions called with rtnl_lock */
6633 static int
6634 bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
6635 {
6636 struct bnx2 *bp = netdev_priv(dev);
6637 int support_serdes = 0, support_copper = 0;
6639 cmd->supported = SUPPORTED_Autoneg;
6640 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
6641 support_serdes = 1;
6642 support_copper = 1;
6643 } else if (bp->phy_port == PORT_FIBRE)
6644 support_serdes = 1;
6645 else
6646 support_copper = 1;
6648 if (support_serdes) {
6649 cmd->supported |= SUPPORTED_1000baseT_Full |
6650 SUPPORTED_FIBRE;
6651 if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
6652 cmd->supported |= SUPPORTED_2500baseX_Full;
6653 }
6655 if (support_copper) {
6656 cmd->supported |= SUPPORTED_10baseT_Half |
6657 SUPPORTED_10baseT_Full |
6658 SUPPORTED_100baseT_Half |
6659 SUPPORTED_100baseT_Full |
6660 SUPPORTED_1000baseT_Full |
6661 SUPPORTED_TP;
6662 }
6665 spin_lock_bh(&bp->phy_lock);
6666 cmd->port = bp->phy_port;
6667 cmd->advertising = bp->advertising;
6669 if (bp->autoneg & AUTONEG_SPEED) {
6670 cmd->autoneg = AUTONEG_ENABLE;
6671 }
6672 else {
6673 cmd->autoneg = AUTONEG_DISABLE;
6674 }
6676 if (netif_carrier_ok(dev)) {
6677 cmd->speed = bp->line_speed;
6678 cmd->duplex = bp->duplex;
6679 }
6680 else {
6681 cmd->speed = -1;
6682 cmd->duplex = -1;
6683 }
6684 spin_unlock_bh(&bp->phy_lock);
6686 cmd->transceiver = XCVR_INTERNAL;
6687 cmd->phy_address = bp->phy_addr;
6689 return 0;
6690 }
6692 static int
6693 bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
6694 {
6695 struct bnx2 *bp = netdev_priv(dev);
6696 u8 autoneg = bp->autoneg;
6697 u8 req_duplex = bp->req_duplex;
6698 u16 req_line_speed = bp->req_line_speed;
6699 u32 advertising = bp->advertising;
6700 int err = -EINVAL;
6702 spin_lock_bh(&bp->phy_lock);
6704 if (cmd->port != PORT_TP && cmd->port != PORT_FIBRE)
6705 goto err_out_unlock;
6707 if (cmd->port != bp->phy_port &&
6708 !(bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP))
6709 goto err_out_unlock;
6711 /* If device is down, we can store the settings only if the user
6712 * is setting the currently active port.
6713 */
6714 if (!netif_running(dev) && cmd->port != bp->phy_port)
6715 goto err_out_unlock;
6717 if (cmd->autoneg == AUTONEG_ENABLE) {
6718 autoneg |= AUTONEG_SPEED;
6720 cmd->advertising &= ETHTOOL_ALL_COPPER_SPEED;
6722 /* allow advertising 1 speed */
6723 if ((cmd->advertising == ADVERTISED_10baseT_Half) ||
6724 (cmd->advertising == ADVERTISED_10baseT_Full) ||
6725 (cmd->advertising == ADVERTISED_100baseT_Half) ||
6726 (cmd->advertising == ADVERTISED_100baseT_Full)) {
6728 if (cmd->port == PORT_FIBRE)
6729 goto err_out_unlock;
6731 advertising = cmd->advertising;
6733 } else if (cmd->advertising == ADVERTISED_2500baseX_Full) {
6734 if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) ||
6735 (cmd->port == PORT_TP))
6736 goto err_out_unlock;
6737 } else if (cmd->advertising == ADVERTISED_1000baseT_Full)
6738 advertising = cmd->advertising;
6739 else if (cmd->advertising == ADVERTISED_1000baseT_Half)
6740 goto err_out_unlock;
6741 else {
6742 if (cmd->port == PORT_FIBRE)
6743 advertising = ETHTOOL_ALL_FIBRE_SPEED;
6744 else
6745 advertising = ETHTOOL_ALL_COPPER_SPEED;
6746 }
6747 advertising |= ADVERTISED_Autoneg;
6748 }
6749 else {
6750 if (cmd->port == PORT_FIBRE) {
6751 if ((cmd->speed != SPEED_1000 &&
6752 cmd->speed != SPEED_2500) ||
6753 (cmd->duplex != DUPLEX_FULL))
6754 goto err_out_unlock;
6756 if (cmd->speed == SPEED_2500 &&
6757 !(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
6758 goto err_out_unlock;
6759 }
6760 else if (cmd->speed == SPEED_1000 || cmd->speed == SPEED_2500)
6761 goto err_out_unlock;
6763 autoneg &= ~AUTONEG_SPEED;
6764 req_line_speed = cmd->speed;
6765 req_duplex = cmd->duplex;
6766 advertising = 0;
6767 }
6769 bp->autoneg = autoneg;
6770 bp->advertising = advertising;
6771 bp->req_line_speed = req_line_speed;
6772 bp->req_duplex = req_duplex;
6774 err = 0;
6775 /* If device is down, the new settings will be picked up when it is
6776 * brought up.
6777 */
6778 if (netif_running(dev))
6779 err = bnx2_setup_phy(bp, cmd->port);
6781 err_out_unlock:
6782 spin_unlock_bh(&bp->phy_lock);
6784 return err;
6785 }
6787 static void
6788 bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
6789 {
6790 struct bnx2 *bp = netdev_priv(dev);
6792 strcpy(info->driver, DRV_MODULE_NAME);
6793 strcpy(info->version, DRV_MODULE_VERSION);
6794 strcpy(info->bus_info, pci_name(bp->pdev));
6795 strcpy(info->fw_version, bp->fw_version);
6796 }
6798 #define BNX2_REGDUMP_LEN (32 * 1024)
6800 static int
6801 bnx2_get_regs_len(struct net_device *dev)
6802 {
6803 return BNX2_REGDUMP_LEN;
6804 }
6806 static void
6807 bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
6808 {
6809 u32 *p = _p, i, offset;
6810 u8 *orig_p = _p;
6811 struct bnx2 *bp = netdev_priv(dev);
6812 u32 reg_boundaries[] = { 0x0000, 0x0098, 0x0400, 0x045c,
6813 0x0800, 0x0880, 0x0c00, 0x0c10,
6814 0x0c30, 0x0d08, 0x1000, 0x101c,
6815 0x1040, 0x1048, 0x1080, 0x10a4,
6816 0x1400, 0x1490, 0x1498, 0x14f0,
6817 0x1500, 0x155c, 0x1580, 0x15dc,
6818 0x1600, 0x1658, 0x1680, 0x16d8,
6819 0x1800, 0x1820, 0x1840, 0x1854,
6820 0x1880, 0x1894, 0x1900, 0x1984,
6821 0x1c00, 0x1c0c, 0x1c40, 0x1c54,
6822 0x1c80, 0x1c94, 0x1d00, 0x1d84,
6823 0x2000, 0x2030, 0x23c0, 0x2400,
6824 0x2800, 0x2820, 0x2830, 0x2850,
6825 0x2b40, 0x2c10, 0x2fc0, 0x3058,
6826 0x3c00, 0x3c94, 0x4000, 0x4010,
6827 0x4080, 0x4090, 0x43c0, 0x4458,
6828 0x4c00, 0x4c18, 0x4c40, 0x4c54,
6829 0x4fc0, 0x5010, 0x53c0, 0x5444,
6830 0x5c00, 0x5c18, 0x5c80, 0x5c90,
6831 0x5fc0, 0x6000, 0x6400, 0x6428,
6832 0x6800, 0x6848, 0x684c, 0x6860,
6833 0x6888, 0x6910, 0x8000 };
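/* The loop below consumes this array in pairs: each (start, end) pair,
 * e.g. 0x1400-0x1490, names a readable register window.  Offsets that
 * fall between windows are skipped, leaving the zeros from the memset,
 * so the dump always keeps its fixed BNX2_REGDUMP_LEN layout.
 */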
6835 regs->version = 0;
6837 memset(p, 0, BNX2_REGDUMP_LEN);
6839 if (!netif_running(bp->dev))
6840 return;
6842 i = 0;
6843 offset = reg_boundaries[0];
6844 p += offset;
6845 while (offset < BNX2_REGDUMP_LEN) {
6846 *p++ = REG_RD(bp, offset);
6847 offset += 4;
6848 if (offset == reg_boundaries[i + 1]) {
6849 offset = reg_boundaries[i + 2];
6850 p = (u32 *) (orig_p + offset);
6851 i += 2;
6852 }
6853 }
6854 }
6856 static void
6857 bnx2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
6858 {
6859 struct bnx2 *bp = netdev_priv(dev);
6861 if (bp->flags & BNX2_FLAG_NO_WOL) {
6862 wol->supported = 0;
6863 wol->wolopts = 0;
6864 }
6865 else {
6866 wol->supported = WAKE_MAGIC;
6867 if (bp->wol)
6868 wol->wolopts = WAKE_MAGIC;
6869 else
6870 wol->wolopts = 0;
6871 }
6872 memset(&wol->sopass, 0, sizeof(wol->sopass));
6873 }
6875 static int
6876 bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
6877 {
6878 struct bnx2 *bp = netdev_priv(dev);
6880 if (wol->wolopts & ~WAKE_MAGIC)
6881 return -EINVAL;
6883 if (wol->wolopts & WAKE_MAGIC) {
6884 if (bp->flags & BNX2_FLAG_NO_WOL)
6885 return -EINVAL;
6887 bp->wol = 1;
6888 }
6889 else {
6890 bp->wol = 0;
6891 }
6892 return 0;
6893 }
6895 static int
6896 bnx2_nway_reset(struct net_device *dev)
6897 {
6898 struct bnx2 *bp = netdev_priv(dev);
6899 u32 bmcr;
6901 if (!netif_running(dev))
6902 return -EAGAIN;
6904 if (!(bp->autoneg & AUTONEG_SPEED)) {
6905 return -EINVAL;
6906 }
6908 spin_lock_bh(&bp->phy_lock);
6910 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
6911 int rc;
6913 rc = bnx2_setup_remote_phy(bp, bp->phy_port);
6914 spin_unlock_bh(&bp->phy_lock);
6915 return rc;
6916 }
6918 /* Force a link down visible on the other side */
6919 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
6920 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
6921 spin_unlock_bh(&bp->phy_lock);
6923 msleep(20);
6925 spin_lock_bh(&bp->phy_lock);
6927 bp->current_interval = BNX2_SERDES_AN_TIMEOUT;
6928 bp->serdes_an_pending = 1;
6929 mod_timer(&bp->timer, jiffies + bp->current_interval);
6930 }
6932 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
6933 bmcr &= ~BMCR_LOOPBACK;
6934 bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART | BMCR_ANENABLE);
6936 spin_unlock_bh(&bp->phy_lock);
6938 return 0;
6939 }
6941 static u32
6942 bnx2_get_link(struct net_device *dev)
6943 {
6944 struct bnx2 *bp = netdev_priv(dev);
6946 return bp->link_up;
6947 }
6949 static int
6950 bnx2_get_eeprom_len(struct net_device *dev)
6951 {
6952 struct bnx2 *bp = netdev_priv(dev);
6954 if (bp->flash_info == NULL)
6955 return 0;
6957 return (int) bp->flash_size;
6958 }
6960 static int
6961 bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
6962 u8 *eebuf)
6963 {
6964 struct bnx2 *bp = netdev_priv(dev);
6965 int rc;
6967 if (!netif_running(dev))
6968 return -EAGAIN;
6970 /* parameters already validated in ethtool_get_eeprom */
6972 rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
6974 return rc;
6975 }
6977 static int
6978 bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
6979 u8 *eebuf)
6980 {
6981 struct bnx2 *bp = netdev_priv(dev);
6982 int rc;
6984 if (!netif_running(dev))
6985 return -EAGAIN;
6987 /* parameters already validated in ethtool_set_eeprom */
6989 rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
6991 return rc;
6992 }
6994 static int
6995 bnx2_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
6996 {
6997 struct bnx2 *bp = netdev_priv(dev);
6999 memset(coal, 0, sizeof(struct ethtool_coalesce));
7001 coal->rx_coalesce_usecs = bp->rx_ticks;
7002 coal->rx_max_coalesced_frames = bp->rx_quick_cons_trip;
7003 coal->rx_coalesce_usecs_irq = bp->rx_ticks_int;
7004 coal->rx_max_coalesced_frames_irq = bp->rx_quick_cons_trip_int;
7006 coal->tx_coalesce_usecs = bp->tx_ticks;
7007 coal->tx_max_coalesced_frames = bp->tx_quick_cons_trip;
7008 coal->tx_coalesce_usecs_irq = bp->tx_ticks_int;
7009 coal->tx_max_coalesced_frames_irq = bp->tx_quick_cons_trip_int;
7011 coal->stats_block_coalesce_usecs = bp->stats_ticks;
7013 return 0;
7014 }
7016 static int
7017 bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
7018 {
7019 struct bnx2 *bp = netdev_priv(dev);
7021 bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
7022 if (bp->rx_ticks > 0x3ff) bp->rx_ticks = 0x3ff;
7024 bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames;
7025 if (bp->rx_quick_cons_trip > 0xff) bp->rx_quick_cons_trip = 0xff;
7027 bp->rx_ticks_int = (u16) coal->rx_coalesce_usecs_irq;
7028 if (bp->rx_ticks_int > 0x3ff) bp->rx_ticks_int = 0x3ff;
7030 bp->rx_quick_cons_trip_int = (u16) coal->rx_max_coalesced_frames_irq;
7031 if (bp->rx_quick_cons_trip_int > 0xff)
7032 bp->rx_quick_cons_trip_int = 0xff;
7034 bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
7035 if (bp->tx_ticks > 0x3ff) bp->tx_ticks = 0x3ff;
7037 bp->tx_quick_cons_trip = (u16) coal->tx_max_coalesced_frames;
7038 if (bp->tx_quick_cons_trip > 0xff) bp->tx_quick_cons_trip = 0xff;
7040 bp->tx_ticks_int = (u16) coal->tx_coalesce_usecs_irq;
7041 if (bp->tx_ticks_int > 0x3ff) bp->tx_ticks_int = 0x3ff;
7043 bp->tx_quick_cons_trip_int = (u16) coal->tx_max_coalesced_frames_irq;
7044 if (bp->tx_quick_cons_trip_int > 0xff) bp->tx_quick_cons_trip_int =
7045 0xff;
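/* The clamping above reflects the narrow hardware fields: tick values
 * are 10-bit (at most 0x3ff, ~1023 usec) and frame counts 8-bit (at
 * most 0xff), so e.g. a requested tx_coalesce_usecs of 2000 is
 * silently reduced to 1023.
 */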
7047 bp->stats_ticks = coal->stats_block_coalesce_usecs;
7048 if (bp->flags & BNX2_FLAG_BROKEN_STATS) {
7049 if (bp->stats_ticks != 0 && bp->stats_ticks != USEC_PER_SEC)
7050 bp->stats_ticks = USEC_PER_SEC;
7051 }
7052 if (bp->stats_ticks > BNX2_HC_STATS_TICKS_HC_STAT_TICKS)
7053 bp->stats_ticks = BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
7054 bp->stats_ticks &= BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
7056 if (netif_running(bp->dev)) {
7057 bnx2_netif_stop(bp);
7058 bnx2_init_nic(bp, 0);
7059 bnx2_netif_start(bp);
7060 }
7062 return 0;
7063 }
7065 static void
7066 bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
7067 {
7068 struct bnx2 *bp = netdev_priv(dev);
7070 ering->rx_max_pending = MAX_TOTAL_RX_DESC_CNT;
7071 ering->rx_mini_max_pending = 0;
7072 ering->rx_jumbo_max_pending = MAX_TOTAL_RX_PG_DESC_CNT;
7074 ering->rx_pending = bp->rx_ring_size;
7075 ering->rx_mini_pending = 0;
7076 ering->rx_jumbo_pending = bp->rx_pg_ring_size;
7078 ering->tx_max_pending = MAX_TX_DESC_CNT;
7079 ering->tx_pending = bp->tx_ring_size;
7080 }
7082 static int
7083 bnx2_change_ring_size(struct bnx2 *bp, u32 rx, u32 tx)
7084 {
7085 if (netif_running(bp->dev)) {
7086 bnx2_netif_stop(bp);
7087 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
7088 bnx2_free_skbs(bp);
7089 bnx2_free_mem(bp);
7090 }
7092 bnx2_set_rx_ring_size(bp, rx);
7093 bp->tx_ring_size = tx;
7095 if (netif_running(bp->dev)) {
7096 int rc;
7098 rc = bnx2_alloc_mem(bp);
7099 if (!rc)
7100 rc = bnx2_init_nic(bp, 0);
7102 if (rc) {
7103 bnx2_napi_enable(bp);
7104 dev_close(bp->dev);
7105 return rc;
7106 }
7107 bnx2_netif_start(bp);
7108 }
7109 return 0;
7110 }
7112 static int
7113 bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
7114 {
7115 struct bnx2 *bp = netdev_priv(dev);
7116 int rc;
7118 if ((ering->rx_pending > MAX_TOTAL_RX_DESC_CNT) ||
7119 (ering->tx_pending > MAX_TX_DESC_CNT) ||
7120 (ering->tx_pending <= MAX_SKB_FRAGS)) {
7122 return -EINVAL;
7123 }
7124 rc = bnx2_change_ring_size(bp, ering->rx_pending, ering->tx_pending);
7125 return rc;
7126 }
7128 static void
7129 bnx2_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
7130 {
7131 struct bnx2 *bp = netdev_priv(dev);
7133 epause->autoneg = ((bp->autoneg & AUTONEG_FLOW_CTRL) != 0);
7134 epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) != 0);
7135 epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) != 0);
7136 }
7138 static int
7139 bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
7140 {
7141 struct bnx2 *bp = netdev_priv(dev);
7143 bp->req_flow_ctrl = 0;
7144 if (epause->rx_pause)
7145 bp->req_flow_ctrl |= FLOW_CTRL_RX;
7146 if (epause->tx_pause)
7147 bp->req_flow_ctrl |= FLOW_CTRL_TX;
7149 if (epause->autoneg) {
7150 bp->autoneg |= AUTONEG_FLOW_CTRL;
7151 }
7152 else {
7153 bp->autoneg &= ~AUTONEG_FLOW_CTRL;
7154 }
7156 if (netif_running(dev)) {
7157 spin_lock_bh(&bp->phy_lock);
7158 bnx2_setup_phy(bp, bp->phy_port);
7159 spin_unlock_bh(&bp->phy_lock);
7160 }
7162 return 0;
7163 }
7165 static u32
7166 bnx2_get_rx_csum(struct net_device *dev)
7167 {
7168 struct bnx2 *bp = netdev_priv(dev);
7170 return bp->rx_csum;
7171 }
7173 static int
7174 bnx2_set_rx_csum(struct net_device *dev, u32 data)
7175 {
7176 struct bnx2 *bp = netdev_priv(dev);
7178 bp->rx_csum = data;
7179 return 0;
7180 }
7182 static int
7183 bnx2_set_tso(struct net_device *dev, u32 data)
7184 {
7185 struct bnx2 *bp = netdev_priv(dev);
7187 if (data) {
7188 dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
7189 if (CHIP_NUM(bp) == CHIP_NUM_5709)
7190 dev->features |= NETIF_F_TSO6;
7191 } else
7192 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6 |
7193 NETIF_F_TSO_ECN);
7194 return 0;
7195 }
7197 static struct {
7198 char string[ETH_GSTRING_LEN];
7199 } bnx2_stats_str_arr[] = {
7200 { "rx_bytes" },
7201 { "rx_error_bytes" },
7202 { "tx_bytes" },
7203 { "tx_error_bytes" },
7204 { "rx_ucast_packets" },
7205 { "rx_mcast_packets" },
7206 { "rx_bcast_packets" },
7207 { "tx_ucast_packets" },
7208 { "tx_mcast_packets" },
7209 { "tx_bcast_packets" },
7210 { "tx_mac_errors" },
7211 { "tx_carrier_errors" },
7212 { "rx_crc_errors" },
7213 { "rx_align_errors" },
7214 { "tx_single_collisions" },
7215 { "tx_multi_collisions" },
7216 { "tx_deferred" },
7217 { "tx_excess_collisions" },
7218 { "tx_late_collisions" },
7219 { "tx_total_collisions" },
7220 { "rx_fragments" },
7221 { "rx_jabbers" },
7222 { "rx_undersize_packets" },
7223 { "rx_oversize_packets" },
7224 { "rx_64_byte_packets" },
7225 { "rx_65_to_127_byte_packets" },
7226 { "rx_128_to_255_byte_packets" },
7227 { "rx_256_to_511_byte_packets" },
7228 { "rx_512_to_1023_byte_packets" },
7229 { "rx_1024_to_1522_byte_packets" },
7230 { "rx_1523_to_9022_byte_packets" },
7231 { "tx_64_byte_packets" },
7232 { "tx_65_to_127_byte_packets" },
7233 { "tx_128_to_255_byte_packets" },
7234 { "tx_256_to_511_byte_packets" },
7235 { "tx_512_to_1023_byte_packets" },
7236 { "tx_1024_to_1522_byte_packets" },
7237 { "tx_1523_to_9022_byte_packets" },
7238 { "rx_xon_frames" },
7239 { "rx_xoff_frames" },
7240 { "tx_xon_frames" },
7241 { "tx_xoff_frames" },
7242 { "rx_mac_ctrl_frames" },
7243 { "rx_filtered_packets" },
7244 { "rx_ftq_discards" },
7245 { "rx_discards" },
7246 { "rx_fw_discards" },
7249 #define BNX2_NUM_STATS (sizeof(bnx2_stats_str_arr)/\
7250 sizeof(bnx2_stats_str_arr[0]))
7252 #define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4)
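/* Illustration: the stats block is addressed as an array of u32s, so
 * STATS_OFFSET32(stat_IfHCInOctets_hi) is simply the word index of that
 * field -- its offsetof() in bytes divided by 4.
 */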
7254 static const unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
7255 STATS_OFFSET32(stat_IfHCInOctets_hi),
7256 STATS_OFFSET32(stat_IfHCInBadOctets_hi),
7257 STATS_OFFSET32(stat_IfHCOutOctets_hi),
7258 STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
7259 STATS_OFFSET32(stat_IfHCInUcastPkts_hi),
7260 STATS_OFFSET32(stat_IfHCInMulticastPkts_hi),
7261 STATS_OFFSET32(stat_IfHCInBroadcastPkts_hi),
7262 STATS_OFFSET32(stat_IfHCOutUcastPkts_hi),
7263 STATS_OFFSET32(stat_IfHCOutMulticastPkts_hi),
7264 STATS_OFFSET32(stat_IfHCOutBroadcastPkts_hi),
7265 STATS_OFFSET32(stat_emac_tx_stat_dot3statsinternalmactransmiterrors),
7266 STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
7267 STATS_OFFSET32(stat_Dot3StatsFCSErrors),
7268 STATS_OFFSET32(stat_Dot3StatsAlignmentErrors),
7269 STATS_OFFSET32(stat_Dot3StatsSingleCollisionFrames),
7270 STATS_OFFSET32(stat_Dot3StatsMultipleCollisionFrames),
7271 STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
7272 STATS_OFFSET32(stat_Dot3StatsExcessiveCollisions),
7273 STATS_OFFSET32(stat_Dot3StatsLateCollisions),
7274 STATS_OFFSET32(stat_EtherStatsCollisions),
7275 STATS_OFFSET32(stat_EtherStatsFragments),
7276 STATS_OFFSET32(stat_EtherStatsJabbers),
7277 STATS_OFFSET32(stat_EtherStatsUndersizePkts),
7278 STATS_OFFSET32(stat_EtherStatsOverrsizePkts),
7279 STATS_OFFSET32(stat_EtherStatsPktsRx64Octets),
7280 STATS_OFFSET32(stat_EtherStatsPktsRx65Octetsto127Octets),
7281 STATS_OFFSET32(stat_EtherStatsPktsRx128Octetsto255Octets),
7282 STATS_OFFSET32(stat_EtherStatsPktsRx256Octetsto511Octets),
7283 STATS_OFFSET32(stat_EtherStatsPktsRx512Octetsto1023Octets),
7284 STATS_OFFSET32(stat_EtherStatsPktsRx1024Octetsto1522Octets),
7285 STATS_OFFSET32(stat_EtherStatsPktsRx1523Octetsto9022Octets),
7286 STATS_OFFSET32(stat_EtherStatsPktsTx64Octets),
7287 STATS_OFFSET32(stat_EtherStatsPktsTx65Octetsto127Octets),
7288 STATS_OFFSET32(stat_EtherStatsPktsTx128Octetsto255Octets),
7289 STATS_OFFSET32(stat_EtherStatsPktsTx256Octetsto511Octets),
7290 STATS_OFFSET32(stat_EtherStatsPktsTx512Octetsto1023Octets),
7291 STATS_OFFSET32(stat_EtherStatsPktsTx1024Octetsto1522Octets),
7292 STATS_OFFSET32(stat_EtherStatsPktsTx1523Octetsto9022Octets),
7293 STATS_OFFSET32(stat_XonPauseFramesReceived),
7294 STATS_OFFSET32(stat_XoffPauseFramesReceived),
7295 STATS_OFFSET32(stat_OutXonSent),
7296 STATS_OFFSET32(stat_OutXoffSent),
7297 STATS_OFFSET32(stat_MacControlFramesReceived),
7298 STATS_OFFSET32(stat_IfInFramesL2FilterDiscards),
7299 STATS_OFFSET32(stat_IfInFTQDiscards),
7300 STATS_OFFSET32(stat_IfInMBUFDiscards),
7301 STATS_OFFSET32(stat_FwRxDrop),
7302 };
7304 /* stat_IfHCInBadOctets and stat_Dot3StatsCarrierSenseErrors are
7305 * skipped because of errata.
7306 */
7307 static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
7308 8,0,8,8,8,8,8,8,8,8,
7309 4,0,4,4,4,4,4,4,4,4,
7310 4,4,4,4,4,4,4,4,4,4,
7311 4,4,4,4,4,4,4,4,4,4,
7312 4,4,4,4,4,4,4,
7313 };
7315 static u8 bnx2_5708_stats_len_arr[BNX2_NUM_STATS] = {
7316 8,0,8,8,8,8,8,8,8,8,
7317 4,4,4,4,4,4,4,4,4,4,
7318 4,4,4,4,4,4,4,4,4,4,
7319 4,4,4,4,4,4,4,4,4,4,
7320 4,4,4,4,4,4,4,
7321 };
7323 #define BNX2_NUM_TESTS 6
7325 static struct {
7326 char string[ETH_GSTRING_LEN];
7327 } bnx2_tests_str_arr[BNX2_NUM_TESTS] = {
7328 { "register_test (offline)" },
7329 { "memory_test (offline)" },
7330 { "loopback_test (offline)" },
7331 { "nvram_test (online)" },
7332 { "interrupt_test (online)" },
7333 { "link_test (online)" },
7336 static int
7337 bnx2_get_sset_count(struct net_device *dev, int sset)
7339 switch (sset) {
7340 case ETH_SS_TEST:
7341 return BNX2_NUM_TESTS;
7342 case ETH_SS_STATS:
7343 return BNX2_NUM_STATS;
7344 default:
7345 return -EOPNOTSUPP;
7346 }
7347 }
7349 static void
7350 bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
7351 {
7352 struct bnx2 *bp = netdev_priv(dev);
7354 bnx2_set_power_state(bp, PCI_D0);
7356 memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS);
7357 if (etest->flags & ETH_TEST_FL_OFFLINE) {
7358 int i;
7360 bnx2_netif_stop(bp);
7361 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
7362 bnx2_free_skbs(bp);
7364 if (bnx2_test_registers(bp) != 0) {
7365 buf[0] = 1;
7366 etest->flags |= ETH_TEST_FL_FAILED;
7367 }
7368 if (bnx2_test_memory(bp) != 0) {
7369 buf[1] = 1;
7370 etest->flags |= ETH_TEST_FL_FAILED;
7371 }
7372 if ((buf[2] = bnx2_test_loopback(bp)) != 0)
7373 etest->flags |= ETH_TEST_FL_FAILED;
7375 if (!netif_running(bp->dev))
7376 bnx2_shutdown_chip(bp);
7377 else {
7378 bnx2_init_nic(bp, 1);
7379 bnx2_netif_start(bp);
7380 }
7382 /* wait for link up */
7383 for (i = 0; i < 7; i++) {
7384 if (bp->link_up)
7385 break;
7386 msleep_interruptible(1000);
7387 }
7388 }
7390 if (bnx2_test_nvram(bp) != 0) {
7391 buf[3] = 1;
7392 etest->flags |= ETH_TEST_FL_FAILED;
7393 }
7394 if (bnx2_test_intr(bp) != 0) {
7395 buf[4] = 1;
7396 etest->flags |= ETH_TEST_FL_FAILED;
7397 }
7399 if (bnx2_test_link(bp) != 0) {
7400 buf[5] = 1;
7401 etest->flags |= ETH_TEST_FL_FAILED;
7402 }
7404 if (!netif_running(bp->dev))
7405 bnx2_set_power_state(bp, PCI_D3hot);
7406 }
7408 static void
7409 bnx2_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
7410 {
7411 switch (stringset) {
7412 case ETH_SS_STATS:
7413 memcpy(buf, bnx2_stats_str_arr,
7414 sizeof(bnx2_stats_str_arr));
7415 break;
7416 case ETH_SS_TEST:
7417 memcpy(buf, bnx2_tests_str_arr,
7418 sizeof(bnx2_tests_str_arr));
7419 break;
7420 }
7421 }
7423 static void
7424 bnx2_get_ethtool_stats(struct net_device *dev,
7425 struct ethtool_stats *stats, u64 *buf)
7426 {
7427 struct bnx2 *bp = netdev_priv(dev);
7428 int i;
7429 u32 *hw_stats = (u32 *) bp->stats_blk;
7430 u8 *stats_len_arr = NULL;
7432 if (hw_stats == NULL) {
7433 memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS);
7434 return;
7435 }
7437 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
7438 (CHIP_ID(bp) == CHIP_ID_5706_A1) ||
7439 (CHIP_ID(bp) == CHIP_ID_5706_A2) ||
7440 (CHIP_ID(bp) == CHIP_ID_5708_A0))
7441 stats_len_arr = bnx2_5706_stats_len_arr;
7442 else
7443 stats_len_arr = bnx2_5708_stats_len_arr;
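/* Each entry is then read according to its width: 0 means the counter
 * is unimplemented on this chip and reported as 0, 4 means a single
 * u32 word, and 8 means two consecutive words combined below as
 * ((u64) hi << 32) + lo.
 */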
7445 for (i = 0; i < BNX2_NUM_STATS; i++) {
7446 if (stats_len_arr[i] == 0) {
7447 /* skip this counter */
7448 buf[i] = 0;
7449 continue;
7450 }
7451 if (stats_len_arr[i] == 4) {
7452 /* 4-byte counter */
7453 buf[i] = (u64)
7454 *(hw_stats + bnx2_stats_offset_arr[i]);
7455 continue;
7456 }
7457 /* 8-byte counter */
7458 buf[i] = (((u64) *(hw_stats +
7459 bnx2_stats_offset_arr[i])) << 32) +
7460 *(hw_stats + bnx2_stats_offset_arr[i] + 1);
7461 }
7462 }
7464 static int
7465 bnx2_phys_id(struct net_device *dev, u32 data)
7466 {
7467 struct bnx2 *bp = netdev_priv(dev);
7468 int i;
7469 u32 save;
7471 bnx2_set_power_state(bp, PCI_D0);
7473 if (data == 0)
7474 data = 2;
7476 save = REG_RD(bp, BNX2_MISC_CFG);
7477 REG_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);
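/* Each pass of the loop below holds the LED state for 500 ms,
 * alternating an all-off override with an all-on override, so "data"
 * counts whole one-second blink cycles; the 0 that ethtool passes by
 * default was mapped to 2 above.
 */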
7479 for (i = 0; i < (data * 2); i++) {
7480 if ((i % 2) == 0) {
7481 REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE);
7482 }
7483 else {
7484 REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE |
7485 BNX2_EMAC_LED_1000MB_OVERRIDE |
7486 BNX2_EMAC_LED_100MB_OVERRIDE |
7487 BNX2_EMAC_LED_10MB_OVERRIDE |
7488 BNX2_EMAC_LED_TRAFFIC_OVERRIDE |
7489 BNX2_EMAC_LED_TRAFFIC);
7490 }
7491 msleep_interruptible(500);
7492 if (signal_pending(current))
7493 break;
7494 }
7495 REG_WR(bp, BNX2_EMAC_LED, 0);
7496 REG_WR(bp, BNX2_MISC_CFG, save);
7498 if (!netif_running(dev))
7499 bnx2_set_power_state(bp, PCI_D3hot);
7501 return 0;
7502 }
7504 static int
7505 bnx2_set_tx_csum(struct net_device *dev, u32 data)
7506 {
7507 struct bnx2 *bp = netdev_priv(dev);
7509 if (CHIP_NUM(bp) == CHIP_NUM_5709)
7510 return (ethtool_op_set_tx_ipv6_csum(dev, data));
7511 else
7512 return (ethtool_op_set_tx_csum(dev, data));
7513 }
7515 static const struct ethtool_ops bnx2_ethtool_ops = {
7516 .get_settings = bnx2_get_settings,
7517 .set_settings = bnx2_set_settings,
7518 .get_drvinfo = bnx2_get_drvinfo,
7519 .get_regs_len = bnx2_get_regs_len,
7520 .get_regs = bnx2_get_regs,
7521 .get_wol = bnx2_get_wol,
7522 .set_wol = bnx2_set_wol,
7523 .nway_reset = bnx2_nway_reset,
7524 .get_link = bnx2_get_link,
7525 .get_eeprom_len = bnx2_get_eeprom_len,
7526 .get_eeprom = bnx2_get_eeprom,
7527 .set_eeprom = bnx2_set_eeprom,
7528 .get_coalesce = bnx2_get_coalesce,
7529 .set_coalesce = bnx2_set_coalesce,
7530 .get_ringparam = bnx2_get_ringparam,
7531 .set_ringparam = bnx2_set_ringparam,
7532 .get_pauseparam = bnx2_get_pauseparam,
7533 .set_pauseparam = bnx2_set_pauseparam,
7534 .get_rx_csum = bnx2_get_rx_csum,
7535 .set_rx_csum = bnx2_set_rx_csum,
7536 .set_tx_csum = bnx2_set_tx_csum,
7537 .set_sg = ethtool_op_set_sg,
7538 .set_tso = bnx2_set_tso,
7539 .self_test = bnx2_self_test,
7540 .get_strings = bnx2_get_strings,
7541 .phys_id = bnx2_phys_id,
7542 .get_ethtool_stats = bnx2_get_ethtool_stats,
7543 .get_sset_count = bnx2_get_sset_count,
7544 };
7546 /* Called with rtnl_lock */
7547 static int
7548 bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
7549 {
7550 struct mii_ioctl_data *data = if_mii(ifr);
7551 struct bnx2 *bp = netdev_priv(dev);
7552 int err;
7554 switch(cmd) {
7555 case SIOCGMIIPHY:
7556 data->phy_id = bp->phy_addr;
7558 /* fallthru */
7559 case SIOCGMIIREG: {
7560 u32 mii_regval;
7562 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
7563 return -EOPNOTSUPP;
7565 if (!netif_running(dev))
7566 return -EAGAIN;
7568 spin_lock_bh(&bp->phy_lock);
7569 err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval);
7570 spin_unlock_bh(&bp->phy_lock);
7572 data->val_out = mii_regval;
7574 return err;
7575 }
7577 case SIOCSMIIREG:
7578 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
7579 return -EOPNOTSUPP;
7581 if (!netif_running(dev))
7582 return -EAGAIN;
7584 spin_lock_bh(&bp->phy_lock);
7585 err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in);
7586 spin_unlock_bh(&bp->phy_lock);
7588 return err;
7590 default:
7591 /* do nothing */
7592 break;
7593 }
7594 return -EOPNOTSUPP;
7595 }
7597 /* Called with rtnl_lock */
7598 static int
7599 bnx2_change_mac_addr(struct net_device *dev, void *p)
7600 {
7601 struct sockaddr *addr = p;
7602 struct bnx2 *bp = netdev_priv(dev);
7604 if (!is_valid_ether_addr(addr->sa_data))
7605 return -EINVAL;
7607 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
7608 if (netif_running(dev))
7609 bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
7611 return 0;
7612 }
7614 /* Called with rtnl_lock */
7615 static int
7616 bnx2_change_mtu(struct net_device *dev, int new_mtu)
7617 {
7618 struct bnx2 *bp = netdev_priv(dev);
7620 if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) ||
7621 ((new_mtu + ETH_HLEN) < MIN_ETHERNET_PACKET_SIZE))
7622 return -EINVAL;
7624 dev->mtu = new_mtu;
7625 return (bnx2_change_ring_size(bp, bp->rx_ring_size, bp->tx_ring_size));
7626 }
7628 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
7629 static void
7630 poll_bnx2(struct net_device *dev)
7631 {
7632 struct bnx2 *bp = netdev_priv(dev);
7633 int i;
7635 for (i = 0; i < bp->irq_nvecs; i++) {
7636 disable_irq(bp->irq_tbl[i].vector);
7637 bnx2_interrupt(bp->irq_tbl[i].vector, &bp->bnx2_napi[i]);
7638 enable_irq(bp->irq_tbl[i].vector);
7639 }
7640 }
7641 #endif
7643 static void __devinit
7644 bnx2_get_5709_media(struct bnx2 *bp)
7645 {
7646 u32 val = REG_RD(bp, BNX2_MISC_DUAL_MEDIA_CTRL);
7647 u32 bond_id = val & BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID;
7648 u32 strap;
7650 if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_C)
7651 return;
7652 else if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_S) {
7653 bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
7654 return;
7655 }
7657 if (val & BNX2_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE)
7658 strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL) >> 21;
7659 else
7660 strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP) >> 8;
7662 if (PCI_FUNC(bp->pdev->devfn) == 0) {
7663 switch (strap) {
7664 case 0x4:
7665 case 0x5:
7666 case 0x6:
7667 bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
7668 return;
7669 }
7670 } else {
7671 switch (strap) {
7672 case 0x1:
7673 case 0x2:
7674 case 0x4:
7675 bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
7676 return;
7677 }
7678 }
7679 }
7681 static void __devinit
7682 bnx2_get_pci_speed(struct bnx2 *bp)
7683 {
7684 u32 reg;
7686 reg = REG_RD(bp, BNX2_PCICFG_MISC_STATUS);
7687 if (reg & BNX2_PCICFG_MISC_STATUS_PCIX_DET) {
7688 u32 clkreg;
7690 bp->flags |= BNX2_FLAG_PCIX;
7692 clkreg = REG_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS);
7694 clkreg &= BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
7695 switch (clkreg) {
7696 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
7697 bp->bus_speed_mhz = 133;
7698 break;
7700 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
7701 bp->bus_speed_mhz = 100;
7702 break;
7704 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
7705 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
7706 bp->bus_speed_mhz = 66;
7707 break;
7709 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
7710 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
7711 bp->bus_speed_mhz = 50;
7712 break;
7714 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
7715 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
7716 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
7717 bp->bus_speed_mhz = 33;
7718 break;
7719 }
7720 }
7721 else {
7722 if (reg & BNX2_PCICFG_MISC_STATUS_M66EN)
7723 bp->bus_speed_mhz = 66;
7724 else
7725 bp->bus_speed_mhz = 33;
7726 }
7728 if (reg & BNX2_PCICFG_MISC_STATUS_32BIT_DET)
7729 bp->flags |= BNX2_FLAG_PCI_32BIT;
7730 }
7733 static void __devinit
7734 bnx2_read_vpd_fw_ver(struct bnx2 *bp)
7735 {
7736 int rc, i, v0_len = 0;
7737 u8 *data;
7738 u8 *v0_str = NULL;
7739 bool mn_match = false;
7741 #define BNX2_VPD_NVRAM_OFFSET 0x300
7742 #define BNX2_VPD_LEN 128
7743 #define BNX2_MAX_VER_SLEN 30
7745 data = kmalloc(256, GFP_KERNEL);
7746 if (!data)
7747 return;
7749 rc = bnx2_nvram_read(bp, BNX2_VPD_NVRAM_OFFSET, data + BNX2_VPD_LEN,
7750 BNX2_VPD_LEN);
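/* What follows assumes the layout bnx2 NVRAM uses for VPD: the image is
 * stored with each 32-bit word byte-reversed, so the copy loop below
 * restores byte order first.  The result is standard PCI VPD --
 * large-resource tags 0x82 (identifier string), 0x90 (VPD-R) and 0x91
 * (VPD-W), each followed by a little-endian 16-bit length -- from whose
 * VPD-R block the "MN" and "V0" keywords are then parsed.
 */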
7751 if (rc)
7752 goto vpd_done;
7754 for (i = 0; i < BNX2_VPD_LEN; i += 4) {
7755 data[i] = data[i + BNX2_VPD_LEN + 3];
7756 data[i + 1] = data[i + BNX2_VPD_LEN + 2];
7757 data[i + 2] = data[i + BNX2_VPD_LEN + 1];
7758 data[i + 3] = data[i + BNX2_VPD_LEN];
7759 }
7761 for (i = 0; i <= BNX2_VPD_LEN - 3; ) {
7762 unsigned char val = data[i];
7763 unsigned int block_end;
7765 if (val == 0x82 || val == 0x91) {
7766 i = (i + 3 + (data[i + 1] + (data[i + 2] << 8)));
7767 continue;
7768 }
7770 if (val != 0x90)
7771 goto vpd_done;
7773 block_end = (i + 3 + (data[i + 1] + (data[i + 2] << 8)));
7774 i += 3;
7776 if (block_end > BNX2_VPD_LEN)
7777 goto vpd_done;
7779 while (i < (block_end - 2)) {
7780 int len = data[i + 2];
7782 if (i + 3 + len > block_end)
7783 goto vpd_done;
7785 if (data[i] == 'M' && data[i + 1] == 'N') {
7786 if (len != 4 ||
7787 memcmp(&data[i + 3], "1028", 4))
7788 goto vpd_done;
7789 mn_match = true;
7791 } else if (data[i] == 'V' && data[i + 1] == '0') {
7792 if (len > BNX2_MAX_VER_SLEN)
7793 goto vpd_done;
7795 v0_len = len;
7796 v0_str = &data[i + 3];
7797 }
7798 i += 3 + len;
7800 if (mn_match && v0_str) {
7801 memcpy(bp->fw_version, v0_str, v0_len);
7802 bp->fw_version[v0_len] = ' ';
7803 goto vpd_done;
7804 }
7805 }
7806 goto vpd_done;
7807 }
7809 vpd_done:
7810 kfree(data);
7811 }
7813 static int __devinit
7814 bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
7815 {
7816 struct bnx2 *bp;
7817 unsigned long mem_len;
7818 int rc, i, j;
7819 u32 reg;
7820 u64 dma_mask, persist_dma_mask;
7822 SET_NETDEV_DEV(dev, &pdev->dev);
7823 bp = netdev_priv(dev);
7825 bp->flags = 0;
7826 bp->phy_flags = 0;
7828 /* enable device (incl. PCI PM wakeup), and bus-mastering */
7829 rc = pci_enable_device(pdev);
7830 if (rc) {
7831 dev_err(&pdev->dev, "Cannot enable PCI device, aborting.\n");
7832 goto err_out;
7833 }
7835 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
7836 dev_err(&pdev->dev,
7837 "Cannot find PCI device base address, aborting.\n");
7838 rc = -ENODEV;
7839 goto err_out_disable;
7840 }
7842 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
7843 if (rc) {
7844 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting.\n");
7845 goto err_out_disable;
7846 }
7848 pci_set_master(pdev);
7849 pci_save_state(pdev);
7851 bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
7852 if (bp->pm_cap == 0) {
7853 dev_err(&pdev->dev,
7854 "Cannot find power management capability, aborting.\n");
7855 rc = -EIO;
7856 goto err_out_release;
7857 }
7859 bp->dev = dev;
7860 bp->pdev = pdev;
7862 spin_lock_init(&bp->phy_lock);
7863 spin_lock_init(&bp->indirect_lock);
7864 #ifdef BCM_CNIC
7865 mutex_init(&bp->cnic_lock);
7866 #endif
7867 INIT_WORK(&bp->reset_task, bnx2_reset_task);
7869 dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0);
7870 mem_len = MB_GET_CID_ADDR(TX_TSS_CID + TX_MAX_TSS_RINGS + 1);
7871 dev->mem_end = dev->mem_start + mem_len;
7872 dev->irq = pdev->irq;
7874 bp->regview = ioremap_nocache(dev->base_addr, mem_len);
7876 if (!bp->regview) {
7877 dev_err(&pdev->dev, "Cannot map register space, aborting.\n");
7878 rc = -ENOMEM;
7879 goto err_out_release;
7880 }
7882 /* Configure byte swap and enable write to the reg_window registers.
7883 * Rely on CPU to do target byte swapping on big endian systems
7884 * The chip's target access swapping will not swap all accesses
7885 */
7886 pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG,
7887 BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
7888 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);
7890 bnx2_set_power_state(bp, PCI_D0);
7892 bp->chip_id = REG_RD(bp, BNX2_MISC_ID);
7894 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
7895 if (pci_find_capability(pdev, PCI_CAP_ID_EXP) == 0) {
7896 dev_err(&pdev->dev,
7897 "Cannot find PCIE capability, aborting.\n");
7898 rc = -EIO;
7899 goto err_out_unmap;
7900 }
7901 bp->flags |= BNX2_FLAG_PCIE;
7902 if (CHIP_REV(bp) == CHIP_REV_Ax)
7903 bp->flags |= BNX2_FLAG_JUMBO_BROKEN;
7904 } else {
7905 bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
7906 if (bp->pcix_cap == 0) {
7907 dev_err(&pdev->dev,
7908 "Cannot find PCIX capability, aborting.\n");
7909 rc = -EIO;
7910 goto err_out_unmap;
7911 }
7912 bp->flags |= BNX2_FLAG_BROKEN_STATS;
7913 }
7915 if (CHIP_NUM(bp) == CHIP_NUM_5709 && CHIP_REV(bp) != CHIP_REV_Ax) {
7916 if (pci_find_capability(pdev, PCI_CAP_ID_MSIX))
7917 bp->flags |= BNX2_FLAG_MSIX_CAP;
7918 }
7920 if (CHIP_ID(bp) != CHIP_ID_5706_A0 && CHIP_ID(bp) != CHIP_ID_5706_A1) {
7921 if (pci_find_capability(pdev, PCI_CAP_ID_MSI))
7922 bp->flags |= BNX2_FLAG_MSI_CAP;
7923 }
7925 /* 5708 cannot support DMA addresses > 40-bit. */
7926 if (CHIP_NUM(bp) == CHIP_NUM_5708)
7927 persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
7928 else
7929 persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
7931 /* Configure DMA attributes. */
7932 if (pci_set_dma_mask(pdev, dma_mask) == 0) {
7933 dev->features |= NETIF_F_HIGHDMA;
7934 rc = pci_set_consistent_dma_mask(pdev, persist_dma_mask);
7935 if (rc) {
7936 dev_err(&pdev->dev,
7937 "pci_set_consistent_dma_mask failed, aborting.\n");
7938 goto err_out_unmap;
7939 }
7940 } else if ((rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) != 0) {
7941 dev_err(&pdev->dev, "System does not support DMA, aborting.\n");
7942 goto err_out_unmap;
7943 }
7945 if (!(bp->flags & BNX2_FLAG_PCIE))
7946 bnx2_get_pci_speed(bp);
7948 /* 5706A0 may falsely detect SERR and PERR. */
7949 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
7950 reg = REG_RD(bp, PCI_COMMAND);
7951 reg &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
7952 REG_WR(bp, PCI_COMMAND, reg);
7953 }
7954 else if ((CHIP_ID(bp) == CHIP_ID_5706_A1) &&
7955 !(bp->flags & BNX2_FLAG_PCIX)) {
7957 dev_err(&pdev->dev,
7958 "5706 A1 can only be used in a PCIX bus, aborting.\n");
7959 goto err_out_unmap;
7960 }
7962 bnx2_init_nvram(bp);
7964 reg = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_SIGNATURE);
7966 if ((reg & BNX2_SHM_HDR_SIGNATURE_SIG_MASK) ==
7967 BNX2_SHM_HDR_SIGNATURE_SIG) {
7968 u32 off = PCI_FUNC(pdev->devfn) << 2;
7970 bp->shmem_base = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_ADDR_0 + off);
7971 } else
7972 bp->shmem_base = HOST_VIEW_SHMEM_BASE;
7974 /* Get the permanent MAC address. First we need to make sure the
7975 * firmware is actually running.
7976 */
7977 reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_SIGNATURE);
7979 if ((reg & BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
7980 BNX2_DEV_INFO_SIGNATURE_MAGIC) {
7981 dev_err(&pdev->dev, "Firmware not running, aborting.\n");
7982 rc = -ENODEV;
7983 goto err_out_unmap;
7984 }
7986 bnx2_read_vpd_fw_ver(bp);
7988 j = strlen(bp->fw_version);
7989 reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_BC_REV);
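/* A worked example of the formatting loop below: the three high bytes
 * of the BC_REV word become "bc <a>.<b>.<c>", so a hypothetical value
 * of 0x05000b00 would append "bc 5.0.11" to the version string.
 */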
7990 for (i = 0; i < 3 && j < 24; i++) {
7991 u8 num, k, skip0;
7993 if (i == 0) {
7994 bp->fw_version[j++] = 'b';
7995 bp->fw_version[j++] = 'c';
7996 bp->fw_version[j++] = ' ';
7997 }
7998 num = (u8) (reg >> (24 - (i * 8)));
7999 for (k = 100, skip0 = 1; k >= 1; num %= k, k /= 10) {
8000 if (num >= k || !skip0 || k == 1) {
8001 bp->fw_version[j++] = (num / k) + '0';
8002 skip0 = 0;
8003 }
8004 }
8005 if (i != 2)
8006 bp->fw_version[j++] = '.';
8007 }
8008 reg = bnx2_shmem_rd(bp, BNX2_PORT_FEATURE);
8009 if (reg & BNX2_PORT_FEATURE_WOL_ENABLED)
8010 bp->wol = 1;
8012 if (reg & BNX2_PORT_FEATURE_ASF_ENABLED) {
8013 bp->flags |= BNX2_FLAG_ASF_ENABLE;
8015 for (i = 0; i < 30; i++) {
8016 reg = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
8017 if (reg & BNX2_CONDITION_MFW_RUN_MASK)
8018 break;
8019 msleep(10);
8020 }
8022 reg = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
8023 reg &= BNX2_CONDITION_MFW_RUN_MASK;
8024 if (reg != BNX2_CONDITION_MFW_RUN_UNKNOWN &&
8025 reg != BNX2_CONDITION_MFW_RUN_NONE) {
8026 u32 addr = bnx2_shmem_rd(bp, BNX2_MFW_VER_PTR);
8028 if (j < 32)
8029 bp->fw_version[j++] = ' ';
8030 for (i = 0; i < 3 && j < 28; i++) {
8031 reg = bnx2_reg_rd_ind(bp, addr + i * 4);
8032 reg = swab32(reg);
8033 memcpy(&bp->fw_version[j], &reg, 4);
8034 j += 4;
8035 }
8036 }
8038 reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_MAC_UPPER);
8039 bp->mac_addr[0] = (u8) (reg >> 8);
8040 bp->mac_addr[1] = (u8) reg;
8042 reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_MAC_LOWER);
8043 bp->mac_addr[2] = (u8) (reg >> 24);
8044 bp->mac_addr[3] = (u8) (reg >> 16);
8045 bp->mac_addr[4] = (u8) (reg >> 8);
8046 bp->mac_addr[5] = (u8) reg;
8048 bp->tx_ring_size = MAX_TX_DESC_CNT;
8049 bnx2_set_rx_ring_size(bp, 255);
8051 bp->rx_csum = 1;
8053 bp->tx_quick_cons_trip_int = 2;
8054 bp->tx_quick_cons_trip = 20;
8055 bp->tx_ticks_int = 18;
8056 bp->tx_ticks = 80;
8058 bp->rx_quick_cons_trip_int = 2;
8059 bp->rx_quick_cons_trip = 12;
8060 bp->rx_ticks_int = 18;
8061 bp->rx_ticks = 18;
8063 bp->stats_ticks = USEC_PER_SEC & BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
8065 bp->current_interval = BNX2_TIMER_INTERVAL;
8067 bp->phy_addr = 1;
8069 /* Disable WOL support if we are running on a SERDES chip. */
8070 if (CHIP_NUM(bp) == CHIP_NUM_5709)
8071 bnx2_get_5709_media(bp);
8072 else if (CHIP_BOND_ID(bp) & CHIP_BOND_ID_SERDES_BIT)
8073 bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
8075 bp->phy_port = PORT_TP;
8076 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
8077 bp->phy_port = PORT_FIBRE;
8078 reg = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
8079 if (!(reg & BNX2_SHARED_HW_CFG_GIG_LINK_ON_VAUX)) {
8080 bp->flags |= BNX2_FLAG_NO_WOL;
8081 bp->wol = 0;
8082 }
8083 if (CHIP_NUM(bp) == CHIP_NUM_5706) {
8084 /* Don't do parallel detect on this board because of
8085 * some board problems. The link will not go down
8086 * if we do parallel detect.
8087 */
8088 if (pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
8089 pdev->subsystem_device == 0x310c)
8090 bp->phy_flags |= BNX2_PHY_FLAG_NO_PARALLEL;
8091 } else {
8092 bp->phy_addr = 2;
8093 if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G)
8094 bp->phy_flags |= BNX2_PHY_FLAG_2_5G_CAPABLE;
8095 }
8096 } else if (CHIP_NUM(bp) == CHIP_NUM_5706 ||
8097 CHIP_NUM(bp) == CHIP_NUM_5708)
8098 bp->phy_flags |= BNX2_PHY_FLAG_CRC_FIX;
8099 else if (CHIP_NUM(bp) == CHIP_NUM_5709 &&
8100 (CHIP_REV(bp) == CHIP_REV_Ax ||
8101 CHIP_REV(bp) == CHIP_REV_Bx))
8102 bp->phy_flags |= BNX2_PHY_FLAG_DIS_EARLY_DAC;
8104 bnx2_init_fw_cap(bp);
8106 if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
8107 (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
8108 (CHIP_ID(bp) == CHIP_ID_5708_B1) ||
8109 !(REG_RD(bp, BNX2_PCI_CONFIG_3) & BNX2_PCI_CONFIG_3_VAUX_PRESET)) {
8110 bp->flags |= BNX2_FLAG_NO_WOL;
8111 bp->wol = 0;
8112 }
8114 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
8115 bp->tx_quick_cons_trip_int =
8116 bp->tx_quick_cons_trip;
8117 bp->tx_ticks_int = bp->tx_ticks;
8118 bp->rx_quick_cons_trip_int =
8119 bp->rx_quick_cons_trip;
8120 bp->rx_ticks_int = bp->rx_ticks;
8121 bp->comp_prod_trip_int = bp->comp_prod_trip;
8122 bp->com_ticks_int = bp->com_ticks;
8123 bp->cmd_ticks_int = bp->cmd_ticks;
8124 }
8126 /* Disable MSI on 5706 if AMD 8132 bridge is found.
8128 * MSI is defined to be 32-bit write. The 5706 does 64-bit MSI writes
8129 * with byte enables disabled on the unused 32-bit word. This is legal
8130 * but causes problems on the AMD 8132 which will eventually stop
8131 * responding after a while.
8133 * AMD believes this incompatibility is unique to the 5706, and
8134 * prefers to locally disable MSI rather than globally disabling it.
8135 */
8136 if (CHIP_NUM(bp) == CHIP_NUM_5706 && disable_msi == 0) {
8137 struct pci_dev *amd_8132 = NULL;
8139 while ((amd_8132 = pci_get_device(PCI_VENDOR_ID_AMD,
8140 PCI_DEVICE_ID_AMD_8132_BRIDGE,
8141 amd_8132))) {
8143 if (amd_8132->revision >= 0x10 &&
8144 amd_8132->revision <= 0x13) {
8145 disable_msi = 1;
8146 pci_dev_put(amd_8132);
8147 break;
8148 }
8149 }
8150 }
8152 bnx2_set_default_link(bp);
8153 bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
8155 init_timer(&bp->timer);
8156 bp->timer.expires = RUN_AT(BNX2_TIMER_INTERVAL);
8157 bp->timer.data = (unsigned long) bp;
8158 bp->timer.function = bnx2_timer;
8160 return 0;
8162 err_out_unmap:
8163 if (bp->regview) {
8164 iounmap(bp->regview);
8165 bp->regview = NULL;
8166 }
8168 err_out_release:
8169 pci_release_regions(pdev);
8171 err_out_disable:
8172 pci_disable_device(pdev);
8173 pci_set_drvdata(pdev, NULL);
8175 err_out:
8176 return rc;
8177 }
8179 static char * __devinit
8180 bnx2_bus_string(struct bnx2 *bp, char *str)
8181 {
8182 char *s = str;
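/* Example outputs, per the flags gathered in bnx2_init_board(): a PCIe
 * device prints just "PCI Express", while e.g. BNX2_FLAG_PCIX on a
 * 64-bit 133 MHz bus yields "PCI-X 64-bit 133MHz".
 */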
8184 if (bp->flags & BNX2_FLAG_PCIE) {
8185 s += sprintf(s, "PCI Express");
8186 } else {
8187 s += sprintf(s, "PCI");
8188 if (bp->flags & BNX2_FLAG_PCIX)
8189 s += sprintf(s, "-X");
8190 if (bp->flags & BNX2_FLAG_PCI_32BIT)
8191 s += sprintf(s, " 32-bit");
8192 else
8193 s += sprintf(s, " 64-bit");
8194 s += sprintf(s, " %dMHz", bp->bus_speed_mhz);
8196 return str;
8199 static void __devinit
8200 bnx2_init_napi(struct bnx2 *bp)
8202 int i;
8204 for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
8205 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
8206 int (*poll)(struct napi_struct *, int);
8208 if (i == 0)
8209 poll = bnx2_poll;
8210 else
8211 poll = bnx2_poll_msix;
8213 netif_napi_add(bp->dev, &bp->bnx2_napi[i].napi, poll, 64);
8214 bnapi->bp = bp;
8215 }
8216 }
8218 static const struct net_device_ops bnx2_netdev_ops = {
8219 .ndo_open = bnx2_open,
8220 .ndo_start_xmit = bnx2_start_xmit,
8221 .ndo_stop = bnx2_close,
8222 .ndo_get_stats = bnx2_get_stats,
8223 .ndo_set_rx_mode = bnx2_set_rx_mode,
8224 .ndo_do_ioctl = bnx2_ioctl,
8225 .ndo_validate_addr = eth_validate_addr,
8226 .ndo_set_mac_address = bnx2_change_mac_addr,
8227 .ndo_change_mtu = bnx2_change_mtu,
8228 .ndo_tx_timeout = bnx2_tx_timeout,
8229 #ifdef BCM_VLAN
8230 .ndo_vlan_rx_register = bnx2_vlan_rx_register,
8231 #endif
8232 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
8233 .ndo_poll_controller = poll_bnx2,
8234 #endif
8235 };
8237 static inline void vlan_features_add(struct net_device *dev, unsigned long flags)
8238 {
8239 #ifdef BCM_VLAN
8240 dev->vlan_features |= flags;
8241 #endif
8242 }
8244 static int __devinit
8245 bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
8246 {
8247 static int version_printed = 0;
8248 struct net_device *dev = NULL;
8249 struct bnx2 *bp;
8250 int rc;
8251 char str[40];
8253 if (version_printed++ == 0)
8254 printk(KERN_INFO "%s", version);
8256 /* dev zeroed in init_etherdev */
8257 dev = alloc_etherdev_mq(sizeof(*bp), TX_MAX_RINGS);
8259 if (!dev)
8260 return -ENOMEM;
8262 rc = bnx2_init_board(pdev, dev);
8263 if (rc < 0) {
8264 free_netdev(dev);
8265 return rc;
8266 }
8268 dev->netdev_ops = &bnx2_netdev_ops;
8269 dev->watchdog_timeo = TX_TIMEOUT;
8270 dev->ethtool_ops = &bnx2_ethtool_ops;
8272 bp = netdev_priv(dev);
8273 bnx2_init_napi(bp);
8275 pci_set_drvdata(pdev, dev);
8277 rc = bnx2_request_firmware(bp);
8278 if (rc)
8279 goto error;
8281 memcpy(dev->dev_addr, bp->mac_addr, 6);
8282 memcpy(dev->perm_addr, bp->mac_addr, 6);
8284 dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
8285 vlan_features_add(dev, NETIF_F_IP_CSUM | NETIF_F_SG);
8286 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
8287 dev->features |= NETIF_F_IPV6_CSUM;
8288 vlan_features_add(dev, NETIF_F_IPV6_CSUM);
8289 }
8290 #ifdef BCM_VLAN
8291 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
8292 #endif
8293 dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
8294 vlan_features_add(dev, NETIF_F_TSO | NETIF_F_TSO_ECN);
8295 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
8296 dev->features |= NETIF_F_TSO6;
8297 vlan_features_add(dev, NETIF_F_TSO6);
8298 }
8299 if ((rc = register_netdev(dev))) {
8300 dev_err(&pdev->dev, "Cannot register net device\n");
8301 goto error;
8302 }
8304 printk(KERN_INFO "%s: %s (%c%d) %s found at mem %lx, "
8305 "IRQ %d, node addr %pM\n",
8306 dev->name,
8307 board_info[ent->driver_data].name,
8308 ((CHIP_ID(bp) & 0xf000) >> 12) + 'A',
8309 ((CHIP_ID(bp) & 0x0ff0) >> 4),
8310 bnx2_bus_string(bp, str),
8311 dev->base_addr,
8312 bp->pdev->irq, dev->dev_addr);
8314 return 0;
8316 error:
8317 if (bp->mips_firmware)
8318 release_firmware(bp->mips_firmware);
8319 if (bp->rv2p_firmware)
8320 release_firmware(bp->rv2p_firmware);
8322 if (bp->regview)
8323 iounmap(bp->regview);
8324 pci_release_regions(pdev);
8325 pci_disable_device(pdev);
8326 pci_set_drvdata(pdev, NULL);
8327 free_netdev(dev);
8328 return rc;
8329 }
8331 static void __devexit
8332 bnx2_remove_one(struct pci_dev *pdev)
8333 {
8334 struct net_device *dev = pci_get_drvdata(pdev);
8335 struct bnx2 *bp = netdev_priv(dev);
8337 flush_scheduled_work();
8339 unregister_netdev(dev);
8341 if (bp->mips_firmware)
8342 release_firmware(bp->mips_firmware);
8343 if (bp->rv2p_firmware)
8344 release_firmware(bp->rv2p_firmware);
8346 if (bp->regview)
8347 iounmap(bp->regview);
8349 free_netdev(dev);
8350 pci_release_regions(pdev);
8351 pci_disable_device(pdev);
8352 pci_set_drvdata(pdev, NULL);
8353 }
8355 static int
8356 bnx2_suspend(struct pci_dev *pdev, pm_message_t state)
8357 {
8358 struct net_device *dev = pci_get_drvdata(pdev);
8359 struct bnx2 *bp = netdev_priv(dev);
8361 /* PCI register 4 needs to be saved whether netif_running() or not.
8362 * MSI address and data need to be saved if using MSI and
8363 * netif_running().
8364 */
8365 pci_save_state(pdev);
8366 if (!netif_running(dev))
8367 return 0;
8369 flush_scheduled_work();
8370 bnx2_netif_stop(bp);
8371 netif_device_detach(dev);
8372 del_timer_sync(&bp->timer);
8373 bnx2_shutdown_chip(bp);
8374 bnx2_free_skbs(bp);
8375 bnx2_set_power_state(bp, pci_choose_state(pdev, state));
8376 return 0;
8377 }
8379 static int
8380 bnx2_resume(struct pci_dev *pdev)
8381 {
8382 struct net_device *dev = pci_get_drvdata(pdev);
8383 struct bnx2 *bp = netdev_priv(dev);
8385 pci_restore_state(pdev);
8386 if (!netif_running(dev))
8387 return 0;
8389 bnx2_set_power_state(bp, PCI_D0);
8390 netif_device_attach(dev);
8391 bnx2_init_nic(bp, 1);
8392 bnx2_netif_start(bp);
8393 return 0;
8394 }
8396 /**
8397 * bnx2_io_error_detected - called when PCI error is detected
8398 * @pdev: Pointer to PCI device
8399 * @state: The current pci connection state
8401 * This function is called after a PCI bus error affecting
8402 * this device has been detected.
8403 */
8404 static pci_ers_result_t bnx2_io_error_detected(struct pci_dev *pdev,
8405 pci_channel_state_t state)
8406 {
8407 struct net_device *dev = pci_get_drvdata(pdev);
8408 struct bnx2 *bp = netdev_priv(dev);
8410 rtnl_lock();
8411 netif_device_detach(dev);
8413 if (state == pci_channel_io_perm_failure) {
8414 rtnl_unlock();
8415 return PCI_ERS_RESULT_DISCONNECT;
8416 }
8418 if (netif_running(dev)) {
8419 bnx2_netif_stop(bp);
8420 del_timer_sync(&bp->timer);
8421 bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
8422 }
8424 pci_disable_device(pdev);
8425 rtnl_unlock();
8427 /* Request a slot reset. */
8428 return PCI_ERS_RESULT_NEED_RESET;
8429 }
8431 /**
8432 * bnx2_io_slot_reset - called after the pci bus has been reset.
8433 * @pdev: Pointer to PCI device
8435 * Restart the card from scratch, as if from a cold-boot.
8436 */
8437 static pci_ers_result_t bnx2_io_slot_reset(struct pci_dev *pdev)
8438 {
8439 struct net_device *dev = pci_get_drvdata(pdev);
8440 struct bnx2 *bp = netdev_priv(dev);
8442 rtnl_lock();
8443 if (pci_enable_device(pdev)) {
8444 dev_err(&pdev->dev,
8445 "Cannot re-enable PCI device after reset.\n");
8446 rtnl_unlock();
8447 return PCI_ERS_RESULT_DISCONNECT;
8448 }
8449 pci_set_master(pdev);
8450 pci_restore_state(pdev);
8451 pci_save_state(pdev);
8453 if (netif_running(dev)) {
8454 bnx2_set_power_state(bp, PCI_D0);
8455 bnx2_init_nic(bp, 1);
8456 }
8458 rtnl_unlock();
8459 return PCI_ERS_RESULT_RECOVERED;
8460 }
8462 /**
8463 * bnx2_io_resume - called when traffic can start flowing again.
8464 * @pdev: Pointer to PCI device
8466 * This callback is called when the error recovery driver tells us that
8467 * it's OK to resume normal operation.
8468 */
8469 static void bnx2_io_resume(struct pci_dev *pdev)
8470 {
8471 struct net_device *dev = pci_get_drvdata(pdev);
8472 struct bnx2 *bp = netdev_priv(dev);
8474 rtnl_lock();
8475 if (netif_running(dev))
8476 bnx2_netif_start(bp);
8478 netif_device_attach(dev);
8479 rtnl_unlock();
8480 }
8482 static struct pci_error_handlers bnx2_err_handler = {
8483 .error_detected = bnx2_io_error_detected,
8484 .slot_reset = bnx2_io_slot_reset,
8485 .resume = bnx2_io_resume,
8486 };
8488 static struct pci_driver bnx2_pci_driver = {
8489 .name = DRV_MODULE_NAME,
8490 .id_table = bnx2_pci_tbl,
8491 .probe = bnx2_init_one,
8492 .remove = __devexit_p(bnx2_remove_one),
8493 .suspend = bnx2_suspend,
8494 .resume = bnx2_resume,
8495 .err_handler = &bnx2_err_handler,
8496 };
8498 static int __init bnx2_init(void)
8499 {
8500 return pci_register_driver(&bnx2_pci_driver);
8501 }
8503 static void __exit bnx2_cleanup(void)
8504 {
8505 pci_unregister_driver(&bnx2_pci_driver);
8506 }
8508 module_init(bnx2_init);
8509 module_exit(bnx2_cleanup);