/* bnx2.c: Broadcom NX2 network driver.
 *
 * Copyright (c) 2004-2009 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Written by: Michael Chan  (mchan@broadcom.com)
 */

#include <linux/module.h>
#include <linux/moduleparam.h>

#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <asm/page.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
#define BCM_VLAN 1
#endif
#include <net/ip.h>
#include <net/tcp.h>
#include <net/checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/prefetch.h>
#include <linux/cache.h>
#include <linux/firmware.h>
#include <linux/log2.h>
#include <linux/list.h>

#if defined(CONFIG_CNIC) || defined(CONFIG_CNIC_MODULE)
#define BCM_CNIC 1
#include "cnic_if.h"
#endif
#include "bnx2.h"
#include "bnx2_fw.h"

#define DRV_MODULE_NAME		"bnx2"
#define PFX DRV_MODULE_NAME	": "
#define DRV_MODULE_VERSION	"2.0.1"
#define DRV_MODULE_RELDATE	"May 6, 2009"
#define FW_MIPS_FILE_06		"bnx2/bnx2-mips-06-4.6.16.fw"
#define FW_RV2P_FILE_06		"bnx2/bnx2-rv2p-06-4.6.16.fw"
#define FW_MIPS_FILE_09		"bnx2/bnx2-mips-09-4.6.17.fw"
#define FW_RV2P_FILE_09		"bnx2/bnx2-rv2p-09-4.6.15.fw"

#define RUN_AT(x) (jiffies + (x))

/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT  (5*HZ)

static char version[] __devinitdata =
	"Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708/5709/5716 Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FW_MIPS_FILE_06);
MODULE_FIRMWARE(FW_RV2P_FILE_06);
MODULE_FIRMWARE(FW_MIPS_FILE_09);
MODULE_FIRMWARE(FW_RV2P_FILE_09);

static int disable_msi = 0;

module_param(disable_msi, int, 0);
MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");

typedef enum {
	BCM5706 = 0,
	NC370T,
	NC370I,
	BCM5706S,
	NC370F,
	BCM5708,
	BCM5708S,
	BCM5709,
	BCM5709S,
	BCM5716,
	BCM5716S,
} board_t;

/* indexed by board_t, above */
static struct {
	char *name;
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM5706 1000Base-T" },
	{ "HP NC370T Multifunction Gigabit Server Adapter" },
	{ "HP NC370i Multifunction Gigabit Server Adapter" },
	{ "Broadcom NetXtreme II BCM5706 1000Base-SX" },
	{ "HP NC370F Multifunction Gigabit Server Adapter" },
	{ "Broadcom NetXtreme II BCM5708 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5708 1000Base-SX" },
	{ "Broadcom NetXtreme II BCM5709 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5709 1000Base-SX" },
	{ "Broadcom NetXtreme II BCM5716 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5716 1000Base-SX" },
	};

static DEFINE_PCI_DEVICE_TABLE(bnx2_pci_tbl) = {
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
	  PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709S },
	{ PCI_VENDOR_ID_BROADCOM, 0x163b,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5716 },
	{ PCI_VENDOR_ID_BROADCOM, 0x163c,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5716S },
	{ 0, }
};

static struct flash_spec flash_table[] =
{
#define BUFFERED_FLAGS		(BNX2_NV_BUFFERED | BNX2_NV_TRANSLATE)
#define NONBUFFERED_FLAGS	(BNX2_NV_WREN)
	/* Slow EEPROM */
	{0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
	 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - slow"},
	/* Expansion entry 0001 */
	{0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0001"},
	/* Saifun SA25F010 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
	 "Non-buffered flash (128kB)"},
	/* Saifun SA25F020 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
	 "Non-buffered flash (256kB)"},
	/* Expansion entry 0100 */
	{0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0100"},
	/* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
	{0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
	 "Entry 0101: ST M45PE10 (128kB non-buffered)"},
	/* Entry 0110: ST M45PE20 (non-buffered flash)*/
	{0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
	 "Entry 0110: ST M45PE20 (256kB non-buffered)"},
	/* Saifun SA25F005 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
	 "Non-buffered flash (64kB)"},
	/* Fast EEPROM */
	{0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
	 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - fast"},
	/* Expansion entry 1001 */
	{0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1001"},
	/* Expansion entry 1010 */
	{0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1010"},
	/* ATMEL AT45DB011B (buffered flash) */
	{0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
	 "Buffered flash (128kB)"},
	/* Expansion entry 1100 */
	{0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1100"},
	/* Expansion entry 1101 */
	{0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1101"},
	/* Atmel Expansion entry 1110 */
	{0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1110 (Atmel)"},
	/* ATMEL AT45DB021B (buffered flash) */
	{0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
	 "Buffered flash (256kB)"},
};

static struct flash_spec flash_5709 = {
	.flags		= BNX2_NV_BUFFERED,
	.page_bits	= BCM5709_FLASH_PAGE_BITS,
	.page_size	= BCM5709_FLASH_PAGE_SIZE,
	.addr_mask	= BCM5709_FLASH_BYTE_ADDR_MASK,
	.total_size	= BUFFERED_FLASH_TOTAL_SIZE*2,
	.name		= "5709 Buffered flash (256kB)",
};

MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);

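/* Note on the tx accounting below: the producer/consumer indices are
 * 16-bit values the hardware keeps mod 65536, so the difference is
 * masked with 0xffff; the TX_DESC_CNT/MAX_TX_DESC_CNT adjustment
 * accounts for the one unusable entry per 256-descriptor page.
 */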
static inline u32 bnx2_tx_avail(struct bnx2 *bp, struct bnx2_tx_ring_info *txr)
{
	u32 diff;

	smp_mb();

	/* The ring uses 256 indices for 255 entries, one of them
	 * needs to be skipped.
	 */
	diff = txr->tx_prod - txr->tx_cons;
	if (unlikely(diff >= TX_DESC_CNT)) {
		diff &= 0xffff;
		if (diff == TX_DESC_CNT)
			diff = MAX_TX_DESC_CNT;
	}
	return (bp->tx_ring_size - diff);
}

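/* Indirect register access: the PCICFG window register pair lets the
 * driver reach internal chip addresses that are not directly mapped.
 * indirect_lock serializes the address/data sequence, and these helpers
 * may run in bottom-half context, hence spin_lock_bh().
 */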
static u32
bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
{
	u32 val;

	spin_lock_bh(&bp->indirect_lock);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
	val = REG_RD(bp, BNX2_PCICFG_REG_WINDOW);
	spin_unlock_bh(&bp->indirect_lock);
	return val;
}

static void
bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
{
	spin_lock_bh(&bp->indirect_lock);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
	spin_unlock_bh(&bp->indirect_lock);
}

static void
bnx2_shmem_wr(struct bnx2 *bp, u32 offset, u32 val)
{
	bnx2_reg_wr_ind(bp, bp->shmem_base + offset, val);
}

static u32
bnx2_shmem_rd(struct bnx2 *bp, u32 offset)
{
	return (bnx2_reg_rd_ind(bp, bp->shmem_base + offset));
}

static void
bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
{
	offset += cid_addr;
	spin_lock_bh(&bp->indirect_lock);
	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		int i;

		REG_WR(bp, BNX2_CTX_CTX_DATA, val);
		REG_WR(bp, BNX2_CTX_CTX_CTRL,
		       offset | BNX2_CTX_CTX_CTRL_WRITE_REQ);
		for (i = 0; i < 5; i++) {
			val = REG_RD(bp, BNX2_CTX_CTX_CTRL);
			if ((val & BNX2_CTX_CTX_CTRL_WRITE_REQ) == 0)
				break;
			udelay(5);
		}
	} else {
		REG_WR(bp, BNX2_CTX_DATA_ADR, offset);
		REG_WR(bp, BNX2_CTX_DATA, val);
	}
	spin_unlock_bh(&bp->indirect_lock);
}

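/* Everything under BCM_CNIC below is the glue to the cnic offload
 * driver, which layers iSCSI offload support on top of this device.
 */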
#ifdef BCM_CNIC
static int
bnx2_drv_ctl(struct net_device *dev, struct drv_ctl_info *info)
{
	struct bnx2 *bp = netdev_priv(dev);
	struct drv_ctl_io *io = &info->data.io;

	switch (info->cmd) {
	case DRV_CTL_IO_WR_CMD:
		bnx2_reg_wr_ind(bp, io->offset, io->data);
		break;
	case DRV_CTL_IO_RD_CMD:
		io->data = bnx2_reg_rd_ind(bp, io->offset);
		break;
	case DRV_CTL_CTX_WR_CMD:
		bnx2_ctx_wr(bp, io->cid_addr, io->offset, io->data);
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

static void bnx2_setup_cnic_irq_info(struct bnx2 *bp)
{
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
	int sb_id;

	if (bp->flags & BNX2_FLAG_USING_MSIX) {
		cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
		bnapi->cnic_present = 0;
		sb_id = bp->irq_nvecs;
		cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
	} else {
		cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
		bnapi->cnic_tag = bnapi->last_status_idx;
		bnapi->cnic_present = 1;
		sb_id = 0;
		cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
	}

	cp->irq_arr[0].vector = bp->irq_tbl[sb_id].vector;
	cp->irq_arr[0].status_blk = (void *)
		((unsigned long) bnapi->status_blk.msi +
		(BNX2_SBLK_MSIX_ALIGN_SIZE * sb_id));
	cp->irq_arr[0].status_blk_num = sb_id;
	cp->num_irq = 1;
}

static int bnx2_register_cnic(struct net_device *dev, struct cnic_ops *ops,
			      void *data)
{
	struct bnx2 *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	if (ops == NULL)
		return -EINVAL;

	if (cp->drv_state & CNIC_DRV_STATE_REGD)
		return -EBUSY;

	bp->cnic_data = data;
	rcu_assign_pointer(bp->cnic_ops, ops);

	cp->num_irq = 0;
	cp->drv_state = CNIC_DRV_STATE_REGD;

	bnx2_setup_cnic_irq_info(bp);

	return 0;
}

static int bnx2_unregister_cnic(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	mutex_lock(&bp->cnic_lock);
	cp->drv_state = 0;
	bnapi->cnic_present = 0;
	rcu_assign_pointer(bp->cnic_ops, NULL);
	mutex_unlock(&bp->cnic_lock);
	synchronize_rcu();
	return 0;
}

struct cnic_eth_dev *bnx2_cnic_probe(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	cp->drv_owner = THIS_MODULE;
	cp->chip_id = bp->chip_id;
	cp->pdev = bp->pdev;
	cp->io_base = bp->regview;
	cp->drv_ctl = bnx2_drv_ctl;
	cp->drv_register_cnic = bnx2_register_cnic;
	cp->drv_unregister_cnic = bnx2_unregister_cnic;

	return cp;
}
EXPORT_SYMBOL(bnx2_cnic_probe);

static void
bnx2_cnic_stop(struct bnx2 *bp)
{
	struct cnic_ops *c_ops;
	struct cnic_ctl_info info;

	mutex_lock(&bp->cnic_lock);
	c_ops = bp->cnic_ops;
	if (c_ops) {
		info.cmd = CNIC_CTL_STOP_CMD;
		c_ops->cnic_ctl(bp->cnic_data, &info);
	}
	mutex_unlock(&bp->cnic_lock);
}

static void
bnx2_cnic_start(struct bnx2 *bp)
{
	struct cnic_ops *c_ops;
	struct cnic_ctl_info info;

	mutex_lock(&bp->cnic_lock);
	c_ops = bp->cnic_ops;
	if (c_ops) {
		if (!(bp->flags & BNX2_FLAG_USING_MSIX)) {
			struct bnx2_napi *bnapi = &bp->bnx2_napi[0];

			bnapi->cnic_tag = bnapi->last_status_idx;
		}
		info.cmd = CNIC_CTL_START_CMD;
		c_ops->cnic_ctl(bp->cnic_data, &info);
	}
	mutex_unlock(&bp->cnic_lock);
}

#else

static void
bnx2_cnic_stop(struct bnx2 *bp)
{
}

static void
bnx2_cnic_start(struct bnx2 *bp)
{
}

#endif

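/* MII access helpers.  When the EMAC is auto-polling the PHY, auto-poll
 * must be turned off around a manual MDIO transaction and restored
 * afterwards; the udelay(40) calls below give the MDIO block time to
 * settle after the mode change.
 */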
static int
bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
{
	u32 val1;
	int i, ret;

	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	val1 = (bp->phy_addr << 21) | (reg << 16) |
		BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
		BNX2_EMAC_MDIO_COMM_START_BUSY;
	REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

	for (i = 0; i < 50; i++) {
		udelay(10);

		val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
			udelay(5);

			val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
			val1 &= BNX2_EMAC_MDIO_COMM_DATA;

			break;
		}
	}

	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
		*val = 0x0;
		ret = -EBUSY;
	}
	else {
		*val = val1;
		ret = 0;
	}

	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	return ret;
}

static int
bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
{
	u32 val1;
	int i, ret;

	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	val1 = (bp->phy_addr << 21) | (reg << 16) | val |
		BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
		BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
	REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

	for (i = 0; i < 50; i++) {
		udelay(10);

		val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
			udelay(5);
			break;
		}
	}

	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
		ret = -EBUSY;
	else
		ret = 0;

	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	return ret;
}

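/* Interrupt control.  Writing BNX2_PCICFG_INT_ACK_CMD with MASK_INT set
 * masks a vector; writing it with a valid status-block index (and no
 * mask bit) unmasks it and acknowledges events up to that index.
 */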
static void
bnx2_disable_int(struct bnx2 *bp)
{
	int i;
	struct bnx2_napi *bnapi;

	for (i = 0; i < bp->irq_nvecs; i++) {
		bnapi = &bp->bnx2_napi[i];
		REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
		       BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
	}
	REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
}

static void
bnx2_enable_int(struct bnx2 *bp)
{
	int i;
	struct bnx2_napi *bnapi;

	for (i = 0; i < bp->irq_nvecs; i++) {
		bnapi = &bp->bnx2_napi[i];

		REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
		       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
		       BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
		       bnapi->last_status_idx);

		REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
		       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
		       bnapi->last_status_idx);
	}
	REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
}

static void
bnx2_disable_int_sync(struct bnx2 *bp)
{
	int i;

	atomic_inc(&bp->intr_sem);
	bnx2_disable_int(bp);
	for (i = 0; i < bp->irq_nvecs; i++)
		synchronize_irq(bp->irq_tbl[i].vector);
}

static void
bnx2_napi_disable(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->irq_nvecs; i++)
		napi_disable(&bp->bnx2_napi[i].napi);
}

static void
bnx2_napi_enable(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->irq_nvecs; i++)
		napi_enable(&bp->bnx2_napi[i].napi);
}

static void
bnx2_netif_stop(struct bnx2 *bp)
{
	bnx2_cnic_stop(bp);
	bnx2_disable_int_sync(bp);
	if (netif_running(bp->dev)) {
		bnx2_napi_disable(bp);
		netif_tx_disable(bp->dev);
		bp->dev->trans_start = jiffies;	/* prevent tx timeout */
	}
}

static void
bnx2_netif_start(struct bnx2 *bp)
{
	if (atomic_dec_and_test(&bp->intr_sem)) {
		if (netif_running(bp->dev)) {
			netif_tx_wake_all_queues(bp->dev);
			bnx2_napi_enable(bp);
			bnx2_enable_int(bp);
			bnx2_cnic_start(bp);
		}
	}
}

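/* Ring memory management.  Hardware descriptor rings live in coherent
 * DMA memory (pci_alloc_consistent); the parallel software rings that
 * track skbs and pages are plain kernel memory (kzalloc/vmalloc).
 */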
static void
bnx2_free_tx_mem(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->num_tx_rings; i++) {
		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
		struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;

		if (txr->tx_desc_ring) {
			pci_free_consistent(bp->pdev, TXBD_RING_SIZE,
					    txr->tx_desc_ring,
					    txr->tx_desc_mapping);
			txr->tx_desc_ring = NULL;
		}
		kfree(txr->tx_buf_ring);
		txr->tx_buf_ring = NULL;
	}
}

static void
bnx2_free_rx_mem(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->num_rx_rings; i++) {
		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
		struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
		int j;

		for (j = 0; j < bp->rx_max_ring; j++) {
			if (rxr->rx_desc_ring[j])
				pci_free_consistent(bp->pdev, RXBD_RING_SIZE,
						    rxr->rx_desc_ring[j],
						    rxr->rx_desc_mapping[j]);
			rxr->rx_desc_ring[j] = NULL;
		}
		vfree(rxr->rx_buf_ring);
		rxr->rx_buf_ring = NULL;

		for (j = 0; j < bp->rx_max_pg_ring; j++) {
			if (rxr->rx_pg_desc_ring[j])
				pci_free_consistent(bp->pdev, RXBD_RING_SIZE,
						    rxr->rx_pg_desc_ring[j],
						    rxr->rx_pg_desc_mapping[j]);
			rxr->rx_pg_desc_ring[j] = NULL;
		}
		vfree(rxr->rx_pg_ring);
		rxr->rx_pg_ring = NULL;
	}
}

static int
bnx2_alloc_tx_mem(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->num_tx_rings; i++) {
		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
		struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;

		txr->tx_buf_ring = kzalloc(SW_TXBD_RING_SIZE, GFP_KERNEL);
		if (txr->tx_buf_ring == NULL)
			return -ENOMEM;

		txr->tx_desc_ring =
			pci_alloc_consistent(bp->pdev, TXBD_RING_SIZE,
					     &txr->tx_desc_mapping);
		if (txr->tx_desc_ring == NULL)
			return -ENOMEM;
	}
	return 0;
}

static int
bnx2_alloc_rx_mem(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->num_rx_rings; i++) {
		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
		struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
		int j;

		rxr->rx_buf_ring =
			vmalloc(SW_RXBD_RING_SIZE * bp->rx_max_ring);
		if (rxr->rx_buf_ring == NULL)
			return -ENOMEM;

		memset(rxr->rx_buf_ring, 0,
		       SW_RXBD_RING_SIZE * bp->rx_max_ring);

		for (j = 0; j < bp->rx_max_ring; j++) {
			rxr->rx_desc_ring[j] =
				pci_alloc_consistent(bp->pdev, RXBD_RING_SIZE,
						     &rxr->rx_desc_mapping[j]);
			if (rxr->rx_desc_ring[j] == NULL)
				return -ENOMEM;
		}

		if (bp->rx_pg_ring_size) {
			rxr->rx_pg_ring = vmalloc(SW_RXPG_RING_SIZE *
						  bp->rx_max_pg_ring);
			if (rxr->rx_pg_ring == NULL)
				return -ENOMEM;

			memset(rxr->rx_pg_ring, 0, SW_RXPG_RING_SIZE *
			       bp->rx_max_pg_ring);
		}

		for (j = 0; j < bp->rx_max_pg_ring; j++) {
			rxr->rx_pg_desc_ring[j] =
				pci_alloc_consistent(bp->pdev, RXBD_RING_SIZE,
						&rxr->rx_pg_desc_mapping[j]);
			if (rxr->rx_pg_desc_ring[j] == NULL)
				return -ENOMEM;
		}
	}
	return 0;
}

static void
bnx2_free_mem(struct bnx2 *bp)
{
	int i;
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];

	bnx2_free_tx_mem(bp);
	bnx2_free_rx_mem(bp);

	for (i = 0; i < bp->ctx_pages; i++) {
		if (bp->ctx_blk[i]) {
			pci_free_consistent(bp->pdev, BCM_PAGE_SIZE,
					    bp->ctx_blk[i],
					    bp->ctx_blk_mapping[i]);
			bp->ctx_blk[i] = NULL;
		}
	}
	if (bnapi->status_blk.msi) {
		pci_free_consistent(bp->pdev, bp->status_stats_size,
				    bnapi->status_blk.msi,
				    bp->status_blk_mapping);
		bnapi->status_blk.msi = NULL;
		bp->stats_blk = NULL;
	}
}

static int
bnx2_alloc_mem(struct bnx2 *bp)
{
	int i, status_blk_size, err;
	struct bnx2_napi *bnapi;
	void *status_blk;

	/* Combine status and statistics blocks into one allocation. */
	status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
	if (bp->flags & BNX2_FLAG_MSIX_CAP)
		status_blk_size = L1_CACHE_ALIGN(BNX2_MAX_MSIX_HW_VEC *
						 BNX2_SBLK_MSIX_ALIGN_SIZE);
	bp->status_stats_size = status_blk_size +
				sizeof(struct statistics_block);

	status_blk = pci_alloc_consistent(bp->pdev, bp->status_stats_size,
					  &bp->status_blk_mapping);
	if (status_blk == NULL)
		goto alloc_mem_err;

	memset(status_blk, 0, bp->status_stats_size);

	bnapi = &bp->bnx2_napi[0];
	bnapi->status_blk.msi = status_blk;
	bnapi->hw_tx_cons_ptr =
		&bnapi->status_blk.msi->status_tx_quick_consumer_index0;
	bnapi->hw_rx_cons_ptr =
		&bnapi->status_blk.msi->status_rx_quick_consumer_index0;
	if (bp->flags & BNX2_FLAG_MSIX_CAP) {
		for (i = 1; i < BNX2_MAX_MSIX_VEC; i++) {
			struct status_block_msix *sblk;

			bnapi = &bp->bnx2_napi[i];

			sblk = (void *) (status_blk +
					 BNX2_SBLK_MSIX_ALIGN_SIZE * i);
			bnapi->status_blk.msix = sblk;
			bnapi->hw_tx_cons_ptr =
				&sblk->status_tx_quick_consumer_index;
			bnapi->hw_rx_cons_ptr =
				&sblk->status_rx_quick_consumer_index;
			bnapi->int_num = i << 24;
		}
	}

	bp->stats_blk = status_blk + status_blk_size;

	bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		bp->ctx_pages = 0x2000 / BCM_PAGE_SIZE;
		if (bp->ctx_pages == 0)
			bp->ctx_pages = 1;
		for (i = 0; i < bp->ctx_pages; i++) {
			bp->ctx_blk[i] = pci_alloc_consistent(bp->pdev,
						BCM_PAGE_SIZE,
						&bp->ctx_blk_mapping[i]);
			if (bp->ctx_blk[i] == NULL)
				goto alloc_mem_err;
		}
	}

	err = bnx2_alloc_rx_mem(bp);
	if (err)
		goto alloc_mem_err;

	err = bnx2_alloc_tx_mem(bp);
	if (err)
		goto alloc_mem_err;

	return 0;

alloc_mem_err:
	bnx2_free_mem(bp);
	return -ENOMEM;
}

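/* Link reporting.  The driver mirrors link state into shared memory
 * (BNX2_LINK_STATUS) so the bootcode/management firmware can see it;
 * bnx2_report_fw_link() encodes the resolved speed, duplex and
 * autoneg results into that word.
 */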
static void
bnx2_report_fw_link(struct bnx2 *bp)
{
	u32 fw_link_status = 0;

	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return;

	if (bp->link_up) {
		u32 bmsr;

		switch (bp->line_speed) {
		case SPEED_10:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_10HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_10FULL;
			break;
		case SPEED_100:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_100HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_100FULL;
			break;
		case SPEED_1000:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_1000HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_1000FULL;
			break;
		case SPEED_2500:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_2500HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_2500FULL;
			break;
		}

		fw_link_status |= BNX2_LINK_STATUS_LINK_UP;

		if (bp->autoneg) {
			fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;

			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);

			if (!(bmsr & BMSR_ANEGCOMPLETE) ||
			    bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)
				fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
			else
				fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
		}
	}
	else
		fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;

	bnx2_shmem_wr(bp, BNX2_LINK_STATUS, fw_link_status);
}

static char *
bnx2_xceiver_str(struct bnx2 *bp)
{
	return ((bp->phy_port == PORT_FIBRE) ? "SerDes" :
		((bp->phy_flags & BNX2_PHY_FLAG_SERDES) ? "Remote Copper" :
		 "Copper"));
}

static void
bnx2_report_link(struct bnx2 *bp)
{
	if (bp->link_up) {
		netif_carrier_on(bp->dev);
		printk(KERN_INFO PFX "%s NIC %s Link is Up, ", bp->dev->name,
		       bnx2_xceiver_str(bp));

		printk("%d Mbps ", bp->line_speed);

		if (bp->duplex == DUPLEX_FULL)
			printk("full duplex");
		else
			printk("half duplex");

		if (bp->flow_ctrl) {
			if (bp->flow_ctrl & FLOW_CTRL_RX) {
				printk(", receive ");
				if (bp->flow_ctrl & FLOW_CTRL_TX)
					printk("& transmit ");
			}
			else {
				printk(", transmit ");
			}
			printk("flow control ON");
		}
		printk("\n");
	}
	else {
		netif_carrier_off(bp->dev);
		printk(KERN_ERR PFX "%s NIC %s Link is Down\n", bp->dev->name,
		       bnx2_xceiver_str(bp));
	}

	bnx2_report_fw_link(bp);
}

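/* Pause (flow control) resolution follows the priority scheme of Table
 * 28B-3 in IEEE 802.3: both sides must advertise symmetric pause for
 * full tx+rx pause, while asymmetric-only combinations resolve to
 * one-way pause.
 */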
static void
bnx2_resolve_flow_ctrl(struct bnx2 *bp)
{
	u32 local_adv, remote_adv;

	bp->flow_ctrl = 0;
	if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
	    (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {

		if (bp->duplex == DUPLEX_FULL) {
			bp->flow_ctrl = bp->req_flow_ctrl;
		}
		return;
	}

	if (bp->duplex != DUPLEX_FULL) {
		return;
	}

	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5708)) {
		u32 val;

		bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
		if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_TX;
		if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_RX;
		return;
	}

	bnx2_read_phy(bp, bp->mii_adv, &local_adv);
	bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		u32 new_local_adv = 0;
		u32 new_remote_adv = 0;

		if (local_adv & ADVERTISE_1000XPAUSE)
			new_local_adv |= ADVERTISE_PAUSE_CAP;
		if (local_adv & ADVERTISE_1000XPSE_ASYM)
			new_local_adv |= ADVERTISE_PAUSE_ASYM;
		if (remote_adv & ADVERTISE_1000XPAUSE)
			new_remote_adv |= ADVERTISE_PAUSE_CAP;
		if (remote_adv & ADVERTISE_1000XPSE_ASYM)
			new_remote_adv |= ADVERTISE_PAUSE_ASYM;

		local_adv = new_local_adv;
		remote_adv = new_remote_adv;
	}

	/* See Table 28B-3 of 802.3ab-1999 spec. */
	if (local_adv & ADVERTISE_PAUSE_CAP) {
		if (local_adv & ADVERTISE_PAUSE_ASYM) {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
			else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
				bp->flow_ctrl = FLOW_CTRL_RX;
			}
		}
		else {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
		}
	}
	else if (local_adv & ADVERTISE_PAUSE_ASYM) {
		if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
		    (remote_adv & ADVERTISE_PAUSE_ASYM)) {

			bp->flow_ctrl = FLOW_CTRL_TX;
		}
	}
}

static int
bnx2_5709s_linkup(struct bnx2 *bp)
{
	u32 val, speed;

	bp->link_up = 1;

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_GP_STATUS);
	bnx2_read_phy(bp, MII_BNX2_GP_TOP_AN_STATUS1, &val);
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	if ((bp->autoneg & AUTONEG_SPEED) == 0) {
		bp->line_speed = bp->req_line_speed;
		bp->duplex = bp->req_duplex;
		return 0;
	}

	speed = val & MII_BNX2_GP_TOP_AN_SPEED_MSK;
	switch (speed) {
	case MII_BNX2_GP_TOP_AN_SPEED_10:
		bp->line_speed = SPEED_10;
		break;
	case MII_BNX2_GP_TOP_AN_SPEED_100:
		bp->line_speed = SPEED_100;
		break;
	case MII_BNX2_GP_TOP_AN_SPEED_1G:
	case MII_BNX2_GP_TOP_AN_SPEED_1GKV:
		bp->line_speed = SPEED_1000;
		break;
	case MII_BNX2_GP_TOP_AN_SPEED_2_5G:
		bp->line_speed = SPEED_2500;
		break;
	}

	if (val & MII_BNX2_GP_TOP_AN_FD)
		bp->duplex = DUPLEX_FULL;
	else
		bp->duplex = DUPLEX_HALF;
	return 0;
}

static int
bnx2_5708s_linkup(struct bnx2 *bp)
{
	u32 val;

	bp->link_up = 1;
	bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
	switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
	case BCM5708S_1000X_STAT1_SPEED_10:
		bp->line_speed = SPEED_10;
		break;
	case BCM5708S_1000X_STAT1_SPEED_100:
		bp->line_speed = SPEED_100;
		break;
	case BCM5708S_1000X_STAT1_SPEED_1G:
		bp->line_speed = SPEED_1000;
		break;
	case BCM5708S_1000X_STAT1_SPEED_2G5:
		bp->line_speed = SPEED_2500;
		break;
	}

	if (val & BCM5708S_1000X_STAT1_FD)
		bp->duplex = DUPLEX_FULL;
	else
		bp->duplex = DUPLEX_HALF;

	return 0;
}

static int
bnx2_5706s_linkup(struct bnx2 *bp)
{
	u32 bmcr, local_adv, remote_adv, common;

	bp->link_up = 1;
	bp->line_speed = SPEED_1000;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
	if (bmcr & BMCR_FULLDPLX) {
		bp->duplex = DUPLEX_FULL;
	}
	else {
		bp->duplex = DUPLEX_HALF;
	}

	if (!(bmcr & BMCR_ANENABLE)) {
		return 0;
	}

	bnx2_read_phy(bp, bp->mii_adv, &local_adv);
	bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

	common = local_adv & remote_adv;
	if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {

		if (common & ADVERTISE_1000XFULL) {
			bp->duplex = DUPLEX_FULL;
		}
		else {
			bp->duplex = DUPLEX_HALF;
		}
	}

	return 0;
}

static int
bnx2_copper_linkup(struct bnx2 *bp)
{
	u32 bmcr;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
	if (bmcr & BMCR_ANENABLE) {
		u32 local_adv, remote_adv, common;

		bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
		bnx2_read_phy(bp, MII_STAT1000, &remote_adv);

		common = local_adv & (remote_adv >> 2);
		if (common & ADVERTISE_1000FULL) {
			bp->line_speed = SPEED_1000;
			bp->duplex = DUPLEX_FULL;
		}
		else if (common & ADVERTISE_1000HALF) {
			bp->line_speed = SPEED_1000;
			bp->duplex = DUPLEX_HALF;
		}
		else {
			bnx2_read_phy(bp, bp->mii_adv, &local_adv);
			bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

			common = local_adv & remote_adv;
			if (common & ADVERTISE_100FULL) {
				bp->line_speed = SPEED_100;
				bp->duplex = DUPLEX_FULL;
			}
			else if (common & ADVERTISE_100HALF) {
				bp->line_speed = SPEED_100;
				bp->duplex = DUPLEX_HALF;
			}
			else if (common & ADVERTISE_10FULL) {
				bp->line_speed = SPEED_10;
				bp->duplex = DUPLEX_FULL;
			}
			else if (common & ADVERTISE_10HALF) {
				bp->line_speed = SPEED_10;
				bp->duplex = DUPLEX_HALF;
			}
			else {
				bp->line_speed = 0;
				bp->link_up = 0;
			}
		}
	}
	else {
		if (bmcr & BMCR_SPEED100) {
			bp->line_speed = SPEED_100;
		}
		else {
			bp->line_speed = SPEED_10;
		}
		if (bmcr & BMCR_FULLDPLX) {
			bp->duplex = DUPLEX_FULL;
		}
		else {
			bp->duplex = DUPLEX_HALF;
		}
	}

	return 0;
}

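/* RX context setup.  On the 5709, the low/high water marks written into
 * the rx context appear to govern when the chip asserts flow control as
 * the ring fills; they are scaled and clamped to the 4-bit hi_water
 * field below.
 */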
static void
bnx2_init_rx_context(struct bnx2 *bp, u32 cid)
{
	u32 val, rx_cid_addr = GET_CID_ADDR(cid);

	val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
	val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
	val |= 0x02 << 8;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		u32 lo_water, hi_water;

		if (bp->flow_ctrl & FLOW_CTRL_TX)
			lo_water = BNX2_L2CTX_LO_WATER_MARK_DEFAULT;
		else
			lo_water = BNX2_L2CTX_LO_WATER_MARK_DIS;
		if (lo_water >= bp->rx_ring_size)
			lo_water = 0;

		hi_water = bp->rx_ring_size / 4;

		if (hi_water <= lo_water)
			lo_water = 0;

		hi_water /= BNX2_L2CTX_HI_WATER_MARK_SCALE;
		lo_water /= BNX2_L2CTX_LO_WATER_MARK_SCALE;

		if (hi_water > 0xf)
			hi_water = 0xf;
		else if (hi_water == 0)
			lo_water = 0;
		val |= lo_water | (hi_water << BNX2_L2CTX_HI_WATER_MARK_SHIFT);
	}
	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_CTX_TYPE, val);
}

static void
bnx2_init_all_rx_contexts(struct bnx2 *bp)
{
	int i;
	u32 cid;

	for (i = 0, cid = RX_CID; i < bp->num_rx_rings; i++, cid++) {
		if (i == 1)
			cid = RX_RSS_CID;
		bnx2_init_rx_context(bp, cid);
	}
}

static void
bnx2_set_mac_link(struct bnx2 *bp)
{
	u32 val;

	REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
	if (bp->link_up && (bp->line_speed == SPEED_1000) &&
	    (bp->duplex == DUPLEX_HALF)) {
		REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
	}

	/* Configure the EMAC mode register. */
	val = REG_RD(bp, BNX2_EMAC_MODE);

	val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
		 BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
		 BNX2_EMAC_MODE_25G_MODE);

	if (bp->link_up) {
		switch (bp->line_speed) {
		case SPEED_10:
			if (CHIP_NUM(bp) != CHIP_NUM_5706) {
				val |= BNX2_EMAC_MODE_PORT_MII_10M;
				break;
			}
			/* fall through */
		case SPEED_100:
			val |= BNX2_EMAC_MODE_PORT_MII;
			break;
		case SPEED_2500:
			val |= BNX2_EMAC_MODE_25G_MODE;
			/* fall through */
		case SPEED_1000:
			val |= BNX2_EMAC_MODE_PORT_GMII;
			break;
		}
	}
	else {
		val |= BNX2_EMAC_MODE_PORT_GMII;
	}

	/* Set the MAC to operate in the appropriate duplex mode. */
	if (bp->duplex == DUPLEX_HALF)
		val |= BNX2_EMAC_MODE_HALF_DUPLEX;
	REG_WR(bp, BNX2_EMAC_MODE, val);

	/* Enable/disable rx PAUSE. */
	bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_RX)
		bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);

	/* Enable/disable tx PAUSE. */
	val = REG_RD(bp, BNX2_EMAC_TX_MODE);
	val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_TX)
		val |= BNX2_EMAC_TX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_TX_MODE, val);

	/* Acknowledge the interrupt. */
	REG_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_init_all_rx_contexts(bp);
}

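/* On the 5709 serdes PHY, the BMSR-equivalent status lives in the
 * GP_STATUS register block, so the block address must be switched
 * before and after reading bp->mii_bmsr1 (see bnx2_set_link()).
 */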
static void
bnx2_enable_bmsr1(struct bnx2 *bp)
{
	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5709))
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_GP_STATUS);
}

static void
bnx2_disable_bmsr1(struct bnx2 *bp)
{
	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5709))
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
}

static int
bnx2_test_and_enable_2g5(struct bnx2 *bp)
{
	u32 up1;
	int ret = 1;

	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
		return 0;

	if (bp->autoneg & AUTONEG_SPEED)
		bp->advertising |= ADVERTISED_2500baseX_Full;

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);

	bnx2_read_phy(bp, bp->mii_up1, &up1);
	if (!(up1 & BCM5708S_UP1_2G5)) {
		up1 |= BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, bp->mii_up1, up1);
		ret = 0;
	}

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	return ret;
}

static int
bnx2_test_and_disable_2g5(struct bnx2 *bp)
{
	u32 up1;
	int ret = 0;

	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
		return 0;

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);

	bnx2_read_phy(bp, bp->mii_up1, &up1);
	if (up1 & BCM5708S_UP1_2G5) {
		up1 &= ~BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, bp->mii_up1, up1);
		ret = 1;
	}

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	return ret;
}

static void
bnx2_enable_forced_2g5(struct bnx2 *bp)
{
	u32 bmcr;

	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
		return;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		u32 val;

		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_SERDES_DIG);
		bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
		val &= ~MII_BNX2_SD_MISC1_FORCE_MSK;
		val |= MII_BNX2_SD_MISC1_FORCE | MII_BNX2_SD_MISC1_FORCE_2_5G;
		bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);

		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		bmcr |= BCM5708S_BMCR_FORCE_2500;
	}

	if (bp->autoneg & AUTONEG_SPEED) {
		bmcr &= ~BMCR_ANENABLE;
		if (bp->req_duplex == DUPLEX_FULL)
			bmcr |= BMCR_FULLDPLX;
	}
	bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
}

static void
bnx2_disable_forced_2g5(struct bnx2 *bp)
{
	u32 bmcr;

	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
		return;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		u32 val;

		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_SERDES_DIG);
		bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
		val &= ~MII_BNX2_SD_MISC1_FORCE;
		bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);

		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		bmcr &= ~BCM5708S_BMCR_FORCE_2500;
	}

	if (bp->autoneg & AUTONEG_SPEED)
		bmcr |= BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_ANRESTART;
	bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
}

static void
bnx2_5706s_force_link_dn(struct bnx2 *bp, int start)
{
	u32 val;

	bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_SERDES_CTL);
	bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
	if (start)
		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val & 0xff0f);
	else
		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val | 0xc0);
}

static int
bnx2_set_link(struct bnx2 *bp)
{
	u32 bmsr;
	u8 link_up;

	if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) {
		bp->link_up = 1;
		return 0;
	}

	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return 0;

	link_up = bp->link_up;

	bnx2_enable_bmsr1(bp);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_disable_bmsr1(bp);

	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5706)) {
		u32 val, an_dbg;

		if (bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN) {
			bnx2_5706s_force_link_dn(bp, 0);
			bp->phy_flags &= ~BNX2_PHY_FLAG_FORCED_DOWN;
		}
		val = REG_RD(bp, BNX2_EMAC_STATUS);

		bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);

		if ((val & BNX2_EMAC_STATUS_LINK) &&
		    !(an_dbg & MISC_SHDW_AN_DBG_NOSYNC))
			bmsr |= BMSR_LSTATUS;
		else
			bmsr &= ~BMSR_LSTATUS;
	}

	if (bmsr & BMSR_LSTATUS) {
		bp->link_up = 1;

		if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
			if (CHIP_NUM(bp) == CHIP_NUM_5706)
				bnx2_5706s_linkup(bp);
			else if (CHIP_NUM(bp) == CHIP_NUM_5708)
				bnx2_5708s_linkup(bp);
			else if (CHIP_NUM(bp) == CHIP_NUM_5709)
				bnx2_5709s_linkup(bp);
		}
		else {
			bnx2_copper_linkup(bp);
		}
		bnx2_resolve_flow_ctrl(bp);
	}
	else {
		if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
		    (bp->autoneg & AUTONEG_SPEED))
			bnx2_disable_forced_2g5(bp);

		if (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT) {
			u32 bmcr;

			bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
			bmcr |= BMCR_ANENABLE;
			bnx2_write_phy(bp, bp->mii_bmcr, bmcr);

			bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
		}
		bp->link_up = 0;
	}

	if (bp->link_up != link_up) {
		bnx2_report_link(bp);
	}

	bnx2_set_mac_link(bp);

	return 0;
}

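/* PHY reset: BMCR_RESET is self-clearing, so poll it for up to
 * PHY_RESET_MAX_WAIT iterations (about 1 ms total with udelay(10))
 * before giving up.
 */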
static int
bnx2_reset_phy(struct bnx2 *bp)
{
	int i;
	u32 reg;

	bnx2_write_phy(bp, bp->mii_bmcr, BMCR_RESET);

#define PHY_RESET_MAX_WAIT 100
	for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
		udelay(10);

		bnx2_read_phy(bp, bp->mii_bmcr, &reg);
		if (!(reg & BMCR_RESET)) {
			udelay(20);
			break;
		}
	}
	if (i == PHY_RESET_MAX_WAIT) {
		return -EBUSY;
	}
	return 0;
}

static u32
bnx2_phy_get_pause_adv(struct bnx2 *bp)
{
	u32 adv = 0;

	if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
	    (FLOW_CTRL_RX | FLOW_CTRL_TX)) {

		if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
			adv = ADVERTISE_1000XPAUSE;
		}
		else {
			adv = ADVERTISE_PAUSE_CAP;
		}
	}
	else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
		if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
			adv = ADVERTISE_1000XPSE_ASYM;
		}
		else {
			adv = ADVERTISE_PAUSE_ASYM;
		}
	}
	else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
		if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
			adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
		}
		else {
			adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
		}
	}
	return adv;
}

static int bnx2_fw_sync(struct bnx2 *, u32, int, int);

static int
bnx2_setup_remote_phy(struct bnx2 *bp, u8 port)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
{
	u32 speed_arg = 0, pause_adv;

	pause_adv = bnx2_phy_get_pause_adv(bp);

	if (bp->autoneg & AUTONEG_SPEED) {
		speed_arg |= BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG;
		if (bp->advertising & ADVERTISED_10baseT_Half)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10HALF;
		if (bp->advertising & ADVERTISED_10baseT_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10FULL;
		if (bp->advertising & ADVERTISED_100baseT_Half)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100HALF;
		if (bp->advertising & ADVERTISED_100baseT_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100FULL;
		if (bp->advertising & ADVERTISED_1000baseT_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
		if (bp->advertising & ADVERTISED_2500baseX_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
	} else {
		if (bp->req_line_speed == SPEED_2500)
			speed_arg = BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
		else if (bp->req_line_speed == SPEED_1000)
			speed_arg = BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
		else if (bp->req_line_speed == SPEED_100) {
			if (bp->req_duplex == DUPLEX_FULL)
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100FULL;
			else
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100HALF;
		} else if (bp->req_line_speed == SPEED_10) {
			if (bp->req_duplex == DUPLEX_FULL)
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10FULL;
			else
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10HALF;
		}
	}

	if (pause_adv & (ADVERTISE_1000XPAUSE | ADVERTISE_PAUSE_CAP))
		speed_arg |= BNX2_NETLINK_SET_LINK_FC_SYM_PAUSE;
	if (pause_adv & (ADVERTISE_1000XPSE_ASYM | ADVERTISE_PAUSE_ASYM))
		speed_arg |= BNX2_NETLINK_SET_LINK_FC_ASYM_PAUSE;

	if (port == PORT_TP)
		speed_arg |= BNX2_NETLINK_SET_LINK_PHY_APP_REMOTE |
			     BNX2_NETLINK_SET_LINK_ETH_AT_WIRESPEED;

	bnx2_shmem_wr(bp, BNX2_DRV_MB_ARG0, speed_arg);

	spin_unlock_bh(&bp->phy_lock);
	bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_CMD_SET_LINK, 1, 0);
	spin_lock_bh(&bp->phy_lock);

	return 0;
}

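/* Serdes link setup.  Forced-speed configuration and autoneg take
 * separate paths below; in both cases the link may be briefly forced
 * down so that the partner notices the renegotiation.
 */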
static int
bnx2_setup_serdes_phy(struct bnx2 *bp, u8 port)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
{
	u32 adv, bmcr;
	u32 new_adv = 0;

	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return (bnx2_setup_remote_phy(bp, port));

	if (!(bp->autoneg & AUTONEG_SPEED)) {
		u32 new_bmcr;
		int force_link_down = 0;

		if (bp->req_line_speed == SPEED_2500) {
			if (!bnx2_test_and_enable_2g5(bp))
				force_link_down = 1;
		} else if (bp->req_line_speed == SPEED_1000) {
			if (bnx2_test_and_disable_2g5(bp))
				force_link_down = 1;
		}
		bnx2_read_phy(bp, bp->mii_adv, &adv);
		adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		new_bmcr = bmcr & ~BMCR_ANENABLE;
		new_bmcr |= BMCR_SPEED1000;

		if (CHIP_NUM(bp) == CHIP_NUM_5709) {
			if (bp->req_line_speed == SPEED_2500)
				bnx2_enable_forced_2g5(bp);
			else if (bp->req_line_speed == SPEED_1000) {
				bnx2_disable_forced_2g5(bp);
				new_bmcr &= ~0x2000;
			}

		} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
			if (bp->req_line_speed == SPEED_2500)
				new_bmcr |= BCM5708S_BMCR_FORCE_2500;
			else
				new_bmcr = bmcr & ~BCM5708S_BMCR_FORCE_2500;
		}

		if (bp->req_duplex == DUPLEX_FULL) {
			adv |= ADVERTISE_1000XFULL;
			new_bmcr |= BMCR_FULLDPLX;
		}
		else {
			adv |= ADVERTISE_1000XHALF;
			new_bmcr &= ~BMCR_FULLDPLX;
		}
		if ((new_bmcr != bmcr) || (force_link_down)) {
			/* Force a link down visible on the other side */
			if (bp->link_up) {
				bnx2_write_phy(bp, bp->mii_adv, adv &
					       ~(ADVERTISE_1000XFULL |
						 ADVERTISE_1000XHALF));
				bnx2_write_phy(bp, bp->mii_bmcr, bmcr |
					BMCR_ANRESTART | BMCR_ANENABLE);

				bp->link_up = 0;
				netif_carrier_off(bp->dev);
				bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
				bnx2_report_link(bp);
			}
			bnx2_write_phy(bp, bp->mii_adv, adv);
			bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
		} else {
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	bnx2_test_and_enable_2g5(bp);

	if (bp->advertising & ADVERTISED_1000baseT_Full)
		new_adv |= ADVERTISE_1000XFULL;

	new_adv |= bnx2_phy_get_pause_adv(bp);

	bnx2_read_phy(bp, bp->mii_adv, &adv);
	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	bp->serdes_an_pending = 0;
	if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
		/* Force a link down visible on the other side */
		if (bp->link_up) {
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
			spin_unlock_bh(&bp->phy_lock);
			msleep(20);
			spin_lock_bh(&bp->phy_lock);
		}

		bnx2_write_phy(bp, bp->mii_adv, new_adv);
		bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART |
			BMCR_ANENABLE);
		/* Speed up link-up time when the link partner
		 * does not autonegotiate which is very common
		 * in blade servers. Some blade servers use
		 * IPMI for keyboard input and it's important
		 * to minimize link disruptions. Autoneg. involves
		 * exchanging base pages plus 3 next pages and
		 * normally completes in about 120 msec.
		 */
		bp->current_interval = BNX2_SERDES_AN_TIMEOUT;
		bp->serdes_an_pending = 1;
		mod_timer(&bp->timer, jiffies + bp->current_interval);
	} else {
		bnx2_resolve_flow_ctrl(bp);
		bnx2_set_mac_link(bp);
	}

	return 0;
}

#define ETHTOOL_ALL_FIBRE_SPEED						\
	(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) ?			\
		(ADVERTISED_2500baseX_Full | ADVERTISED_1000baseT_Full) :\
		(ADVERTISED_1000baseT_Full)

#define ETHTOOL_ALL_COPPER_SPEED					\
	(ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |		\
	ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |		\
	ADVERTISED_1000baseT_Full)

#define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
	ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)

#define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)

static void
bnx2_set_default_remote_link(struct bnx2 *bp)
{
	u32 link;

	if (bp->phy_port == PORT_TP)
		link = bnx2_shmem_rd(bp, BNX2_RPHY_COPPER_LINK);
	else
		link = bnx2_shmem_rd(bp, BNX2_RPHY_SERDES_LINK);

	if (link & BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG) {
		bp->req_line_speed = 0;
		bp->autoneg |= AUTONEG_SPEED;
		bp->advertising = ADVERTISED_Autoneg;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
			bp->advertising |= ADVERTISED_10baseT_Half;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_10FULL)
			bp->advertising |= ADVERTISED_10baseT_Full;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
			bp->advertising |= ADVERTISED_100baseT_Half;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_100FULL)
			bp->advertising |= ADVERTISED_100baseT_Full;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
			bp->advertising |= ADVERTISED_1000baseT_Full;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
			bp->advertising |= ADVERTISED_2500baseX_Full;
	} else {
		bp->autoneg = 0;
		bp->advertising = 0;
		bp->req_duplex = DUPLEX_FULL;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_10) {
			bp->req_line_speed = SPEED_10;
			if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
				bp->req_duplex = DUPLEX_HALF;
		}
		if (link & BNX2_NETLINK_SET_LINK_SPEED_100) {
			bp->req_line_speed = SPEED_100;
			if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
				bp->req_duplex = DUPLEX_HALF;
		}
		if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
			bp->req_line_speed = SPEED_1000;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
			bp->req_line_speed = SPEED_2500;
	}
}

static void
bnx2_set_default_link(struct bnx2 *bp)
{
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
		bnx2_set_default_remote_link(bp);
		return;
	}

	bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
	bp->req_line_speed = 0;
	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		u32 reg;

		bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;

		reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG);
		reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
		if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
			bp->autoneg = 0;
			bp->req_line_speed = bp->line_speed = SPEED_1000;
			bp->req_duplex = DUPLEX_FULL;
		}
	} else
		bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
}

static void
bnx2_send_heart_beat(struct bnx2 *bp)
{
	u32 msg;
	u32 addr;

	spin_lock(&bp->indirect_lock);
	msg = (u32) (++bp->fw_drv_pulse_wr_seq & BNX2_DRV_PULSE_SEQ_MASK);
	addr = bp->shmem_base + BNX2_DRV_PULSE_MB;
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, addr);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW, msg);
	spin_unlock(&bp->indirect_lock);
}

static void
bnx2_remote_phy_event(struct bnx2 *bp)
{
	u32 msg;
	u8 link_up = bp->link_up;
	u8 old_port;

	msg = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);

	if (msg & BNX2_LINK_STATUS_HEART_BEAT_EXPIRED)
		bnx2_send_heart_beat(bp);

	msg &= ~BNX2_LINK_STATUS_HEART_BEAT_EXPIRED;

	if ((msg & BNX2_LINK_STATUS_LINK_UP) == BNX2_LINK_STATUS_LINK_DOWN)
		bp->link_up = 0;
	else {
		u32 speed;

		bp->link_up = 1;
		speed = msg & BNX2_LINK_STATUS_SPEED_MASK;
		bp->duplex = DUPLEX_FULL;
		switch (speed) {
		case BNX2_LINK_STATUS_10HALF:
			bp->duplex = DUPLEX_HALF;
			/* fall through */
		case BNX2_LINK_STATUS_10FULL:
			bp->line_speed = SPEED_10;
			break;
		case BNX2_LINK_STATUS_100HALF:
			bp->duplex = DUPLEX_HALF;
			/* fall through */
		case BNX2_LINK_STATUS_100BASE_T4:
		case BNX2_LINK_STATUS_100FULL:
			bp->line_speed = SPEED_100;
			break;
		case BNX2_LINK_STATUS_1000HALF:
			bp->duplex = DUPLEX_HALF;
			/* fall through */
		case BNX2_LINK_STATUS_1000FULL:
			bp->line_speed = SPEED_1000;
			break;
		case BNX2_LINK_STATUS_2500HALF:
			bp->duplex = DUPLEX_HALF;
			/* fall through */
		case BNX2_LINK_STATUS_2500FULL:
			bp->line_speed = SPEED_2500;
			break;
		default:
			bp->line_speed = 0;
			break;
		}

		bp->flow_ctrl = 0;
		if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
		    (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
			if (bp->duplex == DUPLEX_FULL)
				bp->flow_ctrl = bp->req_flow_ctrl;
		} else {
			if (msg & BNX2_LINK_STATUS_TX_FC_ENABLED)
				bp->flow_ctrl |= FLOW_CTRL_TX;
			if (msg & BNX2_LINK_STATUS_RX_FC_ENABLED)
				bp->flow_ctrl |= FLOW_CTRL_RX;
		}

		old_port = bp->phy_port;
		if (msg & BNX2_LINK_STATUS_SERDES_LINK)
			bp->phy_port = PORT_FIBRE;
		else
			bp->phy_port = PORT_TP;

		if (old_port != bp->phy_port)
			bnx2_set_default_link(bp);
	}

	if (bp->link_up != link_up)
		bnx2_report_link(bp);

	bnx2_set_mac_link(bp);
}

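/* Firmware link events arrive through the BNX2_FW_EVT_CODE_MB mailbox;
 * any event code other than a link event is answered with a heartbeat.
 */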
static int
bnx2_set_remote_link(struct bnx2 *bp)
{
	u32 evt_code;

	evt_code = bnx2_shmem_rd(bp, BNX2_FW_EVT_CODE_MB);
	switch (evt_code) {
	case BNX2_FW_EVT_CODE_LINK_EVENT:
		bnx2_remote_phy_event(bp);
		break;
	case BNX2_FW_EVT_CODE_SW_TIMER_EXPIRATION_EVENT:
	default:
		bnx2_send_heart_beat(bp);
		break;
	}
	return 0;
}

static int
bnx2_setup_copper_phy(struct bnx2 *bp)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
{
	u32 bmcr;
	u32 new_bmcr;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	if (bp->autoneg & AUTONEG_SPEED) {
		u32 adv_reg, adv1000_reg;
		u32 new_adv_reg = 0;
		u32 new_adv1000_reg = 0;

		bnx2_read_phy(bp, bp->mii_adv, &adv_reg);
		adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
			    ADVERTISE_PAUSE_ASYM);

		bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
		adv1000_reg &= PHY_ALL_1000_SPEED;

		if (bp->advertising & ADVERTISED_10baseT_Half)
			new_adv_reg |= ADVERTISE_10HALF;
		if (bp->advertising & ADVERTISED_10baseT_Full)
			new_adv_reg |= ADVERTISE_10FULL;
		if (bp->advertising & ADVERTISED_100baseT_Half)
			new_adv_reg |= ADVERTISE_100HALF;
		if (bp->advertising & ADVERTISED_100baseT_Full)
			new_adv_reg |= ADVERTISE_100FULL;
		if (bp->advertising & ADVERTISED_1000baseT_Full)
			new_adv1000_reg |= ADVERTISE_1000FULL;

		new_adv_reg |= ADVERTISE_CSMA;

		new_adv_reg |= bnx2_phy_get_pause_adv(bp);

		if ((adv1000_reg != new_adv1000_reg) ||
			(adv_reg != new_adv_reg) ||
			((bmcr & BMCR_ANENABLE) == 0)) {

			bnx2_write_phy(bp, bp->mii_adv, new_adv_reg);
			bnx2_write_phy(bp, MII_CTRL1000, new_adv1000_reg);
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_ANRESTART |
				BMCR_ANENABLE);
		}
		else if (bp->link_up) {
			/* Flow ctrl may have changed from auto to forced */
			/* or vice-versa. */

			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	new_bmcr = 0;
	if (bp->req_line_speed == SPEED_100) {
		new_bmcr |= BMCR_SPEED100;
	}
	if (bp->req_duplex == DUPLEX_FULL) {
		new_bmcr |= BMCR_FULLDPLX;
	}
	if (new_bmcr != bmcr) {
		u32 bmsr;

		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);

		if (bmsr & BMSR_LSTATUS) {
			/* Force link down */
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
			spin_unlock_bh(&bp->phy_lock);
			msleep(50);
			spin_lock_bh(&bp->phy_lock);

			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
		}

		bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);

		/* Normally, the new speed is setup after the link has
		 * gone down and up again. In some cases, link will not go
		 * down so we need to set up the new speed here.
		 */
		if (bmsr & BMSR_LSTATUS) {
			bp->line_speed = bp->req_line_speed;
			bp->duplex = bp->req_duplex;
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
	} else {
		bnx2_resolve_flow_ctrl(bp);
		bnx2_set_mac_link(bp);
	}
	return 0;
}

static int
bnx2_setup_phy(struct bnx2 *bp, u8 port)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
{
	if (bp->loopback == MAC_LOOPBACK)
		return 0;

	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		return (bnx2_setup_serdes_phy(bp, port));
	}
	else {
		return (bnx2_setup_copper_phy(bp));
	}
}

2150 static int
2151 bnx2_init_5709s_phy(struct bnx2 *bp, int reset_phy)
2153 u32 val;
2155 bp->mii_bmcr = MII_BMCR + 0x10;
2156 bp->mii_bmsr = MII_BMSR + 0x10;
2157 bp->mii_bmsr1 = MII_BNX2_GP_TOP_AN_STATUS1;
2158 bp->mii_adv = MII_ADVERTISE + 0x10;
2159 bp->mii_lpa = MII_LPA + 0x10;
2160 bp->mii_up1 = MII_BNX2_OVER1G_UP1;
2162 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_AER);
2163 bnx2_write_phy(bp, MII_BNX2_AER_AER, MII_BNX2_AER_AER_AN_MMD);
2165 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
2166 if (reset_phy)
2167 bnx2_reset_phy(bp);
2169 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_SERDES_DIG);
2171 bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, &val);
2172 val &= ~MII_BNX2_SD_1000XCTL1_AUTODET;
2173 val |= MII_BNX2_SD_1000XCTL1_FIBER;
2174 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, val);
2176 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
2177 bnx2_read_phy(bp, MII_BNX2_OVER1G_UP1, &val);
2178 if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
2179 val |= BCM5708S_UP1_2G5;
2180 else
2181 val &= ~BCM5708S_UP1_2G5;
2182 bnx2_write_phy(bp, MII_BNX2_OVER1G_UP1, val);
2184 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_BAM_NXTPG);
2185 bnx2_read_phy(bp, MII_BNX2_BAM_NXTPG_CTL, &val);
2186 val |= MII_BNX2_NXTPG_CTL_T2 | MII_BNX2_NXTPG_CTL_BAM;
2187 bnx2_write_phy(bp, MII_BNX2_BAM_NXTPG_CTL, val);
2189 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_CL73_USERB0);
2191 val = MII_BNX2_CL73_BAM_EN | MII_BNX2_CL73_BAM_STA_MGR_EN |
2192 MII_BNX2_CL73_BAM_NP_AFT_BP_EN;
2193 bnx2_write_phy(bp, MII_BNX2_CL73_BAM_CTL1, val);
2195 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
2197 return 0;
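/* Reviewer note on bnx2_init_5709s_phy(): the 5709 SerDes PHY is
 * bank-switched -- a write to MII_BNX2_BLK_ADDR selects which register
 * block the subsequent MII reads and writes hit, which is why every
 * group of accesses above is preceded by a block select and the routine
 * leaves COMBO_IEEEB0 selected on exit.  The IEEE-standard registers
 * live at offset +0x10 within that block, hence mii_bmcr, mii_bmsr,
 * mii_adv and mii_lpa being set to the standard MII offsets plus 0x10.
 */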
2200 static int
2201 bnx2_init_5708s_phy(struct bnx2 *bp, int reset_phy)
2203 u32 val;
2205 if (reset_phy)
2206 bnx2_reset_phy(bp);
2208 bp->mii_up1 = BCM5708S_UP1;
2210 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
2211 bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
2212 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
2214 bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
2215 val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
2216 bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);
2218 bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
2219 val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
2220 bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);
2222 if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) {
2223 bnx2_read_phy(bp, BCM5708S_UP1, &val);
2224 val |= BCM5708S_UP1_2G5;
2225 bnx2_write_phy(bp, BCM5708S_UP1, val);
2228 if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
2229 (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
2230 (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
2231 /* increase tx signal amplitude */
2232 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
2233 BCM5708S_BLK_ADDR_TX_MISC);
2234 bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
2235 val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
2236 bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
2237 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
2240 val = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG) &
2241 BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;
2243 if (val) {
2244 u32 is_backplane;
2246 is_backplane = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
2247 if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
2248 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
2249 BCM5708S_BLK_ADDR_TX_MISC);
2250 bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
2251 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
2252 BCM5708S_BLK_ADDR_DIG);
2255 return 0;
2258 static int
2259 bnx2_init_5706s_phy(struct bnx2 *bp, int reset_phy)
2261 if (reset_phy)
2262 bnx2_reset_phy(bp);
2264 bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
2266 if (CHIP_NUM(bp) == CHIP_NUM_5706)
2267 REG_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300);
2269 if (bp->dev->mtu > 1500) {
2270 u32 val;
2272 /* Set extended packet length bit */
2273 bnx2_write_phy(bp, 0x18, 0x7);
2274 bnx2_read_phy(bp, 0x18, &val);
2275 bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);
2277 bnx2_write_phy(bp, 0x1c, 0x6c00);
2278 bnx2_read_phy(bp, 0x1c, &val);
2279 bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
2281 else {
2282 u32 val;
2284 bnx2_write_phy(bp, 0x18, 0x7);
2285 bnx2_read_phy(bp, 0x18, &val);
2286 bnx2_write_phy(bp, 0x18, val & ~0x4007);
2288 bnx2_write_phy(bp, 0x1c, 0x6c00);
2289 bnx2_read_phy(bp, 0x1c, &val);
2290 bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
2293 return 0;
2296 static int
2297 bnx2_init_copper_phy(struct bnx2 *bp, int reset_phy)
2299 u32 val;
2301 if (reset_phy)
2302 bnx2_reset_phy(bp);
2304 if (bp->phy_flags & BNX2_PHY_FLAG_CRC_FIX) {
2305 bnx2_write_phy(bp, 0x18, 0x0c00);
2306 bnx2_write_phy(bp, 0x17, 0x000a);
2307 bnx2_write_phy(bp, 0x15, 0x310b);
2308 bnx2_write_phy(bp, 0x17, 0x201f);
2309 bnx2_write_phy(bp, 0x15, 0x9506);
2310 bnx2_write_phy(bp, 0x17, 0x401f);
2311 bnx2_write_phy(bp, 0x15, 0x14e2);
2312 bnx2_write_phy(bp, 0x18, 0x0400);
2315 if (bp->phy_flags & BNX2_PHY_FLAG_DIS_EARLY_DAC) {
2316 bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS,
2317 MII_BNX2_DSP_EXPAND_REG | 0x8);
2318 bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
2319 val &= ~(1 << 8);
2320 bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val);
2323 if (bp->dev->mtu > 1500) {
2324 /* Set extended packet length bit */
2325 bnx2_write_phy(bp, 0x18, 0x7);
2326 bnx2_read_phy(bp, 0x18, &val);
2327 bnx2_write_phy(bp, 0x18, val | 0x4000);
2329 bnx2_read_phy(bp, 0x10, &val);
2330 bnx2_write_phy(bp, 0x10, val | 0x1);
2332 else {
2333 bnx2_write_phy(bp, 0x18, 0x7);
2334 bnx2_read_phy(bp, 0x18, &val);
2335 bnx2_write_phy(bp, 0x18, val & ~0x4007);
2337 bnx2_read_phy(bp, 0x10, &val);
2338 bnx2_write_phy(bp, 0x10, val & ~0x1);
2341 /* ethernet@wirespeed */
2342 bnx2_write_phy(bp, 0x18, 0x7007);
2343 bnx2_read_phy(bp, 0x18, &val);
2344 bnx2_write_phy(bp, 0x18, val | (1 << 15) | (1 << 4));
2345 return 0;
2349 static int
2350 bnx2_init_phy(struct bnx2 *bp, int reset_phy)
2351 __releases(&bp->phy_lock)
2352 __acquires(&bp->phy_lock)
2354 u32 val;
2355 int rc = 0;
2357 bp->phy_flags &= ~BNX2_PHY_FLAG_INT_MODE_MASK;
2358 bp->phy_flags |= BNX2_PHY_FLAG_INT_MODE_LINK_READY;
2360 bp->mii_bmcr = MII_BMCR;
2361 bp->mii_bmsr = MII_BMSR;
2362 bp->mii_bmsr1 = MII_BMSR;
2363 bp->mii_adv = MII_ADVERTISE;
2364 bp->mii_lpa = MII_LPA;
2366 REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
2368 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
2369 goto setup_phy;
2371 bnx2_read_phy(bp, MII_PHYSID1, &val);
2372 bp->phy_id = val << 16;
2373 bnx2_read_phy(bp, MII_PHYSID2, &val);
2374 bp->phy_id |= val & 0xffff;
2376 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
2377 if (CHIP_NUM(bp) == CHIP_NUM_5706)
2378 rc = bnx2_init_5706s_phy(bp, reset_phy);
2379 else if (CHIP_NUM(bp) == CHIP_NUM_5708)
2380 rc = bnx2_init_5708s_phy(bp, reset_phy);
2381 else if (CHIP_NUM(bp) == CHIP_NUM_5709)
2382 rc = bnx2_init_5709s_phy(bp, reset_phy);
2384 else {
2385 rc = bnx2_init_copper_phy(bp, reset_phy);
2388 setup_phy:
2389 if (!rc)
2390 rc = bnx2_setup_phy(bp, bp->phy_port);
2392 return rc;
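/* Reviewer note on bnx2_init_phy(): the 32-bit PHY id is assembled
 * from the two MII ID registers as
 *
 *	phy_id = (PHYSID1 << 16) | (PHYSID2 & 0xffff);
 *
 * so, for example (illustrative values only), reads of 0x0020 and
 * 0x60b0 would yield a phy_id of 0x002060b0.  When the PHY is owned
 * by remote firmware (BNX2_PHY_FLAG_REMOTE_PHY_CAP) the local probe
 * and chip-specific init are skipped and only bnx2_setup_phy() runs.
 */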
2395 static int
2396 bnx2_set_mac_loopback(struct bnx2 *bp)
2398 u32 mac_mode;
2400 mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
2401 mac_mode &= ~BNX2_EMAC_MODE_PORT;
2402 mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
2403 REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
2404 bp->link_up = 1;
2405 return 0;
2408 static int bnx2_test_link(struct bnx2 *);
2410 static int
2411 bnx2_set_phy_loopback(struct bnx2 *bp)
2413 u32 mac_mode;
2414 int rc, i;
2416 spin_lock_bh(&bp->phy_lock);
2417 rc = bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK | BMCR_FULLDPLX |
2418 BMCR_SPEED1000);
2419 spin_unlock_bh(&bp->phy_lock);
2420 if (rc)
2421 return rc;
2423 for (i = 0; i < 10; i++) {
2424 if (bnx2_test_link(bp) == 0)
2425 break;
2426 msleep(100);
2429 mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
2430 mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
2431 BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
2432 BNX2_EMAC_MODE_25G_MODE);
2434 mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
2435 REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
2436 bp->link_up = 1;
2437 return 0;
2440 static int
2441 bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int ack, int silent)
2443 int i;
2444 u32 val;
2446 bp->fw_wr_seq++;
2447 msg_data |= bp->fw_wr_seq;
2449 bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);
2451 if (!ack)
2452 return 0;
2454 /* Wait for the firmware's acknowledgement. */
2455 for (i = 0; i < (BNX2_FW_ACK_TIME_OUT_MS / 10); i++) {
2456 msleep(10);
2458 val = bnx2_shmem_rd(bp, BNX2_FW_MB);
2460 if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
2461 break;
2463 if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
2464 return 0;
2466 /* If we timed out, inform the firmware that this is the case. */
2467 if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
2468 if (!silent)
2469 printk(KERN_ERR PFX "fw sync timeout, reset code = "
2470 "%x\n", msg_data);
2472 msg_data &= ~BNX2_DRV_MSG_CODE;
2473 msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;
2475 bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);
2477 return -EBUSY;
2480 if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
2481 return -EIO;
2483 return 0;
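/* Reviewer note on bnx2_fw_sync(): this is the driver-to-firmware
 * mailbox handshake.  Each message carries an incrementing sequence
 * number (masked by BNX2_DRV_MSG_SEQ) and is posted to the BNX2_DRV_MB
 * shared-memory word; when an ack is requested, the driver polls
 * BNX2_FW_MB until the firmware echoes the same sequence, giving up
 * after BNX2_FW_ACK_TIME_OUT_MS.  Two subtleties: WAIT0 messages are
 * best-effort (a timeout there still returns 0), and on a real timeout
 * the driver posts a FW_TIMEOUT message so the firmware knows the
 * handshake was abandoned.
 */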
2486 static int
2487 bnx2_init_5709_context(struct bnx2 *bp)
2489 int i, ret = 0;
2490 u32 val;
2492 val = BNX2_CTX_COMMAND_ENABLED | BNX2_CTX_COMMAND_MEM_INIT | (1 << 12);
2493 val |= (BCM_PAGE_BITS - 8) << 16;
2494 REG_WR(bp, BNX2_CTX_COMMAND, val);
2495 for (i = 0; i < 10; i++) {
2496 val = REG_RD(bp, BNX2_CTX_COMMAND);
2497 if (!(val & BNX2_CTX_COMMAND_MEM_INIT))
2498 break;
2499 udelay(2);
2501 if (val & BNX2_CTX_COMMAND_MEM_INIT)
2502 return -EBUSY;
2504 for (i = 0; i < bp->ctx_pages; i++) {
2505 int j;
2507 if (bp->ctx_blk[i])
2508 memset(bp->ctx_blk[i], 0, BCM_PAGE_SIZE);
2509 else
2510 return -ENOMEM;
2512 REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0,
2513 (bp->ctx_blk_mapping[i] & 0xffffffff) |
2514 BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID);
2515 REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA1,
2516 (u64) bp->ctx_blk_mapping[i] >> 32);
2517 REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL, i |
2518 BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
2519 for (j = 0; j < 10; j++) {
2521 val = REG_RD(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL);
2522 if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
2523 break;
2524 udelay(5);
2526 if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
2527 ret = -EBUSY;
2528 break;
2531 return ret;
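/* Reviewer note on bnx2_init_5709_context(): the 5709 keeps its
 * connection context in host memory rather than on-chip.  For each
 * BCM_PAGE_SIZE context block the DMA address is split into low and
 * high 32-bit halves (HOST_PAGE_TBL_DATA0/DATA1, with the VALID bit in
 * the low half) and committed by writing the page index plus WRITE_REQ
 * to HOST_PAGE_TBL_CTRL, then polling until the hardware clears the
 * request bit.
 */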
2534 static void
2535 bnx2_init_context(struct bnx2 *bp)
2537 u32 vcid;
2539 vcid = 96;
2540 while (vcid) {
2541 u32 vcid_addr, pcid_addr, offset;
2542 int i;
2544 vcid--;
2546 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
2547 u32 new_vcid;
2549 vcid_addr = GET_PCID_ADDR(vcid);
2550 if (vcid & 0x8) {
2551 new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
2553 else {
2554 new_vcid = vcid;
2556 pcid_addr = GET_PCID_ADDR(new_vcid);
2558 else {
2559 vcid_addr = GET_CID_ADDR(vcid);
2560 pcid_addr = vcid_addr;
2563 for (i = 0; i < (CTX_SIZE / PHY_CTX_SIZE); i++) {
2564 vcid_addr += (i << PHY_CTX_SHIFT);
2565 pcid_addr += (i << PHY_CTX_SHIFT);
2567 REG_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
2568 REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
2570 /* Zero out the context. */
2571 for (offset = 0; offset < PHY_CTX_SIZE; offset += 4)
2572 bnx2_ctx_wr(bp, vcid_addr, offset, 0);
2577 static int
2578 bnx2_alloc_bad_rbuf(struct bnx2 *bp)
2580 u16 *good_mbuf;
2581 u32 good_mbuf_cnt;
2582 u32 val;
2584 good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
2585 if (good_mbuf == NULL) {
2586 printk(KERN_ERR PFX "Failed to allocate memory in "
2587 "bnx2_alloc_bad_rbuf\n");
2588 return -ENOMEM;
2591 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
2592 BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);
2594 good_mbuf_cnt = 0;
2596 /* Allocate a bunch of mbufs and save the good ones in an array. */
2597 val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
2598 while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
2599 bnx2_reg_wr_ind(bp, BNX2_RBUF_COMMAND,
2600 BNX2_RBUF_COMMAND_ALLOC_REQ);
2602 val = bnx2_reg_rd_ind(bp, BNX2_RBUF_FW_BUF_ALLOC);
2604 val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;
2606 /* The addresses with Bit 9 set are bad memory blocks. */
2607 if (!(val & (1 << 9))) {
2608 good_mbuf[good_mbuf_cnt] = (u16) val;
2609 good_mbuf_cnt++;
2612 val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
2615 /* Free the good ones back to the mbuf pool, thus discarding
2616  * all the bad ones. */
2617 while (good_mbuf_cnt) {
2618 good_mbuf_cnt--;
2620 val = good_mbuf[good_mbuf_cnt];
2621 val = (val << 9) | val | 1;
2623 bnx2_reg_wr_ind(bp, BNX2_RBUF_FW_BUF_FREE, val);
2625 kfree(good_mbuf);
2626 return 0;
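/* Reviewer note on bnx2_alloc_bad_rbuf(): a workaround for chips whose
 * RX buffer memory contains defective clusters, which the hardware
 * flags with bit 9 of the allocated cluster address.  The routine
 * drains the firmware's free pool, remembers only the good clusters,
 * and frees just those back -- permanently leaking the bad ones so the
 * allocator can never hand them out again.  The value written to
 * RBUF_FW_BUF_FREE, (val << 9) | val | 1, appears to be the encoding
 * that register expects for a firmware-buffer free.
 */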
2629 static void
2630 bnx2_set_mac_addr(struct bnx2 *bp, u8 *mac_addr, u32 pos)
2632 u32 val;
2634 val = (mac_addr[0] << 8) | mac_addr[1];
2636 REG_WR(bp, BNX2_EMAC_MAC_MATCH0 + (pos * 8), val);
2638 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
2639 (mac_addr[4] << 8) | mac_addr[5];
2641 REG_WR(bp, BNX2_EMAC_MAC_MATCH1 + (pos * 8), val);
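/* Worked example for bnx2_set_mac_addr(): the six address octets are
 * split across a register pair spaced 8 bytes apart per entry.  For
 * the illustrative address 00:10:18:aa:bb:cc at pos 0:
 *
 *	BNX2_EMAC_MAC_MATCH0 = (0x00 << 8) | 0x10            = 0x00000010
 *	BNX2_EMAC_MAC_MATCH1 = (0x18 << 24) | (0xaa << 16) |
 *			       (0xbb << 8) | 0xcc            = 0x18aabbcc
 */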
2644 static inline int
2645 bnx2_alloc_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
2647 dma_addr_t mapping;
2648 struct sw_pg *rx_pg = &rxr->rx_pg_ring[index];
2649 struct rx_bd *rxbd =
2650 &rxr->rx_pg_desc_ring[RX_RING(index)][RX_IDX(index)];
2651 struct page *page = alloc_page(GFP_ATOMIC);
2653 if (!page)
2654 return -ENOMEM;
2655 mapping = pci_map_page(bp->pdev, page, 0, PAGE_SIZE,
2656 PCI_DMA_FROMDEVICE);
2657 if (pci_dma_mapping_error(bp->pdev, mapping)) {
2658 __free_page(page);
2659 return -EIO;
2662 rx_pg->page = page;
2663 pci_unmap_addr_set(rx_pg, mapping, mapping);
2664 rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
2665 rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
2666 return 0;
2669 static void
2670 bnx2_free_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
2672 struct sw_pg *rx_pg = &rxr->rx_pg_ring[index];
2673 struct page *page = rx_pg->page;
2675 if (!page)
2676 return;
2678 pci_unmap_page(bp->pdev, pci_unmap_addr(rx_pg, mapping), PAGE_SIZE,
2679 PCI_DMA_FROMDEVICE);
2681 __free_page(page);
2682 rx_pg->page = NULL;
2685 static inline int
2686 bnx2_alloc_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
2688 struct sk_buff *skb;
2689 struct sw_bd *rx_buf = &rxr->rx_buf_ring[index];
2690 dma_addr_t mapping;
2691 struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
2692 unsigned long align;
2694 skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
2695 if (skb == NULL) {
2696 return -ENOMEM;
2699 if (unlikely((align = (unsigned long) skb->data & (BNX2_RX_ALIGN - 1))))
2700 skb_reserve(skb, BNX2_RX_ALIGN - align);
2702 mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
2703 PCI_DMA_FROMDEVICE);
2704 if (pci_dma_mapping_error(bp->pdev, mapping)) {
2705 dev_kfree_skb(skb);
2706 return -EIO;
2709 rx_buf->skb = skb;
2710 pci_unmap_addr_set(rx_buf, mapping, mapping);
2712 rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
2713 rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
2715 rxr->rx_prod_bseq += bp->rx_buf_use_size;
2717 return 0;
2720 static int
2721 bnx2_phy_event_is_set(struct bnx2 *bp, struct bnx2_napi *bnapi, u32 event)
2723 struct status_block *sblk = bnapi->status_blk.msi;
2724 u32 new_link_state, old_link_state;
2725 int is_set = 1;
2727 new_link_state = sblk->status_attn_bits & event;
2728 old_link_state = sblk->status_attn_bits_ack & event;
2729 if (new_link_state != old_link_state) {
2730 if (new_link_state)
2731 REG_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD, event);
2732 else
2733 REG_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD, event);
2734 } else
2735 is_set = 0;
2737 return is_set;
2740 static void
2741 bnx2_phy_int(struct bnx2 *bp, struct bnx2_napi *bnapi)
2743 spin_lock(&bp->phy_lock);
2745 if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_LINK_STATE))
2746 bnx2_set_link(bp);
2747 if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_TIMER_ABORT))
2748 bnx2_set_remote_link(bp);
2750 spin_unlock(&bp->phy_lock);
2754 static inline u16
2755 bnx2_get_hw_tx_cons(struct bnx2_napi *bnapi)
2757 u16 cons;
2759 /* Tell compiler that status block fields can change. */
2760 barrier();
2761 cons = *bnapi->hw_tx_cons_ptr;
2762 barrier();
2763 if (unlikely((cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT))
2764 cons++;
2765 return cons;
2768 static int
2769 bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
2771 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
2772 u16 hw_cons, sw_cons, sw_ring_cons;
2773 int tx_pkt = 0, index;
2774 struct netdev_queue *txq;
2776 index = (bnapi - bp->bnx2_napi);
2777 txq = netdev_get_tx_queue(bp->dev, index);
2779 hw_cons = bnx2_get_hw_tx_cons(bnapi);
2780 sw_cons = txr->tx_cons;
2782 while (sw_cons != hw_cons) {
2783 struct sw_tx_bd *tx_buf;
2784 struct sk_buff *skb;
2785 int i, last;
2787 sw_ring_cons = TX_RING_IDX(sw_cons);
2789 tx_buf = &txr->tx_buf_ring[sw_ring_cons];
2790 skb = tx_buf->skb;
2792 /* prefetch skb_end_pointer() to speed up skb_shinfo(skb) */
2793 prefetch(&skb->end);
2795 /* partial BD completions possible with TSO packets */
2796 if (tx_buf->is_gso) {
2797 u16 last_idx, last_ring_idx;
2799 last_idx = sw_cons + tx_buf->nr_frags + 1;
2800 last_ring_idx = sw_ring_cons + tx_buf->nr_frags + 1;
2801 if (unlikely(last_ring_idx >= MAX_TX_DESC_CNT)) {
2802 last_idx++;
2804 if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
2805 break;
2809 skb_dma_unmap(&bp->pdev->dev, skb, DMA_TO_DEVICE);
2811 tx_buf->skb = NULL;
2812 last = tx_buf->nr_frags;
2814 for (i = 0; i < last; i++) {
2815 sw_cons = NEXT_TX_BD(sw_cons);
2818 sw_cons = NEXT_TX_BD(sw_cons);
2820 dev_kfree_skb(skb);
2821 tx_pkt++;
2822 if (tx_pkt == budget)
2823 break;
2825 if (hw_cons == sw_cons)
2826 hw_cons = bnx2_get_hw_tx_cons(bnapi);
2829 txr->hw_tx_cons = hw_cons;
2830 txr->tx_cons = sw_cons;
2832 /* Need to make the tx_cons update visible to bnx2_start_xmit()
2833 * before checking for netif_tx_queue_stopped(). Without the
2834 * memory barrier, there is a small possibility that bnx2_start_xmit()
2835 * will miss it and cause the queue to be stopped forever.
2837 smp_mb();
2839 if (unlikely(netif_tx_queue_stopped(txq)) &&
2840 (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)) {
2841 __netif_tx_lock(txq, smp_processor_id());
2842 if ((netif_tx_queue_stopped(txq)) &&
2843 (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh))
2844 netif_tx_wake_queue(txq);
2845 __netif_tx_unlock(txq);
2848 return tx_pkt;
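/* Reviewer note on the queue-wake logic above: it pairs with the stop
 * path in bnx2_start_xmit().  The producer stops the queue and then
 * re-checks ring space behind a matching barrier; the consumer here
 * updates tx_cons, issues smp_mb(), and only then tests the stopped
 * flag, with a second check of both conditions under
 * __netif_tx_lock().  The double-check means a wakeup can be slightly
 * delayed but never lost, so the queue cannot stall permanently.
 */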
2851 static void
2852 bnx2_reuse_rx_skb_pages(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
2853 struct sk_buff *skb, int count)
2855 struct sw_pg *cons_rx_pg, *prod_rx_pg;
2856 struct rx_bd *cons_bd, *prod_bd;
2857 int i;
2858 u16 hw_prod, prod;
2859 u16 cons = rxr->rx_pg_cons;
2861 cons_rx_pg = &rxr->rx_pg_ring[cons];
2863 /* The caller was unable to allocate a new page to replace the
2864 * last one in the frags array, so we need to recycle that page
2865 * and then free the skb.
2867 if (skb) {
2868 struct page *page;
2869 struct skb_shared_info *shinfo;
2871 shinfo = skb_shinfo(skb);
2872 shinfo->nr_frags--;
2873 page = shinfo->frags[shinfo->nr_frags].page;
2874 shinfo->frags[shinfo->nr_frags].page = NULL;
2876 cons_rx_pg->page = page;
2877 dev_kfree_skb(skb);
2880 hw_prod = rxr->rx_pg_prod;
2882 for (i = 0; i < count; i++) {
2883 prod = RX_PG_RING_IDX(hw_prod);
2885 prod_rx_pg = &rxr->rx_pg_ring[prod];
2886 cons_rx_pg = &rxr->rx_pg_ring[cons];
2887 cons_bd = &rxr->rx_pg_desc_ring[RX_RING(cons)][RX_IDX(cons)];
2888 prod_bd = &rxr->rx_pg_desc_ring[RX_RING(prod)][RX_IDX(prod)];
2890 if (prod != cons) {
2891 prod_rx_pg->page = cons_rx_pg->page;
2892 cons_rx_pg->page = NULL;
2893 pci_unmap_addr_set(prod_rx_pg, mapping,
2894 pci_unmap_addr(cons_rx_pg, mapping));
2896 prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
2897 prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
2900 cons = RX_PG_RING_IDX(NEXT_RX_BD(cons));
2901 hw_prod = NEXT_RX_BD(hw_prod);
2903 rxr->rx_pg_prod = hw_prod;
2904 rxr->rx_pg_cons = cons;
2907 static inline void
2908 bnx2_reuse_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
2909 struct sk_buff *skb, u16 cons, u16 prod)
2911 struct sw_bd *cons_rx_buf, *prod_rx_buf;
2912 struct rx_bd *cons_bd, *prod_bd;
2914 cons_rx_buf = &rxr->rx_buf_ring[cons];
2915 prod_rx_buf = &rxr->rx_buf_ring[prod];
2917 pci_dma_sync_single_for_device(bp->pdev,
2918 pci_unmap_addr(cons_rx_buf, mapping),
2919 BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
2921 rxr->rx_prod_bseq += bp->rx_buf_use_size;
2923 prod_rx_buf->skb = skb;
2925 if (cons == prod)
2926 return;
2928 pci_unmap_addr_set(prod_rx_buf, mapping,
2929 pci_unmap_addr(cons_rx_buf, mapping));
2931 cons_bd = &rxr->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
2932 prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
2933 prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
2934 prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
2937 static int
2938 bnx2_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, struct sk_buff *skb,
2939 unsigned int len, unsigned int hdr_len, dma_addr_t dma_addr,
2940 u32 ring_idx)
2942 int err;
2943 u16 prod = ring_idx & 0xffff;
2945 err = bnx2_alloc_rx_skb(bp, rxr, prod);
2946 if (unlikely(err)) {
2947 bnx2_reuse_rx_skb(bp, rxr, skb, (u16) (ring_idx >> 16), prod);
2948 if (hdr_len) {
2949 unsigned int raw_len = len + 4;
2950 int pages = PAGE_ALIGN(raw_len - hdr_len) >> PAGE_SHIFT;
2952 bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages);
2954 return err;
2957 skb_reserve(skb, BNX2_RX_OFFSET);
2958 pci_unmap_single(bp->pdev, dma_addr, bp->rx_buf_use_size,
2959 PCI_DMA_FROMDEVICE);
2961 if (hdr_len == 0) {
2962 skb_put(skb, len);
2963 return 0;
2964 } else {
2965 unsigned int i, frag_len, frag_size, pages;
2966 struct sw_pg *rx_pg;
2967 u16 pg_cons = rxr->rx_pg_cons;
2968 u16 pg_prod = rxr->rx_pg_prod;
2970 frag_size = len + 4 - hdr_len;
2971 pages = PAGE_ALIGN(frag_size) >> PAGE_SHIFT;
2972 skb_put(skb, hdr_len);
2974 for (i = 0; i < pages; i++) {
2975 dma_addr_t mapping_old;
2977 frag_len = min(frag_size, (unsigned int) PAGE_SIZE);
2978 if (unlikely(frag_len <= 4)) {
2979 unsigned int tail = 4 - frag_len;
2981 rxr->rx_pg_cons = pg_cons;
2982 rxr->rx_pg_prod = pg_prod;
2983 bnx2_reuse_rx_skb_pages(bp, rxr, NULL,
2984 pages - i);
2985 skb->len -= tail;
2986 if (i == 0) {
2987 skb->tail -= tail;
2988 } else {
2989 skb_frag_t *frag =
2990 &skb_shinfo(skb)->frags[i - 1];
2991 frag->size -= tail;
2992 skb->data_len -= tail;
2993 skb->truesize -= tail;
2995 return 0;
2997 rx_pg = &rxr->rx_pg_ring[pg_cons];
2999 /* Don't unmap yet. If we're unable to allocate a new
3000 * page, we need to recycle the page and the DMA addr.
3002 mapping_old = pci_unmap_addr(rx_pg, mapping);
3003 if (i == pages - 1)
3004 frag_len -= 4;
3006 skb_fill_page_desc(skb, i, rx_pg->page, 0, frag_len);
3007 rx_pg->page = NULL;
3009 err = bnx2_alloc_rx_page(bp, rxr,
3010 RX_PG_RING_IDX(pg_prod));
3011 if (unlikely(err)) {
3012 rxr->rx_pg_cons = pg_cons;
3013 rxr->rx_pg_prod = pg_prod;
3014 bnx2_reuse_rx_skb_pages(bp, rxr, skb,
3015 pages - i);
3016 return err;
3019 pci_unmap_page(bp->pdev, mapping_old,
3020 PAGE_SIZE, PCI_DMA_FROMDEVICE);
3022 frag_size -= frag_len;
3023 skb->data_len += frag_len;
3024 skb->truesize += frag_len;
3025 skb->len += frag_len;
3027 pg_prod = NEXT_RX_BD(pg_prod);
3028 pg_cons = RX_PG_RING_IDX(NEXT_RX_BD(pg_cons));
3030 rxr->rx_pg_prod = pg_prod;
3031 rxr->rx_pg_cons = pg_cons;
3033 return 0;
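/* Reviewer note on bnx2_rx_skb(): for split (jumbo) packets the first
 * hdr_len bytes stay in the linear skb and the remainder arrives as
 * page frags taken from the page ring.  The "+ 4" in frag_size/raw_len
 * covers the 4-byte FCS the chip appends to the frame; it is trimmed
 * off the final fragment (frag_len -= 4 on the last page), and the
 * frag_len <= 4 path backs any FCS bytes that spilled into an extra
 * page out of the skb's length accounting.
 */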
3036 static inline u16
3037 bnx2_get_hw_rx_cons(struct bnx2_napi *bnapi)
3039 u16 cons;
3041 /* Tell compiler that status block fields can change. */
3042 barrier();
3043 cons = *bnapi->hw_rx_cons_ptr;
3044 barrier();
3045 if (unlikely((cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT))
3046 cons++;
3047 return cons;
3050 static int
3051 bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
3053 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
3054 u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
3055 struct l2_fhdr *rx_hdr;
3056 int rx_pkt = 0, pg_ring_used = 0;
3058 hw_cons = bnx2_get_hw_rx_cons(bnapi);
3059 sw_cons = rxr->rx_cons;
3060 sw_prod = rxr->rx_prod;
3062 /* Memory barrier necessary as speculative reads of the rx
3063 * buffer can be ahead of the index in the status block
3065 rmb();
3066 while (sw_cons != hw_cons) {
3067 unsigned int len, hdr_len;
3068 u32 status;
3069 struct sw_bd *rx_buf;
3070 struct sk_buff *skb;
3071 dma_addr_t dma_addr;
3072 u16 vtag = 0;
3073 int hw_vlan __maybe_unused = 0;
3075 sw_ring_cons = RX_RING_IDX(sw_cons);
3076 sw_ring_prod = RX_RING_IDX(sw_prod);
3078 rx_buf = &rxr->rx_buf_ring[sw_ring_cons];
3079 skb = rx_buf->skb;
3081 rx_buf->skb = NULL;
3083 dma_addr = pci_unmap_addr(rx_buf, mapping);
3085 pci_dma_sync_single_for_cpu(bp->pdev, dma_addr,
3086 BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH,
3087 PCI_DMA_FROMDEVICE);
3089 rx_hdr = (struct l2_fhdr *) skb->data;
3090 len = rx_hdr->l2_fhdr_pkt_len;
3091 status = rx_hdr->l2_fhdr_status;
3093 hdr_len = 0;
3094 if (status & L2_FHDR_STATUS_SPLIT) {
3095 hdr_len = rx_hdr->l2_fhdr_ip_xsum;
3096 pg_ring_used = 1;
3097 } else if (len > bp->rx_jumbo_thresh) {
3098 hdr_len = bp->rx_jumbo_thresh;
3099 pg_ring_used = 1;
3102 if (unlikely(status & (L2_FHDR_ERRORS_BAD_CRC |
3103 L2_FHDR_ERRORS_PHY_DECODE |
3104 L2_FHDR_ERRORS_ALIGNMENT |
3105 L2_FHDR_ERRORS_TOO_SHORT |
3106 L2_FHDR_ERRORS_GIANT_FRAME))) {
3108 bnx2_reuse_rx_skb(bp, rxr, skb, sw_ring_cons,
3109 sw_ring_prod);
3110 if (pg_ring_used) {
3111 int pages;
3113 pages = PAGE_ALIGN(len - hdr_len) >> PAGE_SHIFT;
3115 bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages);
3117 goto next_rx;
3120 len -= 4;
3122 if (len <= bp->rx_copy_thresh) {
3123 struct sk_buff *new_skb;
3125 new_skb = netdev_alloc_skb(bp->dev, len + 6);
3126 if (new_skb == NULL) {
3127 bnx2_reuse_rx_skb(bp, rxr, skb, sw_ring_cons,
3128 sw_ring_prod);
3129 goto next_rx;
3132 /* aligned copy */
3133 skb_copy_from_linear_data_offset(skb,
3134 BNX2_RX_OFFSET - 6,
3135 new_skb->data, len + 6);
3136 skb_reserve(new_skb, 6);
3137 skb_put(new_skb, len);
3139 bnx2_reuse_rx_skb(bp, rxr, skb,
3140 sw_ring_cons, sw_ring_prod);
3142 skb = new_skb;
3143 } else if (unlikely(bnx2_rx_skb(bp, rxr, skb, len, hdr_len,
3144 dma_addr, (sw_ring_cons << 16) | sw_ring_prod)))
3145 goto next_rx;
3147 if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) &&
3148 !(bp->rx_mode & BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG)) {
3149 vtag = rx_hdr->l2_fhdr_vlan_tag;
3150 #ifdef BCM_VLAN
3151 if (bp->vlgrp)
3152 hw_vlan = 1;
3153 else
3154 #endif
3156 struct vlan_ethhdr *ve = (struct vlan_ethhdr *)
3157 __skb_push(skb, 4);
3159 memmove(ve, skb->data + 4, ETH_ALEN * 2);
3160 ve->h_vlan_proto = htons(ETH_P_8021Q);
3161 ve->h_vlan_TCI = htons(vtag);
3162 len += 4;
3166 skb->protocol = eth_type_trans(skb, bp->dev);
3168 if ((len > (bp->dev->mtu + ETH_HLEN)) &&
3169 (ntohs(skb->protocol) != 0x8100)) {
3171 dev_kfree_skb(skb);
3172 goto next_rx;
3176 skb->ip_summed = CHECKSUM_NONE;
3177 if (bp->rx_csum &&
3178 (status & (L2_FHDR_STATUS_TCP_SEGMENT |
3179 L2_FHDR_STATUS_UDP_DATAGRAM))) {
3181 if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
3182 L2_FHDR_ERRORS_UDP_XSUM)) == 0))
3183 skb->ip_summed = CHECKSUM_UNNECESSARY;
3186 skb_record_rx_queue(skb, bnapi - &bp->bnx2_napi[0]);
3188 #ifdef BCM_VLAN
3189 if (hw_vlan)
3190 vlan_hwaccel_receive_skb(skb, bp->vlgrp, vtag);
3191 else
3192 #endif
3193 netif_receive_skb(skb);
3195 rx_pkt++;
3197 next_rx:
3198 sw_cons = NEXT_RX_BD(sw_cons);
3199 sw_prod = NEXT_RX_BD(sw_prod);
3201 if (rx_pkt == budget)
3202 break;
3204 /* Refresh hw_cons to see if there is new work */
3205 if (sw_cons == hw_cons) {
3206 hw_cons = bnx2_get_hw_rx_cons(bnapi);
3207 rmb();
3210 rxr->rx_cons = sw_cons;
3211 rxr->rx_prod = sw_prod;
3213 if (pg_ring_used)
3214 REG_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);
3216 REG_WR16(bp, rxr->rx_bidx_addr, sw_prod);
3218 REG_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);
3220 mmiowb();
3222 return rx_pkt;
3226 /* MSI ISR - The only difference between this and the INTx ISR
3227 * is that the MSI interrupt is always serviced.
3229 static irqreturn_t
3230 bnx2_msi(int irq, void *dev_instance)
3232 struct bnx2_napi *bnapi = dev_instance;
3233 struct bnx2 *bp = bnapi->bp;
3235 prefetch(bnapi->status_blk.msi);
3236 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3237 BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
3238 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
3240 /* Return here if interrupt is disabled. */
3241 if (unlikely(atomic_read(&bp->intr_sem) != 0))
3242 return IRQ_HANDLED;
3244 napi_schedule(&bnapi->napi);
3246 return IRQ_HANDLED;
3249 static irqreturn_t
3250 bnx2_msi_1shot(int irq, void *dev_instance)
3252 struct bnx2_napi *bnapi = dev_instance;
3253 struct bnx2 *bp = bnapi->bp;
3255 prefetch(bnapi->status_blk.msi);
3257 /* Return here if interrupt is disabled. */
3258 if (unlikely(atomic_read(&bp->intr_sem) != 0))
3259 return IRQ_HANDLED;
3261 napi_schedule(&bnapi->napi);
3263 return IRQ_HANDLED;
3266 static irqreturn_t
3267 bnx2_interrupt(int irq, void *dev_instance)
3269 struct bnx2_napi *bnapi = dev_instance;
3270 struct bnx2 *bp = bnapi->bp;
3271 struct status_block *sblk = bnapi->status_blk.msi;
3273 /* When using INTx, it is possible for the interrupt to arrive
3274 * at the CPU before the status block posted prior to the
3275 * interrupt. Reading a register will flush the status block.
3276 * When using MSI, the MSI message will always complete after
3277 * the status block write.
3279 if ((sblk->status_idx == bnapi->last_status_idx) &&
3280 (REG_RD(bp, BNX2_PCICFG_MISC_STATUS) &
3281 BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
3282 return IRQ_NONE;
3284 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3285 BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
3286 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
3288 /* Read back to deassert IRQ immediately to avoid too many
3289 * spurious interrupts.
3291 REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
3293 /* Return here if interrupt is shared and is disabled. */
3294 if (unlikely(atomic_read(&bp->intr_sem) != 0))
3295 return IRQ_HANDLED;
3297 if (napi_schedule_prep(&bnapi->napi)) {
3298 bnapi->last_status_idx = sblk->status_idx;
3299 __napi_schedule(&bnapi->napi);
3302 return IRQ_HANDLED;
3305 static inline int
3306 bnx2_has_fast_work(struct bnx2_napi *bnapi)
3308 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
3309 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
3311 if ((bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons) ||
3312 (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons))
3313 return 1;
3314 return 0;
3317 #define STATUS_ATTN_EVENTS (STATUS_ATTN_BITS_LINK_STATE | \
3318 STATUS_ATTN_BITS_TIMER_ABORT)
3320 static inline int
3321 bnx2_has_work(struct bnx2_napi *bnapi)
3323 struct status_block *sblk = bnapi->status_blk.msi;
3325 if (bnx2_has_fast_work(bnapi))
3326 return 1;
3328 #ifdef BCM_CNIC
3329 if (bnapi->cnic_present && (bnapi->cnic_tag != sblk->status_idx))
3330 return 1;
3331 #endif
3333 if ((sblk->status_attn_bits & STATUS_ATTN_EVENTS) !=
3334 (sblk->status_attn_bits_ack & STATUS_ATTN_EVENTS))
3335 return 1;
3337 return 0;
3340 static void
3341 bnx2_chk_missed_msi(struct bnx2 *bp)
3343 struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
3344 u32 msi_ctrl;
3346 if (bnx2_has_work(bnapi)) {
3347 msi_ctrl = REG_RD(bp, BNX2_PCICFG_MSI_CONTROL);
3348 if (!(msi_ctrl & BNX2_PCICFG_MSI_CONTROL_ENABLE))
3349 return;
3351 if (bnapi->last_status_idx == bp->idle_chk_status_idx) {
3352 REG_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl &
3353 ~BNX2_PCICFG_MSI_CONTROL_ENABLE);
3354 REG_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl);
3355 bnx2_msi(bp->irq_tbl[0].vector, bnapi);
3359 bp->idle_chk_status_idx = bnapi->last_status_idx;
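/* Reviewer note on bnx2_chk_missed_msi(): a workaround for one-shot
 * MSIs that can occasionally be lost.  Called periodically from the
 * driver's timer (an assumption here -- the call site is elsewhere in
 * this file): if NAPI work is pending but last_status_idx has not
 * advanced since the previous check, the MSI enable bit is toggled off
 * and back on and the ISR is invoked directly to reschedule NAPI.
 */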
3362 #ifdef BCM_CNIC
3363 static void bnx2_poll_cnic(struct bnx2 *bp, struct bnx2_napi *bnapi)
3365 struct cnic_ops *c_ops;
3367 if (!bnapi->cnic_present)
3368 return;
3370 rcu_read_lock();
3371 c_ops = rcu_dereference(bp->cnic_ops);
3372 if (c_ops)
3373 bnapi->cnic_tag = c_ops->cnic_handler(bp->cnic_data,
3374 bnapi->status_blk.msi);
3375 rcu_read_unlock();
3377 #endif
3379 static void bnx2_poll_link(struct bnx2 *bp, struct bnx2_napi *bnapi)
3381 struct status_block *sblk = bnapi->status_blk.msi;
3382 u32 status_attn_bits = sblk->status_attn_bits;
3383 u32 status_attn_bits_ack = sblk->status_attn_bits_ack;
3385 if ((status_attn_bits & STATUS_ATTN_EVENTS) !=
3386 (status_attn_bits_ack & STATUS_ATTN_EVENTS)) {
3388 bnx2_phy_int(bp, bnapi);
3390 /* This is needed to take care of transient status
3391 * during link changes.
3393 REG_WR(bp, BNX2_HC_COMMAND,
3394 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
3395 REG_RD(bp, BNX2_HC_COMMAND);
3399 static int bnx2_poll_work(struct bnx2 *bp, struct bnx2_napi *bnapi,
3400 int work_done, int budget)
3402 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
3403 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
3405 if (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons)
3406 bnx2_tx_int(bp, bnapi, 0);
3408 if (bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons)
3409 work_done += bnx2_rx_int(bp, bnapi, budget - work_done);
3411 return work_done;
3414 static int bnx2_poll_msix(struct napi_struct *napi, int budget)
3416 struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
3417 struct bnx2 *bp = bnapi->bp;
3418 int work_done = 0;
3419 struct status_block_msix *sblk = bnapi->status_blk.msix;
3421 while (1) {
3422 work_done = bnx2_poll_work(bp, bnapi, work_done, budget);
3423 if (unlikely(work_done >= budget))
3424 break;
3426 bnapi->last_status_idx = sblk->status_idx;
3427 /* status idx must be read before checking for more work. */
3428 rmb();
3429 if (likely(!bnx2_has_fast_work(bnapi))) {
3431 napi_complete(napi);
3432 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
3433 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
3434 bnapi->last_status_idx);
3435 break;
3438 return work_done;
3441 static int bnx2_poll(struct napi_struct *napi, int budget)
3443 struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
3444 struct bnx2 *bp = bnapi->bp;
3445 int work_done = 0;
3446 struct status_block *sblk = bnapi->status_blk.msi;
3448 while (1) {
3449 bnx2_poll_link(bp, bnapi);
3451 work_done = bnx2_poll_work(bp, bnapi, work_done, budget);
3453 #ifdef BCM_CNIC
3454 bnx2_poll_cnic(bp, bnapi);
3455 #endif
3457 /* bnapi->last_status_idx is used below to tell the hw how
3458 * much work has been processed, so we must read it before
3459 * checking for more work.
3461 bnapi->last_status_idx = sblk->status_idx;
3463 if (unlikely(work_done >= budget))
3464 break;
3466 rmb();
3467 if (likely(!bnx2_has_work(bnapi))) {
3468 napi_complete(napi);
3469 if (likely(bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)) {
3470 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3471 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
3472 bnapi->last_status_idx);
3473 break;
3475 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3476 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
3477 BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
3478 bnapi->last_status_idx);
3480 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3481 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
3482 bnapi->last_status_idx);
3483 break;
3487 return work_done;
3490 /* Called with rtnl_lock from vlan functions and also netif_tx_lock
3491 * from set_multicast.
3493 static void
3494 bnx2_set_rx_mode(struct net_device *dev)
3496 struct bnx2 *bp = netdev_priv(dev);
3497 u32 rx_mode, sort_mode;
3498 struct netdev_hw_addr *ha;
3499 int i;
3501 if (!netif_running(dev))
3502 return;
3504 spin_lock_bh(&bp->phy_lock);
3506 rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
3507 BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
3508 sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
3509 #ifdef BCM_VLAN
3510 if (!bp->vlgrp && (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN))
3511 rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
3512 #else
3513 if (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN)
3514 rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
3515 #endif
3516 if (dev->flags & IFF_PROMISC) {
3517 /* Promiscuous mode. */
3518 rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
3519 sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
3520 BNX2_RPM_SORT_USER0_PROM_VLAN;
3522 else if (dev->flags & IFF_ALLMULTI) {
3523 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
3524 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
3525 0xffffffff);
3527 sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
3529 else {
3530 /* Accept one or more multicast addresses. */
3531 struct dev_mc_list *mclist;
3532 u32 mc_filter[NUM_MC_HASH_REGISTERS];
3533 u32 regidx;
3534 u32 bit;
3535 u32 crc;
3537 memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);
3539 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
3540 i++, mclist = mclist->next) {
3542 crc = ether_crc_le(ETH_ALEN, mclist->dmi_addr);
3543 bit = crc & 0xff;
3544 regidx = (bit & 0xe0) >> 5;
3545 bit &= 0x1f;
3546 mc_filter[regidx] |= (1 << bit);
3549 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
3550 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
3551 mc_filter[i]);
3554 sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
3557 if (dev->uc.count > BNX2_MAX_UNICAST_ADDRESSES) {
3558 rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
3559 sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
3560 BNX2_RPM_SORT_USER0_PROM_VLAN;
3561 } else if (!(dev->flags & IFF_PROMISC)) {
3562 /* Add all entries to the match filter list */
3563 i = 0;
3564 list_for_each_entry(ha, &dev->uc.list, list) {
3565 bnx2_set_mac_addr(bp, ha->addr,
3566 i + BNX2_START_UNICAST_ADDRESS_INDEX);
3567 sort_mode |= (1 <<
3568 (i + BNX2_START_UNICAST_ADDRESS_INDEX));
3569 i++;
3574 if (rx_mode != bp->rx_mode) {
3575 bp->rx_mode = rx_mode;
3576 REG_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
3579 REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
3580 REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
3581 REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);
3583 spin_unlock_bh(&bp->phy_lock);
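/* Worked example for the multicast hash in bnx2_set_rx_mode(): the
 * filter is 256 bits spread over NUM_MC_HASH_REGISTERS 32-bit
 * registers.  The bit index is the low byte of the little-endian CRC32
 * of the address; its top 3 bits pick the register and its low 5 bits
 * the bit within it.  E.g. if (crc & 0xff) == 0x5a:
 *
 *	regidx = (0x5a & 0xe0) >> 5 = 2;
 *	bit    =  0x5a & 0x1f      = 26;	/--> mc_filter[2] |= 1 << 26
 */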
3586 static int __devinit
3587 check_fw_section(const struct firmware *fw,
3588 const struct bnx2_fw_file_section *section,
3589 u32 alignment, bool non_empty)
3591 u32 offset = be32_to_cpu(section->offset);
3592 u32 len = be32_to_cpu(section->len);
3594 if ((offset == 0 && len != 0) || offset >= fw->size || offset & 3)
3595 return -EINVAL;
3596 if ((non_empty && len == 0) || len > fw->size - offset ||
3597 len & (alignment - 1))
3598 return -EINVAL;
3599 return 0;
3602 static int __devinit
3603 check_mips_fw_entry(const struct firmware *fw,
3604 const struct bnx2_mips_fw_file_entry *entry)
3606 if (check_fw_section(fw, &entry->text, 4, true) ||
3607 check_fw_section(fw, &entry->data, 4, false) ||
3608 check_fw_section(fw, &entry->rodata, 4, false))
3609 return -EINVAL;
3610 return 0;
3613 static int __devinit
3614 bnx2_request_firmware(struct bnx2 *bp)
3616 const char *mips_fw_file, *rv2p_fw_file;
3617 const struct bnx2_mips_fw_file *mips_fw;
3618 const struct bnx2_rv2p_fw_file *rv2p_fw;
3619 int rc;
3621 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3622 mips_fw_file = FW_MIPS_FILE_09;
3623 rv2p_fw_file = FW_RV2P_FILE_09;
3624 } else {
3625 mips_fw_file = FW_MIPS_FILE_06;
3626 rv2p_fw_file = FW_RV2P_FILE_06;
3629 rc = request_firmware(&bp->mips_firmware, mips_fw_file, &bp->pdev->dev);
3630 if (rc) {
3631 printk(KERN_ERR PFX "Can't load firmware file \"%s\"\n",
3632 mips_fw_file);
3633 return rc;
3636 rc = request_firmware(&bp->rv2p_firmware, rv2p_fw_file, &bp->pdev->dev);
3637 if (rc) {
3638 printk(KERN_ERR PFX "Can't load firmware file \"%s\"\n",
3639 rv2p_fw_file);
3640 return rc;
3642 mips_fw = (const struct bnx2_mips_fw_file *) bp->mips_firmware->data;
3643 rv2p_fw = (const struct bnx2_rv2p_fw_file *) bp->rv2p_firmware->data;
3644 if (bp->mips_firmware->size < sizeof(*mips_fw) ||
3645 check_mips_fw_entry(bp->mips_firmware, &mips_fw->com) ||
3646 check_mips_fw_entry(bp->mips_firmware, &mips_fw->cp) ||
3647 check_mips_fw_entry(bp->mips_firmware, &mips_fw->rxp) ||
3648 check_mips_fw_entry(bp->mips_firmware, &mips_fw->tpat) ||
3649 check_mips_fw_entry(bp->mips_firmware, &mips_fw->txp)) {
3650 printk(KERN_ERR PFX "Firmware file \"%s\" is invalid\n",
3651 mips_fw_file);
3652 return -EINVAL;
3654 if (bp->rv2p_firmware->size < sizeof(*rv2p_fw) ||
3655 check_fw_section(bp->rv2p_firmware, &rv2p_fw->proc1.rv2p, 8, true) ||
3656 check_fw_section(bp->rv2p_firmware, &rv2p_fw->proc2.rv2p, 8, true)) {
3657 printk(KERN_ERR PFX "Firmware file \"%s\" is invalid\n",
3658 rv2p_fw_file);
3659 return -EINVAL;
3662 return 0;
3665 static u32
3666 rv2p_fw_fixup(u32 rv2p_proc, int idx, u32 loc, u32 rv2p_code)
3668 switch (idx) {
3669 case RV2P_P1_FIXUP_PAGE_SIZE_IDX:
3670 rv2p_code &= ~RV2P_BD_PAGE_SIZE_MSK;
3671 rv2p_code |= RV2P_BD_PAGE_SIZE;
3672 break;
3674 return rv2p_code;
3677 static int
3678 load_rv2p_fw(struct bnx2 *bp, u32 rv2p_proc,
3679 const struct bnx2_rv2p_fw_file_entry *fw_entry)
3681 u32 rv2p_code_len, file_offset;
3682 __be32 *rv2p_code;
3683 int i;
3684 u32 val, cmd, addr;
3686 rv2p_code_len = be32_to_cpu(fw_entry->rv2p.len);
3687 file_offset = be32_to_cpu(fw_entry->rv2p.offset);
3689 rv2p_code = (__be32 *)(bp->rv2p_firmware->data + file_offset);
3691 if (rv2p_proc == RV2P_PROC1) {
3692 cmd = BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
3693 addr = BNX2_RV2P_PROC1_ADDR_CMD;
3694 } else {
3695 cmd = BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
3696 addr = BNX2_RV2P_PROC2_ADDR_CMD;
3699 for (i = 0; i < rv2p_code_len; i += 8) {
3700 REG_WR(bp, BNX2_RV2P_INSTR_HIGH, be32_to_cpu(*rv2p_code));
3701 rv2p_code++;
3702 REG_WR(bp, BNX2_RV2P_INSTR_LOW, be32_to_cpu(*rv2p_code));
3703 rv2p_code++;
3705 val = (i / 8) | cmd;
3706 REG_WR(bp, addr, val);
3709 rv2p_code = (__be32 *)(bp->rv2p_firmware->data + file_offset);
3710 for (i = 0; i < 8; i++) {
3711 u32 loc, code;
3713 loc = be32_to_cpu(fw_entry->fixup[i]);
3714 if (loc && ((loc * 4) < rv2p_code_len)) {
3715 code = be32_to_cpu(*(rv2p_code + loc - 1));
3716 REG_WR(bp, BNX2_RV2P_INSTR_HIGH, code);
3717 code = be32_to_cpu(*(rv2p_code + loc));
3718 code = rv2p_fw_fixup(rv2p_proc, i, loc, code);
3719 REG_WR(bp, BNX2_RV2P_INSTR_LOW, code);
3721 val = (loc / 2) | cmd;
3722 REG_WR(bp, addr, val);
3726 /* Reset the processor; un-stalling is done later. */
3727 if (rv2p_proc == RV2P_PROC1) {
3728 REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
3730 else {
3731 REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
3734 return 0;
3737 static int
3738 load_cpu_fw(struct bnx2 *bp, const struct cpu_reg *cpu_reg,
3739 const struct bnx2_mips_fw_file_entry *fw_entry)
3741 u32 addr, len, file_offset;
3742 __be32 *data;
3743 u32 offset;
3744 u32 val;
3746 /* Halt the CPU. */
3747 val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
3748 val |= cpu_reg->mode_value_halt;
3749 bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
3750 bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
3752 /* Load the Text area. */
3753 addr = be32_to_cpu(fw_entry->text.addr);
3754 len = be32_to_cpu(fw_entry->text.len);
3755 file_offset = be32_to_cpu(fw_entry->text.offset);
3756 data = (__be32 *)(bp->mips_firmware->data + file_offset);
3758 offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3759 if (len) {
3760 int j;
3762 for (j = 0; j < (len / 4); j++, offset += 4)
3763 bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3766 /* Load the Data area. */
3767 addr = be32_to_cpu(fw_entry->data.addr);
3768 len = be32_to_cpu(fw_entry->data.len);
3769 file_offset = be32_to_cpu(fw_entry->data.offset);
3770 data = (__be32 *)(bp->mips_firmware->data + file_offset);
3772 offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3773 if (len) {
3774 int j;
3776 for (j = 0; j < (len / 4); j++, offset += 4)
3777 bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3780 /* Load the Read-Only area. */
3781 addr = be32_to_cpu(fw_entry->rodata.addr);
3782 len = be32_to_cpu(fw_entry->rodata.len);
3783 file_offset = be32_to_cpu(fw_entry->rodata.offset);
3784 data = (__be32 *)(bp->mips_firmware->data + file_offset);
3786 offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3787 if (len) {
3788 int j;
3790 for (j = 0; j < (len / 4); j++, offset += 4)
3791 bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3794 /* Clear the pre-fetch instruction. */
3795 bnx2_reg_wr_ind(bp, cpu_reg->inst, 0);
3797 val = be32_to_cpu(fw_entry->start_addr);
3798 bnx2_reg_wr_ind(bp, cpu_reg->pc, val);
3800 /* Start the CPU. */
3801 val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
3802 val &= ~cpu_reg->mode_value_halt;
3803 bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
3804 bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
3806 return 0;
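/* Reviewer note on load_cpu_fw(): the standard bring-up sequence for
 * each on-chip RISC processor -- halt it via the mode register, copy
 * the text/data/rodata sections into its scratchpad through indirect
 * register writes (translating each MIPS-view address to a scratchpad
 * offset via spad_base - mips_view_base), clear the prefetch
 * instruction register, load the PC with the entry point, and finally
 * clear the halt bit to start execution.
 */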
3809 static int
3810 bnx2_init_cpus(struct bnx2 *bp)
3812 const struct bnx2_mips_fw_file *mips_fw =
3813 (const struct bnx2_mips_fw_file *) bp->mips_firmware->data;
3814 const struct bnx2_rv2p_fw_file *rv2p_fw =
3815 (const struct bnx2_rv2p_fw_file *) bp->rv2p_firmware->data;
3816 int rc;
3818 /* Initialize the RV2P processor. */
3819 load_rv2p_fw(bp, RV2P_PROC1, &rv2p_fw->proc1);
3820 load_rv2p_fw(bp, RV2P_PROC2, &rv2p_fw->proc2);
3822 /* Initialize the RX Processor. */
3823 rc = load_cpu_fw(bp, &cpu_reg_rxp, &mips_fw->rxp);
3824 if (rc)
3825 goto init_cpu_err;
3827 /* Initialize the TX Processor. */
3828 rc = load_cpu_fw(bp, &cpu_reg_txp, &mips_fw->txp);
3829 if (rc)
3830 goto init_cpu_err;
3832 /* Initialize the TX Patch-up Processor. */
3833 rc = load_cpu_fw(bp, &cpu_reg_tpat, &mips_fw->tpat);
3834 if (rc)
3835 goto init_cpu_err;
3837 /* Initialize the Completion Processor. */
3838 rc = load_cpu_fw(bp, &cpu_reg_com, &mips_fw->com);
3839 if (rc)
3840 goto init_cpu_err;
3842 /* Initialize the Command Processor. */
3843 rc = load_cpu_fw(bp, &cpu_reg_cp, &mips_fw->cp);
3845 init_cpu_err:
3846 return rc;
3849 static int
3850 bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
3852 u16 pmcsr;
3854 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
3856 switch (state) {
3857 case PCI_D0: {
3858 u32 val;
3860 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
3861 (pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
3862 PCI_PM_CTRL_PME_STATUS);
3864 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
3865 /* delay required during transition out of D3hot */
3866 msleep(20);
3868 val = REG_RD(bp, BNX2_EMAC_MODE);
3869 val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
3870 val &= ~BNX2_EMAC_MODE_MPKT;
3871 REG_WR(bp, BNX2_EMAC_MODE, val);
3873 val = REG_RD(bp, BNX2_RPM_CONFIG);
3874 val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
3875 REG_WR(bp, BNX2_RPM_CONFIG, val);
3876 break;
3878 case PCI_D3hot: {
3879 int i;
3880 u32 val, wol_msg;
3882 if (bp->wol) {
3883 u32 advertising;
3884 u8 autoneg;
3886 autoneg = bp->autoneg;
3887 advertising = bp->advertising;
3889 if (bp->phy_port == PORT_TP) {
3890 bp->autoneg = AUTONEG_SPEED;
3891 bp->advertising = ADVERTISED_10baseT_Half |
3892 ADVERTISED_10baseT_Full |
3893 ADVERTISED_100baseT_Half |
3894 ADVERTISED_100baseT_Full |
3895 ADVERTISED_Autoneg;
3898 spin_lock_bh(&bp->phy_lock);
3899 bnx2_setup_phy(bp, bp->phy_port);
3900 spin_unlock_bh(&bp->phy_lock);
3902 bp->autoneg = autoneg;
3903 bp->advertising = advertising;
3905 bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
3907 val = REG_RD(bp, BNX2_EMAC_MODE);
3909 /* Enable port mode. */
3910 val &= ~BNX2_EMAC_MODE_PORT;
3911 val |= BNX2_EMAC_MODE_MPKT_RCVD |
3912 BNX2_EMAC_MODE_ACPI_RCVD |
3913 BNX2_EMAC_MODE_MPKT;
3914 if (bp->phy_port == PORT_TP)
3915 val |= BNX2_EMAC_MODE_PORT_MII;
3916 else {
3917 val |= BNX2_EMAC_MODE_PORT_GMII;
3918 if (bp->line_speed == SPEED_2500)
3919 val |= BNX2_EMAC_MODE_25G_MODE;
3922 REG_WR(bp, BNX2_EMAC_MODE, val);
3924 /* receive all multicast */
3925 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
3926 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
3927 0xffffffff);
3929 REG_WR(bp, BNX2_EMAC_RX_MODE,
3930 BNX2_EMAC_RX_MODE_SORT_MODE);
3932 val = 1 | BNX2_RPM_SORT_USER0_BC_EN |
3933 BNX2_RPM_SORT_USER0_MC_EN;
3934 REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
3935 REG_WR(bp, BNX2_RPM_SORT_USER0, val);
3936 REG_WR(bp, BNX2_RPM_SORT_USER0, val |
3937 BNX2_RPM_SORT_USER0_ENA);
3939 /* Need to enable EMAC and RPM for WOL. */
3940 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
3941 BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
3942 BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
3943 BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);
3945 val = REG_RD(bp, BNX2_RPM_CONFIG);
3946 val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
3947 REG_WR(bp, BNX2_RPM_CONFIG, val);
3949 wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
3951 else {
3952 wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
3955 if (!(bp->flags & BNX2_FLAG_NO_WOL))
3956 bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg,
3957 1, 0);
3959 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
3960 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
3961 (CHIP_ID(bp) == CHIP_ID_5706_A1)) {
3963 if (bp->wol)
3964 pmcsr |= 3;
3966 else {
3967 pmcsr |= 3;
3969 if (bp->wol) {
3970 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
3972 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
3973 pmcsr);
3975 /* No more memory access after this point until
3976 * device is brought back to D0.
3978 udelay(50);
3979 break;
3981 default:
3982 return -EINVAL;
3984 return 0;
3987 static int
3988 bnx2_acquire_nvram_lock(struct bnx2 *bp)
3990 u32 val;
3991 int j;
3993 /* Request access to the flash interface. */
3994 REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
3995 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3996 val = REG_RD(bp, BNX2_NVM_SW_ARB);
3997 if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
3998 break;
4000 udelay(5);
4003 if (j >= NVRAM_TIMEOUT_COUNT)
4004 return -EBUSY;
4006 return 0;
4009 static int
4010 bnx2_release_nvram_lock(struct bnx2 *bp)
4012 int j;
4013 u32 val;
4015 /* Relinquish nvram interface. */
4016 REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);
4018 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4019 val = REG_RD(bp, BNX2_NVM_SW_ARB);
4020 if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
4021 break;
4023 udelay(5);
4026 if (j >= NVRAM_TIMEOUT_COUNT)
4027 return -EBUSY;
4029 return 0;
4033 static int
4034 bnx2_enable_nvram_write(struct bnx2 *bp)
4036 u32 val;
4038 val = REG_RD(bp, BNX2_MISC_CFG);
4039 REG_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);
4041 if (bp->flash_info->flags & BNX2_NV_WREN) {
4042 int j;
4044 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4045 REG_WR(bp, BNX2_NVM_COMMAND,
4046 BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);
4048 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4049 udelay(5);
4051 val = REG_RD(bp, BNX2_NVM_COMMAND);
4052 if (val & BNX2_NVM_COMMAND_DONE)
4053 break;
4056 if (j >= NVRAM_TIMEOUT_COUNT)
4057 return -EBUSY;
4059 return 0;
4062 static void
4063 bnx2_disable_nvram_write(struct bnx2 *bp)
4065 u32 val;
4067 val = REG_RD(bp, BNX2_MISC_CFG);
4068 REG_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
4072 static void
4073 bnx2_enable_nvram_access(struct bnx2 *bp)
4075 u32 val;
4077 val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
4078 /* Enable both bits, even on read. */
4079 REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
4080 val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
4083 static void
4084 bnx2_disable_nvram_access(struct bnx2 *bp)
4086 u32 val;
4088 val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
4089 /* Disable both bits, even after read. */
4090 REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
4091 val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
4092 BNX2_NVM_ACCESS_ENABLE_WR_EN));
4095 static int
4096 bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
4098 u32 cmd;
4099 int j;
4101 if (bp->flash_info->flags & BNX2_NV_BUFFERED)
4102 /* Buffered flash, no erase needed */
4103 return 0;
4105 /* Build an erase command */
4106 cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
4107 BNX2_NVM_COMMAND_DOIT;
4109 /* Need to clear DONE bit separately. */
4110 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4112 /* Address of the NVRAM to erase. */
4113 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
4115 /* Issue an erase command. */
4116 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
4118 /* Wait for completion. */
4119 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4120 u32 val;
4122 udelay(5);
4124 val = REG_RD(bp, BNX2_NVM_COMMAND);
4125 if (val & BNX2_NVM_COMMAND_DONE)
4126 break;
4129 if (j >= NVRAM_TIMEOUT_COUNT)
4130 return -EBUSY;
4132 return 0;
4135 static int
4136 bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
4138 u32 cmd;
4139 int j;
4141 /* Build the command word. */
4142 cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;
4144 /* Calculate the offset within a buffered flash, not needed for 5709. */
4145 if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
4146 offset = ((offset / bp->flash_info->page_size) <<
4147 bp->flash_info->page_bits) +
4148 (offset % bp->flash_info->page_size);
4151 /* Need to clear DONE bit separately. */
4152 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4154 /* Address of the NVRAM to read from. */
4155 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
4157 /* Issue a read command. */
4158 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
4160 /* Wait for completion. */
4161 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4162 u32 val;
4164 udelay(5);
4166 val = REG_RD(bp, BNX2_NVM_COMMAND);
4167 if (val & BNX2_NVM_COMMAND_DONE) {
4168 __be32 v = cpu_to_be32(REG_RD(bp, BNX2_NVM_READ));
4169 memcpy(ret_val, &v, 4);
4170 break;
4173 if (j >= NVRAM_TIMEOUT_COUNT)
4174 return -EBUSY;
4176 return 0;
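/* Worked example for the BNX2_NV_TRANSLATE case above: buffered flash
 * parts are addressed by page, with page_bits address bits reserved
 * per page even when page_size is not a power of two.  Assuming a part
 * with page_size = 264 and page_bits = 9 (the values this driver's
 * flash table uses for the Atmel buffered flash, if memory serves):
 *
 *	offset 1000  ->  page 1000 / 264 = 3, byte 1000 % 264 = 208
 *	             ->  (3 << 9) + 208 = 1744
 */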
4180 static int
4181 bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
4183 u32 cmd;
4184 __be32 val32;
4185 int j;
4187 /* Build the command word. */
4188 cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;
4190 /* Calculate the offset within a buffered flash, not needed for 5709. */
4191 if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
4192 offset = ((offset / bp->flash_info->page_size) <<
4193 bp->flash_info->page_bits) +
4194 (offset % bp->flash_info->page_size);
4197 /* Need to clear DONE bit separately. */
4198 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4200 memcpy(&val32, val, 4);
4202 /* Write the data. */
4203 REG_WR(bp, BNX2_NVM_WRITE, be32_to_cpu(val32));
4205 /* Address of the NVRAM to write to. */
4206 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
4208 /* Issue the write command. */
4209 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
4211 /* Wait for completion. */
4212 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4213 udelay(5);
4215 if (REG_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
4216 break;
4218 if (j >= NVRAM_TIMEOUT_COUNT)
4219 return -EBUSY;
4221 return 0;
4224 static int
4225 bnx2_init_nvram(struct bnx2 *bp)
4227 u32 val;
4228 int j, entry_count, rc = 0;
4229 struct flash_spec *flash;
4231 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4232 bp->flash_info = &flash_5709;
4233 goto get_flash_size;
4236 /* Determine the selected interface. */
4237 val = REG_RD(bp, BNX2_NVM_CFG1);
4239 entry_count = ARRAY_SIZE(flash_table);
4241 if (val & 0x40000000) {
4243 /* Flash interface has been reconfigured */
4244 for (j = 0, flash = &flash_table[0]; j < entry_count;
4245 j++, flash++) {
4246 if ((val & FLASH_BACKUP_STRAP_MASK) ==
4247 (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
4248 bp->flash_info = flash;
4249 break;
4253 else {
4254 u32 mask;
4255 /* Flash interface has not been reconfigured */
4257 if (val & (1 << 23))
4258 mask = FLASH_BACKUP_STRAP_MASK;
4259 else
4260 mask = FLASH_STRAP_MASK;
4262 for (j = 0, flash = &flash_table[0]; j < entry_count;
4263 j++, flash++) {
4265 if ((val & mask) == (flash->strapping & mask)) {
4266 bp->flash_info = flash;
4268 /* Request access to the flash interface. */
4269 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
4270 return rc;
4272 /* Enable access to flash interface */
4273 bnx2_enable_nvram_access(bp);
4275 /* Reconfigure the flash interface */
4276 REG_WR(bp, BNX2_NVM_CFG1, flash->config1);
4277 REG_WR(bp, BNX2_NVM_CFG2, flash->config2);
4278 REG_WR(bp, BNX2_NVM_CFG3, flash->config3);
4279 REG_WR(bp, BNX2_NVM_WRITE1, flash->write1);
4281 /* Disable access to flash interface */
4282 bnx2_disable_nvram_access(bp);
4283 bnx2_release_nvram_lock(bp);
4285 break;
4288 } /* if (val & 0x40000000) */
4290 if (j == entry_count) {
4291 bp->flash_info = NULL;
4292 printk(KERN_ALERT PFX "Unknown flash/EEPROM type.\n");
4293 return -ENODEV;
4296 get_flash_size:
4297 val = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG2);
4298 val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
4299 if (val)
4300 bp->flash_size = val;
4301 else
4302 bp->flash_size = bp->flash_info->total_size;
4304 return rc;
4307 static int
4308 bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
4309 int buf_size)
4311 int rc = 0;
4312 u32 cmd_flags, offset32, len32, extra;
4314 if (buf_size == 0)
4315 return 0;
4317 /* Request access to the flash interface. */
4318 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
4319 return rc;
4321 /* Enable access to flash interface */
4322 bnx2_enable_nvram_access(bp);
4324 len32 = buf_size;
4325 offset32 = offset;
4326 extra = 0;
4328 cmd_flags = 0;
4330 if (offset32 & 3) {
4331 u8 buf[4];
4332 u32 pre_len;
4334 offset32 &= ~3;
4335 pre_len = 4 - (offset & 3);
4337 if (pre_len >= len32) {
4338 pre_len = len32;
4339 cmd_flags = BNX2_NVM_COMMAND_FIRST |
4340 BNX2_NVM_COMMAND_LAST;
4342 else {
4343 cmd_flags = BNX2_NVM_COMMAND_FIRST;
4346 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4348 if (rc)
4349 return rc;
4351 memcpy(ret_buf, buf + (offset & 3), pre_len);
4353 offset32 += 4;
4354 ret_buf += pre_len;
4355 len32 -= pre_len;
4357 if (len32 & 3) {
4358 extra = 4 - (len32 & 3);
4359 len32 = (len32 + 4) & ~3;
4362 if (len32 == 4) {
4363 u8 buf[4];
4365 if (cmd_flags)
4366 cmd_flags = BNX2_NVM_COMMAND_LAST;
4367 else
4368 cmd_flags = BNX2_NVM_COMMAND_FIRST |
4369 BNX2_NVM_COMMAND_LAST;
4371 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4373 memcpy(ret_buf, buf, 4 - extra);
4375 else if (len32 > 0) {
4376 u8 buf[4];
4378 /* Read the first word. */
4379 if (cmd_flags)
4380 cmd_flags = 0;
4381 else
4382 cmd_flags = BNX2_NVM_COMMAND_FIRST;
4384 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);
4386 /* Advance to the next dword. */
4387 offset32 += 4;
4388 ret_buf += 4;
4389 len32 -= 4;
4391 while (len32 > 4 && rc == 0) {
4392 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);
4394 /* Advance to the next dword. */
4395 offset32 += 4;
4396 ret_buf += 4;
4397 len32 -= 4;
4400 if (rc)
4401 return rc;
4403 cmd_flags = BNX2_NVM_COMMAND_LAST;
4404 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4406 memcpy(ret_buf, buf, 4 - extra);
4409 /* Disable access to flash interface */
4410 bnx2_disable_nvram_access(bp);
4412 bnx2_release_nvram_lock(bp);
4414 return rc;
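/*
 * bnx2_nvram_read() hides the dword granularity of the NVRAM
 * interface: an unaligned head, an unaligned tail, and the aligned
 * middle are each fetched as full 32-bit reads, bracketed by the
 * FIRST/LAST command flags, so callers may pass any offset and
 * length.  Illustrative call only (the offset is arbitrary, not a
 * defined NVRAM region):
 *
 *	u8 tmp[6];
 *
 *	if (bnx2_nvram_read(bp, 0x101, tmp, 6) == 0)
 *		;	// tmp[] now holds the 6 bytes at 0x101
 */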
4417 static int
4418 bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
4419 int buf_size)
4421 u32 written, offset32, len32;
4422 u8 *buf, start[4], end[4], *align_buf = NULL, *flash_buffer = NULL;
4423 int rc = 0;
4424 int align_start, align_end;
4426 buf = data_buf;
4427 offset32 = offset;
4428 len32 = buf_size;
4429 align_start = align_end = 0;
4431 if ((align_start = (offset32 & 3))) {
4432 offset32 &= ~3;
4433 len32 += align_start;
4434 if (len32 < 4)
4435 len32 = 4;
4436 if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
4437 return rc;
4440 if (len32 & 3) {
4441 align_end = 4 - (len32 & 3);
4442 len32 += align_end;
4443 if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4, end, 4)))
4444 return rc;
4447 if (align_start || align_end) {
4448 align_buf = kmalloc(len32, GFP_KERNEL);
4449 if (align_buf == NULL)
4450 return -ENOMEM;
4451 if (align_start) {
4452 memcpy(align_buf, start, 4);
4454 if (align_end) {
4455 memcpy(align_buf + len32 - 4, end, 4);
4457 memcpy(align_buf + align_start, data_buf, buf_size);
4458 buf = align_buf;
4461 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4462 flash_buffer = kmalloc(264, GFP_KERNEL);
4463 if (flash_buffer == NULL) {
4464 rc = -ENOMEM;
4465 goto nvram_write_end;
4469 written = 0;
4470 while ((written < len32) && (rc == 0)) {
4471 u32 page_start, page_end, data_start, data_end;
4472 u32 addr, cmd_flags;
4473 int i;
4475 /* Find the page_start addr */
4476 page_start = offset32 + written;
4477 page_start -= (page_start % bp->flash_info->page_size);
4478 /* Find the page_end addr */
4479 page_end = page_start + bp->flash_info->page_size;
4480 /* Find the data_start addr */
4481 data_start = (written == 0) ? offset32 : page_start;
4482 /* Find the data_end addr */
4483 data_end = (page_end > offset32 + len32) ?
4484 (offset32 + len32) : page_end;
4486 /* Request access to the flash interface. */
4487 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
4488 goto nvram_write_end;
4490 /* Enable access to flash interface */
4491 bnx2_enable_nvram_access(bp);
4493 cmd_flags = BNX2_NVM_COMMAND_FIRST;
4494 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4495 int j;
4497 /* Read the whole page into the buffer
4498 * (non-buffer flash only) */
4499 for (j = 0; j < bp->flash_info->page_size; j += 4) {
4500 if (j == (bp->flash_info->page_size - 4)) {
4501 cmd_flags |= BNX2_NVM_COMMAND_LAST;
4503 rc = bnx2_nvram_read_dword(bp,
4504 page_start + j,
4505 &flash_buffer[j],
4506 cmd_flags);
4508 if (rc)
4509 goto nvram_write_end;
4511 cmd_flags = 0;
4515 /* Enable writes to flash interface (unlock write-protect) */
4516 if ((rc = bnx2_enable_nvram_write(bp)) != 0)
4517 goto nvram_write_end;
4519 /* Loop to write back the buffer data from page_start to
4520 * data_start */
4521 i = 0;
4522 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4523 /* Erase the page */
4524 if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
4525 goto nvram_write_end;
4527 /* Re-enable writes for the actual write */
4528 bnx2_enable_nvram_write(bp);
4530 for (addr = page_start; addr < data_start;
4531 addr += 4, i += 4) {
4533 rc = bnx2_nvram_write_dword(bp, addr,
4534 &flash_buffer[i], cmd_flags);
4536 if (rc != 0)
4537 goto nvram_write_end;
4539 cmd_flags = 0;
4543 /* Loop to write the new data from data_start to data_end */
4544 for (addr = data_start; addr < data_end; addr += 4, i += 4) {
4545 if ((addr == page_end - 4) ||
4546 ((bp->flash_info->flags & BNX2_NV_BUFFERED) &&
4547 (addr == data_end - 4))) {
4549 cmd_flags |= BNX2_NVM_COMMAND_LAST;
4551 rc = bnx2_nvram_write_dword(bp, addr, buf,
4552 cmd_flags);
4554 if (rc != 0)
4555 goto nvram_write_end;
4557 cmd_flags = 0;
4558 buf += 4;
4561 /* Loop to write back the buffer data from data_end
4562 * to page_end */
4563 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4564 for (addr = data_end; addr < page_end;
4565 addr += 4, i += 4) {
4567 if (addr == page_end-4) {
4568 cmd_flags = BNX2_NVM_COMMAND_LAST;
4570 rc = bnx2_nvram_write_dword(bp, addr,
4571 &flash_buffer[i], cmd_flags);
4573 if (rc != 0)
4574 goto nvram_write_end;
4576 cmd_flags = 0;
4580 /* Disable writes to flash interface (lock write-protect) */
4581 bnx2_disable_nvram_write(bp);
4583 /* Disable access to flash interface */
4584 bnx2_disable_nvram_access(bp);
4585 bnx2_release_nvram_lock(bp);
4587 /* Increment written */
4588 written += data_end - data_start;
4591 nvram_write_end:
4592 kfree(flash_buffer);
4593 kfree(align_buf);
4594 return rc;
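/*
 * Write-path summary: unaligned head and tail bytes are preserved by
 * reading them back first (start[]/end[]) and splicing the caller's
 * data into align_buf, so the device only ever sees whole dwords.
 * For non-buffered flash, each affected page is additionally read
 * into flash_buffer, erased, and rewritten in full, with the new
 * bytes substituted between data_start and data_end.
 */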
4597 static void
4598 bnx2_init_fw_cap(struct bnx2 *bp)
4600 u32 val, sig = 0;
4602 bp->phy_flags &= ~BNX2_PHY_FLAG_REMOTE_PHY_CAP;
4603 bp->flags &= ~BNX2_FLAG_CAN_KEEP_VLAN;
4605 if (!(bp->flags & BNX2_FLAG_ASF_ENABLE))
4606 bp->flags |= BNX2_FLAG_CAN_KEEP_VLAN;
4608 val = bnx2_shmem_rd(bp, BNX2_FW_CAP_MB);
4609 if ((val & BNX2_FW_CAP_SIGNATURE_MASK) != BNX2_FW_CAP_SIGNATURE)
4610 return;
4612 if ((val & BNX2_FW_CAP_CAN_KEEP_VLAN) == BNX2_FW_CAP_CAN_KEEP_VLAN) {
4613 bp->flags |= BNX2_FLAG_CAN_KEEP_VLAN;
4614 sig |= BNX2_DRV_ACK_CAP_SIGNATURE | BNX2_FW_CAP_CAN_KEEP_VLAN;
4617 if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
4618 (val & BNX2_FW_CAP_REMOTE_PHY_CAPABLE)) {
4619 u32 link;
4621 bp->phy_flags |= BNX2_PHY_FLAG_REMOTE_PHY_CAP;
4623 link = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);
4624 if (link & BNX2_LINK_STATUS_SERDES_LINK)
4625 bp->phy_port = PORT_FIBRE;
4626 else
4627 bp->phy_port = PORT_TP;
4629 sig |= BNX2_DRV_ACK_CAP_SIGNATURE |
4630 BNX2_FW_CAP_REMOTE_PHY_CAPABLE;
4633 if (netif_running(bp->dev) && sig)
4634 bnx2_shmem_wr(bp, BNX2_DRV_ACK_CAP_MB, sig);
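/*
 * Driver/firmware capability handshake: the firmware publishes its
 * capabilities in BNX2_FW_CAP_MB behind a signature, and the driver
 * acknowledges the subset it will use by writing sig (the ack
 * signature plus the accepted capability bits) to
 * BNX2_DRV_ACK_CAP_MB.  Remote PHY support additionally snapshots
 * the current media from BNX2_LINK_STATUS so phy_port tracks what
 * the firmware reports.
 */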
4637 static void
4638 bnx2_setup_msix_tbl(struct bnx2 *bp)
4640 REG_WR(bp, BNX2_PCI_GRC_WINDOW_ADDR, BNX2_PCI_GRC_WINDOW_ADDR_SEP_WIN);
4642 REG_WR(bp, BNX2_PCI_GRC_WINDOW2_ADDR, BNX2_MSIX_TABLE_ADDR);
4643 REG_WR(bp, BNX2_PCI_GRC_WINDOW3_ADDR, BNX2_MSIX_PBA_ADDR);
4646 static int
4647 bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
4649 u32 val;
4650 int i, rc = 0;
4651 u8 old_port;
4653 /* Wait for the current PCI transaction to complete before
4654 * issuing a reset. */
4655 REG_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
4656 BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
4657 BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
4658 BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
4659 BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
4660 val = REG_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
4661 udelay(5);
4663 /* Wait for the firmware to tell us it is ok to issue a reset. */
4664 bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1, 1);
4666 /* Deposit a driver reset signature so the firmware knows that
4667 * this is a soft reset. */
4668 bnx2_shmem_wr(bp, BNX2_DRV_RESET_SIGNATURE,
4669 BNX2_DRV_RESET_SIGNATURE_MAGIC);
4671 /* Do a dummy read to force the chip to complete all current transactions
4672 * before we issue a reset. */
4673 val = REG_RD(bp, BNX2_MISC_ID);
4675 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4676 REG_WR(bp, BNX2_MISC_COMMAND, BNX2_MISC_COMMAND_SW_RESET);
4677 REG_RD(bp, BNX2_MISC_COMMAND);
4678 udelay(5);
4680 val = BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
4681 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
4683 pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, val);
4685 } else {
4686 val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
4687 BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
4688 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
4690 /* Chip reset. */
4691 REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);
4693 /* Reading back any register after chip reset will hang the
4694 * bus on 5706 A0 and A1. The msleep below provides plenty
4695 * of margin for write posting. */
4697 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
4698 (CHIP_ID(bp) == CHIP_ID_5706_A1))
4699 msleep(20);
4701 /* Reset takes approximately 30 usec */
4702 for (i = 0; i < 10; i++) {
4703 val = REG_RD(bp, BNX2_PCICFG_MISC_CONFIG);
4704 if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
4705 BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0)
4706 break;
4707 udelay(10);
4710 if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
4711 BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
4712 printk(KERN_ERR PFX "Chip reset did not complete\n");
4713 return -EBUSY;
4717 /* Make sure byte swapping is properly configured. */
4718 val = REG_RD(bp, BNX2_PCI_SWAP_DIAG0);
4719 if (val != 0x01020304) {
4720 printk(KERN_ERR PFX "Chip not in correct endian mode\n");
4721 return -ENODEV;
4724 /* Wait for the firmware to finish its initialization. */
4725 rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 1, 0);
4726 if (rc)
4727 return rc;
4729 spin_lock_bh(&bp->phy_lock);
4730 old_port = bp->phy_port;
4731 bnx2_init_fw_cap(bp);
4732 if ((bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) &&
4733 old_port != bp->phy_port)
4734 bnx2_set_default_remote_link(bp);
4735 spin_unlock_bh(&bp->phy_lock);
4737 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
4738 /* Adjust the voltage regulator two steps lower. The default
4739 * of this register is 0x0000000e. */
4740 REG_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);
4742 /* Remove bad rbuf memory from the free pool. */
4743 rc = bnx2_alloc_bad_rbuf(bp);
4746 if (bp->flags & BNX2_FLAG_USING_MSIX)
4747 bnx2_setup_msix_tbl(bp);
4749 return rc;
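/*
 * Reset ordering matters here: the DMA engines are quiesced first,
 * and the bootcode is consulted via bnx2_fw_sync() both before
 * (WAIT0) and after (WAIT1) the core reset so it can save and
 * restore its state.  5709 resets through BNX2_MISC_COMMAND_SW_RESET;
 * older chips set the self-clearing CORE_RST_REQ bit and poll it for
 * up to roughly 100 usec above.
 */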
4752 static int
4753 bnx2_init_chip(struct bnx2 *bp)
4755 u32 val, mtu;
4756 int rc, i;
4758 /* Make sure the interrupt is not active. */
4759 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
4761 val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
4762 BNX2_DMA_CONFIG_DATA_WORD_SWAP |
4763 #ifdef __BIG_ENDIAN
4764 BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
4765 #endif
4766 BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
4767 DMA_READ_CHANS << 12 |
4768 DMA_WRITE_CHANS << 16;
4770 val |= (0x2 << 20) | (1 << 11);
4772 if ((bp->flags & BNX2_FLAG_PCIX) && (bp->bus_speed_mhz == 133))
4773 val |= (1 << 23);
4775 if ((CHIP_NUM(bp) == CHIP_NUM_5706) &&
4776 (CHIP_ID(bp) != CHIP_ID_5706_A0) && !(bp->flags & BNX2_FLAG_PCIX))
4777 val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;
4779 REG_WR(bp, BNX2_DMA_CONFIG, val);
4781 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
4782 val = REG_RD(bp, BNX2_TDMA_CONFIG);
4783 val |= BNX2_TDMA_CONFIG_ONE_DMA;
4784 REG_WR(bp, BNX2_TDMA_CONFIG, val);
4787 if (bp->flags & BNX2_FLAG_PCIX) {
4788 u16 val16;
4790 pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
4791 &val16);
4792 pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
4793 val16 & ~PCI_X_CMD_ERO);
4796 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
4797 BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
4798 BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
4799 BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);
4801 /* Initialize context mapping and zero out the quick contexts. The
4802 * context block must have already been enabled. */
4803 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4804 rc = bnx2_init_5709_context(bp);
4805 if (rc)
4806 return rc;
4807 } else
4808 bnx2_init_context(bp);
4810 if ((rc = bnx2_init_cpus(bp)) != 0)
4811 return rc;
4813 bnx2_init_nvram(bp);
4815 bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
4817 val = REG_RD(bp, BNX2_MQ_CONFIG);
4818 val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
4819 val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
4820 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4821 val |= BNX2_MQ_CONFIG_BIN_MQ_MODE;
4822 if (CHIP_REV(bp) == CHIP_REV_Ax)
4823 val |= BNX2_MQ_CONFIG_HALT_DIS;
4826 REG_WR(bp, BNX2_MQ_CONFIG, val);
4828 val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
4829 REG_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
4830 REG_WR(bp, BNX2_MQ_KNL_WIND_END, val);
4832 val = (BCM_PAGE_BITS - 8) << 24;
4833 REG_WR(bp, BNX2_RV2P_CONFIG, val);
4835 /* Configure page size. */
4836 val = REG_RD(bp, BNX2_TBDR_CONFIG);
4837 val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
4838 val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
4839 REG_WR(bp, BNX2_TBDR_CONFIG, val);
4841 val = bp->mac_addr[0] +
4842 (bp->mac_addr[1] << 8) +
4843 (bp->mac_addr[2] << 16) +
4844 bp->mac_addr[3] +
4845 (bp->mac_addr[4] << 8) +
4846 (bp->mac_addr[5] << 16);
4847 REG_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);
4849 /* Program the MTU. Also include 4 bytes for CRC32. */
4850 mtu = bp->dev->mtu;
4851 val = mtu + ETH_HLEN + ETH_FCS_LEN;
4852 if (val > (MAX_ETHERNET_PACKET_SIZE + 4))
4853 val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
4854 REG_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);
4856 if (mtu < 1500)
4857 mtu = 1500;
4859 bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG, BNX2_RBUF_CONFIG_VAL(mtu));
4860 bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG2, BNX2_RBUF_CONFIG2_VAL(mtu));
4861 bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG3, BNX2_RBUF_CONFIG3_VAL(mtu));
4863 for (i = 0; i < BNX2_MAX_MSIX_VEC; i++)
4864 bp->bnx2_napi[i].last_status_idx = 0;
4866 bp->idle_chk_status_idx = 0xffff;
4868 bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE;
4870 /* Set up how to generate a link change interrupt. */
4871 REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
4873 REG_WR(bp, BNX2_HC_STATUS_ADDR_L,
4874 (u64) bp->status_blk_mapping & 0xffffffff);
4875 REG_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);
4877 REG_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
4878 (u64) bp->stats_blk_mapping & 0xffffffff);
4879 REG_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
4880 (u64) bp->stats_blk_mapping >> 32);
4882 REG_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
4883 (bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);
4885 REG_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
4886 (bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);
4888 REG_WR(bp, BNX2_HC_COMP_PROD_TRIP,
4889 (bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);
4891 REG_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);
4893 REG_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);
4895 REG_WR(bp, BNX2_HC_COM_TICKS,
4896 (bp->com_ticks_int << 16) | bp->com_ticks);
4898 REG_WR(bp, BNX2_HC_CMD_TICKS,
4899 (bp->cmd_ticks_int << 16) | bp->cmd_ticks);
4901 if (CHIP_NUM(bp) == CHIP_NUM_5708)
4902 REG_WR(bp, BNX2_HC_STATS_TICKS, 0);
4903 else
4904 REG_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks);
4905 REG_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8); /* 3ms */
4907 if (CHIP_ID(bp) == CHIP_ID_5706_A1)
4908 val = BNX2_HC_CONFIG_COLLECT_STATS;
4909 else {
4910 val = BNX2_HC_CONFIG_RX_TMR_MODE | BNX2_HC_CONFIG_TX_TMR_MODE |
4911 BNX2_HC_CONFIG_COLLECT_STATS;
4914 if (bp->irq_nvecs > 1) {
4915 REG_WR(bp, BNX2_HC_MSIX_BIT_VECTOR,
4916 BNX2_HC_MSIX_BIT_VECTOR_VAL);
4918 val |= BNX2_HC_CONFIG_SB_ADDR_INC_128B;
4921 if (bp->flags & BNX2_FLAG_ONE_SHOT_MSI)
4922 val |= BNX2_HC_CONFIG_ONE_SHOT;
4924 REG_WR(bp, BNX2_HC_CONFIG, val);
4926 for (i = 1; i < bp->irq_nvecs; i++) {
4927 u32 base = ((i - 1) * BNX2_HC_SB_CONFIG_SIZE) +
4928 BNX2_HC_SB_CONFIG_1;
4930 REG_WR(bp, base,
4931 BNX2_HC_SB_CONFIG_1_TX_TMR_MODE |
4932 BNX2_HC_SB_CONFIG_1_RX_TMR_MODE |
4933 BNX2_HC_SB_CONFIG_1_ONE_SHOT);
4935 REG_WR(bp, base + BNX2_HC_TX_QUICK_CONS_TRIP_OFF,
4936 (bp->tx_quick_cons_trip_int << 16) |
4937 bp->tx_quick_cons_trip);
4939 REG_WR(bp, base + BNX2_HC_TX_TICKS_OFF,
4940 (bp->tx_ticks_int << 16) | bp->tx_ticks);
4942 REG_WR(bp, base + BNX2_HC_RX_QUICK_CONS_TRIP_OFF,
4943 (bp->rx_quick_cons_trip_int << 16) |
4944 bp->rx_quick_cons_trip);
4946 REG_WR(bp, base + BNX2_HC_RX_TICKS_OFF,
4947 (bp->rx_ticks_int << 16) | bp->rx_ticks);
4950 /* Clear internal stats counters. */
4951 REG_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);
4953 REG_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_EVENTS);
4955 /* Initialize the receive filter. */
4956 bnx2_set_rx_mode(bp->dev);
4958 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4959 val = REG_RD(bp, BNX2_MISC_NEW_CORE_CTL);
4960 val |= BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
4961 REG_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
4963 rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
4964 1, 0);
4966 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, BNX2_MISC_ENABLE_DEFAULT);
4967 REG_RD(bp, BNX2_MISC_ENABLE_SET_BITS);
4969 udelay(20);
4971 bp->hc_cmd = REG_RD(bp, BNX2_HC_COMMAND);
4973 return rc;
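/*
 * A detail worth noting above: BNX2_EMAC_BACKOFF_SEED folds the
 * station address into two 24-bit halves so that different NICs
 * start their collision backoff sequences at different points.
 * With an illustrative MAC of 00:10:18:aa:bb:cc the seed works out
 * to 0x181000 + 0xccbbaa.
 */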
4976 static void
4977 bnx2_clear_ring_states(struct bnx2 *bp)
4979 struct bnx2_napi *bnapi;
4980 struct bnx2_tx_ring_info *txr;
4981 struct bnx2_rx_ring_info *rxr;
4982 int i;
4984 for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
4985 bnapi = &bp->bnx2_napi[i];
4986 txr = &bnapi->tx_ring;
4987 rxr = &bnapi->rx_ring;
4989 txr->tx_cons = 0;
4990 txr->hw_tx_cons = 0;
4991 rxr->rx_prod_bseq = 0;
4992 rxr->rx_prod = 0;
4993 rxr->rx_cons = 0;
4994 rxr->rx_pg_prod = 0;
4995 rxr->rx_pg_cons = 0;
4999 static void
5000 bnx2_init_tx_context(struct bnx2 *bp, u32 cid, struct bnx2_tx_ring_info *txr)
5002 u32 val, offset0, offset1, offset2, offset3;
5003 u32 cid_addr = GET_CID_ADDR(cid);
5005 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
5006 offset0 = BNX2_L2CTX_TYPE_XI;
5007 offset1 = BNX2_L2CTX_CMD_TYPE_XI;
5008 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
5009 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
5010 } else {
5011 offset0 = BNX2_L2CTX_TYPE;
5012 offset1 = BNX2_L2CTX_CMD_TYPE;
5013 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
5014 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
5016 val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
5017 bnx2_ctx_wr(bp, cid_addr, offset0, val);
5019 val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
5020 bnx2_ctx_wr(bp, cid_addr, offset1, val);
5022 val = (u64) txr->tx_desc_mapping >> 32;
5023 bnx2_ctx_wr(bp, cid_addr, offset2, val);
5025 val = (u64) txr->tx_desc_mapping & 0xffffffff;
5026 bnx2_ctx_wr(bp, cid_addr, offset3, val);
5029 static void
5030 bnx2_init_tx_ring(struct bnx2 *bp, int ring_num)
5032 struct tx_bd *txbd;
5033 u32 cid = TX_CID;
5034 struct bnx2_napi *bnapi;
5035 struct bnx2_tx_ring_info *txr;
5037 bnapi = &bp->bnx2_napi[ring_num];
5038 txr = &bnapi->tx_ring;
5040 if (ring_num == 0)
5041 cid = TX_CID;
5042 else
5043 cid = TX_TSS_CID + ring_num - 1;
5045 bp->tx_wake_thresh = bp->tx_ring_size / 2;
5047 txbd = &txr->tx_desc_ring[MAX_TX_DESC_CNT];
5049 txbd->tx_bd_haddr_hi = (u64) txr->tx_desc_mapping >> 32;
5050 txbd->tx_bd_haddr_lo = (u64) txr->tx_desc_mapping & 0xffffffff;
5052 txr->tx_prod = 0;
5053 txr->tx_prod_bseq = 0;
5055 txr->tx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BIDX;
5056 txr->tx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BSEQ;
5058 bnx2_init_tx_context(bp, cid, txr);
5061 static void
5062 bnx2_init_rxbd_rings(struct rx_bd *rx_ring[], dma_addr_t dma[], u32 buf_size,
5063 int num_rings)
5065 int i;
5066 struct rx_bd *rxbd;
5068 for (i = 0; i < num_rings; i++) {
5069 int j;
5071 rxbd = &rx_ring[i][0];
5072 for (j = 0; j < MAX_RX_DESC_CNT; j++, rxbd++) {
5073 rxbd->rx_bd_len = buf_size;
5074 rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
5076 if (i == (num_rings - 1))
5077 j = 0;
5078 else
5079 j = i + 1;
5080 rxbd->rx_bd_haddr_hi = (u64) dma[j] >> 32;
5081 rxbd->rx_bd_haddr_lo = (u64) dma[j] & 0xffffffff;
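/*
 * Each RX ring is a chain of BD pages: the first MAX_RX_DESC_CNT
 * descriptors of a page are real buffer descriptors, and the last
 * entry is reused as a link BD whose host address points at the next
 * page (the final page links back to page 0).  That is what the
 * dma[j] write above sets up, with j wrapping at num_rings - 1.
 */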
5085 static void
5086 bnx2_init_rx_ring(struct bnx2 *bp, int ring_num)
5088 int i;
5089 u16 prod, ring_prod;
5090 u32 cid, rx_cid_addr, val;
5091 struct bnx2_napi *bnapi = &bp->bnx2_napi[ring_num];
5092 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
5094 if (ring_num == 0)
5095 cid = RX_CID;
5096 else
5097 cid = RX_RSS_CID + ring_num - 1;
5099 rx_cid_addr = GET_CID_ADDR(cid);
5101 bnx2_init_rxbd_rings(rxr->rx_desc_ring, rxr->rx_desc_mapping,
5102 bp->rx_buf_use_size, bp->rx_max_ring);
5104 bnx2_init_rx_context(bp, cid);
5106 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
5107 val = REG_RD(bp, BNX2_MQ_MAP_L2_5);
5108 REG_WR(bp, BNX2_MQ_MAP_L2_5, val | BNX2_MQ_MAP_L2_5_ARM);
5111 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, 0);
5112 if (bp->rx_pg_ring_size) {
5113 bnx2_init_rxbd_rings(rxr->rx_pg_desc_ring,
5114 rxr->rx_pg_desc_mapping,
5115 PAGE_SIZE, bp->rx_max_pg_ring);
5116 val = (bp->rx_buf_use_size << 16) | PAGE_SIZE;
5117 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, val);
5118 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_RBDC_KEY,
5119 BNX2_L2CTX_RBDC_JUMBO_KEY - ring_num);
5121 val = (u64) rxr->rx_pg_desc_mapping[0] >> 32;
5122 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_HI, val);
5124 val = (u64) rxr->rx_pg_desc_mapping[0] & 0xffffffff;
5125 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_LO, val);
5127 if (CHIP_NUM(bp) == CHIP_NUM_5709)
5128 REG_WR(bp, BNX2_MQ_MAP_L2_3, BNX2_MQ_MAP_L2_3_DEFAULT);
5131 val = (u64) rxr->rx_desc_mapping[0] >> 32;
5132 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val);
5134 val = (u64) rxr->rx_desc_mapping[0] & 0xffffffff;
5135 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val);
5137 ring_prod = prod = rxr->rx_pg_prod;
5138 for (i = 0; i < bp->rx_pg_ring_size; i++) {
5139 if (bnx2_alloc_rx_page(bp, rxr, ring_prod) < 0)
5140 break;
5141 prod = NEXT_RX_BD(prod);
5142 ring_prod = RX_PG_RING_IDX(prod);
5144 rxr->rx_pg_prod = prod;
5146 ring_prod = prod = rxr->rx_prod;
5147 for (i = 0; i < bp->rx_ring_size; i++) {
5148 if (bnx2_alloc_rx_skb(bp, rxr, ring_prod) < 0)
5149 break;
5150 prod = NEXT_RX_BD(prod);
5151 ring_prod = RX_RING_IDX(prod);
5153 rxr->rx_prod = prod;
5155 rxr->rx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BDIDX;
5156 rxr->rx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BSEQ;
5157 rxr->rx_pg_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_PG_BDIDX;
5159 REG_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);
5160 REG_WR16(bp, rxr->rx_bidx_addr, prod);
5162 REG_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);
5165 static void
5166 bnx2_init_all_rings(struct bnx2 *bp)
5168 int i;
5169 u32 val;
5171 bnx2_clear_ring_states(bp);
5173 REG_WR(bp, BNX2_TSCH_TSS_CFG, 0);
5174 for (i = 0; i < bp->num_tx_rings; i++)
5175 bnx2_init_tx_ring(bp, i);
5177 if (bp->num_tx_rings > 1)
5178 REG_WR(bp, BNX2_TSCH_TSS_CFG, ((bp->num_tx_rings - 1) << 24) |
5179 (TX_TSS_CID << 7));
5181 REG_WR(bp, BNX2_RLUP_RSS_CONFIG, 0);
5182 bnx2_reg_wr_ind(bp, BNX2_RXP_SCRATCH_RSS_TBL_SZ, 0);
5184 for (i = 0; i < bp->num_rx_rings; i++)
5185 bnx2_init_rx_ring(bp, i);
5187 if (bp->num_rx_rings > 1) {
5188 u32 tbl_32;
5189 u8 *tbl = (u8 *) &tbl_32;
5191 bnx2_reg_wr_ind(bp, BNX2_RXP_SCRATCH_RSS_TBL_SZ,
5192 BNX2_RXP_SCRATCH_RSS_TBL_MAX_ENTRIES);
5194 for (i = 0; i < BNX2_RXP_SCRATCH_RSS_TBL_MAX_ENTRIES; i++) {
5195 tbl[i % 4] = i % (bp->num_rx_rings - 1);
5196 if ((i % 4) == 3)
5197 bnx2_reg_wr_ind(bp,
5198 BNX2_RXP_SCRATCH_RSS_TBL + i,
5199 cpu_to_be32(tbl_32));
5202 val = BNX2_RLUP_RSS_CONFIG_IPV4_RSS_TYPE_ALL_XI |
5203 BNX2_RLUP_RSS_CONFIG_IPV6_RSS_TYPE_ALL_XI;
5205 REG_WR(bp, BNX2_RLUP_RSS_CONFIG, val);
5210 static u32 bnx2_find_max_ring(u32 ring_size, u32 max_size)
5212 u32 max, num_rings = 1;
5214 while (ring_size > MAX_RX_DESC_CNT) {
5215 ring_size -= MAX_RX_DESC_CNT;
5216 num_rings++;
5218 /* round to next power of 2 */
5219 max = max_size;
5220 while ((max & num_rings) == 0)
5221 max >>= 1;
5223 if (num_rings != max)
5224 max <<= 1;
5226 return max;
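/*
 * bnx2_find_max_ring() returns the number of BD pages needed for
 * ring_size entries, rounded up to a power of two and capped at
 * max_size.  Worked example (assuming MAX_RX_DESC_CNT == 255 and
 * max_size == 8): ring_size = 600 leaves the subtract loop with
 * num_rings == 3, max shifts down 8 -> 4 -> 2, and since 3 != 2 the
 * final shift returns 4 pages.
 */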
5229 static void
5230 bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
5232 u32 rx_size, rx_space, jumbo_size;
5234 /* 8 for CRC and VLAN */
5235 rx_size = bp->dev->mtu + ETH_HLEN + BNX2_RX_OFFSET + 8;
5237 rx_space = SKB_DATA_ALIGN(rx_size + BNX2_RX_ALIGN) + NET_SKB_PAD +
5238 sizeof(struct skb_shared_info);
5240 bp->rx_copy_thresh = BNX2_RX_COPY_THRESH;
5241 bp->rx_pg_ring_size = 0;
5242 bp->rx_max_pg_ring = 0;
5243 bp->rx_max_pg_ring_idx = 0;
5244 if ((rx_space > PAGE_SIZE) && !(bp->flags & BNX2_FLAG_JUMBO_BROKEN)) {
5245 int pages = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;
5247 jumbo_size = size * pages;
5248 if (jumbo_size > MAX_TOTAL_RX_PG_DESC_CNT)
5249 jumbo_size = MAX_TOTAL_RX_PG_DESC_CNT;
5251 bp->rx_pg_ring_size = jumbo_size;
5252 bp->rx_max_pg_ring = bnx2_find_max_ring(jumbo_size,
5253 MAX_RX_PG_RINGS);
5254 bp->rx_max_pg_ring_idx = (bp->rx_max_pg_ring * RX_DESC_CNT) - 1;
5255 rx_size = BNX2_RX_COPY_THRESH + BNX2_RX_OFFSET;
5256 bp->rx_copy_thresh = 0;
5259 bp->rx_buf_use_size = rx_size;
5260 /* hw alignment */
5261 bp->rx_buf_size = bp->rx_buf_use_size + BNX2_RX_ALIGN;
5262 bp->rx_jumbo_thresh = rx_size - BNX2_RX_OFFSET;
5263 bp->rx_ring_size = size;
5264 bp->rx_max_ring = bnx2_find_max_ring(size, MAX_RX_RINGS);
5265 bp->rx_max_ring_idx = (bp->rx_max_ring * RX_DESC_CNT) - 1;
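/*
 * For jumbo MTUs the receive path splits each frame: roughly the
 * first BNX2_RX_COPY_THRESH bytes land in the ordinary skb ring and
 * the rest in the page ring, so rx_pg_ring_size is scaled by how
 * many PAGE_SIZE pages an MTU-sized frame can span.  With a standard
 * 1500-byte MTU on 4 KiB pages, rx_space stays under PAGE_SIZE and
 * the page ring remains disabled (rx_pg_ring_size == 0).
 */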
5268 static void
5269 bnx2_free_tx_skbs(struct bnx2 *bp)
5271 int i;
5273 for (i = 0; i < bp->num_tx_rings; i++) {
5274 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
5275 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
5276 int j;
5278 if (txr->tx_buf_ring == NULL)
5279 continue;
5281 for (j = 0; j < TX_DESC_CNT; ) {
5282 struct sw_tx_bd *tx_buf = &txr->tx_buf_ring[j];
5283 struct sk_buff *skb = tx_buf->skb;
5285 if (skb == NULL) {
5286 j++;
5287 continue;
5290 skb_dma_unmap(&bp->pdev->dev, skb, DMA_TO_DEVICE);
5292 tx_buf->skb = NULL;
5294 j += skb_shinfo(skb)->nr_frags + 1;
5295 dev_kfree_skb(skb);
5300 static void
5301 bnx2_free_rx_skbs(struct bnx2 *bp)
5303 int i;
5305 for (i = 0; i < bp->num_rx_rings; i++) {
5306 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
5307 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
5308 int j;
5310 if (rxr->rx_buf_ring == NULL)
5311 return;
5313 for (j = 0; j < bp->rx_max_ring_idx; j++) {
5314 struct sw_bd *rx_buf = &rxr->rx_buf_ring[j];
5315 struct sk_buff *skb = rx_buf->skb;
5317 if (skb == NULL)
5318 continue;
5320 pci_unmap_single(bp->pdev,
5321 pci_unmap_addr(rx_buf, mapping),
5322 bp->rx_buf_use_size,
5323 PCI_DMA_FROMDEVICE);
5325 rx_buf->skb = NULL;
5327 dev_kfree_skb(skb);
5329 for (j = 0; j < bp->rx_max_pg_ring_idx; j++)
5330 bnx2_free_rx_page(bp, rxr, j);
5334 static void
5335 bnx2_free_skbs(struct bnx2 *bp)
5337 bnx2_free_tx_skbs(bp);
5338 bnx2_free_rx_skbs(bp);
5341 static int
5342 bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
5344 int rc;
5346 rc = bnx2_reset_chip(bp, reset_code);
5347 bnx2_free_skbs(bp);
5348 if (rc)
5349 return rc;
5351 if ((rc = bnx2_init_chip(bp)) != 0)
5352 return rc;
5354 bnx2_init_all_rings(bp);
5355 return 0;
5358 static int
5359 bnx2_init_nic(struct bnx2 *bp, int reset_phy)
5361 int rc;
5363 if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
5364 return rc;
5366 spin_lock_bh(&bp->phy_lock);
5367 bnx2_init_phy(bp, reset_phy);
5368 bnx2_set_link(bp);
5369 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
5370 bnx2_remote_phy_event(bp);
5371 spin_unlock_bh(&bp->phy_lock);
5372 return 0;
5375 static int
5376 bnx2_shutdown_chip(struct bnx2 *bp)
5378 u32 reset_code;
5380 if (bp->flags & BNX2_FLAG_NO_WOL)
5381 reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
5382 else if (bp->wol)
5383 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
5384 else
5385 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
5387 return bnx2_reset_chip(bp, reset_code);
5390 static int
5391 bnx2_test_registers(struct bnx2 *bp)
5393 int ret;
5394 int i, is_5709;
5395 static const struct {
5396 u16 offset;
5397 u16 flags;
5398 #define BNX2_FL_NOT_5709 1
5399 u32 rw_mask;
5400 u32 ro_mask;
5401 } reg_tbl[] = {
5402 { 0x006c, 0, 0x00000000, 0x0000003f },
5403 { 0x0090, 0, 0xffffffff, 0x00000000 },
5404 { 0x0094, 0, 0x00000000, 0x00000000 },
5406 { 0x0404, BNX2_FL_NOT_5709, 0x00003f00, 0x00000000 },
5407 { 0x0418, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5408 { 0x041c, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5409 { 0x0420, BNX2_FL_NOT_5709, 0x00000000, 0x80ffffff },
5410 { 0x0424, BNX2_FL_NOT_5709, 0x00000000, 0x00000000 },
5411 { 0x0428, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
5412 { 0x0450, BNX2_FL_NOT_5709, 0x00000000, 0x0000ffff },
5413 { 0x0454, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5414 { 0x0458, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5416 { 0x0808, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5417 { 0x0854, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5418 { 0x0868, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
5419 { 0x086c, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
5420 { 0x0870, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
5421 { 0x0874, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
5423 { 0x0c00, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
5424 { 0x0c04, BNX2_FL_NOT_5709, 0x00000000, 0x03ff0001 },
5425 { 0x0c08, BNX2_FL_NOT_5709, 0x0f0ff073, 0x00000000 },
5427 { 0x1000, 0, 0x00000000, 0x00000001 },
5428 { 0x1004, BNX2_FL_NOT_5709, 0x00000000, 0x000f0001 },
5430 { 0x1408, 0, 0x01c00800, 0x00000000 },
5431 { 0x149c, 0, 0x8000ffff, 0x00000000 },
5432 { 0x14a8, 0, 0x00000000, 0x000001ff },
5433 { 0x14ac, 0, 0x0fffffff, 0x10000000 },
5434 { 0x14b0, 0, 0x00000002, 0x00000001 },
5435 { 0x14b8, 0, 0x00000000, 0x00000000 },
5436 { 0x14c0, 0, 0x00000000, 0x00000009 },
5437 { 0x14c4, 0, 0x00003fff, 0x00000000 },
5438 { 0x14cc, 0, 0x00000000, 0x00000001 },
5439 { 0x14d0, 0, 0xffffffff, 0x00000000 },
5441 { 0x1800, 0, 0x00000000, 0x00000001 },
5442 { 0x1804, 0, 0x00000000, 0x00000003 },
5444 { 0x2800, 0, 0x00000000, 0x00000001 },
5445 { 0x2804, 0, 0x00000000, 0x00003f01 },
5446 { 0x2808, 0, 0x0f3f3f03, 0x00000000 },
5447 { 0x2810, 0, 0xffff0000, 0x00000000 },
5448 { 0x2814, 0, 0xffff0000, 0x00000000 },
5449 { 0x2818, 0, 0xffff0000, 0x00000000 },
5450 { 0x281c, 0, 0xffff0000, 0x00000000 },
5451 { 0x2834, 0, 0xffffffff, 0x00000000 },
5452 { 0x2840, 0, 0x00000000, 0xffffffff },
5453 { 0x2844, 0, 0x00000000, 0xffffffff },
5454 { 0x2848, 0, 0xffffffff, 0x00000000 },
5455 { 0x284c, 0, 0xf800f800, 0x07ff07ff },
5457 { 0x2c00, 0, 0x00000000, 0x00000011 },
5458 { 0x2c04, 0, 0x00000000, 0x00030007 },
5460 { 0x3c00, 0, 0x00000000, 0x00000001 },
5461 { 0x3c04, 0, 0x00000000, 0x00070000 },
5462 { 0x3c08, 0, 0x00007f71, 0x07f00000 },
5463 { 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
5464 { 0x3c10, 0, 0xffffffff, 0x00000000 },
5465 { 0x3c14, 0, 0x00000000, 0xffffffff },
5466 { 0x3c18, 0, 0x00000000, 0xffffffff },
5467 { 0x3c1c, 0, 0xfffff000, 0x00000000 },
5468 { 0x3c20, 0, 0xffffff00, 0x00000000 },
5470 { 0x5004, 0, 0x00000000, 0x0000007f },
5471 { 0x5008, 0, 0x0f0007ff, 0x00000000 },
5473 { 0x5c00, 0, 0x00000000, 0x00000001 },
5474 { 0x5c04, 0, 0x00000000, 0x0003000f },
5475 { 0x5c08, 0, 0x00000003, 0x00000000 },
5476 { 0x5c0c, 0, 0x0000fff8, 0x00000000 },
5477 { 0x5c10, 0, 0x00000000, 0xffffffff },
5478 { 0x5c80, 0, 0x00000000, 0x0f7113f1 },
5479 { 0x5c84, 0, 0x00000000, 0x0000f333 },
5480 { 0x5c88, 0, 0x00000000, 0x00077373 },
5481 { 0x5c8c, 0, 0x00000000, 0x0007f737 },
5483 { 0x6808, 0, 0x0000ff7f, 0x00000000 },
5484 { 0x680c, 0, 0xffffffff, 0x00000000 },
5485 { 0x6810, 0, 0xffffffff, 0x00000000 },
5486 { 0x6814, 0, 0xffffffff, 0x00000000 },
5487 { 0x6818, 0, 0xffffffff, 0x00000000 },
5488 { 0x681c, 0, 0xffffffff, 0x00000000 },
5489 { 0x6820, 0, 0x00ff00ff, 0x00000000 },
5490 { 0x6824, 0, 0x00ff00ff, 0x00000000 },
5491 { 0x6828, 0, 0x00ff00ff, 0x00000000 },
5492 { 0x682c, 0, 0x03ff03ff, 0x00000000 },
5493 { 0x6830, 0, 0x03ff03ff, 0x00000000 },
5494 { 0x6834, 0, 0x03ff03ff, 0x00000000 },
5495 { 0x6838, 0, 0x03ff03ff, 0x00000000 },
5496 { 0x683c, 0, 0x0000ffff, 0x00000000 },
5497 { 0x6840, 0, 0x00000ff0, 0x00000000 },
5498 { 0x6844, 0, 0x00ffff00, 0x00000000 },
5499 { 0x684c, 0, 0xffffffff, 0x00000000 },
5500 { 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
5501 { 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
5502 { 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
5503 { 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
5504 { 0x6908, 0, 0x00000000, 0x0001ff0f },
5505 { 0x690c, 0, 0x00000000, 0x0ffe00f0 },
5507 { 0xffff, 0, 0x00000000, 0x00000000 },
5510 ret = 0;
5511 is_5709 = 0;
5512 if (CHIP_NUM(bp) == CHIP_NUM_5709)
5513 is_5709 = 1;
5515 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
5516 u32 offset, rw_mask, ro_mask, save_val, val;
5517 u16 flags = reg_tbl[i].flags;
5519 if (is_5709 && (flags & BNX2_FL_NOT_5709))
5520 continue;
5522 offset = (u32) reg_tbl[i].offset;
5523 rw_mask = reg_tbl[i].rw_mask;
5524 ro_mask = reg_tbl[i].ro_mask;
5526 save_val = readl(bp->regview + offset);
5528 writel(0, bp->regview + offset);
5530 val = readl(bp->regview + offset);
5531 if ((val & rw_mask) != 0) {
5532 goto reg_test_err;
5535 if ((val & ro_mask) != (save_val & ro_mask)) {
5536 goto reg_test_err;
5539 writel(0xffffffff, bp->regview + offset);
5541 val = readl(bp->regview + offset);
5542 if ((val & rw_mask) != rw_mask) {
5543 goto reg_test_err;
5546 if ((val & ro_mask) != (save_val & ro_mask)) {
5547 goto reg_test_err;
5550 writel(save_val, bp->regview + offset);
5551 continue;
5553 reg_test_err:
5554 writel(save_val, bp->regview + offset);
5555 ret = -ENODEV;
5556 break;
5558 return ret;
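/*
 * Each reg_tbl entry carries two masks: rw_mask bits must follow
 * whatever is written (checked with both 0 and 0xffffffff), while
 * ro_mask bits must always read back as their saved value.  For
 * { 0x14b0, 0, 0x00000002, 0x00000001 }, bit 1 is verified as
 * read-write and bit 0 as read-only; save_val is restored afterwards
 * either way.
 */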
5561 static int
5562 bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
5564 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
5565 0xaaaaaaaa, 0xaa55aa55, 0x55aa55aa };
5566 int i;
5568 for (i = 0; i < sizeof(test_pattern) / 4; i++) {
5569 u32 offset;
5571 for (offset = 0; offset < size; offset += 4) {
5573 bnx2_reg_wr_ind(bp, start + offset, test_pattern[i]);
5575 if (bnx2_reg_rd_ind(bp, start + offset) !=
5576 test_pattern[i]) {
5577 return -ENODEV;
5581 return 0;
5584 static int
5585 bnx2_test_memory(struct bnx2 *bp)
5587 int ret = 0;
5588 int i;
5589 static struct mem_entry {
5590 u32 offset;
5591 u32 len;
5592 } mem_tbl_5706[] = {
5593 { 0x60000, 0x4000 },
5594 { 0xa0000, 0x3000 },
5595 { 0xe0000, 0x4000 },
5596 { 0x120000, 0x4000 },
5597 { 0x1a0000, 0x4000 },
5598 { 0x160000, 0x4000 },
5599 { 0xffffffff, 0 },
5601 mem_tbl_5709[] = {
5602 { 0x60000, 0x4000 },
5603 { 0xa0000, 0x3000 },
5604 { 0xe0000, 0x4000 },
5605 { 0x120000, 0x4000 },
5606 { 0x1a0000, 0x4000 },
5607 { 0xffffffff, 0 },
5609 struct mem_entry *mem_tbl;
5611 if (CHIP_NUM(bp) == CHIP_NUM_5709)
5612 mem_tbl = mem_tbl_5709;
5613 else
5614 mem_tbl = mem_tbl_5706;
5616 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
5617 if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
5618 mem_tbl[i].len)) != 0) {
5619 return ret;
5623 return ret;
5626 #define BNX2_MAC_LOOPBACK 0
5627 #define BNX2_PHY_LOOPBACK 1
5629 static int
5630 bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
5632 unsigned int pkt_size, num_pkts, i;
5633 struct sk_buff *skb, *rx_skb;
5634 unsigned char *packet;
5635 u16 rx_start_idx, rx_idx;
5636 dma_addr_t map;
5637 struct tx_bd *txbd;
5638 struct sw_bd *rx_buf;
5639 struct l2_fhdr *rx_hdr;
5640 int ret = -ENODEV;
5641 struct bnx2_napi *bnapi = &bp->bnx2_napi[0], *tx_napi;
5642 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
5643 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
5645 tx_napi = bnapi;
5647 txr = &tx_napi->tx_ring;
5648 rxr = &bnapi->rx_ring;
5649 if (loopback_mode == BNX2_MAC_LOOPBACK) {
5650 bp->loopback = MAC_LOOPBACK;
5651 bnx2_set_mac_loopback(bp);
5653 else if (loopback_mode == BNX2_PHY_LOOPBACK) {
5654 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
5655 return 0;
5657 bp->loopback = PHY_LOOPBACK;
5658 bnx2_set_phy_loopback(bp);
5660 else
5661 return -EINVAL;
5663 pkt_size = min(bp->dev->mtu + ETH_HLEN, bp->rx_jumbo_thresh - 4);
5664 skb = netdev_alloc_skb(bp->dev, pkt_size);
5665 if (!skb)
5666 return -ENOMEM;
5667 packet = skb_put(skb, pkt_size);
5668 memcpy(packet, bp->dev->dev_addr, 6);
5669 memset(packet + 6, 0x0, 8);
5670 for (i = 14; i < pkt_size; i++)
5671 packet[i] = (unsigned char) (i & 0xff);
5673 if (skb_dma_map(&bp->pdev->dev, skb, DMA_TO_DEVICE)) {
5674 dev_kfree_skb(skb);
5675 return -EIO;
5677 map = skb_shinfo(skb)->dma_head;
5679 REG_WR(bp, BNX2_HC_COMMAND,
5680 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
5682 REG_RD(bp, BNX2_HC_COMMAND);
5684 udelay(5);
5685 rx_start_idx = bnx2_get_hw_rx_cons(bnapi);
5687 num_pkts = 0;
5689 txbd = &txr->tx_desc_ring[TX_RING_IDX(txr->tx_prod)];
5691 txbd->tx_bd_haddr_hi = (u64) map >> 32;
5692 txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
5693 txbd->tx_bd_mss_nbytes = pkt_size;
5694 txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;
5696 num_pkts++;
5697 txr->tx_prod = NEXT_TX_BD(txr->tx_prod);
5698 txr->tx_prod_bseq += pkt_size;
5700 REG_WR16(bp, txr->tx_bidx_addr, txr->tx_prod);
5701 REG_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);
5703 udelay(100);
5705 REG_WR(bp, BNX2_HC_COMMAND,
5706 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
5708 REG_RD(bp, BNX2_HC_COMMAND);
5710 udelay(5);
5712 skb_dma_unmap(&bp->pdev->dev, skb, DMA_TO_DEVICE);
5713 dev_kfree_skb(skb);
5715 if (bnx2_get_hw_tx_cons(tx_napi) != txr->tx_prod)
5716 goto loopback_test_done;
5718 rx_idx = bnx2_get_hw_rx_cons(bnapi);
5719 if (rx_idx != rx_start_idx + num_pkts) {
5720 goto loopback_test_done;
5723 rx_buf = &rxr->rx_buf_ring[rx_start_idx];
5724 rx_skb = rx_buf->skb;
5726 rx_hdr = (struct l2_fhdr *) rx_skb->data;
5727 skb_reserve(rx_skb, BNX2_RX_OFFSET);
5729 pci_dma_sync_single_for_cpu(bp->pdev,
5730 pci_unmap_addr(rx_buf, mapping),
5731 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
5733 if (rx_hdr->l2_fhdr_status &
5734 (L2_FHDR_ERRORS_BAD_CRC |
5735 L2_FHDR_ERRORS_PHY_DECODE |
5736 L2_FHDR_ERRORS_ALIGNMENT |
5737 L2_FHDR_ERRORS_TOO_SHORT |
5738 L2_FHDR_ERRORS_GIANT_FRAME)) {
5740 goto loopback_test_done;
5743 if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) {
5744 goto loopback_test_done;
5747 for (i = 14; i < pkt_size; i++) {
5748 if (*(rx_skb->data + i) != (unsigned char) (i & 0xff)) {
5749 goto loopback_test_done;
5753 ret = 0;
5755 loopback_test_done:
5756 bp->loopback = 0;
5757 return ret;
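/*
 * The loopback frame is self-describing: destination MAC set to the
 * NIC's own address, 8 zero bytes covering the source MAC and
 * EtherType, then a payload of (i & 0xff) starting at offset 14.
 * Verification only has to recompute that sequence against
 * rx_skb->data, after checking the l2_fhdr for RX errors and the
 * expected length (pkt_len minus the 4-byte CRC).
 */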
5760 #define BNX2_MAC_LOOPBACK_FAILED 1
5761 #define BNX2_PHY_LOOPBACK_FAILED 2
5762 #define BNX2_LOOPBACK_FAILED (BNX2_MAC_LOOPBACK_FAILED | \
5763 BNX2_PHY_LOOPBACK_FAILED)
5765 static int
5766 bnx2_test_loopback(struct bnx2 *bp)
5768 int rc = 0;
5770 if (!netif_running(bp->dev))
5771 return BNX2_LOOPBACK_FAILED;
5773 bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
5774 spin_lock_bh(&bp->phy_lock);
5775 bnx2_init_phy(bp, 1);
5776 spin_unlock_bh(&bp->phy_lock);
5777 if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
5778 rc |= BNX2_MAC_LOOPBACK_FAILED;
5779 if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
5780 rc |= BNX2_PHY_LOOPBACK_FAILED;
5781 return rc;
5784 #define NVRAM_SIZE 0x200
5785 #define CRC32_RESIDUAL 0xdebb20e3
5787 static int
5788 bnx2_test_nvram(struct bnx2 *bp)
5790 __be32 buf[NVRAM_SIZE / 4];
5791 u8 *data = (u8 *) buf;
5792 int rc = 0;
5793 u32 magic, csum;
5795 if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
5796 goto test_nvram_done;
5798 magic = be32_to_cpu(buf[0]);
5799 if (magic != 0x669955aa) {
5800 rc = -ENODEV;
5801 goto test_nvram_done;
5804 if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0)
5805 goto test_nvram_done;
5807 csum = ether_crc_le(0x100, data);
5808 if (csum != CRC32_RESIDUAL) {
5809 rc = -ENODEV;
5810 goto test_nvram_done;
5813 csum = ether_crc_le(0x100, data + 0x100);
5814 if (csum != CRC32_RESIDUAL) {
5815 rc = -ENODEV;
5818 test_nvram_done:
5819 return rc;
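/*
 * The checksum test uses the CRC residual property: running CRC32
 * over a block that already has its little-endian CRC appended
 * yields a constant residual (0xdebb20e3) regardless of the data,
 * so each 0x100-byte region can be validated without knowing where
 * its CRC field sits.
 */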
5822 static int
5823 bnx2_test_link(struct bnx2 *bp)
5825 u32 bmsr;
5827 if (!netif_running(bp->dev))
5828 return -ENODEV;
5830 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
5831 if (bp->link_up)
5832 return 0;
5833 return -ENODEV;
5835 spin_lock_bh(&bp->phy_lock);
5836 bnx2_enable_bmsr1(bp);
5837 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
5838 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
5839 bnx2_disable_bmsr1(bp);
5840 spin_unlock_bh(&bp->phy_lock);
5842 if (bmsr & BMSR_LSTATUS) {
5843 return 0;
5845 return -ENODEV;
5848 static int
5849 bnx2_test_intr(struct bnx2 *bp)
5851 int i;
5852 u16 status_idx;
5854 if (!netif_running(bp->dev))
5855 return -ENODEV;
5857 status_idx = REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;
5859 /* This register is not touched during run-time. */
5860 REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
5861 REG_RD(bp, BNX2_HC_COMMAND);
5863 for (i = 0; i < 10; i++) {
5864 if ((REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
5865 status_idx) {
5867 break;
5870 msleep_interruptible(10);
5872 if (i < 10)
5873 return 0;
5875 return -ENODEV;
5878 /* Determine link state for parallel detection. */
5879 static int
5880 bnx2_5706_serdes_has_link(struct bnx2 *bp)
5882 u32 mode_ctl, an_dbg, exp;
5884 if (bp->phy_flags & BNX2_PHY_FLAG_NO_PARALLEL)
5885 return 0;
5887 bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_MODE_CTL);
5888 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &mode_ctl);
5890 if (!(mode_ctl & MISC_SHDW_MODE_CTL_SIG_DET))
5891 return 0;
5893 bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
5894 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
5895 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
5897 if (an_dbg & (MISC_SHDW_AN_DBG_NOSYNC | MISC_SHDW_AN_DBG_RUDI_INVALID))
5898 return 0;
5900 bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_REG1);
5901 bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);
5902 bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);
5904 if (exp & MII_EXPAND_REG1_RUDI_C) /* receiving CONFIG */
5905 return 0;
5907 return 1;
5910 static void
5911 bnx2_5706_serdes_timer(struct bnx2 *bp)
5913 int check_link = 1;
5915 spin_lock(&bp->phy_lock);
5916 if (bp->serdes_an_pending) {
5917 bp->serdes_an_pending--;
5918 check_link = 0;
5919 } else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
5920 u32 bmcr;
5922 bp->current_interval = BNX2_TIMER_INTERVAL;
5924 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
5926 if (bmcr & BMCR_ANENABLE) {
5927 if (bnx2_5706_serdes_has_link(bp)) {
5928 bmcr &= ~BMCR_ANENABLE;
5929 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
5930 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
5931 bp->phy_flags |= BNX2_PHY_FLAG_PARALLEL_DETECT;
5935 else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
5936 (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)) {
5937 u32 phy2;
5939 bnx2_write_phy(bp, 0x17, 0x0f01);
5940 bnx2_read_phy(bp, 0x15, &phy2);
5941 if (phy2 & 0x20) {
5942 u32 bmcr;
5944 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
5945 bmcr |= BMCR_ANENABLE;
5946 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
5948 bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
5950 } else
5951 bp->current_interval = BNX2_TIMER_INTERVAL;
5953 if (check_link) {
5954 u32 val;
5956 bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
5957 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);
5958 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);
5960 if (bp->link_up && (val & MISC_SHDW_AN_DBG_NOSYNC)) {
5961 if (!(bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN)) {
5962 bnx2_5706s_force_link_dn(bp, 1);
5963 bp->phy_flags |= BNX2_PHY_FLAG_FORCED_DOWN;
5964 } else
5965 bnx2_set_link(bp);
5966 } else if (!bp->link_up && !(val & MISC_SHDW_AN_DBG_NOSYNC))
5967 bnx2_set_link(bp);
5969 spin_unlock(&bp->phy_lock);
5972 static void
5973 bnx2_5708_serdes_timer(struct bnx2 *bp)
5975 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
5976 return;
5978 if ((bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) == 0) {
5979 bp->serdes_an_pending = 0;
5980 return;
5983 spin_lock(&bp->phy_lock);
5984 if (bp->serdes_an_pending)
5985 bp->serdes_an_pending--;
5986 else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
5987 u32 bmcr;
5989 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
5990 if (bmcr & BMCR_ANENABLE) {
5991 bnx2_enable_forced_2g5(bp);
5992 bp->current_interval = BNX2_SERDES_FORCED_TIMEOUT;
5993 } else {
5994 bnx2_disable_forced_2g5(bp);
5995 bp->serdes_an_pending = 2;
5996 bp->current_interval = BNX2_TIMER_INTERVAL;
5999 } else
6000 bp->current_interval = BNX2_TIMER_INTERVAL;
6002 spin_unlock(&bp->phy_lock);
6005 static void
6006 bnx2_timer(unsigned long data)
6008 struct bnx2 *bp = (struct bnx2 *) data;
6010 if (!netif_running(bp->dev))
6011 return;
6013 if (atomic_read(&bp->intr_sem) != 0)
6014 goto bnx2_restart_timer;
6016 if ((bp->flags & (BNX2_FLAG_USING_MSI | BNX2_FLAG_ONE_SHOT_MSI)) ==
6017 BNX2_FLAG_USING_MSI)
6018 bnx2_chk_missed_msi(bp);
6020 bnx2_send_heart_beat(bp);
6022 bp->stats_blk->stat_FwRxDrop =
6023 bnx2_reg_rd_ind(bp, BNX2_FW_RX_DROP_COUNT);
6025 /* Work around occasionally corrupted counters. */
6026 if (CHIP_NUM(bp) == CHIP_NUM_5708 && bp->stats_ticks)
6027 REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd |
6028 BNX2_HC_COMMAND_STATS_NOW);
6030 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
6031 if (CHIP_NUM(bp) == CHIP_NUM_5706)
6032 bnx2_5706_serdes_timer(bp);
6033 else
6034 bnx2_5708_serdes_timer(bp);
6037 bnx2_restart_timer:
6038 mod_timer(&bp->timer, jiffies + bp->current_interval);
6041 static int
6042 bnx2_request_irq(struct bnx2 *bp)
6044 unsigned long flags;
6045 struct bnx2_irq *irq;
6046 int rc = 0, i;
6048 if (bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)
6049 flags = 0;
6050 else
6051 flags = IRQF_SHARED;
6053 for (i = 0; i < bp->irq_nvecs; i++) {
6054 irq = &bp->irq_tbl[i];
6055 rc = request_irq(irq->vector, irq->handler, flags, irq->name,
6056 &bp->bnx2_napi[i]);
6057 if (rc)
6058 break;
6059 irq->requested = 1;
6061 return rc;
6064 static void
6065 bnx2_free_irq(struct bnx2 *bp)
6067 struct bnx2_irq *irq;
6068 int i;
6070 for (i = 0; i < bp->irq_nvecs; i++) {
6071 irq = &bp->irq_tbl[i];
6072 if (irq->requested)
6073 free_irq(irq->vector, &bp->bnx2_napi[i]);
6074 irq->requested = 0;
6076 if (bp->flags & BNX2_FLAG_USING_MSI)
6077 pci_disable_msi(bp->pdev);
6078 else if (bp->flags & BNX2_FLAG_USING_MSIX)
6079 pci_disable_msix(bp->pdev);
6081 bp->flags &= ~(BNX2_FLAG_USING_MSI_OR_MSIX | BNX2_FLAG_ONE_SHOT_MSI);
6084 static void
6085 bnx2_enable_msix(struct bnx2 *bp, int msix_vecs)
6087 int i, rc;
6088 struct msix_entry msix_ent[BNX2_MAX_MSIX_VEC];
6089 struct net_device *dev = bp->dev;
6090 const int len = sizeof(bp->irq_tbl[0].name);
6092 bnx2_setup_msix_tbl(bp);
6093 REG_WR(bp, BNX2_PCI_MSIX_CONTROL, BNX2_MAX_MSIX_HW_VEC - 1);
6094 REG_WR(bp, BNX2_PCI_MSIX_TBL_OFF_BIR, BNX2_PCI_GRC_WINDOW2_BASE);
6095 REG_WR(bp, BNX2_PCI_MSIX_PBA_OFF_BIT, BNX2_PCI_GRC_WINDOW3_BASE);
6097 for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
6098 msix_ent[i].entry = i;
6099 msix_ent[i].vector = 0;
6102 rc = pci_enable_msix(bp->pdev, msix_ent, BNX2_MAX_MSIX_VEC);
6103 if (rc != 0)
6104 return;
6106 bp->irq_nvecs = msix_vecs;
6107 bp->flags |= BNX2_FLAG_USING_MSIX | BNX2_FLAG_ONE_SHOT_MSI;
6108 for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
6109 bp->irq_tbl[i].vector = msix_ent[i].vector;
6110 snprintf(bp->irq_tbl[i].name, len, "%s-%d", dev->name, i);
6111 bp->irq_tbl[i].handler = bnx2_msi_1shot;
6115 static void
6116 bnx2_setup_int_mode(struct bnx2 *bp, int dis_msi)
6118 int cpus = num_online_cpus();
6119 int msix_vecs = min(cpus + 1, RX_MAX_RINGS);
6121 bp->irq_tbl[0].handler = bnx2_interrupt;
6122 strcpy(bp->irq_tbl[0].name, bp->dev->name);
6123 bp->irq_nvecs = 1;
6124 bp->irq_tbl[0].vector = bp->pdev->irq;
6126 if ((bp->flags & BNX2_FLAG_MSIX_CAP) && !dis_msi && cpus > 1)
6127 bnx2_enable_msix(bp, msix_vecs);
6129 if ((bp->flags & BNX2_FLAG_MSI_CAP) && !dis_msi &&
6130 !(bp->flags & BNX2_FLAG_USING_MSIX)) {
6131 if (pci_enable_msi(bp->pdev) == 0) {
6132 bp->flags |= BNX2_FLAG_USING_MSI;
6133 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
6134 bp->flags |= BNX2_FLAG_ONE_SHOT_MSI;
6135 bp->irq_tbl[0].handler = bnx2_msi_1shot;
6136 } else
6137 bp->irq_tbl[0].handler = bnx2_msi;
6139 bp->irq_tbl[0].vector = bp->pdev->irq;
6143 bp->num_tx_rings = rounddown_pow_of_two(bp->irq_nvecs);
6144 bp->dev->real_num_tx_queues = bp->num_tx_rings;
6146 bp->num_rx_rings = bp->irq_nvecs;
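/*
 * Interrupt mode selection falls through in priority order: MSI-X
 * with up to min(num_online_cpus() + 1, RX_MAX_RINGS) vectors, then
 * plain MSI (one-shot on 5709), then legacy INTx with IRQF_SHARED.
 * The ring counts follow the result: one RX ring per vector, and TX
 * rings rounded down to a power of two of the vector count.
 */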
6149 /* Called with rtnl_lock */
6150 static int
6151 bnx2_open(struct net_device *dev)
6153 struct bnx2 *bp = netdev_priv(dev);
6154 int rc;
6156 netif_carrier_off(dev);
6158 bnx2_set_power_state(bp, PCI_D0);
6159 bnx2_disable_int(bp);
6161 bnx2_setup_int_mode(bp, disable_msi);
6162 bnx2_napi_enable(bp);
6163 rc = bnx2_alloc_mem(bp);
6164 if (rc)
6165 goto open_err;
6167 rc = bnx2_request_irq(bp);
6168 if (rc)
6169 goto open_err;
6171 rc = bnx2_init_nic(bp, 1);
6172 if (rc)
6173 goto open_err;
6175 mod_timer(&bp->timer, jiffies + bp->current_interval);
6177 atomic_set(&bp->intr_sem, 0);
6179 bnx2_enable_int(bp);
6181 if (bp->flags & BNX2_FLAG_USING_MSI) {
6182 /* Test MSI to make sure it is working
6183 * If MSI test fails, go back to INTx mode. */
6185 if (bnx2_test_intr(bp) != 0) {
6186 printk(KERN_WARNING PFX "%s: No interrupt was generated"
6187 " using MSI, switching to INTx mode. Please"
6188 " report this failure to the PCI maintainer"
6189 " and include system chipset information.\n",
6190 bp->dev->name);
6192 bnx2_disable_int(bp);
6193 bnx2_free_irq(bp);
6195 bnx2_setup_int_mode(bp, 1);
6197 rc = bnx2_init_nic(bp, 0);
6199 if (!rc)
6200 rc = bnx2_request_irq(bp);
6202 if (rc) {
6203 del_timer_sync(&bp->timer);
6204 goto open_err;
6206 bnx2_enable_int(bp);
6209 if (bp->flags & BNX2_FLAG_USING_MSI)
6210 printk(KERN_INFO PFX "%s: using MSI\n", dev->name);
6211 else if (bp->flags & BNX2_FLAG_USING_MSIX)
6212 printk(KERN_INFO PFX "%s: using MSIX\n", dev->name);
6214 netif_tx_start_all_queues(dev);
6216 return 0;
6218 open_err:
6219 bnx2_napi_disable(bp);
6220 bnx2_free_skbs(bp);
6221 bnx2_free_irq(bp);
6222 bnx2_free_mem(bp);
6223 return rc;
6226 static void
6227 bnx2_reset_task(struct work_struct *work)
6229 struct bnx2 *bp = container_of(work, struct bnx2, reset_task);
6231 if (!netif_running(bp->dev))
6232 return;
6234 bnx2_netif_stop(bp);
6236 bnx2_init_nic(bp, 1);
6238 atomic_set(&bp->intr_sem, 1);
6239 bnx2_netif_start(bp);
6242 static void
6243 bnx2_tx_timeout(struct net_device *dev)
6245 struct bnx2 *bp = netdev_priv(dev);
6247 /* This allows the netif to be shut down gracefully before resetting */
6248 schedule_work(&bp->reset_task);
6251 #ifdef BCM_VLAN
6252 /* Called with rtnl_lock */
6253 static void
6254 bnx2_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp)
6256 struct bnx2 *bp = netdev_priv(dev);
6258 bnx2_netif_stop(bp);
6260 bp->vlgrp = vlgrp;
6261 bnx2_set_rx_mode(dev);
6262 if (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN)
6263 bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_KEEP_VLAN_UPDATE, 0, 1);
6265 bnx2_netif_start(bp);
6267 #endif
6269 /* Called with netif_tx_lock.
6270 * bnx2_tx_int() runs without netif_tx_lock unless it needs to call
6271 * netif_wake_queue(). */
6273 static int
6274 bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
6276 struct bnx2 *bp = netdev_priv(dev);
6277 dma_addr_t mapping;
6278 struct tx_bd *txbd;
6279 struct sw_tx_bd *tx_buf;
6280 u32 len, vlan_tag_flags, last_frag, mss;
6281 u16 prod, ring_prod;
6282 int i;
6283 struct bnx2_napi *bnapi;
6284 struct bnx2_tx_ring_info *txr;
6285 struct netdev_queue *txq;
6286 struct skb_shared_info *sp;
6288 /* Determine which tx ring this skb will be placed on */
6289 i = skb_get_queue_mapping(skb);
6290 bnapi = &bp->bnx2_napi[i];
6291 txr = &bnapi->tx_ring;
6292 txq = netdev_get_tx_queue(dev, i);
6294 if (unlikely(bnx2_tx_avail(bp, txr) <
6295 (skb_shinfo(skb)->nr_frags + 1))) {
6296 netif_tx_stop_queue(txq);
6297 printk(KERN_ERR PFX "%s: BUG! Tx ring full when queue awake!\n",
6298 dev->name);
6300 return NETDEV_TX_BUSY;
6302 len = skb_headlen(skb);
6303 prod = txr->tx_prod;
6304 ring_prod = TX_RING_IDX(prod);
6306 vlan_tag_flags = 0;
6307 if (skb->ip_summed == CHECKSUM_PARTIAL) {
6308 vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
6311 #ifdef BCM_VLAN
6312 if (bp->vlgrp && vlan_tx_tag_present(skb)) {
6313 vlan_tag_flags |=
6314 (TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
6316 #endif
6317 if ((mss = skb_shinfo(skb)->gso_size)) {
6318 u32 tcp_opt_len;
6319 struct iphdr *iph;
6321 vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;
6323 tcp_opt_len = tcp_optlen(skb);
6325 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
6326 u32 tcp_off = skb_transport_offset(skb) -
6327 sizeof(struct ipv6hdr) - ETH_HLEN;
6329 vlan_tag_flags |= ((tcp_opt_len >> 2) << 8) |
6330 TX_BD_FLAGS_SW_FLAGS;
6331 if (likely(tcp_off == 0))
6332 vlan_tag_flags &= ~TX_BD_FLAGS_TCP6_OFF0_MSK;
6333 else {
6334 tcp_off >>= 3;
6335 vlan_tag_flags |= ((tcp_off & 0x3) <<
6336 TX_BD_FLAGS_TCP6_OFF0_SHL) |
6337 ((tcp_off & 0x10) <<
6338 TX_BD_FLAGS_TCP6_OFF4_SHL);
6339 mss |= (tcp_off & 0xc) << TX_BD_TCP6_OFF2_SHL;
6341 } else {
6342 iph = ip_hdr(skb);
6343 if (tcp_opt_len || (iph->ihl > 5)) {
6344 vlan_tag_flags |= ((iph->ihl - 5) +
6345 (tcp_opt_len >> 2)) << 8;
6348 } else
6349 mss = 0;
6351 if (skb_dma_map(&bp->pdev->dev, skb, DMA_TO_DEVICE)) {
6352 dev_kfree_skb(skb);
6353 return NETDEV_TX_OK;
6356 sp = skb_shinfo(skb);
6357 mapping = sp->dma_head;
6359 tx_buf = &txr->tx_buf_ring[ring_prod];
6360 tx_buf->skb = skb;
6362 txbd = &txr->tx_desc_ring[ring_prod];
6364 txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
6365 txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
6366 txbd->tx_bd_mss_nbytes = len | (mss << 16);
6367 txbd->tx_bd_vlan_tag_flags = vlan_tag_flags | TX_BD_FLAGS_START;
6369 last_frag = skb_shinfo(skb)->nr_frags;
6370 tx_buf->nr_frags = last_frag;
6371 tx_buf->is_gso = skb_is_gso(skb);
6373 for (i = 0; i < last_frag; i++) {
6374 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
6376 prod = NEXT_TX_BD(prod);
6377 ring_prod = TX_RING_IDX(prod);
6378 txbd = &txr->tx_desc_ring[ring_prod];
6380 len = frag->size;
6381 mapping = sp->dma_maps[i];
6383 txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
6384 txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
6385 txbd->tx_bd_mss_nbytes = len | (mss << 16);
6386 txbd->tx_bd_vlan_tag_flags = vlan_tag_flags;
6389 txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;
6391 prod = NEXT_TX_BD(prod);
6392 txr->tx_prod_bseq += skb->len;
6394 REG_WR16(bp, txr->tx_bidx_addr, prod);
6395 REG_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);
6397 mmiowb();
6399 txr->tx_prod = prod;
6401 if (unlikely(bnx2_tx_avail(bp, txr) <= MAX_SKB_FRAGS)) {
6402 netif_tx_stop_queue(txq);
6403 if (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)
6404 netif_tx_wake_queue(txq);
6407 return NETDEV_TX_OK;
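/*
 * BD chain layout for one skb: the head BD carries the checksum, LSO
 * and VLAN flags plus TX_BD_FLAGS_START, each page fragment gets its
 * own BD with the same flag word minus START, and the final BD is
 * tagged TX_BD_FLAGS_END.  Only after the whole chain is built are
 * the producer index and byte-sequence mailboxes written, so the
 * chip never fetches a half-constructed chain.
 */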
6410 /* Called with rtnl_lock */
6411 static int
6412 bnx2_close(struct net_device *dev)
6414 struct bnx2 *bp = netdev_priv(dev);
6416 cancel_work_sync(&bp->reset_task);
6418 bnx2_disable_int_sync(bp);
6419 bnx2_napi_disable(bp);
6420 del_timer_sync(&bp->timer);
6421 bnx2_shutdown_chip(bp);
6422 bnx2_free_irq(bp);
6423 bnx2_free_skbs(bp);
6424 bnx2_free_mem(bp);
6425 bp->link_up = 0;
6426 netif_carrier_off(bp->dev);
6427 bnx2_set_power_state(bp, PCI_D3hot);
6428 return 0;
6431 #define GET_NET_STATS64(ctr) \
6432 (unsigned long) ((unsigned long) (ctr##_hi) << 32) + \
6433 (unsigned long) (ctr##_lo)
6435 #define GET_NET_STATS32(ctr) \
6436 (ctr##_lo)
6438 #if (BITS_PER_LONG == 64)
6439 #define GET_NET_STATS GET_NET_STATS64
6440 #else
6441 #define GET_NET_STATS GET_NET_STATS32
6442 #endif
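/*
 * The chip keeps 64-bit counters as _hi/_lo register pairs, and
 * GET_NET_STATS() pastes the suffix onto the field name.  For
 * example,
 *
 *	GET_NET_STATS64(stats_blk->stat_IfHCInOctets)
 *
 * reads stats_blk->stat_IfHCInOctets_hi and _lo and combines them;
 * 32-bit kernels just take the low half and accept wraparound.
 */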
6444 static struct net_device_stats *
6445 bnx2_get_stats(struct net_device *dev)
6447 struct bnx2 *bp = netdev_priv(dev);
6448 struct statistics_block *stats_blk = bp->stats_blk;
6449 struct net_device_stats *net_stats = &dev->stats;
6451 if (bp->stats_blk == NULL) {
6452 return net_stats;
6454 net_stats->rx_packets =
6455 GET_NET_STATS(stats_blk->stat_IfHCInUcastPkts) +
6456 GET_NET_STATS(stats_blk->stat_IfHCInMulticastPkts) +
6457 GET_NET_STATS(stats_blk->stat_IfHCInBroadcastPkts);
6459 net_stats->tx_packets =
6460 GET_NET_STATS(stats_blk->stat_IfHCOutUcastPkts) +
6461 GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts) +
6462 GET_NET_STATS(stats_blk->stat_IfHCOutBroadcastPkts);
6464 net_stats->rx_bytes =
6465 GET_NET_STATS(stats_blk->stat_IfHCInOctets);
6467 net_stats->tx_bytes =
6468 GET_NET_STATS(stats_blk->stat_IfHCOutOctets);
6470 net_stats->multicast =
6471 GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts);
6473 net_stats->collisions =
6474 (unsigned long) stats_blk->stat_EtherStatsCollisions;
6476 net_stats->rx_length_errors =
6477 (unsigned long) (stats_blk->stat_EtherStatsUndersizePkts +
6478 stats_blk->stat_EtherStatsOverrsizePkts);
6480 net_stats->rx_over_errors =
6481 (unsigned long) stats_blk->stat_IfInMBUFDiscards;
6483 net_stats->rx_frame_errors =
6484 (unsigned long) stats_blk->stat_Dot3StatsAlignmentErrors;
6486 net_stats->rx_crc_errors =
6487 (unsigned long) stats_blk->stat_Dot3StatsFCSErrors;
6489 net_stats->rx_errors = net_stats->rx_length_errors +
6490 net_stats->rx_over_errors + net_stats->rx_frame_errors +
6491 net_stats->rx_crc_errors;
6493 net_stats->tx_aborted_errors =
6494 (unsigned long) (stats_blk->stat_Dot3StatsExcessiveCollisions +
6495 stats_blk->stat_Dot3StatsLateCollisions);
6497 if ((CHIP_NUM(bp) == CHIP_NUM_5706) ||
6498 (CHIP_ID(bp) == CHIP_ID_5708_A0))
6499 net_stats->tx_carrier_errors = 0;
6500 else {
6501 net_stats->tx_carrier_errors =
6502 (unsigned long)
6503 stats_blk->stat_Dot3StatsCarrierSenseErrors;
6506 net_stats->tx_errors =
6507 (unsigned long)
6508 stats_blk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors +
6510 net_stats->tx_aborted_errors +
6511 net_stats->tx_carrier_errors;
6513 net_stats->rx_missed_errors =
6514 (unsigned long) (stats_blk->stat_IfInMBUFDiscards +
6515 stats_blk->stat_FwRxDrop);
6517 return net_stats;
6520 /* All ethtool functions called with rtnl_lock */
6522 static int
6523 bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
6525 struct bnx2 *bp = netdev_priv(dev);
6526 int support_serdes = 0, support_copper = 0;
6528 cmd->supported = SUPPORTED_Autoneg;
6529 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
6530 support_serdes = 1;
6531 support_copper = 1;
6532 } else if (bp->phy_port == PORT_FIBRE)
6533 support_serdes = 1;
6534 else
6535 support_copper = 1;
6537 if (support_serdes) {
6538 cmd->supported |= SUPPORTED_1000baseT_Full |
6539 SUPPORTED_FIBRE;
6540 if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
6541 cmd->supported |= SUPPORTED_2500baseX_Full;
6544 if (support_copper) {
6545 cmd->supported |= SUPPORTED_10baseT_Half |
6546 SUPPORTED_10baseT_Full |
6547 SUPPORTED_100baseT_Half |
6548 SUPPORTED_100baseT_Full |
6549 SUPPORTED_1000baseT_Full |
6550 SUPPORTED_TP;
6554 spin_lock_bh(&bp->phy_lock);
6555 cmd->port = bp->phy_port;
6556 cmd->advertising = bp->advertising;
6558 if (bp->autoneg & AUTONEG_SPEED) {
6559 cmd->autoneg = AUTONEG_ENABLE;
6560 }
6561 else {
6562 cmd->autoneg = AUTONEG_DISABLE;
6565 if (netif_carrier_ok(dev)) {
6566 cmd->speed = bp->line_speed;
6567 cmd->duplex = bp->duplex;
6568 }
6569 else {
6570 cmd->speed = -1;
6571 cmd->duplex = -1;
6573 spin_unlock_bh(&bp->phy_lock);
6575 cmd->transceiver = XCVR_INTERNAL;
6576 cmd->phy_address = bp->phy_addr;
6578 return 0;
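/* Added usage sketch (interface name "eth0" is illustrative): a plain
 *   ethtool eth0
 * from userspace reaches this hook and prints the port, advertised
 * modes, speed, duplex and autoneg state assembled above.
 */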
6581 static int
6582 bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
6584 struct bnx2 *bp = netdev_priv(dev);
6585 u8 autoneg = bp->autoneg;
6586 u8 req_duplex = bp->req_duplex;
6587 u16 req_line_speed = bp->req_line_speed;
6588 u32 advertising = bp->advertising;
6589 int err = -EINVAL;
6591 spin_lock_bh(&bp->phy_lock);
6593 if (cmd->port != PORT_TP && cmd->port != PORT_FIBRE)
6594 goto err_out_unlock;
6596 if (cmd->port != bp->phy_port &&
6597 !(bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP))
6598 goto err_out_unlock;
6600 /* If device is down, we can store the settings only if the user
6601 * is setting the currently active port.
6602 */
6603 if (!netif_running(dev) && cmd->port != bp->phy_port)
6604 goto err_out_unlock;
6606 if (cmd->autoneg == AUTONEG_ENABLE) {
6607 autoneg |= AUTONEG_SPEED;
6609 cmd->advertising &= ETHTOOL_ALL_COPPER_SPEED;
6611 /* allow advertising a single speed */
6612 if ((cmd->advertising == ADVERTISED_10baseT_Half) ||
6613 (cmd->advertising == ADVERTISED_10baseT_Full) ||
6614 (cmd->advertising == ADVERTISED_100baseT_Half) ||
6615 (cmd->advertising == ADVERTISED_100baseT_Full)) {
6617 if (cmd->port == PORT_FIBRE)
6618 goto err_out_unlock;
6620 advertising = cmd->advertising;
6622 } else if (cmd->advertising == ADVERTISED_2500baseX_Full) {
6623 if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) ||
6624 (cmd->port == PORT_TP))
6625 goto err_out_unlock;
6626 } else if (cmd->advertising == ADVERTISED_1000baseT_Full)
6627 advertising = cmd->advertising;
6628 else if (cmd->advertising == ADVERTISED_1000baseT_Half)
6629 goto err_out_unlock;
6630 else {
6631 if (cmd->port == PORT_FIBRE)
6632 advertising = ETHTOOL_ALL_FIBRE_SPEED;
6633 else
6634 advertising = ETHTOOL_ALL_COPPER_SPEED;
6636 advertising |= ADVERTISED_Autoneg;
6637 }
6638 else {
6639 if (cmd->port == PORT_FIBRE) {
6640 if ((cmd->speed != SPEED_1000 &&
6641 cmd->speed != SPEED_2500) ||
6642 (cmd->duplex != DUPLEX_FULL))
6643 goto err_out_unlock;
6645 if (cmd->speed == SPEED_2500 &&
6646 !(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
6647 goto err_out_unlock;
6649 else if (cmd->speed == SPEED_1000 || cmd->speed == SPEED_2500)
6650 goto err_out_unlock;
6652 autoneg &= ~AUTONEG_SPEED;
6653 req_line_speed = cmd->speed;
6654 req_duplex = cmd->duplex;
6655 advertising = 0;
6658 bp->autoneg = autoneg;
6659 bp->advertising = advertising;
6660 bp->req_line_speed = req_line_speed;
6661 bp->req_duplex = req_duplex;
6663 err = 0;
6664 /* If device is down, the new settings will be picked up when it is
6665 * brought up.
6666 */
6667 if (netif_running(dev))
6668 err = bnx2_setup_phy(bp, cmd->port);
6670 err_out_unlock:
6671 spin_unlock_bh(&bp->phy_lock);
6673 return err;
6676 static void
6677 bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
6679 struct bnx2 *bp = netdev_priv(dev);
6681 strcpy(info->driver, DRV_MODULE_NAME);
6682 strcpy(info->version, DRV_MODULE_VERSION);
6683 strcpy(info->bus_info, pci_name(bp->pdev));
6684 strcpy(info->fw_version, bp->fw_version);
6687 #define BNX2_REGDUMP_LEN (32 * 1024)
6689 static int
6690 bnx2_get_regs_len(struct net_device *dev)
6692 return BNX2_REGDUMP_LEN;
6695 static void
6696 bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
6698 u32 *p = _p, i, offset;
6699 u8 *orig_p = _p;
6700 struct bnx2 *bp = netdev_priv(dev);
6701 u32 reg_boundaries[] = { 0x0000, 0x0098, 0x0400, 0x045c,
6702 0x0800, 0x0880, 0x0c00, 0x0c10,
6703 0x0c30, 0x0d08, 0x1000, 0x101c,
6704 0x1040, 0x1048, 0x1080, 0x10a4,
6705 0x1400, 0x1490, 0x1498, 0x14f0,
6706 0x1500, 0x155c, 0x1580, 0x15dc,
6707 0x1600, 0x1658, 0x1680, 0x16d8,
6708 0x1800, 0x1820, 0x1840, 0x1854,
6709 0x1880, 0x1894, 0x1900, 0x1984,
6710 0x1c00, 0x1c0c, 0x1c40, 0x1c54,
6711 0x1c80, 0x1c94, 0x1d00, 0x1d84,
6712 0x2000, 0x2030, 0x23c0, 0x2400,
6713 0x2800, 0x2820, 0x2830, 0x2850,
6714 0x2b40, 0x2c10, 0x2fc0, 0x3058,
6715 0x3c00, 0x3c94, 0x4000, 0x4010,
6716 0x4080, 0x4090, 0x43c0, 0x4458,
6717 0x4c00, 0x4c18, 0x4c40, 0x4c54,
6718 0x4fc0, 0x5010, 0x53c0, 0x5444,
6719 0x5c00, 0x5c18, 0x5c80, 0x5c90,
6720 0x5fc0, 0x6000, 0x6400, 0x6428,
6721 0x6800, 0x6848, 0x684c, 0x6860,
6722 0x6888, 0x6910, 0x8000 };
6724 regs->version = 0;
6726 memset(p, 0, BNX2_REGDUMP_LEN);
6728 if (!netif_running(bp->dev))
6729 return;
6731 i = 0;
6732 offset = reg_boundaries[0];
6733 p += offset;
6734 while (offset < BNX2_REGDUMP_LEN) {
6735 *p++ = REG_RD(bp, offset);
6736 offset += 4;
6737 if (offset == reg_boundaries[i + 1]) {
6738 offset = reg_boundaries[i + 2];
6739 p = (u32 *) (orig_p + offset);
6740 i += 2;
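/* Added note: reg_boundaries[] above lists [start, end) pairs of
 * readable register ranges; this loop dumps one pair, then jumps both
 * offset and the output pointer across the gap to the next pair, so the
 * unreadable holes stay zero-filled from the earlier memset.
 */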
6745 static void
6746 bnx2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
6748 struct bnx2 *bp = netdev_priv(dev);
6750 if (bp->flags & BNX2_FLAG_NO_WOL) {
6751 wol->supported = 0;
6752 wol->wolopts = 0;
6753 }
6754 else {
6755 wol->supported = WAKE_MAGIC;
6756 if (bp->wol)
6757 wol->wolopts = WAKE_MAGIC;
6758 else
6759 wol->wolopts = 0;
6761 memset(&wol->sopass, 0, sizeof(wol->sopass));
6764 static int
6765 bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
6767 struct bnx2 *bp = netdev_priv(dev);
6769 if (wol->wolopts & ~WAKE_MAGIC)
6770 return -EINVAL;
6772 if (wol->wolopts & WAKE_MAGIC) {
6773 if (bp->flags & BNX2_FLAG_NO_WOL)
6774 return -EINVAL;
6776 bp->wol = 1;
6777 }
6778 else {
6779 bp->wol = 0;
6781 return 0;
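/* Added usage sketch (interface name "eth0" is illustrative):
 *   ethtool -s eth0 wol g
 * requests magic-packet wake-up through this hook; it succeeds unless
 * BNX2_FLAG_NO_WOL is set, and any non-magic WOL type returns -EINVAL.
 */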
6784 static int
6785 bnx2_nway_reset(struct net_device *dev)
6787 struct bnx2 *bp = netdev_priv(dev);
6788 u32 bmcr;
6790 if (!netif_running(dev))
6791 return -EAGAIN;
6793 if (!(bp->autoneg & AUTONEG_SPEED)) {
6794 return -EINVAL;
6795 }
6797 spin_lock_bh(&bp->phy_lock);
6799 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
6800 int rc;
6802 rc = bnx2_setup_remote_phy(bp, bp->phy_port);
6803 spin_unlock_bh(&bp->phy_lock);
6804 return rc;
6805 }
6807 /* Force a link down that is visible to the link partner */
6808 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
6809 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
6810 spin_unlock_bh(&bp->phy_lock);
6812 msleep(20);
6814 spin_lock_bh(&bp->phy_lock);
6816 bp->current_interval = BNX2_SERDES_AN_TIMEOUT;
6817 bp->serdes_an_pending = 1;
6818 mod_timer(&bp->timer, jiffies + bp->current_interval);
6819 }
6821 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
6822 bmcr &= ~BMCR_LOOPBACK;
6823 bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART | BMCR_ANENABLE);
6825 spin_unlock_bh(&bp->phy_lock);
6827 return 0;
6830 static u32
6831 bnx2_get_link(struct net_device *dev)
6833 struct bnx2 *bp = netdev_priv(dev);
6835 return bp->link_up;
6838 static int
6839 bnx2_get_eeprom_len(struct net_device *dev)
6841 struct bnx2 *bp = netdev_priv(dev);
6843 if (bp->flash_info == NULL)
6844 return 0;
6846 return (int) bp->flash_size;
6849 static int
6850 bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
6851 u8 *eebuf)
6853 struct bnx2 *bp = netdev_priv(dev);
6854 int rc;
6856 if (!netif_running(dev))
6857 return -EAGAIN;
6859 /* parameters already validated in ethtool_get_eeprom */
6861 rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
6863 return rc;
6866 static int
6867 bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
6868 u8 *eebuf)
6870 struct bnx2 *bp = netdev_priv(dev);
6871 int rc;
6873 if (!netif_running(dev))
6874 return -EAGAIN;
6876 /* parameters already validated in ethtool_set_eeprom */
6878 rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
6880 return rc;
6883 static int
6884 bnx2_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
6886 struct bnx2 *bp = netdev_priv(dev);
6888 memset(coal, 0, sizeof(struct ethtool_coalesce));
6890 coal->rx_coalesce_usecs = bp->rx_ticks;
6891 coal->rx_max_coalesced_frames = bp->rx_quick_cons_trip;
6892 coal->rx_coalesce_usecs_irq = bp->rx_ticks_int;
6893 coal->rx_max_coalesced_frames_irq = bp->rx_quick_cons_trip_int;
6895 coal->tx_coalesce_usecs = bp->tx_ticks;
6896 coal->tx_max_coalesced_frames = bp->tx_quick_cons_trip;
6897 coal->tx_coalesce_usecs_irq = bp->tx_ticks_int;
6898 coal->tx_max_coalesced_frames_irq = bp->tx_quick_cons_trip_int;
6900 coal->stats_block_coalesce_usecs = bp->stats_ticks;
6902 return 0;
6905 static int
6906 bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
6908 struct bnx2 *bp = netdev_priv(dev);
6910 bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
6911 if (bp->rx_ticks > 0x3ff) bp->rx_ticks = 0x3ff;
6913 bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames;
6914 if (bp->rx_quick_cons_trip > 0xff) bp->rx_quick_cons_trip = 0xff;
6916 bp->rx_ticks_int = (u16) coal->rx_coalesce_usecs_irq;
6917 if (bp->rx_ticks_int > 0x3ff) bp->rx_ticks_int = 0x3ff;
6919 bp->rx_quick_cons_trip_int = (u16) coal->rx_max_coalesced_frames_irq;
6920 if (bp->rx_quick_cons_trip_int > 0xff)
6921 bp->rx_quick_cons_trip_int = 0xff;
6923 bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
6924 if (bp->tx_ticks > 0x3ff) bp->tx_ticks = 0x3ff;
6926 bp->tx_quick_cons_trip = (u16) coal->tx_max_coalesced_frames;
6927 if (bp->tx_quick_cons_trip > 0xff) bp->tx_quick_cons_trip = 0xff;
6929 bp->tx_ticks_int = (u16) coal->tx_coalesce_usecs_irq;
6930 if (bp->tx_ticks_int > 0x3ff) bp->tx_ticks_int = 0x3ff;
6932 bp->tx_quick_cons_trip_int = (u16) coal->tx_max_coalesced_frames_irq;
6933 if (bp->tx_quick_cons_trip_int > 0xff) bp->tx_quick_cons_trip_int =
6934 0xff;
6936 bp->stats_ticks = coal->stats_block_coalesce_usecs;
6937 if (CHIP_NUM(bp) == CHIP_NUM_5708) {
6938 if (bp->stats_ticks != 0 && bp->stats_ticks != USEC_PER_SEC)
6939 bp->stats_ticks = USEC_PER_SEC;
6941 if (bp->stats_ticks > BNX2_HC_STATS_TICKS_HC_STAT_TICKS)
6942 bp->stats_ticks = BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
6943 bp->stats_ticks &= BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
6945 if (netif_running(bp->dev)) {
6946 bnx2_netif_stop(bp);
6947 bnx2_init_nic(bp, 0);
6948 bnx2_netif_start(bp);
6951 return 0;
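/* Added usage sketch (interface name "eth0" is illustrative):
 *   ethtool -C eth0 rx-usecs 18 rx-frames 6 tx-usecs 80 tx-frames 20
 * feeds this hook; the values are clamped to the hardware maximums
 * (0x3ff tick counts, 0xff frame counts) and a running NIC is restarted
 * so the new coalescing parameters take effect.
 */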
6954 static void
6955 bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
6957 struct bnx2 *bp = netdev_priv(dev);
6959 ering->rx_max_pending = MAX_TOTAL_RX_DESC_CNT;
6960 ering->rx_mini_max_pending = 0;
6961 ering->rx_jumbo_max_pending = MAX_TOTAL_RX_PG_DESC_CNT;
6963 ering->rx_pending = bp->rx_ring_size;
6964 ering->rx_mini_pending = 0;
6965 ering->rx_jumbo_pending = bp->rx_pg_ring_size;
6967 ering->tx_max_pending = MAX_TX_DESC_CNT;
6968 ering->tx_pending = bp->tx_ring_size;
6971 static int
6972 bnx2_change_ring_size(struct bnx2 *bp, u32 rx, u32 tx)
6974 if (netif_running(bp->dev)) {
6975 bnx2_netif_stop(bp);
6976 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
6977 bnx2_free_skbs(bp);
6978 bnx2_free_mem(bp);
6981 bnx2_set_rx_ring_size(bp, rx);
6982 bp->tx_ring_size = tx;
6984 if (netif_running(bp->dev)) {
6985 int rc;
6987 rc = bnx2_alloc_mem(bp);
6988 if (rc)
6989 return rc;
6990 bnx2_init_nic(bp, 0);
6991 bnx2_netif_start(bp);
6993 return 0;
6996 static int
6997 bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
6999 struct bnx2 *bp = netdev_priv(dev);
7000 int rc;
7002 if ((ering->rx_pending > MAX_TOTAL_RX_DESC_CNT) ||
7003 (ering->tx_pending > MAX_TX_DESC_CNT) ||
7004 (ering->tx_pending <= MAX_SKB_FRAGS)) {
7006 return -EINVAL;
7008 rc = bnx2_change_ring_size(bp, ering->rx_pending, ering->tx_pending);
7009 return rc;
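/* Added usage sketch (interface name "eth0" is illustrative):
 *   ethtool -G eth0 rx 255 tx 255
 * lands here; rings larger than MAX_TOTAL_RX_DESC_CNT / MAX_TX_DESC_CNT,
 * or a TX ring that cannot hold MAX_SKB_FRAGS + 1 descriptors, are
 * rejected with -EINVAL before the rings are resized.
 */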
7012 static void
7013 bnx2_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
7015 struct bnx2 *bp = netdev_priv(dev);
7017 epause->autoneg = ((bp->autoneg & AUTONEG_FLOW_CTRL) != 0);
7018 epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) != 0);
7019 epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) != 0);
7022 static int
7023 bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
7025 struct bnx2 *bp = netdev_priv(dev);
7027 bp->req_flow_ctrl = 0;
7028 if (epause->rx_pause)
7029 bp->req_flow_ctrl |= FLOW_CTRL_RX;
7030 if (epause->tx_pause)
7031 bp->req_flow_ctrl |= FLOW_CTRL_TX;
7033 if (epause->autoneg) {
7034 bp->autoneg |= AUTONEG_FLOW_CTRL;
7035 }
7036 else {
7037 bp->autoneg &= ~AUTONEG_FLOW_CTRL;
7040 if (netif_running(dev)) {
7041 spin_lock_bh(&bp->phy_lock);
7042 bnx2_setup_phy(bp, bp->phy_port);
7043 spin_unlock_bh(&bp->phy_lock);
7046 return 0;
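/* Added usage sketch (interface name "eth0" is illustrative):
 *   ethtool -A eth0 autoneg on rx on tx on
 * maps onto req_flow_ctrl and the AUTONEG_FLOW_CTRL bit above; if the
 * interface is up, the PHY is reprogrammed immediately under phy_lock.
 */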
7049 static u32
7050 bnx2_get_rx_csum(struct net_device *dev)
7052 struct bnx2 *bp = netdev_priv(dev);
7054 return bp->rx_csum;
7057 static int
7058 bnx2_set_rx_csum(struct net_device *dev, u32 data)
7060 struct bnx2 *bp = netdev_priv(dev);
7062 bp->rx_csum = data;
7063 return 0;
7066 static int
7067 bnx2_set_tso(struct net_device *dev, u32 data)
7069 struct bnx2 *bp = netdev_priv(dev);
7071 if (data) {
7072 dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
7073 if (CHIP_NUM(bp) == CHIP_NUM_5709)
7074 dev->features |= NETIF_F_TSO6;
7075 } else
7076 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6 |
7077 NETIF_F_TSO_ECN);
7078 return 0;
7081 #define BNX2_NUM_STATS 46
7083 static struct {
7084 char string[ETH_GSTRING_LEN];
7085 } bnx2_stats_str_arr[BNX2_NUM_STATS] = {
7086 { "rx_bytes" },
7087 { "rx_error_bytes" },
7088 { "tx_bytes" },
7089 { "tx_error_bytes" },
7090 { "rx_ucast_packets" },
7091 { "rx_mcast_packets" },
7092 { "rx_bcast_packets" },
7093 { "tx_ucast_packets" },
7094 { "tx_mcast_packets" },
7095 { "tx_bcast_packets" },
7096 { "tx_mac_errors" },
7097 { "tx_carrier_errors" },
7098 { "rx_crc_errors" },
7099 { "rx_align_errors" },
7100 { "tx_single_collisions" },
7101 { "tx_multi_collisions" },
7102 { "tx_deferred" },
7103 { "tx_excess_collisions" },
7104 { "tx_late_collisions" },
7105 { "tx_total_collisions" },
7106 { "rx_fragments" },
7107 { "rx_jabbers" },
7108 { "rx_undersize_packets" },
7109 { "rx_oversize_packets" },
7110 { "rx_64_byte_packets" },
7111 { "rx_65_to_127_byte_packets" },
7112 { "rx_128_to_255_byte_packets" },
7113 { "rx_256_to_511_byte_packets" },
7114 { "rx_512_to_1023_byte_packets" },
7115 { "rx_1024_to_1522_byte_packets" },
7116 { "rx_1523_to_9022_byte_packets" },
7117 { "tx_64_byte_packets" },
7118 { "tx_65_to_127_byte_packets" },
7119 { "tx_128_to_255_byte_packets" },
7120 { "tx_256_to_511_byte_packets" },
7121 { "tx_512_to_1023_byte_packets" },
7122 { "tx_1024_to_1522_byte_packets" },
7123 { "tx_1523_to_9022_byte_packets" },
7124 { "rx_xon_frames" },
7125 { "rx_xoff_frames" },
7126 { "tx_xon_frames" },
7127 { "tx_xoff_frames" },
7128 { "rx_mac_ctrl_frames" },
7129 { "rx_filtered_packets" },
7130 { "rx_discards" },
7131 { "rx_fw_discards" },
7134 #define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4)
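/* Added illustration: STATS_OFFSET32() converts a field name into a
 * u32-word index into struct statistics_block, e.g.
 *   STATS_OFFSET32(stat_IfHCInOctets_hi)
 * is offsetof(struct statistics_block, stat_IfHCInOctets_hi) / 4, so
 * hw_stats[index] in bnx2_get_ethtool_stats() below reads that word.
 */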
7136 static const unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
7137 STATS_OFFSET32(stat_IfHCInOctets_hi),
7138 STATS_OFFSET32(stat_IfHCInBadOctets_hi),
7139 STATS_OFFSET32(stat_IfHCOutOctets_hi),
7140 STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
7141 STATS_OFFSET32(stat_IfHCInUcastPkts_hi),
7142 STATS_OFFSET32(stat_IfHCInMulticastPkts_hi),
7143 STATS_OFFSET32(stat_IfHCInBroadcastPkts_hi),
7144 STATS_OFFSET32(stat_IfHCOutUcastPkts_hi),
7145 STATS_OFFSET32(stat_IfHCOutMulticastPkts_hi),
7146 STATS_OFFSET32(stat_IfHCOutBroadcastPkts_hi),
7147 STATS_OFFSET32(stat_emac_tx_stat_dot3statsinternalmactransmiterrors),
7148 STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
7149 STATS_OFFSET32(stat_Dot3StatsFCSErrors),
7150 STATS_OFFSET32(stat_Dot3StatsAlignmentErrors),
7151 STATS_OFFSET32(stat_Dot3StatsSingleCollisionFrames),
7152 STATS_OFFSET32(stat_Dot3StatsMultipleCollisionFrames),
7153 STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
7154 STATS_OFFSET32(stat_Dot3StatsExcessiveCollisions),
7155 STATS_OFFSET32(stat_Dot3StatsLateCollisions),
7156 STATS_OFFSET32(stat_EtherStatsCollisions),
7157 STATS_OFFSET32(stat_EtherStatsFragments),
7158 STATS_OFFSET32(stat_EtherStatsJabbers),
7159 STATS_OFFSET32(stat_EtherStatsUndersizePkts),
7160 STATS_OFFSET32(stat_EtherStatsOverrsizePkts),
7161 STATS_OFFSET32(stat_EtherStatsPktsRx64Octets),
7162 STATS_OFFSET32(stat_EtherStatsPktsRx65Octetsto127Octets),
7163 STATS_OFFSET32(stat_EtherStatsPktsRx128Octetsto255Octets),
7164 STATS_OFFSET32(stat_EtherStatsPktsRx256Octetsto511Octets),
7165 STATS_OFFSET32(stat_EtherStatsPktsRx512Octetsto1023Octets),
7166 STATS_OFFSET32(stat_EtherStatsPktsRx1024Octetsto1522Octets),
7167 STATS_OFFSET32(stat_EtherStatsPktsRx1523Octetsto9022Octets),
7168 STATS_OFFSET32(stat_EtherStatsPktsTx64Octets),
7169 STATS_OFFSET32(stat_EtherStatsPktsTx65Octetsto127Octets),
7170 STATS_OFFSET32(stat_EtherStatsPktsTx128Octetsto255Octets),
7171 STATS_OFFSET32(stat_EtherStatsPktsTx256Octetsto511Octets),
7172 STATS_OFFSET32(stat_EtherStatsPktsTx512Octetsto1023Octets),
7173 STATS_OFFSET32(stat_EtherStatsPktsTx1024Octetsto1522Octets),
7174 STATS_OFFSET32(stat_EtherStatsPktsTx1523Octetsto9022Octets),
7175 STATS_OFFSET32(stat_XonPauseFramesReceived),
7176 STATS_OFFSET32(stat_XoffPauseFramesReceived),
7177 STATS_OFFSET32(stat_OutXonSent),
7178 STATS_OFFSET32(stat_OutXoffSent),
7179 STATS_OFFSET32(stat_MacControlFramesReceived),
7180 STATS_OFFSET32(stat_IfInFramesL2FilterDiscards),
7181 STATS_OFFSET32(stat_IfInMBUFDiscards),
7182 STATS_OFFSET32(stat_FwRxDrop),
7185 /* stat_IfHCInBadOctets and stat_Dot3StatsCarrierSenseErrors are
7186 * skipped because of errata.
7187 */
7188 static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
7189 8,0,8,8,8,8,8,8,8,8,
7190 4,0,4,4,4,4,4,4,4,4,
7191 4,4,4,4,4,4,4,4,4,4,
7192 4,4,4,4,4,4,4,4,4,4,
7193 4,4,4,4,4,4,
7196 static u8 bnx2_5708_stats_len_arr[BNX2_NUM_STATS] = {
7197 8,0,8,8,8,8,8,8,8,8,
7198 4,4,4,4,4,4,4,4,4,4,
7199 4,4,4,4,4,4,4,4,4,4,
7200 4,4,4,4,4,4,4,4,4,4,
7201 4,4,4,4,4,4,
7204 #define BNX2_NUM_TESTS 6
7206 static struct {
7207 char string[ETH_GSTRING_LEN];
7208 } bnx2_tests_str_arr[BNX2_NUM_TESTS] = {
7209 { "register_test (offline)" },
7210 { "memory_test (offline)" },
7211 { "loopback_test (offline)" },
7212 { "nvram_test (online)" },
7213 { "interrupt_test (online)" },
7214 { "link_test (online)" },
7217 static int
7218 bnx2_get_sset_count(struct net_device *dev, int sset)
7220 switch (sset) {
7221 case ETH_SS_TEST:
7222 return BNX2_NUM_TESTS;
7223 case ETH_SS_STATS:
7224 return BNX2_NUM_STATS;
7225 default:
7226 return -EOPNOTSUPP;
7230 static void
7231 bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
7233 struct bnx2 *bp = netdev_priv(dev);
7235 bnx2_set_power_state(bp, PCI_D0);
7237 memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS);
7238 if (etest->flags & ETH_TEST_FL_OFFLINE) {
7239 int i;
7241 bnx2_netif_stop(bp);
7242 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
7243 bnx2_free_skbs(bp);
7245 if (bnx2_test_registers(bp) != 0) {
7246 buf[0] = 1;
7247 etest->flags |= ETH_TEST_FL_FAILED;
7249 if (bnx2_test_memory(bp) != 0) {
7250 buf[1] = 1;
7251 etest->flags |= ETH_TEST_FL_FAILED;
7253 if ((buf[2] = bnx2_test_loopback(bp)) != 0)
7254 etest->flags |= ETH_TEST_FL_FAILED;
7256 if (!netif_running(bp->dev))
7257 bnx2_shutdown_chip(bp);
7258 else {
7259 bnx2_init_nic(bp, 1);
7260 bnx2_netif_start(bp);
7261 }
7263 /* wait for link up */
7264 for (i = 0; i < 7; i++) {
7265 if (bp->link_up)
7266 break;
7267 msleep_interruptible(1000);
7268 }
7269 }
7271 if (bnx2_test_nvram(bp) != 0) {
7272 buf[3] = 1;
7273 etest->flags |= ETH_TEST_FL_FAILED;
7275 if (bnx2_test_intr(bp) != 0) {
7276 buf[4] = 1;
7277 etest->flags |= ETH_TEST_FL_FAILED;
7280 if (bnx2_test_link(bp) != 0) {
7281 buf[5] = 1;
7282 etest->flags |= ETH_TEST_FL_FAILED;
7285 if (!netif_running(bp->dev))
7286 bnx2_set_power_state(bp, PCI_D3hot);
7289 static void
7290 bnx2_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
7292 switch (stringset) {
7293 case ETH_SS_STATS:
7294 memcpy(buf, bnx2_stats_str_arr,
7295 sizeof(bnx2_stats_str_arr));
7296 break;
7297 case ETH_SS_TEST:
7298 memcpy(buf, bnx2_tests_str_arr,
7299 sizeof(bnx2_tests_str_arr));
7300 break;
7304 static void
7305 bnx2_get_ethtool_stats(struct net_device *dev,
7306 struct ethtool_stats *stats, u64 *buf)
7308 struct bnx2 *bp = netdev_priv(dev);
7309 int i;
7310 u32 *hw_stats = (u32 *) bp->stats_blk;
7311 u8 *stats_len_arr = NULL;
7313 if (hw_stats == NULL) {
7314 memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS);
7315 return;
7318 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
7319 (CHIP_ID(bp) == CHIP_ID_5706_A1) ||
7320 (CHIP_ID(bp) == CHIP_ID_5706_A2) ||
7321 (CHIP_ID(bp) == CHIP_ID_5708_A0))
7322 stats_len_arr = bnx2_5706_stats_len_arr;
7323 else
7324 stats_len_arr = bnx2_5708_stats_len_arr;
7326 for (i = 0; i < BNX2_NUM_STATS; i++) {
7327 if (stats_len_arr[i] == 0) {
7328 /* skip this counter */
7329 buf[i] = 0;
7330 continue;
7332 if (stats_len_arr[i] == 4) {
7333 /* 4-byte counter */
7334 buf[i] = (u64)
7335 *(hw_stats + bnx2_stats_offset_arr[i]);
7336 continue;
7338 /* 8-byte counter */
7339 buf[i] = (((u64) *(hw_stats +
7340 bnx2_stats_offset_arr[i])) << 32) +
7341 *(hw_stats + bnx2_stats_offset_arr[i] + 1);
7345 static int
7346 bnx2_phys_id(struct net_device *dev, u32 data)
7348 struct bnx2 *bp = netdev_priv(dev);
7349 int i;
7350 u32 save;
7352 bnx2_set_power_state(bp, PCI_D0);
7354 if (data == 0)
7355 data = 2;
7357 save = REG_RD(bp, BNX2_MISC_CFG);
7358 REG_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);
7360 for (i = 0; i < (data * 2); i++) {
7361 if ((i % 2) == 0) {
7362 REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE);
7363 }
7364 else {
7365 REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE |
7366 BNX2_EMAC_LED_1000MB_OVERRIDE |
7367 BNX2_EMAC_LED_100MB_OVERRIDE |
7368 BNX2_EMAC_LED_10MB_OVERRIDE |
7369 BNX2_EMAC_LED_TRAFFIC_OVERRIDE |
7370 BNX2_EMAC_LED_TRAFFIC);
7372 msleep_interruptible(500);
7373 if (signal_pending(current))
7374 break;
7376 REG_WR(bp, BNX2_EMAC_LED, 0);
7377 REG_WR(bp, BNX2_MISC_CFG, save);
7379 if (!netif_running(dev))
7380 bnx2_set_power_state(bp, PCI_D3hot);
7382 return 0;
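/* Added usage sketch (interface name "eth0" is illustrative):
 *   ethtool -p eth0 5
 * reaches this hook with data == 5 and blinks the port LED for about
 * five seconds (one toggle every 500 ms); data == 0 defaults to two.
 */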
7385 static int
7386 bnx2_set_tx_csum(struct net_device *dev, u32 data)
7388 struct bnx2 *bp = netdev_priv(dev);
7390 if (CHIP_NUM(bp) == CHIP_NUM_5709)
7391 return (ethtool_op_set_tx_ipv6_csum(dev, data));
7392 else
7393 return (ethtool_op_set_tx_csum(dev, data));
7396 static const struct ethtool_ops bnx2_ethtool_ops = {
7397 .get_settings = bnx2_get_settings,
7398 .set_settings = bnx2_set_settings,
7399 .get_drvinfo = bnx2_get_drvinfo,
7400 .get_regs_len = bnx2_get_regs_len,
7401 .get_regs = bnx2_get_regs,
7402 .get_wol = bnx2_get_wol,
7403 .set_wol = bnx2_set_wol,
7404 .nway_reset = bnx2_nway_reset,
7405 .get_link = bnx2_get_link,
7406 .get_eeprom_len = bnx2_get_eeprom_len,
7407 .get_eeprom = bnx2_get_eeprom,
7408 .set_eeprom = bnx2_set_eeprom,
7409 .get_coalesce = bnx2_get_coalesce,
7410 .set_coalesce = bnx2_set_coalesce,
7411 .get_ringparam = bnx2_get_ringparam,
7412 .set_ringparam = bnx2_set_ringparam,
7413 .get_pauseparam = bnx2_get_pauseparam,
7414 .set_pauseparam = bnx2_set_pauseparam,
7415 .get_rx_csum = bnx2_get_rx_csum,
7416 .set_rx_csum = bnx2_set_rx_csum,
7417 .set_tx_csum = bnx2_set_tx_csum,
7418 .set_sg = ethtool_op_set_sg,
7419 .set_tso = bnx2_set_tso,
7420 .self_test = bnx2_self_test,
7421 .get_strings = bnx2_get_strings,
7422 .phys_id = bnx2_phys_id,
7423 .get_ethtool_stats = bnx2_get_ethtool_stats,
7424 .get_sset_count = bnx2_get_sset_count,
7427 /* Called with rtnl_lock */
7428 static int
7429 bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
7431 struct mii_ioctl_data *data = if_mii(ifr);
7432 struct bnx2 *bp = netdev_priv(dev);
7433 int err;
7435 switch(cmd) {
7436 case SIOCGMIIPHY:
7437 data->phy_id = bp->phy_addr;
7439 /* fallthru */
7440 case SIOCGMIIREG: {
7441 u32 mii_regval;
7443 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
7444 return -EOPNOTSUPP;
7446 if (!netif_running(dev))
7447 return -EAGAIN;
7449 spin_lock_bh(&bp->phy_lock);
7450 err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval);
7451 spin_unlock_bh(&bp->phy_lock);
7453 data->val_out = mii_regval;
7455 return err;
7458 case SIOCSMIIREG:
7459 if (!capable(CAP_NET_ADMIN))
7460 return -EPERM;
7462 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
7463 return -EOPNOTSUPP;
7465 if (!netif_running(dev))
7466 return -EAGAIN;
7468 spin_lock_bh(&bp->phy_lock);
7469 err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in);
7470 spin_unlock_bh(&bp->phy_lock);
7472 return err;
7474 default:
7475 /* do nothing */
7476 break;
7478 return -EOPNOTSUPP;
7481 /* Called with rtnl_lock */
7482 static int
7483 bnx2_change_mac_addr(struct net_device *dev, void *p)
7485 struct sockaddr *addr = p;
7486 struct bnx2 *bp = netdev_priv(dev);
7488 if (!is_valid_ether_addr(addr->sa_data))
7489 return -EINVAL;
7491 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
7492 if (netif_running(dev))
7493 bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
7495 return 0;
7498 /* Called with rtnl_lock */
7499 static int
7500 bnx2_change_mtu(struct net_device *dev, int new_mtu)
7502 struct bnx2 *bp = netdev_priv(dev);
7504 if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) ||
7505 ((new_mtu + ETH_HLEN) < MIN_ETHERNET_PACKET_SIZE))
7506 return -EINVAL;
7508 dev->mtu = new_mtu;
7509 return (bnx2_change_ring_size(bp, bp->rx_ring_size, bp->tx_ring_size));
7512 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
7513 static void
7514 poll_bnx2(struct net_device *dev)
7516 struct bnx2 *bp = netdev_priv(dev);
7517 int i;
7519 for (i = 0; i < bp->irq_nvecs; i++) {
7520 disable_irq(bp->irq_tbl[i].vector);
7521 bnx2_interrupt(bp->irq_tbl[i].vector, &bp->bnx2_napi[i]);
7522 enable_irq(bp->irq_tbl[i].vector);
7525 #endif
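/* Added note: poll_bnx2() above backs netpoll users such as netconsole;
 * it temporarily disables each vector's IRQ and calls the interrupt
 * handler directly, so packets can still be processed when normal
 * interrupt delivery is not available.
 */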
7527 static void __devinit
7528 bnx2_get_5709_media(struct bnx2 *bp)
7530 u32 val = REG_RD(bp, BNX2_MISC_DUAL_MEDIA_CTRL);
7531 u32 bond_id = val & BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID;
7532 u32 strap;
7534 if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_C)
7535 return;
7536 else if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_S) {
7537 bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
7538 return;
7541 if (val & BNX2_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE)
7542 strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL) >> 21;
7543 else
7544 strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP) >> 8;
7546 if (PCI_FUNC(bp->pdev->devfn) == 0) {
7547 switch (strap) {
7548 case 0x4:
7549 case 0x5:
7550 case 0x6:
7551 bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
7552 return;
7554 } else {
7555 switch (strap) {
7556 case 0x1:
7557 case 0x2:
7558 case 0x4:
7559 bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
7560 return;
7565 static void __devinit
7566 bnx2_get_pci_speed(struct bnx2 *bp)
7568 u32 reg;
7570 reg = REG_RD(bp, BNX2_PCICFG_MISC_STATUS);
7571 if (reg & BNX2_PCICFG_MISC_STATUS_PCIX_DET) {
7572 u32 clkreg;
7574 bp->flags |= BNX2_FLAG_PCIX;
7576 clkreg = REG_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS);
7578 clkreg &= BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
7579 switch (clkreg) {
7580 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
7581 bp->bus_speed_mhz = 133;
7582 break;
7584 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
7585 bp->bus_speed_mhz = 100;
7586 break;
7588 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
7589 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
7590 bp->bus_speed_mhz = 66;
7591 break;
7593 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
7594 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
7595 bp->bus_speed_mhz = 50;
7596 break;
7598 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
7599 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
7600 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
7601 bp->bus_speed_mhz = 33;
7602 break;
7603 }
7604 }
7605 else {
7606 if (reg & BNX2_PCICFG_MISC_STATUS_M66EN)
7607 bp->bus_speed_mhz = 66;
7608 else
7609 bp->bus_speed_mhz = 33;
7612 if (reg & BNX2_PCICFG_MISC_STATUS_32BIT_DET)
7613 bp->flags |= BNX2_FLAG_PCI_32BIT;
7617 static int __devinit
7618 bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
7620 struct bnx2 *bp;
7621 unsigned long mem_len;
7622 int rc, i, j;
7623 u32 reg;
7624 u64 dma_mask, persist_dma_mask;
7626 SET_NETDEV_DEV(dev, &pdev->dev);
7627 bp = netdev_priv(dev);
7629 bp->flags = 0;
7630 bp->phy_flags = 0;
7632 /* enable device (incl. PCI PM wakeup), and bus-mastering */
7633 rc = pci_enable_device(pdev);
7634 if (rc) {
7635 dev_err(&pdev->dev, "Cannot enable PCI device, aborting.\n");
7636 goto err_out;
7639 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
7640 dev_err(&pdev->dev,
7641 "Cannot find PCI device base address, aborting.\n");
7642 rc = -ENODEV;
7643 goto err_out_disable;
7646 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
7647 if (rc) {
7648 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting.\n");
7649 goto err_out_disable;
7652 pci_set_master(pdev);
7653 pci_save_state(pdev);
7655 bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
7656 if (bp->pm_cap == 0) {
7657 dev_err(&pdev->dev,
7658 "Cannot find power management capability, aborting.\n");
7659 rc = -EIO;
7660 goto err_out_release;
7663 bp->dev = dev;
7664 bp->pdev = pdev;
7666 spin_lock_init(&bp->phy_lock);
7667 spin_lock_init(&bp->indirect_lock);
7668 #ifdef BCM_CNIC
7669 mutex_init(&bp->cnic_lock);
7670 #endif
7671 INIT_WORK(&bp->reset_task, bnx2_reset_task);
7673 dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0);
7674 mem_len = MB_GET_CID_ADDR(TX_TSS_CID + TX_MAX_TSS_RINGS + 1);
7675 dev->mem_end = dev->mem_start + mem_len;
7676 dev->irq = pdev->irq;
7678 bp->regview = ioremap_nocache(dev->base_addr, mem_len);
7680 if (!bp->regview) {
7681 dev_err(&pdev->dev, "Cannot map register space, aborting.\n");
7682 rc = -ENOMEM;
7683 goto err_out_release;
7686 /* Configure byte swap and enable write to the reg_window registers.
7687 * Rely on the CPU to do target byte swapping on big-endian systems;
7688 * the chip's target access swapping will not swap all accesses.
7689 */
7690 pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG,
7691 BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
7692 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);
7694 bnx2_set_power_state(bp, PCI_D0);
7696 bp->chip_id = REG_RD(bp, BNX2_MISC_ID);
7698 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
7699 if (pci_find_capability(pdev, PCI_CAP_ID_EXP) == 0) {
7700 dev_err(&pdev->dev,
7701 "Cannot find PCIE capability, aborting.\n");
7702 rc = -EIO;
7703 goto err_out_unmap;
7705 bp->flags |= BNX2_FLAG_PCIE;
7706 if (CHIP_REV(bp) == CHIP_REV_Ax)
7707 bp->flags |= BNX2_FLAG_JUMBO_BROKEN;
7708 } else {
7709 bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
7710 if (bp->pcix_cap == 0) {
7711 dev_err(&pdev->dev,
7712 "Cannot find PCIX capability, aborting.\n");
7713 rc = -EIO;
7714 goto err_out_unmap;
7718 if (CHIP_NUM(bp) == CHIP_NUM_5709 && CHIP_REV(bp) != CHIP_REV_Ax) {
7719 if (pci_find_capability(pdev, PCI_CAP_ID_MSIX))
7720 bp->flags |= BNX2_FLAG_MSIX_CAP;
7723 if (CHIP_ID(bp) != CHIP_ID_5706_A0 && CHIP_ID(bp) != CHIP_ID_5706_A1) {
7724 if (pci_find_capability(pdev, PCI_CAP_ID_MSI))
7725 bp->flags |= BNX2_FLAG_MSI_CAP;
7728 /* 5708 cannot support DMA addresses > 40-bit. */
7729 if (CHIP_NUM(bp) == CHIP_NUM_5708)
7730 persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
7731 else
7732 persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
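/* Added illustration: DMA_BIT_MASK(40) evaluates to ((1ULL << 40) - 1),
 * i.e. 0xffffffffffULL, so on a 5708 both the streaming and coherent
 * DMA masks passed to pci_set_dma_mask()/pci_set_consistent_dma_mask()
 * below are capped at 40 address bits.
 */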
7734 /* Configure DMA attributes. */
7735 if (pci_set_dma_mask(pdev, dma_mask) == 0) {
7736 dev->features |= NETIF_F_HIGHDMA;
7737 rc = pci_set_consistent_dma_mask(pdev, persist_dma_mask);
7738 if (rc) {
7739 dev_err(&pdev->dev,
7740 "pci_set_consistent_dma_mask failed, aborting.\n");
7741 goto err_out_unmap;
7743 } else if ((rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) != 0) {
7744 dev_err(&pdev->dev, "System does not support DMA, aborting.\n");
7745 goto err_out_unmap;
7748 if (!(bp->flags & BNX2_FLAG_PCIE))
7749 bnx2_get_pci_speed(bp);
7751 /* 5706A0 may falsely detect SERR and PERR. */
7752 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
7753 reg = REG_RD(bp, PCI_COMMAND);
7754 reg &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
7755 REG_WR(bp, PCI_COMMAND, reg);
7757 else if ((CHIP_ID(bp) == CHIP_ID_5706_A1) &&
7758 !(bp->flags & BNX2_FLAG_PCIX)) {
7760 dev_err(&pdev->dev,
7761 "5706 A1 can only be used in a PCIX bus, aborting.\n");
7762 goto err_out_unmap;
7765 bnx2_init_nvram(bp);
7767 reg = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_SIGNATURE);
7769 if ((reg & BNX2_SHM_HDR_SIGNATURE_SIG_MASK) ==
7770 BNX2_SHM_HDR_SIGNATURE_SIG) {
7771 u32 off = PCI_FUNC(pdev->devfn) << 2;
7773 bp->shmem_base = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_ADDR_0 + off);
7774 } else
7775 bp->shmem_base = HOST_VIEW_SHMEM_BASE;
7777 /* Get the permanent MAC address. First we need to make sure the
7778 * firmware is actually running.
7779 */
7780 reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_SIGNATURE);
7782 if ((reg & BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
7783 BNX2_DEV_INFO_SIGNATURE_MAGIC) {
7784 dev_err(&pdev->dev, "Firmware not running, aborting.\n");
7785 rc = -ENODEV;
7786 goto err_out_unmap;
7789 reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_BC_REV);
7790 for (i = 0, j = 0; i < 3; i++) {
7791 u8 num, k, skip0;
7793 num = (u8) (reg >> (24 - (i * 8)));
7794 for (k = 100, skip0 = 1; k >= 1; num %= k, k /= 10) {
7795 if (num >= k || !skip0 || k == 1) {
7796 bp->fw_version[j++] = (num / k) + '0';
7797 skip0 = 0;
7800 if (i != 2)
7801 bp->fw_version[j++] = '.';
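/* Added illustration: the loop above renders the top three bytes of
 * BNX2_DEV_INFO_BC_REV as a dotted decimal string with leading zeros
 * trimmed; e.g. a register value of 0x040a0600 becomes "4.10.6".
 */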
7803 reg = bnx2_shmem_rd(bp, BNX2_PORT_FEATURE);
7804 if (reg & BNX2_PORT_FEATURE_WOL_ENABLED)
7805 bp->wol = 1;
7807 if (reg & BNX2_PORT_FEATURE_ASF_ENABLED) {
7808 bp->flags |= BNX2_FLAG_ASF_ENABLE;
7810 for (i = 0; i < 30; i++) {
7811 reg = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
7812 if (reg & BNX2_CONDITION_MFW_RUN_MASK)
7813 break;
7814 msleep(10);
7817 reg = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
7818 reg &= BNX2_CONDITION_MFW_RUN_MASK;
7819 if (reg != BNX2_CONDITION_MFW_RUN_UNKNOWN &&
7820 reg != BNX2_CONDITION_MFW_RUN_NONE) {
7821 u32 addr = bnx2_shmem_rd(bp, BNX2_MFW_VER_PTR);
7823 bp->fw_version[j++] = ' ';
7824 for (i = 0; i < 3; i++) {
7825 reg = bnx2_reg_rd_ind(bp, addr + i * 4);
7826 reg = swab32(reg);
7827 memcpy(&bp->fw_version[j], &reg, 4);
7828 j += 4;
7832 reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_MAC_UPPER);
7833 bp->mac_addr[0] = (u8) (reg >> 8);
7834 bp->mac_addr[1] = (u8) reg;
7836 reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_MAC_LOWER);
7837 bp->mac_addr[2] = (u8) (reg >> 24);
7838 bp->mac_addr[3] = (u8) (reg >> 16);
7839 bp->mac_addr[4] = (u8) (reg >> 8);
7840 bp->mac_addr[5] = (u8) reg;
7842 bp->tx_ring_size = MAX_TX_DESC_CNT;
7843 bnx2_set_rx_ring_size(bp, 255);
7845 bp->rx_csum = 1;
7847 bp->tx_quick_cons_trip_int = 20;
7848 bp->tx_quick_cons_trip = 20;
7849 bp->tx_ticks_int = 80;
7850 bp->tx_ticks = 80;
7852 bp->rx_quick_cons_trip_int = 6;
7853 bp->rx_quick_cons_trip = 6;
7854 bp->rx_ticks_int = 18;
7855 bp->rx_ticks = 18;
7857 bp->stats_ticks = USEC_PER_SEC & BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
7859 bp->current_interval = BNX2_TIMER_INTERVAL;
7861 bp->phy_addr = 1;
7863 /* Disable WOL support if we are running on a SERDES chip. */
7864 if (CHIP_NUM(bp) == CHIP_NUM_5709)
7865 bnx2_get_5709_media(bp);
7866 else if (CHIP_BOND_ID(bp) & CHIP_BOND_ID_SERDES_BIT)
7867 bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
7869 bp->phy_port = PORT_TP;
7870 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
7871 bp->phy_port = PORT_FIBRE;
7872 reg = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
7873 if (!(reg & BNX2_SHARED_HW_CFG_GIG_LINK_ON_VAUX)) {
7874 bp->flags |= BNX2_FLAG_NO_WOL;
7875 bp->wol = 0;
7877 if (CHIP_NUM(bp) == CHIP_NUM_5706) {
7878 /* Don't do parallel detect on this board because of
7879 * some board problems. The link will not go down
7880 * if we do parallel detect.
7881 */
7882 if (pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
7883 pdev->subsystem_device == 0x310c)
7884 bp->phy_flags |= BNX2_PHY_FLAG_NO_PARALLEL;
7885 } else {
7886 bp->phy_addr = 2;
7887 if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G)
7888 bp->phy_flags |= BNX2_PHY_FLAG_2_5G_CAPABLE;
7890 } else if (CHIP_NUM(bp) == CHIP_NUM_5706 ||
7891 CHIP_NUM(bp) == CHIP_NUM_5708)
7892 bp->phy_flags |= BNX2_PHY_FLAG_CRC_FIX;
7893 else if (CHIP_NUM(bp) == CHIP_NUM_5709 &&
7894 (CHIP_REV(bp) == CHIP_REV_Ax ||
7895 CHIP_REV(bp) == CHIP_REV_Bx))
7896 bp->phy_flags |= BNX2_PHY_FLAG_DIS_EARLY_DAC;
7898 bnx2_init_fw_cap(bp);
7900 if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
7901 (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
7902 (CHIP_ID(bp) == CHIP_ID_5708_B1) ||
7903 !(REG_RD(bp, BNX2_PCI_CONFIG_3) & BNX2_PCI_CONFIG_3_VAUX_PRESET)) {
7904 bp->flags |= BNX2_FLAG_NO_WOL;
7905 bp->wol = 0;
7908 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
7909 bp->tx_quick_cons_trip_int =
7910 bp->tx_quick_cons_trip;
7911 bp->tx_ticks_int = bp->tx_ticks;
7912 bp->rx_quick_cons_trip_int =
7913 bp->rx_quick_cons_trip;
7914 bp->rx_ticks_int = bp->rx_ticks;
7915 bp->comp_prod_trip_int = bp->comp_prod_trip;
7916 bp->com_ticks_int = bp->com_ticks;
7917 bp->cmd_ticks_int = bp->cmd_ticks;
7920 /* Disable MSI on the 5706 if an AMD 8132 bridge is found.
7921 *
7922 * MSI is defined to be a 32-bit write. The 5706 does 64-bit MSI writes
7923 * with byte enables disabled on the unused 32-bit word. This is legal
7924 * but causes problems on the AMD 8132, which will eventually stop
7925 * responding after a while.
7926 *
7927 * AMD believes this incompatibility is unique to the 5706, and
7928 * prefers to locally disable MSI rather than globally disabling it.
7929 */
7930 if (CHIP_NUM(bp) == CHIP_NUM_5706 && disable_msi == 0) {
7931 struct pci_dev *amd_8132 = NULL;
7933 while ((amd_8132 = pci_get_device(PCI_VENDOR_ID_AMD,
7934 PCI_DEVICE_ID_AMD_8132_BRIDGE,
7935 amd_8132))) {
7937 if (amd_8132->revision >= 0x10 &&
7938 amd_8132->revision <= 0x13) {
7939 disable_msi = 1;
7940 pci_dev_put(amd_8132);
7941 break;
7946 bnx2_set_default_link(bp);
7947 bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
7949 init_timer(&bp->timer);
7950 bp->timer.expires = RUN_AT(BNX2_TIMER_INTERVAL);
7951 bp->timer.data = (unsigned long) bp;
7952 bp->timer.function = bnx2_timer;
7954 return 0;
7956 err_out_unmap:
7957 if (bp->regview) {
7958 iounmap(bp->regview);
7959 bp->regview = NULL;
7962 err_out_release:
7963 pci_release_regions(pdev);
7965 err_out_disable:
7966 pci_disable_device(pdev);
7967 pci_set_drvdata(pdev, NULL);
7969 err_out:
7970 return rc;
7973 static char * __devinit
7974 bnx2_bus_string(struct bnx2 *bp, char *str)
7976 char *s = str;
7978 if (bp->flags & BNX2_FLAG_PCIE) {
7979 s += sprintf(s, "PCI Express");
7980 } else {
7981 s += sprintf(s, "PCI");
7982 if (bp->flags & BNX2_FLAG_PCIX)
7983 s += sprintf(s, "-X");
7984 if (bp->flags & BNX2_FLAG_PCI_32BIT)
7985 s += sprintf(s, " 32-bit");
7986 else
7987 s += sprintf(s, " 64-bit");
7988 s += sprintf(s, " %dMHz", bp->bus_speed_mhz);
7990 return str;
7993 static void __devinit
7994 bnx2_init_napi(struct bnx2 *bp)
7996 int i;
7998 for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
7999 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
8000 int (*poll)(struct napi_struct *, int);
8002 if (i == 0)
8003 poll = bnx2_poll;
8004 else
8005 poll = bnx2_poll_msix;
8007 netif_napi_add(bp->dev, &bp->bnx2_napi[i].napi, poll, 64);
8008 bnapi->bp = bp;
8012 static const struct net_device_ops bnx2_netdev_ops = {
8013 .ndo_open = bnx2_open,
8014 .ndo_start_xmit = bnx2_start_xmit,
8015 .ndo_stop = bnx2_close,
8016 .ndo_get_stats = bnx2_get_stats,
8017 .ndo_set_rx_mode = bnx2_set_rx_mode,
8018 .ndo_do_ioctl = bnx2_ioctl,
8019 .ndo_validate_addr = eth_validate_addr,
8020 .ndo_set_mac_address = bnx2_change_mac_addr,
8021 .ndo_change_mtu = bnx2_change_mtu,
8022 .ndo_tx_timeout = bnx2_tx_timeout,
8023 #ifdef BCM_VLAN
8024 .ndo_vlan_rx_register = bnx2_vlan_rx_register,
8025 #endif
8026 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
8027 .ndo_poll_controller = poll_bnx2,
8028 #endif
8031 static int __devinit
8032 bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
8034 static int version_printed = 0;
8035 struct net_device *dev = NULL;
8036 struct bnx2 *bp;
8037 int rc;
8038 char str[40];
8040 if (version_printed++ == 0)
8041 printk(KERN_INFO "%s", version);
8043 /* dev is zeroed by alloc_etherdev_mq() */
8044 dev = alloc_etherdev_mq(sizeof(*bp), TX_MAX_RINGS);
8046 if (!dev)
8047 return -ENOMEM;
8049 rc = bnx2_init_board(pdev, dev);
8050 if (rc < 0) {
8051 free_netdev(dev);
8052 return rc;
8055 dev->netdev_ops = &bnx2_netdev_ops;
8056 dev->watchdog_timeo = TX_TIMEOUT;
8057 dev->ethtool_ops = &bnx2_ethtool_ops;
8059 bp = netdev_priv(dev);
8060 bnx2_init_napi(bp);
8062 pci_set_drvdata(pdev, dev);
8064 rc = bnx2_request_firmware(bp);
8065 if (rc)
8066 goto error;
8068 memcpy(dev->dev_addr, bp->mac_addr, 6);
8069 memcpy(dev->perm_addr, bp->mac_addr, 6);
8071 dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
8072 if (CHIP_NUM(bp) == CHIP_NUM_5709)
8073 dev->features |= NETIF_F_IPV6_CSUM;
8075 #ifdef BCM_VLAN
8076 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
8077 #endif
8078 dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
8079 if (CHIP_NUM(bp) == CHIP_NUM_5709)
8080 dev->features |= NETIF_F_TSO6;
8082 if ((rc = register_netdev(dev))) {
8083 dev_err(&pdev->dev, "Cannot register net device\n");
8084 goto error;
8087 printk(KERN_INFO "%s: %s (%c%d) %s found at mem %lx, "
8088 "IRQ %d, node addr %pM\n",
8089 dev->name,
8090 board_info[ent->driver_data].name,
8091 ((CHIP_ID(bp) & 0xf000) >> 12) + 'A',
8092 ((CHIP_ID(bp) & 0x0ff0) >> 4),
8093 bnx2_bus_string(bp, str),
8094 dev->base_addr,
8095 bp->pdev->irq, dev->dev_addr);
8097 return 0;
8099 error:
8100 if (bp->mips_firmware)
8101 release_firmware(bp->mips_firmware);
8102 if (bp->rv2p_firmware)
8103 release_firmware(bp->rv2p_firmware);
8105 if (bp->regview)
8106 iounmap(bp->regview);
8107 pci_release_regions(pdev);
8108 pci_disable_device(pdev);
8109 pci_set_drvdata(pdev, NULL);
8110 free_netdev(dev);
8111 return rc;
8114 static void __devexit
8115 bnx2_remove_one(struct pci_dev *pdev)
8117 struct net_device *dev = pci_get_drvdata(pdev);
8118 struct bnx2 *bp = netdev_priv(dev);
8120 flush_scheduled_work();
8122 unregister_netdev(dev);
8124 if (bp->mips_firmware)
8125 release_firmware(bp->mips_firmware);
8126 if (bp->rv2p_firmware)
8127 release_firmware(bp->rv2p_firmware);
8129 if (bp->regview)
8130 iounmap(bp->regview);
8132 free_netdev(dev);
8133 pci_release_regions(pdev);
8134 pci_disable_device(pdev);
8135 pci_set_drvdata(pdev, NULL);
8138 static int
8139 bnx2_suspend(struct pci_dev *pdev, pm_message_t state)
8141 struct net_device *dev = pci_get_drvdata(pdev);
8142 struct bnx2 *bp = netdev_priv(dev);
8144 /* PCI register 4 needs to be saved whether netif_running() or not.
8145 * MSI address and data need to be saved if using MSI and
8146 * netif_running().
8147 */
8148 pci_save_state(pdev);
8149 if (!netif_running(dev))
8150 return 0;
8152 flush_scheduled_work();
8153 bnx2_netif_stop(bp);
8154 netif_device_detach(dev);
8155 del_timer_sync(&bp->timer);
8156 bnx2_shutdown_chip(bp);
8157 bnx2_free_skbs(bp);
8158 bnx2_set_power_state(bp, pci_choose_state(pdev, state));
8159 return 0;
8162 static int
8163 bnx2_resume(struct pci_dev *pdev)
8165 struct net_device *dev = pci_get_drvdata(pdev);
8166 struct bnx2 *bp = netdev_priv(dev);
8168 pci_restore_state(pdev);
8169 if (!netif_running(dev))
8170 return 0;
8172 bnx2_set_power_state(bp, PCI_D0);
8173 netif_device_attach(dev);
8174 bnx2_init_nic(bp, 1);
8175 bnx2_netif_start(bp);
8176 return 0;
8179 /**
8180 * bnx2_io_error_detected - called when a PCI error is detected
8181 * @pdev: Pointer to PCI device
8182 * @state: The current PCI connection state
8183 *
8184 * This function is called after a PCI bus error affecting
8185 * this device has been detected.
8186 */
8187 static pci_ers_result_t bnx2_io_error_detected(struct pci_dev *pdev,
8188 pci_channel_state_t state)
8190 struct net_device *dev = pci_get_drvdata(pdev);
8191 struct bnx2 *bp = netdev_priv(dev);
8193 rtnl_lock();
8194 netif_device_detach(dev);
8196 if (netif_running(dev)) {
8197 bnx2_netif_stop(bp);
8198 del_timer_sync(&bp->timer);
8199 bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
8202 pci_disable_device(pdev);
8203 rtnl_unlock();
8205 /* Request a slot reset. */
8206 return PCI_ERS_RESULT_NEED_RESET;
8209 /**
8210 * bnx2_io_slot_reset - called after the PCI bus has been reset
8211 * @pdev: Pointer to PCI device
8212 *
8213 * Restart the card from scratch, as if from a cold boot.
8214 */
8215 static pci_ers_result_t bnx2_io_slot_reset(struct pci_dev *pdev)
8217 struct net_device *dev = pci_get_drvdata(pdev);
8218 struct bnx2 *bp = netdev_priv(dev);
8220 rtnl_lock();
8221 if (pci_enable_device(pdev)) {
8222 dev_err(&pdev->dev,
8223 "Cannot re-enable PCI device after reset.\n");
8224 rtnl_unlock();
8225 return PCI_ERS_RESULT_DISCONNECT;
8227 pci_set_master(pdev);
8228 pci_restore_state(pdev);
8230 if (netif_running(dev)) {
8231 bnx2_set_power_state(bp, PCI_D0);
8232 bnx2_init_nic(bp, 1);
8235 rtnl_unlock();
8236 return PCI_ERS_RESULT_RECOVERED;
8239 /**
8240 * bnx2_io_resume - called when traffic can start flowing again
8241 * @pdev: Pointer to PCI device
8242 *
8243 * This callback is called when the error recovery driver tells us that
8244 * it's OK to resume normal operation.
8245 */
8246 static void bnx2_io_resume(struct pci_dev *pdev)
8248 struct net_device *dev = pci_get_drvdata(pdev);
8249 struct bnx2 *bp = netdev_priv(dev);
8251 rtnl_lock();
8252 if (netif_running(dev))
8253 bnx2_netif_start(bp);
8255 netif_device_attach(dev);
8256 rtnl_unlock();
8259 static struct pci_error_handlers bnx2_err_handler = {
8260 .error_detected = bnx2_io_error_detected,
8261 .slot_reset = bnx2_io_slot_reset,
8262 .resume = bnx2_io_resume,
8265 static struct pci_driver bnx2_pci_driver = {
8266 .name = DRV_MODULE_NAME,
8267 .id_table = bnx2_pci_tbl,
8268 .probe = bnx2_init_one,
8269 .remove = __devexit_p(bnx2_remove_one),
8270 .suspend = bnx2_suspend,
8271 .resume = bnx2_resume,
8272 .err_handler = &bnx2_err_handler,
8275 static int __init bnx2_init(void)
8277 return pci_register_driver(&bnx2_pci_driver);
8280 static void __exit bnx2_cleanup(void)
8282 pci_unregister_driver(&bnx2_pci_driver);
8285 module_init(bnx2_init);
8286 module_exit(bnx2_cleanup);